Repository: OpenSourceEconomics/estimagic Branch: main Commit: 15d2ef5b65ba Files: 381 Total size: 4.6 MB Directory structure: gitextract__osbgbsz/ ├── .github/ │ ├── CODE_OF_CONDUCT.md │ ├── ISSUE_TEMPLATE/ │ │ ├── bug-report.md │ │ ├── enhancement.md │ │ └── feature_request.md │ ├── PULL_REQUEST_TEMPLATE/ │ │ └── pull_request_template.md │ └── workflows/ │ ├── main.yml │ └── publish-to-pypi.yml ├── .gitignore ├── .pre-commit-config.yaml ├── .readthedocs.yml ├── .tools/ │ ├── create_algo_selection_code.py │ ├── test_create_algo_selection_code.py │ └── update_algo_selection_hook.py ├── .yamllint.yml ├── CHANGES.md ├── CITATION ├── LICENSE ├── README.md ├── codecov.yml ├── docs/ │ ├── Makefile │ ├── make.bat │ └── source/ │ ├── _static/ │ │ ├── css/ │ │ │ ├── custom.css │ │ │ ├── termynal.css │ │ │ └── termynal_custom.css │ │ └── js/ │ │ ├── custom.js │ │ ├── require.js │ │ └── termynal.js │ ├── algorithms.md │ ├── conf.py │ ├── development/ │ │ ├── changes.md │ │ ├── code_of_conduct.md │ │ ├── credits.md │ │ ├── enhancement_proposals.md │ │ ├── ep-00-governance-model.md │ │ ├── ep-01-pytrees.md │ │ ├── ep-02-typing.md │ │ ├── ep-03-alignment.md │ │ ├── how_to_contribute.md │ │ ├── index.md │ │ └── styleguide.md │ ├── estimagic/ │ │ ├── explanation/ │ │ │ ├── bootstrap_ci.md │ │ │ ├── bootstrap_montecarlo_comparison.ipynb │ │ │ ├── cluster_robust_likelihood_inference.md │ │ │ └── index.md │ │ ├── index.md │ │ ├── reference/ │ │ │ └── index.md │ │ └── tutorials/ │ │ ├── bootstrap_overview.ipynb │ │ ├── estimation_tables_overview.ipynb │ │ ├── index.md │ │ ├── likelihood_overview.ipynb │ │ └── msm_overview.ipynb │ ├── explanation/ │ │ ├── explanation_of_numerical_optimizers.md │ │ ├── implementation_of_constraints.md │ │ ├── index.md │ │ ├── internal_optimizers.md │ │ ├── numdiff_background.md │ │ ├── tests_for_supported_optimizers.md │ │ └── why_optimization_is_hard.ipynb │ ├── how_to/ │ │ ├── how_to_add_optimizers.ipynb │ │ ├── how_to_algorithm_selection.ipynb │ │ 
├── how_to_benchmarking.ipynb │ │ ├── how_to_bounds.ipynb │ │ ├── how_to_change_plotting_backend.ipynb │ │ ├── how_to_constraints.md │ │ ├── how_to_criterion_function.ipynb │ │ ├── how_to_derivatives.ipynb │ │ ├── how_to_document_optimizers.md │ │ ├── how_to_errors_during_optimization.ipynb │ │ ├── how_to_globalization.ipynb │ │ ├── how_to_logging.ipynb │ │ ├── how_to_multistart.ipynb │ │ ├── how_to_scaling.md │ │ ├── how_to_slice_plot.ipynb │ │ ├── how_to_slice_plot_3d.ipynb │ │ ├── how_to_specify_algorithm_and_algo_options.md │ │ ├── how_to_start_parameters.md │ │ ├── how_to_visualize_histories.ipynb │ │ └── index.md │ ├── index.md │ ├── installation.md │ ├── reference/ │ │ ├── algo_options.md │ │ ├── batch_evaluators.md │ │ ├── index.md │ │ ├── typing.md │ │ └── utilities.md │ ├── refs.bib │ ├── tutorials/ │ │ ├── bayes_opt_tutorial.ipynb │ │ ├── index.md │ │ ├── numdiff_overview.ipynb │ │ └── optimization_overview.ipynb │ └── videos.md ├── pyproject.toml ├── src/ │ ├── estimagic/ │ │ ├── __init__.py │ │ ├── batch_evaluators.py │ │ ├── bootstrap.py │ │ ├── bootstrap_ci.py │ │ ├── bootstrap_helpers.py │ │ ├── bootstrap_outcomes.py │ │ ├── bootstrap_samples.py │ │ ├── config.py │ │ ├── estimate_ml.py │ │ ├── estimate_msm.py │ │ ├── estimation_summaries.py │ │ ├── estimation_table.py │ │ ├── examples/ │ │ │ ├── __init__.py │ │ │ ├── diabetes.csv │ │ │ ├── exam_points.csv │ │ │ ├── logit.py │ │ │ └── sensitivity_probit_example_data.csv │ │ ├── lollipop_plot.py │ │ ├── ml_covs.py │ │ ├── msm_covs.py │ │ ├── msm_sensitivity.py │ │ ├── msm_weighting.py │ │ ├── py.typed │ │ ├── shared_covs.py │ │ └── utilities.py │ └── optimagic/ │ ├── __init__.py │ ├── algorithms.py │ ├── batch_evaluators.py │ ├── benchmarking/ │ │ ├── __init__.py │ │ ├── benchmark_reports.py │ │ ├── cartis_roberts.py │ │ ├── get_benchmark_problems.py │ │ ├── more_wild.py │ │ ├── noise_distributions.py │ │ ├── process_benchmark_results.py │ │ └── run_benchmark.py │ ├── config.py │ ├── constraints.py │ 
├── decorators.py │ ├── deprecations.py │ ├── differentiation/ │ │ ├── __init__.py │ │ ├── derivatives.py │ │ ├── finite_differences.py │ │ ├── generate_steps.py │ │ ├── numdiff_options.py │ │ └── richardson_extrapolation.py │ ├── examples/ │ │ ├── __init__.py │ │ ├── criterion_functions.py │ │ └── numdiff_functions.py │ ├── exceptions.py │ ├── logging/ │ │ ├── __init__.py │ │ ├── base.py │ │ ├── logger.py │ │ ├── read_log.py │ │ ├── sqlalchemy.py │ │ └── types.py │ ├── mark.py │ ├── optimization/ │ │ ├── __init__.py │ │ ├── algo_options.py │ │ ├── algorithm.py │ │ ├── convergence_report.py │ │ ├── create_optimization_problem.py │ │ ├── error_penalty.py │ │ ├── fun_value.py │ │ ├── history.py │ │ ├── internal_optimization_problem.py │ │ ├── multistart.py │ │ ├── multistart_options.py │ │ ├── optimization_logging.py │ │ ├── optimize.py │ │ ├── optimize_result.py │ │ ├── process_results.py │ │ └── scipy_aliases.py │ ├── optimizers/ │ │ ├── __init__.py │ │ ├── _pounders/ │ │ │ ├── __init__.py │ │ │ ├── _conjugate_gradient.py │ │ │ ├── _steihaug_toint.py │ │ │ ├── _trsbox.py │ │ │ ├── bntr.py │ │ │ ├── gqtpar.py │ │ │ ├── linear_subsolvers.py │ │ │ ├── pounders_auxiliary.py │ │ │ └── pounders_history.py │ │ ├── bayesian_optimizer.py │ │ ├── bhhh.py │ │ ├── fides.py │ │ ├── gfo_optimizers.py │ │ ├── iminuit_migrad.py │ │ ├── ipopt.py │ │ ├── nag_optimizers.py │ │ ├── neldermead.py │ │ ├── nevergrad_optimizers.py │ │ ├── nlopt_optimizers.py │ │ ├── pounders.py │ │ ├── pygad/ │ │ │ └── __init__.py │ │ ├── pygad_optimizer.py │ │ ├── pygmo_optimizers.py │ │ ├── pyswarms_optimizers.py │ │ ├── scipy_optimizers.py │ │ ├── tao_optimizers.py │ │ └── tranquilo.py │ ├── parameters/ │ │ ├── __init__.py │ │ ├── block_trees.py │ │ ├── bounds.py │ │ ├── check_constraints.py │ │ ├── consolidate_constraints.py │ │ ├── constraint_tools.py │ │ ├── conversion.py │ │ ├── kernel_transformations.py │ │ ├── nonlinear_constraints.py │ │ ├── process_constraints.py │ │ ├── process_selectors.py │ 
│ ├── scale_conversion.py │ │ ├── scaling.py │ │ ├── space_conversion.py │ │ ├── tree_conversion.py │ │ └── tree_registry.py │ ├── py.typed │ ├── sandbox.py │ ├── shared/ │ │ ├── __init__.py │ │ ├── check_option_dicts.py │ │ ├── compat.py │ │ └── process_user_function.py │ ├── timing.py │ ├── type_conversion.py │ ├── typing.py │ ├── utilities.py │ └── visualization/ │ ├── __init__.py │ ├── backends.py │ ├── convergence_plot.py │ ├── deviation_plot.py │ ├── history_plots.py │ ├── plotting_utilities.py │ ├── profile_plot.py │ ├── slice_plot.py │ └── slice_plot_3d.py └── tests/ ├── __init__.py ├── conftest.py ├── estimagic/ │ ├── __init__.py │ ├── examples/ │ │ └── test_logit.py │ ├── pickled_statsmodels_ml_covs/ │ │ ├── logit_hessian.pickle │ │ ├── logit_hessian_matrix.pickle │ │ ├── logit_jacobian.pickle │ │ ├── logit_jacobian_matrix.pickle │ │ ├── logit_sandwich.pickle │ │ ├── probit_hessian.pickle │ │ ├── probit_hessian_matrix.pickle │ │ ├── probit_jacobian.pickle │ │ ├── probit_jacobian_matrix.pickle │ │ └── probit_sandwich.pickle │ ├── test_bootstrap.py │ ├── test_bootstrap_ci.py │ ├── test_bootstrap_outcomes.py │ ├── test_bootstrap_samples.py │ ├── test_estimate_ml.py │ ├── test_estimate_msm.py │ ├── test_estimate_msm_dict_params_and_moments.py │ ├── test_estimation_table.py │ ├── test_lollipop_plot.py │ ├── test_ml_covs.py │ ├── test_msm_covs.py │ ├── test_msm_sensitivity.py │ ├── test_msm_sensitivity_via_estimate_msm.py │ ├── test_msm_weighting.py │ └── test_shared.py └── optimagic/ ├── __init__.py ├── benchmarking/ │ ├── __init__.py │ ├── test_benchmark_reports.py │ ├── test_cartis_roberts.py │ ├── test_get_benchmark_problems.py │ ├── test_more_wild.py │ ├── test_noise_distributions.py │ └── test_run_benchmark.py ├── differentiation/ │ ├── binary_choice_inputs.pickle │ ├── test_compare_derivatives_with_jax.py │ ├── test_derivatives.py │ ├── test_finite_differences.py │ ├── test_generate_steps.py │ └── test_numdiff_options.py ├── examples/ │ └── 
test_criterion_functions.py ├── logging/ │ ├── test_base.py │ ├── test_logger.py │ ├── test_sqlalchemy.py │ └── test_types.py ├── optimization/ │ ├── test_algorithm.py │ ├── test_convergence_report.py │ ├── test_create_optimization_problem.py │ ├── test_error_penalty.py │ ├── test_fun_value.py │ ├── test_function_formats_ls.py │ ├── test_function_formats_scalar.py │ ├── test_history.py │ ├── test_history_collection.py │ ├── test_infinite_and_incomplete_bounds.py │ ├── test_internal_optimization_problem.py │ ├── test_invalid_jacobian_value.py │ ├── test_jax_derivatives.py │ ├── test_many_algorithms.py │ ├── test_multistart.py │ ├── test_multistart_options.py │ ├── test_optimize.py │ ├── test_optimize_result.py │ ├── test_params_versions.py │ ├── test_process_result.py │ ├── test_scipy_aliases.py │ ├── test_useful_exceptions.py │ ├── test_with_advanced_constraints.py │ ├── test_with_bounds.py │ ├── test_with_constraints.py │ ├── test_with_logging.py │ ├── test_with_multistart.py │ ├── test_with_nonlinear_constraints.py │ └── test_with_scaling.py ├── optimizers/ │ ├── __init__.py │ ├── _pounders/ │ │ ├── __init__.py │ │ ├── fixtures/ │ │ │ ├── add_points_until_main_model_fully_linear_i.yaml │ │ │ ├── add_points_until_main_model_fully_linear_ii.yaml │ │ │ ├── find_affine_points_nonzero_i.yaml │ │ │ ├── find_affine_points_nonzero_ii.yaml │ │ │ ├── find_affine_points_nonzero_iii.yaml │ │ │ ├── find_affine_points_zero_i.yaml │ │ │ ├── find_affine_points_zero_ii.yaml │ │ │ ├── find_affine_points_zero_iii.yaml │ │ │ ├── find_affine_points_zero_iv.yaml │ │ │ ├── get_coefficients_residual_model.yaml │ │ │ ├── get_interpolation_matrices_residual_model.yaml │ │ │ ├── interpolate_f_iter_4.yaml │ │ │ ├── interpolate_f_iter_7.yaml │ │ │ ├── pounders_example_data.csv │ │ │ ├── scalar_model.pkl │ │ │ ├── update_initial_residual_model.yaml │ │ │ ├── update_intial_residual_model.yaml │ │ │ ├── update_main_from_residual_model.yaml │ │ │ ├── update_main_with_new_accepted_x.yaml │ │ │ 
├── update_residual_model.yaml │ │ │ └── update_residual_model_with_new_accepted_x.yaml │ │ ├── test_linear_subsolvers.py │ │ ├── test_pounders_history.py │ │ ├── test_pounders_unit.py │ │ └── test_quadratic_subsolvers.py │ ├── test_bayesian_optimizer.py │ ├── test_bhhh.py │ ├── test_fides_options.py │ ├── test_gfo_optimizers.py │ ├── test_iminuit_migrad.py │ ├── test_ipopt_options.py │ ├── test_nag_optimizers.py │ ├── test_neldermead.py │ ├── test_nevergrad.py │ ├── test_pounders_integration.py │ ├── test_pygad_optimizer.py │ ├── test_pygmo_optimizers.py │ ├── test_pyswarms_optimizers.py │ ├── test_tao_optimizers.py │ └── test_tranquilo.py ├── parameters/ │ ├── test_block_trees.py │ ├── test_bounds.py │ ├── test_check_constraints.py │ ├── test_constraint_tools.py │ ├── test_conversion.py │ ├── test_kernel_transformations.py │ ├── test_nonlinear_constraints.py │ ├── test_process_constraints.py │ ├── test_process_selectors.py │ ├── test_scale_conversion.py │ ├── test_scaling.py │ ├── test_space_conversion.py │ ├── test_tree_conversion.py │ └── test_tree_registry.py ├── shared/ │ ├── __init__.py │ └── test_process_user_functions.py ├── test_algo_selection.py ├── test_batch_evaluators.py ├── test_constraints.py ├── test_decorators.py ├── test_deprecations.py ├── test_mark.py ├── test_timing.py ├── test_type_conversion.py ├── test_typed_dicts_consistency.py ├── test_utilities.py └── visualization/ ├── test_backends.py ├── test_convergence_plot.py ├── test_deviation_plot.py ├── test_history_plots.py ├── test_plotting_utilities.py ├── test_profile_plot.py ├── test_slice_plot.py └── test_slice_plot_3d.py ================================================ FILE CONTENTS ================================================ ================================================ FILE: .github/CODE_OF_CONDUCT.md ================================================ # Code of Conduct - [NumFOCUS Code of Conduct](https://numfocus.org/code-of-conduct) 
================================================ FILE: .github/ISSUE_TEMPLATE/bug-report.md ================================================ --- name: Bug Report about: Create a report to help us improve title: '' labels: bug assignees: '' --- ### Bug description A clear and concise description of what the bug is. ### To Reproduce Ideally, provide a minimal code example. If that's not possible, describe steps to reproduce the bug. ### Expected behavior A clear and concise description of what you expected to happen. ### Screenshots/Error messages If applicable, add screenshots to help explain your problem. ### System - OS: [e.g. Ubuntu 18.04] - Version [e.g. 0.0.1] ================================================ FILE: .github/ISSUE_TEMPLATE/enhancement.md ================================================ --- name: Enhancement about: Enhance an existing component. title: '' labels: enhancement assignees: '' --- * optimagic version used, if any: * Python version, if any: * Operating System: ### What would you like to enhance and why? Is it related to an issue/problem? A clear and concise description of the current implementation and its limitations. ### Describe the solution you'd like A clear and concise description of what you want to happen. ### Describe alternatives you've considered A clear and concise description of any alternative solutions or features you've considered and why you have discarded them. ================================================ FILE: .github/ISSUE_TEMPLATE/feature_request.md ================================================ --- name: Feature request about: Suggest an idea for this project title: '' labels: feature-request assignees: '' --- ### Current situation A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]; Currently there is no way of [...] ### Desired Situation What functionality should become possible or easier? ### Proposed implementation How would you implement the new feature? 
Did you consider alternative implementations? You can start by describing interface changes like a new argument or a new function. There is no need to get too detailed here. ================================================ FILE: .github/PULL_REQUEST_TEMPLATE/pull_request_template.md ================================================ ### What problem do you want to solve? Reference the issue or discussion, if there is any. Provide a description of your proposed solution. ### Todo - [ ] Target the right branch and pick an appropriate title. - [ ] Put `Closes #XXXX` in the first PR comment to auto-close the relevant issue once the PR is accepted. This is not applicable if there is no corresponding issue. - [ ] Any steps that still need to be done. ================================================ FILE: .github/workflows/main.yml ================================================ --- name: main concurrency: group: ${{ github.head_ref || github.run_id }} cancel-in-progress: true on: push: branches: - main pull_request: branches: - '*' jobs: run-tests-linux: name: Run tests on ubuntu-latest py${{ matrix.python-version }} runs-on: ubuntu-latest strategy: fail-fast: false matrix: python-version: - '312' - '313' - '314' steps: - uses: actions/checkout@v4 - uses: prefix-dev/setup-pixi@v0.9.4 with: pixi-version: v0.65.0 cache: true cache-write: ${{ github.event_name == 'push' && github.ref_name == 'main' }} frozen: true environments: tests-linux-py${{ matrix.python-version }} - name: Run pytest shell: bash -el {0} run: pixi run -e tests-linux-py${{ matrix.python-version }} tests-with-cov - name: Upload coverage report. 
if: matrix.python-version == '312' uses: codecov/codecov-action@v4 with: token: ${{ secrets.CODECOV_TOKEN }} run-tests-win-and-mac: name: Run tests on ${{ matrix.os }} py${{ matrix.python-version }} runs-on: ${{ matrix.os }} strategy: fail-fast: false matrix: os: - macos-latest - windows-latest python-version: - '312' - '313' - '314' steps: - uses: actions/checkout@v4 - uses: prefix-dev/setup-pixi@v0.9.4 with: pixi-version: v0.65.0 cache: true cache-write: ${{ github.event_name == 'push' && github.ref_name == 'main' }} frozen: true environments: tests-py${{ matrix.python-version }} - name: Run pytest shell: bash -el {0} run: pixi run -e tests-py${{ matrix.python-version }} tests-fast run-tests-with-old-plotly: name: Run tests on ubuntu-latest with plotly < 6 runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: prefix-dev/setup-pixi@v0.9.4 with: pixi-version: v0.65.0 cache: true cache-write: ${{ github.event_name == 'push' && github.ref_name == 'main' }} frozen: true environments: tests-old-plotly - name: Run pytest shell: bash -el {0} run: pixi run -e tests-old-plotly tests-fast run-tests-nevergrad: name: Run nevergrad tests py${{ matrix.python-version }} runs-on: ubuntu-latest strategy: fail-fast: false matrix: python-version: - '312' - '313' - '314' steps: - uses: actions/checkout@v4 - uses: prefix-dev/setup-pixi@v0.9.4 with: pixi-version: v0.65.0 cache: true cache-write: ${{ github.event_name == 'push' && github.ref_name == 'main' }} frozen: true environments: tests-nevergrad-py${{ matrix.python-version }} - name: Run pytest shell: bash -el {0} run: >- pixi run -e tests-nevergrad-py${{ matrix.python-version }} pytest tests/optimagic/optimizers/test_nevergrad.py code-in-docs: name: Run code snippets in documentation runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: prefix-dev/setup-pixi@v0.9.4 with: pixi-version: v0.65.0 cache: true cache-write: ${{ github.event_name == 'push' && github.ref_name == 'main' }} frozen: true environments: 
tests-linux-py314 - name: Run doctest shell: bash -el {0} run: >- pixi run -e tests-linux-py314 python -m doctest -v docs/source/how_to/how_to_constraints.md run-mypy: name: Run mypy runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: prefix-dev/setup-pixi@v0.9.4 with: pixi-version: v0.65.0 cache: true cache-write: ${{ github.event_name == 'push' && github.ref_name == 'main' }} frozen: true environments: type-checking - name: Run mypy shell: bash -el {0} run: pixi run -e type-checking mypy ================================================ FILE: .github/workflows/publish-to-pypi.yml ================================================ --- name: PyPI on: push jobs: build-n-publish: name: Build and publish optimagic Python 🐍 distributions 📦 to PyPI runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - name: Set up Python 3.12 uses: actions/setup-python@v5 with: python-version: '3.12' - name: Install pypa/build run: >- python -m pip install build --user - name: Build a binary wheel and a source tarball run: >- python -m build --sdist --wheel --outdir dist/ - name: Publish distribution 📦 to PyPI if: startsWith(github.ref, 'refs/tags') uses: pypa/gh-action-pypi-publish@release/v1 with: password: ${{ secrets.PYPI_API_TOKEN_OPTIMAGIC }} ================================================ FILE: .gitignore ================================================ # AI CLAUDE.md # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] *$py.class # MacOS specific service store .DS_Store # C extensions *.so # Distribution / packaging .Python build/ develop-eggs/ dist/ downloads/ eggs/ .eggs/ lib/ lib64/ parts/ sdist/ var/ wheels/ *.egg-info/ .installed.cfg *.egg MANIFEST *build/ # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. 
*.manifest *.spec *.sublime-workspace *.sublime-project # Installer logs pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ .coverage .coverage.* .cache nosetests.xml coverage.xml *.cover .hypothesis/ .pytest_cache/ # Translations *.mo *.pot # Django stuff: *.log local_settings.py db.sqlite3 # Flask stuff: instance/ .webassets-cache # Scrapy stuff: .scrapy # Sphinx documentation docs/_build/ docs/build/ docs/source/_build/ docs/source/**/*.db docs/source/**/*.db-shm docs/source/**/*.db-wal docs/source/refs.bib.bak # PyBuilder target/ # Jupyter Notebook .ipynb_checkpoints # pyenv .python-version # celery beat schedule file celerybeat-schedule # SageMath parsed files *.sage.py # Environments .env .venv env/ venv/ ENV/ env.bak/ venv.bak/ .pixi/ # Spyder project settings .spyderproject .spyproject # VSCode project settings .vscode # Rope project settings .ropeproject # mkdocs documentation /site # mypy .mypy_cache/ *notes/ .idea/ *.bak *.db .pytask.sqlite3 src/estimagic/_version.py src/optimagic/_version.py *.~lock.* ================================================ FILE: .pre-commit-config.yaml ================================================ --- repos: - repo: meta hooks: - id: check-hooks-apply - id: check-useless-excludes # - id: identity # Prints all files passed to pre-commits. Debugging. 
- repo: https://github.com/lyz-code/yamlfix rev: 1.19.1 hooks: - id: yamlfix exclude: tests/optimagic/optimizers/_pounders/fixtures - repo: local hooks: - id: update-algo-selection-code name: update algo selection code entry: python .tools/update_algo_selection_hook.py language: python files: ^(src/optimagic/optimizers/|src/optimagic/algorithms\.py|\.tools/) require_serial: true additional_dependencies: - hatchling - ruff - repo: https://github.com/pre-commit/pre-commit-hooks rev: v6.0.0 hooks: - id: check-added-large-files args: - --maxkb=2500 exclude: tests/optimagic/optimizers/_pounders/fixtures/ - id: check-case-conflict - id: check-merge-conflict - id: check-vcs-permalinks - id: check-yaml - id: check-toml - id: debug-statements - id: end-of-file-fixer - id: fix-byte-order-marker types: - text - id: forbid-submodules - id: mixed-line-ending args: - --fix=lf description: Forces to replace line ending by the UNIX 'lf' character. - id: name-tests-test args: - --pytest-test-first - id: no-commit-to-branch args: - --branch - main - id: trailing-whitespace exclude: docs/ - id: check-ast - repo: https://github.com/adrienverge/yamllint.git rev: v1.38.0 hooks: - id: yamllint exclude: tests/optimagic/optimizers/_pounders/fixtures - repo: https://github.com/astral-sh/ruff-pre-commit rev: v0.15.5 hooks: # Run the linter. - id: ruff types_or: - python - pyi - jupyter args: - --fix # Run the formatter. - id: ruff-format types_or: - python - pyi - jupyter - repo: https://github.com/executablebooks/mdformat rev: 1.0.0 hooks: - id: mdformat additional_dependencies: - mdformat-gfm - mdformat-gfm-alerts - mdformat-ruff args: - --wrap - '88' files: (README\.md) - repo: https://github.com/executablebooks/mdformat rev: 1.0.0 hooks: - id: mdformat additional_dependencies: - mdformat-myst - mdformat-ruff args: - --wrap - '88' files: (docs/.) 
exclude: docs/source/how_to/how_to_specify_algorithm_and_algo_options.md - repo: https://github.com/kynan/nbstripout rev: 0.9.1 hooks: - id: nbstripout exclude: | (?x)^( docs/source/estimagic/tutorials/estimation_tables_overview.ipynb| docs/source/estimagic/explanation/bootstrap_montecarlo_comparison.ipynb| )$ args: - --drop-empty-cells ci: autoupdate_schedule: monthly skip: - update-algo-selection-code ================================================ FILE: .readthedocs.yml ================================================ --- version: 2 build: os: ubuntu-24.04 tools: python: '3.14' jobs: create_environment: - asdf plugin add pixi - asdf install pixi latest - asdf global pixi latest post_build: - pixi run -e docs build-docs - mkdir --parents $READTHEDOCS_OUTPUT/html/ - cp -a docs/build/html/. "$READTHEDOCS_OUTPUT/html" && rm -r docs/build ================================================ FILE: .tools/create_algo_selection_code.py ================================================ import importlib import inspect import pkgutil import textwrap from itertools import combinations from types import ModuleType from typing import Callable, Type from optimagic.config import OPTIMAGIC_ROOT from optimagic.optimization.algorithm import Algorithm from optimagic.typing import AggregationLevel def main() -> None: """Create the source code for algorithms.py. The main part of the generated code are nested dataclasses that enable filtered autocomplete for algorithm selection. Creating them entails the following steps: - Discover all modules that contain optimizer classes - Collect all optimizer classes - Create a mapping from a tuple of categories (e.g. Global, Bounded, ...) to the optimizer classes that belong to them. To find out which optimizers need to be included we use the attributes stored in optimizer_class.__algo_info__. 
- Create the dataclasses that enable autocomplete for algorithm selection In addition we need to create the code for import statements, a AlgoSelection base class and some code to instantiate the dataclasses. """ # create some basic inputs docstring = _get_docstring_code() modules = _import_optimizer_modules("optimagic.optimizers") all_algos = _get_all_algorithms(modules) filters = _get_filters() all_categories = list(filters) selection_info = _create_selection_info(all_algos, all_categories) # create the code for imports imports = _get_imports(modules) # create the code for the ABC AlgoSelection class parent_class_snippet = _get_base_class_code() # create the code for the dataclasses dataclass_snippets = [] for active_categories in selection_info: new_snippet = create_dataclass_code( active_categories=active_categories, all_categories=all_categories, selection_info=selection_info, ) dataclass_snippets.append(new_snippet) # create the code for the instantiation instantiation_snippet = _get_instantiation_code() # Combine all the content into a single string content = ( docstring + imports + "\n\n" + parent_class_snippet + "\n" + "\n\n".join(dataclass_snippets) + "\n\n" + instantiation_snippet ) # Write the combined content to the file with (OPTIMAGIC_ROOT / "algorithms.py").open("w") as f: f.write(content) # ====================================================================================== # Functions to collect algorithms # ====================================================================================== def _import_optimizer_modules(package_name: str) -> list[ModuleType]: """Collect all public modules in a given package in a list.""" package = importlib.import_module(package_name) modules = [] for _, module_name, is_pkg in pkgutil.walk_packages( package.__path__, package.__name__ + "." 
): module_parts = module_name.split(".") if all(not part.startswith("_") for part in module_parts) and not is_pkg: module = importlib.import_module(module_name) modules.append(module) return modules def _get_all_algorithms(modules: list[ModuleType]) -> dict[str, Type[Algorithm]]: """Collect all algorithms in modules.""" out = {} for module in modules: out.update(_get_algorithms_in_module(module)) return out def _get_algorithms_in_module(module: ModuleType) -> dict[str, Type[Algorithm]]: """Collect all algorithms in a single module.""" candidate_dict = dict(inspect.getmembers(module, inspect.isclass)) candidate_dict = { k: v for k, v in candidate_dict.items() if hasattr(v, "__algo_info__") } algos = {} for candidate in candidate_dict.values(): name = candidate.algo_info.name if issubclass(candidate, Algorithm) and candidate is not Algorithm: algos[name] = candidate return algos # ====================================================================================== # Functions to filter algorithms by selectors # ====================================================================================== def _is_gradient_based(algo: Type[Algorithm]) -> bool: return algo.algo_info.needs_jac # type: ignore def _is_gradient_free(algo: Type[Algorithm]) -> bool: return not _is_gradient_based(algo) def _is_global(algo: Type[Algorithm]) -> bool: return algo.algo_info.is_global # type: ignore def _is_local(algo: Type[Algorithm]) -> bool: return not _is_global(algo) def _is_bounded(algo: Type[Algorithm]) -> bool: return algo.algo_info.supports_bounds # type: ignore def _is_linear_constrained(algo: Type[Algorithm]) -> bool: return algo.algo_info.supports_linear_constraints # type: ignore def _is_nonlinear_constrained(algo: Type[Algorithm]) -> bool: return algo.algo_info.supports_nonlinear_constraints # type: ignore def _is_scalar(algo: Type[Algorithm]) -> bool: return algo.algo_info.solver_type == AggregationLevel.SCALAR # type: ignore def _is_least_squares(algo: Type[Algorithm]) -> 
bool: return algo.algo_info.solver_type == AggregationLevel.LEAST_SQUARES # type: ignore def _is_likelihood(algo: Type[Algorithm]) -> bool: return algo.algo_info.solver_type == AggregationLevel.LIKELIHOOD # type: ignore def _is_parallel(algo: Type[Algorithm]) -> bool: return algo.algo_info.supports_parallelism # type: ignore def _get_filters() -> dict[str, Callable[[Type[Algorithm]], bool]]: """Create a dict mapping from category names to filter functions.""" filters: dict[str, Callable[[Type[Algorithm]], bool]] = { "GradientBased": _is_gradient_based, "GradientFree": _is_gradient_free, "Global": _is_global, "Local": _is_local, "Bounded": _is_bounded, "LinearConstrained": _is_linear_constrained, "NonlinearConstrained": _is_nonlinear_constrained, "Scalar": _is_scalar, "LeastSquares": _is_least_squares, "Likelihood": _is_likelihood, "Parallel": _is_parallel, } return filters # ====================================================================================== # Functions to create a mapping from a tuple of selectors to subsets of the dict # mapping algorithm names to algorithm classes # ====================================================================================== def _create_selection_info( all_algos: dict[str, Type[Algorithm]], categories: list[str], ) -> dict[tuple[str, ...], dict[str, Type[Algorithm]]]: """Create a dict mapping from a tuple of selectors to subsets of the all_algos dict. Args: all_algos: Dictionary mapping algorithm names to algorithm classes. categories: List of categories to filter by. Returns: A dictionary mapping tuples of selectors to dictionaries of algorithm names and their corresponding classes. 
""" category_combinations = _generate_category_combinations(categories) out = {} for comb in category_combinations: filtered_algos = _apply_filters(all_algos, comb) if filtered_algos: out[comb] = filtered_algos return out def _generate_category_combinations(categories: list[str]) -> list[tuple[str, ...]]: """Generate all combinations of categories, sorted by length in descending order. Args: categories: A list of category names. Returns: A list of tuples, where each tuple represents a combination of categories. """ result: list[tuple[str, ...]] = [] for r in range(len(categories) + 1): result.extend(map(tuple, map(sorted, combinations(categories, r)))) return sorted(result, key=len, reverse=True) def _apply_filters( all_algos: dict[str, Type[Algorithm]], categories: tuple[str, ...] ) -> dict[str, Type[Algorithm]]: """Apply filters to the algorithms based on the given categories. Args: all_algos: A dictionary mapping algorithm names to algorithm classes. categories: A tuple of category names to filter by. Returns: filtered dictionary of algorithms that match all given categories. """ filtered = all_algos filters = _get_filters() for category in categories: filter_func = filters[category] filtered = {name: algo for name, algo in filtered.items() if filter_func(algo)} return filtered # ====================================================================================== # Functions to create code for the dataclasses # ====================================================================================== def create_dataclass_code( active_categories: tuple[str, ...], all_categories: list[str], selection_info: dict[tuple[str, ...], dict[str, Type[Algorithm]]], ) -> str: """Create the source code for a dataclass representing a selection of algorithms. Args: active_categories: A tuple of active category names. all_categories: A list of all category names. 
selection_info: A dictionary that maps tuples of category names to dictionaries of algorithm names and their corresponding classes. Returns: A string containing the source code for the dataclass. """ # get the children of the active categories children = _get_children(active_categories, all_categories, selection_info) # get the name of the class to be generated class_name = _get_class_name(active_categories) # get code for the dataclass fields field_template = " {name}: Type[{class_name}] = {class_name}" field_strings = [] for name, algo_class in selection_info[active_categories].items(): field_strings.append( field_template.format(name=name, class_name=algo_class.__name__) ) fields = "\n".join(field_strings) # get code for the properties to select children child_template = textwrap.dedent(""" @property def {new_category}(self) -> {class_name}: return {class_name}() """) child_template = textwrap.indent(child_template, " ") child_strings = [] for new_category, categories in children.items(): child_class_name = _get_class_name(categories) child_strings.append( child_template.format( new_category=new_category, class_name=child_class_name ) ) children_code = "\n".join(child_strings) # assemble the class out = "@dataclass(frozen=True)\n" out += f"class {class_name}(AlgoSelection):\n" out += fields + "\n" if children: out += children_code return out def _get_class_name(active_categories: tuple[str, ...]) -> str: """Get the name of the class based on the active categories.""" return "".join(active_categories) + "Algorithms" def _get_children( active_categories: tuple[str, ...], all_categories: list[str], selection_info: dict[tuple[str, ...], dict[str, Type[Algorithm]]], ) -> dict[str, tuple[str, ...]]: """Get the children of the active categories. Args: active_categories: A tuple of active category names. all_categories: A list of all category names. 
selection_info: A dictionary that maps tuples of category names to dictionaries of algorithm names and their corresponding classes. Returns: A dict mapping additional categories to a sorted tuple of categories that contains all active categories and the additional category. Entries are only included if the selected categories are in `selection_info`, i.e. if there exist algorithms that are compatible with all categories. """ inactive_categories = sorted(set(all_categories) - set(active_categories)) out = {} for new_cat in inactive_categories: new_comb = tuple(sorted(active_categories + (new_cat,))) if new_comb in selection_info: out[new_cat] = new_comb return out # ====================================================================================== # Functions to create the imports # ====================================================================================== def _get_imports(modules: list[ModuleType]) -> str: """Create source code to import all algorithms.""" snippets = [ "from typing import Type", "from dataclasses import dataclass", "from optimagic.optimization.algorithm import Algorithm", "from typing import cast", ] for module in modules: algorithms = _get_algorithms_in_module(module) class_names = [algo.__name__ for algo in algorithms.values()] for class_name in class_names: snippets.append(f"from {module.__name__} import {class_name}") return "\n".join(snippets) # ====================================================================================== # Functions to create the static parts of the code # ====================================================================================== def _get_base_class_code() -> str: """Get the source code for the AlgoSelection class.""" out = textwrap.dedent(""" @dataclass(frozen=True) class AlgoSelection: def _all(self) -> list[Type[Algorithm]]: raw = [field.default for field in self.__dataclass_fields__.values()] return cast(list[Type[Algorithm]], raw) def _available(self) -> list[Type[Algorithm]]: _all = 
self._all() return [ a for a in _all if a.algo_info.is_available # type: ignore ] @property def All(self) -> list[Type[Algorithm]]: return self._all() @property def Available(self) -> list[Type[Algorithm]]: return self._available() @property def AllNames(self) -> list[str]: return [str(a.name) for a in self._all()] @property def AvailableNames(self) -> list[str]: return [str(a.name) for a in self._available()] @property def _all_algorithms_dict(self) -> dict[str, Type[Algorithm]]: return {str(a.name): a for a in self._all()} @property def _available_algorithms_dict(self) -> dict[str, Type[Algorithm]]: return {str(a.name): a for a in self._available()} """) return out def _get_docstring_code() -> str: """Get the source code for the docstring of the AlgoSelection class.""" raw = ( '"""This code was auto-generated by a pre-commit hook and should not be ' "changed.\n\nIf you manually change this code, all of your changes will be " "overwritten the next time\nthe pre-commit hook runs.\n\nDetailed information " "on the purpose of the code can be found here:\n" "https://optimagic.readthedocs.io/en/latest/development/ep-02-typing.html#" 'algorithm-selection\n\n"""\n' ) out = textwrap.dedent(raw) return out def _get_instantiation_code() -> str: """Get the source code for instantiating some classes at the end of the module.""" out = textwrap.dedent(""" algos = Algorithms() global_algos = GlobalAlgorithms() ALL_ALGORITHMS = algos._all_algorithms_dict AVAILABLE_ALGORITHMS = algos._available_algorithms_dict GLOBAL_ALGORITHMS = global_algos._available_algorithms_dict """) return out if __name__ == "__main__": main() ================================================ FILE: .tools/test_create_algo_selection_code.py ================================================ from create_algo_selection_code import _generate_category_combinations def test_generate_category_combinations() -> None: categories = ["a", "b", "c"] got = _generate_category_combinations(categories) expected = [ ("a", 
"b", "c"), ("a", "b"), ("a", "c"), ("b", "c"), ("a",), ("b",), ("c",), ] assert got == expected ================================================ FILE: .tools/update_algo_selection_hook.py ================================================ #!/usr/bin/env python import importlib.util import subprocess import sys from pathlib import Path from typing import Any ROOT = Path(__file__).resolve().parents[1] # sys.executable guarantees we stay inside the pre‑commit venv PYTHON = [sys.executable] def run(cmd: list[str], **kwargs: Any) -> None: subprocess.check_call(cmd, cwd=ROOT, **kwargs) def ensure_optimagic_is_locally_installed() -> None: if importlib.util.find_spec("optimagic") is None: run(["uv", "pip", "install", "--python", sys.executable, "-e", "."]) def main() -> int: ensure_optimagic_is_locally_installed() run(PYTHON + [".tools/create_algo_selection_code.py"]) ruff_args = [ "--silent", "--config", "pyproject.toml", "src/optimagic/algorithms.py", ] run(["ruff", "format", *ruff_args]) run(["ruff", "check", "--fix", *ruff_args]) return 0 # explicit success code if __name__ == "__main__": sys.exit(main()) ================================================ FILE: .yamllint.yml ================================================ --- yaml-files: - '*.yaml' - '*.yml' - .yamllint rules: braces: enable brackets: enable colons: enable commas: enable comments: level: warning comments-indentation: level: warning document-end: disable document-start: level: warning empty-lines: enable empty-values: disable float-values: disable hyphens: enable indentation: {spaces: 2} key-duplicates: enable key-ordering: disable line-length: max: 88 allow-non-breakable-words: true allow-non-breakable-inline-mappings: false new-line-at-end-of-file: enable new-lines: type: unix octal-values: disable quoted-strings: disable trailing-spaces: enable truthy: level: warning ================================================ FILE: CHANGES.md ================================================ # Changes This is a 
record of all past optimagic releases and what went into them in reverse chronological order. We follow [semantic versioning](https://semver.org/) and all releases are available on [Anaconda.org](https://anaconda.org/optimagic-dev/optimagic). ## 0.5.3 This release introduces **multi-backend plotting** with support for matplotlib, bokeh, and altair backends (in addition to the existing plotly backend), **3D visualizations** of optimization problems, and several **new optimizer libraries** including PySwarms, PyGAD, and gradient-free-optimizers. It also adds **lazy loading** for optional dependencies to improve import times. Many contributions in this release were made by Google Summer of Code (GSoC) 2025 contributors. - {gh}`665` Skips nag_dfols tests when DFO-LS is not installed ({ghuser}`Swayam-maurya`). - {gh}`664` Adds `from __future__ import annotations` to constraints.py to fix annotations issue with Python 3.13 and NumPy 2.4 ({ghuser}`timmens`). - {gh}`660` Renames the `bayes_opt` parameter `n_iter` to `stopping_maxiter` ({ghuser}`spline2hg`). - {gh}`659` Removes `None` as a valid option for `stopping_criterion` in `convergence_plot` and updates the docstring ({ghuser}`szd5654125`). - {gh}`658` Enhances documentation and minor fixes in backend plotting ({ghuser}`r3kste`). - {gh}`654` Implements the altair plotting backend ({ghuser}`r3kste`). - {gh}`653` Adds `llms.txt` and `llms-full.txt` to documentation ({ghuser}`mostafafaheem`). - {gh}`652` Implements the bokeh plotting backend ({ghuser}`r3kste`). - {gh}`649` Implements backend plotting for `slice_plot` ({ghuser}`r3kste`). - {gh}`647` Implements backend plotting for `convergence_plot` ({ghuser}`r3kste`). - {gh}`645` Implements backend plotting for `profile_plot` ({ghuser}`r3kste`). - {gh}`644` Adds a how-to guide for changing plotting backends ({ghuser}`r3kste`). - {gh}`643` Skips doctest that fails due to negative signed zero handling ({ghuser}`r3kste`). 
- {gh}`641` Implements backend plotting for `params_plot` ({ghuser}`r3kste`). - {gh}`639` Adds optimizers from PySwarms ({ghuser}`spline2hg`). - {gh}`637` Adds note about `__future__` import ({ghuser}`spline2hg`). - {gh}`636` Wraps population-based optimizers from gradient-free-optimizers ({ghuser}`gauravmanmode`). - {gh}`633` Migrates bayesian-optimizer docs to new documentation style ({ghuser}`spline2hg`). - {gh}`632` Migrates nevergrad optimizers to new documentation style ({ghuser}`gauravmanmode`). - {gh}`631` Migrates iminuit docs to new documentation style ({ghuser}`spline2hg`). - {gh}`624` Wraps local optimizers from gradient-free-optimizers ({ghuser}`gauravmanmode`). - {gh}`621` Implements lazy loading for optional dependencies ({ghuser}`spline2hg`). - {gh}`619` Adopts the NumFOCUS code of conduct ({ghuser}`timmens`). - {gh}`616` Adds optimizers from PyGAD ({ghuser}`spline2hg`). - {gh}`600` Separates data preparation and plotting for `criterion_plot()` ({ghuser}`r3kste`). - {gh}`599` Implements the matplotlib backend for `criterion_plot()` ({ghuser}`r3kste`). - {gh}`581` Adds 3D visualizations of optimization problems ({ghuser}`shammeer-s`). - {gh}`554` Improves documentation of algorithm options ({ghuser}`janosg`). ## 0.5.2 This minor release adds support for two additional optimizer libraries: - [Nevergrad](https://github.com/facebookresearch/nevergrad): A library for gradient-free optimization developed by Facebook Research. - [Bayesian Optimization](https://github.com/bayesian-optimization/BayesianOptimization): A library for constrained bayesian global optimization with Gaussian processes. In addition, this release includes several bug fixes and improvements to the documentation. Many contributions in this release were made by Google Summer of Code (GSoC) 2025 applicants, with @gauravmanmode and @spline2hg being the accepted contributors. - {gh}`620` Uses interactive plotly figures in documentation ({ghuser}`timmens`). 
- {gh}`618` Improves bounds processing when no bounds are specified ({ghuser}`timmens`). - {gh}`615` Adds pre-commit hook that checks mypy version consistency ({ghuser}`timmens`). - {gh}`613` Exposes converter functionality ({ghuser}`spline2hg`). - {gh}`612` Fixes results processing to work with new cobyla optimizer ({ghuser}`janosg`). - {gh}`610` Adds `needs_bounds` and `supports_infinite_bounds` fields to algorithm info ({ghuser}`gauravmanmode`). - {gh}`608` Adds support for plotly >= 6 ({ghuser}`hmgaudecker`, {ghuser}`timmens`). - {gh}`607` Returns `run_explorations` results in a dataclass ({ghuser}`r3kste`). - {gh}`605` Enhances batch evaluator checking and processing, introduces the internal `BatchEvaluatorLiteral` literal, and updates CHANGES.md ({ghuser}`janosg`, {ghuser}`timmens`). - {gh}`602` Adds optimizer wrapper for bayesian-optimization package ({ghuser}`spline2hg`). - {gh}`601` Updates pre-commit hooks and fixes mypy issues ({ghuser}`janosg`). - {gh}`598` Fixes and adds links to GitHub in the documentation ({ghuser}`hamogu`). - {gh}`594` Refines newly added optimizer wrappers ({ghuser}`janosg`). - {gh}`591` Adds multiple optimizers from the nevergrad package ({ghuser}`gauravmanmode`). - {gh}`589` Rewrites the algorithm selection pre-commit hook in pure Python to address issues with bash scripts on Windows ({ghuser}`timmens`). - {gh}`586` and {gh}`592` Ensure the SciPy `disp` parameter is exposed for the following SciPy algorithms: slsqp, neldermead, powell, conjugate_gradient, newton_cg, cobyla, truncated_newton, trust_constr ({ghuser}`sefmef`, {ghuser}`TimBerti`). - {gh}`585` Exposes all parameters of [SciPy's BFGS](https://docs.scipy.org/doc/scipy/reference/optimize.minimize-bfgs.html) optimizer in optimagic ({ghuser}`TimBerti`). - {gh}`582` Adds support for handling infinite gradients during optimization ({ghuser}`Aziz-Shameem`). 
- {gh}`579` Implements a wrapper for the PSO optimizer from the [nevergrad](https://github.com/facebookresearch/nevergrad) package ({ghuser}`r3kste`). - {gh}`578` Integrates the `intersphinx-registry` package into the documentation for automatic linking to up-to-date external documentation ({ghuser}`Schefflera-Arboricola`). - {gh}`576` Wraps oneplusone optimizer from nevergrad ({ghuser}`gauravmanmode`, {ghuser}`gulshan-123`). - {gh}`572` and {gh}`573` Fix bugs in error handling for parameter selector processing and constraints checking ({ghuser}`hmgaudecker`). - {gh}`570` Adds a how-to guide for adding algorithms to optimagic and improves internal documentation ({ghuser}`janosg`). - {gh}`569` Implements a threading batch evaluator ({ghuser}`spline2hg`). - {gh}`568` Introduces an initial wrapper for the migrad optimizer from the [iminuit](https://github.com/scikit-hep/iminuit) package ({ghuser}`spline2hg`). - {gh}`567` Makes the `fun` argument optional when `fun_and_jac` is provided ({ghuser}`gauravmanmode`). - {gh}`563` Fixes a bug in input harmonization for history plotting ({ghuser}`gauravmanmode`). - {gh}`552` Refactors and extends the `History` class, removing the internal `HistoryArrays` class ({ghuser}`timmens`). - {gh}`485` Adds bootstrap weights functionality ({ghuser}`alanlujan91`). ## 0.5.1 This is a minor release that introduces the new algorithm selection tool and several small improvements. 
To learn more about the algorithm selection feature check out the following resources: - [How to specify and configure algorithms](https://optimagic.readthedocs.io/en/latest/how_to/how_to_specify_algorithm_and_algo_options.html) - [How to select local optimizers](https://optimagic.readthedocs.io/en/latest/how_to/how_to_algorithm_selection.html) - {gh}`549` Add support for Python 3.13 ({ghuser}`timmens`) - {gh}`550` and {gh}`534` implement the new algorithm selection tool ({ghuser}`janosg`) - {gh}`548` and {gh}`531` improve the documentation ({ghuser}`ChristianZimpelmann`) - {gh}`544` Adjusts the results processing of the nag optimizers to be compatible with the latest releases ({ghuser}`timmens`) - {gh}`543` Adds support for numpy 2.x ({ghuser}`timmens`) - {gh}`536` Adds a how-to guide for choosing local optimizers ({ghuser}`mpetrosian`) - {gh}`535` Allows algorithm classes and instances in estimation functions ({ghuser}`timmens`) - {gh}`532` Makes several small improvements to the documentation. ## 0.5.0 This is a major release with several breaking changes and deprecations. In this release we started implementing two major enhancement proposals and renamed the package from estimagic to optimagic (while keeping the `estimagic` namespace for the estimation capabilities). - [EP-02: Static typing](https://estimagic.org/en/latest/development/ep-02-typing.html) - [EP-03: Alignment with SciPy](https://estimagic.org/en/latest/development/ep-03-alignment.html) The implementation of the two enhancement proposals is not complete and will likely take until version `0.6.0`. However, all breaking changes and deprecations (with the exception of a minor change in benchmarking) are already implemented such that updating to version `0.5.0` is future proof. 
- {gh}`500` removes the dashboard, the support for simopt optimizers and the `derivative_plot` ({ghuser}`janosg`) - {gh}`502` renames estimagic to optimagic ({ghuser}`janosg`) - {gh}`504` aligns `maximize` and `minimize` more closely with scipy. All related deprecations and breaking changes are listed below. As a result, scipy code that uses minimize with the arguments `x0`, `fun`, `jac` and `method` will run without changes in optimagic. Similarly, `OptimizeResult` gets some aliases so it behaves more like SciPy's. - {gh}`506` introduces the new `Bounds` object and deprecates `lower_bounds`, `upper_bounds`, `soft_lower_bounds` and `soft_upper_bounds` ({ghuser}`janosg`) - {gh}`507` updates the infrastructure so we can make parallel releases under the names `optimagic` and `estimagic` ({ghuser}`timmens`) - {gh}`508` introduces the new `ScalingOptions` object and deprecates the `scaling_options` argument of `maximize` and `minimize` ({ghuser}`timmens`) - {gh}`512` implements the new interface for objective functions and derivatives ({ghuser}`janosg`) - {gh}`513` implements the new `optimagic.MultistartOptions` object and deprecates the `multistart_options` argument of `maximize` and `minimize` ({ghuser}`timmens`) - {gh}`514` and {gh}`516` introduce the `NumdiffResult` object that is returned from `first_derivative` and `second_derivative`. It also fixes several bugs in the pytree handling in `first_derivative` and `second_derivative` and deprecates Richardson Extrapolation and the `key` ({ghuser}`timmens`) - {gh}`517` introduces the new `NumdiffOptions` object for configuring numerical differentiation during optimization or estimation ({ghuser}`timmens`) - {gh}`519` rewrites the logging code and introduces new `LogOptions` objects ({ghuser}`schroedk`) - {gh}`521` introduces the new internal algorithm interface.
({ghuser}`janosg` and {ghuser}`mpetrosian`) - {gh}`522` introduces the new `Constraint` objects and deprecates passing dictionaries or lists of dictionaries as constraints ({ghuser}`timmens`) ### Breaking changes - When providing a path for the argument `logging` of the functions `maximize` and `minimize` and the file already exists, the default behavior is to raise an error now. Replacement or extension of an existing file must be explicitly configured. - The argument `if_table_exists` in `log_options` has no effect anymore and a corresponding warning is raised. - `OptimizeResult.history` is now a `optimagic.History` object instead of a dictionary. Dictionary style access is implemented but deprecated. Other dictionary methods might not work. - The result of `first_derivative` and `second_derivative` is now a `optimagic.NumdiffResult` object instead of a dictionary. Dictionary style access is implemented but other dictionary methods might not work. - The dashboard is removed - The `derivative_plot` is removed. - Optimizers from Simopt are removed. - Passing callables with the old internal algorithm interface as `algorithm` to `minimize` and `maximize` is not supported anymore. Use the new `Algorithm` objects instead. For examples see: https://tinyurl.com/24a5cner ### Deprecations - The `criterion` argument of `maximize` and `minimize` is renamed to `fun` (as in SciPy). - The `derivative` argument of `maximize` and `minimize` is renamed to `jac` (as in SciPy) - The `criterion_and_derivative` argument of `maximize` and `minimize` is renamed to `fun_and_jac` to align it with the other names. - The `criterion_kwargs` argument of `maximize` and `minimize` is renamed to `fun_kwargs` to align it with the other names. - The `derivative_kwargs` argument of `maximize` and `minimize` is renamed to `jac_kwargs` to align it with the other names. 
- The `criterion_and_derivative_kwargs` argument of `maximize` and `minimize` is renamed to `fun_and_jac_kwargs` to align it with the other names. - Algorithm specific convergence and stopping criteria are renamed to align them more with NlOpt and SciPy names. - `convergence_relative_criterion_tolerance` -> `convergence_ftol_rel` - `convergence_absolute_criterion_tolerance` -> `convergence_ftol_abs` - `convergence_relative_params_tolerance` -> `convergence_xtol_rel` - `convergence_absolute_params_tolerance` -> `convergence_xtol_abs` - `convergence_relative_gradient_tolerance` -> `convergence_gtol_rel` - `convergence_absolute_gradient_tolerance` -> `convergence_gtol_abs` - `convergence_scaled_gradient_tolerance` -> `convergence_gtol_scaled` - `stopping_max_criterion_evaluations` -> `stopping_maxfun` - `stopping_max_iterations` -> `stopping_maxiter` - The arguments `lower_bounds`, `upper_bounds`, `soft_lower_bounds` and `soft_upper_bounds` are deprecated and replaced by `optimagic.Bounds`. This affects `maximize`, `minimize`, `estimate_ml`, `estimate_msm`, `slice_plot` and several other functions. - The `log_options` argument of `minimize` and `maximize` is deprecated. Instead, `LogOptions` objects can be passed under the `logging` argument. - The class `OptimizeLogReader` is deprecated and redirects to `SQLiteLogReader`. - The `scaling_options` argument of `maximize` and `minimize` is deprecated. Instead a `ScalingOptions` object can be passed under the `scaling` argument that was previously just a bool. - Objective functions that return a dictionary with the special keys "value", "contributions" and "root_contributions" are deprecated. Instead, likelihood and least-squares functions are marked with a `mark.likelihood` or `mark.least_squares` decorator. There is a detailed how-to guide that shows the new behavior. This affects `maximize`, `minimize`, `slice_plot` and other functions that work with objective functions. 
- The `multistart_options` argument of `minimize` and `maximize` is deprecated. Instead, a `MultistartOptions` object can be passed under the `multistart` argument. - Richardson Extrapolation is deprecated in `first_derivative` and `second_derivative` - The `key` argument is deprecated in `first_derivative` and `second_derivative` - Passing dictionaries or lists of dictionaries as `constraints` to `maximize` or `minimize` is deprecated. Use the new `Constraint` objects instead. ## 0.4.7 This release contains minor improvements and bug fixes. It is the last release before the package will be renamed to optimagic and two large enhancement proposals will be implemented. - {gh}`490` adds the attribute `optimize_result` to the `MomentsResult` class ({ghuser}`timmens`) - {gh}`483` fixes a bug in the handling of keyword arguments in `bootstrap` ({ghuser}`alanlujan91`) - {gh}`477` allows to use an identity weighting matrix in MSM estimation ({ghuser}`sidd3888`) - {gh}`473` fixes a bug where bootstrap keyword arguments were ignored `get_moments_cov` ({ghuser}`timmens`) - {gh}`467`, {gh}`478`, {gh}`479` and {gh}`480` improve the documentation ({ghuser}`mpetrosian`, {ghuser}`segsell`, and {ghuser}`timmens`) ## 0.4.6 This release drastically improves the optimizer benchmarking capabilities, especially with noisy functions and parallel optimizers. It makes tranquilo and numba optional dependencies and is the first version of estimagic to be compatible with Python 3.11. 
- {gh}`464` Makes tranquilo and numba optional dependencies ({ghuser}`janosg`) - {gh}`461` Updates docstrings for process_benchmark_results ({ghuser}`segsell`) - {gh}`460` Fixes several bugs in the processing of benchmark results with noisy functions ({ghuser}`janosg`) - {gh}`459` Prepares benchmarking functionality for parallel optimizers ({ghuser}`mpetrosian` and {ghuser}`janosg`) - {gh}`457` Removes some unused files ({ghuser}`segsell`) - {gh}`455` Improves a local pre-commit hook ({ghuser}`ChristianZimpelmann`) ## 0.4.5 - {gh}`379` Improves the estimation table ({ghuser}`ChristianZimpelmann`) - {gh}`445` fixes line endings in local pre-commit hook ({ghuser}`ChristianZimpelmann`) - {gh}`443`, {gh}`444`, {gh}`445`, {gh}`446`, {gh}`448` and {gh}`449` are a major refactoring of tranquilo ({ghuser}`timmens` and {ghuser}`janosg`) - {gh}`441` Adds an aggregated convergence plot for benchmarks ({ghuser}`mpetrosian`) - {gh}`435` Completes the cartis-roberts benchmark set ({ghuser}`segsell`) ## 0.4.4 - {gh}`437` removes fuzzywuzzy as dependency ({ghuser}`aidatak97`) - {gh}`432` makes logging compatible with sqlalchemy 2.x ({ghuser}`janosg`) - {gh}`430` refactors the getter functions in Tranquilo ({ghuser}`janosg`) - {gh}`427` improves pre-commit setup ({ghuser}`timmens` and {ghuser}`hmgaudecker`) - {gh}`425` improves handling of notebooks in documentation ({ghuser}`baharcos`) - {gh}`423` and {gh}`399` add code to calculate poisedness constants ({ghuser}`segsell`) - {gh}`420` improve CI infrastructure ({ghuser}`hmgaudecker`, {ghuser}`janosg`) - {gh}`407` adds global optimizers from scipy ({ghuser}`baharcos`) ## 0.4.3 - {gh}`416` improves documentation and packaging ({ghuser}`janosg`) ## 0.4.2 - {gh}`412` Improves the output of the fides optimizer among other small changes ({ghuser}`janosg`) - {gh}`411` Fixes a bug in multistart optimizations with least squares optimizers.
See {gh}`410` for details ({ghuser}`janosg`) - {gh}`404` speeds up the gqtpar subsolver ({ghuser}`mpetrosian` ) - {gh}`400` refactors subsolvers ({ghuser}`mpetrosian`) - {gh}`398`, {gh}`397`, {gh}`395`, {gh}`390`, {gh}`389`, {gh}`388` continue with the implementation of tranquilo ({ghuser}`segsell`, {ghuser}`timmens`, {ghuser}`mpetrosian`, {ghuser}`janosg`) - {gh}`391` speeds up the bntr subsolver ({ghuser}`mpetrosian`) ## 0.4.1 - {gh}`307` Adopts a code of conduct and governance model - {gh}`384` Polish documentation ({ghuser}`janosg` and {ghuser}`mpetrosian`) - {gh}`374` Moves the documentation to MyST ({ghuser}`baharcos`) - {gh}`365` Adds copy buttons to documentation ({ghuser}`amageh`) - {gh}`371` Refactors the pounders algorithm ({ghuser}`segsell`) - {gh}`369` Fixes CI ({ghuser}`janosg`) - {gh}`367` Fixes the linux environment ({ghuser}`timmens`) - {gh}`294` Adds the very first experimental version of tranquilo ({ghuser}`janosg`, {ghuser}`timmens`, {ghuser}`segsell`, {ghuser}`mpetrosian`) ## 0.4.0 - {gh}`366` Update ({ghuser}`segsell`) - {gh}`362` Polish documentation ({ghuser}`segsell`) ## 0.3.4 - {gh}`364` Use local random number generators ({ghuser}`timmens`) - {gh}`363` Fix pounders test cases ({ghuser}`segsell`) - {gh}`361` Update estimation code ({ghuser}`timmens`) - {gh}`360` Update results object documentation ({ghuser}`timmens`) ## 0.3.3 - {gh}`357` Adds jax support ({ghuser}`janosg`) - {gh}`359` Improves error handling with violated constraints ({ghuser}`timmens`) - {gh}`358` Improves cartis roberts set of test functions and improves the default latex rendering of MultiIndex tables ({ghuser}`mpetrosian`) ## 0.3.2 - {gh}`355` Improves test coverage of constraints processing ({ghuser}`janosg`) - {gh}`354` Improves test coverage for bounds processing ({ghuser}`timmens`) - {gh}`353` Improves history plots ({ghuser}`timmens`) - {gh}`352` Improves scaling and benchmarking ({ghuser}`janosg`) - {gh}`351` Improves estimation summaries ({ghuser}`timmens`) -
{gh}`350` Allow empty queries or selectors in constraints ({ghuser}`janosg`) ## 0.3.1 - {gh}`349` fixes multiple small bugs and adds test cases for all of them ({ghuser}`mpetrosian`, {ghuser}`janosg` and {ghuser}`timmens`) ## 0.3.0 First release with pytree support in optimization, estimation and differentiation and much better result objects in optimization and estimation. Breaking changes - New `OptimizeResult` object is returned by `maximize` and `minimize`. This breaks all code that expects the old result dictionary. Usage of the new result is explained in the getting started tutorial on optimization. - New internal optimizer interface that can break optimization with custom optimizers - The interface of `process_constraints` changed quite drastically. This breaks code that used `process_constraints` to get the number of free parameters or check if constraints are valid. There are new high level functions `estimagic.check_constraints` and `estimagic.count_free_params` instead. - Some functions from `estimagic.logging.read_log` are removed and replaced by `estimagic.OptimizeLogReader`. - Convenience functions to create namedtuples are removed from `estimagic.utilities`. - {gh}`346` Add option to use nonlinear constraints ({ghuser}`timmens`) - {gh}`345` Moves estimation_table to new latex functionality of pandas ({ghuser}`mpetrosian`) - {gh}`344` Adds pytree support to slice_plot ({ghuser}`janosg`) - {gh}`343` Improves the result object of estimation functions and makes msm estimation pytree compatible ({ghuser}`janosg`) - {gh}`342` Improves default options of the fides optimizer, allows single constraints and polishes the documentation ({ghuser}`janosg`) - {gh}`340` Enables history collection for optimizers that evaluate the criterion function in parallel ({ghuser}`janosg`) - {gh}`339` Incorporates user feedback and polishes the documentation.
- {gh}`338` Improves log reading functions ({ghuser}`janosg`) - {gh}`336` Adds pytree support to the dashboard ({ghuser}`roecla`). - {gh}`335` Introduces an `OptimizeResult` object and functionality for history plotting ({ghuser}`janosg`). - {gh}`333` Uses new history collection feature to speed up benchmarking ({ghuser}`segsell`). - {gh}`330` Is a major rewrite of the estimation code ({ghuser}`timmens`). - {gh}`328` Improves quadratic surrogate solvers used in pounders and tranquilo ({ghuser}`segsell`). - {gh}`326` Improves documentation of numerical derivatives ({ghuser}`timmens`). - {gh}`325` Improves the slice_plot ({ghuser}`mpetrosian`) - {gh}`324` Adds ability to collect optimization histories without logging ({ghuser}`janosg`). - {gh}`311` and {gh}`288` rewrite all plotting code in plotly ({ghuser}`timmens` and {ghuser}`aidatak97`). - {gh}`306` improves quadratic surrogate solvers used in pounders and tranquilo ({ghuser}`segsell`). - {gh}`305` allows pytrees during optimization and rewrites large parts of the constraints processing ({ghuser}`janosg`). - {gh}`303` introduces a new optimizer interface that makes it easier to add optimizers and makes it possible to access optimizer specific information outside of the internal_criterion_and_derivative ({ghuser}`janosg` and {ghuser}`roecla`). ## 0.2.5 - {gh}`302` Drastically improves error handling during optimization ({ghuser}`janosg`). ## 0.2.4 - {gh}`304` Removes the chaospy dependency ({ghuser}`segsell`). ## 0.2.3 - {gh}`295` Fixes a small bug in estimation_table ({ghuser}`mpetrosian`). - {gh}`286` Adds pytree support for first and second derivative ({ghuser}`timmens`). - {gh}`285` Allows to use estimation functions with external optimization ({ghuser}`janosg`). - {gh}`283` Adds fast solvers for quadratic trustregion subproblems ({ghuser}`segsell`). - {gh}`282` Vastly improves estimation tables ({ghuser}`mpetrosian`). - {gh}`281` Adds some tools to work with pytrees ({ghuser}`janosg` and {ghuser}`timmens`).
- {gh}`278` adds Estimagic Enhancement Proposal 1 for the use of Pytrees in Estimagic ({ghuser}`janosg`) ## 0.2.2 - {gh}`276` Add parallel Nelder-Mead algorithm by {ghuser}`jacekb95` - {gh}`267` Update fides by {ghuser}`roecla` - {gh}`265` Refactor pounders algorithm by {ghuser}`segsell` and {ghuser}`janosg`. - {gh}`261` Add pure Python pounders algorithm by {ghuser}`segsell`. ## 0.2.1 - {gh}`260` Update MSM and ML notebooks by {ghuser}`timmens`. - {gh}`259` Several small fixes and improvements by {ghuser}`janosg` and {ghuser}`roecla`. ## 0.2.0 Add a lot of new functionality with a few minor breaking changes. We have more optimizers, better error handling, bootstrap and inference for method of simulated moments. The breaking changes are: \- logging is disabled by default during optimization. \- the log_option "if_exists" was renamed to "if_table_exists" \- The comparison plot function is removed. \- first_derivative now returns a dictionary, independent of arguments. \- structure of the logging database has changed \- there is an additional boolean flag named `scaling` in minimize and maximize - {gh}`251` Allows the loading, running and visualization of benchmarks ({ghuser}`janosg`, {ghuser}`mpetrosian` and {ghuser}`roecla`) - {gh}`196` Adds support for multistart optimizations ({ghuser}`asouther4` and {ghuser}`janosg`) - {gh}`248` Adds the fides optimizer ({ghuser}`roecla`) - {gh}`146` Adds `estimate_ml` functionality ({ghuser}`janosg`, {ghuser}`LuisCald` and {ghuser}`s6soverd`). - {gh}`235` Improves the documentation ({ghuser}`roecla`) - {gh}`216` Adds the ipopt optimizer ({ghuser}`roecla`) - {gh}`215` Adds optimizers from the pygmo library ({ghuser}`roecla` and {ghuser}`janosg`) - {gh}`212` Adds optimizers from the nlopt library ({ghuser}`mpetrosian`) - {gh}`228` Restructures testing and makes changes to log_options. 
- {gh}`149` Adds `estimate_msm` functionality ({ghuser}`janosg` and {ghuser}`loikein`) - {gh}`219` Several enhancements by ({ghuser}`tobiasraabe`) - {gh}`218` Improve documentation by ({ghuser}`sofyaakimova`) and ({ghuser}`effieHan`) - {gh}`214` Fix bug with overlapping "fixed" and "linear" constraints ({ghuser}`janosg`) - {gh}`211` Improve error handling of log reading functions by ({ghuser}`janosg`) - {gh}`210` Automatically drop empty constraints by ({ghuser}`janosg`) - {gh}`192` Add option to scale optimization problems by ({ghuser}`janosg`) - {gh}`202` Refactoring of bootstrap code ({ghuser}`janosg`) - {gh}`148` Add bootstrap functionality ({ghuser}`RobinMusolff`) - {gh}`208` Several small improvements ({ghuser}`janosg`) - {gh}`206` Improve latex and html tables ({ghuser}`mpetrosian`) - {gh}`205` Add scipy's least squares optimizers (based on {gh}`197` by ({ghuser}`yradeva93`) - {gh}`198` More unit tests for optimizers ({ghuser}`mchandra12`) - {gh}`200` Plot intermediate outputs of `first_derivative` ({ghuser}`timmens`) ## 0.1.3 - 2021-06-25 - {gh}`195` Illustrate optimizers in documentation ({ghuser}`sofyaakimova`), ({ghuser}`effieHan`) and ({ghuser}`janosg`) - {gh}`201` More stable covariance matrix calculation ({ghuser}`janosg`) - {gh}`199` Return intermediate outputs of first_derivative ({ghuser}`timmens`) ## 0.1.2 - 2021-02-07 - {gh}`189` Improve documentation and logging ({ghuser}`roecla`) ## 0.1.1 - 2021-01-13 This release greatly expands the set of available optimization algorithms, has a better and prettier dashboard and improves the documentation. 
- {gh}`187` Implement dot notation in algo_options ({ghuser}`roecla`) - {gh}`183` Improve documentation ({ghuser}`SofiaBadini`) - {gh}`182` Allow for constraints in likelihood inference ({ghuser}`janosg`) - {gh}`181` Add DF-OLS optimizer from Numerical Algorithm Group ({ghuser}`roecla`) - {gh}`180` Add pybobyqa optimizer from Numerical Algorithm Group ({ghuser}`roecla`) - {gh}`179` Allow base_steps and min_steps to be scalars ({ghuser}`tobiasraabe`) - {gh}`178` Refactoring of dashboard code ({ghuser}`roecla`) - {gh}`177` Add stride as a new dashboard argument ({ghuser}`roecla`) - {gh}`176` Minor fix of plot width in dashboard ({ghuser}`janosg`) - {gh}`174` Various dashboard improvements ({ghuser}`roecla`) - {gh}`173` Add new color palettes and use them in dashboard ({ghuser}`janosg`) - {gh}`172` Add high level log reading functions ({ghuser}`janosg`) ## 0.1.0dev1 - 2020-09-08 This release entails a complete rewrite of the optimization code with many breaking changes. In particular, some optimizers that were available before are not anymore. Those will be re-introduced soon. The breaking changes include: - The database is restructured. The new version simplifies the code, makes logging faster and avoids the sql column limit. - Users can provide closed form derivative and/or criterion_and_derivative where the latter one can exploit synergies in the calculation of criterion and derivative. This is also compatible with constraints. - Our own (parallelized) first_derivative function is used to calculate gradients during the optimization when no closed form gradients are provided. - Optimizer options like convergence criteria and optimization results are harmonized across optimizers. - Users can choose from several batch evaluators whenever we parallelize (e.g. for parallel optimizations or parallel function evaluations for numerical derivatives) or pass in their own batch evaluator function as long as it has a compatible interface. 
The batch evaluator interface also standardizes error handling. - There is a well defined internal optimizer interface. Users can select the pre-implemented optimizers by algorithm="name_of_optimizer" or their own optimizer by algorithm=custom_minimize_function - Optimizers from pygmo and nlopt are no longer supported (will be re-introduced) - Greatly improved error handling. - {gh}`169` Add additional dashboard arguments - {gh}`168` Rename lower and upper to lower_bound and upper_bound ({ghuser}`ChristianZimpelmann`) - {gh}`167` Improve dashboard styling ({ghuser}`roecla`) - {gh}`166` Re-add POUNDERS from TAO ({ghuser}`tobiasraabe`) - {gh}`165` Re-add the scipy optimizers with harmonized options ({ghuser}`roecla`) - {gh}`164` Closed form derivatives for parameter transformations ({ghuser}`timmens`) - {gh}`163` Complete rewrite of optimization with breaking changes ({ghuser}`janosg`) - {gh}`162` Improve packaging and relax version constraints ({ghuser}`tobiasraabe`) - {gh}`160` Generate parameter tables in tex and html ({ghuser}`mpetrosian`) ## 0.0.31 - 2020-06-20 - {gh}`130` Improve wrapping of POUNDERS algorithm ({ghuser}`mo2561057`) - {gh}`159` Add Richardson Extrapolation to first_derivative ({ghuser}`timmens`) ## 0.0.30 - 2020-04-22 - {gh}`158` allows to specify a gradient in maximize and minimize ({ghuser}`janosg`) ## 0.0.29 - 2020-04-16 - {gh}`154` Version restrictions for pygmo ({ghuser}`janosg`) - {gh}`153` adds documentation for the CLI ({ghuser}`tobiasraabe`) - {gh}`152` makes estimagic work with pandas 1.0 ({ghuser}`SofiaBadini`) ## 0.0.28 - 2020-03-17 - {gh}`151` estimagic becomes a noarch package. ({ghuser}`janosg`). 
- {gh}`150` adds command line interface to the dashboard ({ghuser}`tobiasraabe`) ================================================ FILE: CITATION ================================================ Please use one of the following samples to cite the optimagic version (change x.y) from this installation Text: [optimagic] optimagic x.y, 2024 Janos Gabler, https://github.com/optimagic-dev/optimagic BibTeX: @Unpublished{Gabler2024, Title = {optimagic: A library for nonlinear optimization}, Author = {Janos Gabler}, Year = {2024}, Url = {https://github.com/optimagic-dev/optimagic} } If you are unsure about which version of optimagic you are using run: `conda list optimagic`. ================================================ FILE: LICENSE ================================================ Copyright 2019-2021 Janos Gabler Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: README.md ================================================

optimagic

______________________________________________________________________ [![PyPI](https://img.shields.io/pypi/v/optimagic?color=blue)](https://pypi.org/project/optimagic) [![PyPI - Python Version](https://img.shields.io/pypi/pyversions/optimagic)](https://pypi.org/project/optimagic) [![image](https://img.shields.io/conda/vn/conda-forge/optimagic.svg)](https://anaconda.org/conda-forge/optimagic) [![image](https://img.shields.io/conda/pn/conda-forge/optimagic.svg)](https://anaconda.org/conda-forge/optimagic) [![PyPI - License](https://img.shields.io/pypi/l/optimagic)](https://pypi.org/project/optimagic) [![image](https://readthedocs.org/projects/optimagic/badge/?version=latest)](https://optimagic.readthedocs.io/en/latest) [![image](https://img.shields.io/github/actions/workflow/status/optimagic-dev/optimagic/main.yml?branch=main)](https://github.com/optimagic-dev/optimagic/actions?query=branch%3Amain) [![image](https://codecov.io/gh/optimagic-dev/optimagic/branch/main/graph/badge.svg)](https://codecov.io/gh/optimagic-dev/optimagic) [![pre-commit.ci status](https://results.pre-commit.ci/badge/github/optimagic-dev/optimagic/main.svg)](https://results.pre-commit.ci/latest/github/optimagic-dev/optimagic/main) [![Ruff](https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json)](https://github.com/astral-sh/ruff) [![Downloads](https://pepy.tech/badge/optimagic/month)](https://pepy.tech/project/optimagic) [![NumFOCUS](https://img.shields.io/badge/NumFOCUS-affiliated%20project-orange.svg?style=flat&colorA=E1523D&colorB=007D8A)](https://numfocus.org/sponsored-projects/affiliated-projects) optimagic is a Python package for numerical optimization. It is a unified interface to optimizers from SciPy, NlOpt, and many other Python packages. Its features include: - **SciPy-compatible API.** optimagic's `minimize` function works just like SciPy's, so you don't have to adjust your code. You simply get more optimizers for free. 
- **Powerful diagnostic tools.** Visualize optimizer histories, compare runs, and diagnose convergence problems. - **Parallel numerical derivatives.** Compute gradients, jacobians, and hessians with parallel execution. - **Bounded, constrained, and unconstrained optimization.** Support for bounds, linear constraints, nonlinear constraints, fixed parameters, and more. - **Statistical inference on estimated parameters.** The estimagic subpackage provides functionality for confidence intervals, standard errors, and p-values. # Installation optimagic is available on [PyPI](https://pypi.org/project/optimagic) and on [conda-forge](https://anaconda.org/conda-forge/optimagic). Install the package with ```console $ pip install optimagic ``` or ```console $ conda install -c conda-forge optimagic ``` optimagic ships with all `scipy` optimizers out of the box. Additional algorithms become available if you install optional packages. For an overview of all supported optimizers and how to enable them, see the [list of algorithms](https://optimagic.readthedocs.io/en/latest/algorithms.html). # Usage ```python import optimagic as om import numpy as np def fun(x): return x @ x result = om.minimize(fun, params=np.array([1, 2, 3]), algorithm="scipy_lbfgsb") result.params.round(9) # np.array([0., 0., 0.]) ``` # Documentation You find the documentation at <https://optimagic.readthedocs.io>, with [tutorials](https://optimagic.readthedocs.io/en/latest/tutorials/index.html) and [how-to guides](https://optimagic.readthedocs.io/en/latest/how_to/index.html). # Changes Consult the [release notes](https://optimagic.readthedocs.io/en/latest/development/changes.html) to find out what is new. # License optimagic is distributed under the terms of the [MIT license](LICENSE). # Citation If you use optimagic for your research, please cite it with the following key to help others discover the tool. 
```bibtex @Unpublished{Gabler2024, Title = {optimagic: A library for nonlinear optimization}, Author = {Janos Gabler}, Year = {2024}, Url = {https://github.com/optimagic-dev/optimagic} } ``` # Acknowledgment We thank all institutions that have funded or supported optimagic (formerly estimagic).
================================================ FILE: codecov.yml ================================================ --- codecov: notify: require_ci_to_pass: true coverage: precision: 2 round: down range: 50...100 status: patch: default: target: 80% project: default: target: 90% ignore: # Uses numba - src/optimagic/benchmarking/cartis_roberts.py - tests/**/* ================================================ FILE: docs/Makefile ================================================ # Minimal makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build SPHINXPROJ = optimagic SOURCEDIR = source BUILDDIR = build # Put it first so that "make" without argument is like "make help". help: @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) .PHONY: help Makefile # Catch-all target: route all unknown targets to Sphinx using the new # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). %: Makefile @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) ================================================ FILE: docs/make.bat ================================================ @ECHO OFF pushd %~dp0 REM Command file for Sphinx documentation if "%SPHINXBUILD%" == "" ( set SPHINXBUILD=sphinx-build ) set SOURCEDIR=source set BUILDDIR=build set SPHINXPROJ=optimagic if "%1" == "" goto help %SPHINXBUILD% >NUL 2>NUL if errorlevel 9009 ( echo. echo.The 'sphinx-build' command was not found. Make sure you have Sphinx echo.installed, then set the SPHINXBUILD environment variable to point echo.to the full path of the 'sphinx-build' executable. Alternatively you echo.may add the Sphinx directory to PATH. echo. 
echo.If you don't have Sphinx installed, grab it from echo.http://sphinx-doc.org/ exit /b 1 ) %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% goto end :help %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% :end popd ================================================ FILE: docs/source/_static/css/custom.css ================================================ /* Remove execution count for notebook cells. */ div.prompt { display: none; } /* Classes for the index page. */ .index-card-image { padding-top: 1rem; height: 68px; text-align: center; } .index-card-link { color: var(--sd-color-card-text); font-weight: bold; } pre { padding-left: 20px } li pre { padding-left: 20px } .highlight { background: #f5f5f5 } .highlight button.copybtn{ background-color: #f5f5f5; } .highlight button.copybtn:hover { background-color: #f5f5f5; } ================================================ FILE: docs/source/_static/css/termynal.css ================================================ /** * termynal.js * * @author Ines Montani * @version 0.0.1 * @license MIT */ :root { --color-bg: #0c0c0c; --color-text: #f2f2f2; --color-text-subtle: #a2a2a2; } [data-termynal] { width: 750px; max-width: 100%; background: var(--color-bg); color: var(--color-text); /* font-size: 18px; */ font-size: 15px; /* font-family: 'Fira Mono', Consolas, Menlo, Monaco, 'Courier New', Courier, monospace; */ font-family: 'Roboto Mono', 'Fira Mono', Consolas, Menlo, Monaco, 'Courier New', Courier, monospace; border-radius: 4px; padding: 75px 45px 35px; position: relative; -webkit-box-sizing: border-box; box-sizing: border-box; line-height: 1.2; } [data-termynal]:before { content: ''; position: absolute; top: 15px; left: 15px; display: inline-block; width: 15px; height: 15px; border-radius: 50%; /* A little hack to display the window buttons in one pseudo element. 
*/ background: #d9515d; -webkit-box-shadow: 25px 0 0 #f4c025, 50px 0 0 #3ec930; box-shadow: 25px 0 0 #f4c025, 50px 0 0 #3ec930; } [data-termynal]:after { content: 'bash'; position: absolute; color: var(--color-text-subtle); top: 5px; left: 0; width: 100%; text-align: center; } a[data-terminal-control] { text-align: right; display: block; color: #aebbff; } [data-ty] { display: block; line-height: 2; } [data-ty]:before { /* Set up defaults and ensure empty lines are displayed. */ content: ''; display: inline-block; vertical-align: middle; } [data-ty="input"]:before, [data-ty-prompt]:before { margin-right: 0.75em; color: var(--color-text-subtle); } [data-ty="input"]:before { content: '$'; } [data-ty][data-ty-prompt]:before { content: attr(data-ty-prompt); } [data-ty-cursor]:after { content: attr(data-ty-cursor); font-family: monospace; margin-left: 0.5em; -webkit-animation: blink 1s infinite; animation: blink 1s infinite; } /* Cursor animation */ @-webkit-keyframes blink { 50% { opacity: 0; } } @keyframes blink { 50% { opacity: 0; } } ================================================ FILE: docs/source/_static/css/termynal_custom.css ================================================ .termynal-comment { color: #4a968f; font-style: italic; display: block; } .termy [data-termynal] { white-space: pre-wrap; } a.external-link::after { /* \00A0 is a non-breaking space to make the mark be on the same line as the link */ content: "\00A0[↪]"; } a.internal-link::after { /* \00A0 is a non-breaking space to make the mark be on the same line as the link */ content: "\00A0↪"; } :root { --termynal-green: #137C39; --termynal-red: #BF2D2D; --termynal-yellow: #F4C041; --termynal-white: #f2f2f2; --termynal-black: #0c0c0c; --termynal-blue: #11a8cd; --termynal-grey: #7f7f7f; } .termynal-failed { color: var(--termynal-red); } .termynal-failed-textonly { color: var(--termynal-white); background: var(--termynal-red); font-weight: bold; } .termynal-success { color: var(--termynal-green); } 
.termynal-success-textonly { color: var(--termynal-white); background: var(--termynal-green); font-weight: bold; } .termynal-skipped { color: var(--termynal-yellow); } .termynal-skipped-textonly { color: var(--termynal-black); background: var(--termynal-yellow); font-weight: bold; } .termynal-warning { color: var(--termynal-yellow); } .termynal-command { color: var(--termynal-green); font-weight: bold; } .termynal-option { color: var(--termynal-yellow); font-weight: bold; } .termynal-switch { color: var(--termynal-red); font-weight: bold; } .termynal-metavar { color: yellow; font-weight: bold; } .termynal-dim { color: var(--termynal-grey); } .termynal-number { color: var(--termynal-blue); } ================================================ FILE: docs/source/_static/js/custom.js ================================================ /* The following code is copied from https://github.com/tiangolo/typer. The MIT License (MIT) Copyright (c) 2019 Sebastián Ramírez Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ document.querySelectorAll(".use-termynal").forEach(node => { node.style.display = "block"; new Termynal(node, { lineDelay: 500 }); }); const progressLiteralStart = "---> 100%"; const promptLiteralStart = "$ "; const customPromptLiteralStart = "# "; const termynalActivateClass = "termy"; let termynals = []; function createTermynals() { document .querySelectorAll(`.${termynalActivateClass} .highlight`) .forEach(node => { const text = node.textContent; const lines = text.split("\n"); const useLines = []; let buffer = []; function saveBuffer() { if (buffer.length) { let isBlankSpace = true; buffer.forEach(line => { if (line) { isBlankSpace = false; } }); dataValue = {}; if (isBlankSpace) { dataValue["delay"] = 0; } if (buffer[buffer.length - 1] === "") { // A last single
won't have effect // so put an additional one buffer.push(""); } const bufferValue = buffer.join("
"); dataValue["value"] = bufferValue; useLines.push(dataValue); buffer = []; } } for (let line of lines) { if (line === progressLiteralStart) { saveBuffer(); useLines.push({ type: "progress" }); } else if (line.startsWith(promptLiteralStart)) { saveBuffer(); const value = line.replace(promptLiteralStart, "").trimEnd(); useLines.push({ type: "input", value: value }); } else if (line.startsWith("// ")) { saveBuffer(); const value = "💬 " + line.replace("// ", "").trimEnd(); useLines.push({ value: value, class: "termynal-comment", delay: 0 }); } else if (line.startsWith(customPromptLiteralStart)) { saveBuffer(); const promptStart = line.indexOf(promptLiteralStart); if (promptStart === -1) { console.error("Custom prompt found but no end delimiter", line) } const prompt = line.slice(0, promptStart).replace(customPromptLiteralStart, "") let value = line.slice(promptStart + promptLiteralStart.length); useLines.push({ type: "input", value: value, prompt: prompt }); } else { buffer.push(line); } } saveBuffer(); const div = document.createElement("div"); node.replaceWith(div); const termynal = new Termynal(div, { lineData: useLines, noInit: true, lineDelay: 500 }); termynals.push(termynal); }); } function loadVisibleTermynals() { termynals = termynals.filter(termynal => { if (termynal.container.getBoundingClientRect().top - innerHeight <= 0) { termynal.init(); return false; } return true; }); } window.addEventListener("scroll", loadVisibleTermynals); createTermynals(); loadVisibleTermynals(); ================================================ FILE: docs/source/_static/js/require.js ================================================ /** vim: et:ts=4:sw=4:sts=4 * @license RequireJS 2.3.7 Copyright jQuery Foundation and other contributors. 
* Released under MIT license, https://github.com/requirejs/requirejs/blob/master/LICENSE */ var requirejs,require,define;!function(global,setTimeout){var req,s,head,baseElement,dataMain,src,interactiveScript,currentlyAddingScript,mainScript,subPath,version="2.3.7",commentRegExp=/\/\*[\s\S]*?\*\/|([^:"'=]|^)\/\/.*$/gm,cjsRequireRegExp=/[^.]\s*require\s*\(\s*["']([^'"\s]+)["']\s*\)/g,jsSuffixRegExp=/\.js$/,currDirRegExp=/^\.\//,op=Object.prototype,ostring=op.toString,hasOwn=op.hasOwnProperty,isBrowser=!("undefined"==typeof window||"undefined"==typeof navigator||!window.document),isWebWorker=!isBrowser&&"undefined"!=typeof importScripts,readyRegExp=isBrowser&&"PLAYSTATION 3"===navigator.platform?/^complete$/:/^(complete|loaded)$/,defContextName="_",isOpera="undefined"!=typeof opera&&"[object Opera]"===opera.toString(),contexts={},cfg={},globalDefQueue=[],useInteractive=!1,disallowedProps=["__proto__","constructor"];function commentReplace(e,t){return t||""}function isFunction(e){return"[object Function]"===ostring.call(e)}function isArray(e){return"[object Array]"===ostring.call(e)}function each(e,t){if(e)for(var i=0;i @version 0.0.1 @license MIT Additions were made by https://github.com/tiangolo/typer. The MIT License (MIT) Copyright (c) 2019 Sebastián Ramírez Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ 'use strict'; /** Generate a terminal widget. */ class Termynal { /** * Construct the widget's settings. * @param {(string|Node)=} container - Query selector or container element. * @param {Object=} options - Custom settings. * @param {string} options.prefix - Prefix to use for data attributes. * @param {number} options.startDelay - Delay before animation, in ms. * @param {number} options.typeDelay - Delay between each typed character, in ms. * @param {number} options.lineDelay - Delay between each line, in ms. * @param {number} options.progressLength - Number of characters displayed as progress bar. * @param {string} options.progressChar – Character to use for progress bar, defaults to █. * @param {number} options.progressPercent - Max percent of progress. * @param {string} options.cursor – Character to use for cursor, defaults to ▋. * @param {Object[]} lineData - Dynamically loaded line data objects. * @param {boolean} options.noInit - Don't initialise the animation. */ constructor(container = '#termynal', options = {}) { this.container = (typeof container === 'string') ? 
document.querySelector(container) : container; this.pfx = `data-${options.prefix || 'ty'}`; this.originalStartDelay = this.startDelay = options.startDelay || parseFloat(this.container.getAttribute(`${this.pfx}-startDelay`)) || 600; this.originalTypeDelay = this.typeDelay = options.typeDelay || parseFloat(this.container.getAttribute(`${this.pfx}-typeDelay`)) || 90; this.originalLineDelay = this.lineDelay = options.lineDelay || parseFloat(this.container.getAttribute(`${this.pfx}-lineDelay`)) || 1500; this.progressLength = options.progressLength || parseFloat(this.container.getAttribute(`${this.pfx}-progressLength`)) || 40; this.progressChar = options.progressChar || this.container.getAttribute(`${this.pfx}-progressChar`) || '█'; this.progressPercent = options.progressPercent || parseFloat(this.container.getAttribute(`${this.pfx}-progressPercent`)) || 100; this.cursor = options.cursor || this.container.getAttribute(`${this.pfx}-cursor`) || '▋'; this.lineData = this.lineDataToElements(options.lineData || []); this.loadLines() if (!options.noInit) this.init() } loadLines() { // Load all the lines and create the container so that the size is fixed // Otherwise it would be changing and the user viewport would be constantly // moving as she/he scrolls const finish = this.generateFinish() finish.style.visibility = 'hidden' this.container.appendChild(finish) // Appends dynamically loaded lines to existing line elements. this.lines = [...this.container.querySelectorAll(`[${this.pfx}]`)].concat(this.lineData); for (let line of this.lines) { line.style.visibility = 'hidden' this.container.appendChild(line) } const restart = this.generateRestart() restart.style.visibility = 'hidden' this.container.appendChild(restart) this.container.setAttribute('data-termynal', ''); } /** * Initialise the widget, get lines, clear container and start animation. */ init() { /** * Calculates width and height of Termynal container. 
* If container is empty and lines are dynamically loaded, defaults to browser `auto` or CSS. */ const containerStyle = getComputedStyle(this.container); this.container.style.width = containerStyle.width !== '0px' ? containerStyle.width : undefined; this.container.style.minHeight = containerStyle.height !== '0px' ? containerStyle.height : undefined; this.container.setAttribute('data-termynal', ''); this.container.innerHTML = ''; for (let line of this.lines) { line.style.visibility = 'visible' } this.start(); } /** * Start the animation and rener the lines depending on their data attributes. */ async start() { this.addFinish() await this._wait(this.startDelay); for (let line of this.lines) { const type = line.getAttribute(this.pfx); const delay = line.getAttribute(`${this.pfx}-delay`) || this.lineDelay; if (type == 'input') { line.setAttribute(`${this.pfx}-cursor`, this.cursor); await this.type(line); await this._wait(delay); } else if (type == 'progress') { await this.progress(line); await this._wait(delay); } else { this.container.appendChild(line); await this._wait(delay); } line.removeAttribute(`${this.pfx}-cursor`); } this.addRestart() this.finishElement.style.visibility = 'hidden' this.lineDelay = this.originalLineDelay this.typeDelay = this.originalTypeDelay this.startDelay = this.originalStartDelay } generateRestart() { const restart = document.createElement('a') restart.onclick = (e) => { e.preventDefault() this.container.innerHTML = '' this.init() } restart.href = '#' restart.setAttribute('data-terminal-control', '') restart.innerHTML = "restart ↻" return restart } generateFinish() { const finish = document.createElement('a') finish.onclick = (e) => { e.preventDefault() this.lineDelay = 0 this.typeDelay = 0 this.startDelay = 0 } finish.href = '#' finish.setAttribute('data-terminal-control', '') finish.innerHTML = "fast →" this.finishElement = finish return finish } addRestart() { const restart = this.generateRestart() this.container.appendChild(restart) } 
addFinish() { const finish = this.generateFinish() this.container.appendChild(finish) } /** * Animate a typed line. * @param {Node} line - The line element to render. */ async type(line) { const chars = [...line.textContent]; line.textContent = ''; this.container.appendChild(line); for (let char of chars) { const delay = line.getAttribute(`${this.pfx}-typeDelay`) || this.typeDelay; await this._wait(delay); line.textContent += char; } } /** * Animate a progress bar. * @param {Node} line - The line element to render. */ async progress(line) { const progressLength = line.getAttribute(`${this.pfx}-progressLength`) || this.progressLength; const progressChar = line.getAttribute(`${this.pfx}-progressChar`) || this.progressChar; const chars = progressChar.repeat(progressLength); const progressPercent = line.getAttribute(`${this.pfx}-progressPercent`) || this.progressPercent; line.textContent = ''; this.container.appendChild(line); for (let i = 1; i < chars.length + 1; i++) { await this._wait(this.typeDelay); const percent = Math.round(i / chars.length * 100); line.textContent = `${chars.slice(0, i)} ${percent}%`; if (percent>progressPercent) { break; } } } /** * Helper function for animation delays, called with `await`. * @param {number} time - Timeout, in ms. */ _wait(time) { return new Promise(resolve => setTimeout(resolve, time)); } /** * Converts line data objects into line elements. * * @param {Object[]} lineData - Dynamically loaded lines. * @param {Object} line - Line data object. * @returns {Element[]} - Array of line elements. */ lineDataToElements(lineData) { return lineData.map(line => { let div = document.createElement('div'); div.innerHTML = `${line.value || ''}`; return div.firstElementChild; }); } /** * Helper function for generating attributes string. * * @param {Object} line - Line data object. * @returns {string} - String of attributes. 
*/ _attributes(line) { let attrs = ''; for (let prop in line) { // Custom add class if (prop === 'class') { attrs += ` class=${line[prop]} ` continue } if (prop === 'type') { attrs += `${this.pfx}="${line[prop]}" ` } else if (prop !== 'value') { attrs += `${this.pfx}-${prop}="${line[prop]}" ` } } return attrs; } } /** * HTML API: If current script has container(s) specified, initialise Termynal. */ if (document.currentScript.hasAttribute('data-termynal-container')) { const containers = document.currentScript.getAttribute('data-termynal-container'); containers.split('|') .forEach(container => new Termynal(container)) } ================================================ FILE: docs/source/algorithms.md ================================================ (list_of_algorithms)= # Optimizers Check out {ref}`how-to-select-algorithms` to see how to select an algorithm and specify `algo_options` when using `maximize` or `minimize`. The default algorithm options are discussed in {ref}`algo_options` and their type hints are documented in {ref}`typing`. ## Optimizers from SciPy (scipy-algorithms)= optimagic supports most [SciPy](https://scipy.org/) algorithms and SciPy is automatically installed when you install optimagic. ```{eval-rst} .. dropdown:: scipy_lbfgsb **How to use this algorithm:** .. code-block:: import optimagic as om om.minimize( ..., algorithm=om.algos.scipy_lbfgsb(stopping_maxiter=1_000, ...) ) or .. code-block:: om.minimize( ..., algorithm="scipy_lbfgsb", algo_options={"stopping_maxiter": 1_000, ...} ) **Description and available options:** .. autoclass:: optimagic.optimizers.scipy_optimizers.ScipyLBFGSB ``` ```{eval-rst} .. dropdown:: scipy_slsqp .. code-block:: "scipy_slsqp" Minimize a scalar function of one or more variables using the SLSQP algorithm. SLSQP stands for Sequential Least Squares Programming. SLSQP is a line search algorithm. It is well suited for continuously differentiable scalar optimization problems with up to several hundred parameters. 
The optimizer is taken from scipy which wraps the SLSQP optimization subroutine originally implemented by :cite:`Kraft1988`. .. note:: SLSQP's general nonlinear constraints are not supported yet by optimagic. - **convergence.ftol_abs** (float): Precision goal for the value of f in the stopping criterion. - **stopping.maxiter** (int): If the maximum number of iterations is reached, the optimization stops, but we do not count this as convergence. - **display** (bool): Set to True to print convergence messages. Default is False. Scipy name: **disp**. ``` ```{eval-rst} .. dropdown:: scipy_neldermead .. code-block:: "scipy_neldermead" Minimize a scalar function using the Nelder-Mead algorithm. The Nelder-Mead algorithm is a direct search method (based on function comparison) and is often applied to nonlinear optimization problems for which derivatives are not known. Unlike most modern optimization methods, the Nelder–Mead heuristic can converge to a non-stationary point, unless the problem satisfies stronger conditions than are necessary for modern methods. Nelder-Mead is never the best algorithm to solve a problem but rarely the worst. Its popularity is likely due to historic reasons and much larger than its properties warrant. The argument `initial_simplex` is not supported by optimagic as it is not compatible with optimagic's handling of constraints. - **stopping.maxiter** (int): If the maximum number of iterations is reached, the optimization stops, but we do not count this as convergence. - **stopping.maxfun** (int): If the maximum number of function evaluation is reached, the optimization stops but we do not count this as convergence. - **convergence.xtol_abs** (float): Absolute difference in parameters between iterations that is tolerated to declare convergence. As no relative tolerances can be passed to Nelder-Mead, optimagic sets a non zero default for this. 
- **convergence.ftol_abs** (float): Absolute difference in the criterion value between iterations that is tolerated to declare convergence. As no relative tolerances can be passed to Nelder-Mead, optimagic sets a non zero default for this. - **display** (bool): Set to True to print convergence messages. Default is False. SciPy name: **disp**. - **adaptive** (bool): Adapt algorithm parameters to dimensionality of problem. Useful for high-dimensional minimization (:cite:`Gao2012`, p. 259-277). scipy's default is False. ``` ```{eval-rst} .. dropdown:: scipy_powell .. code-block:: "scipy_powell" Minimize a scalar function using the modified Powell method. .. warning:: In our benchmark using a quadratic objective function, the Powell algorithm did not find the optimum very precisely (less than 4 decimal places). If you require high precision, you should refine an optimum found with Powell with another local optimizer. The criterion function need not be differentiable. Powell's method is a conjugate direction method, minimizing the function by a bi-directional search in each parameter's dimension. The argument ``direc``, which is the initial set of direction vectors and which is part of the scipy interface is not supported by optimagic because it is incompatible with how optimagic handles constraints. - **convergence.xtol_rel (float)**: Stop when the relative movement between parameter vectors is smaller than this. - **convergence.ftol_rel** (float): Stop when the relative improvement between two iterations is smaller than this. More formally, this is expressed as .. math:: \frac{(f^k - f^{k+1})}{\\max{{\{|f^k|, |f^{k+1}|, 1\}}}} \leq \text{relative_criterion_tolerance} - **stopping.maxfun** (int): If the maximum number of function evaluation is reached, the optimization stops but we do not count thisas convergence. - **stopping.maxiter** (int): If the maximum number of iterations is reached, the optimization stops, but we do not count this as convergence. 
- **display** (bool): Set to True to print convergence messages. Default is False. SciPy name: **disp**. ``` ```{eval-rst} .. dropdown:: scipy_bfgs .. code-block:: "scipy_bfgs" Minimize a scalar function of one or more variables using the BFGS algorithm. BFGS stands for Broyden-Fletcher-Goldfarb-Shanno algorithm. It is a quasi-Newton method that can be used for solving unconstrained nonlinear optimization problems. BFGS is not guaranteed to converge unless the function has a quadratic Taylor expansion near an optimum. However, BFGS can have acceptable performance even for non-smooth optimization instances. - **convergence.gtol_abs** (float): Stop if all elements of the gradient are smaller than this. - **stopping.maxiter** (int): If the maximum number of iterations is reached, the optimization stops, but we do not count this as convergence. - **norm** (float): Order of the vector norm that is used to calculate the gradient's "score" that is compared to the gradient tolerance to determine convergence. Default is infinite which means that the largest entry of the gradient vector is compared to the gradient tolerance. - **display** (bool): Set to True to print convergence messages. Default is False. SciPy name: **disp**. - **convergence_xtol_rel** (float): Relative tolerance for `x`. Terminate successfully if step size is less than `xk * xrtol` where `xk` is the current parameter vector. Default is 1e-5. SciPy name: **xrtol**. - **armijo_condition** (float): Parameter for Armijo condition rule. Default is 1e-4. Ensures .. math:: f(x_k+\alpha p_k) \le f(x_k) \;+\mathrm{armijo\_condition}\,\cdot\,\alpha\,\nabla f(x_k)^\top p_k, so each step yields at least a fraction **armijo_condition** of the predicted decrease. Smaller ⇒ more aggressive steps, larger ⇒ more conservative ones. SciPy name: **c1**. - **curvature_condition** (float): Parameter for curvature condition rule. Default is 0.9. Ensures .. 
math:: \nabla f(x_k+\alpha p_k)^\top p_k \ge \mathrm{curvature\_condition}\,\cdot\,\nabla f(x_k)^\top p_k, so the new slope isn’t too negative. Smaller ⇒ stricter curvature reduction (smaller steps), larger ⇒ looser (bigger steps). SciPy name: **c2**. ``` ```{eval-rst} .. dropdown:: scipy_conjugate_gradient .. code-block:: "scipy_conjugate_gradient" Minimize a function using a nonlinear conjugate gradient algorithm. The conjugate gradient method finds functions' local optima using just the gradient. This conjugate gradient algorithm is based on that of Polak and Ribiere, detailed in :cite:`Nocedal2006`, pp. 120-122. Conjugate gradient methods tend to work better when: - the criterion has a unique global minimizing point, and no local minima or other stationary points. - the criterion is, at least locally, reasonably well approximated by a quadratic function. - the criterion is continuous and has a continuous gradient. - the gradient is not too large, e.g., has a norm less than 1000. - The initial guess is reasonably close to the criterion's global minimizer. - **convergence.gtol_abs** (float): Stop if all elements of the gradient are smaller than this. - **stopping.maxiter** (int): If the maximum number of iterations is reached, the optimization stops, but we do not count this as convergence. - **norm** (float): Order of the vector norm that is used to calculate the gradient's "score" that is compared to the gradient tolerance to determine convergence. Default is infinite which means that the largest entry of the gradient vector is compared to the gradient tolerance. - **display** (bool): Set to True to print convergence messages. Default is False. SciPy name: **disp**. ``` ```{eval-rst} .. dropdown:: scipy_newton_cg .. code-block:: "scipy_newton_cg" Minimize a scalar function using Newton's conjugate gradient algorithm. .. 
warning:: In our benchmark using a quadratic objective function, the truncated newton algorithm did not find the optimum very precisely (less than 4 decimal places). If you require high precision, you should refine an optimum found with Powell with another local optimizer. Newton's conjugate gradient algorithm uses an approximation of the Hessian to find the minimum of a function. It is practical for small and large problems (see :cite:`Nocedal2006`, p. 140). Newton-CG methods are also called truncated Newton methods. This function differs scipy_truncated_newton because - ``scipy_newton_cg``'s algorithm is written purely in Python using NumPy and scipy while ``scipy_truncated_newton``'s algorithm calls a C function. - ``scipy_newton_cg``'s algorithm is only for unconstrained minimization while ``scipy_truncated_newton``'s algorithm supports bounds. Conjugate gradient methods tend to work better when: - the criterion has a unique global minimizing point, and no local minima or other stationary points. - the criterion is, at least locally, reasonably well approximated by a quadratic function. - the criterion is continuous and has a continuous gradient. - the gradient is not too large, e.g., has a norm less than 1000. - The initial guess is reasonably close to the criterion's global minimizer. - **convergence.xtol_rel** (float): Stop when the relative movement between parameter vectors is smaller than this. Newton CG uses the average relative change in the parameters for determining the convergence. - **stopping.maxiter** (int): If the maximum number of iterations is reached, the optimization stops, but we do not count this as convergence. - **display** (bool): Set to True to print convergence messages. Default is False. SciPy name: **disp**. ``` ```{eval-rst} .. dropdown:: scipy_cobyla .. code-block:: "scipy_cobyla" Minimize a scalar function of one or more variables using the COBYLA algorithm. COBYLA stands for Constrained Optimization By Linear Approximation. 
It is derivative-free and supports nonlinear inequality and equality constraints. .. note:: Cobyla's general nonlinear constraints is not supported yet by optimagic. Scipy's implementation wraps the FORTRAN implementation of the algorithm. For more information on COBYLA see :cite:`Powell1994`, :cite:`Powell1998` and :cite:`Powell2007`. - **stopping.maxiter** (int): If the maximum number of iterations is reached, the optimization stops, but we do not count this as convergence. - **convergence.xtol_rel** (float): Stop when the relative movement between parameter vectors is smaller than this. In case of COBYLA this is a lower bound on the size of the trust region and can be seen as the required accuracy in the variables but this accuracy is not guaranteed. - **trustregion.initial_radius** (float): Initial value of the trust region radius. Since a linear approximation is likely only good near the current simplex, the linear program is given the further requirement that the solution, which will become the next evaluation point must be within a radius RHO_j from x_j. RHO_j only decreases, never increases. The initial RHO_j is the `trustregion.initial_radius`. In this way COBYLA's iterations behave like a trust region algorithm. - **display** (bool): Set to True to print convergence messages. Default is False. SciPy name: **disp**. ``` ```{eval-rst} .. dropdown:: scipy_truncated_newton .. code-block:: "scipy_truncated_newton" Minimize a scalar function using truncated Newton algorithm. This function differs from scipy_newton_cg because - ``scipy_newton_cg``'s algorithm is written purely in Python using NumPy and scipy while ``scipy_truncated_newton``'s algorithm calls a C function. - ``scipy_newton_cg``'s algorithm is only for unconstrained minimization while ``scipy_truncated_newton``'s algorithm supports bounds. Conjugate gradient methods tend to work better when: - the criterion has a unique global minimizing point, and no local minima or other stationary points. 
- the criterion is, at least locally, reasonably well approximated by a quadratic function. - the criterion is continuous and has a continuous gradient. - the gradient is not too large, e.g., has a norm less than 1000. - The initial guess is reasonably close to the criterion's global minimizer. optimagic does not support the ``scale`` nor ``offset`` argument as they are not compatible with the way optimagic handles constraints. It also does not support ``messg_num`` which is an additional way to control the verbosity of the optimizer. - **func_min_estimate** (float): Minimum function value estimate. Defaults to 0. - **stopping.maxiter** (int): If the maximum number of iterations is reached, the optimization stops, but we do not count this as convergence. - **stopping.maxfun** (int): If the maximum number of function evaluation is reached, the optimization stops but we do not count this as convergence. - **convergence.xtol_abs** (float): Absolute difference in parameters between iterations after scaling that is tolerated to declare convergence. - **convergence.ftol_abs** (float): Absolute difference in the criterion value between iterations after scaling that is tolerated to declare convergence. - **convergence.gtol_abs** (float): Stop if the value of the projected gradient (after applying x scaling factors) is smaller than this. If convergence.gtol_abs < 0.0, convergence.gtol_abs is set to 1e-2 * sqrt(accuracy). - **max_hess_evaluations_per_iteration** (int): Maximum number of hessian*vector evaluations per main iteration. If ``max_hess_evaluations == 0``, the direction chosen is ``- gradient``. If ``max_hess_evaluations < 0``, ``max_hess_evaluations`` is set to ``max(1,min(50,n/2))`` where n is the length of the parameter vector. This is also the default. - **max_step_for_line_search** (float): Maximum step for the line search. It may be increased during the optimization. If too small, it will be set to 10.0. By default we use scipy's default. 
- **line_search_severity** (float): Severity of the line search. If < 0 or > 1, set to 0.25. optimagic defaults to scipy's default. - **finitie_difference_precision** (float): Relative precision for finite difference calculations. If <= machine_precision, set to sqrt(machine_precision). optimagic defaults to scipy's default. - **criterion_rescale_factor** (float): Scaling factor (in log10) used to trigger criterion rescaling. If 0, rescale at each iteration. If a large value, never rescale. If < 0, rescale is set to 1.3. optimagic defaults to scipy's default. - **display** (bool): Set to True to print convergence messages. Default is False. SciPy name: **disp**. ``` ```{eval-rst} .. dropdown:: scipy_trust_constr .. code-block:: "scipy_trust_constr" Minimize a scalar function of one or more variables subject to constraints. .. warning:: In our benchmark using a quadratic objective function, the trust_constr algorithm did not find the optimum very precisely (less than 4 decimal places). If you require high precision, you should refine an optimum found with trust_constr with another local optimizer. .. note:: Its general nonlinear constraints' handling is not supported yet by optimagic. It switches between two implementations depending on the problem definition. It is the most versatile constrained minimization algorithm implemented in SciPy and the most appropriate for large-scale problems. For equality constrained problems it is an implementation of Byrd-Omojokun Trust-Region SQP method described in :cite:`Lalee1998` and in :cite:`Conn2000`, p. 549. When inequality constraints are imposed as well, it switches to the trust-region interior point method described in :cite:`Byrd1999`. This interior point algorithm in turn, solves inequality constraints by introducing slack variables and solving a sequence of equality-constrained barrier problems for progressively smaller values of the barrier parameter. 
The previously described equality constrained SQP method is used to solve the subproblems with increasing levels of accuracy as the iterate gets closer to a solution. It approximates the Hessian using the Broyden-Fletcher-Goldfarb-Shanno (BFGS) Hessian update strategy. - **convergence.gtol_abs** (float): Tolerance for termination by the norm of the Lagrangian gradient. The algorithm will terminate when both the infinity norm (i.e., max abs value) of the Lagrangian gradient and the constraint violation are smaller than the convergence.gtol_abs. For this algorithm we use scipy's gradient tolerance for trust_constr. This smaller tolerance is needed for the sum of squares tests to pass. - **stopping.maxiter** (int): If the maximum number of iterations is reached, the optimization stops, but we do not count this as convergence. - **convergence.xtol_rel** (float): Tolerance for termination by the change of the independent variable. The algorithm will terminate when the radius of the trust region used in the algorithm is smaller than the convergence.xtol_rel. - **trustregion.initial_radius** (float): Initial value of the trust region radius. The trust radius gives the maximum distance between solution points in consecutive iterations. It reflects the trust the algorithm puts in the local approximation of the optimization problem. For an accurate local approximation the trust-region should be large and for an approximation valid only close to the current point it should be a small one. The trust radius is automatically updated throughout the optimization process, with ``trustregion_initial_radius`` being its initial value. - **display** (bool): Set to True to print convergence messages. Default is False. SciPy name: **disp**. ``` ```{eval-rst} .. dropdown:: scipy_ls_dogbox .. code-block:: "scipy_ls_dogbox" Minimize a nonlinear least squares problem using a rectangular trust region method. Typical use case is small problems with bounds. 
Not recommended for problems with rank-deficient Jacobian. The algorithm supports the following options: - **convergence.ftol_rel** (float): Stop when the relative improvement between two iterations is below this. - **convergence.gtol_rel** (float): Stop when the gradient, divided by the absolute value of the criterion function is smaller than this. - **stopping.maxfun** (int): If the maximum number of function evaluation is reached, the optimization stops but we do not count this as convergence. - **tr_solver** (str): Method for solving trust-region subproblems, relevant only for 'trf' and 'dogbox' methods. - 'exact' is suitable for not very large problems with dense Jacobian matrices. The computational complexity per iteration is comparable to a singular value decomposition of the Jacobian matrix. - 'lsmr' is suitable for problems with sparse and large Jacobian matrices. It uses the iterative procedure `scipy.sparse.linalg.lsmr` for finding a solution of a linear least-squares problem and only requires matrix-vector product evaluations. If None (default), the solver is chosen based on the type of Jacobian returned on the first iteration. - **tr_solver_options** (dict): Keyword options passed to trust-region solver. - ``tr_solver='exact'``: `tr_options` are ignored. - ``tr_solver='lsmr'``: options for `scipy.sparse.linalg.lsmr`. ``` ```{eval-rst} .. dropdown:: scipy_ls_trf .. code-block:: "scipy_ls_trf" Minimize a nonlinear least squares problem using a trustregion reflective method. Trust Region Reflective algorithm, particularly suitable for large sparse problems with bounds. Generally robust method. The algorithm supports the following options: - **convergence.ftol_rel** (float): Stop when the relative improvement between two iterations is below this. - **convergence.gtol_rel** (float): Stop when the gradient, divided by the absolute value of the criterion function is smaller than this. 
- **stopping.maxfun** (int): If the maximum number of function evaluation is reached, the optimization stops but we do not count this as convergence. - **tr_solver** (str): Method for solving trust-region subproblems, relevant only for 'trf' and 'dogbox' methods. - 'exact' is suitable for not very large problems with dense Jacobian matrices. The computational complexity per iteration is comparable to a singular value decomposition of the Jacobian matrix. - 'lsmr' is suitable for problems with sparse and large Jacobian matrices. It uses the iterative procedure `scipy.sparse.linalg.lsmr` for finding a solution of a linear least-squares problem and only requires matrix-vector product evaluations. If None (default), the solver is chosen based on the type of Jacobian returned on the first iteration. - **tr_solver_options** (dict): Keyword options passed to trust-region solver. - ``tr_solver='exact'``: `tr_options` are ignored. - ``tr_solver='lsmr'``: options for `scipy.sparse.linalg.lsmr`. ``` ```{eval-rst} .. dropdown:: scipy_ls_lm .. code-block:: "scipy_ls_lm" Minimize a nonlinear least squares problem using a Levenberg-Marquardt method. Does not handle bounds and sparse Jacobians. Usually the most efficient method for small unconstrained problems. The algorithm supports the following options: - **convergence.ftol_rel** (float): Stop when the relative improvement between two iterations is below this. - **convergence.gtol_rel** (float): Stop when the gradient, divided by the absolute value of the criterion function is smaller than this. - **stopping.maxfun** (int): If the maximum number of function evaluation is reached, the optimization stops but we do not count this as convergence. - **tr_solver** (str): Method for solving trust-region subproblems, relevant only for 'trf' and 'dogbox' methods. - 'exact' is suitable for not very large problems with dense Jacobian matrices. 
The computational complexity per iteration is comparable to a singular value decomposition of the Jacobian matrix. - 'lsmr' is suitable for problems with sparse and large Jacobian matrices. It uses the iterative procedure `scipy.sparse.linalg.lsmr` for finding a solution of a linear least-squares problem and only requires matrix-vector product evaluations. If None (default), the solver is chosen based on the type of Jacobian returned on the first iteration. - **tr_solver_options** (dict): Keyword options passed to trust-region solver. - ``tr_solver='exact'``: `tr_options` are ignored. - ``tr_solver='lsmr'``: options for `scipy.sparse.linalg.lsmr`. ``` ```{eval-rst} .. dropdown:: scipy_basinhopping .. code-block:: "scipy_basinhopping" Find the global minimum of a function using the basin-hopping algorithm which combines a global stepping algorithm with local minimization at each step. Basin-hopping is a two-phase method that combines a global stepping algorithm with local minimization at each step. Designed to mimic the natural process of energy minimization of clusters of atoms, it works well for similar problems with “funnel-like, but rugged” energy landscapes. This is mainly supported for completeness. Consider optimagic's built in multistart optimization for a similar approach that can run multiple optimizations in parallel, supports all local algorithms in optimagic (as opposed to just those from scipy) and allows for a better visualization of the multistart history. When provided the derivative is passed to the local minimization method. The algorithm supports the following options: - **local_algorithm** (str/callable): Any scipy local minimizer: valid options are. "Nelder-Mead". "Powell". "CG". "BFGS". "Newton-CG". "L-BFGS-B". "TNC". "COBYLA". "SLSQP". "trust-constr". "dogleg". "trust-ncg". "trust-exact". "trust-krylov". or a custom function for local minimization, default is "L-BFGS-B". - **n_local_optimizations**: (int) The number local optimizations. 
Default is 100 as in scipy's default. - **temperature**: (float) Controls the randomness in the optimization process. Higher the temperatures the larger jumps in function value will be accepted. Default is 1.0 as in scipy's default. - **stepsize**: (float) Maximum step size. Default is 0.5 as in scipy's default. - **local_algo_options**: (dict) Additional keyword arguments for the local minimizer. Check the documentation of the local scipy algorithms for details on what is supported. - **take_step**: (callable) Replaces the default step-taking routine. Default is None as in scipy's default. - **accept_test**: (callable) Define a test to judge the acception of steps. Default is None as in scipy's default. - **interval**: (int) Determined how often the step size is updated. Default is 50 as in scipy's default. - **convergence.n_unchanged_iterations**: (int) Number of iterations the global minimum estimate stays the same to stops the algorithm. Default is None as in scipy's default. - **seed**: (None, int, numpy.random.Generator,numpy.random.RandomState)Default is None as in scipy's default. - **target_accept_rate**: (float) Adjusts the step size. Default is 0.5 as in scipy's default. - **stepwise_factor**: (float) Step size multiplier upon each step. Lies between (0,1), default is 0.9 as in scipy's default. ``` ```{eval-rst} .. dropdown:: scipy_brute .. code-block:: "scipy_brute" Find the global minimum of a fuction over a given range by brute force. Brute force evaluates the criterion at each point and that is why better suited for problems with very few parameters. The start values are not actually used because the grid is only defined by bounds. It is still necessary for optimagic to infer the number and format of the parameters. Due to the parallelization, this algorithm cannot collect a history of parameters and criterion evaluations. 
The algorithm supports the following options: - **n_grid_points** (int): the number of grid points to use for the brute force search. Default is 20 as in scipy. - **polishing_function** (callable): Function to seek a more precise minimum near brute-force' best gridpoint taking brute-force's result at initial guess as a positional argument. Default is None providing no polishing. - **n_cores** (int): The number of cores on which the function is evaluated in parallel. Default 1. - **batch_evaluator** (str or callable). An optimagic batch evaluator. Default 'joblib'. ``` ```{eval-rst} .. dropdown:: scipy_differential_evolution .. code-block:: "scipy_differential_evolution" Find the global minimum of a multivariate function using differential evolution (DE). DE is a gradient-free method. Due to optimagic's general parameter format the integrality and vectorized arguments are not supported. The algorithm supports the following options: - **strategy** (str): Measure of quality to improve a candidate solution, can be one of the following keywords (default 'best1bin'.) - ‘best1bin’ - ‘best1exp’ - ‘rand1exp’ - ‘randtobest1exp’ - ‘currenttobest1exp’ - ‘best2exp’ - ‘rand2exp’ - ‘randtobest1bin’ - ‘currenttobest1bin’ - ‘best2bin’ - ‘rand2bin’ - ‘rand1bin’ - **stopping.maxiter** (int): The maximum number of criterion evaluations without polishing is(stopping.maxiter + 1) * population_size * number of parameters - **population_size_multiplier** (int): A multiplier setting the population size. The number of individuals in the population is population_size * number of parameters. The default 15. - **convergence.ftol_rel** (float): Default 0.01. - **mutation_constant** (float/tuple): The differential weight denoted by F in literature. Should be within 0 and 2. The tuple form is used to specify (min, max) dithering which can help speed convergence. Default is (0.5, 1). 
- **recombination_constant** (float): The crossover probability or CR in the literature determines the probability that two solution vectors will be combined to produce a new solution vector. Should be between 0 and 1. The default is 0.7. - **seed** (int): DE is stochastic. Define a seed for reproducability. - **polish** (bool): Uses scipy's L-BFGS-B for unconstrained problems and trust-constr for constrained problems to slightly improve the minimization. Default is True. - **sampling_method** (str/np.array): Specify the sampling method for the initial population. It can be one of the following options - "latinhypercube" - "sobol" - "halton" - "random" - an array specifying the initial population of shape (total population size, number of parameters). The initial population is clipped to bounds before use. Default is 'latinhypercube' - **convergence.ftol_abs** (float): CONVERGENCE_SECOND_BEST_ABSOLUTE_CRITERION_TOLERANCE - **n_cores** (int): The number of cores on which the function is evaluated in parallel. Default 1. - **batch_evaluator** (str or callable). An optimagic batch evaluator. Default 'joblib'. ``` ```{eval-rst} .. dropdown:: scipy_shgo .. code-block:: "scipy_shgo" Find the global minimum of a fuction using simplicial homology global optimization. The algorithm supports the following options: - **local_algorithm** (str): The local optimization algorithm to be used. Only COBYLA and SLSQP supports constraints. Valid options are "Nelder-Mead". "Powell". "CG". "BFGS". "Newton-CG". "L-BFGS-B". "TNC". "COBYLA". "SLSQP". "trust-constr". "dogleg". "trust-ncg". "trust-exact". "trust-krylov" or a custom function for local minimization, default is "L-BFGS-B". - **local_algo_options**: (dict) Additional keyword arguments for the local minimizer. Check the documentation of the local scipy algorithms for details on what is supported. - **n_sampling_points** (int): Specify the number of sampling points to construct the simplical complex. 
- **n_simplex_iterations** (int): Number of iterations to construct the simplical complex. Default is 1 as in scipy. - **sampling_method** (str/callable): The method to use for sampling the search space. Default 'simplicial'. - **max_sampling_evaluations** (int): The maximum number of evaluations of the criterion function in the sampling phase. - **convergence.minimum_criterion_value** (float): Specify the global minimum when it is known. Default is - np.inf. For maximization problems, flip the sign. - **convergence.minimum_criterion_tolerance** (float): Specify the relative error between the current best minimum and the supplied global criterion_minimum allowed. Default is scipy's default, 1e-4. - **stopping.maxiter** (int): The maximum number of iterations. - **stopping.maxfun** (int): The maximum number of criterion evaluations. - **stopping.max_processing_time** (int): The maximum time allowed for the optimization. - **minimum_homology_group_rank_differential** (int): The minimum difference in the rank of the homology group between iterations. - **symmetry** (bool): Specify whether the criterion contains symetric variables. - **minimize_every_iteration** (bool): Specify whether the gloabal sampling points are passed to the local algorithm in every iteration. - **max_local_minimizations_per_iteration** (int): The maximum number of local optimizations per iteration. Default False, i.e. no limit. - **infinity_constraints** (bool): Specify whether to save the sampling points outside the feasible domain. Default is True. ``` ```{eval-rst} .. dropdown:: scipy_dual_annealing .. code-block:: "scipy_dual_annealing" Find the global minimum of a function using dual annealing for continuous variables. The algorithm supports the following options: - **stopping.maxiter** (int): Specify the maximum number of global searh iterations. - **local_algorithm** (str): The local optimization algorithm to be used. 
valid options are: "Nelder-Mead", "Powell", "CG", "BFGS", "Newton-CG", "L-BFGS-B", "TNC", "COBYLA", "SLSQP", "trust-constr", "dogleg", "trust-ncg", "trust-exact", "trust-krylov", Default "L-BFGS-B". - **local_algo_options**: (dict) Additional keyword arguments for the local minimizer. Check the documentation of the local scipy algorithms for details on what is supported. - **initial_temperature** (float): The temparature algorithm starts with. The higher values lead to a wider search space. The range is (0.01, 5.e4] and default is 5230.0. - **restart_temperature_ratio** (float): Reanneling starts when the algorithm is decreased to initial_temperature * restart_temperature_ratio. Default is 2e-05. - **visit** (float): Specify the thickness of visiting distribution's tails. Range is (1, 3] and default is scipy's default, 2.62. - **accept** (float): Controls the probability of acceptance. Range is (-1e4, -5] and default is scipy's default, -5.0. Smaller values lead to lower acceptance probability. - **stopping.maxfun** (int): soft limit for the number of criterion evaluations. - **seed** (int, None or RNG): Dual annealing is a stochastic process. Seed or random number generator. Default None. - **no_local_search** (bool): Specify whether to apply a traditional Generalized Simulated Annealing with no local search. Default is False. ``` ```{eval-rst} .. dropdown:: scipy_direct .. code-block:: "scipy_direct" Find the global minimum of a function using dividing rectangles method. It is not necessary to provide an initial guess. The algorithm supports the following options: - **eps** (float): Specify the minimum difference of the criterion values between the current best hyperrectangle and the next potentially best hyperrectangle to be divided determining the trade off between global and local search. Default is 1e-6 differing from scipy's default 1e-4. - **stopping.maxfun** (int/None): Maximum number of criterion evaluations allowed. 
Default is None which caps the number of evaluations at 1000 * number of dimentions automatically. - **stopping.maxiter** (int): Maximum number of iterations allowed. - **locally_biased** (bool): Determine whether to use the locally biased variant of the algorithm DIRECT_L. Default is True. - **convergence.minimum_criterion_value** (float): Specify the global minimum when it is known. Default is minus infinity. For maximization problems, flip the sign. - **convergence.minimum_criterion_tolerance** (float): Specify the relative error between the current best minimum and the supplied global criterion_minimum allowed. Default is scipy's default, 1e-4. - **volume_hyperrectangle_tolerance** (float): Specify the smallest volume of the hyperrectangle containing the lowest criterion value allowed. Range is (0,1). Default is 1e-16. - **length_hyperrectangle_tolerance** (float): Depending on locally_biased it can refer to normalized side (True) or diagonal (False) length of the hyperrectangle containing the lowest criterion value. Range is (0,1). Default is scipy's default, 1e-6. ``` (own-algorithms)= ## Own optimizers We implement a few algorithms from scratch. They are currently considered experimental. ```{eval-rst} .. dropdown:: bhhh .. code-block:: "bhhh" Minimize a likelihood function using the BHHH algorithm. BHHH (:cite:`Berndt1974`) can - and should ONLY - be used for minimizing (or maximizing) a likelihood. It is similar to the Newton-Raphson algorithm, but replaces the Hessian matrix with the outer product of the gradient. This approximation is based on the information matrix equality (:cite:`Halbert1982`) and is thus only vaid when minimizing (or maximizing) a likelihood. The criterion function :func:`func` should return a dictionary with at least the entry ``{"contributions": array_or_pytree}`` where ``array_or_pytree`` contains the likelihood contributions of each individual. 
bhhh supports the following options: - **convergence.gtol_abs** (float): Stopping criterion for the gradient tolerance. Default is 1e-8. - **stopping.maxiter** (int): Maximum number of iterations. If reached, terminate. Default is 200. ``` ```{eval-rst} .. dropdown:: neldermead_parallel .. code-block:: "neldermead_parallel" Minimize a function using the neldermead_parallel algorithm. This is a parallel Nelder-Mead algorithm following Lee D., Wiswall M., A parallel implementation of the simplex function minimization routine, Computational Economics, 2007. The algorithm was implemented by Jacek Barszczewski The algorithm supports the following options: - **init_simplex_method** (string or callable): Name of the method to create initial simplex or callable which takes as an argument initial value of parameters and returns initial simplex as j+1 x j array, where j is length of x. The default is "gao_han". - **n_cores** (int): Degree of parallization. The default is 1 (no parallelization). - **adaptive** (bool): Adjust parameters of Nelder-Mead algorithm to account for simplex size. The default is True. - **stopping.maxiter** (int): Maximum number of algorithm iterations. The default is STOPPING_MAX_ITERATIONS. - **convergence.ftol_abs** (float): maximal difference between function value evaluated on simplex points. The default is CONVERGENCE_SECOND_BEST_ABSOLUTE_CRITERION_TOLERANCE. - **convergence.xtol_abs** (float): maximal distance between points in the simplex. The default is CONVERGENCE_SECOND_BEST_ABSOLUTE_PARAMS_TOLERANCE. - **batch_evaluator** (string or callable): See :ref:`batch_evaluators` for details. Default "joblib". ``` ```{eval-rst} .. dropdown:: pounders .. code-block:: "pounders" Minimize a function using the POUNDERS algorithm. 
POUNDERs (:cite:`Benson2017`, :cite:`Wild2015`, `GitHub repository `_) can be a useful tool for economists who estimate structural models using indirect inference, because unlike commonly used algorithms such as Nelder-Mead, POUNDERs is tailored for minimizing a non-linear sum of squares objective function, and therefore may require fewer iterations to arrive at a local optimum than Nelder-Mead. Scaling the problem is necessary such that bounds correspond to the unit hypercube :math:`[0, 1]^n`. For unconstrained problems, scale each parameter such that unit changes in parameters result in similar order-of-magnitude changes in the criterion value(s). pounders supports the following options: - **convergence.gtol_abs**: Convergence tolerance for the absolute gradient norm. Stop if norm of the gradient is less than this. Default is 1e-8. - **convergence.gtol_rel**: Convergence tolerance for the relative gradient norm. Stop if norm of the gradient relative to the criterion value is less than this. Default is 1-8. - **convergence.gtol_scaled**: Convergence tolerance for the scaled gradient norm. Stop if norm of the gradient divided by norm of the gradient at the initial parameters is less than this. Disabled, i.e. set to False, by default. - **max_interpolation_points** (int): Maximum number of interpolation points. Default is `2 * n + 1`, where `n` is the length of the parameter vector. - **stopping.maxiter** (int): Maximum number of iterations. If reached, terminate. Default is 2000. - **trustregion_initial_radius (float)**: Delta, initial trust-region radius. 0.1 by default. - **trustregion_minimal_radius** (float): Minimal trust-region radius. 1e-6 by default. - **trustregion_maximal_radius** (float): Maximal trust-region radius. 1e6 by default. - **trustregion_shrinking_factor_not_successful** (float): Shrinking factor of the trust-region radius in case the solution vector of the suproblem is not accepted, but the model is fully linear (i.e. "valid"). 
Default is 0.5. - **trustregion_expansion_factor_successful** (float): Expansion factor of the trust-region radius in case the solution vector of the subproblem is accepted.
In this case, two additional stopping criteria are "gtol_abs_cg" and "gtol_rel_cg" - "steihaug-toint" - "trsbox" (default) If the subsolver "gqtpar" is employed, the two stopping criteria are "k_easy" and "k_hard". None of the dictionary keys need to be specified by default, but can be. - **batch_evaluator** (str or callable): Name of a pre-implemented batch evaluator (currently "joblib" and "pathos_mp") or callable with the same interface as the optimagic batch_evaluators. Default is "joblib". - **n_cores (int)**: Number of processes used to parallelize the function evaluations. Default is 1. ``` (tao-algorithms)= ## Optimizers from the Toolkit for Advanced Optimization (TAO) We wrap the pounders algorithm from the Toolkit of Advanced optimization. To use it you need to have [petsc4py](https://pypi.org/project/petsc4py/) installed. ```{eval-rst} .. dropdown:: tao_pounders .. code-block:: "tao_pounders" Minimize a function using the POUNDERs algorithm. POUNDERs (:cite:`Benson2017`, :cite:`Wild2015`, `GitHub repository `_) can be a useful tool for economists who estimate structural models using indirect inference, because unlike commonly used algorithms such as Nelder-Mead, POUNDERs is tailored for minimizing a non-linear sum of squares objective function, and therefore may require fewer iterations to arrive at a local optimum than Nelder-Mead. Scaling the problem is necessary such that bounds correspond to the unit hypercube :math:`[0, 1]^n`. For unconstrained problems, scale each parameter such that unit changes in parameters result in similar order-of-magnitude changes in the criterion value(s). POUNDERs has several convergence criteria. Let :math:`X` be the current parameter vector, :math:`X_0` the initial parameter vector, :math:`g` the gradient, and :math:`f` the criterion function. ``absolute_gradient_tolerance`` stops the optimization if the norm of the gradient falls below :math:`\epsilon`. .. 
math:: ||g(X)|| < \epsilon ``relative_gradient_tolerance`` stops the optimization if the norm of the gradient relative to the criterion value falls below :math:`epsilon`. .. math:: \frac{||g(X)||}{|f(X)|} < \epsilon ``scaled_gradient_tolerance`` stops the optimization if the norm of the gradient is lower than some fraction :math:`epsilon` of the norm of the gradient at the initial parameters. .. math:: \frac{||g(X)||}{||g(X0)||} < \epsilon - **convergence.gtol_abs** (float): Stop if norm of gradient is less than this. If set to False the algorithm will not consider convergence.gtol_abs. - **convergence.gtol_rel** (float): Stop if relative norm of gradient is less than this. If set to False the algorithm will not consider convergence.gtol_rel. - **convergence.scaled_gradient_tolerance** (float): Stop if scaled norm of gradient is smaller than this. If set to False the algorithm will not consider convergence.scaled_gradient_tolerance. - **trustregion.initial_radius** (float): Initial value of the trust region radius. It must be :math:`> 0`. - **stopping.maxiter** (int): Alternative Stopping criterion. If set the routine will stop after the number of specified iterations or after the step size is sufficiently small. If the variable is set the default criteria will all be ignored. ``` (nag-algorithms)= ## Optimizers from the Numerical Algorithms Group (NAG) We wrap two algorithms from the numerical algorithms group. To use them, you need to install each of them separately: - `pip install DFO-LS` - `pip install Py-BOBYQA` ```{eval-rst} .. dropdown:: nag_dfols *Note*: We recommend to install `DFO-LS` version 1.5.3 or higher. Versions of 1.5.0 or lower also work but the versions `1.5.1` and `1.5.2` contain bugs that can lead to errors being raised. .. code-block:: "nag_dfols" Minimize a function with least squares structure using DFO-LS. 
The DFO-LS algorithm :cite:`Cartis2018b` is designed to solve the nonlinear least-squares minimization problem (with optional bound constraints). Remember to cite :cite:`Cartis2018b` when using DF-OLS in addition to optimagic. .. math:: \min_{x\in\mathbb{R}^n} &\quad f(x) := \sum_{i=1}^{m}r_{i}(x)^2 \\ \text{s.t.} &\quad \text{lower_bounds} \leq x \leq \text{upper_bounds} The :math:`r_{i}` are called root contributions in optimagic. DFO-LS is a derivative-free optimization algorithm, which means it does not require the user to provide the derivatives of f(x) or :math:`r_{i}(x)`, nor does it attempt to estimate them internally (by using finite differencing, for instance). There are two main situations when using a derivative-free algorithm (such as DFO-LS) is preferable to a derivative-based algorithm (which is the vast majority of least-squares solvers): 1. If the residuals are noisy, then calculating or even estimating their derivatives may be impossible (or at least very inaccurate). By noisy, we mean that if we evaluate :math:`r_{i}(x)` multiple times at the same value of x, we get different results. This may happen when a Monte Carlo simulation is used, for instance. 2. If the residuals are expensive to evaluate, then estimating derivatives (which requires n evaluations of each :math:`r_{i}(x)` for every point of interest x) may be prohibitively expensive. Derivative-free methods are designed to solve the problem with the fewest number of evaluations of the criterion as possible. To read the detailed documentation of the algorithm `click here `_. There are four possible convergence criteria: 1. when the lower trust region radius is shrunk below a minimum (``convergence.minimal_trustregion_radius_tolerance``). 2. when the improvements of iterations become very small (``convergence.slow_progress``). 
This is very similar to ``relative_criterion_tolerance`` but ``convergence.slow_progress`` is more general allowing to specify not only the threshold for convergence but also a period over which the improvements must have been very small. 3. when a sufficient reduction to the criterion value at the start parameters has been reached, i.e. when :math:`\frac{f(x)}{f(x_0)} \leq \text{convergence.ftol_scaled}` 4. when all evaluations on the interpolation points fall within a scaled version of the noise level of the criterion function. This is only applicable if the criterion function is noisy. You can specify this criterion with ``convergence.noise_corrected_criterion_tolerance``. DF-OLS supports resetting the optimization and doing a fast start by starting with a smaller interpolation set and growing it dynamically. For more information see `their detailed documentation `_ and :cite:`Cartis2018b`. - **clip_criterion_if_overflowing** (bool): see :ref:`algo_options`. convergence.minimal_trustregion_radius_tolerance (float): see :ref:`algo_options`. - **convergence.noise_corrected_criterion_tolerance** (float): Stop when the evaluations on the set of interpolation points all fall within this factor of the noise level. The default is 1, i.e. when all evaluations are within the noise level. If you want to not use this criterion but still flag your criterion function as noisy, set this tolerance to 0.0. .. warning:: Very small values, as in most other tolerances don't make sense here. - **convergence.ftol_scaled** (float): Terminate if a point is reached where the ratio of the criterion value to the criterion value at the start params is below this value, i.e. if :math:`f(x_k)/f(x_0) \leq \text{convergence.ftol_scaled}`. Note this is deactivated unless the lowest mathematically possible criterion value (0.0) is actually achieved. 
- **convergence.slow_progress** (dict): Arguments for converging when the evaluations over several iterations only yield small improvements on average, see see :ref:`algo_options` for details. - **initial_directions (str)**: see :ref:`algo_options`. - **interpolation_rounding_error** (float): see :ref:`algo_options`. - **noise_additive_level** (float): Used for determining the presence of noise and the convergence by all interpolation points being within noise level. 0 means no additive noise. Only multiplicative or additive is supported. - **noise_multiplicative_level** (float): Used for determining the presence of noise and the convergence by all interpolation points being within noise level. 0 means no multiplicative noise. Only multiplicative or additive is supported. - **noise_n_evals_per_point** (callable): How often to evaluate the criterion function at each point. This is only applicable for criterion functions with noise, when averaging multiple evaluations at the same point produces a more accurate value. The input parameters are the ``upper_trustregion_radius`` (:math:`\Delta`), the ``lower_trustregion_radius`` (:math:`\rho`), how many iterations the algorithm has been running for, ``n_iterations`` and how many resets have been performed, ``n_resets``. The function must return an integer. Default is no averaging (i.e. ``noise_n_evals_per_point(...) = 1``). - **random_directions_orthogonal** (bool): see :ref:`algo_options`. - **stopping.maxfun** (int): see :ref:`algo_options`. - **threshold_for_safety_step** (float): see :ref:`algo_options`. - **trustregion.expansion_factor_successful** (float): see :ref:`algo_options`. - **trustregion.expansion_factor_very_successful** (float): see :ref:`algo_options`. - **trustregion.fast_start_options** (dict): see :ref:`algo_options`. - **trustregion.initial_radius** (float): Initial value of the trust region radius. 
- **trustregion.method_to_replace_extra_points (str)**: If replacing extra points in successful iterations, whether to use geometry improving steps or the momentum method. Can be "geometry_improving" or "momentum". - **trustregion.n_extra_points_to_replace_successful** (int): The number of extra points (other than accepting the trust region step) to replace. Useful when ``trustregion.n_interpolation_points > len(x) + 1``. - **trustregion.n_interpolation_points** (int): The number of interpolation points to use. The default is :code:`len(x) + 1`. If using resets, this is the number of points to use in the first run of the solver, before any resets. - **trustregion.precondition_interpolation** (bool): see :ref:`algo_options`. - **trustregion.shrinking_factor_not_successful** (float): see :ref:`algo_options`. - **trustregion.shrinking_factor_lower_radius** (float): see :ref:`algo_options`. - **trustregion.shrinking_factor_upper_radius** (float): see :ref:`algo_options`. - **trustregion.threshold_successful** (float): Share of the predicted improvement that has to be achieved for a trust region iteration to count as successful. - **trustregion.threshold_very_successful** (float): Share of the predicted improvement that has to be achieved for a trust region iteration to count as very successful. ``` ```{eval-rst} .. dropdown:: nag_pybobyqa .. code-block:: "nag_pybobyqa" Minimize a function using the BOBYQA algorithm. BOBYQA (:cite:`Powell2009`, :cite:`Cartis2018`, :cite:`Cartis2018a`) is a derivative-free trust-region method. It is designed to solve nonlinear local minimization problems. Remember to cite :cite:`Powell2009` and :cite:`Cartis2018` when using pybobyqa in addition to optimagic. If you take advantage of the ``seek_global_optimum`` option, cite :cite:`Cartis2018a` additionally. There are two main situations when using a derivative-free algorithm like BOBYQA is preferable to derivative-based algorithms: 1. The criterion function is not deterministic, i.e. 
if we evaluate the criterion function multiple times at the same parameter vector we get different results. 2. The criterion function is very expensive to evaluate and only finite differences are available to calculate its derivative. The detailed documentation of the algorithm can be found `here `_. There are four possible convergence criteria: 1. when the trust region radius is shrunk below a minimum. This is approximately equivalent to an absolute parameter tolerance. 2. when the criterion value falls below an absolute, user-specified value, the optimization terminates successfully. 3. when insufficient improvements have been gained over a certain number of iterations. The (absolute) threshold for what constitutes an insufficient improvement, how many iterations have to be insufficient and with which iteration to compare can all be specified by the user. 4. when all evaluations on the interpolation points fall within a scaled version of the noise level of the criterion function. This is only applicable if the criterion function is noisy. - **clip_criterion_if_overflowing** (bool): see :ref:`algo_options`. - **convergence.criterion_value** (float): Terminate successfully if the criterion value falls below this threshold. This is deactivated (i.e. set to -inf) by default. - **convergence.minimal_trustregion_radius_tolerance** (float): Minimum allowed value of the trust region radius, which determines when a successful termination occurs. - **convergence.noise_corrected_criterion_tolerance** (float): Stop when the evaluations on the set of interpolation points all fall within this factor of the noise level. The default is 1, i.e. when all evaluations are within the noise level. If you want to not use this criterion but still flag your criterion function as noisy, set this tolerance to 0.0. .. warning:: Very small values, as in most other tolerances don't make sense here. 
- **convergence.slow_progress** (dict): Arguments for converging when the evaluations over several iterations only yield small improvements on average, see see :ref:`algo_options` for details. - **initial_directions** (str)``: see :ref:`algo_options`. - **interpolation_rounding_error** (float): see :ref:`algo_options`. - **noise_additive_level** (float): Used for determining the presence of noise and the convergence by all interpolation points being within noise level. 0 means no additive noise. Only multiplicative or additive is supported. - **noise_multiplicative_level** (float): Used for determining the presence of noise and the convergence by all interpolation points being within noise level. 0 means no multiplicative noise. Only multiplicative or additive is supported. - **noise_n_evals_per_point** (callable): How often to evaluate the criterion function at each point. This is only applicable for criterion functions with noise, when averaging multiple evaluations at the same point produces a more accurate value. The input parameters are the ``upper_trustregion_radius`` (``delta``), the ``lower_trustregion_radius`` (``rho``), how many iterations the algorithm has been running for, ``n_iterations`` and how many resets have been performed, ``n_resets``. The function must return an integer. Default is no averaging (i.e. ``noise_n_evals_per_point(...) = 1``). - **random_directions_orthogonal** (bool): see :ref:`algo_options`. - **seek_global_optimum** (bool): whether to apply the heuristic to escape local minima presented in :cite:`Cartis2018a`. Only applies for noisy criterion functions. - **stopping.maxfun** (int): see :ref:`algo_options`. - **threshold_for_safety_step** (float): see :ref:`algo_options`. - **trustregion.expansion_factor_successful** (float): see :ref:`algo_options`. - **trustregion.expansion_factor_very_successful** (float): see :ref:`algo_options`. - **trustregion.initial_radius** (float): Initial value of the trust region radius. 
- **trustregion.minimum_change_hession_for_underdetermined_interpolation** (bool): Whether to solve the underdetermined quadratic interpolation problem by minimizing the Frobenius norm of the Hessian, or change in Hessian. - **trustregion.n_interpolation_points** (int): The number of interpolation points to use. With $n=len(x)$ the default is $2n+1$ if the criterion is not noisy. Otherwise, it is set to $(n+1)(n+2)/2)$. Larger values are particularly useful for noisy problems. Py-BOBYQA requires .. math:: n + 1 \leq \text{trustregion.n_interpolation_points} \leq (n+1)(n+2)/2. - **trustregion.precondition_interpolation** (bool): see :ref:`algo_options`. - **trustregion.reset_options** (dict): Options for resetting the optimization, see :ref:`algo_options` for details. - **trustregion.shrinking_factor_not_successful** (float): see :ref:`algo_options`. - **trustregion.shrinking_factor_upper_radius** (float): see :ref:`algo_options`. - **trustregion.shrinking_factor_lower_radius** (float): see :ref:`algo_options`. - **trustregion.threshold_successful** (float): see :ref:`algo_options`. - **trustregion.threshold_very_successful** (float): see :ref:`algo_options`. ``` (pygmo-algorithms)= ## PYGMO2 Optimizers Please cite {cite}`Biscani2020` in addition to optimagic when using pygmo. optimagic supports the following [pygmo2](https://esa.github.io/pygmo2) optimizers. ```{eval-rst} .. dropdown:: pygmo_gaco .. code-block:: "pygmo_gaco" Minimize a scalar function using the generalized ant colony algorithm. The version available through pygmo is an generalized version of the original ant colony algorithm proposed by :cite:`Schlueter2009`. This algorithm can be applied to box-bounded problems. Ant colony optimization is a class of optimization algorithms modeled on the actions of an ant colony. Artificial "ants" (e.g. simulation agents) locate optimal solutions by moving through a parameter space representing all possible solutions. 
Real ants lay down pheromones directing each other to resources while exploring their environment. The simulated "ants" similarly record their positions and the quality of their solutions, so that in later simulation iterations more ants locate better solutions. The generalized ant colony algorithm generates future generations of ants by using a multi-kernel gaussian distribution based on three parameters (i.e., pheromone values) which are computed depending on the quality of each previous solution. The solutions are ranked through an oracle penalty method. - **population_size** (int): Size of the population. If None, it's twice the number of parameters but at least 64. - **batch_evaluator** (str or Callable): Name of a pre-implemented batch evaluator (currently 'joblib' and 'pathos_mp') or Callable with the same interface as the optimagic batch_evaluators. See :ref:`batch_evaluators`. - **n_cores** (int): Number of cores to use. - **seed** (int): seed used by the internal random number generator. - **discard_start_params** (bool): If True, the start params are not guaranteed to be part of the initial population. This saves one criterion function evaluation that cannot be done in parallel with other evaluations. Default False. - **stopping.maxiter** (int): Number of generations to evolve. - **kernel_size** (int): Number of solutions stored in the solution archive. - **speed_parameter_q** (float): This parameter manages the convergence speed towards the found minima (the smaller the faster). In the pygmo documentation it is referred to as $q$. It must be positive and can be larger than 1. The default is 1.0 until **threshold** is reached. Then it is set to 0.01. - **oracle** (float): oracle parameter used in the penalty method. - **accuracy** (float): accuracy parameter for maintaining a minimum penalty function's values distances. - **threshold** (int): when the iteration counter reaches the threshold the convergence speed is set to 0.01 automatically. 
To deactivate this effect set the threshold to stopping.maxiter which is the largest allowed value. - **speed_of_std_values_convergence** (int): parameter that determines the convergence speed of the standard deviations. This must be an integer (`n_gen_mark` in pygmo and pagmo). - **stopping.max_n_without_improvements** (int): if a positive integer is assigned here, the algorithm will count the runs without improvements, if this number exceeds the given value, the algorithm will be stopped. - **stopping.maxfun** (int): maximum number of function evaluations. - **focus** (float): this parameter makes the search for the optimum greedier and more focused on local improvements (the higher the greedier). If the value is very high, the search is more focused around the current best solutions. Values larger than 1 are allowed. - **cache** (bool): if True, memory is activated in the algorithm for multiple calls. ``` ```{eval-rst} .. dropdown:: pygmo_bee_colony .. code-block:: "pygmo_bee_colony" Minimize a scalar function using the artifical bee colony algorithm. The Artificial Bee Colony Algorithm was originally proposed by :cite:`Karaboga2007`. The implemented version of the algorithm is proposed in :cite:`Mernik2015`. The algorithm is only suited for bounded parameter spaces. - **stopping.maxiter** (int): Number of generations to evolve. - **seed** (int): seed used by the internal random number generator. - **discard_start_params** (bool): If True, the start params are not guaranteed to be part of the initial population. This saves one criterion function evaluation that cannot be done in parallel with other evaluations. Default False. - **max_n_trials** (int): Maximum number of trials for abandoning a source. Default is 1. - **population_size** (int): Size of the population. If None, it's twice the number of parameters but at least 20. ``` ```{eval-rst} .. dropdown:: pygmo_de .. 
code-block:: "pygmo_de" Minimize a scalar function using the differential evolution algorithm. Differential Evolution is a heuristic optimizer originally presented in :cite:`Storn1997`. The algorithm is only suited for bounded parameter spaces. - **population_size** (int): Size of the population. If None, it's twice the number of parameters but at least 10. - **seed** (int): seed used by the internal random number generator. - **discard_start_params** (bool): If True, the start params are not guaranteed to be part of the initial population. This saves one criterion function evaluation that cannot be done in parallel with other evaluations. Default False. - **stopping.maxiter** (int): Number of generations to evolve. - **weight_coefficient** (float): Weight coefficient. It is denoted by $F$ in the main paper and must lie in [0, 2]. It controls the amplification of the differential variation $(x_{r_2, G} - x_{r_3, G})$. - **crossover_probability** (float): Crossover probability. - **mutation_variant (str or int)**: code for the mutation variant to create a new candidate individual. The default is . The following are available: - "best/1/exp" (1, when specified as int) - "rand/1/exp" (2, when specified as int) - "rand-to-best/1/exp" (3, when specified as int) - "best/2/exp" (4, when specified as int) - "rand/2/exp" (5, when specified as int) - "best/1/bin" (6, when specified as int) - "rand/1/bin" (7, when specified as int) - "rand-to-best/1/bin" (8, when specified as int) - "best/2/bin" (9, when specified as int) - "rand/2/bin" (10, when specified as int) - **convergence.criterion_tolerance**: stopping criteria on the criterion tolerance. Default is 1e-6. It is not clear whether this is the absolute or relative criterion tolerance. - **convergence.xtol_rel**: stopping criteria on the x tolerance. In pygmo the default is 1e-6 but we use our default value of 1e-5. ``` ```{eval-rst} .. dropdown:: pygmo_sea .. 
code-block:: "pygmo_sea" Minimize a scalar function using the (N+1)-ES simple evolutionary algorithm. This algorithm represents the simplest evolutionary strategy, where a population of $\lambda$ individuals at each generation produces one offspring by mutating its best individual uniformly at random within the bounds. Should the offspring be better than the worst individual in the population it will substitute it. See :cite:`Oliveto2007`. The algorithm is only suited for bounded parameter spaces. - **population_size** (int): Size of the population. If None, it's twice the number of parameters but at least 10. - **seed** (int): seed used by the internal random number generator. - **discard_start_params** (bool): If True, the start params are not guaranteed to be part of the initial population. This saves one criterion function evaluation that cannot be done in parallel with other evaluations. Default False. - **stopping.maxiter** (int): number of generations to consider. Each generation will compute the objective function once. ``` ```{eval-rst} .. dropdown:: pygmo_sga .. code-block:: "pygmo_sga" Minimize a scalar function using a simple genetic algorithm. A detailed description of the algorithm can be found `in the pagmo2 documentation `_. See also :cite:`Oliveto2007`. - **population_size** (int): Size of the population. If None, it's twice the number of parameters but at least 64. - **seed** (int): seed used by the internal random number generator. - **discard_start_params** (bool): If True, the start params are not guaranteed to be part of the initial population. This saves one criterion function evaluation that cannot be done in parallel with other evaluations. Default False. - **stopping.maxiter** (int): Number of generations to evolve. - **crossover_probability** (float): Crossover probability. - **crossover_strategy** (str): the crossover strategy. One of “exponential”,“binomial”, “single” or “sbx”. Default is "exponential". 
- **eta_c** (float): distribution index for “sbx” crossover. This is an inactive parameter if other types of crossovers are selected. Can be in [1, 100]. - **mutation_probability** (float): Mutation probability. - **mutation_strategy** (str): Mutation strategy. Must be "gaussian", "polynomial" or "uniform". Default is "polynomial". - **mutation_polynomial_distribution_index** (float): Must be in [0, 1]. Default is 1. - **mutation_gaussian_width** (float): Must be in [0, 1]. Default is 1. - **selection_strategy (str)**: Selection strategy. Must be "tournament" or "truncated". - **selection_truncated_n_best** (int): number of best individuals to use in the "truncated" selection mechanism. - **selection_tournament_size** (int): size of the tournament in the "tournament" selection mechanism. Default is 1. ``` ```{eval-rst} .. dropdown:: pygmo_sade .. code-block:: "pygmo_sade" Minimize a scalar function using Self-adaptive Differential Evolution. The original Differential Evolution algorithm (pygmo_de) can be significantly improved introducing the idea of parameter self-adaptation. Many different proposals have been made to self-adapt both the crossover and the F parameters of the original differential evolution algorithm. pygmo's implementation supports two different mechanisms. The first one, proposed by :cite:`Brest2006`, does not make use of the differential evolution operators to produce new values for the weight coefficient $F$ and the crossover probability $CR$ and, strictly speaking, is thus not self-adaptation, rather parameter control. The resulting differential evolution variant is often referred to as jDE. The second variant is inspired by the ideas introduced by :cite:`Elsayed2011` and uses a variaton of the selected DE operator to produce new $CR$ anf $F$ parameters for each individual. This variant is referred to iDE. - **population_size** (int): Size of the population. If None, it's twice the number of parameters but at least 64. 
- **seed** (int): seed used by the internal random number generator. - **discard_start_params** (bool): If True, the start params are not guaranteed to be part of the initial population. This saves one criterion function evaluation that cannot be done in parallel with other evaluations. Default False. - jde (bool): Whether to use the jDE self-adaptation variant to control the $F$ and $CR$ parameter. If True jDE is used, else iDE. - **stopping.maxiter** (int): Number of generations to evolve. - **mutation_variant** (int or str): code for the mutation variant to create a new candidate individual. The default is "rand/1/exp". The first ten are the classical mutation variants introduced in the orginal DE algorithm, the remaining ones are, instead, considered in the work by :cite:`Elsayed2011`. The following are available: - "best/1/exp" or 1 - "rand/1/exp" or 2 - "rand-to-best/1/exp" or 3 - "best/2/exp" or 4 - "rand/2/exp" or 5 - "best/1/bin" or 6 - "rand/1/bin" or 7 - "rand-to-best/1/bin" or 8 - "best/2/bin" or 9 - "rand/2/bin" or 10 - "rand/3/exp" or 11 - "rand/3/bin" or 12 - "best/3/exp" or 13 - "best/3/bin" or 14 - "rand-to-current/2/exp" or 15 - "rand-to-current/2/bin" or 16 - "rand-to-best-and-current/2/exp" or 17 - "rand-to-best-and-current/2/bin" or 18 - **keep_adapted_params** (bool): when true the adapted parameters $CR$ anf $F$ are not reset between successive calls to the evolve method. Default is False. - ftol (float): stopping criteria on the x tolerance. - xtol (float): stopping criteria on the f tolerance. ``` ```{eval-rst} .. dropdown:: pygmo_cmaes .. code-block:: "pygmo_cmaes" Minimize a scalar function using the Covariance Matrix Evolutionary Strategy. CMA-ES is one of the most successful algorithm, classified as an Evolutionary Strategy, for derivative-free global optimization. The version supported by optimagic is the version described in :cite:`Hansen2006`. In contrast to the pygmo version, optimagic always sets force_bounds to True. 
This avoids that ill defined parameter values are evaluated. - **population_size** (int): Size of the population. If None, it's twice the number of parameters but at least 64. - **seed** (int): seed used by the internal random number generator. - **discard_start_params** (bool): If True, the start params are not guaranteed to be part of the initial population. This saves one criterion function evaluation that cannot be done in parallel with other evaluations. Default False. - **stopping.maxiter** (int): Number of generations to evolve. - **backward_horizon** (float): backward time horizon for the evolution path. It must lie betwen 0 and 1. - **variance_loss_compensation** (float): makes partly up for the small variance loss in case the indicator is zero. `cs` in the MATLAB Code of :cite:`Hansen2006`. It must lie between 0 and 1. - **learning_rate_rank_one_update** (float): learning rate for the rank-one update of the covariance matrix. `c1` in the pygmo and pagmo documentation. It must lie between 0 and 1. - **learning_rate_rank_mu_update** (float): learning rate for the rank-mu update of the covariance matrix. `cmu` in the pygmo and pagmo documentation. It must lie between 0 and 1. - **initial_step_size** (float): initial step size, :math:`\sigma^0` in the original paper. - **ftol** (float): stopping criteria on the x tolerance. - **xtol** (float): stopping criteria on the f tolerance. - **keep_adapted_params** (bool): when true the adapted parameters are not reset between successive calls to the evolve method. Default is False. ``` ```{eval-rst} .. dropdown:: pygmo_simulated_annealing .. code-block:: "pygmo_simulated_annealing" Minimize a function with the simulated annealing algorithm. This version of the simulated annealing algorithm is, essentially, an iterative random search procedure with adaptive moves along the coordinate directions. It permits uphill moves under the control of metropolis criterion, in the hope to avoid the first local minima encountered. 
This version is the one proposed in :cite:`Corana1987`. .. note: When selecting the starting and final temperature values it helps to think about the tempertaure as the deterioration in the objective function value that still has a 37% chance of being accepted. - **population_size** (int): Size of the population. If None, it's twice the number of parameters but at least 64. - **seed** (int): seed used by the internal random number generator. - **discard_start_params** (bool): If True, the start params are not guaranteed to be part of the initial population. This saves one criterion function evaluation that cannot be done in parallel with other evaluations. Default False. - **start_temperature** (float): starting temperature. Must be > 0. - **end_temperature** (float): final temperature. Our default (0.01) is lower than in pygmo and pagmo. The final temperature must be positive. - **n_temp_adjustments** (int): number of temperature adjustments in the annealing schedule. - **n_range_adjustments** (int): number of adjustments of the search range performed at a constant temperature. - **bin_size** (int): number of mutations that are used to compute the acceptance rate. - **start_range** (float): starting range for mutating the decision vector. It must lie between 0 and 1. ``` ```{eval-rst} .. dropdown:: pygmo_pso .. code-block:: "pygmo_pso" Minimize a scalar function using Particle Swarm Optimization. Particle swarm optimization (PSO) is a population based algorithm inspired by the foraging behaviour of swarms. In PSO each point has memory of the position where it achieved the best performance xli (local memory) and of the best decision vector :math:`x^g` in a certain neighbourhood, and uses this information to update its position. For a survey on particle swarm optimization algorithms, see :cite:`Poli2007`. Each particle determines its future position :math:`x_{i+1} = x_i + v_i` where .. 
math:: v_{i+1} = \omega (v_i + \eta_1 \cdot \mathbf{r}_1 \cdot (x_i - x^{l}_i) + \eta_2 \cdot \mathbf{r}_2 \cdot (x_i - x^g)) - **population_size** (int): Size of the population. If None, it's twice the number of parameters but at least 10. - **seed** (int): seed used by the internal random number generator. - **discard_start_params** (bool): If True, the start params are not guaranteed to be part of the initial population. This saves one criterion function evaluation that cannot be done in parallel with other evaluations. Default False. - **stopping.maxiter** (int): Number of generations to evolve. - **omega** (float): depending on the variant chosen, :math:`\omega` is the particles' inertia weight or the construction coefficient. It must lie between 0 and 1. - **force_of_previous_best** (float): :math:`\eta_1` in the equation above. It's the magnitude of the force, applied to the particle’s velocity, in the direction of its previous best position. It must lie between 0 and 4. - **force_of_best_in_neighborhood** (float): :math:`\eta_2` in the equation above. It's the magnitude of the force, applied to the particle’s velocity, in the direction of the best position in its neighborhood. It must lie between 0 and 4. - **max_velocity** (float): maximum allowed particle velocity as fraction of the box bounds. It must lie between 0 and 1. - **algo_variant (int or str)**: algorithm variant to be used: - 1 or "canonical_inertia": Canonical (with inertia weight) - 2 or "social_and_cog_rand": Same social and cognitive rand. - 3 or "all_components_rand": Same rand. for all components - 4 or "one_rand": Only one rand. - 5 or "canonical_constriction": Canonical (with constriction fact.) - 6 or "fips": Fully Informed (FIPS) - **neighbor_definition (int or str)**: swarm topology that defines each particle's neighbors that is to be used: - 1 or "gbest" - 2 or "lbest" - 3 or "Von Neumann" - 4 or "Adaptive random" - **neighbor_param** (int): the neighbourhood parameter. 
If the lbest topology is selected (neighbor_definition=2), it represents each particle's indegree (also outdegree) in the swarm topology. Particles have neighbours up to a radius of k = neighbor_param / 2 in the ring. If the Randomly-varying neighbourhood topology is selected (neighbor_definition=4), it represents each particle’s maximum outdegree in the swarm topology. The minimum outdegree is 1 (the particle always connects back to itself). If neighbor_definition is 1 or 3 this parameter is ignored. - **keep_velocities** (bool): when true the particle velocities are not reset between successive calls to `evolve`. ``` ```{eval-rst} .. dropdown:: pygmo_pso_gen .. code-block:: "pygmo_pso_gen" Minimize a scalar function with generational Particle Swarm Optimization. Particle Swarm Optimization (generational) is identical to pso, but does update the velocities of each particle before new particle positions are computed (taking into consideration all updated particle velocities). Each particle is thus evaluated on the same seed within a generation as opposed to the standard PSO which evaluates single particle at a time. Consequently, the generational PSO algorithm is suited for stochastic optimization problems. For a survey on particle swarm optimization algorithms, see :cite:`Poli2007`. Each particle determines its future position :math:`x_{i+1} = x_i + v_i` where .. math:: v_{i+1} = \omega (v_i + \eta_1 \cdot \mathbf{r}_1 \cdot (x_i - x^{l}_i) + \eta_2 \cdot \mathbf{r}_2 \cdot (x_i - x^g)) - **population_size** (int): Size of the population. If None, it's twice the number of parameters but at least 10. - **batch_evaluator (str or Callable)**: Name of a pre-implemented batch evaluator (currently 'joblib' and 'pathos_mp') or Callable with the same interface as the optimagic batch_evaluators. See :ref:`batch_evaluators`. - **n_cores** (int): Number of cores to use. - **seed** (int): seed used by the internal random number generator. 
- **discard_start_params** (bool): If True, the start params are not guaranteed to be part of the initial population. This saves one criterion function evaluation that cannot be done in parallel with other evaluations. Default False. - **stopping.maxiter** (int): Number of generations to evolve. - **omega** (float): depending on the variant chosen, :math:`\omega` is the particles' inertia weight or the constructuion coefficient. It must lie between 0 and 1. - **force_of_previous_best** (float): :math:`\eta_1` in the equation above. It's the magnitude of the force, applied to the particle’s velocity, in the direction of its previous best position. It must lie between 0 and 4. - **force_of_best_in_neighborhood** (float): :math:`\eta_2` in the equation above. It's the magnitude of the force, applied to the particle’s velocity, in the direction of the best position in its neighborhood. It must lie between 0 and 4. - **max_velocity** (float): maximum allowed particle velocity as fraction of the box bounds. It must lie between 0 and 1. - **algo_variant** (int): code of the algorithm's variant to be used: - 1 or "canonical_inertia": Canonical (with inertia weight) - 2 or "social_and_cog_rand": Same social and cognitive rand. - 3 or "all_components_rand": Same rand. for all components - 4 or "one_rand": Only one rand. - 5 or "canonical_constriction": Canonical (with constriction fact.) - 6 or "fips": Fully Informed (FIPS) - **neighbor_definition** (int): code for the swarm topology that defines each particle's neighbors that is to be used: - 1 or "gbest" - 2 or "lbest" - 3 or "Von Neumann" - 4 or "Adaptive random" - **neighbor_param** (int): the neighbourhood parameter. If the lbest topology is selected (neighbor_definition=2), it represents each particle's indegree (also outdegree) in the swarm topology. Particles have neighbours up to a radius of k = neighbor_param / 2 in the ring. 
If the Randomly-varying neighbourhood topology is selected (neighbor_definition=4), it represents each particle’s maximum outdegree in the swarm topology. The minimum outdegree is 1 (the particle always connects back to itself). If neighbor_definition is 1 or 3 this parameter is ignored. - **keep_velocities** (bool): when true the particle velocities are not reset between successive calls to `evolve`. ``` ```{eval-rst} .. dropdown:: pygmo_mbh .. code-block:: "pygmo_mbh" Minimize a scalar function using generalized Monotonic Basin Hopping. Monotonic basin hopping, or simply, basin hopping, is an algorithm rooted in the idea of mapping the objective function $f(x_0)$ into the local minima found starting from $x_0$. This simple idea allows a substantial increase of efficiency in solving problems, such as the Lennard-Jones cluster or the MGA-1DSM interplanetary trajectory problem that are conjectured to have a so-called funnel structure. See :cite:`Wales1997` for the paper introducing the basin hopping idea for a Lennard-Jones cluster optimization. pygmo provides an original generalization of this concept resulting in a meta-algorithm that operates on a population. When a population containing a single individual is used the original method is recovered. - **population_size** (int): Size of the population. If None, it's twice the number of parameters but at least 250. - **seed** (int): seed used by the internal random number generator. - **discard_start_params** (bool): If True, the start params are not guaranteed to be part of the initial population. This saves one criterion function evaluation that cannot be done in parallel with other evaluations. Default False. - **inner_algorithm** (pygmo.algorithm): an pygmo algorithm or a user-defined algorithm, either C++ or Python. If None the `pygmo.compass_search` algorithm will be used. 
- **stopping.max_inner_runs_without_improvement** (int): consecutive runs of the inner algorithm that need to result in no improvement for mbh to stop. - **perturbation** (float): the perturbation to be applied to each component. ``` ```{eval-rst} .. dropdown:: pygmo_xnes .. code-block:: "pygmo_xnes" Minimize a scalar function using Exponential Evolution Strategies. Exponential Natural Evolution Strategies is an algorithm closely related to CMAES and based on the adaptation of a gaussian sampling distribution via the so-called natural gradient. Like CMAES it is based on the idea of sampling new trial vectors from a multivariate distribution and using the new sampled points to update the distribution parameters. Naively this could be done following the gradient of the expected fitness as approximated by a finite number of sampled points. While this idea offers a powerful lead on algorithmic construction it has some major drawbacks that are solved in the so-called Natural Evolution Strategies class of algorithms by adopting, instead, the natural gradient. xNES is one of the most performing variants in this class. See :cite:`Glasmachers2010` and the `pagmo documentation on xNES `_ for details. - **population_size** (int): Size of the population. If None, it's twice the number of parameters but at least 64. - **seed** (int): seed used by the internal random number generator. - **discard_start_params** (bool): If True, the start params are not guaranteed to be part of the initial population. This saves one criterion function evaluation that cannot be done in parallel with other evaluations. Default False. - **stopping.maxiter** (int): Number of generations to evolve. - **learning_rate_mean_update** (float): learning rate for the mean update (:math:`\eta_\mu`). It must be between 0 and 1 or None. - **learning_rate_step_size_update** (float): learning rate for the step-size update. It must be between 0 and 1 or None. 
- **learning_rate_cov_matrix_update** (float): learning rate for the covariance matrix update. It must be between 0 and 1 or None. - **initial_search_share** (float): share of the given search space that will be initally searched. It must be between 0 and 1. Default is 1. - **ftol** (float): stopping criteria on the x tolerance. - **xtol** (float): stopping criteria on the f tolerance. - **keep_adapted_params** (bool): when true the adapted parameters are not reset between successive calls to the evolve method. Default is False. ``` ```{eval-rst} .. dropdown:: pygmo_gwo .. code-block:: "pygmo_gwo" Minimize a scalar function usinng the Grey Wolf Optimizer. The grey wolf optimizer was proposed by :cite:`Mirjalili2014`. The pygmo implementation that is wrapped by optimagic is pased on the pseudo code provided in that paper. This algorithm is a classic example of a highly criticizable line of search that led in the first decades of our millenia to the development of an entire zoo of metaphors inspiring optimzation heuristics. In our opinion they, as is the case for the grey wolf optimizer, are often but small variations of already existing heuristics rebranded with unnecessray and convoluted biological metaphors. In the case of GWO this is particularly evident as the position update rule is shokingly trivial and can also be easily seen as a product of an evolutionary metaphor or a particle swarm one. Such an update rule is also not particulary effective and results in a rather poor performance most of times. - **population_size** (int): Size of the population. If None, it's twice the number of parameters but at least 64. - **seed** (int): seed used by the internal random number generator. - **discard_start_params** (bool): If True, the start params are not guaranteed to be part of the initial population. This saves one criterion function evaluation that cannot be done in parallel with other evaluations. Default False. 
- **stopping.maxiter** (int): Number of generations to evolve. ``` ```{eval-rst} .. dropdown:: pygmo_compass_search .. code-block:: "pygmo_compass_search" Minimize a scalar function using compass search. The algorithm is described in :cite:`Kolda2003`. It is considered slow but reliable. It should not be used for stochastic problems. - **population_size** (int): Size of the population. Even though the algorithm is not population based the population size does affect the results of the algorithm. - **seed** (int): seed used by the internal random number generator. - **discard_start_params** (bool): If True, the start params are not guaranteed to be part of the initial population. This saves one criterion function evaluation that cannot be done in parallel with other evaluations. Default False. - **stopping.maxfun** (int): maximum number of function evaluations. - **start_range** (float): the start range. Must be in (0, 1]. - **stop_range** (float): the stop range. Must be in (0, start_range]. - **reduction_coeff** (float): the range reduction coefficient. Must be in (0, 1). ``` ```{eval-rst} .. dropdown:: pygmo_ihs .. code-block:: "pygmo_ihs" Minimize a scalar function using the improved harmony search algorithm. Improved harmony search (IHS) was introduced by :cite:`Mahdavi2007`. IHS supports stochastic problems. - **population_size** (int): Size of the population. If None, it's twice the number of parameters. - **seed** (int): seed used by the internal random number generator. - **discard_start_params** (bool): If True, the start params are not guaranteed to be part of the initial population. This saves one criterion function evaluation that cannot be done in parallel with other evaluations. Default False. - **stopping.maxiter** (int): Number of generations to evolve. - **choose_from_memory_probability** (float): probability of choosing from memory (similar to a crossover probability). - **min_pitch_adjustment_rate** (float): minimum pitch adjustment rate. 
(similar to a mutation rate). It must be between 0 and 1. - **max_pitch_adjustment_rate** (float): maximum pitch adjustment rate. (similar to a mutation rate). It must be between 0 and 1. - **min_distance_bandwidth** (float): minimum distance bandwidth. (similar to a mutation width). It must be positive. - **max_distance_bandwidth** (float): maximum distance bandwidth. (similar to a mutation width). ``` ```{eval-rst} .. dropdown:: pygmo_de1220 .. code-block:: "pygmo_de1220" Minimize a scalar function using Self-adaptive Differential Evolution, pygmo flavor. See `the PAGMO documentation for details `_. - **population_size** (int): Size of the population. If None, it's twice the number of parameters but at least 64. - **seed** (int): seed used by the internal random number generator. - **discard_start_params** (bool): If True, the start params are not guaranteed to be part of the initial population. This saves one criterion function evaluation that cannot be done in parallel with other evaluations. Default False. - **jde** (bool): Whether to use the jDE self-adaptation variant to control the $F$ and $CR$ parameter. If True jDE is used, else iDE. - **stopping.maxiter** (int): Number of generations to evolve. - **allowed_variants** (array-like object): allowed mutation variants (can be codes or strings). Each code refers to one mutation variant to create a new candidate individual. The first ten refer to the classical mutation variants introduced in the original DE algorithm, the remaining ones are, instead, considered in the work by :cite:`Elsayed2011`. The default is ["rand/1/exp", "rand-to-best/1/exp", "rand/1/bin", "rand/2/bin", "best/3/exp", "best/3/bin", "rand-to-current/2/exp", "rand-to-current/2/bin"]. 
The following are available: - 1 or "best/1/exp" - 2 or "rand/1/exp" - 3 or "rand-to-best/1/exp" - 4 or "best/2/exp" - 5 or "rand/2/exp" - 6 or "best/1/bin" - 7 or "rand/1/bin" - 8 or "rand-to-best/1/bin" - 9 or "best/2/bin" - 10 or "rand/2/bin" - 11 or "rand/3/exp" - 12 or "rand/3/bin" - 13 or "best/3/exp" - 14 or "best/3/bin" - 15 or "rand-to-current/2/exp" - 16 or "rand-to-current/2/bin" - 17 or "rand-to-best-and-current/2/exp" - 18 or "rand-to-best-and-current/2/bin" - **keep_adapted_params** (bool): when true the adapted parameters $CR$ anf $F$ are not reset between successive calls to the evolve method. Default is False. - **ftol** (float): stopping criteria on the x tolerance. - **xtol** (float): stopping criteria on the f tolerance. ``` (ipopt-algorithm)= ## The Interior Point Optimizer (ipopt) optimagic's support for the Interior Point Optimizer ({cite}`Waechter2005`, {cite}`Waechter2005a`, {cite}`Waechter2005b`, {cite}`Nocedal2009`) is built on [cyipopt](https://cyipopt.readthedocs.io/en/latest/index.html), a Python wrapper for the [Ipopt optimization package](https://coin-or.github.io/Ipopt/index.html). To use ipopt, you need to have [cyipopt installed](https://cyipopt.readthedocs.io/en/latest/index.html) (`conda install cyipopt`). ```{eval-rst} .. dropdown:: ipopt .. code-block:: "ipopt" Minimize a scalar function using the Interior Point Optimizer. This implementation of the Interior Point Optimizer (:cite:`Waechter2005`, :cite:`Waechter2005a`, :cite:`Waechter2005b`, :cite:`Nocedal2009`) relies on `cyipopt `_, a Python wrapper for the `Ipopt optimization package `_. There are two levels of termination criteria. If the usual "desired" tolerances (see tol, dual_inf_tol etc) are satisfied at an iteration, the algorithm immediately terminates with a success message. On the other hand, if the algorithm encounters "acceptable_iter" many iterations in a row that are considered "acceptable", it will terminate before the desired convergence tolerance is met. 
This is useful in cases where the algorithm might not be able to achieve the "desired" level of accuracy. The options are analogous to the ones in the `ipopt documentation `_ with the exception of the linear solver options which are here bundled into a dictionary. Any argument that takes "yes" and "no" in the ipopt documentation can also be passed as a `True` and `False`, respectively. and any option that accepts "none" in ipopt accepts a Python `None`. The following options are not supported: - `num_linear_variables`: since optimagic may reparametrize your problem and this changes the parameter problem, we do not support this option. - derivative checks - print options. - **convergence.ftol_rel** (float): The algorithm terminates successfully, if the (scaled) non linear programming error becomes smaller than this value. - **mu_target** (float): Desired value of complementarity. Usually, the barrier parameter is driven to zero and the termination test for complementarity is measured with respect to zero complementarity. However, in some cases it might be desired to have Ipopt solve barrier problem for strictly positive value of the barrier parameter. In this case, the value of "mu_target" specifies the final value of the barrier parameter, and the termination tests are then defined with respect to the barrier problem for this value of the barrier parameter. The valid range for this real option is 0 ≤ mu_target and its default value is 0. - **s_max** (float): Scaling threshold for the NLP error. - **stopping.maxiter** (int): If the maximum number of iterations is reached, the optimization stops, but we do not count this as successful convergence. The difference to ``max_criterion_evaluations`` is that one iteration might need several criterion evaluations, for example in a line search or to determine if the trust region radius has to be shrunk. - **stopping.max_wall_time_seconds** (float): Maximum number of walltime clock seconds. 
- **stopping.max_cpu_time** (float): Maximum number of CPU seconds. A limit on CPU seconds that Ipopt can use to solve one problem. If during the convergence check this limit is exceeded, Ipopt will terminate with a corresponding message. The valid range for this real option is 0 < max_cpu_time and its default value is :math:`1e+20` . - **dual_inf_tol** (float): Desired threshold for the dual infeasibility. Absolute tolerance on the dual infeasibility. Successful termination requires that the max-norm of the (unscaled) dual infeasibility is less than this threshold. The valid range for this real option is 0 < dual_inf_tol and its default value is 1. - **constr_viol_tol** (float): Desired threshold for the constraint and bound violation. Absolute tolerance on the constraint and variable bound violation. Successful termination requires that the max-norm of the (unscaled) constraint violation is less than this threshold. If option ``bound_relax_factor`` is not zero 0, then Ipopt relaxes given variable bounds. The value of constr_viol_tol is used to restrict the absolute amount of this bound relaxation. The valid range for this real option is 0 < constr_viol_tol and its default value is 0.0001. - **compl_inf_tol** (float): Desired threshold for the complementarity conditions. Absolute tolerance on the complementarity. Successful termination requires that the max-norm of the (unscaled) complementarity is less than this threshold. The valid range for this real option is 0 < text{compl_inf_tol and its default is 0.0001. - **acceptable_iter** (int): Number of "acceptable" iterates before termination. If the algorithm encounters this many successive "acceptable" iterates (see above on the acceptable heuristic), it terminates, assuming that the problem has been solved to best possible accuracy given round-off. If it is set to zero, this heuristic is disabled. The valid range for this integer option is 0 ≤ acceptable_iter. 
- **acceptable_tol** (float):"Acceptable" convergence tolerance (relative). Determines which (scaled) overall optimality error is considered to be "acceptable". The valid range for this real option is 0 < acceptable_tol. - **acceptable_dual_inf_tol** (float): "Acceptance" threshold for the dual infeasibility. Absolute tolerance on the dual infeasibility. "Acceptable" termination requires that the (max-norm of the unscaled) dual infeasibility is less than this threshold; see also ``acceptable_tol`` . The valid range for this real option is 0 < acceptable_dual_inf_tol and its default value is :math:`1e+10.` - **acceptable_constr_viol_tol** (float): "Acceptance" threshold for the constraint violation. Absolute tolerance on the constraint violation. "Acceptable" termination requires that the max-norm of the (unscaled) constraint violation is less than this threshold; see also ``acceptable_tol`` . The valid range for this real option is 0 < acceptable_constr_viol_tol and its default value is 0.01. - **acceptable_compl_inf_tol** (float): "Acceptance" threshold for the complementarity conditions. Absolute tolerance on the complementarity. "Acceptable" termination requires that the max-norm of the (unscaled) complementarity is less than this threshold; see also ``acceptable_tol`` . The valid range for this real option is 0 < text{acceptable_compl_inf_tol and its default value is 0.01. - **acceptable_obj_change_tol** (float): "Acceptance" stopping criterion based on objective function change. If the relative change of the objective function (scaled by :math:`max(1,|f(x)|)` ) is less than this value, this part of the acceptable tolerance termination is satisfied; see also ``acceptable_tol`` . This is useful for the quasi-Newton option, which has trouble to bring down the dual infeasibility. The valid range for this real option is 0 ≤ acceptable_obj_change_tol and its default value is :math:`1e+20` . 
- **diverging_iterates_tol** (float): Threshold for maximal value of primal iterates. If any component of the primal iterates exceeded this value (in absolute terms), the optimization is aborted with the exit message that the iterates seem to be diverging. The valid range for this real option is 0 < diverging_iterates_tol and its default value is :math:`1e+20` . - **nlp_lower_bound_inf** (float): any bound less or equal this value will be considered -inf (i.e. not lwer bounded). The valid range for this real option is unrestricted and its default value is :math:`-1e+19` . - **nlp_upper_bound_inf** (float): any bound greater or this value will be considered :math:`+\inf` (i.e. not upper bunded). The valid range for this real option is unrestricted and its default value is :math:`1e+19` . - **fixed_variable_treatment (str)**: Determines how fixed variables should be handled. The main difference between those options is that the starting point in the "make_constraint" case still has the fixed variables at their given values, whereas in the case "make_parameter(_nodual)" the functions are always evaluated with the fixed values for those variables. Also, for "relax_bounds", the fixing bound constraints are relaxed (according to ``bound_relax_factor`` ). For all but "make_parameter_nodual", bound multipliers are computed for the fixed variables. The default value for this string option is "make_parameter". Possible values: - "make_parameter": Remove fixed variable from optimization variables - "make_parameter_nodual": Remove fixed variable from optimization variables and do not compute bound multipliers for fixed variables - "make_constraint": Add equality constraints fixing variables - "relax_bounds": Relax fixing bound constraints - **dependency_detector (str)**: Indicates which linear solver should be used to detect linearly dependent equality constraints. This is experimental and does not work well. The default value for this string option is "none". 
Possible values: - "none" or None: don't check; no extra work at beginning - "mumps": use MUMPS - "wsmp": use WSMP - "ma28": use MA28 - **dependency_detection_with_rhs (str or bool)**: Indicates if the right hand sides of the constraints should be considered in addition to gradients during dependency detection. The default value for this string option is "no". Possible values: 'yes', 'no', True, False. - **kappa_d** (float): Weight for linear damping term (to handle one-sided bounds). See Section 3.7 in implementation paper. The valid range for this real option is 0 ≤ kappa_d and its default value is :math:`1e-05` . - **bound_relax_factor** (float): Factor for initial relaxation of the bounds. Before start of the optimization, the bounds given by the user are relaxed. This option sets the factor for this relaxation. Additional, the constraint violation tolerance ``constr_viol_tol`` is used to bound the relaxation by an absolute value. If it is set to zero, then then bounds relaxation is disabled. See Eqn.(35) in implementation paper. Note that the constraint violation reported by Ipopt at the end of the solution process does not include violations of the original (non-relaxed) variable bounds. See also option honor_original_bounds. The valid range for this real option is 0 ≤ bound_relax_factor and its default value is :math:`1e-08` . - **honor_original_bounds** (str or bool): Indicates whether final points should be projected into original bunds. Ipopt might relax the bounds during the optimization (see, e.g., option ``bound_relax_factor`` ). This option determines whether the final point should be projected back into the user-provide original bounds after the optimization. Note that violations of constraints and complementarity reported by Ipopt at the end of the solution process are for the non-projected point. The default value for this string option is "no". 
Possible values: 'yes', 'no', True, False - **check_derivatives_for_naninf (str)**: whether to check for NaN / inf in the derivative matrices. Activating this option will cause an error if an invalid number is detected in the constraint Jacobians or the Lagrangian Hessian. If this is not activated, the test is skipped, and the algorithm might proceed with invalid numbers and fail. If test is activated and an invalid number is detected, the matrix is written to output with print_level corresponding to J_MORE_DETAILED; so beware of large output! The default value for this string option is "no". - **jac_c_constant (str or bool)**: Indicates whether to assume that all equality constraints are linear. Activating this option will cause Ipopt to ask for the Jacobian of the equality constraints only once from the NLP and reuse this information later. The default value for this string option is "no". Possible values: yes, no, True, False. - **jac_d_constant (str or bool)**: Indicates whether to assume that all inequality constraints are linear. Activating this option will cause Ipopt to ask for the Jacobian of the inequality constraints only once from the NLP and reuse this information later. The default value for this string option is "no". Possible values: yes, no, True, False - **hessian_constant (str or bool)**: Indicates whether to assume the problem is a QP (quadratic objective, linear constraints). Activating this option will cause Ipopt to ask for the Hessian of the Lagrangian function only once from the NLP and reuse this information later. The default value for this string option is "no". Possible values: yes, no, True, False. - **nlp_scaling_method (str)**: Select the technique used for scaling the NLP. Selects the technique used for scaling the problem internally before it is solved. For user-scaling, the parameters come from the NLP. 
If you are using AMPL, they can be specified through suffixes ("scaling_factor"). The default value for this string option is "gradient-based". Possible values: - "none": no problem scaling will be performed - "user-scaling": scaling parameters will come from the user - "gradient-based": scale the problem so the maximum gradient at the starting point is ``nlp_scaling_max_gradient`` . - "equilibration-based": scale the problem so that first derivatives are of order 1 at random points (uses Harwell routine MC19) - **obj_scaling_factor** (float): Scaling factor for the objective function. This option sets a scaling factor for the objective function. The scaling is seen internally by Ipopt but the unscaled objective is reported in the console output. If additional scaling parameters are computed (e.g. user-scaling or gradient-based), both factors are multiplied. If this value is chosen to be negative, Ipopt will maximize the objective function instead of minimizing it. The valid range for this real option is unrestricted and its default value is 1. - **nlp_scaling_max_gradient** (float): Maximum gradient after NLP scaling. This is the gradient scaling cut-off. If the maximum gradient is above this value, then gradient based scaling will be performed. Scaling parameters are calculated to scale the maximum gradient back to this value. (This is g_max in Section 3.8 of the implementation paper.) Note: This option is only used if ``nlp_scaling_method`` is chosen as "gradient-based". The valid range for this real option is :math:`0 < \text{nlp_scaling_max_gradient}` and its default value is 100. - **nlp_scaling_obj_target_gradient** (float): advanced! Target value for objective function gradient size. If a positive number is chosen, the scaling factor for the objective function is computed so that the gradient has the max norm of the given size at the starting point. This overrides ``nlp_scaling_max_gradient`` for the objective function. 
The valid range for this real option is 0 ≤ nlp_scaling_obj_target_gradient and its default value is 0. - **nlp_scaling_constr_target_gradient** (float): Target value for constraint function gradient size. If a positive number is chosen, the scaling factors for the constraint functions are computed so that the gradient has the max norm of the given size at the starting point. This overrides nlp_scaling_max_gradient for the constraint functions. The valid range for this real option is 0 ≤ nlp_scaling_constr_target_gradient and its default value is 0. - **nlp_scaling_min_value** (float): Minimum value of gradient-based scaling values. This is the lower bound for the scaling factors computed by gradient-based scaling method. If some derivatives of some functions are huge, the scaling factors will otherwise become very small, and the (unscaled) final constraint violation, for example, might then be significant. Note: This option is only used if ``nlp_scaling_method`` is chosen as "gradient-based". The valid range for this real option is 0 ≤ nlp_scaling_min_value and its default value is :math:`1e-08`. - **bound_push** (float): Desired minimum absolute distance from the initial point to bound. Determines how much the initial point might have to be modified in order to be sufficiently inside the bounds (together with ``bound_frac`` ). (This is kappa_1 in Section 3.6 of implementation paper.) The valid range for this real option is 0 < bound_push and its default value is 0.01. - **bound_frac** (float): Desired minimum relative distance from the initial point to bound. Determines how much the initial point might have to be modified in order to be sufficiently inside the bounds (together with "bound_push"). (This is kappa_2 in Section 3.6 of implementation paper.) The valid range for this real option is 0 < bound_frac ≤ 0.5 and its default value is 0.01. - **slack_bound_push** (float): Desired minimum absolute distance from the initial slack to bound. 
Determines how much the initial slack variables might have to be modified in order to be sufficiently inside the inequality bounds (together with ``slack_bound_frac`` ). (This is kappa_1 in Section 3.6 of implementation paper.) The valid range for this real option is 0 < slack_bound_push and its default value is 0.01. - **slack_bound_frac** (float): Desired minimum relative distance from the initial slack to bound. Determines how much the initial slack variables might have to be modified in order to be sufficiently inside the inequality bounds (together with ``slack_bound_push`` ). (This is kappa_2 in Section 3.6 of implementation paper.) The valid range for this real option is 0 < slack_bound_frac ≤ 0.5 and its default value is 0.01. - **constr_mult_init_max** (float): Maximum allowed least-square guess of constraint multipliers. Determines how large the initial least-square guesses of the constraint multipliers are allowed to be (in max-norm). If the guess is larger than this value, it is discarded and all constraint multipliers are set to zero. This option is also used when initializing the restoration phase. By default, "resto.constr_mult_init_max" (the one used in RestoIterateInitializer) is set to zero. The valid range for this real option is 0 ≤ constr_mult_init_max and its default value is 1000. - **bound_mult_init_val** (float): Initial value for the bound multipliers. All dual variables corresponding to bound constraints are initialized to this value. The valid range for this real option is 0 < bound_mult_init_val and its default value is 1. - **bound_mult_init_method (str)**: Initialization method for bound multipliers. This option defines how the iterates for the bound multipliers are initialized. If "constant" is chosen, then all bound multipliers are initialized to the value of ``bound_mult_init_val``. If "mu-based" is chosen, each value is initialized to the value of "mu_init" divided by the corresponding slack variable. 
This latter option might be useful if the starting point is close to the optimal solution. The default value for this string option is "constant". Possible values: - "constant": set all bound multipliers to the value of ``bound_mult_init_val`` - "mu-based": initialize to mu_init/x_slack - **least_square_init_primal (str or bool)**: Least square initialization of the primal variables. If set to yes, Ipopt ignores the user provided point and solves a least square problem for the primal variables (x and s) to fit the linearized equality and inequality constraints. This might be useful if the user doesn't know anything about the starting point, or for solving an LP or QP. The default value for this string option is "no". Possible values: - "no": take user-provided point - "yes": overwrite user-provided point with least-square estimates - **least_square_init_duals (str or bool)**: Least square initialization of all dual variables. If set to yes, Ipopt tries to compute least-square multipliers (considering ALL dual variables). If successful, the bound multipliers are possibly corrected to be at least ``bound_mult_init_val`` . This might be useful if the user doesn't know anything about the starting point, or for solving an LP or QP. This overwrites option ``bound_mult_init_method`` . The default value for this string option is "no". Possible values: - "no": use ``bound_mult_init_val`` and least-square equality constraint multipliers - "yes": overwrite user-provided point with least-square estimates - **warm_start_init_point (str or bool)**: Warm-start for initial point. Indicates whether this optimization should use a warm start initialization, where values of primal and dual variables are given (e.g., from a previous optimization of a related problem.) The default value for this string option is "no". 
Possible values: - "no" or False: do not use the warm start initialization - "yes" or True: use the warm start initialization - **warm_start_same_structure (str or bool)**: Advanced feature! Indicates whether a problem with a structure identical to the previous one is to be solved. If enabled, then the algorithm assumes that an NLP is now to be solved whose structure is identical to one that already was considered (with the same NLP object). The default value for this string option is "no". Possible values: yes, no, True, False. - **warm_start_bound_push** (float): same as ``bound_push`` for the regular initializer. The valid range for this real option is 0 < warm_start_bound_push and its default value is 0.001. - **warm_start_bound_frac** (float): same as ``bound_frac`` for the regular initializer. The valid range for this real option is 0 < warm_start_bound_frac ≤ 0.5 and its default value is 0.001. - **warm_start_slack_bound_push** (float): same as ``slack_bound_push`` for the regular initializer. The valid range for this real option is 0 < warm_start_slack_bound_push and its default value is 0.001. - **warm_start_slack_bound_frac** (float): same as ``slack_bound_frac`` for the regular initializer. The valid range for this real option is 0 < warm_start_slack_bound_frac ≤ 0.5 and its default value is 0.001. - **warm_start_mult_bound_push** (float): same as ``mult_bound_push`` for the regular initializer. The valid range for this real option is 0 < warm_start_mult_bound_push and its default value is 0.001. - **warm_start_mult_init_max** (float): Maximum initial value for the equality multipliers. The valid range for this real option is unrestricted and its default value is :math:`1e+06` . - **warm_start_entire_iterate (str or bool)**: Tells algorithm whether to use the GetWarmStartIterate method in the NLP. The default value for this string option is "no". 
Possible values: - "no": call GetStartingPoint in the NLP - "yes": call GetWarmStartIterate in the NLP - **warm_start_target_mu** (float): Advanced and experimental! The valid range for this real option is unrestricted and its default value is 0. - **option_file_name (str)**: File name of options file. By default, the name of the Ipopt options file is "ipopt.opt" - or something else if specified in the IpoptApplication::Initialize call. If this option is set by SetStringValue BEFORE the options file is read, it specifies the name of the options file. It does not make any sense to specify this option within the options file. Setting this option to an empty string disables reading of an options file. - **replace_bounds (bool or str)**: Whether all variable bounds should be replaced by inequality constraints. This option must be set for the inexact algorithm. The default value for this string option is "no". Possible values: "yes", "no", True, False. - **skip_finalize_solution_call (str or bool)**: Whether a call to NLP::FinalizeSolution after optimization should be suppressed. In some Ipopt applications, the user might want to call the FinalizeSolution method separately. Setting this option to "yes" will cause the IpoptApplication object to suppress the default call to that method. The default value for this string option is "no". Possible values: "yes", "no", True, False - **timing_statistics (str or bool)**: Indicates whether to measure time spent in components of Ipopt and NLP evaluation. The overall algorithm time is unaffected by this option. The default value for this string option is "no". Possible values: "yes", "no", True, False - **mu_max_fact** (float): Factor for initialization of maximum value for barrier parameter. This option determines the upper bound on the barrier parameter. This upper bound is computed as the average complementarity at the initial point times the value of this option. (Only used if option "mu_strategy" is chosen as "adaptive".) 
The valid range for this real option is 0 < mu_max_fact and its default value is 1000. - **mu_max** (float): Maximum value for barrier parameter. This option specifies an upper bound on the barrier parameter in the adaptive mu selection mode. If this option is set, it overwrites the effect of mu_max_fact. (Only used if option "mu_strategy" is chosen as "adaptive".) The valid range for this real option is 0 < mu_max and its default value is 100000. - **mu_min** (float): Minimum value for barrier parameter. This option specifies the lower bound on the barrier parameter in the adaptive mu selection mode. By default, it is set to the minimum of :math:`1e-11` and min( ``tol`` , ``compl_inf_tol`` )/( ``barrier_tol_factor`` +1), which should be a reasonable value. (Only used if option ``mu_strategy`` is chosen as "adaptive".) The valid range for this real option is 0 < mu_min and its default value is :math:`1e-11` . - **adaptive_mu_globalization (str)**: Globalization strategy for the adaptive mu selection mode. To achieve global convergence of the adaptive version, the algorithm has to switch to the monotone mode (Fiacco-McCormick approach) when convergence does not seem to appear. This option sets the criterion used to decide when to do this switch. (Only used if option "mu_strategy" is chosen as "adaptive".) The default value for this string option is "obj-constr-filter". Possible values: - "kkt-error": nonmonotone decrease of kkt-error - "obj-constr-filter": 2-dim filter for objective and constraint violation - "never-monotone-mode": disables globalization. - **adaptive_mu_kkterror_red_iters** (int): advanced feature! Maximum number of iterations requiring sufficient progress. For the "kkt-error" based globalization strategy, sufficient progress must be made for "adaptive_mu_kkterror_red_iters" iterations. If this number of iterations is exceeded, the globalization strategy switches to the monotone mode. 
The valid range for this integer option is 0 ≤ adaptive_mu_kkterror_red_iters and its default value is 4. - **adaptive_mu_kkterror_red_fact** (float): advanced feature! Sufficient decrease factor for "kkt-error" globalization strategy. For the "kkt-error" based globalization strategy, the error must decrease by this factor to be deemed sufficient decrease. The valid range for this real option is 0 < adaptive_mu_kkterror_red_fact < 1 and its default value is 0.9999. - **filter_margin_fact** (float): advanced feature! Factor determining width of margin for obj-constr-filter adaptive globalization strategy. When using the adaptive globalization strategy, "obj-constr-filter", sufficient progress for a filter entry is defined as follows: (new obj) < (filter obj) - filter_margin_fact*(new constr-viol) OR (new constr-viol) < (filter constr-viol) - filter_margin_fact*(new constr-viol). For the description of the "kkt-error-filter" option see ``filter_max_margin`` . The valid range for this real option is 0 < filter_margin_fact < 1 and its default value is :math:`1e-05` . - **filter_max_margin** (float): advanced feature! Maximum width of margin in obj-constr-filter adaptive globalization strategy. The valid range for this real option is 0 < filter_max_margin and its default value is 1. - **adaptive_mu_restore_previous_iterate (str or bool)**: advanced feature! Indicates if the previous accepted iterate should be restored if the monotone mode is entered. When the globalization strategy for the adaptive barrier algorithm switches to the monotone mode, it can either start from the most recent iterate (no), or from the last iterate that was accepted (yes). The default value for this string option is "no". Possible values: "yes", "no", True, False - **adaptive_mu_monotone_init_factor** (float): advanced feature! Determines the initial value of the barrier parameter when switching to the monotone mode. 
When the globalization strategy for the adaptive barrier algorithm switches to the monotone mode and fixed_mu_oracle is chosen as "average_compl", the barrier parameter is set to the current average complementarity times the value of "adaptive_mu_monotone_init_factor". The valid range for this real option is 0 < adaptive_mu_monotone_init_factor and its default value is 0.8. - **adaptive_mu_kkt_norm_type (str)**: advanced! Norm used for the KKT error in the adaptive mu globalization strategies. When computing the KKT error for the globalization strategies, the norm to be used is specified with this option. Note, this option is also used in the QualityFunctionMuOracle. The default value for this string option is "2-norm-squared". Possible values: - "1-norm": use the 1-norm (abs sum) - "2-norm-squared": use the 2-norm squared (sum of squares) - "max-norm": use the infinity norm (max) - "2-norm": use 2-norm - **mu_strategy (str)**: Update strategy for barrier parameter. Determines which barrier parameter update strategy is to be used. The default value for this string option is "monotone". Possible values: - "monotone": use the monotone (Fiacco-McCormick) strategy - "adaptive": use the adaptive update strategy - **mu_oracle (str)**: Oracle for a new barrier parameter in the adaptive strategy. Determines how a new barrier parameter is computed in each "free-mode" iteration of the adaptive barrier parameter strategy. (Only considered if "adaptive" is selected for option "mu_strategy"). The default value for this string option is "quality-function". Possible values: - "probing": Mehrotra's probing heuristic - "loqo": LOQO's centrality rule - "quality-function": minimize a quality function - **fixed_mu_oracle (str)**: Oracle for the barrier parameter when switching to fixed mode. Determines how the first value of the barrier parameter should be computed when switching to the "monotone mode" in the adaptive strategy. 
(Only considered if "adaptive" is selected for option "mu_strategy".) The default value for this string option is "average_compl". Possible values: - "probing": Mehrotra's probing heuristic - "loqo": LOQO's centrality rule - "quality-function": minimize a quality function - "average_compl": base on current average complementarity - **mu_init** (float): Initial value for the barrier parameter. This option determines the initial value for the barrier parameter (mu). It is only relevant in the monotone, Fiacco-McCormick version of the algorithm. (i.e., if "mu_strategy" is chosen as "monotone") The valid range for this real option is 0 < mu_init and its default value is 0.1. - **barrier_tol_factor** (float): Factor for mu in barrier stop test. The convergence tolerance for each barrier problem in the monotone mode is the value of the barrier parameter times "barrier_tol_factor". This option is also used in the adaptive mu strategy during the monotone mode. This is kappa_epsilon in implementation paper. The valid range for this real option is 0 < barrier_tol_factor and its default value is 10. - **mu_linear_decrease_factor** (float): Determines linear decrease rate of barrier parameter. For the Fiacco-McCormick update procedure the new barrier parameter mu is obtained by taking the minimum of mu*"mu_linear_decrease_factor" and mu^"superlinear_decrease_power". This is kappa_mu in implementation paper. This option is also used in the adaptive mu strategy during the monotone mode. The valid range for this real option is 0 < mu_linear_decrease_factor < 1 and its default value is 0.2. - **mu_superlinear_decrease_power** (float): Determines superlinear decrease rate of barrier parameter. For the Fiacco-McCormick update procedure the new barrier parameter mu is obtained by taking the minimum of mu*"mu_linear_decrease_factor" and mu^"superlinear_decrease_power". This is theta_mu in implementation paper. 
This option is also used in the adaptive mu strategy during the monotone mode. The valid range for this real option is 1 < mu_superlinear_decrease_power < 2 and its default value is 1.5. - **mu_allow_fast_monotone_decrease (str or bool)**: Advanced feature! Allow skipping of barrier problem if barrier test is already met. The default value for this string option is "yes". Possible values: - "no": Take at least one iteration per barrier problem even if the barrier test is already met for the updated barrier parameter - "yes": Allow fast decrease of mu if barrier test is met - **tau_min** (float): Advanced feature! Lower bound on fraction-to-the-boundary parameter tau. This is tau_min in the implementation paper. This option is also used in the adaptive mu strategy during the monotone mode. The valid range for this real option is 0 < tau_min < 1 and its default value is 0.99. - **sigma_max** (float): Advanced feature! Maximum value of the centering parameter. This is the upper bound for the centering parameter chosen by the quality function based barrier parameter update. Only used if option "mu_oracle" is set to "quality-function". The valid range for this real option is 0 < sigma_max and its default value is 100. - **sigma_min** (float): Advanced feature! Minimum value of the centering parameter. This is the lower bound for the centering parameter chosen by the quality function based barrier parameter update. Only used if option "mu_oracle" is set to "quality-function". The valid range for this real option is 0 ≤ sigma_min and its default value is :math:`1e-06` . - **quality_function_norm_type (str)**: Advanced feature. Norm used for components of the quality function. Only used if option "mu_oracle" is set to "quality-function". The default value for this string option is "2-norm-squared". 
Possible values: - "1-norm": use the 1-norm (abs sum) - "2-norm-squared": use the 2-norm squared (sum of squares) - "max-norm": use the infinity norm (max) - "2-norm": use 2-norm - **quality_function_centrality (str)**: Advanced feature. The penalty term for centrality that is included in quality function. This determines whether a term is added to the quality function to penalize deviation from centrality with respect to complementarity. The complementarity measure here is the xi in the Loqo update rule. Only used if option "mu_oracle" is set to "quality-function". The default value for this string option is "none". Possible values: - "none": no penalty term is added - "log": complementarity * the log of the centrality measure - "reciprocal": complementarity * the reciprocal of the centrality measure - "cubed-reciprocal": complementarity * the reciprocal of the centrality measure cubed - **quality_function_balancing_term (str)**: Advanced feature. The balancing term included in the quality function for centrality. This determines whether a term is added to the quality function that penalizes situations where the complementarity is much smaller than dual and primal infeasibilities. Only used if option "mu_oracle" is set to "quality-function". The default value for this string option is "none". Possible values: - "none": no balancing term is added - "cubic": :math:`max(0,\max(\text{dual_inf},\text{primal_inf})-\text{compl})^3` - **quality_function_max_section_steps** (int): Maximum number of search steps during direct search procedure determining the optimal centering parameter. The golden section search is performed for the quality function based mu oracle. Only used if option "mu_oracle" is set to "quality-function". The valid range for this integer option is 0 ≤ quality_function_max_section_steps and its default value is 8. - **quality_function_section_sigma_tol** (float): advanced feature! 
Tolerance for the section search procedure determining the optimal centering parameter (in sigma space). The golden section search is performed for the quality function based mu oracle. Only used if option "mu_oracle" is set to "quality-function". The valid range for this real option is 0 ≤ quality_function_section_sigma_tol < 1 and its default value is 0.01. - **quality_function_section_qf_tol** (float): advanced feature! Tolerance for the golden section search procedure determining the optimal centering parameter (in the function value space). The golden section search is performed for the quality function based mu oracle. Only used if option "mu_oracle" is set to "quality-function". The valid range for this real option is 0 ≤ quality_function_section_qf_tol < 1 and its default value is 0. - **line_search_method (str)**: Advanced feature. Globalization method used in backtracking line search. Only the "filter" choice is officially supported. But sometimes, good results might be obtained with the other choices. The default value for this string option is "filter". Possible values: - "filter": Filter method - "cg-penalty": Chen-Goldfarb penalty function - "penalty": Standard penalty function - **alpha_red_factor** (float): Advanced feature. Fractional reduction of the trial step size in the backtracking line search. At every step of the backtracking line search, the trial step size is reduced by this factor. The valid range for this real option is 0 < alpha_red_factor < 1 and its default value is 0.5. - **accept_every_trial_step (str or bool)**: Always accept the first trial step. Setting this option to "yes" essentially disables the line search and makes the algorithm take aggressive steps, without global convergence guarantees. The default value for this string option is "no". Possible values: "yes", "no", True, False. - **accept_after_max_steps** (int): advanced feature. 
Accept a trial point after maximal this number of steps even if it does not satisfy line search conditions. Setting this to -1 disables this option. The valid range for this integer option is -1 ≤ accept_after_max_steps and its default value is -1. - **alpha_for_y (str)**: Method to determine the step size for constraint multipliers (alpha_y) . The default value for this string option is "primal". Possible values: - "primal": use primal step size - "bound-mult": use step size for the bound multipliers (good for LPs) - "min": use the min of primal and bound multipliers - "max": use the max of primal and bound multipliers - "full": take a full step of size one - "min-dual-infeas": choose step size minimizing new dual infeasibility - "safer-min-dual-infeas": like "min_dual_infeas", but safeguarded by "min" and "max" - "primal-and-full": use the primal step size, and full step if delta_x <= alpha_for_y_tol - "dual-and-full": use the dual step size, and full step if delta_x <= alpha_for_y_tol - "acceptor": Call LSAcceptor to get step size for y - **alpha_for_y_tol** (float): Tolerance for switching to full equality multiplier steps. This is only relevant if "alpha_for_y" is chosen "primal-and-full" or "dual-and-full". The step size for the equality constraint multipliers is taken to be one if the max-norm of the primal step is less than this tolerance. The valid range for this real option is 0 ≤ alpha_for_y_tol and its default value is 10. - **tiny_step_tol** (float): Advanced feature. Tolerance for detecting numerically insignificant steps. If the search direction in the primal variables (x and s) is, in relative terms for each component, less than this value, the algorithm accepts the full step without line search. If this happens repeatedly, the algorithm will terminate with a corresponding exit message. The default value is 10 times machine precision. The valid range for this real option is 0 ≤ tiny_step_tol and its default value is 2.22045 · :math:`1e-15`. 
- **tiny_step_y_tol** (float): Advanced feature. Tolerance for quitting because of numerically insignificant steps. If the search direction in the primal variables (x and s) is, in relative terms for each component, repeatedly less than tiny_step_tol, and the step in the y variables is smaller than this threshold, the algorithm will terminate. The valid range for this real option is 0 ≤ tiny_step_y_tol and its default value is 0.01. - **watchdog_shortened_iter_trigger** (int): Number of shortened iterations that trigger the watchdog. If the number of successive iterations in which the backtracking line search did not accept the first trial point exceeds this number, the watchdog procedure is activated. Choosing "0" here disables the watchdog procedure. The valid range for this integer option is 0 ≤ watchdog_shortened_iter_trigger and its default value is 10. - **watchdog_trial_iter_max** (int): Maximum number of watchdog iterations. This option determines the number of trial iterations allowed before the watchdog procedure is aborted and the algorithm returns to the stored point. The valid range for this integer option is 1 ≤ watchdog_trial_iter_max and its default value is 3. - **theta_max_fact** (float): Advanced feature. Determines upper bound for constraint violation in the filter. The algorithmic parameter theta_max is determined as theta_max_fact times the maximum of 1 and the constraint violation at initial point. Any point with a constraint violation larger than theta_max is unacceptable to the filter (see Eqn. (21) in the implementation paper). The valid range for this real option is 0 < theta_max_fact and its default value is 10000. - **theta_min_fact** (float): advanced feature. Determines constraint violation threshold in the switching rule. The algorithmic parameter theta_min is determined as theta_min_fact times the maximum of 1 and the constraint violation at initial point. 
The switching rule treats an iteration as an h-type iteration whenever the current constraint violation is larger than theta_min (see paragraph before Eqn. (19) in the implementation paper). The valid range for this real option is 0 < theta_min_fact and its default value is 0.0001. - **eta_phi** (float): advanced! Relaxation factor in the Armijo condition. See Eqn. (20) in the implementation paper. The valid range for this real option is 0 < eta_phi < 0.5 and its default value is :math:`1e-08`. - **delta** (float): advanced! Multiplier for constraint violation in the switching rule. See Eqn. (19) in the implementation paper. The valid range for this real option is 0 < delta and its default value is 1. - **s_phi** (float): advanced! Exponent for linear barrier function model in the switching rule. See Eqn. (19) in the implementation paper. The valid range for this real option is 1 < s_phi and its default value is 2.3. - **s_theta** (float): advanced! Exponent for current constraint violation in the switching rule. See Eqn. (19) in the implementation paper. The valid range for this real option is 1 < s_theta and its default value is 1.1. - **gamma_phi** (float): advanced! Relaxation factor in the filter margin for the barrier function. See Eqn. (18a) in the implementation paper. The valid range for this real option is 0 < gamma_phi < 1 and its default value is :math:`1e-08`. - **gamma_theta** (float): advanced! Relaxation factor in the filter margin for the constraint violation. See Eqn. (18b) in the implementation paper. The valid range for this real option is 0 < gamma_theta < 1 and its default value is :math:`1e-05`. - **alpha_min_frac** (float): advanced! Safety factor for the minimal step size (before switching to restoration phase). This is gamma_alpha in Eqn. (20) in the implementation paper. The valid range for this real option is 0 < alpha_min_frac < 1 and its default value is 0.05. 
- **max_soc** (int): Maximum number of second order correction trial steps at each iteration. Choosing 0 disables the second order corrections. This is p^{max} of Step A-5.9 of Algorithm A in the implementation paper. The valid range for this integer option is 0 ≤ max_soc and its default value is 4. - **kappa_soc** (float): advanced! Factor in the sufficient reduction rule for second order correction. This option determines how much a second order correction step must reduce the constraint violation so that further correction steps are attempted. See Step A-5.9 of Algorithm A in the implementation paper. The valid range for this real option is 0 < kappa_soc and its default value is 0.99. - **obj_max_inc** (float): advanced! Determines the upper bound on the acceptable increase of barrier objective function. Trial points are rejected if they lead to an increase in the barrier objective function by more than obj_max_inc orders of magnitude. The valid range for this real option is 1 < obj_max_inc and its default value is 5. - **max_filter_resets** (int): advanced! Maximal allowed number of filter resets. A positive number enables a heuristic that resets the filter, whenever in more than "filter_reset_trigger" successive iterations the last rejected trial steps size was rejected because of the filter. This option determine the maximal number of resets that are allowed to take place. The valid range for this integer option is 0 ≤ max_filter_resets and its default value is 5. - **filter_reset_trigger** (int): Advanced! Number of iterations that trigger the filter reset. If the filter reset heuristic is active and the number of successive iterations in which the last rejected trial step size was rejected because of the filter, the filter is reset. The valid range for this integer option is 1 ≤ filter_reset_trigger and its default value is 5. - **corrector_type (str)**: advanced! The type of corrector steps that should be taken. 
If "mu_strategy" is "adaptive", this option determines what kind of corrector steps should be tried. Changing this option is experimental. The default value for this string option is "none". Possible values: - "none" or None: no corrector - "affine": corrector step towards mu=0 - "primal-dual": corrector step towards current mu - **skip_corr_if_neg_curv (str or bool)**: advanced! Whether to skip the corrector step in negative curvature iteration. The corrector step is not tried if negative curvature has been encountered during the computation of the search direction in the current iteration. This option is only used if "mu_strategy" is "adaptive". Changing this option is experimental. The default value for this string option is "yes". Possible values: "yes", "no", True, False. - **skip_corr_in_monotone_mode (str or bool)**: Advanced! Whether to skip the corrector step during monotone brrier parameter mode. The corrector step is not tried if the algorithm is currently in the monotone mode (see also option "barrier_strategy"). This option is only used if "mu_strategy" is "adaptive". Changing this option is experimental. The default value for this string option is "yes". Possible values: "yes", "no", True, False - **corrector_compl_avrg_red_fact** (float): advanced! Complementarity tolerance factor for accepting corrector step. This option determines the factor by which complementarity is allowed to increase for a corrector step to be accepted. Changing this option is experimental. The valid range for this real option is 0 < corrector_compl_avrg_red_fact and its default value is 1. - **soc_method** (int): Ways to apply second order correction. This option determines the way to apply second order correction, 0 is the method described in the implementation paper. 1 is the modified way which adds alpha on the rhs of x and s rows. Officially, the valid range for this integer option is 0 ≤ soc_method ≤ 1 and its default value is 0 but only 0 and 1 are allowed. 
- **nu_init** (float): advanced! Initial value of the penalty parameter. The valid range for this real option is 0 < nu_init and its default value is :math:`1e-06`. - **nu_inc** (float): advanced! Increment of the penalty parameter. The valid range for this real option is 0 < nu_inc and its default value is 0.0001. - **rho** (float): advanced! Value in penalty parameter update formula. The valid range for this real option is 0 < rho < 1 and its default value is 0.1. - **kappa_sigma** (float): advanced! Factor limiting the deviation of dual variables from primal estimates. If the dual variables deviate from their primal estimates, a correction is performed. See Eqn. (16) in the implementation paper. Setting the value to less than 1 disables the correction. The valid range for this real option is 0 < kappa_sigma and its default value is :math:`1e+10`. - **recalc_y (str or bool)**: Tells the algorithm to recalculate the equality and inequality multipliers as least square estimates. This asks the algorithm to recompute the multipliers, whenever the current infeasibility is less than recalc_y_feas_tol. Choosing yes might be helpful in the quasi-Newton option. However, each recalculation requires an extra factorization of the linear system. If a limited memory quasi-Newton option is chosen, this is used by default. The default value for this string option is "no". Possible values: - "no" or False: use the Newton step to update the multipliers - "yes" or True: use least-square multiplier estimates - **recalc_y_feas_tol** (float): Feasibility threshold for recomputation of multipliers. If recalc_y is chosen and the current infeasibility is less than this value, then the multipliers are recomputed. The valid range for this real option is 0 < recalc_y_feas_tol and its default value is :math:`1e-06`. - **slack_move** (float): advanced! Correction size for very small slacks. Due to numerical issues or the lack of an interior, the slack variables might become very small.
If a slack becomes very small compared to machine precision, the corresponding bound is moved slightly. This parameter determines how large the move should be. Its default value is mach_eps^{3/4}. See also end of Section 3.5 in implementation paper - but actual implementation might be somewhat different. The valid range for this real option is 0 ≤ slack_move and its default value is 1.81899 · :math:`1e-12`. - **constraint_violation_norm_type (str)**: advanced! Norm to be used for the constraint violation in te line search. Determines which norm should be used when the algorithm computes the constraint violation in the line search. The default value for this string option is "1-norm". Possible values: - "1-norm": use the 1-norm - "2-norm": use the 2-norm - "max-norm": use the infinity norm - **mehrotra_algorithm (str or bool)**: Indicates whether to do Mehrotra's predictor-corrector algorithm. If enabled, line search is disabled and the (unglobalized) adaptive mu strategy is chosen with the "probing" oracle, and "corrector_type=affine" is used without any safeguards; you should not set any of those options explicitly in addition. Also, unless otherwise specified, the values of ``bound_push`` , ``bound_frac`` , and ``bound_mult_init_val`` are set more aggressive, and sets "alpha_for_y=bound_mult". The Mehrotra's predictor-corrector algorithm works usually very well for LPs and convex QPs. The default value for this string option is "no". Possible values: "yes", "no", True, False. - **fast_step_computation (str or bool)**: Indicates if the linear system should be solved quickly. If enabled, the algorithm assumes that the linear system that is solved to obtain the search direction is solved sufficiently well. In that case, no residuals are computed to verify the solution and the computation of the search direction is a little faster. The default value for this string option is "no". Possible values: "yes", "no", True, False. 
- **min_refinement_steps** (int): Minimum number of iterative refinement steps per linear system solve. Iterative refinement (on the full asymmetric system) is performed for each right hand side. This option determines the minimum number of iterative refinements (i.e. at least "min_refinement_steps" iterative refinement steps are enforced per right hand side.) The valid range for this integer option is 0 ≤ min_refinement_steps and its default value is 1. - **max_refinement_steps** (int): Maximum number of iterative refinement steps per linear system solve. Iterative refinement (on the full unsymmetric system) is performed for each right hand side. This option determines the maximum number of iterative refinement steps. The valid range for this integer option is 0 ≤ max_refinement_steps and its default value is 10. - **residual_ratio_max** (float): advanced! Iterative refinement tolerance. Iterative refinement is performed until the residual test ratio is less than this tolerance (or until "max_refinement_steps" refinement steps are performed). The valid range for this real option is 0 < residual_ratio_max and its default value is :math:`1e-10`. - **residual_ratio_singular** (float): advanced! Threshold for declaring linear system singular after failed iterative refinement. If the residual test ratio is larger than this value after failed iterative refinement, the algorithm pretends that the linear system is singular. The valid range for this real option is 0 < residual_ratio_singular and its default value is :math:`1e-05`. - **residual_improvement_factor** (float): advanced! Minimal required reduction of residual test ratio in iterative refinement. If the improvement of the residual test ratio made by one iterative refinement step is not better than this factor, iterative refinement is aborted. The valid range for this real option is 0 < residual_improvement_factor and its default value is 1.
- **neg_curv_test_tol** (float): Tolerance for heuristic to ignore wrong inertia. If nonzero, incorrect inertia in the augmented system is ignored, and Ipopt tests if the direction is a direction of positive curvature. This tolerance is alpha_n in the paper by :cite:`Chiang2014` and it determines when the direction is considered to be sufficiently positive. A value in the range of [1e-12, 1e-11] is recommended. The valid range for this real option is 0 ≤ neg_curv_test_tol and its default value is 0. - **neg_curv_test_reg (str or bool)**: Whether to do the curvature test with the primal regularization (see :cite:`Chiang2014`). The default value for this string option is "yes". Possible values: - "yes" or True: use primal regularization with the inertia-free curvature test - "no" or False: use original IPOPT approach, in which the primal regularization is ignored - **max_hessian_perturbation** (float): Maximum value of regularization parameter for handling negative curvature. In order to guarantee that the search directions are indeed proper descent directions, Ipopt requires that the inertia of the (augmented) linear system for the step computation has the correct number of negative and positive eigenvalues. The idea is that this guides the algorithm away from maximizers and makes Ipopt more likely converge to first order optimal points that are minimizers. If the inertia is not correct, a multiple of the identity matrix is added to the Hessian of the Lagrangian in the augmented system. This parameter gives the maximum value of the regularization parameter. If a regularization of that size is not enough, the algorithm skips this iteration and goes to the restoration phase. This is delta_w^max in the implementation paper. The valid range for this real option is 0 < max_hessian_perturbation and its default value is :math:`1e+20`. - **min_hessian_perturbation** (float): Smallest perturbation of the Hessian block. 
The size of the perturbation of the Hessian block is never selected smaller than this value, unless no perturbation is necessary. This is delta_w^min in implementation paper. The valid range for this real option is 0 ≤ min_hessian_perturbation and its default value is :math:`1e-20`. - **perturb_inc_fact_first** (float): Increase factor for x-s perturbation for very first perturbation. The factor by which the perturbation is increased when a trial value was not sufficient - this value is used for the computation of the very first perturbation and allows a different value for the first perturbation than that used for the remaining perturbations. This is bar_kappa_w^+ in the implementation paper. The valid range for this real option is 1 < perturb_inc_fact_first and its default value is 100. - **perturb_inc_fact** (float): Increase factor for x-s perturbation. The factor by which the perturbation is increased when a trial value was not sufficient - this value is used for the computation of all perturbations except for the first. This is kappa_w^+ in the implementation paper. The valid range for this real option is 1 < perturb_inc_fact and its default value is 8. - **perturb_dec_fact** (float): Decrease factor for x-s perturbation. The factor by which the perturbation is decreased when a trial value is deduced from the size of the most recent successful perturbation. This is kappa_w^- in the implementation paper. The valid range for this real option is 0 < perturb_dec_fact < 1 and its default value is 0.333333. - **first_hessian_perturbation** (float): Size of first x-s perturbation tried. The first value tried for the x-s perturbation in the inertia correction scheme. This is delta_0 in the implementation paper. The valid range for this real option is 0 < first_hessian_perturbation and its default value is 0.0001. - **jacobian_regularization_value** (float): Size of the regularization for rank-deficient constraint Jacobians. 
This is bar delta_c in the implementation paper. The valid range for this real option is 0 ≤ jacobian_regularization_value and its default value is :math:`1e-08`. - **jacobian_regularization_exponent** (float): advanced! Exponent for mu in the regularization for rank-deficient constraint Jacobians. This is kappa_c in the implementation paper. The valid range for this real option is 0 ≤ jacobian_regularization_exponent and its default value is 0.25. - **perturb_always_cd (str or bool)**: advanced! Active permanent perturbation of constraint linearization. Enabling this option leads to using the delta_c and delta_d perturbation for the computation of every search direction. Usually, it is only used when the iteration matrix is singular. The default value for this string option is "no". Possible values: "yes", "no", True, False. - **expect_infeasible_problem (str or bool)**: Enable heuristics to quickly detect an infeasible problem. This option is meant to activate heuristics that may speed up the infeasibility determination if you expect that there is a good chance for the problem to be infeasible. In the filter line search procedure, the restoration phase is called more quickly than usually, and more reduction in the constraint violation is enforced before the restoration phase is left. If the problem is square, this option is enabled automatically. The default value for this string option is "no". Possible values: "yes", "no", True, False. - **expect_infeasible_problem_ctol** (float): Threshold for disabling "expect_infeasible_problem" option. If the constraint violation becomes smaller than this threshold, the "expect_infeasible_problem" heuristics in the filter line search are disabled. If the problem is square, this option is set to 0. The valid range for this real option is 0 ≤ expect_infeasible_problem_ctol and its default value is 0.001. - **expect_infeasible_problem_ytol** (float): Multiplier threshold for activating "expect_infeasible_problem" option.
If the max norm of the constraint multipliers becomes larger than this value and "expect_infeasible_problem" is chosen, then the restoration phase is entered. The valid range for this real option is 0 < expect_infeasible_problem_ytol and its default value is :math:`1e+08`. - **start_with_resto (str or bool)**: Whether to switch to restoration phase in first iteration. Setting this option to "yes" forces the algorithm to switch to the feasibility restoration phase in the first iteration. If the initial point is feasible, the algorithm will abort with a failure. The default value for this string option is "no". Possible values: "yes", "no", True, False - **soft_resto_pderror_reduction_factor** (float): Required reduction in primal-dual error in the soft restoration phase. The soft restoration phase attempts to reduce the primal-dual error with regular steps. If the damped primal-dual step (damped only to satisfy the fraction-to-the-boundary rule) is not decreasing the primal-dual error by at least this factor, then the regular restoration phase is called. Choosing "0" here disables the soft restoration phase. The valid range for this real option is 0 ≤ soft_resto_pderror_reduction_factor and its default value is 0.9999. - **max_soft_resto_iters** (int): advanced! Maximum number of iterations performed successively in soft restoration phase. If the soft restoration phase is performed for more than so many iterations in a row, the regular restoration phase is called. The valid range for this integer option is 0 ≤ max_soft_resto_iters and its default value is 10. - **required_infeasibility_reduction** (float): Required reduction of infeasibility before leaving restoration phase. The restoration phase algorithm is performed, until a point is found that is acceptable to the filter and the infeasibility has been reduced by at least the fraction given by this option. The valid range for this real option is 0 ≤ required_infeasibility_reduction < 1 and its default value is 0.9.
- **max_resto_iter** (int): advanced! Maximum number of successive iterations in restoration phase. The algorithm terminates with an error message if the number of iterations successively taken in the restoration phase exceeds this number. The valid range for this integer option is 0 ≤ max_resto_iter and its default value is 3000000. - **evaluate_orig_obj_at_resto_trial (str or bool)**: Determines if the original objective function should be evaluated at restoration phase trial points. Enabling this option makes the restoration phase algorithm evaluate the objective function of the original problem at every trial point encountered during the restoration phase, even if this value is not required. In this way, it is guaranteed that the original objective function can be evaluated without error at all accepted iterates; otherwise the algorithm might fail at a point where the restoration phase accepts an iterate that is good for the restoration phase problem, but not the original problem. On the other hand, if the evaluation of the original objective is expensive, this might be costly. The default value for this string option is "yes". Possible values: "yes", "no", True, False - **resto_penalty_parameter** (float): advanced! Penalty parameter in the restoration phase objective function. This is the parameter rho in equation (31a) in the Ipopt implementation paper. The valid range for this real option is 0 < resto_penalty_parameter and its default value is 1000. - **resto_proximity_weight** (float): advanced! Weighting factor for the proximity term in restoration phase objective. This determines how the parameter zeta in equation (29a) in the implementation paper is computed. zeta here is resto_proximity_weight*sqrt(mu), where mu is the current barrier parameter. The valid range for this real option is 0 ≤ resto_proximity_weight and its default value is 1. - **bound_mult_reset_threshold** (float): Threshold for resetting bound multipliers after the restoration phase.
After returning from the restoration phase, the bound multipliers are updated with a Newton step for complementarity. Here, the change in the primal variables during the entire restoration phase is taken to be the corresponding primal Newton step. However, if after the update the largest bound multiplier exceeds the threshold specified by this option, the multipliers are all reset to 1. The valid range for this real option is 0 ≤ bound_mult_reset_threshold and its default value is 1000. - **constr_mult_reset_threshold** (float): Threshold for resetting equality and inequality multipliers ater restoration phase. After returning from the restoration phase, the constraint multipliers are recomputed by a least square estimate. This option triggers when those least-square estimates should be ignored. The valid range for this real option is 0 ≤ constr_mult_reset_threshold and its default value is 0. - **resto_failure_feasibility_threshold** (float): advanced! Threshold for primal infeasibility to declare failure of restoration phase. If the restoration phase is terminated because of the "acceptable" termination criteria and the primal infeasibility is smaller than this value, the restoration phase is declared to have failed. The default value is actually 1e2*tol, where tol is the general termination tolerance. The valid range for this real option is 0 ≤ resto_failure_feasibility_threshold and its default value is 0. - **limited_memory_aug_solver (str)**: advanced! Strategy for solving the augmented system for low-rank Hessian. The default value for this string option is "sherman-morrison". Possible values: - "sherman-morrison": use Sherman-Morrison formula - "extended": use an extended augmented system - **limited_memory_max_history** (int): Maximum size of the history for the limited quasi-Newton Hessian approximation. This option determines the number of most recent iterations that are taken into account for the limited-memory quasi-Newton approximation. 
The valid range for this integer option is 0 ≤ limited_memory_max_history and its default value is 6. - **limited_memory_update_type (str)**: Quasi-Newton update formula for the limited memory quasi-Newton approximation. The default value for this string option is "bfgs". Possible values: - "bfgs": BFGS update (with skipping) - "sr1": SR1 (not working well) - **limited_memory_initialization (str)**: Initialization strategy for the limited memory quasi-Newton aproximation. Determines how the diagonal Matrix B_0 as the first term in the limited memory approximation should be computed. The default value for this string option is "scalar1". Possible values: - "scalar1": sigma = s^Ty/s^Ts - "scalar2": sigma = y^Ty/s^Ty - "scalar3": arithmetic average of scalar1 and scalar2 - "scalar4": geometric average of scalar1 and scalar2 - "constant": sigma = limited_memory_init_val - **limited_memory_init_val** (float): Value for B0 in low-rank update. The starting matrix in the low rank update, B0, is chosen to be this multiple of the identity in the first iteration (when no updates have been performed yet), and is constantly chosen as this value, if "limited_memory_initialization" is "constant". The valid range for this real option is 0 < limited_memory_init_val and its default value is 1. - **limited_memory_init_val_max** (float): Upper bound on value for B0 in low-rank update. The starting matrix in the low rank update, B0, is chosen to be this multiple of the identity in the first iteration (when no updates have been performed yet), and is constantly chosen as this value, if "limited_memory_initialization" is "constant". The valid range for this real option is 0 < limited_memory_init_val_max and its default value is :math:`1e+08`. - **limited_memory_init_val_min** (float): Lower bound on value for B0 in low-rank update. 
The starting matrix in the low rank update, B0, is chosen to be this multiple of the identity in the first iteration (when no updates have been performed yet), and is constantly chosen as this value, if "limited_memory_initialization" is "constant". The valid range for this real option is 0 < limited_memory_init_val_min and its default value is :math:`1e-08`. - **limited_memory_max_skipping** (int): Threshold for successive iterations where update is skipped. If the update is skipped more than this number of successive iterations, the quasi-Newton approximation is reset. The valid range for this integer option is 1 ≤ limited_memory_max_skipping and its default value is 2. - **limited_memory_special_for_resto (str or bool)**: Determines if the quasi-Newton updates should be special dring the restoration phase. Until Nov 2010, Ipopt used a special update during the restoration phase, but it turned out that this does not work well. The new default uses the regular update procedure and it improves results. If for some reason you want to get back to the original update, set this option to "yes". The default value for this string option is "no". Possible values: "yes", "no", True, False. - **hessian_approximation (str)**: Indicates what Hessian information is to be used. This determines which kind of information for the Hessian of the Lagrangian function is used by the algorithm. The default value for this string option is "limited-memory". Possible values: - "exact": Use second derivatives provided by the NLP. - "limited-memory": Perform a limited-memory quasi-Newton approximation - **hessian_approximation_space (str)**: advanced! Indicates in which subspace the Hessian information is to be approximated. The default value for this string option is "nonlinear-variables". Possible values: - "nonlinear-variables": only in space of nonlinear variables. 
- "all-variables": in space of all variables (without slacks) - **linear_solver (str)**: Linear solver used for step computations. Determines which linear algebra package is to be used for the solution of the augmented linear system (for obtaining the search directions). The default value for this string option is "ma27". Possible values: - "mumps" (use the Mumps package, default) - "ma27" (load the Harwell routine MA27 from library at runtime) - "ma57" (load the Harwell routine MA57 from library at runtime) - "ma77" (load the Harwell routine HSL_MA77 from library at runtime) - "ma86" (load the Harwell routine MA86 from library at runtime) - "ma97" (load the Harwell routine MA97 from library at runtime) - "pardiso" (load the Pardiso package from pardiso-project.org from user-provided library at runtime) - "custom" (use custom linear solver (expert use)) - **linear_solver_options** (dict or None): dictionary with the linear solver options, possibly including `linear_system_scaling`, `hsllib` and `pardisolib`. See the `ipopt documentation <https://coin-or.github.io/Ipopt/OPTIONS.html>`_ for details. The linear solver options are not automatically converted to float at the moment.] ``` (fides-algorithm)= ## The Fides Optimizer optimagic supports the [Fides Optimizer](https://fides-optimizer.readthedocs.io/en/latest). To use Fides, you need to have [the fides package](https://github.com/fides-dev/fides) installed (`pip install fides>=0.7.4`, make sure you have at least 0.7.4). ```{eval-rst} .. dropdown:: fides .. code-block:: "fides" `Fides `_ implements an Interior Trust Region Reflective for boundary constrained optimization problems based on the papers :cite:`Coleman1994` and :cite:`Coleman1996`. Accordingly, Fides is named after the Roman goddess of trust and reliability. In contrast to other optimizers, Fides solves the full trust-region subproblem exactly, which can yield higher quality proposal steps, but is computationally more expensive.
This makes Fides particularly attractive for optimization problems with objective functions that are computationally expensive to evaluate and the computational cost of solving the trust-region subproblem is negligible. - **hessian_update_strategy** (str): Hessian Update Strategy to employ. You can provide a lowercase or uppercase string or a fides.hessian_approximation.HessianApproximation class instance. FX, SSM, TSSM and GNSBFGS are not supported by optimagic. The available update strategies are: - **bb**: Broyden's "bad" method as introduced :cite:`Broyden1965`. - **bfgs**: Broyden-Fletcher-Goldfarb-Shanno update strategy. - **bg**: Broyden's "good" method as introduced in :cite:`Broyden1965`. - You can use a general BroydenClass Update scheme using the Broyden class from `fides.hessian_approximation`. This is a generalization of BFGS/DFP methods where the parameter :math:`phi` controls the convex combination between the two. This is a rank 2 update strategy that preserves positive-semidefiniteness and symmetry (if :math:`\phi \in [0,1]`). It is described in :cite:`Nocedal1999`, Chapter 6.3. - **dfp**: Davidon-Fletcher-Powell update strategy. - **sr1**: Symmetric Rank 1 update strategy as described in :cite:`Nocedal1999`, Chapter 6.2. - **convergence.ftol_abs** (float): absolute convergence criterion tolerance. This is only the interpretation of this parameter if the relative criterion tolerance is set to 0. Denoting the absolute criterion tolerance by :math:`\alpha` and the relative criterion tolerance by :math:`\beta`, the convergence condition on the criterion improvement is :math:`|f(x_k) - f(x_{k-1})| < \alpha + \beta \cdot |f(x_{k-1})|` - **convergence.ftol_rel** (float): relative convergence criterion tolerance. This is only the interpretation of this parameter if the absolute criterion tolerance is set to 0 (as is the default).
Denoting the absolute criterion tolerance by :math:`\alpha` and the relative criterion tolerance by :math:`\beta`, the convergence condition on the criterion improvement is :math:`|f(x_k) - f(x_{k-1})| < \alpha + \beta \cdot |f(x_{k-1})|` - **convergence.xtol_abs** (float): The optimization terminates successfully when the step size falls below this number, i.e. when :math:`||x_{k+1} - x_k||` is smaller than this tolerance. - **convergence.gtol_abs** (float): The optimization terminates successfully when the gradient norm is less or equal than this tolerance. - **convergence.gtol_rel** (float): The optimization terminates successfully when the norm of the gradient divided by the absolute function value is less or equal to this tolerance. - **stopping.maxiter** (int): maximum number of allowed iterations. - **stopping.max_seconds** (int): maximum number of walltime seconds, deactivated by default. - **trustregion.initial_radius** (float): Initial trust region radius. Default is 1. - **trustregion.stepback_strategy** (str): search refinement strategy if proposed step reaches a parameter bound. The default is "truncate". The available options are: - "reflect": recursive reflections at boundary. - "reflect_single": single reflection at boundary. - "truncate": truncate step at boundary and re-solve the restricted subproblem - "mixed": mix reflections and truncations - **trustregion.subspace_dimension** (str): Subspace dimension in which the subproblem will be solved. The default is "2D". The following values are available: - "2D": Two dimensional Newton/Gradient subspace - "full": full dimensionality - "scg": Conjugated Gradient subspace via Steihaug's method - **trustregion.max_stepback_fraction** (float): Stepback parameter that controls how close steps are allowed to get to the boundary. It is the maximal fraction of a step to take if full step would reach breakpoint. - **trustregion.decrease_threshold** (float): Acceptance threshold for trust region ratio. 
The default is 0.25 (:cite:`Nocedal2006`). The radius is decreased if the trust region ratio is below this value. This is denoted by :math:`\\mu` in algorithm 4.1 in :cite:`Nocedal2006`. - **trustregion.increase_threshold** (float): Threshold for the trust region radius ratio above which the trust region radius can be increased. This is denoted by :math:`\eta` in algorithm 4.1 in :cite:`Nocedal2006`. The default is 0.75 (:cite:`Nocedal2006`). - **trustregion.decrease_factor** (float): factor by which trust region radius will be decreased in case it is decreased. This is denoted by :math:`\gamma_1` in algorithm 4.1 in :cite:`Nocedal2006` and its default is 0.25. - **trustregion.increase_factor** (float): factor by which trust region radius will be increase in case it is increase. This is denoted by :math:`\gamma_2` in algorithm 4.1 in :cite:`Nocedal2006` and its default is 2.0. - **trustregion.refine_stepback** (bool): whether to refine stepbacks via optimization. Default is False. - **trustregion.scaled_gradient_as_possible_stepback** (bool): whether the scaled gradient should be added to the set of possible stepback proposals. Default is False. ``` ## The NLOPT Optimizers (nlopt) optimagic supports the following [NLOPT](https://nlopt.readthedocs.io/en/latest/) algorithms. Please add the [appropriate citations](https://nlopt.readthedocs.io/en/latest/Citing_NLopt/) in addition to optimagic when using an NLOPT algorithm. To install nlopt run `conda install nlopt`. ```{eval-rst} .. dropdown:: nlopt_bobyqa .. code-block:: "nlopt_bobyqa" Minimize a scalar function using the BOBYQA algorithm. The implementation is derived from the BOBYQA subroutine of M. J. D. Powell. The algorithm performs derivative free bound-constrained optimization using an iteratively constructed quadratic approximation for the objective function. Due to its use of quadratic appoximation, the algorithm may perform poorly for objective functions that are not twice-differentiable. 
For details see :cite:`Powell2009`. - **convergence.xtol_rel** (float): Stop when the relative movement between parameter vectors is smaller than this. - **convergence.xtol_abs** (float): Stop when the absolute movement between parameter vectors is smaller than this. - **convergence.ftol_rel** (float): Stop when the relative improvement between two iterations is smaller than this. - **convergence.ftol_abs** (float): Stop when the change of the criterion function between two iterations is smaller than this. - **stopping.maxfun** (int): If the maximum number of function evaluation is reached, the optimization stops but we do not count this as convergence. ``` ```{eval-rst} .. dropdown:: nlopt_neldermead .. code-block:: "nlopt_neldermead" Minimize a scalar function using the Nelder-Mead simplex algorithm. The basic algorithm is described in :cite:`Nelder1965`. The difference between the nlopt implementation and the original implementation is that the nlopt version supports bounds. This is done by moving all new points that would lie outside the bounds exactly on the bounds. - **convergence.xtol_rel** (float): Stop when the relative movement between parameter vectors is smaller than this. - **convergence.xtol_abs** (float): Stop when the absolute movement between parameter vectors is smaller than this. - **convergence.ftol_rel** (float): Stop when the relative improvement between two iterations is smaller than this. - **convergence.ftol_abs** (float): Stop when the change of the criterion function between two iterations is smaller than this. - **stopping.maxfun** (int): If the maximum number of function evaluation is reached, the optimization stops but we do not count this as convergence. ``` ```{eval-rst} .. dropdown:: nlopt_praxis .. code-block:: "nlopt_praxis" Minimize a scalar function using principal-axis method. This is a gradient-free local optimizer originally described in :cite:`Brent1972`. 
It assumes quadratic form of the optimized function and repeatedly updates a set of conjugate search directions. The algorithm is not invariant to scaling of the objective function and may fail under certain rank-preserving transformations of it (e.g., ones that will lead to a non-quadratic shape of the objective function). The algorithm is not deterministic and it is not possible to achieve determinacy via seed setting. The algorithm failed on a simple benchmark function with finite parameter bounds. Passing arguments `lower_bounds` and `upper_bounds` has been disabled for this algorithm. The difference between the nlopt implementation and the original implementation is that the nlopt version supports bounds. This is done by returning infinity (Inf) when the constraints are violated. The implementation of bound constraints is achieved at the cost of significantly reduced speed of convergence. In case of bounded constraints, this method is dominated by `nlopt_bobyqa` and `nlopt_cobyla`. - **convergence.xtol_rel** (float): Stop when the relative movement between parameter vectors is smaller than this. - **convergence.xtol_abs** (float): Stop when the absolute movement between parameter vectors is smaller than this. - **convergence.ftol_rel** (float): Stop when the relative improvement between two iterations is smaller than this. - **convergence.ftol_abs** (float): Stop when the change of the criterion function between two iterations is smaller than this. - **stopping.maxfun** (int): If the maximum number of function evaluation is reached, the optimization stops but we do not count this as convergence. ``` ```{eval-rst} .. dropdown:: nlopt_cobyla .. code-block:: "nlopt_cobyla" Minimize a scalar function using the cobyla method. The algorithm is derived from Powell's Constrained Optimization BY Linear Approximations (COBYLA) algorithm. It is a derivative-free optimizer with nonlinear inequality and equality constraints, described in :cite:`Powell1994`. 
It constructs successive linear approximations of the objective function and constraints via a simplex of n+1 points (in n dimensions), and optimizes these approximations in a trust region at each step. The nlopt implementation differs from the original implementation in a few ways: - Incorporates all of the NLopt termination criteria. - Adds explicit support for bound constraints. - Allows the algorithm to increase the trust-region radius if the predicted improvement was approximately right and the simplex is satisfactory. - Pseudo-randomizes simplex steps in the algorithm, improving robustness by avoiding accidentally taking steps that don't improve conditioning, while preserving the deterministic nature of the algorithm. - Supports unequal initial-step sizes in the different parameters. - **convergence.xtol_rel** (float): Stop when the relative movement between parameter vectors is smaller than this. - **convergence.xtol_abs** (float): Stop when the absolute movement between parameter vectors is smaller than this. - **convergence.ftol_rel** (float): Stop when the relative improvement between two iterations is smaller than this. - **convergence.ftol_abs** (float): Stop when the change of the criterion function between two iterations is smaller than this. - **stopping.maxfun** (int): If the maximum number of function evaluation is reached, the optimization stops but we do not count this as convergence. ``` ```{eval-rst} .. dropdown:: nlopt_sbplx .. code-block:: "nlopt_sbplx" Minimize a scalar function using the "Subplex" algorithm. The algorithm is a reimplementation of Tom Rowan's "Subplex" algorithm. See :cite:`Rowan1990`. Subplex is a variant of Nelder-Mead that uses Nelder-Mead on a sequence of subspaces. It is claimed to be more efficient and robust than the original Nelder-Mead algorithm. 
The difference between this re-implementation and the original algorithm of Rowan, is that it explicitly supports bound constraints providing big improvement in the case where the optimum lies against one of the constraints. - **convergence.xtol_rel** (float): Stop when the relative movement between parameter vectors is smaller than this. - **convergence.xtol_abs** (float): Stop when the absolute movement between parameter vectors is smaller than this. - **convergence.ftol_rel** (float): Stop when the relative improvement between two iterations is smaller than this. - **convergence.ftol_abs** (float): Stop when the change of the criterion function between two iterations is smaller than this. - **stopping.maxfun** (int): If the maximum number of function evaluation is reached, the optimization stops but we do not count this as convergence. ``` ```{eval-rst} .. dropdown:: nlopt_newuoa .. code-block:: "nlopt_newuoa" Minimize a scalar function using the NEWUOA algorithm. The algorithm is derived from the NEWUOA subroutine of M. J. D. Powell which uses iteratively constructed quadratic approximation of the objective function to perform derivative-free unconstrained optimization. For more details see: :cite:`Powell2004`. The algorithm in `nlopt` has been modified to support bound constraints. If all of the bound constraints are infinite, this function calls the `nlopt.LN_NEWUOA` optimizer for unconstrained optimization. Otherwise, it calls the `nlopt.LN_NEWUOA_BOUND` optimizer for constrained problems. `NEWUOA` requires the dimension n of the parameter space to be `≥ 2`, i.e. the implementation does not handle one-dimensional optimization problems. - **convergence.xtol_rel** (float): Stop when the relative movement between parameter vectors is smaller than this. - **convergence.xtol_abs** (float): Stop when the absolute movement between parameter vectors is smaller than this. 
- **convergence.ftol_rel** (float): Stop when the relative improvement between two iterations is smaller than this. - **convergence.ftol_abs** (float): Stop when the change of the criterion function between two iterations is smaller than this. - **stopping.maxfun** (int): If the maximum number of function evaluation is reached, the optimization stops but we do not count this as convergence. ``` ```{eval-rst} .. dropdown:: nlopt_tnewton .. code-block:: "nlopt_tnewton" Minimize a scalar function using the "TNEWTON" algorithm. The algorithm is based on a Fortran implementation of a preconditioned inexact truncated Newton algorithm written by Prof. Ladislav Luksan. Truncated Newton methods are a set of algorithms designed to solve large scale optimization problems. The algorithms use (inaccurate) approximations of the solutions to Newton equations, using conjugate gradient methods, to handle the expensive calculations of derivatives during each iteration. Detailed description of algorithms is given in :cite:`Dembo1983`. - **convergence.xtol_rel** (float): Stop when the relative movement between parameter vectors is smaller than this. - **convergence.xtol_abs** (float): Stop when the absolute movement between parameter vectors is smaller than this. - **convergence.ftol_rel** (float): Stop when the relative improvement between two iterations is smaller than this. - **convergence.ftol_abs** (float): Stop when the change of the criterion function between two iterations is smaller than this. - **stopping.maxfun** (int): If the maximum number of function evaluation is reached, the optimization stops but we do not count this as convergence. ``` ```{eval-rst} .. dropdown:: nlopt_lbfgs .. code-block:: "nlopt_lbfgs" Minimize a scalar function using the "LBFGS" algorithm. The algorithm is based on a Fortran implementation of low storage BFGS algorithm written by Prof. Ladislav Luksan. 
LBFGS is an approximation of the original Broyden–Fletcher–Goldfarb–Shanno algorithm based on limited use of memory. Memory efficiency is obtained by preserving a limited number (<10) of past updates of candidate points and gradient values and using them to approximate the hessian matrix. Detailed description of algorithms is given in :cite:`Nocedal1989`, :cite:`Nocedal1980`. - **convergence.xtol_rel** (float): Stop when the relative movement between parameter vectors is smaller than this. - **convergence.xtol_abs** (float): Stop when the absolute movement between parameter vectors is smaller than this. - **convergence.ftol_rel** (float): Stop when the relative improvement between two iterations is smaller than this. - **convergence.ftol_abs** (float): Stop when the change of the criterion function between two iterations is smaller than this. - **stopping.maxfun** (int): If the maximum number of function evaluation is reached, the optimization stops but we do not count this as convergence. ``` ```{eval-rst} .. dropdown:: nlopt_ccsaq .. code-block:: "nlopt_ccsaq" Minimize a scalar function using CCSAQ algorithm. CCSAQ uses the quadratic variant of the conservative convex separable approximation. The algorithm performs gradient based local optimization with equality (but not inequality) constraints. At each candidate point x, a quadratic approximation to the criterion function is computed using the value of gradient at point x. A penalty term is incorporated to render optimization convex and conservative. The algorithm is "globally convergent" in the sense that it is guaranteed to converge to a local optimum from any feasible starting point. The implementation is based on CCSA algorithm described in :cite:`Svanberg2002`. - **convergence.xtol_rel** (float): Stop when the relative movement between parameter vectors is smaller than this. - **convergence.xtol_abs** (float): Stop when the absolute movement between parameter vectors is smaller than this. 
- **convergence.ftol_rel** (float): Stop when the relative improvement between two iterations is smaller than this. - **convergence.ftol_abs** (float): Stop when the change of the criterion function between two iterations is smaller than this. - **stopping.maxfun** (int): If the maximum number of function evaluation is reached, the optimization stops but we do not count this as convergence. ``` ```{eval-rst} .. dropdown:: nlopt_mma .. code-block:: "nlopt_mma" Minimize a scalar function using the method of moving asymptotes (MMA). The implementation is based on an algorithm described in :cite:`Svanberg2002`. The algorithm performs gradient based local optimization with equality (but not inequality) constraints. At each candidate point x, an approximation to the criterion function is computed using the value of gradient at point x. A quadratic penalty term is incorporated to render optimization convex and conservative. The algorithm is "globally convergent" in the sense that it is guaranteed to converge to a local optimum from any feasible starting point. - **convergence.xtol_rel** (float): Stop when the relative movement between parameter vectors is smaller than this. - **convergence.xtol_abs** (float): Stop when the absolute movement between parameter vectors is smaller than this. - **convergence.ftol_rel** (float): Stop when the relative improvement between two iterations is smaller than this. - **convergence.ftol_abs** (float): Stop when the change of the criterion function between two iterations is smaller than this. - **stopping.maxfun** (int): If the maximum number of function evaluation is reached, the optimization stops but we do not count this as convergence. ``` ```{eval-rst} .. dropdown:: nlopt_var .. code-block:: "nlopt_var" Minimize a scalar function using the limited memory switching variable-metric method. The algorithm relies on saving only a limited number M of past updates of the gradient to approximate the inverse hessian. 
The larger M is, the more memory is consumed. A detailed explanation of the algorithm, including its two variations of rank-2 and rank-1 methods can be found in the following paper :cite:`Vlcek2006` . - **convergence.xtol_rel** (float): Stop when the relative movement between parameter vectors is smaller than this. - **convergence.xtol_abs** (float): Stop when the absolute movement between parameter vectors is smaller than this. - **convergence.ftol_rel** (float): Stop when the relative improvement between two iterations is smaller than this. - **convergence.ftol_abs** (float): Stop when the change of the criterion function between two iterations is smaller than this. - **stopping.maxfun** (int): If the maximum number of function evaluation is reached, the optimization stops but we do not count this as convergence. - **rank_1_update** (bool): Whether a rank-1 or rank-2 update is used. ``` ```{eval-rst} .. dropdown:: nlopt_slsqp .. code-block:: "nlopt_slsqp" Optimize a scalar function based on SLSQP method. SLSQP solves gradient based nonlinearly constrained optimization problems. The algorithm treats the optimization problem as a sequence of constrained least-squares problems. The implementation is based on the procedure described in :cite:`Kraft1988` and :cite:`Kraft1994` . - **convergence.xtol_rel** (float): Stop when the relative movement between parameter vectors is smaller than this. - **convergence.xtol_abs** (float): Stop when the absolute movement between parameter vectors is smaller than this. - **convergence.ftol_rel** (float): Stop when the relative improvement between two iterations is smaller than this. - **convergence.ftol_abs** (float): Stop when the change of the criterion function between two iterations is smaller than this. - **stopping.maxfun** (int): If the maximum number of function evaluation is reached, the optimization stops but we do not count this as convergence. ``` ```{eval-rst} .. dropdown:: nlopt_direct .. 
code-block:: "nlopt_direct" Optimize a scalar function based on DIRECT method. DIRECT is the DIviding RECTangles algorithm for global optimization, described in :cite:`Jones1993` . Variations of the algorithm include locally biased routines (distinguished by _L suffix) that prove to be more efficient for functions that have few local minima. See the following for the DIRECT_L variant :cite:`Gablonsky2001` . Locally biased algorithms can be implemented both with deterministic and random (distinguished by _RAND suffix) search algorithm. Finally, both original and locally biased variants can be implemented with and without the rescaling of the bound constraints. Boolean arguments `locally_biased`, `random_search`, and `unscaled_bounds` can be set to `True` or `False` to determine which method is run. The comprehensive list of available methods are: - "DIRECT" - "DIRECT_L" - "DIRECT_L_NOSCAL" - "DIRECT_L_RAND" - "DIRECT_L_RAND_NOSCAL" - "DIRECT_RAND" - **convergence.xtol_rel** (float): Stop when the relative movement between parameter vectors is smaller than this. - **convergence.xtol_abs** (float): Stop when the absolute movement between parameter vectors is smaller than this. - **convergence.ftol_rel** (float): Stop when the relative improvement between two iterations is smaller than this. - **convergence.ftol_abs** (float): Stop when the change of the criterion function between two iterations is smaller than this. - **stopping.maxfun** (int): If the maximum number of function evaluation is reached, the optimization stops but we do not count this as convergence. - **locally_biased** (bool): Whether the "L" version of the algorithm is selected. - **random_search** (bool): Whether the randomized version of the algorithm is selected. - **unscaled_bounds** (bool): Whether the "NOSCAL" version of the algorithm is selected. ``` ```{eval-rst} .. dropdown:: nlopt_esch .. code-block:: "nlopt_esch" Optimize a scalar function using the ESCH algorithm. 
ESCH is an evolutionary algorithm that supports bound constraints only. Specifically, it does not support nonlinear constraints. More information on this method can be found in :cite:`DaSilva2010` , :cite:`DaSilva2010a` , :cite:`Beyer2002` and :cite:`Vent1975` . - **convergence.xtol_rel** (float): Stop when the relative movement between parameter vectors is smaller than this. - **convergence.xtol_abs** (float): Stop when the absolute movement between parameter vectors is smaller than this. - **convergence.ftol_rel** (float): Stop when the relative improvement between two iterations is smaller than this. - **convergence.ftol_abs** (float): Stop when the change of the criterion function between two iterations is smaller than this. - **stopping.maxfun** (int): If the maximum number of function evaluation is reached, the optimization stops but we do not count this as convergence. ``` ```{eval-rst} .. dropdown:: nlopt_isres .. code-block:: "nlopt_isres" Optimize a scalar function using the ISRES algorithm. ISRES is an implementation of "Improved Stochastic Evolution Strategy" written for solving optimization problems with non-linear constraints. The algorithm is supposed to be a global method, in that it has heuristics to avoid local minima. However, no convergence proof is available. The original method and a refined version can be found, respectively, in :cite:`PhilipRunarsson2005` and :cite:`Thomas2000` . - **convergence.xtol_rel** (float): Stop when the relative movement between parameter vectors is smaller than this. - **convergence.xtol_abs** (float): Stop when the absolute movement between parameter vectors is smaller than this. - **convergence.ftol_rel** (float): Stop when the relative improvement between two iterations is smaller than this. - **convergence.ftol_abs** (float): Stop when the change of the criterion function between two iterations is smaller than this. 
- **stopping.maxfun** (int): If the maximum number of function evaluation is reached, the optimization stops but we do not count this as convergence. ``` ```{eval-rst} .. dropdown:: nlopt_crs2_lm .. code-block:: "nlopt_crs2_lm" Optimize a scalar function using the CRS2_LM algorithm. This implementation of controlled random search method with local mutation is based on :cite:`Kaelo2006` . The original CRS method is described in :cite:`Price1978` and :cite:`Price1983` . CRS class of algorithms starts with random population of points and evolves the points "randomly". The size of the initial population can be set via the parameter population_size. If the user doesn't specify a value, it is set to the nlopt default of 10*(n+1). - **convergence.xtol_rel** (float): Stop when the relative movement between parameter vectors is smaller than this. - **convergence.xtol_abs** (float): Stop when the absolute movement between parameter vectors is smaller than this. - **convergence.ftol_rel** (float): Stop when the relative improvement between two iterations is smaller than this. - **convergence.ftol_abs** (float): Stop when the change of the criterion function between two iterations is smaller than this. - **stopping.maxfun** (int): If the maximum number of function evaluation is reached, the optimization stops but we do not count this as convergence. - **population_size** (int): Size of the population. If None, it's set to be 10 * (number of parameters + 1). ``` ## Optimizers from iminuit optimagic supports the [IMINUIT MIGRAD Optimizer](https://iminuit.readthedocs.io/). To use MIGRAD, you need to have [the iminuit package](https://github.com/scikit-hep/iminuit) installed (`pip install iminuit`). ```{eval-rst} .. dropdown:: iminuit_migrad **How to use this algorithm:** .. code-block:: import optimagic as om om.minimize( ..., algorithm=om.algos.iminuit_migrad(stopping_maxfun=10_000, ...) ) or .. 
code-block:: om.minimize( ..., algorithm="iminuit_migrad", algo_options={"stopping_maxfun": 10_000, ...} ) **Description and available options:** .. autoclass:: optimagic.optimizers.iminuit_migrad.IminuitMigrad ``` ## Nevergrad Optimizers optimagic supports the following algorithms from the [Nevergrad](https://facebookresearch.github.io/nevergrad/index.html) library. To use these optimizers, you need to have [the nevergrad package](https://github.com/facebookresearch/nevergrad) installed. (`pip install nevergrad`).\ Two algorithms from nevergrad are not available in optimagic.\ `SPSA (Simultaneous Perturbation Stochastic Approximation)` - This is WIP in nevergrad and hence imprecise.\ `AXP (AX-platform)` - Very slow and not recommended. ```{eval-rst} .. dropdown:: nevergrad_pso **How to use this algorithm:** .. code-block:: import optimagic as om om.minimize( ..., algorithm=om.algos.nevergrad_pso(stopping_maxfun=1_000, ...) ) or .. code-block:: om.minimize( ..., algorithm="nevergrad_pso", algo_options={"stopping_maxfun": 1_000, ...} ) **Description and available options:** .. autoclass:: optimagic.optimizers.nevergrad_optimizers.NevergradPSO ``` ```{eval-rst} .. dropdown:: nevergrad_cmaes **How to use this algorithm:** .. code-block:: import optimagic as om om.minimize( ..., algorithm=om.algos.nevergrad_cmaes(stopping_maxfun=1_000, ...) ) or .. code-block:: om.minimize( ..., algorithm="nevergrad_cmaes", algo_options={"stopping_maxfun": 1_000, ...} ) **Description and available options:** .. autoclass:: optimagic.optimizers.nevergrad_optimizers.NevergradCMAES ``` ```{eval-rst} .. dropdown:: nevergrad_oneplusone **How to use this algorithm:** .. code-block:: import optimagic as om om.minimize( ..., algorithm=om.algos.nevergrad_oneplusone(stopping_maxfun=1_000, ...) ) or .. code-block:: om.minimize( ..., algorithm="nevergrad_oneplusone", algo_options={"stopping_maxfun": 1_000, ...} ) **Description and available options:** .. 
autoclass:: optimagic.optimizers.nevergrad_optimizers.NevergradOnePlusOne ``` ```{eval-rst} .. dropdown:: nevergrad_de **How to use this algorithm:** .. code-block:: import optimagic as om om.minimize( ..., algorithm=om.algos.nevergrad_de(population_size="large", ...) ) or .. code-block:: om.minimize( ..., algorithm="nevergrad_de", algo_options={"population_size": "large", ...} ) **Description and available options:** .. autoclass:: optimagic.optimizers.nevergrad_optimizers.NevergradDifferentialEvolution ``` ```{eval-rst} .. dropdown:: nevergrad_bo .. note:: Using this optimizer requires the `bayes-optim` package to be installed as well. This can be done with `pip install bayes-optim`. **How to use this algorithm:** .. code-block:: import optimagic as om om.minimize( ..., algorithm=om.algos.nevergrad_bo(stopping_maxfun=1_000, ...) ) or .. code-block:: om.minimize( ..., algorithm="nevergrad_bo", algo_options={"stopping_maxfun": 1_000, ...} ) **Description and available options:** .. autoclass:: optimagic.optimizers.nevergrad_optimizers.NevergradBayesOptim ``` ```{eval-rst} .. dropdown:: nevergrad_emna **How to use this algorithm:** .. code-block:: import optimagic as om om.minimize( ..., algorithm=om.algos.nevergrad_emna(noise_handling=False, ...) ) or .. code-block:: om.minimize( ..., algorithm="nevergrad_emna", algo_options={"noise_handling": False, ...} ) **Description and available options:** .. autoclass:: optimagic.optimizers.nevergrad_optimizers.NevergradEMNA ``` ```{eval-rst} .. dropdown:: nevergrad_cga **How to use this algorithm:** .. code-block:: import optimagic as om om.minimize( ..., algorithm=om.algos.nevergrad_cga(stopping_maxfun=10_000) ) or .. code-block:: om.minimize( ..., algorithm="nevergrad_cga", algo_options={"stopping_maxfun": 10_000} ) **Description and available options:** .. autoclass:: optimagic.optimizers.nevergrad_optimizers.NevergradCGA ``` ```{eval-rst} .. dropdown:: nevergrad_eda **How to use this algorithm:** .. 
code-block:: import optimagic as om om.minimize( ..., algorithm=om.algos.nevergrad_eda(stopping_maxfun=10_000) ) or .. code-block:: om.minimize( ..., algorithm="nevergrad_eda", algo_options={"stopping_maxfun": 10_000} ) **Description and available options:** .. autoclass:: optimagic.optimizers.nevergrad_optimizers.NevergradEDA ``` ```{eval-rst} .. dropdown:: nevergrad_tbpsa **How to use this algorithm:** .. code-block:: import optimagic as om om.minimize( ..., algorithm=om.algos.nevergrad_tbpsa(noise_handling=False, ...) ) or .. code-block:: om.minimize( ..., algorithm="nevergrad_tbpsa", algo_options={"noise_handling": False, ...} ) **Description and available options:** .. autoclass:: optimagic.optimizers.nevergrad_optimizers.NevergradTBPSA ``` ```{eval-rst} .. dropdown:: nevergrad_randomsearch **How to use this algorithm:** .. code-block:: import optimagic as om om.minimize( ..., algorithm=om.algos.nevergrad_randomsearch(opposition_mode="quasi", ...) ) or .. code-block:: om.minimize( ..., algorithm="nevergrad_randomsearch", algo_options={"opposition_mode": "quasi", ...} ) **Description and available options:** .. autoclass:: optimagic.optimizers.nevergrad_optimizers.NevergradRandomSearch ``` ```{eval-rst} .. dropdown:: nevergrad_samplingsearch **How to use this algorithm:** .. code-block:: import optimagic as om om.minimize( ..., algorithm=om.algos.nevergrad_samplingsearch(sampler="Hammersley", scrambled=True) ) or .. code-block:: om.minimize( ..., algorithm="nevergrad_samplingsearch", algo_options={"sampler": "Hammersley", "scrambled": True} ) **Description and available options:** .. autoclass:: optimagic.optimizers.nevergrad_optimizers.NevergradSamplingSearch ``` ```{eval-rst} .. dropdown:: nevergrad_wizard **How to use this algorithm:** .. code-block:: import optimagic as om from optimagic.optimizers.nevergrad_optimizers import Wizard om.minimize( ..., algorithm=om.algos.nevergrad_wizard(optimizer= Wizard.NGOptRW, ...) ) or .. 
code-block:: om.minimize( ..., algorithm="nevergrad_wizard", algo_options={"optimizer": "NGOptRW", ...} ) **Description and available options:** .. autoclass:: optimagic.optimizers.nevergrad_optimizers.NevergradWizard .. autoclass:: optimagic.optimizers.nevergrad_optimizers.Wizard ``` ```{eval-rst} .. dropdown:: nevergrad_portfolio **How to use this algorithm:** .. code-block:: import optimagic as om from optimagic.optimizers.nevergrad_optimizers import Portfolio om.minimize( ..., algorithm=om.algos.nevergrad_portfolio(optimizer= Portfolio.BFGSCMAPlus, ...) ) or .. code-block:: om.minimize( ..., algorithm="nevergrad_portfolio", algo_options={"optimizer": "BFGSCMAPlus", ...} ) **Description and available options:** .. autoclass:: optimagic.optimizers.nevergrad_optimizers.NevergradPortfolio .. autoclass:: optimagic.optimizers.nevergrad_optimizers.Portfolio ``` ## Bayesian Optimization We wrap the [BayesianOptimization](https://github.com/bayesian-optimization/BayesianOptimization) package. To use it, you need to have [bayesian-optimization](https://pypi.org/project/bayesian-optimization/) installed. Note: This optimizer requires `bayesian_optimization > 2.0.0` to be installed which is incompatible with `nevergrad > 1.0.3`. ```{eval-rst} .. dropdown:: bayes_opt **How to use this algorithm:** .. code-block:: import optimagic as om om.minimize( ..., algorithm=om.algos.bayes_opt(n_iter=50, ...) ) or .. code-block:: om.minimize( ..., algorithm="bayes_opt", algo_options={"n_iter": 50, ...} ) **Description and available options:** .. autoclass:: optimagic.optimizers.bayesian_optimizer.BayesOpt ``` ## Gradient Free Optimizers Optimizers from the [gradient_free_optimizers](https://github.com/SimonBlanke/Gradient-Free-Optimizers?tab=readme-ov-file) package are available in optimagic. To use it, you need to have [gradient_free_optimizers](https://pypi.org/project/gradient_free_optimizers) installed. ```{eval-rst} .. dropdown:: gfo_hillclimbing **How to use this algorithm.** .. 
code-block:: python import optimagic as om import numpy as np om.minimize( fun=lambda x: x @ x, params=[1.0, 2.0, 3.0], algorithm=om.algos.gfo_hillclimbing(stopping_maxiter=1_000, ...), bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5])) ) or using the string interface: .. code-block:: python om.minimize( fun=lambda x: x @ x, params=[1.0, 2.0, 3.0], algorithm="gfo_hillclimbing", algo_options={"stopping_maxiter": 1_000, ...}, bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5])) ) **Description and available options:** .. autoclass:: optimagic.optimizers.gfo_optimizers.GFOHillClimbing :members: :inherited-members: Algorithm, object ``` ```{eval-rst} .. dropdown:: gfo_stochastichillclimbing **How to use this algorithm.** .. code-block:: python import optimagic as om import numpy as np om.minimize( fun=lambda x: x @ x, params=[1.0, 2.0, 3.0], algorithm=om.algos.gfo_stochastichillclimbing(stopping_maxiter=1_000, ...), bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5])) ) or using the string interface: .. code-block:: python om.minimize( fun=lambda x: x @ x, params=[1.0, 2.0, 3.0], algorithm="gfo_stochastichillclimbing", algo_options={"stopping_maxiter": 1_000, ...}, bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5])) ) **Description and available options:** .. autoclass:: optimagic.optimizers.gfo_optimizers.GFOStochasticHillClimbing :members: :inherited-members: Algorithm, object :member-order: bysource ``` ```{eval-rst} .. dropdown:: gfo_repulsinghillclimbing **How to use this algorithm.** .. code-block:: python import optimagic as om import numpy as np om.minimize( fun=lambda x: x @ x, params=[1.0, 2.0, 3.0], algorithm=om.algos.gfo_repulsinghillclimbing(stopping_maxiter=1_000, ...), bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5])) ) or using the string interface: .. 
code-block:: python om.minimize( fun=lambda x: x @ x, params=[1.0, 2.0, 3.0], algorithm="gfo_repulsinghillclimbing", algo_options={"stopping_maxiter": 1_000, ...}, bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5])) ) **Description and available options:** .. autoclass:: optimagic.optimizers.gfo_optimizers.GFORepulsingHillClimbing :members: :inherited-members: Algorithm, object :member-order: bysource ``` ```{eval-rst} .. dropdown:: gfo_simulatedannealing **How to use this algorithm.** .. code-block:: python import optimagic as om import numpy as np om.minimize( fun=lambda x: x @ x, params=[1.0, 2.0, 3.0], algorithm=om.algos.gfo_simulatedannealing(stopping_maxiter=1_000, ...), bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5])) ) or using the string interface: .. code-block:: python om.minimize( fun=lambda x: x @ x, params=[1.0, 2.0, 3.0], algorithm="gfo_simulatedannealing", algo_options={"stopping_maxiter": 1_000, ...}, bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5])) ) **Description and available options:** .. autoclass:: optimagic.optimizers.gfo_optimizers.GFOSimulatedAnnealing :members: :inherited-members: Algorithm, object :member-order: bysource ``` ```{eval-rst} .. dropdown:: gfo_downhillsimplex **How to use this algorithm.** .. code-block:: python import optimagic as om import numpy as np om.minimize( fun=lambda x: x @ x, params=[1.0, 2.0, 3.0], algorithm=om.algos.gfo_downhillsimplex(stopping_maxiter=1_000, ...), bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5])) ) or using the string interface: .. code-block:: python om.minimize( fun=lambda x: x @ x, params=[1.0, 2.0, 3.0], algorithm="gfo_downhillsimplex", algo_options={"stopping_maxiter": 1_000, ...}, bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5])) ) **Description and available options:** .. 
autoclass:: optimagic.optimizers.gfo_optimizers.GFODownhillSimplex :members: :inherited-members: Algorithm, object :member-order: bysource ``` ```{eval-rst} .. dropdown:: gfo_powells_method **How to use this algorithm.** .. code-block:: python import optimagic as om import numpy as np om.minimize( fun=lambda x: x @ x, params=[1.0, 2.0, 3.0], algorithm=om.algos.gfo_powells_method(stopping_maxiter=1_000, ...), bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5])) ) or using the string interface: .. code-block:: python om.minimize( fun=lambda x: x @ x, params=[1.0, 2.0, 3.0], algorithm="gfo_powells_method", algo_options={"stopping_maxiter": 1_000, ...}, bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5])) ) **Description and available options:** .. autoclass:: optimagic.optimizers.gfo_optimizers.GFOPowellsMethod :members: :inherited-members: Algorithm, object :member-order: bysource ``` ```{eval-rst} .. dropdown:: gfo_pso **How to use this algorithm.** .. code-block:: python import optimagic as om import numpy as np om.minimize( fun=lambda x: x @ x, params=[1.0, 2.0, 3.0], algorithm=om.algos.gfo_pso(stopping_maxiter=1_000, ...), bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5])) ) or using the string interface: .. code-block:: python om.minimize( fun=lambda x: x @ x, params=[1.0, 2.0, 3.0], algorithm="gfo_pso", algo_options={"stopping_maxiter": 1_000, ...}, bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5])) ) **Description and available options:** .. autoclass:: optimagic.optimizers.gfo_optimizers.GFOParticleSwarmOptimization :members: :inherited-members: Algorithm, object :member-order: bysource ``` ```{eval-rst} .. dropdown:: gfo_parallel_tempering **How to use this algorithm.** .. 
code-block:: python import optimagic as om import numpy as np om.minimize( fun=lambda x: x @ x, params=np.array([1.0, 2.0, 3.0]), algorithm=om.algos.gfo_parallel_tempering(population_size=15, n_iter_swap=5), bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5])) ) or using the string interface: .. code-block:: python om.minimize( fun=lambda x: x @ x, params=np.array([1.0, 2.0, 3.0]), algorithm="gfo_parallel_tempering", algo_options={"population_size": 15, "n_iter_swap": 5}, bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5])) ) **Description and available options:** .. autoclass:: optimagic.optimizers.gfo_optimizers.GFOParallelTempering :members: :inherited-members: Algorithm, object :member-order: bysource ``` ```{eval-rst} .. dropdown:: gfo_spiral_optimization **How to use this algorithm.** .. code-block:: python import optimagic as om import numpy as np om.minimize( fun=lambda x: x @ x, params=np.array([1.0, 2.0, 3.0]), algorithm=om.algos.gfo_spiral_optimization(population_size=15, decay_rate=0.95), bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5])) ) or using the string interface: .. code-block:: python om.minimize( fun=lambda x: x @ x, params=np.array([1.0, 2.0, 3.0]), algorithm="gfo_spiral_optimization", algo_options={"population_size": 15, "decay_rate": 0.95}, bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5])) ) **Description and available options:** .. autoclass:: optimagic.optimizers.gfo_optimizers.GFOSpiralOptimization :members: :inherited-members: Algorithm, object :member-order: bysource ``` ```{eval-rst} .. dropdown:: gfo_genetic_algorithm **How to use this algorithm.** .. 
code-block:: python import optimagic as om import numpy as np om.minimize( fun=lambda x: x @ x, params=np.array([1.0, 2.0, 3.0]), algorithm=om.algos.gfo_genetic_algorithm(population_size=20, mutation_rate=0.6), bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5])) ) or using the string interface: .. code-block:: python om.minimize( fun=lambda x: x @ x, params=np.array([1.0, 2.0, 3.0]), algorithm="gfo_genetic_algorithm", algo_options={"population_size": 20, "mutation_rate": 0.6}, bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5])) ) **Description and available options:** .. autoclass:: optimagic.optimizers.gfo_optimizers.GFOGeneticAlgorithm :members: :inherited-members: Algorithm, object :member-order: bysource ``` ```{eval-rst} .. dropdown:: gfo_evolution_strategy **How to use this algorithm.** .. code-block:: python import optimagic as om import numpy as np om.minimize( fun=lambda x: x @ x, params=np.array([1.0, 2.0, 3.0]), algorithm=om.algos.gfo_evolution_strategy(population_size=15, crossover_rate=0.4), bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5])) ) or using the string interface: .. code-block:: python om.minimize( fun=lambda x: x @ x, params=np.array([1.0, 2.0, 3.0]), algorithm="gfo_evolution_strategy", algo_options={"population_size": 15, "crossover_rate": 0.4}, bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5])) ) **Description and available options:** .. autoclass:: optimagic.optimizers.gfo_optimizers.GFOEvolutionStrategy :members: :inherited-members: Algorithm, object :member-order: bysource ``` ```{eval-rst} .. dropdown:: gfo_differential_evolution **How to use this algorithm.** .. 
code-block:: python import optimagic as om import numpy as np om.minimize( fun=lambda x: x @ x, params=np.array([1.0, 2.0, 3.0]), algorithm=om.algos.gfo_differential_evolution(population_size=20, mutation_rate=0.8), bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5])) ) or using the string interface: .. code-block:: python om.minimize( fun=lambda x: x @ x, params=np.array([1.0, 2.0, 3.0]), algorithm="gfo_differential_evolution", algo_options={"population_size": 20, "mutation_rate": 0.8}, bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5])) ) **Description and available options:** .. autoclass:: optimagic.optimizers.gfo_optimizers.GFODifferentialEvolution :members: :inherited-members: Algorithm, object :member-order: bysource ``` ## Pygad Optimizer We wrap the pygad optimizer. To use it you need to have [pygad](https://pygad.readthedocs.io/en/latest/) installed. ```{eval-rst} .. dropdown:: pygad **How to use this algorithm:** .. code-block:: import optimagic as om om.minimize( ..., algorithm=om.algos.pygad(num_generations=100, ...) ) or .. code-block:: om.minimize( ..., algorithm="pygad", algo_options={"num_generations": 100, ...} ) **Description and available options:** .. autoclass:: optimagic.optimizers.pygad_optimizer.Pygad ``` ## PySwarms Optimizers optimagic supports the following continuous algorithms from the [PySwarms](https://pyswarms.readthedocs.io/en/latest/) library: (GlobalBestPSO, LocalBestPSO, GeneralOptimizerPSO). To use these optimizers, you need to have [the pyswarms package](https://github.com/ljvmiranda921/pyswarms) installed. (`pip install pyswarms`). ```{eval-rst} .. dropdown:: pyswarms_global_best **How to use this algorithm:** .. code-block:: import optimagic as om om.minimize( ..., algorithm=om.algos.pyswarms_global_best(n_particles=50, ...) ) or .. code-block:: om.minimize( ..., algorithm="pyswarms_global_best", algo_options={"n_particles": 50, ...} ) **Description and available options:** .. 
autoclass:: optimagic.optimizers.pyswarms_optimizers.PySwarmsGlobalBestPSO :members: :inherited-members: Algorithm, object ``` ```{eval-rst} .. dropdown:: pyswarms_local_best **How to use this algorithm:** .. code-block:: import optimagic as om om.minimize( ..., algorithm=om.algos.pyswarms_local_best(n_particles=50, k_neighbors=3, ...) ) or .. code-block:: om.minimize( ..., algorithm="pyswarms_local_best", algo_options={"n_particles": 50, "k_neighbors": 3, ...} ) **Description and available options:** .. autoclass:: optimagic.optimizers.pyswarms_optimizers.PySwarmsLocalBestPSO :members: :inherited-members: Algorithm, object ``` ```{eval-rst} .. dropdown:: pyswarms_general **How to use this algorithm:** .. code-block:: import optimagic as om om.minimize( ..., algorithm=om.algos.pyswarms_general(n_particles=50, topology_type="star", ...) ) or .. code-block:: om.minimize( ..., algorithm="pyswarms_general", algo_options={"n_particles": 50, "topology_type": "star", ...} ) **Description and available options:** .. autoclass:: optimagic.optimizers.pyswarms_optimizers.PySwarmsGeneralPSO :members: :inherited-members: Algorithm, object ``` ## References ```{eval-rst} .. bibliography:: refs.bib :labelprefix: algo_ :filter: docname in docnames :style: unsrt ``` ================================================ FILE: docs/source/conf.py ================================================ #!/usr/bin/env python3 # # optimagic documentation build configuration file, created by # sphinx-quickstart on Fri Jan 18 10:59:27 2019. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. 
# If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.

import datetime as dt
import os
from importlib.metadata import version

from intersphinx_registry import get_intersphinx_mapping

# Current year is used in the copyright notice below.
year = dt.datetime.now().year
author = "Janos Gabler"

# Set variable so that todos are shown in local build
on_rtd = os.environ.get("READTHEDOCS") == "True"

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.todo",
    "sphinx.ext.coverage",
    "sphinx.ext.extlinks",
    "sphinx.ext.intersphinx",
    "sphinx.ext.mathjax",
    "sphinx.ext.viewcode",
    "sphinx.ext.napoleon",
    "sphinx_copybutton",
    "myst_nb",
    "sphinxcontrib.bibtex",
    "sphinx_design",
    "sphinxcontrib.mermaid",
    "sphinx_llm.txt",
    "sphinx_llms_txt",
]

# Markdown (MyST) syntax extensions enabled for the docs.
myst_enable_extensions = [
    "colon_fence",
    "dollarmath",
    "html_image",
]

myst_fence_as_directive = ["mermaid"]

# Strip the ">>> " prompt when users copy code examples.
copybutton_prompt_text = ">>> "
copybutton_only_copy_prompt_lines = False

bibtex_bibfiles = ["refs.bib"]

autodoc_member_order = "bysource"

autodoc_class_signature = "separated"

autodoc_default_options = {
    "exclude-members": "__init__",
    "members": True,
    "undoc-members": True,
    "member-order": "bysource",
    "class-doc-from": "class",
}

autodoc_preserve_defaults = True

# Map optimagic's typing aliases to their fully qualified names so that
# autodoc renders them as links instead of expanding the underlying types.
autodoc_type_aliases = {
    "PositiveInt": "optimagic.typing.PositiveInt",
    "NonNegativeInt": "optimagic.typing.NonNegativeInt",
    "PositiveFloat": "optimagic.typing.PositiveFloat",
    "NonNegativeFloat": "optimagic.typing.NonNegativeFloat",
    "NegativeFloat": "optimagic.typing.NegativeFloat",
    "GtOneFloat": "optimagic.typing.GtOneFloat",
    "UnitIntervalFloat": "optimagic.typing.UnitIntervalFloat",
    "YesNoBool": "optimagic.typing.YesNoBool",
    "DirectionLiteral": "optimagic.typing.DirectionLiteral",
    "BatchEvaluatorLiteral": "optimagic.typing.BatchEvaluatorLiteral",
    "ErrorHandlingLiteral": "optimagic.typing.ErrorHandlingLiteral",
}

# Optional dependencies mocked during the docs build so autodoc can import
# modules without having the packages installed.
autodoc_mock_imports = [
    "bokeh",
    "cloudpickle",
    "cyipopt",
    "fides",
    "joblib",
    "nlopt",
    "pytest",
    "pygmo",
    "scipy",
    "sqlalchemy",
    "tornado",
    "petsc4py",
    "statsmodels",
    "numba",
]

# Shorthand roles for linking to GitHub users and pull requests.
extlinks = {
    "ghuser": ("https://github.com/%s", "%s"),
    "gh": ("https://github.com/optimagic-dev/optimagic/pull/%s", "%s"),
}

# Cross-project links to the docs of the listed packages.
intersphinx_mapping = get_intersphinx_mapping(
    packages={"numpy", "scipy", "pandas", "python"}
)

linkcheck_ignore = [
    r"https://tinyurl\.com/*.",
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = [".rst", ".ipynb", ".md"]

# The master toctree document.
master_doc = "index"

# General information about the project.
project = "optimagic"
copyright = f"2019 - {year}, {author}"  # noqa: A001

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = version("optimagic").split("+")[0]
version = ".".join(release.split(".")[:2])

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = "en"

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = [
    "_build",
    "**.ipynb_checkpoints",
    "how_to/how_to_slice_plot_3d.ipynb",
]

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
pygments_dark_style = "monokai"

# If true, `todo` and `todoList` produce output, else they produce nothing.
# Todos are only rendered in local builds, not on Read the Docs.
if on_rtd:
    pass
else:
    todo_include_todos = True
    todo_emit_warnings = True

# -- Options for myst-nb ----------------------------------------

nb_execution_mode = "force"  # "off", "force", "cache", "auto"
nb_execution_allow_errors = False
nb_merge_streams = True
nb_scroll_outputs = True

# Notebook cell execution timeout; defaults to 30.
nb_execution_timeout = 1000

# List of notebooks that will not be executed.
nb_execution_excludepatterns = [
    # Problem with latex rendering
    "estimation_tables_overview.ipynb",
    # too long runtime
    "bootstrap_montecarlo_comparison.ipynb",
    "how_to_slice_plot_3d.ipynb",
]

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = "furo"

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the built-in static files,
# so a file named "default.css" will overwrite the built-in "default.css".
html_css_files = ["css/termynal.css", "css/termynal_custom.css", "css/custom.css"]

html_js_files = [
    "js/termynal.js",
    "js/custom.js",
    "js/require.js",
]

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]

# If false, no module index is generated.
html_domain_indices = True

# If false, no index is generated.
html_use_index = True

# If true, the index is split into individual pages for each letter.
html_split_index = False

# If true, links to the source (either copied by sphinx on on github)
html_copy_source = True

# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. html_show_copyright = True html_title = "optimagic" html_theme_options = { "sidebar_hide_name": True, "navigation_with_keys": True, "light_logo": "images/optimagic_logo.svg", "dark_logo": "images/optimagic_logo_dark_mode.svg", "light_css_variables": { "color-brand-primary": "#f04f43", "color-brand-content": "#f04f43", }, "dark_css_variables": { "color-brand-primary": "#f04f43", "color-brand-content": "#f04f43", }, "source_repository": "https://github.com/optimagic-dev/optimagic", "source_branch": "main", "source_directory": "docs/source/", "footer_icons": [ { "name": "GitHub", "url": "https://github.com/optimagic-dev/optimagic", "html": """ """, "class": "", }, { "name": "Zulip", "url": "https://ose.zulipchat.com/#narrow/channel/221432-optimagic", "html": """ """, "class": "", }, ], } ================================================ FILE: docs/source/development/changes.md ================================================ (changes)= ```{include} ../../../CHANGES.md ``` ================================================ FILE: docs/source/development/code_of_conduct.md ================================================ (coc)= ## Code of Conduct The optimagic project has a [Code of Conduct][conduct] to which all contributors must adhere. See details in the [written policy statement][conduct]. 
[conduct]: https://github.com/optimagic-dev/optimagic/blob/main/.github/CODE_OF_CONDUCT.md ================================================ FILE: docs/source/development/credits.md ================================================ # Credits ## The optimagic Team ```{eval-rst} +---------------------------------------------------------------+-------------------------------------------------------------------+-------------------------------------------------------------------+-------------------------------------------------------------------+ + .. figure:: ../_static/images/janos.jpg + .. figure:: ../_static/images/mariam.jpg + .. figure:: ../_static/images/tim.jpeg + .. figure:: ../_static/images/klara.jpg + + :width: 120px + :width: 120px + :width: 120px + :width: 120px + + + + + + + `Janoś Gabler `_ + `Mariam Petrosyan `_ + `Tim Mensinger `_ + `Klara Röhrl `_ + +---------------------------------------------------------------+-------------------------------------------------------------------+-------------------------------------------------------------------+-------------------------------------------------------------------+ + .. figure:: ../_static/images/tobi.png + .. figure:: ../_static/images/annica.jpeg + .. figure:: ../_static/images/sebi.jpg + .. figure:: ../_static/images/bahar.jpg + + :width: 120px + :width: 120px + :width: 120px + :width: 120px + + + + + + + `Tobias Raabe `_ + `Annica Gehlen `_ + `Sebastian Gsell `_ + `Bahar Coskun `_ + +---------------------------------------------------------------+-------------------------------------------------------------------+-------------------------------------------------------------------+-------------------------------------------------------------------+ + .. figure:: ../_static/images/aida.jpg + .. figure:: ../_static/images/hmg.jpg + .. figure:: ../_static/images/ken.jpeg + + + :width: 120px + :width: 120px + :width: 120px + + + + + + + + `Aida Takhmazova `_ + `Hans-Martin von Gaudecker `_ + `Kenneth L. 
Judd `_ + + +---------------------------------------------------------------+-------------------------------------------------------------------+-------------------------------------------------------------------+-------------------------------------------------------------------+ ``` Janoś is the original developer and architect behind optimagic (formerly estimagic). All team members are active contributors in terms of commits, advice or community building. Hans-Martin and Ken support optimagic with funding and their expertise. ## Contributors We are grateful for many contributions from the community. In particular, we want to thank Moritz Mendel, Max Blesch, Christian Zimpelmann, Robin Musolff, Sofia Badini, Sofya Akimova, Xuefei Han, Leiqiong Wan, Andrew Souther, Luis Calderon, Linda Maokomatanda, Madhurima Chandra, and Vijaybabu Gangaprasad. ## Acknowledgements We thank all institutions that have funded or supported optimagic (formerly estimagic) ```{image} ../_static/images/aai-institute-logo.svg --- width: 185px --- ``` ```{image} ../_static/images/numfocus_logo.png --- width: 200 --- ``` ```{image} ../_static/images/tra_logo.png --- width: 240px --- ``` ```{image} ../_static/images/hoover_logo.png --- width: 192px --- ``` ```{image} ../_static/images/transferlab-logo.svg --- width: 420px --- ``` ================================================ FILE: docs/source/development/enhancement_proposals.md ================================================ # Enhancement Proposals optimagic Enhancement Proposals (EPs) can be used to discuss and design large changes. EP-00 details the EP process, the optimagic governance model and the optimagic Code of Conduct. It is the only EP that gets continuously updated. 
These EPs are currently in place: ```{toctree} --- maxdepth: 1 --- ep-00-governance-model.md ep-01-pytrees.md ep-02-typing.md ep-03-alignment.md ``` ================================================ FILE: docs/source/development/ep-00-governance-model.md ================================================ (ep-00)= # EP-00: Governance model & code of conduct ```{eval-rst} +------------+------------------------------------------------------------------+ | Author | `Maximilian Blesch `_, | | | `Janoś Gabler `_, | | | `Hans-Martin von Gaudecker `_, | | | `Annica Gehlen `_, | | | `Sebastian Gsell `_, | | | `Tim Mensinger `_, | | | `Mariam Petrosyan `_, | | | `Tobias Raabe `_, | | | `Klara Röhrl `_ | +------------+------------------------------------------------------------------+ | Status | Accepted | +------------+------------------------------------------------------------------+ | Type | Standards Track | +------------+------------------------------------------------------------------+ | Created | 2022-04-28 | +------------+------------------------------------------------------------------+ | Resolution | | +------------+------------------------------------------------------------------+ ``` ## Purpose This document formalizes the optimagic code of conduct and governance model. In case of changes, this document can be updated following the optimagic Enhancement Proposal process detailed below. ```{include} ../../../CODE_OF_CONDUCT.md ``` ## optimagic governance model ### Summary The governance model strives to be lightweight and based on [consensus](https://numpy.org/doc/stable/dev/governance/governance.html#consensus-based-decision-making-by-the-community) of all interested parties. Most work happens in GitHub issues and pull requests (regular decision process). Any interested party can voice their concerns or veto on proposed changes. 
If this happens, the optimagic Enhancement Proposal (EP) process can be used to iterate over proposals until consensus is reached (controversial decision process). If necessary, members of the steering council can moderate heated debates and help to broker a consensus. ### Regular decision process Most changes to optimagic are additions of new functionality or strict improvements of existing functionality. Such changes can be discussed in GitHub issues and discussions and implemented in pull requests. They do not require an optimagic Enhancement Proposal. Before starting to work on optimagic, contributors should read [how to contribute](how-to) and the [styleguide](styleguide). They can also reach out to existing contributors if any help is needed or anything remains unclear. We are all happy to help onboarding new contributors in any way necessary. For example, we have given introductions to git and GitHub in the past to help people make a contribution to optimagic. Pull requests should be opened as soon as work is started. They should contain a good description of the planned work such that any interested party can participate in the discussion around the changes. If planned changes turn out to be controversial, their design should be discussed in an optimagic Enhancement Proposal before the actual work starts. When the work is finished, the author of a pull request can request a review. In most cases, previous discussions will show who is a suitable reviewer. If in doubt, tag [janosg](https://github.com/janosg). Pull requests can be merged if there is at least one approving review. Reviewers should be polite, welcoming and helpful to the author of the pull request who might have spent many hours working on the changes. Authors of pull requests should keep in mind that reviewers' time is valuable.
Major points should be discussed publicly on GitHub, but very critical feedback or small details can be moved to private discussions — if the latter are necessary at all (see [the bottom section of this blog post](https://rgommers.github.io/2019/06/the-cost-of-an-open-source-contribution/) for an excellent discussion of the burden that review comments place on maintainers, which might not always be obvious). Video calls can help if a discussion gets stuck. The code of conduct applies to all interactions related to code reviews. ### optimagic Enhancement Proposals (EPs) / Controversial decision process Large changes to optimagic can be proposed in optimagic Enhancement Proposals, short EPs. They serve the purpose of summarising discussions that may happen in chats, issues, pull requests, in person, or by any other means. Simple extensions (like adding new optimizers) do not need to be discussed with such a formal process. EPs are written as markdown documents that become part of the documentation. Opening an EP means opening a pull request that adds the markdown document to the documentation. It is not necessary to already have a working implementations for the planned changes, even though it might be a good idea to have rough prototypes for solutions to the most challenging parts. If the author of an EP feels that it is ready to be accepted they need to make a post in the relevant [Zulip topic](https://ose.zulipchat.com) and a comment on the PR that contains the following information: 1. Summary of all contentious aspects of the EP and how they have been resolved 1. Every interested party has seven days to comment on the PR proposing the EP, either with approval or objections. While only objections are relevant for the decision making process, approvals are a good way to signal interest in the planned change and recognize the work of the authors. 1. If there are no unresolved objections after seven days, the EP will automatically be accepted and can be merged. 
Note that the pull requests that actually implement the proposed enhancements still require a standard review cycle. ### Steering Council The optimagic Steering Council consists of five people who take responsibility for the future development of optimagic and the optimagic community. Being a member of the steering council comes with no special rights. The main roles of the steering council are: - Facilitate the growth of optimagic and the optimagic community by organizing community events, identifying funding opportunities and improving the experience of all community members. - Develop a roadmap, break down large changes into smaller projects and find contributors to work on the implementation of these projects. - Ensure that new contributors are onboarded and assisted and that pull requests are reviewed in a timely fashion. - Step in as moderators when discussions get heated, help to achieve consensus on controversial topics and enforce the code of conduct. The Steering Council is elected by the optimagic community during a community meeting. Candidates need to be active community members and can be nominated by other community members or themselves until the start of the election. Nominated candidates need to accept the nomination before the start of the election. If there are only five candidates, the Steering Council is elected by acclamation. Else, every participant casts five votes. The 5 candidates with the most votes become elected. Candidates can vote for themselves. Ties are resolved by a second round of voting where each participant casts as many votes as there are positions left. Remaining ties are resolved by randomization. 
Current memebers of the optimagic Steering Council are: - [Janoś Gabler](https://github.com/janosg) - [Annica Gehlen](https://github.com/amageh) - [Hans-Martin von Gaudecker](https://github.com/hmgaudecker) - [Tim Mensinger](https://github.com/timmens) - [Mariam Petrosyan](https://github.com/mpetrosian) ### Community meeting Community meetings can be held to elect a steering council, make changes to the governance model or code of conduct, or to make other decisions that affect the community as a whole. Moreover, they serve to keep the community updated about the development of optimagic and get feedback. Community meetings need to be announced via our public channels (e.g. the [zulip workspace](https://ose.zulipchat.com) or GitHub discussions) with sufficient time until the meeting. The definition of sufficient time will increase with the size of the community. ================================================ FILE: docs/source/development/ep-01-pytrees.md ================================================ (eppytrees)= # EP-01: Pytrees ```{eval-rst} +------------+------------------------------------------------------------------+ | Author | `Janos Gabler `_ | +------------+------------------------------------------------------------------+ | Status | Accepted | +------------+------------------------------------------------------------------+ | Type | Standards Track | +------------+------------------------------------------------------------------+ | Created | 2022-01-28 | +------------+------------------------------------------------------------------+ | Resolution | | +------------+------------------------------------------------------------------+ ``` ## Abstract This EEP explains how we will use pytrees to allow for more flexible specification of parameters for optimization or differentiation, more convenient ways of writing moment functions for msm estimation and more. 
The actual code to work with pytrees is implemented in [Pybaum], developed by {ghuser}`janosg` and {ghuser}`tobiasraabe`. ## Backwards compatibility All changes are fully backwards compatible. ## Motivation Estimagic has many functions that require user written functions as inputs. Examples are: - criterion functions and their derivatives for optimization - functions of which numerical derivatives are taken - functions that calculate simulated moments - functions that calculate bootstrap statistics In all cases, there are some restrictions on possible inputs and outputs of the user written functions. For example, parameters for numerical optimization need to be provided as pandas.DataFrame with a `"value"` column. Simulated moments and bootstrap statistics need to be returned as a pandas.Series, etc. Pytrees allow to relax many of those restrictions on interfaces of user provided functions. This is not only more convenient for users, but sometimes also allows to reduce overhead because the user can choose optimal data structures for their problem. ## Background: What is a pytree Pytree is a term used in TensorFlow and JAX to refer to a tree-like structure built out of container-like Python objects with arbitrary levels of nesting. What is a container can be re-defined for each application. By default, lists, tuples and dicts are considered containers and everything else is a leaf. Then the following are examples of pytrees: ```python [1, "a", np.arange(3)] # 3 leaves [1, {"k1": 2, "k2": (3, 4)}, 5] # 5 leaves np.arange(5) # 1 leaf ``` What makes pytrees so powerful are the operations defined for them. 
The most important ones are: - `tree_flatten`: Convert any pytree into a flat list of leaves + metadata - `tree_unflatten`: The inverse of `tree_flatten` - `tree_map`: Apply a function to all leaves in a pytree - `leaf_names`: Generate a list of names for all leaves in a pytree The above examples of pytrees would look as follows when flattened (with a default definition of containers): ```python [1, "a", np.arange(3)] [1, 2, 3, 4, 5] [np.arange(5)] ``` By adding numpy arrays to the registry of container like objects, each of the three examples above would have five leaves. The flattened versions would look as follows: ```python [1, "a", 0, 1, 2] [1, 2, 3, 4, 5] [0, 1, 2, 3, 4] ``` Needless to say, it is possible to register anything as container. For example, we would add pandas.Series and pandas.DataFrame (with varying definitions, depending on the application). ## Difference between pytrees in JAX and estimagic Most JAX functions [only work with Pytrees of arrays](https://jax.readthedocs.io/en/latest/pytrees.html#pytrees-and-jax-functions) and scalars, i.e. pytrees where container types are dicts, lists and tuples and all leaves are arrays or scalars. We will just call them pytrees of arrays because scalars are converted to arrays by JAX. There are two ways to look at such pytrees: 1. As pytree of arrays -> `tree_flatten` produces a list of arrays 1. As pytree of numbers -> `tree_flatten` produces a list of numbers The only difference between the two perspectives is that for the second one, arrays have been registered as container types that can be flattened. In JAX the term `ravel` instead of `flatten` is sometimes used to make clear that the second perspective is meant. Estimagic functions work with slightly more general pytrees. On top of arrays, they can also contain scalars, pandas.Series and pandas.DataFrames. Again, there are two possible ways to look at such pytrees: 1.
As pytree of arrays, numbers, Series and DataFrames -> `tree_flatten` produces a list of arrays, numbers, Series and DataFrames.
### The criterion function Consider a criterion function that takes parameters in the following format: ```python params = { "delta": 0.95, "utility": pd.DataFrame( [[0.5, 0]] * 3, index=["a", "b", "c"], columns=["value", "lower_bound"] ), "probs": np.array([[0.8, 0.2], [0.3, 0.7]]), } ``` The criterion function returns a dictionary of the form: ```python { "value": 1.1, "contributions": {"a": np.array([0.36, 0.25]), "b": 0.49}, "root_contributions": {"a": np.array([0.6, 0.5]), "b": 0.7}, } ``` ### Run an optimization ```python from estimagic import minimize minimize( criterion=crit, params=params, algorithm="scipy_lbfgsb", ) ``` The internal optimizer (in this case the lbfgsb algorithm from scipy) will see a wrapped version of `crit`. That version takes a 1d numpy array as its only argument and returns a scalar float (the `"value"` entry of the result of `crit`). Numerical derivatives are also taken on that function. If instead a derivative based least squares optimizer like `"scipy_ls_dogbox"` had been used, the internal optimizer would see a modified version of `crit` that takes a 1d numpy array and returns a 1d numpy array (the flattened version of the `"root_contributions"` entry of the result of `crit`). ### The optimization output The following entries of the output of minimize are affected by the change: - `"solution_params"`: A pytree with the same structure as `params` - `"solution_criterion"`: The output dictionary of `crit` evaluated solution params - `solution_derivative`: Maybe we should not even have this entry. ```{note} We need to discuss if and in which form we want to have a solution derivative entry. In it's current form it is useless if constraints are used. This gets worse when we allow for pytrees and translating this into a meaningful shape might be very difficult. ``` ### Add bounds Bounds on parameters that are inside a DataFrame with `"value"` column can simply be specified as before. 
For all others, there are separate `lower_bounds` and `upper_bounds` arguments in `maximize` and `minimize`. `lower_bounds` and `upper_bounds` are pytrees of the same structure as `params` or a subtree that preserves enough structure to match all bounds. For example: ```python minimize( criterion=crit, params=params, algorithm="scipy_lbfgsb", lower_bounds={"delta": 0}, upper_bounds={"delta": 1}, ) ``` This would add bounds for delta, keep the bounds on all `"utility"` parameters, and leave the `"probs"` parameters unbounded. ### Add a constraint Currently, parameters to which a constraint is applied are selected via a `"loc"` or `"query"` entry in the constraints dictionary. This keeps working as long as params are specified as a single DataFrame containing a `"value"` column. If a more general pytree is used we need a "selector" entry instead. The value of that entry is a callable that takes the pytree and returns selected parameters. The `selector` function may return the parameters in the form of an estimagic-pytree. Should order play a role for the constraints (e.g., increasing) the constraint will be applied to the flattened version of the pytree returned by the `selector` function. However, in the case that order matters, we advise users to return one-dimensional arrays (explicit is better than implicit). As an example, let's add probability constraints for each row of `"probs"`: ```python constraints = [ {"selector": lambda params: params["probs"][0], "type": "probability"}, {"selector": lambda params: params["probs"][1], "type": "probability"}, ] minimize( criterion=crit, params=params, algorithm="scipy_lbfgsb", constraints=constraints, ) ``` The required changes to support this are relatively simple. This is because most functions that deal with constraints already work with a 1d array of parameters and the `"loc"` and `"query"` entries of constraints are internally translated to positions in that array very early on. 
### Derivatives during optimization If numerical derivatives are used, they are already taken on a modified function that maps from 1d numpy array to scalars or 1d numpy arrays. Allowing for estimagic-pytrees in parameters and criterion outputs will not pose any difficulties here. Closed form derivatives need to have the following interface: They expect `params` in the exact same format as the criterion function as first argument. They return a derivative in the same format as our numerical derivative functions or JAXs autodiff functions when applied to the criterion function. ## Numerical derivatives with pytrees ### Problem: Higher dimensional extensions of pytrees The derivative of a function that maps from a 1d array to a 1d array (usually called Jacobian) is a 2d matrix. If the 1d arrays are replaced by pytrees, we need a two dimensional extension of the pytrees. Below we will look at how JAX does this and why we cannot simply copy that solution. ### The JAX solution Let's look at an example. We first define a function in terms of 1d arrays and then in terms of pytrees and look at a JAX calculated jacobian in both cases: ```python def square(x): return x**2 x = jnp.array([1, 2, 3, 4, 5, 6.0]) jacobian(square)(x) ``` ```bash DeviceArray([[ 2., 0., 0., 0., 0., 0], [ 0., 4., 0., 0., 0., 0], [ 0., 0., 6., 0., 0., 0], [ 0., 0., 0., 8., 0., 0], [ 0., 0., 0., 0., 10., 0], [ 0., 0., 0., 0., 0., 12]], dtype=float32) ``` ```python def tree_square(x): out = { "c": x["a"] ** 2, "d": x["b"].flatten() ** 2, } return out tree_x = {"a": jnp.array([1, 2.0]), "b": jnp.array([[3, 4], [5, 6.0]])} jacobian(tree_square)(tree_x) ``` Instead of showing the entire results, let's just look at the resulting tree structure and array shapes: ```python { "c": { "a": (2, 2), "b": (2, 2, 2), }, "d": { "a": (4, 2), "b": (4, 2, 2), }, } ``` The outputs for hessians have even deeper nesting and three dimensional arrays inside the nested dictionary. 
Similarly, we would get higher dimensional arrays if one of the original pytrees had already contained a 2d array. ### Extending the JAX solution to estimagic-pytrees JAX pytrees can only contain arrays, whereas estimagic-pytrees may contain scalars, pandas.Series and pandas.DataFrames (with or without `"value"` column). Unfortunately, this poses non-trivial challenges for numerical derivatives because those data types have no natural extension in arbtirary dimensions. Our solution needs to fulfill two requirements: 1\. Compatible with JAX in the sense than whenever a derivative can be calculated with JAX it can also be calculated with estimagic and the result has the same structure. 2. Compatible with the rest of estimagic in the sense that any function that can be optimized can also be differentiated. In the special case of differentiating with respect to a DataFrame it also needs to be backwards compatible. A solution that achieves this is to treat Series and DataFrames with `"value"` columns as 1d arrays and other DataFrames as 2d arrays, then proceed as in JAX and finally try to preserve as much index and column information as possible. This leads to very natural results in the typical usecases with flat dicts of Series or params DataFrames both as inputs and outputs and is backwards compatible with everything that is supported already. However, similar to JAX, not everything that is supported will also be a good idea. Predicting where a pandas Object is preserved and where it will be replaced by an array might be hard for very nested pytrees. However, these rules are mainly defined to avoid hard limitations that have to be checked and documented. Users will learn to avoid too much complexity by avoiding complex pytrees as inputs and outputs at the same time. To see this in action, let's look at an example. We repeat the example from the JAX interface above with the following changes: 1. 
The 1d numpy array in x["a"] is replaced by a DataFrame with `"value"` column 1. The "d" entry in the output becomes a Series instead of a 1d numpy array. ```python def pd_tree_square(x): out = { "c": x["a"]["value"] ** 2, "d": pd.Series(x["b"].flatten() ** 2, index=list("jklm")), } return out pd_tree_x = { "a": pd.DataFrame(data=[[1], [2]], index=["alpha", "beta"], columns=["value"]), "b": np.array([[3, 4], [5, 6]]), } pd_tree_square(pd_tree_x) ``` ``` { 'c': "alpha" 1 "beta" 4 dtype: int64, 'd': "j" 9 "k" 16 "l" 25 "m" 36 dtype: int64, } ``` The resulting shapes of the jacobian will be the same as before. For all arrays with only two dimensions we can preserve some information from the Series and DataFrame indices. On the higher dimensional ones, this will be lost. ```python { "c": { "a": (2, 2), # df with columns ["alpha", "beta"], index ["alpha", "beta"] "b": (2, 2, 2), # numpy array without label information }, "d": { "a": (4, 2), # columns ["alpha", "beta"], index [0, 1, 2, 3] "b": (4, 2, 2), # numpy array without label information }, } ``` To get more intuition for the structure of the result, let's add a few labels to the very first jacobian: ```{eval-rst} +--------+----------+----------+----------+----------+----------+----------+----------+ | | | a | | b | | | | +--------+----------+----------+----------+----------+----------+----------+----------+ | | | alpha | beta | j | k | l | m | +--------+----------+----------+----------+----------+----------+----------+----------+ | c | alpha | 2 | 0 | 0 | 0 | 0 | 0 | + +----------+----------+----------+----------+----------+----------+----------+ | | beta | 0 | 4 | 0 | 0 | 0 | 0 | +--------+----------+----------+----------+----------+----------+----------+----------+ | d | 0 | 0 | 0 | 6 | 0 | 0 | 0 | + +----------+----------+----------+----------+----------+----------+----------+ | | 1 | 0 | 0 | 0 | 8 | 0 | 0 | + +----------+----------+----------+----------+----------+----------+----------+ | | 2 | 0 | 0 | 0 | 0 | 
10 | 0 | + +----------+----------+----------+----------+----------+----------+----------+ | | 3 | 0 | 0 | 0 | 0 | 0 | 12 | +--------+----------+----------+----------+----------+----------+----------+----------+ ``` The indices ["j", "k", "l", "m"] unfortunately never made it into the result because they were only applied to elements that already came from a 2d array and thus always have a 3d Jacobian, i.e. the result entry `["c"][b"]` is a reshaped version of the upper right 2 by 4 array and the result entry `["d"]["b"]` is a reshaped version of the lower right 4 by 4 array. ### Implementation We use the following terminology to describe the implementation: - input_tree: The pytree containing parameters, i.e. inputs to the function that is differentiated. - output_tree: The pytree that is returned by the function being differentiated - derivative_tree: The pytree we want to generate, i.e. the pytree that would be returned by JAX jacobian. - flat_derivative: The matrix version of the derivative_tree To simply reproduce the JAX behavior with pytrees of arrays, we could proceed in the following steps: - Create a modified function that maps from 1d array to 1d array - Calculate flat_derivative by taking numerical derivatives just as before - Calculate the shapes of all arrays in derivative_tree by concatenating the shapes of the cartesian product of flattend output_tree and input_tree - Calculate the 2d versions of those arrays by taking the product over elements in the shape tuple before concatenating. - Create a list of lists containing all arrays that will be in derivative_tree. The values are taken from flat_derivative, using the previously calculated shapes. - call `tree_unflatten` on the inner lists with the treedef corresponding to input_tree. - call `tree_unflatten` on the result of that with the treedef corresponding to output_tree. 
To implement the extension to estimagic pytrees we would probably do exactly the same but have a bit more preparation and post-processing to do. ## General aspects of pytrees in estimation functions ### Estimation summaries Currently, estimation summaries are DataFrames. The estimated parameters are in the `"value"` column. There are other columns with standard errors, p-values, significance stars and confidence intervals. This is another form of higher dimensional extension of pytrees, where we need to add additional columns. There are two ways in which estimation summaries could be presented. I suggest we offer both. The first is more geared towards generating estimation tables and serving as actual summary to be looked at in a jupyter notebook. It is also backwards compatible and should thus be the default. The second is more geared towards further calculations. There will be utility functions to convert between the two. Both formats will be explained using the `params` pytree from the optimization example (reproduced here for convenience): #### Format 1: Everything becomes a DataFrame In this approach we do the following conversions: 1. numpy arrays are flattened and converted to DataFrames with one column called `"value"`. The index contains the original positions of elements. 1. pandas.Series are converted to DataFrames. The index remains unchanged. The column is called `"value"`. 1. scalars become DataFrames with one row with index 0 and one column called `"value"`. 1. DataFrames without `"value"` column are stacked into a DataFrame with just one column called `"value"`. 1. DataFrames with `"value"` column are reduced to that column. After these transformations, all numbers of the original pytree are stored in DataFrames with `"value"` column. Additional columns with standard errors and the like can then simply be assigned as before. For more intuition, let's see how this would look in an example. 
For simplicity we only add a column with stars and ommit standard errors, p-values and confidence intervals. We use the same example as in the optimization section: ```python params = { "delta": 0.95, "utility": pd.DataFrame( [[0.5, 0]] * 3, index=["a", "b", "c"], columns=["value", "lower_bound"] ), "probs": np.array([[0.8, 0.2], [0.3, 0.7]]), } ``` ``` { 'delta': value stars 0 0.95 ***, 'utility': value stars a 0.5 ** b 0.5 ** c 0.5 **, 'probs': value stars 0 0 0.8 *** 1 0.2 * 1 0 0.3 ** 1 0.7 ***, } ``` #### Format 2: Dictionary of pytrees The second solution is a dictionary of pytrees the keys are the columns of the current summary but probably in plural, i.e. "values", "standard_errors", "p-values", ...; Each value is a pytree with the exact same structure as `params`. If this pytree contains DataFrames with `"value"` column, only that column is updated. i.e. standard errors would be accessed via `summary["standard_errors"]["my_df"]["value"]`. ### Representation of covariance matrices A covariance matrix is a two dimensional extension of a `params` pytree. We could theoretically handle it exactly the same way as Jacobians. However, this would not be useful for statistical tests and visualization if it contains more than 2 dimensional arrays (as the Jacobian example does). We thus propose to have two possible formats in which covariance matrices can be returned: 1. The pytree variant described in the above Jacobian example. This will be useful to look at sub-matrices of the full covariance matrix as long as the `params` pytree only contains one dimensional arrays, Series and DataFrames with `"value"` columns. 1. A DataFrame containing the covariance matrix of the flattened parameter vector. The index and columns of the DataFrames can be constructed from the `leaf_names` function in `pybaum`. We could also triviall add a function there that constructs an index that is easier to work with for selecting elements and let the user choose between the two versions. 
The function that maps from the flat version (which would be calculated internally) to the pytree version is the same as we need for numerical derivatives. The inverse of that function is probably not too difficult to implement and can also be useful for derivatives. ### params Everything that can be used as `params` in optimization and differentiation can also be used as `params` in estimation. The registries used in pytree functions are identical. ## ML specific aspects of pytrees The output of the log likelihood functions is a dictionary with the entries: - `"value"`: a scalar float - `"contributions"`: a 1d numpy array or pandas.Series Moreover, there can be arbitrary additional entries. The only change is that `"contributions"` can now be any estimagic pytree. ## MSM specific aspects of pytrees ### Valid formats of empirical and simulated moments There are three types of moments in MSM estimation: - `empirical moments` - The output of `simulate_moments` - The output of `calculate_moments`, needed to get a moments covariance matrix. We propose that moments can be stored as any valid estimagic pytree but of course all three types of moments have to be aligned, i.e. be stored in a tree of the same structure. We will raise an error if the trees do not have the same structure. This is a generalization of an interface that has already proven useful in [respy](https://github.com/OpenSourceEconomics/respy), [sid](https://github.com/covid-19-impact-lab/sid) and other applications. In the future, the project specific implementations of flatten and unflatten functions could simply be deleted. ### Representation of the weighting matrix and moments_cov The weighting matrix for MSM estimation is represented as a DataFrame in the same way as the flat representation of the covariance matrices. 
Of course, the conversion functions that work for covariance matrices would also work here, but it is highly unlikely that a different representation of a weighting matrix is ever needed. Note that the user does not have to construct this weighting matrix manually. They can generate them using `get_moments_cov` and `get_weighting_matrix`, so they do not need any knowledge of how the flattening works. ### Pepresentation of sensitivity measures Sensitivity measures are similar to covariance matrices in the sense that they require a two dimensional extension of pytrees. The only difference is that for covariance matrices the two pytrees the same (namely the `params`) and for sensitivity measures they are different (one is `params`, the other `moments`). We therefore suggest to use the same solution, i.e. to offer a flat representation in form of a DataFrame, a pytree representation and functions to convert between the two. ## Compatibility with estimation tables Estimation tables are constructed from estimation summaries. This continues to work for summaries where everything has been converted to DataFrames. Users will select individual DataFrames from a pytree of DataFrames, possibly concatenate or filter them and pass them to the estimation table function. ## Compatibility with plotting functions The following functions are affected: - `plot_univariate_effects` - `convergence_plot` - `lollipop_plot` - `derivative_plot` Most of them can be adjusted easily to the proposed changes. On all others we will simply raise errors and provide tutorials to work around the limitations. ## Compatibility with Dashboard The main challenge for the dashboard is that pytrees have no natural multi-column extension and thus it becomes harder to specify a group or name column. However, these features have not been used very much anyways. We propose to write a better automatic grouping and naming function for pytrees. 
That way it is simply not necessary to provide group and name columns and most of the users will get a better dashboard experience. Rules of thumb for both should be: 1. Only parameters where the start values have a similar magnitude can be in the same group, i.e. displayed in one lineplot. 1. Parameters that are close to each other in the tree (i.e. have a common beginning in their leaf_name should be in the same group. 1. The plot title should subsume the commen parts of the tree-structure (i.e. name we get from `pybaum.leaf_names`. 1. Most line plots should have approximately 5 lines, none should have more than 8. ## Advanced options for functions that work with pytrees There are two argument to `tree_flatten` and other pytree functions that determine which entries in a pytree are considered a leaf and which a container as well as how containers are flattened. 1. `registry` and 2. `is_leaf`. See the documentation of `pybaum` for details. To allow for absolute flexibility, each function that works with pytrees needs to allow a user to pass in a `registry` and an `is_leaf` argument. If a function works with multiple pytrees (e.g. in `estimate_msm` the `params` are a pytree and `emprirical_moments` are a pytree) it needs to allow users to pass in multiple registries and is_leaf functions (e.g. `params_registry`, `params_is_leaf` and `moments_registry`, `moments_is_leaf`. However, we need only as many registries as there are different pytrees. For example since `simulated_moments` and `empirical_moments` always need to be pytrees with the same structure, they do not need separate registries and is_leaf functions. ## Pytree related reasons for a switch to result objects There will be an other EEP that proposes to replace the result dictionaries we currently use everywhere in estimagic by result objects. While this in not completely related to pytrees, the switch to pytrees provides a few additional reasons: 1. 
Since we sometimes provide results in several formats (e.g. summaries as dict of pytrees and as pytree of DataFrames), the result dictionary would become too large and confusing. Having result objects that just calculate specific formats on demand can alleviate this. 1. The result object can serve as a simplified wrapper to pytree functions and pytree conversion functions between pytree formats that abstracts from registry, is_leaf and treedefs.
The goal is to reap a number of benefits: - Users will benefit from IDE tools such as easier discoverability of options and autocompletion. - Developers and users will find code easier to read due to type hints. - The codebase will become more robust due to static type checking and use of stricter types in internal functions. Achieving these goals requires more than adding type hints. optimagic is currently mostly [stringly typed](https://wiki.c2.com/?StringlyTyped). For example, optimization algorithms are selected via strings. Another example are [constraints](https://estimagic.readthedocs.io/en/latest/how_to_guides/optimization/how_to_specify_constraints.html), which are dictionaries with a fixed set of required keys. This enhancement proposal outlines how we can accommodate the changes needed to reap the benefits of static typing without breaking users' code in too many places. ## Motivation and resources - [Writing Python like it's Rust](https://kobzol.github.io/rust/python/2023/05/20/writing-python-like-its-rust.html). A very good blogpost that summarizes the drawbacks of "stringly-typed" Python code and shows how to incorporate typing philosophies from Rust into Python projects. Read this if you don't have time to read the other resources. - [Robust Python](https://www.oreilly.com/library/view/robust-python/9781098100650/), an excellent book that discusses how to design code around types and provides an introduction to static type checkers in Python. - [jax enhancement proposal](https://jax.readthedocs.io/en/latest/jep/12049-type-annotations.html) for adopting static typing. It has a very good discussion on benefits of static typing. - [Subclassing in Python Redux](https://hynek.me/articles/python-subclassing-redux/) explains which types of subclassing are considered harmful and was very helpful for designing this proposal. 
(design-philosophy)= ## Design Philosophy The core principles behind this enhancement proposal can be summarized by the following points. This is an extension to our existing [styleguide](https://estimagic.org/en/latest/development/styleguide.html) which will be updated if this proposal is accepted. - User facing functions should be generous regarding their input type. Example: the `algorithm` argument can be a string, `Algorithm` class or `Algorithm` instance. The `algo_options` can be an `AlgorithmOptions` object or a dictionary of keyword arguments. - User facing functions should be strict about their output types. A strict output type does not just mean that the output type is known (and not a generous Union), but that it is a proper type that enables static analysis for available attributes. Example: whenever possible, public functions should not return dicts but proper result types (e.g. `OptimizeResult`, `NumdiffResult`, ...) - Internal functions should be strict about input and output types; Typically, a public function will check all arguments, convert them to a proper type and then call an internal function. Example: `minimize` will convert any valid value for `algorithm` into an `Algorithm` instance and then call an internal function with that type. - Each argument that previously accepted strings or option dictionaries now also accepts input types that are more amenable to static analysis and offer better autocomplete. Example: `algo_options` could just be a dict of keyword arguments. Now it can also be an `AlgorithmOptions` instance that enables autocomplete and static analysis for attribute access. - Fixed field types should only be used if all fields are known. An example where this is not the case are collections of benchmark problems, where the set of fields depends on the selected benchmark sets and other things. In such situations, dictionaries that map strings to BenchmarkProblem objects are a good idea. 
- For backwards compatibility and compatibility with SciPy, we allow things we don't find ideal (e.g. selecting algorithms via strings). However, the documentation should mostly show our prefered way of doing things. Alternatives can be hidden in tabs and expandable boxes. - Whenever possible, use immutable types. Whenever things need to be changeable, consider using an immutable type with copy constructors for modified instances. Example: instances of `Algorithm` are immutable but using `Algorithm.with_option` users can create modified copies. - The main entry point to optimagic are functions, objects are mostly used for configuration and return types. This takes the best of both worlds: we get the safety and static analysis that (in Python) can only be achieved using objects but the beginner friendliness and freedom provided by functions. Example: Having a `minimize` function, it is very easy to add the possibility of running minimizations with multiple algorithms in parallel and returning the best value. Having a `.solve` method on an algorithm object would require a whole new interface for this. ## Changes for optimization The following changes apply to all functions that are directly related to optimization, i.e. `maximize`, `minimize`, `slice_plot`, `criterion_plot`, `params_plot`, `count_free_params`, `check_constraints` and `OptimizeResult`. ### The objective function #### Current situation The objective or criterion function is the function being optimized. The same criterion function can work for scalar, least-squares and likelihood optimizers. Moreover, a criterion function can return additional data that is stored in the log file (if logging is active). All of this is achieved by returning a dictionary instead of just a scalar float. For the simplest case, where only scalar optimizers are used, `criterion` returns a float. Here are two examples of this simple case. The **first example** represents `params` as a flat numpy array and returns a float. 
This would also be compatible with SciPy: ```python def sphere(params: np.ndarray) -> float: return params @ params ``` The **second example** also returns a float but uses a different format for the parameters: ```python def dict_sphere(params: dict) -> float: return params["a"] ** 2 + params["b"] ** 2 ``` If the user wants the criterion function to be compatible with specialized optimizers for least-squares problems, the criterion function needs to return a dictionary. ```python def least_squares_sphere(params: np.ndarray) -> dict[str, Any]: return {"root_contributions": params} ``` Here the `"root_contributions"` are the least-squares residuals. The dictionary key tells optimagic how to interpret the output. This is needed because optimagic has no way of finding out whether a criterion function that returns a vector (or pytree) is a least-squares function or a likelihood function. Of course all specialized problems can still be solved with scalar optimizers. The criterion function can also return a dictionary, if the user wants to store some information in the log file. This is independent of having a least-squares function or not. An example is: ```python def logging_sphere(x: np.ndarray) -> dict[str, Any]: return {"value": x @ x, "mean": x.mean(), "std": x.std()} ``` Here `"value"` is the actual scalar criterion value. All other fields are unknown to optimagic and therefore just logged in the database if logging is active. The specification of likelihood functions is very analogous to least-squares functions and therefore omitted here. **Things we want to keep** - Allow using the same criterion function for scalar, likelihood and least-squares optimizers. This feature makes it easy to try out and compare very different algorithms with minimal code changes. - No restrictions on the type of additional arguments of the criterion function. - Maintain compatibility with scipy.optimize when the criterion function returns a scalar. 
**Problems** - Most users of optimagic find it hard to write criterion functions that return the correct dictionary. Therefore, they don't use the logging feature and we often get questions about specifying least-squares problems correctly. - Internally we can make almost no assumptions about the output of a criterion function, making the code that processes the criterion output very complex and full of if conditions. - We only know whether the specified criterion function is compatible with the selected optimizer after we evaluate it once. This means that users see errors only very late. - While optional, in least-squares problems it is possible that a user specifies `root_contributions`, `contributions` and `value` even though any of them could be constructed out of the `root_contributions`. This redundancy of information means that we need to check the consistency of all user provided function outputs. #### Proposal In the current situation, the dictionary return type solves two different problems that will now be solved separately. ##### Specifying different problem types The simplest way of specifying a least-squares function becomes: ```python import optimagic as om @om.mark.least_squares def ls_sphere(params): return params ``` Analogously, the simplest way of specifying a likelihood function becomes: ```python @om.mark.likelihood def ll_sphere(params): return params**2 ``` The simplest way of specifying a scalar function stays unchanged, but optionally a `mark.scalar` decorator can be used: ```python @om.mark.scalar # this is optional def sphere(params): return params @ params ``` Except for the decorators, these three functions are specified the same way as in other python libraries that support specialized optimizers (e.g. `scipy.optimize.least_squares`). The reason why we need the decorators is that we support all kinds of optimizers in the same interface. 
##### Return additional information If users additionally want to return information that should be stored in the log file, they need to use a specific Object as return type. ```python @dataclass(frozen=True) class FunctionValue: value: float | PyTree info: dict[str, Any] ``` An example of a least-squares function that also returns additional info for the log file would look like this: ```python from optimagic import FunctionValue @om.mark.least_squares def least_squares_sphere(params): out = FunctionValue( value=params, info={"p_mean": params.mean, "p_std": params.std()} ) return out ``` And analogous for scalar and likelihood functions, where again the `mark.scalar` decorator is optional. ##### Optionally replace decorators by type hints The purpose of the decorators is to tell us the output type of the criterion function. This is necessary because there is no way of distinguishing between likelihood and least-squares functions from the output alone and because we want to know the function type before we evaluate the function once. An alternative that might be more convenient for advanced Python programmers would be to do this via type hints. In this case, the return types need to be a bit more fine-grained: ```python @dataclass(frozen=True) class ScalarFunctionValue(FunctionValue): value: float info: dict[str, Any] @dataclass(frozen=True) class LeastSquaresFunctionValue(FunctionValue): value: PyTree info: dict[str, Any] @dataclass(frozen=True) class LikelihoodFunctionValue(FunctionValue): value: PyTree info: dict[str, Any] ``` A least-squares function could then be specified without decorator as follows: ```python from optimagic import LeastSquaresFunctionValue def least_squares_sphere(params: np.ndarray) -> LeastSquaresFunctionValue: out = LeastSquaresFunctionValue( value=params, info={"p_mean": params.mean, "p_std": params.std()} ) return out ``` This approach works nicely in projects that use type hints already. 
However, it would be hard for users who have never heard about type hints. Therefore, we should implement it but not use it in beginner tutorials and always make clear that this is completely optional. ##### Summary of output types The output type of the objective function is `float | PyTree[float] | FunctionValue`. ### Bundling bounds #### Current situation Currently we have four arguments of `maximize`, `minimize`, and related functions that let the user specify bounds: ```python om.minimize( # ... lower_bounds=params - 1, upper_bounds=params + 1, soft_lower_bounds=params - 2, soft_upper_bounds=params + 2, # ... ) ``` Each of them is a pytree that mirrors the structure of `params` or `None` **Problems** - Usually, all of these arguments are used together and passing them around individually is annoying. - The names are very long because the word `bounds` is repeated. #### Proposal We bundle the bounds together in a `Bounds` type: ```python bounds = om.Bounds( lower=params - 1, upper=params + 1, soft_lower=params - 2, soft_upper=params + 2, ) om.minimize( # ... bounds=bounds, # ... ) ``` As a bonus feature, the `Bounds` type can do some checks on the bounds at instance creation time such that users get errors before running an optimization. Using the old arguments will be deprecated. Since there is no need to modify instances of `Bounds`, it should be immutable. To improve the alignment with SciPy, we can also allow users to pass a `scipy.optimize.Bounds` object as bounds. Internally, this will be converted to our `Bounds` object. ### Constraints #### Current situation Currently, constraints are dictionaries with a set of required keys. The exact requirements depend on the type of constraints and even on the structure of `params`. Each constraint needs a way to select the parameters to which the constraint applies. There are three dictionary keys for this: - `"loc"`, which works if params are numpy arrays, `pandas.Series` or `pandas.DataFrame`. 
- `"query"`, which works only if `params` are `pandas.DataFrame` - `"Selector"`, which works for all valid formats of `params`. Moreover, each constraint needs to specify its type using the `"type"` key. Some constraints have additional required keys: - Linear constraints have `"weights"`, `"lower_bound"`, `"upper_bound"`, and `"value"`. - Nonlinear constraints have `"func"`, `"lower_bound"`, `"upper_bound"`, and `"value"`. Details and examples can be found [here](https://estimagic.readthedocs.io/en/latest/how_to_guides/optimization/how_to_specify_constraints.html). **Things we want to keep** - The constraints interface is very declarative; Constraints purely collect information and are completely separate from the implementation. - All three ways of selecting parameters have their strength and can be very concise and readable in specific applications. **Problems** - Constraints are hard to document and generally not understood by most users. - Having multiple ways of selecting parameters (not all compatible with all `params` formats) is confusing for users and annoying when processing constraints. We have to handle the case where no selection or multiple selections are specified. - Dicts with required keys are brittle and do not provide autocomplete. This is made worse by the fact that each type of constraint requires different sets of keys. #### Proposal 1. We implement simple dataclasses for each type of constraint. 1. We get rid of `loc` and `query` as parameter selection methods. Instead, we show in the documentation how both selection methods can be used inside a `selector` function. 
Examples of the new syntax are: ```python constraints = [ om.constraints.FixedConstraint(selector=lambda x: x[0, 5]), om.constraints.IncreasingConstraint(selector=lambda x: x[1:4]), ] res = om.minimize( fun=criterion, params=np.array([2.5, 1, 1, 1, 1, -2.5]), algorithm="scipy_lbfgsb", constraints=constraints, ) ``` Since there is no need to modify instances of constraints, they should be immutable. All constraints can subclass `Constraint` which will only have the `selector` attribute. During the deprecation phase, `Constraint` will also have `loc` and `query` attributes. The current `cov` and `sdcorr` constraints apply to flattened covariance matrices, as well as standard deviations and flattened correlation matrices. This comes from a time where optimagic only supported an essentially flat parameter format (`DataFrames` with `"value"` column). We can exploit the current deprecation cycle to rename the current `cov` and `sdcorr` constraints to `FlatCovConstraint` and `FlatSdcorrConstraint`. This prepares the introduction of a more natural `CovConstraint` and `SdcorrConstraint` later. (algorithm-selection)= ### Algorithm selection #### Current situation `algorithm` is a string or a callable that satisfies the internal algorithm interface. If the user passes a string, we look up the algorithm implementation in a dictionary containing all installed algorithms. We implement suggestions for typical typos based on fuzzy matching of strings. **Things we want to keep** - optimagic can be used just like scipy **Problems** - There is no autocomplete. - It is very easy to make typos and they only get caught at runtime. - Users cannot select algorithms without reading the documentation. #### Proposal The following proposal is quite ambitious and split into multiple steps. Thanks to [@schroedk](https://github.com/schroedk) for helpful discussions on this topic. 
##### Step 1: Passing algorithm classes and objects For compatibility with SciPy we continue to allow algorithm strings. However, the preferred ways of selecting algorithms are now: 1. Passing an algorithm class 1. Passing a configured algorithm object Both new ways become possible because of changes to the internal algorithm interface. See [here](algorithm-interface) for the proposal. We remove the possibility of passing callables that comply with the old internal algorithm interface. In a simple example, algorithm selection via algorithm classes looks as follows: ```python om.minimize( lambda x: x @ x, params=np.arange(5), algorithm=om.algorithms.scipy_neldermead, ) ``` Passing a configured instance of an algorithm looks as follows: ```python om.minimize( lambda x: x @ x, params=np.arange(5), algorithm=om.algorithms.scipy_neldermead(adaptive=True), ) ``` ##### Step 2: Achieving autocomplete without too much typing There are many ways in which the above behavior could be achieved with full autocomplete support. For reasons that will become clear in the next section, we choose to represent `algorithms` as a dataclass. Alternatives are enums, `__init__` files, NamedTuples, etc. A prototype for that dataclass looks as follows: ```python from typing import Type @dataclass(frozen=True) class Algorithms: scipy_neldermead: Type[ScipyNelderMead] = ScipyNelderMead scipy_lbfgsb: Type[ScipyLBFGSB] = ScipyLBFGSB # ... # many more # ... algorithms = Algorithms() ``` Currently, all algorithms are collected in a dictionary that is created programmatically. Representing algorithms in a static data structure instead requires a lot more typing and therefore code to maintain. This situation will become even worse with some of the features we propose below. Therefore, we want to automate the creation of the dataclass. To this end, we can write a function that automatically creates the code for the `Algorithms` dataclass. 
This function can be executed in a local pre-commit hook to make sure all generated code is up-to-date in every commit. It can also be executed in a [pytest hook](https://docs.pytest.org/en/7.1.x/how-to/writing_hook_functions.html) (before the collection phase) to make sure everything is up-to-date when tests run. Users of optimagic (and their IDEs) will never know that this code was not typed in by a human, which guarantees that autocomplete and static analysis will work without problems. ```{note} We can also use [pytest-hooks](https://docs.pytest.org/en/7.1.x/how-to/writing_hook_functions.html) to make sure the ``` ##### Step 3: Filtered autocomplete Having the flat `Algorithms` data structure would be enough if every user knew exactly which algorithm they want to use and just needed help typing in the name. However, this is very far from realistic. Most users have little knowledge about optimization algorithms. In the best case, they know a few properties of their problems (e.g. whether it is differentiable) and their goal (e.g. do they need a local or global solution). To exemplify what we want to achieve, assume a simplified situation with 4 algorithms. We only consider whether an algorithm is gradient free or gradient based. Here is the fictitious list: - `neldermead`: `gradient_free` - `bobyqa`: `gradient_free` - `lbfgs`: `gradient_based` - `slsqp`: `gradient_based` We want the following behavior: The user types `om.algorithms.` and autocomplete shows | | | --------------- | | `GradientBased` | | `GradientFree` | | `neldermead` | | `bobyqa` | | `lbfgs` | | `slsqp` | A user can either select one of the algorithms (lowercase) directly or filter further by selecting a category (CamelCase). This would look as follows: The user types `om.algorithms.GradientFree.` and autocomplete shows | | | ------------ | | `neldermead` | | `bobyqa` | Once the user arrives at an algorithm, a subclass of `Algorithm` is returned. 
This class will be passed to `minimize` or `maximize`. Passing configured instances of `Algorithm`s will be discussed in [Algorithm Options](algorithm-options). In practice, we would have a lot more algorithms and a lot more categories. Some categories might be mutually exclusive, in that case the second category is omitted after the first one is selected. We have the following categories: - `GradientBased` vs. `GradientFree` - `Local` vs. `Global` - `Bounded` vs. `Unbounded` - `Scalar` vs. `LeastSquares` vs. `Likelihood` - `LinearConstrained` vs. `NonlinearConstrained` vs. `Unconstrained` Potentially, we could also offer a `.All` attribute that returns a list of all currently selected algorithms. That way a user could for example loop over all `Bounded` and `GradientBased` `LeastSquares` algorithms and compare them in a criterion plot. These categories match nicely with our [algorithm selection tutorials](https://effective-programming-practices.vercel.app/scientific_computing/optimization_algorithms/objectives_materials.html). 
To achieve this behavior, we would have to implement something like this: ```python @dataclass(frozen=True) class GradientBasedAlgorithms: lbfgs: Type[LBFGS] = LBFGS slsqp: Type[SLSQP] = SLSQP @property def All(self) -> List[om.typing.Algorithm]: return [LBFGS, SLSQP] @dataclass(frozen=True) class GradientFreeAlgorithms: neldermead: Type[NelderMead] = NelderMead bobyqa: Type[Bobyqa] = Bobyqa @property def All(self) -> List[om.typing.Algorithm]: return [NelderMead, Bobyqa] @dataclass(frozen=True) class Algorithms: lbfgs: Type[LBFGS] = LBFGS slsqp: Type[SLSQP] = SLSQP neldermead: Type[NelderMead] = NelderMead bobyqa: Type[Bobyqa] = Bobyqa @property def GradientBased(self) -> GradientBasedAlgorithms: return GradientBasedAlgorithms() @property def GradientFree(self) -> GradientFreeAlgorithms: return GradientFreeAlgorithms() @property def All(self) -> List[om.typing.Algorithm]: return [LBFGS, SLSQP, NelderMead, Bobyqa] ``` If implemented by hand, this would require an enormous amount of typing and introduce a very high maintenance burden. Whenever a new algorithm was added to optimagic, we would have to register it in multiple nested dataclasses. The code generation approach detailed in the previous section can solve this problom. While it might have been overkill to achieve basic autocomplete, it is justified to achieve this filtering behavior. How the relevant information for filtering (e.g. whether an algorithm is gradient based) is collected, will be discussed in [internal algorithms](algorithm-interface). ```{note} The use of dataclasses is an implementation detail. This enhancement proposal only defines the autocomplete behavior we want to achieve. Everything else can be changed later as we see fit. ``` (algorithm-options)= ### Algorithm options Algorithm options refer to options that are not handled by optimagic but directly by the algorithms. Examples are convergence criteria, stopping criteria and advanced configuration of algorithms. 
Some of them are supported by many algorithms (e.g. stopping after a maximum number of function evaluations is reached), some are supported by certain classes of algorithms (e.g. most genetic algorithms have a population size, most trustregion algorithms allow to set an initial trustregion radius) and some of them are completely specific to one algorithm (e.g. ipopt has more than 100 very specific options, `nag_dfols` supports very specific restarting strategies, ...). While nothing can be changed about the fact that every algorithm supports different options (e.g. there is simply no trustregion radius in a genetic algorithm), we go very far in harmonizing `algo_options` across optimizers: 1. Options that are the same in spirit (e.g. stop after a specific number of iterations) get the same name across all optimizers wrapped in optimagic. Most of them even get the same default value. 1. Options that have non-descriptive (and often heavily abbreviated) names in their original implementation get more readable names, even if they appear only in a single algorithm. 1. Options that are specific to a well known optimizer (e.g. `ipopt`) are not renamed #### Current situation The user passes `algo_options` as a dictionary of keyword arguments. All options that are not supported by the selected algorithm are discarded with a warning. The names of most options are very descriptive (even though a bit too long at times). We implement basic namespaces by introducing a dot notation. Example: ```python options = { "stopping.max_iterations": 1000, "stopping.max_criterion_evaluations": 1500, "convergence.relative_criterion_tolerance": 1e-6, "convergence.scaled_gradient_tolerance": 1e-6, "initial_radius": 0.1, "population_size": 100, } ``` The option dictionary is then used as follows: ```python minimize( # ... algorithm="scipy_lbfgsb", algo_options=options, # ... 
) ``` In the example, only the options `stopping.max_criterion_evaluations`, `stopping.max_iterations` and `convergence.relative_criterion_tolerance` are supported by `scipy_lbfgsb`. All other options would be ignored. ```{note} The `.` notation in `stopping.max_iterations` is just syntactic sugar. Internally, the option is called `stopping_max_iterations` because all options need to be valid Python variable names. ``` **Things we want to keep** - The ability to provide global options that are filtered for each optimizer. Mixing the options for all optimizers in a single dictionary and discarding options that do not apply to the selected optimizer allows to loop very efficiently over very different algorithms (without `if` conditions in the user's code). This is very good for quick experimentation, e.g. solving the same problem with three different optimizers and limiting each optimizer to 100 function evaluations. - The basic namespaces help to quickly see what is influenced by a specific option. This works especially well to distinguish stopping options and convergence criteria from other tuning parameters of the algorithms. However, it would be enough to keep them as a naming convention if we find it hard to support the `.` notation. - All options are documented in the optimagic documentation, i.e. we do not link to the docs of original packages. Now they will also be discoverable in an IDE. **Problems** - There is no autocomplete and the only way to find out which options are supported is the documentation. - A small typo in an option name can easily lead to the option being discarded. - Option dictionaries can grow very big. - The fact that option dictionaries are mutable can lead to errors, for example when a user wants to try out a grid of values for one tuning parameter while keeping all other options constant. #### Proposal We want to offer multiple entry points for passing additional options to algorithms. 
Users can pick the one that works best for their particular use-case. The current solution remains valid but not recommended. ##### Configured algorithms Instead of passing an `Algorithm` class (as described in [Algorithm Selection](algorithm-selection)) the user can create an instance of their selected algorithm. When creating the instance, they have autocompletion for all options supported by the selected algorithm. `Algorithm`s are immutable. ```python algo = om.algorithms.scipy_lbfgsb( stopping_max_iterations=1000, stopping_max_criterion_evaluations=1500, convergence_relative_criterion_tolerance=1e-6, ) minimize( # ... algorithm=algo, # ... ) ``` ##### Copy constructors on algorithms Given an instance of an `Algorithm`, a user can easily create a modified copy of that instance by using the `with_option` method. ```python # using copy constructors to create variants base_algo = om.algorithms.fides(stopping_max_iterations=1000) algorithms = [base_algo.with_option(initial_radius=r) for r in [0.1, 0.2, 0.5]] for algo in algorithms: minimize( # ... algorithm=algo, # ... ) ``` We can provide additional methods `with_stopping` and `with_convergence` that call `with_option` internally but provide two additional features: 1. They validate that the option is indeed a stopping/convergence criterion. 1. They allow to omit the `convergence_` or `stopping_` at the beginning of the option name and can thus reduce repetition in the option names. This recreates the namespaces we currently achieve with the dot notation: ```python # using copy constructors for better namespaces algo = ( om.algorithms.scipy_lbfgsb() .with_stopping( max_iterations=1000, max_criterion_evaluations=1500, ) .with_convergence( relative_criterion_tolerance=1e-6, ) ) minimize( # ... algorithm=algo, # ... ) ``` ##### Global option object As before, the user can pass a global set of options to `maximize` or `minimize`. 
We continue to support option dictionaries but also allow `AlgorithmOptions` objects that enable better autocomplete and immutability.
#### Current situation The `derivative` argument can currently be one of three things: - A `callable`: This is assumed to be the relevant derivative of `criterion`. If a scalar optimizer is used, it is the gradient of the criterion value w.r.t. params. If a likelihood optimizer is used, it is the jacobian of the likelihood contributions w.r.t. params. If a least-squares optimizer is used, it is the jacobian of the residuals w.r.t. params. - A `dict`: The dict must have three keys `"value"`, `"contributions"` and `"root_contributions"`. The corresponding values are the three callables described above. - `None`: In this case, a numerical derivative is calculated. The `criterion_and_derivative` argument exactly mirrors `derivative` but each callable returns a tuple of the criterion value and the derivative instead. **Things we want to keep** - It is good that synergies between `criterion` and `derivative` can be exploited. - There are three arguments (`criterion`, `derivative`, `criterion_and_derivative`). This makes sure that every algorithm can run efficiently when looping over algorithms and keeping everything else equal. With SciPy's approach of setting `jac=True` if one wants to use a joint criterion and derivative function, a gradient free optimizer would have no chance of evaluating just the criterion. - Scalar, least-squares and likelihood problems are supported in one interface. **Problems** - A dict with required keys is brittle - Autodiff needs to be handled completely outside of optimagic - The names `criterion`, `derivative` and `criterion_and_derivative` are not aligned with scipy and very long. - Providing derivatives to optimagic is perceived as complicated and confusing. #### Proposal ```{note} The following section uses the new names `fun`, `jac` and `fun_and_jac` instead of `criterion`, `derivative` and `criterion_and_derivative`. 
``` To improve the integration with modern automatic differentiation frameworks, `jac` or `fun_and_jac` can also be a string `"jax"` or a more autocomplete friendly enum `om.autodiff_backend.JAX`. This can be used to signal that the objective function is jax compatible and jax should be used to calculate its derivatives. In the long run we can add PyTorch support and more. Since this is mostly about a signal of compatibility, it would be enough to set one of the two arguments to `"jax"`, the other one can be left at `None`. Here is an example: ```python import jax.numpy as jnp import optimagic as om def jax_sphere(x): return jnp.dot(x, x) res = om.minimize( fun=jax_sphere, params=jnp.arange(5), algorithm=om.algorithms.scipy_lbfgsb, jac="jax", ) ``` If a custom callable is provided as `jac` or `fun_and_jac`, it needs to be decorated with `@om.mark.least_squares` or `om.mark.likelihood` if it is not the gradient of a scalar function values. Using the `om.mark.scalar` decorator is optional. For a simple least-squares problem this looks as follows: ```python import numpy as np @om.mark.least_squares def ls_sphere(params): return params @om.mark.least_squares def ls_sphere_jac(params): return np.eye(len(params)) res = om.minimize( fun=ls_sphere, params=np.arange(5), algorithm=om.algorithms.scipy_ls_lm, jac=ls_sphere_jac, ) ``` Note that here we have a least-squares problem and solve it with a least-squares optimizer. However, any least-squares problem can also be solved with scalar optimizers. While optimagic could convert the least-squares derivative to the gradient of the scalar function value, this is generally inefficient. Therefore, a user can provide multiple callables of the objective function in such a case, so we can pick the best one for the chosen optimizer. 
```python @om.mark.scalar def sphere_grad(params): return 2 * params res = om.minimize( fun=ls_sphere, params=np.arange(5), algorithm=om.algorithms.scipy_lbfgsb, jac=[ls_sphere_jac, sphere_grad], ) ``` Since a scalar optimizer was chosen to solve the least-squares problem, optimagic would pick the `sphere_grad` as derivative. If a leas-squares solver was chosen, we would use `ls_sphere_jac`. ### Other option dictionaries #### Current situation We often allow to switch on some behavior with a bool or a string value and then configure the behavior with an option dictionary. Examples are: - `logging` (`str | pathlib.Path | False`) and `log_options` (dict) - `scaling` (`bool`) and `scaling_options` (dict) - `error_handling` (`Literal["raise", "continue"]`) and `error_penalty` (dict) - `multistart` (`bool`) and `multistart_options` Moreover we have option dictionaries whenever we have nested invocations of optimagic functions. Examples are: - `numdiff_options` in `minimize` and `maximize` - `optimize_options` in `estimate_msm` and `estimate_ml` **Things we want to keep** - Complex behavior like logging or multistart can be switched on in extremely simple ways, without importing anything and without looking up supported options. - The interfaces are very declarative and decoupled from our implementation. **Problems** - Option dictionaries are brittle and don't support autocomplete. - It can be confusing if someone provided `scaling_options` or `multistart_options` but they take no effect because `scaling` or `multistart` were not set to `True`. #### Proposal We want to keep a simple way of enabling complex behavior (with some default options) but get rid of having two separate arguments (one to switch the behavior on and one to configure it). This means that we have to be generous regarding input types. ##### Logging Currently we only implement logging via an sqlite database. All `log_options` are specific to this type of logging. 
However, logging is slow and we should support more types of logging. For this, we can implement a simple `Logger` abstraction. Advanced users could implement their own logger. After the changes, `logging` can be any of the following: - `False` (or anything Falsy): No logging is used. - A `str` or `pathlib.Path`: Logging is used at default options. - An instance of `optimagic.Logger`. There will be multiple subclasses, e.g. `SqliteLogger` which allow us to switch out the logging backend. Each subclass might have different optional arguments. The `log_options` are deprecated. Using dictionaries instead of `Option` objects will be supported during a deprecation cycle. ##### Scaling, error handling and multistart In contrast to logging, scaling, error handling and multistart are deeply baked into optimagic's minimize function. Therefore, it does not make sense to create abstractions for these features that would make them replaceable components that can be switched out for other implementations by advanced users. Most of these features are already perceived as advanced and allow for a lot of configuration. We therefore suggest the following argument types: - `scaling`: `bool | ScalingOptions` - `error_handling`: `bool | ErrorHandlingOptions` - `multistart`: `bool | MultistartOptions` All of the Option objects are simple dataclasses that mirror the current dictionaries. All `_options` arguments are deprecated. ##### `numdiff_options` and similar Dictionaries are still supported but we also offer more autocomplete friendly dataclasses as alternative. (algorithm-interface)= ### The internal algorithm interface and `Algorithm` objects #### Current situation Currently, algorithms are defined as `minimize` functions that are decorated with `om.mark_minimizer`. The `minimize` function returns a dictionary with a few mandatory and several optional keys. Algorithms can provide information to optimagic in two ways: 1. 
The signature of the minimize function signals whether the algorithm needs derivatives and whether it supports bounds and nonlinear constraints. Moreover, it signals which algorithm specific options are supported. Default values for algorithm specific options are also defined in the signature of the minimize function. 1. `@mark_minimizer` collects the following information via keyword arguments: - Is the algorithm a scalar, least-squares or likelihood optimizer? - The algorithm name. - Does the algorithm require well scaled problems? - Is the algorithm currently installed? - Is the algorithm global or local? - Should the history tracking be disabled (e.g. because the algorithm tracks its own history)? - Does the algorithm parallelize criterion evaluations? A slightly simplified example of the current internal algorithm interface is: ```python @mark_minimizer( name="scipy_neldermead", needs_scaling=False, primary_criterion_entry="value", is_available=IS_SCIPY_AVAILABLE, is_global=False, disable_history=False, ) def scipy_neldermead( criterion, x, lower_bounds, upper_bounds, *, stopping_max_iterations=1_000_000, stopping_max_criterion_evaluations=1_000_000, convergence_absolute_criterion_tolerance=1e-8, convergence_absolute_params_tolerance=1e-8, adaptive=False, ): options = { "maxiter": stopping_max_iterations, "maxfev": stopping_max_criterion_evaluations, # both tolerances seem to have to be fulfilled for Nelder-Mead to converge. # if not both are specified it does not converge in our tests. "xatol": convergence_absolute_params_tolerance, "fatol": convergence_absolute_criterion_tolerance, "adaptive": adaptive, } res = scipy.optimize.minimize( fun=criterion, x0=x, bounds=_get_scipy_bounds(lower_bounds, upper_bounds), method="Nelder-Mead", options=options, ) return process_scipy_result(res) ``` The first two arguments (`criterion` and `x`) are mandatory. The lack of any arguments related to derivatives signifies that `scipy_neldermead` is a gradient free algorithm. 
The bounds related arguments show that it supports box constraints. The remaining arguments define the supported stopping criteria and algorithm options as well as their default values. The decorator simply attaches information to the function as `_algorithm_info` attribute. This originated as a hack but was never changed afterwards. The `AlgoInfo` looks as follows: ```python class AlgoInfo(NamedTuple): primary_criterion_entry: str name: str parallelizes: bool needs_scaling: bool is_available: bool arguments: list # this is read from the signature is_global: bool = False disable_history: bool = False ``` **Things we want to keep** - The internal interface has proven flexible enough for many optimizers we had not wrapped when we designed it. It is easy to add more optional arguments to the decorator without breaking any existing code. - The decorator approach completely hides how we represent algorithms internally. - Since we read a lot of information from function signatures (as opposed to registering options somewhere), there is no duplicated information. If we change the approach to collecting information, we still need to ensure there is no duplication or possibility to provide wrong information to optimagic. **Problems** - Type checkers complain about the `._algorithm_info` hack. - All computations and signature checking are done eagerly for all algorithms at import time. This is one of the reasons why imports are slow. - The first few arguments to the minimize functions follow a naming scheme and any typo in those names would lead to situations that are hard to debug (e.g. if `lower_bound` was mistyped as `lower_buond` we would assume that the algorithm does not support lower bounds but has a tuning parameter called `lower_buond`). #### Proposal We first show the proposed new algorithm interface and discuss the changes later. 
```python @om.mark.minimizer( name="scipy_neldermead", needs_scaling=False, problem_type=om.ProblemType.Scalar, is_available=IS_SCIPY_AVAILABLE, is_global=False, disable_history=False, needs_derivatives=False, needs_parallelism=False, supports_bounds=True, supports_linear_constraints=False, supports_nonlinear_constraints=False, ) @dataclass(frozen=True) class ScipyNelderMead(Algorithm): stopping_max_iterations: int = 1_000_000 stopping_max_criterion_evaluations: int = 1_000_000 convergence_absolute_criterion_tolerance: float = 1e-8 convergence_absolute_params_tolerance: float = 1e-8 adaptive: bool = False def __post_init__(self): # check everything that cannot be handled by the type system assert self.convergence_absolute_criterion_tolerance > 0 assert self.convergence_absolute_params_tolerance > 0 def _solve_internal_problem( self, problem: InternalProblem, x0: NDArray[float] ) -> InternalOptimizeResult: options = { "maxiter": self.stopping_max_iterations, "maxfev": self.stopping_max_criterion_evaluations, "xatol": self.convergence_absolute_params_tolerance, "fatol": self.convergence_absolute_criterion_tolerance, "adaptive": self.adaptive, } res = minimize( fun=problem.scalar.fun, x0=x0, bounds=_get_scipy_bounds(problem.bounds), method="Nelder-Mead", options=options, ) return process_scipy_result(res) ``` 1. The new internal algorithms are dataclasses, where all algorithm options are dataclass fields. This enables us to obtain information about the options via the `__dataclass_fields__` attribute without inspecting signatures or imposing naming conventions on non-option arguments. 1. The `_solve_internal_problem` method receives an instance of `InternalProblem` and `x0` (the start values) as arguments. `InternalProblem` collects the criterion function, its derivatives, bounds, etc. This again avoids any potential for typos in argument names. 1. 
The `mark.minimizer` decorator collects all the information that was previously collected via optional arguments with naming conventions. This information is available while constructing the instance of `InternalProblem`. Thus we can make sure that attributes that were not requested (e.g. derivatives if `needs_derivatives` is `False`) raise an `AttributeError` if used. 1. The minimize function returns an `InternalOptimizeResult` instead of a dictionary. The copy constructors (`with_option`, `with_convergence`, and `with_stopping`) are inherited from `optimagic.Algorithm`. This means that they will have `**kwargs` as signature and thus do not support autocomplete. However, they can check that all specified options are actually in the `__dataclass_fields__` and thus provide feedback before an optimization is run. All breaking changes of the internal algorithm interface are done without deprecation cycle. ```{note} The `_solve_internal_problem` method is private because users should not call it; this also prepares adding a public `minimize` method that internally calls the `minimize` function. ``` To make things more concrete, here are prototypes for components related to the `InternalProblem` and `InternalOptimizeResult`. ```{note} The names of the internal problem are already aligned with the new names for the objective function and its derivatives. 
``` ```python from numpy.typing import NDArray from dataclasses import dataclass from typing import Callable, Tuple import optimagic as om @dataclass(frozen=True) class ScalarProblemFunctions: fun: Callable[[NDArray[float]], float] jac: Callable[[NDArray[float]], NDArray[float]] fun_and_jac: Callable[[NDArray[float]], Tuple[float, NDArray[float]]] @dataclass(frozen=True) class LeastSquaresProblemFunctions: fun: Callable[[NDArray[float]], NDArray[float]] jac: Callable[[NDArray[float]], NDArray[float]] fun_and_jac: Callable[[NDArray[float]], Tuple[NDArray[float], NDArray[float]]] @dataclass(frozen=True) class LikelihoodProblemFunctions: fun: Callable[[NDArray[float]], NDArray[float]] jac: Callable[[NDArray[float]], NDArray[float]] fun_and_jac: Callable[[NDArray[float]], Tuple[NDArray[float], NDArray[float]]] @dataclass(frozen=True) class InternalProblem: scalar: ScalarProblemFunctions least_squares: LeastSquaresProblemFunctions likelihood: LikelihoodProblemFunctions bounds: om.Bounds | None linear_constraints: list[om.LinearConstraint] | None nonlinear_constraints: list[om.NonlinearConstraint] | None ``` The `InternalOptimizeResult` formalizes the current dictionary solution: ```python @dataclass(frozen=True) class InternalOptimizeResult: solution_x: NDArray[float] solution_criterion: float n_criterion_evaluations: int | None n_derivative_evaluations: int | None n_iterations: int | None success: bool | None message: str | None ``` #### Alternative to `mark.minimizer` Instead of collecting information about the optimizers via the `mark.minimizer` decorator, we could require the `Algorithm` subclasses to provide that information via class variables. The presence of all required class variables could be enforced via `__init_subclass__`. The two approaches are equivalent in terms of achievable functionality. 
I see the following advantages and disadvantages: **Advantages of decorator approach** - Easier for beginners as no subtle concepts (such as the difference between instance and class variables) are involved - Very easy way to provide default values for some of the collected variables - Every user of optimagic is familiar with `mark` decorators - Autocomplete while filling out the arguments of the mark decorator - Very clear visual separation of algorithm options and attributes optimagic needs to know about. **Advantages of class variable approach** - More familiar for people with object oriented background - Possibly better ways to enforce the presence of the class variables via static analysis I am personally leaning towards the decorator approach but any feedback on this topic is welcome. ## Numerical differentiation ### Current situation The following proposal applies to the functions `first_derivative` and `second_derivative`. Both functions have an interface that has grown over time and both return a relatively complex result dictionary. There are several arguments that govern which entries are stored in the result dictionary. The functions `first_derivative` and `second_derivative` allow params to be arbitrary pytrees. They work for scalar and vector valued functions and a `key` argument makes sure that they work for `criterion` functions that return a dict containing `"value"`, `"contributions"`, and `"root_contributions"`. In contrast to optimization, all pytree handling (for params and function outputs) is mixed with the calculation of the numerical derivatives. This can produce more informative error messages and save some memory. However it increases complexity extremely because we can make very few assumptions on types. There are many if conditions to deal with this situation. The interface is further complicated by supporting Richardson Extrapolation. 
This feature was inspired by [numdifftools](https://numdifftools.readthedocs.io/en/latest/) but has not produced convincing results in benchmarks. **Things we want to keep** - `params` and function values can be pytrees - support for optimagic `criterion` functions (now functions that return `FunctionValue`) - Many optional arguments to influence the details of the numerical differentiation - Rich output format that helps to get insights on the precision of the numerical differentiation - Ability to optionally pass in a function evaluation at `params` or return a function evaluation at `params` **Problems** - We can make no assumptions on types inside the function because pytree handling is mixed with calculations - Support for Richardson extrapolation complicates the interface and implementation but has not been convincing in benchmarks - Pytree handling is actually incomplete (`base_steps`, `min_steps` and `step_ratio` are assumed to be flat numpy arrays) - Many users expect the output of a function for numerical differentiation to be just the gradient, jacobian or hessian, not a more complex result object. ### Proposal #### Separation of calculations and pytree handling As in numerical optimization, we should implement the core functionality for first and second derivative for functions that map from 1-Dimensional numpy arrays to 1-Dimensional numpy arrays. All pytree handling or other handling of function outputs (e.g. functions that return a `FunctionValue`) should be done outside of the core functions. #### Deprecate Richardson Extrapolation (and prepare alternatives) The goal of implementing Richardson Extrapolation was to get more precise estimates of numerical derivatives when it is hard to find an optimal step size. Example use-cases we had in mind were: - Optimization of a function that is piecewise flat, e.g. the likelihood function of a naively implemented multinomial probit - Optimization or standard error estimation of slightly noisy functions, e.g. 
functions of an MSM estimation problem - Standard error estimation of wiggly functions where the slope and curvature at the minimum does not yield reasonable standard errors and confidence intervals Unfortunately, the computational cost of Richardson extrapolation is too high for any application during optimization. Moreover, our practical experience with Richardson Extrapolation was not positive and it seems that Richardson extrapolation is not designed for our use-cases. It is designed as a sequence acceleration method that reduces roundoff error while shrinking a step size to zero, whereas in our application it might often be better to take a larger step size (for example, the success of derivative free trust-region optimizers suggests that less local slope and curvature information is more useful than actual derivatives for optimization; similarly, numerical derivatives with larger step sizes could be seen as an estimate of a [quasi jacobian](https://arxiv.org/abs/1907.13093) and inference based on it might have good statistical properties). We therefore propose to remove Richardson extrapolation and open an Issue to work on alternatives. Examples for alternatives could be: - [Moré and Wild (2010)](https://www.mcs.anl.gov/papers/P1785.pdf) propose an approach to calculate optimal step sizes for finite difference differentiation of noisy functions - We could think about aggregating derivative estimates at multiple step sizes in a way that produces worst case standard errors and confidence intervals - ... ```{note} Richardson extrapolation was only completed for first derivatives, even though it is already prepared in the interface for second derivatives. ``` #### Better `NumdiffResult` object The result dictionary will be replaced by a `NumdiffResult` object. All arguments that govern which results are stored will be removed. 
If some of the formerly optional results require extra computation that we wanted to avoid by making them optional, they can be properties or methods of the result object. #### Jax inspired high-level interfaces Since our `first_derivative` and `second_derivative` functions need to fulfill very specific requirements for use during optimization, they need to return a complex result object. However, this can be annoying in simple situations where users just want a gradient, jacobian or hessian. To cover these simple situations and provide a high level interface to our numdiff functions, we can provide a set of jax inspired decorators: - `@grad` - `@value_and_grad` - `@jac` (no distinction between `@jacrev` and `jacfwd` necessary) - `@value_and_jac` - `@hessian` - `@value_and_hessian` All of these will be very simple wrappers around `first_derivative` and `second_derivative` with very low implementation and maintenance costs. ## Benchmarking ### `get_benchmark_problems` #### Current situation As other functions in optimagic, `get_benchmark_problems` follows a design where behavior can be switched on by a bool and configured by an options dictionary. The following arguments are related to this: - `additive_noise` and `additive_noise_options` - `multiplicative_noise` and `multiplicative_noise_options` - `scaling` and `scaling_options` All of them have the purpose of adding some difficult characteristics to an existing benchmark set, so we can analyze how well an optimizer can deal with this situation. The name of the benchmark set is passed in as a string. The return value of `get_benchmark_problems` is a nested dictionary. The keys in the outer dictionary are the names of benchmark problems. The inner dictionaries represent benchmark problems. **Things we want to keep** - Benchmark problems are collected in a dict, not in a fixed-field data structure. This makes it easy to merge problems from multiple benchmark sets or filter benchmark sets. 
A fixed field data structure would not work here. **Problems** - As discussed before, having separate arguments for switching-on behavior and configuring it can be dangerous - Each single benchmark problem should not be represented as a dictionary - Adding noise or scaling problems should be made more flexible and generic #### Proposal ##### Add noise to benchmark problems The four arguments `additive_noise`, `multiplicative_noise`, `additive_noise_options`, and `multiplicative_noise_options` are combined in one `noise` argument. This `noise` argument can be `bool | BenchmarkNoise`. If `False`, no noise is added. If `True`, standard normal noise is added. We implement several subclasses of `BenchmarkNoise` to cover the current use cases. As syntactic sugar, we can make `BenchmarkNoise` instances addable (by implementing an `__add__` method) so multiple sources of noise can be combined. A rough prototype for `BenchmarkNoise` looks as follows: ```python FvalType = TypeVar("FvalType", bound=float | NDArray[float]) class BenchmarkNoise(ABC): @abstractmethod def draw_noise( self, fval: FvalType, params: NDArray, size: int, rng: np.random.Generator ) -> FvalType: pass def __add__(self, other: BenchmarkNoise): pass ``` Passing `fval` and `params` to `draw_noise` enables us to implement multiplicative noise (i.e. noise where the standard deviation scales with the function value) and stochastic or deterministic wiggle (e.g. a sine curve that depends on params). Therefore, this proposal does not just cover everything that is currently implemented but also adds new functionality we wanted to implement. ##### Add scaling issues to benchmark problems The `scaling_options` argument is deprecated. The `scaling` argument can be `bool | BenchmarkScaler`. We implement `LinspaceBenchmarkScaler` to cover everything that is implemented right now but more types of scaling can be implemented in the future. 
A rough prototype of `BenchmarkScaler` looks as follows: ```python class BenchmarkScaler(ABC): @abstractmethod def scale(self, params: NDArray) -> NDArray: pass @abstractmethod def unscale(self, params: NDArray) -> NDArray: pass ``` ##### Representing benchmark problems Instead of the fixed-field dictionary we will have a dataclass with corresponding fields. This would roughly look as follows: ```python @dataclass class BenchmarkProblem: fun: Callable[[NDArray], FunctionValue] start_x: NDArray solution_x: NDArray | None start_fun: float solution_fun: float ``` ### `run_benchmark` #### Current situation `run_benchmark` takes `benchmark_problems` (covered in the previous section), `optimize_options` and a few other arguments and returns a nested dictionary representing benchmark results. `optimize_options` can be a list of algorithm names, a dict with algorithm names as values or a nested dict of keyword arguments for `minimize`. **Things we want to keep** - Benchmark results are collected in a dict, not in a fixed-field data structure. This makes it easy to merge results from multiple benchmark sets or filter benchmark results. A fixed field data structure would not work here. **Problems** - `optimize_options` are super flexible but error prone and hard to write as there is no autocomplete support - Each single benchmark result should not be represented as a dictionary #### Proposal We restrict the type of `optimize_options` to `dict[str, Type[Algorithm] | Algorithm | OptimizeOptions]`. Here, `OptimizeOptions` will be a simple dataclass that we need for `estimate_ml` and `estimate_msm` anyways. Passing just lists of algorithm names is deprecated. Passing dicts as optimize options is also deprecated. Most use-cases will be covered by passing dictionaries of configured Algorithms as optimize options. Actually using the full power of passing `OptimizeOptions` will be rarely needed. 
The return type of `run_benchmark` will be `dict[tuple[str], BenchmarkResult]` `BenchmarkResult` is a dataclass with fields that mirror the keys of the current dictionary. It will roughly look as follows: ```python @dataclass class BenchmarkResult: params_history: list[NDArray] fun_history: list[float] time_history: list[float] batches_history: list[int] solution: OptimizeResult ``` ## Estimation The changes to the estimation functions `estimate_ml` and `estimate_msm` will be minimal: - `lower_bounds` and `upper_bounds` are replaced by `bounds` (as in optimization) - `numdiff_options` and `optimize_options` become dataclasses - `logging` and `log_options` get aligned with our proposal for optimization In the long run we plan a general overhaul of `MSM` estimation that provides better access to currently internal objects such as the MSM objective function. ## Type checkers and their configuration We choose mypy as static type checker and run it as part of our continuous integration. Once this enhancement proposal is fully implemented, we want to use the following settings: ``` check_untyped_defs = true disallow_any_generics = true disallow_untyped_defs = true disallow_incomplete_defs = true no_implicit_optional = true warn_redundant_casts = true warn_unused_ignores = true ``` In addition to CI, we could also run type-checks as part of the pre-commit hooks. An example where this is done can be found [here](https://github.com/google/jax/blob/de0fd722f0c4c0c238884f0e64e4ef8da72e4c1d/.pre-commit-config.yaml#L33). ## Runtime type checking Since most of our users do not use static type checkers we will still need to check the type of most user inputs so we can give them early feedback when problems arise. Thus we cannot remove our current error handling just because many of these errors could now be caught by static analysis. We can investigate using `jaxtyping`'s pytest hooks to enable runtime typecheckers like beartype during testing but it is not a priority for now. 
## Changes in documentation All type information in docstrings will be removed. Whenever there are now multiple ways of doing things, we show the ones that support autocomplete and static analysis most prominently. We can achieve this via tabs, similar to how [pytask](https://pytask-dev.readthedocs.io/en/stable/tutorials/defining_dependencies_products.html#products) does it. The general structure of the documentation is not affected by this enhancement proposal. ## Summary of breaking changes - The internal algorithm interface changes completely without deprecations - The support for Richardson Extrapolation in `first_derivative` is dropped without deprecation; The corresponding arguments `n_steps` and `step_ratio` are removed. - The return type of `first_derivative` and `second_derivative` changes from dict to `NumdiffResult` without deprecations. The arguments `return_func_value` and `return_info` are dropped. - The representation of benchmark problems and benchmark results changes without deprecations ## Summary of deprecations The following deprecations become active in version `0.5.0`. The functionality will be removed in version `0.6.0` which should be scheduled for approximately half a year after the release of `0.5.0`. - Returning a `dict` in the objective function is deprecated. Return `FunctionValue` instead. In addition, likelihood and least-squares problems need to be decorated with `om.mark.likelihood` and `om.mark.least_squares`. - The arguments `lower_bounds`, `upper_bounds`, `soft_lower_bounds` and `soft_upper_bounds` are deprecated. Use `bounds` instead. `bounds` can be `optimagic.Bounds` or `scipy.optimize.Bounds` objects. - Specifying constraints with dictionaries is deprecated. Use the corresponding subclass of `om.constraints.Constraint` instead. In addition, all selection methods except for `selector` are deprecated. 
- The `covariance` constraint is renamed to `FlatCovConstraint` and the `sdcorr` constraint is renamed to `FlatSdcorrConstraint` to prepare the introduction of more natural (non-flattened) covariance and sdcorr constraints. - The `log_options` argument of `maximize` and `minimize` is deprecated and gets subsumed in the `logging` argument. - The `scaling_options` argument of `maximize` and `minimize` is deprecated and gets subsumed in the `scaling` argument. - The `error_penalty` argument of `maximize` and `minimize` is deprecated and gets subsumed in the `error_handling` argument. - The `multistart_options` argument of `maximize` and `minimize` is deprecated and gets subsumed in the `multistart` argument. - The arguments `additive_noise`, `additive_noise_options`, `multiplicative_noise`, and `multiplicative_noise_options` in `get_benchmark_problems` are deprecated and combined into `noise`. - The `scaling_options` argument in `get_benchmark_problems` is deprecated and subsumed in the `scaling` argument. - Passing just a list of algorithm strings as `optimize_options` in `run_benchmark` is deprecated. 
================================================ FILE: docs/source/development/ep-03-alignment.md ================================================ (eepalignment)= # EP-03: Alignment with SciPy ```{eval-rst} +------------+------------------------------------------------------------------+ | Author | `Janos Gabler `_ | +------------+------------------------------------------------------------------+ | Status | Accepted | +------------+------------------------------------------------------------------+ | Type | Standards Track | +------------+------------------------------------------------------------------+ | Created | 2024-07-09 | +------------+------------------------------------------------------------------+ | Resolution | | +------------+------------------------------------------------------------------+ ``` ## Abstract This enhancement proposal explains how we will better align optimagic with `scipy.minimize`. Scipy is the most widely used optimizer library in Python and most of our new users are switching over from SciPy. The goal is therefore simple: Make it as easy as possible for SciPy users to use optimagic. In most cases this means that the only thing that has to be changed is the import statement for the `minimize` function: ```python # from scipy.optimize import minimize from optimagic import minimize ``` ## Design goals - If we can make code written for SciPy run with optimagic, we should do so - If we cannot make it run, the user should get a helpful error message that explains how the code needs to be adjusted. 
## Aligning names | **Old Name** | **Proposed Name** | **Source** | | ------------------------------------------ | ------------------------- | ---------- | | `criterion` | `fun` | scipy | | `criterion_kwargs` | `fun_kwargs` | | | `params` | `x0` | | | `derivative` | `jac` | scipy | | `derivative_kwargs` | `jac_kwargs` | | | `criterion_and_derivative` | `fun_and_jac` | | | `criterion_and_derivative_kwargs` | `fun_and_jac_kwargs` | | | `stopping_max_criterion_evaluations` | `stopping_maxfun` | scipy | | `stopping_max_iterations` | `stopping_maxiter` | scipy | | `convergence_absolute_criterion_tolerance` | `convergence_ftol_abs` | NlOpt | | `convergence_relative_criterion_tolerance` | `convergence_ftol_rel` | NlOpt | | `convergence_absolute_params_tolerance` | `convergence_xtol_abs` | NlOpt | | `convergence_relative_params_tolerance` | `convergence_xtol_rel` | NlOpt | | `convergence_absolute_gradient_tolerance` | `convergence_gtol_abs` | NlOpt | | `convergence_relative_gradient_tolerance` | `convergence_gtol_rel` | NlOpt | | `convergence_scaled_gradient_tolerance` | `convergence_gtol_scaled` | | While it seems that many names are taken from NlOpt and not from SciPy, this is a bit misleading. SciPy does use the words `xtol`, `ftol` and `gtol` just like NlOpt, but it does not completely harmonize them between algorithms. We therefore chose NlOpt's version which is understandable for everyone who knows SciPy but more readable than SciPy's. ## Names we do not want to align - We do not want to rename `algorithm` to `method` because our algorithm names are different from SciPy, so people who switch over from SciPy need to adjust their code anyways. - We do not want to rename `algo_options` to `options` for the same reason. Instead we can provide aliases for those. 
## Additional aliases To make it even easier for SciPy users to switch to optimagic, we can provide additional aliases in `minimize` and `maximize` that let them use their SciPy code without changes or help to adjust it by showing good error messages. The following arguments are relevant: - `method`: In SciPy this is used instead of `algorithm` to select the optimization algorithm. We opted against simply renaming `algorithm` to `method` because our naming scheme of algorithms is (and has to be) different from SciPy. By using `method` instead of `algorithm`, users could select SciPy algorithms by their SciPy name. If `method` and `algorithm` are both provided, they would get an error. - `tol`: We do not want to support one `tol` argument for all kinds of different convergence criteria but could raise an error for people who use it and point them to the relevant parts of our documentation. - `args`: we can support `args` as an alternative to `fun_kwargs` - `options`: This is the SciPy counterpart to our `algo_options`. We do not want to support this as our option names are different but we can provide a good error message with pointers to our documentation if someone uses it. - `hess` and `hessp`: Currently we don't support closed form hessians. If we support them they will be called `hess`. In the meantime, this can raise a `NotImplementedError`. - `callback`: Currently we do not support `callback`s. If we support them they will be called `callback` and be as compatible with SciPy as possible. In the meantime we can raise a `NotImplementedError`. - If a user sets `jac=True` we raise an error and explain how to use `fun_and_jac` instead. ## Letting algorithms pick their default values Currently we try to align default values for convergence criteria and other algorithm options across algorithms and even across optimizer packages. 
This means that sometimes algorithms that are used via optimagic produce different results than the same algorithm used via SciPy or other packages. Moreover, it is possible that we deviate from algorithm options that the original authors carefully picked because they maximize performance on a relevant benchmark set. I therefore propose that in the future we do not try to align algorithm options across algorithms and packages. ## Implementation All renamings are done with a careful deprecation cycle. The deprecations become active in version `0.5.0`. Old names will be removed in version `0.6.0` which should be scheduled for approximately half a year after the release of `0.5.0`. ================================================ FILE: docs/source/development/how_to_contribute.md ================================================ (how-to-contribute)= # How to contribute ## 1. Intro We welcome and greatly appreciate contributions of all forms and sizes! Whether it's updating the documentation, adding small extensions, or implementing new features, every effort is valued. For substantial changes, please contact us in advance. This allows us to discuss your ideas and guide the development process from the beginning. You can start a conversation by posting an issue on GitHub or by emailing [janosg](https://github.com/janosg). To get familiar with the codebase, we recommend checking out our [issue tracker](https://github.com/optimagic-dev/optimagic/issues) for some immediate and clearly defined tasks. ## 2. Before you start Once you've decided to contribute, please review the {ref}`style_guide` (see the next page) to ensure your work aligns with the project's coding standards. We manage new features through Pull Requests (PRs). Contributors work on their local copy of optimagic, modifying and extending the codebase there, before opening a PR to propose merging their changes into the main branch. 
Regular contributors gain push access to unprotected branches, which simplifies the contribution process (see Notes below). ## 3. Step-by-step guide 1. Fork the [optimagic repository](https://github.com/optimagic-dev/optimagic/). This action creates a copy of the repository with write access for you. ```{note} For regular contributors: **Clone** the [repository](https://github.com/optimagic-dev/optimagic/) to your local machine and create a new branch for implementing your changes. You can push your branch directly to the remote optimagic repository and open a PR from there. ``` 1. Clone your forked repository to your disk. This is where you'll make all your changes. 1. Open your terminal and execute the following commands from the root directory of your local optimagic repository: ```console $ prek install ``` This activates pre-commit hooks for linting and style formatting. ```{note} `prek` is not managed by pixi and must be installed globally. You can find installation instructions at [github.com/j178/prek](https://github.com/j178/prek). ``` You can then run the test suite with: ```console $ pixi run tests ``` which installs the development dependencies and runs pytest. To run the type checker, use: ```console $ pixi run mypy ``` To see all available pixi tasks, run: ```console $ pixi task list ``` 1. Implement your fix or feature. Use git to add, commit, and push your changes to the remote repository. For more on git and how to stage and commit your work, refer to these [online materials](https://effective-programming-practices.vercel.app/git/staging/objectives_materials.html). 1. Contributions are validated in two main ways. We run a comprehensive test suite to ensure compatibility with the existing codebase and employ [pre-commit hooks](https://effective-programming-practices.vercel.app/git/pre_commits/objectives_materials.html) to maintain quality and adherence to our style guidelines. 
Opening a PR (see below) triggers optimagic's [Continuous Integration (CI)](https://docs.github.com/en/actions/automating-builds-and-tests/about-continuous-integration) workflow, which runs the full test suite, pre-commit hooks, and other checks on a remote server. You can also run the test suite locally for [debugging](https://effective-programming-practices.vercel.app/debugging/pdbp/objectives_materials.html). With prek installed, linters run before each commit. Commits are rejected if any checks fail. Note that some linters may automatically fix errors by modifying the code in-place. Remember to re-stage the files after such modifications. ```{tip} Skip the next paragraph if you haven't worked on the documentation. ``` 1. Assuming you have updated the documentation, verify that it builds correctly. Run: ```console $ pixi run build-docs ``` This command builds the HTML documentation, saving all files in the `docs/build/html` directory. You can view the documentation with your preferred web browser by opening `docs/build/html/index.html` or any other file. Similar to the online documentation, you can navigate to different pages simply by clicking on the links. 1. Once all tests and hooks pass locally, push your changes to your forked repository and create a pull request through GitHub: Go to the Github repository of your fork. A banner on your fork's GitHub repository will prompt you to open a PR. ```{note} Regular contributors with push access can directly push their local branch to the remote optimagic repository and initiate a PR from there. ``` Follow the steps outlined in the optimagic [PR template](https://github.com/optimagic-dev/optimagic/blob/main/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md) to describe your contribution, the problem it addresses, and your proposed solution. Opening a PR initiates a complete CI run, including the `pytest` suite, linters, code coverage checks, doctests, and building the HTML documentation. 
Monitor the CI workflow status on your PR page and make necessary modifications to your code based on the results, iterating until all tests pass. 1. Request a review from one of the main contributors once all CI tests pass. Address any feedback or suggestions by making the necessary changes and committing them. 1. After your PR is approved, one of the main contributors will merge it into optimagic's main branch. ================================================ FILE: docs/source/development/index.md ================================================ # Development ```{toctree} --- maxdepth: 1 --- code_of_conduct how_to_contribute styleguide enhancement_proposals credits changes ``` ================================================ FILE: docs/source/development/styleguide.md ================================================ (style_guide)= # Styleguide Your contribution should fulfill the criteria provided below. ## Styleguide for the codebase - Functions have no side effect. : If you modify a mutable argument, make a copy at the beginning of the function. - Use good names for functions and variables : *"You should name a variable using the same care with which you name a first-born child."*, Robert C. Martin, Clean Code: A Handbook of Agile Software Craftsmanship. A bit more concretely, this means: The length of a variable name should be proportional to its scope. In a list comprehension or short loop, i might be an acceptable name for the running variable, but variables that are used at many different places should have descriptive names. The name of variables should reflect the content or meaning of the variable and not only the type. Names like `dict_list` would not have been a good name for the `constraints`. Function names should contain a verb. Moreover, the length of a function name is typically inversely proportional to its scope. The public functions like `maximize` and `minimize` can have very short names. 
At a lower level of abstraction you typically need more words to describe what a function does. - User facing functions should be generous regarding their input type. Example: the `algorithm` argument can be a string, `Algorithm` class or `Algorithm` instance. The `algo_options` can be an `AlgorithmOptions` object or a dictionary of keyword arguments. - User facing functions should be strict about their output types. A strict output type does not just mean that the output type is known (and not a generous Union), but that it is a proper type that enables static analysis for available attributes. Example: whenever possible, public functions should not return dicts but proper result types (e.g. `OptimizeResult`, `NumdiffResult`, ...) - Internal functions should be strict about input and output types; Typically, a public function will check all arguments, convert them to a proper type and then call an internal function. Example: `minimize` will convert any valid value for `algorithm` into an `Algorithm` instance and then call an internal function with that type. - Fixed field types should only be used if all fields are known. An example where this is not the case are collections of benchmark problems, where the set of fields depends on the selected benchmark sets and other things. In such situations, dictionaries that map strings to BenchmarkProblem objects are a good idea. - Think about autocomplete! If you want to accept a string as an argument (e.g. an algorithm name) also accept input types that are more amenable to static analysis and offer better autocomplete. - Whenever possible, use immutable types. Whenever things need to be changeable, consider using an immutable type with copy constructors for modified instances. Example: instances of `Algorithm` are immutable but using `Algorithm.with_option` users can create modified copies. - The main entry point to optimagic are functions, objects are mostly used for configuration and return types. 
In particular each docstring should start with a one liner that describes very concisely what the function does. The one liner should be in imperative mode, i.e. not "This function does ...", but "Do ...", and end with a period.
The idea of the percentile confidence interval is to simply take the empirical quantiles $q_{p}^*$ of this distribution, so we have
The general idea is to correct for the skewness of the sampling distribution.
Then, the corrected significance level is given by $$ x(\alpha)=\Phi(z_{0}+\frac{z_{\alpha}+z_{0}}{1-a(z_{\alpha}+z_{0})}) $$ and the bias-corrected and accelerated confidence interval is given by $$ CI^{bca} = [q_{x(\alpha/2)}^*, q_{x(1-\alpha/2)}^*]. $$ The studentized confidence interval, here called **"t"** type confidence interval first studentizes the bootstrap parameter distribution, i.e. applies the transformation $\frac{\hat{\theta}_b-\hat{\theta}}{s_{boot}}$, and then builds the confidence interval based on the estimated quantile function of the studentized data $\hat{G}$: $$ CI^{t} = \left[\hat{\theta}+\hat{\sigma} \hat{G}^{-1}(\alpha / 2), \hat{\theta}+\hat{\sigma} \hat{G}^{-1}(1-\alpha / 2)\right] $$ The final supported confidence interval method is the **"basic"** bootstrap confidence interval, which is derived in section 3.4 of {cite}`Wassermann2006`, where it is called the pivotal confidence interval. It is given by $$ CI^{basic} = \left[\hat{\theta}+\left(\hat{\theta}-\hat{\theta}_{u}^{\star}\right), \hat{\theta}+\left(\hat{\theta}-\hat{\theta}_{l}^{\star}\right)\right], $$ where $\hat{\theta}_{u}^{\star}$ denotes the $1-\alpha/2$ empirical quantile of the bootstrap estimate distribution for parameter $\theta$ and $\hat{\theta}_{l}^{\star}$ denotes the $\alpha/2$ quantile. ```{eval-rst} .. bibliography:: ../../refs.bib :filter: docname in docnames ``` ================================================ FILE: docs/source/estimagic/explanation/bootstrap_montecarlo_comparison.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Bootstrap Monte Carlo Comparison" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "In this juypter notebook, we perform a Monte Carlo exercise to illustrate the importance of using the cluster robust variant of the bootstrap when data within clusters is correlated. 
$x_i$ and $x_g$ are drawn from a standard normal distribution, and $\\epsilon_i$ and $\\epsilon_g$ are drawn from a normal distribution with $\\mu=0$ and $\\sigma=0.5$. The value of $\\sigma$ is chosen to not blow up rejection rates in the independent case too much.
] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import matplotlib.pyplot as plt\n", "import numpy as np\n", "import pandas as pd\n", "import scipy\n", "import statsmodels.api as sm\n", "from joblib import Parallel, delayed\n", "\n", "import estimagic as em" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "def create_clustered_data(nclusters, nobs_per_cluster, true_beta=0):\n", " \"\"\"Create a bivariate clustered dataset with specified number of\n", " clusters and number of observations per cluster that has a population\n", " value of true_beta for the logit coefficient on the independent variable.\n", "\n", " Args:\n", " nclusters (int): Number of clusters.\n", " nobs_per_cluster (int): Number of observations per cluster.\n", " true_beta (int): The true logit coefficient on x.\n", "\n", " Returns:\n", " pd.DataFrame: Clustered dataset.\n", " \"\"\"\n", " x_cluster = np.random.normal(size=nclusters)\n", " x_ind = np.random.normal(size=nobs_per_cluster * nclusters)\n", " eps_cluster = np.random.normal(size=nclusters, scale=0.5)\n", " eps_ind = np.random.normal(size=nobs_per_cluster * nclusters, scale=0.5)\n", "\n", " y = []\n", " x = []\n", " cluster = []\n", "\n", " for g in range(nclusters):\n", " for i in range(nobs_per_cluster):\n", " key = (i + 1) * (g + 1) - 1\n", "\n", " arg = (\n", " true_beta * (x_cluster[g] + x_ind[key]) + eps_ind[key] + eps_cluster[g]\n", " )\n", "\n", " y_prob = 1 / (1 + np.exp(-arg))\n", " y.append(np.random.binomial(n=1, p=y_prob))\n", " x.append(x_cluster[g] + x_ind[(i + 1) * (g + 1) - 1])\n", " cluster.append(g)\n", "\n", " y = np.array(y)\n", " x = np.array(x)\n", " cluster = np.array(cluster)\n", "\n", " return pd.DataFrame({\"y\": y, \"x\": x, \"cluster\": cluster})" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Monte Carlo Simulation Code" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "The following 
function computes bootstrap t-values. As suggested by Cameron and Miller (2015), critical values are the 0.975 quantiles from a t distribution with `n_clusters` -1 degrees of freedom.
nobs_per_cluster (int): Number of observations per cluster.\n
] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [], "source": [ "np.random.seed(505)\n", "\n", "results_list = []\n", "\n", "for g, k in [[20, 50], [100, 10], [500, 2]]:\n", " results_list.append(monte_carlo(nsim=100, nclusters=g, nobs_per_cluster=k))" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [], "source": [ "mean_rejection_data = pd.DataFrame([x.mean() for x in results_list])\n", "mean_rejection_data[\"nclusters\"] = [20, 100, 500]\n", "mean_rejection_data.set_index(\"nclusters\", inplace=True)" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "Text(0.5, 0.98, 'Comparison of Rejection Rates')" ] }, "execution_count": 7, "metadata": {}, "output_type": "execute_result" }, { "data": { "image/png": "iVBORw0KGgoAAAANSUhEUgAAAsAAAAH2CAYAAAB+5DrCAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjYuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/P9b71AAAACXBIWXMAAA9hAAAPYQGoP6dpAACI/UlEQVR4nOzdd1xTV/8H8E/YG1RARFAcOEARJ+JCXLharQutdba2ddRZV5+6+rR11121S619qtbVVmu11gEOcFGse6I4AEVliAqSnN8f95dATEAIgQTyeb9eeZHce+7NNwnoh8O558iEEAJERERERCbCzNAFEBERERGVJAZgIiIiIjIpDMBEREREZFIYgImIiIjIpDAAExEREZFJYQAmIiIiIpPCAExEREREJoUBmIiIiIhMCgMwEREREZkUBmCiEpCRkYGvvvoKoaGhqFixIqysrFCuXDkEBwdj5syZiI+PN3SJRs3HxwcymczQZRjEpk2b0LhxY9jZ2UEmk8HHx+e1xyjfr9w3R0dHNGzYEHPmzMHTp0/1Utvs2bMhk8mwfv16vZxPX27dugWZTIa2bdsauhQNbdu21fhs7O3t4efnh0mTJuHhw4eGLpHIJFgYugCisu748ePo3bs3EhMTYWdnh+bNm6NixYpITU3FqVOnEB0djQULFmD37t3o0KGDocslI3Lq1Cm88847sLGxQadOneDi4gJXV9cCH9+7d284ODhACIE7d+4gKioKs2fPxvbt23Hs2DE4OjoWY/XFZ/369Rg2bBhmzZqF2bNnG7ocnYSFhcHDwwMAkJCQgOjoaHz11VfYsmULTpw4gcqVKxfp/IcPH0ZoaCiGDBlidL+gEBkDBmCiYhQbG4v27dvjxYsXmDp1KmbMmAF7e3vVfoVCgV9//RVTpkzB3bt3DVipcTtw4ABevnxp6DJK3K5du6BQKLBixQoMHz680McvWrRIrcf42rVraNWqFc6dO4dly5bh008/LVJ9Y8aMQf/+/VGpUqUinUffKleujEuXLsHOzs7QpeRp2rRpaj3UCQkJaN++PS5duoRZs2bhu+++M1xxRCaAQyCIiokQAoMGDcKLFy8we/ZszJs3Ty38AoCZmRl69eqFM2fOoEmTJgaq1PjVqFEDderUMXQZJU75S1H16tX1cj5fX19MnDgRALB
v374in8/V1RV16tSBs7Nzkc+lT5aWlqhTpw6qVKli6FIKrFKlSpg1axYA/Xw2RJQ/BmCiYrJ3716cP38eXl5e+M9//pNvW2dnZ9SrV09t27Nnz/Df//4X9erVg62tLZydndGmTRts3rxZ6zlyj5NdtWqV6rhq1aphwYIFEEIAAGJiYvDGG2+gfPnycHBwQI8ePXD79m2N8w0dOhQymQyHDx/Gn3/+iVatWsHBwQHlypVDr169cPnyZY1jXrx4ge+//x49evRA9erVYWtrCxcXl3zrzv08+/btQ2hoKFxcXCCTyZCSkqLx2nI7f/483nnnHVSvXh02NjZwc3NDYGAgxo8fj4SEBI32e/bsQceOHVGuXDnY2Nigdu3amDZtmup5css9vvXcuXN48803Ua5cOdjb2yMkJATHjx/X+nry8+jRI0yePBm+vr6wsbFB+fLl0blzZ/z1119q7davXw+ZTIZ169YBAEJDQ1XjRYv652x/f38AwIMHD7Tu37t3L7p16wY3NzdYW1ujevXqmDhxIh49eqTRNr8xwNnZ2Vi9ejWCg4Ph5OQEW1tbBAYGYunSpcjOztb63BkZGZg/fz6aNGkCJycn2Nvbo06dOhg9ejSuXr0KQBpDO2zYMADAnDlz1MbSKut43RjgjRs3olWrVnBycoKdnR0CAgIwd+5cvHjxQqNt7u/PyMhItGvXDo6OjnByckK3bt1w8eJFrc+hi/w+myNHjmDMmDEICAhAuXLlYGtrizp16mj9/h06dChCQ0MBABs2bFB7j14dMnLnzh2MGTMGNWrUUH1Pdu/ePc/v7+PHj6Nnz56oWrUqrK2t4eHhgWbNmmHatGl6G1tOVCIEERWL0aNHCwBiwoQJhT42LS1NNG7cWAAQbm5uok+fPqJLly7C2tpaABBjx47VOKZq1aoCgBg/frywtbUVXbt2Fd27dxeOjo4CgJg5c6Y4evSosLOzE40aNRL9+vUTNWvWFABEjRo1xLNnz9TON2TIEAFAjBo1SshkMtG0aVPRv39/4efnJwAIZ2dnERsbq3bMpUuXBADh6ekpQkNDRXh4uAgJCRGWlpYCgJg1a5ZG3crnGTFihNrzNG3aVKSkpKi9ttxOnz4tbGxsBAAREBAg+vXrJ7p3766q79ChQ2rtv/zySwFAWFhYiPbt24vw8HDh5eUlAIhatWqJxMREtfazZs0SAMTo0aOFnZ2dqF+/vggPDxcNGjQQAISNjY04d+5cQT9ScffuXVG9enUBQFSpUkWEh4eLdu3aCXNzcwFAfPXVV6q2R44cEUOGDBE1atQQAERYWJgYMmSIGDJkiDhy5Mhrn0v5fsXFxWns+/nnnwUA0apVK419U6dOFQCElZWVaNmypejTp4/w9fVVfY/k9R6tW7dObfuzZ89EaGioACDKly8vOnbsKN544w3h7u4uAIg333xTyOVytWPu378v/P39BQBRrlw58cYbb4g+ffqIRo0aCTMzM7FkyRIhhBBz584VLVu2FABEgwYNVO9L7vcmLi5OABAhISEar/H9999XfX5du3YVffr0Ea6urgKACA4OFhkZGWrtld+fEydOFObm5iIoKEj069dP1KpVSwAQFSpUEAkJCa/5RHKEhIRo/f4UQojjx48LAMLLy0tjX1BQkLCxsRHNmjUTvXv3Ft26dROVKlUSAIS/v79IT09Xtf32229FWFiY6nPL/R7t3LlT7fnKlSsnAIjatWuLXr16idatWwsLCwthbm4uNm/erFbD77//LszMzIRMJhNBQUGif//+onPnzqrvU23fb0TGigGYqJgo/5PeuHFjoY8dM2aMACBCQ0NFWlqaavulS5dUIWLXrl1qxyhDj6enp7h+/braMdbW1sLOzk74+PiI1atXq/ZlZmaKdu3aCQDihx9+UDuf8j9+AOKbb75RbVcoFKqgFBgYqHZMcnKy2L9/v1AoFGrbb968KXx8fISZmZnGf5K5n+fV/3BffW25DR48WAAQixYt0mh
/6dIlcf/+fdXjkydPCjMzM+Hg4CCio6NV21+8eCH69u0rAIjevXurnUMZ7gCIZcuWqe0bP368ACAGDRqktV5tunfvLgCIt99+W2RmZqq2HzlyRNjZ2Qlzc3Pxzz//qB2jfG+0haX85BeAle/b559/rrb9l19+EQBEvXr1xLVr11TbFQqFmDlzpgAgwsPD1Y7JKwCPGjVK1V75S4wQ0i92Xbt2FQDUvg+FEKJ9+/YCgOjXr59amBNCCrRnz55VPV63bl2ev1Ap22sLwNu2bVP9jFy9elW1PSUlRbRq1UoAEJMmTVI7RvkZmJmZqYXH7Oxs0bt3bwFAzJgxQ2sd2uQXgJXv83vvvaexb8+ePWrvpRDS968y0M+ZM0dt36FDhwQAMWTIEK11pKamikqVKglzc3Px008/qe07deqUKFeunHBwcBAPHjxQbW/Tpo0AILZt26ZxvpMnT6r9W0Vk7BiAiYpJnTp1BACxd+/eQh339OlTYWtrK8zMzMSlS5c09i9fvlwAEB06dFDbrgw93333ncYxb731Vp69fr/99pvW/yiV//G3aNFC45isrCxV72lBeiSFkHqlAIjly5drfZ5u3brleay2ANylSxcBQKMXWhtl6Js+fbrGvqSkJNX7HR8fr9quDHctW7bUOCY5OVkAEFWrVn3tcwshxI0bNwQA4eDgIB49eqSxf+LEiVqDj74CsEKhELdv3xazZs1S9d69GjKVPdvaerUVCoUIDAwU5ubm4uHDh6rt2gJwUlKSsLS0FN7e3hp/VRBCiISEBGFlZSUCAgJU206cOCEACHd39wKFKF0DsDLArV27VuOYs2fPCplMJhwcHMTz589V25WfwcCBAzWOOX36dJ49zXnRFoDv378vVqxYIWxsbETNmjXVfnl7nWfPngkLCwvRqFEjte2vC8BLlizRGviVvvrqK42/TNStW1cA0AjiRKURxwATGZkzZ87g+fPnaNSokdYLvwYNGgQAOHbsGBQKhcb+Tp06aWxTXkSV3z5tY2YBoH///hrbLC0t0adPHwDS2MRXHT16FJ9//jlGjhyJYcOGYejQodi6dSsAaSYCbd58802t2/PSuHFjAMDo0aNx+PDhPMeV5q5x4MCBGvvc3d3RqVMnKBQKHDt2TGO/tvesQoUKKF++fJ7v2auOHj0KAOjcuTPKly+vsV/5mWp7L4uiWrVqkMlkMDMzQ9WqVTFnzhx07twZR44cgYODg6rdgwcPcPbsWfj6+mqMRQcAmUyGli1bQi6X48yZM/k+5+HDh/Hy5Ut07twZtra2Gvs9PDzg6+uLc+fO4fnz5wCAv//+GwAwYMCAYpua7eXLl4iOjgag/fsgICAAAQEBePr0KWJjYzX2a/s+qFWrFoC8f3byk3tct6enJz766CP4+fnhzJkzec6qce/ePaxZswbjx4/H8OHDMXToUIwcORJWVlZ5/lzlRTnuvFevXlr3t27dGgBw8uRJ1Tblz9ygQYNw6tQprf/+EJUWnAaNqJhUqFABAAo9sf39+/cBIM8FD1xcXODs7IzU1FQ8efJE9TxK2uYPVYad/PZlZmZqfb6qVatq3a6sT1kvAKSmpqJXr144ePCg1mMAID09Xev2wl6xP3nyZBw9elQ136mDgwOCg4PRrVs3DB06VG1mgte9p8rt9+7d09jn5eWl9RhHR0c8fvy4QLUW5fmLQjkPcFZWFq5cuYKYmBj8+eef+PLLL1UzDgDSRWOA9MvJ6xYcSU5Ozne/8lzffvstvv3223zbPn78GJUrV8adO3cASLN9FJdHjx4hKysLrq6uGrOxKPn4+ODs2bMF/j5QhvW8fnbyo5wHWC6XIy4uDsePH0dMTAzGjRunuvgxt6+++grTpk3T23SAys+pZcuW+bbL/Xl/+eWXOHfuHHbt2oVdu3ahXLlyaNWqFd58803VfNVEpQUDMFExCQwMxLFjxxATE4N33nlHr+fOL6SYmeX9h5389unD1KlTcfDgQYSEhGDOnDm
oV68eXFxcYG5ujr/++gthYWGq2SheVdj/PJ2cnHDw4EEcO3YMu3btwuHDh3Hw4EHs378fc+fOxZEjR+Dr61ugc+n6fupLca1y9+o8wFu2bMGAAQPw2WefoXPnzggKCgIAVU+eh4cHwsLC8j1nXr8QKSnPFRgYiAYNGuTb1tra+nUvoUSV5PfBq/MAR0ZGIiwsDOvXr0e3bt1Uf2EBgOjoaEyaNAnOzs5YtmwZ2rZtCw8PD9X75+npWeheaOXn1KdPnzx/IQCg9lcob29vnD59GgcPHsTu3bsRERGhCsMLFixAVFSUxi/kRMaKAZiomHTr1g2rVq3C1q1bsWDBAlhYFOzHzdPTEwC0Tk0GSL2sKSkpsLW1Rbly5fRWb17yqkO5XVkvAOzcuRPm5ub4/fff4eTkpNb+5s2beq9NJpOhVatWaNWqFQDpT/njx4/Hpk2b8J///Ae//PKLqsa4uDjcvn0bfn5+GudR9oYVdfWtvLzuMy3u51cKDw/HwYMH8c0332D69Omqnnpl76arq2uRp1lTnqtVq1ZYsWJFgY7x9vYGANy4caNIz52fChUqwMrKCsnJycjIyNAa+krqc9CmTZs2mDlzJj755BN88skneOutt2Bubg5A+rkCgC+++AJDhgxRO+758+dITEws9PN5eXnhypUrmDZtmmpoQ0FYWFigU6dOqiEht2/fxvDhw3Hw4EHMnz8fCxYsKHQtRIbAMcBExaRz587w9/fH3bt38cUXX+TbNi0tDRcuXAAgjbOztbXFmTNntI7r++mnnwBIf7osid5JZYjMLTs7G9u3bwcAVfgEgCdPnsDJyUkj/OZ1Hn1zd3dXzXN6/vx51XbleMZNmzZpHPPw4UPs27dPNc61OCjfo71792qdc1j5mSrrLE6zZ8+GjY0NDh06pJrr1cvLC3Xq1MHFixdV8+3qKjQ0FObm5ti9e3eB/1yvXAJ806ZNBZpL1srKCgDyHff9KktLSzRv3hwAtM5Jff78eZw9exYODg4IDAws8Hn1afz48fDw8MC1a9ewZcsW1fYnT54A0D4MY+vWrVr/qvK696hjx44AcsK1rqpWrYqpU6cCUP+ZIzJ2DMBExUQmk+Gnn36CjY0NZs+ejenTpyMjI0OtjRACv//+O5o0aYJTp04BAOzt7TF8+HAoFAqMHj1a7ZirV6/i888/BwCMHTu2RF7H0aNH8cMPP6htmzVrFuLj4xEQEKAW2mrVqoUnT56o/ecNAEuWLMGhQ4f0WteaNWsQFxensX3Pnj0AcnoVAelCOTMzMyxfvhynT59Wbc/KysJHH32E58+fo1evXmrH6FP16tXRrVs3pKenY9y4cWrBMCoqCqtXr4a5uTlGjx5dLM+fW6VKlfDhhx8CgNovZjNmzIBCoUDv3r21XgT26NGj147pBaTe0+HDh+PWrVsYMGAAkpKSNNpcv35d9QsUADRr1gyhoaF48OAB3n//fY2fk1u3buHcuXOqx8oe9StXrry2ntw++ugjANIvAbn/IpGeno4xY8ZACIEPPvjAYGNZbW1tMW3aNADA3LlzVcFWebHd999/r/a9c/HiRVX4fNXr3qMPPvgA7u7uWLBgAb755huNC9qys7Oxb98+tVC7ZMkSrb3N2n7miIyeQeegIDIBR48eFRUrVhQAhJ2dnWjfvr14++23Rbdu3VTbbWxsxN9//606JvdCGO7u7qJv376ia9euqoUf8lsIQ5u85msVIu8po5TTP40cOVLIZDLRrFkzMWDAANViBU5OTiImJkbtmJ9++kk1d27r1q3FgAEDhJ+fnzAzMxMTJkzId7q1/Kb60vbalNN2+fn5id69e2ssUnH06FG19l988YVqIYwOHTqI/v37C29vbwFA+Pr6FniRh/xqys/du3dFtWrVVNOn9e/fX7Rv3161EMbixYs1jimOeYCFkKYis7W1FQDU5h7+5JNPVHPeNmrUSPTt21f06dNHNGzYUJibmwtnZ2e18+S3EEbHjh0FAGF
vby9atmwpBgwYIN58803V4is9evTQeH9q166tWjzjzTffFH379tVYCEMIIZ4/f66aDzskJEQMGzZMvPvuu+LYsWNCiIIthGFrayu6desm+vbtK9zc3AQA0bx58zwXwsjrM1B+ngWV3zzAytemXODi119/FUJI0+55eHgIAKJatWqiX79+okOHDsLS0lL07ds3z+/FgIAAAUA0bdpUDB06VLz77rvit99+U+2PiopSLQLi7e0tunTpIt5++23Rrl074eLiIgCozX3s7OwszMzMRMOGDUW/fv1E3759VQuClC9fXm1uZSJjxwBMVALS09PFokWLREhIiHBzcxMWFhbCxcVFBAUFiVmzZok7d+5oHPP06VMxZ84c4efnJ6ytrYWjo6No1aqV+Pnnn7U+R3EF4EOHDoldu3aJ4OBgYWdnJ5ydnUWPHj3EhQsXtD7XH3/8IZo3by4cHR2Fi4uL6NChgzh8+HCe85LqGoB///13MXz4cOHv7y9cXFyEnZ2dqFWrlnjvvffE5cuXtZ5n9+7don379sLZ2VlYWVmJmjVriilTpojHjx8X6j3Lq6bXSU5OFpMmTRI1atQQVlZWwsXFRXTq1Ens27dPa/viCsBC5Mw93LdvX7XtERERom/fvsLT01NYWlqKChUqiICAADFmzBgRERGh1lb5Hq1fv17j/NnZ2WLDhg2iXbt2onz58sLS0lJ4enqK4OBgMWfOHHHlyhWNY9LS0sRnn30mAgIChK2trXBwcBB16tQRY8aMUVucQwhpsYaOHTsKZ2dnIZPJ1D6r/AKwEEL8+OOPokWLFsLBwUHY2NgIf39/8cUXX2idt7ikA7AQOXN9N23aVLXtzp074u233xaVK1cWNjY2om7dumLevHkiOzs7z+/Fa9euiZ49e4oKFSoIMzMzrXMnJyQkiClTpgh/f39hZ2cn7OzsRI0aNUSPHj3E+vXr1eaL/vHHH8Xbb78tateuLRwdHYWjo6Pw8/MTEydOFHfv3i3we0BkDGRC5HFJNhGZtKFDh2LDhg04dOiQ2tXqREpTp07FggUL8Msvv6Bv376GLoeIqMA4BpiIiHSiXBSjOOfvJSIqDgzARERUKDNmzECzZs1w4MAB1K1bFw0bNjR0SUREhcIATEREhbJ7925cvHgRXbp0wW+//VZsC3kQERUXjgEmIiIiIpPCHmAiIiIiMikMwERERERkUhiAiYiIiMikMAATERERkUlhACYiIiIik8IATEREREQmhQGYiIiIiEwKAzARERERmRQGYCIiIiIyKQzARERERGRSGICJiIiIyKQwABMRERGRSWEAJiIiIiKTwgBMRERERCaFAZiIiIiITAoDMBERERGZFAZgIiIiIjIpDMBEREREZFIYgImIiIjIpDAAExEREZFJYQAmIiIiIpPCAExEREREJoUBmIiIiIhMCgMwEREREZkUBmAiIiIiMikMwERERERkUhiAiYiIiMikMAATERERkUmxMHQBpYFCocD9+/fh6OgImUxm6HKIiIiI6BVCCKSnp8PT0xNmZvn38TIAF8D9+/fh7e1t6DKIiIiI6DXu3LkDLy+vfNswABeAo6MjAOkNdXJyMnA1RERERPSqtLQ0eHt7q3JbfhiAC0A57MHJyYkBmIiIiMiIFWS4Ki+CIyIiIiKTwgBMRERERCaFAZiIiIiITArHABMREZkQIQSys7Mhl8sNXQpRoZibm8PCwkIvU9IyABMREZmIrKwsJCQk4NmzZ4YuhUgndnZ2qFSpEqysrIp0HgZgIiIiE6BQKBAXFwdzc3N4enrCysqKiztRqSGEQFZWFh4+fIi4uDj4+vq+drGL/DAAExERmYCsrCwoFAp4e3vDzs7O0OUQFZqtrS0sLS1x+/ZtZGVlwcbGRudz8SI4IiIiE1KUXjMiQ9PX9y9/CoiIiIjIpDAAGxm5HDh8GNi0SfrKi3SJiIj0x8fHB0uXLlU9TkxMRMeOHWFvbw8XFxeD1UUliwHYiOzYAfj
4AKGhwNtvS199fKTtRERExqKkO2vatm2L8ePHa2xfv359oUPrqVOn8P7776seL1myBAkJCYiNjcXVq1eLWKl+yGQy1c3CwgJVqlTBxIkTkZmZqdfnmT17NgIDAwt1zKu/QJRWvAjOSOzYAfTpAwihvv3ePWn7tm1Ar16GqY2IiEhpxw5g3Djg7t2cbV5ewLJlpeP/KTc3N7XHN27cQOPGjeHr66vzObOysoo8Lder1q1bh86dO+Ply5c4e/Yshg0bBnt7e/z3v//V6/MUB7lcDplMZtTjzY23MhMil0v/mLwafoGcbePHczgEEREZlrKzJnf4BXI6awz9F8uhQ4eiZ8+eWLRoESpVqoQKFSpg9OjRePnypapN7h5MHx8fbN++HT/++CNkMhmGDh0KAIiPj0ePHj3g4OAAJycn9OvXD0lJSapzKHtOv/vuO1SrVk01G4FMJsPatWvRvXt32NnZoW7duoiKisL169fRtm1b2Nvbo0WLFrhx48ZrX4uLiws8PDzg7e2N7t27o0ePHoiJiVFrs3r1atSoUQNWVlaoXbs2Nm7cqLY/v9exfv16zJkzB2fPnlX1Nq9fvx5CCMyePRtVqlSBtbU1PD09MXbsWABST/zt27cxYcIE1THKc7m4uOD333+Hn58frK2tER8fj1OnTqFjx45wdXWFs7MzQkJCNF6DTCbD6tWr0aVLF9ja2qJ69erYtm3ba9+fomIANgJHjmj+Y5KbEMCdO1I7IiIifRECyMgo2C0tDRg7Nv/OmnHjpHYFOZ+28+jDoUOHcOPGDRw6dAgbNmzA+vXrsX79eq1tT506hc6dO6Nfv35ISEjAsmXLoFAo0KNHDzx+/BgRERHYv38/bt68ifDwcLVjr1+/ju3bt2PHjh2IjY1Vbf/vf/+LwYMHIzY2FnXq1MHbb7+NDz74ANOnT8fp06chhMCYMWMK9ZquXr2KgwcPIigoSLVt586dGDduHCZNmoTz58/jgw8+wLBhw3Do0CEAeO3rCA8Px6RJk+Dv74+EhAQkJCQgPDwc27dvx5IlS7B27Vpcu3YNv/76K+rXrw8A2LFjB7y8vPDZZ5+pjlF69uwZ5s+fj++++w4XLlyAu7s70tPTMWTIEBw9ehTR0dHw9fVF165dkZ6ervb6ZsyYgd69e+Ps2bMYOHAg+vfvj0uXLhXqPSo0Qa+VmpoqAIjU1NRiOf/PPwsh/VOQ/+3nn4vl6YmIyAQ8f/5cXLx4UTx//ly17enTgv3/Uxy3p08LXntISIgYN26cxvZ169YJZ2dn1eMhQ4aIqlWriuzsbNW2vn37ivDwcNXjqlWriiVLlqge9+jRQwwZMkT1+K+//hLm5uYiPj5ete3ChQsCgDh58qQQQohZs2YJS0tL8eDBA7V6AIhPP/1U9TgqKkoAEN9//71q26ZNm4SNjU2+rxeAsLGxEfb29sLa2loAEN27dxdZWVmqNi1atBAjRoxQO65v376ia9euhXodDRo0UDvH4sWLRa1atdSeK7dX3z8hpM8BgIiNjc33dcnlcuHo6Ch27dql9lo//PBDtXZBQUFi5MiRWs+h7ftYqTB5jT3ARqBSJf22IyIiMlX+/v4wNzdXPa5UqRIePHhQ4OMvXboEb29veHt7q7b5+fnBxcVFrVeyatWqGuOJASAgIEB1v2LFigCg6kFVbnvx4gXS0tLyrWPJkiWIjY3F2bNnsXv3bly9ehWDBg1Sq7Nly5Zqx7Rs2VJVY0Ffx6v69u2L58+fo3r16hgxYgR27tyJ7OzsfGsFACsrK7XXDgBJSUkYMWIEfH194ezsDCcnJzx9+hTx8fFq7YKDgzUeF3cPMC+CMwKtW0sXENy7p/1PQjKZtL9165KvjYiIyi47O+Dp04K1jYwEunZ9fbs9e4A2bQr23AXl5OSE1NRUje0pKSlwdnZW22Zpaan2WCaTQaFQFPzJCsje3l7r9tzPrxwjq23b62ry8PBAzZo1AQC1a9dGeno6BgwYgM8//1y
1vTh4e3vjypUr+Pvvv7F//36MGjUKCxcuREREhMZ7m5utra3G0tpDhgzBo0ePsGzZMlStWhXW1tYIDg5GVlZWsdVfUOwBNgLm5tLVs4AUdrVZulRqR0REpC8yGWBvX7Bbp05SZ0xe/0/JZIC3t9SuIOfL6zza1K5dW+PiKQCIiYlBrVq1dHz12tWtWxd37tzBnTt3VNsuXryIlJQU+Pn56fW5CkPZq/38+XMAUp3Hjh1Ta3Ps2DFVjQV5HVZWVpBrucLe1tYWb7zxBpYvX47Dhw8jKioK586dy/cYbY4dO4axY8eia9eu8Pf3h7W1NZKTkzXaRUdHazyuW7dugZ5DV+wBNhK9eklTnb06tQwAfPxx6ZhahoiIyi5lZ02fPlJ4zf0XS2WYLa7OmpEjR2LlypUYO3Ys3nvvPVhbW+OPP/7Apk2bsGvXLr0+V4cOHVC/fn0MHDgQS5cuRXZ2NkaNGoWQkBA0adJEr8+Vn5SUFCQmJkKhUODatWv47LPPUKtWLVUwnDx5Mvr164eGDRuiQ4cO2LVrF3bs2IG///67wK/Dx8cHcXFxiI2NhZeXFxwdHbFp0ybI5XIEBQXBzs4OP/30E2xtbVG1alXVMZGRkejfvz+sra3h6uqa52vw9fXFxo0b0aRJE6SlpWHy5MmwtbXVaLd161Y0adIErVq1wv/+9z+cPHkS33//vb7fUjXsATYivXoBt24Bhw4BP/8MvPOOtD0ioviuliUiIiooZWdN5crq2728ine++urVqyMyMhKXL19Ghw4dEBQUhF9++QVbt25F586d9fpcMpkMv/32G8qVK4c2bdqgQ4cOqF69OrZs2aLX53mdYcOGoVKlSvDy8sKAAQPg7++PP//8ExYWUt9lz549sWzZMixatAj+/v5Yu3Yt1q1bh7Zt2xb4dfTu3RudO3dGaGgo3NzcsGnTJri4uODbb79Fy5YtERAQgL///hu7du1ChQoVAACfffYZbt26hRo1amgdA53b999/jydPnqBRo0YYNGgQxo4dC3d3d412c+bMwebNmxEQEIAff/wRmzZtKvbedtn/X4FH+UhLS4OzszNSU1Ph5ORUYs/74AFQpQqQmSmF4IKMqSIiItLmxYsXiIuLU5u3VldyuTQ1Z0KCdIF269Ycpke6kclk2LlzJ3r27Fmg9vl9Hxcmr7EH2Ii5uwPDhkn3FywwbC1ERERK5uZA27bAgAHSV4ZfKm0YgI3cpEnS2Ko//gAuXDB0NURERESlHwOwkatZM2dM1aJFhq2FiIiISJ+EEAUe/qBPDMClwOTJ0tf//S//JZOJiIiI6PUYgEuBoCAgJAR4+TJnvmAiIiIi0g0DcCkxZYr0de1aQMtiOERERERUQAzApUSXLkC9ekB6uhSCiYiIiEg3DMClhEwmrQgHSCvtZGYatBwiIiKiUosBuBQZMEBafSchQbogjoiIiIgKjwG4FLGyAiZMkO4vXAgoFIath4iIyJjIZDL8+uuvhi6DSgEG4FJmxAjA2Rm4fBnYvdvQ1RARkUmSy4HDh4FNm6SvcnmxP2ViYiI++ugjVK9eHdbW1vD29sYbb7yBAwcOFMvzHT58GDKZDCkpKcVyfkAK7MqbhYUFqlSpgokTJyJTz+McZ8+ejcDAwEId4+Pjg6VLl+q1DmPCAFzKODkBI0dK9xcuNGwtRERkgnbsAHx8gNBQ4O23pa8+PtL2YnLr1i00btwYBw8exMKFC3Hu3Dns3bsXoaGhGD16dLE9rz4IIZCdnZ3n/nXr1iEhIQFxcXH4+uuvsXHjRnz++eclWKHu5HI5FKX0z9EMwKXQ2LHScIijR4Hjxw1dDRERmYwdO4A+fTRXZbp3T9peTCF41KhRkMlkOHnyJHr37o1atWrB398fEydORHR0tNZjtPXgxsbGQiaT4datWwCA27dv44033kC5cuVgb28Pf39/7NmzB7du3UJoaCgAoFy5cpDJZBg6dCgAQKFQYO7cuahWrRpsbW3RoEEDbNu
2TeN5//zzTzRu3BjW1tY4evRonq/NxcUFHh4e8Pb2Rvfu3dGjRw/ExMSotVm9ejVq1KgBKysr1K5dGxs3blTbHx8fjx49esDBwQFOTk7o168fkpKSAADr16/HnDlzcPbsWVVv8/r16yGEwOzZs1GlShVYW1vD09MTY8eOBQC0bdsWt2/fxoQJE1THKM/l4uKC33//HX5+frC2tkZ8fDxOnTqFjh07wtXVFc7OzggJCdF4DTKZDKtXr0aXLl1ga2uL6tWrq71vJY0BuBSqVAkYNEi6z15gIiLSmRBARkbBbmlpUg+MENrPAwDjxkntCnI+befR4vHjx9i7dy9Gjx4Ne3t7jf0uLi46v/zRo0cjMzMTkZGROHfuHObPnw8HBwd4e3tj+/btAIArV64gISEBy/5/Jaq5c+fixx9/xJo1a3DhwgVMmDAB77zzDiIiItTOPW3aNMybNw+XLl1CQEBAgeq5evUqDh48iKCgINW2nTt3Yty4cZg0aRLOnz+PDz74AMOGDcOhQ4cASIG8R48eePz4MSIiIrB//37cvHkT4eHhAIDw8HBMmjQJ/v7+SEhIQEJCAsLDw7F9+3YsWbIEa9euxbVr1/Drr7+ifv36AIAdO3bAy8sLn332meoYpWfPnmH+/Pn47rvvcOHCBbi7uyM9PR1DhgzB0aNHER0dDV9fX3Tt2hXp6elqr2/GjBno3bs3zp49i4EDB6J///64dOlSYT4y/RH0WqmpqQKASE1NNXQpKpcuCQEIIZNJ94mIiPLz/PlzcfHiRfH8+fOcjU+fSv+ZGOL29GmB6j5x4oQAIHbs2PHatgDEzp07hRBCHDp0SAAQT548Ue3/559/BAARFxcnhBCifv36Yvbs2VrPpe34Fy9eCDs7O3H8+HG1tu+++64YMGCA2nG//vprgeq1sbER9vb2wtraWgAQ3bt3F1lZWao2LVq0ECNGjFA7rm/fvqJr165CCCH++usvYW5uLuLj41X7L1y4IACIkydPCiGEmDVrlmjQoIHaORYvXixq1aql9ly5Va1aVSxZskRt27p16wQAERsbm+/rksvlwtHRUezatUvttX744Ydq7YKCgsTIkSPzPdertH4f/7/C5DX2AJdSdeoAPXpI/4osXmzoaoiIiIqHKGBPsS7Gjh2Lzz//HC1btsSsWbPw77//5tv++vXrePbsGTp27AgHBwfV7ccff8SNGzfU2jZp0qRANSxZsgSxsbE4e/Ysdu/ejatXr2KQ8s+8AC5duoSWLVuqHdOyZUtVz+mlS5fg7e0Nb29v1X4/Pz+4uLjk27vat29fPH/+HNWrV8eIESOwc+fOfMcqK1lZWWn0aCclJWHEiBHw9fWFs7MznJyc8PTpU8THx6u1Cw4O1nhsqB5gBuBSbPJk6euPPwKJiYathYiISiE7O+Dp04Ld9uwp2Dn37CnY+ezsCnQ6X19fyGQyXL58uVAvzcxMiji5A/TLly/V2rz33nu4efMmBg0ahHPnzqFJkyZYsWJFnud8+vQpAOCPP/5AbGys6nbx4kWN8azahmto4+HhgZo1a6J27dro1q0b5syZgy1btuD69esFOl5X3t7euHLlCr7++mvY2tpi1KhRaNOmjcZ79CpbW1vVmGClIUOGIDY2FsuWLcPx48cRGxuLChUqICsrqzhfQpEwAJdiLVsCLVoAWVnA8uWGroaIiEodmQywty/YrVMnwMtLOiavc3l7S+0Kcr68zvOK8uXLIywsDKtWrUJGRobG/rymKXNzcwMAtfGrsbGxGu28vb3x4YcfYseOHZg0aRK+/fZbAFJPJyDNdKCU+8KvmjVrqt1y98AWhbm5OQDg+fPnAIC6devi2LFjam2OHTsGPz8/1f47d+7gzp07qv0XL15ESkqKqo2VlZXa61CytbXFG2+8geXLl+Pw4cOIiorCuXPn8j1Gm2PHjmHs2LHo2rUr/P39YW1tjeTkZI12r16wGB0djbp16xboOfTN6ALwqlWr4OPjAxs
bGwQFBeHkyZN5tr1w4QJ69+4NHx8fyGQyrfPVzZ07F02bNoWjoyPc3d3Rs2dPXLlypRhfQcmaMkX6+vXXwCtjzYmIiPTH3Bz4/wvBNMKr8vHSpVI7PVu1ahXkcjmaNWuG7du349q1a7h06RKWL1+u8Wd1JWUonT17Nq5du4Y//vgDi18ZMzh+/Hjs27cPcXFxiImJwaFDh1SBrGrVqpDJZNi9ezcePnyIp0+fwtHRER9//DEmTJiADRs24MaNG4iJicGKFSuwYcMGnV5bSkoKEhMTcf/+fUREROCzzz5DrVq1VHVMnjwZ69evx+rVq3Ht2jV89dVX2LFjBz7++GMAQIcOHVC/fn0MHDgQMTExOHnyJAYPHoyQkBDVMAwfHx/ExcUhNjYWycnJyMzMxPr16/H999/j/PnzuHnzJn766SfY2tqiatWqqmMiIyNx7949rWE2N19fX2zcuBGXLl3CiRMnMHDgQNja2mq027p1K3744QdcvXoVs2bNwsmTJzFmzBid3rciK9TI42K2efNmYWVlJX744Qdx4cIFMWLECOHi4iKSkpK0tj958qT4+OOPxaZNm4SHh4fGYG0hhAgLCxPr1q0T58+fF7GxsaJr166iSpUq4mkBB98LYZwXwSnJ5ULUri1dT7B4saGrISIiY5XfxUOFsn27EF5e6he0eXtL24vR/fv3xejRo0XVqlWFlZWVqFy5snjzzTfFoUOHVG2Q6yI4IYQ4evSoqF+/vrCxsRGtW7cWW7duVbsIbsyYMaJGjRrC2tpauLm5iUGDBonk5GTV8Z999pnw8PAQMplMDBkyRAghhEKhEEuXLhW1a9cWlpaWws3NTYSFhYmIiAghhPaL5/ICQHWTyWSiUqVKIjw8XNy4cUOt3ddffy2qV68uLC0tRa1atcSPP/6otv/27dvizTffFPb29sLR0VH07dtXJCYmqva/ePFC9O7dW7i4uAgAYt26dWLnzp0iKChIODk5CXt7e9G8eXPx999/q46JiooSAQEBqovzhJAugnN2dtZ4HTExMaJJkybCxsZG+Pr6iq1bt2pcRAdArFq1SnTs2FFYW1sLHx8fsWXLlte+R6/S10Vwsv8vyigEBQWhadOmWLlyJQBpag9vb2989NFHmDZtWr7H+vj4YPz48Rg/fny+7R4+fAh3d3dERESgTZs2BaorLS0Nzs7OSE1NhZOTU4GOKUnffw+89570l6mbNwFLS0NXRERExubFixeIi4tDtWrVYGNjU7STyeXAkSNAQoI0N2fr1sXS80tlh0wmw86dO9GzZ88inSe/7+PC5DWjGQKRlZWFM2fOoEOHDqptZmZm6NChA6KiovT2PKmpqQCkMUV5yczMRFpamtrNmL3zDuDhIc1LvnmzoashIqIyz9wcaNsWGDBA+srwS6WM0QTg5ORkyOVyVKxYUW17xYoVkainKQ4UCgXGjx+Pli1bol69enm2mzt3LpydnVU3fQ1sLy7W1tLc4wCwYEGB5xYnIiIiMklGE4BLwujRo3H+/Hlsfk036fTp05Gamqq65b6y0lh9+CHg4ACcPw/s3WvoaoiIiIhyCCGKPPxBn4wmALu6usLc3Fy1drVSUlISPDw8inz+MWPGYPfu3Th06BC8vLzybWttbQ0nJye1m7FzcQE++EC6v2CBQUshIiIiMmpGE4CtrKzQuHFjHDhwQLVNoVDgwIEDeU5xUhBCCIwZMwY7d+7EwYMHUa1aNX2Ua5TGjwcsLIDDh4FTpwxdDREREZFxMpoADAATJ07Et99+iw0bNuDSpUsYOXIkMjIyMGzYMADA4MGDMX36dFX7rKws1SosWVlZuHfvHmJjY9VWTxk9ejR++ukn/Pzzz3B0dERiYiISExNVE0yXJV5ewNtvS/cXLjRsLUREZJyMaPInokLT1/evUU2DBgArV67EwoULkZiYiMDAQCxfvhxBQUEAgLZt28LHxwfr168HANy6dUtrj25ISAgOHz4MABrL9SmtW7cOQ4c
OLVBNxj4NWm7nzgEBAYCZGXDlClCzpqErIiIiYyCXy3H16lW4u7ujQoUKhi6HSCePHj3CgwcPUKtWLdWqeUqFyWtGF4CNUWkKwADQrZu0FPvIkdIKcURERIC0LHBKSgrc3d1hZ2eXZycRkbERQuDZs2d48OABXFxcUKlSJY02DMB6VtoCcESENC2jjQ1w+zbg7m7oioiIyBgIIZCYmIiUlBRDl0KkExcXF3h4eGj95a0wec2iuAokw2nTBmjWDDh5Eli1Cpgzx9AVERGRMZDJZKhUqRLc3d3x8uVLQ5dDVCiWlpYawx50xR7gAihtPcAAsG0b0LcvUL48EB8P2NsbuiIiIiKi4lMql0Im/XrrLaBGDeDxY+CHHwxdDREREZHxYAAuo8zNgY8/lu4vXgxkZxu2HiIiIiJjwQBchg0ZAri5SRfCbd1q6GqIiIiIjAMDcBlmawt89JF0f+FCgKO9iYiIiBiAy7xRowA7O+Cff4Bcq0wTERERmSwG4DKuQgXgvfek+wsWGLYWIiIiImPAAGwCJkyQLorbv1/qCSYiIiIyZQzAJsDHBwgPl+4vXGjQUoiIiIgMjgHYREyeLH395Rfg1i2DlkJERERkUAzAJiIwEOjYEZDLgSVLDF0NERERkeEwAJuQKVOkr999Bzx6ZNhaiIiIiAyFAdiEtG8PNGwIPHsGfP21oashIiIiMgwGYBMik+X0Aq9YATx/bth6iIiIiAyBAdjE9OkjzQrx8CGwYYOhqyEiIiIqeQzAJsbCApg4Ubq/aJF0URwRERGRKWEANkHDhwPlywM3bgA7dxq6GiIiIqKSxQBsguztgTFjpPsLFgBCGLYeIiIiopLEAGyixowBbGyAU6eAyEhDV0NERERUchiATZSbGzBsmHR/wQLD1kJERERUkhiATdjEiYCZGbBnD3DunKGrISIiIioZDMAmrGZNoHdv6f6iRYathYiIiKikMACbuMmTpa8//wzcuWPYWoiIiIhKAgOwiWvaFGjbFsjOBpYtM3Q1RERERMWPAZhUyyOvXQukpBi0FCIiIqJixwBM6NwZqFcPePoUWLPG0NUQERERFS8GYIJMltMLvGwZ8OKFYeshIiIiKk4MwAQA6N8f8PYGEhOBn34ydDVERERExYcBmAAAlpbA+PHS/UWLAIXCoOUQERERFRsGYFIZMQJwdgauXAF27TJ0NURERETFgwGYVBwdgVGjpPtcHpmIiIjKKgZgUjN2LGBlBRw/Dhw7ZuhqiIiIiPSPAZjUeHgAQ4ZI9xcuNGwtRERERMWBAZg0TJokTY3222/A5cuGroaIiIhIvxiASUPt2kCPHtL9RYsMWwsRERGRvjEAk1bKhTE2bgTu3zdsLURERET6xABMWgUHA61aAVlZwPLlhq6GiIiISH8YgClPkydLX9esAdLSDFsLERERkb4wAFOeuncH6tQBUlOBb781dDVERERE+sEATHkyM8vpBV6yRBoOQURERFTaMQBTvgYOBCpVAu7dAzZtMnQ1REREREXHAEz5srYGxo+X7i9cCAhh0HKIiIiIiowBmF7rgw8AR0fgwgXgzz8NXQ0RERFR0TAA02s5O0shGAAWLDBsLURERERFxQBMBTJuHGBpCUREACdOGLoaIiIiIt0ZXQBetWoVfHx8YGNjg6CgIJw8eTLPthcuXEDv3r3h4+MDmUyGpUuXFvmcpJ2Xl3RBHCCNBSYiIiIqrYwqAG/ZsgUTJ07ErFmzEBMTgwYNGiAsLAwPHjzQ2v7Zs2eoXr065s2bBw8PD72ck/L28cfS1x07gGvXDFsLERERka6MKgB/9dVXGDFiBIYNGwY/Pz+sWbMGdnZ2+OGHH7S2b9q0KRYuXIj+/fvD2tpaL+ekvPn7A926STNBfPWVoashIiIi0o3RBOCsrCycOXMGHTp0UG0zMzNDhw4dEBUVZTTnNHVTpkhf160DkpIMWwsRERGRLowmACcnJ0Mul6NixYpq2ytWrIjExMQSPWdmZib
S0tLUbiRp3RoICgIyM4GVKw1dDREREVHhGU0ANiZz586Fs7Oz6ubt7W3okoyGTJbTC7xqFfD0qWHrISIiIiosownArq6uMDc3R9Irf1dPSkrK8wK34jrn9OnTkZqaqrrduXNHp+cvq3r0AHx9gSdPgO+/N3Q1RERERIVjNAHYysoKjRs3xoEDB1TbFAoFDhw4gODg4BI9p7W1NZycnNRulMPcHJg0Sbr/1VfAy5eGrYeIiIioMIwmAAPAxIkT8e2332LDhg24dOkSRo4ciYyMDAwbNgwAMHjwYEyfPl3VPisrC7GxsYiNjUVWVhbu3buH2NhYXL9+vcDnJN0MHgy4uwPx8cDWrYauhoiIiKjgLAxdQG7h4eF4+PAhZs6cicTERAQGBmLv3r2qi9ji4+NhZpaT2e/fv4+GDRuqHi9atAiLFi1CSEgIDh8+XKBzkm5sbYGxY4FPP5WWRx4wQBofTERERGTsZEIIYegijF1aWhqcnZ2RmprK4RC5PH4MVKkCZGQA+/YBnToZuiIiIiIyVYXJa0Y1BIJKl/Llgffek+5zeWQiIiIqLRiAqUgmTJAuivv7byAmxtDVEBEREb0eAzAVSdWqQP/+0n32AhMREVFpwABMRTZ5svT1l1+AuDjD1kJERET0OgzAVGQNGgBhYYBCIc0LTERERGTMGIBJL5S9wN9/DyQnG7YWIiIiovwwAJNetGsHNGoEPH8OfP21oashIiIiyhsDMOmFTAZMmSLdX7ECePbMsPUQERER5YUBmPSmd2+gWjVpCMT69YauhoiIiEg7BmDSGwsLYNIk6f7ixYBcbth6iIiIiLRhACa9GjYMqFABuHkT2LHD0NUQERERaWIAJr2yswPGjJHuz58PCGHYeoiIiIhexQBMejd6NGBrC5w5Axw+bOhqiIiIiNQxAJPeubkBw4dL9xcsMGwtRERERK9iAKZiMXEiYGYG7N0L/PuvoashIiIiysEATMWienWgTx/p/qJFhq2FiIiIKDcGYCo2yuWRN20C4uMNWwsRERGREgMwFZsmTaQlkrOzgaVLDV0NERERkYQBmIqVcnnkb74BnjwxbC1EREREAAMwFbNOnYCAACAjA1izxtDVEBERETEAUzGTyXLGAi9bBrx4Ydh6iIiIiBiAqdiFhwPe3kBSErBxo6GrISIiIlPHAEzFztJSmhcYkKZEk8sNWw8RERGZNgZgKhHvvQeUKwdcvQr8/ruhqyEiIiJTxgBMJcLBARg5Urq/YAEghGHrISIiItOlcwCOj4/Hhx9+iNq1a6N8+fKIjIwEACQnJ2Ps2LH4559/9FYklQ0ffQRYWwPR0cCxY4auhoiIiEyVTgH44sWLaNiwIbZs2YJq1aohNTUV2dnZAABXV1ccPXoUK1eu1GuhVPp5eABDhkj3FywwbC1ERERkunQKwFOmTIGLiwuuXr2Kn376CeKVv2d369YNR44c0UuBVLZMmiRNjbZrF3DxoqGrISIiIlOkUwCOjIzEyJEj4ebmBplMprG/SpUquHfvXpGLo7KnVi3grbek+4sWGbYWIiIiMk06BWCFQgE7O7s89z98+BDW1tY6F0Vlm3JhjJ9+Au7fN2wtREREZHp0CsCNGjXCH3/8oXVfdnY2Nm/ejObNmxepMCq7mjcHWrcGXr6UVocjIiIiKkk6BeDp06dj7969GDlyJM6fPw8ASEpKwt9//41OnTrh0qVLmDZtml4LpbJlyhTp65o1QGqqYWshIiIi0yITr17BVkAbN27EuHHjkJqaCiEEZDIZhBBwcnLC6tWrMWDAAH3XajBpaWlwdnZGamoqnJycDF1OmaBQAPXrSxfCLViQMyyCiIiISBeFyWs6B2AAyMjIwP79+3Ht2jUoFArUqFEDYWFhcHR01PWURokBuHisWwcMHw54egJxcYCVlaErIiIiotKq2ANwZGQk6tatCzc3N637k5OTcfHiRbRp06awpzZKDMDFIzMTqF5duhBu3Tpg6FBDV0RERES
lVWHymk5jgENDQ7F///489x84cAChoaG6nJpMiLU1MH68dH/hQmlYBBEREVFx0ykAv67TODMzE+bm5joVRKbl/fcBJydpLPCePYauhoiIiEyBRUEbxsfH49atW6rHly9fRmRkpEa7lJQUrF27FlWrVtVLgVS2OTsDH34oXQi3YAHQvbuhKyIiIqKyrsBjgOfMmYM5c+ZoXfktNyEEzM3NsXbtWgwfPlwvRRoaxwAXr/v3AR8faV7gqChpnmAiIiKiwihMXitwD3C/fv1Qr149CCHQr18/jB07Fq1bt1ZrI5PJYG9vj8DAQFSsWFG36snkeHoC77wjXQi3cCGwfbuhKyIiIqKyTKdZIDZs2ICQkBD4+PgUQ0nGhz3Axe/iRcDfH5DJgMuXgVq1DF0RERERlSbFPgvEkCFDTCb8Usnw8wPeeAMQAli82NDVEBERUVmm80IYL168wPbt2xETE4PU1FQoXpnDSiaT4fvvv9dLkYbGHuCScfQo0Lq1ND3a7dsAR9EQERFRQRXLGODcbt++jdDQUNy6dQsuLi5ITU1F+fLlkZKSArlcDldXVzg4OOhUPJmuli2lC+Cio4EVK4DPPzd0RURERFQW6TQEYvLkyUhNTUV0dDSuXr0KIQS2bNmCp0+fYv78+bC1tcW+ffv0XSuVcTIZMGWKdH/VKuDpU8PWQ0RERGWTTgH44MGDGDVqFJo1awYzM+kUQghYW1tj8uTJaN++PcYrl/giKoQ335QugEtJAb77ztDVEBERUVmkUwB+9uyZ6iI4JycnyGQypKamqvYHBwfj6NGjeimQTIu5OfDxx9L9r76S5gYmIiIi0iedAnCVKlVw9+5dAICFhQUqV66M6Oho1f6LFy/CxsZGPxWSyRk0SLoA7s4dYMsWQ1dDREREZY1OAbhdu3b47bffVI+HDh2KJUuWYMSIEXj33XexatUqvPHGGzoVtGrVKvj4+MDGxgZBQUE4efJkvu23bt2KOnXqwMbGBvXr18eePXvU9j99+hRjxoyBl5cXbG1t4efnhzVr1uhUG5UMGxtg7Fjp/sKF0tRoRERERHojdHD79m2xbds28eLFCyGEEM+fPxfvvvuucHFxERUqVBBDhgwRqamphT7v5s2bhZWVlfjhhx/EhQsXxIgRI4SLi4tISkrS2v7YsWPC3NxcLFiwQFy8eFF8+umnwtLSUpw7d07VZsSIEaJGjRri0KFDIi4uTqxdu1aYm5uL3377rcB1paamCgA6vSbSzePHQtjbCwEIsXevoashIiIiY1eYvKbzPMDFISgoCE2bNsXKlSsBAAqFAt7e3vjoo48wbdo0jfbh4eHIyMjA7t27VduaN2+OwMBAVS9vvXr1EB4ejhkzZqjaNG7cGF26dMHnBZxni/MAG8bEicCSJUC7dsCBA4auhoiIiIxZsa4E9+zZM1SoUAELFy7UuUBtsrKycObMGXTo0CGnODMzdOjQAVFRUVqPiYqKUmsPAGFhYWrtW7Rogd9//x337t2DEAKHDh3C1atX0alTpzxryczMRFpamtqNSt748YCFBXDwIHD6tKGrISIiorKi0AHYzs4OFhYWsLe312shycnJkMvlqPjK8l8VK1ZEYmKi1mMSExNf237FihXw8/ODl5cXrKys0LlzZ6xatQpt2rTJs5a5c+fC2dlZdfP29i7CKyNdVakC9O8v3dfz71tERERkwnS6CK53797Ytm0bjGj0RJ5WrFiB6Oho/P777zhz5gwWL16M0aNH4++//87zmOnTpyM1NVV1u3PnTglWTLlNnix93bYNuHnTsLUQERFR2aDTUsj9+/fHqFGjEBoaihEjRsDHxwe2trYa7Ro1alTgc7q6usLc3BxJSUlq25OSkuDh4aH1GA8Pj3zbP3/+HJ988gl27tyJbt26AQACAgIQGxuLRYsWaQyfULK2toa1tXWBa6fiExAAdO4M7N0rzQv8/8PDiYiIiHSmUwBu27at6v6RI0c09gshIJPJIJfLC3x
OKysrNG7cGAcOHEDPnj0BSBfBHThwAGPGjNF6THBwMA4cOKC26tz+/fsRHBwMAHj58iVevnypWq1OydzcHAqFosC1kWFNmSIF4B9+AGbNAtzcDF0RERERlWY6BeB169bpuw4AwMSJEzFkyBA0adIEzZo1w9KlS5GRkYFhw4YBAAYPHozKlStj7ty5AIBx48YhJCQEixcvRrdu3bB582acPn0a33zzDQBplbqQkBBMnjwZtra2qFq1KiIiIvDjjz/iq6++KpbXQPrXti3QpIl0IdyqVcDs2YauiIiIiEozo5oGDQBWrlyJhQsXIjExEYGBgVi+fDmCgoIASD3PPj4+WL9+var91q1b8emnn+LWrVvw9fXFggUL0LVrV9X+xMRETJ8+HX/99RceP36MqlWr4v3338eECRMgk8kKVBOnQTO8X34BwsOBChWA+HjAzs7QFREREZExKUxeM7oAbIwYgA0vOxuoXVu6EG7lSmD0aENXRERERMakWOcBJjIECwtg0iTp/uLFUiAmIiIi0gUDMJUaQ4cCrq5AXBywfbuhqyEiIqLSigGYSg07O+Cjj6T7CxYAHLxDREREumAAplJl1CjA1haIiQEOHTJ0NURERFQaMQBTqeLqCrz7rnR/wQLD1kJERESlk86zQMjlcuzbtw83b97EkydPNJZFlslkmDFjhl6KNDTOAmFc4uKAmjUBhQKIjQUaNDB0RURERGRoxT4N2unTp9G7d2/cvXtXI/iqTlzIleCMGQOw8RkwANi8GRg4EPjpJ0NXQ0RERIZW7NOgjRo1Cs+fP8evv/6Kx48fQ6FQaNzKSvgl4zR5svR182bg9m3D1kJERESli04B+N9//8XUqVPxxhtvwMXFRc8lEb1eo0ZA+/aAXA4sXWroaoiIiKg00SkAe3l55Tn0gaikTJkiff32W+DxY8PWQkRERKWHTgF46tSp+Pbbb5GWlqbveogKrGNH6QK4jAxg9WpDV0NERESlhYUuB6Wnp8PBwQE1a9ZE//794e3tDXNzc7U2MpkMEyZM0EuRRNrIZFIv8MCBwPLlwMSJ0hzBRERERPnRaRYIM7PXdxxzFggqCS9fSlOixccDa9YAH3xg6IqIiIjIEAqT13TqAY6Li9OpMCJ9s7SUen7HjwcWLwbeew945Y8RRERERGp0XgjDlLAH2Lg9fQpUqQI8eQJs3w706mXoioiIiKikFXsPsFJGRgYiIiJw+/8nYq1atSpCQkJgb29flNMSFYqDAzB6NPD558D8+cBbb0njg4mIiIi00bkHeMWKFfj000/x9OlTtSnRHB0d8cUXX2DMmDF6K9LQ2ANs/B48kHqBMzOBiAigTRtDV0REREQlqdhXgvvxxx8xbtw41KtXDz///DNiY2MRGxuLTZs2oX79+hg3bhw2btyoU/FEunB3B4YOle4vWGDQUoiIiEyePEuO2KWHcfyjTYhdehjyLOOaGEGnHuDAwEC4uLjgwIEDGtOfyeVytG/fHikpKYiNjdVXnQbFHuDS4do1oHZtQAjg/HnA39/QFREREZme6Ck7UOWrcfCU31Vtu2/uhfiJy9B8QfFdqFPsPcBXrlxB3759NcIvAJibm6Nv3764cuWKLqcm0pmvb84FcIsWGbYWIiIiUxQ9ZQeaLewDj1zhFwA85PfQbGEfRE/ZYaDK1OkUgJ2dnXHr1q0899+6dYs9pWQQkydLX//3P+Du3fzbEhERkf7Is+So8tU4AEIjYJpBGnDg/dV4oxgOodMsEN26dcOKFSvQuHFj9O/fX23fli1bsHLlSgwcOFAvBRIVRlAQEBIiXQi3bBmwcKGhKyIiIir9FAogPR1IScm5paaqP5YfOII58rx7n8wgUFl+B7FfH0Hg+LYlUXaedBoD/PDhQ4SEhODKlSvw8PCAr68vAODatWtITExEnTp1EBERAVdXV70XbAgcA1y6/PEH0L074OgI3LkDODsbuiIiIiLDUiiAtDT1wJrX7dVgq9ymLTHa4yl
a4hja4jB6Yztq4dprazk+5me0WDFAPy8sl2KfB9jNzQ0xMTFYu3Yt/vzzT9U8wPXr18fUqVPx/vvvw8bGRpdTExVZly7SBXAXLgBr1wJTphi6IiIioqLJzs47wGoLrK/e0tL0U0d5y3R0sjuKULMItMg6jLrPTsNcFG5Ig12NSvoppgi4ElwBsAe49NmwQZoWrVIlIC4OsLY2dEVERGTKXr5UD6oFCa2526Wn66cOGxvAxSXvm7Oz+uMKlmnwuH4U5f49DLsTh2EWGwPIXwm81aoBISFQtGqDh+9/AjdFkmrMb24KyJBg7gWPZ3Ewt9KcSKGoSmwlOCJjNWAA8J//APfuSRfEDR9u6IqIiKg0y8oqWGjNq01Ghn7qsLPLP7DmF2ydnaUAnK+UFODoUeDwYemCmpgYafxEbtWrA23bShfdhIQAVasCkGZWiLviDLeFfaCATC0EKyAt0Xpn4lJULobwW1gF6gEODQ2FmZkZ9u3bBwsLC7Rr1+71J5bJcODAAb0UaWjsAS6dFi2SZoWoU0caDmGm05wnRERUFmRmFj605r49f66fOhwcChdaX31sZaWfOlSePAGOHJHC7uHDQGysZuCtWVMKusrQ6+2d7ym1zQN8z9wbdyYuNZp5gAvUAyyEgCLXm6FQKCCTyV57DJEhvf8+8N//ApcvSxfGvfGGoSsiIiJdCAG8eFH40Jq7zYsX+qnF0bHggfXVm5MTYGmpnzp09vixFHiVPbyxsZpXt9WqpR54K1cu1FM0X9AL8s97IPbrI3h2IwF2NSqh/qjWRtHzq8QxwAXAHuDSa9o0YP58oFUr6eediIhKnhBSD6quMxCkpEhDEPRBW69qQXtjnZwAi9I2ePTRIyAyMqeH999/NQNv7drqQxo8PQ1RaZEVJq/pFIAjIyNRt25duLm5ad2fnJyMixcvok2bNoU9tVFiAC697t+XxuZnZQHHjgEtWhi6IiKi0kcIaQyrrsMHUlKkWQyKSiYreGjV1sbREdCyiG3Z8vCheuA9d06zTd266j28Hh4lXWWxKPaL4EJDQ7Fx40a8/fbbWvcfOHAAb7/9NuSvXiVIVMI8PYFBg4Dvv5cWxdi509AVERGVPCHUFzEo7AwEKSmaF/7rwsys8KE1983BgddzaHjwQAq8yiEN589rtvHzk8Ju27ZAmzZAxYolXKTx0SkAv67TODMzE+Zl/lcsKi0mTZIC8G+/AVeuSH/pISIqTbStwlWY3tjUVM3rmnRhYVH40Jq7nYOD1ItLRZCUJAVdZQ/vxYuaberVy+ndbdMGcHcv6SqNXoEDcHx8PG7duqV6fPnyZURGRmq0S0lJwdq1a1H1/6fEIDK0unWBN98Efv8dWLwY+OYbQ1dERKZGLi/6Igb6uGLH0rJgYTWvYGtnxwBb4hIS1APv5cuabQICcoY0tGkDlJGVeItTgccAz5kzB3PmzCnQ7A/m5uZYu3YthpeRyVc5Brj0O3ZMuhDOygq4fbvMDHciohKSnZ0TVHUZPqCvVbisrIBy5XQfQmBjwwBr9O7fzwm7ERHSny5zk8mkwKsc0tC6NVChggEKNT7FMga4X79+qFevHoQQ6NevH8aOHYvWrVurtZHJZLC3t0dgYCAqcnwJGZGWLaUL4I4fB5YvB7780tAVEVFJenUVrsIOIXj6VD912NrqPnxAGWCpjLl7V72H99o19f0yGRAYmDOkoXVroHx5AxRatug0C8SGDRvQpk0bVKtWrThqMjrsAS4bfvsN6NlT+o/kzh3pamAiKh2ysoo2A8GzZ/qpw96+aIsYcFl2wp07OWH38GHgxg31/WZmQMOGOUMaWrWSuv3ptYp9FoiBAwfiWT7/mqSlpcHOzg4WpW6yPCrL3nhDugDuyhXg22+BiRMNXRGR6ci9iIEuPbH6XIVL11kInJ2NYBEDKn1u31Yf0nDzpvp+MzOgUaOcIQ2tWknfbFSsdOoBHjVqFCIjI3Fe21QbAOrXr4927dph2bJlRS7QGLA
HuOz47jtgxAjAy0v6N4j/mRG9nrZVuArbG5uZqZ9anJx0H0Lg7FwKFzGg0ufWrZywe/iw9Dg3c3OgceOcIQ2tWknf2FRkxd4DvHfvXgwePDjP/X369MFPP/1UZgIwlR3vvAPMmCENudq8WZojmKisE0IaAqDrEILUVP2swiWT5QRRXWYgcHIygUUMqHQRAoiLUx/SEB+v3sbcHGjaNGdIQ8uWHINnBHQKwPfv30flfNaF9vT0xL1793Quiqi42NgA48YB06cDCxZIgZhXRJOxE0K6CEvXGQhSUvSzCpeZ2euHCOQXah0duYgBlXJCSH8+VIbdiAhpTG9uFhZS4FUOaWjRQhp7Q0ZFpwBcoUIFXHl1Wo5cLl26xKECZLQ+/BD44gtpsZy9e4EuXQxdEZV1CkVOgNVl+EBqqn5W4TI3L/oiBgywZFKEAK5fVx/S8GoHn6Ul0KxZzpCGFi2kqyXJqOkUgDt37oy1a9di4MCBaNiwodq+mJgYfPPNN+jbt69eCiTSNxcX4P33ga++kpZHZgCm11EotC9iUNDe2LQ0/a3CVa6c7kMI7O35Fw+ifAkBXL2qPqQhIUG9jaUl0Lx5zpCG4GBphRAqVXS6CO7+/fto2rQpHjx4gDfffBP+/v4AgPPnz2PXrl1wd3fHiRMn4OXlpfeCDYEXwZU9d+4A1atLfxY+eVL6axWVXXJ54RYxeLWNPlfhKsoiBra2DLBEeiWENDVQ7iENiYnqbayspMCr7OFt3pyB10gVJq/pFIABICEhAdOmTcNvv/2GtP9f4sbJyQk9e/bEl19+CU9PT11Oa5QYgMumIUOAH38E+vYFfvnF0NVQfnKvwqXLEIL0dP3UYWOj2xCC3IsYMMASGZAQwKVLOWE3IgJISlJvY20t9eoqA29QkPTbJxm9EgnASkIIPHz4EADg5ub22qWSSyMG4LLp3DlpNUkzM6kDoGZNQ1dUdmVlaQbUwlzMpa9VuOzsiraIAVfhIiplFArg4kX1eXj/P7Oo2NhI43aVQxqaNeMPeylV7NOg5SaTyWBtbQ0HB4cyGX6p7KpfXxr/++ef0njgr782dEXGKzOzaDMQ6HMVrqIsYmBlpZ86iMhIKRTAhQs5QxoiI4HkZPU2trZS4FX28DZrxiX6TJDOAfj06dP49NNPERkZiaysLPz1119o164dkpOT8e6772LChAlo27atHksl0r8pU6QA/MMPQKdO0mpTlSpJS62XpflGX7eIweuC7YsX+qnD0VH3IQRchYuINCgU0p/zcg9pePxYvY2dnTT3rjLwNm3K34ZJtwB8/PhxtGvXDpUrV8Y777yD7777TrXP1dUVqampWLt2rU4BeNWqVVi4cCESExPRoEEDrFixAs2aNcuz/datWzFjxgzcunULvr6+mD9/Prp27arW5tKlS5g6dSoiIiKQnZ0NPz8/bN++HVWqVCl0fVS2hIQANWpIS7G/9VbOdi8vYNkyoFcvw9WmJIQUzHWdgSA1VX+rcBV0EQNtwdbJiatwEVERyeXAv//mDGmIjASePFFvY28vra6mHNLQuDEDL2nQ6b+jTz75BHXr1kV0dDTS09PVAjAAhIaGYsOGDYU+75YtWzBx4kSsWbMGQUFBWLp0KcLCwnDlyhW4u7trtD9+/DgGDBiAuXPnonv37vj555/Rs2dPxMTEoF69egCAGzduoFWrVnj33XcxZ84cODk54cKFC7Dh+B4CsHOnFH5fde8e0KcPsG1b0UOwEEBGhu7DB1JSgJcvi1YDkLMKV1EWMShLveJEVArI5cDZszlDGo4ckf5RzM3BQQq8yh7exo355yJ6LZ0ugrO3t8fcuXMxduxYPHr0CG5ubvj777/Rrl07AMB3332HsWPH4lkhB/4FBQWhadOmWLlyJQBAoVDA29sbH330EaZNm6bRPjw8HBkZGdi9e7dqW/PmzREYGIg1a9YAAPr37w9LS0ts3LixsC9ThRfBlU1yOeDjIy2LrI1MJvUE37ypvQe
2ML2x+ljEwMysaIsYcBUuIjJ62dlAbGzOkIYjR6R/YHNzdJTGqSl7eBs14p+XCEAJXARnaWkJRT6zut+7dw8OhVz2LysrC2fOnMH06dNV28zMzNChQwdERUVpPSYqKgoTJ05U2xYWFoZff/0VgBSg//jjD0yZMgVhYWH4559/UK1aNUyfPh09e/bMs5bMzExk5vqbsXKaNypbjhzJO/wCUs/tnTvSX870MQesuXnBFzHQFm4dHDiFFhGVMdnZQExMzpCGo0elibdzc3IC2rTJCbyBgQy8VGQ6fQc1b94c27Ztw/jx4zX2ZWRkYN26dQgJCSnUOZOTkyGXy1GxYkW17RUrVsTly5e1HpOYmKi1feL/T2L94MEDPH36FPPmzcPnn3+O+fPnY+/evejVqxcOHTqUZ41z587FnDlzClU/lT6vLu6TF2X4tbTUfQYCFxfpOgwGWCIyaS9fSoFX2cN79KjmRN3OzlLgVQ5pCAzk+CvSO50C8Jw5cxASEoJu3bphwIABAICzZ8/i5s2bWLRoER4+fIgZM2botVBdKHupe/TogQkTJgAAAgMDcfz4caxZsybPADx9+nS1nuW0tDR4e3sXf8FUoipVKli7rVuBrl25ChcRUaG9fAmcPq3ew5uRod6mXDn1Ht6AAAZeKnY6BeCgoCDs2bMHI0eOxODBgwEAkyZNAgDUqFEDe/bsQUBAQKHO6erqCnNzcyS9siJLUlISPDw8tB7j4eGRb3tXV1dYWFjAz89PrU3dunVx9OjRPGuxtraGNecELPNat5bG+N67p32Ig3IM8Ftv8d9iIqICycoCTp3KCbzHj2sG3vLlc3p427aVJmXnBQpUwnQeRNOuXTtcuXIFsbGxuHbtGhQKBWrUqIHGjRvrtCCGlZUVGjdujAMHDqjG5yoUChw4cABjxozRekxwcDAOHDigNhRj//79CA4OVp2zadOmuHLlitpxV69eRdWqVQtdI5Ut5ubSVGd9+khhN3cIVn4LL13K8EtElKfMTCnwKoc0HDsmXTWcW4UKOb27ISFAvXoMvGRwRR5FHhgYiMDAQD2UAkycOBFDhgxBkyZN0KxZMyxduhQZGRkYNmwYAGDw4MGoXLky5s6dCwAYN24cQkJCsHjxYnTr1g2bN2/G6dOn8c0336jOOXnyZISHh6NNmzYIDQ3F3r17sWvXLhw+fFgvNVPp1quXNNXZuHHqF8R5eUnh1xjmASYiMhqZmcCJE+o9vK+ulOPqmhN227YF/PwYeMnoFCgAR0ZGAgDatGmj9vi1J7ewgKurK2rVqlWg9uHh4Xj48CFmzpyJxMREBAYGYu/evaoL3eLj42GW64eoRYsW+Pnnn/Hpp5/ik08+ga+vL3799VfVHMAA8NZbb2HNmjWqadtq166N7du3o1WrVgWqicq+Xr2AHj2kWSESEsrmSnBERDp58QKIjs4JvNHRmoHX3T0n7LZtC9StywsmyOgVaB5gMzMzyGQyPH/+HFZWVqrHBeXt7Y3t27ejcePGRSrWUDgPMBERmYTnz6WQqxzSEB2tuZRkxYo5YTckBKhTh4GXjILe5wE+dOgQAGlMbe7HryOXy3H//n3MmzcPo0aNwokTJwp0HBEREZWAZ8+AqKicHt4TJ6QL2XKrVEl9SEOtWgy8VOoVKAC/Ol1YYef4ffbsGcaOHVuoY4iIiEjPMjKkcbvKwHvypOZa65Urqw9pqFmTgZfKnCJfBJeQkIAHDx6gZs2asLe319rmnXfeQVhYWFGfioiIiArj6VMp8CqHNJw8Ka2+lpuXl/qQhho1GHipzNM5AP/222+YOnUqrl27BkCafqxdu3ZITk5Gx44dMWvWLNV0ZnZ2dpx2jIiIqLilp0tTkSl7eE+f1gy8VaqoD2moVo2Bl0yOTgF4165d6NWrF4KDg/H2229j9uzZqn2urq6oXLky1q1bpwrAREREVAzS0qTAe/iwdDtzBpDL1dv4+KgPafDxKekqiYyOTgH4s88+Q5s2bXD
o0CE8evRILQAD0gIVa9eu1Ud9REREpJSaKi0nrBzScOYMoFCot6lWTX1IA/8CS6RBpwB8/vx5fPXVV3nur1ixIh48eKBzUURERAQgJUWapFw5pOGffzQDb40aOWE3JEQa4kBE+dIpANvZ2SHj1bW9c7l58yYqVKigc1FEREQm6ckTKfAqhzTExqqv0w4Avr7qSwt7eZV8nUSlnE4BODQ0FBs2bMD48eM19iUmJuLbb79F9+7di1obERFR2fb4MRAZmTOk4exZzcBbq5Z6D2/lyoaolKhM0SkAf/HFF2jevDmaNm2Kvn37QiaTYd++fTh48CDWrl0LIQRmzZql71qJiIhKt+RkKfAqhzScO6cZeOvUUQ+8lSoZolKiMq1ASyFrc+HCBYwbNw6HDh1C7lO0bdsWq1atQt26dfVWpKFxKWQiItLJw4c5PbyHDwPnz2u28fPLGdLQpg3g4VHCRRKVDXpfClkbf39//P3333jy5AmuX78OhUKB6tWrw83NDQAghICM8woSEZEpefAgp3c3IgK4cEGzjb9/Tg9vmzZAxYolXSWRySvySnDlypVD06ZNVY+zsrKwfv16LFq0CFevXi3q6YmIiIxXYqIUdJWh99IlzTb166sH3v/vKCIiwylUAM7KysLvv/+OGzduoFy5cujevTs8PT0BAM+ePcPKlSuxdOlSJCYmokaNGsVSMBERkcEkJKj38F6+rNmmQYOcIQ2tWwOuriVdJRG9RoED8P3799G2bVvcuHFDNebX1tYWv//+O6ysrPD222/j3r17aNasGVasWIFevXoVW9FEREQl4t499cD76l82ZTIp8Cp7eFu3BjgNKJHRK3AA/s9//oO4uDhMmTIFrVu3RlxcHD777DO8//77SE5Ohr+/P3766SeEhIQUZ71ERETF5+7dnLB7+DBw/br6fpkMaNhQPfCWK2eAQomoKAocgPfv349hw4Zh7ty5qm0eHh7o27cvunXrht9++w1mZmbFUiQREVGxiI9X7+G9cUN9v5kZ0KhRzpCGVq0AFxcDFEpE+lTgAJyUlITmzZurbVM+Hj58OMMvEREZv9u3c6Yki4gA4uLU95uZAY0b5/TwtmoFODsboFAiKk4FDsByuRw2NjZq25SPnfmPAxERGRshgFu31Ic03L6t3sbcHGjSJKeHt2VLgPO9E5V5hZoF4tatW4iJiVE9Tk1NBQBcu3YNLlr+JNSoUaOiVUdERFRQQgA3b6oPaYiPV29jYSEF3rZtpVuLFoCjowGKJSJDKvBKcGZmZloXttC24IVym1wu10+VBsaV4IiIjJAQ0pjd3D28d++qt7GwAJo1yxnS0KIF4OBggGKJqLgVy0pw69atK3JhREREOhMCuHZNPfDev6/extISCArKGdIQHAzY2xugWCIyZgUOwEOGDCnOOoiIiNQJAVy5oj6kISFBvY2VlRR4lUMamjcH7OwMUCwRlSZFXgqZiIhIL4SQVlbL3cOblKTextpaCrnKIQ3NmwO2tgYolohKMwZgIiIyDCGAixfVe3gfPFBvY2MjDWNQDmkICpK2EREVAQMwERGVDIVCCrzKeXgjI4GHD9Xb2NhIF6ophzQ0ayb1+hIR6REDMBERFQ+FAjh/Pqd3NyICePRIvY2trTT3rnJIQ9OmDLxEVOwYgImISD8UCuDff3OGNERGAo8fq7exs5NWV1MOaWjSRLqQjYioBDEAExGRbuRyKfAqhzQcOQI8eaLext5eCrzKHt4mTaSpyoiIDIgBmIiICkYuB2Jjc4Y0REYC/78iqIqDA9C6dU7gbdSIgZeIjA4DMBERaZedDfzzT86QhiNHgLQ09TZOTlLgVQ5paNhQWn2NiMiI8V8pIiKSZGcDMTE5QxqOHgXS09XbODur9/AGBjLwElGpw3+1iIhM1cuXwJkzOUMajh4Fnj5Vb+PiArRpkxN4GzQAzM0NUCwRkf4wABMRmYqsLOD06ZwhDceOARkZ6m3KlZOCrnJIQ/36DLxEVOY
wABMRlVVZWcCpUzlDGo4fB549U29ToYJ6D2/9+oCZmQGKJSIqOQzARERlRWYmcPJkzpCG48eB58/V27i65vTuhoQA/v4MvERkchiAiYhKqxcvgBMncoY0REVJ23Jzc8sJu23bAnXrMvASkcljACYiKi1evACio3OGNERHS72+uVWsqN7DW7cuIJMZoFgiIuPFAExEZKyeP5d6dZVDGqKjpXG9uXl4qPfw1q7NwEtE9BoMwERExuLZM2ncrnJIw8mTmoHX01M98Pr6MvASERUSAzARkaFkZEiBV9nDe/KkNDdvbpUrS0FXGXpr1mTgJSIqIgZgIqKS8vSpNPeuMvCeOiWtvpabt7d6D2/16gy8RER6xgBMRFRc0tOl1dWUQxpOnwbkcvU2VaoAoaE5gdfHh4GXiKiYMQATEelLWpoUeJU9vGfOaAbeatXUZ2nw8TFAoUREpo0BmIhIVykp6oE3JgZQKNTbVK+eE3ZDQoCqVQ1QKBER5cYATERUUCkpwJEjOfPwxsZqBt6aNdV7eL29S7xMIiLKHwMwEVFeHj/OCbwREVLgFUK9Ta1a6oG3cmUDFEpERIXBAExEpPToERAZmXPR2r//agbe2rXVhzR4ehqiUiIiKgKjXBB+1apV8PHxgY2NDYKCgnDy5Ml822/duhV16tSBjY0N6tevjz179uTZ9sMPP4RMJsPSpUv1XDURlTrJycCOHcDYsUBAAODqCvTqBSxbBpw9K4XfunWBDz8ENm8G7t8HLl8G1qwBBgxg+CUiKqWMrgd4y5YtmDhxItasWYOgoCAsXboUYWFhuHLlCtzd3TXaHz9+HAMGDMDcuXPRvXt3/Pzzz+jZsydiYmJQr149tbY7d+5EdHQ0PPmfFpFpevBA6uFVDmk4f16zjZ9fzsITbdoAFSuWcJFERFTcZEK8+vc9wwoKCkLTpk2xcuVKAIBCoYC3tzc++ugjTJs2TaN9eHg4MjIysHv3btW25s2bIzAwEGvWrFFtu3fvHoKCgrBv3z5069YN48ePx/jx4wtUU1paGpydnZGamgonJ6eivUAiKjlJSVLQVQ5puHhRs029ejlDGtq0AbT8ok1ERMavMHnNqHqAs7KycObMGUyfPl21zczMDB06dEBUVJTWY6KiojBx4kS1bWFhYfj1119VjxUKBQYNGoTJkyfD39//tXVkZmYiMzNT9TgtLa2Qr4SIDCIxMSfsHj4sDVd4VUBAzkVrbdpIwx6IiMikGFUATk5OhlwuR8VX/uRYsWJFXNb2HxmAxMREre0TExNVj+fPnw8LCwuMHTu2QHXMnTsXc+bMKWT1RFTi7t/PCbwREcCVK+r7ZTIp8CqHNLRuDVSoYIBCiYjImBhVAC4OZ86cwbJlyxATEwNZAZcXnT59ulqvclpaGrw5lyeR4d29qz6k4do19f0yGRAYmDOkoXVroHx5AxRKRETGzKgCsKurK8zNzZGUlKS2PSkpCR4eHlqP8fDwyLf9kSNH8ODBA1SpUkW1Xy6XY9KkSVi6dClu3bqlcU5ra2tYW1sX8dUQUZHduaM+pOHGDfX9ZmZAw4Y5QxpatQLKlTNAoUREVJoYVQC2srJC48aNceDAAfTs2ROANH73wIEDGDNmjNZjgoODceDAAbUL2vbv34/g4GAAwKBBg9ChQwe1Y8LCwjBo0CAMGzasWF4HEeno9m31IQ03b6rvNzMDGjXKGdLQqhXg7GyAQomIqDQzqgAMABMnTsSQIUPQpEkTNGvWDEuXLkVGRoYqrA4ePBiVK1fG3LlzAQDjxo1DSEgIFi9ejG7dumHz5s04ffo0vvnmGwBAhQoVUOGVMX+Wlpbw8PBA7dq1S/bFEZG6W7dywu7hw9Lj3MzNgcaNc4Y0tGoFcCYWIiIqIqMLwOHh4Xj48CFmzpyJxMREBAYGYu/evaoL3eLj42FmlrN+R4sWLfDzzz/j008/xSeffAJfX1/8+uuvGnMAE5GBCQHExan38N6+rd7G3Bxo2jRnSEP
LloCjoyGqJSKiMszo5gE2RpwHmEgHQkhDGJTjdyMipDG9uVlYSIFXOaShRQvAwaHkayUiolKv1M4DTESlmBDA9evqQxru3VNvY2kJNGuWM6ShRQvA3t4AxRIRkSljACYi3QgBXL2qPqTh/n31NpaWQPPmOUMagoMBOztDVEtERKTCAExEBSOEtNBE7iENuRacAQBYWUmBV9nD27w5Ay8RERkdBmAi0k4I4NKlnLAbEQG8Muc2rK2lXl1lD29QEGBra4hqiYiICowBmIgkCgVw8aL6kIaHD9Xb2NhI43aVgbdZM2kbERFRKcIATGSqFArgwoWcIQ2RkUBysnobW1sp8CqHNDRrJvX6EhERlWIMwESmQqEAzp1TH9Lw+LF6Gzs7ae5dZQ9v06bSuF4iIqIyhAGYqKySy4F//80Z0hAZCTx5ot7G3l5aXU0ZeBs3ZuAlIqIyjwGYqKyQy4GzZ3OGNBw5AqSkqLdxcJACr3JIQ+PG0lRlREREJoQBmKi0ys4GYmNzhjQcOQKkpqq3cXQEWrfO6eFt1EhafY2IiMiE8X9CotIiOxuIickZ0nD0KJCWpt7GyQlo0yYn8AYGMvASERG9gv8zEhmrly+lwKvs4T16FEhPV2/j7CwFXuWQhsBAwNzcAMUSERGVHgzARMbi5Uvg9Gn1Ht6MDPU25cqp9/AGBDDwEhERFRIDMJGhZGUBp07lBN7jxzUDb/nyOT28bdsC9esDZmYGKJaIiKjsYAAmKimZmVLgVQ5pOHYMeP5cvU2FCjm9uyEhQL16DLxERER6xgBMVFwyM4ETJ3J6eKOiNAOvq2tO2G3bFvDzY+AlIiIqZgzARPry4gUQHZ0TeKOjpW25ubvnhN22bYG6dQGZzADFEhERmS4GYCJAWkTiyBEgIQGoVEmaO/d1F5c9fy6FXOWQhuhoqdc3t4oVc8JuSAhQpw4DLxERkYExABPt2AGMGwfcvZuzzcsLWLYM6NUrZ9uzZ9IwBmUP74kT0oVsuVWqpD6koVYtBl4iIiIjwwBMpm3HDqBPH0AI9e337knbZ8yQeocPHwZOnpSmKsvN0zOnh7dtW6BmTQZeIiIiIycT4tX/+elVaWlpcHZ2RmpqKpycnAxdDumLXA74+Kj3/L6Ol5d6D2+NGgy8RERERqAweY09wGS6jhwpWPjt1AkID5cCb7VqDLxERESlHAMwma6EhIK1GzoUGDCgWEshIiKiksMJR8k0vXwpXcxWEJUqFW8tREREVKLYA0ymJyoK+OAD4Ny5/NvJZNKY39atS6YuIiIiKhHsASbT8eQJ8OGHQMuWUvitUAEYPVoKuq+O61U+Xrr09fMBExERUanCAExlnxDAzz9Li1CsXSs9HjoUuHwZWLkS2LYNqFxZ/RgvL2l77nmAiYiIqEzgEAgq265fB0aNAvbvlx7XqQOsWSNNY6bUqxfQo0fhV4IjIiKiUokBmMqmzExgwQLgiy+k+9bWwKefApMnS/dfZW4uTXNGREREZR4DMJU9hw9LY32vXJEed+wIfP21tEobERERmTyOAaayIzlZGtsbGiqF34oVpbG/+/Yx/BIREZEKAzCVfgoF8MMPQO3awIYN0gwOI0dKF7kNGMCV24iIiEgNh0BQ6XbxojTc4cgR6XFAgDTTQ/Pmhq2LiIiIjBZ7gKl0ev4c+M9/gMBAKfza2QELFwKnTzP8EhERUb7YA0ylz9690gIWN29Kj994A1ixAqha1bB1ERERUanAHmAqPRISgPBwoEsXKfx6eQE7dwK//cbwS0RERAXGAEzGTy4HVq2SFrH45RfAzAyYMEEa/9uzJy9yIyIiokLhEAgybrGxwAcfACdPSo+bNpUucmvY0KBlERERUenFHmAyTk+fAhMnAo0bS+HX0RFYuRKIimL4JSIioiJhDzAZn19/BT76CLh7V3rcrx+wZAng6WnQsoiIiKhsYAAm4xEfLwXf33+XHlerJi1h3LmzYesiIiKiMoVDIMjwsrOBxYsBPz8
p/FpYANOnA+fPM/wSERGR3rEHmAzrxAnpIrezZ6XHrVoBa9YA/v6GrYuIiIjKLPYAk2GkpACjRgHBwVL4LV8e+O47ICKC4ZeIiIiKFXuAqWQJAWzZIs3jm5gobRs8GFi0CHBzM2xtREREZBIYgKnk3Lgh9fr+9Zf0uFYtabhDaKhh6yIiIiKTwiEQVPyysoAvvgDq1ZPCr7U1MGcO8O+/DL9ERERU4owyAK9atQo+Pj6wsbFBUFAQTipXAcvD1q1bUadOHdjY2KB+/frYs2ePat/Lly8xdepU1K9fH/b29vD09MTgwYNx//794n4ZBACRkUBgIPDpp8CLF0D79sC5c8DMmVIQJiIiIiphRheAt2zZgokTJ2LWrFmIiYlBgwYNEBYWhgcPHmhtf/z4cQwYMADvvvsu/vnnH/Ts2RM9e/bE+fPnAQDPnj1DTEwMZsyYgZiYGOzYsQNXrlzBm2++WZIvy/QkJwPDhwMhIcClS4C7O/DTT8D+/YCvr6GrIyIiIhMmE0IIQxeRW1BQEJo2bYqVK1cCABQKBby9vfHRRx9h2rRpGu3Dw8ORkZGB3bt3q7Y1b94cgYGBWLNmjdbnOHXqFJo1a4bbt2+jSpUqr60pLS0Nzs7OSE1NhZOTk46vzEQIAWzYAHz8MfDokbTt/feBefOAcuUMWxsRERGVWYXJa0bVA5yVlYUzZ86gQ4cOqm1mZmbo0KEDoqKitB4TFRWl1h4AwsLC8mwPAKmpqZDJZHBxcdG6PzMzE2lpaWo3KoBLl6QxvcOGSeG3Xj3g2DFg7VqGXyIiIjIaRhWAk5OTIZfLUbFiRbXtFStWRKJyyqxXJCYmFqr9ixcvMHXqVAwYMCDP3w7mzp0LZ2dn1c3b21uHV2NCnj8HZswAGjSQ5vG1tQXmzwdiYoAWLQxdHREREZEaowrAxe3ly5fo168fhBBYvXp1nu2mT5+O1NRU1e3OnTslWGUps38/UL8+8PnnwMuXQLduwMWLwJQpgKWloasjIiIi0mBU8wC7urrC3NwcSUlJatuTkpLg4eGh9RgPD48CtVeG39u3b+PgwYP5jg2xtraGNWcoyF9iIjBxIrBpk/TY0xNYvhzo1QuQyQxbGxEREVE+jKoH2MrKCo0bN8aBAwdU2xQKBQ4cOIDg4GCtxwQHB6u1B4D9+/ertVeG32vXruHvv/9GhQoViucFmAKFQlq8ok4dKfyamQFjx0rjf3v3ZvglIiIio2dUPcAAMHHiRAwZMgRNmjRBs2bNsHTpUmRkZGDYsGEAgMGDB6Ny5cqYO3cuAGDcuHEICQnB4sWL0a1bN2zevBmnT5/GN998A0AKv3369EFMTAx2794NuVyuGh9cvnx5WFlZGeaFlkZnzwIffACcOCE9btxYusCtcWPD1kVERERUCEYXgMPDw/Hw4UPMnDkTiYmJCAwMxN69e1UXusXHx8PMLKfjukWLFvj555/x6aef4pNPPoGvry9+/fVX1KtXDwBw7949/P777wCAwMBAtec6dOgQ2rZtWyKvq1R7+hSYPRtYuhSQywFHR2llt1GjAHNzQ1dHREREVChGNw+wMTLpeYB37QLGjAHi46XHffpIQbhyZYOWRURERJRbYfKa0fUAk5G4cwcYNw7YuVN6XLUqsGqVNMsDERERUSlmVBfBkRHIzgaWLAH8/KTwa2EBTJ0KXLjA8EtERERlAnuAKcepU9JFbv/8Iz1u0UKa8aF+fcPWRURERKRH7AEmIDVVGucbFCSFXxcX4JtvgCNHGH6JiIiozGEPsCkTAti6FRg/HkhIkLa98w6weDHg7m7Q0oiIiIiKCwOwqbp5Exg9Gti7V3rs6wusXg20b2/YuoiIiIiKGYdAmJqsLGDuXMDfXwq/VlbArFnAv/8y/BIREZFJYA+wKTl6FPjwQ2lGBwAIDZV6fWvXNmxdRERERCWIPcCm4NEj4L33gNatpfDr6gr8+CNw4ADDLxEREZkc9gCXZUIAGzcCkyYBycnStvf
eA+bPB8qXN2xtRERERAbCAFxWXbkCjBwJHDokPfb3l+b0bdXKsHURERERGRiHQJQ1L15IF7UFBEjh19ZWuugtJobhl4iIiAjsAS5b/v5b6vW9fl163KULsGoVUK2aYesiIiIiMiLsAS4LkpKkBSw6dpTCb6VKwC+/AH/8wfBLRERE9AoG4NJMoZCWLK5TB/jf/wCZTFrS+NIloG9f6TERERERqeEQiNLq3Dnggw+AqCjpccOGwNq1QNOmhq2LiIiIyMixB7i0ycgApkyRAm9UFODgACxZApw8yfBLREREVADsATY2cjlw5AiQkCCN5W3dGjA3l/bt3i0Ncbh9W3rcqxewbBng5WW4eomIiIhKGQZgY7JjBzBuHHD3bs42Ly9g5kxg3z5g+3ZpW5Uq0uwO3bsbpk4iIiKiUowB2Fjs2AH06SOt3pbb3bvA++9L983NgQkTgNmzAXv7Ei+RiIiIqCxgADYGcrnU8/tq+M3Nykoa89uoUcnVRURERFQG8SI4Y3DkiPqwB22ysoC0tJKph4iIiKgMYwA2BgkJ+m1HRERERHliADYGlSrptx0RERER5YkB2Bi0bi3N9pDXym0yGeDtLbUjIiIioiJhADYG5ubSfL6AZghWPl66NGc+YCIiIiLSGQOwsejVC9i2DahcWX27l5e0vVcvw9RFREREVMZwGjRj0qsX0KNH3ivBEREREVGRMQAbG3NzoG1bQ1dBREREVGZxCAQRERERmRQGYCIiIiIyKQzARERERGRSGICJiIiIyKQwABMRERGRSWEAJiIiIiKTwgBMRERERCaFAZiIiIiITAoDMBERERGZFAZgIiIiIjIpXAq5AIQQAIC0tDQDV0JERERE2ihzmjK35YcBuADS09MBAN7e3gauhIiIiIjyk56eDmdn53zbyERBYrKJUygUuH//PhwdHSGTyQBIv2V4e3vjzp07cHJyMnCFpA/8TMsmfq5lDz/TsoefadlU0p+rEALp6enw9PSEmVn+o3zZA1wAZmZm8PLy0rrPycmJP6xlDD/Tsomfa9nDz7Ts4WdaNpXk5/q6nl8lXgRHRERERCaFAZiIiIiITAoDsI6sra0xa9YsWFtbG7oU0hN+pmUTP9eyh59p2cPPtGwy5s+VF8ERERERkUlhDzARERERmRQGYCIiIiIyKQzARERERGRSGICJiIiIyKQwAOtg1apV8PHxgY2NDYKCgnDy5ElDl0T5iIyMxBtvvAFPT0/IZDL8+uuvavuFEJg5cyYqVaoEW1tbdOjQAdeuXVNr8/jxYwwcOBBOTk5wcXHBu+++i6dPn5bgqyCluXPnomnTpnB0dIS7uzt69uyJK1euqLV58eIFRo8ejQoVKsDBwQG9e/dGUlKSWpv4+Hh069YNdnZ2cHd3x+TJk5GdnV2SL4VyWb16NQICAlQT5gcHB+PPP/9U7ednWvrNmzcPMpkM48ePV23j51r6zJ49GzKZTO1Wp04d1f7S8pkyABfSli1bMHHiRMyaNQsxMTFo0KABwsLC8ODBA0OXRnnIyMhAgwYNsGrVKq37FyxYgOXLl2PNmjU4ceIE7O3tERYWhhcvXqjaDBw4EBcuXMD+/fuxe/duREZG4v333y+pl0C5REREYPTo0YiOjsb+/fvx8uVLdOrUCRkZGao2EyZMwK5du7B161ZERETg/v376NWrl2q/XC5Ht27dkJWVhePHj2PDhg1Yv349Zs6caYiXRAC8vLwwb948nDlzBqdPn0a7du3Qo0cPXLhwAQA/09Lu1KlTWLt2LQICAtS283Mtnfz9/ZGQkKC6HT16VLWv1HymggqlWbNmYvTo0arHcrlceHp6irlz5xqwKiooAGLnzp2qxwqFQnh4eIiFCxeqtqWkpAhra2uxadMmIYQQFy9eFADEqVOnVG3+/PNPIZPJxL1790qsdtLuwYMHAoCIiIgQQkifn6Wlpdi6dauqzaVLlwQAERUVJYQQYs+ePcLMzEwkJiaq2qxevVo4OTm
JzMzMkn0BlKdy5cqJ7777jp9pKZeeni58fX3F/v37RUhIiBg3bpwQgj+rpdWsWbNEgwYNtO4rTZ8pe4ALISsrC2fOnEGHDh1U28zMzNChQwdERUUZsDLSVVxcHBITE9U+U2dnZwQFBak+06ioKLi4uKBJkyaqNh06dICZmRlOnDhR4jWTutTUVABA+fLlAQBnzpzBy5cv1T7TOnXqoEqVKmqfaf369VGxYkVVm7CwMKSlpal6HMlw5HI5Nm/ejIyMDAQHB/MzLeVGjx6Nbt26qX1+AH9WS7Nr167B09MT1atXx8CBAxEfHw+gdH2mFiX2TGVAcnIy5HK52ocGABUrVsTly5cNVBUVRWJiIgBo/UyV+xITE+Hu7q6238LCAuXLl1e1IcNQKBQYP348WrZsiXr16gGQPi8rKyu4uLiotX31M9X2mSv3kWGcO3cOwcHBePHiBRwcHLBz5074+fkhNjaWn2kptXnzZsTExODUqVMa+/izWjoFBQVh/fr1qF27NhISEjBnzhy0bt0a58+fL1WfKQMwEZVao0ePxvnz59XGn1HpVbt2bcTGxiI1NRXbtm3DkCFDEBERYeiySEd37tzBuHHjsH//ftjY2Bi6HNKTLl26qO4HBAQgKCgIVatWxS+//AJbW1sDVlY4HAJRCK6urjA3N9e4mjEpKQkeHh4GqoqKQvm55feZenh4aFzkmJ2djcePH/NzN6AxY8Zg9+7dOHToELy8vFTbPTw8kJWVhZSUFLX2r36m2j5z5T4yDCsrK9SsWRONGzfG3Llz0aBBAyxbtoyfaSl15swZPHjwAI0aNYKFhQUsLCwQERGB5cuXw8LCAhUrVuTnWga4uLigVq1auH79eqn6WWUALgQrKys0btwYBw4cUG1TKBQ4cOAAgoODDVgZ6apatWrw8PBQ+0zT0tJw4sQJ1WcaHByMlJQUnDlzRtXm4MGDUCgUCAoKKvGaTZ0QAmPGjMHOnTtx8OBBVKtWTW1/48aNYWlpqfaZXrlyBfHx8Wqf6blz59R+sdm/fz+cnJzg5+dXMi+EXkuhUCAzM5OfaSnVvn17nDt3DrGxsapbkyZNMHDgQNV9fq6l39OnT3Hjxg1UqlSpdP2sltjldmXE5s2bhbW1tVi/fr24ePGieP/994WLi4va1YxkXNLT08U///wj/vnnHwFAfPXVV+Kff/4Rt2/fFkIIMW/ePOHi4iJ+++038e+//4oePXqIatWqiefPn6vO0blzZ9GwYUNx4sQJcfToUeHr6ysGDBhgqJdk0kaOHCmcnZ3F4cOHRUJCgur27NkzVZsPP/xQVKlSRRw8eFCcPn1aBAcHi+DgYNX+7OxsUa9ePdGpUycRGxsr9u7dK9zc3MT06dMN8ZJICDFt2jQREREh4uLixL///iumTZsmZDKZ+Ouvv4QQ/EzLityzQAjBz7U0mjRpkjh8+LCIi4sTx44dEx06dBCurq7iwYMHQojS85kyAOtgxYoVokqVKsLKyko0a9ZMREdHG7okysehQ4cEAI3bkCFDhBDSVGgzZswQFStWFNbW1qJ9+/biypUraud49OiRGDBggHBwcBBOTk5i2LBhIj093QCvhrR9lgDEunXrVG2eP38uRo0aJcqVKyfs7OzEW2+9JRISEtTOc+vWLdGlSxdha2srXF1dxaRJk8TLly9L+NWQ0vDhw0XVqlWFlZWVcHNzE+3bt1eFXyH4mZYVrwZgfq6lT3h4uKhUqZKwsrISlStXFuHh4eL69euq/aXlM5UJIUTJ9TcTERERERkWxwATERERkUlhACYiIiIik8IATEREREQmhQGYiIiIiEwKAzARERERmRQGYCIiIiIyKQzARERERGRSGICJiIrR4cOHIZPJsG3bNkOXUiBJSUno06cPKlSoAJlMhqVLlxbpfD4+Phg6dKheaiMi0hcGYCIq9davXw+ZTAYbGxvcu3dPY3/btm1Rr149A1RW+kyYMAH79u3D9OnTsXHjRnTu3NnQJeX
p2bNnmD17Ng4fPmzoUoiolLEwdAFERPqSmZmJefPmYcWKFYYupdQ6ePAgevTogY8//tjQpbzWs2fPMGfOHADSLzlERAXFHmAiKjMCAwPx7bff4v79+4YupcRlZGTo5TwPHjyAi4uLXs5VWunrvSQi48UATERlxieffAK5XI558+bl2+7WrVuQyWRYv369xj6ZTIbZs2erHs+ePRsymQxXr17FO++8A2dnZ7i5uWHGjBkQQuDOnTvo0aMHnJyc4OHhgcWLF2t9Trlcjk8++QQeHh6wt7fHm2++iTt37mi0O3HiBDp37gxnZ2fY2dkhJCQEx44dU2ujrOnixYt4++23Ua5cObRq1Srf13zz5k307dsX5cuXh52dHZo3b44//vhDtV85jEQIgVWrVkEmk0Emk+V7ToVCgWXLlqF+/fqwsbGBm5sbOnfujNOnT+d5jLL2Vymf/9atW6ptp0+fRlhYGFxdXWFra4tq1aph+PDhAKTP0M3NDQAwZ84cVb25P7vLly+jT58+KF++PGxsbNCkSRP8/vvvWp83IiICo0aNgru7O7y8vAAA6enpGD9+PHx8fGBtbQ13d3d07NgRMTEx+b4vRGT8OASCiMqMatWqYfDgwfj2228xbdo0eHp66u3c4eHhqFu3LubNm4c//vgDn3/+OcqXL4+1a9eiXbt2mD9/Pv73v//h448/RtOmTdGmTRu147/44gvIZDJMnToVDx48wNKlS9GhQwfExsbC1tYWgDT8oEuXLmjcuDFmzZoFMzMzrFu3Du3atcORI0fQrFkztXP27dsXvr6++PLLLyGEyLP2pKQktGjRAs+ePcPYsWNRoUIFbNiwAW+++Sa2bduGt956C23atMHGjRsxaNAgdOzYEYMHD37te/Luu+9i/fr16NKlC9577z1kZ2fjyJEjiI6ORpMmTXR4l3M8ePAAnTp1gpubG6ZNmwYXFxfcunULO3bsAAC4ublh9erVGDlyJN566y306tULABAQEAAAuHDhAlq2bInKlStj2rRpsLe3xy+//IKePXti+/bteOutt9Seb9SoUXBzc8PMmTNVPcAffvghtm3bhjFjxsDPzw+PHj3C0aNHcenSJTRq1KhIr4+IDEwQEZVy69atEwDEqVOnxI0bN4SFhYUYO3asan9ISIjw9/dXPY6LixMAxLp16zTOBUDMmjVL9XjWrFkCgHj//fdV27Kzs4WXl5eQyWRi3rx5qu1PnjwRtra2YsiQIapthw4dEgBE5cqVRVpammr7L7/8IgCIZcuWCSGEUCgUwtfXV4SFhQmFQqFq9+zZM1GtWjXRsWNHjZoGDBhQoPdn/PjxAoA4cuSIalt6erqoVq2a8PHxEXK5XO31jx49+rXnPHjwoACg9j4r5a6/atWqau+HsvZXKT/DuLg4IYQQO3fuVH2meXn48KHG56XUvn17Ub9+ffHixQu1ulq0aCF8fX01nrdVq1YiOztb7RzOzs4Fei+IqPThEAgiKlOqV6+OQYMG4ZtvvkFCQoLezvvee++p7pubm6NJkyYQQuDdd99VbXdxcUHt2rVx8+ZNjeMHDx4MR0dH1eM+ffqgUqVK2LNnDwAgNjYW165dw9tvv41Hjx4hOTkZycnJyMjIQPv27REZGQmFQqF2zg8//LBAte/ZswfNmjVTGybh4OCA999/H7du3cLFixcL9ibksn37dshkMsyaNUtj3+uGThSEchzy7t278fLly0Id+/jxYxw8eBD9+vVDenq66r189OgRwsLCcO3aNY3ZQkaMGAFzc3ONGk6cOGGSY8qJyjoGYCIqcz799FNkZ2e/dixwYVSpUkXtsbOzM2xsbODq6qqx/cmTJxrH+/r6qj2WyWSoWbOmaszrtWvXAABDhgyBm5ub2u27775DZmYmUlNT1c5RrVq1AtV++/Zt1K5dW2N73bp1VfsL68aNG/D09ET58uULfWxBhISEoHfv3pgzZw5cXV3Ro0cPrFu3DpmZma899vr16xBCYMa
MGRrvpTKwP3jwQO0Ybe/lggULcP78eXh7e6NZs2aYPXu21l9uiKj04RhgIipzqlevjnfeeQfffPMNpk2bprE/rx5KuVye5zlf7R3MaxuAfMfj5kXZu7tw4UIEBgZqbePg4KD2WDl2uDQp6HuvXDwkOjoau3btwr59+zB8+HAsXrwY0dHRGu9Fbsr38uOPP0ZYWJjWNjVr1lR7rO297NevH1q3bo2dO3fir7/+wsKFCzF//nzs2LEDXbp0yfd1EpFxYwAmojLp008/xU8//YT58+dr7CtXrhwAICUlRW27Lj2hBaXs4VUSQuD69euqi7Zq1KgBAHByckKHDh30+txVq1bFlStXNLZfvnxZtb+watSogX379uHx48eF6gXO/d7nnm4tr/e+efPmaN68Ob744gv8/PPPGDhwIDZv3oz33nsvzzBdvXp1AIClpWWR38tKlSph1KhRGDVqFB48eIBGjRrhiy++YAAmKuU4BIKIyqQaNWrgnXfewdq1a5GYmKi2z8nJCa6uroiMjFTb/vXXXxdbPT/++CPS09NVj7dt24aEhARVkGrcuDFq1KiBRYsW4enTpxrHP3z4UOfn7tq1K06ePImoqCjVtoyMDHzzzTfw8fGBn59foc/Zu3dvCCFUC1Hkll8PuDLo537vMzIysGHDBrV2T5480TiPsmdcOQzCzs4OgOYvMu7u7mjbti3Wrl2rdRx4Qd5LuVyuMeTE3d0dnp6eBRqGQUTGjT3ARFRm/ec//8HGjRtx5coV+Pv7q+177733MG/ePLz33nto0qQJIiMjcfXq1WKrpXz58mjVqhWGDRuGpKQkLF26FDVr1sSIESMAAGZmZvjuu+/QpUsX+Pv7Y9iwYahcuTLu3buHQ4cOwcnJCbt27dLpuadNm4ZNmzahS5cuGDt2LMqXL48NGzYgLi4O27dvh5lZ4ftCQkNDMWjQICxfvhzXrl1D586doVAocOTIEYSGhmLMmDFaj+vUqROqVKmCd999F5MnT4a5uTl++OEHuLm5IT4+XtVuw4YN+Prrr/HWW2+hRo0aSE9Px7fffgsnJyd07doVgDRswc/PD1u2bEGtWrVQvnx51KtXD/Xq1cOqVavQqlUr1K9fHyNGjED16tWRlJSEqKgo3L17F2fPns339aWnp8PLywt9+vRBgwYN4ODggL///hunTp3Kc65nIio9GICJqMyqWbMm3nnnHY3eRQCYOXMmHj58iG3btuGXX35Bly5d8Oeff8Ld3b1Yavnkk0/w77//Yu7cuUhPT0f79u3x9ddfq3oxAWk536ioKPz3v//FypUr8fTpU3h4eCAoKAgffPCBzs9dsWJFHD9+HFOnTsWKFSvw4sULBAQEYNeuXejWrZvO5123bh0CAgLw/fffY/LkyXB2dkaTJk3QokWLPI+xtLTEzp07MWrUKMyYMQMeHh4YP348ypUrh2HDhqnahYSE4OTJk9i8eTOSkpLg7OyMZs2a4X//+5/aBWvfffcdPvroI0yYMAFZWVmYNWsW6tWrBz8/P5w+fRpz5szB+vXr8ejRI7i7u6Nhw4aYOXPma1+bnZ0dRo0ahb/++gs7duyAQqFAzZo18fXXX2PkyJE6v2dEZBxkQperNYiIiIiISimOASYiIiIik8IATEREREQmhQGYiIiIiEwKAzARERERmRQGYCIiIiIyKQzARPR/7daBAAAAAIAgf+tBLooAYEWAAQBYEWAAAFYEGACAFQEGAGBFgAEAWBFgAABWBBgAgJUAmg7gZZcpejUAAAAASUVORK5CYII=", "text/plain": [ "
" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "y = mean_rejection_data\n", "x = y.index.values\n", "\n", "plt.rcParams[\"figure.figsize\"] = (8, 5)\n", "plt.xlabel(\"Number of clusters\", fontsize=12)\n", "plt.ylabel(\"Rejection rate\", fontsize=12)\n", "plt.plot(x, y[\"uniform\"], label=\"Uniform Bootstrap\", color=\"blue\", marker=\"o\")\n", "plt.plot(x, y[\"cluster\"], label=\"Cluster Bootstrap\", color=\"red\", marker=\"o\")\n", "plt.legend()\n", "plt.suptitle(\"Comparison of Rejection Rates\", fontsize=15)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "We can see that when the number of clusters is low, it is particularly important to use the cluster robust bootstrap, since rejection with the regular bootstrap is excessive. For a large number of clusters, clustering naturally becomes less important. " ] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.8" }, "vscode": { "interpreter": { "hash": "e8a16b1bdcc80285313db4674a5df2a5a80c75795379c5d9f174c7c712f05b3a" } } }, "nbformat": 4, "nbformat_minor": 4 } ================================================ FILE: docs/source/estimagic/explanation/cluster_robust_likelihood_inference.md ================================================ (robust_likelihood_inference)= # Robust Likelihood inference (to be written.) In case of an urgent request for this guide, feel free to open an issue \[here\](). 
================================================ FILE: docs/source/estimagic/explanation/index.md ================================================ # Explanation ```{toctree} --- maxdepth: 1 --- bootstrap_ci bootstrap_montecarlo_comparison cluster_robust_likelihood_inference ``` ================================================ FILE: docs/source/estimagic/index.md ================================================ (estimagic)= # Estimagic *estimagic* is a subpackage of *optimagic* that helps you to fit nonlinear statistical models to data and perform inference on the estimated parameters. As a user, you need to code up the objective function that defines the estimator. This is either a likelihood (ML) function or a Method of Simulated Moments (MSM) objective function. Everything else is done by *estimagic*. Everything else means: - Optimize your objective function - Calculate asymptotic or bootstrapped standard errors and confidence intervals - Create publication quality tables - Perform sensitivity analysis on MSM models `````{grid} 1 2 2 2 --- gutter: 3 --- ````{grid-item-card} :text-align: center :img-top: ../_static/images/light-bulb.svg :class-img-top: index-card-image :shadow: md ```{button-link} tutorials/index.html --- click-parent: ref-type: ref class: stretched-link index-card-link sd-text-primary --- Tutorials ``` New users of estimagic should read this first. ```` ````{grid-item-card} :text-align: center :img-top: ../_static/images/books.svg :class-img-top: index-card-image :shadow: md ```{button-link} explanation/index.html --- click-parent: ref-type: ref class: stretched-link index-card-link sd-text-primary --- Explanations ``` Background information on key topics central to the package. 
```` ````{grid-item-card} :text-align: center :columns: 12 :img-top: ../_static/images/coding.svg :class-img-top: index-card-image :shadow: md ```{button-link} reference/index.html --- click-parent: ref-type: ref class: stretched-link index-card-link sd-text-primary --- API Reference ``` Detailed description of the estimagic API. ```` ````` ```{toctree} --- hidden: true maxdepth: 1 --- tutorials/index explanation/index reference/index ``` ================================================ FILE: docs/source/estimagic/reference/index.md ================================================ # estimagic API ```{eval-rst} .. currentmodule:: estimagic ``` (estimation)= ## Estimation ```{eval-rst} .. dropdown:: estimate_ml .. autofunction:: estimate_ml ``` ```{eval-rst} .. dropdown:: estimate_msm .. autofunction:: estimate_msm ``` ```{eval-rst} .. dropdown:: get_moments_cov .. autofunction:: get_moments_cov ``` ```{eval-rst} .. dropdown:: lollipop_plot .. autofunction:: lollipop_plot ``` ```{eval-rst} .. dropdown:: estimation_table .. autofunction:: estimation_table ``` ```{eval-rst} .. dropdown:: render_html .. autofunction:: render_html ``` ```{eval-rst} .. dropdown:: render_latex .. autofunction:: render_latex ``` ```{eval-rst} .. dropdown:: LikelihoodResult .. autoclass:: LikelihoodResult :members: ``` ```{eval-rst} .. dropdown:: MomentsResult .. autoclass:: MomentsResult :members: ``` (bootstrap)= ## Bootstrap ```{eval-rst} .. dropdown:: bootstrap .. autofunction:: bootstrap ``` ```{eval-rst} .. dropdown:: BootstrapResult .. autoclass:: BootstrapResult :members: ``` ================================================ FILE: docs/source/estimagic/tutorials/bootstrap_overview.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Bootstrap Tutorial\n", "\n", "This notebook contains a tutorial on how to use the bootstrap functionality provided by estimagic. 
We start with the simplest possible example of calculating standard errors and confidence intervals for an OLS estimator without as well as with clustering. Then we progress to more advanced examples.\n", "\n", "In the example here, we will work with the \"exercise\" example dataset taken from the seaborn library.\n", "\n", "The working example will be a linear regression to investigate the effects of exercise time on pulse." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "import pandas as pd\n", "import seaborn as sns\n", "import statsmodels.api as sm\n", "\n", "import estimagic as em" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Prepare the dataset" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "df = sns.load_dataset(\"exercise\", index_col=0)\n", "replacements = {\"1 min\": 1, \"15 min\": 15, \"30 min\": 30}\n", "df = df.replace({\"time\": replacements})\n", "df[\"constant\"] = 1\n", "\n", "df.head()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Doing a very simple bootstrap" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "The first thing we need is a function that calculates the bootstrap outcome, given an empirical or re-sampled dataset. The bootstrap outcome is the quantity for which you want to calculate standard errors and confidence intervals. In most applications those are just parameter estimates.\n", "\n", "In our case, we want to regress \"pulse\" on \"time\" and a constant. 
Our outcome function looks as follows:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def ols_fit(data):\n", " y = data[\"pulse\"]\n", " x = data[[\"constant\", \"time\"]]\n", " params = sm.OLS(y, x).fit().params\n", "\n", " return params" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "In general, the user-specified outcome function may return any pytree (e.g. numpy.ndarray, pandas.DataFrame, dict etc.). In the example here, it returns a pandas.Series." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Now we are ready to calculate confidence intervals and standard errors." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "results_without_cluster = em.bootstrap(data=df, outcome=ols_fit)\n", "results_without_cluster.ci()" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "results_without_cluster.se()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "The above function call represents the minimum that a user has to specify, making full use of the default options, such as drawing a 1_000 bootstrap draws, using the \"percentile\" bootstrap confidence interval, not making use of parallelization, etc.\n", "\n", "If, for example, we wanted to take 10_000 draws, while parallelizing on two cores, and using a \"bc\" type confidence interval, we would simply call the following:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "results_without_cluster2 = em.bootstrap(\n", " data=df, outcome=ols_fit, n_draws=10_000, n_cores=2\n", ")\n", "\n", "results_without_cluster2.ci(ci_method=\"bc\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Doing a clustered bootstrap" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "In the cluster robust variant of the bootstrap, the original dataset is divided into clusters according to the 
values of some user-specified variable, and then clusters are drawn uniformly with replacement in order to create the different bootstrap samples. \n", "\n", "In order to use the cluster robust bootstrap, we simply specify which variable to cluster by. In the example we are working with, it seems sensible to cluster on individuals, i.e. on the column \"id\" of our dataset." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "results_with_cluster = em.bootstrap(data=df, outcome=ols_fit, cluster_by=\"id\")\n", "\n", "results_with_cluster.se()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "We can see that the estimated standard errors are indeed of smaller magnitude when we use the cluster robust bootstrap. \n", "\n", "Finally, we can compare our bootstrap results to a regression on the full sample using statsmodels' OLS function.\n", "We see that the cluster robust bootstrap yields standard error estimates very close to the ones of the cluster robust regression, while the regular bootstrap seems to overestimate the standard errors of both coefficients.\n", "\n", "**Note**: We would not expect the asymptotic statsmodels standard errors to be exactly the same as the bootstrapped standard errors.\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "y = df[\"pulse\"]\n", "x = df[[\"constant\", \"time\"]]\n", "\n", "\n", "cluster_robust_ols = sm.OLS(y, x).fit(cov_type=\"cluster\", cov_kwds={\"groups\": df[\"id\"]})" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Splitting up the process" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "In many situations, the above procedure is enough. However, sometimes it may be important to split the bootstrapping process up into smaller steps. Examples for such situations are:\n", "\n", "1. You want to look at the bootstrap estimates\n", "2. 
You want to do a bootstrap with a low number of draws first and add more draws later without duplicated calculations\n", "3. You have more bootstrap outcomes than just the parameters\n", "\n", "### 1. Accessing bootstrap outcomes\n", "\n", "The bootstrap outcomes are stored in the results object you get back when calling the bootstrap function. " ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "result = em.bootstrap(data=df, outcome=ols_fit, seed=1234)\n", "my_outcomes = result.outcomes\n", "\n", "my_outcomes[:5]" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "To further compare the cluster bootstrap to the uniform bootstrap, let's plot the sampling distribution of the parameters on time. We can again see that the standard error is smaller when we cluster on the subject id. " ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "result_clustered = em.bootstrap(data=df, outcome=ols_fit, seed=1234, cluster_by=\"id\")\n", "my_outcomes_clustered = result_clustered.outcomes" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# clustered distribution in blue\n", "sns.histplot(\n", " pd.DataFrame(my_outcomes_clustered)[\"time\"], kde=True, stat=\"density\", linewidth=0\n", ")\n", "\n", "# non-clustered distribution in orange\n", "sns.histplot(\n", " pd.DataFrame(my_outcomes)[\"time\"],\n", " kde=True,\n", " stat=\"density\",\n", " linewidth=0,\n", " color=\"orange\",\n", ");" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Calculating standard errors and confidence intervals from existing bootstrap result" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "If you've already run ``bootstrap`` once, you can simply pass the existing result object to a new call of ``bootstrap``. 
Estimagic reuses the existing bootstrap outcomes and now only draws ``n_draws`` - ``n_existing`` outcomes instead of drawing entirely new ``n_draws``. Depending on the ``n_draws`` you specified (this is set to 1_000 by default), this may save considerable computation time. \n", "\n", "We can go on and compute confidence intervals and standard errors, just the same way as before, with several methods (e.g. \"percentile\" and \"bc\"), yet without duplicated evaluations of the bootstrap outcome function. " ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "my_results = em.bootstrap(\n", " data=df,\n", " outcome=ols_fit,\n", " existing_result=result,\n", ")\n", "my_results.ci(ci_method=\"t\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "You can use this to calculate confidence intervals with several methods (e.g. \"percentile\" and \"bc\") without duplicated evaluations of the bootstrap outcome function." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 2. Extending bootstrap results with more draws\n", "\n", "It is often the case that, for speed reasons, you set the number of bootstrap draws quite low, so you can look at the results earlier and later decide that you need more draws. \n", "\n", "As an example, we will take an initial sample of 500 draws. We then extend it with another 1500 draws. \n", "\n", "*Note*: It is very important to use a different random seed when you calculate the additional outcomes!!!" 
] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "initial_result = em.bootstrap(data=df, outcome=ols_fit, seed=5471, n_draws=500)\n", "initial_result.ci(ci_method=\"t\")" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "combined_result = em.bootstrap(\n", " data=df, outcome=ols_fit, existing_result=initial_result, seed=2365, n_draws=2000\n", ")\n", "combined_result.ci(ci_method=\"t\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 3. Using less draws than totally available bootstrap outcomes" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "You have a large sample of bootstrap outcomes but want to compute summary statistics only on a subset? No problem! Estimagic got you covered. You can simply pass any number of ``n_draws`` to your next call of ``bootstrap``, regardless of the size of the existing sample you want to use. We already covered the case where ``n_draws`` > ``n_existing`` above, in which case estimagic draws the remaining bootstrap outcomes for you.\n", "\n", "If ``n_draws`` <= ``n_existing``, estimagic takes a random subset of the existing outcomes - and voilà! " ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "subset_result = em.bootstrap(\n", " data=df, outcome=ols_fit, existing_result=combined_result, seed=4632, n_draws=500\n", ")\n", "subset_result.ci(ci_method=\"t\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Accessing the bootstrap samples" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "It is also possible to just access the bootstrap samples. You may do so, for example, if you want to calculate your bootstrap outcomes in parallel in a way that is not yet supported by estimagic (e.g. on a large cluster or super-computer)." 
] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from estimagic.bootstrap_samples import get_bootstrap_samples\n", "\n", "rng = np.random.default_rng(1234)\n", "my_samples = get_bootstrap_samples(data=df, rng=rng)\n", "my_samples[0]" ] } ], "metadata": { "kernelspec": { "display_name": "estimagic", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.14" }, "vscode": { "interpreter": { "hash": "e8a16b1bdcc80285313db4674a5df2a5a80c75795379c5d9f174c7c712f05b3a" } } }, "nbformat": 4, "nbformat_minor": 4 } ================================================ FILE: docs/source/estimagic/tutorials/estimation_tables_overview.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# How to generate publication quality tables\n" ] }, { "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "Estimagic can create publication quality tables of parameter estimates in LaTeX or HTML. It works with the results from `estimate_ml` and `estimate_msm` but also supports statsmodels results out of the box. \n", "\n", "You can get almost limitless flexibility if you split the table generation into two steps. The fist generates a DataFrame which you can customize to your liking, the second renders that DataFrame in LaTeX or HTML. If you are interested in this feature, search for \"render_inputs\" below." 
] }, { "cell_type": "code", "execution_count": 24, "metadata": {}, "outputs": [], "source": [ "# Make necessary imports\n", "import pandas as pd\n", "import statsmodels.formula.api as sm\n", "from IPython.core.display import HTML\n", "\n", "import estimagic as em\n", "from estimagic.config import EXAMPLE_DIR" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Create tables from statsmodels results" ] }, { "cell_type": "code", "execution_count": 25, "metadata": {}, "outputs": [], "source": [ "df = pd.read_csv(EXAMPLE_DIR / \"diabetes.csv\", index_col=0)\n", "mod1 = sm.ols(\"target ~ Age + Sex\", data=df).fit()\n", "mod2 = sm.ols(\"target ~ Age + Sex + BMI + ABP\", data=df).fit()\n", "models = [mod1, mod2]" ] }, { "cell_type": "code", "execution_count": 26, "metadata": {}, "outputs": [ { "data": { "text/html": [ "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
 target
 (1)(2)
Intercept152.00$^{*** }$152.00$^{*** }$
(3.61)(2.85)
Age301.00$^{*** }$37.20$^{ }$
(77.10)(64.10)
Sex17.40$^{ }$-107.00$^{* }$
(77.10)(62.10)
BMI787.00$^{*** }$
(65.40)
ABP417.00$^{*** }$
(69.50)
\n", "
Observations442442
R$^2$0.040.40
Adj. R$^2$0.030.40
Residual Std. Error75.9060
F Statistic8.06$^{***}$72.90$^{***}$
\n", "
Note:***p<0.01; **p<0.05; *p<0.1
" ], "text/plain": [ "" ] }, "execution_count": 26, "metadata": {}, "output_type": "execute_result" } ], "source": [ "HTML(em.estimation_table(models, return_type=\"html\"))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Adding estimagic results\n", "\n", "`estimate_ml` and `estimate_msm` can both generate summaries of estimation results. Those summaries are either DataFrames with the columns `\"value\"`, `\"standard_error\"`, `\"p_value\"` and `\"stars\"` or pytrees containing such DataFrames. \n", "\n", "For examples, check out our tutorials on [`estimate_ml`](likelihood_overview.ipynb) and [`estimate_msm`](msm_overview.ipynb).\n", "\n", "\n", "Assume we got the following DataFrame from an estimation summary:" ] }, { "cell_type": "code", "execution_count": 27, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
valuestandard_errorp_value
Intercept142.1233.141501.000000e-08
Age51.4562.718281.000000e-08
Sex-33.7891.618001.000000e-08
\n", "
" ], "text/plain": [ " value standard_error p_value\n", "Intercept 142.123 3.14150 1.000000e-08\n", "Age 51.456 2.71828 1.000000e-08\n", "Sex -33.789 1.61800 1.000000e-08" ] }, "execution_count": 27, "metadata": {}, "output_type": "execute_result" } ], "source": [ "params = pd.DataFrame(\n", " {\n", " \"value\": [142.123, 51.456, -33.789],\n", " \"standard_error\": [3.1415, 2.71828, 1.6180],\n", " \"p_value\": [1e-8] * 3,\n", " },\n", " index=[\"Intercept\", \"Age\", \"Sex\"],\n", ")\n", "params" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "You can either use just the params DataFrame or a dictionary containing \"params\" and additional information in `estimation_table`." ] }, { "cell_type": "code", "execution_count": 28, "metadata": {}, "outputs": [], "source": [ "mod3 = {\"params\": params, \"name\": \"target\", \"info\": {\"n_obs\": 445}}\n", "models = [mod1, mod2, mod3]" ] }, { "cell_type": "code", "execution_count": 29, "metadata": {}, "outputs": [ { "data": { "text/html": [ "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
 target
 (1)(2)(3)
Intercept152.00$^{*** }$152.00$^{*** }$142.00$^{*** }$
(3.61)(2.85)(3.14)
Age301.00$^{*** }$37.20$^{ }$51.50$^{*** }$
(77.10)(64.10)(2.72)
Sex17.40$^{ }$-107.00$^{* }$-33.80$^{*** }$
(77.10)(62.10)(1.62)
BMI787.00$^{*** }$
(65.40)
ABP417.00$^{*** }$
(69.50)
\n", "
Observations442442445
R$^2$0.040.40
Adj. R$^2$0.030.40
Residual Std. Error75.9060
F Statistic8.06$^{***}$72.90$^{***}$
\n", "
Note:***p<0.01; **p<0.05; *p<0.1
" ], "text/plain": [ "" ] }, "execution_count": 29, "metadata": {}, "output_type": "execute_result" } ], "source": [ "HTML(em.estimation_table(models, return_type=\"html\"))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Selecting the right return_type\n", "\n", "The following return types are supported:\n", "- `\"latex\"`: Returns a string that you can save and import into a LaTeX document\n", "- `\"html\"`: Returns a string that you can save and import into a HTML document.\n", "- `\"render_inputs\"`: Returns a dictionary with the following entries:\n", " - `\"body\"`: A DataFrame containing the main table\n", " - `\"footer\"`: A DataFrame containing the statisics\n", " - other stuff that you should ignore\n", "- `\"dataframe\"`: Returns a DataFrame you can look at in a notebook" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Use `render_inputs` for maximum flexibility\n", "\n", "As an example, let's assume we want to remove a few rows from the footer.\n", "\n", "Let's first look at the footer we get from `estimation_table`" ] }, { "cell_type": "code", "execution_count": 30, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
target
(1)(2)(3)
Observations442442445
R$^2$0.040.40
Adj. R$^2$0.030.40
Residual Std. Error75.9060
F Statistic8.06$^{***}$72.90$^{***}$
\n", "
" ], "text/plain": [ " target \n", " (1) (2) (3)\n", "Observations 442 442 445\n", "R$^2$ 0.04 0.40 \n", "Adj. R$^2$ 0.03 0.40 \n", "Residual Std. Error 75.90 60 \n", "F Statistic 8.06$^{***}$ 72.90$^{***}$ " ] }, "execution_count": 30, "metadata": {}, "output_type": "execute_result" } ], "source": [ "render_inputs = em.estimation_table(models, return_type=\"render_inputs\")\n", "footer = render_inputs[\"footer\"]\n", "footer" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Now we can remove the rows we don't need and render it to html. " ] }, { "cell_type": "code", "execution_count": 31, "metadata": {}, "outputs": [ { "data": { "text/html": [ "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
 target
 (1)(2)(3)
Intercept152.00$^{*** }$152.00$^{*** }$142.00$^{*** }$
(3.61)(2.85)(3.14)
Age301.00$^{*** }$37.20$^{ }$51.50$^{*** }$
(77.10)(64.10)(2.72)
Sex17.40$^{ }$-107.00$^{* }$-33.80$^{*** }$
(77.10)(62.10)(1.62)
BMI787.00$^{*** }$
(65.40)
ABP417.00$^{*** }$
(69.50)
\n", "
R$^2$0.040.40
Observations442442445
\n", "
Note:***p<0.01; **p<0.05; *p<0.1
" ], "text/plain": [ "" ] }, "execution_count": 31, "metadata": {}, "output_type": "execute_result" } ], "source": [ "render_inputs[\"footer\"] = footer.loc[[\"R$^2$\", \"Observations\"]]\n", "HTML(em.render_html(**render_inputs))" ] }, { "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "Using this 2-step-procedure, we can also easily add additional rows to the footer.\n", "\n", "Note that we add the row using `.loc[(\"Statsmodels\", )]` since the index of `render_inputs[\"footer\"]` is a MultiIndex.\n", "\n", "\n" ] }, { "cell_type": "code", "execution_count": 32, "metadata": {}, "outputs": [ { "data": { "text/html": [ "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
 target
 (1)(2)(3)
Intercept152.00$^{*** }$152.00$^{*** }$142.00$^{*** }$
(3.61)(2.85)(3.14)
Age301.00$^{*** }$37.20$^{ }$51.50$^{*** }$
(77.10)(64.10)(2.72)
Sex17.40$^{ }$-107.00$^{* }$-33.80$^{*** }$
(77.10)(62.10)(1.62)
BMI787.00$^{*** }$
(65.40)
ABP417.00$^{*** }$
(69.50)
\n", "
R$^2$0.040.40
Observations442442445
StatsmodelsYesYesNo
\n", "
Note:***p<0.01; **p<0.05; *p<0.1
" ], "text/plain": [ "" ] }, "execution_count": 32, "metadata": {}, "output_type": "execute_result" } ], "source": [ "render_inputs[\"footer\"].loc[(\"Statsmodels\",)] = [\"Yes\"] * 2 + [\"No\"]\n", "HTML(em.render_html(**render_inputs))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Advanced options \n", "\n", "Below is an exmample that demonstrates how to use advanced options to customize your table." ] }, { "cell_type": "code", "execution_count": 33, "metadata": {}, "outputs": [], "source": [ "stats_dict = {\n", " \"n_obs\": \"Observations\",\n", " \"rsquared\": \"R$^2$\",\n", " \"rsquared_adj\": \"Adj. R$^2$\",\n", " \"resid_std_err\": \"Residual Std. Error\",\n", " \"fvalue\": \"F Statistic\",\n", " \"show_dof\": True,\n", "}" ] }, { "cell_type": "code", "execution_count": 34, "metadata": {}, "outputs": [ { "data": { "text/html": [ "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
Table Latex(render_latex(**render_inputs))Title
 Dependent variable: target
 Model 1Model 2Model 3
Constant152.133$^{*** }$152.133$^{*** }$142.123$^{*** }$
(3.610)(2.853)(3.142)
Age301.161$^{*** }$37.241$^{ }$51.456$^{*** }$
(77.060)(64.117)(2.718)
Gender17.392$^{ }$-106.578$^{* }$-33.789$^{*** }$
(77.060)(62.125)(1.618)
BMI787.179$^{*** }$
(65.424)
ABP416.674$^{*** }$
(69.495)
\n", "
Observations442442445
R$^2$0.0350.400
Adj. R$^2$0.0310.395
Residual Std. Error75.888(df=439)59.976(df=437)
F Statistic8.059$^{***}$(df=2;439)72.913$^{***}$(df=4;437)
\n", "
Note:***p<0.01; **p<0.05; *p<0.1
" ], "text/plain": [ "" ] }, "execution_count": 34, "metadata": {}, "output_type": "execute_result" } ], "source": [ "HTML(\n", " em.estimation_table(\n", " models=models,\n", " return_type=\"html\",\n", " custom_param_names={\"Intercept\": \"Constant\", \"Sex\": \"Gender\"},\n", " custom_col_names=[\"Model 1\", \"Model 2\", \"Model 3\"],\n", " custom_col_groups={\"target\": \"Dependent variable: target\"},\n", " render_options={\"caption\": \"Table Latex(render_latex(**render_inputs))Title\"},\n", " stats_options=stats_dict,\n", " number_format=\"{0:.3f}\",\n", " )\n", ")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "***Note 1***: You can pass a dictionary for `custom_col_names` to rename specific columns, e.g. `custom_col_names={\"(1)\": \"Model 1\"}`, leaving names of the other columns at default values.\n", "\n", "***Note 2***: In addition to renaming the default column groups by passing a dictionary for `custom_col_groups`, you can also pass a list to create custom column groups, e.g. `custom_col_groups=[\"target\", \"target\", \"not target\"]` will group the first two columns under the name `\"target\"`, and the last column under the name `\"not target\"`.\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## LaTeX peculiarities\n", "\n", "By default, tables in `render_latex` are structured in compliance with `siunitx` package. This is done by setting column formats to `S` in the default rendering options defined internally. \n", "To get nicely formatted tables, you need to add the following to your LaTeX preamble:\n", "```latex\n", "\\usepackage{siunitx}\n", "\\sisetup{\n", " input-symbols = (),\n", " table-align-text-post = false\n", " group-digits = false,\n", " }\n", "```\n", "The first line in `\\sisetup` is necessary if you have parentheses in your table cells (e.g. 
when displaying standard errors or confidence intervals), otherwise LaTeX will raise an error.\n", "\n", "The second argument is necessary so that there is no spacing between the significance stars and the numerical values.\n", "\n", "The third line prevents digits in numbers being grouped into groups of threes, which is the default behaviour.\n", "This line is optional, but recommended.\n", "\n", "By default, whenever calling `render_latex`, a warning will be raised about this. To silence the warning, set `siunitx_warning=False` in the relevant function calls (when calling `estimation_table` with `return_type=tex` or when calling `render_latex`)\n", "\n", "If you don't want to generate `siunitx` style tables, you can pass `render_options={\"column_format\": ...}` (with your desired column format) to your function calls. \n", "\n", "You can influence the format of the output table with keyword arguments passed via `render_options`. For the list of supported keyword arguments see [the documentation of pandas.io.formats.style.Styler.to_latex](https://pandas.pydata.org/docs/reference/api/pandas.io.formats.style.Styler.to_latex.html)\n", "\n", "\n", "\n", "By default, `siunitx` will center table columns around the decimal point. This means, that if there is a number in a column that has a comparatively large number of symbols after the decimal point (e.g. when there is a number with scientific notation), there will be extra spacing between that column and the preceding one, since there is as much space reserved for the column before the decimal point, as there is after it. \n", "\n", "You can adjust the spacing between columns, by using the format `S[table-format =x.y]` for the numeric columns, where `x` and `y` control the space pre and post the decimal point, respectively. We further show a case with the described problem and the solution to that problem. 
For numbers with scientific notation, use `S[table-format=x.yez]`, where `y` reserves the space for the digits after the decimal point, and `z` reserves the space for the exponent.\n", "\n", "Compiling the following LaTeX table will result in extra spacing between columns `(2)` and `(3)`:" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "```latex\n", "\n", "\\begin{tabular}{lSSS}\n", " \\toprule\n", " & \\multicolumn{3}{c}{target} \\\\\n", " \\cmidrule(lr){2-4}\n", "\n", " & (1) & (2) & (3) \\\\\n", " \\midrule\n", " Intercept & 152.00$^{*** }$ & 152.00$^{*** }$ & 1.43e08$^{*** }$ \\\\\n", " & (3.61) & (2.85) & (3.14) \\\\\n", " Age & 301.00$^{*** }$ & 37.20$^{ }$ & 51.50$^{*** }$ \\\\\n", " & (77.10) & (64.10) & (2.72) \\\\\n", " Sex & 17.40$^{ }$ & -107.00$^{* }$ & -33.80$^{*** }$ \\\\\n", " & (77.10) & (62.10) & (1.62) \\\\\n", " BMI & & 787.00$^{*** }$ & \\\\\n", " & & (65.40) & \\\\\n", " ABP & & 417.00$^{*** }$ & \\\\\n", " & & (69.50) & \\\\\n", " \\midrule\n", " R$^2$ & 0.04 & 0.40 & \\\\\n", " Observations & \\multicolumn{1}{c}{442} & \\multicolumn{1}{c}{442} & \\multicolumn{1}{c}{445} \\\\\n", " \\midrule\n", " \\textit{Note:} & \\multicolumn{3}{r}{$^{***}$p$<$0.01;$^{**}$p$<$0.05;$^{*}$p$<$0.1} \\\\\n", " \\bottomrule\n", "\\end{tabular}\n", "```" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "We can get a nicer output by setting the format of the last column to, for example, `S[table-format=3.2e4]`, via passing `render_options={'column_format':'lSSS[table-format = 3.2e4]'}`. 
The resulting table of `render_latex` will look like the following:" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "```latex\n", "\n", "\\begin{tabular}{lSSS[table-format = 3.2e4]}\n", " \\toprule\n", " & \\multicolumn{3}{c}{target} \\\\\n", " \\cmidrule(lr){2-4}\n", "\n", " & (1) & (2) & (3) \\\\\n", " \\midrule\n", " Intercept & 152.00$^{*** }$ & 152.00$^{*** }$ & 1.43e08$^{*** }$ \\\\\n", " & (3.61) & (2.85) & (3.14) \\\\\n", " Age & 301.00$^{*** }$ & 37.20$^{ }$ & 51.50$^{*** }$ \\\\\n", " & (77.10) & (64.10) & (2.72) \\\\\n", " Sex & 17.40$^{ }$ & -107.00$^{* }$ & -33.80$^{*** }$ \\\\\n", " & (77.10) & (62.10) & (1.62) \\\\\n", " BMI & & 787.00$^{*** }$ & \\\\\n", " & & (65.40) & \\\\\n", " ABP & & 417.00$^{*** }$ & \\\\\n", " & & (69.50) & \\\\\n", " \\midrule\n", " R$^2$ & 0.04 & 0.40 & \\\\\n", " Observations & \\multicolumn{1}{c}{442} & \\multicolumn{1}{c}{442} & \\multicolumn{1}{c}{445} \\\\\n", " \\midrule\n", " \\textit{Note:} & \\multicolumn{3}{r}{$^{***}$p$<$0.01;$^{**}$p$<$0.05;$^{*}$p$<$0.1} \\\\\n", " \\bottomrule\n", "\\end{tabular}\n", "```" ] }, { "cell_type": "markdown", "metadata": {}, "source": [] } ], "metadata": { "@webio": { "lastCommId": null, "lastKernelId": null }, "interpreter": { "hash": "5cdb9867252288f10687117449de6ad870b49795ca695c868016dc0022895cce" }, "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.10" } }, "nbformat": 4, "nbformat_minor": 4 } ================================================ FILE: docs/source/estimagic/tutorials/index.md ================================================ # Estimagic Tutorials Estimagic has functions to estimate the parameters of maximum likelihood or simulation models. 
You provide a likelihood or moment simulation function. Estimagic produces parameter estimates and standard errors in a format that can be easily used to create publication quality latex or html tables. ```{toctree} --- maxdepth: 1 --- likelihood_overview msm_overview bootstrap_overview estimation_tables_overview ``` ================================================ FILE: docs/source/estimagic/tutorials/likelihood_overview.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Likelihood estimation\n", "\n", "This notebook shows how to do a simple maximum likelihood (ml) estimation with estimagic. As an illustrating example, we implement a simple linear regression model. This is the same example model used in the method of moments notebook.\n", "\n", "We proceed in 4 steps:\n", "\n", "\n", "1. Create a data generating process\n", "2. Set up a likelihood function\n", "3. Maximize the likelihood function\n", "4. Calculate standard errors, confidence intervals, and p-values\n", "\n", "The user only needs to do step 1 and 2. The rest is done by `estimate_ml`. \n", "\n", "To be very clear: Estimagic is not a package to estimate linear models or other models that are implemented in Stata, statsmodels or anywhere else. Its purpose is to estimate parameters with custom likelihood or method of simulated moments functions. We just use a linear regression model as an example of a very simple likelihood function.\n", "\n", "\n", "## Model:\n", "\n", "$$ y = \\beta_0 + \\beta_1 x + \\epsilon, \\text{ where } \\epsilon \\sim N(0, \\sigma^2)$$\n", "\n", "We aim to estimate $\\beta_0, \\beta_1, \\sigma^2$." 
] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "import pandas as pd\n", "from scipy.stats import norm\n", "\n", "import estimagic as em\n", "\n", "rng = np.random.default_rng(seed=0)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 1. Create a data generating process" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def simulate_data(params, n_draws):\n", " x = rng.normal(0, 1, size=n_draws)\n", " e = rng.normal(0, params.loc[\"sd\", \"value\"], size=n_draws)\n", " y = params.loc[\"intercept\", \"value\"] + params.loc[\"slope\", \"value\"] * x + e\n", " return pd.DataFrame({\"y\": y, \"x\": x})" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "true_params = pd.DataFrame(\n", " data=[[2, -np.inf], [-1, -np.inf], [1, 1e-10]],\n", " columns=[\"value\", \"lower_bound\"],\n", " index=[\"intercept\", \"slope\", \"sd\"],\n", ")\n", "true_params" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "data = simulate_data(true_params, n_draws=100)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 2. Define the `loglike` function" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def normal_loglike(params, data):\n", " norm_rv = norm(\n", " loc=params.loc[\"intercept\", \"value\"] + params.loc[\"slope\", \"value\"] * data[\"x\"],\n", " scale=params.loc[\"sd\", \"value\"],\n", " )\n", " contributions = norm_rv.logpdf(data[\"y\"])\n", " return {\"contributions\": contributions, \"value\": contributions.sum()}" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "A few remarks before we move on:\n", "\n", "1. There are numerically better ways to calculate the likelihood; we chose this implementation for brevity and readability. \n", "2. 
The loglike function takes params and other arguments. You are completely flexible with respect to the number and names of the other arguments as long as the first argument is params. \n", "3. The loglike function returns a dictionary with the entries \"contributions\" and \"value\". The \"contributions\" are the log likelihood evaluations of each individual in the dataset. The \"value\" are their sum. The \"value\" entry could be omitted, the \"contributions\" entry, however, is mandatory. " ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 3. Estimate the model" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "start_params = true_params.assign(value=[100, 100, 100])\n", "\n", "res = em.estimate_ml(\n", " loglike=normal_loglike,\n", " params=start_params,\n", " optimize_options={\"algorithm\": \"scipy_lbfgsb\"},\n", " loglike_kwargs={\"data\": data},\n", ")" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "res.summary().round(3)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 4. What's in the results?\n", "\n", "`LikelihoodResult` objects provide attributes and methods to calculate standard errors, confidence intervals, and p-values. For all three, several methods are available. You can even calculate cluster robust standard errors. 
\n", "\n", "A few examples are:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "res.params" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "res.cov(method=\"robust\")" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "res.se()" ] } ], "metadata": { "kernelspec": { "display_name": "optimagic", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.14" } }, "nbformat": 4, "nbformat_minor": 2 } ================================================ FILE: docs/source/estimagic/tutorials/msm_overview.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": "0", "metadata": {}, "source": [ "# Method of Simulated Moments (MSM)\n", "\n", "This tutorial shows you how to do a Method of Simulated Moments estimation in estimagic. The Method of Simulated Moments (MSM) is a nonlinear estimation principle that is very useful for fitting complicated models to the data. The only ingredient required is a function that simulates the model outcomes you observe in some empirical dataset. \n", "\n", "In the tutorial here, we will use a simple linear regression model. This is the same model which we use in the tutorial on maximum likelihood estimation.\n", "\n", "Throughout the tutorial, we only talk about MSM estimation. However, the more general case of indirect inference estimation works exactly the same way. \n", "\n", "\n", "## Steps of MSM estimation\n", "\n", "1. Load (simulate) empirical data \n", "2. Define a function to calculate estimation moments on the data \n", "3. Calculate the covariance matrix of the empirical moments (with ``get_moments_cov``)\n", "4. 
Define a function to simulate moments from the model \n", "5. Estimate the model, calculate standard errors, do sensitivity analysis (with ``estimate_msm``)\n", "\n", "## Example: Estimate the parameters of a regression model\n", "\n", "The model we consider here is a simple regression model with only one explanatory variable (plus a constant). The goal is to estimate the slope coefficients and the error variance from a simulated data set.\n", "\n", "The estimation mechanics are exactly the same for more complicated models. A model is always defined by a function that can take parameters (here: the mean, variance and lower_cutoff and upper_cutoff) and returns a number of simulated moments (mean, variance, soft_min and soft_max of simulated exam points).\n", "\n", "### Model:\n", "\n", "$$ y = \\beta_0 + \\beta_1 x + \\epsilon, \\text{ where } \\epsilon \\sim N(0, \\sigma^2)$$\n", "\n", "We aim to estimate $\\beta_0, \\beta_1, \\sigma^2$." ] }, { "cell_type": "code", "execution_count": null, "id": "1", "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "import pandas as pd\n", "import plotly.io as pio\n", "\n", "pio.renderers.default = \"notebook_connected\"\n", "\n", "import estimagic as em\n", "\n", "rng = np.random.default_rng(seed=0)" ] }, { "cell_type": "markdown", "id": "2", "metadata": {}, "source": [ "## 1. 
Simulate data" ] }, { "cell_type": "code", "execution_count": null, "id": "3", "metadata": {}, "outputs": [], "source": [ "def simulate_data(params, n_draws, rng):\n", " x = rng.normal(0, 1, size=n_draws)\n", " e = rng.normal(0, params.loc[\"sd\", \"value\"], size=n_draws)\n", " y = params.loc[\"intercept\", \"value\"] + params.loc[\"slope\", \"value\"] * x + e\n", " return pd.DataFrame({\"y\": y, \"x\": x})" ] }, { "cell_type": "code", "execution_count": null, "id": "4", "metadata": {}, "outputs": [], "source": [ "true_params = pd.DataFrame(\n", " data=[[2, -np.inf], [-1, -np.inf], [1, 1e-10]],\n", " columns=[\"value\", \"lower_bound\"],\n", " index=[\"intercept\", \"slope\", \"sd\"],\n", ")\n", "\n", "data = simulate_data(true_params, n_draws=100, rng=rng)" ] }, { "cell_type": "markdown", "id": "5", "metadata": {}, "source": [ "## 2. Calculate Moments" ] }, { "cell_type": "code", "execution_count": null, "id": "6", "metadata": {}, "outputs": [], "source": [ "def calculate_moments(sample):\n", " moments = {\n", " \"y_mean\": sample[\"y\"].mean(),\n", " \"x_mean\": sample[\"x\"].mean(),\n", " \"yx_mean\": (sample[\"y\"] * sample[\"x\"]).mean(),\n", " \"y_sqrd_mean\": (sample[\"y\"] ** 2).mean(),\n", " \"x_sqrd_mean\": (sample[\"x\"] ** 2).mean(),\n", " }\n", " return pd.Series(moments)" ] }, { "cell_type": "code", "execution_count": null, "id": "7", "metadata": {}, "outputs": [], "source": [ "empirical_moments = calculate_moments(data)\n", "empirical_moments" ] }, { "cell_type": "markdown", "id": "8", "metadata": {}, "source": [ "## 3. Calculate the covariance matrix of empirical moments\n", "\n", "The covariance matrix of the empirical moments (``moments_cov``) is needed for three things:\n", "1. to calculate the weighting matrix\n", "2. to calculate standard errors\n", "3. to calculate sensitivity measures\n", "\n", "We will calculate ``moments_cov`` via a bootstrap. Depending on your problem, there can be other ways to calculate the covariance matrix." 
] }, { "cell_type": "code", "execution_count": null, "id": "9", "metadata": {}, "outputs": [], "source": [ "moments_cov = em.get_moments_cov(\n", " data, calculate_moments, bootstrap_kwargs={\"n_draws\": 5_000, \"seed\": 0}\n", ")\n", "\n", "moments_cov" ] }, { "cell_type": "markdown", "id": "10", "metadata": {}, "source": [ "``get_moments_cov`` mainly just calls estimagic's bootstrap function. See our [bootstrap_tutorial](bootstrap_overview.ipynb) for background information. \n", "\n" ] }, { "cell_type": "markdown", "id": "11", "metadata": {}, "source": [ "## 4. Define a function to calculate simulated moments\n", "\n", "In a real world application, this is the step that would take most of the time. However, in our very simple example, all the work is already done by numpy." ] }, { "cell_type": "code", "execution_count": null, "id": "12", "metadata": {}, "outputs": [], "source": [ "def simulate_moments(params, n_draws=10_000, seed=0):\n", " rng = np.random.default_rng(seed)\n", " sim_data = simulate_data(params, n_draws, rng)\n", " sim_moments = calculate_moments(sim_data)\n", " return sim_moments" ] }, { "cell_type": "code", "execution_count": null, "id": "13", "metadata": {}, "outputs": [], "source": [ "simulate_moments(true_params)" ] }, { "cell_type": "markdown", "id": "14", "metadata": {}, "source": [ "## 5. Estimate the model parameters\n", "\n", "Estimating a model consists of the following steps:\n", "\n", "- Building a criterion function that measures a distance between simulated and empirical moments\n", "- Minimizing this criterion function\n", "- Calculating the Jacobian of the model\n", "- Calculating standard errors, confidence intervals and p-values\n", "- Calculating sensitivity measures\n", "\n", "This can all be done in one go with the ``estimate_msm`` function. This function has sensible default values, so you only need a minimum number of inputs. However, you can configure almost any aspect of the workflow via optional arguments. 
If you need even more control, you can call the lower level functions, which the now familiar ``estimate_msm`` function is built on, directly. " ] }, { "cell_type": "code", "execution_count": null, "id": "15", "metadata": {}, "outputs": [], "source": [ "start_params = true_params.assign(value=[100, 100, 100])\n", "\n", "res = em.estimate_msm(\n", " simulate_moments,\n", " empirical_moments,\n", " moments_cov,\n", " start_params,\n", " optimize_options=\"scipy_lbfgsb\",\n", ")" ] }, { "cell_type": "code", "execution_count": null, "id": "16", "metadata": {}, "outputs": [], "source": [ "res.summary()" ] }, { "cell_type": "markdown", "id": "17", "metadata": {}, "source": [ "## What's in the result?\n", "\n", "`MomentsResult` objects provide attributes and methods to calculate standard errors, confidence intervals and p-values. For all three, several methods are available. You can even calculate cluster robust standard errors.\n", "\n", "A few examples are:" ] }, { "cell_type": "code", "execution_count": null, "id": "18", "metadata": {}, "outputs": [], "source": [ "res.params" ] }, { "cell_type": "code", "execution_count": null, "id": "19", "metadata": {}, "outputs": [], "source": [ "res.cov(method=\"robust\")" ] }, { "cell_type": "code", "execution_count": null, "id": "20", "metadata": {}, "outputs": [], "source": [ "res.se()" ] }, { "cell_type": "markdown", "id": "21", "metadata": {}, "source": [ "## How to visualize sensitivity measures?" 
] }, { "cell_type": "code", "execution_count": null, "id": "22", "metadata": {}, "outputs": [], "source": [ "from estimagic import lollipop_plot\n", "\n", "sensitivity_data = res.sensitivity(kind=\"bias\").abs().T\n", "\n", "fig = lollipop_plot(sensitivity_data)\n", "\n", "fig = fig.update_layout(height=500, width=900)\n", "fig.show()" ] } ], "metadata": { "kernelspec": { "display_name": "estimagic", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.14" }, "vscode": { "interpreter": { "hash": "e8a16b1bdcc80285313db4674a5df2a5a80c75795379c5d9f174c7c712f05b3a" } } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: docs/source/explanation/explanation_of_numerical_optimizers.md ================================================ (explanation-of-numerical-optimizers)= # Introduction to basic types of numerical optimization algorithms There are hundreds of different numerical optimization algorithm. However, most of them build on a few basic principles. Knowing those principles helps to classify algorithms and thus allows you to connect information about new algorithms with the stuff you already know. The main principles we describe here are: - Derivative based line search algorithms - Derivative based trust region algorithms - Derivative free trust region algorithms - Derivative free direct search algorithms This covers a large range of the algorithms that come with optimagic. 
In contrast, the following classes of optimizers are also accessible via optimagic, but not yet covered in this overview: - Conjugate gradient methods - Genetic algorithms - Grid or random search - Bayesian Optimization For each class of algorithms we describe the basic idea, show a gif of a stylized implementation with a graphical explanation of each iteration and a gif that shows how a real algorithm of the class converges. All of the above algorithms are local optimization algorithms that can (and will in fact) get stuck in local optima. If you need a global optimum, you will need to start them from several starting points and take the best result. ## Derivative based line search algorithms ### Basic idea 1. Use first derivative to get search direction 1. Use approximated second derivative to guess step length 1. Use a line search algorithm to see how far to go in the search direction In other words, the algorithm first fixes a promising direction and then figures out how far it should go in that direction. The important insight here is that even though the parameter space might be high dimensional, the line search problem remains one dimensional and thus simple to solve. Moreover, the line search problem is typically not solved exactly but only approximately. The exact termination conditions for the line search problem are complicated, but most of the time the initial guess for the step length is accepted. ### Stylized implementation ```{image} ../../_static/images/stylized_line_search.gif ``` ### Convergence of a real algorithm ```{image} ../../_static/images/history_l-bfgs-b.gif ``` ## Derivative based trust-region algorithms ### Basic idea 1. Fix a trust region radius 1. Construct a Taylor expansion of the function based on function value, gradient, and (approximation to) Hessian 1. Minimize the Taylor expansion within the trust region 1. Evaluate function again at the argmin of the Taylor expansion 1. Compare expected and actual improvement 1. 
Accept the new parameters if actual vs. expected improvement is good enough. 1. Potentially modify the trust region radius 1. Go back to 2. In other words, the algorithm first fixes a maximum step length (the trust region radius) and then figures out in which direction to go. If the surrogate model (usually a quadratic Taylor expansion) approximates the function well, trust region algorithms can converge extremely fast. The main insight here is that evaluating the surrogate model is usually much cheaper than evaluating the actual criterion function and thus the trust region subproblem can be solved very fast. As can be seen in the stylized implementation, the approximation does not actually have to be very good. The only thing that matters is that it points the optimizer in the right direction. ### Stylized implementation ```{image} ../../_static/images/stylized_gradient_based_trust_region.gif ``` ### Convergence of a real algorithm ```{image} ../../_static/images/history_trust-ncg.gif ``` ## Derivative free trust region algorithms ### Basic Idea The basic idea is very similar to derivative based trust region algorithms. The only difference is that instead of a Taylor approximation which requires derivatives, we need to come up with another type of surrogate model. In order to fit this model, the algorithm evaluates the criterion function at a few points inside the trust region. Depending on how many points those are, the surrogate model is an interpolation or regression model. If there are very few points it might even be an underdetermined interpolation model. In that case some kind of regularization is needed. Note that for differentiable functions without closed form derivatives, one way to define the surrogate model would be a Taylor approximation calculated from numerical derivatives. 
However, this would be a rather inefficient choice because points that are spaced more evenly throughout the trust region provide more information about the criterion function than the numerical derivatives. ### Stylized implementation ```{image} ../../_static/images/stylized_gradient_free_trust_region.gif ``` ### Convergence of a real algorithm ```{image} ../../_static/images/history_cobyla.gif ``` ## Derivative free direct search algorithms ### Basic Idea 1. Evaluate function at points lying in a fixed pattern around the current point 1. Accept the best point as new current point 1. Potentially modify the size or spread of the pattern 1. Go back to 1. Direct search algorithms are also called pattern search algorithms. They can typically deal well with small amounts of noise, because only the ordering of function values is used, not the magnitudes. However, they are relatively slow compared to the other algorithms. ### Stylized implementation ```{image} ../../_static/images/stylized_direct_search.gif ``` ### Convergence of a real algorithm ```{image} ../../_static/images/history_nelder-mead.gif ``` ================================================ FILE: docs/source/explanation/implementation_of_constraints.md ================================================ (implementation_of_constraints)= # How constraints are implemented Most of the optimizers wrapped in optimagic cannot deal natively with anything but box constraints. So the problem they can solve is: $$ \min_{x \in \mathbb{R}^k} f(x) \quad \text{s.t.} \hspace{0.5cm} l \leq x \leq u $$ However, in most econometric applications, we also need other constraints. For example, we may require that some parameters sum to a value, form a covariance matrix, or are probabilities. 
More abstractly, the problem becomes: $$ \min_{x \in \mathbb{R}^k} f(x) \quad \text{s.t.} \hspace{0.5cm} l \leq x \leq u \text{ and } C(x) = 0 $$ There are two basic ways of converting optimizers, which, natively, can only deal with box constraints, into constrained optimizers: Reparametrization and penalties. Below, we explain what both approaches are, why we chose the reparametrization approach over penalties, and which reparametrizations we are using for each type of constraint. In this text, we focus on constraints that can be solved by optimagic via bijective and differentiable transformations. General nonlinear constraints do not fall into this category. If you want to use nonlinear constraints, you can still do so, but optimagic will simply pass the constraints to your chosen optimizer. See {ref}`constraints` for more details. ## Possible approaches ### Reparametrizations In the reparametrization approach, we need to find an invertible mapping $g : \mathbb{R}^{k'} \to \mathbb{R}^k$, and two new bounds $l'$ and $u'$ such that: $$ l' \leq \tilde{x} \leq u' \iff l \leq g(\tilde{x}) \leq u \text { and } C(g(\tilde{x})) = 0 $$ This means that: $$ \min_{\tilde{x} \in \mathbb{R}^{k'}} f(g(\tilde{x})) \quad \text{s.t.} \hspace{0.5cm} l' \leq \tilde{x} \leq u'\\ $$ is equivalent to the original minimization problem. This sounds more complicated than it is. Let's look at the simple example of a two dimensional parameter vector, where our constraint is that the two parameters have to sum to 5. $$ x = (x_1, x_2) f(x) = x_1^2 + 2 x_2^2 c(x) = x_1 + x_2 - 5 \tilde{x} = x_1 g(\tilde{x}) = (\tilde{x}, 5 - \tilde{x}) $$ Typically, users implement such reparametrizations manually and write functions to convert between the parameters of interest and their reparametrized version. optimagic does this for you, for a large number of constraints that are typically used in econometric applications. 
For this approach to be efficient, it is crucial that the reparametrizations preserve desirable properties of the original problem. In particular, the mapping $g$ should be differentiable and if possible linear. Moreover, the dimensionality of $\tilde{x}$ should be chosen as small as possible. optimagic only implements constraints that can be enforced with differentiable transformations and always achieves full dimensionality reduction. ### Penalties The penalty approach is conceptually much simpler. Whenever $C(x) \neq 0$, a penalty term is added to the criterion function. If the penalty term is large enough (e.g. as large as the criterion function at the start values), this penalty ensures that any x that does not satisfy the constraints can not be optimal. While the generality and conceptual simplicity of this approach is attractive, it also has its drawbacks. Applying penalties in a naive way can introduce kinks, discontinuities, and even local optima into the penalized criterion. ## What optimagic does We chose to implement constraints via reparametrizations for the following reasons: - Reparametrizations ensure that the criterion function is only evaluated at parameters that satisfy all constraints. This is not only efficient, but essential if the criterion function is only defined for such parameters. - Reparametrizations can often achieve a substantial dimensionality reduction. In particular, fixes and equality constraints are implemented at zero cost, i.e. as efficiently as if you directly plugged them into your original problem. This is important because fixes and equality constraints often make user code much nicer and more flexible. - It is easier to preserve desirable properties such as convexity and differentiability with reparametrizations rather than penalties. The constraints that can be implemented via reparametrizations are available for all optimizers. More general constraints are only available with optimizers that can deal natively with them. 
This includes all optimizers from the `nlopt` and `ipopt` libraries. ## The non-trivial reparametrizations Fixed parameters, equality, and pairwise equality constraints can be implemented trivially with reparametrizations by simply plugging them into the criterion function. Increasing and decreasing constraints are internally implemented as linear constraints. The following section explains how the other types of constraints are implemented: ### Covariance and sdcorr constraints The main difficulty with covariance and sdcorr constraints is to keep the (implied) covariance matrix valid, i.e. positive semi-definite. In both cases, $\tilde{x}$ contains the non-zero elements of the lower triangular cholesky factor of the (implied) covariance matrix. For covariance constraints, $g$ is then simply the product of the cholesky factor with its transpose. For the sdcorr covariance matrix, the product is further converted to standard deviations and the unique elements of a covariance matrix. Several papers show that the cholesky reparametrization is a very efficient way to optimize over covariance matrices. Examples are {cite}`Pinheiro1996` and {cite}`Groeneveld1994`. A limitation of this approach is that there can be no additional fixes, box constraints, or other constraints on any of the involved parameters. (linear-constraint-implementation)= ### Linear constraints Assume we have m linear constraints on an n-dimensional parameter vector. Then the set of all parameter vectors that satisfies the constraints can be written as: $$ \mathbf{X} \equiv \{\mathbf{x} \in \mathbb{R}^n \mid \mathbf{l} \leq \mathbf{Ax} \leq \mathbf{u}\} $$ We are looking for a set $\mathbf{\tilde{X}}$ that only satisfies box constraints and reparametrizations. The reparametrizations will turn out to be a linear mapping, and thus have a matrix representation, say M. 
We are good if the following holds: $$ x \in \mathbf{X} \iff \exists \mathbf{\tilde{x}} \in \mathbf{\tilde{X}} \text{s.t.} \mathbf{x} = \mathbf{M\tilde{x}} $$ Suitable choices of $\mathbf{\tilde{X}}$ and $\mathbf{M}$ are: $$ \mathbf{\tilde{X}} \equiv \{(\tilde{x}_1, \tilde{x}_2)^T \mid \mathbf{\tilde{x}}_1 \in \mathbb{R}^{k} \text{ and } \mathbf{l} \leq \mathbf{\tilde{x}}_2 \leq \mathbf{l}\} \mathbf{M} = \left[ {\begin{array}{cc} \mathbb{I}_n[k] \\ A \\ \end{array} } \right]^{-1} $$ where $k = m - n$ and $\mathbb{I}_n[k]$ are the k rows of the identity matrix that make all rows of $\mathbf{M}$ linearly independent. **Proof:** "$\Rightarrow$": Let $x\in \mathbf{X}$, then we define $\mathbf{\tilde{x}} = \mathbf{M}^{-1} x$. Claim: $\mathbf{\tilde{x}} \in \mathbf{\tilde{X}}$: \\ $$ \mathbf{\tilde{x}} = \mathbf{M}^{-1} x = \left[ {\begin{array}{cc} \mathbb{I}_n[k]x \\ Ax \\ \end{array} } \right] = (\tilde{x}_1, \tilde{x}_2)^T $$ where $\tilde{x}_1 \in \mathbb{R}^k$ and $\mathbf{l} \leq \mathbf{\tilde{x}}_2 \leq \mathbf{u}$ because $\mathbf{l} \leq \mathbf{Ax} \leq \mathbf{u}$. Thus $\mathbf{\tilde{x}} \in \mathbf{\tilde{X}}$. "$\Leftarrow$" (Proof by negation): Let $x \not\in \mathbf{X}$ and define $\mathbf{\tilde{x}} = \mathbf{M}^{-1} x$. Claim $\mathbf{\tilde{x}} \not\in \mathbf{\tilde{X}}$. By the same argument as above we can show, that, because $\neg(\mathbf{l} \leq \mathbf{Ax} \leq \mathbf{u})$, $\mathbf{\tilde{x}} \not\in \mathbf{\tilde{X}}$. The rank condition on M makes it clear that there can be at most as many linear constraints as involved parameters. This includes any box constraints on the involved parameters. ### Probability constraints A probability constraint on k parameters means that all parameters lie in $[0, 1]$ and their sum equals one. While those are all linear constraints, they cannot be implemented in the way described above, because there are k + 1 constraints for k parameters. 
Instead we do the following $$ \tilde{x} = (\tilde{x}_1, \tilde{x}_2, \ldots, \tilde{x}_{k - 1})\\ g(\tilde{x}) = (\frac{\tilde{x}_1}{1 + \sum_{i=1}^{k-1}\tilde{x}_i}, \frac{\tilde{x}_2}{1 + \sum_{i=1}^{k-1}\tilde{x}_i}, \ldots, \frac{1}{1 + \sum_{i=1}^{k-1}\tilde{x}_i})\\ l' = (0, 0, \ldots, 0) $$ A limitation of this approach is that there can be no additional fixes, box constraints or other constraints on any of the involved parameters. **References** ```{eval-rst} .. bibliography:: ../refs.bib :filter: docname in docnames ``` ================================================ FILE: docs/source/explanation/index.md ================================================ # Explanation This section provides background information on numerical topics and details of optimagic. It is completely optional and not necessary if you are just starting out. ```{toctree} --- maxdepth: 1 --- implementation_of_constraints internal_optimizers why_optimization_is_hard.ipynb explanation_of_numerical_optimizers tests_for_supported_optimizers numdiff_background ``` ================================================ FILE: docs/source/explanation/internal_optimizers.md ================================================ (internal_optimizer_interface)= # Internal optimizers for optimagic optimagic provides a large collection of optimization algorithm that can be used by passing the algorithm name as `algorithm` into `maximize` or `minimize`. Advanced users can also use optimagic with their own algorithm, as long as it conforms with the internal optimizer interface. The advantages of using the algorithm with optimagic over using it directly are: - You can collect the optimizer history and create criterion_plots and params_plots. - You can use flexible formats for your start parameters (e.g. nested dicts or namedtuples) - optimagic turns unconstrained optimizers into constrained ones. - You can use logging. - You get great error handling for exceptions in the criterion function or gradient. 
- You get a parallelized and customizable numerical gradient if you don't have a closed form gradient. - You can compare your optimizer with all the other optimagic optimizers on our benchmark sets. All of this functionality is achieved by transforming a more complicated user provided problem into a simpler problem and then calling "internal optimizers" to solve the transformed problem. (functions_and_classes_for_internal_optimizers)= ## Functions and classes for internal optimizers The functions and classes below are everything you need to know to add an optimizer to optimagic. To see them in action look at [this guide](../how_to/how_to_add_optimizers.ipynb) ```{eval-rst} .. currentmodule:: optimagic.mark ``` ```{eval-rst} .. dropdown:: mark.minimizer The `mark.minimizer` decorator is used to provide algorithm specific information to optimagic. This information is used in the algorithm selection tool, for better error handling and for processing of the user provided optimization problem. .. autofunction:: minimizer ``` ```{eval-rst} .. currentmodule:: optimagic.optimization.internal_optimization_problem ``` ```{eval-rst} .. dropdown:: InternalOptimizationProblem The `InternalOptimizationProblem` is optimagic's internal representation of objective functions, derivatives, bounds, constraints, and more. This representation is already pretty close to what most algorithms expect (e.g. parameters and bounds are flat numpy arrays, no matter which format the user provided). .. autoclass:: InternalOptimizationProblem() :members: ``` ```{eval-rst} .. currentmodule:: optimagic.optimization.algorithm ``` ```{eval-rst} .. dropdown:: InternalOptimizeResult This is what you need to create from the output of a wrapped algorithm. .. autoclass:: InternalOptimizeResult :members: ``` ```{eval-rst} .. dropdown:: Algorithm .. 
autoclass:: Algorithm :members: :exclude-members: with_option_if_applicable ``` (naming-conventions)= ## Naming conventions for algorithm specific arguments To make switching between different algorithm as simple as possible, we align the names of commonly used convergence and stopping criteria. We also align the default values for stopping and convergence criteria as much as possible. ```{eval-rst} You can find the harmonized names and value here: :ref:`algo_options`. ``` To align the names of other tuning parameters as much as possible with what is already there, simple have a look at the optimizers we already wrapped. For example, if you are wrapping a bfgs or lbfgs algorithm from some libray, try to look at all existing wrappers of bfgs algorithms and use the same names for the same options. ## Algorithms that parallelize Algorithms that evaluate the objective function or derivatives in parallel should only do so via `InternalOptimizationProblem.batch_fun`, `InternalOptimizationProblem.batch_jac` or `InternalOptimizationProblem.batch_fun_and_jac`. If you parallelize in any other way, the automatic history collection will stop to work. In that case, call `om.mark.minimizer` with `disable_history=True`. In that case you can either do your own history collection and add that history to `InternalOptimizeResult` or the user has to rely on logging. ## Nonlinear constraints (to be written) ================================================ FILE: docs/source/explanation/numdiff_background.md ================================================ # Numerical differentiation: methods In this section we explain the mathematical background of forward, backward and central differences. The main ideas in this chapter are taken from {cite}`Dennis1996`. x is used for the pandas DataFrame with parameters. We index the entries of x as a n-dimensional vector, where n is the number of variables in params_sr. 
The forward difference for the gradient is given by: $$ \nabla f(x) = \begin{pmatrix}\frac{f(x + e_0 * h_0) - f(x)}{h_0}\\ \frac{f(x + e_1 * h_1) - f(x)}{h_1}\\.\\.\\.\\ \frac{f(x + e_n * h_n) - f(x)}{h_n} \end{pmatrix} $$ The backward difference for the gradient is given by: $$ \nabla f(x) = \begin{pmatrix}\frac{f(x) - f(x - e_0 * h_0)}{h_0}\\ \frac{f(x) - f(x - e_1 * h_1)}{h_1}\\.\\.\\.\\ \frac{f(x) - f(x - e_n * h_n)}{h_n} \end{pmatrix} $$ The central difference for the gradient is given by: $$ \nabla f(x) = \begin{pmatrix}\frac{f(x + e_0 * h_0) - f(x - e_0 * h_0)}{2 h_0}\\ \frac{f(x + e_1 * h_1) - f(x - e_1 * h_1)}{2 h_1}\\.\\.\\.\\ \frac{f(x + e_n * h_n) - f(x - e_n * h_n)}{2 h_n} \end{pmatrix} $$ For the optimal stepsize h the following rule of thumb is applied: $$ h_i = (1 + |x[i]|) * \sqrt\epsilon $$ With the above in mind it is easy to calculate the Jacobian matrix. The calculation of the finite difference w.r.t. each variable of params_sr yields a vector, which is the corresponding column of the Jacobian matrix. The optimal stepsize remains the same. For the Hessian matrix, we repeatedly call the finite differences functions. As we allow for central finite differences in the second order derivative only, the deductions for forward and backward, are left to the interested reader: $$ f_{i,j}(x) = &\frac{f_i(x + e_j * h_j) - f_i(x - e_j * h_j)}{h_j} \\ = &\frac{\frac{f(x + e_j * h_j + e_i * h_i) - f(x + e_j * h_j - e_i * h_i)}{h_i} - \frac{ f(x - e_j * h_j + e_i * h_i) - f(x - e_j * h_j - e_i * h_i) }{h_i}}{h_j} \\ = &\frac{ f(x + e_j * h_j + e_i * h_i) - f(x + e_j * h_j - e_i * h_i) }{h_j * h_i} \\ &+ \frac{ - f(x - e_j * h_j + e_i * h_i) + f(x - e_j * h_j - e_i * h_i) }{h_j * h_i} $$ For the optimal stepsize a different rule is used: $$ h_i = (1 + |x[i]|) * \sqrt[3]\epsilon $$ Similar deviations lead to the elements of the Hessian matrix calculated by backward and central differences. **References:** ```{eval-rst} .. 
bibliography:: ../refs.bib :filter: docname in docnames ``` ================================================ FILE: docs/source/explanation/tests_for_supported_optimizers.md ================================================ # How supported optimization algorithms are tested optimagic provides a unified interface that supports a large number of optimization algorithms from different libraries. Additionally, it allows putting constraints on the optimization problem. To test the external interface of all supported algorithms, we consider different criterion (benchmark) functions and test each algorithm with every type of constraint. ## Benchmark functions for testing ### Trid function > $f({x}) = \Sigma^{D}_{i=1}(x_{i} - 1)^2 - \Sigma^{D}_{i=2}(x_i x_{i-1})$ ### Rotated Hyper Ellipsoid function > $f({x}) = \Sigma^{D}_{i=1} \Sigma^{i}_{j=1}x_j^2$ ### Rosenbrock function > $\Sigma^{D-1}_{i=1}(100(x_i+1 - x_i^2)^2 + (x_i - 1)^2)$ ### Sphere function > $f({x}) = \Sigma^{D}_{i=1} ix_{i}^2$ ## How testcases are implemented We consider different implementations of each criterion and its gradient. All algorithms accept criterion functions specified in a dictionary, while a subset also accepts the criterion specified in scalar form. Likewise, if specified, the gradient of a criterion can be an np.ndarray or a pandas object. We test for all possible cases. For instance, for rotated hyper ellipsoid, we implement the following functions: - rotated_hyper_ellipsoid_scalar_criterion - rotated_hyper_ellipsoid_dict_criterion: This provides a dictionary wherein the `contributions` and `root_contributions` keys present the criterion as a least squares problem, relevant when we are testing a least squares algorithm. - rotated_hyper_ellipsoid_gradient - rotated_hyper_ellipsoid_pandas_gradient: Computes the gradient of the rotated hyper ellipsoid function, as a pandas object. - rotated_hyper_ellipsoid_criterion_and_gradient These criterion functions are specified in the `examples` directory. 
For an overview of all constraints supported in optimagic, please see [this how-to guide]. We write several test functions, each corresponding to the case of one constraint. Given the constraint, the test function considers all possible combinations of the algorithm, whether to maximize or to minimize, criterion function implementation, gradient implementation for that criterion (if provided), and whether `criterion_and_derivative` has been provided or not. Below, we show the calculations behind the true values, for each testcase (one criterion and one constraint). ### Trid: Solutions for three-dimension case > $f({x}) = (x_1-1)^2 + (x_2-1)^2 + (x_3-1)^2 - x_2 x_1 - x_3 x_2$ ```{eval-rst} .. dropdown:: No constraints .. code-block:: python constraints = [] :math:`x* = (3, 4, 3)` ``` ```{eval-rst} .. dropdown:: Fixed constraints .. code-block:: python constraints = [{"loc": "x_1", "type": "fixed", "value": 1}] :math:`x_{1} = 1 \rightarrow f(x) = (x_2 - 1)^2 + (x_3 - 1)^2 - x_2 - x_3 x_2 \\ \Rightarrow \frac{\delta f({x})}{\delta x_2} = 2x_2 - 3 - x_3 = 0 \Rightarrow x_3 = 2x_2 - 3\\ \Rightarrow \frac{\delta f({x})}{\delta x_3} = 2x_3 - 2 - x_2 = 0 \Rightarrow x_2 = 2x_3 - 2\\ \Rightarrow x_2 = \frac{8}{3} , \quad x_3 = \frac{7}{3}\\ \rightarrow x* = (1,\frac{8}{3}, \frac{7}{3})` ``` ```{eval-rst} .. dropdown:: Probability constraint .. code-block:: python constraints = [{"loc": ["x_1", "x_2"], "type": "probability"}] :math:`x_{1} + x_{2} = 1, \quad 0 \leq x_1 \leq 1, \quad 0 \leq x_2 \leq 1 \\ \rightarrow f({x}) = 3x_1^2 - 3x_1 - 3x_3 + x_3^2 + x_1 x_3 + 2 \\ \Rightarrow \frac{\delta f({x})}{\delta x_1} = 6x_1 - 3 + x_3 = 0 \Rightarrow x_3 = 3 - 6x_1\\ \Rightarrow \frac{\delta f({x})}{\delta x_3} = 2x_3 - 3 + x_1 = 0 \Rightarrow x_1 = 3 - 2x_3\\ \Rightarrow x_1 = \frac{3}{11}, \quad x_3 = \frac{15}{11}\\ \rightarrow x* = (\frac{3}{11}, \frac{8}{11}, \frac{15}{11})` ``` ```{eval-rst} .. dropdown:: Increasing constraint .. 
code-block:: python constraints = [{"loc": ["x_2", "x_3"], "type": "increasing"}] :math:`\mathcal{L}({x_i}) = (x_1 - 1)^2 + (x_2 - 1)^2 + (x_3 - 1)^2 - x_1 x_2 - x_3 x_2 - \lambda(x_3 - x_2)\\ \Rightarrow \frac{\delta \mathcal{L}}{\delta x_1} = 2(x_1 - 1) - x_2 = 0\\ \Rightarrow \frac{\delta \mathcal{L}}{\delta x_2} = 2(x_2 - 1) - x_1 - x_3 + \lambda = 0\\ \Rightarrow \frac{\delta \mathcal{L}}{\delta x_3} = 2(x_3 - 1) - x_2 - \lambda = 0\\ \Rightarrow \frac{\delta \mathcal{L}}{\delta \lambda} = - x_3 + x_2 = 0\\ \Rightarrow x_2 = 2(x_1 - 1) = x_3 = \frac{10}{3}\\ \Rightarrow 2(x_2 - 1) - x_1 - 2 = 0\\ \Rightarrow 4(x_1 - 1) - 2 - x_1 - 2 = 0\\ \Rightarrow 3x_1 - 8 = 0 \Rightarrow x_1 = \frac{8}{3}\\ \rightarrow x* = (\frac{8}{3}, \frac{10}{3}, \frac{10}{3})` ``` ```{eval-rst} .. dropdown:: Decreasing constraint .. code-block:: python constraints = [{"loc": ["x_1", "x_2"], "type": "decreasing"}] Solution unavailable. ``` ```{eval-rst} .. dropdown:: Equality constraint .. code-block:: python constraints = [{"loc": ["x_1", "x_2", "x_3"], "type": "equality"}] :math:`x_{1} = x_{2} = x_{3} = x \\ \rightarrow f({x}) = x^2 - 6x + 3\\ \Rightarrow \frac{\delta f({x})}{\delta x} = 2x - 6 = 0\\ \Rightarrow x = 3\\ \rightarrow x* = (3,3,3)` ``` ```{eval-rst} .. dropdown:: Pairwise equality constraint .. code-block:: python constraints = [{"locs": ["x_1", "x_2"], "type": "pairwise_equality"}] :math:`x_{1} = x_{2} \\ \rightarrow f({x}) = 2(x_1 - 1)^2 + (x_3 - 1)^2 - x_1^2 - x_3 x_1\\ \Rightarrow \frac{\delta f({x})}{\delta x_1} = 2x_1 - x_3 - 4 = 0 \Rightarrow x_3 = 2x_1 - 4\\ \Rightarrow \frac{\delta f({x})}{\delta x_3} = 2x_3 - x_1 - 2 = 0 \Rightarrow x_1 = 2x_3 - 2\\ \Rightarrow x_1 = \frac{10}{3}, x_3 = \frac{8}{3}\\ \rightarrow x* = (\frac{10}{3},\frac{10}{3},\frac{8}{3})` ``` ```{eval-rst} .. dropdown:: Covariance constraint .. code-block:: python constraints = [{"loc": ["x_1", "x_2", "x_3"], "type": "covariance"}] Solution unavailable. ``` ```{eval-rst} .. 
dropdown:: sdcorr constraint .. code-block:: python constraints = [{"loc": ["x_1", "x_2", "x_3"], "type": "sdcorr"}] Solution unavailable. ``` ```{eval-rst} .. dropdown:: Linear constraint .. code-block:: python constraints = [{"loc": ["x_1", "x_2"], "type": "linear", "weights": [1, 2], "value": 4}] :math:`x_1 + 2x_2 = 4\\ \mathcal{L}({x_i}) = (x_1 - 1)^2 + (x_2 - 1)^2 + (x_3 - 1)^2 - x_1 x_2 - x_3 x_2 - \lambda(x_1 +2x_2-4)\\ \Rightarrow \frac{\delta \mathcal{L}}{\delta x_1} = 2(x_1 - 1) - x_2 - \lambda = 0\\ \Rightarrow \frac{\delta \mathcal{L}}{\delta x_2} = 2(x_2 - 1) - x_1 - x_3 - 2\lambda = 0\\ \Rightarrow \frac{\delta \mathcal{L}}{\delta x_3} = 2(x_3 - 1) - x_2 = 0 \\ \Rightarrow \frac{\delta \mathcal{L}}{\delta \lambda} = - x_1 - 2x_2 + 4 = 0\\ \Rightarrow x_2 = 2(x_3 - 1), \quad x_1 = 4 - 2x_2\\ \Rightarrow 2(4 - 2x_2 - 1) - x_2 = x_2 - 1 - 2 + x_2 - \frac{x_2}{4} - \frac{1}{2}\\ \rightarrow x* = (\frac{32}{27}, \frac{38}{27}, \frac{46}{27})` ``` ### Rotated Hyper Ellipsoid: Solutions for three-dimension case > $f({x}) = x^2_1 + (x^2_1 + x^2_2) + (x^2_1 + x^2_2 + x^2_3)$ > > > ```{eval-rst} > > .. dropdown:: No constraints > > > > .. code-block:: python > > > > constraints = [] > > > > :math:`x* = (0, 0, 0)` > > ``` > > > > ```{eval-rst} > > .. dropdown:: Fixed constraints > > > > .. code-block:: python > > > > constraints = [{"loc": "x_1", "type": "fixed", "value": 1}] > > > > :math:`x_{1} = 1 > > \rightarrow x* = (1, 0, 0)` > > ``` > > > > ```{eval-rst} > > .. dropdown:: Probability constraints > > > > .. 
code-block:: python > > > > constraints = [{"loc": ["x_1", "x_2"], "type": "probability"}] > > > > :math:`x_{1} + x_{2} = 1, \quad 0 \leq x_1 \leq 1, \quad 0 \leq x_2 \leq 1 \\ > > \mathcal{L}({x_i}) = x^2_1 + (x^2_1 + x^2_2) + (x^2_1 + x^2_2 + x^2_3)\\ > > -\lambda(x_1 +x_2-1)\\ > > \Rightarrow \frac{\delta \mathcal{L}}{\delta x_1}\\ > > = 6x_1 - \lambda = 0\\ > > \Rightarrow \frac{\delta \mathcal{L}}{\delta x_2}\\ > > = 4x_2 - \lambda = 0\\ > > \Rightarrow \frac{\delta \mathcal{L}}{\delta x_3}\\ > > = 2 x_3 = 0\\ > > \Rightarrow \frac{\delta \mathcal{L}}{\delta \lambda} \\ > > = -x_1 - x_2 + 1 = 0\\ > > \rightarrow x* = (\frac{2}{5}, \frac{3}{5}, 0),\\ > > \quad f({x*}) = \frac{6}{5}` > > ``` > > > > ```{eval-rst} > > .. dropdown:: Increasing constraints > > > > .. code-block:: python > > > > constraints = [{"loc": ["x_2", "x_3"], "type": "increasing"}] > > > > Not binding :math:`\rightarrow x* = (0, 0, 0)` > > > > ``` > > > > ```{eval-rst} > > .. dropdown:: Decreasing constraints > > > > .. code-block:: python > > > > constraints = [{"loc": ["x_1", "x_2"], "type": "decreasing"}] > > > > Not binding :math:`\rightarrow x* = (0, 0, 0)` > > > > ``` > > > > ```{eval-rst} > > .. dropdown:: Equality constraints > > > > .. code-block:: python > > > > constraints = [{"loc": ["x_1", "x_2", "x_3"], "type": "equality"}] > > > > Not binding :math:`\rightarrow x* = (0, 0, 0)` > > > > ``` > > > > ```{eval-rst} > > .. dropdown:: Pairwise equality constraints > > > > .. code-block:: python > > > > constraints = [{"locs": ["x_1", "x_2"], "type": "pairwise_equality"}] > > > > Not binding :math:`\rightarrow x* = (0, 0, 0)` > > > > ``` > > > > ```{eval-rst} > > .. dropdown:: Covariance constraints > > > > .. code-block:: python > > > > constraints = [{"loc": ["x_1", "x_2", "x_3"], "type": "covariance"}] > > > > Not binding :math:`\rightarrow x* = (0, 0, 0)` > > > > > > ``` > > > > ```{eval-rst} > > .. dropdown:: sdcorr constraints > > > > .. 
code-block:: python > > > > constraints = [{"loc": ["x_1", "x_2", "x_3"], "type": "sdcorr"}] > > > > Not binding :math:`\rightarrow x* = (0, 0, 0)` > > > > ``` > > > > ```{eval-rst} > > .. dropdown:: Linear constraints > > > > .. code-block:: python > > > > constraints = [{"loc": ["x_1", "x_2"], "type": "linear", "weights": [1, 2], "value": 4}] > > > > :math:`x_1 + 2x_2 = 4\\\mathcal{L}({x_i}) = x^2_1 + (x^2_1 + x^2_2) + > > (x^2_1 + x^2_2 + x^2_3) -\lambda(x_1 +2x_2-4)\\ > > \Rightarrow \frac{\delta\mathcal{L}}{\delta x_1} = 6x_1 - \lambda = 0\\ > > \Rightarrow \frac{\delta \\ > > \mathcal{L}}{\delta x_2} = 4x_2 - 2\lambda = 0\\ > > \Rightarrow \frac{\delta \\ > > \mathcal{L}}{\delta x_3} = 2 x_3 = 0\\ > > \Rightarrow \frac{\delta \\ > > \mathcal{L}}{\delta \lambda} = -x_1 - 2x_2 + 4 = 0\\ > > \rightarrow x* = (\frac{4}{7}, \frac{12}{7}, 0)` > > > > > > > > > > > > > > ``` ### Rosenbrock: Solutions for three-dimension case > $f({x}) = 100(x_2 - x_1^2) + (x_1 - 1)^2$ Global minima: $x* = (1, 1, 1)$ > ```{eval-rst} > .. dropdown:: No constraints > > .. code-block:: python > > constraints = [] > > :math:`x* = (1, 1, 1)` > > ``` > > ```{eval-rst} > .. dropdown:: Fixed constraints > > .. code-block:: python > > constraints = [{"loc": "x_1", "type": "fixed", "value": 1}] > > :math:`x_{1} = 1 \rightarrow x* = (1, 1, 1)` > ``` > > ```{eval-rst} > .. dropdown:: Fixed constraints > > .. code-block:: python > > constraints = [{"loc": ["x_1", "x_2"], "type": "probability"}] > > No solution available. > ``` > > ```{eval-rst} > .. dropdown:: Increasing constraints > > .. code-block:: python > > constraints = [{"loc": ["x_2", "x_3"], "type": "increasing"}] > > Not binding :math:`\rightarrow x* = (1, 1, 1)` > > ``` > > ```{eval-rst} > .. dropdown:: Decreasing constraints > > .. code-block:: python > > constraints = [{"loc": ["x_1", "x_2"], "type": "decreasing"}] > > Not binding :math:`\rightarrow x* = (1, 1, 1)` > ``` > > ```{eval-rst} > .. dropdown:: Equality constraints > > .. 
code-block:: python > > constraints = [{"loc": ["x_1", "x_2", "x_3"], "type": "equality"}] > > Not binding :math:`\rightarrow x* = (1, 1, 1)` > ``` > > ```{eval-rst} > .. dropdown:: Pairwise equality constraints > > .. code-block:: python > > constraints = [{"locs": ["x_1", "x_2"], "type": "pairwise_equality"}] > > Not binding :math:`\rightarrow x* = (1, 1, 1)` > ``` > > ```{eval-rst} > .. dropdown:: Covariance constraints > > .. code-block:: python > > constraints = [{"loc": ["x_1", "x_2", "x_3"], "type": "covariance"}] > > Not binding :math:`\rightarrow x* = (1, 1, 1)` > ``` > > ```{eval-rst} > .. dropdown:: sdcorr constraints > > .. code-block:: python > > constraints = [{"loc": ["x_1", "x_2", "x_3"], "type": "sdcorr"}] > > Not binding :math:`\rightarrow x* = (1, 1, 1)` > ``` > > ```{eval-rst} > .. dropdown:: Linear constraints > > .. code-block:: python > > constraints = [{"loc": ["x_1", "x_2"], "type": "linear", "weights": [1, 2], "value": 4}] > > No solution available. > ``` [this how-to guide]: ../how_to/how_to_constraints.md ================================================ FILE: docs/source/explanation/why_optimization_is_hard.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Why optimization is difficult\n", "\n", "This tutorial shows why optimization is difficult and why you need some knowledge in order to solve optimization problems efficiently. It is meant for people who have no previous experience with numerical optimization and wonder why there are so many optimization algorithms and still none that works for all problems. For each potential problem we highlight, we also give some ideas on how to solve it. 
\n", "\n", "\n", "If you simply want to learn the mechanics of doing optimization with optimagic, check out the [quickstart guide](../tutorials/optimization_overview.ipynb)\n", "\n", "\n", "The take-home message of this notebook can be summarized as follows:\n", "\n", "- The only algorithms that are guaranteed to solve all problems are grid search or other algorithms that evaluate the criterion function almost everywhere in the parameter space.\n", "- If you have more than a hand full of parameters, these methods would take too long.\n", "- Thus, you have to know the properties of your optimization problem and have knowledge about different optimization algorithms in order to choose the right algorithm for your problem. \n", "\n", "This tutorial uses variants of the sphere function from the [quickstart guide](../tutorials/optimization_overview.ipynb)." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "import seaborn as sns\n", "\n", "import optimagic as om" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def sphere(x):\n", " return x @ x\n", "\n", "\n", "def sphere_gradient(x):\n", " return 2 * x" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Why grid search is infeasible\n", "\n", "Sampling based optimizers and grid search require the parameter space to be bounded in all directions. Let's assume we know that the optimum of the sphere function lies between -0.5 and 0.5, but don't know where it is exactly. 
\n", "\n", "In order to get a precision of 2 digits with grid search, we require the following number of function evaluations (depending on the number of parameters):" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "dimensions = np.arange(10) + 1\n", "n_evals = 100**dimensions\n", "sns.lineplot(x=dimensions, y=n_evals);" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "If you have 10 dimensions and evaluating your criterion function takes one second, you need about 3 billion years on a 1000 core cluster. Many of the real world criterion functions have hundreds of parameters and take minutes to evaluate once. This is called the curse of dimensionality." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Sampling based algorithms typically fix the number of criterion evaluations and apply them a bit smarter than algorithms that rummage the search space randomly. However, these smart tricks only work under additional assumptions. Thus, either you need to make assumptions on your problem or you will get the curse of dimensionality through the backdoor again. For easier analysis, assume we fix the number of function evaluations in a grid search instead of a sampling based algorithm and want to know which precision we can get, depending on the dimension:\n", "\n", "For 1 million function evaluations, we can expect the following precision:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "dimensions = np.arange(10) + 1\n", "precision = 1e-6 ** (1 / dimensions)\n", "sns.lineplot(x=dimensions, y=precision);" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## How derivatives can solve the curse of dimensionality\n", "\n", "Derivative based methods do not try to evaluate the criterion function everywhere in the search space. Instead, they start at some point and go \"downhill\" from there. 
The gradient of the criterion function indicates which direction is downhill. Then there are different ways of determining how far to go in that direction. The time it takes to evaluate a derivative increases at most linearly in the number of parameters. Using the derivative information, optimizers can often find an optimum with very few function evaluations." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## How derivative based methods can fail\n", "\n", "To see how derivative based methods can fail, we use simple modifications of the sphere function. " ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "rng = np.random.default_rng(seed=0)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def sphere_with_noise(x, rng):\n", " return sphere(x) + rng.normal(scale=0.02)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "start_params = np.arange(5)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "grid = np.linspace(-1, 1, 1000)\n", "sns.lineplot(\n", " x=grid,\n", " y=(grid**2) + rng.normal(scale=0.02, size=len(grid)),\n", ");" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "res = om.minimize(\n", " fun=sphere_with_noise,\n", " params=start_params,\n", " algorithm=\"scipy_lbfgsb\",\n", " logging=False,\n", " fun_kwargs={\"rng\": rng},\n", ")\n", "\n", "res.success" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "res.params" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "res.message" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "So the algorithm failed, but at least tells you that it did not succed. Let's look at a different kind of numerical noise that could come from rounding. 
" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def piecewise_constant_sphere(x):\n", " return sphere(x.round(2))" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "sns.lineplot(x=grid, y=grid.round(2) ** 2);" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "res = om.minimize(\n", " fun=piecewise_constant_sphere,\n", " params=start_params,\n", " algorithm=\"scipy_lbfgsb\",\n", ")\n", "\n", "res" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "This time, the algorithm failed silently." ] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.14" } }, "nbformat": 4, "nbformat_minor": 4 } ================================================ FILE: docs/source/how_to/how_to_add_optimizers.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "metadata": { "vscode": { "languageId": "plaintext" } }, "source": [ "# How to add optimizers to optimagic\n", "\n", "This is a hands-on guide that shows you how to use custom optimizers with optimagic or\n", "how to contribute an optimizer to the optimagic library.\n", "\n", "We have many [examples of optimizers](https://github.com/optimagic-dev/optimagic/tree/main/src/optimagic/optimizers) that are already part of optimagic and you can learn a lot from looking at \n", "those. However, only looking at the final results might be a bit intimidating and does\n", "not show the process of exploring a new optimizer library and gradually developing a \n", "wrapper. \n", "\n", "This guide is there to fill the gap. 
It tells the story of how the `pygmo_gaco`\n", "optimizer was added to optimagic by someone who was unfamiliar with pygmo or the \n", "gaco algorithm. \n", "\n", "The steps of adding an algorithm are roughly as follows:\n", "\n", "1. **Understand how to use the algorithm**: Play around with the algorithm you want to \n", "add in a notebook and solve some simple problems with it. Only move on to the next step \n", "after you have a solid understanding. This is completely unrelated to optimagic and only\n", "about the algorithm implementation you want to wrap. \n", "2. **Understand how the algorithm works**: Read documentation,\n", "research papers and other resources to find out why this algorithm was created and what \n", "problems it is supposed to solve really well. \n", "3. **Implement the minimal wrapper**: Learn about the `om.mark.minimizer` decorator as \n", "well as the `om.InternalOptimizationProblem` and the `om.Algorithm` classes. Implement a \n", "minimal version of your wrapper and test it.\n", "4. **Complete and refactor the wrapper**: Make sure that all convergence criteria, \n", "stopping criteria, and tuning parameters the algorithm supports can be passed to your \n", "wrapper. Also check that the algorithm gets everything it needs to achieve maximum \n", "performance (e.g. closed form derivatives and batch function evaluators). Now is also \n", "the time to clean-up and refactor your code, especially if you wrap multiple optimizers \n", "from a library.\n", "5. **Align the wrapper with optimagic conventions**: Use harmonized names wherever \n", "a convention exists. Think about good names everywhere else. Set stopping criteria \n", "similar to other optimizers and try to adhere to our [design philosophy](style_guide) \n", "when it comes to tuning parameters. \n", "6. **Integrate your code into optimagic**: Learn how to add an optional dependency to \n", "optimagic, where you need to put your code and how to add tests and documentation. 
\n", "\n", "\n", "## Gen AI Policy \n", "\n", "It is ok to use GenAI and AI based coding assistants to speed up the process of adding \n", "an optimizer to optimagic. They can be very useful for step 1 and 2. However, AI models \n", "often fail completely when filling out the arguments of `om.mark.minimizer`, when you \n", "ask them to come up with good names for tuning parameters or when you auto-generate the \n", "documentation. \n", "\n", "Even for step 1 and 2 you should not use an AI Model naively, but upload a paper or \n", "documentation page to provide context to the AI.\n", "\n", "Our policy is therefore:\n", "1. Only use AI for drafts that you double-check; Never rely on AI producing correct results \n", "2. Be transparent about your use of AI \n", "\n", "We will reject all Pull Requests that violate this policy. " ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 1. Understand how to use the algorithm\n", "\n", "Understanding how to use an algorithm means that you are at least able to solve a \n", "simple optimization problem (like a sphere function or a rosenbrock function). \n", "\n", "The best starting point for this are usually tutorials or example notebooks from the \n", "documentation. An AI model can also be a good idea. \n", "\n", "The things you need to find out for any new algorithm are:\n", "\n", "1. How to code up the objective function \n", "2. How to run an optimization at default values\n", "3. How to pass tuning parameters \n", "4. How to pass bounds, constraints, derivatives, batch evaluators, etc. \n", "5. How to get results back from the optimizer\n", "\n", "### Objective functions in pygmo\n", "\n", "To add pygmo_gaco, let's start by looking at the pygmo [tutorials](https://esa.github.io/pygmo2/tutorials/tutorials.html). Objective functions are coded up via the Problem class. 
We skip using [pre-defined problems](https://esa.github.io/pygmo2/tutorials/using_problem.html) because they will not help us and directly go to [user defined problems](https://esa.github.io/pygmo2/tutorials/coding_udp_simple.html).\n", "\n", "The following is copied from the documentation:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "import pygmo as pg\n", "\n", "\n", "class sphere_function:\n", " def fitness(self, x):\n", " return [sum(x * x)]\n", "\n", " def get_bounds(self):\n", " return ([-1, -1], [1, 1])\n", "\n", "\n", "prob = pg.problem(sphere_function())" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "This looks simple enough. No subclassing is required, `fitness` implements the objective\n", "function, which returns the objective value as a list of a scalar and `get_bounds` returns \n", "the bounds. We can immediately see how we would adjust this for any scalar objective \n", "function. \n", "\n", "### How to run an optimization at default values\n", "\n", "After copy pasting from a few tutorials we find the following:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# The initial population\n", "pop = pg.population(prob, size=20)\n", "# The algorithm; ker needs to be at most the population size to avoid errors\n", "algo = pg.algorithm(pg.gaco(ker=20))\n", "# The actual optimization process\n", "pop = algo.evolve(pop)\n", "# Getting the best individual in the population\n", "best_fitness = pop.get_f()[pop.best_idx()]\n", "print(best_fitness)\n", "best_x = pop.get_x()[pop.best_idx()]\n", "print(np.round(best_x, 4))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "It looks like the optimization worked, even though the precision is not great. The true optimal function value is 0 and the true optimal parameters are [0, 0]. But global algorithms like gaco are almost never precise, so this is good enough. 
\n", "\n", "We can also see that pygmo is really organized around concepts that are specific to genetic optimizers. Examples are `population` and `evolve`. The optimagic wrapper will hide the details (i.e. users don't have to create a population) but still allow full customization (the population size will be an algorithm specific option that can be set by the user).\n", "\n", "### How to pass tuning parameters\n", "\n", "We already saw in the previous step that tuning parameters like `ker` are passed when the \n", "algorithm is created. \n", "\n", "All supported tuning parameters of gaco are listed and described \n", "[here](https://esa.github.io/pygmo2/algorithms.html#pygmo.gaco). Unfortunately, the \n", "description is not great so we'll have to look into the [paper](https://digital.csic.es/bitstream/10261/54957/3/Extended_ant_colony_2009.pdf) for details. \n", "\n", "\n", "### How to pass bounds, constraints, derivatives, batch evaluators, etc. \n", "\n", "- We already saw how to pass bounds via the Problem class \n", "- gaco does not support any other constraints, so we don't need to pass them \n", "- gaco is derivative free, so we don't need to pass derivatives \n", "- gaco can parallelize, so we need to find out how to pass a batch version of the \n", "objective function\n", "\n", "After searching around in the pygmo documentation, we find out that our Problem needs to \n", "be extended with a [`batch_fitness`](https://esa.github.io/pygmo2/problem.html#pygmo.problem.batch_fitness)\n", "and our algorithm needs to know about [`pg.bfe()`](https://esa.github.io/pygmo2/bfe.html).\n", "In our previous example it will look like this:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "import pygmo as pg\n", "\n", "\n", "class sphere_function:\n", " def fitness(self, x):\n", " return [sum(x * x)]\n", "\n", " def get_bounds(self):\n", " return ([-1, -1], [1, 1])\n", "\n", " # dvs represents a batch 
of parameter vectors at which the objective function is\n", " # evaluated. However it is stored in an unintuitive format that needs to be reshaped\n", " # to get at the actual parameter vectors.\n", " def batch_fitness(self, dvs):\n", " dim = len(self.get_bounds()[0])\n", " x_list = list(dvs.reshape(-1, dim))\n", " # we don't actually need to parallelize to find out how batch evaluators work\n", " # and optimagic will make it really easy to parallelize this later on.\n", " eval_list = [self.fitness(x)[0] for x in x_list]\n", " evals = np.array(eval_list)\n", " return evals\n", "\n", "\n", "prob = pg.problem(sphere_function())\n", "\n", "pop = pg.population(prob, size=20)\n", "\n", "# creating the algorithm now requires 3 steps\n", "pygmo_uda = pg.gaco(ker=20)\n", "pygmo_uda.set_bfe(pg.bfe())\n", "algo = pg.algorithm(pygmo_uda)\n", "\n", "pop = algo.evolve(pop)\n", "best_fitness = pop.get_f()[pop.best_idx()]\n", "print(best_fitness)\n", "best_x = pop.get_x()[pop.best_idx()]\n", "print(np.round(best_x, 4))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "For this how-to guide we leave it at this basic exploration of the pygmo library. If you actually contributed an optimizer to optimagic, you would have to explore much more and document your exploration to convince us that you understand the library you wrap in detail. " ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### How to get results back \n", "\n", "The results are stored as part of the evolved population" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "print(\"Best function value: \", pop.get_f()[pop.best_idx()][0])\n", "print(\"Best parameters: \", pop.get_x()[pop.best_idx()])\n", "print(\"Number of function evaluations: \", pop.problem.get_fevals())" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 2. Understand how the algorithm works\n", "\n", "Here we want to find out as much as possible about the algorithm. 
Common questions \n", "that should be answered are:\n", "- For which kind of problems and situations was it designed?\n", "- How does it work (intuitively)?\n", "- Are there any papers, blogposts or other sources of information on the algorithm? \n", "- Which tuning parameters does it have and what do they mean? \n", "- Are there known limitations? \n", "\n", "### For which kind of problems and situations was it designed \n", "\n", "gaco is a global optimizer that does not use derivative information. It should not be\n", "used if you only need a local optimum or if you have derivatives. Other algorithms would \n", "be more efficient and more precise there. \n", "\n", "Since gaco can evaluate the objective function in parallel it is designed for problems \n", "with expensive objective functions. \n", "\n", "\n", "### How does it work (intuitively)\n", "\n", "Ant colony optimization is a class of optimization algorithms modeled on the\n", "actions of an ant colony. Artificial \"ants\" (e.g. simulation agents) locate\n", "optimal solutions by moving through a parameter space representing all\n", "possible solutions. Real ants lay down pheromones directing each other to\n", "resources while exploring their environment. The simulated \"ants\" similarly\n", "record their positions and the quality of their solutions, so that in later\n", "simulation iterations more ants locate better solutions.\n", "\n", "The generalized ant colony algorithm generates future generations of ants by\n", "using a multi-kernel gaussian distribution based on three parameters (i.e.,\n", "pheromone values) which are computed depending on the quality of each\n", "previous solution. The solutions are ranked through an oracle penalty\n", "method.\n", "\n", "\n", "### Are there any papers, blogposts or other sources of information on the algorithm? \n", "\n", "gaco was proposed in M. Schlueter, et al. (2009). Extended ant colony optimization for \n", "non-convex mixed integer non-linear programming. 
Computers & Operations Research.\n", "\n", "See [here](https://digital.csic.es/bitstream/10261/54957/3/Extended_ant_colony_2009.pdf) for a free pdf. \n", "\n", "### Which tuning parameters does it have and what do they mean? \n", "\n", "The following is not just copied from the documentation but extended by reading the\n", "paper. It is super important to provide as much information as possible for every \n", "tuning parameter: \n", "\n", "- gen (int): number of generations.\n", "- ker (int): number of solutions stored in the solution archive. Must be <= the population\n", " size. \n", "- q (float): convergence speed parameter. This parameter manages the convergence speed\n", " towards the found minima (the smaller the faster). It must be positive and can be\n", " larger than 1. The default is 1.0 until **threshold** is reached. Then it\n", " is set to 0.01.\n", "- oracle (float): oracle parameter used in the penalty method.\n", "- acc (float): accuracy parameter for maintaining a minimum penalty\n", " function's values distances.\n", "- threshold (int): when the iteration counter reaches the threshold the\n", " convergence speed is set to 0.01 automatically. To deactivate this effect\n", " set the threshold to stopping.maxiter which is the largest allowed\n", " value.\n", "- n_gen_mark (int): parameter that determines the convergence speed of the standard \n", " deviations. This must be an integer.\n", "- impstop (int): if a positive integer is assigned here, the algorithm will count the \n", " runs without improvements, if this number exceeds the given value, the algorithm \n", " will be stopped.\n", "- evalstop (int): maximum number of function evaluations.\n", "- focus (float): this parameter makes the search for the optimum greedier\n", " and more focused on local improvements (the higher the greedier). If the\n", " value is very high, the search is more focused around the current best\n", " solutions. 
Values larger than 1 are allowed.\n", "- memory (bool): if True, memory is activated in the algorithm for multiple calls.\n", "- seed (int): seed used by the internal random number generator (default is random).\n", "\n", "\n", "### Are there known limitations \n", "\n", "No. \n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 3. Implement the minimal wrapper\n", "\n", "\n", "### Learn the relevant functions and classes\n", "\n", "Before you implement a minimal wrapper, you need to familiarize yourself with a few\n", "important [classes and functions](functions_and_classes_for_internal_optimizers) \n", "you will need. \n", "\n", "- The `mark.minimizer` decorator \n", "- The `Algorithm` class \n", "- The `InternalOptimizationProblem` class \n", "- The `InternalOptimizeResult` class \n", "\n", "**Your task will be to subclass `Algorithm`. Your subclass must be decorated with\n", "`mark.minimizer` and override `Algorithm._solve_internal_problem`. `_solve_internal_problem`\n", "takes an `InternalOptimizationProblem` and returns an `InternalOptimizeResult`**\n", "\n", "```{note}\n", "Users of optimagic never create instances of `InternalOptimizationProblem` nor \n", "do they call the `_solve_internal_problem` methods of algorithms. Instead they call \n", "`minimize` or `maximize` which are much more convenient and flexible. \n", "\n", "`minimize` and `maximize` will then create an `InternalOptimizationProblem` from the \n", "user's inputs, call the `_solve_internal_problem` method and postprocess it to create an \n", "OptimizeResult. \n", "\n", "To summarize: The public `minimize` interface is optimized for user-friendliness. The \n", "`InternalOptimizationProblem` is optimized for easy wrapping of external libraries. \n", "```\n", "\n", "Below we define a heavily commented minimal version of a wrapper for pygmo's gaco \n", "algorithm. We stay as close as possible to the pygmo examples we have worked with \n", "before and ignore most tuning parameters for now. 
\n", "\n", "\n", "### Write the minimal implementation" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from dataclasses import dataclass\n", "\n", "from numpy.typing import NDArray\n", "\n", "import optimagic as om\n", "from optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult\n", "from optimagic.optimization.internal_optimization_problem import (\n", " InternalOptimizationProblem,\n", ")\n", "from optimagic.typing import AggregationLevel, PositiveInt\n", "\n", "try:\n", " import pygmo as pg\n", "\n", " IS_PYGMO_INSTALLED = True\n", "except ImportError:\n", " IS_PYGMO_INSTALLED = False\n", "\n", "\n", "@om.mark.minimizer(\n", " # you can pick the name; convention is lowercase with underscores\n", " name=\"pygmo_gaco\",\n", " # the type of problem this optimizer can solve -> scalar problems; Other optimizers\n", " # solve likelihood or least_squares problems.\n", " solver_type=AggregationLevel.SCALAR,\n", " # is the optimizer available? -> only if pygmo is installed\n", " is_available=IS_PYGMO_INSTALLED,\n", " # is the optimizer a global optimizer? -> yes\n", " is_global=True,\n", " # does the optimizer need the jacobian? -> no, gaco is derivative free\n", " needs_jac=False,\n", " # does the optimizer need the hessian? -> no, gaco is derivative free\n", " needs_hess=False,\n", " # does the optimizer support parallelism? -> yes\n", " supports_parallelism=True,\n", " # does the optimizer support bounds? -> yes\n", " supports_bounds=True,\n", " # does the optimizer support linear constraints? -> no\n", " supports_linear_constraints=False,\n", " # does the optimizer support nonlinear constraints? -> no\n", " supports_nonlinear_constraints=False,\n", " # should the history be disabled? -> no\n", " disable_history=False,\n", ")\n", "# All algorithms need to be frozen dataclasses.\n", "@dataclass(frozen=True)\n", "class PygmoGaco(Algorithm):\n", " # for now only set one parameter to get things running. 
The rest will come later.\n", " stopping_maxiter: PositiveInt = 1000\n", " n_cores: int = 1\n", "\n", " def _solve_internal_problem(\n", " self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n", " ) -> InternalOptimizeResult:\n", " # create a pygmo problem from the internal optimization problem\n", " # This is just slightly more abstract than before and actually simpler because\n", " # we have problem.batch_fun.\n", "\n", " n_cores = self.n_cores\n", "\n", " class PygmoProblem:\n", " def fitness(self, x):\n", " # problem.fun is not just the `fun` that was passed to `minimize` by\n", " # the user. It is a wrapper around fun with added error handling,\n", " # history collection, and reparametrization to enforce constraints.\n", " # Moreover, it always works on flat numpy arrays as parameters and\n", " # does not have additional arguments. The magic of optimagic is to\n", " # create this internal `fun` from the user's `fun`, so you don't have\n", " # to deal with constraints, weird parameter formats and similar when\n", " # implementing the wrapper.\n", " return [problem.fun(x)]\n", "\n", " def get_bounds(self):\n", " # problem.bounds is not just the `bounds` that was passed to `minimize`\n", " # by the user, which could have been a dictionary or some other non-flat\n", " # format. 
`problem.bounds` always contains flat arrays with lower and\n", " # upper bounds because this makes it easy to write wrappers.\n", " return (problem.bounds.lower, problem.bounds.upper)\n", "\n", " def batch_fitness(self, dvs):\n", " # The processing of dvs is pygmo specific.\n", " dim = len(self.get_bounds()[0])\n", " x_list = list(dvs.reshape(-1, dim))\n", " # problem.batch_fun is a parallelized version of problem.fun.\n", " eval_list = problem.batch_fun(x_list, n_cores)\n", " evals = np.array(eval_list)\n", " return evals\n", "\n", " prob = pg.problem(PygmoProblem())\n", " pop = pg.population(prob, size=20)\n", " pygmo_uda = pg.gaco(ker=20)\n", " pygmo_uda.set_bfe(pg.bfe())\n", " algo = pg.algorithm(pygmo_uda)\n", " pop = algo.evolve(pop)\n", " best_fun = pop.get_f()[pop.best_idx()][0]\n", " best_x = pop.get_x()[pop.best_idx()]\n", " n_fun_evals = pop.problem.get_fevals()\n", " # For now we only use a few fields of the InternalOptimizeResult.\n", " out = InternalOptimizeResult(\n", " x=best_x,\n", " fun=best_fun,\n", " n_fun_evals=n_fun_evals,\n", " )\n", " return out" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Test the minimal wrapper directly\n", "\n", "So now that we have a wrapper, what do we do with it? And how can we be sure it works?\n", "\n", "We'll first try it out directly with the `SphereExampleInternalOptimizationProblem`. \n", "This is only for debugging and testing purposes. A user would never create an \n", "InternalOptimizationProblem and call an algorithm with it. It's called \"Internal\" for \n", "a reason!" 
] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from optimagic.optimization.internal_optimization_problem import (\n", " SphereExampleInternalOptimizationProblem,\n", ")\n", "\n", "problem = SphereExampleInternalOptimizationProblem()\n", "\n", "gaco = PygmoGaco()\n", "\n", "result = gaco._solve_internal_problem(problem, x0=np.array([1.0, 1.0]))\n", "\n", "print(result.fun)\n", "print(result.x)\n", "print(result.n_fun_evals)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Use the minimal wrapper in minimize\n", "\n", "The internal testing gives us some confidence that the wrapper works correctly and would \n", "have been good for debugging if it didn't. But now we want to test the wrapper in the\n", "way it would be used later: via `minimize`\n", "\n", "With this we also get all the benefits of optimagic, from history collection and \n", "criterion plots to flexible parameter formats. " ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "res = om.minimize(\n", " fun=lambda x: x @ x,\n", " params=np.arange(5),\n", " algorithm=PygmoGaco,\n", " bounds=om.Bounds(lower=-np.ones(5), upper=np.ones(5)),\n", ")\n", "\n", "om.criterion_plot(res, monotone=True)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 4 Complete and refactor the wrapper\n", "\n", "To keep things simple, we left out almost all tuning parameters of the gaco algorithm \n", "when we wrote the minimal wrapper. \n", "\n", "Now it's time to add them. You can add them one by one and make sure nothing breaks by \n", "testing your wrapper after each change - both with the internal problem and via \n", "minimize. \n", "\n", "Moreover, our code looks quite messy currently. Despite being a minimal wrapper, the \n", "`_solve_internal_problem` method is quite long, unstructured and hard to read. 
\n", "\n", "The result of completing and refactoring the wrapper is too long to be repeated in the \n", "notebook. Instead you can look at the actual [implementation in optimagic](\n", "https://github.com/optimagic-dev/optimagic/blob/ba2678753587f91cea54de69ff76cb3dcb4257d4/src/optimagic/optimizers/pygmo_optimizers.py#L70)\n", "\n", "\n", "The PygmoGaco class now contains all tuning parameters we identified in step 2 as\n", "dataclass fields. They all have very useful type-hints that don't just show whether\n", "a parameter is an int, str or float but also which values it can take (e.g. PositiveInt).\n", "\n", "`_solve_internal_problem` is now also much cleaner. It mainly maps our more descriptive \n", "names of tuning parameters to the old pygmo names and then calls a function called \n", "`_minimize_pygmo` that does all the heavy lifting and can be re-used for other pygmo \n", "optimizers. \n", "\n", "The arguments to `mark.minimizer` have not changed. They always need to be set correctly,\n", "even for minimal working examples. " ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 5. Align the wrapper with optimagic conventions\n", "\n", "To make switching between different algorithms as simple as possible, we align the names \n", "of commonly used convergence and stopping criteria. We also align the default values for \n", "stopping and convergence criteria as much as possible. \n", "\n", "You can find the harmonized names and values [here](algo_options_docs). \n", "\n", "To align the names of other tuning parameters as much as possible with what is already \n", "there, simply have a look at the optimizers we already wrapped. For example, if you are \n", "wrapping a bfgs or lbfgs algorithm from some library, try to look at all existing wrappers \n", "of bfgs algorithms and use the same names for the same options. 
\n", "\n", "You can see what this means for the gaco algorithm [here](\n", "https://github.com/optimagic-dev/optimagic/blob/ba2678753587f91cea54de69ff76cb3dcb4257d4/src/optimagic/optimizers/pygmo_optimizers.py#L70)\n", "\n", "In the future we will provide much more extensive guidelines for harmonization. \n", "\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": "## 6. Integrate your code into optimagic\n\nSo far you could have worked in a Jupyter Notebook. Integrating your code into\noptimagic only requires a few small changes:\n\n1. Add new dependencies to the `[tool.pixi.feature.test.dependencies]` section of\n`pyproject.toml` and run `pixi install` to update the lock file. Then re-create the\nenvironment to make sure that the environment is the same as we will use for continuous\nintegration. If your dependencies don't work on all platforms (e.g. linux only packages),\nskip this entire step and reach out to a core contributor for help.\n2. Save the code for your algorithm wrapper in a .py file in `optimagic.algorithms`.\nUse an existing file if you wrap another algorithm from a library we already had.\nOtherwise, create a new file.\n3. Run `pre-commit run --all-files`. This will trigger an automatic code generation\nthat fully integrates your wrapper into our algorithm selection tool.\n4. Run `pytest`. This will run at least a few tests for your new algorithm. Add more\ntests for algorithm specific things (e.g. tests that make sure tuning parameters have\nthe intended effects).\n5. Write documentation. The documentation should contain everything you figured out in\nstep 2. You can either write it into the docstring of your algorithm class (preferred,\nas this is what we will do for all algorithms in the long run) or in `algorithms.md`\nin the documentation.\n6. Create a pull request [in the optimagic repository](https://github.com/optimagic-dev/optimagic)\nand ask for a review." 
} ], "metadata": { "kernelspec": { "display_name": "optimagic", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.15" } }, "nbformat": 4, "nbformat_minor": 2 } ================================================ FILE: docs/source/how_to/how_to_algorithm_selection.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "(how-to-select-algorithms)=\n", "# How to select a local optimizer\n", "\n", "This guide explains how to choose a local optimizer that works well for your problem. \n", "Depending on your [strategy for global optimization](how_to_globalization.ipynb) it \n", "is also relevant for global optimization problems. \n", "\n", "## Important facts \n", "\n", "- There is no optimizer that works well for all problems \n", "- Making the right choice can lead to enormous speedups\n", "- Making the wrong choice can mean that you [don't solve your problem at all](algo-selection-how-important). Sometimes,\n", "optimizers fail silently!\n", "\n", "\n", "## The three steps for selecting algorithms\n", "\n", "Algorithm selection is a mix of theory and experimentation. We recommend the following \n", "steps:\n", "\n", "1. **Theory**: Based on the properties of your problem, start with 3 to 5 candidate algorithms. \n", "You may use the decision tree below.\n", "2. **Experiments**: Run the candidate algorithms for a small number of function \n", "evaluations and compare the results in a *criterion plot*. As a rule of thumb, use \n", "between `n_params` and `10 * n_params` evaluations. \n", "3. **Optimization**: Re-run the algorithm with the best results until \n", "convergence. 
Use the best parameter vector from the experiments as start parameters.\n", "\n", "We will walk you through the steps in an [example](algo-selection-example-problem)\n", "below. These steps work well for most problems but sometimes you need \n", "[variations](algo-selection-steps-variations).\n", "\n", "\n", "## A decision tree \n", "\n", "This is a practical guide for narrowing down the set of algorithms to experiment with:\n", "\n", "```{mermaid}\n", "graph LR\n", " classDef highlight fill:#FF4500;\n", " A[\"Do you have
nonlinear
constraints?\"] -- yes --> B[\"differentiable?\"]\n", " B[\"Is your objective function differentiable?\"] -- yes --> C[\"ipopt
nlopt_slsqp
scipy_trust_constr\"]\n", " B[\"differentiable?\"] -- no --> D[\"scipy_cobyla
nlopt_cobyla\"]\n", "\n", " A[\"Do you have
nonlinear constraints?\"] -- no --> E[\"Can you exploit
a least-squares
structure?\"]\n", " E[\"Can you exploit
a least-squares
structure?\"] -- yes --> F[\"differentiable?\"]\n", " E[\"Can you exploit
a least-squares
structure?\"] -- no --> G[\"differentiable?\"]\n", "\n", " F[\"differentiable?\"] -- yes --> H[\"scipy_ls_lm
scipy_ls_trf
scipy_ls_dogbox\"]\n", " F[\"differentiable?\"] -- no --> I[\"nag_dfols
pounders
tao_pounders\"]\n", "\n", " G[\"differentiable?\"] -- yes --> J[\"scipy_lbfgsb
nlopt_lbfgsb
fides\"]\n", " G[\"differentiable?\"] -- no --> K[\"nlopt_bobyqa
nlopt_neldermead
neldermead_parallel\"]\n", "\n", "```\n", "\n", "Going through the different questions will give you a list of candidate algorithms. \n", "All algorithms in that list are designed for the same problem class but use different \n", "approaches to solve the problem. Which of them works best for your problem can only be \n", "found out through experimentation.\n", "\n", "```{note}\n", "Many books on numerical optimization focus strongly on the inner workings of algorithms.\n", "They will, for example, describe the difference between a trust-region algorithm and a \n", "line-search algorithm in a lot of detail. We have an [intuitive explanation](../explanation/explanation_of_numerical_optimizers.md) of this too. Understanding these details is important for configuring and\n", "troubleshooting optimizations, but not for algorithm selection. For example, If you have\n", "a scalar, differentiable problem without nonlinear constraints, the decision tree \n", "suggests `fides` and two variants of `lbfgsb`. `fides` is a trust-region algorithm, \n", "`lbfgsb` is a line-search algorithm. Both are designed to solve the same kinds of \n", "problems and which one works best needs to be found out through experimentation.\n", "```\n", "\n", "## Filtering algorithms \n", "\n", "An even more fine-grained version of the decision tree is built into optimagic's \n", "algorithm selection tool, which can filter algorithms based on the properties of \n", "your problem. To make this concrete, assume we are looking for a **local** optimizer for \n", "a **differentiable** problem with a **scalar** objective function and \n", "**bound constraints**. 
\n", "\n", "To find all algorithms that match our criteria, we can simply type:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import optimagic as om\n", "\n", "om.algos.Local.GradientBased.Scalar.Bounded.All" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "The available filters are: GradientBased, GradientFree, Global, Local, Bounded, \n", "LinearConstrained, NonlinearConstrained, Scalar, LeastSquares, Likelihood, and Parallel.\n", "You can apply them in any order you want. They are also discoverable, i.e. the \n", "autocomplete feature of your editor will show you all filters you can apply on top of \n", "your current selection.\n", "\n", "Using `.All` after applying filters shows you all algorithms optimagic knows of that \n", "satisfy your criteria. Some of them require optional dependencies. To show only the \n", "algorithms that are available with the packages you have currently installed, use \n", "`.Available` instead of `.All`." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "An even more fine-grained way of filtering is described in [Filtering Algorithms Using Bounds](filtering_algorithms_using_bounds)." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "(algo-selection-example-problem)=\n", "\n", "## An example problem\n", "\n", "As an example we use the [Trid function](https://www.sfu.ca/~ssurjano/trid.html). The Trid function has no local minimum except \n", "the global one. It is defined for any number of dimensions, we will pick 20. As starting \n", "values we will pick the vector [0, 1, ..., 19]. 
\n", "\n", "A Python implementation of the function and its gradient looks like this:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import warnings\n", "\n", "warnings.filterwarnings(\"ignore\")\n", "\n", "import plotly.io as pio\n", "\n", "pio.renderers.default = \"notebook_connected\"" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "\n", "\n", "def trid_scalar(x):\n", " \"\"\"Implement Trid function: https://www.sfu.ca/~ssurjano/trid.html.\"\"\"\n", " return ((x - 1) ** 2).sum() - (x[1:] * x[:-1]).sum()\n", "\n", "\n", "def trid_gradient(x):\n", " \"\"\"Calculate gradient of trid function.\"\"\"\n", " l1 = np.insert(x, 0, 0)\n", " l1 = np.delete(l1, [-1])\n", " l2 = np.append(x, 0)\n", " l2 = np.delete(l2, [0])\n", " return 2 * (x - 1) - l1 - l2" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Step 1: Theory\n", "\n", "\n", "\n", "Let's go through the decision tree for the Trid function:\n", "\n", "1. **No** nonlinear constraints our solution needs to satisfy\n", "2. **No** least-squares structure we can exploit \n", "3. **Yes**, the function is differentiable. We even have a closed form gradient that \n", "we would like to use. \n", "\n", "We therefore end up with the candidate algorithms `scipy_lbfgsb`, `nlopt_lbfgsb`, and \n", "`fides`.\n", "\n", "```{note}\n", "If your function is differentiable but you do not have a closed form gradient (yet), \n", "we suggest to use at least one gradient based optimizer and one gradient free optimizer.\n", "in your experiments. Optimagic will use numerical gradients in that case. For details, \n", "see [here](how_to_derivatives.ipynb).\n", "```\n", "\n", "\n", "### Step 2: Experiments\n", "\n", "To find out which algorithms work well for our problem, we simply run optimizations with\n", "all candidate algorithms in a loop and store the result in a dictionary. 
We limit the \n", "number of function evaluations to 8. Since some algorithms only support a maximum number\n", "of iterations as stopping criterion we also limit the number of iterations to 8.\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "results = {}\n", "for algo in [\"scipy_lbfgsb\", \"nlopt_lbfgsb\", \"fides\"]:\n", " results[algo] = om.minimize(\n", " fun=trid_scalar,\n", " jac=trid_gradient,\n", " params=np.arange(20),\n", " algorithm=algo,\n", " algo_options={\"stopping_maxfun\": 8, \"stopping_maxiter\": 8},\n", " )\n", "\n", "fig = om.criterion_plot(results, max_evaluations=8)\n", "fig.show()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "All optimizers work pretty well here and since this is a very simple problem, any of them \n", "would probably find the optimum in a reasonable time. However, `nlopt_lbfgsb` is a bit \n", "better than the others, so we will select it for the next step. In more difficult\n", "examples, the difference between optimizers can be much more pronounced.\n", "\n", "### Step 3: Optimization \n", "\n", "All that is left to do is to run the optimization until convergence with the best \n", "optimizer. To avoid duplicated calculations, we can already start from the previously \n", "best parameter vector:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "best_x = results[\"nlopt_lbfgsb\"].params\n", "results[\"nlopt_lbfgsb_complete\"] = om.minimize(\n", " fun=trid_scalar,\n", " jac=trid_gradient,\n", " params=best_x,\n", " algorithm=\"nlopt_lbfgsb\",\n", ")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Looking at the result in a criterion plot we can see that the optimizer converges after \n", "a bit more than 30 function evaluations. 
" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "fig = om.criterion_plot(results)\n", "fig.show()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "(algo-selection-steps-variations)=\n", "\n", "## Variations of the four steps\n", "\n", "The four steps described above work very well in most situations. However, sometimes \n", "it makes sense to deviate: \n", "\n", "- If you are unsure about some of the questions in step 1, select more algorithms for \n", "the experimentation phase and run more than 1 algorithm until convergence. \n", "- If it is very important to find a precise optimum, run more than 1 algorithm until \n", "convergence. \n", "- If you have a very fast objective function, simply run all candidate algorithms until \n", "convergence. \n", "- If you have a differentiable objective function but no closed form derivative, use \n", "at least one gradient based optimizer and one gradient free optimizer in the \n", "experiments. See [here](how_to_derivatives.ipynb) to learn more about derivatives.\n", "\n", "\n", "(algo-selection-how-important)=\n", "\n", "## How important was it?\n", "\n", "The Trid function is differentiable and very well behaved in almost every aspect. \n", "Moreover, it has a very short runtime. One would think that any optimizer can find its \n", "optimum. So let's compare the selected optimizer with a few others:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "results = {}\n", "for algo in [\"nlopt_lbfgsb\", \"scipy_neldermead\", \"scipy_cobyla\"]:\n", " results[algo] = om.minimize(\n", " fun=trid_scalar,\n", " jac=trid_gradient,\n", " params=np.arange(20),\n", " algorithm=algo,\n", " )\n", "\n", "fig = om.criterion_plot(results)\n", "fig.show()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "We can see that our chosen optimizer solves the problem with less than 35 function \n", "evaluations. 
At this point, the two gradient-free optimizers have not yet made \n", "significant progress. COBYLA gets reasonably close to an optimum after about 4k \n", "evaluations. Nelder-Mead gets stuck after 8k evaluations and fails to solve the problem. \n", "\n", "This example shows not only that the choice of optimizer is important but that the commonly \n", "held belief that gradient free optimizers are generally more robust than gradient based \n", "ones is dangerous! The Nelder-Mead algorithm did \"converge\" and reports success, but\n", "did not find the optimum. It did not even get stuck in a local optimum because we know \n", "that the Trid function does not have local optima except the global one. It just got \n", "stuck somewhere. " ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "results[\"scipy_neldermead\"].success" ] } ], "metadata": { "kernelspec": { "display_name": "optimagic-docs", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.16" } }, "nbformat": 4, "nbformat_minor": 4 } ================================================ FILE: docs/source/how_to/how_to_benchmarking.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": "0", "metadata": {}, "source": [ "# How to Benchmark Optimization Algorithms\n", "\n", "Benchmarking optimization algorithms is an important step when developing a new algorithm or when searching for an algorithm that is good at solving a particular problem. \n", "\n", "In general, benchmarking consists of the following steps:\n", "\n", "1. Define the test problems (or get pre-implemented ones)\n", "2. Define the optimization algorithms and the tuning parameters you want to try\n", "3. Run the benchmark\n", "4. 
Plot the results\n", "\n", "optimagic helps you with all of these steps!" ] }, { "cell_type": "markdown", "id": "1", "metadata": {}, "source": [ "## 1. Get Test Problems\n", "\n", "optimagic includes the problems of [Moré and Wild (2009)](https://doi.org/10.1137/080724083) as well as [Cartis and Roberts](https://arxiv.org/abs/1710.11005).\n", "\n", "Each problem consists of the `inputs` (the criterion function and the start parameters) and the `solution` (the optimal parameters and criterion value) and optionally provides more information.\n", "\n", "Below we load a subset of the Moré and Wild problems and look at one particular Rosenbrock problem that has difficult start parameters." ] }, { "cell_type": "code", "execution_count": null, "id": "2", "metadata": {}, "outputs": [], "source": [ "import plotly.io as pio\n", "\n", "pio.renderers.default = \"notebook_connected\"\n", "\n", "import optimagic as om" ] }, { "cell_type": "code", "execution_count": null, "id": "3", "metadata": {}, "outputs": [], "source": [ "problems = om.get_benchmark_problems(\"example\")" ] }, { "cell_type": "markdown", "id": "4", "metadata": {}, "source": [ "## 2. Specify the Optimizers\n", "\n", "To select optimizers you want to benchmark on the set of problems, you can simply specify them as a list. Advanced examples - that do not only compare algorithms but also vary the `algo_options` - can be found below. " ] }, { "cell_type": "code", "execution_count": null, "id": "5", "metadata": {}, "outputs": [], "source": [ "optimizers = [\n", " \"nag_dfols\",\n", " \"scipy_neldermead\",\n", " \"scipy_truncated_newton\",\n", "]" ] }, { "cell_type": "markdown", "id": "6", "metadata": {}, "source": [ "## 3. Run the Benchmark\n", "\n", "Once you have your problems and your optimizers set up, you can simply use `run_benchmark`. The results are a dictionary with one entry for each (problem, algorithm) combination. 
Each entry not only saves the solution but also the algorithm's criterion and parameter history. " ] }, { "cell_type": "code", "execution_count": null, "id": "7", "metadata": {}, "outputs": [], "source": [ "results = om.run_benchmark(\n", " problems,\n", " optimizers,\n", ")" ] }, { "cell_type": "markdown", "id": "8", "metadata": {}, "source": [ "## 4a. Profile plots\n", "\n", "**Profile Plots** compare optimizers over a whole problem set. \n", "\n", "The literature distinguishes **data profiles** and **performance profiles**. Data profiles use a normalized runtime measure whereas performance profiles use an absolute one. The profile plot does not normalize runtime by default. To do this, simply set `normalize_runtime` to True. For background information, check [Moré and Wild (2009)](https://doi.org/10.1137/080724083). " ] }, { "cell_type": "code", "execution_count": null, "id": "9", "metadata": {}, "outputs": [], "source": [ "fig = om.profile_plot(\n", " problems=problems,\n", " results=results,\n", ")\n", "\n", "fig.show()" ] }, { "cell_type": "markdown", "id": "10", "metadata": {}, "source": [ ":::{note}\n", "\n", "For details on using other plotting backends, see [How to change the plotting backend](how_to_change_plotting_backend.ipynb).\n", "\n", ":::" ] }, { "cell_type": "markdown", "id": "11", "metadata": {}, "source": [ "The x axis shows runtime per problem. The y axis shows the share of problems each algorithm solved within that runtime. Thus, higher and further to the left values are desirable. Higher means more problems were solved and further to the left means that the algorithm found the solutions earlier. 
\n", "\n", "You can choose:\n", "\n", "- whether to use `n_evaluations` or `walltime` as **`runtime_measure`**\n", "- whether to normalize runtime such that the runtime of each problem is shown as a multiple of the fastest algorithm on that problem\n", "- how to determine when an evaluation is close enough to the optimum to be counted as converged. Convergence is always based on some measure of distance between the true solution and the solution found by an optimizer. Whether distiance is measured in parameter space, function space, or a combination of both can be specified. \n", "\n", "Below, we consider a problem to be solved if the distance between the parameters found by the optimizer and the true solution parameters are at most 0.1% of the distance between the start parameters and true solution parameters. " ] }, { "cell_type": "code", "execution_count": null, "id": "12", "metadata": {}, "outputs": [], "source": [ "fig = om.profile_plot(\n", " problems=problems,\n", " results=results,\n", " runtime_measure=\"n_evaluations\",\n", " stopping_criterion=\"x\",\n", " x_precision=0.001,\n", ")\n", "\n", "fig.show()" ] }, { "cell_type": "markdown", "id": "13", "metadata": {}, "source": [ "## 4b. Convergence plots\n", "\n", "**Convergence Plots** look at particular problems and show the convergence of each optimizer on each problem. " ] }, { "cell_type": "code", "execution_count": null, "id": "14", "metadata": {}, "outputs": [], "source": [ "fig = om.convergence_plot(\n", " problems=problems,\n", " results=results,\n", " n_cols=2,\n", " problem_subset=[\"rosenbrock_good_start\", \"box_3d\"],\n", ")\n", "\n", "fig.show()" ] }, { "cell_type": "markdown", "id": "15", "metadata": {}, "source": [ "The further to the left and the lower the curve of an algorithm, the better that algorithm performed.\n", "\n", "Often we are more interested in how close each algorithm got to the true solution in parameter space, not in criterion space as above. For this. 
we simply set the **`distance_measure`** to `parameter_space`. " ] }, { "cell_type": "code", "execution_count": null, "id": "16", "metadata": {}, "outputs": [], "source": [ "fig = om.convergence_plot(\n", " problems=problems,\n", " results=results,\n", " n_cols=2,\n", " problem_subset=[\"rosenbrock_good_start\", \"box_3d\"],\n", " distance_measure=\"parameter_distance\",\n", " stopping_criterion=\"x\",\n", ")\n", "\n", "fig.show()" ] }, { "cell_type": "markdown", "id": "17", "metadata": {}, "source": [ "## 5a. Convergence report\n", "\n", "The **Convergence Report** shows for each problem and optimizer which problems the optimizer solved successfully, failed to do so, or where it stopped with an error. The respective strings are \"success\", \"failed\", or \"error\".\n", "Moreover, the last column of the ```pd.DataFrame``` displays the number of dimensions of the benchmark problem." ] }, { "cell_type": "code", "execution_count": null, "id": "18", "metadata": {}, "outputs": [], "source": [ "df = om.convergence_report(\n", " problems=problems,\n", " results=results,\n", " stopping_criterion=\"y\",\n", " x_precision=1e-4,\n", " y_precision=1e-4,\n", ")" ] }, { "cell_type": "code", "execution_count": null, "id": "19", "metadata": {}, "outputs": [], "source": [ "df" ] }, { "cell_type": "markdown", "id": "20", "metadata": {}, "source": [ "## 5b. Rank report\n", "\n", "The **Rank Report** shows the ranks of the algorithms for each problem; where 0 means the algorithm was the fastest on a given benchmark problem, 1 means it was the second fastest and so on. If an algorithm did not converge on a problem, the value is \"failed\". If an algorithm did encounter an error during optimization, the value is \"error\"." 
] }, { "cell_type": "code", "execution_count": null, "id": "21", "metadata": {}, "outputs": [], "source": [ "df = om.rank_report(\n", " problems=problems,\n", " results=results,\n", " runtime_measure=\"n_evaluations\",\n", " stopping_criterion=\"y\",\n", " x_precision=1e-4,\n", " y_precision=1e-4,\n", ")" ] }, { "cell_type": "code", "execution_count": null, "id": "22", "metadata": {}, "outputs": [], "source": [ "df" ] }, { "cell_type": "markdown", "id": "23", "metadata": {}, "source": [ "## 5b. Traceback report\n", "\n", "The **Traceback Report** shows the tracebacks returned by the optimizers if they encountered an error during optimization. The resulting ```pd.DataFrame``` is empty if none of the optimizers terminated with an error, as in the example below." ] }, { "cell_type": "code", "execution_count": null, "id": "24", "metadata": {}, "outputs": [], "source": [ "df = om.traceback_report(problems=problems, results=results)" ] }, { "cell_type": "code", "execution_count": null, "id": "25", "metadata": {}, "outputs": [], "source": [ "df" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.14" } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: docs/source/how_to/how_to_bounds.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": "0", "metadata": {}, "source": [ "(how-to-bounds)=\n", "\n", "# How to specify bounds\n", "\n", "## Constraints vs bounds \n", "\n", "optimagic distinguishes between bounds and constraints. Bounds are lower and upper bounds for parameters. In the literature, they are sometimes called box constraints. 
Examples for general constraints are linear constraints, probability constraints, or nonlinear constraints. You can find out more about general constraints in the next section on [How to specify constraints](how_to_constraints.md)." ] }, { "cell_type": "markdown", "id": "1", "metadata": {}, "source": [ "## Example objective function\n", "\n", "Let’s again look at the sphere function:" ] }, { "cell_type": "code", "execution_count": null, "id": "2", "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "\n", "import optimagic as om" ] }, { "cell_type": "code", "execution_count": null, "id": "3", "metadata": {}, "outputs": [], "source": [ "def fun(x):\n", " return x @ x" ] }, { "cell_type": "code", "execution_count": null, "id": "4", "metadata": {}, "outputs": [], "source": [ "res = om.minimize(fun=fun, params=np.arange(3), algorithm=\"scipy_lbfgsb\")\n", "res.params.round(5)" ] }, { "cell_type": "markdown", "id": "5", "metadata": {}, "source": [ "## Array params\n", "\n", "For params that are a `numpy.ndarray`, one can specify the lower and/or upper-bounds as an array of the same length.\n", "\n", "**Lower bounds**" ] }, { "cell_type": "code", "execution_count": null, "id": "6", "metadata": {}, "outputs": [], "source": [ "res = om.minimize(\n", " fun=fun,\n", " params=np.arange(3),\n", " bounds=om.Bounds(lower=np.ones(3)),\n", " algorithm=\"scipy_lbfgsb\",\n", ")\n", "res.params" ] }, { "cell_type": "markdown", "id": "7", "metadata": {}, "source": [ "**Lower & upper-bounds**" ] }, { "cell_type": "code", "execution_count": null, "id": "8", "metadata": {}, "outputs": [], "source": [ "res = om.minimize(\n", " fun=fun,\n", " params=np.arange(3),\n", " algorithm=\"scipy_lbfgsb\",\n", " bounds=om.Bounds(\n", " lower=np.array([-2, -np.inf, 1]),\n", " upper=np.array([-1, np.inf, np.inf]),\n", " ),\n", ")\n", "res.params" ] }, { "cell_type": "markdown", "id": "9", "metadata": {}, "source": [ "## Pytree params\n", "\n", "Now let's look at a case where params is a 
more general pytree. We also update the sphere function by adding an intercept. Since the criterion always decreases when decreasing the intercept, there is no unrestricted solution. Let's fix a lower bound only for the intercept." ] }, { "cell_type": "code", "execution_count": null, "id": "10", "metadata": {}, "outputs": [], "source": [ "params = {\"x\": np.arange(3), \"intercept\": 3}\n", "\n", "\n", "def fun(params):\n", " return params[\"x\"] @ params[\"x\"] + params[\"intercept\"]" ] }, { "cell_type": "code", "execution_count": null, "id": "11", "metadata": {}, "outputs": [], "source": [ "res = om.minimize(\n", " fun=fun,\n", " params=params,\n", " algorithm=\"scipy_lbfgsb\",\n", " bounds=om.Bounds(lower={\"intercept\": -2}),\n", ")\n", "res.params" ] }, { "cell_type": "markdown", "id": "12", "metadata": {}, "source": [ "optimagic tries to match the user provided bounds with the structure of params. This allows you to specify bounds for subtrees of params. In case your subtree specification results in an unidentified matching, optimagic will tell you so with an `InvalidBoundsError`. " ] }, { "cell_type": "markdown", "id": "13", "metadata": {}, "source": [ "## params data frame\n", "\n", "It often makes sense to specify your parameters in a `pandas.DataFrame`, where you can utilize the multiindex for parameter naming. In this case, you can specify bounds as extra columns `lower_bound` and `upper_bound`.\n", "\n", "> **Note**\n", "> The columns are called `*_bound` instead of `*_bounds` like the argument passed to `minimize` or `maximize`. 
" ] }, { "cell_type": "code", "execution_count": null, "id": "14", "metadata": {}, "outputs": [], "source": [ "import pandas as pd\n", "\n", "params = pd.DataFrame(\n", " {\"value\": [0, 1, 2, 3], \"lower_bound\": [0, 1, 1, -2]},\n", " index=pd.MultiIndex.from_tuples([(\"x\", k) for k in range(3)] + [(\"intercept\", 0)]),\n", ")\n", "params" ] }, { "cell_type": "code", "execution_count": null, "id": "15", "metadata": {}, "outputs": [], "source": [ "def fun(params):\n", " x = params.loc[\"x\"][\"value\"].to_numpy()\n", " intercept = params.loc[\"intercept\"][\"value\"].iloc[0]\n", " value = x @ x + intercept\n", " return float(value)" ] }, { "cell_type": "code", "execution_count": null, "id": "16", "metadata": {}, "outputs": [], "source": [ "res = om.minimize(\n", " fun,\n", " params=params,\n", " algorithm=\"scipy_lbfgsb\",\n", ")\n", "res.params" ] }, { "cell_type": "markdown", "id": "17", "metadata": {}, "source": [ "(filtering_algorithms_using_bounds)=\n", "\n", "## Filtering algorithms\n", "\n", "It is further possible to filter algorithms based on whether they support bounds, if bounds are required to run, and if infinite bounds are supported. The AlgoInfo class provides all information about the chosen algorithm, which can be accessed with algo.algo_info... . 
Suppose we are looking for an optimizer that supports bounds and strictly requires them for the algorithm to run properly.\n", "\n", "To find all algorithms that support bounds and cannot run without bounds, we can simply do:\n" ] }, { "cell_type": "code", "execution_count": null, "id": "18", "metadata": {}, "outputs": [], "source": [ "from optimagic.algorithms import AVAILABLE_ALGORITHMS\n", "\n", "algos_with_bounds_support = [\n", " algo\n", " for name, algo in AVAILABLE_ALGORITHMS.items()\n", " if algo.algo_info.supports_bounds\n", "]\n", "my_selection = [\n", " algo for algo in algos_with_bounds_support if algo.algo_info.needs_bounds\n", "]\n", "my_selection[0:3]" ] }, { "cell_type": "markdown", "id": "19", "metadata": {}, "source": [ "Similarly, to find all algorithms that support infinite values in bounds, we can do:" ] }, { "cell_type": "code", "execution_count": null, "id": "20", "metadata": {}, "outputs": [], "source": [ "my_selection2 = [\n", " algo\n", " for algo in algos_with_bounds_support\n", " if algo.algo_info.supports_infinite_bounds\n", "]\n", "my_selection2[0:3]" ] }, { "cell_type": "markdown", "id": "21", "metadata": {}, "source": [ "In case you forget to specify bounds for an optimizer that strictly requires them or pass infinite values in bounds to an optimizer which does not support them, optimagic will raise an `IncompleteBoundsError`. " ] }, { "cell_type": "markdown", "id": "22", "metadata": {}, "source": [ "## Coming from scipy" ] }, { "cell_type": "markdown", "id": "23", "metadata": {}, "source": [ "If `params` is a flat numpy array, you can also provide bounds in any format that \n", "is supported by [`scipy.optimize.minimize`](\n", "https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html). 
" ] } ], "metadata": { "kernelspec": { "display_name": "optimagic-docs", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.12.11" } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: docs/source/how_to/how_to_change_plotting_backend.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": "0", "metadata": {}, "source": [ "# How to change the plotting backend" ] }, { "cell_type": "markdown", "id": "1", "metadata": {}, "source": [ "optimagic supports various visualization libraries as plotting backends, which can be\n", "selected using the `backend` argument. In the following guide, we showcase the \n", "`criterion_plot` visualized using different backends." ] }, { "cell_type": "code", "execution_count": null, "id": "2", "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "\n", "import optimagic as om\n", "\n", "\n", "def sphere(x):\n", " return x @ x\n", "\n", "\n", "results = {}\n", "for algo in [\"scipy_lbfgsb\", \"scipy_neldermead\"]:\n", " results[algo] = om.minimize(sphere, params=np.arange(5), algorithm=algo)" ] }, { "cell_type": "markdown", "id": "3", "metadata": {}, "source": [ "## Backends" ] }, { "cell_type": "markdown", "id": "4", "metadata": {}, "source": [ "### Plotly" ] }, { "cell_type": "markdown", "id": "5", "metadata": {}, "source": [ "The default plotting library. 
To select the Plotly backend explicitly, set `backend=\"plotly\"`.\n", "\n", "The returned figure object is a [`plotly.graph_objects.Figure`](https://plotly.com/python-api-reference/generated/plotly.graph_objects.Figure.html).\n", "\n", "```{note}\n", "**Choose the Plotly renderer according to your environment:**\n", "\n", "- Use `plotly.io.renderers.default = \"notebook_connected\"` in Jupyter notebooks for interactive plots.\n", "- Use `plotly.io.renderers.default = \"browser\"` to open plots in your default web browser when running as a script.\n", "\n", "Refer to the [Plotly documentation](https://plotly.com/python/renderers/) for more details.\n", "```" ] }, { "cell_type": "code", "execution_count": null, "id": "6", "metadata": {}, "outputs": [], "source": [ "import plotly.io as pio\n", "\n", "pio.renderers.default = \"notebook_connected\"\n", "\n", "fig = om.criterion_plot(results, backend=\"plotly\") # Also the default\n", "fig.show()" ] }, { "cell_type": "markdown", "id": "7", "metadata": {}, "source": [ "### Matplotlib" ] }, { "cell_type": "markdown", "id": "8", "metadata": {}, "source": [ "To select the Matplotlib backend, set `backend=\"matplotlib\"`.\n", "\n", "The returned figure object is a [`matplotlib.axes.Axes`](https://matplotlib.org/stable/api/_as_gen/matplotlib.axes.Axes.html).\n", "\n", "In case of grid plots (such as `convergence_plot` or `slice_plot`), the returned object is a 2-dimensional numpy array of `Axes` objects: [`numpy.ndarray`](https://numpy.org/doc/stable/reference/generated/numpy.ndarray.html)[[`matplotlib.axes.Axes`]](https://matplotlib.org/stable/api/_as_gen/matplotlib.axes.Axes.html) of shape `(n_rows, n_cols)`." 
] }, { "cell_type": "code", "execution_count": null, "id": "9", "metadata": {}, "outputs": [], "source": [ "ax = om.criterion_plot(results, backend=\"matplotlib\")" ] }, { "cell_type": "markdown", "id": "10", "metadata": {}, "source": [ "### Bokeh" ] }, { "cell_type": "markdown", "id": "11", "metadata": {}, "source": [ "To select the Bokeh backend, set `backend=\"bokeh\"`.\n", "\n", "The returned figure object is a [`bokeh.plotting.figure`](https://docs.bokeh.org/en/latest/docs/reference/plotting/figure.html).\n", "\n", "In case of grid plots (such as `convergence_plot` or `slice_plot`), the returned object is a [`bokeh.models.GridPlot`](https://docs.bokeh.org/en/latest/docs/reference/models/plots.html#bokeh.models.GridPlot) object.\n", "\n", "```{warning}\n", "- Bokeh applies themes globally. Passing the `template` parameter to a plotting function updates the theme for all existing and future Bokeh plots. If you do not pass `template`, a default template is applied, which will also change the global theme.\n", "- Bokeh doesn't support titles for grid plots. 
So, the `title` parameter in `slice_plot` is ignored when using the Bokeh backend.\n", "```\n" ] }, { "cell_type": "code", "execution_count": null, "id": "12", "metadata": {}, "outputs": [], "source": [ "from bokeh.io import output_notebook, show\n", "\n", "output_notebook()\n", "\n", "p = om.criterion_plot(results, backend=\"bokeh\")\n", "show(p)" ] }, { "cell_type": "markdown", "id": "13", "metadata": {}, "source": [ "### Altair" ] }, { "cell_type": "markdown", "id": "14", "metadata": {}, "source": [ "To select the Altair backend, set `backend=\"altair\"`.\n", "\n", "The returned figure object is an [`altair.Chart`](https://altair-viz.github.io/user_guide/generated/toplevel/altair.Chart.html).\n", "\n", "In case of grid plots (such as `convergence_plot` or `slice_plot`), the returned object is either an [`altair.Chart`](https://altair-viz.github.io/user_guide/generated/toplevel/altair.Chart.html) if there is only one subplot, an [`altair.HConcatChart`](https://altair-viz.github.io/user_guide/generated/toplevel/altair.HConcatChart.html) if there is only one row, or an [`altair.VConcatChart`](https://altair-viz.github.io/user_guide/generated/toplevel/altair.VConcatChart.html) otherwise.\n", "\n", "```{warning}\n", "Altair applies themes globally. Passing the `template` parameter to a plotting function updates the theme for all existing and future Altair plots. If you do not pass `template`, a default template is applied, which will also change the global theme.\n", "```\n", "\n", "```{note}\n", "It is mostly not required to set the renderer manually, as Altair automatically\n", "selects the appropriate renderer based on your environment. 
In this example,\n", "we explicitly set the renderer to ensure correct display within the documentation.\n", "\n", "Refer to the [Altair documentation](https://altair-viz.github.io/user_guide/display_frontends.html) for more details.\n", "```\n" ] }, { "cell_type": "code", "execution_count": null, "id": "15", "metadata": {}, "outputs": [], "source": [ "import altair as alt\n", "\n", "# Setting the renderer is mostly not required. See note above.\n", "alt.renderers.enable(\"jupyter\")\n", "\n", "chart = om.criterion_plot(results, backend=\"altair\")\n", "chart.show()" ] }, { "cell_type": "markdown", "id": "16", "metadata": {}, "source": [ "## Customizing plots" ] }, { "cell_type": "markdown", "id": "17", "metadata": {}, "source": [ "Here, we provide a simple example of how to customize plots created with different backends.\n", "\n", "::::{tab-set}\n", "\n", ":::{tab-item} Plotly\n", "\n", "```python\n", "fig = om.criterion_plot(results, backend=\"plotly\")\n", "\n", "# Configure Axes\n", "fig.update_yaxes(title_text=\"Custom Y Label\", title_font_size=20)\n", "fig.update_xaxes(range=[0, 100])\n", "\n", "# Change legend position\n", "fig.update_layout(legend=dict(xanchor=\"left\", yanchor=\"top\", x=1, y=0.6))\n", "\n", "# Configure line properties\n", "# The index corresponding to a line, can be inferred from the legend\n", "# In case of criterion_plot, it is the order of optimizers in `results`\n", "fig.data[0].update(line=dict(width=4))\n", "fig.data[1].update(line=dict(dash=\"dashdot\"))\n", "\n", "fig.show()\n", "```\n", ":::\n", "\n", ":::{tab-item} Matplotlib\n", "\n", "```python\n", "ax = om.criterion_plot(results, backend=\"matplotlib\")\n", "\n", "# Configure Axis\n", "ax.set_ylabel(ylabel=\"Custom Y Label\", fontsize=20)\n", "ax.set_xlim(0, 100)\n", "\n", "# Change legend position\n", "ax.figure.legends[0].set_loc(\"outside center right\")\n", "\n", "# Configure line properties\n", "# The index corresponding to a line, can be inferred from the legend\n", 
"# In case of criterion_plot, it is the order of optimizers in `results`\n", "ax.lines[0].set_linewidth(4)\n", "ax.lines[1].set_linestyle(\"dashdot\")\n", "```\n", "\n", ":::\n", "\n", ":::{tab-item} Bokeh\n", "\n", "```python\n", "from bokeh.models import Range1d\n", "\n", "p = om.criterion_plot(results, backend=\"bokeh\")\n", "\n", "# Configure Axes\n", "p.yaxis.axis_label = \"Custom Y Label\"\n", "p.yaxis.axis_label_text_font_size = \"20pt\"\n", "p.x_range = Range1d(0, 100)\n", "\n", "# Change legend position\n", "p.add_layout(p.legend[0], \"right\")\n", "p.legend[0].location = \"center\"\n", "\n", "# Configure line properties\n", "# The index corresponding to a line, can be inferred from the legend\n", "# In case of criterion_plot, it is the order of optimizers in `results`\n", "p.renderers[0].glyph.line_width = 4\n", "p.renderers[1].glyph.line_dash = \"dashdot\"\n", "\n", "show(p)\n", "```\n", "\n", ":::\n", "\n", ":::{tab-item} Altair\n", "\n", "```{note}\n", "Due to the nature of Altair charts, top-level configuration may not work as expected. 
In these cases, it might be necessary to override the encoding.\n", "```\n", "\n", "```python\n", "import altair as alt\n", "\n", "chart = om.criterion_plot(results, backend=\"altair\")\n", "\n", "# Configure Axes\n", "chart = chart.encode(\n", " y=alt.Y(\"y\", axis=alt.Axis(title=\"Custom Y Label\", titleFontSize=20)),\n", " x=alt.X(\"x\", scale=alt.Scale(domain=(0, 100))),\n", ")\n", "\n", "# Configure lines\n", "chart = chart.encode(\n", " strokeWidth=alt.condition(\n", " alt.datum.name == \"scipy_lbfgsb\", alt.value(4), alt.value(2)\n", " ),\n", " strokeDash=alt.condition(\n", " alt.datum.name == \"scipy_neldermead\", alt.value([8, 4, 2, 4]), alt.value([1, 0])\n", " ),\n", ")\n", "\n", "chart.show()\n", "```\n", "\n", ":::\n", "\n", "::::" ] } ], "metadata": { "kernelspec": { "display_name": "optimagic", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.17" } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: docs/source/how_to/how_to_constraints.md ================================================ (constraints)= # How to specify constraints ## Constraints vs bounds optimagic distinguishes between bounds and constraints. Bounds are lower and upper bounds for parameters. In the literature, they are sometimes called box constraints. Bounds are specified as `lower_bounds` and `upper_bounds` argument to `maximize` and `minimize`. Examples with bounds can be found in [this tutorial]. To specify more general constraints on your parameters, you can use the argument `constraints`. The variety of constraints you can impose ranges from rather simple ones (e.g. 
parameters are fixed to a value, a group of parameters is required to be equal) to more complex ones (like general linear constraints, or even nonlinear constraints). ## Can you use constraints with all optimizers? With the exception of general nonlinear constraints, we implement constraints via reparametrizations. Details are explained [here]. This means that you can use all of the constraints with any optimizer that supports bounds. Some constraints (e.g. fixing parameters) can even be used with optimizers that do not support bounds. ## Example criterion function Let's look at a variation of the sphere function to illustrate what kinds of constraints you can impose and how you specify them in optimagic: ```{eval-rst} .. code-block:: python >>> import numpy as np >>> import optimagic as om >>> def fun(params): ... offset = np.linspace(1, 0, len(params)) ... x = params - offset ... return x @ x ``` The unconstrained optimum of a six-dimensional version of this problem is: ```{eval-rst} .. code-block:: python >>> res = om.minimize( ... fun=fun, ... params=np.array([2.5, 1, 1, 1, 1, -2.5]), ... algorithm="scipy_lbfgsb", ... ) >>> res.params.round(3) # doctest: +SKIP array([1. , 0.8, 0.6, 0.4, 0.2, 0. ]) ``` The unconstrained optimum is usually easy to see because all parameters enter the criterion function in an additively separable way. ## Types of constraints Below, we show a very simple example of each type of constraint implemented in optimagic. For each constraint, we will select a subset of the parameters on which the constraint is imposed via the `selector` argument, which is a function that takes in the full parameter vector and returns the subset of parameters that should be constrained. ```{eval-rst} .. dropdown:: fixed The simplest (but very useful) constraint fixes parameters at their start values. Let's take the above example and fix the first and last parameter to 2.5 and -2.5, respectively. .. code-block:: python >>> res = om.minimize( ... fun=fun, ...
params=np.array([2.5, 1, 1, 1, 1, -2.5]), ... algorithm="scipy_lbfgsb", ... constraints=om.FixedConstraint( ... selector=lambda params: params[[0, 5]] ... ), ... ) Looking at the optimization result, we get: >>> res.params.round(3) array([ 2.5, 0.8, 0.6, 0.4, 0.2, -2.5]) Which is indeed the correct constrained optimum. Fixes are compatible with all optimizers. ``` ```{eval-rst} .. dropdown:: increasing In our unconstrained example, the optimal parameters are decreasing from left to right. Let's impose the constraint that the second, third and fourth parameter increase (weakly): .. code-block:: python >>> res = om.minimize( ... fun=fun, ... params=np.array([1, 1, 1, 1, 1, 1]), ... algorithm="scipy_lbfgsb", ... constraints=om.IncreasingConstraint( ... selector=lambda params: params[[1, 2, 3]] ... ), ... ) Imposing the constraint on positions ``params[[1, 2, 3]]`` means that the parameter value at index position ``2`` has to be (weakly) greater than the value at position ``1``. Likewise, the parameter value at index position ``3`` has to be (weakly) greater than the value at position ``2``. Hence, imposing an increasing constraint with only one selected parameter has no effect. We need to specify at least two parameters to make a meaningful *relative* comparison. Note that the increasing constraint affects all three parameters, i.e. ``params[1]``, ``params[2]``, and ``params[3]`` because the optimal parameters in the unconstrained case are decreasing from left to right. Looking at the optimization result, we get: >>> res.params.round(3) array([1. , 0.6, 0.6, 0.6, 0.2, 0. ]) Which is indeed the correct constrained optimum. Increasing constraints are only compatible with optimizers that support bounds. ``` ```{eval-rst} .. dropdown:: decreasing In our unconstrained example, the optimal parameters are decreasing from left to right already - without imposing any constraints. If we imposed a decreasing constraint without changing the order, it would simply have no effect.
So let's impose one in a different order: .. code-block:: python >>> res = om.minimize( ... fun=fun, ... params=np.array([1, 1, 1, 1, 1, 1]), ... algorithm="scipy_lbfgsb", ... constraints=om.DecreasingConstraint( ... selector=lambda params: params[[3, 0, 4]] ... ), ... ) Imposing the constraint on positions ``params[[3, 0, 4]]`` means that the parameter value at index position ``0`` has to be (weakly) smaller than the value at position ``3``. Likewise, the parameter value at index position ``4`` has to be (weakly) smaller than the value at position ``0``. Hence, imposing a decreasing constraint with only one selected parameter has no effect. We need to specify at least two parameters to make a meaningful *relative* comparison. Note that the decreasing constraint should have no effect on ``params[4]`` because it is smaller than the other two anyways in the unconstrained optimum, but it will change the optimal values of ``params[3]`` and ``params[0]``. Indeed we get: >>> res.params.round(3) array([ 0.7, 0.8, 0.6, 0.7, 0.2, -0. ]) Which is the correct optimum. Decreasing constraints are only compatible with optimizers that support bounds. ``` ```{eval-rst} .. dropdown:: equality In our example, all optimal parameters are different. Let's constrain the first and last to be equal to each other: .. code-block:: python >>> res = om.minimize( ... fun=fun, ... params=np.array([1, 1, 1, 1, 1, 1]), ... algorithm="scipy_lbfgsb", ... constraints=om.EqualityConstraint( ... selector=lambda params: params[[0, 5]] ... ), ... ) This yields: >>> res.params.round(3) array([0.5, 0.8, 0.6, 0.4, 0.2, 0.5]) Which is the correct solution. Equality constraints are compatible with all optimizers. ``` ```{eval-rst} .. dropdown:: pairwise_equality Pairwise equality constraints are similar to equality constraints but impose that two or more groups of parameters are pairwise equal. Let's look at an example: .. code-block:: python >>> res = om.minimize( ... fun=fun, ... 
params=np.array([1, 1, 1, 1, 1, 1]), ... algorithm="scipy_lbfgsb", ... constraints=om.PairwiseEqualityConstraint( ... selectors=[ ... lambda params: params[[0, 1]], ... lambda params: params[[2, 3]] ... ], ... ), ... ) This constraint imposes that ``params[0] == params[2]`` and ``params[1] == params[3]``. The optimal parameters with this constraint are: >>> res.params.round(3) array([ 0.8, 0.6, 0.8, 0.6, 0.2, -0. ]) ``` ```{eval-rst} .. dropdown:: probability Let's impose the constraint that the first four parameters form valid probabilities, i.e. they should add up to one and be between zero and one. .. code-block:: python >>> res = om.minimize( ... fun=fun, ... params=np.array([0.3, 0.2, 0.25, 0.25, 1, 1]), ... algorithm="scipy_lbfgsb", ... constraints=om.ProbabilityConstraint( ... selector=lambda params: params[:4] ... ), ... ) This yields again the correct result: .. code-block:: python >>> res.params.round(2) # doctest: +SKIP array([0.53, 0.33, 0.13, 0. , 0.2 , 0. ]) ``` ```{eval-rst} .. dropdown:: covariance In many estimation problems, particularly when doing a maximum likelihood estimation, one has to estimate the covariance matrix of a random variable. The ``covariance`` constraint ensures that such a covariance matrix is always valid, i.e. positive semi-definite and symmetric. Due to its symmetry, only the lower triangle of a covariance matrix actually has to be estimated. Let's look at an example. We want to impose that the first three elements form the lower triangle of a valid covariance matrix. .. code-block:: python >>> res = om.minimize( ... fun=fun, ... params=np.ones(6), ... algorithm="scipy_lbfgsb", ... constraints=om.FlatCovConstraint( ... selector=lambda params: params[:3] ... ), ... ) This yields the same solution as an unconstrained estimation because the constraint is not binding: >>> res.params.round(3) array([ 1.006, 0.784, 0.61 , 0.4 , 0.2 , -0.
]) We can now use one of optimagic's utility functions to actually build the covariance matrix out of the first three parameters: .. code-block:: python >>> from optimagic.utilities import cov_params_to_matrix >>> cov_params_to_matrix(res.params[:3]).round(2) # doctest: +NORMALIZE_WHITESPACE array([[1.01, 0.78], [0.78, 0.61]]) ``` ```{eval-rst} .. dropdown:: sdcorr ``sdcorr`` constraints are very similar to ``covariance`` constraints. The only difference is that instead of estimating a covariance matrix, we estimate standard deviations and the correlation matrix of random variables. Let's look at an example. We want to impose that the first three elements form valid standard deviations and a correlation matrix. .. code-block:: python >>> res = om.minimize( ... fun=fun, ... params=np.ones(6), ... algorithm="scipy_lbfgsb", ... constraints=om.FlatSDCorrConstraint( ... selector=lambda params: params[:3] ... ), ... ) This yields the same solution as an unconstrained estimation because the constraint is not binding: >>> res.params.round(3) # doctest: +SKIP array([ 1. , 0.8, 0.6, 0.4, 0.2, -0. ]) We can now use one of optimagic's utility functions to actually build the standard deviations and the correlation matrix: .. code-block:: python >>> from optimagic.utilities import sdcorr_params_to_sds_and_corr >>> sd, corr = sdcorr_params_to_sds_and_corr(res.params[:3]) >>> sd.round(2) array([1. , 0.8]) >>> corr.round(2) # doctest: +NORMALIZE_WHITESPACE array([[1. , 0.6], [0.6, 1. ]]) ``` ```{eval-rst} .. dropdown:: linear Linear constraints are the most difficult but also the most powerful constraints in your toolkit. They can be used to express constraints of the form ``lower_bound <= weights.dot(x) <= upper_bound`` or ``weights.dot(x) = value`` where ``x`` are the selected parameters. Linear constraints have many of the other constraint types as special cases, but typically it is more convenient to use the special cases instead of expressing them as a linear constraint. 
Internally, it will make no difference. Let's impose the constraint that the average of the first four parameters is at least 0.95. .. code-block:: python >>> res = om.minimize( ... fun=fun, ... params=np.ones(6), ... algorithm="scipy_lbfgsb", ... constraints=om.LinearConstraint( ... selector=lambda params: params[:4], ... lower_bound=0.95, ... weights=0.25, ... ), ... ) This yields: >>> res.params.round(2) array([ 1.25, 1.05, 0.85, 0.65, 0.2 , -0. ]) Where the first four parameters have an average of 0.95. In the above example, ``lower_bound`` and ``weights`` are scalars. They may, however, also be arrays (or even pytrees) with bounds and weights for each selected parameter. ``` ```{eval-rst} .. dropdown:: nonlinear .. warning:: General nonlinear constraints that are specified via a black-box constraint function can only be used if you choose an optimizer that supports it. This feature is currently supported by the algorithms: * ``ipopt`` * ``nlopt``: ``cobyla``, ``slsqp``, ``isres``, ``mma`` * ``scipy``: ``cobyla``, ``slsqp``, ``trust_constr`` You can use nonlinear constraints to express restrictions of the form ``lower_bound <= func(x) <= upper_bound`` or ``func(x) = value`` where ``x`` are the selected parameters and ``func`` is the constraint function. Let's impose the constraint that the product of all but the last parameter is 1. .. code-block:: python >>> res = om.minimize( ... fun=fun, ... params=np.ones(6), ... algorithm="scipy_slsqp", ... constraints=om.NonlinearConstraint( ... selector=lambda params: params[:-1], ... func=lambda x: np.prod(x), ... value=1.0, ... ), ... ) This yields: >>> res.params.round(2) array([ 1.31, 1.16, 1.01, 0.87, 0.75, -0. ]) Where the product of all but the last parameters is equal to 1. If you have a function that calculates the derivative of your constraint, you can add this under the key `"derivative"` to the constraint dictionary. Otherwise, numerical derivatives are calculated for you if needed. 
``` ## Imposing multiple constraints at once The above examples all just impose one constraint at a time. To impose multiple constraints simultaneously, simply pass in a list of constraints. For example: ```{eval-rst} .. code-block:: python >>> res = om.minimize( ... fun=fun, ... params=np.ones(6), ... algorithm="scipy_lbfgsb", ... constraints=[ ... om.EqualityConstraint(selector=lambda params: params[:2]), ... om.LinearConstraint( ... selector=lambda params: params[2:5], ... weights=1, ... value=3, ... ), ... ], ... ) This yields: >>> res.params.round(2) array([0.9, 0.9, 1.2, 1. , 0.8, 0. ]) There are limits regarding the compatibility of overlapping constraints. You will get a descriptive error message if your constraints are not compatible. ``` ## How to select the parameters? The parameters can be selected via a `selector` function. This function takes in the full parameter vector and returns the subset of parameters that should be constrained. Let's assume we have defined parameters in a nested dictionary: ```python params = {"a": np.ones(2), "b": {"c": 3, "d": pd.Series([4, 5])}} ``` It is probably not a good idea to use a nested dictionary for so few parameters, but let's ignore that. Now assume we want to fix the parameters in the pandas Series at their start values. We can do so as follows: ```python res = om.minimize( fun=some_fun, params=params, algorithm="scipy_lbfgsb", constraints=om.FixedConstraint(selector=lambda params: params["b"]["d"]), ) ``` I.e. the value corresponding to `selector` is a python function that takes the full `params` and returns a subset. The selected subset does not have to be a numpy array, it can be an arbitrary pytree. Using lambda functions is often convenient, but we could have just as well defined the selector function using def.
```python def my_selector(params): return params["b"]["d"] res = om.minimize( fun=some_fun, params=params, algorithm="scipy_lbfgsb", constraints=om.FixedConstraint(selector=my_selector), ) ``` [here]: ../../explanation/implementation_of_constraints.md [this tutorial]: ../tutorials/optimization_overview.ipynb ================================================ FILE: docs/source/how_to/how_to_criterion_function.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "(how-to-fun)=\n", "\n", "# How to write objective functions\n", "\n", "optimagic is very flexible when it comes to the objective function and its derivatives. \n", "In this how-to guide we start with simple examples, that would also work with \n", "scipy.optimize before we show advanced options and their advantages. \n", "\n", "## The simplest case\n", "\n", "In the simplest case, `fun` maps a numpy array into a scalar objective value. The name\n", "of the first argument of `fun` is arbitrary. " ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "\n", "import optimagic as om\n", "\n", "\n", "def sphere(x):\n", " return x @ x\n", "\n", "\n", "res = om.minimize(\n", " fun=sphere,\n", " params=np.arange(3),\n", " algorithm=\"scipy_lbfgsb\",\n", ")\n", "res.params.round(6)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## More flexible `params`\n", "\n", "In all but the most simple problems, a flat numpy array is not ideal to keep track of \n", "all the different parameters one wants to optimize over. Therefore, optimagic accepts \n", "objective functions that work with other parameter formats. Below we show a simple \n", "example.
More examples can be found [here](how_to_start_parameters.md).\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def dict_fun(x):\n", " return x[\"a\"] ** 2 + x[\"b\"] ** 4\n", "\n", "\n", "res = om.minimize(\n", " fun=dict_fun,\n", " params={\"a\": 1, \"b\": 2},\n", " algorithm=\"scipy_lbfgsb\",\n", ")\n", "\n", "res.params" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "The important thing is that the `params` provided to `minimize` need to have the format \n", "that is expected by the objective function.\n", "\n", "## Functions with additional arguments\n", "\n", "In many applications, the objective function takes more than `params` as argument. \n", "This can be achieved via `fun_kwargs`. Take the following simplified example:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def shifted_sphere(x, offset):\n", " return (x - offset) @ (x - offset)\n", "\n", "\n", "res = om.minimize(\n", " fun=shifted_sphere,\n", " params=np.arange(3),\n", " algorithm=\"scipy_lbfgsb\",\n", " fun_kwargs={\"offset\": np.ones(3)},\n", ")\n", "res.params" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "`fun_kwargs` is a dictionary with keyword arguments for `fun`. There is no constraint\n", "on the number or names of those arguments." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Least-Squares problems\n", "\n", "Many estimation problems have a least-squares structure. If so, specialized optimizers that exploit this structure can be much faster than standard optimizers. The `sphere` function from above is the simplest possible least-squares problem you could imagine: the least-squares residuals are just the params. \n", "\n", "To use least-squares optimizers in optimagic, you need to mark your function with \n", "a decorator and return the least-squares residuals instead of the aggregated function value.
" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "@om.mark.least_squares\n", "def ls_sphere(params):\n", " return params\n", "\n", "\n", "res = om.minimize(\n", " fun=ls_sphere,\n", " params=np.arange(3),\n", " algorithm=\"pounders\",\n", ")\n", "res.params.round(5)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Any least-squares optimization problem is also a standard optimization problem. You \n", "can therefore optimize least-squares functions with scalar optimizers as well:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "res = om.minimize(\n", " fun=ls_sphere,\n", " params=np.arange(3),\n", " algorithm=\"scipy_lbfgsb\",\n", ")\n", "res.params.round(5)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Returning additional information\n", "\n", "You can return additional information such as intermediate results, debugging information, etc. in your objective function. This information will be stored in a database if you use [logging](how_to_logging.ipynb).\n", "\n", "To do so, you need to return a `FunctionValue` object." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def sphere_with_info(x):\n", " return om.FunctionValue(value=x @ x, info={\"avg\": x.mean()})\n", "\n", "\n", "res = om.minimize(\n", " fun=sphere_with_info,\n", " params=np.arange(3),\n", " algorithm=\"scipy_lbfgsb\",\n", ")\n", "\n", "res.params.round(6)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "The `info` can be an arbitrary dictionary. In the oversimplified example we returned the \n", "mean of the parameters, which could have been recovered from the params history that \n", "is collected anyways but in real applications this feature can be helpful. 
" ] } ], "metadata": { "kernelspec": { "display_name": "optimagic", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.14" } }, "nbformat": 4, "nbformat_minor": 2 } ================================================ FILE: docs/source/how_to/how_to_derivatives.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "(how-to-jac)=\n", "\n", "# How to speed up your optimization using derivatives\n", "\n", "Many optimization algorithms use derivatives to find good search directions. If you \n", "use a derivative based optimizer but do not provide derivatives of your objective \n", "function, optimagic calculates a numerical derivative for you. \n", "\n", "While this numerical derivative is usually precise enough to find good search directions \n", "it requires `n + 1` evaluations of the objective function (where `n` is the number of \n", "free parameters). For large `n` this becomes very slow.\n", "\n", "This how-to guide shows how you can speed up your optimization by parallelizing \n", "numerical derivatives or by providing closed form derivatives. 
" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Parallel numerical derivatives\n", "\n", "If you have a computer with a few idle cores, the easiest way to speed up your\n", "optimization with a gradient based optimizer is to calculate numerical derivatives \n", "in parallel:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "import plotly.io as pio\n", "\n", "pio.renderers.default = \"notebook_connected\"\n", "\n", "import optimagic as om\n", "\n", "\n", "def sphere(x):\n", " return x @ x\n", "\n", "\n", "res = om.minimize(\n", " fun=sphere,\n", " params=np.arange(5),\n", " algorithm=\"scipy_lbfgsb\",\n", " numdiff_options=om.NumdiffOptions(n_cores=6),\n", ")\n", "res.params.round(6)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Of course, for this super fast objective function, parallelizing will not yield an actual \n", "speedup. But if your objective function takes 100 milliseconds or longer to evaluate, \n", "you can parallelize efficiently to up to `n + 1` cores. " ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Custom derivatives\n", "\n", "If you don't want to solve your speed problem by throwing more compute at it, you can \n", "provide a derivative to optimagic that is faster than doing `n + 1` evaluations of `fun`. \n", "Here we show you how to hand-code it, but in practice you would usually use JAX or another \n", "autodiff framework to create the derivative." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def sphere_gradient(x):\n", " return 2 * x\n", "\n", "\n", "res = om.minimize(\n", " fun=sphere,\n", " params=np.arange(5),\n", " algorithm=\"scipy_lbfgsb\",\n", " jac=sphere_gradient,\n", ")\n", "res.params.round(6)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "In this example, the evaluation of `sphere_gradient` is even faster than evaluating `sphere`. 
\n", "\n", "In non-trivial functions, there are synergies between calculating the objective value and \n", "its derivative. Therefore, you can also provide a function that evaluates both at the same time. In such a case, providing fun is optional." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def sphere_fun_and_gradient(x):\n", " return x @ x, 2 * x\n", "\n", "\n", "res = om.minimize(\n", " fun=sphere, # optional when fun_and_jac is provided\n", " params=np.arange(5),\n", " algorithm=\"scipy_lbfgsb\",\n", " fun_and_jac=sphere_fun_and_gradient,\n", ")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "`fun_and_jac` can be provided in addition to or instead of `jac` or `fun`. Providing them \n", "together gives optimagic more opportunities to save \n", "time by evaluating just the function that is needed for a given optimizer. " ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Derivatives with flexible params\n", "\n", "Derivatives are compatible with any format of params. In general, the gradients have \n", "just the same structure as your params. " ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def dict_fun(x):\n", " return x[\"a\"] ** 2 + x[\"b\"] ** 4\n", "\n", "\n", "def dict_gradient(x):\n", " return {\"a\": 2 * x[\"a\"], \"b\": 4 * x[\"b\"] ** 3}\n", "\n", "\n", "res = om.minimize(\n", " fun=dict_fun,\n", " params={\"a\": 1, \"b\": 2},\n", " algorithm=\"scipy_lbfgsb\",\n", " jac=dict_gradient,\n", ")\n", "res.params" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "This is also the convention that JAX uses, so any derivative you get via JAX will be \n", "compatible with optimagic. \n", "\n", "## Derivatives for least-squares functions\n", "\n", "When minimizing least-squares functions, you don't need the gradient of the objective \n", "value but the jacobian of the least-squares residuals. 
Moreover, this jacobian function \n", "needs to be decorated with the `mark.least_squares` decorator. " ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "@om.mark.least_squares\n", "def ls_sphere(params):\n", " return params\n", "\n", "\n", "@om.mark.least_squares\n", "def ls_sphere_jac(params):\n", " return np.eye(len(params))\n", "\n", "\n", "res = om.minimize(\n", " fun=ls_sphere,\n", " params=np.arange(3),\n", " algorithm=\"scipy_ls_lm\",\n", " jac=ls_sphere_jac,\n", ")\n", "res.params.round(5)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "The `fun_and_jac` argument works just analogous to the scalar case. \n", "\n", "Derivatives of least-squares functions again work with all valid formats of params. \n", "However, the structure of the jacobian can be a bit complicated. Again, JAX will do \n", "the right thing here, so we strongly suggest you calculate all your jacobians via JAX,\n", "especially if your params are not a flat numpy array. \n", "\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Derivatives that work for scalar and least-squares optimizers\n", "\n", "If you want to seamlessly switch between scalar and least-squares optimizers, you can \n", "do so by providing even more versions of derivatives to `minimize`. You probably won't \n", "ever need this, but here is how you would do it. 
To pretend that this can be useful, \n", "we compare a scalar and a least squares optimizer in a criterion_plot:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "results = {}\n", "for algorithm in [\"scipy_lbfgsb\", \"scipy_ls_lm\"]:\n", " results[algorithm] = om.minimize(\n", " fun=ls_sphere,\n", " params=np.arange(5),\n", " algorithm=algorithm,\n", " jac=[sphere_gradient, ls_sphere_jac],\n", " )\n", "\n", "fig = om.criterion_plot(results)\n", "fig.show()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "We see that both optimizers were super fast in solving this problem (mainly because the problem is so simple) and in this case the scalar optimizer was even faster. However, in non-trivial problems it almost always pays off to exploit the least-squares structure if you can." ] } ], "metadata": { "kernelspec": { "display_name": "optimagic", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.14" } }, "nbformat": 4, "nbformat_minor": 2 } ================================================ FILE: docs/source/how_to/how_to_document_optimizers.md ================================================ # How to document optimizers This guide shows you how to document algorithms in optimagic using our new documentation system. We'll walk through the process step-by-step using the `ScipyLBFGSB` optimizer as a complete example.
## When to Use This Guide Use this guide when you need to: - Document a new algorithm you've added to optimagic - Migrate existing algorithm documentation from the old split system (docstrings + `algorithms.md`) to the new system - Update or improve existing algorithm documentation If you're adding a completely new optimizer to optimagic, start with the "How to Add Optimizers guide" first, then use this guide to document your algorithm properly. ## Why the New Documentation System? Previously, algorithm documentation was scattered across multiple places: - Basic descriptions in the algorithm class docstrings - Detailed parameter descriptions in `algorithms.md` - Usage examples separate from the algorithm definitions This made it hard to maintain consistency and keep documentation up-to-date. The new system centralizes nearly all documentation in the algorithm code itself, making it: - Easier to maintain (documentation lives next to code) - More consistent (unified format across all algorithms) - Auto-generated (parameter lists appear automatically in docs) - Type-safe (documentation matches actual parameter types) ## The Documentation System Components Our documentation system has three main parts: 1. **Algorithm Class Documentation**: A comprehensive docstring in the algorithm dataclass that explains what the algorithm does, how it works, and when to use it 1. **Parameter Documentation**: Detailed docstrings for each parameter with mathematical formulations when needed 1. **Usage Integration**: A section in `algorithms.md` that shows how to use the algorithm Let's walk through documenting an algorithm from start to finish. ## Example: Documenting ScipyLBFGSB We'll use the `ScipyLBFGSB` optimizer to show you exactly how to document an algorithm. This is a real example from the optimagic codebase, so you can follow along and see the results.
### Step 1: Understand Your Algorithm Before writing documentation, make sure you understand: - What the algorithm does mathematically - What problems it's designed to solve - How its parameters affect behavior - Any performance characteristics or limitations For L-BFGS-B, this means understanding it's a quasi-Newton method for bound-constrained optimization that approximates the Hessian using gradient history. ```{eval-rst} .. note:: If you are simply migrating an existing algorithm, you can mostly rely on the existing documentation in the algorithm class docstring and `algorithms.md`. ``` ### Step 2: Write the Algorithm Class Documentation The algorithm class docstring is the most important part. It should give users everything they need to decide whether to use this algorithm. Here's how we document `ScipyLBFGSB`: ```python # src/optimagic/optimizers/scipy_optimizers.py class ScipyLBFGSB(Algorithm): """Minimize a scalar differentiable function using the L-BFGS-B algorithm. The optimizer is taken from scipy, which calls the Fortran code written by the original authors of the algorithm. The Fortran code includes the corrections and improvements that were introduced in a follow up paper. lbfgsb is a limited memory version of the original bfgs algorithm, that deals with lower and upper bounds via an active set approach. The lbfgsb algorithm is well suited for differentiable scalar optimization problems with up to several hundred parameters. It is a quasi-newton line search algorithm. At each trial point it evaluates the criterion function and its gradient to find a search direction. It then approximates the hessian using the stored history of gradients and uses the hessian to calculate a candidate step size. Then it uses a gradient based line search algorithm to determine the actual step length. 
Since the algorithm always evaluates the gradient and criterion function jointly, the user should provide a ``fun_and_jac`` function that exploits the synergies in the calculation of criterion and gradient. The lbfgsb algorithm is almost perfectly scale invariant. Thus, it is not necessary to scale the parameters. """ ``` **What makes this docstring effective:** - **Clear first line**: States exactly what the algorithm does - **Implementation details**: Explains it uses scipy's Fortran implementation - **Algorithm classification**: Identifies it as a quasi-Newton method - **Problem suitability**: Explains what problems it's good for - **How it works**: Brief explanation of the algorithm's approach - **Performance characteristics**: Mentions scale invariance - **Usage advice**: Suggests using `fun_and_jac` for efficiency ### Step 3: Document Individual Parameters Each parameter needs clear documentation explaining what it controls and how it affects the algorithm's behavior. ```python # Basic parameter documentation stopping_maxiter: PositiveInt = STOPPING_MAXITER """Maximum number of iterations.""" # Parameter with mathematical formulation convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL r"""Converge if the relative change in the objective function is less than this value. More formally, this is expressed as: .. math:: \frac{f^k - f^{k+1}}{\max\{|f^k|, |f^{k+1}|, 1\}} \leq \textsf{convergence_ftol_rel}. """ # Parameter with external library context limited_memory_storage_length: PositiveInt = LIMITED_MEMORY_STORAGE_LENGTH """The maximum number of variable metric corrections used to define the limited memory matrix. This is the 'maxcor' parameter in the SciPy documentation. The default value is taken from SciPy's L-BFGS-B implementation. Larger values use more memory but may converge faster for some problems. 
""" ``` **Key principles for parameter documentation:** - **Start with a clear description** of what the parameter controls - **Add mathematical formulations** when they clarify the exact meaning (use `r"""` for raw strings with LaTeX) - **Include external library context** when relevant (e.g., "Default value is taken from SciPy") - **Explain performance implications** when they matter - **Use proper type annotations** that match the parameter's constraints ```{eval-rst} .. warning:: If your optimizer module uses type hints (e.g., ``PositiveInt``, ``NonNegativeInt``), include the following at the top of your optimizer module: .. code-block:: python from __future__ import annotations Without this, type hints such as ``PositiveInt`` may appear decomposed in the documentation (e.g., as ``Annotated[int, Gt(gt=0)]``). ``` ### Step 4: Integrate into `algorithms.md` The final step is integrating your documented algorithm into the main documentation. This creates a dropdown section that shows users how to use the algorithm. Add the following to `docs/source/algorithms.md` in an `eval-rst` block: ```text .. dropdown:: scipy_lbfgsb **How to use this algorithm:** .. code-block:: python import optimagic as om om.minimize( fun=lambda x: x @ x, params=[1.0, 2.0, 3.0], algorithm=om.algos.scipy_lbfgsb(stopping_maxiter=1_000, ...), ) or using the string interface: .. code-block:: python om.minimize( fun=lambda x: x @ x, params=[1.0, 2.0, 3.0], algorithm="scipy_lbfgsb", algo_options={"stopping_maxiter": 1_000, ...}, ) **Description and available options:** .. 
autoclass:: optimagic.optimizers.scipy_optimizers.ScipyLBFGSB ``` **What this section provides:** - **The dropdown button and title**: Makes it easy to find the algorithm - **Concrete usage examples** showing both the object and string interfaces - **Algorithm-specific parameter** in the usage example - **Auto-generated documentation** via the `autoclass` directive that pulls in your docstrings ## Working with Existing Documentation If you're migrating an algorithm that already has documentation: ### Finding Existing Content Look for existing documentation in: - **Algorithm class docstrings**: Usually basic descriptions - **`docs/source/algorithms.md`**: Detailed parameter descriptions and examples - **Research papers**: For mathematical formulations and background - **External library docs**: For default values and parameter meanings ### Migration Strategy 1. **Start with the algorithm class**: Move the best description from `algorithms.md` to the class docstring 1. **Update and expand**: Add missing information about performance, usage, etc. 1. **Move parameter docs**: Transfer parameter descriptions from `algorithms.md` to individual parameter docstrings 1. **Verify accuracy**: Check that all information is current and correct 1. 
**Create new integration**: Replace the old `algorithms.md` section with the new dropdown format ## Common Pitfalls to Avoid - **Don't copy-paste generic descriptions**: Each algorithm needs specific, detailed documentation - **Don't skip mathematical formulations**: When convergence criteria or parameters have precise mathematical definitions, include them - **Don't ignore external library context**: Always mention where default values come from - **Don't use vague parameter descriptions**: "Controls the algorithm behavior" is not helpful - **Don't forget performance implications**: Users need to understand trade-offs between parameters ## Getting Help If you're stuck or need clarification: - Look at existing well-documented algorithms like `ScipyLBFGSB` - Check the {ref}`style_guide` for coding conventions - Ask questions in GitHub issues or discussions The goal is to make optimagic's algorithm documentation the best resource for understanding and using optimization algorithms effectively. ================================================ FILE: docs/source/how_to/how_to_errors_during_optimization.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": "0", "metadata": {}, "source": [ "(how-to-errors)=\n", "\n", "# How to handle errors during optimization\n", "\n", "## Try to avoid errors\n", "\n", "Often, optimizers try quite extreme parameter vectors, which then can raise errors in your criterion function or derivative. Often, there are simple tricks to make your code more robust. Avoiding errors is always better than dealing with errors after they occur. \n", "\n", "- Avoid to take ``np.exp`` without further safeguards. With 64 bit floating point numbers, the exponential function is only well defined roughly between -700 and 700. Below it is 0, above it is inf. Sometimes you can use ``scipy.special.logsumexp`` to avoid unsafe evaluations of the exponential. 
Read [this](https://en.wikipedia.org/wiki/LogSumExp) for background information on the logsumexp trick.\n", "- Set bounds for your parameters that prevent extreme parameter constellations.\n", "- Use the ``bounds_distance`` option with a not too small value for ``covariance`` and ``sdcorr`` constraints.\n", "- Use `optimagic.utilities.robust_cholesky` instead of normal\n", " cholesky decompositions or try to avoid cholesky decompositions.\n", "- Use a less aggressive optimizer. Trust region optimizers like `fides` usually choose less extreme steps in the beginning than line search optimizers like `scipy_bfgs` and `scipy_lbfgsb`. \n", "\n", "## Do not use clipping\n", "\n", "A commonly chosen solution to numerical problems is clipping of extreme values. Naive clipping leads to flat areas in your criterion function and can cause spurious convergence. Only use clipping if you know that your optimizer can deal with flat parts. " ] }, { "cell_type": "markdown", "id": "1", "metadata": {}, "source": [ "## Let optimagic do its magic\n", "\n", "Instead of avoiding errors in your criterion function, you can raise them and let optimagic deal with them. If you are using numerical derivatives, errors will automatically be raised if any entry in the derivative is not finite. \n", "\n", "### An example\n", "\n", "Let's look at a simple example from the Moré-Wild benchmark set that has a numerical instability. 
" ] }, { "cell_type": "code", "execution_count": null, "id": "2", "metadata": {}, "outputs": [], "source": [ "import warnings\n", "\n", "import numpy as np\n", "import plotly.io as pio\n", "from scipy.optimize import minimize as scipy_minimize\n", "\n", "pio.renderers.default = \"notebook_connected\"\n", "\n", "import optimagic as om\n", "\n", "warnings.simplefilter(\"ignore\")" ] }, { "cell_type": "code", "execution_count": null, "id": "3", "metadata": {}, "outputs": [], "source": [ "def jennrich_sampson(x):\n", " dim_out = 10\n", " fvec = (\n", " 2 * (1.0 + np.arange(1, dim_out + 1))\n", " - np.exp(np.arange(1, dim_out + 1) * x[0])\n", " - np.exp(np.arange(1, dim_out + 1) * x[1])\n", " )\n", " return fvec @ fvec\n", "\n", "\n", "correct_params = np.array([0.2578252135686162, 0.2578252135686162])\n", "correct_criterion = 124.3621823556148\n", "\n", "start_x = np.array([0.3, 0.4])" ] }, { "cell_type": "markdown", "id": "4", "metadata": {}, "source": [ "### What would scipy do?" ] }, { "cell_type": "code", "execution_count": null, "id": "5", "metadata": {}, "outputs": [], "source": [ "scipy_res = scipy_minimize(jennrich_sampson, x0=start_x, method=\"L-BFGS-B\")" ] }, { "cell_type": "code", "execution_count": null, "id": "6", "metadata": {}, "outputs": [], "source": [ "scipy_res.success" ] }, { "cell_type": "code", "execution_count": null, "id": "7", "metadata": {}, "outputs": [], "source": [ "correct_params.round(4), scipy_res.x.round(4)" ] }, { "cell_type": "markdown", "id": "8", "metadata": {}, "source": [ "So, scipy thinks it solved the problem successfully but the result is far off. 
(Note that scipy would have given us a warning, but we disabled warnings in order to not clutter the output).\n", "\n", "### optimagic's error handling magic" ] }, { "cell_type": "code", "execution_count": null, "id": "9", "metadata": {}, "outputs": [], "source": [ "res = om.minimize(\n", " fun=jennrich_sampson,\n", " params=start_x,\n", " algorithm=\"scipy_lbfgsb\",\n", " error_handling=\"continue\",\n", ")\n", "\n", "correct_params, res.params" ] }, { "cell_type": "markdown", "id": "10", "metadata": {}, "source": [ "### How does the magic work\n", "\n", "When an error occurs and `error_handling` is set to `\"continue\"`, optimagic replaces your criterion with a dummy function (and adjusts the derivative accordingly). \n", "\n", "The dummy function has two important properties:\n", "\n", "1. Its value is always higher than criterion at start params. \n", "2. Its slope guides the optimizer back towards the start parameters. I.e., if you are minimizing, the direction of strongest decrease is towards the start parameters; if you are maximizing, the direction of strongest increase is towards the start parameters. \n", "\n", "Therefore, when hitting an undefined area, an optimizer can take a few steps back until it is in better territory and then continue its work. \n", "\n", "Importantly, the optimizer will not simply go back to a previously evaluated point (which would just lead to cyclical behavior). 
It will just go back in the direction it originally came from.\n", "\n", "In the concrete example, the dummy function would look similar to the following:" ] }, { "cell_type": "code", "execution_count": null, "id": "11", "metadata": {}, "outputs": [], "source": [ "def dummy(params):\n", " start_params = np.array([0.3, 0.4])\n", " # this is close to the actual value used by optimagic\n", " constant = 8000\n", " # the actual slope used by optimagic would be even smaller\n", " slope = 10_000\n", " diff = params - start_params\n", " return constant + slope * np.linalg.norm(diff)" ] }, { "cell_type": "markdown", "id": "12", "metadata": {}, "source": [ "Now, let's plot the two functions. For better illustration, we assume that the jennrich_sampson function is only defined until it reaches a value of 100_000 and the dummy function takes over from there. " ] }, { "cell_type": "code", "execution_count": null, "id": "13", "metadata": {}, "outputs": [], "source": [ "from plotly import graph_objects as go\n", "\n", "grid = np.linspace(0, 1)\n", "params = [np.full(2, val) for val in grid]\n", "values = np.array([jennrich_sampson(p) for p in params])\n", "values = np.where(values <= 1e5, values, np.nan)\n", "dummy_values = np.array([dummy(p) for p in params])\n", "dummy_values = np.where(np.isfinite(values), np.nan, dummy_values)" ] }, { "cell_type": "code", "execution_count": null, "id": "14", "metadata": {}, "outputs": [], "source": [ "fig = go.Figure()\n", "fig.add_trace(go.Scatter(x=grid, y=values))\n", "fig.add_trace(go.Scatter(x=grid, y=dummy_values))\n", "fig.show()" ] }, { "cell_type": "markdown", "id": "15", "metadata": {}, "source": [ "We can see that the dummy function is lower than the highest achieved value of `jennrich_sampson` but higher than the start values. It is also rather flat. Fortunately, that is all we need. 
" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.14" } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: docs/source/how_to/how_to_globalization.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# How to choose a strategy for global optimization\n", "\n", "(to be written)" ] } ], "metadata": { "language_info": { "name": "python" } }, "nbformat": 4, "nbformat_minor": 2 } ================================================ FILE: docs/source/how_to/how_to_logging.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "(how-to-logging)=\n", "\n", "# How to use logging\n", "\n", "\n", "optimagic can keep a persistent log of the parameter and criterion values tried out by an optimizer in a sqlite database. \n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Turn logging on or off\n", "\n", "To enable logging, it suffices to provide a path to an sqlite database when calling ``maximize`` or ``minimize``. The database does not have to exist, optimagic will generate it for you. 
" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from pathlib import Path\n", "\n", "import numpy as np\n", "import plotly.io as pio\n", "\n", "pio.renderers.default = \"notebook_connected\"\n", "\n", "import optimagic as om" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def sphere(params):\n", " return params @ params" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Remove the log file if it exists (just needed for the example)\n", "log_file = Path(\"my_log.db\")\n", "if log_file.exists():\n", " log_file.unlink()\n", "\n", "res = om.minimize(\n", " fun=sphere,\n", " params=np.arange(5),\n", " algorithm=\"scipy_lbfgsb\",\n", " logging=\"my_log.db\",\n", ")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "In case the SQLite file already exists, this will raise a `FileExistsError` to prevent from accidentally polluting an existing database. If you want to reuse\n", "an existing database on purpose, you must explicitly provide the corresponding option for `if_database_exists`:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "log_options = om.SQLiteLogOptions(\n", " \"my_log.db\", if_database_exists=om.ExistenceStrategy.EXTEND\n", ")\n", "\n", "res = om.minimize(\n", " fun=sphere,\n", " params=np.arange(5),\n", " algorithm=\"scipy_lbfgsb\",\n", " logging=log_options,\n", ")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Make logging faster\n", "\n", "By default, we use a very safe mode of sqlite that makes it almost impossible to corrupt the database. Even if your computer is suddenly shut down or unplugged. \n", "\n", "However, this makes writing logs rather slow, which becomes notable when the criterion function is very fast. \n", "\n", "In that case, you can enable `fast_logging`, which is still quite safe!" 
] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "log_options = om.SQLiteLogOptions(\n", " \"my_log.db\",\n", " fast_logging=True,\n", " if_database_exists=om.ExistenceStrategy.REPLACE,\n", ")\n", "\n", "res = om.minimize(\n", " fun=sphere,\n", " params=np.arange(5),\n", " algorithm=\"scipy_lbfgsb\",\n", " logging=log_options,\n", ")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Reading the log\n", "To read the log after an optimization, extract the logger from the optimization result:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "reader = res.logger" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Alternatively, you can create the reader like this:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "reader = om.SQLiteLogReader(\"my_log.db\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Read the start params" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "reader.read_start_params()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Read a specific iteration (use -1 for the last)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "reader.read_iteration(-1)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Read the full history" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "reader.read_history().keys()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Plot the history from a log" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "fig = om.criterion_plot(\"my_log.db\")\n", "fig.show()" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "fig = om.params_plot(\"my_log.db\", selector=lambda x: x[1:3])\n", 
"fig.show()" ] } ], "metadata": { "interpreter": { "hash": "5cdb9867252288f10687117449de6ad870b49795ca695c868016dc0022895cce" }, "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.14" } }, "nbformat": 4, "nbformat_minor": 2 } ================================================ FILE: docs/source/how_to/how_to_multistart.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": "0", "metadata": {}, "source": [ "(how-to-multistart)=\n", "\n", "# How to do multistart optimizations\n", "\n", "Sometimes you want to make sure that your optimization is robust to the initial\n", "parameter values, i.e. that it does not get stuck at a local optimum. This is where\n", "multistart comes in handy.\n", "\n", "\n", "## What does multistart (not) do\n", "\n", "In short, multistart iteratively runs local optimizations from different initial\n", "conditions. If enough local optimizations converge to the same point, it stops.\n", "Importantly, it cannot guarantee that the result is the global optimum, but it can\n", "increase your confidence in the result.\n", "\n", "## TL;DR\n", "\n", "To activate multistart at the default options, pass `multistart=True` to the `minimize`\n", "or `maximize` function, as well as finite bounds on the parameters (which are used to\n", "sample the initial points). The default options are discussed below." 
] }, { "cell_type": "code", "execution_count": null, "id": "1", "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "import plotly.io as pio\n", "\n", "pio.renderers.default = \"notebook_connected\"\n", "\n", "import optimagic as om\n", "\n", "\n", "def fun(x):\n", " return x @ x\n", "\n", "\n", "x0 = np.arange(7) - 4\n", "\n", "bounds = om.Bounds(\n", " lower=np.full_like(x0, -5),\n", " upper=np.full_like(x0, 10),\n", ")\n", "\n", "algo_options = {\"stopping_maxfun\": 1_000}\n", "\n", "res = om.minimize(\n", " fun=fun,\n", " x0=x0,\n", " algorithm=\"scipy_neldermead\",\n", " algo_options=algo_options,\n", " bounds=bounds,\n", " multistart=True,\n", ")" ] }, { "cell_type": "markdown", "id": "2", "metadata": {}, "source": [ "In this example, we limited each local optimization to 1_000 function evaluations. In\n", "general, it is a good idea to limit the number of iterations and function evaluations\n", "for the local optimization. Because of the iterative nature of multistart, this\n", "limitation will usually not result in a precision issue." ] }, { "cell_type": "markdown", "id": "3", "metadata": {}, "source": [ "## What does multistart mean in optimagic?\n", "\n", "Our multistart optimizations are inspired by the [TikTak algorithm](https://github.com/serdarozkan/TikTak) and consist of the following steps:\n", "\n", "1. Draw a large exploration sample of parameter vectors randomly or using a\n", " low-discrepancy sequence.\n", "1. Evaluate the objective function in parallel on the exploration sample.\n", "1. Sort the parameter vectors from best to worst according to their objective function\n", " values. \n", "1. Run local optimizations iteratively. That is, the first local optimization is started\n", " from the best parameter vector in the sample. All subsequent ones are started from a\n", " convex combination of the currently best known parameter vector and the next sample\n", " point. 
" ] }, { "cell_type": "markdown", "id": "4", "metadata": {}, "source": [ "## Visualizing multistart results\n", "\n", "To illustrate the multistart results, we will consider the optimization of a slightly\n", "more complex objective function, compared to `fun` from above. We also limit the\n", "number of exploration samples to 100." ] }, { "cell_type": "code", "execution_count": null, "id": "5", "metadata": {}, "outputs": [], "source": [ "def alpine(x):\n", " return np.sum(np.abs(x * np.sin(x) + 0.1 * x))\n", "\n", "\n", "res = om.minimize(\n", " alpine,\n", " x0=x0,\n", " algorithm=\"scipy_neldermead\",\n", " bounds=bounds,\n", " algo_options=algo_options,\n", " multistart=om.MultistartOptions(n_samples=100, seed=0),\n", ")\n", "\n", "fig = om.criterion_plot(res, monotone=True)\n", "fig.show()" ] }, { "cell_type": "markdown", "id": "6", "metadata": {}, "source": [ "In the above image we see the optimization history for all of the local optimizations\n", "that have been run by multistart. The turquoise line represents the history\n", "corresponding to the local optimization that found the overall best parameter.\n", "\n", "We see that running a single optimization would not have sufficed, as some local\n", "optimizations are stuck." ] }, { "cell_type": "markdown", "id": "7", "metadata": {}, "source": [ "## Multistart does not always run many optimizations\n", "\n", "Since the local optimizations are run iteratively by multistart, it is possible that\n", "only a handful of optimizations are actually run if all of them converge to the same\n", "point. This convergence is determined by the `convergence_max_discoveries` option,\n", "which defaults to 2. This means that if 2 local optimizations report the same point,\n", "multistart will stop. Below we see that if we use the simpler objective function\n", "(`fun`), and the `scipy_lbfgsb` algorithm, multistart runs only 2 local optimizations,\n", "and then stops, as both of them converge to the same point. 
Note that, the\n", "`scipy_lbfgsb` algorithm can solve this simple problem precisely, without reaching the\n", "maximum number of function evaluations." ] }, { "cell_type": "code", "execution_count": null, "id": "8", "metadata": {}, "outputs": [], "source": [ "res = om.minimize(\n", " fun,\n", " x0=x0,\n", " algorithm=\"scipy_lbfgsb\",\n", " bounds=bounds,\n", " algo_options=algo_options,\n", " multistart=om.MultistartOptions(n_samples=100, seed=0),\n", ")\n", "\n", "fig = om.criterion_plot(res)\n", "fig.show()" ] }, { "cell_type": "markdown", "id": "9", "metadata": {}, "source": [ "## How to configure multistart\n", "\n", "Configuration of multistart can be done by passing an instance of\n", "`optimagic.MultistartOptions` to `minimize` or `maximize`. Let's look at a few example\n", "configurations." ] }, { "cell_type": "markdown", "id": "10", "metadata": {}, "source": [ "### How to run a specific number of optimizations\n", "\n", "To run a specific number of local optimizations, you need to set the `stopping_maxopt`\n", "option. Note that this does not set the number of exploration samples, which is\n", "controlled by the `n_samples` option. The number of exploration samples always needs\n", "to be at least as large as the number of local optimizations.\n", "\n", "Note that, as long as `convergence_max_discoveries` is smaller than `stopping_maxopt`,\n", "it is possible that a smaller number of local optimizations are run. 
To avoid this,\n", "set `convergence_max_discoveries` to a value at least as large as `stopping_maxopt`.\n", "\n", "To run, for example, 10 local optimizations from 15 exploration samples, do:" ] }, { "cell_type": "code", "execution_count": null, "id": "11", "metadata": {}, "outputs": [], "source": [ "res = om.minimize(\n", " alpine,\n", " x0=x0,\n", " algorithm=\"scipy_neldermead\",\n", " bounds=bounds,\n", " algo_options=algo_options,\n", " multistart=om.MultistartOptions(\n", " n_samples=15,\n", " stopping_maxopt=10,\n", " convergence_max_discoveries=10,\n", " ),\n", ")\n", "\n", "res.multistart_info.n_optimizations" ] }, { "cell_type": "markdown", "id": "12", "metadata": {}, "source": [ "### How to set a custom exploration sample\n", "\n", "If you want to start the multistart algorithm with a custom exploration sample, you can\n", "do so by passing a sequence of parameters to the `sample` option. Note that sequence\n", "elements must be of the same type as your parameter.\n", "\n", "To generate a sample of 100 random parameters and run them through the multistart\n", "algorithm, do:" ] }, { "cell_type": "code", "execution_count": null, "id": "13", "metadata": {}, "outputs": [], "source": [ "rng = np.random.default_rng(12345)\n", "\n", "sample = [x0 + rng.uniform(-1, 1, size=len(x0)) for _ in range(100)]\n", "\n", "res = om.minimize(\n", " alpine,\n", " x0=x0,\n", " algorithm=\"scipy_neldermead\",\n", " bounds=bounds,\n", " algo_options=algo_options,\n", " multistart=om.MultistartOptions(sample=sample),\n", ")" ] }, { "cell_type": "markdown", "id": "14", "metadata": {}, "source": [ "### How to run multistart in parallel\n", "\n", "\n", "The multistart algorithm can be run in parallel by setting the `n_cores` option to a\n", "value greater than 1. This will run the algorithm in batches. By default, the batch\n", "size is set to `n_cores`, but can be controlled by setting the `batch_size` option. 
The\n", "default batch evaluator is `joblib`, but can be controlled by setting the\n", "`batch_evaluator` option to `\"pathos\"` or a custom callable.\n", "\n", "To run the multistart algorithm in parallel, do:" ] }, { "cell_type": "code", "execution_count": null, "id": "15", "metadata": {}, "outputs": [], "source": [ "res = om.minimize(\n", " alpine,\n", " x0=x0,\n", " algorithm=\"scipy_lbfgsb\",\n", " bounds=bounds,\n", " algo_options=algo_options,\n", " multistart=om.MultistartOptions(n_cores=2),\n", ")" ] }, { "cell_type": "markdown", "id": "16", "metadata": {}, "source": [ "## What to do if you do not have bounds\n", "\n", "Multistart requires finite bounds on the parameters. If your optimization problem is not\n", "bounded, you can set soft lower and upper bounds. These bounds will only be used to\n", "draw the exploration sample, and will not be used to constrain the local optimizations.\n", "\n", "To set soft bounds, do:" ] }, { "cell_type": "code", "execution_count": null, "id": "17", "metadata": {}, "outputs": [], "source": [ "res = om.minimize(\n", " alpine,\n", " x0=x0,\n", " algorithm=\"scipy_lbfgsb\",\n", " bounds=om.Bounds(soft_lower=np.full_like(x0, -3), soft_upper=np.full_like(x0, 8)),\n", " multistart=True,\n", ")" ] }, { "cell_type": "markdown", "id": "18", "metadata": {}, "source": [ "## Understanding multistart results\n", "\n", "When activating multistart, the optimization result object corresponds to the local\n", "optimization that found the best objective function value. The result object has the\n", "additional attribute `multistart_info`, where all of the additional information is\n", "stored. 
It has the following attributes:\n", "\n", "- `local_optima`: A list with the results from all local optimizations that were performed.\n", "- `start_parameters`: A list with the start parameters from those optimizations.\n", "- `exploration_sample`: A list with parameter vectors at which the objective function was evaluated in an initial exploration phase. \n", "- `exploration_results`: The corresponding objective values.\n", "- `n_optimizations`: The number of local optimizations that were run.\n", "\n", "To illustrate the multistart results, let us consider the optimization of the simple\n", "`fun` objective function from above." ] }, { "cell_type": "code", "execution_count": null, "id": "19", "metadata": {}, "outputs": [], "source": [ "res = om.minimize(\n", " fun,\n", " x0=x0,\n", " algorithm=\"scipy_lbfgsb\",\n", " bounds=bounds,\n", " algo_options=algo_options,\n", " multistart=om.MultistartOptions(n_samples=100, convergence_max_discoveries=2),\n", ")" ] }, { "cell_type": "markdown", "id": "20", "metadata": {}, "source": [ "### Start parameters\n", "\n", "The start parameters are the parameter vectors from which the local optimizations were\n", "started. Since the default number of `convergence_max_discoveries` is 2, and both\n", "local optimizations were successful, the start parameters have 2 rows." ] }, { "cell_type": "code", "execution_count": null, "id": "21", "metadata": {}, "outputs": [], "source": [ "res.multistart_info.start_parameters" ] }, { "cell_type": "markdown", "id": "22", "metadata": {}, "source": [ "### Local Optima\n", "\n", "The local optima are the results from the local optimizations. Since in this example\n", "only two local optimizations were run, the local optima list has two elements, each of\n", "which is an optimization result object." 
] }, { "cell_type": "code", "execution_count": null, "id": "23", "metadata": {}, "outputs": [], "source": [ "len(res.multistart_info.local_optima)" ] }, { "cell_type": "markdown", "id": "24", "metadata": {}, "source": [ "### Exploration sample\n", "\n", "The exploration sample is a list of parameter vectors at which the objective function\n", "was evaluated. Above, we chose a random exploration sample of 100 parameter vectors." ] }, { "cell_type": "code", "execution_count": null, "id": "25", "metadata": {}, "outputs": [], "source": [ "np.vstack(res.multistart_info.exploration_sample).shape" ] }, { "cell_type": "markdown", "id": "26", "metadata": {}, "source": [ "### Exploration results\n", "\n", "The exploration results are the objective function values at the exploration sample." ] }, { "cell_type": "code", "execution_count": null, "id": "27", "metadata": {}, "outputs": [], "source": [ "len(res.multistart_info.exploration_results)" ] }, { "cell_type": "markdown", "id": "28", "metadata": {}, "source": [ "### Number of local optimizations" ] }, { "cell_type": "code", "execution_count": null, "id": "29", "metadata": {}, "outputs": [], "source": [ "res.multistart_info.n_optimizations" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.14" } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: docs/source/how_to/how_to_scaling.md ================================================ (scaling)= # How to scale optimization problems Real world optimization problems often comprise parameters of vastly different orders of magnitudes. 
This is typically not a problem for gradient based optimization algorithms but can considerably slow down derivative free optimizers. Below we describe three simple heuristics to improve the scaling of optimization problems and discuss the pros and cons of each approach. ## What does well scaled mean In short, an optimization problem is well scaled if a fixed step in any direction yields a roughly similar sized change in the objective function. In practice, this can never be achieved perfectly (at least for nonlinear problems). However, one can easily improve over simply ignoring the problem altogether. ## TL;DR To activate scaling at the default options, pass `scaling=True` to the `minimize` or `maximize` function. This uses the start values heuristic explained below. The default options are discussed in the section {ref}`scaling-default-values`. ```{code-block} python --- emphasize-lines: 13 --- import numpy as np import optimagic as om def fun(x): return x @ x res = om.minimize( fun=fun, x0=np.arange(5), algorithm="scipy_lbfgsb", scaling=True, ) ``` ## Heuristics to improve scaling (scaling-start-values-heuristic)= ### Divide by absolute value of start parameters In many applications, parameters with very large start values will vary over a wide range and a change in that parameter will only lead to a relatively small change in the objective function. If this is the case, the scaling of the optimization problem can be improved by simply dividing all parameter vectors by the start parameters. 
**Advantages:** - Straightforward - Works with any type of constraints **Disadvantages:** - Makes scaling dependent on start values - Parameters with zero start value need special treatment **How to specify this scaling:** ```{code-block} python --- emphasize-lines: 5 --- res = om.minimize( fun=fun, x0=np.arange(5), algorithm="scipy_lbfgsb", scaling=om.ScalingOptions(method="start_values", clipping_value=0.1), ) ``` ### Divide by bounds In many optimization problems, one has additional information on bounds of the parameter space. Some of these bounds are hard (e.g. probabilities or variances are non negative), others are soft and derived from simple considerations (e.g. if a time discount factor were smaller than 0.7, we would not observe anyone to pursue a university degree in a structural model of educational choices; or if an infection probability was higher than 20% for distant contacts, the covid pandemic would have been over after a month). For parameters that strongly influence the objective function, the bounds stemming from these considerations are typically tighter than for parameters that have a small effect on the objective function. Thus, a natural approach to improve the scaling of the optimization problem is to re-map all parameters such that the bounds are [0, 1] for all parameters. This has the additional advantage that absolute and relative convergence criteria on parameter changes become the same. 
**Advantages:** - Straightforward - Works well in many practical applications - Scaling is independent of start values - No problems with division by zero **Disadvantages:** - Only works if all parameters have bounds - This prohibits some kinds of other constraints in optimagic **How to specify this scaling:** ```{code-block} python --- emphasize-lines: 5,6 --- res = om.minimize( fun=fun, x0=np.arange(5), algorithm="scipy_lbfgsb", bounds=om.Bounds(lower=np.zeros(5), upper=2 * np.arange(5) + 1), scaling=om.ScalingOptions(method="bounds", clipping_value=0.0), ) ``` ## Influencing the magnitude of parameters The above approaches align the scale of parameters relative to each other. However, the overall magnitude is set rather arbitrarily. For example, when dividing by start values, the magnitude of the scaled parameters is around one. When dividing by bounds, it is somewhere between zero and one. For the performance of numerical optimizers, only the relative scales are important. However, influencing the overall magnitude can be helpful to trick some optimizers into doing things they do not want to do. For example, when there is a minimal allowed initial trust region radius, increasing the magnitude of parameters allows to effectively make the trust region radius smaller. Setting the magnitude means simply adding one more entry to the scaling options. For example, if you want to scale by bounds and increase the magnitude by a factor of five: ```{code-block} python --- emphasize-lines: 6 --- res = om.minimize( fun=fun, x0=np.arange(5), algorithm="scipy_lbfgsb", bounds=om.Bounds(lower=np.zeros(5), upper=2 * np.arange(5) + 1), scaling=om.ScalingOptions(method="bounds", clipping_value=0.0, magnitude=5), ) ``` ## Remarks ### What is the `clipping_value` In all of the above heuristics, the parameter vector is divided (elementwise) by some other vector and it is possible that some entries of the divisor are zero or close to zero. 
The clipping value bounds the elements of the divisor away from zero. It should be set to a strictly non-zero number for the `"start_values"` and `"gradient"` approach. The `"bounds"` approach avoids division by exact zeros by construction. The `"clipping_value"` can still be used to avoid extreme upscaling of parameters with very tight bounds. However, this means that the bounds of the re-scaled problem are not exactly [0, 1] for all parameters. (scaling-default-values)= ### Default values Scaling is disabled by default. By passing `scaling=True`, we enable scaling at the default values. We use the `"start_values"` method with a `"clipping_value"` of 0.1 and a magnitude of 1.0. This is the default method because it can be used for all optimization problems and has low computational cost. We strongly recommend you read the above guidelines and choose the method that is most suitable for your problem. ================================================ FILE: docs/source/how_to/how_to_slice_plot.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# How to visualize an optimization problem\n", "\n", "Plotting the criterion function of an optimization problem can answer important questions\n", "- Is the function smooth?\n", "- Is the function flat in some directions?\n", "- Should the optimization problem be scaled?\n", "- Is a candidate optimum a global one?\n", "\n", "Below we show how to make a slice plot of the criterion function." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## The simple sphere function (again)\n", "\n", "Let's look at the simple sphere function again. This time, we specify params as dictionary, but of course, any other params format (recall [pytrees](https://jax.readthedocs.io/en/latest/pytrees.html)) would work just as well. 
" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "import plotly.io as pio\n", "\n", "pio.renderers.default = \"notebook_connected\"\n", "\n", "import optimagic as om" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def sphere(params):\n", " x = np.array(list(params.values()))\n", " return x @ x" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "params = {\"alpha\": 0, \"beta\": 0, \"gamma\": 0, \"delta\": 0}\n", "bounds = om.Bounds(\n", " lower={name: -5 for name in params},\n", " upper={name: i + 2 for i, name in enumerate(params)},\n", ")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Creating a simple slice plot" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "fig = om.slice_plot(\n", " func=sphere,\n", " params=params,\n", " bounds=bounds,\n", ")\n", "fig.show()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ ":::{note}\n", "\n", "For details on using other plotting backends, see [How to change the plotting backend](how_to_change_plotting_backend.ipynb).\n", "\n", ":::" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Interpreting the plot\n", "\n", "The plot gives us the following insights:\n", " \n", "- There is no sign of local optima. 
\n", "- There is no sign of noise or non-differentiablities (careful, grid might not be fine enough).\n", "- The problem seems to be convex.\n", "\n", "-> We would expect almost any derivative based optimizer to work well here (which we know to be correct in that case)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Using advanced options" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "fig = om.slice_plot(\n", " func=sphere,\n", " params=params,\n", " bounds=bounds,\n", " # selecting a subset of params\n", " selector=lambda x: [x[\"alpha\"], x[\"beta\"]],\n", " # evaluate func in parallel\n", " n_cores=4,\n", " # rename the parameters\n", " param_names={\"alpha\": \"Alpha\", \"beta\": \"Beta\"},\n", " title=\"Amazing Plot\",\n", " # number of gridpoints in each dimension\n", " n_gridpoints=50,\n", ")\n", "fig.show()" ] } ], "metadata": { "kernelspec": { "display_name": "optimagic", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.18" } }, "nbformat": 4, "nbformat_minor": 4 } ================================================ FILE: docs/source/how_to/how_to_slice_plot_3d.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": "0", "metadata": {}, "source": [ "# Visualizing Objective Functions with `slice_plot_3d`\n", "\n", "In optimization, understanding the shape of your objective function is a key step toward choosing the right algorithm.\n", "\n", "This notebook introduces the `slice_plot_3d` tool, which provides flexible ways to visualize:\n", "- Single-parameter sensitivity through **univariate slice plots**,\n", "- Pairwise interactions through **contour** or **surface plots**,\n", "- Full parameter relationships through **subplot 
grids**.\n", "\n", "We will progress from basic to advanced usage, learning how to create clean and insightful plots easily.\n" ] }, { "cell_type": "markdown", "id": "1", "metadata": {}, "source": [ "## Univariate slice Plot\n", "\n", "We start with a **univariate slice plot**.\n", "This plots the function along each parameter individually to the function value,\n", "while fixing others at their current values. This provides a clean view of how sensitive the function is to each parameter separately. We use the **Sphere function**, which sums the squares of each input.\n" ] }, { "cell_type": "code", "execution_count": null, "id": "2", "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "\n", "import optimagic as om" ] }, { "cell_type": "code", "execution_count": null, "id": "3", "metadata": {}, "outputs": [], "source": [ "# Define the Sphere function\n", "def sphere(params):\n", " x = np.array(list(params.values()))\n", " return np.sum(x**2)" ] }, { "cell_type": "code", "execution_count": null, "id": "4", "metadata": {}, "outputs": [], "source": [ "params = {\"alpha\": 0, \"beta\": 0, \"gamma\": 0, \"delta\": 0}\n", "bounds = om.Bounds(\n", " lower={name: -5 for name in params},\n", " upper={name: i + 2 for i, name in enumerate(params)},\n", ")" ] }, { "cell_type": "code", "execution_count": null, "id": "5", "metadata": {}, "outputs": [], "source": [ "fig = om.sandbox.slice_plot_3d(\n", " func=sphere,\n", " params=params,\n", " bounds=bounds,\n", ")\n", "fig.show()" ] }, { "cell_type": "markdown", "id": "6", "metadata": {}, "source": [ "## Univariate slice plot with selected parameters\n", "\n", "In many situations, we are interested in exploring only specific parameters.\n", "Using the `selector` argument, we can restrict the univariate plots to\n", "chosen parameters — here, we select `\"alpha\"` and `\"beta\"`.\n", "\n", "This focuses our visualization on dimensions of interest." 
] }, { "cell_type": "code", "execution_count": null, "id": "7", "metadata": {}, "outputs": [], "source": [ "fig = om.sandbox.slice_plot_3d(\n", " func=sphere,\n", " params=params,\n", " bounds=bounds,\n", " selector=lambda p: [p[\"alpha\"], p[\"beta\"]],\n", " projection=\"univariate\",\n", ")\n", "fig.show()" ] }, { "cell_type": "markdown", "id": "8", "metadata": {}, "source": [ "## 3D Surface Plot for Two Parameters\n", "\n", "To better understand interaction between parameters,\n", "we can switch to a **3D surface plot**.\n", "\n", "Surface plots reveal valleys, ridges, and general landscape shapes clearly.\n", "Here, we vary `\"alpha\"` and `\"beta\"` simultaneously and plot the resulting surface." ] }, { "cell_type": "code", "execution_count": null, "id": "9", "metadata": {}, "outputs": [], "source": [ "fig = om.sandbox.slice_plot_3d(\n", " func=sphere,\n", " params=params,\n", " bounds=bounds,\n", " selector=lambda p: [p[\"alpha\"], p[\"beta\"]],\n", " projection=\"surface\",\n", " n_gridpoints=30,\n", ")\n", "fig.show()" ] }, { "cell_type": "markdown", "id": "10", "metadata": {}, "source": [ "## 2D Contour Plot for Two Parameters\n", "\n", "Contour plots offer a 2D view with iso-function-value curves.\n", "\n", "They are especially useful for:\n", "- Finding basins or valleys.\n", "- Visualizing optimization paths.\n", "- Detecting steep or flat regions easily.\n", "\n", "Again, we use `\"alpha\"` and `\"beta\"` to generate the plot." 
] }, { "cell_type": "code", "execution_count": null, "id": "11", "metadata": {}, "outputs": [], "source": [ "fig = om.sandbox.slice_plot_3d(\n", " func=sphere,\n", " params=params,\n", " bounds=bounds,\n", " selector=lambda p: [p[\"alpha\"], p[\"beta\"]],\n", " projection=\"contour\",\n", " n_gridpoints=30,\n", ")\n", "fig.show()" ] }, { "cell_type": "markdown", "id": "12", "metadata": {}, "source": [ "## Grid View for Multiple Parameters\n", "When selecting more than two parameters, the slice_plot_3d function automatically constructs a grid-based visualization to analyze both individual and pairwise parameter effects.\n", "\n", "- **Diagonal** cells display 1D univariate slice plots, representing the isolated\n", "effect of each parameter on the function output.\n", "- **Off-diagonal** cells visualize pairwise interactions between parameters using\n", "either 3D surface or contour plots.\n", "\n", "\n", "### Single projection type\n", "##### (eg: `projection: \"surface\"`)\n", "\n", "By default, when a single projection type is specified (e.g., \"surface\" or \"contour\"), the following behavior is applied:\n", "\n", "- The **lower triangle** of the grid (i.e., plots below the diagonal) displays the\n", "specified projection type.\n", "- The **upper triangle** remains empty to avoid redundancy.\n", "\n", "This allows for a quick and uncluttered visualization of pairwise parameter interactions." 
] }, { "cell_type": "code", "execution_count": null, "id": "13", "metadata": {}, "outputs": [], "source": [ "fig = om.sandbox.slice_plot_3d(\n", " func=sphere,\n", " params=params,\n", " bounds=bounds,\n", " projection=\"surface\",\n", " n_gridpoints=20,\n", ")\n", "fig.show()" ] }, { "cell_type": "markdown", "id": "14", "metadata": {}, "source": [ "### Multiple projection types\n", "##### (eg: `projection: {\"lower\": \"surface\", \"upper\": \"contour\"}`)\n", "\n", "For enhanced flexibility, slice_plot_3d also supports customizing projection types independently for the upper and lower halves of the grid. This is done by passing a dictionary to the projection argument:\n", "\n", "- The **\"lower\"** key controls the projection type for plots below the diagonal.\n", "- The **\"upper\"** key controls the projection type for plots above the diagonal.\n", "\n", "For example, setting \"lower\" to \"surface\" and \"upper\" to \"contour\" enables simultaneous display of both 3D and 2D representations, maximizing interpretability." ] }, { "cell_type": "code", "execution_count": null, "id": "15", "metadata": {}, "outputs": [], "source": [ "fig = om.sandbox.slice_plot_3d(\n", " func=sphere,\n", " params=params,\n", " bounds=bounds,\n", " projection={\"lower\": \"surface\", \"upper\": \"contour\"},\n", " n_gridpoints=20,\n", ")\n", "fig.show()" ] }, { "cell_type": "markdown", "id": "16", "metadata": {}, "source": [ "This **dual-projection** layout is particularly useful when analyzing high-dimensional\n", "functions, as it provides both detailed surface representations and compact contour visualizations in a single coherent grid." 
] }, { "cell_type": "markdown", "id": "17", "metadata": {}, "source": [ "## Full Customization of the Visualization\n", "\n", "`slice_plot_3d` allows fine control over plot styling:\n", "\n", "- `layout_kwargs` adjusts figure size, titles, background themes.\n", "- `plot_kwargs` controls color maps, marker options, and plot styles.\n", "- `make_subplot_kwargs` configures grid spacing, axis sharing, and more.\n", "\n", "Here, we demonstrate a fully customized plot combining all these features." ] }, { "cell_type": "code", "execution_count": null, "id": "18", "metadata": {}, "outputs": [], "source": [ "fig = om.sandbox.slice_plot_3d(\n", " func=sphere,\n", " params=params,\n", " bounds=bounds,\n", " selector=lambda p: [p[\"alpha\"], p[\"beta\"], p[\"gamma\"]],\n", " projection=\"surface\",\n", " n_gridpoints=40,\n", " layout_kwargs={\n", " \"width\": 800,\n", " \"height\": 800,\n", " \"title\": {\"text\": \"Customized Sphere Function Visualization\"},\n", " \"template\": \"plotly_dark\",\n", " },\n", " make_subplot_kwargs={\n", " \"horizontal_spacing\": 0.1,\n", " \"vertical_spacing\": 0.1,\n", " },\n", " plot_kwargs={\n", " \"surface_plot\": {\"colorscale\": \"Viridis\", \"opacity\": 0.7},\n", " },\n", ")\n", "fig.show()" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.17" } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: docs/source/how_to/how_to_specify_algorithm_and_algo_options.md ================================================ (specify-algorithm)= # How to specify and configure algorithms This how-to guide is about the mechanics of specifying and configuring optimizers in optimagic.
It is not about choosing the right algorithm for your problem. For a discussion on choosing algorithms, see [this how-to guide](how_to_algorithm_selection.ipynb) There are two ways to specify and configure optimizers. The *optimagic way* and the *scipy way*. Both use the `algorithm` argument of `minimize` and `maximize` to specify an optimizer and both are super easy to use. As the name suggests, the *scipy way* is more familiar for users of scipy.optimize. The *optimagic way* adds discoverability and autocomplete. Using the *optimagic way*, you don't need to look things up in the documentation and rarely have to leave your editor, notebook or IDE. ::::{tab-set} :::{tab-item} The optimagic way :sync: optimagic ## Selecting an algorithm ```python import optimagic as om import numpy as np def fun(x): return x @ x om.minimize( fun=fun, params=np.arange(3), algorithm=om.algos.scipy_neldermead, ) ``` The algorithm is selected by passing an algorithm class. This class is usually not imported manually, but discovered using `om.algos`. After typing `om.algos.`, your editor will show you all algorithms you can choose from. ## Configuring an algorithm To configure an algorithm with advanced options, you can create an instance of the class: ```python algo = om.algos.scipy_neldermead( stopping_maxiter=100, adaptive=True, ) om.minimize( fun=fun, params=np.arange(3), algorithm=algo, ) ``` Again, you can use your editor's autocomplete to discover all options that your chosen algorithm supports. When the instance is created, the types and values of all options are checked. Should you make a mistake, you will get an error before you run your optimization. ## Advanced autocomplete in action Assume you need a gradient-free optimizer that supports bounds on the parameters. Moreover, you have a fixed computational budget, so you want to set stopping options. 
If you type `om.algos.`, your editor will show you all available optimizers and a list of categories you can use to filter the results. In our case, we select `GradientFree` and `Bounded`, and we could do that in any order we want. ![autocomplete_1](../_static/images/autocomplete_1.png) After selecting one of the displayed algorithms, in our case `scipy_neldermead`, the editor shows all tuning parameters of that optimizer. If you start to type `stopping`, you will see all stopping criteria that are available. ![autocomplete_2](../_static/images/autocomplete_2.png) ## Modifying an algorithm Given an algorithm, you can easily create a **modified copy** by using the `with_option` method. ```python # using copy constructors to create variants base_algo = om.algorithms.fides(stopping_maxiter=1000) algorithms = [ base_algo.with_option(trustregion_initial_radius=r) for r in [0.1, 0.2, 0.5] ] for algo in algorithms: om.minimize( fun=fun, params=np.arange(3), algorithm=algo, ) ``` ::: :::{tab-item} The scipy way :sync: scipy ## Selecting an algorithm ```python import optimagic as om import numpy as np def fun(x): return x @ x om.minimize( fun=fun, params=np.arange(3), algorithm="scipy_lbfgsb", ) ``` For a list of all supported algorithm names, see {ref}`list_of_algorithms`. ```{note} To provide full compatibility with scipy, you can also select algorithms with the argument `method` under their original scipy name, e.g. `method="L-BFGS-B"` instead of `algorithm="scipy_lbfgsb"`. ``` ## Configuring an algorithm To configure an algorithm, you can pass a dictionary to the `algo_options` argument. ```python options = { "stopping_maxiter": 100, "adaptive": True, } om.minimize( fun=fun, params=np.arange(3), algorithm="scipy_neldermead", algo_options=options, ) ``` If `algo_options` contains options that are not supported by the optimizer, they will be ignored and you get a warning. To find out which options are supported by an optimizer, see {ref}`list_of_algorithms`. 
::: :::: ================================================ FILE: docs/source/how_to/how_to_start_parameters.md ================================================ (params)= # How to specify `params` `params` is the first argument of any criterion function in optimagic. It collects all the parameters to estimate, optimize, or differentiate over. In many optimization libraries, `params` must be a one-dimensional numpy array. In optimagic, it can be an arbitrary pytree (think nested dictionary) containing numbers, arrays, pandas.Series, and/or pandas.DataFrames. Below, we show a few examples of what is possible in optimagic and discuss the advantages and drawbacks of each of them. Again, we use the simple `sphere` function you know from other tutorials as an example. ```{eval-rst} .. tab-set:: .. tab-item:: Array A frequent choice of ``params`` is a one-dimensional numpy array. This is because one-dimensional numpy arrays are all that is supported by most optimizer libraries. In our opinion, it is rarely a good choice to represent parameters as flat numpy arrays and then access individual parameters or slices by position. The only exceptions are simple optimization problems with very-fast-to-evaluate criterion functions where any overhead must be avoided. If you still want to use one-dimensional numpy arrays, here is how: .. code-block:: python import optimagic as om def sphere(params): return params @ params om.minimize( fun=sphere, params=np.arange(3), algorithm="scipy_lbfgsb", ) .. tab-item:: DataFrame Originally, pandas DataFrames were the mandatory format for ``params`` in optimagic. They are still highly recommended and have a few special features. For example, they allow you to bundle information on start parameters and bounds together into one data structure. Let's look at an example where we do that: ..
code-block:: python def sphere(params): return (params["value"] ** 2).sum() params = pd.DataFrame( data={"value": [1, 2, 3], "lower_bound": [-np.inf, 1.5, 0]}, index=["a", "b", "c"], ) om.minimize( fun=sphere, params=params, algorithm="scipy_lbfgsb", ) DataFrames have many advantages: - It is easy to select single parameters or groups of parameters or work with the entire parameter vector. Especially, if you use a well designed MultiIndex. - It is very easy to produce publication quality LaTeX tables from them. - If you have nested models, you can easily update the parameter vector of a larger model with the values from a smaller one (e.g. to get good start parameters). - You can bundle information on bounds and values in one place. - It is easy to compare two params vectors for equality. If you are sure you won't have bounds on your parameters, you can also use a pandas.Series instead of a pandas.DataFrame. A drawback of DataFrames is that they are not JAX compatible. Another one is that they are a bit slower than numpy arrays. .. tab-item:: Dict ``params`` can also be a (nested) dictionary containing all of the above and more. .. code-block:: python def sphere(params): return params["a"] ** 2 + params["b"] ** 2 + (params["c"] ** 2).sum() res = om.minimize( fun=sphere, params={"a": 0, "b": 1, "c": pd.Series([2, 3, 4])}, algorithm="scipy_neldermead", ) Dictionaries of arrays are ideal if you want to do vectorized computations with groups of parameters. They are also a good choice if you calculate derivatives with JAX. While optimagic won't stop you, don't go too far! Having parameters in very deeply nested dictionaries makes it hard to visualize results and/or even to compare two estimation results. .. tab-item:: Scalar If you have a one-dimensional optimization problem, the natural way to represent your params is a float: ..
code-block:: python def sphere(params): return params**2 om.minimize( fun=sphere, params=3, algorithm="scipy_lbfgsb", ) ``` ================================================ FILE: docs/source/how_to/how_to_visualize_histories.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": "0", "metadata": {}, "source": [ "# How to visualize optimizer histories\n", "\n", "optimagic's `criterion_plot` can visualize the history of function values for one or multiple optimizations. \n", "optimagic's `params_plot` can visualize the history of parameter values for one optimization. \n", "\n", "This can help you to understand whether your optimization actually converged and if not, which parameters are problematic. \n", "\n", "It can also help you to find the fastest optimizer for a given optimization problem. " ] }, { "cell_type": "code", "execution_count": null, "id": "1", "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "import plotly.io as pio\n", "\n", "pio.renderers.default = \"notebook_connected\"\n", "\n", "import optimagic as om" ] }, { "cell_type": "markdown", "id": "2", "metadata": {}, "source": [ "## Run two optimization to get example results" ] }, { "cell_type": "code", "execution_count": null, "id": "3", "metadata": {}, "outputs": [], "source": [ "def sphere(x):\n", " return x @ x\n", "\n", "\n", "results = {}\n", "for algo in [\"scipy_lbfgsb\", \"scipy_neldermead\"]:\n", " results[algo] = om.minimize(sphere, params=np.arange(5), algorithm=algo)" ] }, { "cell_type": "markdown", "id": "4", "metadata": {}, "source": [ "## Make a single criterion plot" ] }, { "cell_type": "code", "execution_count": null, "id": "5", "metadata": {}, "outputs": [], "source": [ "fig = om.criterion_plot(results[\"scipy_neldermead\"])\n", "fig.show()" ] }, { "cell_type": "markdown", "id": "6", "metadata": {}, "source": [ ":::{note}\n", "\n", "For details on using other plotting backends, see [How to change the plotting 
backend](how_to_change_plotting_backend.ipynb).\n", "\n", ":::" ] }, { "cell_type": "markdown", "id": "7", "metadata": {}, "source": [ "## Compare two optimizations in a criterion plot" ] }, { "cell_type": "code", "execution_count": null, "id": "8", "metadata": {}, "outputs": [], "source": [ "fig = om.criterion_plot(results)\n", "fig.show()" ] }, { "cell_type": "markdown", "id": "9", "metadata": {}, "source": [ "## Use some advanced options of criterion plot" ] }, { "cell_type": "code", "execution_count": null, "id": "10", "metadata": {}, "outputs": [], "source": [ "fig = om.criterion_plot(\n", " results,\n", " # cut off after 180 evaluations\n", " max_evaluations=180,\n", " # show only the current best function value\n", " monotone=True,\n", ")\n", "fig.show()" ] }, { "cell_type": "markdown", "id": "11", "metadata": {}, "source": [ "## Make a params plot" ] }, { "cell_type": "code", "execution_count": null, "id": "12", "metadata": {}, "outputs": [], "source": [ "fig = om.params_plot(results[\"scipy_neldermead\"])\n", "fig.show()" ] }, { "cell_type": "markdown", "id": "13", "metadata": {}, "source": [ "## Use advanced options of params plot" ] }, { "cell_type": "code", "execution_count": null, "id": "14", "metadata": {}, "outputs": [], "source": [ "fig = om.params_plot(\n", " results[\"scipy_neldermead\"],\n", " # cut off after 180 evaluations\n", " max_evaluations=180,\n", " # select only the last three parameters\n", " selector=lambda x: x[2:],\n", ")\n", "fig.show()" ] }, { "cell_type": "markdown", "id": "15", "metadata": {}, "source": [ "## Criterion plot with multistart optimization" ] }, { "cell_type": "code", "execution_count": null, "id": "16", "metadata": {}, "outputs": [], "source": [ "def alpine(x):\n", " return np.sum(np.abs(x * np.sin(x) + 0.1 * x))\n", "\n", "\n", "res = om.minimize(\n", " alpine,\n", " params=np.arange(7),\n", " bounds=om.Bounds(soft_lower=np.full(7, -3), soft_upper=np.full(7, 10)),\n", " algorithm=\"scipy_neldermead\",\n", " 
multistart=om.MultistartOptions(n_samples=100, convergence_max_discoveries=3),\n", ")" ] }, { "cell_type": "code", "execution_count": null, "id": "17", "metadata": {}, "outputs": [], "source": [ "fig = om.criterion_plot(res, max_evaluations=1000, monotone=True)\n", "fig.show()" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.17" } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: docs/source/how_to/index.md ================================================ (how-to)= # How-to Guides How-to Guides show how to achieve specific tasks. In many cases they show you how to use advanced options. For a more basic introduction, check out the [tutorials](tutorials). ```{toctree} --- maxdepth: 1 --- how_to_criterion_function how_to_start_parameters how_to_derivatives how_to_specify_algorithm_and_algo_options how_to_algorithm_selection how_to_bounds how_to_constraints how_to_globalization how_to_multistart how_to_visualize_histories how_to_change_plotting_backend how_to_scaling how_to_logging how_to_errors_during_optimization how_to_slice_plot how_to_benchmarking how_to_add_optimizers how_to_document_optimizers ``` ================================================ FILE: docs/source/index.md ================================================ #
```{raw} html ```

*optimagic* is a Python package for numerical optimization. It is a unified interface to optimizers from SciPy, NlOpt and many other Python packages. *optimagic*'s `minimize` function works just like SciPy's, so you don't have to adjust your code. You simply get more optimizers for free. On top you get powerful diagnostic tools, parallel numerical derivatives and more. If you want to see what *optimagic* can do, check out this [tutorial](tutorials/optimization_overview.ipynb) *optimagic* was formerly called *estimagic*, because it also provides functionality to perform statistical inference on estimated parameters. *estimagic* is now a subpackage of *optimagic*, which is documented [here](estimagic). `````{grid} 1 2 2 2 --- gutter: 3 --- ````{grid-item-card} :text-align: center :img-top: _static/images/light-bulb.svg :class-img-top: index-card-image :shadow: md ```{button-link} tutorials/index.html --- click-parent: ref-type: ref class: stretched-link index-card-link sd-text-primary --- Tutorials ``` New users of optimagic should read this first. ```` ````{grid-item-card} :text-align: center :img-top: _static/images/book.svg :class-img-top: index-card-image :shadow: md ```{button-link} how_to/index.html --- click-parent: ref-type: ref class: stretched-link index-card-link sd-text-primary --- How-to Guides ``` Detailed instructions for specific and advanced tasks. ```` ````{grid-item-card} :text-align: center :img-top: _static/images/installation.svg :class-img-top: index-card-image :shadow: md ```{button-link} installation.html --- click-parent: ref-type: ref class: stretched-link index-card-link sd-text-primary --- Installation ``` Installation instructions for optimagic and optional dependencies. 
```` ````{grid-item-card} :text-align: center :img-top: _static/images/optimization.svg :class-img-top: index-card-image :shadow: md ```{button-link} algorithms.html --- click-parent: ref-type: ref class: stretched-link index-card-link sd-text-primary --- Optimization Algorithms ``` List of numerical optimizers and their optional parameters. ```` ````{grid-item-card} :text-align: center :img-top: _static/images/books.svg :class-img-top: index-card-image :shadow: md ```{button-link} explanation/index.html --- click-parent: ref-type: ref class: stretched-link index-card-link sd-text-primary --- Explanations ``` Background information on key topics central to the package. ```` ````{grid-item-card} :text-align: center :img-top: _static/images/coding.svg :class-img-top: index-card-image :shadow: md ```{button-link} reference/index.html --- click-parent: ref-type: ref class: stretched-link index-card-link sd-text-primary --- API Reference ``` Detailed description of the optimagic API. ```` ````{grid-item-card} :text-align: center :columns: 12 :img-top: _static/images/video.svg :class-img-top: index-card-image :shadow: md ```{button-link} videos.html --- click-parent: ref-type: ref class: stretched-link index-card-link sd-text-primary --- Videos ``` Collection of tutorials, talks, and screencasts on optimagic. 
```` ````` ```{toctree} --- hidden: true maxdepth: 1 --- tutorials/index how_to/index explanation/index reference/index development/index videos algorithms estimagic/index installation ``` ______________________________________________________________________ We thank all institutions that have funded or supported optimagic (formerly estimagic). ```{image} _static/images/aai-institute-logo.svg --- width: 185px --- ``` ```{image} _static/images/numfocus_logo.png --- width: 200 --- ``` ```{image} _static/images/tra_logo.png --- width: 240px --- ``` ```{image} _static/images/hoover_logo.png --- width: 192px --- ``` ```{image} _static/images/transferlab-logo.svg --- width: 420px --- ``` ______________________________________________________________________ **Useful links for search:** {ref}`genindex` | {ref}`modindex` | {ref}`search` ================================================ FILE: docs/source/installation.md ================================================ # Installation ## Basic installation The preferred way to install optimagic is via `conda` or `mamba`. To do so, open a terminal and type: ``` conda install -c conda-forge optimagic ``` Alternatively, you can install optimagic via pip: ``` pip install optimagic ``` In both cases, you get optimagic and all of its mandatory dependencies. ## Installing optional dependencies Only `scipy` is a mandatory dependency of optimagic. Other algorithms become available if you install more packages. We make this optional because you will rarely need all of them in the same project. For an overview of all optimizers and the packages you need to install to enable them, see {ref}`list_of_algorithms`. To enable all algorithms at once, do the following: ``` conda install -c conda-forge nlopt ``` ``` pip install Py-BOBYQA ``` ``` pip install DFO-LS ``` *Note*: We recommend installing `DFO-LS` version 1.5.3 or higher. 
Versions 1.5.0 or lower also work, but versions `1.5.1` and `1.5.2` contain bugs that can lead to errors being raised. ``` conda install -c conda-forge petsc4py ``` *Note*: `` `petsc4py` `` is not available on Windows. ``` conda install -c conda-forge cyipopt ``` *Note*: Make sure you have at least `cyipopt` 1.4. ``` conda install -c conda-forge pygmo ``` ``` pip install "fides>=0.7.4" ``` *Note*: Make sure you have at least `fides` 0.7.4. ================================================ FILE: docs/source/reference/algo_options.md ================================================ (algo_options)= # The default algorithm options ```{eval-rst} .. automodule:: optimagic.optimization.algo_options :members: ``` ================================================ FILE: docs/source/reference/batch_evaluators.md ================================================ (batch_evaluators)= # Batch evaluators ```{eval-rst} .. automodule:: optimagic.batch_evaluators :members: ``` ================================================ FILE: docs/source/reference/index.md ================================================ # optimagic API ```{eval-rst} .. currentmodule:: optimagic ``` (maximize-and-minimize)= ## Optimization ```{eval-rst} .. dropdown:: maximize .. autofunction:: maximize ``` ```{eval-rst} .. dropdown:: minimize .. autofunction:: minimize ``` ```{eval-rst} .. dropdown:: slice_plot .. autofunction:: slice_plot ``` ```{eval-rst} .. dropdown:: criterion_plot .. autofunction:: criterion_plot ``` ```{eval-rst} .. dropdown:: params_plot .. autofunction:: params_plot ``` ```{eval-rst} .. dropdown:: OptimizeResult .. autoclass:: OptimizeResult :members: ``` ```{eval-rst} .. dropdown:: Bounds .. autoclass:: Bounds :members: ``` ```{eval-rst} .. dropdown:: Constraints .. autoclass:: FixedConstraint :members: .. autoclass:: IncreasingConstraint :members: .. autoclass:: DecreasingConstraint :members: .. autoclass:: EqualityConstraint :members: .. autoclass:: ProbabilityConstraint :members: .. 
autoclass:: PairwiseEqualityConstraint :members: .. autoclass:: FlatCovConstraint :members: .. autoclass:: FlatSDCorrConstraint :members: .. autoclass:: LinearConstraint :members: .. autoclass:: NonlinearConstraint :members: ``` ```{eval-rst} .. dropdown:: NumdiffOptions .. autoclass:: NumdiffOptions :members: ``` ```{eval-rst} .. dropdown:: MultistartOptions .. autoclass:: MultistartOptions :members: ``` ```{eval-rst} .. dropdown:: ScalingOptions .. autoclass:: ScalingOptions :members: ``` ```{eval-rst} .. dropdown:: LogOptions .. autoclass:: SQLiteLogOptions :members: ``` ```{eval-rst} .. dropdown:: History .. autoclass:: History :members: ``` ```{eval-rst} .. dropdown:: count_free_params .. autofunction:: count_free_params ``` ```{eval-rst} .. dropdown:: check_constraints .. autofunction:: check_constraints ``` (first_derivative)= ## Derivatives ```{eval-rst} .. dropdown:: first_derivative .. autofunction:: first_derivative ``` ```{eval-rst} .. dropdown:: second_derivative .. autofunction:: second_derivative ``` (benchmarking)= ## Benchmarks ```{eval-rst} .. dropdown:: get_benchmark_problems .. autofunction:: get_benchmark_problems ``` ```{eval-rst} .. dropdown:: run_benchmark .. autofunction:: run_benchmark ``` ```{eval-rst} .. dropdown:: profile_plot .. autofunction:: profile_plot ``` ```{eval-rst} .. dropdown:: convergence_plot .. autofunction:: convergence_plot ``` (logreading)= ## Log reading ```{eval-rst} .. dropdown:: OptimizeLogReader .. autoclass:: OptimizeLogReader ``` ## Other: ```{toctree} --- maxdepth: 1 --- utilities algo_options batch_evaluators typing ``` ================================================ FILE: docs/source/reference/typing.md ================================================ (typing)= # Types ```{eval-rst} .. 
automodule:: optimagic.typing :members: ``` ================================================ FILE: docs/source/reference/utilities.md ================================================ (utilities)= # Utility functions ```{eval-rst} .. automodule:: optimagic.utilities :members: ``` ================================================ FILE: docs/source/refs.bib ================================================ % Encoding: UTF-8 @Book{Dennis1996, Title = {Numerical Methods for Unconstrained Optimization and Nonlinear Equations}, Author = {Dennis, J.E. and Schnabel, R.B.}, Publisher = {Society for Industrial and Applied Mathematics}, Year = {1996}, Series = {Classics in Applied Mathematics}, ISBN = {9780898713640}, Lccn = {lc95051776}, Url = {https://books.google.de/books?id=RtxcWd0eBD0C&redir_esc=y} } @book{Hansen2019, title = {Econometrics}, author = {Bruce E. Hansen}, editor = {Bruce E. Hansen}, publisher = {Unpublished}, year = {2019}, url = {https://www.ssc.wisc.edu/~bhansen/econometrics/}, owner = {janos}, timestamp = {2019.10.03} } @book{Hansen2020, title = {Econometrics}, author = {Bruce E. Hansen}, editor = {Bruce E. Hansen}, publisher = {Unpublished}, year = {2020}, address = {https://www.ssc.wisc.edu/~bhansen/econometrics/}, owner = {janos}, timestamp = {2020.03.04} } @book{Verbeek2008, title = {A Guide to Modern Econometrics}, author = {Verbeek, M.}, publisher = {Wiley}, year = {2008}, isbn = {9780470517697}, lccn = {2007050167}, url = {https://books.google.com/books?id=uEFm6pAJZhoC} } @book{Wassermann2006, title = {All of nonparametric statistics}, author = {Wasserman, Larry}, year = {2006}, publisher = {Springer Science \& Business Media} } @article{Groeneveld1994, author = {Eildert Groeneveld}, title = {A reparameterization to improve numerical optimization in multivariate REML (co)variance component estimation}, journal = {Genetics, Selection, Evolution : GSE}, year = {1994}, volume = {26}, pages = {537 - 545} } @article{Pinheiro1996, author = {José C. 
Pinheiro and Douglas M. Bates}, title = {Unconstrained Parameterizations for Variance-Covariance Matrices}, journal = {Statistics and Computing}, year = {1996}, volume = {6}, pages = {289--296} } @techreport{Kraft1988, author = {Kraft, Dieter}, institution = {DLR German Aerospace Center – Institute for Flight Mechanics}, title = {A software package for sequential quadratic programming}, year = {1988}, address = {Köln, Germany}, url = {http://degenerateconic.com/wp-content/uploads/2018/03/DFVLR_FB_88_28.pdf} } @book{Nocedal2006, author = {Nocedal, Jorge and Wright, Stephen}, publisher = {Springer Science \& Business Media}, title = {Numerical optimization}, year = {2006} } @incollection{Conn2000, author = {Conn, AR and Gould, NI and Toint, PL}, booktitle = {Trust region methods}, publisher = {Siam}, title = {Nonlinear equations and nonlinear fitting}, year = {2000}, pages = {749--774}, volume = {1} } @article{Byrd1999, author = {Byrd, Richard H and Hribar, Mary E and Nocedal, Jorge}, journal = {SIAM Journal on Optimization}, title = {An interior point algorithm for large-scale nonlinear programming}, year = {1999}, number = {4}, pages = {877--900}, volume = {9}, publisher = {SIAM} } @article{Lalee1998, author = {Lalee, Marucha and Nocedal, Jorge and Plantenga, Todd}, journal = {SIAM Journal on Optimization}, title = {On the implementation of an algorithm for large-scale equality constrained optimization}, year = {1998}, number = {3}, pages = {682--706}, volume = {8}, publisher = {SIAM} } @article{Gao2012, author = {Gao, Fuchang and Han, Lixing}, journal = {Computational Optimization and Applications}, title = {Implementing the Nelder-Mead simplex algorithm with adaptive parameters}, year = {2012}, number = {1}, pages = {259--277}, volume = {51}, publisher = {Springer} } @article{Powell1998, author = {Powell, Michael JD}, journal = {Acta numerica}, title = {Direct search algorithms for optimization calculations}, year = {1998}, pages = {287--336}, publisher = 
{Cambridge University Press} } @article{Powell2007, author = {Powell, Michael JD}, journal = {Mathematics Today-Bulletin of the Institute of Mathematics and its Applications}, title = {A view of algorithms for optimization without derivatives}, year = {2007}, number = {5}, pages = {170--174}, volume = {43}, publisher = {Citeseer} } @techreport{Benson2017, author = {Benson, S and McInnes, LC and Mor{\'e}, JJ and Munson, T and Sarich, J}, institution = {Technical Report ANL/MCS-TM-322, Argonne National Laboratory}, title = {TAO user manual (revision 3.7)}, year = {2017}, url = {http://web.mit.edu/tao-petsc_v3.7/tao_manual.pdf} } @techreport{Wild2015, author = {Wild, Stefan M.}, institution = {Argonne National Laboratory}, title = {Solving Derivative-Free Nonlinear Least Squares Problems with POUNDERS}, year = {2015}, url = {https://doi.org/10.1137/1.9781611974683.ch40} } @InProceedings{Wild2008, author = {Wild, Stefan M.}, title = {{MNH: A} Derivative-Free Optimization Algorithm Using Minimal Norm {Hessians}}, booktitle = {Tenth Copper Mountain Conference on Iterative Methods}, year = {2008}, month = {April}, gurl = {https://scholar.google.com/scholar?cluster=6407907761614456217}, url = {http://grandmaster.colorado.edu/~copper/2008/SCWinners/Wild.pdf}, } @misc{Cartis2018, author = {Coralia Cartis and Jan Fiala and Benjamin Marteau and Lindon Roberts}, title = {Improving the Flexibility and Robustness of Model-Based Derivative-Free Optimization Solvers}, year = {2018}, archiveprefix = {arXiv}, eprint = {1804.00154}, primaryclass = {math.OC} } @misc{Cartis2018a, author = {Coralia Cartis and Lindon Roberts and Oliver Sheridan-Methven}, title = {Escaping local minima with derivative-free methods: a numerical investigation}, year = {2018}, archiveprefix = {arXiv}, eprint = {1812.11343}, primaryclass = {math.OC} } @article{Powell2009, author = {Powell, Michael JD}, journal = {Cambridge NA Report NA2009/06, University of Cambridge, Cambridge}, title = {The BOBYQA algorithm 
for bound constrained optimization without derivatives}, year = {2009}, pages = {26--46} } @misc{Cartis2018b, author = {Coralia Cartis and Jan Fiala and Benjamin Marteau and Lindon Roberts}, title = {Improving the Flexibility and Robustness of Model-Based Derivative-Free Optimization Solvers}, year = {2018}, archiveprefix = {arXiv}, eprint = {1804.00154}, primaryclass = {math.OC} } @unpublished{Saxton2018, title = {resample: Randomization-based inference in Python.}, author = {Daniel Saxton}, year = {2018}, note = {unpublished}, url = {https://github.com/dsaxton/resample} } @article{CameronMiller2015, title = {A practitioner’s guide to cluster-robust inference}, author = {Cameron, A Colin and Miller, Douglas L}, journal = {Journal of human resources}, volume = {50}, number = {2}, pages = {317--372}, year = {2015}, publisher = {University of Wisconsin Press} } @article{Waechter2005, author = {Andreas Wächter and Lorenz T. Biegler}, journal = {{SIAM} Journal on Optimization}, title = {Line Search Filter Methods for Nonlinear Programming: Local Convergence}, year = {2005}, month = {jan}, number = {1}, pages = {32--48}, volume = {16}, doi = {10.1137/s1052623403426544}, publisher = {Society for Industrial {\&} Applied Mathematics ({SIAM})} } @article{Waechter2005a, author = {Andreas Wächter and Lorenz T. Biegler}, journal = {{SIAM} Journal on Optimization}, title = {Line Search Filter Methods for Nonlinear Programming: Motivation and Global Convergence}, year = {2005}, month = {jan}, number = {1}, pages = {1--31}, volume = {16}, doi = {10.1137/s1052623403426556}, publisher = {Society for Industrial {\&} Applied Mathematics ({SIAM})} } @article{Waechter2005b, author = {Andreas Wächter and Lorenz T. 
Biegler}, journal = {Mathematical Programming}, title = {On the implementation of an interior-point filter line-search algorithm for large-scale nonlinear programming}, year = {2005}, month = {apr}, number = {1}, pages = {25--57}, volume = {106}, doi = {10.1007/s10107-004-0559-y}, publisher = {Springer Science and Business Media {LLC}} } @article{Nocedal2009, author = {Jorge Nocedal and Andreas Wächter and Richard A. Waltz}, journal = {{SIAM} Journal on Optimization}, title = {Adaptive Barrier Update Strategies for Nonlinear Interior Methods}, year = {2009}, month = {jan}, number = {4}, pages = {1674--1693}, volume = {19}, doi = {10.1137/060649513}, publisher = {Society for Industrial {\&} Applied Mathematics ({SIAM})} } @article{Schlueter2009, author = {Martin Schlüter and Jose A. Egea and Julio R. Banga}, journal = {Computers {\&} Operations Research}, title = {Extended ant colony optimization for non-convex mixed integer nonlinear programming}, year = {2009}, month = {jul}, number = {7}, pages = {2217--2229}, volume = {36}, doi = {10.1016/j.cor.2008.08.015}, publisher = {Elsevier {BV}} } @article{Karaboga2007, author = {Dervis Karaboga and Bahriye Basturk}, journal = {Journal of Global Optimization}, title = {A powerful and efficient algorithm for numerical function optimization: artificial bee colony ({ABC}) algorithm}, year = {2007}, month = {apr}, number = {3}, pages = {459--471}, volume = {39}, doi = {10.1007/s10898-007-9149-x}, publisher = {Springer Science and Business Media {LLC}} } @article{Mernik2015, author = {Marjan Mernik and Shih-Hsi Liu and Dervis Karaboga and Matej {\v{C}}repin{\v{s}}ek}, journal = {Information Sciences}, title = {On clarifying misconceptions when comparing variants of the Artificial Bee Colony Algorithm by offering a new implementation}, year = {2015}, month = {jan}, pages = {115--127}, volume = {291}, doi = {10.1016/j.ins.2014.08.040}, publisher = {Elsevier {BV}} } @article{Storn1997, author = {Rainer Storn and Kenneth Price}, 
journal = {Journal of Global Optimization}, title = {Differential Evolution – A Simple and Efficient Heuristic for Global Optimization over Continuous Spaces}, year = {1997}, number = {4}, pages = {341--359}, volume = {11}, url = {https://link.springer.com/article/10.1023/A:1008202821328}, publisher = {Springer Science and Business Media {LLC}} } @article{Oliveto2007, author = {Pietro S. Oliveto and Jun He and Xin Yao}, journal = {International Journal of Automation and Computing}, title = {Time complexity of evolutionary algorithms for combinatorial optimization: A decade of results}, year = {2007}, month = {jul}, number = {3}, pages = {281--293}, volume = {4}, doi = {10.1007/s11633-007-0281-3}, publisher = {Springer Science and Business Media {LLC}} } @article{Brest2006, author = {Brest, Janez and Greiner, Sao and Boskovic, Borko and Mernik, Marjan and Zumer, Viljem}, journal = {IEEE Transactions on Evolutionary Computation}, title = {Self-Adapting Control Parameters in Differential Evolution: A Comparative Study on Numerical Benchmark Problems}, year = {2006}, number = {6}, pages = {646-657}, volume = {10}, doi = {10.1109/TEVC.2006.872133} } @inproceedings{Elsayed2011, author = {Elsayed, Saber M. and Sarker, Ruhul A. and Essam, Daryl L.}, booktitle = {2011 IEEE Congress of Evolutionary Computation (CEC)}, title = {Differential evolution with multiple strategies for solving CEC2011 real-world numerical optimization problems}, year = {2011}, pages = {1041-1048}, doi = {10.1109/CEC.2011.5949732} } @incollection{Hansen2006, author = {Nikolaus Hansen}, booktitle = {Towards a New Evolutionary Computation}, publisher = {Springer Berlin Heidelberg}, title = {The {CMA} Evolution Strategy: A Comparing Review}, year = {2006}, pages = {75--102}, doi = {10.1007/3-540-32494-1_4} } @article{Corana1987, author = {Corana, A. and Marchesi, M. and Martini, C. 
and Ridella, S.}, title = {Minimizing Multimodal Functions of Continuous Variables with the “Simulated Annealing” Algorithm—Corrigenda for This Article is Available Here}, year = {1987}, issue_date = {Sept. 1987}, publisher = {Association for Computing Machinery}, address = {New York, NY, USA}, volume = {13}, number = {3}, issn = {0098-3500}, url = {https://doi.org/10.1145/29380.29864}, doi = {10.1145/29380.29864}, abstract = {A new global optimization algorithm for functions of continuous variables is presented, derived from the “Simulated Annealing” algorithm recently introduced in combinatorial optimization.The algorithm is essentially an iterative random search procedure with adaptive moves along the coordinate directions. It permits uphill moves under the control of a probabilistic criterion, thus tending to avoid the first local minima encountered.The algorithm has been tested against the Nelder and Mead simplex method and against a version of Adaptive Random Search. The test functions were Rosenbrock valleys and multiminima functions in 2,4, and 10 dimensions.The new method proved to be more reliable than the others, being always able to find the optimum, or at least a point very close to it. It is quite costly in term of function evaluations, but its cost can be predicted in advance, depending only slightly on the starting point.}, journal = {ACM Trans. Math. Softw.}, month = sep, pages = {262–280}, numpages = {19} } @Article{Poli2007, author = {Riccardo Poli and James Kennedy and Tim Blackwell}, journal = {Swarm Intelligence}, title = {Particle swarm optimization}, year = {2007}, month = {aug}, number = {1}, pages = {33--57}, volume = {1}, doi = {10.1007/s11721-007-0002-0}, publisher = {Springer Science and Business Media {LLC}}, } @Article{Wales1997, author = {David J. Wales and Jonathan P. K. 
Doye}, journal = {The Journal of Physical Chemistry A}, title = {Global Optimization by Basin-Hopping and the Lowest Energy Structures of Lennard-Jones Clusters Containing up to 110 Atoms}, year = {1997}, month = {jul}, number = {28}, pages = {5111--5116}, volume = {101}, publisher = {American Chemical Society ({ACS})}, } @InProceedings{Glasmachers2010, author = {Tobias Glasmachers and Tom Schaul and Sun Yi and Daan Wierstra and Jürgen Schmidhuber}, booktitle = {Proceedings of the 12th annual conference on Genetic and evolutionary computation - {GECCO} {\textquotesingle}10}, title = {Exponential natural evolution strategies}, year = {2010}, publisher = {{ACM} Press}, doi = {10.1145/1830483.1830557}, } @Article{Mahdavi2007, author = {M. Mahdavi and M. Fesanghary and E. Damangir}, journal = {Applied Mathematics and Computation}, title = {An improved harmony search algorithm for solving optimization problems}, year = {2007}, month = {may}, number = {2}, pages = {1567--1579}, volume = {188}, doi = {10.1016/j.amc.2006.11.033}, publisher = {Elsevier {BV}}, } @Article{Mirjalili2014, author = {Seyedali Mirjalili and Seyed Mohammad Mirjalili and Andrew Lewis}, journal = {Advances in Engineering Software}, title = {Grey Wolf Optimizer}, year = {2014}, month = {mar}, pages = {46--61}, volume = {69}, doi = {10.1016/j.advengsoft.2013.12.007}, publisher = {Elsevier {BV}}, } @Article{Kolda2003, author = {Tamara G. 
Kolda and Robert Michael Lewis and Virginia Torczon}, journal = {{SIAM} Review}, title = {Optimization by Direct Search: New Perspectives on Some Classical and Modern Methods}, year = {2003}, month = {jan}, number = {3}, pages = {385--482}, volume = {45}, doi = {10.1137/s003614450242889}, } @Article{Biscani2020, doi = {10.21105/joss.02338}, url = {https://doi.org/10.21105/joss.02338}, year = {2020}, publisher = {The Open Journal}, volume = {5}, number = {53}, pages = {2338}, author = {Francesco Biscani and Dario Izzo}, title = {A parallel global multiobjective framework for optimization: pagmo}, journal = {Journal of Open Source Software} } @Article{Coleman1996, author = {Thomas F. Coleman and Yuying Li}, journal = {SIAM Journal on Optimization}, title = {An Interior Trust Region Approach for Nonlinear Minimization Subject to Bounds}, year = {1996}, month = {may}, number = {2}, pages = {418--445}, volume = {6}, doi = {10.1137/0806023}, publisher = {Society for Industrial {\&} Applied Mathematics ({SIAM})}, } @Article{Coleman1994, author = {Thomas F. Coleman and Yuying Li}, journal = {Mathematical Programming}, title = {On the convergence of interior-reflective Newton methods for nonlinear minimization subject to bounds}, year = {1994}, month = {oct}, number = {1-3}, pages = {189--224}, volume = {67}, doi = {10.1007/bf01582221}, publisher = {Springer Science and Business Media {LLC}}, } @Article{Broyden1965, author = {C. G. Broyden}, journal = {Mathematics of Computation}, title = {A class of methods for solving nonlinear simultaneous equations}, year = {1965}, number = {92}, pages = {577--577}, volume = {19}, publisher = {American Mathematical Society ({AMS})}, } @Book{Nocedal1999, editor = {Jorge Nocedal and Stephen J. Wright}, publisher = {Springer-Verlag}, title = {Numerical Optimization}, year = {1999}, doi = {10.1007/b98874}, } @Article{Fletcher1987, author = {R. Fletcher and C. 
Xu}, journal = {IMA Journal of Numerical Analysis}, title = {Hybrid Methods for Nonlinear Least Squares}, year = {1987}, number = {3}, pages = {371--389}, volume = {7}, doi = {10.1093/imanum/7.3.371}, publisher = {Oxford University Press ({OUP})}, } @Article{Kaelo2006, author = {Kaelo, P and Ali, M}, journal = {J. Optim. Theory Appl}, title = {Some variants of the controlled random search algorithm for global optimization}, year = {2006}, number = {2}, pages = {253--264}, volume = {130}, } @InBook{Price1978, author = {Price, W}, editor = {L. C. W. Dixon and G. P. Szego}, pages = {71--84}, publisher = {North-Holland Press}, title = {A controlled random search procedure for global optimization}, year = {1978}, address = {Amsterdam}, volume = {2}, booktitle = {Towards Global Optimization}, } @Article{Price1983, author = {Price, W}, journal = {J. Optim. Theory Appl}, title = {Global optimization by controlled random search}, year = {1983}, number = {3}, pages = {333--348}, volume = {40}, } @Article{Kraft1994, author = {Kraft, Dieter}, journal = {ACM Transactions on Mathematical Software}, title = {Algorithm 733: TOMP-Fortran modules for optimal control calculations}, year = {1994}, number = {3}, pages = {262--281}, volume = {20}, } @Article{Jones1993, author = {Jones, D and Perttunen, C and Stuckmann, B}, journal = {J. Optimization Theory and Applications}, title = {Lipschitzian optimization without the lipschitz constant}, year = {1993}, pages = {157}, volume = {79}, } @Article{Gablonsky2001, author = {Gablonsky, J and Kelley, C}, journal = {J. 
Global Optimization}, title = {A locally-biased form of the DIRECT algorithm}, year = {2001}, number = {1}, pages = {27--37}, volume = {21}, } @Article{DaSilva2010, author = {Da Silva, C and Santos, M and Goncalves, H and Hernandez-Figueroa}, journal = {IEEE Photonics Technology Letters}, title = {Designing Novel Photonic Devices by Bio-Inspired Computing}, year = {2010}, number = {15}, pages = {1177--1179}, volume = {22}, } @Misc{DaSilva2010a, author = {Da Silva, C and Santos}, title = {Parallel and Bio-Inspired Computing Applied to Analyze Microwave and Photonic Metamaterial Structures}, year = {2010}, } @Article{Beyer2002, author = {Beyer, H.-G and Schwefel, H.-P}, journal = {Journal Natural Computing}, title = {Evolution Strategies: A Comprehensive Introduction}, year = {2002}, number = {1}, pages = {3--52}, volume = {1}, } @article{Vent1975, author = {Vent, W.}, title = {Rechenberg, Ingo, Evolutionsstrategie — Optimierung technischer Systeme nach Prinzipien der biologischen Evolution. 170 S. mit 36 Abb. Frommann-Holzboog-Verlag. Stuttgart 1973. Broschiert}, journal = {Feddes Repertorium}, volume = {86}, number = {5}, pages = {337-337}, year = {1975} } @Article{PhilipRunarsson2005, author = {Philip Runarsson, Thomas and Yao, Xin}, journal = {IEEE Trans. on Systems, Man, and Cybernetics Part C: Applications and Reviews}, title = {Search biases in constrained evolutionary optimization}, year = {2005}, number = {2}, pages = {233--243}, volume = {35}, } @Article{Thomas2000, author = {Thomas, P and Runarsson, Xin and Yao}, journal = {IEEE Trans. 
Evolutionary Computation}, title = {Stochastic ranking for constrained evolutionary optimization}, year = {2000}, number = {3}, pages = {284--294}, volume = {4}, } @Article{Nelder1965, author = {Nelder, J and Mead, R}, journal = {The Computer Journal}, title = {A simplex method for function minimization}, year = {1965}, pages = {308--313}, volume = {7}, } @Misc{Brent1972, author = {Brent, Richard}, title = {Algorithms for Minimization without Derivatives}, year = {1972}, publisher = {Prentice-Hall}, } @InBook{Powell1994, author = {Powell, M}, editor = {S. Gomez and J.-P. Hennart}, pages = {51--67}, publisher = {Kluwer Academic}, title = {A direct search optimization method that models the objective and constraint functions by linear interpolation}, year = {1994}, address = {Dordrecht}, booktitle = {Advances in Optimization and Numerical Analysis}, } @Misc{Rowan1990, author = {Rowan, T}, title = {Functional Stability Analysis of Numerical Algorithms}, year = {1990}, } @InProceedings{Powell2004, author = {Powell, M}, booktitle = {Proc. 40th Workshop on Large Scale Nonlinear Optimization}, title = {The NEWUOA software for unconstrained optimization without derivatives}, year = {2004}, address = {Erice, Italy}, } @Article{Dembo1983, author = {Dembo, R and Steihaug, T}, journal = {Math. Programming}, title = {Truncated Newton algorithms for large-scale optimization}, year = {1983}, pages = {190--212}, volume = {26}, doi = {10.1007/BF02592055}, } @Article{Nocedal1989, author = {Nocedal ; D, J and Liu, J}, journal = {Math. Comput}, title = {On the limited memory BFGS method for large scale optimization}, year = {1989}, pages = {503--528}, volume = {35}, } @Article{Svanberg2002, author = {Svanberg, Krister}, journal = {SIAM J. 
Optim}, title = {A class of globally convergent optimization methods based on conservative convex separable approximations}, year = {2002}, number = {2}, pages = {555--573}, volume = {12}, } @Article{Vlcek2006, author = {Vlcek, J and Luksan, L}, journal = {J. Computational Appl. Math}, title = {Shifted limited-memory variable metric methods for large-scale unconstrained minimization}, year = {2006}, pages = {365--390}, volume = {186}, } @Article{Nocedal1980, author = {Nocedal, J.}, journal = {Math. Comput}, title = {Updating quasi-Newton matrices with limited storage}, year = {1980}, pages = {773--782}, volume = {35}, } @InProceedings{Chiang2014, author = {Chiang, N and Petra, C and Zavala, V}, booktitle = {Proceedings of the 18th power systems computation conference (PSCC)}, title = {Structured nonconvex optimization of large-scale energy systems using PIPS-NLP}, year = {2014}, address = {Wroclaw, Poland}, } @Article{Zhou2010, author = {Weijun Zhou and Xiaojun Chen}, journal = {SIAM Journal on Optimization}, title = {Global Convergence of a New Hybrid Gauss{\textendash}Newton Structured {BFGS} Method for Nonlinear Least Squares Problems}, year = {2010}, month = {jan}, number = {5}, pages = {2422--2441}, volume = {20}, doi = {10.1137/090748470}, publisher = {Society for Industrial {\&} Applied Mathematics ({SIAM})}, } @Article{Dennis1989, author = {J. E. Dennis and H. J. Martinez and R. A. Tapia}, journal = {Journal of Optimization Theory and Applications}, title = {Convergence theory for the structured {BFGS} secant method with an application to nonlinear least squares}, year = {1989}, month = {may}, number = {2}, pages = {161--178}, volume = {61}, doi = {10.1007/bf00962795}, publisher = {Springer Science and Business Media {LLC}}, } @Article{Huschens1994, author = {J. 
Huschens}, journal = {SIAM Journal on Optimization}, title = {On the Use of Product Structure in Secant Methods for Nonlinear Least Squares Problems}, year = {1994}, month = {feb}, number = {1}, pages = {108--129}, volume = {4}, doi = {10.1137/0804005}, publisher = {Society for Industrial {\&} Applied Mathematics ({SIAM})}, } @Article{Berndt1974, title = {Estimation and Inference in Nonlinear Structural Models}, journal = {Annals of Economic and Social Measurement}, author = {Berndt, Ernst R. and Hall, Bronwyn and Hall, Robert and Hausman, Jerry}, year = {1974}, pages = {653-665}, booktitle = {Annals of Economic and Social Measurement, Volume 3, number 4}, publisher = {National Bureau of Economic Research, Inc}, } @Article{Halbert1982, author = {Halbert White}, journal = {Econometrica}, number = {1}, pages = {1--25}, publisher = {[Wiley, Econometric Society]}, title = {Maximum Likelihood Estimation of Misspecified Models}, volume = {50}, year = {1982}, } @article{More1983, title={Computing a Trust Region Step}, author={Jorge J. Mor{\'e} and Danny C. 
Sorensen}, journal={Siam Journal on Scientific and Statistical Computing}, year={1983}, volume={4}, pages={553-572} } @article{Bertsekas1982, author = {Bertsekas, Dimitri P.}, title = {Projected Newton Methods for Optimization Problems with Simple Constraints}, journal = {SIAM Journal on Control and Optimization}, volume = {20}, number = {2}, pages = {221-246}, year = {1982}, doi = {10.1137/0320018}, URL = {https://doi.org/10.1137/0320018}, } @article{Steihaug1983, author = {Steihaug, Trond}, title = {The Conjugate Gradient Method and Trust Regions in Large Scale Optimization}, journal = {SIAM Journal on Numerical Analysis}, volume = {20}, number = {3}, pages = {626-637}, year = {1983}, doi = {10.1137/0720042}, URL = {https://doi.org/10.1137/0720042}, } @InBook{Toint1981, title = {Towards an Efficient Sparsity Exploiting Newton Method for Minimization}, author = {Toint, {\relax Ph}ilippe L.}, booktitle = {Sparse Matrices and Their Uses}, publisher = {Academic Press}, year = {1981}, address = {London, England}, editor = {I. S. Duff}, pages = {57--88}, } @article{Zhang2010, author = {Zhang, Hongchao and Conn, Andrew R. and Scheinberg, Katya}, title = {A Derivative-Free Algorithm for Least-Squares Minimization}, journal = {SIAM Journal on Optimization}, volume = {20}, number = {6}, pages = {3555-3576}, year = {2010}, doi = {10.1137/09075531X}, URL = {https://doi.org/10.1137/09075531X}, } @book{Conn2009, author = {Conn, Andrew R. 
and Scheinberg, Katya and Vicente, Luis N.}, title = {Introduction to Derivative-Free Optimization}, publisher = {Society for Industrial and Applied Mathematics}, year = {2009}, doi = {10.1137/1.9780898718768}, URL = {https://epubs.siam.org/doi/abs/10.1137/1.9780898718768}, } @article{JAMES1975343, title = {Minuit - a system for function minimization and analysis of the parameter errors and correlations}, journal = {Computer Physics Communications}, volume = {10}, number = {6}, pages = {343-367}, year = {1975}, issn = {0010-4655}, doi = {https://doi.org/10.1016/0010-4655(75)90039-9}, url = {https://www.sciencedirect.com/science/article/pii/0010465575900399}, author = {F. James and M. Roos} } @misc{Hansen2023, title={The CMA Evolution Strategy: A Tutorial}, author={Nikolaus Hansen}, year={2023}, eprint={1604.00772}, archivePrefix={arXiv}, primaryClass={cs.LG}, url={https://arxiv.org/abs/1604.00772}, } @InProceedings{Kennedy1995, author={Kennedy, J. and Eberhart, R.}, booktitle={Proceedings of ICNN'95 - International Conference on Neural Networks}, title={Particle swarm optimization}, year={1995}, volume={4}, pages={1942-1948 vol.4}, keywords={Particle swarm optimization;Birds;Educational institutions;Marine animals;Testing;Humans;Genetic algorithms;Optimization methods;Artificial neural networks;Performance evaluation}, doi={10.1109/ICNN.1995.488968}, } @InProceedings{Zambrano2013, author = {Zambrano-Bigiarini, Mauricio and Clerc, Maurice and Rojas, Rodrigo}, booktitle = {2013 IEEE Congress on Evolutionary Computation}, title = {Standard Particle Swarm Optimisation 2011 at CEC-2013: A baseline for future PSO improvements}, year = {2013}, pages = {2337-2344}, keywords = {Optimization;Standards;Benchmark testing;Topology;Algorithm design and analysis;Convergence;Equations;particle swarm optimization;SPSO-2011;CEC-2013;random topology;rotational invariance;benchmark testing;evolutionary computation;optimization}, doi = {10.1109/CEC.2013.6557848}, } 
@inbook{randomsearch2010, author = {Zabinsky, Zelda}, year = {2010}, month = {06}, pages = {}, title = {Random Search Algorithms}, isbn = {9780470400531}, doi = {10.1002/9780470400531.eorms0704} } @INPROCEEDINGS{spsaimpl, author={Rastogi, Pushpendre and Zhu, Jingyi and Spall, James C.}, booktitle={2016 Annual Conference on Information Science and Systems (CISS)}, title={Efficient implementation of enhanced adaptive simultaneous perturbation algorithms}, year={2016}, volume={}, number={}, pages={298-303}, keywords={Estimation;Algorithm design and analysis;Adaptive Estimation;Simultaneous Perturbation Stochastic Approximation (SPSA);Woodbury Matrix Identity}, doi={10.1109/CISS.2016.7460518}} @inproceedings{tbpsaimpl, author = {Hellwig, Michael and Beyer, Hans-Georg}, year = {2016}, month = {09}, pages = {}, title = {Evolution under Strong Noise: A Self-Adaptive Evolution Strategy Can Reach the Lower Performance Bound - the pcCMSA-ES}, booktitle = {Parallel Problem Solving from Nature -- PPSN XIV}, volume = {9921}, isbn = {9783319458229}, doi = {10.1007/978-3-319-45823-6_3} } @ARTICLE{cgaimpl, author={Harik, G.R. and Lobo, F.G. 
and Goldberg, D.E.}, journal={IEEE Transactions on Evolutionary Computation}, title={The compact genetic algorithm}, year={1999}, volume={3}, number={4}, pages={287-297}, keywords={Genetic algorithms;Algorithm design and analysis;Laboratories;Computer simulation;Genetic engineering;Probability distribution;Computational modeling;History;Convergence;Mathematical model}, doi={10.1109/4235.797971}} @inproceedings{bayesoptimimpl, author = {Raponi, Elena and Wang, Hao and Bujny, Mariusz and Boria, Simonetta and Doerr, Carola}, title = {High Dimensional Bayesian Optimization Assisted by Principal Component Analysis}, year = {2020}, isbn = {978-3-030-58111-4}, publisher = {Springer-Verlag}, address = {Berlin, Heidelberg}, url = {https://doi.org/10.1007/978-3-030-58112-1_12}, doi = {10.1007/978-3-030-58112-1_12}, abstract = {Bayesian Optimization (BO) is a surrogate-assisted global optimization technique that has been successfully applied in various fields, e.g., automated machine learning and design optimization. Built upon a so-called infill-criterion and Gaussian Process regression (GPR), the BO technique suffers from a substantial computational complexity and hampered convergence rate as the dimension of the search spaces increases. Scaling up BO for high-dimensional optimization problems remains a challenging task.In this paper, we propose to tackle the scalability of BO by hybridizing it with a Principal Component Analysis (PCA), resulting in a novel PCA-assisted BO (PCA-BO) algorithm. Specifically, the PCA procedure learns a linear transformation from all the evaluated points during the run and selects dimensions in the transformed space according to the variability of evaluated points. We then construct the GPR model, and the infill-criterion in the space spanned by the selected dimensions.We assess the performance of our PCA-BO in terms of the empirical convergence rate and CPU time on multi-modal problems from the COCO benchmark framework. 
The experimental results show that PCA-BO can effectively reduce the CPU time incurred on high-dimensional problems, and maintains the convergence rate on problems with an adequate global structure. PCA-BO therefore provides a satisfactory trade-off between the convergence rate and computational efficiency opening new ways to benefit from the strength of BO approaches in high dimensional numerical optimization.}, booktitle = {Parallel Problem Solving from Nature – PPSN XVI: 16th International Conference, PPSN 2020, Leiden, The Netherlands, September 5-9, 2020, Proceedings, Part I}, pages = {169–183}, numpages = {15}, keywords = {Dimensionality reduction, Principal Component Analysis, Black-box optimization, Bayesian optimization}, location = {Leiden, The Netherlands} } @book{Rechenberg1973, author = {Rechenberg, Ingo}, title = {Evolutionsstrategie: Optimierung technischer Systeme nach Prinzipien der biologischen Evolution}, publisher = {Frommann-Holzboog Verlag}, year = {1973}, url = {https://gwern.net/doc/reinforcement-learning/exploration/1973-rechenberg.pdf}, address = {Stuttgart}, note = {[Evolution Strategy: Optimization of Technical Systems According to the Principles of Biological Evolution]} } @article{Schumer1968, author={Schumer, M. and Steiglitz, K.}, journal={IEEE Transactions on Automatic Control}, title={Adaptive step size random search}, year={1968}, volume={13}, number={3}, pages={270-276}, keywords={Minimization methods;Gradient methods;Search methods;Adaptive control;Communication systems;Q measurement;Cost function;Newton method;Military computing}, doi={10.1109/TAC.1968.1098903} } @misc{edaimpl, title={Theory of Estimation-of-Distribution Algorithms}, author={Martin S. 
Krejca and Carsten Witt}, year={2018}, eprint={1806.05392}, archivePrefix={arXiv}, primaryClass={cs.NE}, url={https://arxiv.org/abs/1806.05392}, } @book{emnaimpl, author = {Larranaga, Pedro and Lozano, Jose}, year = {2002}, month = {01}, pages = {}, title = {Estimation of Distribution Algorithms: A New Tool for Evolutionary Computation}, isbn = {9781461356042}, publisher = {Springer}, journal = {Genetic algorithms and evolutionary computation ; 2}, doi = {10.1007/978-1-4615-1539-5} } @Misc{Nogueira2014, author={Fernando Nogueira}, title={{Bayesian Optimization}: Open source constrained global optimization tool for {Python}}, year={2014--}, url="https://github.com/bayesian-optimization/BayesianOptimization" } @article{Stander2002, author={Stander, Nielen and Craig, Kenneth}, year={2002}, month={06}, pages={}, title={On the robustness of a simple domain reduction scheme for simulation-based optimization}, volume={19}, journal={International Journal for Computer-Aided Engineering and Software (Eng. Comput.)}, doi={10.1108/02644400210430190} } @inproceedings{gardner2014bayesian, title={Bayesian optimization with inequality constraints.}, author={Gardner, Jacob R and Kusner, Matt J and Xu, Zhixiang Eddie and Weinberger, Kilian Q and Cunningham, John P}, booktitle={ICML}, volume={2014}, pages={937--945}, year={2014} } @article{gad2023pygad, title={Pygad: An intuitive genetic algorithm python library}, author={Gad, Ahmed Fawzy}, journal={Multimedia Tools and Applications}, pages={1--14}, year={2023}, publisher={Springer} } @INPROCEEDINGS{EberhartKennedy1995, author = {Eberhart, R. and Kennedy, J.}, booktitle = {MHS'95. 
Proceedings of the Sixth International Symposium on Micro Machine and Human Science}, title = {A new optimizer using particle swarm theory}, year = {1995}, pages = {39-43}, keywords = {Particle swarm optimization;Genetic algorithms;Testing;Acceleration;Particle tracking;Optimization methods;Artificial neural networks;Evolutionary computation;Performance evaluation;Statistics}, doi = {10.1109/MHS.1995.494215} } @INPROCEEDINGS{Lane2008SpatialPSO, author={Lane, James and Engelbrecht, Andries and Gain, James}, booktitle={2008 IEEE Swarm Intelligence Symposium}, title={Particle swarm optimization with spatially meaningful neighbours}, year={2008}, volume={}, number={}, pages={1-8}, keywords={Particle swarm optimization;Topology;Birds;Convergence;Computer science;USA Councils;Cities and towns;Africa;Cultural differences;Data structures;Delaunay Triangulation;Neighbour Topology;Particle Swarm Optimization;Heuristics}, doi={10.1109/SIS.2008.4668281} } @article{Ni2013, author = {Ni, Qingjian and Deng, Jianming}, title = {A New Logistic Dynamic Particle Swarm Optimization Algorithm Based on Random Topology}, journal = {The Scientific World Journal}, volume = {2013}, number = {1}, pages = {409167}, doi = {https://doi.org/10.1155/2013/409167}, url = {https://onlinelibrary.wiley.com/doi/abs/10.1155/2013/409167}, eprint = {https://onlinelibrary.wiley.com/doi/pdf/10.1155/2013/409167}, abstract = {Population topology of particle swarm optimization (PSO) will directly affect the dissemination of optimal information during the evolutionary process and will have a significant impact on the performance of PSO. Classic static population topologies are usually used in PSO, such as fully connected topology, ring topology, star topology, and square topology. 
In this paper, the performance of PSO with the proposed random topologies is analyzed, and the relationship between population topology and the performance of PSO is also explored from the perspective of graph theory characteristics in population topologies. Further, in a relatively new PSO variant which named logistic dynamic particle optimization, an extensive simulation study is presented to discuss the effectiveness of the random topology and the design strategies of population topology. Finally, the experimental data are analyzed and discussed. And about the design and use of population topology on PSO, some useful conclusions are proposed which can provide a basis for further discussion and research.}, year = {2013} } @Comment{jabref-meta: databaseType:bibtex;} ================================================ FILE: docs/source/tutorials/bayes_opt_tutorial.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "id": "0", "metadata": {}, "source": [ "# `bayes_opt` Optimizer in optimagic" ] }, { "cell_type": "markdown", "id": "1", "metadata": {}, "source": [ "This tutorial demonstrates how to use the `\"bayes_opt\"` optimizer in optimagic. To use it, you need to have `bayesian-optimization` package installed. You can install it with the following command:\n", "```bash\n", "pip install bayesian-optimization\n", "```" ] }, { "cell_type": "markdown", "id": "2", "metadata": {}, "source": [ "### When to use Bayesian Optimization:\n", "- Function evaluations are expensive (e.g., simulations, experiments)\n", "- The function is a black box (it cannot be expressed in closed form)\n", "- You have a limited budget of function evaluations\n", "- When gradients are unavailable or computationally expensive to obtain\n", "\n", "### Key Concepts\n", "\n", "### Gaussian Processes (GP)\n", "The GP serves as a probabilistic model of your objective function. 
It provides both a mean prediction and uncertainty estimates.\n", "### Acquisition Functions\n", "These functions use the GP's predictions to decide where to evaluate next.\n", "\n", "Common acquisition functions include:\n", "- **Upper Confidence Bound (UCB)**: Balances mean prediction with uncertainty\n", "- **Expected Improvement (EI)**: Expected improvement over the current best\n", "- **Probability of Improvement (POI)**: Probability of improving over the current best" ] }, { "cell_type": "code", "execution_count": null, "id": "3", "metadata": { "vscode": { "languageId": "plaintext" } }, "outputs": [], "source": [ "import numpy as np\n", "import pandas as pd\n", "import matplotlib.pyplot as plt\n", "\n", "import optimagic as om\n", "from bayes_opt import acquisition" ] }, { "cell_type": "markdown", "id": "4", "metadata": {}, "source": [ "## Basic Usage of the `bayes_opt` Optimizer" ] }, { "cell_type": "markdown", "id": "5", "metadata": {}, "source": [ "Let's start with a simple example using a sphere function" ] }, { "cell_type": "code", "execution_count": null, "id": "6", "metadata": { "vscode": { "languageId": "plaintext" } }, "outputs": [], "source": [ "def sphere(params):\n", " return params @ params" ] }, { "cell_type": "code", "execution_count": null, "id": "7", "metadata": { "vscode": { "languageId": "plaintext" } }, "outputs": [], "source": [ "# Note: bayes_opt requires bounds for all parameters\n", "bounds = om.Bounds(\n", " lower=np.full(2, -10.0),\n", " upper=np.full(2, 10.0)\n", ")\n", "bayesopt_res = om.minimize(\n", " fun=sphere,\n", " params=np.arange(2),\n", " algorithm=\"bayes_opt\",\n", " bounds=bounds,\n", " algo_options={\"seed\": 1}\n", ")\n", "\n", "bayesopt_res.params" ] }, { "cell_type": "markdown", "id": "8", "metadata": {}, "source": [ "## Acquisition Functions in the `bayes_opt` Optimizer" ] }, { "cell_type": "markdown", "id": "9", "metadata": {}, "source": [ "In Bayesian optimization, the **acquisition function** decides *where to 
sample next*.\n", "It controls the trade-off between **exploration** (search new areas) and **exploitation** (focus on good areas).\n", "\n", "optimagic lets you set the acquisition function in different ways:" ] }, { "cell_type": "markdown", "id": "10", "metadata": {}, "source": [ "### 1. Using No Acquisition Function (Default)" ] }, { "cell_type": "code", "execution_count": null, "id": "11", "metadata": { "vscode": { "languageId": "plaintext" } }, "outputs": [], "source": [ "# Uses package defaults: UCB for unconstrained, EI for constrained\n", "acquisition_function = None" ] }, { "cell_type": "markdown", "id": "12", "metadata": {}, "source": [ "### 2. Using a String (Built-in acquisition functions)\n", "\n", "You can pass any of the following strings to select a standard acquisition function:\n", "\n", "* `\"ucb\"` / `\"upper_confidence_bound\"` – Upper Confidence Bound\n", "* `\"ei\"` / `\"expected_improvement\"` – Expected Improvement\n", "* `\"poi\"` / `\"probability_of_improvement\"` – Probability of Improvement" ] }, { "cell_type": "code", "execution_count": null, "id": "13", "metadata": { "vscode": { "languageId": "plaintext" } }, "outputs": [], "source": [ "acquisition_function_str = \"ucb\"" ] }, { "cell_type": "markdown", "id": "14", "metadata": {}, "source": [ "### 3. Using a Class (Auto-Instantiated)\n", "\n", "You can also pass the class directly, optimagic will create an instance for it:" ] }, { "cell_type": "code", "execution_count": null, "id": "15", "metadata": { "vscode": { "languageId": "plaintext" } }, "outputs": [], "source": [ "from bayes_opt.acquisition import UpperConfidenceBound\n", "\n", "acquisition_function_class = UpperConfidenceBound" ] }, { "cell_type": "markdown", "id": "16", "metadata": {}, "source": [ "### 4. 
Using an Instance" ] }, { "cell_type": "code", "execution_count": null, "id": "17", "metadata": { "vscode": { "languageId": "plaintext" } }, "outputs": [], "source": [ "from bayes_opt.acquisition import ExpectedImprovement\n", "\n", "acquisition_function_instance = ExpectedImprovement(\n", " xi=0.1,\n", " exploration_decay=0.95,\n", " exploration_decay_delay=5\n", ")" ] }, { "cell_type": "markdown", "id": "18", "metadata": {}, "source": [ "### Example Run with configured acquisition functions" ] }, { "cell_type": "code", "execution_count": null, "id": "19", "metadata": { "vscode": { "languageId": "plaintext" } }, "outputs": [], "source": [ "res = om.minimize(\n", " fun=sphere ,\n", " params=np.arange(2),\n", " algorithm=\"bayes_opt\",\n", " bounds=om.Bounds(lower=np.full(2, -5.0), upper=np.full(2, 5.0)),\n", " algo_options={\"seed\":1, \"acquisition_function\": acquisition_function_str,}\n", " # acquisition_function can be any of:\n", " # acquisition_function_str → e.g. \"ucb\", \"ei\", \"poi\"\n", " # acquisition_function_class → e.g. UpperConfidenceBound\n", " # acquisition_function_instance → e.g. ExpectedImprovement(xi=0.1)\n", " # None → defaults to \"ucb\"\n", " )\n", "\n", "res.params" ] }, { "cell_type": "markdown", "id": "20", "metadata": {}, "source": [ "## Custom Acquisition Functions" ] }, { "cell_type": "markdown", "id": "21", "metadata": {}, "source": [ "`bayesian-optimization` also allows us to write our own acquisition functions by subclassing its `AcquisitionFunction` class. This allows you to define exploration/exploitation strategies tailored to your specific problem." ] }, { "cell_type": "markdown", "id": "22", "metadata": {}, "source": [ "### Implementation Requirements\n", "\n", "When subclassing `AcquisitionFunction`, there are specific methods we must implement:\n", "\n", "1. **`base_acq(self, mean, std)` method (Required)**: This is the core method where you define the mathematical formula for your acquisition function. 
It takes the predicted mean and standard deviation from the Gaussian Process and returns the acquisition value(s).\n", "\n", "2. **`suggest` method (Optional but often needed)**: The base class provides a default implementation, but you may need to override it if you need to set up internal state (like `y_max` for EI/PI) before `base_acq` is called.\n", "\n", "3. **`get_acquisition_params` and `set_acquisition_params` methods (Optional but recommended)**: These are used for retrieving and setting the internal parameters of your acquisition function. Implementing them makes your acquisition function fully configurable and serializable." ] }, { "cell_type": "code", "execution_count": null, "id": "23", "metadata": { "vscode": { "languageId": "plaintext" } }, "outputs": [], "source": [ "from bayes_opt.acquisition import AcquisitionFunction\n", "class CustomAcquisition(AcquisitionFunction):\n", " \"\"\"\n", " A simple custom acquisition function.\n", "\n", " This acquisition returns half of the predicted mean.\n", " It ignores the uncertainty (std), making it purely\n", " exploitation-oriented.\n", " \"\"\"\n", " def __init__(self):\n", " super().__init__()\n", "\n", " def base_acq(self, mean, std):\n", " return 0.5 * mean" ] }, { "cell_type": "markdown", "id": "24", "metadata": {}, "source": [ "### Using the Custom Acquisition Function\n", "\n", "Once you have defined your custom acquisition function, you can use it in optimagic by passing an instance or a class to the `acquisition_function` parameter:" ] }, { "cell_type": "code", "execution_count": null, "id": "25", "metadata": { "vscode": { "languageId": "plaintext" } }, "outputs": [], "source": [ "acquisition_function = CustomAcquisition()\n", "\n", "res = om.minimize(\n", " fun=sphere ,\n", " params=np.arange(2),\n", " algorithm=\"bayes_opt\",\n", " bounds=om.Bounds(lower=np.full(2, -5.0), upper=np.full(2, 5.0)),\n", " algo_options={\"seed\":1, \"acquisition_function\": acquisition_function,}\n", " )\n", "\n", 
"res.params" ] }, { "cell_type": "markdown", "id": "26", "metadata": {}, "source": [ "### Meta Acquisition Functions" ] }, { "cell_type": "markdown", "id": "27", "metadata": {}, "source": [ "The `bayesian-optimization` package also provides meta acquisition functions that operate on other acquisition functions:\n", "\n", "1. **GPHedge**: Dynamically chooses the best acquisition function from a set of candidates based on their past performance.\n", "2. **ConstantLiar**: Used for parallelized optimization to discourage sampling near points that have already been suggested but not yet evaluated.\n", "\n", "Here's how to use GPHedge with multiple base acquisition functions:" ] }, { "cell_type": "markdown", "id": "28", "metadata": {}, "source": [ "### 1. **GPHedge**:\n", "Dynamically chooses the best acquisition function from a set of candidates based on their past performance.\n", "\n", "let’s define the **Branin function**, to use with Meta Acquisition functions." ] }, { "cell_type": "code", "execution_count": null, "id": "29", "metadata": { "vscode": { "languageId": "plaintext" } }, "outputs": [], "source": [ "def branin(params):\n", " \"\"\"The Branin function - a classic optimization test function.\n", "\n", " Has three global minima at approximately:\n", " - (-π, 12.275)\n", " - (π, 2.275)\n", " - (9.42478, 2.475)\n", "\n", " Global minimum value: 0.397887\n", " \"\"\"\n", " x1, x2 = params[0], params[1]\n", "\n", " a = 1\n", " b = 5.1 / (4 * np.pi**2)\n", " c = 5 / np.pi\n", " r = 6\n", " s = 10\n", " t = 1 / (8 * np.pi)\n", "\n", " term1 = a * (x2 - b * x1**2 + c * x1 - r)**2\n", " term2 = s * (1 - t) * np.cos(x1)\n", " term3 = s\n", "\n", " return term1 + term2 + term3" ] }, { "cell_type": "code", "execution_count": null, "id": "30", "metadata": { "vscode": { "languageId": "plaintext" } }, "outputs": [], "source": [ "from bayes_opt.acquisition import GPHedge, UpperConfidenceBound, ExpectedImprovement\n", "\n", "# Create a list of base acquisition functions\n", 
"base_acquisitions = [\n", " UpperConfidenceBound(kappa=2.576),\n", " ExpectedImprovement(xi=0.01),\n", " # Add more as needed\n", "]\n", "\n", "gphedge_acq = GPHedge(base_acquisitions)\n", "\n", "result = om.minimize(\n", " fun=branin,\n", " params=np.array([1.0, 1.0]),\n", " algorithm=\"bayes_opt\",\n", " bounds=bounds,\n", " algo_options={\n", " \"acquisition_function\": gphedge_acq,\n", " \"seed\": 42\n", " }\n", ")\n", "\n", "result.params, result.fun" ] }, { "cell_type": "markdown", "id": "31", "metadata": {}, "source": [ "### 2. ConstantLiar\n", "\n", "`ConstantLiar` is used for parallelized optimization. It discourages sampling near points that have already been suggested but not yet evaluated." ] }, { "cell_type": "code", "execution_count": null, "id": "32", "metadata": { "vscode": { "languageId": "plaintext" } }, "outputs": [], "source": [ "from bayes_opt.acquisition import ConstantLiar, UpperConfidenceBound\n", "\n", "base_acq = UpperConfidenceBound(kappa=2.576)\n", "\n", "constant_liar_acq = ConstantLiar(base_acquisition=base_acq, strategy=\"max\")\n", "\n", "# Use in optimization (Note: ConstantLiar is primarily for async optimization)\n", "result = om.minimize(\n", " fun=sphere,\n", " params=np.array([1.0, 1.0]),\n", " algorithm=\"bayes_opt\",\n", " bounds=bounds,\n", " algo_options={\n", " \"acquisition_function\": constant_liar_acq,\n", " \"seed\": 42\n", " }\n", ")\n", "\n", "result.params" ] }, { "cell_type": "markdown", "id": "33", "metadata": {}, "source": [ "## Exploration vs Exploitation Trade-off" ] }, { "cell_type": "markdown", "id": "34", "metadata": {}, "source": [ "When using Bayesian optimization, the acquisition function decides where to sample next. 
It balances exploration (try new areas) vs exploitation (refine known good areas).\n", "\n", "- **Exploration**: Sampling in regions with high uncertainty\n", "- **Exploitation**: Sampling in regions with high predicted values\n", "\n", "### Related Parameters\n", "\n", "- **kappa** (UCB): Higher values → more exploration\n", "- **xi** (EI/POI): Higher values → more exploration\n", "- **exploration_decay**: Gradually shift from exploration to exploitation\n", "- **exploration_decay_delay**: When to start the decay" ] }, { "cell_type": "code", "execution_count": null, "id": "35", "metadata": { "vscode": { "languageId": "plaintext" } }, "outputs": [], "source": [ "def f(x):\n", " \"\"\"Function with multiple peaks\"\"\"\n", " x = x[0]\n", " return float(\n", " np.exp(-(x - 2) ** 2) +\n", " np.exp(-(x - 6) ** 2 / 10) +\n", " 1 / (x ** 2 + 1)\n", " )\n", "x = np.linspace(-2, 10, 100)\n", "Y = [f([xi]) for xi in x]\n", "plt.plot(x, Y)\n", "plt.xlabel(\"x\")\n", "plt.ylabel(\"f(x)\")\n", "plt.show()" ] }, { "cell_type": "code", "execution_count": null, "id": "36", "metadata": { "vscode": { "languageId": "plaintext" } }, "outputs": [], "source": [ "def plot_bayes_opt(result):\n", " \"\"\"Plot optimization results\"\"\"\n", " evaluated_points = np.array([p[0] for p in result.history.params])\n", " function_values = np.array(result.history.fun)\n", "\n", " plt.figure(figsize=(8,5))\n", " plt.plot(x, Y, 'b-', label=\"Original function f(x)\")\n", " plt.scatter(evaluated_points, function_values, c=\"red\", s=60, zorder=3, label=\"Evaluated points\")\n", " plt.axvline(result.params[0], color=\"green\", linestyle=\"--\", label=\"Best param\")\n", "\n", " plt.xlabel(\"x\")\n", " plt.ylabel(\"f(x)\")\n", " plt.legend()\n", " plt.grid(True, alpha=0.3)\n", " plt.show()\n" ] }, { "cell_type": "code", "execution_count": null, "id": "37", "metadata": { "vscode": { "languageId": "plaintext" } }, "outputs": [], "source": [ "# strategy: exploitation (kappa=0.1) - focuses on known good 
areas\n", "acquisition_function = acquisition.UpperConfidenceBound(kappa=0.1)\n", "result = om.maximize(\n", " fun=f,\n", " params=np.array([0.]),\n", " algorithm=\"bayes_opt\",\n", " bounds=om.Bounds(lower=np.full(1, -2.0), upper=np.full(1, 10.0)),\n", " algo_options={\n", " \"acquisition_function\": acquisition_function,\n", " \"seed\": 987234,\n", " }\n", ")\n", "\n", "# Notice: Points cluster around peaks, might also get stuck in local optimum\n", "plot_bayes_opt(result)" ] }, { "cell_type": "code", "execution_count": null, "id": "38", "metadata": { "vscode": { "languageId": "plaintext" } }, "outputs": [], "source": [ "# strategy: exploration (kappa=10) - explores more broadly\n", "acquisition_function = acquisition.UpperConfidenceBound(kappa=10)\n", "result = om.maximize(\n", " fun=f,\n", " params=np.array([0.]),\n", " algorithm=\"bayes_opt\",\n", " bounds=om.Bounds(lower=np.full(1, -2.0), upper=np.full(1, 10.0)),\n", " algo_options={\n", " \"acquisition_function\": acquisition_function,\n", " \"seed\": 987234,\n", " }\n", ")\n", "\n", "# Notice: Points are more spread out, better chance of finding global optimum\n", "plot_bayes_opt(result)" ] }, { "cell_type": "markdown", "id": "39", "metadata": {}, "source": [ "## Sequential Domain Reduction (SDR)" ] }, { "cell_type": "markdown", "id": "40", "metadata": {}, "source": [ "Sequential Domain Reduction (SDR) progressively narrows the search space around promising regions. 
This can significantly improve optimization, especially for high-dimensional problems.\n", "\n", "### SDR Parameters\n", "\n", "- `enable_sdr`: Enable/disable Sequential Domain Reduction\n", "- `sdr_gamma_osc`: Controls oscillation damping (default: 0.7)\n", "- `sdr_gamma_pan`: Controls panning behavior (default: 1.0)\n", "- `sdr_eta`: Zooming parameter for region shrinking (default: 0.9)\n", "- `sdr_minimum_window`: Minimum window size (default: 0.0)" ] }, { "cell_type": "markdown", "id": "41", "metadata": {}, "source": [ "### SDR Example" ] }, { "cell_type": "code", "execution_count": null, "id": "42", "metadata": { "vscode": { "languageId": "plaintext" } }, "outputs": [], "source": [ "def ackley(x):\n", " \"\"\"Global minimum: f(x*) = 0 at x* = (0, 0)\"\"\"\n", " x0, x1 = x\n", " arg1 = -0.2 * np.sqrt(0.5 * (x0 ** 2 + x1 ** 2))\n", " arg2 = 0.5 * (np.cos(2 * np.pi * x0) + np.cos(2 * np.pi * x1))\n", " return -20. * np.exp(arg1) - np.exp(arg2) + 20. + np.e\n", "\n", "start_params = np.array([2.0, 2.0])\n", "bounds = om.Bounds(\n", " lower=np.array([-32.768, -32.768]),\n", " upper=np.array([32.768, 32.768])\n", ")" ] }, { "cell_type": "code", "execution_count": null, "id": "43", "metadata": { "vscode": { "languageId": "plaintext" } }, "outputs": [], "source": [ "# Standard Bayesian Optimization without SDR\n", "result_standard = om.minimize(\n", " fun=ackley,\n", " params=start_params,\n", " algorithm=\"bayes_opt\",\n", " bounds=bounds,\n", " algo_options={\n", " \"enable_sdr\": False,\n", " \"n_iter\": 50,\n", " \"init_points\": 2,\n", " \"seed\": 1,\n", " \"acquisition_function\": \"ucb\",\n", " }\n", ")\n", "\n", "print(\"Standard Bayesian Optimization:\")\n", "print(\"Best function value:\", result_standard.fun)\n", "print(\"Best parameters:\", result_standard.x)" ] }, { "cell_type": "code", "execution_count": null, "id": "44", "metadata": { "vscode": { "languageId": "plaintext" } }, "outputs": [], "source": [ "# Bayesian Optimization with SDR\n", "result_sdr 
= om.minimize(\n", " fun=ackley,\n", " params=start_params,\n", " algorithm=\"bayes_opt\",\n", " bounds=bounds,\n", " algo_options={\n", " \"enable_sdr\": True,\n", " \"sdr_minimum_window\": 0.5,\n", " \"sdr_gamma_osc\": 0.7,\n", " \"sdr_gamma_pan\": 1.0,\n", " \"sdr_eta\": 0.9,\n", " \"n_iter\": 50,\n", " \"init_points\": 2,\n", " \"seed\": 1,\n", " \"acquisition_function\": \"ucb\",\n", " }\n", ")\n", "\n", "print(\"Bayesian Optimization with SDR:\")\n", "print(\"Best function value:\", result_sdr.fun)\n", "print(\"Best parameters:\", result_sdr.x)" ] }, { "cell_type": "code", "execution_count": null, "id": "45", "metadata": { "vscode": { "languageId": "plaintext" } }, "outputs": [], "source": [ "# Compare convergence behavior\n", "results = {\n", " \"Standard BO\": result_standard,\n", " \"BO with SDR\": result_sdr\n", "}\n", "\n", "# SDR typically converges faster than standard BO\n", "fig = om.criterion_plot(results)\n", "fig.show()" ] }, { "cell_type": "markdown", "id": "46", "metadata": {}, "source": [ "## Gaussian Process Configuration" ] }, { "cell_type": "markdown", "id": "47", "metadata": {}, "source": [ "`\"bayesian-optimization\"` uses a Gaussian Process (GP) as the surrogate model. 
Its behavior can be tuned with these options via algo_options:\n", "\n", "\n", "* **alpha**: noise level in function evaluations\n", "\n", " * lower values (e.g.,`1e-6`): assumes nearly precise function evaluations\n", " * higher values (e.g., `1e-2`): assumes noisy evaluations\n", "\n", "* **n\\_restarts**: Number of times to restart the optimization.\n", "\n", "* **seed** → ensures reproducible results.\n" ] }, { "cell_type": "code", "execution_count": null, "id": "48", "metadata": { "vscode": { "languageId": "plaintext" } }, "outputs": [], "source": [ "algo_options = {\n", " \"alpha\": 1e-3,\n", " \"n_restarts\": 5,\n", " \"seed\": 42,\n", "}\n", "\n", "result_configured = om.minimize(\n", " fun=sphere,\n", " params=np.array([3.0, 3.0]),\n", " algorithm=\"bayes_opt\",\n", " bounds=om.Bounds(lower=np.full(2, -5.0), upper=np.full(2, 5.0)),\n", " algo_options=algo_options\n", ")\n", "\n", "print(\"Configured GP results:\")\n", "print(f\" Best value: {result_configured.fun}\")\n", "print(f\" Function evaluations: {result_configured.n_fun_evals}\")" ] }, { "cell_type": "markdown", "id": "49", "metadata": {}, "source": [ "## Summary\n", "\n", "Bayesian optimization is a powerful tool for optimizing expensive black-box functions. Key takeaways:\n", "\n", "1. **Choose the right acquisition function** based on your exploration/exploitation needs\n", "2. **Tune acquisition parameters** like kappa (UCB) or xi (EI) to control the trade-off\n", "3. **Use SDR** for high-dimensional problems to focus the search\n", "4. **Configure the GP properly** with appropriate noise levels and restarts\n", "\n", "For more detailed information, check out the [bayesian-optimization documentation](https://bayesian-optimization.github.io/BayesianOptimization/3.1.0/index.html#)." 
] } ], "metadata": { "language_info": { "name": "python" } }, "nbformat": 4, "nbformat_minor": 5 } ================================================ FILE: docs/source/tutorials/index.md ================================================ (tutorials)= # Tutorials This section provides an overview of optimagic. It's a good starting point if you are new to optimagic. For more in-depth examples using advanced options, check out the [how-to guides](how-to). `````{grid} 1 2 2 3 --- gutter: 3 --- ````{grid-item-card} :text-align: center :img-top: ../_static/images/optimization.svg :class-img-top: index-card-image :shadow: md ```{button-link} optimization_overview.html --- click-parent: ref-type: ref class: stretched-link index-card-link sd-text-primary --- Optimization ``` Learn numerical optimization with estimagic. ```` ````{grid-item-card} :text-align: center :img-top: ../_static/images/differentiation.svg :class-img-top: index-card-image :shadow: md ```{button-link} numdiff_overview.html --- click-parent: ref-type: ref class: stretched-link index-card-link sd-text-primary --- Differentiation ``` Learn numerical differentiation with estimagic. ```` ````{grid-item-card} :text-align: center :img-top: ../_static/images/bayesian_optimization.svg :class-img-top: index-card-image :shadow: md ```{button-link} bayes_opt_tutorial.html --- click-parent: ref-type: ref class: stretched-link index-card-link sd-text-primary --- bayes_opt Optimizer ``` Tutorial on the bayes_opt optimizer in optimagic. ```` ````` ```{toctree} --- hidden: true maxdepth: 1 --- optimization_overview numdiff_overview bayes_opt_tutorial ``` ================================================ FILE: docs/source/tutorials/numdiff_overview.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Numerical differentiation\n", "\n", "In this tutorial, you will learn how to numerically differentiate functions with\n", "optimagic." 
] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "import pandas as pd\n", "\n", "import optimagic as om" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Basic usage of `first_derivative`" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def fun(params):\n", " return params @ params" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "fd = om.first_derivative(\n", " func=fun,\n", " params=np.arange(5),\n", ")\n", "\n", "fd.derivative" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Basic usage of `second_derivative`" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "sd = om.second_derivative(\n", " func=fun,\n", " params=np.arange(5),\n", ")\n", "\n", "sd.derivative.round(3)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## You can parallelize" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "fd = om.first_derivative(\n", " func=fun,\n", " params=np.arange(5),\n", " n_cores=4,\n", ")\n", "\n", "fd.derivative" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "sd = om.second_derivative(\n", " func=fun,\n", " params=np.arange(5),\n", " n_cores=4,\n", ")\n", "\n", "sd.derivative.round(3)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## `params` do not have to be vectors\n", "\n", "In optimagic, params can be arbitrary [pytrees](https://jax.readthedocs.io/en/latest/pytrees.html). Examples are (nested) dictionaries of numbers, arrays and pandas objects. 
" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def dict_fun(params):\n", " return params[\"a\"] ** 2 + params[\"b\"] ** 2 + (params[\"c\"] ** 2).sum()" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "fd = om.first_derivative(\n", " func=dict_fun,\n", " params={\"a\": 0, \"b\": 1, \"c\": pd.Series([2, 3, 4])},\n", ")\n", "\n", "fd.derivative" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Description of the output\n", "\n", "> Note. Understanding the output of the first and second derivative requires terminology\n", "> of pytrees. Please refer to the\n", "> [JAX documentation of pytrees](https://jax.readthedocs.io/en/latest/pytrees.html).\n", "\n", "The output tree of `first_derivative` has the same structure as the params tree.\n", "Equivalent to the 1-d numpy array case, where the gradient is a vector of shape\n", "`(len(params),)`. If, however, the params tree contains non-scalar entries like\n", "`numpy.ndarray`'s, `pandas.Series`', or `pandas.DataFrame`'s, the output is not expanded\n", "but a block is created instead. In the above example, the entry `params[\"c\"]` is a\n", "`pandas.Series` with 3 entries. Thus, the first derivative output contains the\n", "corresponding 3x1-block of the gradient at the position `[\"c\"]`:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "fd.derivative[\"c\"]" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "sd = om.second_derivative(\n", " func=dict_fun,\n", " params={\"a\": 0, \"b\": 1, \"c\": pd.Series([2, 3, 4])},\n", ")\n", "\n", "sd.derivative" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Description of the output\n", "\n", "> Note. Understanding the output of the first and second derivative requires terminology\n", "> of pytrees. 
Please refer to the\n", "> [JAX documentation of pytrees](https://jax.readthedocs.io/en/latest/pytrees.html).\n", "\n", "The output of `second_derivative` when using general pytrees looks more complex but\n", "is easy once we remember that the second derivative is equivalent to applying the first\n", "derivative twice.\n", "\n", "The output tree is a product of the params tree with itself. This is equivalent to the\n", "1-d numpy array case, where the hessian is a matrix of shape\n", "`(len(params), len(params))`. If, however, the params tree contains non-scalar entries\n", "like `numpy.ndarray`'s, `pandas.Series`', or `pandas.DataFrame`'s, the output is not\n", "expanded but a block is created instead. In the above example, the entry `params[\"c\"]`\n", "is a 3-dimensional `pandas.Series`. Thus, the second derivative output contains the\n", "corresponding 3x3-block of the hessian at the position `[\"c\"][\"c\"]`:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "sd.derivative[\"c\"][\"c\"].round(3)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## There are many options\n", "\n", "You can choose which finite difference method to use, whether we should respect\n", "parameter bounds, or whether to evaluate the function in parallel. Let's go through\n", "some basic examples. " ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## You can choose the difference method\n", "\n", "> Note. A mathematical explanation of the background of the difference methods can be\n", "> found on the corresponding [explanation page](../explanation/numdiff_background.md)."
] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "fd = om.first_derivative(\n", " func=fun,\n", " params=np.arange(5),\n", " method=\"backward\", # default: 'central'\n", ")\n", "\n", "fd.derivative" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "sd = om.second_derivative(\n", " func=fun,\n", " params=np.arange(5),\n", " method=\"forward\", # default: 'central_cross'\n", ")\n", "\n", "sd.derivative.round(3)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## You can add bounds " ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "params = np.arange(5)\n", "\n", "fd = om.first_derivative(\n", " func=fun,\n", " params=params,\n", " # forces first_derivative to use forward differences\n", " bounds=om.Bounds(lower=params, upper=params + 1),\n", ")\n", "\n", "fd.derivative" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Of course, bounds also work in `second_derivative`." ] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.14" }, "vscode": { "interpreter": { "hash": "40d3a090f54c6569ab1632332b64b2c03c39dcf918b08424e98f38b5ae0af88f" } } }, "nbformat": 4, "nbformat_minor": 4 } ================================================ FILE: docs/source/tutorials/optimization_overview.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Numerical optimization\n", "\n", "Using simple examples, this tutorial shows how to do an optimization with optimagic. More details on the topics covered here can be found in the [how to guides](../how_to/index.md)." 
] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "import pandas as pd\n", "import plotly.io as pio\n", "\n", "pio.renderers.default = \"notebook_connected\"\n", "\n", "import optimagic as om" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Basic usage of `minimize`\n", "\n", "The basic usage of `optimagic.minimize` is very similar to `scipy.optimize.minimize`" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def sphere(params):\n", " return params @ params" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "lbfgsb_res = om.minimize(\n", " fun=sphere,\n", " params=np.arange(5),\n", " algorithm=\"scipy_lbfgsb\",\n", ")\n", "\n", "lbfgsb_res.params.round(5)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## `params` do not have to be vectors\n", "\n", "In optimagic, params can be arbitrary [pytrees](https://jax.readthedocs.io/en/latest/pytrees.html). Examples are (nested) dictionaries of numbers, arrays and pandas objects. This is very useful if you have many parameters!" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def dict_sphere(params):\n", " return params[\"a\"] ** 2 + params[\"b\"] ** 2 + (params[\"c\"] ** 2).sum()" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "nm_res = om.minimize(\n", " fun=dict_sphere,\n", " params={\"a\": 0, \"b\": 1, \"c\": pd.Series([2, 3, 4])},\n", " algorithm=\"scipy_neldermead\",\n", ")\n", "\n", "nm_res.params" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## You can compare optimizers\n", "\n", "In practice, it is super hard to pick the right optimizer for your problem. With optimagic, you can simply try a few and compare their results!"
] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "results = {\"lbfgsb\": lbfgsb_res, \"nelder_mead\": nm_res}\n", "fig = om.criterion_plot(results, max_evaluations=300)\n", "fig.show()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ ":::{note}\n", "\n", "For details on using other plotting backends, see [How to change the plotting backend](../how_to/how_to_change_plotting_backend.ipynb).\n", "\n", ":::" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "You can also zoom in on the history of specific parameters. This can be super helpful to diagnose problems in the optimization. " ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "fig = om.params_plot(\n", " nm_res,\n", " max_evaluations=300,\n", " # optionally select a subset of parameters to plot\n", " selector=lambda params: params[\"c\"],\n", ")\n", "fig.show()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## There are many optimizers\n", "\n", "By default, optimagic comes with optimizers from scipy, including global optimizers \n", "and least-squares optimizers. But we also have wrappers for algorithms from **NlOpt**, \n", "**Pygmo**, as well as several optimizers from individual packages like **fides**, \n", "**ipopt**, **pybobyqa** and **dfols**. \n", "\n", "To use optimizers that are not from scipy, follow our [installation guide](../installation.md) for optional dependencies. To see which optimizers we have, check out the [full list](../algorithms.md).\n", "\n", "If you are missing your favorite optimizer in the list, let us know with an [issue](https://github.com/optimagic-dev/optimagic/issues)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Amazing autocomplete \n", "\n", "Assume you need a gradient-free optimizer that supports bounds on the parameters. Moreover, you have a fixed computational budget, so you want to set stopping options. 
\n", "\n", "In most optimizer libraries, you would have to spend a few minutes with the docs to find an optimizer that fits your needs and the stopping options it supports. In optimagic, all of this is discoverable in your editor!\n", "\n", "If you type `om.algos.`, your editor will show you all available optimizers and a list of categories you can use to filter the results. In our case, we select `GradientFree` and `Bounded`, and we could do that in any order we want.\n", "\n", "\n", "![autocomplete_1](../_static/images/autocomplete_1.png)\n", "\n", "\n", "After selecting one of the displayed algorithms, in our case `scipy_neldermead`, the editor shows all tuning parameters of that optimizer. If you start to type `stopping`, you will see all stopping criteria that are available.\n", "\n", "\n", "![autocomplete_2](../_static/images/autocomplete_2.png)\n", "\n", "\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Adding bounds\n", "\n", "As any optimizer library, optimagic lets you specify bounds for the parameters." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "bounds = om.Bounds(lower=np.arange(5) - 2, upper=np.array([10, 10, 10, np.inf, np.inf]))\n", "\n", "res = om.minimize(\n", " fun=sphere,\n", " params=np.arange(5),\n", " algorithm=\"scipy_lbfgsb\",\n", " bounds=bounds,\n", ")\n", "\n", "res.params.round(5)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Fixing parameters \n", "\n", "On top of bounds, you can also fix one or more parameters during the optimization. 
" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "res = om.minimize(\n", " fun=sphere,\n", " params=np.arange(5),\n", " algorithm=\"scipy_lbfgsb\",\n", " constraints=om.FixedConstraint(selector=lambda params: params[[1, 3]]),\n", ")\n", "\n", "res.params.round(5)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Other constraints\n", "\n", "As an example, let's impose the constraint that the first three parameters are valid probabilities, i.e. they are between zero and one and sum to one:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "res = om.minimize(\n", " fun=sphere,\n", " params=np.array([0.1, 0.5, 0.4, 4, 5]),\n", " algorithm=\"scipy_lbfgsb\",\n", " constraints=om.ProbabilityConstraint(selector=lambda params: params[:3]),\n", ")\n", "\n", "res.params.round(5)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "For a full overview of the constraints we support and the corresponding syntaxes, check out [the documentation](../how_to/how_to_constraints.md).\n", "\n", "Note that `\"scipy_lbfgsb\"` is not a constrained optimizer. If you want to know how we achieve this, check out [the explanations](../explanation/implementation_of_constraints.md)." 
] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## There is also maximize\n", "\n", "If you ever forgot to switch back the sign of your criterion function after doing a maximization with `scipy.optimize.minimize`, there is good news:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def upside_down_sphere(params):\n", " return -params @ params" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "res = om.maximize(\n", " fun=upside_down_sphere,\n", " params=np.arange(5),\n", " algorithm=\"scipy_bfgs\",\n", ")\n", "\n", "res.params.round(5)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "optimagic got your back." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Speeding up your optimization with derivatives \n", "\n", "You can speed up your optimization by providing closed form derivatives. Those derivatives can be hand-coded or calculated with JAX!" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def sphere_gradient(params):\n", " return 2 * params\n", "\n", "\n", "res = om.minimize(\n", " fun=sphere,\n", " params=np.arange(5),\n", " algorithm=\"scipy_lbfgsb\",\n", " jac=sphere_gradient,\n", ")\n", "res.params.round(5)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Alternatively, you can let optimagic calculate numerical derivatives with parallelized finite differences. This is very handy if you do not want to invest the time to derive the derivatives of your criterion function. 
" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "res = om.minimize(\n", " fun=sphere,\n", " params=np.arange(5),\n", " algorithm=\"scipy_lbfgsb\",\n", " numdiff_options=om.NumdiffOptions(n_cores=6),\n", ")\n", "\n", "res.params.round(5)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "For more details and examples check out [how-to speed up your optimization with derivatives](../how_to/how_to_derivatives.ipynb)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Turn local optimizers global with multistart\n", "\n", "Multistart optimization requires finite soft bounds on all parameters. Those bounds will\n", "be used for sampling but not enforced during optimization." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "bounds = om.Bounds(soft_lower=np.full(10, -5), soft_upper=np.full(10, 15))\n", "\n", "res = om.minimize(\n", " fun=sphere,\n", " params=np.arange(10),\n", " algorithm=\"scipy_neldermead\",\n", " bounds=bounds,\n", " multistart=om.MultistartOptions(convergence_max_discoveries=5),\n", ")\n", "res.params.round(5)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## And plot the history of all local optimizations" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "fig = om.criterion_plot(res)\n", "fig.show()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Exploit the structure of your optimization problem\n", "\n", "Many estimation problems have a least-squares structure. If so, specialized optimizers that exploit this structure can be much faster than standard optimizers. The `sphere` function from above is the simplest possible least-squares problem you could imagine: the least-squares residuals are just the params. 
\n", "\n", "To use least-squares optimizers in optimagic, you need to mark your function with \n", "a decorator and return the least-squares residuals instead of the aggregated function value. \n", "\n", "More details can be found [here](../how_to/how_to_criterion_function.ipynb)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "@om.mark.least_squares\n", "def ls_sphere(params):\n", " return params" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "res = om.minimize(\n", " fun=ls_sphere,\n", " params=np.arange(5),\n", " algorithm=\"pounders\",\n", ")\n", "res.params.round(5)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Of course, any least-squares problem can also be solved with a standard optimizer. \n", "\n", "There are also specialized optimizers for likelihood functions. " ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Using and reading persistent logging\n", "\n", "For long-running and difficult optimizations, it can be worthwhile to store the progress in a persistent log file. 
You can do this by providing a path to the `logging` argument:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "res = om.minimize(\n", " fun=sphere,\n", " params=np.arange(5),\n", " algorithm=\"scipy_lbfgsb\",\n", " logging=\"my_log.db\",\n", " log_options={\"if_database_exists\": \"replace\"},\n", ")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "You can read the entries in the log file (while the optimization is still running or after it has finished) as follows:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "reader = om.OptimizeLogReader(\"my_log.db\")\n", "reader.read_history().keys()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "For more information on what you can do with the log file and LogReader object, check out [the logging tutorial](../how_to/how_to_logging.ipynb)\n", "\n", "The persistent log file is always instantly synchronized when the optimizer tries a new parameter vector. This is very handy if an optimization has to be aborted and you want to extract the current status. It can be displayed in `criterion_plot` and `params_plot`, even while the optimization is running. " ] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.14" }, "vscode": { "interpreter": { "hash": "40d3a090f54c6569ab1632332b64b2c03c39dcf918b08424e98f38b5ae0af88f" } } }, "nbformat": 4, "nbformat_minor": 4 } ================================================ FILE: docs/source/videos.md ================================================ (list_of_videos)= # Videos Check out our tutorials, talks and screencasts about optimagic. 
## Talks and tutorials ### EuroSciPy 2023 (Talk) ```{raw} html ``` ### EuroSciPy 2023 (Tutorial) ```{raw} html ``` ### SciPy 2022 (Tutorial) ```{raw} html ``` ## Screencasts The screencasts are part of the course _Effective Programming Practices for Economists_, taught at the University of Bonn by [Hans-Martin von Gaudecker](https://www.wiwi.uni-bonn.de/gaudecker/), and previously also [Janoś Gabler](https://github.com/janosg). You can find all screencasts of the course on the [course website](https://effective-programming-practices.vercel.app/landing-page.html). Here, we show the screencasts about numerical optimization and optimagic. ### Introduction to numerical optimization ```{raw} html ``` ### Using optimagic’s minimize and maximize ```{raw} html ``` ### Visualizing optimizer histories ```{raw} html ``` ### Choosing optimization algorithms ```{raw} html ``` ================================================ FILE: pyproject.toml ================================================ # ====================================================================================== # Project metadata # ====================================================================================== [project] name = "optimagic" description = "Tools to solve difficult numerical optimization problems." 
requires-python = ">=3.12" dependencies = [ "cloudpickle>=2.2", "joblib>=1.1", "numpy>=1.26", "pandas>=2.1", "plotly>=5.14", "pybaum>=0.1.2", "scipy>=1.11", "sqlalchemy>=2.0", "annotated-types>=0.4", "typing-extensions>=4.5", ] dynamic = ["version"] keywords = [ "nonlinear optimization", "optimization", "derivative free optimization", "global optimization", "parallel optimization", "statistics", "estimation", "extremum estimation", "inference", "numerical differentiation", "finite differences", "method of simulated moments", "maximum likelihood", ] classifiers = [ "Development Status :: 5 - Production/Stable", "Intended Audience :: Science/Research", "License :: OSI Approved :: MIT License", "Operating System :: MacOS :: MacOS X", "Operating System :: Microsoft :: Windows", "Operating System :: POSIX", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.14", "Topic :: Scientific/Engineering", ] authors = [ { name = "Janos Gabler", email = "janos.gabler@gmail.com" }, ] maintainers = [ { name = "Janos Gabler", email = "janos.gabler@gmail.com" }, { name = "Tim Mensinger", email = "mensingertim@gmail.com" }, ] [project.readme] file = "README.md" content-type = "text/markdown" [project.license] text = "MIT" [project.urls] Repository = "https://github.com/optimagic-dev/optimagic" Github = "https://github.com/optimagic-dev/optimagic" Tracker = "https://github.com/optimagic-dev/optimagic/issues" # ====================================================================================== # Build system configuration # ====================================================================================== [build-system] requires = ["hatchling", "hatch_vcs"] build-backend = "hatchling.build" [tool.hatch.build.hooks.vcs] version-file = "src/optimagic/_version.py" [tool.hatch.build.targets.sdist] exclude = ["tests"] only-packages = true [tool.hatch.build.targets.wheel] only-include = ["src"] sources = ["src"] 
[tool.hatch.version] source = "vcs" [tool.hatch.metadata] allow-direct-references = true # ====================================================================================== # Ruff configuration # ====================================================================================== [tool.ruff] target-version = "py312" fix = true [tool.ruff.lint] select = [ # isort "I", # pyflakes "F", # pycodestyle "E", "W", # flake8-2020 "YTT", # flake8-bugbear "B", # flake8-quotes "Q", # pylint "PLE", "PLR", "PLW", # misc lints "PIE", # tidy imports "TID", # implicit string concatenation "ISC", # pydocstyle "D", ] extend-ignore = [ # Missing docstrings — not enforced for now "D100", # public module "D101", # public class "D102", # public method "D103", # public function "D104", # public package "D105", # magic method "D107", # __init__ # Docstring content/style rules — too noisy to enforce for now "D205", # blank line between summary and description "D414", # section has no body "D415", # first line punctuation "D417", # missing argument descriptions # allow module import not at top of file, important for notebooks "E402", # do not assign a lambda expression, use a def "E731", # Too many arguments to function call "PLR0913", # Too many returns "PLR0911", # Too many branches "PLR0912", # Too many statements "PLR0915", # Magic number "PLR2004", # Consider `elif` instead of `else` then `if` to remove indentation level "PLR5501", # For calls to warnings.warn(): No explicit `stacklevel` keyword argument found "B028", # Incompatible with formatting "ISC001", ] [tool.ruff.lint.per-file-ignores] "docs/source/conf.py" = ["E501", "ERA001", "DTZ005"] "src/optimagic/parameters/kernel_transformations.py" = ["ARG001", "N806"] "docs/source/*" = ["B018"] "src/optimagic/algorithms.py" = ["E501"] [tool.ruff.lint.pydocstyle] convention = "google" # ====================================================================================== # Pytest configuration # 
====================================================================================== [tool.pytest.ini_options] filterwarnings = [ "ignore:Using or importing the ABCs from 'collections'", "ignore:the imp module is deprecated", "ignore:Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.", "ignore:In a future version of pandas all arguments of concat except for the argument 'objs' will be keyword-only", "ignore:Please use `MemoizeJac` from the `scipy.optimize` namespace", "ignore:`scipy.optimize.optimize.MemoizeJac` is deprecated", "ignore:Some algorithms did not converge. Their walltime has been set to a very high value instead of infinity because Timedeltas do notsupport infinite values", "ignore:In a future version, the Index constructor will not infer numeric dtypes when passed object-dtype sequences", "ignore:distutils Version classes are deprecated. Use packaging.version instead", "ignore:Standard matrix inversion failed due to LinAlgError", "ignore:delta_grad == 0.0", "ignore:Widget._active_widgets is deprecated", "ignore:Widget._widget_types is deprecated", "ignore:Widget.widget_types is deprecated", "ignore:Widget.widgets is deprecated", "ignore:Parallelization together with", "ignore:Conversion of an array with ndim > 0 to a scalar is deprecated", "ignore:The following exception was caught when evaluating", "ignore:The following exception was caught when calculating", "ignore:Usage of the parameter log_options", ] addopts = ["--doctest-modules", "--pdbcls=pdbp:Pdb"] markers = [ "wip: Tests that are work-in-progress.", "slow: Tests that take a long time to run and are skipped in continuous integration.", "jax: Tests that require jax to be installed and are skipped on non-Linux systems.", ] norecursedirs = ["docs", ".tools"] # ====================================================================================== # Misc configuration # ====================================================================================== 
[tool.yamlfix] line_length = 88 sequence_style = "block_style" none_representation = "null" # ====================================================================================== # Mypy configuration # ====================================================================================== [tool.mypy] files = ["src", "tests", ".tools"] check_untyped_defs = true disallow_any_generics = true disallow_untyped_defs = true disallow_incomplete_defs = true no_implicit_optional = true warn_redundant_casts = true warn_unused_ignores = true [[tool.mypy.overrides]] module = [ "optimagic.benchmarking", "optimagic.benchmarking.benchmark_reports", "optimagic.benchmarking.cartis_roberts", "optimagic.benchmarking.get_benchmark_problems", "optimagic.benchmarking.more_wild", "optimagic.benchmarking.noise_distributions", "optimagic.benchmarking.process_benchmark_results", "optimagic.benchmarking.run_benchmark", "optimagic.differentiation", "optimagic.differentiation.derivatives", "optimagic.differentiation.finite_differences", "optimagic.differentiation.generate_steps", "optimagic.differentiation.richardson_extrapolation", "optimagic.examples", "optimagic.examples.numdiff_functions", "optimagic.optimization", "optimagic.optimization.algo_options", "optimagic.optimization.convergence_report", "optimagic.optimization.optimization_logging", "optimagic.optimization.optimize_result", "optimagic.optimization.optimize", "optimagic.optimization.multistart", "optimagic.optimization.scipy_aliases", "optimagic.optimization.create_optimization_problem", "optimagic.optimizers._pounders", "optimagic.optimizers._pounders.pounders_auxiliary", "optimagic.optimizers._pounders.pounders_history", "optimagic.optimizers._pounders._conjugate_gradient", "optimagic.optimizers._pounders._steihaug_toint", "optimagic.optimizers._pounders._trsbox", "optimagic.optimizers._pounders.bntr", "optimagic.optimizers._pounders.gqtpar", "optimagic.optimizers._pounders.linear_subsolvers", "optimagic.optimizers", 
"optimagic.optimizers.tranquilo", "optimagic.optimizers.pygmo_optimizers", "optimagic.optimizers.scipy_optimizers", "optimagic.optimizers.nag_optimizers", "optimagic.optimizers.neldermead", "optimagic.optimizers.nlopt_optimizers", "optimagic.optimizers.ipopt", "optimagic.optimizers.fides", "optimagic.optimizers.pounders", "optimagic.optimizers.tao_optimizers", "optimagic.parameters", "optimagic.parameters.block_trees", "optimagic.parameters.check_constraints", "optimagic.parameters.consolidate_constraints", "optimagic.parameters.constraint_tools", "optimagic.parameters.conversion", "optimagic.parameters.kernel_transformations", "optimagic.parameters.nonlinear_constraints", "optimagic.parameters.process_constraints", "optimagic.parameters.process_selectors", "optimagic.parameters.space_conversion", "optimagic.parameters.tree_conversion", "optimagic.parameters.tree_registry", "optimagic.shared", "optimagic.shared.check_option_dicts", "optimagic.shared.compat", "optimagic.shared.process_user_function", "optimagic.visualization", "optimagic.visualization.convergence_plot", "optimagic.visualization.backends", "optimagic.visualization.deviation_plot", "optimagic.visualization.history_plots", "optimagic.visualization.plotting_utilities", "optimagic.visualization.profile_plot", "optimagic.visualization.slice_plot", "optimagic", "optimagic.decorators", "optimagic.exceptions", "optimagic.utilities", "optimagic.deprecations", "estimagic", "estimagic.examples", "estimagic.examples.logit", "estimagic.estimate_ml", "estimagic.estimate_msm", "estimagic.estimation_summaries", "estimagic.msm_weighting", "estimagic.bootstrap_ci", "estimagic.bootstrap_helpers", "estimagic.bootstrap_outcomes", "estimagic.bootstrap_samples", "estimagic.bootstrap", "estimagic.ml_covs", "estimagic.msm_covs", "estimagic.shared_covs", "estimagic.msm_sensitivity", "estimagic.estimation_table", "estimagic.lollipop_plot", ] check_untyped_defs = false disallow_any_generics = false disallow_untyped_defs = false 
[[tool.mypy.overrides]] module = "tests.*" disallow_untyped_defs = false ignore_errors = true [[tool.mypy.overrides]] module = [ "pybaum", "scipy", "scipy.linalg", "scipy.linalg.lapack", "scipy.stats", "scipy.optimize", "scipy.ndimage", "scipy.optimize._trustregion_exact", "plotly", "plotly.graph_objects", "plotly.express", "plotly.subplots", "matplotlib", "matplotlib.pyplot", "cyipopt", "nlopt", "bokeh", "bokeh.layouts", "bokeh.models", "bokeh.plotting", "bokeh.application", "bokeh.application.handlers", "bokeh.application.handlers.function", "bokeh.server", "bokeh.server.server", "bokeh.command", "bokeh.command.util", "fides", "petsc4py", "petsc4py.PETSc", "tranquilo", "tranquilo.tranquilo", "tranquilo.options", "tranquilo.process_arguments", "dfols", "pybobyqa", "pygmo", "jax", "joblib", "cloudpickle", "numba", "pathos", "pathos.pools", "optimagic._version", "annotated_types", "pdbp", "iminuit", "nevergrad", "nevergrad.optimization.base", "pygad", "pyswarms", "pyswarms.backend.topology", "yaml", "gradient_free_optimizers", "gradient_free_optimizers.optimizers.base_optimizer", ] ignore_missing_imports = true # ====================================================================================== # Pixi configuration # ====================================================================================== [tool.pixi.workspace] channels = ["conda-forge"] platforms = ["linux-64", "osx-arm64", "win-64"] # --- Base dependencies (all environments) ------------------------------------------- [tool.pixi.dependencies] python = ">=3.12,<3.15" # Prefer conda-forge builds for compiled scientific packages numpy = ">=2.0.0" scipy = ">=1.11" pandas = ">=2.1,<3" jupyterlab = ">=4.0" cyipopt = ">=1.4.0" pygmo = ">=2.19.0" nlopt = ">=2.7" statsmodels = ">=0.14" matplotlib = ">=3.8" bokeh = ">=3.2" altair = ">=5.1" seaborn = ">=0.13" pyyaml = ">=6.0.1" jinja2 = ">=3.1" tranquilo = ">=0.1.1" iminuit = ">=2.25" cma = ">=3.3" pygad = ">=3.2" pytorch-cpu = ">=2.2" ruff = 
">=0.15.5,<0.16" [tool.pixi.pypi-dependencies] optimagic = { path = ".", editable = true } pdbp = "*" bayesian-optimization = ">=1.4" dfo-ls = ">=1.5.3" py-bobyqa = ">=1.3" fides = "==0.7.4" kaleido = ">=0.2.1" gradient-free-optimizers = ">=1.6.0" pyswarms = ">=1.3" # --- Python version features -------------------------------------------------------- [tool.pixi.feature.py312.dependencies] python = "~=3.12.0" [tool.pixi.feature.py313.dependencies] python = "~=3.13.0" [tool.pixi.feature.py314.dependencies] python = "~=3.14.0" # --- Feature: tests (test infrastructure) -------------------------------------------- [tool.pixi.feature.tests.dependencies] pytest = ">=7.2" pytest-cov = ">=4.0" pytest-xdist = ">=3.2" [tool.pixi.feature.tests.tasks] tests = { cmd = "pytest", description = "Run the full test suite" } tests-fast = { cmd = "pytest -m 'not slow and not jax'", description = "Run tests excluding slow and jax tests" } tests-with-cov = { cmd = "pytest --cov-report=xml --cov=src", description = "Run tests with XML coverage report" } # --- Feature: type-checking (mypy + type stubs) -------------------------------------- [tool.pixi.feature.type-checking.dependencies] mypy = "==1.19.1" [tool.pixi.feature.type-checking.pypi-dependencies] pandas-stubs = "*" types-cffi = "*" types-openpyxl = "*" types-jinja2 = "*" sqlalchemy-stubs = "*" [tool.pixi.feature.type-checking.tasks] mypy = { cmd = "mypy", description = "Run mypy type checker" } # --- Feature: linux (Linux-only deps) ------------------------------------------------ [tool.pixi.feature.linux] platforms = ["linux-64"] [tool.pixi.feature.linux.dependencies] jax = ">=0.4.8" petsc4py = ">=3.18" # --- Feature: docs ------------------------------------------------------------------- [tool.pixi.feature.docs.dependencies] sphinx = ">=8.2.3" sphinxcontrib-bibtex = "*" sphinx-copybutton = "*" sphinx-design = "*" sphinx-llm = "*" sphinx-llms-txt = "*" ipython = "*" ipython_genutils = "*" myst-nb = "*" furo = "*" anywidget = 
"*" patsy = "*" [tool.pixi.feature.docs.pypi-dependencies] sphinxcontrib-mermaid = "*" intersphinx-registry = "*" [tool.pixi.feature.docs.tasks] build-docs = { cmd = "make html", cwd = "docs", description = "Build the HTML documentation" } # --- Compat features (version overrides for backward-compat CI) --------------------- [tool.pixi.feature.old-plotly.dependencies] plotly = "<6" [tool.pixi.feature.old-plotly.pypi-dependencies] kaleido = "<0.3" [tool.pixi.feature.nevergrad.pypi-dependencies] bayesian-optimization = "==1.4.0" nevergrad = "*" # --- Environments -------------------------------------------------------------------- [tool.pixi.environments] # Default dev environment (Python 3.14, everything) default = { features = ["tests", "py314"], solve-group = "py314" } # Linux CI (with JAX + PETSc) tests-linux-py312 = { features = ["tests", "linux", "py312"], solve-group = "linux-py312" } tests-linux-py313 = { features = ["tests", "linux", "py313"], solve-group = "linux-py313" } tests-linux-py314 = { features = ["tests", "linux", "py314"], solve-group = "linux-py314" } # macOS/Windows CI tests-py312 = { features = ["tests", "py312"], solve-group = "py312" } tests-py313 = { features = ["tests", "py313"], solve-group = "py313" } tests-py314 = { features = ["tests", "py314"], solve-group = "py314" } # Backward-compat CI (Python 3.12, lowest supported) tests-old-plotly = { features = ["tests", "old-plotly", "py312"] } # Nevergrad CI (bayesian-optimization==1.4.0) tests-nevergrad-py312 = { features = ["tests", "nevergrad", "py312"] } tests-nevergrad-py313 = { features = ["tests", "nevergrad", "py313"] } tests-nevergrad-py314 = { features = ["tests", "nevergrad", "py314"] } # Type checking type-checking = { features = ["type-checking", "py314"], solve-group = "py314" } # Docs docs = { features = ["docs", "py314"], solve-group = "py314" } ================================================ FILE: src/estimagic/__init__.py ================================================ 
import warnings from dataclasses import dataclass from estimagic import utilities from estimagic.bootstrap import BootstrapResult, bootstrap from estimagic.estimate_ml import LikelihoodResult, estimate_ml from estimagic.estimate_msm import MomentsResult, estimate_msm from estimagic.estimation_table import ( estimation_table, render_html, render_latex, ) from estimagic.lollipop_plot import lollipop_plot from estimagic.msm_weighting import get_moments_cov from optimagic import OptimizeLogReader as _OptimizeLogReader from optimagic import OptimizeResult as _OptimizeResult from optimagic import __version__ from optimagic import check_constraints as _check_constraints from optimagic import convergence_plot as _convergence_plot from optimagic import convergence_report as _convergence_report from optimagic import count_free_params as _count_free_params from optimagic import criterion_plot as _criterion_plot from optimagic import first_derivative as _first_derivative from optimagic import get_benchmark_problems as _get_benchmark_problems from optimagic import maximize as _maximize from optimagic import minimize as _minimize from optimagic import params_plot as _params_plot from optimagic import profile_plot as _profile_plot from optimagic import rank_report as _rank_report from optimagic import run_benchmark as _run_benchmark from optimagic import second_derivative as _second_derivative from optimagic import slice_plot as _slice_plot from optimagic import traceback_report as _traceback_report from optimagic.decorators import deprecated MSG = ( "estimagic.{name} has been deprecated in version 0.5.0. Use optimagic.{name} " "instead. This function will be removed in version 0.6.0." 
) minimize = deprecated(_minimize, MSG.format(name="minimize")) maximize = deprecated(_maximize, MSG.format(name="maximize")) first_derivative = deprecated(_first_derivative, MSG.format(name="first_derivative")) second_derivative = deprecated(_second_derivative, MSG.format(name="second_derivative")) run_benchmark = deprecated(_run_benchmark, MSG.format(name="run_benchmark")) get_benchmark_problems = deprecated( _get_benchmark_problems, MSG.format(name="get_benchmark_problems") ) convergence_report = deprecated( _convergence_report, MSG.format(name="convergence_report") ) rank_report = deprecated(_rank_report, MSG.format(name="rank_report")) traceback_report = deprecated(_traceback_report, MSG.format(name="traceback_report")) profile_plot = deprecated(_profile_plot, MSG.format(name="profile_plot")) convergence_plot = deprecated(_convergence_plot, MSG.format(name="convergence_plot")) slice_plot = deprecated(_slice_plot, MSG.format(name="slice_plot")) check_constraints = deprecated(_check_constraints, MSG.format(name="check_constraints")) count_free_params = deprecated(_count_free_params, MSG.format(name="count_free_params")) criterion_plot = deprecated(_criterion_plot, MSG.format(name="criterion_plot")) params_plot = deprecated(_params_plot, MSG.format(name="params_plot")) class OptimizeLogReader(_OptimizeLogReader): def __init__(self, path): warnings.warn( "estimagic.OptimizeLogReader has been deprecated in version 0.5.0. Use " "optimagic.OptimizeLogReader instead. This class will be removed in version" " 0.6.0.", FutureWarning, ) super().__init__(path) @dataclass class OptimizeResult(_OptimizeResult): def __post_init__(self): warnings.warn( "estimagic.OptimizeResult has been deprecated in version 0.5.0. Use " "optimagic.OptimizeResult instead. 
This class will be removed in version " "0.6.0.", FutureWarning, ) __all__ = [ "LikelihoodResult", "estimate_ml", "estimate_msm", "MomentsResult", "estimate_msm", "BootstrapResult", "bootstrap", "get_moments_cov", "estimation_table", "render_html", "render_latex", "utilities", "minimize", "maximize", "first_derivative", "second_derivative", "run_benchmark", "get_benchmark_problems", "profile_plot", "convergence_plot", "convergence_report", "rank_report", "traceback_report", "lollipop_plot", "slice_plot", "check_constraints", "count_free_params", "OptimizeLogReader", "OptimizeResult", "criterion_plot", "params_plot", "__version__", ] ================================================ FILE: src/estimagic/batch_evaluators.py ================================================ from optimagic.batch_evaluators import joblib_batch_evaluator as _joblib_batch_evaluator from optimagic.batch_evaluators import ( pathos_mp_batch_evaluator as _pathos_mp_batch_evaluator, ) from optimagic.batch_evaluators import ( process_batch_evaluator as _process_batch_evaluator, ) from optimagic.decorators import deprecated MSG = ( "estimagic.batch_evaluators.{name} has been deprecated in version 0.5.0. Use " "optimagic.batch_evaluators.{name} instead. This function will be removed in " "version 0.6.0." 
)

pathos_mp_batch_evaluator = deprecated(
    _pathos_mp_batch_evaluator, MSG.format(name="pathos_mp_batch_evaluator")
)
joblib_batch_evaluator = deprecated(
    _joblib_batch_evaluator, MSG.format(name="joblib_batch_evaluator")
)
process_batch_evaluator = deprecated(
    _process_batch_evaluator, MSG.format(name="process_batch_evaluator")
)


================================================ FILE: src/estimagic/bootstrap.py ================================================

import functools
from dataclasses import dataclass
from functools import cached_property
from typing import Any

import numpy as np
import pandas as pd
from pybaum import leaf_names, tree_flatten, tree_just_flatten, tree_unflatten

from estimagic.bootstrap_ci import calculate_ci
from estimagic.bootstrap_helpers import check_inputs
from estimagic.bootstrap_outcomes import get_bootstrap_outcomes
from estimagic.shared_covs import calculate_estimation_summary
from optimagic.batch_evaluators import joblib_batch_evaluator
from optimagic.parameters.block_trees import matrix_to_block_tree
from optimagic.parameters.tree_registry import get_registry
from optimagic.utilities import get_rng


def bootstrap(
    outcome,
    data,
    *,
    existing_result=None,
    outcome_kwargs=None,
    n_draws=1_000,
    weight_by=None,
    cluster_by=None,
    seed=None,
    n_cores=1,
    error_handling="continue",
    batch_evaluator=joblib_batch_evaluator,
):
    """Use the bootstrap to calculate inference quantities.

    Args:
        outcome (callable): A function that computes the statistic of interest.
        data (pd.DataFrame): Dataset.
        existing_result (BootstrapResult): An existing BootstrapResult object from a
            previous call of bootstrap(). Default is None.
        outcome_kwargs (dict): Additional keyword arguments for outcome.
        n_draws (int): Number of bootstrap samples to draw.
            If len(existing_outcomes) >= n_draws, a random subset of
            existing_outcomes is used.
        weight_by (str): Column name of variable with weights or None.
        cluster_by (str): Column name of variable to cluster by or None.
        seed (Union[None, int, numpy.random.Generator]): If seed is None or int the
            numpy.random.default_rng is used seeded with seed. If seed is already a
            Generator instance then that instance is used.
        n_cores (int): number of jobs for parallelization.
        error_handling (str): One of "continue", "raise". Default "continue" which
            means that bootstrap estimates are only calculated for those samples
            where no errors occur and a warning is produced if any error occurs.
        batch_evaluator (str or Callable): Name of a pre-implemented batch evaluator
            (currently 'joblib' and 'pathos_mp') or Callable with the same interface
            as the estimagic batch_evaluators. See :ref:`batch_evaluators`.

    Returns:
        BootstrapResult: A BootstrapResult object storing information on summary
            statistics, the covariance matrix, and estimated boostrap outcomes.

    """
    if callable(outcome):
        check_inputs(data=data, weight_by=weight_by, cluster_by=cluster_by)
        if outcome_kwargs is not None:
            outcome = functools.partial(outcome, **outcome_kwargs)
    else:
        raise TypeError("outcome must be a callable.")

    if existing_result is None:
        # Fresh run: the statistic on the original data is the base outcome.
        base_outcome = outcome(data)
        existing_outcomes = []
    elif isinstance(existing_result, BootstrapResult):
        # Resume from a previous result and reuse its outcomes.
        base_outcome = existing_result.base_outcome
        existing_outcomes = existing_result.outcomes
    else:
        raise ValueError("existing_result must be None or a BootstrapResult.")

    rng = get_rng(seed)

    n_existing = len(existing_outcomes)

    if n_draws > n_existing:
        # Draw only the missing number of bootstrap outcomes.
        new_outcomes = get_bootstrap_outcomes(
            data=data,
            outcome=outcome,
            weight_by=weight_by,
            cluster_by=cluster_by,
            rng=rng,
            n_draws=n_draws - n_existing,
            n_cores=n_cores,
            error_handling=error_handling,
            batch_evaluator=batch_evaluator,
        )
        all_outcomes = existing_outcomes + new_outcomes
    else:
        # More existing outcomes than requested: use a random subset without
        # replacement.
        random_indices = rng.choice(n_existing, n_draws, replace=False)
        all_outcomes = [existing_outcomes[k] for k in random_indices]

    # ==================================================================================
    # Process results
    # ==================================================================================

    registry = get_registry(extended=True)
    flat_outcomes = [
        tree_just_flatten(_outcome, registry=registry) for _outcome in all_outcomes
    ]
    internal_outcomes = np.array(flat_outcomes)

    result = BootstrapResult(
        _base_outcome=base_outcome,
        _internal_outcomes=internal_outcomes,
        _internal_cov=np.cov(internal_outcomes, rowvar=False),
    )

    return result


@dataclass
class BootstrapResult:
    # The outcome statistic(s) evaluated on the original data, as a pytree.
    _base_outcome: Any
    # 2d array: one row per bootstrap sample of flattened outcomes.
    _internal_outcomes: np.ndarray
    # Covariance matrix of the flattened bootstrap outcomes.
    _internal_cov: np.ndarray

    @cached_property
    def _se(self):
        return self.se()

    @cached_property
    def _cov(self):
        return self.cov()

    @cached_property
    def _ci(self):
        return self.ci()

    @cached_property
    def _p_values(self):
        return self.p_values()

    @cached_property
    def _summary(self):
        return self.summary()

    @property
    def base_outcome(self):
        """Returns the base outcome statistic(s).

        Returns:
            pytree: Pytree of base outcomes, i.e. the outcome statistic(s) evaluated
                on the original data set.

        """
        return self._base_outcome

    @cached_property
    def outcomes(self):
        """Returns the estimated bootstrap outcomes.

        Returns:
            List[Any]: The boostrap outcomes as a list of pytrees.

        """
        registry = get_registry(extended=True)
        _, treedef = tree_flatten(self._base_outcome, registry=registry)
        # Unflatten each row of the internal outcome matrix back into the
        # pytree structure of the base outcome.
        outcomes = [
            tree_unflatten(treedef, out, registry=registry)
            for out in self._internal_outcomes
        ]
        return outcomes

    def se(self):
        """Calculate standard errors.

        Returns:
            Any: The standard errors of the estimated parameters as a block-pytree,
                numpy.ndarray, or pandas.DataFrame.

        """
        cov = self._internal_cov
        se = np.sqrt(np.diagonal(cov))
        registry = get_registry(extended=True)
        _, treedef = tree_flatten(self._base_outcome, registry=registry)
        se = tree_unflatten(treedef, se, registry=registry)
        return se

    def cov(self, return_type="pytree"):
        """Calculate the variance-covariance matrix of the estimated parameters.

        Args:
            return_type (str): One of "pytree", "array" or "dataframe". Default pytree.
                If "array", a 2d numpy array with the covariance is returned.
                If "dataframe", a pandas DataFrame with parameter names in the index
                and columns are returned. The default is "pytree".

        Returns:
            Any: The covariance matrix of the estimated parameters as a block-pytree,
                numpy.ndarray, or pandas.DataFrame.

        """
        cov = self._internal_cov
        if return_type == "dataframe":
            registry = get_registry(extended=True)
            names = np.array(leaf_names(self._base_outcome, registry=registry))
            cov = pd.DataFrame(cov, columns=names, index=names)
        elif return_type == "pytree":
            cov = matrix_to_block_tree(cov, self._base_outcome, self._base_outcome)
        elif return_type != "array":
            raise ValueError(
                "return_type must be one of pytree, array, or dataframe, "
                f"not {return_type}."
            )
        return cov

    def ci(self, ci_method="percentile", ci_level=0.95):
        """Calculate confidence intervals.

        Args:
            ci_method (str): Method of choice for computing confidence intervals.
                The default is "percentile".
            ci_level (float): Confidence level for the calculation of confidence
                intervals. The default is 0.95.

        Returns:
            Any: Pytree with the same structure as base_outcome containing lower
                bounds of confidence intervals.
            Any: Pytree with the same structure as base_outcome containing upper
                bounds of confidence intervals.

        """
        registry = get_registry(extended=True)
        base_outcome_flat, treedef = tree_flatten(self._base_outcome, registry=registry)
        lower_flat, upper_flat = calculate_ci(
            base_outcome_flat, self._internal_outcomes, ci_method, ci_level
        )
        # Map the flat bounds back onto the pytree structure of the base outcome.
        lower = tree_unflatten(treedef, lower_flat, registry=registry)
        upper = tree_unflatten(treedef, upper_flat, registry=registry)
        return lower, upper

    def p_values(self):
        """Calculate p-values.

        Returns:
            Any: A pytree with the same structure as base_outcome containing p-values
                for the parameter estimates.

        """
        msg = "Bootstrap p_values are not yet implemented."
        raise NotImplementedError(msg)

    def summary(self, ci_method="percentile", ci_level=0.95):
        """Create a summary of bootstrap results.

        Args:
            ci_method (str): Method of choice for confidence interval computation.
                The default is "percentile".
            ci_level (float): Confidence level for the calculation of confidence
                intervals. The default is 0.95.

        Returns:
            pd.DataFrame: The estimation summary as a DataFrame containing information
                on the mean, standard errors, as well as the confidence intervals.
                Soon this will be a pytree.

        """
        registry = get_registry(extended=True)
        names = leaf_names(self.base_outcome, registry=registry)
        summary_data = _calulcate_summary_data_bootstrap(
            self, ci_method=ci_method, ci_level=ci_level
        )
        summary = calculate_estimation_summary(
            summary_data=summary_data,
            names=names,
            free_names=names,
        )
        return summary


# NOTE(review): "calulcate" is a typo for "calculate". The misspelled name is kept
# because it is a module-level name that other code (e.g. tests) may reference.
def _calulcate_summary_data_bootstrap(bootstrap_result, ci_method, ci_level):
    # Collect value, standard error and CI bounds in the format expected by
    # calculate_estimation_summary.
    lower, upper = bootstrap_result.ci(ci_method=ci_method, ci_level=ci_level)
    summary_data = {
        "value": bootstrap_result.base_outcome,
        "standard_error": bootstrap_result.se(),
        "ci_lower": lower,
        "ci_upper": upper,
        "p_value": np.full(len(lower), np.nan),  # p-values are not implemented yet
    }
    return summary_data


================================================ FILE: src/estimagic/bootstrap_ci.py ================================================

import numpy as np
from scipy.stats import norm

from estimagic.bootstrap_helpers import check_inputs


def calculate_ci(
    base_outcome,
    estimates,
    ci_method="percentile",
    ci_level=0.95,
):
    """Compute confidence interval of bootstrap estimates.

    Parts of the code of the subfunctions of this function are taken from Daniel
    Saxton's resample library, as found on https://github.com/dsaxton/resample/

    Args:
        base_outcome (list): List of flat base outcomes, i.e. the outcome statistic(s)
            evaluated on the original data set.
        estimates (np.ndarray): Array of estimates computed on the bootstrapped
            samples.
        ci_method (str): Method of choice for computing confidence intervals. The
            default is "percentile".
        ci_level (float): Confidence level for the calculation of confidence
            intervals.
The default is 0.95. Returns: np.ndarray: 1d array of the lower confidence interval, where the k'th entry contains the lower confidence interval for the k'th parameter. np.ndarray: 1d array of the upper confidence interval, where the k'th entry contains the upper confidence interval for the k'th parameter. """ check_inputs(ci_method=ci_method, ci_level=ci_level, skipdata=True) alpha = 1 - ci_level if ci_method == "percentile": cis = _ci_percentile(estimates, alpha) elif ci_method == "bc": cis = _ci_bc(estimates, base_outcome, alpha) elif ci_method == "t": cis = _ci_t(estimates, base_outcome, alpha) elif ci_method == "basic": cis = _ci_basic(estimates, base_outcome, alpha) elif ci_method == "normal": cis = _ci_normal(estimates, base_outcome, alpha) return cis[:, 0], cis[:, 1] def _ci_percentile(estimates, alpha): """Compute percentile type confidence interval of bootstrap estimates. Args: estimates (np.ndarray): Array of estimates computed on the bootstrapped samples. alpha (float): Statistical significance level of choice. Returns: cis (np.ndarray): 2d array where k'th row contains the upper and lower CI for k'th parameter. """ num_params = estimates.shape[1] cis = np.zeros((num_params, 2)) for k in range(num_params): q = _eqf(estimates[:, k]) cis[k, :] = q(alpha / 2), q(1 - alpha / 2) return cis def _ci_bc(estimates, base_outcome, alpha): """Compute bc type confidence interval of bootstrap estimates. Args: estimates (np.ndarray): Array of estimates computed on the bootstrapped samples. base_outcome (list): List of flat base outcomes, i.e. the outcome statistics evaluated on the original data set. alpha (float): Statistical significance level of choice. Returns: cis (np.ndarray): 2d array where k'th row contains the upper and lower CI for k'th parameter. 
""" num_params = estimates.shape[1] cis = np.zeros((num_params, 2)) for k in range(num_params): q = _eqf(estimates[:, k]) params = estimates[:, k] # Bias correction z_naught = norm.ppf(np.mean(params <= base_outcome[k])) z_low = norm.ppf(alpha) z_high = norm.ppf(1 - alpha) p1 = norm.cdf(z_naught + (z_naught + z_low)) p2 = norm.cdf(z_naught + (z_naught + z_high)) cis[k, :] = q(p1), q(p2) return cis def _ci_t(estimates, base_outcome, alpha): """Compute studentized confidence interval of bootstrap estimates. Args: estimates (np.ndarray): Array of estimates computed on the bootstrapped samples. base_outcome (list): List of flat base outcomes, i.e. the outcome statistics evaluated on the original data set. alpha (float): Statistical significance level of choice. Returns: cis (np.ndarray): 2d array where k'th row contains the upper and lower CI for k'th parameter. """ num_params = estimates.shape[1] cis = np.zeros((num_params, 2)) for k in range(num_params): params = estimates[:, k] theta_std = np.std(params) tq = _eqf((params - base_outcome[k]) / theta_std) t1 = tq(1 - alpha / 2) t2 = tq(alpha / 2) cis[k, :] = base_outcome[k] - theta_std * t1, base_outcome[k] - theta_std * t2 return cis def _ci_normal(estimates, base_outcome, alpha): """Compute approximate normal confidence interval of bootstrap estimates. Args: estimates (np.ndarray): Array of estimates computed on the bootstrapped samples. base_outcome (list): List of flat base outcomes, i.e. the outcome statistics evaluated on the original data set. alpha (float): Statistical significance level of choice. Returns: cis (np.ndarray): 2d array where k'th row contains the upper and lower CI for k'th parameter. 
""" num_params = estimates.shape[1] cis = np.zeros((num_params, 2)) for k in range(num_params): params = estimates[:, k] theta_std = np.std(params) t = norm.ppf(alpha / 2) cis[k, :] = base_outcome[k] + theta_std * t, base_outcome[k] - theta_std * t return cis def _ci_basic(estimates, base_outcome, alpha): """Compute basic bootstrap confidence interval of bootstrap estimates. Args: estimates (np.ndarray): Array of estimates computed on the bootstrapped samples. base_outcome (list): List of flat base outcomes, i.e. the outcome statistics evaluated on the original data set. alpha (float): Statistical significance level of choice. Returns: cis (np.ndarray): 2d array where k'th row contains the upper and lower CI for k'th parameter. """ num_params = estimates.shape[1] cis = np.zeros((num_params, 2)) for k in range(num_params): q = _eqf(estimates[:, k]) cis[k, :] = ( 2 * base_outcome[k] - q(1 - alpha / 2), 2 * base_outcome[k] - q(alpha / 2), ) return cis def _eqf(sample): """Return empirical quantile function of the given sample. Args: sample (np.ndarray): Sample to base quantile function on. Returns: f (callable): Quantile function for given sample. """ def f(x): return np.quantile(sample, x) return f ================================================ FILE: src/estimagic/bootstrap_helpers.py ================================================ import pandas as pd def check_inputs( data=None, weight_by=None, cluster_by=None, ci_method="percentile", ci_level=0.95, skipdata=False, ): """Check validity of inputs. Args: data (pd.DataFrame): Dataset. weight_by (str): Column name of variable with weights. cluster_by (str): Column name of variable to cluster by. ci_method (str): Method of choice for computing confidence intervals. The default is "percentile". ci_level (float): Confidence level for the calculation of confidence intervals. The default is 0.95. skipdata (bool): Whether to skip all checks on the data argument. 
""" ci_method_list = ["percentile", "bc", "t", "normal", "basic"] if not skipdata: if not isinstance(data, pd.DataFrame) and not isinstance(data, pd.Series): raise TypeError("Data must be a pandas.DataFrame or pandas.Series.") elif (weight_by is not None) and (weight_by not in data.columns.tolist()): raise ValueError( "Input 'weight_by' must be None or a column name of 'data'." ) elif (cluster_by is not None) and (cluster_by not in data.columns.tolist()): raise ValueError( "Input 'cluster_by' must be None or a column name of 'data'." ) if ci_method not in ci_method_list: msg = ( "ci_method must be 'percentile', 'bc', 't', 'basic' or 'normal', " f"'{ci_method}' was supplied" ) raise ValueError(msg) if ci_level > 1 or ci_level < 0: raise ValueError("Input 'ci_level' must be in [0,1].") ================================================ FILE: src/estimagic/bootstrap_outcomes.py ================================================ from estimagic.bootstrap_helpers import check_inputs from estimagic.bootstrap_samples import get_bootstrap_indices from optimagic.batch_evaluators import process_batch_evaluator def get_bootstrap_outcomes( data, outcome, weight_by=None, cluster_by=None, rng=None, n_draws=1000, n_cores=1, error_handling="continue", batch_evaluator="joblib", ): """Draw bootstrap samples and calculate outcomes. Args: data (pandas.DataFrame): original dataset. outcome (callable): function of the dataset calculating statistic of interest. Returns a general pytree (e.g. pandas Series, dict, numpy array, etc.). weight_by (str): column name of the variable with weights. cluster_by (str): column name of the variable to cluster by. rng (numpy.random.Generator): A random number generator. n_draws (int): number of bootstrap draws. n_cores (int): number of jobs for parallelization. error_handling (str): One of "continue", "raise". 
Default "continue" which means that bootstrap estimates are only calculated for those samples where no errors occur and a warning is produced if any error occurs. batch_evaluator (str or Callable): Name of a pre-implemented batch evaluator (currently 'joblib' and 'pathos_mp') or Callable with the same interface as the estimagic batch_evaluators. See :ref:`batch_evaluators`. Returns: estimates (list): List of pytrees of estimated bootstrap outcomes. """ check_inputs(data=data, weight_by=weight_by, cluster_by=cluster_by) batch_evaluator = process_batch_evaluator(batch_evaluator) indices = get_bootstrap_indices( data=data, rng=rng, weight_by=weight_by, cluster_by=cluster_by, n_draws=n_draws, ) estimates = _get_bootstrap_outcomes_from_indices( indices=indices, data=data, outcome=outcome, n_cores=n_cores, error_handling=error_handling, batch_evaluator=batch_evaluator, ) return estimates def _get_bootstrap_outcomes_from_indices( indices, data, outcome, n_cores, error_handling, batch_evaluator, ): arguments = [{"data": data, "indices": ind, "outcome": outcome} for ind in indices] raw_estimates = batch_evaluator( _take_indices_and_calculate_outcome, arguments, n_cores=n_cores, unpack_symbol="**", error_handling=error_handling, ) estimates = [est for est in raw_estimates if not isinstance(est, str)] tracebacks = [est for est in raw_estimates if isinstance(est, str)] if not estimates: msg = ( "Calculating of all bootstrap outcomes failed. The tracebacks of the " "raised Exceptions are reproduced below:" ) raise RuntimeError(msg + "\n\n" + "\n\n".join(tracebacks)) if tracebacks: msg = ( "Calculating bootstrap outcomes failed for some samples. Those samples " "are excluded from the calculation of bootstrap standard errors and " "confidence intervals, rendering them invalid. Do not use them for " "anything but diagnostic purposes. Check warnings for more information. 
" ) return estimates def _take_indices_and_calculate_outcome(indices, data, outcome): return outcome(data.iloc[indices]) ================================================ FILE: src/estimagic/bootstrap_samples.py ================================================ import numpy as np import pandas as pd def get_bootstrap_indices( data, rng, weight_by=None, cluster_by=None, n_draws=1000, ): """Draw positional indices for the construction of bootstrap samples. Storing the positional indices instead of the full bootstrap samples saves a lot of memory for datasets with many variables. Args: data (pandas.DataFrame): original dataset. rng (numpy.random.Generator): A random number generator. weight_by (str): column name of the variable with weights. cluster_by (str): column name of the variable to cluster by. n_draws (int): number of draws, only relevant if seeds is None. Returns: list: list of numpy arrays with positional indices """ n_obs = len(data) probs = _calculate_bootstrap_indices_weights(data, weight_by, cluster_by) if cluster_by is None: bootstrap_indices = list( rng.choice(n_obs, size=(n_draws, n_obs), replace=True, p=probs) ) else: clusters = data[cluster_by].unique() drawn_clusters = rng.choice( clusters, size=(n_draws, len(clusters)), replace=True, p=probs ) bootstrap_indices = _convert_cluster_ids_to_indices( data[cluster_by], drawn_clusters ) return bootstrap_indices def _calculate_bootstrap_indices_weights(data, weight_by, cluster_by): """Calculate weights for drawing bootstrap indices. If weights_by is not None and cluster_by is None, the weights are normalized to sum to one. If weights_by and cluster_by are both not None, the weights are normalized to sum to one within each cluster. Args: data (pandas.DataFrame): original dataset. weight_by (str): column name of the variable with weights. cluster_by (str): column name of the variable to cluster by. Returns: list: None or pd.Series of weights. 
    """
    if weight_by is None:
        probs = None
    else:
        if cluster_by is None:
            # Normalize observation weights to probabilities.
            probs = data[weight_by] / data[weight_by].sum()
        else:
            # Aggregate weights per cluster, then normalize across clusters.
            cluster_weights = data.groupby(cluster_by, sort=False)[weight_by].sum()
            probs = cluster_weights / cluster_weights.sum()
    return probs


def _convert_cluster_ids_to_indices(cluster_col, drawn_clusters):
    """Convert the drawn clusters to positional indices of individual observations.

    Args:
        cluster_col (pandas.Series): Series of cluster ids, one per observation, in
            the original row order of the data.
        drawn_clusters: 2d array-like where each row contains the cluster ids drawn
            (with replacement) for one bootstrap sample.

    Returns:
        list: One numpy array of positional indices per bootstrap sample.

    """
    bootstrap_indices = []
    # Map each cluster id to the positional indices of its member observations.
    cluster_to_locs = pd.Series(np.arange(len(cluster_col)), index=cluster_col)
    for draw in drawn_clusters:
        bootstrap_indices.append(cluster_to_locs[draw].to_numpy())
    return bootstrap_indices


def get_bootstrap_samples(
    data,
    rng,
    weight_by=None,
    cluster_by=None,
    n_draws=1000,
):
    """Draw bootstrap samples.

    If you have memory issues you should use get_bootstrap_indices instead and
    construct the full samples only as needed.

    Args:
        data (pandas.DataFrame): original dataset.
        rng (numpy.random.Generator): A random number generator.
        weight_by (str): weights for the observations.
        cluster_by (str): column name of the variable to cluster by.
        n_draws (int): number of draws, only relevant if seeds is None.

    Returns:
        list: list of resampled datasets.

    """
    indices = get_bootstrap_indices(
        data=data,
        rng=rng,
        weight_by=weight_by,
        cluster_by=cluster_by,
        n_draws=n_draws,
    )
    datasets = _get_bootstrap_samples_from_indices(data=data, bootstrap_indices=indices)
    return datasets


def _get_bootstrap_samples_from_indices(data, bootstrap_indices):
    """Convert bootstrap indices into actual bootstrap samples.

    Args:
        data (pandas.DataFrame): original dataset.
        bootstrap_indices (list): List with numpy arrays containing positional
            indices of observations in data.

    Returns:
        list: list of DataFrames

    """
    out = [data.iloc[idx] for idx in bootstrap_indices]
    return out


================================================ FILE: src/estimagic/config.py ================================================

from pathlib import Path

# Directory containing the example datasets shipped with estimagic.
EXAMPLE_DIR = Path(__file__).parent / "examples"


================================================ FILE: src/estimagic/estimate_ml.py ================================================

import warnings
from dataclasses import asdict, dataclass, field
from functools import cached_property
from typing import Any, Dict

import numpy as np
import pandas as pd

from estimagic.ml_covs import (
    cov_cluster_robust,
    cov_hessian,
    cov_jacobian,
    cov_robust,
    cov_strata_robust,
)
from estimagic.shared_covs import (
    FreeParams,
    calculate_ci,
    calculate_estimation_summary,
    calculate_free_estimates,
    calculate_p_values,
    calculate_summary_data_estimation,
    get_derivative_case,
    transform_covariance,
    transform_free_cov_to_cov,
    transform_free_values_to_params_tree,
)
from optimagic import deprecations, mark
from optimagic.deprecations import (
    replace_and_warn_about_deprecated_bounds,
)
from optimagic.differentiation.derivatives import first_derivative, second_derivative
from optimagic.differentiation.numdiff_options import (
    NumdiffPurpose,
    get_default_numdiff_options,
    pre_process_numdiff_options,
)
from optimagic.exceptions import InvalidFunctionError, NotAvailableError
from optimagic.optimization.fun_value import (
    convert_fun_output_to_function_value,
    enforce_return_type,
)
from optimagic.optimization.optimize import maximize
from optimagic.optimization.optimize_result import OptimizeResult
from optimagic.parameters.block_trees import block_tree_to_matrix, matrix_to_block_tree
from optimagic.parameters.bounds import Bounds, pre_process_bounds
from optimagic.parameters.conversion import Converter, get_converter
from optimagic.parameters.space_conversion import InternalParams
from optimagic.shared.check_option_dicts import (
    check_optimization_options,
)
from optimagic.typing import AggregationLevel
from optimagic.utilities import get_rng, to_pickle


def estimate_ml(
    loglike,
    params,
    optimize_options,
    *,
    bounds=None,
    constraints=None,
    logging=None,
    loglike_kwargs=None,
    jacobian=None,
    jacobian_kwargs=None,
    jacobian_numdiff_options=None,
    hessian=None,
    hessian_kwargs=None,
    hessian_numdiff_options=None,
    design_info=None,
    # deprecated
    log_options=None,
    lower_bounds=None,
    upper_bounds=None,
    numdiff_options=None,
):
    """Do a maximum likelihood (ml) estimation.

    This is a high level interface of our lower level functions for maximization,
    numerical differentiation and inference. It does the full workflow for maximum
    likelihood estimation with just one function call.

    While we have good defaults, you can still configure each aspect of each step
    via the optional arguments of this function. If you find it easier to do the
    maximization separately, you can do so and just provide the optimal
    parameters as ``params`` and set ``optimize_options=False``

    Args:
        loglike (callable): Likelihood function that takes params (and potentially
            other keyword arguments) and returns a pytree containing the likelihood
            contributions for each observation or a FunctionValue object.
        params (pytree): A pytree containing the estimated or start parameters of the
            likelihood model. If the supplied parameters are estimated parameters, set
            optimize_options to False. Pytrees can be a numpy array, a pandas Series, a
            DataFrame with "value" column, a float and any kind of (nested) dictionary
            or list containing these elements. See :ref:`params` for examples.
        optimize_options (dict, Algorithm, str or False): Keyword arguments that govern
            the numerical optimization. Valid entries are all arguments of
            :func:`~estimagic.optimization.optimize.minimize` except for those that are
            passed explicitly to ``estimate_ml``.
            If you pass False as optimize_options you signal that ``params`` are
            already the optimal parameters and no numerical optimization is needed. If
            you pass a str as optimize_options it is used as the ``algorithm`` option.
        bounds: Lower and upper bounds on the parameters. The most general and preferred
            way to specify bounds is an `optimagic.Bounds` object that collects lower,
            upper, soft_lower and soft_upper bounds. The soft bounds are used for
            sampling based optimizers but are not enforced during optimization. Each
            bound type mirrors the structure of params. Check our how-to guide on
            bounds for examples. If params is a flat numpy array, you can also provide
            bounds via any format that is supported by scipy.optimize.minimize.
        constraints (list, dict): List with constraint dictionaries or single dict.
            See :ref:`constraints`.
        logging (pathlib.Path, str or False): Path to sqlite3 file (which typically has
            the file extension ``.db``. If the file does not exist, it will be created.
        log_options (dict): Additional keyword arguments to configure the logging.

            - "fast_logging": A boolean that determines if "unsafe" settings are used
              to speed up write processes to the database. This should only be used for
              very short running criterion functions where the main purpose of the log
              is monitoring and it would not be catastrophic to get a corrupted
              database in case of a sudden system shutdown. If one evaluation of the
              criterion function (and gradient if applicable) takes more than 100 ms,
              the logging overhead is negligible.
            - "if_table_exists": (str) One of "extend", "replace", "raise". What to
              do if the tables we want to write to already exist. Default "extend".
            - "if_database_exists": (str): One of "extend", "replace", "raise". What to
              do if the database we want to write to already exists. Default "extend".

        loglike_kwargs (dict): Additional keyword arguments for loglike.
        jacobian (callable or None): A function that takes ``params`` and potentially
            other keyword arguments and returns the jacobian of
            loglike["contributions"] with respect to the params. Note that you only
            need to pass a Jacobian function if you have a closed form Jacobian. If
            you pass None, a numerical Jacobian will be calculated.
        jacobian_kwargs (dict): Additional keyword arguments for the Jacobian function.
        jacobian_numdiff_options (dict): Keyword arguments for the calculation of
            numerical derivatives for the calculation of standard errors. See
            :ref:`first_derivative` for details.
        hessian (callable or None or False): A function that takes ``params`` and
            potentially other keyword arguments and returns the Hessian of
            loglike["value"] with respect to the params.  If you pass None, a numerical
            Hessian will be calculated. If you pass ``False``, you signal that no
            Hessian should be calculated. Thus, no result that requires the Hessian
            will be calculated.
        hessian_kwargs (dict): Additional keyword arguments for the Hessian function.
        hessian_numdiff_options (dict): Keyword arguments for the calculation of
            numerical derivatives for the calculation of standard errors.
        design_info (pandas.DataFrame): DataFrame with one row per observation that
            contains some or all of the variables "psu" (primary sampling unit),
            "strata" and "fpc" (finite population corrector). See
            :ref:`robust_likelihood_inference` for details.

    Returns:
        LikelihoodResult: A LikelihoodResult object.
""" # ================================================================================== # handle deprecations # ================================================================================== bounds = replace_and_warn_about_deprecated_bounds( lower_bounds=lower_bounds, upper_bounds=upper_bounds, bounds=bounds, ) if numdiff_options is not None: deprecations.throw_numdiff_options_deprecated_in_estimate_ml_future_warning() if jacobian_numdiff_options is None: jacobian_numdiff_options = numdiff_options if hessian_numdiff_options is None: hessian_numdiff_options = numdiff_options deprecations.throw_dict_constraints_future_warning_if_required(constraints) # ================================================================================== # Check and process inputs # ================================================================================== loglike = mark.likelihood(loglike) bounds = pre_process_bounds(bounds) jacobian_numdiff_options = pre_process_numdiff_options(jacobian_numdiff_options) hessian_numdiff_options = pre_process_numdiff_options(hessian_numdiff_options) # TODO: Replace dict_constraints with constraints, once we deprecate dictionary # constraints. dict_constraints = deprecations.pre_process_constraints(constraints) if jacobian_numdiff_options is None: jacobian_numdiff_options = get_default_numdiff_options( purpose=NumdiffPurpose.ESTIMATE_JACOBIAN ) if hessian_numdiff_options is None: hessian_numdiff_options = get_default_numdiff_options( purpose=NumdiffPurpose.ESTIMATE_HESSIAN ) is_optimized = optimize_options is False if not is_optimized: # If optimize_options is not a dictionary and not False, we assume it represents # an algorithm. The actual testing of whether it is a valid algorithm is done # when `maximize` is called. 
if not isinstance(optimize_options, dict): optimize_options = {"algorithm": optimize_options} check_optimization_options( optimize_options, usage="estimate_ml", algorithm_mandatory=True, ) jac_case = get_derivative_case(jacobian) hess_case = get_derivative_case(hessian) loglike_kwargs = {} if loglike_kwargs is None else loglike_kwargs jacobian_kwargs = {} if jacobian_kwargs is None else jacobian_kwargs hessian_kwargs = {} if hessian_kwargs is None else hessian_kwargs # ================================================================================== # Calculate estimates via maximization (if necessary) # ================================================================================== # Note: We do not need to handle deprecations for the optimization because that # is already done inside `maximize`. if is_optimized: estimates = params opt_res = None else: opt_res = maximize( fun=loglike, fun_kwargs=loglike_kwargs, params=params, bounds=bounds, constraints=constraints, logging=logging, log_options=log_options, **optimize_options, ) estimates = opt_res.params # ================================================================================== # Do first function evaluations at estimated parameters # ================================================================================== try: loglike_eval = loglike(estimates, **loglike_kwargs) except (KeyboardInterrupt, SystemExit): raise except Exception as e: msg = "Error while evaluating loglike at estimated params." raise InvalidFunctionError(msg) from e if callable(jacobian): try: jacobian_eval = jacobian(estimates, **jacobian_kwargs) except (KeyboardInterrupt, SystemExit): raise except Exception as e: msg = "Error while evaluating closed form jacobian at estimated params." 
raise InvalidFunctionError(msg) from e else: jacobian_eval = None if callable(hessian): try: hessian_eval = hessian(estimates, **hessian_kwargs) except (KeyboardInterrupt, SystemExit): raise except Exception as e: msg = "Error while evaluating closed form hessian at estimated params." raise InvalidFunctionError(msg) from e else: hessian_eval = None # ================================================================================== # Handle deprecated function output # ================================================================================== if deprecations.is_dict_output(loglike_eval): deprecations.throw_dict_output_warning() loglike_eval = deprecations.convert_dict_to_function_value(loglike_eval) loglike = deprecations.replace_dict_output(loglike) else: loglike_eval = convert_fun_output_to_function_value( loglike_eval, AggregationLevel.LIKELIHOOD ) loglike = enforce_return_type(AggregationLevel.LIKELIHOOD)(loglike) # ================================================================================== # Get the converter for params and function outputs # ================================================================================== converter, internal_estimates = get_converter( params=estimates, constraints=dict_constraints, bounds=bounds, func_eval=loglike_eval.value, solver_type="contributions", derivative_eval=jacobian_eval, ) # ================================================================================== # Calculate internal jacobian # ================================================================================== if jac_case == "closed-form": int_jac = converter.derivative_to_internal( jacobian_eval, internal_estimates.values ) elif jac_case == "numerical": def func(x): p = converter.params_from_internal(x) loglike_eval = loglike(p, **loglike_kwargs) if deprecations.is_dict_output(loglike_eval): deprecations.throw_dict_output_warning() loglike_eval = deprecations.convert_dict_to_function_value(loglike_eval) out = 
loglike_eval.internal_value(AggregationLevel.LIKELIHOOD) return out jac_res = first_derivative( func=func, params=internal_estimates.values, bounds=Bounds( lower=internal_estimates.lower_bounds, upper=internal_estimates.upper_bounds, ), error_handling="continue", **asdict(jacobian_numdiff_options), ) int_jac = jac_res.derivative else: int_jac = None if dict_constraints in [None, []] and jacobian_eval is None and int_jac is not None: loglike_contribs = loglike_eval.value jacobian_eval = matrix_to_block_tree( int_jac, outer_tree=loglike_contribs, inner_tree=estimates, ) if jacobian_eval is None: _no_jac_reason = ( "no closed form jacobian was provided and there are constraints" ) else: _no_jac_reason = None # ================================================================================== # Calculate internal Hessian # ================================================================================== if hess_case == "skip": int_hess = None elif hess_case == "numerical": def func(x): p = converter.params_from_internal(x) loglike_eval = loglike(p, **loglike_kwargs) if deprecations.is_dict_output(loglike_eval): deprecations.throw_dict_output_warning() loglike_eval = deprecations.convert_dict_to_function_value(loglike_eval) out = loglike_eval.internal_value(AggregationLevel.SCALAR) return out hess_res = second_derivative( func=func, params=internal_estimates.values, bounds=Bounds( lower=internal_estimates.lower_bounds, upper=internal_estimates.upper_bounds, ), error_handling="continue", **asdict(hessian_numdiff_options), ) int_hess = hess_res.derivative elif hess_case == "closed-form" and dict_constraints: raise NotImplementedError( "Closed-form Hessians are not yet compatible with constraints." 
) elif hess_case == "closed-form": int_hess = block_tree_to_matrix( hessian_eval, outer_tree=params, inner_tree=params, ) else: raise ValueError() if dict_constraints in [None, []] and hessian_eval is None and int_hess is not None: hessian_eval = matrix_to_block_tree( int_hess, outer_tree=params, inner_tree=params, ) if hessian_eval is None: if hess_case == "skip": _no_hess_reason = "the hessian calculation was explicitly skipped." else: _no_hess_reason = ( "no closed form hessian was provided and there are constraints" ) else: _no_hess_reason = None # ================================================================================== # create a LikelihoodResult object # ================================================================================== free_estimates = calculate_free_estimates(estimates, internal_estimates) res = LikelihoodResult( _params=estimates, _converter=converter, _optimize_result=opt_res, _jacobian=jacobian_eval, _no_jacobian_reason=_no_jac_reason, _hessian=hessian_eval, _no_hessian_reason=_no_hess_reason, _internal_jacobian=int_jac, _internal_hessian=int_hess, _design_info=design_info, _internal_estimates=internal_estimates, _free_estimates=free_estimates, _has_constraints=dict_constraints not in [None, []], ) return res @dataclass class LikelihoodResult: """Likelihood estimation results object.""" _params: Any _internal_estimates: InternalParams _free_estimates: FreeParams _converter: Converter _has_constraints: bool _optimize_result: OptimizeResult | None = None _jacobian: Any = None _no_jacobian_reason: str | None = None _hessian: Any = None _no_hessian_reason: str | None = None _internal_jacobian: np.ndarray | None = None _internal_hessian: np.ndarray | None = None _design_info: pd.DataFrame | None = None _cache: Dict = field(default_factory=dict) def __post_init__(self): if self._internal_jacobian is None and self._internal_hessian is None: raise ValueError( "At least one of _internal_jacobian or _internal_hessian must be " "not None." 
            )
        elif self._internal_jacobian is None:
            valid_methods = ["hessian"]
        elif self._internal_hessian is None:
            valid_methods = ["jacobian"]
        else:
            valid_methods = ["jacobian", "hessian", "robust"]

        if self._design_info is not None:
            if "psu" in self._design_info:
                valid_methods.append("cluster_robust")
            if {"strata", "psu", "fpc"}.issubset(self._design_info):
                valid_methods.append("strata_robust")

        self._valid_methods = set(valid_methods)

    def _get_free_cov(
        self,
        method,
        n_samples,
        bounds_handling,
        seed,
    ):
        # Central entry point for all covariance based quantities (se, cov, ci,
        # p_values, summary). Results are cached per argument combination, but
        # only for a non-None seed, because otherwise repeated calls would not
        # be reproducible.
        if method not in self._valid_methods:
            msg = f"Invalid method: {method}. Valid methods are {self._valid_methods}."
            raise ValueError(msg)

        args = (method, n_samples, bounds_handling, seed)
        is_cached = args in self._cache

        if is_cached:
            free_cov = self._cache[args]
        else:
            free_cov = _calculate_free_cov_ml(
                method=method,
                internal_estimates=self._internal_estimates,
                converter=self._converter,
                internal_jacobian=self._internal_jacobian,
                internal_hessian=self._internal_hessian,
                n_samples=n_samples,
                design_info=self._design_info,
                bounds_handling=bounds_handling,
                seed=seed,
            )

            if seed is not None:
                self._cache[args] = free_cov
            elif self._converter.has_transforming_constraints:
                msg = (
                    "seed is set to None and constraints are transforming. This leads "
                    "to randomness in the result. To avoid random behavior, choose a "
                    "non-None seed."
                )
                warnings.warn(msg)

        return free_cov

    @property
    def params(self):
        return self._params

    @property
    def optimize_result(self):
        return self._optimize_result

    @property
    def jacobian(self):
        if self._jacobian is None:
            raise NotAvailableError(
                f"No jacobian is available because {self._no_jacobian_reason}."
            )
        return self._jacobian

    @property
    def hessian(self):
        if self._hessian is None:
            raise NotAvailableError(
                f"No hessian is available because {self._no_hessian_reason}."
            )
        return self._hessian

    # Cached default-argument versions of the public methods below.
    @cached_property
    def _se(self):
        return self.se()

    @cached_property
    def _cov(self):
        return self.cov()

    @cached_property
    def _summary(self):
        return self.summary()

    @cached_property
    def _ci(self):
        return self.ci()

    @cached_property
    def _p_values(self):
        return self.p_values()

    def se(
        self,
        method="jacobian",
        n_samples=10_000,
        bounds_handling="clip",
        seed=None,
    ):
        """Calculate standard errors.

        Args:
            method (str): One of "jacobian", "hessian", "robust", "cluster_robust",
                "strata_robust". Default "jacobian". "cluster_robust" is only
                available if design_info contains a column called "psu" that
                identifies the primary sampling unit. "strata_robust" is only
                available if the columns "strata", "fpc" and "psu" are in design_info.
            n_samples (int): Number of samples used to transform the covariance matrix
                of the internal parameter vector into the covariance matrix of the
                external parameters. For background information about internal and
                external params see :ref:`implementation_of_constraints`. This is only
                used if you are using constraints.
            bounds_handling (str): One of "clip", "raise", "ignore". Determines how
                bounds are handled. If "clip", confidence intervals are clipped at the
                bounds. Standard errors are only adjusted if a sampling step is
                necessary due to additional constraints. If "raise" and any lower or
                upper bound is binding, we raise an Error. If "ignore", boundary
                problems are simply ignored.
            seed (int): Seed for the random number generator. Only used if there are
                transforming constraints.

        Returns:
            Any: A pytree with the same structure as params containing standard errors
                for the parameter estimates.
""" free_cov = self._get_free_cov( method=method, n_samples=n_samples, bounds_handling=bounds_handling, seed=seed, ) free_se = np.sqrt(np.diagonal(free_cov)) se = transform_free_values_to_params_tree( values=free_se, free_params=self._free_estimates, params=self._params, ) return se def cov( self, method="jacobian", n_samples=10_000, bounds_handling="clip", return_type="pytree", seed=None, ): """Calculate the variance-covariance (matrix) of the estimated parameters. Args: method (str): One of "jacobian", "hessian", "robust", "cluster_robust", "strata_robust". Default "jacobian". "cluster_robust" is only available if design_info containts a columns called "psu" that identifies the primary sampling unit. "strata_robust" is only available if the columns "strata", "fpc" and "psu" are in design_info. n_samples (int): Number of samples used to transform the covariance matrix of the internal parameter vector into the covariance matrix of the external parameters. For background information about internal and external params see :ref:`implementation_of_constraints`. This is only used if you are using constraints. bounds_handling (str): One of "clip", "raise", "ignore". Determines how bounds are handled. If "clip", confidence intervals are clipped at the bounds. Standard errors are only adjusted if a sampling step is necessary due to additional constraints. If "raise" and any lower or upper bound is binding, we raise an Error. If "ignore", boundary problems are simply ignored. return_type (str): One of "pytree", "array" or "dataframe". Default pytree. If "array", a 2d numpy array with the covariance is returned. If "dataframe", a pandas DataFrame with parameter names in the index and columns are returned. seed (int): Seed for the random number generator. Only used if there are transforming constraints. Returns: Any: The covariance matrix of the estimated parameters as block-pytree, numpy.ndarray or pandas.DataFrame. 
""" free_cov = self._get_free_cov( method=method, n_samples=n_samples, bounds_handling=bounds_handling, seed=seed, ) cov = transform_free_cov_to_cov( free_cov=free_cov, free_params=self._free_estimates, params=self._params, return_type=return_type, ) return cov def summary( self, method="jacobian", n_samples=10_000, ci_level=0.95, bounds_handling="clip", seed=None, ): """Create a summary of estimation results. Args: method (str): One of "jacobian", "hessian", "robust", "cluster_robust", "strata_robust". Default "jacobian". "cluster_robust" is only available if design_info containts a columns called "psu" that identifies the primary sampling unit. "strata_robust" is only available if the columns "strata", "fpc" and "psu" are in design_info. ci_level (float): Confidence level for the calculation of confidence intervals. The default is 0.95. n_samples (int): Number of samples used to transform the covariance matrix of the internal parameter vector into the covariance matrix of the external parameters. For background information about internal and external params see :ref:`implementation_of_constraints`. This is only used if you are using constraints. bounds_handling (str): One of "clip", "raise", "ignore". Determines how bounds are handled. If "clip", confidence intervals are clipped at the bounds. Standard errors are only adjusted if a sampling step is necessary due to additional constraints. If "raise" and any lower or upper bound is binding, we raise an Error. If "ignore", boundary problems are simply ignored. seed (int): Seed for the random number generator. Only used if there are transforming constraints. Returns: Any: The estimation summary as pytree of DataFrames. 
""" summary_data = calculate_summary_data_estimation( self, free_estimates=self._free_estimates, method=method, ci_level=ci_level, n_samples=n_samples, bounds_handling=bounds_handling, seed=seed, ) summary = calculate_estimation_summary( summary_data=summary_data, names=self._free_estimates.all_names, free_names=self._free_estimates.free_names, ) return summary def ci( self, method="jacobian", n_samples=10_000, ci_level=0.95, bounds_handling="clip", seed=None, ): """Calculate confidence intervals. Args: method (str): One of "jacobian", "hessian", "robust", "cluster_robust", "strata_robust". Default "jacobian". "cluster_robust" is only available if design_info containts a columns called "psu" that identifies the primary sampling unit. "strata_robust" is only available if the columns "strata", "fpc" and "psu" are in design_info. ci_level (float): Confidence level for the calculation of confidence intervals. The default is 0.95. n_samples (int): Number of samples used to transform the covariance matrix of the internal parameter vector into the covariance matrix of the external parameters. For background information about internal and external params see :ref:`implementation_of_constraints`. This is only used if you are using constraints. bounds_handling (str): One of "clip", "raise", "ignore". Determines how bounds are handled. If "clip", confidence intervals are clipped at the bounds. Standard errors are only adjusted if a sampling step is necessary due to additional constraints. If "raise" and any lower or upper bound is binding, we raise an Error. If "ignore", boundary problems are simply ignored. seed (int): Seed for the random number generator. Only used if there are transforming constraints. Returns: Any: Pytree with the same structure as params containing lower bounds of confidence intervals. Any: Pytree with the same structure as params containing upper bounds of confidence intervals. 
""" free_cov = self._get_free_cov( method=method, n_samples=n_samples, bounds_handling=bounds_handling, seed=seed, ) free_lower, free_upper = calculate_ci( free_values=self._free_estimates.values, free_standard_errors=np.sqrt(np.diagonal(free_cov)), ci_level=ci_level, ) lower, upper = ( transform_free_values_to_params_tree( values, free_params=self._free_estimates, params=self._params ) for values in (free_lower, free_upper) ) return lower, upper def p_values( self, method="jacobian", n_samples=10_000, bounds_handling="clip", seed=None, ): """Calculate p-values. Args: method (str): One of "jacobian", "hessian", "robust", "cluster_robust", "strata_robust". Default "jacobian". "cluster_robust" is only available if design_info containts a columns called "psu" that identifies the primary sampling unit. "strata_robust" is only available if the columns "strata", "fpc" and "psu" are in design_info. ci_level (float): Confidence level for the calculation of confidence intervals. The default is 0.95. n_samples (int): Number of samples used to transform the covariance matrix of the internal parameter vector into the covariance matrix of the external parameters. For background information about internal and external params see :ref:`implementation_of_constraints`. This is only used if you are using constraints. bounds_handling (str): One of "clip", "raise", "ignore". Determines how bounds are handled. If "clip", confidence intervals are clipped at the bounds. Standard errors are only adjusted if a sampling step is necessary due to additional constraints. If "raise" and any lower or upper bound is binding, we raise an Error. If "ignore", boundary problems are simply ignored. seed (int): Seed for the random number generator. Only used if there are transforming constraints. Returns: Any: Pytree with the same structure as params containing p-values. Any: Pytree with the same structure as params containing p-values. 
""" free_cov = self._get_free_cov( method=method, n_samples=n_samples, bounds_handling=bounds_handling, seed=seed, ) free_p_values = calculate_p_values( free_values=self._free_estimates.values, free_standard_errors=np.sqrt(np.diagonal(free_cov)), ) p_values = transform_free_values_to_params_tree( free_p_values, free_params=self._free_estimates, params=self._params ) return p_values def to_pickle(self, path): """Save the LikelihoodResult object to pickle. Args: path (str, pathlib.Path): A str or pathlib.path ending in .pkl or .pickle. """ to_pickle(self, path=path) def _calculate_free_cov_ml( method, internal_estimates, converter, internal_jacobian, internal_hessian, n_samples, design_info, bounds_handling, seed, ): if method == "jacobian": int_cov = cov_jacobian(internal_jacobian) elif method == "hessian": int_cov = cov_hessian(internal_hessian) elif method == "robust": int_cov = cov_robust(jac=internal_jacobian, hess=internal_hessian) elif method == "cluster_robust": int_cov = cov_cluster_robust( jac=internal_jacobian, hess=internal_hessian, design_info=design_info ) elif method == "strata_robust": int_cov = cov_strata_robust( jac=internal_jacobian, hess=internal_hessian, design_info=design_info ) rng = get_rng(seed) free_cov = transform_covariance( internal_params=internal_estimates, internal_cov=int_cov, converter=converter, rng=rng, n_samples=n_samples, bounds_handling=bounds_handling, ) return free_cov ================================================ FILE: src/estimagic/estimate_msm.py ================================================ """Do a method of simlated moments estimation.""" import functools import warnings from collections.abc import Callable from dataclasses import asdict, dataclass, field from functools import cached_property from typing import Any, Dict, Union import numpy as np import pandas as pd from pybaum import leaf_names, tree_just_flatten from estimagic.msm_covs import cov_optimal, cov_robust from estimagic.msm_sensitivity import ( 
calculate_actual_sensitivity_to_noise, calculate_actual_sensitivity_to_removal, calculate_fundamental_sensitivity_to_noise, calculate_fundamental_sensitivity_to_removal, calculate_sensitivity_to_bias, calculate_sensitivity_to_weighting, ) from estimagic.msm_weighting import get_weighting_matrix from estimagic.shared_covs import ( FreeParams, calculate_ci, calculate_estimation_summary, calculate_free_estimates, calculate_p_values, calculate_summary_data_estimation, get_derivative_case, transform_covariance, transform_free_cov_to_cov, transform_free_values_to_params_tree, ) from optimagic import deprecations, mark from optimagic.deprecations import ( replace_and_warn_about_deprecated_bounds, ) from optimagic.differentiation.derivatives import first_derivative from optimagic.differentiation.numdiff_options import ( NumdiffPurpose, get_default_numdiff_options, pre_process_numdiff_options, ) from optimagic.exceptions import InvalidFunctionError from optimagic.optimization.fun_value import LeastSquaresFunctionValue from optimagic.optimization.optimize import minimize from optimagic.optimization.optimize_result import OptimizeResult from optimagic.parameters.block_trees import block_tree_to_matrix, matrix_to_block_tree from optimagic.parameters.bounds import Bounds, pre_process_bounds from optimagic.parameters.conversion import Converter, get_converter from optimagic.parameters.space_conversion import InternalParams from optimagic.parameters.tree_registry import get_registry from optimagic.shared.check_option_dicts import ( check_optimization_options, ) from optimagic.utilities import get_rng, to_pickle def estimate_msm( simulate_moments, empirical_moments, moments_cov, params, optimize_options, *, bounds=None, constraints=None, logging=None, simulate_moments_kwargs=None, weights="diagonal", jacobian=None, jacobian_kwargs=None, jacobian_numdiff_options=None, # deprecated log_options=None, lower_bounds=None, upper_bounds=None, numdiff_options=None, ): """Do a method of 
simulated moments or indirect inference estimation. This is a high level interface for our lower level functions for minimization, numerical differentiation, inference and sensitivity analysis. It does the full workflow for MSM or indirect inference estimation with just one function call. While we have good defaults, you can still configure each aspect of each steps vial the optional arguments of this functions. If you find it easier to do the minimization separately, you can do so and just provide the optimal parameters as ``params`` and set ``optimize_options=False``. Args: simulate_moments (callable): Function that takes params and potentially other keyword arguments and returns a pytree with simulated moments. If the function returns a dict containing the key ``"simulated_moments"`` we only use the value corresponding to that key. Other entries are stored in the log database if you use logging. empirical_moments (pandas.Series): A pytree with the same structure as the result of ``simulate_moments``. moments_cov (pandas.DataFrame): A block-pytree containing the covariance matrix of the empirical moments. This is typically calculated with our ``get_moments_cov`` function. params (pytree): A pytree containing the estimated or start parameters of the model. If the supplied parameters are estimated parameters, set optimize_options to False. Pytrees can be a numpy array, a pandas Series, a DataFrame with "value" column, a float and any kind of (nested) dictionary or list containing these elements. See :ref:`params` for examples. optimize_options (dict, Algorithm, str or False): Keyword arguments that govern the numerical optimization. Valid entries are all arguments of :func:`~estimagic.optimization.optimize.minimize` except for those that can be passed explicitly to ``estimate_msm``. If you pass False as ``optimize_options`` you signal that ``params`` are already the optimal parameters and no numerical optimization is needed. 
If you pass a str as optimize_options it is used as the ``algorithm`` option. bounds: Lower and upper bounds on the parameters. The most general and preferred way to specify bounds is an `optimagic.Bounds` object that collects lower, upper, soft_lower and soft_upper bounds. The soft bounds are used for sampling based optimizers but are not enforced during optimization. Each bound type mirrors the structure of params. Check our how-to guide on bounds for examples. If params is a flat numpy array, you can also provide bounds via any format that is supported by scipy.optimize.minimize. simulate_moments_kwargs (dict): Additional keyword arguments for ``simulate_moments``. weights (str): One of "diagonal" (default), "identity" or "optimal". Note that "optimal" refers to the asymptotically optimal weighting matrix and is often not a good choice due to large finite sample bias. constraints (list, dict): List with constraint dictionaries or single dict. See :ref:`constraints`. logging (pathlib.Path, str or False): Path to sqlite3 file (which typically has the file extension ``.db``. If the file does not exist, it will be created. log_options (dict): Additional keyword arguments to configure the logging. - "fast_logging" (bool): A boolean that determines if "unsafe" settings are used to speed up write processes to the database. This should only be used for very short running criterion functions where the main purpose of the log is a monitoring and it would not be catastrophic to get a corrupted database in case of a sudden system shutdown. If one evaluation of the criterion function (and gradient if applicable) takes more than 100 ms, the logging overhead is negligible. - "if_table_exists" (str): One of "extend", "replace", "raise". What to do if the tables we want to write to already exist. Default "extend". - "if_database_exists" (str): One of "extend", "replace", "raise". What to do if the database we want to write to already exists. Default "extend". 
jacobian (callable): A function that take ``params`` and potentially other keyword arguments and returns the jacobian of simulate_moments with respect to the params. jacobian_kwargs (dict): Additional keyword arguments for the jacobian function. jacobian_numdiff_options (dict): Keyword arguments for the calculation of numerical derivatives for the calculation of standard errors. See :ref:`first_derivative` for details. Note that by default we increase the step_size by a factor of 2 compared to the rule of thumb for optimal step sizes. This is because many msm criterion functions are slightly noisy. Returns: dict: The estimated parameters, standard errors and sensitivity measures and covariance matrix of the parameters. """ # ================================================================================== # handle deprecations # ================================================================================== bounds = replace_and_warn_about_deprecated_bounds( lower_bounds=lower_bounds, upper_bounds=upper_bounds, bounds=bounds, ) if numdiff_options is not None: deprecations.throw_numdiff_options_deprecated_in_estimate_msm_future_warning() if jacobian_numdiff_options is not None: jacobian_numdiff_options = numdiff_options deprecations.throw_dict_constraints_future_warning_if_required(constraints) # ================================================================================== # Check and process inputs # ================================================================================== bounds = pre_process_bounds(bounds) # TODO: Replace dict_constraints with constraints, once we deprecate dictionary # constraints. 
dict_constraints = deprecations.pre_process_constraints(constraints) jacobian_numdiff_options = pre_process_numdiff_options(jacobian_numdiff_options) if jacobian_numdiff_options is None: jacobian_numdiff_options = get_default_numdiff_options( purpose=NumdiffPurpose.ESTIMATE_JACOBIAN ) if weights not in ["diagonal", "optimal", "identity"]: raise NotImplementedError("Custom weighting matrices are not yet implemented.") is_optimized = optimize_options is False if not is_optimized: # If optimize_options is not a dictionary and not False, we assume it represents # an algorithm. The actual testing of whether it is a valid algorithm is done # when `minimize` is called. if not isinstance(optimize_options, dict): optimize_options = {"algorithm": optimize_options} check_optimization_options( optimize_options, usage="estimate_msm", algorithm_mandatory=True, ) jac_case = get_derivative_case(jacobian) weights, internal_weights = get_weighting_matrix( moments_cov=moments_cov, method=weights, empirical_moments=empirical_moments, return_type="pytree_and_array", ) internal_moments_cov = block_tree_to_matrix( moments_cov, outer_tree=empirical_moments, inner_tree=empirical_moments, ) jacobian_kwargs = {} if jacobian_kwargs is None else jacobian_kwargs simulate_moments_kwargs = ( {} if simulate_moments_kwargs is None else simulate_moments_kwargs ) # ================================================================================== # Calculate estimates via minimization (if necessary) # ================================================================================== if is_optimized: estimates = params opt_res = None else: funcs = get_msm_optimization_functions( simulate_moments=simulate_moments, empirical_moments=empirical_moments, weights=weights, simulate_moments_kwargs=simulate_moments_kwargs, # Always pass None because we do not support closed form jacobians during # optimization yet. 
Otherwise we would get a NotImplementedError jacobian=None, jacobian_kwargs=jacobian_kwargs, ) opt_res = minimize( bounds=bounds, constraints=constraints, logging=logging, log_options=log_options, params=params, **funcs, # contains the criterion func and possibly more **optimize_options, ) estimates = opt_res.params # ================================================================================== # do first function evaluations # ================================================================================== try: sim_mom_eval = simulate_moments(estimates, **simulate_moments_kwargs) except (KeyboardInterrupt, SystemExit): raise except Exception as e: msg = "Error while evaluating simulate_moments at estimated params." raise InvalidFunctionError(msg) from e if callable(jacobian): try: jacobian_eval = jacobian(estimates, **jacobian_kwargs) except (KeyboardInterrupt, SystemExit): raise except Exception as e: msg = "Error while evaluating derivative at estimated params." raise InvalidFunctionError(msg) from e else: jacobian_eval = None # ================================================================================== # get converter for params and function outputs # ================================================================================== if isinstance(sim_mom_eval, dict) and "simulated_moments" in sim_mom_eval: func_eval = {"contributions": sim_mom_eval["simulated_moments"]} else: func_eval = {"contributions": sim_mom_eval} converter, internal_estimates = get_converter( params=estimates, constraints=dict_constraints, bounds=bounds, func_eval=func_eval, solver_type="contributions", derivative_eval=jacobian_eval, ) # ================================================================================== # Calculate internal jacobian # ================================================================================== if jac_case == "closed-form": x = converter.params_to_internal(estimates) int_jac = converter.derivative_to_internal(jacobian_eval, x) else: def 
func(x): params = converter.params_from_internal(x) sim_mom = simulate_moments(params, **simulate_moments_kwargs) if isinstance(sim_mom, dict) and "simulated_moments" in sim_mom: sim_mom = sim_mom["simulated_moments"] registry = get_registry(extended=True) out = np.array(tree_just_flatten(sim_mom, registry=registry)) return out int_jac = first_derivative( func=func, params=internal_estimates.values, bounds=Bounds( lower=internal_estimates.lower_bounds, upper=internal_estimates.upper_bounds, ), error_handling="continue", **asdict(jacobian_numdiff_options), ).derivative # ================================================================================== # Calculate external jac (if no constraints and not closed form ) # ================================================================================== if dict_constraints in [None, []] and jacobian_eval is None and int_jac is not None: jacobian_eval = matrix_to_block_tree( int_jac, outer_tree=empirical_moments, inner_tree=estimates, ) if jacobian_eval is None: _no_jac_reason = ( "no closed form jacobian was provided and there are constraints" ) else: _no_jac_reason = None # ================================================================================== # Create MomentsResult # ================================================================================== free_estimates = calculate_free_estimates(estimates, internal_estimates) res = MomentsResult( _params=estimates, _weights=weights, _converter=converter, _optimize_result=opt_res, _internal_weights=internal_weights, _internal_moments_cov=internal_moments_cov, _internal_jacobian=int_jac, _jacobian=jacobian_eval, _no_jacobian_reason=_no_jac_reason, _empirical_moments=empirical_moments, _internal_estimates=internal_estimates, _free_estimates=free_estimates, _has_constraints=dict_constraints not in [None, []], ) return res def get_msm_optimization_functions( simulate_moments, empirical_moments, weights, *, simulate_moments_kwargs=None, jacobian=None, 
    jacobian_kwargs=None,
):
    """Construct criterion functions and their derivatives for msm estimation.

    Args:
        simulate_moments (callable): Function that takes params and potentially
            other keyword arguments and returns simulated moments as a pandas
            Series. Alternatively, the function can return a dict with any number
            of entries as long as one of those entries is "simulated_moments".
        empirical_moments (pandas.Series): A pandas series with the empirical
            equivalents of the simulated moments.
        weights (pytree): The weighting matrix as block pytree.
        simulate_moments_kwargs (dict): Additional keyword arguments for
            ``simulate_moments``.
        jacobian (callable or pandas.DataFrame): A function that takes ``params``
            and potentially other keyword arguments and returns the jacobian of
            simulate_moments with respect to the params. Alternatively you can
            pass a pandas.DataFrame with the jacobian at the optimal parameters.
            This is only possible if you pass ``optimize_options=False``.
        jacobian_kwargs (dict): Additional keyword arguments for jacobian.

    Returns:
        dict: Dictionary containing at least the entry "fun". If enough inputs
            are provided it also contains the entries "jac" and "fun_and_jac".
            All values are functions that take params as only argument.

    """
    # Convert the block-pytree weighting matrix into a flat 2d array that is
    # aligned with the flattened moments.
    flat_weights = block_tree_to_matrix(
        weights,
        outer_tree=empirical_moments,
        inner_tree=empirical_moments,
    )
    # Cholesky factor L of W (W = L L'). Expressing the criterion through
    # residuals = deviations @ L turns the quadratic form dev' W dev into a
    # plain sum of squared residuals, i.e. a least-squares problem.
    chol_weights = np.linalg.cholesky(flat_weights)
    registry = get_registry(extended=True)
    flat_emp_mom = tree_just_flatten(empirical_moments, registry=registry)
    # Bind user-provided kwargs; each helper returns None for non-callables.
    _simulate_moments = _partial_kwargs(simulate_moments, simulate_moments_kwargs)
    _jacobian = _partial_kwargs(jacobian, jacobian_kwargs)
    criterion = mark.least_squares(
        functools.partial(
            _msm_criterion,
            simulate_moments=_simulate_moments,
            flat_empirical_moments=flat_emp_mom,
            chol_weights=chol_weights,
            registry=registry,
        )
    )
    out = {"fun": criterion}
    if _jacobian is not None:
        raise NotImplementedError(
            "Closed form jacobians are not yet supported in estimate_msm"
        )
    return out


def _msm_criterion(
    params, simulate_moments, flat_empirical_moments, chol_weights, registry
):
    """Calculate msm criterion given parameters and building blocks."""
    simulated = simulate_moments(params)
    # Unpack dict-valued output; extra entries are only relevant for logging.
    if isinstance(simulated, dict) and "simulated_moments" in simulated:
        simulated = simulated["simulated_moments"]
    # Fast path: a 1d array is already flat, no pytree flattening needed.
    if isinstance(simulated, np.ndarray) and simulated.ndim == 1:
        simulated_flat = simulated
    else:
        simulated_flat = np.array(tree_just_flatten(simulated, registry=registry))
    deviations = simulated_flat - flat_empirical_moments
    # Weighted residuals: sum(residuals**2) == deviations' W deviations.
    residuals = deviations @ chol_weights
    return LeastSquaresFunctionValue(value=residuals)


def _partial_kwargs(func, kwargs):
    """Partial keyword arguments into a function.

    In contrast to normal partial this works if kwargs is None. If func is not
    a callable it simply returns None.
    """
    if isinstance(func, Callable):
        if kwargs not in (None, {}):
            out = functools.partial(func, **kwargs)
        else:
            out = func
    else:
        out = None
    return out


@dataclass
class MomentsResult:
    """Method of moments estimation results object."""

    _params: Any
    _internal_estimates: InternalParams
    _free_estimates: FreeParams
    _weights: Any
    _converter: Converter
    _internal_moments_cov: np.ndarray
    _internal_weights: np.ndarray
    _internal_jacobian: np.ndarray
    _empirical_moments: Any
    _has_constraints: bool
    _optimize_result: Union[OptimizeResult, None] = None
    _jacobian: Any = None
    _no_jacobian_reason: Union[str, None] = None
    _cache: Dict = field(default_factory=dict)

    def _get_free_cov(self, method, n_samples, bounds_handling, seed):
        # Compute (or fetch from cache) the covariance matrix of the free
        # parameters. Results are cached per full argument tuple, but only
        # when a seed is given, because only then the result is deterministic.
        if method not in {"optimal", "robust"}:
            msg = f"Invalid method {method}. method must be in {'optimal', 'robust'}"
            raise ValueError(msg)
        args = (method, n_samples, bounds_handling, seed)
        is_cached = args in self._cache
        if is_cached:
            free_cov = self._cache[args]
        else:
            free_cov = _calculate_free_cov_msm(
                internal_estimates=self._internal_estimates,
                internal_jacobian=self._internal_jacobian,
                internal_moments_cov=self._internal_moments_cov,
                internal_weights=self._internal_weights,
                converter=self._converter,
                method=method,
                n_samples=n_samples,
                bounds_handling=bounds_handling,
                seed=seed,
            )
            if seed is not None:
                self._cache[args] = free_cov
            elif self._converter.has_transforming_constraints:
                # Without a seed the sampling step in transform_covariance is
                # random, so repeated calls can return different values.
                msg = (
                    "seed is set to None and constraints are transforming. This leads "
                    "to randomness in the result. To avoid random behavior, choose a "
                    "non-None seed."
                )
                warnings.warn(msg)
        return free_cov

    @property
    def params(self):
        return self._params

    @property
    def optimize_result(self):
        return self._optimize_result

    @property
    def weights(self):
        return self._weights

    @property
    def jacobian(self):
        return self._jacobian

    # Cached default-argument variants of the public methods below.
    @cached_property
    def _se(self):
        return self.se()

    @cached_property
    def _cov(self):
        return self.cov()

    @cached_property
    def _summary(self):
        return self.summary()

    @cached_property
    def _ci(self):
        return self.ci()

    @cached_property
    def _p_values(self):
        return self.p_values()

    def se(
        self,
        method="robust",
        n_samples=10_000,
        bounds_handling="clip",
        seed=None,
    ):
        """Calculate standard errors.

        Args:
            method (str): One of "robust", "optimal". Despite the name, "optimal"
                is not recommended in finite samples and "optimal" standard
                errors are only valid if the asymptotically optimal weighting
                matrix has been used. It is only supported because it is needed
                to calculate sensitivity measures.
            n_samples (int): Number of samples used to transform the covariance
                matrix of the internal parameter vector into the covariance
                matrix of the external parameters. For background information
                about internal and external params see
                :ref:`implementation_of_constraints`. This is only used if you
                are using constraints.
            bounds_handling (str): One of "clip", "raise", "ignore". Determines
                how bounds are handled. If "clip", confidence intervals are
                clipped at the bounds. Standard errors are only adjusted if a
                sampling step is necessary due to additional constraints. If
                "raise" and any lower or upper bound is binding, we raise an
                Error. If "ignore", boundary problems are simply ignored.
            seed (int): Seed for the random number generator. Only used if there
                are transforming constraints.

        Returns:
            Any: A pytree with the same structure as params containing standard
                errors for the parameter estimates.
        """
        free_cov = self._get_free_cov(
            method=method,
            n_samples=n_samples,
            bounds_handling=bounds_handling,
            seed=seed,
        )
        # Standard errors are the square roots of the covariance diagonal.
        free_se = np.sqrt(np.diagonal(free_cov))
        se = transform_free_values_to_params_tree(
            values=free_se,
            free_params=self._free_estimates,
            params=self._params,
        )
        return se

    def cov(
        self,
        method="robust",
        n_samples=10_000,
        bounds_handling="clip",
        return_type="pytree",
        seed=None,
    ):
        """Calculate the variance-covariance matrix of the estimated parameters.

        Args:
            method (str): One of "robust", "optimal". Despite the name, "optimal"
                is not recommended in finite samples and "optimal" standard
                errors are only valid if the asymptotically optimal weighting
                matrix has been used. It is only supported because it is needed
                to calculate sensitivity measures.
            n_samples (int): Number of samples used to transform the covariance
                matrix of the internal parameter vector into the covariance
                matrix of the external parameters. For background information
                about internal and external params see
                :ref:`implementation_of_constraints`. This is only used if you
                are using constraints.
            bounds_handling (str): One of "clip", "raise", "ignore". Determines
                how bounds are handled. If "clip", confidence intervals are
                clipped at the bounds. Standard errors are only adjusted if a
                sampling step is necessary due to additional constraints. If
                "raise" and any lower or upper bound is binding, we raise an
                Error. If "ignore", boundary problems are simply ignored.
            return_type (str): One of "pytree", "array" or "dataframe". Default
                pytree. If "array", a 2d numpy array with the covariance is
                returned. If "dataframe", a pandas DataFrame with parameter
                names in the index and columns are returned.
            seed (int): Seed for the random number generator. Only used if there
                are transforming constraints.

        Returns:
            Any: The covariance matrix of the estimated parameters as
                block-pytree or numpy array.

        """
        free_cov = self._get_free_cov(
            method=method,
            n_samples=n_samples,
            bounds_handling=bounds_handling,
            seed=seed,
        )
        cov = transform_free_cov_to_cov(
            free_cov=free_cov,
            free_params=self._free_estimates,
            params=self._params,
            return_type=return_type,
        )
        return cov

    def summary(
        self,
        method="robust",
        n_samples=10_000,
        ci_level=0.95,
        bounds_handling="clip",
        seed=None,
    ):
        """Create a summary of estimation results.

        Args:
            method (str): One of "robust", "optimal". Despite the name, "optimal"
                is not recommended in finite samples and "optimal" standard
                errors are only valid if the asymptotically optimal weighting
                matrix has been used. It is only supported because it is needed
                to calculate sensitivity measures.
            ci_level (float): Confidence level for the calculation of confidence
                intervals. The default is 0.95.
            n_samples (int): Number of samples used to transform the covariance
                matrix of the internal parameter vector into the covariance
                matrix of the external parameters. For background information
                about internal and external params see
                :ref:`implementation_of_constraints`. This is only used if you
                are using constraints.
            bounds_handling (str): One of "clip", "raise", "ignore". Determines
                how bounds are handled. If "clip", confidence intervals are
                clipped at the bounds. Standard errors are only adjusted if a
                sampling step is necessary due to additional constraints. If
                "raise" and any lower or upper bound is binding, we raise an
                Error. If "ignore", boundary problems are simply ignored.
            seed (int): Seed for the random number generator. Only used if there
                are transforming constraints.

        Returns:
            Any: The estimation summary as pytree of DataFrames.

        """
        summary_data = calculate_summary_data_estimation(
            self,
            free_estimates=self._free_estimates,
            method=method,
            ci_level=ci_level,
            n_samples=n_samples,
            bounds_handling=bounds_handling,
            seed=seed,
        )
        summary = calculate_estimation_summary(
            summary_data=summary_data,
            names=self._free_estimates.all_names,
            free_names=self._free_estimates.free_names,
        )
        return summary

    def ci(
        self,
        method="robust",
        n_samples=10_000,
        ci_level=0.95,
        bounds_handling="clip",
        seed=None,
    ):
        """Calculate confidence intervals.

        Args:
            method (str): One of "robust", "optimal". Despite the name, "optimal"
                is not recommended in finite samples and "optimal" standard
                errors are only valid if the asymptotically optimal weighting
                matrix has been used. It is only supported because it is needed
                to calculate sensitivity measures.
            ci_level (float): Confidence level for the calculation of confidence
                intervals. The default is 0.95.
            n_samples (int): Number of samples used to transform the covariance
                matrix of the internal parameter vector into the covariance
                matrix of the external parameters. For background information
                about internal and external params see
                :ref:`implementation_of_constraints`. This is only used if you
                are using constraints.
            bounds_handling (str): One of "clip", "raise", "ignore". Determines
                how bounds are handled. If "clip", confidence intervals are
                clipped at the bounds. Standard errors are only adjusted if a
                sampling step is necessary due to additional constraints. If
                "raise" and any lower or upper bound is binding, we raise an
                Error. If "ignore", boundary problems are simply ignored.
            seed (int): Seed for the random number generator. Only used if there
                are transforming constraints.

        Returns:
            Any: Pytree with the same structure as params containing lower
                bounds of confidence intervals.
            Any: Pytree with the same structure as params containing upper
                bounds of confidence intervals.

        """
        free_cov = self._get_free_cov(
            method=method,
            n_samples=n_samples,
            bounds_handling=bounds_handling,
            seed=seed,
        )
        free_lower, free_upper = calculate_ci(
            free_values=self._free_estimates.values,
            free_standard_errors=np.sqrt(np.diagonal(free_cov)),
            ci_level=ci_level,
        )
        # Map both bound arrays back to the pytree structure of params.
        lower, upper = (
            transform_free_values_to_params_tree(
                values, free_params=self._free_estimates, params=self._params
            )
            for values in (free_lower, free_upper)
        )
        return lower, upper

    def p_values(
        self,
        method="robust",
        n_samples=10_000,
        bounds_handling="clip",
        seed=None,
    ):
        """Calculate p-values.

        Args:
            method (str): One of "robust", "optimal". Despite the name, "optimal"
                is not recommended in finite samples and "optimal" standard
                errors are only valid if the asymptotically optimal weighting
                matrix has been used. It is only supported because it is needed
                to calculate sensitivity measures.
            n_samples (int): Number of samples used to transform the covariance
                matrix of the internal parameter vector into the covariance
                matrix of the external parameters. For background information
                about internal and external params see
                :ref:`implementation_of_constraints`. This is only used if you
                are using constraints.
            bounds_handling (str): One of "clip", "raise", "ignore". Determines
                how bounds are handled. If "clip", confidence intervals are
                clipped at the bounds. Standard errors are only adjusted if a
                sampling step is necessary due to additional constraints. If
                "raise" and any lower or upper bound is binding, we raise an
                Error. If "ignore", boundary problems are simply ignored.
            seed (int): Seed for the random number generator. Only used if there
                are transforming constraints.

        Returns:
            Any: Pytree with the same structure as params containing p-values.

        """
        free_cov = self._get_free_cov(
            method=method,
            n_samples=n_samples,
            bounds_handling=bounds_handling,
            seed=seed,
        )
        free_p_values = calculate_p_values(
            free_values=self._free_estimates.values,
            free_standard_errors=np.sqrt(np.diagonal(free_cov)),
        )
        p_values = transform_free_values_to_params_tree(
            free_p_values, free_params=self._free_estimates, params=self._params
        )
        return p_values

    def sensitivity(
        self,
        kind="bias",
        n_samples=10_000,
        bounds_handling="clip",
        seed=None,
        return_type="pytree",
    ):
        """Calculate sensitivity measures for moments estimates.

        The sensitivity measures are based on the following papers:

        Andrews, Gentzkow & Shapiro (2017, Quarterly Journal of Economics)

        Honore, Jorgensen & de Paula
        (https://onlinelibrary.wiley.com/doi/full/10.1002/jae.2779)

        In the papers the different kinds of sensitivity measures are just
        called m1, e2, e3, e4, e5 and e6. We try to give them more informative
        names, but list the original names for references.

        Args:
            kind (str): The following kinds are supported:

                - "bias": Originally m1. How strongly would the parameter
                  estimates be biased if the kth moment was misspecified, i.e
                  not zero in expectation?
                - "noise_fundamental": Originally e2. How much precision would
                  be lost if the kth moment was subject to a little additional
                  noise if the optimal weighting matrix was used?
                - "noise": Originally e3. How much precision would be lost if
                  the kth moment was subject to a little additional noise?
                - "removal": Originally e4. How much precision would be lost if
                  the kth moment was excluded from the estimation?
                - "removal_fundamental": Originally e5. How much precision
                  would be lost if the kth moment was excluded from the
                  estimation if the asymptotically optimal weighting matrix was
                  used.
                - "weighting": Originally e6. How would the precision change if
                  the weight of the kth moment is increased a little?

            n_samples (int): Number of samples used to transform the covariance
                matrix of the internal parameter vector into the covariance
                matrix of the external parameters. For background information
                about internal and external params see
                :ref:`implementation_of_constraints`. This is only used if you
                are using constraints.
            bounds_handling (str): One of "clip", "raise", "ignore". Determines
                how bounds are handled. If "clip", confidence intervals are
                clipped at the bounds. Standard errors are only adjusted if a
                sampling step is necessary due to additional constraints. If
                "raise" and any lower or upper bound is binding, we raise an
                Error. If "ignore", boundary problems are simply ignored.
            seed (int): Seed for the random number generator. Only used if there
                are transforming constraints.
            return_type (str): One of "array", "dataframe" or "pytree". Default
                pytree. If your params or moments have a very nested format,
                return_type "dataframe" might be the better choice.

        Returns:
            Any: The sensitivity measure as a pytree, numpy array or DataFrame.
                In 2d formats, the sensitivity measures have one row per
                estimated parameter and one column per moment.

        """
        if self._has_constraints:
            raise NotImplementedError(
                "Sensitivity measures with constraints are not yet implemented."
            )
        jac = self._internal_jacobian
        weights = self._internal_weights
        moments_cov = self._internal_moments_cov
        params_cov = self._get_free_cov(
            method="robust",
            n_samples=n_samples,
            bounds_handling=bounds_handling,
            seed=seed,
        )
        # NOTE(review): weights_opt is passed straight to cov_optimal, which
        # presumably expects an array — confirm get_weighting_matrix's default
        # return_type matches that here.
        weights_opt = get_weighting_matrix(
            moments_cov=moments_cov,
            method="optimal",
            empirical_moments=self._empirical_moments,
        )
        params_cov_opt = cov_optimal(jac, weights_opt)
        if kind == "bias":
            raw = calculate_sensitivity_to_bias(jac=jac, weights=weights)
        elif kind == "noise_fundamental":
            raw = calculate_fundamental_sensitivity_to_noise(
                jac=jac,
                weights=weights_opt,
                moments_cov=moments_cov,
                params_cov_opt=params_cov_opt,
            )
        elif kind == "noise":
            m1 = calculate_sensitivity_to_bias(jac=jac, weights=weights)
            raw = calculate_actual_sensitivity_to_noise(
                sensitivity_to_bias=m1,
                weights=weights,
                moments_cov=moments_cov,
                params_cov=params_cov,
            )
        elif kind == "removal":
            raw = calculate_actual_sensitivity_to_removal(
                jac=jac,
                weights=weights,
                moments_cov=moments_cov,
                params_cov=params_cov,
            )
        elif kind == "removal_fundamental":
            raw = calculate_fundamental_sensitivity_to_removal(
                jac=jac,
                moments_cov=moments_cov,
                params_cov_opt=params_cov_opt,
            )
        elif kind == "weighting":
            raw = calculate_sensitivity_to_weighting(
                jac=jac,
                weights=weights,
                moments_cov=moments_cov,
                params_cov=params_cov,
            )
        else:
            raise ValueError(f"Invalid kind: {kind}")
        # Convert the raw 2d measure into the requested output format.
        if return_type == "array":
            out = raw
        elif return_type == "pytree":
            out = matrix_to_block_tree(
                raw,
                outer_tree=self._params,
                inner_tree=self._empirical_moments,
            )
        elif return_type == "dataframe":
            registry = get_registry(extended=True)
            row_names = self._internal_estimates.names
            col_names = leaf_names(self._empirical_moments, registry=registry)
            out = pd.DataFrame(
                data=raw,
                index=row_names,
                columns=col_names,
            )
        else:
            msg = (
                f"Invalid return type: {return_type}. Valid are 'pytree', 'array' "
                "and 'dataframe'"
            )
            raise ValueError(msg)
        return out

    def to_pickle(self, path):
        """Save the MomentsResult object to pickle.
Args: path (str, pathlib.Path): A str or pathlib.path ending in .pkl or .pickle. """ to_pickle(self, path=path) def _calculate_free_cov_msm( internal_estimates, internal_jacobian, internal_moments_cov, internal_weights, converter, method, n_samples, bounds_handling, seed, ): if method == "optimal": internal_cov = cov_optimal(internal_jacobian, internal_weights) else: internal_cov = cov_robust( internal_jacobian, internal_weights, internal_moments_cov ) rng = get_rng(seed) free_cov = transform_covariance( internal_params=internal_estimates, internal_cov=internal_cov, converter=converter, n_samples=n_samples, rng=rng, bounds_handling=bounds_handling, ) return free_cov ================================================ FILE: src/estimagic/estimation_summaries.py ================================================ ================================================ FILE: src/estimagic/estimation_table.py ================================================ import re from copy import deepcopy from functools import partial from pathlib import Path from warnings import warn import numpy as np import pandas as pd from optimagic.shared.compat import pd_df_map suppress_performance_warnings = np.testing.suppress_warnings() suppress_performance_warnings.filter(category=pd.errors.PerformanceWarning) @suppress_performance_warnings def estimation_table( models, *, return_type="dataframe", render_options=None, show_col_names=True, show_col_groups=None, show_index_names=False, show_inference=True, show_stars=True, show_footer=True, custom_param_names=None, custom_col_names=None, custom_col_groups=None, custom_index_names=None, custom_notes=None, confidence_intervals=False, significance_levels=(0.1, 0.05, 0.01), append_notes=True, notes_label="Note:", stats_options=None, number_format=("{0:.3g}", "{0:.5f}", "{0:.4g}"), add_trailing_zeros=True, escape_special_characters=True, siunitx_warning=True, ): r"""Generate html or LaTex tables provided (lists of) of models. 
The function can create publication quality tables in various formats from statsmodels or estimagic results. It allows for extensive customization via optional arguments and almost limitless flexibility when using a two-stage approach where the ``return_type`` is set to ``"render_inputs"``, the resulting dictionary representation of the table is modified and that modified version is then passed to ``render_latex`` or ``render_html``. The formatting of the numbers in the table is completely configurable via the ``number_format`` argument. By default we round to three significant digits (i.e. the three leftmost non-zero digits are displayed). This is very different from other table packages and motivated by the fact that most estimation tables give a wrong feeling of precision by showing too many decimal points. Args: models (list): list of estimation results. The models can come from statmodels or be constructed from the outputs of `estimagic.estimate_ml` or `estimagic.estimate_msm`. With a little bit of work it is also possible to construct them out of R or other results. If a model is not a statsmodels results they must be dictionaries with the following entries: "params" (a DataFrame with value column), "info" (a dictionary with summary statistics such as "n_obs", "rsquared", ...) and "name" (a string), or a DataFrame with value column. If a models is a statsmodels result, model.endog_names is used as name and the rest is extracted from corresponding statsmodels attributes. The model names do not have to be unique but if they are not, models with the same name need to be grouped together. return_type (str): Can be "dataframe", "latex", "html", "render_inputs" or a file path with the extension .tex or .html. If "render_inputs" is passed, a dictionary with the entries "body", "footer" and other information is returned. The entries can be modified by the user ( e.g. change formatting, renameof columns or index, ...) 
and then passed to ``render_latex`` or ``render_html``. Default "dataframe". render_options (dict): a dictionary with keyword arguments that are passed to df.style.to_latex or df.style.to_html, depending on the return_type. The default is None. show_col_names (bool): If True, the column names are displayed. The default column names are the model names if the model names are unique, otherwise (1), (2), etc.. Default True. show_col_groups (bool): If True, the column groups are displayed. The default column groups are the model names if the model names are not unique and undefined otherwise. Default None. None means that the column groups are displayed if they are defined. show_index_names (bool): If True, the index names are displayed. Default False. This is mostly relevant when working with estimagic style params DataFrames with a MultiIndex. show_inference(bool): If True, inference (standard errors or confidence intervals) are displayed below parameter values. Default True. show_stars (bool): a boolean variable for displaying significance stars. Default is True. show_footer (bool): a boolean variable for displaying statistics, e.g. R2, Obs numbers. Default is True. Which statistics are displayed and how they are labeled can be determined via ``stats_options``. custom_param_names (dict): Dictionary that is used to rename parameters. The keys are the old parameter names or index entries. The values are the new names. Default None. custom_col_names (dict or list): A list of column names or dict to rename the default column names. The default column names are the model names if the model names are unique, otherwise (1), (2), etc.. custom_col_groups (dict or list): A list of column group or dict to rename the default column groups. The default column groups are the model names if the model names are not unique and undefined otherwise. custom_index_names (dict or list): Dictionary or list to set the names of the index levels of the parameters. 
This is mostly relevant when working with estimagic style params DataFrames with a MultiIndex and only used if "index_names" is set to True in the render_options. Default None. custom_notes (list): A list of strings for additional notes. Default is None. confidence_intervals (bool): If True, display confidence intervals as inference values. Display standard errors otherwise. Default False. significance_levels (list): a list of floats for p value's significance cut-off values. This is used to generate the significance stars. Default is [0.1,0.05,0.01]. append_notes (bool): A boolean variable for printing p value cutoff explanation and additional notes, if applicable. Default is True. notes_label (str): A sting to print as the title of the notes section, if applicable. Default is 'Notes' stats_options (dict): A dictionary that determines which statistics (e.g. R-Squared, No. of Observations) are displayed and how they are labeled. The keys are the names of the statistics inside the model['info'] dictionary or attribute names of a statsmodels results object. The values are the new labels to be displayed for those statistics, i.e. the set of the values is used as row names in the table. number_format (int, str, iterable or callable): A callable, iterable, integer or string that is used to apply string formatter(s) to floats in the table. Default ("{0:.3g}", "{0:.5f}", "{0:.4g}"). add_trailing_zeros (bool): If True, format floats such that they have same number of digits after the decimal point. Default True. siunitx_warning (bool): If True, print warning about LaTex preamble to add for proper compilation of when working with siunitx package. Default True. escape_special_characters (bool): If True, replaces special characters in parameter and model names with LaTeX or HTML safe sequences. 
Returns: res_table (data frame, str or dictionary): depending on the rerturn type, data frame with formatted strings, a string for html or latex tables, or a dictionary with statistics and parameters dataframes, and strings for footers is returned. If the return type is a path, the function saves the resulting table at the given path. """ if not isinstance(models, (tuple, list)): raise TypeError(f"models must be a list or tuple. Not: {type(models)}") models = [_process_model(model) for model in models] model_names = _get_model_names(models) default_col_names, default_col_groups = _get_default_column_names_and_groups( model_names ) column_groups = _customize_col_groups( default_col_groups=default_col_groups, custom_col_groups=custom_col_groups ) column_names = _customize_col_names( default_col_names=default_col_names, custom_col_names=custom_col_names ) show_col_groups = _update_show_col_groups(show_col_groups, column_groups) stats_options = _set_default_stats_options(stats_options) body, footer = _get_estimation_table_body_and_footer( models, column_names, column_groups, custom_param_names, custom_index_names, significance_levels, stats_options, show_col_names, show_col_groups, show_stars, show_inference, confidence_intervals, number_format, add_trailing_zeros, ) render_inputs = { "body": body, "footer": footer, "render_options": render_options, } if return_type == "render_inputs": out = render_inputs elif str(return_type).endswith("tex"): out = _render_latex( **render_inputs, show_footer=show_footer, append_notes=append_notes, notes_label=notes_label, significance_levels=significance_levels, custom_notes=custom_notes, siunitx_warning=siunitx_warning, show_index_names=show_index_names, show_col_names=show_col_names, escape_special_characters=escape_special_characters, ) elif str(return_type).endswith("html"): out = render_html( **render_inputs, show_footer=show_footer, append_notes=append_notes, notes_label=notes_label, custom_notes=custom_notes, 
            significance_levels=significance_levels,
            show_index_names=show_index_names,
            show_col_names=show_col_names,
            escape_special_characters=escape_special_characters,
        )
    elif return_type == "dataframe":
        if show_footer:
            # Align index names so body and footer can be stacked into a single
            # DataFrame with a consistent (Multi)Index.
            footer.index.names = body.index.names
            out = pd.concat([body.reset_index(), footer.reset_index()]).set_index(
                body.index.names
            )
        else:
            out = body
    else:
        raise ValueError(
            f"""Value of return type can be either of ['data_frame', 'render_inputs','latex' ,'html'] or a path ending with '.html' or '.tex'. Not: {return_type}."""
        )
    # If return_type is a file path, write the rendered table there instead of
    # returning it. NOTE(review): nothing is returned in the path case and no
    # encoding is passed to write_text — confirm this is intended.
    return_type = Path(return_type)
    if return_type.suffix not in (".html", ".tex"):
        return out
    else:
        return_type.write_text(out)


@suppress_performance_warnings
def render_latex(
    body,
    footer,
    render_options=None,
    show_footer=True,
    append_notes=True,
    notes_label="Note:",
    significance_levels=(0.1, 0.05, 0.01),
    custom_notes=None,
    siunitx_warning=True,
    show_index_names=False,
    show_col_names=True,
    show_col_groups=True,
    escape_special_characters=True,
):
    r"""Return estimation table in LaTeX format as string.

    Args:
        body (pandas.DataFrame): DataFrame with formatted strings of parameter
            values, inferences (standard errors or confidence intervals, if
            applicable) and significance stars (if applicable).
        footer (pandas.DataFrame): DataFrame with formatted strings of summary
            statistics (such as number of observations, r-squared, etc.).
        render_options (dict): A dictionary with custom kwargs to pass to
            pd.Styler.to_latex(), to update the default options. An example
            keyword argument is:
            - siunitx (bool): If True, the table is structured to be compatible
              with the siunitx package. Default is set to True internally.
            For the list of all possible arguments, see documentation of
            `pandas.io.formats.style.Styler.to_latex`.
        show_footer (bool): a boolean variable for displaying footer_df.
            Default True.
        append_notes (bool): A boolean variable for printing p value cutoff
            explanation and additional notes, if applicable. Default is True.
        notes_label (str): A string to print as the title of the notes section,
            if applicable. Default is 'Notes'.
        significance_levels (list or tuple): a list of floats for p value's
            significance cut-off values. Default is [0.1, 0.05, 0.01].
        custom_notes (list): A list of strings for additional notes. Default is
            None.
        siunitx_warning (bool): If True, print warning about LaTex preamble to
            add for proper compilation when working with the siunitx package.
            Default True.
        show_index_names (bool): If True, display index names in the table.
        show_col_names (bool): If True, the column names are displayed.
        show_col_groups (bool): If True, the column groups are displayed.
        escape_special_characters (bool): If True, replaces the characters
            &, %, $, #, _, {, }, ~, ^, and \ in parameter and model names with
            LaTeX-safe sequences.

    Returns:
        latex_str (str): The resulting string with Latex tabular code.

    """
    return _render_latex(
        body=body,
        footer=footer,
        render_options=render_options,
        show_footer=show_footer,
        append_notes=append_notes,
        notes_label=notes_label,
        significance_levels=significance_levels,
        custom_notes=custom_notes,
        siunitx_warning=siunitx_warning,
        show_index_names=show_index_names,
        show_col_names=show_col_names,
        show_col_groups=show_col_groups,
        escape_special_characters=escape_special_characters,
    )


def _render_latex(
    body,
    footer,
    render_options=None,
    show_footer=True,
    append_notes=True,
    notes_label="Note:",
    significance_levels=(0.1, 0.05, 0.01),
    custom_notes=None,
    siunitx_warning=True,
    show_index_names=False,
    show_col_names=True,
    show_col_groups=True,
    escape_special_characters=True,
):
    """See docstring of render_latex for more information."""
    # Styler.to_latex only exists from pandas 1.4.0 onward.
    if not pd.__version__ >= "1.4.0":
        raise ValueError(
            r"""render_latex or estimation_table with return_type="latex" requires pandas 1.4.0 or higher. Update to a newer version of pandas or use estimation_table with return_type="render_inputs" and manually render those results using the DataFrame.to_latex method. """
        )
    if siunitx_warning:
        warn(
            r"""Proper LaTeX compilation requires the package siunitx and adding \sisetup{ input-symbols = (), table-align-text-post = false, group-digits = false, } to your main tex file. To turn this warning off set value of siunitx_warning = False"""
        )
    body = body.copy(deep=True)
    # Detect whether confidence intervals (separated by ";") are shown in the
    # inference rows (index level value ""); if so wrap them in braces so
    # siunitx treats them as text.
    try:
        ci_in_body = body.loc[("",)][body.columns[0]].str.contains(";").any()
    except KeyError:
        ci_in_body = False
    if ci_in_body:
        body.loc[("",)] = pd_df_map(body.loc[("",)], "{{{}}}".format).values
    if body.columns.nlevels > 1:
        column_groups = body.columns.get_level_values(0)
    else:
        column_groups = None
    group_to_col_position = _create_group_to_col_position(column_groups)
    n_levels = body.index.nlevels
    n_columns = len(body.columns)
    # Styler expects the escape mode as a string ("latex") or None.
    if escape_special_characters:
        escape_special_characters = "latex"
    else:
        escape_special_characters = None
    body_styler = _get_updated_styler(
        body,
        show_index_names=show_index_names,
        show_col_names=show_col_names,
        show_col_groups=show_col_groups,
        escape_special_characters=escape_special_characters,
    )
    default_options = {
        "multicol_align": "c",
        "hrules": True,
        "siunitx": True,
        "column_format": "l" * n_levels + "S" * n_columns,
        "multirow_align": "t",
    }
    if render_options:
        default_options.update(render_options)
    latex_str = body_styler.to_latex(**default_options)
    # Insert \cmidrule separators under the column-group header row.
    if group_to_col_position:
        temp_str = "\n"
        for k in group_to_col_position:
            max_col = max(group_to_col_position[k]) + n_levels + 1
            min_col = min(group_to_col_position[k]) + n_levels + 1
            temp_str += f"\\cmidrule(lr){{{min_col}-{max_col}}}"
        temp_str += "\n"
        latex_str = (
            latex_str.split("\\\\", 1)[0]
            + "\\\\"
            + temp_str
            + latex_str.split("\\\\", 1)[1]
        )
    # Drop the closing rule so footer and notes can be appended first.
    latex_str = latex_str.split("\\bottomrule")[0]
    if show_footer:
        footer = footer.copy(deep=True)
        footer = footer.apply(_center_align_integers_and_non_numeric_strings, axis=1)
        footer_styler = footer.style
        stats_str = footer_styler.to_latex(**default_options)
        # Keep only the tabular rows of the footer, prefixed with a \midrule.
        if "\\midrule" in stats_str:
            stats_str = (
                "\\midrule" + stats_str.split("\\midrule")[1].split("\\bottomrule")[0]
            )
        else:
            stats_str = (
                "\\midrule" + stats_str.split("\\toprule")[1].split("\\bottomrule")[0]
            )
        latex_str += stats_str
    notes = _generate_notes_latex(
        append_notes, notes_label, significance_levels, custom_notes, body
    )
    latex_str += notes
    latex_str += "\\bottomrule\n\\end{tabular}\n"
    if latex_str.startswith("\\begin{table}"):
        latex_str += "\n\\end{table}\n"
    return latex_str


def render_html(
    body,
    footer,
    render_options=None,
    show_footer=True,
    append_notes=True,
    notes_label="Note:",
    custom_notes=None,
    significance_levels=(0.1, 0.05, 0.01),
    show_index_names=False,
    show_col_names=True,
    show_col_groups=True,
    escape_special_characters=True,
    **kwargs,  # noqa: ARG001
):
    """Return estimation table in html format as string.

    Args:
        body (pandas.DataFrame): DataFrame with formatted strings of parameter
            values, inferences (standard errors or confidence intervals, if
            applicable) and significance stars (if applicable).
        footer (pandas.DataFrame): DataFrame with formatted strings of summary
            statistics (such as number of observations, r-squared, etc.).
        notes (str): The html string with notes with additional information
            (e.g. mapping from pvalues to significance stars) to append to the
            footer of the estimation table string with LaTex code for the notes
            section.
        render_options (dict): A dictionary with custom kwargs to pass to
            pd.to_latex(), to update the default options. An example is
            `{header: False}` that disables displaying column names.
        show_footer (bool): a boolean variable for displaying footer_df.
            Default True.
        append_notes (bool): A boolean variable for printing p value cutoff
            explanation and additional notes, if applicable. Default is True.
        notes_label (str): A string to print as the title of the notes section,
            if applicable. Default is 'Notes'.
        significance_levels (list or tuple): a list of floats for p value's
            significance cut-off values. Default is [0.1, 0.05, 0.01].
        show_index_names (bool): If True, display index names in the table.
        show_col_names (bool): If True, the column names are displayed.
        show_col_groups (bool): If True, the column groups are displayed.
        escape_special_characters (bool): If True, replace the characters
            &, <, >, ', and " in parameter and model names with HTML-safe
            sequences.

    Returns:
        html_str (str): The resulting string with html tabular code.

    """
    # Styler.to_html only exists from pandas 1.4.0 onward.
    if not pd.__version__ >= "1.4.0":
        raise ValueError(
            r"""render_html or estimation_table with return_type="html" requires pandas 1.4.0 or higher. Update to a newer version of pandas or use estimation_table with return_type="render_inputs" and manually render those results using the DataFrame.to_html method. """
        )
    n_levels = body.index.nlevels
    n_columns = len(body.columns)
    html_str = ""
    # Styler expects the escape mode as a string ("html") or None.
    if escape_special_characters:
        escape_special_characters = "html"
    else:
        escape_special_characters = None
    body_styler = _get_updated_styler(
        body,
        show_index_names=show_index_names,
        show_col_names=show_col_names,
        show_col_groups=show_col_groups,
        escape_special_characters=escape_special_characters,
    )
    default_options = {"exclude_styles": True}
    if render_options:
        default_options.update(render_options)
    # NOTE(review): the split markers below and the template string that
    # follows contain no HTML tags — they look truncated (markup stripped
    # during extraction). Confirm against version control history.
    html_str = body_styler.to_html(**default_options).split("\n")[0]
    if show_footer:
        stats_str = """ """.format(n_levels + n_columns)
        stats_str += (
            footer.style.to_html(**default_options).split("\n")[1].split("\n")[0]
        )
        # Strip the brace wrappers that were added around numeric cells.
        stats_str = re.sub(
            r"(?<=[\d)}{)])}", "", re.sub(r"{(?=[}\d(])", "", stats_str)
        )
        html_str += stats_str
    notes = _generate_notes_html(
        append_notes, notes_label, significance_levels, custom_notes, body
    )
    html_str += notes
    html_str += "\n"
    return html_str


def _process_model(model):
    """Check model validity, convert to dictionary.

    Args:
        model: Estimation result. See docstring of estimation_table for more
            info.

    Returns:
        processed_model: A dictionary with keys params, info and name.

    """
    if isinstance(model, dict):
        params = model["params"].copy(deep=True)
        info = model.get("info", {})
        name = model.get("name", "")
    elif isinstance(model, pd.DataFrame):
        params = model.copy(deep=True)
        info = {}
        name = None
    else:
        # Fall back to treating the input as a statsmodels results object.
        try:
            params = _extract_params_from_sm(model)
            info = {**_extract_info_from_sm(model)}
            name = info.pop("name")
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception as e:
            raise TypeError(
                f"""Model can be of type dict, pd.DataFrame or a statsmodels result. Model {model} is of type {type(model)}."""
            ) from e
    # Normalize the p-value column name used throughout this module.
    if "pvalue" in params.columns:
        params = params.rename(columns={"pvalue": "p_value"})
    processed_model = {"params": params, "info": info, "name": name}
    return processed_model


def _get_estimation_table_body_and_footer(
    models,
    column_names,
    column_groups,
    custom_param_names,
    custom_index_names,
    significance_levels,
    stats_options,
    show_col_names,
    show_col_groups,
    show_stars,
    show_inference,
    confidence_intervals,
    number_format,
    add_trailing_zeros,
):
    """Create body and footer blocs with significance stars and inference values.

    Applies number formatting to parameters and summary statistics.
    Concatenates inference values to parameter values if applicable. Adds
    significance stars if applicable.

    Args:
        models (list): List of dictionaries with keys 'params', 'info' and
            'name'.
        column_names (list): List of strings to display as names of the model
            columns in estimation table.
        column_groups (list or NoneType): If defined, list of strings to
            display as names of groups of model columns in estimation table.
        custom_param_names (dict or list): A list of strings to display as
            parameter names or a mapping from original to custom parameter
            names.
        custom_index_names (dict or list): Dictionary or list to set the names
            of the index levels of the parameters.
        significance_levels (list): a list of floats for p value's significance
            cut-off values.
        stats_options (dict): A dictionary with displayed statistics names as
            keys, and statistics names to be retrieved from model['info'] as
            values.
        show_col_names (bool): If True, the column names are displayed.
        show_col_groups (bool): If True, the column groups are displayed.
        show_stars (bool): a boolean variable for printing significance stars.
        show_inference (bool): If True, inference (standard errors or
            confidence intervals) below param values.
        confidence_intervals (bool): If True, display confidence intervals as
            inference values.
        number_format (int, str, iterable or callable): A callable, iterable,
            integer or string that is used to apply string formatter(s) to
            floats in the table.
        add_trailing_zeros (bool): If True, format floats such that they have
            same number of digits after the decimal point.

    Returns:
        body (DataFrame): DataFrame with formatted strings of parameter and
            inference values and significance stars to display in estimation
            table.
        footer (DataFrame): DataFrame with formatted strings of summary
            statistics to display at the bottom of estimation table.

    """
    body, max_trail = _build_estimation_table_body(
        models,
        column_names,
        column_groups,
        custom_param_names,
        custom_index_names,
        show_col_names,
        show_col_groups,
        show_inference,
        show_stars,
        confidence_intervals,
        significance_levels,
        number_format,
        add_trailing_zeros,
    )
    # max_trail is forwarded so footer floats get the same number of trailing
    # zeros as the body floats.
    footer = _build_estimation_table_footer(
        models,
        stats_options,
        significance_levels,
        show_stars,
        number_format,
        add_trailing_zeros,
        max_trail,
    )
    footer.columns = body.columns
    return body, footer


def _build_estimation_table_body(
    models,
    column_names,
    column_groups,
    custom_param_names,
    custom_index_names,
    show_col_names,
    show_col_groups,
    show_inference,
    show_stars,
    confidence_intervals,
    significance_levels,
    number_format,
    add_trailing_zeros,
):
    """Create body bloc with significance stars and inference values.

    Applies number formatting to parameters. Concatenates inference values to
    parameter values if applicable. Adds significance stars if applicable.

    Args:
        models (list): List of dictionaries with keys 'params', 'info' and
            'name'.
        column_names (list): List of strings to display as names of the model
            columns in estimation table.
        column_groups (list or NoneType): If defined, list of strings to
            display as names of groups of model columns in estimation table.
        custom_param_names (dict or list): A list of strings to display as
            parameter names or a mapping from original to custom parameter
            names.
        custom_index_names (dict or list): Dictionary or list to set the names
            of the index levels of the parameters.
        significance_levels (list): a list of floats for p value's significance
            cut-off values.
        show_col_names (bool): If True, the column names are displayed.
        show_col_groups (bool): If True, the column groups are displayed.
        show_stars (bool): a boolean variable for printing significance stars.
        show_inference (bool): If True, inference (standard errors or
            confidence intervals) below param values.
        confidence_intervals (bool): If True, display confidence intervals as
            inference values.
        number_format (int, str, iterable or callable): A callable, iterable,
            integer or string that is used to apply string formatter(s) to
            floats in the table.
        add_trailing_zeros (bool): If True, format floats such that they have
            same number of digits after the decimal point.

    Returns:
        body (DataFrame): DataFrame with formatted strings of parameter and
            inference values and significance stars to display in estimation
            table.
        max_trail (int): Integer that shows the maximum number of digits after
            a decimal point in the parameters DataFrame. Is passed to
            `_build_estimation_table_footer` to get same number of trailing
            zeros as in parameters DataFrame and to render_latex for formatting
            tables in siunitx package.

    """
    dfs, max_trail = _reindex_and_float_format_params(
        models, show_inference, confidence_intervals, number_format, add_trailing_zeros
    )
    to_convert = []
    if show_stars:
        # Attach each model's p-values so stars can be derived per parameter.
        for df, mod in zip(dfs, models, strict=False):
            to_convert.append(
                pd.concat([df, mod["params"].reindex(df.index)["p_value"]], axis=1)
            )
    else:
        to_convert = dfs
    # convert DataFrames to string series with inference and significance
    # information.
    to_concat = [
        _convert_frame_to_string_series(
            df,
            significance_levels,
            show_stars,
        )
        for df in to_convert
    ]
    df = pd.concat(to_concat, axis=1)
    df = _process_frame_indices(
        df=df,
        custom_param_names=custom_param_names,
        custom_index_names=custom_index_names,
        show_col_names=show_col_names,
        show_col_groups=show_col_groups,
        column_names=column_names,
        column_groups=column_groups,
    )
    return df, max_trail


def _build_estimation_table_footer(
    models,
    stats_options,
    significance_levels,
    show_stars,
    number_format,
    add_trailing_zeros,
    max_trail,
):
    """Create footer bloc of estimation table.

    Applies number formatting to summary statistics.

    Args:
        models (list): List of dictionaries with keys 'params', 'info' and
            'name'.
        stats_options (dict): A dictionary with displayed statistics names as
            keys, and statistics names to be retrieved from model['info'] as
            values.
        significance_levels (list): a list of floats for p value's significance
            cut-off values.
        number_format (int, str, iterable or callable): A callable, iterable,
            integer or string that is used to apply string formatter(s) to
            floats in the table.
        add_trailing_zeros (bool): If True, format floats such that they have
            same number of digits after the decimal point.
        max_trail (int): If add_trailing_zeros is True, add corresponding
            number of trailing zeros to floats in the stats DataFrame to have
            number of digits after a decimal point equal to max_trail for each
            float.
Returns: footer (DataFrame): DataFrame with formatted strings of summary statistics to display at the bottom of estimation table. """ to_concat = [ _create_statistics_sr( mod, stats_options, significance_levels, show_stars, number_format, add_trailing_zeros, max_trail, ) for mod in models ] stats = pd.concat(to_concat, axis=1) return stats def _reindex_and_float_format_params( models, show_inference, confidence_intervals, number_format, add_trailing_zeros ): """Reindex all params DataFrames with a common index and apply number formatting.""" dfs = _get_params_frames_with_common_index(models) cols_to_format = _get_cols_to_format(show_inference, confidence_intervals) formatted_frames, max_trail = _apply_number_formatting_frames( dfs, cols_to_format, number_format, add_trailing_zeros ) return formatted_frames, max_trail def _get_params_frames_with_common_index(models): """Get a list of params frames, reindexed with a common index.""" dfs = [model["params"] for model in models] common_index = _get_common_index(dfs) out = [model["params"].reindex(common_index) for model in models] return out def _get_common_index(dfs): """Get common index from a list of DataFrames.""" common_index = [] for d_ in dfs: common_index += [ind for ind in d_.index.to_list() if ind not in common_index] return common_index def _get_cols_to_format(show_inference, confidence_intervals): """Get the list of names of columns that need to be formatted. By default, formatting is applied to parameter values. If inference values need to displayed, adds confidence intervals or standard erros to the list. 
""" cols = ["value"] if show_inference: if confidence_intervals: cols += ["ci_lower", "ci_upper"] else: cols.append("standard_error") return cols def _apply_number_formatting_frames(dfs, columns, number_format, add_trailing_zeros): """Apply string formatter to specific columns of a list of DataFrames.""" raw_formatted = [ _apply_number_format(df[columns], number_format, format_integers=False) for df in dfs ] max_trail = int(max([_get_digits_after_decimal(df) for df in raw_formatted])) if add_trailing_zeros: formatted = [ _apply_number_format(df, max_trail, format_integers=True) for df in raw_formatted ] else: formatted = raw_formatted return formatted, max_trail def _update_show_col_groups(show_col_groups, column_groups): """Set the value of show_col_groups to False or True given column_groups. Updates the default None to True if column_groups is not None. Sets to False otherwise. """ if show_col_groups is None: if column_groups is not None: show_col_groups = True else: show_col_groups = False return show_col_groups def _set_default_stats_options(stats_options): """Define some default summary statistics to display in estimation table.""" if stats_options is None: stats_options = { "n_obs": "Observations", "rsquared": "R$^2$", "rsquared_adj": "Adj. R$^2$", "resid_std_err": "Residual Std. Error", "fvalue": "F Statistic", } else: if not isinstance(stats_options, dict): raise TypeError( f"""stats_options can be of types dict or NoneType. Not: {type(stats_options)}.""" ) return stats_options def _get_model_names(processed_models): """Get names of model names if defined, set based on position otherwise. Args: processed_models (list): List of estimation results processed to dictionaries. Returns: names (list): List of model names given either by name attribute of each model if defined or the position (counting from 1) of each model in parentheses. 
""" names = [] for i, mod in enumerate(processed_models): if mod.get("name"): names.append(mod["name"]) else: names.append(f"({i + 1})") _check_order_of_model_names(names) return names def _check_order_of_model_names(model_names): """Check identically named models are adjacent. Args: model_names (list): List of model names. Raises: ValueError: if models that share a name are not next to each other. """ group_to_col_index = _create_group_to_col_position(model_names) for positions in group_to_col_index.values(): if positions != list(range(positions[0], positions[-1] + 1)): raise ValueError( "If there are repetitions in model_names, models with the " f"same name need to be adjacent. You provided: {model_names}" ) def _get_default_column_names_and_groups(model_names): """Get column names and groups to display in the estimation table. Args: model_names (list): List of model names. Returns: col_names (list): List of estimation column names to display in estimation table. Same as model_names if model_names are unique. Given by column position (counting from 1) in braces otherwise. col_groups (list or NoneType): If defined, list of strings unique values of which will define column groups. Not defined if model_names are unique. """ if len(set(model_names)) == len(model_names): col_groups = None col_names = model_names else: col_groups = model_names col_names = [f"({i + 1})" for i in range(len(model_names))] return col_names, col_groups def _customize_col_groups(default_col_groups, custom_col_groups): """Change default (inferred) column group titles using custom column groups. Args: default_col_groups (list or NoneType): The inferred column groups. custom_col_groups (list or dict): Dictionary mapping defautl column group titles to custom column group titles, if the defautl column groups are defined. Must be a list of the same lenght as models otherwise. Returns: col_groups (list): Column groups to display in estimation table. 
""" if custom_col_groups: if not default_col_groups: if not isinstance(custom_col_groups, list): raise ValueError( """With unique model names, multiple models can't be grouped under common group name. Provide list of unique group names instead, if you wish to add column level.""" ) col_groups = custom_col_groups else: if isinstance(custom_col_groups, list): col_groups = custom_col_groups elif isinstance(custom_col_groups, dict): col_groups = ( pd.Series(default_col_groups).replace(custom_col_groups).to_list() ) else: raise TypeError( f"""Invalid type for custom_col_groups. Can be either list or dictionary, or NoneType. Not: {type(col_groups)}.""" ) else: col_groups = default_col_groups return col_groups def _customize_col_names(default_col_names, custom_col_names): """Change default (inferred) column names using custom column names. Args: deafult_col_names (list): The default (inferred) column names. custom_col_names (list or dict): Dictionary mapping default column names to custom column names, or list to display as the name of each model column. Returns: column_names (list): The column names to display in the estimatino table. """ if not custom_col_names: col_names = default_col_names elif isinstance(custom_col_names, dict): col_names = list(pd.Series(default_col_names).replace(custom_col_names)) elif isinstance(custom_col_names, list): if not len(custom_col_names) == len(default_col_names): raise ValueError( f"""If provided as a list, custom_col_names should have same length as default_col_names. Lenght of custom_col_names {len(custom_col_names)} !=length of default_col_names {len(default_col_names)}""" ) elif any(isinstance(i, list) for i in custom_col_names): raise ValueError("Custom_col_names cannot be a nested list") col_names = custom_col_names else: raise TypeError( f"""Invalid type for custom_col_names. Can be either list or dictionary, or NoneType. 
Not: {col_names}.""" ) return col_names def _create_group_to_col_position(column_groups): """Get mapping from column groups to column positions. Args: column_names (list): The column groups to display in the estimatino table. Returns: group_to_col_index(dict): The mapping from column group titles to column positions. """ if column_groups is not None: group_to_col_index = {group: [] for group in list(set(column_groups))} for i, group in enumerate(column_groups): group_to_col_index[group].append(i) else: group_to_col_index = None return group_to_col_index def _convert_frame_to_string_series( df, significance_levels, show_stars, ): """Return processed value series with significance stars and inference information. Args: df (DataFrame): params DataFrame of the model significance_levels (list): see main docstring number_format (int, str, iterable or callable): see main docstring show_inference (bool): see main docstring confidence_intervals (bool): see main docstring show_stars (bool): see main docstring Returns: sr (pd.Series): string series with values and inferences. 
    value_sr = df["value"]
    if show_stars:
        # Bin p-values by the significance cut-offs; the outer bins (-1, 2)
        # cover the full [0, 1] range of p-values.
        sig_bins = [-1, *sorted(significance_levels)] + [2]
        value_sr += "$^{"
        value_sr += (
            pd.cut(
                df["p_value"],
                bins=sig_bins,
                labels=[
                    "*" * (len(significance_levels) - i)
                    for i in range(len(significance_levels) + 1)
                ],
            )
            .astype("str")
            .replace("nan", "")
            .replace(np.nan, "")
        )
        value_sr += " }$"
    if "ci_lower" in df:
        # Confidence-interval style inference: "(lower;upper)".
        ci_lower = df["ci_lower"]
        ci_upper = df["ci_upper"]
        inference_sr = "("
        inference_sr += ci_lower
        inference_sr += r";"
        inference_sr += ci_upper
        inference_sr += ")"
        sr = _combine_series(value_sr, inference_sr)
    elif "standard_error" in df:
        # Standard-error style inference: "(se)".
        standard_error = df["standard_error"]
        inference_sr = "(" + standard_error + ")"
        sr = _combine_series(value_sr, inference_sr)
    else:
        sr = value_sr
    # replace empty braces with empty string
    sr = sr.where(sr.apply(lambda x: bool(re.search(r"\d", x))), "")
    sr.name = ""
    return sr


def _combine_series(value_sr, inference_sr):
    """Merge value and inference series.

    Return string series with parameter values and precision values below
    respective param values.

    Args:
        value_sr (Series): string series of estimated parameter values.
        inference_sr (Series): string series of inference values.

    Returns:
        series: combined string series of param and inference values.

    """
    value_df = value_sr.to_frame(name="")
    original_cols = value_df.columns
    value_df.reset_index(drop=False, inplace=True)
    index_names = [item for item in value_df.columns if item not in original_cols]
    # set the index to even numbers, starting at 0
    value_df.index = value_df.index * 2
    inference_df = inference_sr.to_frame(name="")
    inference_df.reset_index(drop=False, inplace=True)
    # set the index to odd numbers, starting at 1
    inference_df.index = (inference_df.index * 2) + 1
    # Blank the innermost index level so inference rows show no label.
    inference_df[index_names[-1]] = ""
    # Interleave: each value row is followed by its inference row.
    df = pd.concat([value_df, inference_df]).sort_index()
    df.set_index(index_names, inplace=True, drop=True)
    return df[""]


def _create_statistics_sr(
    model,
    stats_options,
    significance_levels,
    show_stars,
    number_format,
    add_trailing_zeros,
    max_trail,
):
    """Process statistics values, return string series.

    Args:
        model (estimation result): see main docstring.
        stats_options (dict): see main docstring.
        significance_levels (list): see main docstring.
        show_stars (bool): see main docstring.
        number_format (int, str, iterable or callable): see main docstring.
        add_trailing_zeros (bool): If True, format floats such that they have
            same number of digits after the decimal point.
        max_trail (int): If add_trailing_zeros is True, add corresponding
            number of trailing zeros to floats in the stats DataFrame to have
            number of digits after a decimal point equal to max_trail for each
            float.

    Returns:
        series: string series with summary statistics values and additional
            info if applicable.

    """
    stats_values = {}
    # Copy so popping "show_dof" does not mutate the caller's dict.
    stats_options = deepcopy(stats_options)
    if "show_dof" in stats_options:
        show_dof = stats_options.pop("show_dof")
    else:
        show_dof = None
    for k in stats_options:
        # Map the model's raw statistic (or NaN if missing) to its display label.
        stats_values[stats_options[k]] = model["info"].get(k, np.nan)
    raw_formatted = _apply_number_format(
        pd.DataFrame(pd.Series(stats_values)), number_format, format_integers=False
    )
    if add_trailing_zeros:
        formatted = _apply_number_format(
            raw_formatted, max_trail, format_integers=False
        )
    else:
        formatted = raw_formatted
    stats_values = formatted.to_dict()[0]
    if "fvalue" in model["info"] and "F Statistic" in stats_values:
        if show_stars and "f_pvalue" in model["info"]:
            # Stars for the F statistic based on its own p-value.
            sig_bins = [-1, *sorted(significance_levels)] + [2]
            sig_icon_fstat = "*" * (
                len(significance_levels)
                - np.digitize(model["info"]["f_pvalue"], sig_bins)
                + 1
            )
            stats_values["F Statistic"] = (
                stats_values["F Statistic"] + "$^{" + sig_icon_fstat + "}$"
            )
        if show_dof:
            # Append degrees of freedom, e.g. "F(df=3;120)".
            fstat_str = "{{{}(df={};{})}}"
            stats_values["F Statistic"] = fstat_str.format(
                stats_values["F Statistic"],
                int(model["info"]["df_model"]),
                int(model["info"]["df_resid"]),
            )
    if "resid_std_err" in model["info"] and "Residual Std. Error" in stats_values:
        if show_dof:
            rse_str = "{{{}(df={})}}"
            stats_values["Residual Std. Error"] = rse_str.format(
                stats_values["Residual Std. Error"], int(model["info"]["df_resid"])
            )
    stat_sr = pd.Series(stats_values)
    # the following is to make sure statistics dataframe has as many levels of
    # indices as the parameters dataframe.
    stat_ind = np.empty((len(stat_sr), model["params"].index.nlevels - 1), dtype=str)
    stat_ind = np.concatenate(
        [stat_sr.index.values.reshape(len(stat_sr), 1), stat_ind], axis=1
    ).T
    stat_sr.index = pd.MultiIndex.from_arrays(stat_ind)
    return stat_sr.astype("str").replace("nan", "")


def _process_frame_indices(
    df,
    custom_param_names,
    custom_index_names,
    show_col_names,
    show_col_groups,
    column_names,
    column_groups,
):
    """Process body DataFrame, customize the header.

    Args:
        df (DataFrame): string DataFrame with parameter values and inferences.
        custom_param_names (dict): see main docstring.
        custom_index_names (list): see main docstring.
        show_col_names (bool): see main docstring.
        show_col_groups (bool): see main docstring.
        column_names (list): List of column names to display in estimation
            table.
        column_groups (list): List of column group titles to display in
            estimation table.

    Returns:
        processed_df (DataFrame): string DataFrame with customized header.

    """
    # The column names of the df are empty strings.
    # If show_col_names is True, rename columns using column_names.
    # Add column level if show col_groups is True.
    if show_col_names:
        if show_col_groups:
            df.columns = pd.MultiIndex.from_tuples(
                [(i, j) for i, j in zip(column_groups, column_names, strict=False)]
            )
        else:
            df.columns = column_names
    if custom_index_names:
        if isinstance(custom_index_names, list):
            df.index.names = custom_index_names
        elif isinstance(custom_index_names, dict):
            df.rename_axis(index=custom_index_names, inplace=True)
        else:
            raise TypeError(
                f"""Invalid custom_index_names can be of type either list or dict, or NoneType. Not: {type(custom_index_names)}."""
            )
    if custom_param_names:
        # Rename index entries via a frame round-trip so the mapping applies
        # across all index levels.
        ind = df.index.to_frame()
        ind = ind.replace(custom_param_names)
        df.index = pd.MultiIndex.from_frame(ind)
    return df


def _generate_notes_latex(
    append_notes, notes_label, significance_levels, custom_notes, df
):
    """Generate the LaTex script of the notes section.

    Args:
        append_notes (bool): see main docstring.
        notes_label (str): see main docstring.
        significance_levels (list): see main docstring.
        custom_notes (str): see main docstring.
        df (DataFrame): params DataFrame of estimation model.

    Returns:
        notes_latex (str): a string with LaTex script.

    """
    n_levels = df.index.nlevels
    n_columns = len(df.columns)
    significance_levels = sorted(significance_levels)
    notes_text = ""
    if append_notes:
        notes_text += "\\midrule\n"
        notes_text += "\\textit{{{}}} & \\multicolumn{{{}}}{{r}}{{".format(
            notes_label, str(n_columns + n_levels - 1)
        )
        # iterate over penultimate significance levels since last item of
        # legend is not followed by a semicolon
        for i in range(len(significance_levels) - 1):
            star = "*" * (len(significance_levels) - i)
            notes_text += f"$^{{{star}}}$p$<${significance_levels[i]};"
        notes_text += "$^{*}$p$<$" + str(significance_levels[-1]) + "} \\\\\n"
        if custom_notes:
            amp_n = "&" * n_levels
            if isinstance(custom_notes, list):
                if not all(isinstance(n, str) for n in custom_notes):
                    not_str_notes = [n for n in custom_notes if not isinstance(n, str)]
                    not_str_notes_types = [type(n) for n in not_str_notes]
                    raise ValueError(
                        f"""Each custom note can only be of string type. The following notes: {not_str_notes} are of types {not_str_notes_types} respectively."""
                    )
                for n in custom_notes:
                    notes_text += """ {}\\multicolumn{{{}}}{{r}}\\textit{{{}}}\\\\\n""".format(
                        amp_n, n_columns, n
                    )
            elif isinstance(custom_notes, str):
                notes_text += "{}\\multicolumn{{{}}}{{r}}\\textit{{{}}}\\\\\n".format(
                    amp_n, n_columns, custom_notes
                )
            else:
                raise TypeError(
                    f"""Custom notes can be either a string or a list of strings. Not: {type(custom_notes)}."""
                )
    return notes_text


def _generate_notes_html(
    append_notes, notes_label, significance_levels, custom_notes, df
):
    """Generate the html script of the notes section of the estimation table.
Args: append_notes (bool): see main docstring notes_label (str): see main docstring significance_levels (list): see main docstring custom_notes (str): see main docstring df (DataFrame): params DataFrame of estimation model Returns: notes_latex (str): a string with html script """ n_levels = df.index.nlevels n_columns = len(df.columns) significance_levels = sorted(significance_levels) notes_text = """ """.format(n_columns + n_levels) if append_notes: notes_text += """ {}""".format(notes_label, n_columns + n_levels - 1) for i in range(len(significance_levels) - 1): stars = "*" * (len(significance_levels) - i) notes_text += f"{stars}p<{significance_levels[i]}; " notes_text += f"""*p<{significance_levels[-1]} """ if custom_notes: if isinstance(custom_notes, list): if not all(isinstance(n, str) for n in custom_notes): not_str_notes = [n for n in custom_notes if not isinstance(n, str)] not_str_notes_types = [type(n) for n in not_str_notes] raise ValueError( f"""Each custom note can only be of string type. 
The following notes: {not_str_notes} are of types {not_str_notes_types} respectively.""" ) notes_text += """ {} """.format(n_columns + n_levels - 1, custom_notes[0]) if len(custom_notes) > 1: for i in range(1, len(custom_notes)): notes_text += """ {} """.format(n_columns + n_levels - 1, custom_notes[i]) elif isinstance(custom_notes, str): notes_text += """ {} """.format(n_columns + n_levels - 1, custom_notes) else: raise TypeError( f"""Custom notes can be either a string or a list of strings, not {type(custom_notes)}.""" ) return notes_text def _extract_params_from_sm(model): """Convert statsmodels like estimation result to estimagic like params dataframe.""" to_concat = [] params_list = ["params", "pvalues", "bse"] for col in params_list: to_concat.append(getattr(model, col)) to_concat.append(model.conf_int()) params_df = pd.concat(to_concat, axis=1) params_df.columns = ["value", "p_value", "standard_error", "ci_lower", "ci_upper"] return params_df def _extract_info_from_sm(model): """Process statsmodels estimation result to retrieve summary statistics as dict.""" info = {} key_values = [ "rsquared", "rsquared_adj", "fvalue", "f_pvalue", "df_model", "df_resid", ] for kv in key_values: info[kv] = getattr(model, kv) info["name"] = model.model.endog_names info["resid_std_err"] = np.sqrt(model.scale) info["n_obs"] = model.df_model + model.df_resid + 1 return info def _apply_number_format(df_raw, number_format, format_integers): """Apply string format to DataFrame cells. Args: df_raw (DataFrame): The DataFrame with float values to format. number_format (str, list, tuple, callable or int): User defined number format to apply to the DataFrame. format_integers (bool): Apply number format also to integers Returns: df_formatted (DataFrame): Formatted DataFrame. 
""" processed_format = _process_number_format(number_format) df_raw = df_raw.copy(deep=True) if isinstance(processed_format, (list, tuple)): df_formatted = df_raw.copy(deep=True).astype("float") for formatter in processed_format[:-1]: df_formatted = pd_df_map(df_formatted, formatter.format).astype("float") df_formatted = pd_df_map( df_formatted.astype("float"), processed_format[-1].format ) elif isinstance(processed_format, str): df_formatted = pd_df_map( df_raw.astype("str"), partial(_format_non_scientific_numbers, format_string=processed_format), ) elif callable(processed_format): df_formatted = pd_df_map(df_raw, processed_format) # Don't format integers: set to original value if not format_integers: integer_locs = pd_df_map(df_raw, _is_integer) df_formatted[integer_locs] = pd_df_map( df_raw[integer_locs].astype(float), "{:.0f}".format ) return df_formatted def _format_non_scientific_numbers(number_string, format_string): """Apply number format if the number string is not in scientific format.""" if "e" in number_string: out = number_string else: out = format_string.format(float(number_string)) return out def _process_number_format(raw_format): """Process the user define formatter. Reduces cases for number format in apply_number_format. """ if isinstance(raw_format, str): processed_format = [raw_format] elif isinstance(raw_format, int): processed_format = f"{{0:.{raw_format}f}}" elif callable(raw_format) or isinstance(raw_format, (list, tuple)): processed_format = raw_format else: raise TypeError( f"""Number format can be either of [str, int, tuple, list, callable] types. 
Not: {type(raw_format)}.""" ) return processed_format def _get_digits_after_decimal(df): """Get the maximum number of digits after a decimal point in a DataFrame.""" max_trail = 0 for c in df.columns: try: trail_length = ( ( df[c][~df[c].astype("str").str.contains("e")] .astype("str") .str.split(".", expand=True)[1] .astype("str") .replace("None", "") ) .str.len() .max() ) except KeyError: trail_length = 0 max_trail = max(trail_length, max_trail) return max_trail def _center_align_integers_and_non_numeric_strings(sr): """Align integer numbers and strings at the center of model column.""" sr = deepcopy(sr) for i in sr.index: if _is_integer(sr[i]): sr[i] = f"\\multicolumn{{1}}{{c}}{{{str(int(float(sr[i])))}}}" else: string_without_stars = sr[i].split("$", 1)[0] if not string_without_stars.replace(".", "").isnumeric(): sr[i] = f"\\multicolumn{{1}}{{c}}{{{sr[i]}}}" return sr def _get_updated_styler( df, show_index_names, show_col_names, show_col_groups, escape_special_characters ): """Return pandas.Styler object based ont the data and styling options.""" styler = df.style if not show_index_names: styler = styler.hide(names=True) if not show_col_names: styler = styler.hide(axis=1) if not show_col_groups: styler = styler.hide(axis=1, level=0) for ax in [0, 1]: styler = styler.format_index(escape=escape_special_characters, axis=ax) return styler def _is_integer(num): """Check if number is an integer (including a float with only zeros as digits)""" try: out = int(float(num)) == float(num) except ValueError: out = False return out ================================================ FILE: src/estimagic/examples/__init__.py ================================================ ================================================ FILE: src/estimagic/examples/diabetes.csv ================================================ ,Age,Sex,BMI,ABP,S1,S2,S3,S4,S5,S6,target 
0,0.0380759064334241,0.0506801187398187,0.0616962065186885,0.0218723549949558,-0.0442234984244464,-0.0348207628376986,-0.0434008456520269,-0.00259226199818282,0.0199084208763183,-0.0176461251598052,151.0 1,-0.00188201652779104,-0.044641636506989,-0.0514740612388061,-0.0263278347173518,-0.00844872411121698,-0.019163339748222,0.0744115640787594,-0.0394933828740919,-0.0683297436244215,-0.09220404962683,75.0 2,0.0852989062966783,0.0506801187398187,0.0444512133365941,-0.00567061055493425,-0.0455994512826475,-0.0341944659141195,-0.0323559322397657,-0.00259226199818282,0.00286377051894013,-0.0259303389894746,141.0 3,-0.0890629393522603,-0.044641636506989,-0.0115950145052127,-0.0366564467985606,0.0121905687618,0.0249905933641021,-0.0360375700438527,0.0343088588777263,0.0226920225667445,-0.0093619113301358,206.0 4,0.00538306037424807,-0.044641636506989,-0.0363846922044735,0.0218723549949558,0.00393485161259318,0.0155961395104161,0.0081420836051921,-0.00259226199818282,-0.0319914449413559,-0.0466408735636482,135.0 5,-0.0926954778032799,-0.044641636506989,-0.0406959404999971,-0.0194420933298793,-0.0689906498720667,-0.0792878444118122,0.0412768238419757,-0.076394503750001,-0.0411803851880079,-0.0963461565416647,97.0 6,-0.0454724779400257,0.0506801187398187,-0.0471628129432825,-0.015999222636143,-0.040095639849843,-0.0248000120604336,0.000778807997017968,-0.0394933828740919,-0.0629129499162512,-0.0383566597339788,138.0 7,0.063503675590561,0.0506801187398187,-0.00189470584028465,0.0666296740135272,0.0906198816792644,0.108914381123697,0.0228686348215404,0.0177033544835672,-0.0358167281015492,0.00306440941436832,63.0 8,0.0417084448844436,0.0506801187398187,0.0616962065186885,-0.0400993174922969,-0.0139525355440215,0.00620168565673016,-0.0286742944356786,-0.00259226199818282,-0.0149564750249113,0.0113486232440377,110.0 
9,-0.0709002470971626,-0.044641636506989,0.0390621529671896,-0.0332135761048244,-0.0125765826858204,-0.034507614375909,-0.0249926566315915,-0.00259226199818282,0.0677363261102861,-0.0135040182449705,310.0 10,-0.0963280162542995,-0.044641636506989,-0.0838084234552331,0.0081008722200108,-0.103389471327095,-0.0905611890362353,-0.0139477432193303,-0.076394503750001,-0.0629129499162512,-0.0342145528191441,101.0 11,0.0271782910803654,0.0506801187398187,0.0175059114895716,-0.0332135761048244,-0.00707277125301585,0.0459715403040008,-0.0654906724765493,0.0712099797536354,-0.096433222891784,-0.0590671943081523,69.0 12,0.0162806757273067,-0.044641636506989,-0.0288400076873072,-0.00911348124867051,-0.00432086553661359,-0.00976888589453599,0.0449584616460628,-0.0394933828740919,-0.0307512098645563,-0.0424987666488135,179.0 13,0.00538306037424807,0.0506801187398187,-0.00189470584028465,0.0081008722200108,-0.00432086553661359,-0.0157187066685371,-0.0029028298070691,-0.00259226199818282,0.0383932482116977,-0.0135040182449705,185.0 14,0.0453409833354632,-0.044641636506989,-0.0256065714656645,-0.0125563519424068,0.0176943801946045,-6.12835790604833e-05,0.0817748396869335,-0.0394933828740919,-0.0319914449413559,-0.0756356219674911,118.0 15,-0.0527375548420648,0.0506801187398187,-0.0180618869484982,0.0804011567884723,0.0892439288210632,0.107661787276539,-0.0397192078479398,0.108111100629544,0.0360557900898319,-0.0424987666488135,171.0 16,-0.00551455497881059,-0.044641636506989,0.0422955891888323,0.0494153205448459,0.0245741444856101,-0.0238605666750649,0.0744115640787594,-0.0394933828740919,0.0522799997967812,0.0279170509033766,166.0 17,0.0707687524926,0.0506801187398187,0.0121168511201671,0.0563010619323185,0.034205814493018,0.0494161733836856,-0.0397192078479398,0.0343088588777263,0.027367707542609,-0.00107769750046639,144.0 
18,-0.0382074010379866,-0.044641636506989,-0.0105172024313319,-0.0366564467985606,-0.0373437341334407,-0.0194764882100115,-0.0286742944356786,-0.00259226199818282,-0.0181182673078967,-0.0176461251598052,97.0 19,-0.0273097856849279,-0.044641636506989,-0.0180618869484982,-0.0400993174922969,-0.00294491267841247,-0.0113346282034837,0.0375951860378887,-0.0394933828740919,-0.0089440189577978,-0.0549250873933176,168.0 20,-0.0491050163910452,-0.044641636506989,-0.0568631216082106,-0.0435421881860331,-0.0455994512826475,-0.043275771306016,0.000778807997017968,-0.0394933828740919,-0.0119006848015081,0.0154907301588724,68.0 21,-0.0854304009012408,0.0506801187398187,-0.0223731352440218,0.00121513083253827,-0.0373437341334407,-0.0263657543693812,0.0155053592133662,-0.0394933828740919,-0.072128454601956,-0.0176461251598052,49.0 22,-0.0854304009012408,-0.044641636506989,-0.00405032998804645,-0.00911348124867051,-0.00294491267841247,0.00776742796567782,0.0228686348215404,-0.0394933828740919,-0.0611765950943345,-0.0135040182449705,68.0 23,0.0453409833354632,0.0506801187398187,0.0606183944448076,0.0310533436263482,0.0287020030602135,-0.0473467013092799,-0.0544457590642881,0.0712099797536354,0.133598980013008,0.135611830689079,245.0 24,-0.0636351701951234,-0.044641636506989,0.0358287167455469,-0.0228849640236156,-0.0304639698424351,-0.0188501912864324,-0.00658446761115617,-0.00259226199818282,-0.0259524244351894,-0.0549250873933176,184.0 25,-0.067267708646143,0.0506801187398187,-0.0126728265790937,-0.0400993174922969,-0.0153284884022226,0.0046359433477825,-0.0581273968683752,0.0343088588777263,0.0191990330785671,-0.0342145528191441,202.0 26,-0.107225631607358,-0.044641636506989,-0.0773415510119477,-0.0263278347173518,-0.0896299427450836,-0.0961978613484469,0.0265502726256275,-0.076394503750001,-0.0425721049227942,-0.0052198044153011,137.0 
27,-0.0236772472339084,-0.044641636506989,0.0595405823709267,-0.0400993174922969,-0.0428475455662452,-0.0435889197678055,0.0118237214092792,-0.0394933828740919,-0.0159982677581387,0.0403433716478807,85.0 28,0.0526060602375023,-0.044641636506989,-0.0212953231701409,-0.0745280244296595,-0.040095639849843,-0.0376390989938044,-0.00658446761115617,-0.0394933828740919,-0.000609254186102297,-0.0549250873933176,131.0 29,0.0671362140415805,0.0506801187398187,-0.00620595413580824,0.063186803319791,-0.0428475455662452,-0.0958847128866574,0.052321737254237,-0.076394503750001,0.0594238004447941,0.0527696923923848,283.0 30,-0.0600026317441039,-0.044641636506989,0.0444512133365941,-0.0194420933298793,-0.00982467696941811,-0.00757684666200928,0.0228686348215404,-0.0394933828740919,-0.0271286455543265,-0.0093619113301358,129.0 31,-0.0236772472339084,-0.044641636506989,-0.0654856181992578,-0.081413765817132,-0.0387196869916418,-0.0536096705450705,0.0596850128624111,-0.076394503750001,-0.0371283460104736,-0.0424987666488135,59.0 32,0.0344433679824045,0.0506801187398187,0.125287118877662,0.0287580963824284,-0.0538551684318543,-0.0129003705124313,-0.10230705051742,0.108111100629544,0.000271485727907132,0.0279170509033766,341.0 33,0.030810829531385,-0.044641636506989,-0.0503962491649252,-0.00222773986119799,-0.0442234984244464,-0.0899348921126563,0.118591217727804,-0.076394503750001,-0.0181182673078967,0.00306440941436832,87.0 34,0.0162806757273067,-0.044641636506989,-0.063329994051496,-0.0573136709609782,-0.0579830270064577,-0.0489124436182275,0.0081420836051921,-0.0394933828740919,-0.0594726974107223,-0.0673514081378217,65.0 35,0.0489735217864827,0.0506801187398187,-0.030995631835069,-0.0492803060204031,0.0493412959332305,-0.00413221358232442,0.133317768944152,-0.0535158088069373,0.0213108465682448,0.0196328370737072,102.0 
36,0.0126481372762872,-0.044641636506989,0.0228949718589761,0.0528581912385822,0.00806271018719657,-0.0285577936019079,0.0375951860378887,-0.0394933828740919,0.0547240033481791,-0.0259303389894746,265.0 37,-0.00914709342983014,-0.044641636506989,0.0110390390462862,-0.0573136709609782,-0.0249601584096305,-0.0429626228442264,0.0302319104297145,-0.0394933828740919,0.01703713241478,-0.0052198044153011,276.0 38,-0.00188201652779104,0.0506801187398187,0.0713965151836166,0.0976155102571536,0.0878679759628621,0.0754074957122168,-0.0213110188275045,0.0712099797536354,0.0714240327805764,0.0237749439885419,252.0 39,-0.00188201652779104,0.0506801187398187,0.0142724752679289,-0.0745280244296595,0.00255889875439205,0.00620168565673016,-0.0139477432193303,-0.00259226199818282,0.0191990330785671,0.00306440941436832,90.0 40,0.00538306037424807,0.0506801187398187,-0.00836157828357004,0.0218723549949558,0.054845107366035,0.07321545647969,-0.0249926566315915,0.0343088588777263,0.0125531528133893,0.094190761540732,100.0 41,-0.099960554705319,-0.044641636506989,-0.0676412423470196,-0.108956731367022,-0.0744944613048712,-0.072711726714232,0.0155053592133662,-0.0394933828740919,-0.0498684677352306,-0.0093619113301358,55.0 42,-0.0600026317441039,0.0506801187398187,-0.0105172024313319,-0.0148515990830405,-0.0497273098572509,-0.0235474182132754,-0.0581273968683752,0.0158582984397717,-0.00991895736315477,-0.0342145528191441,61.0 43,0.0199132141783263,-0.044641636506989,-0.0234509473179027,-0.0710851537359232,0.0204462859110067,-0.0100820343563255,0.118591217727804,-0.076394503750001,-0.0425721049227942,0.0734802269665584,92.0 44,0.0453409833354632,0.0506801187398187,0.068163078961974,0.0081008722200108,-0.0167044412604238,0.0046359433477825,-0.0765355858888105,0.0712099797536354,0.0324332257796019,-0.0176461251598052,259.0 
45,0.0271782910803654,0.0506801187398187,-0.0353068801305926,0.0322009670761646,-0.0112006298276192,0.00150445872988718,-0.0102661054152432,-0.00259226199818282,-0.0149564750249113,-0.0507829804784829,53.0 46,-0.0563700932930843,-0.044641636506989,-0.0115950145052127,-0.0332135761048244,-0.0469754041408486,-0.0476598497710694,0.00446044580110504,-0.0394933828740919,-0.00797939755454164,-0.0880619427119953,190.0 47,-0.0781653239992017,-0.044641636506989,-0.0730303027164241,-0.0573136709609782,-0.0841261313122791,-0.0742774690231797,-0.0249926566315915,-0.0394933828740919,-0.0181182673078967,-0.0839198357971606,142.0 48,0.0671362140415805,0.0506801187398187,-0.041773752573878,0.0115437429137471,0.00255889875439205,0.00588853719494063,0.0412768238419757,-0.0394933828740919,-0.0594726974107223,-0.0217882320746399,75.0 49,-0.0418399394890061,0.0506801187398187,0.0142724752679289,-0.00567061055493425,-0.0125765826858204,0.00620168565673016,-0.0728539480847234,0.0712099797536354,0.0354619386607697,-0.0135040182449705,142.0 50,0.0344433679824045,-0.044641636506989,-0.00728376620968916,0.0149866136074833,-0.0442234984244464,-0.0373259505320149,-0.0029028298070691,-0.0394933828740919,-0.02139368094036,0.00720651632920303,155.0 51,0.0598711371395414,0.0506801187398187,0.0164280994156907,0.0287580963824284,-0.0414715927080441,-0.029184090525487,-0.0286742944356786,-0.00259226199818282,-0.00239668149341427,-0.0217882320746399,225.0 52,-0.0527375548420648,-0.044641636506989,-0.00943939035745095,-0.00567061055493425,0.0397096259258226,0.0447189464568426,0.0265502726256275,-0.00259226199818282,-0.0181182673078967,-0.0135040182449705,59.0 53,-0.00914709342983014,-0.044641636506989,-0.0159062628007364,0.0700725447072635,0.0121905687618,0.0221722572079963,0.0155053592133662,-0.00259226199818282,-0.0332487872476258,0.0486275854775501,104.0 
54,-0.0491050163910452,-0.044641636506989,0.0250505960067379,0.0081008722200108,0.0204462859110067,0.0177881787429428,0.052321737254237,-0.0394933828740919,-0.0411803851880079,0.00720651632920303,182.0 55,-0.0418399394890061,-0.044641636506989,-0.0493184370910443,-0.0366564467985606,-0.00707277125301585,-0.0226079728279068,0.0854564774910206,-0.0394933828740919,-0.0664881482228354,0.00720651632920303,128.0 56,-0.0418399394890061,-0.044641636506989,0.0412177771149514,-0.0263278347173518,-0.0318399227006362,-0.0304366843726451,-0.0360375700438527,0.00294290613320356,0.0336568129023847,-0.0176461251598052,52.0 57,-0.0273097856849279,-0.044641636506989,-0.063329994051496,-0.0504279295735057,-0.0896299427450836,-0.104339721354975,0.052321737254237,-0.076394503750001,-0.0561575730950062,-0.0673514081378217,37.0 58,0.0417084448844436,-0.044641636506989,-0.064407806125377,0.0356438377699009,0.0121905687618,-0.057993749010124,0.181179060397284,-0.076394503750001,-0.000609254186102297,-0.0507829804784829,170.0 59,0.063503675590561,0.0506801187398187,-0.0256065714656645,0.0115437429137471,0.0644767773734429,0.048476727998317,0.0302319104297145,-0.00259226199818282,0.0383932482116977,0.0196328370737072,170.0 60,-0.0709002470971626,-0.044641636506989,-0.00405032998804645,-0.0400993174922969,-0.0662387441556644,-0.0786615474882331,0.052321737254237,-0.076394503750001,-0.0514005352605825,-0.0342145528191441,61.0 61,-0.0418399394890061,0.0506801187398187,0.00457216660300077,-0.0538708002672419,-0.0442234984244464,-0.0273051997547498,-0.0802172236928976,0.0712099797536354,0.0366457977933988,0.0196328370737072,144.0 62,-0.0273097856849279,0.0506801187398187,-0.00728376620968916,-0.0400993174922969,-0.0112006298276192,-0.0138398158977999,0.0596850128624111,-0.0394933828740919,-0.0823814832581028,-0.0259303389894746,52.0 
63,-0.034574862586967,-0.044641636506989,-0.0374625042783544,-0.0607565416547144,0.0204462859110067,0.0434663526096845,-0.0139477432193303,-0.00259226199818282,-0.0307512098645563,-0.0714935150526564,128.0 64,0.0671362140415805,0.0506801187398187,-0.0256065714656645,-0.0400993174922969,-0.0634868384392622,-0.0598726397808612,-0.0029028298070691,-0.0394933828740919,-0.0191970476139445,0.0113486232440377,71.0 65,-0.0454724779400257,0.0506801187398187,-0.0245287593917836,0.0597439326260547,0.00531080447079431,0.0149698425868371,-0.0544457590642881,0.0712099797536354,0.0423448954496075,0.0154907301588724,163.0 66,-0.00914709342983014,0.0506801187398187,-0.0180618869484982,-0.0332135761048244,-0.0208322998350272,0.0121515064307313,-0.0728539480847234,0.0712099797536354,0.000271485727907132,0.0196328370737072,150.0 67,0.0417084448844436,0.0506801187398187,-0.0148284507268555,-0.0171468461892456,-0.00569681839481472,0.00839372488925688,-0.0139477432193303,-0.00185423958066465,-0.0119006848015081,0.00306440941436832,97.0 68,0.0380759064334241,0.0506801187398187,-0.0299178197611881,-0.0400993174922969,-0.0332158755588373,-0.0241737151368545,-0.0102661054152432,-0.00259226199818282,-0.0129079422541688,0.00306440941436832,160.0 69,0.0162806757273067,-0.044641636506989,-0.0460850008694016,-0.00567061055493425,-0.0758704141630723,-0.0614383820898088,-0.0139477432193303,-0.0394933828740919,-0.0514005352605825,0.0196328370737072,178.0 70,-0.00188201652779104,-0.044641636506989,-0.0697968664947814,-0.0125563519424068,-0.000193006962010205,-0.00914258897095694,0.0707299262746723,-0.0394933828740919,-0.0629129499162512,0.0403433716478807,48.0 71,-0.00188201652779104,-0.044641636506989,0.0336730925977851,0.125158475807044,0.0245741444856101,0.0262431872112602,-0.0102661054152432,-0.00259226199818282,0.0267142576335128,0.0610539062220542,270.0 
72,0.063503675590561,0.0506801187398187,-0.00405032998804645,-0.0125563519424068,0.103003457403075,0.0487898764601065,0.056003375058324,-0.00259226199818282,0.0844952822124031,-0.0176461251598052,202.0 73,0.0126481372762872,0.0506801187398187,-0.02021751109626,-0.00222773986119799,0.0383336730676214,0.05317395492516,-0.00658446761115617,0.0343088588777263,-0.00514530798026311,-0.0093619113301358,111.0 74,0.0126481372762872,0.0506801187398187,0.00241654245523897,0.0563010619323185,0.0273260502020124,0.0171618818193638,0.0412768238419757,-0.0394933828740919,0.00371173823343597,0.0734802269665584,85.0 75,-0.00914709342983014,0.0506801187398187,-0.030995631835069,-0.0263278347173518,-0.0112006298276192,-0.00100072896442909,-0.0213110188275045,-0.00259226199818282,0.0062093156165054,0.0279170509033766,42.0 76,-0.0309423241359475,0.0506801187398187,0.0282840322283806,0.0700725447072635,-0.126780669916514,-0.106844909049291,-0.0544457590642881,-0.047980640675551,-0.0307512098645563,0.0154907301588724,170.0 77,-0.0963280162542995,-0.044641636506989,-0.0363846922044735,-0.0745280244296595,-0.0387196869916418,-0.0276183482165393,0.0155053592133662,-0.0394933828740919,-0.0740888714915354,-0.00107769750046639,200.0 78,0.00538306037424807,-0.044641636506989,-0.0579409336820915,-0.0228849640236156,-0.0676146970138656,-0.0683276482491785,-0.0544457590642881,-0.00259226199818282,0.0428956878925287,-0.0839198357971606,252.0 79,-0.103593093156339,-0.044641636506989,-0.0374625042783544,-0.0263278347173518,0.00255889875439205,0.0199802179754696,0.0118237214092792,-0.00259226199818282,-0.0683297436244215,-0.0259303389894746,113.0 80,0.0707687524926,-0.044641636506989,0.0121168511201671,0.0425295791573734,0.0713565416644485,0.0534871033869495,0.052321737254237,-0.00259226199818282,0.0253931349154494,-0.0052198044153011,143.0 
81,0.0126481372762872,0.0506801187398187,-0.0223731352440218,-0.0297707054110881,0.0108146159035988,0.0284352264437869,-0.0213110188275045,0.0343088588777263,-0.00608024819631442,-0.00107769750046639,51.0 82,-0.0164121703318693,-0.044641636506989,-0.0353068801305926,-0.0263278347173518,0.0328298616348169,0.0171618818193638,0.100183028707369,-0.0394933828740919,-0.0702093127286876,-0.0797777288823259,52.0 83,-0.0382074010379866,-0.044641636506989,0.00996122697240527,-0.0469850588797694,-0.0593589798646588,-0.0529833736214915,-0.0102661054152432,-0.0394933828740919,-0.0159982677581387,-0.0424987666488135,210.0 84,0.00175052192322852,-0.044641636506989,-0.0396181284261162,-0.100923366426447,-0.0290880169842339,-0.0301235359108556,0.0449584616460628,-0.0501947079281055,-0.0683297436244215,-0.129483011860342,65.0 85,0.0453409833354632,-0.044641636506989,0.0713965151836166,0.00121513083253827,-0.00982467696941811,-0.00100072896442909,0.0155053592133662,-0.0394933828740919,-0.0411803851880079,-0.0714935150526564,141.0 86,-0.0709002470971626,0.0506801187398187,-0.0751859268641859,-0.0400993174922969,-0.051103262715452,-0.015092409744958,-0.0397192078479398,-0.00259226199818282,-0.096433222891784,-0.0342145528191441,55.0 87,0.0453409833354632,-0.044641636506989,-0.00620595413580824,0.0115437429137471,0.0631008245152418,0.0162224364339952,0.0965013909032818,-0.0394933828740919,0.0428956878925287,-0.0383566597339788,134.0 88,-0.0527375548420648,0.0506801187398187,-0.0406959404999971,-0.067642283042187,-0.0318399227006362,-0.0370128020702253,0.0375951860378887,-0.0394933828740919,-0.0345237153303495,0.0693381200517237,42.0 89,-0.0454724779400257,-0.044641636506989,-0.0482406250171634,-0.0194420933298793,-0.000193006962010205,-0.0160318551303266,0.0670482884705852,-0.0394933828740919,-0.0247911874324607,0.0196328370737072,111.0 
90,0.0126481372762872,-0.044641636506989,-0.0256065714656645,-0.0400993174922969,-0.0304639698424351,-0.0451546620767532,0.0780932018828464,-0.076394503750001,-0.072128454601956,0.0113486232440377,98.0 91,0.0453409833354632,-0.044641636506989,0.0519958978537604,-0.0538708002672419,0.0631008245152418,0.0647604480113727,-0.0102661054152432,0.0343088588777263,0.0372320112089689,0.0196328370737072,164.0 92,-0.0200447087828888,-0.044641636506989,0.00457216660300077,0.0976155102571536,0.00531080447079431,-0.0207290820571696,0.0633666506664982,-0.0394933828740919,0.0125531528133893,0.0113486232440377,48.0 93,-0.0491050163910452,-0.044641636506989,-0.064407806125377,-0.10207098997955,-0.00294491267841247,-0.0154055582067476,0.0633666506664982,-0.0472426182580328,-0.0332487872476258,-0.0549250873933176,96.0 94,-0.0781653239992017,-0.044641636506989,-0.0169840748746173,-0.0125563519424068,-0.000193006962010205,-0.0135266674360104,0.0707299262746723,-0.0394933828740919,-0.0411803851880079,-0.09220404962683,90.0 95,-0.0709002470971626,-0.044641636506989,-0.0579409336820915,-0.081413765817132,-0.0455994512826475,-0.0288709420636975,-0.0434008456520269,-0.00259226199818282,0.00114379737951254,-0.0052198044153011,162.0 96,0.0562385986885218,0.0506801187398187,0.00996122697240527,0.0494153205448459,-0.00432086553661359,-0.0122740735888523,-0.0434008456520269,0.0343088588777263,0.060787754150744,0.0320591578182113,150.0 97,-0.0273097856849279,-0.044641636506989,0.088641508365711,-0.0251802111642493,0.0218222387692079,0.0425269072243159,-0.0323559322397657,0.0343088588777263,0.00286377051894013,0.0776223338813931,279.0 98,0.00175052192322852,0.0506801187398187,-0.00512814206192736,-0.0125563519424068,-0.0153284884022226,-0.0138398158977999,0.0081420836051921,-0.0394933828740919,-0.00608024819631442,-0.0673514081378217,92.0 
99,-0.00188201652779104,-0.044641636506989,-0.064407806125377,0.0115437429137471,0.0273260502020124,0.0375165318356834,-0.0139477432193303,0.0343088588777263,0.0117839003835759,-0.0549250873933176,83.0 100,0.0162806757273067,-0.044641636506989,0.0175059114895716,-0.0228849640236156,0.0603489187988395,0.0444057979950531,0.0302319104297145,-0.00259226199818282,0.0372320112089689,-0.00107769750046639,128.0 101,0.0162806757273067,0.0506801187398187,-0.0450071887955207,0.063186803319791,0.0108146159035988,-0.00037443204085002,0.0633666506664982,-0.0394933828740919,-0.0307512098645563,0.036201264733046,102.0 102,-0.0926954778032799,-0.044641636506989,0.0282840322283806,-0.015999222636143,0.0369577202094203,0.0249905933641021,0.056003375058324,-0.0394933828740919,-0.00514530798026311,-0.00107769750046639,302.0 103,0.0598711371395414,0.0506801187398187,0.0412177771149514,0.0115437429137471,0.0410855787840237,0.0707102687853738,-0.0360375700438527,0.0343088588777263,-0.0109044358473771,-0.0300724459043093,198.0 104,-0.0273097856849279,-0.044641636506989,0.0649296427403312,-0.00222773986119799,-0.0249601584096305,-0.0172844489774848,0.0228686348215404,-0.0394933828740919,-0.0611765950943345,-0.063209301222987,95.0 105,0.0235457526293458,0.0506801187398187,-0.0320734439089499,-0.0400993174922969,-0.0318399227006362,-0.0216685274425382,-0.0139477432193303,-0.00259226199818282,-0.0109044358473771,0.0196328370737072,53.0 106,-0.0963280162542995,-0.044641636506989,-0.0762637389380668,-0.0435421881860331,-0.0455994512826475,-0.0348207628376986,0.0081420836051921,-0.0394933828740919,-0.0594726974107223,-0.0839198357971606,134.0 107,0.0271782910803654,-0.044641636506989,0.0498402737059986,-0.0550184238203444,-0.00294491267841247,0.0406480164535787,-0.0581273968683752,0.0527594193156808,-0.0529587932392004,-0.0052198044153011,144.0 
108,0.0199132141783263,0.0506801187398187,0.045529025410475,0.0299057198322448,-0.062110885581061,-0.0558017097775973,-0.0728539480847234,0.0269286347025444,0.0456008084141249,0.0403433716478807,232.0 109,0.0380759064334241,0.0506801187398187,-0.00943939035745095,0.0023627543856408,0.00118294589619092,0.0375165318356834,-0.0544457590642881,0.0501763408543672,-0.0259524244351894,0.106617082285236,81.0 110,0.0417084448844436,0.0506801187398187,-0.0320734439089499,-0.0228849640236156,-0.0497273098572509,-0.0401442866881206,0.0302319104297145,-0.0394933828740919,-0.126097385560409,0.0154907301588724,104.0 111,0.0199132141783263,-0.044641636506989,0.00457216660300077,-0.0263278347173518,0.023198191627409,0.0102726156599941,0.0670482884705852,-0.0394933828740919,-0.0236445575721341,-0.0466408735636482,59.0 112,-0.0854304009012408,-0.044641636506989,0.0207393477112143,-0.0263278347173518,0.00531080447079431,0.01966706951368,-0.0029028298070691,-0.00259226199818282,-0.0236445575721341,0.00306440941436832,246.0 113,0.0199132141783263,0.0506801187398187,0.0142724752679289,0.063186803319791,0.0149424744782022,0.0202933664372591,-0.0470824834561139,0.0343088588777263,0.0466607723568145,0.0900486546258972,297.0 114,0.0235457526293458,-0.044641636506989,0.110197749843329,0.063186803319791,0.0135665216200011,-0.0329418720669614,-0.0249926566315915,0.0206554441536399,0.09924022573399,0.0237749439885419,258.0 115,-0.0309423241359475,0.0506801187398187,0.00133873038135806,-0.00567061055493425,0.0644767773734429,0.0494161733836856,-0.0470824834561139,0.108111100629544,0.0837967663655224,0.00306440941436832,229.0 116,0.0489735217864827,0.0506801187398187,0.0584627702970458,0.0700725447072635,0.0135665216200011,0.0206065148990486,-0.0213110188275045,0.0343088588777263,0.0220040504561505,0.0279170509033766,275.0 
117,0.0598711371395414,-0.044641636506989,-0.0212953231701409,0.0872868981759448,0.0452134373586271,0.0315667110616823,-0.0470824834561139,0.0712099797536354,0.0791210813896579,0.135611830689079,281.0 118,-0.0563700932930843,0.0506801187398187,-0.0105172024313319,0.0253152256886921,0.023198191627409,0.0400217195299996,-0.0397192078479398,0.0343088588777263,0.0206123307213641,0.0569117993072195,179.0 119,0.0162806757273067,-0.044641636506989,-0.0471628129432825,-0.00222773986119799,-0.019456346976826,-0.0429626228442264,0.0339135482338016,-0.0394933828740919,0.027367707542609,0.0279170509033766,200.0 120,-0.0491050163910452,-0.044641636506989,0.00457216660300077,0.0115437429137471,-0.0373437341334407,-0.0185370428246429,-0.0176293810234174,-0.00259226199818282,-0.0398095943643375,-0.0217882320746399,200.0 121,0.063503675590561,-0.044641636506989,0.0175059114895716,0.0218723549949558,0.00806271018719657,0.0215459602844172,-0.0360375700438527,0.0343088588777263,0.0199084208763183,0.0113486232440377,173.0 122,0.0489735217864827,0.0506801187398187,0.0810968238485447,0.0218723549949558,0.0438374845004259,0.0641341510877936,-0.0544457590642881,0.0712099797536354,0.0324332257796019,0.0486275854775501,180.0 123,0.00538306037424807,0.0506801187398187,0.034750904671666,-0.00108011630809546,0.152537760298315,0.198787989657293,-0.0618090346724622,0.185234443260194,0.0155668445407018,0.0734802269665584,84.0 124,-0.00551455497881059,-0.044641636506989,0.023972783932857,0.0081008722200108,-0.0345918284170385,-0.0388916928409625,0.0228686348215404,-0.0394933828740919,-0.0159982677581387,-0.0135040182449705,121.0 125,-0.00551455497881059,0.0506801187398187,-0.00836157828357004,-0.00222773986119799,-0.0332158755588373,-0.0636304213223356,-0.0360375700438527,-0.00259226199818282,0.0805854642386665,0.00720651632920303,161.0 
126,-0.0890629393522603,-0.044641636506989,-0.0611743699037342,-0.0263278347173518,-0.0552311212900554,-0.0545491159304391,0.0412768238419757,-0.076394503750001,-0.0939356455087147,-0.0549250873933176,99.0 127,0.0344433679824045,0.0506801187398187,-0.00189470584028465,-0.0125563519424068,0.0383336730676214,0.0137172487396789,0.0780932018828464,-0.0394933828740919,0.00455189046612778,-0.0963461565416647,109.0 128,-0.0527375548420648,-0.044641636506989,-0.0622521819776151,-0.0263278347173518,-0.00569681839481472,-0.005071658967693,0.0302319104297145,-0.0394933828740919,-0.0307512098645563,-0.0714935150526564,115.0 129,0.00901559882526763,-0.044641636506989,0.0164280994156907,0.00465800152627453,0.0094386630453977,0.0105857641217836,-0.0286742944356786,0.0343088588777263,0.0389683660308856,0.11904340302974,268.0 130,-0.0636351701951234,0.0506801187398187,0.0961861928828773,0.104501251644626,-0.00294491267841247,-0.00475851050590347,-0.00658446761115617,-0.00259226199818282,0.0226920225667445,0.0734802269665584,274.0 131,-0.0963280162542995,-0.044641636506989,-0.0697968664947814,-0.067642283042187,-0.019456346976826,-0.0107083312799046,0.0155053592133662,-0.0394933828740919,-0.0468794828442166,-0.0797777288823259,158.0 132,0.0162806757273067,0.0506801187398187,-0.0212953231701409,-0.00911348124867051,0.034205814493018,0.047850431074738,0.000778807997017968,-0.00259226199818282,-0.0129079422541688,0.0237749439885419,107.0 133,-0.0418399394890061,0.0506801187398187,-0.0536296853865679,-0.0400993174922969,-0.0841261313122791,-0.0717722813288634,-0.0029028298070691,-0.0394933828740919,-0.072128454601956,-0.0300724459043093,83.0 134,-0.0745327855481821,-0.044641636506989,0.0433734012627132,-0.0332135761048244,0.0121905687618,0.000251864882729031,0.0633666506664982,-0.0394933828740919,-0.0271286455543265,-0.0466408735636482,103.0 
135,-0.00551455497881059,-0.044641636506989,0.056307146149284,-0.0366564467985606,-0.0483513569990498,-0.0429626228442264,-0.0728539480847234,0.0379989709653172,0.0507815133629732,0.0569117993072195,272.0 136,-0.0926954778032799,-0.044641636506989,-0.0816527993074713,-0.0573136709609782,-0.0607349327228599,-0.068014499787389,0.0486400994501499,-0.076394503750001,-0.0664881482228354,-0.0217882320746399,85.0 137,0.00538306037424807,-0.044641636506989,0.0498402737059986,0.0976155102571536,-0.0153284884022226,-0.0163450035921162,-0.00658446761115617,-0.00259226199818282,0.01703713241478,-0.0135040182449705,280.0 138,0.0344433679824045,0.0506801187398187,0.11127556191721,0.076958286094736,-0.0318399227006362,-0.03388131745233,-0.0213110188275045,-0.00259226199818282,0.028016506523264,0.0734802269665584,336.0 139,0.0235457526293458,-0.044641636506989,0.0616962065186885,0.0528581912385822,-0.0345918284170385,-0.0489124436182275,-0.0286742944356786,-0.00259226199818282,0.0547240033481791,-0.0052198044153011,281.0 140,0.0417084448844436,0.0506801187398187,0.0142724752679289,0.0425295791573734,-0.0304639698424351,-0.00131387742621863,-0.0434008456520269,-0.00259226199818282,-0.0332487872476258,0.0154907301588724,118.0 141,-0.0273097856849279,-0.044641636506989,0.0476846495582368,-0.0469850588797694,0.034205814493018,0.0572448849284239,-0.0802172236928976,0.13025177315509,0.0450661683362615,0.131469723774244,317.0 142,0.0417084448844436,0.0506801187398187,0.0121168511201671,0.0390867084636372,0.054845107366035,0.0444057979950531,0.00446044580110504,-0.00259226199818282,0.0456008084141249,-0.00107769750046639,235.0 143,-0.0309423241359475,-0.044641636506989,0.00564997867688165,-0.00911348124867051,0.0190703330528056,0.00682798258030921,0.0744115640787594,-0.0394933828740919,-0.0411803851880079,-0.0424987666488135,60.0 
144,0.030810829531385,0.0506801187398187,0.0466068374843559,-0.015999222636143,0.0204462859110067,0.0506687672308438,-0.0581273968683752,0.0712099797536354,0.0062093156165054,0.00720651632920303,174.0 145,-0.0418399394890061,-0.044641636506989,0.128520555099304,0.063186803319791,-0.0332158755588373,-0.0326287236051719,0.0118237214092792,-0.0394933828740919,-0.0159982677581387,-0.0507829804784829,259.0 146,-0.0309423241359475,0.0506801187398187,0.0595405823709267,0.00121513083253827,0.0121905687618,0.0315667110616823,-0.0434008456520269,0.0343088588777263,0.0148227108412663,0.00720651632920303,178.0 147,-0.0563700932930843,-0.044641636506989,0.0929527566612346,-0.0194420933298793,0.0149424744782022,0.0234248510551544,-0.0286742944356786,0.0254525898675081,0.0260560896336847,0.0403433716478807,128.0 148,-0.0600026317441039,0.0506801187398187,0.0153502873418098,-0.0194420933298793,0.0369577202094203,0.0481635795365275,0.0191869970174533,-0.00259226199818282,-0.0307512098645563,-0.00107769750046639,96.0 149,-0.0491050163910452,0.0506801187398187,-0.00512814206192736,-0.0469850588797694,-0.0208322998350272,-0.0204159335953801,-0.0691723102806364,0.0712099797536354,0.061237907519701,-0.0383566597339788,126.0 150,0.0235457526293458,-0.044641636506989,0.0703187031097357,0.0253152256886921,-0.0345918284170385,-0.014466112821379,-0.0323559322397657,-0.00259226199818282,-0.0191970476139445,-0.0093619113301358,288.0 151,0.00175052192322852,-0.044641636506989,-0.00405032998804645,-0.00567061055493425,-0.00844872411121698,-0.0238605666750649,0.052321737254237,-0.0394933828740919,-0.0089440189577978,-0.0135040182449705,88.0 152,-0.034574862586967,0.0506801187398187,-0.000816893766403737,0.0700725447072635,0.0397096259258226,0.0669524872438994,-0.0654906724765493,0.108111100629544,0.0267142576335128,0.0734802269665584,292.0 
153,0.0417084448844436,0.0506801187398187,-0.0439293767216398,0.063186803319791,-0.00432086553661359,0.0162224364339952,-0.0139477432193303,-0.00259226199818282,-0.0345237153303495,0.0113486232440377,71.0 154,0.0671362140415805,0.0506801187398187,0.0207393477112143,-0.00567061055493425,0.0204462859110067,0.0262431872112602,-0.0029028298070691,-0.00259226199818282,0.00864028293306308,0.00306440941436832,197.0 155,-0.0273097856849279,0.0506801187398187,0.0606183944448076,0.0494153205448459,0.0851160702464598,0.0863676918748504,-0.0029028298070691,0.0343088588777263,0.0378144788263439,0.0486275854775501,186.0 156,-0.0164121703318693,-0.044641636506989,-0.0105172024313319,0.00121513083253827,-0.0373437341334407,-0.0357602082230672,0.0118237214092792,-0.0394933828740919,-0.02139368094036,-0.0342145528191441,25.0 157,-0.00188201652779104,0.0506801187398187,-0.0331512559828308,-0.0182944697767768,0.0314539087766158,0.0428400556861055,-0.0139477432193303,0.0199174217361217,0.0102256424049578,0.0279170509033766,84.0 158,-0.0127796318808497,-0.044641636506989,-0.0654856181992578,-0.0699375301828207,0.00118294589619092,0.0168487333575743,-0.0029028298070691,-0.00702039650329191,-0.0307512098645563,-0.0507829804784829,96.0 159,-0.00551455497881059,-0.044641636506989,0.0433734012627132,0.0872868981759448,0.0135665216200011,0.00714113104209875,-0.0139477432193303,-0.00259226199818282,0.0423448954496075,-0.0176461251598052,195.0 160,-0.00914709342983014,-0.044641636506989,-0.0622521819776151,-0.0745280244296595,-0.0235842055514294,-0.0132135189742209,0.00446044580110504,-0.0394933828740919,-0.0358167281015492,-0.0466408735636482,53.0 161,-0.0454724779400257,0.0506801187398187,0.0638518306664503,0.0700725447072635,0.133274420283499,0.131461070372543,-0.0397192078479398,0.108111100629544,0.0757375884575476,0.0859065477110625,217.0 
162,-0.0527375548420648,-0.044641636506989,0.0304396563761424,-0.0745280244296595,-0.0235842055514294,-0.0113346282034837,-0.0029028298070691,-0.00259226199818282,-0.0307512098645563,-0.00107769750046639,172.0 163,0.0162806757273067,0.0506801187398187,0.0724743272574975,0.076958286094736,-0.00844872411121698,0.00557538873315109,-0.00658446761115617,-0.00259226199818282,-0.0236445575721341,0.0610539062220542,131.0 164,0.0453409833354632,-0.044641636506989,-0.019139699022379,0.0218723549949558,0.0273260502020124,-0.0135266674360104,0.100183028707369,-0.0394933828740919,0.0177634778671173,-0.0135040182449705,214.0 165,-0.0418399394890061,-0.044641636506989,-0.0665634302731387,-0.0469850588797694,-0.0373437341334407,-0.043275771306016,0.0486400994501499,-0.0394933828740919,-0.0561575730950062,-0.0135040182449705,59.0 166,-0.0563700932930843,0.0506801187398187,-0.0600965578298533,-0.0366564467985606,-0.0882539898868825,-0.0708328359434948,-0.0139477432193303,-0.0394933828740919,-0.0781409106690696,-0.104630370371334,70.0 167,0.0707687524926,-0.044641636506989,0.0692408910358548,0.0379390850138207,0.0218222387692079,0.00150445872988718,-0.0360375700438527,0.0391060045915944,0.0776327891955595,0.106617082285236,220.0 168,0.00175052192322852,0.0506801187398187,0.0595405823709267,-0.00222773986119799,0.0617248716570406,0.063194705702425,-0.0581273968683752,0.108111100629544,0.0689822116363026,0.12732761685941,268.0 169,-0.00188201652779104,-0.044641636506989,-0.0266843835395454,0.0494153205448459,0.0589729659406384,-0.0160318551303266,-0.0470824834561139,0.0712099797536354,0.133598980013008,0.0196328370737072,152.0 170,0.0235457526293458,0.0506801187398187,-0.02021751109626,-0.0366564467985606,-0.0139525355440215,-0.015092409744958,0.0596850128624111,-0.0394933828740919,-0.096433222891784,-0.0176461251598052,47.0 
171,-0.0200447087828888,-0.044641636506989,-0.0460850008694016,-0.0986281192858133,-0.0758704141630723,-0.0598726397808612,-0.0176293810234174,-0.0394933828740919,-0.0514005352605825,-0.0466408735636482,74.0 172,0.0417084448844436,0.0506801187398187,0.0713965151836166,0.0081008722200108,0.0383336730676214,0.0159092879722056,-0.0176293810234174,0.0343088588777263,0.0734100780491161,0.0859065477110625,295.0 173,-0.0636351701951234,0.0506801187398187,-0.0794971751597095,-0.00567061055493425,-0.071742555588469,-0.0664487574784414,-0.0102661054152432,-0.0394933828740919,-0.0181182673078967,-0.0549250873933176,101.0 174,0.0162806757273067,0.0506801187398187,0.00996122697240527,-0.0435421881860331,-0.0965097070360893,-0.0946321190394993,-0.0397192078479398,-0.0394933828740919,0.01703713241478,0.00720651632920303,151.0 175,0.0671362140415805,-0.044641636506989,-0.0385403163522353,-0.0263278347173518,-0.0318399227006362,-0.0263657543693812,0.0081420836051921,-0.0394933828740919,-0.0271286455543265,0.00306440941436832,127.0 176,0.0453409833354632,0.0506801187398187,0.0196615356373334,0.0390867084636372,0.0204462859110067,0.0259300387494707,0.0081420836051921,-0.00259226199818282,-0.003303712578677,0.0196328370737072,237.0 177,0.0489735217864827,-0.044641636506989,0.0272062201544997,-0.0251802111642493,0.023198191627409,0.0184144756665219,-0.0618090346724622,0.0800662487638535,0.0722236508199124,0.0320591578182113,225.0 178,0.0417084448844436,-0.044641636506989,-0.00836157828357004,-0.0263278347173518,0.0245741444856101,0.0162224364339952,0.0707299262746723,-0.0394933828740919,-0.0483617248028919,-0.0300724459043093,81.0 179,-0.0236772472339084,-0.044641636506989,-0.0159062628007364,-0.0125563519424068,0.0204462859110067,0.0412743133771578,-0.0434008456520269,0.0343088588777263,0.0140724525157685,-0.0093619113301358,151.0 
180,-0.0382074010379866,0.0506801187398187,0.00457216660300077,0.0356438377699009,-0.0112006298276192,0.00588853719494063,-0.0470824834561139,0.0343088588777263,0.0163049527999418,-0.00107769750046639,107.0 181,0.0489735217864827,-0.044641636506989,-0.0428515646477589,-0.0538708002672419,0.0452134373586271,0.0500424703072647,0.0339135482338016,-0.00259226199818282,-0.0259524244351894,-0.063209301222987,64.0 182,0.0453409833354632,0.0506801187398187,0.00564997867688165,0.0563010619323185,0.0644767773734429,0.0891860280309562,-0.0397192078479398,0.0712099797536354,0.0155668445407018,-0.0093619113301358,138.0 183,0.0453409833354632,0.0506801187398187,-0.0353068801305926,0.063186803319791,-0.00432086553661359,-0.00162702588800815,-0.0102661054152432,-0.00259226199818282,0.0155668445407018,0.0569117993072195,185.0 184,0.0162806757273067,-0.044641636506989,0.023972783932857,-0.0228849640236156,-0.0249601584096305,-0.0260526059075917,-0.0323559322397657,-0.00259226199818282,0.0372320112089689,0.0320591578182113,265.0 185,-0.0745327855481821,0.0506801187398187,-0.0180618869484982,0.0081008722200108,-0.019456346976826,-0.0248000120604336,-0.0654906724765493,0.0343088588777263,0.0673172179146849,-0.0176461251598052,101.0 186,-0.0817978624502212,0.0506801187398187,0.0422955891888323,-0.0194420933298793,0.0397096259258226,0.0575580333902134,-0.0691723102806364,0.108111100629544,0.0471861678860197,-0.0383566597339788,137.0 187,-0.067267708646143,-0.044641636506989,-0.0547074974604488,-0.0263278347173518,-0.0758704141630723,-0.082106180567918,0.0486400994501499,-0.076394503750001,-0.0868289932162924,-0.104630370371334,143.0 188,0.00538306037424807,-0.044641636506989,-0.00297251791416553,0.0494153205448459,0.0741084473808508,0.0707102687853738,0.0449584616460628,-0.00259226199818282,-0.00149858682029207,-0.0093619113301358,141.0 
189,-0.00188201652779104,-0.044641636506989,-0.0665634302731387,0.00121513083253827,-0.00294491267841247,0.00307020103883484,0.0118237214092792,-0.00259226199818282,-0.0202887477516296,-0.0259303389894746,79.0 190,0.00901559882526763,-0.044641636506989,-0.0126728265790937,0.0287580963824284,-0.0180803941186249,-0.005071658967693,-0.0470824834561139,0.0343088588777263,0.0233748412798208,-0.0052198044153011,292.0 191,-0.00551455497881059,0.0506801187398187,-0.041773752573878,-0.0435421881860331,-0.0799982727376757,-0.0761563597939169,-0.0323559322397657,-0.0394933828740919,0.0102256424049578,-0.0093619113301358,178.0 192,0.0562385986885218,0.0506801187398187,-0.030995631835069,0.0081008722200108,0.0190703330528056,0.0212328118226277,0.0339135482338016,-0.0394933828740919,-0.0295276227417736,-0.0590671943081523,91.0 193,0.00901559882526763,0.0506801187398187,-0.00512814206192736,-0.0641994123484507,0.0699805888062474,0.0838625041805342,-0.0397192078479398,0.0712099797536354,0.0395398780720242,0.0196328370737072,116.0 194,-0.067267708646143,-0.044641636506989,-0.0590187457559724,0.0322009670761646,-0.051103262715452,-0.0495387405418066,-0.0102661054152432,-0.0394933828740919,0.00200784054982379,0.0237749439885419,86.0 195,0.0271782910803654,0.0506801187398187,0.0250505960067379,0.0149866136074833,0.0259500973438113,0.048476727998317,-0.0397192078479398,0.0343088588777263,0.00783714230182385,0.0237749439885419,122.0 196,-0.0236772472339084,-0.044641636506989,-0.0460850008694016,-0.0332135761048244,0.0328298616348169,0.0362639379885253,0.0375951860378887,-0.00259226199818282,-0.0332487872476258,0.0113486232440377,72.0 197,0.0489735217864827,0.0506801187398187,0.00349435452911985,0.0700725447072635,-0.00844872411121698,0.0134041002778894,-0.0544457590642881,0.0343088588777263,0.0133159679089277,0.036201264733046,129.0 
198,-0.0527375548420648,-0.044641636506989,0.0541515220015222,-0.0263278347173518,-0.0552311212900554,-0.03388131745233,-0.0139477432193303,-0.0394933828740919,-0.0740888714915354,-0.0590671943081523,142.0 199,0.0417084448844436,-0.044641636506989,-0.0450071887955207,0.0344962143200845,0.0438374845004259,-0.0157187066685371,0.0375951860378887,-0.0144006206784737,0.089898693277671,0.00720651632920303,90.0 200,0.0562385986885218,-0.044641636506989,-0.0579409336820915,-0.00796585769556799,0.0520932016496327,0.0491030249218961,0.056003375058324,-0.0214118336448964,-0.0283202425479987,0.0444854785627154,158.0 201,-0.034574862586967,0.0506801187398187,-0.0557853095343297,-0.015999222636143,-0.00982467696941811,-0.00788999512379879,0.0375951860378887,-0.0394933828740919,-0.0529587932392004,0.0279170509033766,39.0 202,0.0816663678456587,0.0506801187398187,0.00133873038135806,0.0356438377699009,0.126394655992494,0.0910649188016934,0.0191869970174533,0.0343088588777263,0.0844952822124031,-0.0300724459043093,196.0 203,-0.00188201652779104,0.0506801187398187,0.0304396563761424,0.0528581912385822,0.0397096259258226,0.0566185880048449,-0.0397192078479398,0.0712099797536354,0.0253931349154494,0.0279170509033766,222.0 204,0.110726675453815,0.0506801187398187,0.00672779075076256,0.0287580963824284,-0.0277120641260328,-0.00726369820021974,-0.0470824834561139,0.0343088588777263,0.00200784054982379,0.0776223338813931,277.0 205,-0.0309423241359475,-0.044641636506989,0.0466068374843559,0.0149866136074833,-0.0167044412604238,-0.0470335528474903,0.000778807997017968,-0.00259226199818282,0.0634559213720654,-0.0259303389894746,99.0 206,0.00175052192322852,0.0506801187398187,0.0261284080806188,-0.00911348124867051,0.0245741444856101,0.038455977221052,-0.0213110188275045,0.0343088588777263,0.00943640914607987,0.00306440941436832,196.0 
207,0.00901559882526763,-0.044641636506989,0.045529025410475,0.0287580963824284,0.0121905687618,-0.0138398158977999,0.0265502726256275,-0.0394933828740919,0.0461323310394148,0.036201264733046,202.0 208,0.030810829531385,-0.044641636506989,0.0401399650410705,0.076958286094736,0.0176943801946045,0.0378296802974729,-0.0286742944356786,0.0343088588777263,-0.00149858682029207,0.11904340302974,155.0 209,0.0380759064334241,0.0506801187398187,-0.0180618869484982,0.0666296740135272,-0.051103262715452,-0.0166581520539057,-0.0765355858888105,0.0343088588777263,-0.0119006848015081,-0.0135040182449705,77.0 210,0.00901559882526763,-0.044641636506989,0.0142724752679289,0.0149866136074833,0.054845107366035,0.0472241341511589,0.0707299262746723,-0.0394933828740919,-0.0332487872476258,-0.0590671943081523,191.0 211,0.0925639831987174,-0.044641636506989,0.0369065288194278,0.0218723549949558,-0.0249601584096305,-0.0166581520539057,0.000778807997017968,-0.0394933828740919,-0.0225121719296605,-0.0217882320746399,70.0 212,0.0671362140415805,-0.044641636506989,0.00349435452911985,0.0356438377699009,0.0493412959332305,0.0312535625998928,0.0707299262746723,-0.0394933828740919,-0.000609254186102297,0.0196328370737072,73.0 213,0.00175052192322852,-0.044641636506989,-0.0708746785686623,-0.0228849640236156,-0.00156895982021134,-0.00100072896442909,0.0265502726256275,-0.0394933828740919,-0.0225121719296605,0.00720651632920303,49.0 214,0.030810829531385,-0.044641636506989,-0.0331512559828308,-0.0228849640236156,-0.0469754041408486,-0.0811667351825494,0.103864666511456,-0.076394503750001,-0.0398095943643375,-0.0549250873933176,65.0 215,0.0271782910803654,0.0506801187398187,0.0940305687351156,0.0976155102571536,-0.0345918284170385,-0.0320024266815928,-0.0434008456520269,-0.00259226199818282,0.0366457977933988,0.106617082285236,263.0 
216,0.0126481372762872,0.0506801187398187,0.0358287167455469,0.0494153205448459,0.0534691545078339,0.0741549018650587,-0.0691723102806364,0.145012221505454,0.0456008084141249,0.0486275854775501,248.0 217,0.0744012909436196,-0.044641636506989,0.0315174684500233,0.10105838095089,0.0465893902168282,0.0368902349121043,0.0155053592133662,-0.00259226199818282,0.0336568129023847,0.0444854785627154,296.0 218,-0.0418399394890061,-0.044641636506989,-0.0654856181992578,-0.0400993174922969,-0.00569681839481472,0.014343545663258,-0.0434008456520269,0.0343088588777263,0.00702686254915195,-0.0135040182449705,214.0 219,-0.0890629393522603,-0.044641636506989,-0.041773752573878,-0.0194420933298793,-0.0662387441556644,-0.0742774690231797,0.0081420836051921,-0.0394933828740919,0.00114379737951254,-0.0300724459043093,185.0 220,0.0235457526293458,0.0506801187398187,-0.0396181284261162,-0.00567061055493425,-0.0483513569990498,-0.0332550205287509,0.0118237214092792,-0.0394933828740919,-0.101643547945512,-0.0673514081378217,78.0 221,-0.0454724779400257,-0.044641636506989,-0.0385403163522353,-0.0263278347173518,-0.0153284884022226,0.000878161806308105,-0.0323559322397657,-0.00259226199818282,0.00114379737951254,-0.0383566597339788,93.0 222,-0.0236772472339084,0.0506801187398187,-0.0256065714656645,0.0425295791573734,-0.0538551684318543,-0.0476598497710694,-0.0213110188275045,-0.0394933828740919,0.00114379737951254,0.0196328370737072,252.0 223,-0.099960554705319,-0.044641636506989,-0.0234509473179027,-0.0641994123484507,-0.0579830270064577,-0.0601857882426507,0.0118237214092792,-0.0394933828740919,-0.0181182673078967,-0.0507829804784829,150.0 224,-0.0273097856849279,-0.044641636506989,-0.0665634302731387,-0.112399602060758,-0.0497273098572509,-0.0413968805352788,0.000778807997017968,-0.0394933828740919,-0.0358167281015492,-0.0093619113301358,77.0 
225,0.030810829531385,0.0506801187398187,0.0325952805239042,0.0494153205448459,-0.040095639849843,-0.0435889197678055,-0.0691723102806364,0.0343088588777263,0.0630166151147464,0.00306440941436832,208.0 226,-0.103593093156339,0.0506801187398187,-0.0460850008694016,-0.0263278347173518,-0.0249601584096305,-0.0248000120604336,0.0302319104297145,-0.0394933828740919,-0.0398095943643375,-0.0549250873933176,77.0 227,0.0671362140415805,0.0506801187398187,-0.0299178197611881,0.0574486853821349,-0.000193006962010205,-0.0157187066685371,0.0744115640787594,-0.0505637191368646,-0.0384591123013538,0.00720651632920303,108.0 228,-0.0527375548420648,-0.044641636506989,-0.0126728265790937,-0.0607565416547144,-0.000193006962010205,0.00808057642746734,0.0118237214092792,-0.00259226199818282,-0.0271286455543265,-0.0507829804784829,160.0 229,-0.0273097856849279,0.0506801187398187,-0.0159062628007364,-0.0297707054110881,0.00393485161259318,-0.000687580502639557,0.0412768238419757,-0.0394933828740919,-0.0236445575721341,0.0113486232440377,53.0 230,-0.0382074010379866,0.0506801187398187,0.0713965151836166,-0.0573136709609782,0.153913713156516,0.155886650392127,0.000778807997017968,0.0719480021711535,0.0502764933899896,0.0693381200517237,220.0 231,0.00901559882526763,-0.044641636506989,-0.030995631835069,0.0218723549949558,0.00806271018719657,0.00870687335104641,0.00446044580110504,-0.00259226199818282,0.00943640914607987,0.0113486232440377,154.0 232,0.0126481372762872,0.0506801187398187,0.000260918307477141,-0.0114087283893043,0.0397096259258226,0.0572448849284239,-0.0397192078479398,0.0560805201945126,0.024052583226893,0.0320591578182113,259.0 233,0.0671362140415805,-0.044641636506989,0.0369065288194278,-0.0504279295735057,-0.0235842055514294,-0.034507614375909,0.0486400994501499,-0.0394933828740919,-0.0259524244351894,-0.0383566597339788,90.0 
234,0.0453409833354632,-0.044641636506989,0.0390621529671896,0.0459724498511097,0.00668675732899544,-0.0241737151368545,0.0081420836051921,-0.0125555646346783,0.0643282330236709,0.0569117993072195,246.0 235,0.0671362140415805,0.0506801187398187,-0.0148284507268555,0.0585963091762383,-0.0593589798646588,-0.034507614375909,-0.0618090346724622,0.012906208769699,-0.00514530798026311,0.0486275854775501,124.0 236,0.0271782910803654,-0.044641636506989,0.00672779075076256,0.0356438377699009,0.0796122588136553,0.0707102687853738,0.0155053592133662,0.0343088588777263,0.0406722637144977,0.0113486232440377,67.0 237,0.0562385986885218,-0.044641636506989,-0.0687190544209005,-0.0687899065952895,-0.000193006962010205,-0.00100072896442909,0.0449584616460628,-0.0376483268302965,-0.0483617248028919,-0.00107769750046639,72.0 238,0.0344433679824045,0.0506801187398187,-0.00943939035745095,0.0597439326260547,-0.0359677812752396,-0.00757684666200928,-0.0765355858888105,0.0712099797536354,0.0110081010458725,-0.0217882320746399,257.0 239,0.0235457526293458,-0.044641636506989,0.0196615356373334,-0.0125563519424068,0.0837401173882587,0.0387691256828415,0.0633666506664982,-0.00259226199818282,0.0660482061630984,0.0486275854775501,262.0 240,0.0489735217864827,0.0506801187398187,0.0746299514052593,0.0666296740135272,-0.00982467696941811,-0.00225332281158722,-0.0434008456520269,0.0343088588777263,0.0336568129023847,0.0196328370737072,275.0 241,0.030810829531385,0.0506801187398187,-0.00836157828357004,0.00465800152627453,0.0149424744782022,0.0274957810584184,0.0081420836051921,-0.00812743012956918,-0.0295276227417736,0.0569117993072195,177.0 242,-0.103593093156339,0.0506801187398187,-0.0234509473179027,-0.0228849640236156,-0.0868780370286814,-0.0677013513255995,-0.0176293810234174,-0.0394933828740919,-0.0781409106690696,-0.0714935150526564,71.0 
243,0.0162806757273067,0.0506801187398187,-0.0460850008694016,0.0115437429137471,-0.0332158755588373,-0.0160318551303266,-0.0102661054152432,-0.00259226199818282,-0.0439854025655911,-0.0424987666488135,47.0 244,-0.0600026317441039,0.0506801187398187,0.0541515220015222,-0.0194420933298793,-0.0497273098572509,-0.0489124436182275,0.0228686348215404,-0.0394933828740919,-0.0439854025655911,-0.0052198044153011,187.0 245,-0.0273097856849279,-0.044641636506989,-0.0353068801305926,-0.0297707054110881,-0.0566070741482565,-0.058620045933703,0.0302319104297145,-0.0394933828740919,-0.0498684677352306,-0.129483011860342,125.0 246,0.0417084448844436,-0.044641636506989,-0.0320734439089499,-0.061904165207817,0.0796122588136553,0.0509819156926333,0.056003375058324,-0.00997248617336464,0.0450661683362615,-0.0590671943081523,78.0 247,-0.0817978624502212,-0.044641636506989,-0.0816527993074713,-0.0400993174922969,0.00255889875439205,-0.0185370428246429,0.0707299262746723,-0.0394933828740919,-0.0109044358473771,-0.09220404962683,51.0 248,-0.0418399394890061,-0.044641636506989,0.0476846495582368,0.0597439326260547,0.127770608850695,0.128016437292858,-0.0249926566315915,0.108111100629544,0.0638931206368394,0.0403433716478807,258.0 249,-0.0127796318808497,-0.044641636506989,0.0606183944448076,0.0528581912385822,0.0479653430750293,0.0293746718291555,-0.0176293810234174,0.0343088588777263,0.0702112981933102,0.00720651632920303,215.0 250,0.0671362140415805,-0.044641636506989,0.056307146149284,0.0735154154009998,-0.0139525355440215,-0.039204841302752,-0.0323559322397657,-0.00259226199818282,0.0757375884575476,0.036201264733046,303.0 251,-0.0527375548420648,0.0506801187398187,0.098341817030639,0.0872868981759448,0.0603489187988395,0.0487898764601065,-0.0581273968683752,0.108111100629544,0.0844952822124031,0.0403433716478807,243.0 
252,0.00538306037424807,-0.044641636506989,0.0595405823709267,-0.0561660474078757,0.0245741444856101,0.0528608064633705,-0.0434008456520269,0.0509143632718854,-0.00421985970694603,-0.0300724459043093,91.0 253,0.0816663678456587,-0.044641636506989,0.0336730925977851,0.0081008722200108,0.0520932016496327,0.0566185880048449,-0.0176293810234174,0.0343088588777263,0.0348641930961596,0.0693381200517237,150.0 254,0.030810829531385,0.0506801187398187,0.056307146149284,0.076958286094736,0.0493412959332305,-0.0122740735888523,-0.0360375700438527,0.0712099797536354,0.120053382001538,0.0900486546258972,310.0 255,0.00175052192322852,-0.044641636506989,-0.0654856181992578,-0.00567061055493425,-0.00707277125301585,-0.0194764882100115,0.0412768238419757,-0.0394933828740919,-0.003303712578677,0.00720651632920303,153.0 256,-0.0491050163910452,-0.044641636506989,0.160854917315731,-0.0469850588797694,-0.0290880169842339,-0.019789636671801,-0.0470824834561139,0.0343088588777263,0.028016506523264,0.0113486232440377,346.0 257,-0.0273097856849279,0.0506801187398187,-0.0557853095343297,0.0253152256886921,-0.00707277125301585,-0.0235474182132754,0.052321737254237,-0.0394933828740919,-0.00514530798026311,-0.0507829804784829,63.0 258,0.0780338293946392,0.0506801187398187,-0.0245287593917836,-0.0423945646329306,0.00668675732899544,0.0528608064633705,-0.0691723102806364,0.0808042711813717,-0.0371283460104736,0.0569117993072195,89.0 259,0.0126481372762872,-0.044641636506989,-0.0363846922044735,0.0425295791573734,-0.0139525355440215,0.0129343775852051,-0.0268334755336351,0.00515697338575809,-0.0439854025655911,0.00720651632920303,50.0 260,0.0417084448844436,-0.044641636506989,-0.00836157828357004,-0.0573136709609782,0.00806271018719657,-0.0313761297580137,0.151725957964588,-0.076394503750001,-0.0802365402489018,-0.0176461251598052,39.0 
261,0.0489735217864827,-0.044641636506989,-0.041773752573878,0.104501251644626,0.0355817673512192,-0.0257394574458021,0.177497422593197,-0.076394503750001,-0.0129079422541688,0.0154907301588724,103.0 262,-0.0164121703318693,0.0506801187398187,0.127442743025423,0.0976155102571536,0.0163184273364034,0.0174750302811533,-0.0213110188275045,0.0343088588777263,0.0348641930961596,0.00306440941436832,308.0 263,-0.0745327855481821,0.0506801187398187,-0.0773415510119477,-0.0469850588797694,-0.0469754041408486,-0.0326287236051719,0.00446044580110504,-0.0394933828740919,-0.072128454601956,-0.0176461251598052,116.0 264,0.0344433679824045,0.0506801187398187,0.0282840322283806,-0.0332135761048244,-0.0455994512826475,-0.00976888589453599,-0.050764121260201,-0.00259226199818282,-0.0594726974107223,-0.0217882320746399,145.0 265,-0.034574862586967,0.0506801187398187,-0.0256065714656645,-0.0171468461892456,0.00118294589619092,-0.00287961973516629,0.0081420836051921,-0.015507654304751,0.0148227108412663,0.0403433716478807,74.0 266,-0.0527375548420648,0.0506801187398187,-0.0622521819776151,0.0115437429137471,-0.00844872411121698,-0.0366996536084358,0.122272855531891,-0.076394503750001,-0.0868289932162924,0.00306440941436832,45.0 267,0.0598711371395414,-0.044641636506989,-0.000816893766403737,-0.0848566365108683,0.075484400239052,0.0794784257154807,0.00446044580110504,0.0343088588777263,0.0233748412798208,0.0279170509033766,115.0 268,0.063503675590561,0.0506801187398187,0.088641508365711,0.0700725447072635,0.0204462859110067,0.0375165318356834,-0.050764121260201,0.0712099797536354,0.0293004132685869,0.0734802269665584,264.0 269,0.00901559882526763,-0.044641636506989,-0.0320734439089499,-0.0263278347173518,0.0424615316422248,-0.0103951828181151,0.159089233572762,-0.076394503750001,-0.0119006848015081,-0.0383566597339788,87.0 
270,0.00538306037424807,0.0506801187398187,0.0304396563761424,0.0838440274822086,-0.0373437341334407,-0.0473467013092799,0.0155053592133662,-0.0394933828740919,0.00864028293306308,0.0154907301588724,202.0 271,0.0380759064334241,0.0506801187398187,0.00888341489852436,0.0425295791573734,-0.0428475455662452,-0.0210422305189592,-0.0397192078479398,-0.00259226199818282,-0.0181182673078967,0.00720651632920303,127.0 272,0.0126481372762872,-0.044641636506989,0.00672779075076256,-0.0561660474078757,-0.0758704141630723,-0.0664487574784414,-0.0213110188275045,-0.0376483268302965,-0.0181182673078967,-0.09220404962683,182.0 273,0.0744012909436196,0.0506801187398187,-0.02021751109626,0.0459724498511097,0.0741084473808508,0.0328193049088404,-0.0360375700438527,0.0712099797536354,0.106354276741726,0.036201264733046,241.0 274,0.0162806757273067,-0.044641636506989,-0.0245287593917836,0.0356438377699009,-0.00707277125301585,-0.00319276819695581,-0.0139477432193303,-0.00259226199818282,0.0155668445407018,0.0154907301588724,66.0 275,-0.00551455497881059,0.0506801187398187,-0.0115950145052127,0.0115437429137471,-0.0222082526932283,-0.0154055582067476,-0.0213110188275045,-0.00259226199818282,0.0110081010458725,0.0693381200517237,94.0 276,0.0126481372762872,-0.044641636506989,0.0261284080806188,0.063186803319791,0.125018703134293,0.0916912157252725,0.0633666506664982,-0.00259226199818282,0.057572856202426,-0.0217882320746399,283.0 277,-0.034574862586967,-0.044641636506989,-0.0590187457559724,0.00121513083253827,-0.0538551684318543,-0.078035250564654,0.0670482884705852,-0.076394503750001,-0.02139368094036,0.0154907301588724,64.0 278,0.0671362140415805,0.0506801187398187,-0.0363846922044735,-0.0848566365108683,-0.00707277125301585,0.01966706951368,-0.0544457590642881,0.0343088588777263,0.00114379737951254,0.0320591578182113,102.0 
279,0.0380759064334241,0.0506801187398187,-0.0245287593917836,0.00465800152627453,-0.0263361112678317,-0.0263657543693812,0.0155053592133662,-0.0394933828740919,-0.0159982677581387,-0.0259303389894746,200.0 280,0.00901559882526763,0.0506801187398187,0.0185837235634525,0.0390867084636372,0.0176943801946045,0.0105857641217836,0.0191869970174533,-0.00259226199818282,0.0163049527999418,-0.0176461251598052,265.0 281,-0.0926954778032799,0.0506801187398187,-0.0902752958985185,-0.0573136709609782,-0.0249601584096305,-0.0304366843726451,-0.00658446761115617,-0.00259226199818282,0.024052583226893,0.00306440941436832,94.0 282,0.0707687524926,-0.044641636506989,-0.00512814206192736,-0.00567061055493425,0.0878679759628621,0.102964560349696,0.0118237214092792,0.0343088588777263,-0.0089440189577978,0.0279170509033766,230.0 283,-0.0164121703318693,-0.044641636506989,-0.052551873312687,-0.0332135761048244,-0.0442234984244464,-0.0363865051466462,0.0191869970174533,-0.0394933828740919,-0.0683297436244215,-0.0300724459043093,181.0 284,0.0417084448844436,0.0506801187398187,-0.0223731352440218,0.0287580963824284,-0.0662387441556644,-0.0451546620767532,-0.0618090346724622,-0.00259226199818282,0.00286377051894013,-0.0549250873933176,156.0 285,0.0126481372762872,-0.044641636506989,-0.02021751109626,-0.015999222636143,0.0121905687618,0.0212328118226277,-0.0765355858888105,0.108111100629544,0.0598807230654812,-0.0217882320746399,233.0 286,-0.0382074010379866,-0.044641636506989,-0.0547074974604488,-0.0779708951233958,-0.0332158755588373,-0.0864902590329714,0.140681044552327,-0.076394503750001,-0.0191970476139445,-0.0052198044153011,60.0 287,0.0453409833354632,-0.044641636506989,-0.00620595413580824,-0.015999222636143,0.125018703134293,0.125198101136752,0.0191869970174533,0.0343088588777263,0.0324332257796019,-0.0052198044153011,219.0 
288,0.0707687524926,0.0506801187398187,-0.0169840748746173,0.0218723549949558,0.0438374845004259,0.0563054395430553,0.0375951860378887,-0.00259226199818282,-0.0702093127286876,-0.0176461251598052,80.0 289,-0.0745327855481821,0.0506801187398187,0.0552293340754031,-0.0400993174922969,0.0534691545078339,0.05317395492516,-0.0434008456520269,0.0712099797536354,0.061237907519701,-0.0342145528191441,68.0 290,0.0598711371395414,0.0506801187398187,0.0767855755530211,0.0253152256886921,0.00118294589619092,0.0168487333575743,-0.0544457590642881,0.0343088588777263,0.0299356483965325,0.0444854785627154,332.0 291,0.0744012909436196,-0.044641636506989,0.0185837235634525,0.063186803319791,0.0617248716570406,0.0428400556861055,0.0081420836051921,-0.00259226199818282,0.0580391276638951,-0.0590671943081523,248.0 292,0.00901559882526763,-0.044641636506989,-0.0223731352440218,-0.0320659525517218,-0.0497273098572509,-0.0686407967109681,0.0780932018828464,-0.0708593356186146,-0.0629129499162512,-0.0383566597339788,84.0 293,-0.0709002470971626,-0.044641636506989,0.0929527566612346,0.0126913664668496,0.0204462859110067,0.0425269072243159,0.000778807997017968,0.000359827671889909,-0.0545441527110952,-0.00107769750046639,200.0 294,0.0235457526293458,0.0506801187398187,-0.030995631835069,-0.00567061055493425,-0.0167044412604238,0.0177881787429428,-0.0323559322397657,-0.00259226199818282,-0.0740888714915354,-0.0342145528191441,55.0 295,-0.0527375548420648,0.0506801187398187,0.0390621529671896,-0.0400993174922969,-0.00569681839481472,-0.0129003705124313,0.0118237214092792,-0.0394933828740919,0.0163049527999418,0.00306440941436832,85.0 296,0.0671362140415805,-0.044641636506989,-0.0611743699037342,-0.0400993174922969,-0.0263361112678317,-0.024486863598644,0.0339135482338016,-0.0394933828740919,-0.0561575730950062,-0.0590671943081523,89.0 
297,0.00175052192322852,-0.044641636506989,-0.00836157828357004,-0.0641994123484507,-0.0387196869916418,-0.024486863598644,0.00446044580110504,-0.0394933828740919,-0.0646830224644503,-0.0549250873933176,31.0 298,0.0235457526293458,0.0506801187398187,-0.0374625042783544,-0.0469850588797694,-0.0910058956032848,-0.0755300628703378,-0.0323559322397657,-0.0394933828740919,-0.0307512098645563,-0.0135040182449705,129.0 299,0.0380759064334241,0.0506801187398187,-0.0137506386529745,-0.015999222636143,-0.0359677812752396,-0.0219816759043277,-0.0139477432193303,-0.00259226199818282,-0.0259524244351894,-0.00107769750046639,83.0 300,0.0162806757273067,-0.044641636506989,0.0735521393313785,-0.0412469410453994,-0.00432086553661359,-0.0135266674360104,-0.0139477432193303,-0.00111621716314646,0.0428956878925287,0.0444854785627154,275.0 301,-0.00188201652779104,0.0506801187398187,-0.0245287593917836,0.0528581912385822,0.0273260502020124,0.0300009687527346,0.0302319104297145,-0.00259226199818282,-0.02139368094036,0.036201264733046,65.0 302,0.0126481372762872,-0.044641636506989,0.0336730925977851,0.0333485905259811,0.0300779559184146,0.0271826325966288,-0.0029028298070691,0.00884708547334898,0.0311929907028023,0.0279170509033766,198.0 303,0.0744012909436196,-0.044641636506989,0.034750904671666,0.0941726395634173,0.0575970130824372,0.0202933664372591,0.0228686348215404,-0.00259226199818282,0.0738021469200488,-0.0217882320746399,236.0 304,0.0417084448844436,0.0506801187398187,-0.0385403163522353,0.0528581912385822,0.0768603530972531,0.116429944206646,-0.0397192078479398,0.0712099797536354,-0.0225121719296605,-0.0135040182449705,253.0 305,-0.00914709342983014,0.0506801187398187,-0.0396181284261162,-0.0400993174922969,-0.00844872411121698,0.0162224364339952,-0.0654906724765493,0.0712099797536354,0.0177634778671173,-0.0673514081378217,124.0 
306,0.00901559882526763,0.0506801187398187,-0.00189470584028465,0.0218723549949558,-0.0387196869916418,-0.0248000120604336,-0.00658446761115617,-0.0394933828740919,-0.0398095943643375,-0.0135040182449705,44.0 307,0.0671362140415805,0.0506801187398187,-0.030995631835069,0.00465800152627453,0.0245741444856101,0.0356376410649462,-0.0286742944356786,0.0343088588777263,0.0233748412798208,0.0817644407962278,172.0 308,0.00175052192322852,-0.044641636506989,-0.0460850008694016,-0.0332135761048244,-0.07311850844667,-0.0814798836443389,0.0449584616460628,-0.0693832907835783,-0.0611765950943345,-0.0797777288823259,114.0 309,-0.00914709342983014,0.0506801187398187,0.00133873038135806,-0.00222773986119799,0.0796122588136553,0.0700839718617947,0.0339135482338016,-0.00259226199818282,0.0267142576335128,0.0817644407962278,142.0 310,-0.00551455497881059,-0.044641636506989,0.0649296427403312,0.0356438377699009,-0.00156895982021134,0.0149698425868371,-0.0139477432193303,0.000728838880648992,-0.0181182673078967,0.0320591578182113,109.0 311,0.096196521649737,-0.044641636506989,0.0401399650410705,-0.0573136709609782,0.0452134373586271,0.0606895180081088,-0.0213110188275045,0.0361539149215217,0.0125531528133893,0.0237749439885419,180.0 312,-0.0745327855481821,-0.044641636506989,-0.0234509473179027,-0.00567061055493425,-0.0208322998350272,-0.0141529643595894,0.0155053592133662,-0.0394933828740919,-0.0384591123013538,-0.0300724459043093,144.0 313,0.0598711371395414,0.0506801187398187,0.0530737099276413,0.0528581912385822,0.0328298616348169,0.01966706951368,-0.0102661054152432,0.0343088588777263,0.0552050380896167,-0.00107769750046639,163.0 314,-0.0236772472339084,-0.044641636506989,0.0401399650410705,-0.0125563519424068,-0.00982467696941811,-0.00100072896442909,-0.0029028298070691,-0.00259226199818282,-0.0119006848015081,-0.0383566597339788,147.0 
315,0.00901559882526763,-0.044641636506989,-0.02021751109626,-0.0538708002672419,0.0314539087766158,0.0206065148990486,0.056003375058324,-0.0394933828740919,-0.0109044358473771,-0.00107769750046639,97.0 316,0.0162806757273067,0.0506801187398187,0.0142724752679289,0.00121513083253827,0.00118294589619092,-0.0213553789807487,-0.0323559322397657,0.0343088588777263,0.0749683360277342,0.0403433716478807,220.0 317,0.0199132141783263,-0.044641636506989,-0.0342290680567117,0.055153438482502,0.0672286830898452,0.0741549018650587,-0.00658446761115617,0.0328328140426899,0.0247253233428045,0.0693381200517237,190.0 318,0.0889314447476978,-0.044641636506989,0.00672779075076256,0.0253152256886921,0.0300779559184146,0.00870687335104641,0.0633666506664982,-0.0394933828740919,0.00943640914607987,0.0320591578182113,109.0 319,0.0199132141783263,-0.044641636506989,0.00457216660300077,0.0459724498511097,-0.0180803941186249,-0.0545491159304391,0.0633666506664982,-0.0394933828740919,0.0286607203138089,0.0610539062220542,191.0 320,-0.0236772472339084,-0.044641636506989,0.0304396563761424,-0.00567061055493425,0.0823641645300576,0.092004364187062,-0.0176293810234174,0.0712099797536354,0.0330470723549341,0.00306440941436832,122.0 321,0.096196521649737,-0.044641636506989,0.0519958978537604,0.0792535333386559,0.054845107366035,0.0365770864503148,-0.0765355858888105,0.141322109417863,0.098646374304928,0.0610539062220542,230.0 322,0.0235457526293458,0.0506801187398187,0.0616962065186885,0.0620391798699746,0.0245741444856101,-0.0360733566848567,-0.0912621371051588,0.155344535350708,0.133395733837469,0.0817644407962278,242.0 323,0.0707687524926,0.0506801187398187,-0.00728376620968916,0.0494153205448459,0.0603489187988395,-0.00444536204411395,-0.0544457590642881,0.108111100629544,0.129019411600168,0.0569117993072195,248.0 
324,0.030810829531385,-0.044641636506989,0.00564997867688165,0.0115437429137471,0.0782363059554542,0.077912683406533,-0.0434008456520269,0.108111100629544,0.0660482061630984,0.0196328370737072,249.0 325,-0.00188201652779104,-0.044641636506989,0.0541515220015222,-0.0664946594890845,0.0727324945226497,0.0566185880048449,-0.0434008456520269,0.0848633944777217,0.0844952822124031,0.0486275854775501,192.0 326,0.0453409833354632,0.0506801187398187,-0.00836157828357004,-0.0332135761048244,-0.00707277125301585,0.00119131026809764,-0.0397192078479398,0.0343088588777263,0.0299356483965325,0.0279170509033766,131.0 327,0.0744012909436196,-0.044641636506989,0.114508998138853,0.0287580963824284,0.0245741444856101,0.0249905933641021,0.0191869970174533,-0.00259226199818282,-0.000609254186102297,-0.0052198044153011,237.0 328,-0.0382074010379866,-0.044641636506989,0.067085266888093,-0.0607565416547144,-0.0290880169842339,-0.0232342697514859,-0.0102661054152432,-0.00259226199818282,-0.00149858682029207,0.0196328370737072,78.0 329,-0.0127796318808497,0.0506801187398187,-0.0557853095343297,-0.00222773986119799,-0.0277120641260328,-0.029184090525487,0.0191869970174533,-0.0394933828740919,-0.0170521046047435,0.0444854785627154,135.0 330,0.00901559882526763,0.0506801187398187,0.0304396563761424,0.0425295791573734,-0.00294491267841247,0.0368902349121043,-0.0654906724765493,0.0712099797536354,-0.0236445575721341,0.0154907301588724,244.0 331,0.0816663678456587,0.0506801187398187,-0.0256065714656645,-0.0366564467985606,-0.0703666027302678,-0.0464072559239113,-0.0397192078479398,-0.00259226199818282,-0.0411803851880079,-0.0052198044153011,199.0 332,0.030810829531385,-0.044641636506989,0.104808689473925,0.076958286094736,-0.0112006298276192,-0.0113346282034837,-0.0581273968683752,0.0343088588777263,0.0571041874478439,0.036201264733046,270.0 
333,0.0271782910803654,0.0506801187398187,-0.00620595413580824,0.0287580963824284,-0.0167044412604238,-0.00162702588800815,-0.0581273968683752,0.0343088588777263,0.0293004132685869,0.0320591578182113,164.0 334,-0.0600026317441039,0.0506801187398187,-0.0471628129432825,-0.0228849640236156,-0.071742555588469,-0.0576806005483345,-0.00658446761115617,-0.0394933828740919,-0.0629129499162512,-0.0549250873933176,72.0 335,0.00538306037424807,-0.044641636506989,-0.0482406250171634,-0.0125563519424068,0.00118294589619092,-0.00663740127664067,0.0633666506664982,-0.0394933828740919,-0.0514005352605825,-0.0590671943081523,96.0 336,-0.0200447087828888,-0.044641636506989,0.0854080721440683,-0.0366564467985606,0.0919958345374655,0.0894991764927457,-0.0618090346724622,0.145012221505454,0.0809479135112756,0.0527696923923848,306.0 337,0.0199132141783263,0.0506801187398187,-0.0126728265790937,0.0700725447072635,-0.0112006298276192,0.00714113104209875,-0.0397192078479398,0.0343088588777263,0.00538436996854573,0.00306440941436832,91.0 338,-0.0636351701951234,-0.044641636506989,-0.0331512559828308,-0.0332135761048244,0.00118294589619092,0.0240511479787335,-0.0249926566315915,-0.00259226199818282,-0.0225121719296605,-0.0590671943081523,214.0 339,0.0271782910803654,-0.044641636506989,-0.00728376620968916,-0.0504279295735057,0.075484400239052,0.0566185880048449,0.0339135482338016,-0.00259226199818282,0.0434431722527813,0.0154907301588724,95.0 340,-0.0164121703318693,-0.044641636506989,-0.0137506386529745,0.132044217194516,-0.00982467696941811,-0.00381906512053488,0.0191869970174533,-0.0394933828740919,-0.0358167281015492,-0.0300724459043093,216.0 341,0.030810829531385,0.0506801187398187,0.0595405823709267,0.0563010619323185,-0.0222082526932283,0.00119131026809764,-0.0323559322397657,-0.00259226199818282,-0.0247911874324607,-0.0176461251598052,263.0 
342,0.0562385986885218,0.0506801187398187,0.0218171597850952,0.0563010619323185,-0.00707277125301585,0.0181013272047324,-0.0323559322397657,-0.00259226199818282,-0.0236445575721341,0.0237749439885419,178.0 343,-0.0200447087828888,-0.044641636506989,0.0185837235634525,0.090729768869681,0.00393485161259318,0.00870687335104641,0.0375951860378887,-0.0394933828740919,-0.0578000656756125,0.00720651632920303,113.0 344,-0.107225631607358,-0.044641636506989,-0.0115950145052127,-0.0400993174922969,0.0493412959332305,0.0644472995495832,-0.0139477432193303,0.0343088588777263,0.00702686254915195,-0.0300724459043093,200.0 345,0.0816663678456587,0.0506801187398187,-0.00297251791416553,-0.0332135761048244,0.0424615316422248,0.057871181852003,-0.0102661054152432,0.0343088588777263,-0.000609254186102297,-0.00107769750046639,139.0 346,0.00538306037424807,0.0506801187398187,0.0175059114895716,0.0322009670761646,0.127770608850695,0.127390140369279,-0.0213110188275045,0.0712099797536354,0.062575181458056,0.0154907301588724,139.0 347,0.0380759064334241,0.0506801187398187,-0.0299178197611881,-0.0745280244296595,-0.0125765826858204,-0.0125872220506418,0.00446044580110504,-0.00259226199818282,0.00371173823343597,-0.0300724459043093,88.0 348,0.030810829531385,-0.044641636506989,-0.02021751109626,-0.00567061055493425,-0.00432086553661359,-0.0294972389872765,0.0780932018828464,-0.0394933828740919,-0.0109044358473771,-0.00107769750046639,148.0 349,0.00175052192322852,0.0506801187398187,-0.0579409336820915,-0.0435421881860331,-0.0965097070360893,-0.0470335528474903,-0.098625412713333,0.0343088588777263,-0.0611765950943345,-0.0714935150526564,88.0 350,-0.0273097856849279,0.0506801187398187,0.0606183944448076,0.107944122338362,0.0121905687618,-0.0175975974392743,-0.0029028298070691,-0.00259226199818282,0.0702112981933102,0.135611830689079,243.0 
351,-0.0854304009012408,0.0506801187398187,-0.0406959404999971,-0.0332135761048244,-0.0813742255958769,-0.0695802420963367,-0.00658446761115617,-0.0394933828740919,-0.0578000656756125,-0.0424987666488135,71.0 352,0.0126481372762872,0.0506801187398187,-0.0719524906425432,-0.0469850588797694,-0.051103262715452,-0.0971373067338155,0.118591217727804,-0.076394503750001,-0.0202887477516296,-0.0383566597339788,77.0 353,-0.0527375548420648,-0.044641636506989,-0.0557853095343297,-0.0366564467985606,0.0892439288210632,-0.00319276819695581,0.0081420836051921,0.0343088588777263,0.132372649338676,0.00306440941436832,109.0 354,-0.0236772472339084,0.0506801187398187,0.045529025410475,0.0218723549949558,0.10988322169408,0.0888728795691667,0.000778807997017968,0.0343088588777263,0.0741925366900307,0.0610539062220542,272.0 355,-0.0745327855481821,0.0506801187398187,-0.00943939035745095,0.0149866136074833,-0.0373437341334407,-0.0216685274425382,-0.0139477432193303,-0.00259226199818282,-0.0332487872476258,0.0113486232440377,60.0 356,-0.00551455497881059,0.0506801187398187,-0.0331512559828308,-0.015999222636143,0.00806271018719657,0.0162224364339952,0.0155053592133662,-0.00259226199818282,-0.0283202425479987,-0.0756356219674911,54.0 357,-0.0600026317441039,0.0506801187398187,0.0498402737059986,0.0184294843012196,-0.0167044412604238,-0.0301235359108556,-0.0176293810234174,-0.00259226199818282,0.049768659920749,-0.0590671943081523,221.0 358,-0.0200447087828888,-0.044641636506989,-0.084886235529114,-0.0263278347173518,-0.0359677812752396,-0.0341944659141195,0.0412768238419757,-0.0516707527631419,-0.0823814832581028,-0.0466408735636482,90.0 359,0.0380759064334241,0.0506801187398187,0.00564997867688165,0.0322009670761646,0.00668675732899544,0.0174750302811533,-0.0249926566315915,0.0343088588777263,0.0148227108412663,0.0610539062220542,311.0 
360,0.0162806757273067,-0.044641636506989,0.0207393477112143,0.0218723549949558,-0.0139525355440215,-0.0132135189742209,-0.00658446761115617,-0.00259226199818282,0.0133159679089277,0.0403433716478807,281.0 361,0.0417084448844436,-0.044641636506989,-0.00728376620968916,0.0287580963824284,-0.0428475455662452,-0.0482861466946485,0.052321737254237,-0.076394503750001,-0.072128454601956,0.0237749439885419,182.0 362,0.0199132141783263,0.0506801187398187,0.104808689473925,0.0700725447072635,-0.0359677812752396,-0.0266789028311707,-0.0249926566315915,-0.00259226199818282,0.00371173823343597,0.0403433716478807,321.0 363,-0.0491050163910452,0.0506801187398187,-0.0245287593917836,6.75072794357462e-05,-0.0469754041408486,-0.0282446451401184,-0.0654906724765493,0.0284046795375808,0.0191990330785671,0.0113486232440377,58.0 364,0.00175052192322852,0.0506801187398187,-0.00620595413580824,-0.0194420933298793,-0.00982467696941811,0.00494909180957202,-0.0397192078479398,0.0343088588777263,0.0148227108412663,0.0983328684555666,262.0 365,0.0344433679824045,-0.044641636506989,-0.0385403163522353,-0.0125563519424068,0.0094386630453977,0.00526224027136155,-0.00658446761115617,-0.00259226199818282,0.0311929907028023,0.0983328684555666,206.0 366,-0.0454724779400257,0.0506801187398187,0.137143051690352,-0.015999222636143,0.0410855787840237,0.0318798595234718,-0.0434008456520269,0.0712099797536354,0.0710215779459822,0.0486275854775501,233.0 367,-0.00914709342983014,0.0506801187398187,0.17055522598066,0.0149866136074833,0.0300779559184146,0.033758750294209,-0.0213110188275045,0.0343088588777263,0.0336568129023847,0.0320591578182113,242.0 368,-0.0164121703318693,0.0506801187398187,0.00241654245523897,0.0149866136074833,0.0218222387692079,-0.0100820343563255,-0.0249926566315915,0.0343088588777263,0.085533121187439,0.0817644407962278,123.0 
369,-0.00914709342983014,-0.044641636506989,0.0379843408933087,-0.0400993174922969,-0.0249601584096305,-0.00381906512053488,-0.0434008456520269,0.0158582984397717,-0.00514530798026311,0.0279170509033766,167.0 370,0.0199132141783263,-0.044641636506989,-0.0579409336820915,-0.0573136709609782,-0.00156895982021134,-0.0125872220506418,0.0744115640787594,-0.0394933828740919,-0.0611765950943345,-0.0756356219674911,63.0 371,0.0526060602375023,0.0506801187398187,-0.00943939035745095,0.0494153205448459,0.0507172487914316,-0.019163339748222,-0.0139477432193303,0.0343088588777263,0.119343994203787,-0.0176461251598052,197.0 372,-0.0273097856849279,0.0506801187398187,-0.0234509473179027,-0.015999222636143,0.0135665216200011,0.0127778033543103,0.0265502726256275,-0.00259226199818282,-0.0109044358473771,-0.0217882320746399,71.0 373,-0.0745327855481821,-0.044641636506989,-0.0105172024313319,-0.00567061055493425,-0.0662387441556644,-0.0570543036247554,-0.0029028298070691,-0.0394933828740919,-0.0425721049227942,-0.00107769750046639,168.0 374,-0.107225631607358,-0.044641636506989,-0.0342290680567117,-0.067642283042187,-0.0634868384392622,-0.0705196874817053,0.0081420836051921,-0.0394933828740919,-0.000609254186102297,-0.0797777288823259,140.0 375,0.0453409833354632,0.0506801187398187,-0.00297251791416553,0.107944122338362,0.0355817673512192,0.0224854056697859,0.0265502726256275,-0.00259226199818282,0.028016506523264,0.0196328370737072,217.0 376,-0.00188201652779104,-0.044641636506989,0.068163078961974,-0.00567061055493425,0.119514891701488,0.130208476525385,-0.0249926566315915,0.0867084505215172,0.0461323310394148,-0.00107769750046639,121.0 377,0.0199132141783263,0.0506801187398187,0.00996122697240527,0.0184294843012196,0.0149424744782022,0.0447189464568426,-0.0618090346724622,0.0712099797536354,0.00943640914607987,-0.063209301222987,235.0 
378,0.0162806757273067,0.0506801187398187,0.00241654245523897,-0.00567061055493425,-0.00569681839481472,0.0108989125835731,-0.050764121260201,0.0343088588777263,0.0226920225667445,-0.0383566597339788,245.0 379,-0.00188201652779104,-0.044641636506989,-0.0385403163522353,0.0218723549949558,-0.108893282759899,-0.115613065979398,0.0228686348215404,-0.076394503750001,-0.0468794828442166,0.0237749439885419,40.0 380,0.0162806757273067,-0.044641636506989,0.0261284080806188,0.0585963091762383,-0.0607349327228599,-0.0442152166913845,-0.0139477432193303,-0.0339582147427055,-0.0514005352605825,-0.0259303389894746,52.0 381,-0.0709002470971626,0.0506801187398187,-0.0891974838246376,-0.0745280244296595,-0.0428475455662452,-0.0257394574458021,-0.0323559322397657,-0.00259226199818282,-0.0129079422541688,-0.0549250873933176,104.0 382,0.0489735217864827,-0.044641636506989,0.0606183944448076,-0.0228849640236156,-0.0235842055514294,-0.072711726714232,-0.0434008456520269,-0.00259226199818282,0.104137611358979,0.036201264733046,132.0 383,0.00538306037424807,0.0506801187398187,-0.0288400076873072,-0.00911348124867051,-0.0318399227006362,-0.0288709420636975,0.0081420836051921,-0.0394933828740919,-0.0181182673078967,0.00720651632920303,88.0 384,0.0344433679824045,0.0506801187398187,-0.0299178197611881,0.00465800152627453,0.0933717873956666,0.0869939887984295,0.0339135482338016,-0.00259226199818282,0.024052583226893,-0.0383566597339788,69.0 385,0.0235457526293458,0.0506801187398187,-0.019139699022379,0.0494153205448459,-0.0634868384392622,-0.0611252336280193,0.00446044580110504,-0.0394933828740919,-0.0259524244351894,-0.0135040182449705,219.0 386,0.0199132141783263,-0.044641636506989,-0.0406959404999971,-0.015999222636143,-0.00844872411121698,-0.0175975974392743,0.052321737254237,-0.0394933828740919,-0.0307512098645563,0.00306440941436832,72.0 
387,-0.0454724779400257,-0.044641636506989,0.0153502873418098,-0.0745280244296595,-0.0497273098572509,-0.0172844489774848,-0.0286742944356786,-0.00259226199818282,-0.104364820832166,-0.0756356219674911,201.0 388,0.0526060602375023,0.0506801187398187,-0.0245287593917836,0.0563010619323185,-0.00707277125301585,-0.005071658967693,-0.0213110188275045,-0.00259226199818282,0.0267142576335128,-0.0383566597339788,110.0 389,-0.00551455497881059,0.0506801187398187,0.00133873038135806,-0.0848566365108683,-0.0112006298276192,-0.0166581520539057,0.0486400994501499,-0.0394933828740919,-0.0411803851880079,-0.0880619427119953,51.0 390,0.00901559882526763,0.0506801187398187,0.0692408910358548,0.0597439326260547,0.0176943801946045,-0.0232342697514859,-0.0470824834561139,0.0343088588777263,0.103292264911524,0.0734802269665584,277.0 391,-0.0236772472339084,-0.044641636506989,-0.0697968664947814,-0.0641994123484507,-0.0593589798646588,-0.0504781859271752,0.0191869970174533,-0.0394933828740919,-0.0891368600793477,-0.0507829804784829,63.0 392,-0.0418399394890061,0.0506801187398187,-0.0299178197611881,-0.00222773986119799,0.0218222387692079,0.0365770864503148,0.0118237214092792,-0.00259226199818282,-0.0411803851880079,0.065196013136889,118.0 393,-0.0745327855481821,-0.044641636506989,-0.0460850008694016,-0.0435421881860331,-0.0290880169842339,-0.0232342697514859,0.0155053592133662,-0.0394933828740919,-0.0398095943643375,-0.0217882320746399,69.0 394,0.0344433679824045,-0.044641636506989,0.0185837235634525,0.0563010619323185,0.0121905687618,-0.0545491159304391,-0.0691723102806364,0.0712099797536354,0.130080609521753,0.00720651632920303,273.0 395,-0.0600026317441039,-0.044641636506989,0.00133873038135806,-0.0297707054110881,-0.00707277125301585,-0.0216685274425382,0.0118237214092792,-0.00259226199818282,0.0318152175007986,-0.0549250873933176,258.0 
396,-0.0854304009012408,0.0506801187398187,-0.030995631835069,-0.0228849640236156,-0.0634868384392622,-0.0542359674686496,0.0191869970174533,-0.0394933828740919,-0.096433222891784,-0.0342145528191441,43.0 397,0.0526060602375023,-0.044641636506989,-0.00405032998804645,-0.0309183289641906,-0.0469754041408486,-0.0583068974719135,-0.0139477432193303,-0.0258399681500055,0.0360557900898319,0.0237749439885419,198.0 398,0.0126481372762872,-0.044641636506989,0.0153502873418098,-0.0332135761048244,0.0410855787840237,0.0321930079852613,-0.0029028298070691,-0.00259226199818282,0.0450661683362615,-0.0673514081378217,242.0 399,0.0598711371395414,0.0506801187398187,0.0228949718589761,0.0494153205448459,0.0163184273364034,0.0118383579689417,-0.0139477432193303,-0.00259226199818282,0.0395398780720242,0.0196328370737072,232.0 400,-0.0236772472339084,-0.044641636506989,0.045529025410475,0.090729768869681,-0.0180803941186249,-0.0354470597612776,0.0707299262746723,-0.0394933828740919,-0.0345237153303495,-0.0093619113301358,175.0 401,0.0162806757273067,-0.044641636506989,-0.0450071887955207,-0.0573136709609782,-0.0345918284170385,-0.05392281900686,0.0744115640787594,-0.076394503750001,-0.0425721049227942,0.0403433716478807,93.0 402,0.110726675453815,0.0506801187398187,-0.0331512559828308,-0.0228849640236156,-0.00432086553661359,0.0202933664372591,-0.0618090346724622,0.0712099797536354,0.0155668445407018,0.0444854785627154,168.0 403,-0.0200447087828888,-0.044641636506989,0.0972640049567582,-0.00567061055493425,-0.00569681839481472,-0.0238605666750649,-0.0213110188275045,-0.00259226199818282,0.0616858488238662,0.0403433716478807,275.0 404,-0.0164121703318693,-0.044641636506989,0.0541515220015222,0.0700725447072635,-0.0332158755588373,-0.0279314966783289,0.0081420836051921,-0.0394933828740919,-0.0271286455543265,-0.0093619113301358,293.0 
405,0.0489735217864827,0.0506801187398187,0.1231314947299,0.0838440274822086,-0.104765424185296,-0.10089508827529,-0.0691723102806364,-0.00259226199818282,0.0366457977933988,-0.0300724459043093,281.0 406,-0.0563700932930843,-0.044641636506989,-0.0805749872335904,-0.0848566365108683,-0.0373437341334407,-0.0370128020702253,0.0339135482338016,-0.0394933828740919,-0.0561575730950062,-0.137767225690012,72.0 407,0.0271782910803654,-0.044641636506989,0.0929527566612346,-0.0527231767141394,0.00806271018719657,0.0397085710682101,-0.0286742944356786,0.021024455362399,-0.0483617248028919,0.0196328370737072,140.0 408,0.063503675590561,-0.044641636506989,-0.0503962491649252,0.107944122338362,0.0314539087766158,0.0193539210518905,-0.0176293810234174,0.0236075338237126,0.0580391276638951,0.0403433716478807,189.0 409,-0.0527375548420648,0.0506801187398187,-0.0115950145052127,0.0563010619323185,0.0562210602242361,0.0729023080179005,-0.0397192078479398,0.0712099797536354,0.0305664873984148,-0.0052198044153011,181.0 410,-0.00914709342983014,0.0506801187398187,-0.0277621956134263,0.0081008722200108,0.0479653430750293,0.0372033833738938,-0.0286742944356786,0.0343088588777263,0.0660482061630984,-0.0424987666488135,209.0 411,0.00538306037424807,-0.044641636506989,0.0584627702970458,-0.0435421881860331,-0.07311850844667,-0.0723985782524425,0.0191869970174533,-0.076394503750001,-0.0514005352605825,-0.0259303389894746,136.0 412,0.0744012909436196,-0.044641636506989,0.0854080721440683,0.063186803319791,0.0149424744782022,0.0130909518160999,0.0155053592133662,-0.00259226199818282,0.0062093156165054,0.0859065477110625,261.0 413,-0.0527375548420648,-0.044641636506989,-0.000816893766403737,-0.0263278347173518,0.0108146159035988,0.00714113104209875,0.0486400994501499,-0.0394933828740919,-0.0358167281015492,0.0196328370737072,113.0 
414,0.0816663678456587,0.0506801187398187,0.00672779075076256,-0.00452298700183173,0.10988322169408,0.117056241130225,-0.0323559322397657,0.0918746074441444,0.0547240033481791,0.00720651632920303,131.0 415,-0.00551455497881059,-0.044641636506989,0.00888341489852436,-0.0504279295735057,0.0259500973438113,0.0472241341511589,-0.0434008456520269,0.0712099797536354,0.0148227108412663,0.00306440941436832,174.0 416,-0.0273097856849279,-0.044641636506989,0.0800190117746638,0.09876313370697,-0.00294491267841247,0.0181013272047324,-0.0176293810234174,0.00331191734196264,-0.0295276227417736,0.036201264733046,257.0 417,-0.0527375548420648,-0.044641636506989,0.0713965151836166,-0.0745280244296595,-0.0153284884022226,-0.00131387742621863,0.00446044580110504,-0.0214118336448964,-0.0468794828442166,0.00306440941436832,55.0 418,0.00901559882526763,-0.044641636506989,-0.0245287593917836,-0.0263278347173518,0.0988755988284711,0.0941964034195887,0.0707299262746723,-0.00259226199818282,-0.02139368094036,0.00720651632920303,84.0 419,-0.0200447087828888,-0.044641636506989,-0.0547074974604488,-0.0538708002672419,-0.0662387441556644,-0.0573674520865449,0.0118237214092792,-0.0394933828740919,-0.0740888714915354,-0.0052198044153011,42.0 420,0.0235457526293458,-0.044641636506989,-0.0363846922044735,6.75072794357462e-05,0.00118294589619092,0.0346981956795776,-0.0434008456520269,0.0343088588777263,-0.0332487872476258,0.0610539062220542,146.0 421,0.0380759064334241,0.0506801187398187,0.0164280994156907,0.0218723549949558,0.0397096259258226,0.0450320949186321,-0.0434008456520269,0.0712099797536354,0.049768659920749,0.0154907301588724,212.0 422,-0.0781653239992017,0.0506801187398187,0.077863387626902,0.0528581912385822,0.0782363059554542,0.0644472995495832,0.0265502726256275,-0.00259226199818282,0.0406722637144977,-0.0093619113301358,233.0 
423,0.00901559882526763,0.0506801187398187,-0.0396181284261162,0.0287580963824284,0.0383336730676214,0.0735286049414796,-0.0728539480847234,0.108111100629544,0.0155668445407018,-0.0466408735636482,91.0 424,0.00175052192322852,0.0506801187398187,0.0110390390462862,-0.0194420933298793,-0.0167044412604238,-0.00381906512053488,-0.0470824834561139,0.0343088588777263,0.024052583226893,0.0237749439885419,111.0 425,-0.0781653239992017,-0.044641636506989,-0.0406959404999971,-0.081413765817132,-0.100637565610693,-0.112794729823292,0.0228686348215404,-0.076394503750001,-0.0202887477516296,-0.0507829804784829,152.0 426,0.030810829531385,0.0506801187398187,-0.0342290680567117,0.0436772026071898,0.0575970130824372,0.0688313780146366,-0.0323559322397657,0.057556565029549,0.0354619386607697,0.0859065477110625,120.0 427,-0.034574862586967,0.0506801187398187,0.00564997867688165,-0.00567061055493425,-0.07311850844667,-0.062690975936967,-0.00658446761115617,-0.0394933828740919,-0.045420957777041,0.0320591578182113,67.0 428,0.0489735217864827,0.0506801187398187,0.088641508365711,0.0872868981759448,0.0355817673512192,0.0215459602844172,-0.0249926566315915,0.0343088588777263,0.0660482061630984,0.131469723774244,310.0 429,-0.0418399394890061,-0.044641636506989,-0.0331512559828308,-0.0228849640236156,0.0465893902168282,0.0415874618389473,0.056003375058324,-0.0247329345237283,-0.0259524244351894,-0.0383566597339788,94.0 430,-0.00914709342983014,-0.044641636506989,-0.0568631216082106,-0.0504279295735057,0.0218222387692079,0.0453452433804217,-0.0286742944356786,0.0343088588777263,-0.00991895736315477,-0.0176461251598052,183.0 431,0.0707687524926,0.0506801187398187,-0.030995631835069,0.0218723549949558,-0.0373437341334407,-0.0470335528474903,0.0339135482338016,-0.0394933828740919,-0.0149564750249113,-0.00107769750046639,66.0 
432,0.00901559882526763,-0.044641636506989,0.0552293340754031,-0.00567061055493425,0.0575970130824372,0.0447189464568426,-0.0029028298070691,0.0232385226149535,0.0556835477026737,0.106617082285236,173.0 433,-0.0273097856849279,-0.044641636506989,-0.0600965578298533,-0.0297707054110881,0.0465893902168282,0.0199802179754696,0.122272855531891,-0.0394933828740919,-0.0514005352605825,-0.0093619113301358,72.0 434,0.0162806757273067,-0.044641636506989,0.00133873038135806,0.0081008722200108,0.00531080447079431,0.0108989125835731,0.0302319104297145,-0.0394933828740919,-0.045420957777041,0.0320591578182113,49.0 435,-0.0127796318808497,-0.044641636506989,-0.0234509473179027,-0.0400993174922969,-0.0167044412604238,0.0046359433477825,-0.0176293810234174,-0.00259226199818282,-0.0384591123013538,-0.0383566597339788,64.0 436,-0.0563700932930843,-0.044641636506989,-0.074108114790305,-0.0504279295735057,-0.0249601584096305,-0.0470335528474903,0.0928197530991947,-0.076394503750001,-0.0611765950943345,-0.0466408735636482,48.0 437,0.0417084448844436,0.0506801187398187,0.0196615356373334,0.0597439326260547,-0.00569681839481472,-0.00256647127337676,-0.0286742944356786,-0.00259226199818282,0.0311929907028023,0.00720651632920303,178.0 438,-0.00551455497881059,0.0506801187398187,-0.0159062628007364,-0.067642283042187,0.0493412959332305,0.0791652772536912,-0.0286742944356786,0.0343088588777263,-0.0181182673078967,0.0444854785627154,104.0 439,0.0417084448844436,0.0506801187398187,-0.0159062628007364,0.0172818607481171,-0.0373437341334407,-0.0138398158977999,-0.0249926566315915,-0.0110795197996419,-0.0468794828442166,0.0154907301588724,132.0 440,-0.0454724779400257,-0.044641636506989,0.0390621529671896,0.00121513083253827,0.0163184273364034,0.0152829910486266,-0.0286742944356786,0.0265596234937854,0.0445283740214053,-0.0259303389894746,220.0 
441,-0.0454724779400257,-0.044641636506989,-0.0730303027164241,-0.081413765817132,0.0837401173882587,0.0278089295202079,0.17381578478911,-0.0394933828740919,-0.00421985970694603,0.00306440941436832,57.0 ================================================ FILE: src/estimagic/examples/exam_points.csv ================================================ points 275.5 351.5 346.25 228.25 108.25 380.75 346.25 360.75 196 414.75 370.5 371.75 143.75 333.5 397.5 405.75 154.75 321 279 326.5 49.5 402.75 389.75 382.25 337.75 311 105.5 380.5 236 326.5 343.75 328.75 316.25 348.25 338.75 375.75 410 17 414.25 21.25 369.625 318.875 336.125 429.875 407.5 415.75 332.375 397 375.875 419.125 270.125 299.25 384.125 335 408.5 414.25 253.5 339.25 338.75 355.375 326.375 240.375 385 435 317.25 365.625 372.75 365.125 349.625 366.75 386.5 391.75 403 258.5 386 411 350.25 402.25 294.625 291.125 378.125 442.0 428.1 347.3 431.8 430.4 426.0 433.5 331.1 405.7 415.5 406.4 418.6 400.7 408.8 404.8 409.4 410.8 402.5 401.0 415.3 390.8 394.6 399.0 380.0 397.5 368.7 394.7 304.3 391.1 388.4 370.3 384.6 383.5 305.6 286.5 367.9 329.8 288.2 338.5 333.6 268.6 335.2 296.3 269.1 243.2 159.4 448.4 449.8 435.9 429.4 428.3 427.5 422.5 409.8 415.8 413.4 416.8 406.7 383.9 389.0 387.2 368.6 399.5 382.6 355.9 389.9 342.5 365.2 320.3 341.5 248.1 305.0 279.2 275.7 204.5 235.0 102.2 112.3 130.6 60.2 ================================================ FILE: src/estimagic/examples/logit.py ================================================ """Likelihood functions and derivatives of a logit model.""" import numpy as np import pandas as pd from optimagic import mark def logit_loglike_and_derivative(params, y, x): return logit_loglike(params, y, x), logit_jac(params, y, x) @mark.scalar def scalar_logit_fun_and_jac(params, y, x): return logit_loglike(params, y, x).sum(), logit_grad(params, y, x) @mark.likelihood def logit_loglike(params, y, x): """Log-likelihood function of a logit model. 
Args: params (pd.DataFrame): The index consists of the parameter names, the "value" column are the parameter values. y (np.array): 1d numpy array with the dependent variable x (np.array): 2d numpy array with the independent variables Returns: loglike (np.array): 1d numpy array with likelihood contribution per individual """ if isinstance(params, pd.DataFrame): p = params["value"].to_numpy() else: p = params q = 2 * y - 1 contribs = np.log(1 / (1 + np.exp(-(q * np.dot(x, p))))) return contribs @mark.scalar def logit_grad(params, y, x): return logit_jac(params, y, x).sum(axis=0) def logit_jac(params, y, x): """Derivative of the log-likelihood for each observation of a logit model. Args: params (pd.DataFrame): The index consists of the parmater names, the "value" column are the parameter values. y (np.array): 1d numpy array with the dependent variable x (np.array): 2d numpy array with the independent variables Returns: jac : array-like The derivative of the loglikelihood for each observation evaluated at `params`. """ if isinstance(params, pd.DataFrame): p = params["value"].to_numpy() else: p = params y = y.to_numpy() c = 1 / (1 + np.exp(-(np.dot(x, p)))) jac = (y - c)[:, None] * x return jac def logit_hess(params, y, x): # noqa: ARG001 """Hessian matrix of the log-likelihood. Args: params (pd.DataFrame): The index consists of the parmater names, the "value" column are the parameter values. 
y (np.array): 1d numpy array with the dependent variable x (np.array): 2d numpy array with the independent variables Returns: hessian (np.array) : 2d numpy array with the hessian of the logl-ikelihood function evaluated at `params` """ if isinstance(params, pd.DataFrame): p = params["value"].to_numpy() else: p = params c = 1 / (1 + np.exp(-(np.dot(x, p)))) return -np.dot(c * (1 - c) * x.T, x) ================================================ FILE: src/estimagic/examples/sensitivity_probit_example_data.csv ================================================ ,y,intercept,x1,x2 0,1,1.0,2.967339833505456,0.7105279305877271 1,1,1.0,-0.4737153743988922,-1.1947183078244987 2,0,1.0,-1.1011968596889783,-1.1704333745431343 3,0,1.0,-1.1832573322549391,-1.812714817628745 4,1,1.0,-2.3917863439314444,-0.2947731027029936 5,1,1.0,0.0908366872724484,0.3628735729212425 6,1,1.0,0.5150137863290288,0.5485807069534177 7,1,1.0,0.5220340116294889,1.748757460776195 8,1,1.0,0.5485275001956246,-0.5687296924071432 9,1,1.0,1.43973351826228,-0.6278313683011209 10,1,1.0,1.8281942689784905,0.5833740898183319 11,0,1.0,0.6203701506873812,-1.2327182433057997 12,0,1.0,0.2076431867285613,-0.8558012930544818 13,1,1.0,-0.713737602411397,-0.4573876589872307 14,1,1.0,0.9673009144681528,0.4266483456470918 15,0,1.0,-0.4631899865771679,-1.0403265409190658 16,1,1.0,0.2599554285953085,0.6341859007451927 17,1,1.0,-0.4562329870545088,-0.1583203259067126 18,1,1.0,1.1682151881456315,0.1039509925891936 19,0,1.0,-0.0605745077022622,-1.8645801044006725 20,0,1.0,0.5822247107503228,-0.2371636329667403 21,0,1.0,0.4520876106211226,-0.9460673311759388 22,1,1.0,0.0409344380538476,0.0426628982425797 23,0,1.0,-0.143137988496383,0.0679483511882124 24,1,1.0,0.9013176034221748,0.304171639050923 25,1,1.0,-0.40511856420813,-1.1427561325612587 26,0,1.0,-1.5053126843329996,-2.884391731316892 27,0,1.0,-1.1331512623824767,-1.051132720307086 28,1,1.0,0.8375281213414355,2.579436350691107 29,1,1.0,1.0007174023555803,-0.3155487287071573 
30,1,1.0,0.9387951669370302,0.1871519140067916 31,1,1.0,0.6863269382319725,0.1518721944545467 32,0,1.0,-1.3296603207436988,-1.4590430175050315 33,0,1.0,0.2695905640603205,-1.1971290308909963 34,1,1.0,-0.767164882859859,0.2577870141677244 35,1,1.0,-0.3378354875372247,-0.9501433638483396 36,1,1.0,-0.5069580021169584,-0.875829581774297 37,1,1.0,-0.6167194328609338,-0.5736531300036655 38,1,1.0,-1.6214767172342575,0.188139381199072 39,0,1.0,-1.1672730585489146,-0.6365164563768158 40,1,1.0,1.172776386377527,1.0297981695080152 41,0,1.0,-1.297569295949586,-1.7718674689141647 42,1,1.0,0.1157608402516829,1.628218918792883 43,0,1.0,-1.7134262798084272,-1.1743038064390103 44,1,1.0,-0.3378301805529568,-0.2910465476647137 45,0,1.0,-1.2507660936638987,0.5192700189880949 46,1,1.0,0.1852781797882933,0.532037838399578 47,1,1.0,0.4713992108946831,-0.0537546275328069 48,1,1.0,0.4179504108182624,-0.3475018384774174 49,1,1.0,-0.9778983061644152,-0.729408252986756 50,0,1.0,0.7944699277825029,0.329545003238688 51,1,1.0,1.2343066783419687,1.5491790039309807 52,1,1.0,0.1235621702894244,0.0592456128791801 53,1,1.0,1.269120052278218,-0.1843042492235104 54,1,1.0,-1.2634617606506475,0.3337263439422797 55,0,1.0,-0.7040770738291899,0.6044788486986316 56,1,1.0,0.8193210871328203,-0.0202599958298661 57,0,1.0,-1.1187665172260364,-0.7329443748308223 58,1,1.0,0.7733653804552899,0.9515755686139656 59,0,1.0,-1.4284200762508827,-0.1591355401498399 60,1,1.0,1.2557808993112447,0.7783356023914118 61,1,1.0,1.0954237482760552,1.3705251159656435 62,1,1.0,-0.1691717408839249,0.7651427516343878 63,1,1.0,0.1626843344471919,1.6872421302843787 64,0,1.0,-0.7163214429613063,0.0419288919170111 65,0,1.0,-0.162170564451405,-0.304954808305126 66,1,1.0,-1.0327080821266987,0.702572524175719 67,0,1.0,-0.0696354589891792,-0.2855075672616649 68,1,1.0,-0.2624176936441514,0.1752919443310249 69,1,1.0,2.9393342559065134,1.137135462818222 70,1,1.0,1.4841485099826488,1.712258677673838 71,1,1.0,0.4565596145128141,-0.1027463605459767 
72,1,1.0,0.2084141086516602,1.2085839475816187 73,0,1.0,-0.2347957644651526,0.1513618892279431 74,1,1.0,1.9542771245178288,1.1043410659097217 75,1,1.0,-0.3135962069181063,-0.2517000246000336 76,1,1.0,0.2555445644824194,-0.0730217916434685 77,1,1.0,-0.2059823410868791,-0.0543716903474794 78,1,1.0,-0.0248905950961883,-0.5150388954448659 79,0,1.0,1.225105650926888,-0.3862704052917259 80,1,1.0,2.896474517346035,2.1344493415925743 81,0,1.0,-0.9491912347452498,-0.8480648749834054 82,1,1.0,1.876352942575416,0.5530603658361252 83,0,1.0,-0.0174570333207706,-0.3103318292477376 84,0,1.0,-1.019760355761109,-1.0971349945509807 85,1,1.0,0.4084657488393273,0.794047731322371 86,1,1.0,-0.0458605880080085,-0.0353863139287917 87,1,1.0,-0.6693379543480799,0.1142902898413986 88,1,1.0,0.1779490476364521,-0.4603061273110742 89,0,1.0,-0.9547283329747902,-1.8625332801880468 90,1,1.0,0.3239303547353877,0.2107081373806453 91,1,1.0,1.6972498476778457,1.044410604093328 92,1,1.0,0.6813433805395099,1.4921237344313034 93,1,1.0,0.4461024414775839,0.0654578597019615 94,1,1.0,0.2078533896018308,-0.2541940646803443 95,1,1.0,0.3263353239247097,0.4982692324763858 96,0,1.0,-0.7302304887271422,-1.2242819044689828 97,0,1.0,-2.134148270229161,-1.4070260108821095 98,1,1.0,0.7663482764745027,0.4445176623754732 99,1,1.0,1.1178424200542096,1.146653429079442 100,1,1.0,0.9365769455461784,0.7638840833057274 101,1,1.0,-0.0006918433006383,-1.2705885563349224 102,0,1.0,-1.6115798626589557,-1.0135984193972905 103,1,1.0,1.1393695947132625,0.292807998613878 104,0,1.0,-1.4110909753020493,-2.0360703145826013 105,0,1.0,-0.2591328569667465,0.9303415754138996 106,1,1.0,2.1297240912820303,0.9722628513110064 107,1,1.0,0.1969352464276115,0.2044264256090287 108,1,1.0,0.5632045286945059,2.166210352951132 109,1,1.0,-0.8431918326214936,0.0646403189429225 110,1,1.0,-0.0462805349133515,0.629760884331796 111,1,1.0,0.8478655164781554,-0.324044211049318 112,0,1.0,-0.3430960696190223,-0.1083513310120653 
113,1,1.0,-0.8412163461781723,-0.3694768855277115 114,1,1.0,0.0425250043881166,2.5822006195163314 115,0,1.0,-2.1038431417665224,-1.9043419555741448 116,0,1.0,-0.3549849715649531,-0.8638934105406288 117,0,1.0,-0.7386323058296131,-0.8546395079376573 118,1,1.0,1.2668048563539456,0.0781058994909303 119,1,1.0,0.9845806200984912,0.5908293048839913 120,1,1.0,1.1762057615240264,-1.2267758372574409 121,1,1.0,-0.9524628689796972,0.1346436887974137 122,1,1.0,0.1833494108495044,-1.0260862933604546 123,1,1.0,0.133916753789661,1.296504893146537 124,0,1.0,0.1578065358086548,0.2694749105461692 125,0,1.0,-1.296910200945708,-1.1032412046335796 126,1,1.0,1.2892943166086595,-0.1787385210775674 127,1,1.0,0.9431499935730242,-0.0326729938331903 128,0,1.0,-2.4371635709848047,-0.92093226123648 129,1,1.0,-0.2628329413698394,-0.93694947924651 130,1,1.0,0.6031729148792794,0.6189866518971492 131,1,1.0,-0.90559012177888,-1.5529327070681278 132,1,1.0,-0.855044509066238,-1.2704022111290432 133,1,1.0,-0.5705964537327763,0.3076153691824254 134,1,1.0,0.5608257110081025,0.6174472584833083 135,1,1.0,-0.0228631974187066,1.2042243267456483 136,0,1.0,0.0530696858084396,0.0558334991389864 137,1,1.0,0.297809951960623,1.3739524610207354 138,0,1.0,0.3429162534394854,-0.5915842517979316 139,1,1.0,0.1734106016517921,1.507517484625247 140,0,1.0,0.33821118682033,1.0102552429771807 141,1,1.0,1.02653644515944,1.02143778909253 142,1,1.0,0.3027320514309517,-1.4960041452449413 143,1,1.0,-1.2697668292847764,-0.5618196019415743 144,1,1.0,-0.5485272823778224,-2.198463918159258 145,1,1.0,0.693512418939432,0.0015694927958309 146,1,1.0,-0.0784353030320485,0.0376932580498911 147,0,1.0,-0.4037594846701222,-1.9366725167024248 148,1,1.0,-1.6694098774422772,-0.0899218514434318 149,1,1.0,0.8971866095072558,-0.2810372876345518 150,1,1.0,0.7863694950315233,0.2404933928094482 151,0,1.0,-0.3557892260599152,0.2877744618702097 152,1,1.0,-0.3264627056735612,-1.3272226462525392 153,1,1.0,1.7931975011096195,0.5312626320716513 
154,1,1.0,0.7713546449156672,0.4081133931417659 155,1,1.0,1.172176474950308,-0.7681777891551985 156,0,1.0,-1.7397413150964665,-1.3331921217915137 157,1,1.0,-0.7642442274527602,-0.3543317697385149 158,1,1.0,-0.7622973505390092,-1.0043654423227852 159,0,1.0,0.2183761115134746,0.1012303526877383 160,0,1.0,0.757650591005793,-1.3193289959201857 161,1,1.0,1.5339011909795075,0.1427797797290031 162,1,1.0,0.3428802701803621,-0.6709166177455135 163,0,1.0,-0.6695756656569956,-0.1236548642449643 164,0,1.0,-2.670817685024346,-2.872780248512667 165,1,1.0,1.2968864520966965,-0.734234141896909 166,1,1.0,1.3055762611896382,0.5622459948440263 167,1,1.0,-1.3767653403773457,0.091126858522262 168,1,1.0,0.3896937795124568,0.0943941400849941 169,1,1.0,-0.774479613424448,-0.3019000802803239 170,0,1.0,-0.0634110861092311,-0.1665790405124566 171,1,1.0,-0.5236970783533639,1.219647820786846 172,1,1.0,1.6421650319269059,2.0004080783764366 173,0,1.0,-0.9528542209170808,-1.688639170444758 174,0,1.0,0.2367776663421717,-0.9764936985442892 175,1,1.0,-0.1103685422432995,1.2263870453620218 176,1,1.0,0.6468029014209795,-0.1125491795350973 177,1,1.0,0.433514024457416,-0.699606770429479 178,1,1.0,0.5557093563379646,1.5376372410355468 179,1,1.0,0.0684287864291487,0.2549539224012551 180,0,1.0,-1.6020562130119167,-3.1523422992631676 181,1,1.0,2.530993840708988,2.0089350789073968 182,0,1.0,-2.1789047186506245,-1.5661421903098047 183,0,1.0,-1.0143967120165147,1.553384547849153 184,1,1.0,-0.3766649261347968,-1.0992087933659138 185,0,1.0,-0.3264425692830396,-0.5393112782332404 186,1,1.0,0.0992541561086452,0.1564331665687704 187,1,1.0,0.0779009573487326,0.1297158875442746 188,0,1.0,-1.2011229146210671,-0.6769386208397218 189,0,1.0,-1.034835688105053,-0.6932541343113545 190,1,1.0,-1.0538139954195052,-0.6644377275004232 191,0,1.0,-0.0787614364836631,-0.7218602561575269 192,0,1.0,-0.2312767149706146,-0.0291788116972061 193,1,1.0,0.5368286496648684,-0.4652339662035581 
194,1,1.0,-0.5161769087658401,-0.5950592586145473 195,0,1.0,-1.6891790799062465,-1.5863280071109278 196,1,1.0,0.2171321954941918,1.6213288020007652 197,0,1.0,-0.9128648541202304,0.4576788535533095 198,0,1.0,-1.492745460495128,-0.6209847326587016 199,1,1.0,0.0575406705969811,0.1938543753585582 200,1,1.0,2.1172076399797217,-0.1236919908770869 201,1,1.0,-0.1626427486852519,-0.0418562938180431 202,1,1.0,0.6054438030974071,1.8062297489412764 203,1,1.0,0.3028163105308905,-0.015132002316731 204,1,1.0,0.5093766380457077,0.5200349901127361 205,0,1.0,-0.9777859179248424,-0.6048699537019336 206,1,1.0,1.550736237638344,1.8122982462119728 207,1,1.0,0.8357124233487145,-1.3200050086406558 208,0,1.0,0.0523292492226149,0.3103116279689318 209,1,1.0,-0.2433775165368695,-0.2069161324931882 210,1,1.0,0.9772189727418472,-0.6958513049018134 211,0,1.0,-1.3899868885523656,-0.850863428787955 212,1,1.0,0.3558687272455471,1.0763750887530195 213,1,1.0,0.1825344690271067,0.6058034898038257 214,0,1.0,-1.7230734924219526,-1.3982647789591447 215,1,1.0,-0.893682326457443,-0.2559151634337065 216,1,1.0,1.6124510712779236,0.9274695947325436 217,1,1.0,0.8533514042158431,-0.5173271109299739 218,1,1.0,-0.5940285064261097,-0.8869896231033375 219,0,1.0,-1.1571085386996875,-0.4007227965229337 220,1,1.0,-0.4811524615163916,-0.9157792540196568 221,1,1.0,0.7577697909855564,0.3726344661588177 222,1,1.0,0.8336022247491442,0.1830281488597728 223,0,1.0,-0.1475136397050963,-0.7707608952355066 224,0,1.0,-3.997368053852816,-2.101625256349573 225,0,1.0,-0.6322397999095749,-1.3741277039341029 226,1,1.0,1.1718199437536057,-0.2596835687715374 227,1,1.0,1.0383038297649536,-0.2241598969261504 228,1,1.0,-0.0663217464490231,-0.9663854691419248 229,0,1.0,-0.2620401802843139,-0.6551163537399683 230,1,1.0,-0.8711551943827907,1.6526234409045248 231,1,1.0,-0.454503170626116,-0.3992887139743629 232,1,1.0,-0.3398309735153191,0.5915035882354636 233,1,1.0,-0.0941102884706825,0.5781564535766524 
234,1,1.0,-0.0050339294241279,0.0585440231134717 235,1,1.0,0.9312014517072728,0.4425799319986031 236,1,1.0,0.7967252896162756,2.569724989482353 237,1,1.0,0.8944925587361772,1.1566950632271873 238,1,1.0,-0.7521110245807455,-0.4267067547412025 239,1,1.0,-0.0535873810152297,-0.7360222350516838 240,1,1.0,1.2521867392737955,0.056428559644252 241,1,1.0,0.1033942998202911,0.5605960667292729 242,1,1.0,0.5833348969824241,0.2531813880173922 243,0,1.0,-1.0645438621752024,-1.303562039254952 244,1,1.0,0.4885443174441546,0.7233863844230481 245,1,1.0,-0.1604764282411145,0.105122448427376 246,1,1.0,-1.300957177234422,-1.5532019347076516 247,0,1.0,-1.7044162128692306,-2.0542500697482384 248,0,1.0,1.3276652048384383,0.9682809898945218 249,1,1.0,-0.1830327437266867,0.2037877709707388 250,0,1.0,-0.5272269625826158,-0.209700992281532 251,0,1.0,0.0259507800846553,0.0411236100334752 252,1,1.0,0.2710353305750642,0.2943050293540375 253,1,1.0,1.5041294473761584,0.5946513596450765 254,1,1.0,0.0145544352540653,0.3705696340714013 255,0,1.0,-0.1291168438974311,0.111426386746702 256,1,1.0,0.0817896748680334,0.0606794408675246 257,1,1.0,0.7621088732237331,-0.8905255802050267 258,0,1.0,-0.8322133747312974,-0.7790859392929107 259,1,1.0,0.0355942880687503,0.3009959974810477 260,1,1.0,0.6369944436845489,0.6189645756553264 261,1,1.0,1.2670501959399332,0.391628198512087 262,0,1.0,-0.4787291522723686,-0.5703539678097848 263,0,1.0,-0.826517804170331,-0.4972232873404489 264,0,1.0,-1.718542348135264,-1.224754253077296 265,1,1.0,1.0484752763280232,0.5893242036287374 266,1,1.0,0.4474382854026535,-0.2669761606754156 267,1,1.0,-1.7636646176498751,-0.581777714938195 268,0,1.0,-0.1044211730895233,0.1915387906660626 269,1,1.0,0.1009447676484083,-0.3522684357081524 270,0,1.0,-1.2350658136514983,-1.9464534855344024 271,0,1.0,-0.92056925302896,-1.2931521291522536 272,0,1.0,-1.3220685255590627,-0.9675978863071708 273,1,1.0,0.6052114261269296,0.728102938213813 274,1,1.0,0.5149992113311551,0.0165534507833951 
275,0,1.0,0.1586237445585029,-0.313239766080771 276,1,1.0,1.8345306001451973,1.12546911647858 277,0,1.0,-1.790005298850818,-1.7223741187038832 278,0,1.0,-0.8219700326667985,0.3612843693356167 279,0,1.0,-0.4396539011852441,-0.9215054194243268 280,1,1.0,0.5123834603118314,0.0455702091769979 281,1,1.0,0.7899626513107794,1.0660503420463314 282,1,1.0,0.4338554957801895,-0.6210919770787423 283,0,1.0,-0.07884888966718,-1.563819819117406 284,0,1.0,-0.4082491508367082,0.9470829546722284 285,1,1.0,1.3678165391481167,2.4329547329193164 286,1,1.0,2.1374523981366247,1.9690326249256476 287,1,1.0,0.4036209992141275,1.7000818372076283 288,0,1.0,-2.316725988882649,-1.3224712601990403 289,1,1.0,2.9225629006916485,0.7067649055710895 290,1,1.0,-0.3178591434216822,-0.9781152975546747 291,0,1.0,-1.7814341389327732,0.4358027604157619 292,1,1.0,-0.7759742860112459,0.4343797947519167 293,0,1.0,-1.521144008128242,-1.8770242441720129 294,1,1.0,1.207933413154748,0.5848609575113036 295,0,1.0,-0.5195977105007731,-1.9801780905893072 296,0,1.0,-1.2670988239980543,0.0464519360412306 297,1,1.0,1.7391157801536254,1.5648059307712128 298,1,1.0,1.8341510139252708,1.0819962650092607 299,0,1.0,-0.956008516719905,-1.0790439674454115 300,0,1.0,-0.2283707586108963,-0.8154219598905247 301,0,1.0,0.1027208659533498,-1.209479193874335 302,1,1.0,0.2342677470450889,-0.3841667249881214 303,0,1.0,-0.9822848116666076,-0.1730465612300329 304,1,1.0,-0.2736723014269126,-0.9943192348217365 305,1,1.0,0.6694745607141676,-0.7283559417298938 306,1,1.0,-0.5030360205988657,2.42714457383548 307,1,1.0,-0.5091672703742999,-0.2853721752296898 308,0,1.0,-0.5248330968832379,-1.6180776625709798 309,1,1.0,-1.5004986329180396,-0.484153601771506 310,1,1.0,-0.6158479387505932,0.0178753874195085 311,0,1.0,0.1222948381328843,0.1178646894817386 312,0,1.0,0.533319323502044,0.1500645438542796 313,1,1.0,1.2984280643034,0.1410441764464916 314,0,1.0,-1.2076469395463743,-0.2149425674625211 315,1,1.0,0.85504548218145,0.7231904990548563 
316,0,1.0,-1.344968676687029,-0.1620686622510434 317,0,1.0,-1.4043650973680473,-0.6189631658982028 318,1,1.0,-0.3957641275287884,-0.3723833259463206 319,1,1.0,-0.8685195500942487,-0.8328101737390551 320,1,1.0,0.4547358035067283,0.2561435661792704 321,1,1.0,0.792675209341637,1.1277798330473867 322,1,1.0,0.7382319891040461,-0.3199441621407357 323,1,1.0,-0.168298776723828,0.2744198530881772 324,0,1.0,-1.3209228037598135,-1.8641796960926813 325,1,1.0,0.443657137582849,1.1550768990864873 326,1,1.0,0.011542739833444,-0.7094220262449857 327,1,1.0,0.5773609086306638,-0.7259515479675905 328,1,1.0,0.8879340843067987,1.4070505205555208 329,1,1.0,0.6200477359005288,1.1345345467203014 330,1,1.0,0.7310595252555993,0.7455911761286926 331,0,1.0,-1.7658619849888864,-1.3720928516281985 332,0,1.0,-0.1341473456163144,-0.994298239199161 333,0,1.0,-0.6744754495951865,-0.3490680106918694 334,1,1.0,0.3879026726978911,0.3258163255631186 335,1,1.0,0.7540381493807256,0.6777241712068732 336,0,1.0,-1.3716973324326427,-0.4155500665400312 337,0,1.0,-0.534645259819218,-0.0229027180032056 338,0,1.0,-1.1086912800257407,-0.0672810453228422 339,1,1.0,-0.0252833781001102,-0.2917860158218366 340,0,1.0,-0.732104812450567,-1.4723941014147677 341,1,1.0,-0.2918157185783673,0.2069278085364787 342,1,1.0,-1.0591658056569309,-0.1148390749014321 343,1,1.0,1.157863530635342,0.1520750836099127 344,1,1.0,0.2324638798226632,-0.7465587815502271 345,0,1.0,-0.2067659239640128,-1.0167996994927333 346,0,1.0,-0.3971782657873483,-1.5076667897825005 347,1,1.0,0.953349979616936,1.7390279032350708 348,1,1.0,0.0700395737287643,-1.3279362540064 349,1,1.0,-0.6789129387004738,0.3332036016237296 350,0,1.0,-0.4391369271827264,-0.7074565150801869 351,1,1.0,-0.3443075117824245,1.8097972610869313 352,1,1.0,-0.1284660003310376,-0.5801554388335964 353,1,1.0,1.34829720408885,0.4320826453797517 354,1,1.0,0.8318590446012307,0.6864361974022913 355,0,1.0,-1.5564319014907797,-0.6033157273688537 356,1,1.0,1.1595761619710239,0.2178616340559158 
357,1,1.0,0.7048647888684799,0.8918527296532835 358,1,1.0,0.6951026378263953,1.2800949387118135 359,0,1.0,0.2926265412028538,0.3070625379533885 360,0,1.0,0.8741458802788883,-0.7216229201473695 361,0,1.0,-1.42192945762898,-0.6202074352425955 362,0,1.0,-1.9656085754759007,-3.1766209803054286 363,1,1.0,0.3574557075096347,-1.0496463677400432 364,0,1.0,-0.8922492072567841,-1.479611796991133 365,0,1.0,-0.0832253490197983,-0.6592784984364464 366,1,1.0,0.995113052382914,1.5947636437525377 367,1,1.0,0.3382176201063367,1.1104992284320743 368,1,1.0,1.0212880016810113,0.8654728026738276 369,0,1.0,-1.1426015797190967,0.0662036821644343 370,1,1.0,0.4506288328915611,-0.558854137371921 371,0,1.0,-2.104889350035209,-2.362407390816069 372,1,1.0,1.1024369159275051,-0.9347324212512744 373,1,1.0,-1.519101980822113,-0.2600589435428634 374,1,1.0,1.142816304263952,0.2951330740252899 375,1,1.0,-0.7371888587016935,0.9359396425381616 376,0,1.0,-1.5065209256280567,-0.410848272122933 377,1,1.0,-0.0595100402309776,0.5550810233373265 378,1,1.0,-0.188660503511723,0.7186023351995943 379,1,1.0,-0.7452214093124996,-0.7450187122196846 380,1,1.0,-0.531544435414745,0.3138769959444683 381,1,1.0,2.048227866003794,1.5083554913635764 382,1,1.0,0.6001940600803981,-0.7264227059830543 383,0,1.0,0.1519383904003125,-1.2891480708140328 384,0,1.0,-0.3545910695625223,-2.192961497477754 385,0,1.0,-0.3057643966312685,-0.585921640264153 386,1,1.0,0.2471959359997532,0.048414904403108 387,0,1.0,0.2543414216850956,-1.2469656323452512 388,0,1.0,-0.7609394037421923,-0.8448954377142418 389,0,1.0,-0.9163726974412084,0.9882031619427212 390,1,1.0,0.1508650108106266,0.8568553921820786 391,0,1.0,-0.6642978950140246,-0.8889735434778726 392,0,1.0,0.3548133507721483,-0.7779884413338854 393,1,1.0,-0.8600548840980936,-0.4539912247700959 394,1,1.0,-0.7429537738065075,0.0114437356426277 395,1,1.0,-0.2535340844255676,0.9076532949641268 396,1,1.0,0.8001937976744286,0.5722618352767321 397,1,1.0,-0.3883565308771623,-0.8259023842662157 
398,1,1.0,0.5977958997071127,-0.275801624651131 399,1,1.0,0.6387069878621207,1.3655600918843147 400,1,1.0,1.0911760733371816,1.5791502556562729 401,0,1.0,-0.9805425769609684,-0.3666617549156375 402,0,1.0,-1.369441385924611,-0.9847889416331238 403,1,1.0,1.1582984487228056,-1.2266167224361129 404,1,1.0,0.4440907238874604,-0.3936073933953013 405,1,1.0,0.6611205983388556,1.060123465219715 406,1,1.0,-0.8335795139696339,-0.9755222594150011 407,0,1.0,-1.6439914475814803,-0.8675406395128159 408,1,1.0,-0.8819368683329126,-0.1957812104822939 409,1,1.0,0.8354705639144281,0.1287775822586926 410,0,1.0,-0.2609988170970853,1.0192787770404355 411,0,1.0,0.4776824481723035,-1.6632972544475249 412,1,1.0,0.7352530908904895,0.1464596422096873 413,1,1.0,1.7754145709866491,0.5240941887032902 414,0,1.0,-0.6624419960181424,-0.7988957278612207 415,1,1.0,-0.4443604762316816,-1.642515989493213 416,1,1.0,0.2813076346511557,0.5221328197258545 417,1,1.0,0.4047248032129417,-0.2519314931457125 418,0,1.0,-1.4394125219510452,-0.6468744429238464 419,1,1.0,-0.858863344498971,-1.0834174787617243 420,1,1.0,-1.8221972955681007,-0.4423770310388989 421,1,1.0,0.0340291364481049,0.0535410677891895 422,1,1.0,0.1389924160873612,1.6561826698924016 423,1,1.0,1.3137538710602334,0.2668931722847939 424,0,1.0,-1.5823800259906016,-0.4843119202077253 425,0,1.0,-0.9884121757552115,-1.017275176721108 426,1,1.0,0.1891846729526719,0.7202276734240844 427,1,1.0,0.5549509525985887,0.6555349243663006 428,1,1.0,0.743046629972897,-0.2620526326348232 429,1,1.0,-0.7994828361716214,0.0674717100057284 430,1,1.0,1.3452888092027804,1.2993383495036197 431,1,1.0,0.593434907945471,0.5542402806505377 432,1,1.0,-0.3604995799057854,0.0717109903593987 433,1,1.0,-0.4386099200416233,-0.8651858607251545 434,0,1.0,-0.4402702669756882,-0.195029986235245 435,0,1.0,0.3032458645675097,-1.0372346857526795 436,1,1.0,-0.4333408019199423,-0.5682367355085381 437,1,1.0,1.5358280369397963,1.1336084514545677 438,1,1.0,1.0444229598809338,-1.706007604872449 
439,0,1.0,-0.6895667267398463,0.2197286472743179 440,1,1.0,-1.0835887859738067,-0.5133199774445829 441,1,1.0,-1.435345781022083,-0.3507862424218248 442,1,1.0,1.9297909746680324,0.8369372594479194 443,0,1.0,-0.338182401181062,-1.4017763122942484 444,0,1.0,-1.6733718505492423,-1.1502748432880705 445,1,1.0,0.6428480304549449,-0.8565841265118209 446,1,1.0,1.625043954688396,0.2684616284515225 447,1,1.0,0.2876272826778734,0.1478022099240175 448,1,1.0,0.3368801552005952,0.893746682993275 449,1,1.0,-0.0654948493457917,0.1471922583989302 450,0,1.0,-1.3491325809773853,-1.5042520420301004 451,1,1.0,2.19922820165968,0.9176696435031358 452,0,1.0,-1.0334672799120719,1.0862360341907649 453,0,1.0,-0.9915011452555988,-0.8899359464273447 454,0,1.0,-1.860269273300948,-1.4938459110286948 455,0,1.0,1.2143771399004315,-0.1574495810904875 456,0,1.0,-0.0257266277321376,-2.763283716356398 457,1,1.0,0.8793234918769298,-0.4790330268995254 458,1,1.0,-0.6848896091284365,-0.6895061753139731 459,0,1.0,-0.5058512330043357,-1.0735551021865912 460,0,1.0,-0.2349362672485728,-0.7195382062522444 461,0,1.0,-1.525836738077298,0.3768448376636971 462,1,1.0,1.343277042835544,1.4922969894105973 463,0,1.0,-1.1425390778723443,-0.951202897699515 464,0,1.0,-0.3226463317225551,-0.7402452426696127 465,0,1.0,-1.2404890638754444,-1.5531842506526792 466,1,1.0,-0.3536059317590624,0.3771460530559369 467,0,1.0,-1.3282742623104344,0.5339134714454185 468,0,1.0,-0.9258748330652584,-1.3421387779066227 469,0,1.0,-1.4665246512288688,-0.5451657549430262 470,1,1.0,-1.1812080835635677,-1.3493029874057316 471,0,1.0,0.4135585813164385,-0.025350021067494 472,0,1.0,-1.3243619524797448,0.409534672879704 473,0,1.0,-0.7228225038935124,-1.514869732841602 474,0,1.0,-2.20980025728101,-1.0093006645488696 475,1,1.0,1.238750423806423,-0.7446564777409403 476,1,1.0,-0.4186349451861238,-0.5618885655764582 477,1,1.0,1.0857714428735843,1.8243557159731605 478,0,1.0,-1.799327737479811,-0.2480898901303862 
479,0,1.0,-0.3253674708553154,-0.3914478506480767 480,1,1.0,-0.5136395015989216,-0.9317499359334656 481,0,1.0,-0.3081096323101271,-0.5011679303275849 482,1,1.0,-0.2208773209943117,1.4017034313293963 483,0,1.0,0.02167470940763,-0.3736350816473629 484,1,1.0,0.0896169433721431,1.2400084680697327 485,1,1.0,0.4025748274273325,-0.8595955345264295 486,1,1.0,0.0028195413080264,1.4772960576588925 487,1,1.0,-0.0397108752283274,0.0221461207268718 488,1,1.0,0.2719210349381246,0.6933413861564819 489,0,1.0,-0.8869306477742079,-0.57935496421845 490,0,1.0,-0.1052532576360905,-1.1272101493683535 491,1,1.0,-0.3964232816485298,0.2347032941233135 492,1,1.0,0.7519728559295678,-0.1073490103732342 493,0,1.0,-0.8375571507510022,-0.0193217276753288 494,0,1.0,-0.6531098282499894,0.4829876741298714 495,0,1.0,-0.7959626582390963,-1.5185537150051427 496,0,1.0,-2.230755547576115,-2.015629118756931 497,1,1.0,0.1662432821120798,1.92140063364716 498,0,1.0,-1.3813185589498524,-1.7651345098203135 499,0,1.0,0.5973042353337923,-0.4216421296311065 ================================================ FILE: src/estimagic/lollipop_plot.py ================================================ import math import pandas as pd import plotly.graph_objects as go from optimagic.config import PLOTLY_PALETTE, PLOTLY_TEMPLATE from optimagic.visualization.plotting_utilities import create_grid_plot, create_ind_dict def lollipop_plot( data, *, sharex=True, plot_bar=True, n_rows=1, scatterplot_kws=None, barplot_kws=None, combine_plots_in_grid=True, template=PLOTLY_TEMPLATE, palette=PLOTLY_PALETTE, ): """Make a lollipop plot. Args: data (pandas.DataFrame): The datapoints to be plotted. The whole data will be plotted. Thus if you want to plot just some variables or rows you need to restrict the dataset before passing it. sharex (bool): Whether the x-axis is shared across variables, default True. plot_bar (bool): Whether thin bars are plotted, default True. 
n_rows (int): Number of rows for a grid if plots are combined in a grid, default 1. The number of columns is determined automatically. scatterplot_kws (dict): Keyword arguments to plot the dots of the lollipop plot via the scatter function. barplot_kws (dict): Keyword arguments to plot the lines of the lollipop plot via the barplot function. combine_plots_in_grid (bool): decide whether to return a one figure containing subplots for each factor pair or a dictionary of individual plots. Default True. template (str): The template for the figure. Default is "plotly_white". palette: The coloring palette for traces. Default is "qualitative.Plotly". Returns: plotly.Figure: The grid plot or dict of individual plots """ data, varnames = _harmonize_data(data) scatter_dict = { "mode": "markers", "marker": {"color": palette[0]}, "showlegend": False, } bar_dict = { "orientation": "h", "width": 0.03, "marker": {"color": palette[0]}, "showlegend": False, } scatterplot_kws = ( scatter_dict if scatterplot_kws is None else scatter_dict.update( {k: v for k, v in scatterplot_kws.items() if k not in scatter_dict} ) ) barplot_kws = ( bar_dict if barplot_kws is None else bar_dict.update( {k: v for k, v in barplot_kws.items() if k not in bar_dict} ) ) # container for individual plots g_list = [] # container for titles titles = [] # creating data traces for plotting faceted/individual plots for indep_name in varnames: g_ind = [] # dot plot using the scatter function to_plot = data[data["indep"] == indep_name] trace_1 = go.Scatter(x=to_plot["values"], y=to_plot["__name__"], **scatter_dict) g_ind.append(trace_1) # bar plot if plot_bar: trace_2 = go.Bar(x=to_plot["values"], y=to_plot["__name__"], **bar_dict) g_ind.append(trace_2) g_list.append(g_ind) titles.append(indep_name) # common x range lower_candidate = data[["indep", "values"]].groupby("indep").min().min() upper_candidate = data[["indep", "values"]].groupby("indep").max().max() padding = (upper_candidate - lower_candidate) / 10 lower 
= lower_candidate - padding
    upper = upper_candidate + padding

    # keyword arguments shared by the grid plot and the individual plots
    common_dependencies = {
        "ind_list": g_list,
        "names": titles,
        "share_xax": sharex,
        "x_min": lower,
        "x_max": upper,
    }
    common_layout = {
        "template": template,
        "margin": {"l": 10, "r": 10, "t": 30, "b": 10},
    }

    # Plot with subplots
    if combine_plots_in_grid:
        n_cols = math.ceil(len(varnames) / n_rows)
        g = create_grid_plot(
            rows=n_rows,
            cols=n_cols,
            **common_dependencies,
            kws={"height": 150 * n_rows, "width": 150 * n_cols, **common_layout},
        )
        out = g
    # Dictionary for individual plots
    else:
        ind_dict = create_ind_dict(
            **common_dependencies,
            kws={"height": 150, "width": 150, "title_x": 0.5, **common_layout},
        )
        out = ind_dict

    return out


def _harmonize_data(data):
    """Combine one or several DataFrames into long format.

    Adds the helper columns "__name__" (stringified row label) and "__hue__"
    (position of the source frame in the input list) before melting.
    """
    if not isinstance(data, list):
        data = [data]

    to_concat = []
    for i, _df in enumerate(data):
        df = _df.copy()
        df.columns = _make_string_index(df.columns)
        df.index = _make_string_index(df.index)
        df["__name__"] = df.index
        df["__hue__"] = i
        to_concat.append(df)

    combined = pd.concat(to_concat)

    # so that it is possible to facet the strip plot
    new_data = pd.melt(
        combined, id_vars=["__name__", "__hue__"], var_name="indep", value_name="values"
    )

    varnames = new_data["indep"].unique()

    return new_data, varnames


def _make_string_index(ind):
    """Flatten a (Multi)Index into a list of plain strings.

    MultiIndex tuples are joined with underscores; plain indices are mapped
    through str.
    """
    if isinstance(ind, pd.MultiIndex):
        out = ind.map(lambda tup: "_".join(str(name) for name in tup)).tolist()
    else:
        out = ind.map(str).tolist()
    return out


================================================
FILE: src/estimagic/ml_covs.py
================================================
"""Functions for inferences in maximum likelihood models."""

import numpy as np
import pandas as pd

from estimagic.shared_covs import process_pandas_arguments
from optimagic.exceptions import INVALID_INFERENCE_MSG
from optimagic.utilities import robust_inverse


def cov_hessian(hess):
    """Covariance based on the negative inverse of the hessian of loglike.
def cov_jacobian(jac):
    """Covariance based on outer product of jacobian of loglikeobs.

    Args:
        jac (numpy.ndarray): 2d array jacobian matrix of dimension
            (nobs, nparams)

    Returns:
        numpy.ndarray: covariance matrix of size (nparams, nparams)

    Resources: Marno Verbeek - A guide to modern econometrics.

    """
    _jac, names = process_pandas_arguments(jac=jac)

    # The outer product of the per-observation scores estimates the
    # information matrix.
    information = _jac.T @ _jac

    cov = robust_inverse(information, msg=INVALID_INFERENCE_MSG)

    if "params" in names:
        labels = names["params"]
        cov = pd.DataFrame(cov, columns=labels, index=labels)

    return cov
def se_from_cov(cov):
    """Standard deviation of parameter estimates based on the function of choice.

    Args:
        cov (numpy.ndarray or pandas.DataFrame): Covariance matrix.

    Returns:
        numpy.ndarray or pandas.Series: 1d array with standard errors. A Series
            (labeled with the covariance index) is returned when a DataFrame
            was passed in.

    """
    variances = np.diag(cov)
    standard_errors = np.sqrt(variances)

    if isinstance(cov, pd.DataFrame):
        return pd.Series(standard_errors, index=cov.index)

    return standard_errors
def cov_strata_robust(jac, hess, design_info):
    """Covariance matrix that is robust to stratified survey sampling.

    A stratum is a group of observations that share common information. Each
    stratum can be constructed based on age, gender, education, region, etc.
    The function runs the same formulation as for cluster robust covariances
    for each stratum and returns the sum. Each stratum contains primary
    sampling units (psu) or clusters. If observations are independent, but you
    wish to have strata, make the psu column take the values of the index.

    Args:
        jac (np.array): "jacobian" - an n x k + 1-dimensional array of first
            derivatives of the pseudo-log-likelihood function w.r.t. the
            parameters
        hess (np.array): "hessian" - a k + 1 x k + 1-dimensional array of
            second derivatives of the pseudo-log-likelihood function w.r.t.
            the parameters
        design_info (pd.DataFrame): dataframe containing psu, stratum,
            population/design weight and/or a finite population corrector (fpc)

    Returns:
        numpy.ndarray or pandas.DataFrame: 2d variance-covariance matrix of
            shape (k + 1, k + 1). A DataFrame labeled with the parameter names
            is returned when pandas inputs were passed.

    """
    _jac, _hess, names = process_pandas_arguments(jac=jac, hess=hess)
    # The stratified "meat" goes between two inverse-hessian "bread" slices.
    strata_meat = _stratification(_jac, design_info)
    cov = _sandwich_step(_hess, strata_meat)

    if "params" in names:
        cov = pd.DataFrame(cov, columns=names["params"], index=names["params"])

    return cov
This is used in several robust covariance formulae. Args: hess (np.array): "hessian" - a k + 1 x k + 1-dimensional array of second derivatives of the pseudo-log-likelihood function w.r.t. the parameters meat (np.array): the variance of the total scores Returns: se (np.array): a 1d array of k + 1 standard errors var (np.array): 2d variance-covariance matrix """ invhessian = robust_inverse(hess, INVALID_INFERENCE_MSG) var = np.dot(np.dot(invhessian, meat), invhessian) return var def _clustering(jac, design_info): """Variance estimation for each cluster. The function takes the sum of the jacobian observations for each cluster. The result is the meat of the sandwich estimator. Args: jac (np.array): "jacobian" - an n x k + 1-dimensional array of first derivatives of the pseudo-log-likelihood function w.r.t. the parameters design_info (pd.DataFrame): dataframe containing psu, stratum, population/design weight and/or a finite population corrector (fpc) Returns: cluster_meat (np.array): 2d square array of length k + 1. Variance of the likelihood equation (Pg.557, 14-10, Greene 7th edition) """ list_of_clusters = design_info["psu"].unique() meat = np.zeros([len(jac[0, :]), len(jac[0, :])]) for psu in list_of_clusters: psu_scores = jac[design_info["psu"] == psu] psu_scores_sum = psu_scores.sum(axis=0) meat += np.dot(psu_scores_sum[:, None], psu_scores_sum[:, None].T) cluster_meat = len(list_of_clusters) / (len(list_of_clusters) - 1) * meat return cluster_meat def _stratification(jac, design_info): """Variance estimation for each stratum. The function takes the sum of the jacobian observations for each cluster within strata. The result is the meat of the sandwich estimator. Args: design_options (pd.DataFrame): dataframe containing psu, stratum, population/design weight and/or a finite population corrector (fpc) jac (np.array): "jacobian" - an n x k + 1-dimensional array of first derivatives of the pseudo-log-likelihood function w.r.t. 
the parameters Returns: strata_meat (np.array): 2d square array of length k + 1. Variance of the likelihood equation """ n_params = len(jac[0, :]) stratum_col = design_info["strata"] # Stratification does not require clusters if "psu" not in design_info: design_info["psu"] = design_info.index else: pass psu_col = design_info["psu"] strata_meat = np.zeros([n_params, n_params]) # Variance estimation per stratum for stratum in stratum_col.unique(): psu_in_strata = psu_col[stratum_col == stratum].unique() psu_jac = np.zeros([n_params]) if "fpc" in design_info: fpc = design_info["fpc"][stratum_col == stratum].unique() else: fpc = 1 # psu_jac stacks the sum of the observations for each cluster. for psu in psu_in_strata: psu_jac = np.vstack([psu_jac, np.sum(jac[psu_col == psu], axis=0)]) psu_jac_mean = np.sum(psu_jac, axis=0) / len(psu_in_strata) if len(psu_in_strata) > 1: mid_step = np.dot( (psu_jac[1:] - psu_jac_mean).T, (psu_jac[1:] - psu_jac_mean) ) strata_meat += ( fpc * (len(psu_in_strata) / (len(psu_in_strata) - 1)) * mid_step ) # Apply "grand-mean" method for single unit stratum elif len(psu_in_strata) == 1: strata_meat += fpc * np.dot(psu_jac[1:].T, psu_jac[1:]) return strata_meat ================================================ FILE: src/estimagic/msm_covs.py ================================================ import pandas as pd from estimagic.shared_covs import process_pandas_arguments from optimagic.exceptions import INVALID_INFERENCE_MSG from optimagic.utilities import robust_inverse def cov_robust(jac, weights, moments_cov): """Calculate the cov of msm estimates with asymptotically non-efficient weights. Note that asymptotically non-efficient weights are typically preferrable because they lead to less finite sample bias. Args: jac (np.ndarray or pandas.DataFrame): Numpy array or DataFrame with the jacobian of simulate_moments with respect to params. The derivative needs to be taken at the estimated parameters. Has shape n_moments, n_params. 
def cov_optimal(jac, weights):
    """Calculate the cov of msm estimates with asymptotically efficient weights.

    Note that asymptotically efficient weights have substantial finite sample
    bias and are typically not a good choice.

    Args:
        jac (np.ndarray or pandas.DataFrame): Numpy array or DataFrame with the
            jacobian of simulate_moments with respect to params. The derivative
            needs to be taken at the estimated parameters. Has shape n_moments,
            n_params.
        weights (np.ndarray): The weighting matrix for msm estimation, assumed
            to be the asymptotically efficient one.

    Returns:
        numpy.ndarray or pandas.DataFrame: The covariance matrix. A DataFrame
            labeled with the parameter names is returned when pandas inputs
            were passed.

    """
    _jac, _weights, names = process_pandas_arguments(jac=jac, weights=weights)

    # With efficient weights the covariance collapses to (G' W G)^{-1}.
    cov = robust_inverse(_jac.T @ _weights @ _jac, msg=INVALID_INFERENCE_MSG)

    if names:
        cov = pd.DataFrame(cov, columns=names.get("params"), index=names.get("params"))

    return cov
def calculate_sensitivity_to_bias(jac, weights):
    """Calculate the sensitivity to bias.

    The sensitivity measure is calculated for each parameter wrt each moment.
    It answers the following question: How strongly would the parameter
    estimates be biased if the kth moment was misspecified, i.e not zero in
    expectation?

    Args:
        jac (np.ndarray or pandas.DataFrame): The jacobian of simulate_moments
            with respect to params, evaluated at the point estimates.
        weights (np.ndarray or pandas.DataFrame): The weighting matrix used for
            msm estimation.

    Returns:
        np.ndarray or pd.DataFrame: Sensitivity measure with shape
            (n_params, n_moments)

    """
    _jac, _weights, names = process_pandas_arguments(jac=jac, weights=weights)

    # m1 = -(G' W G)^{-1} G' W, i.e. the influence of the moments on params.
    weighted_gram = _sandwich(_jac, _weights)
    inverse_gram = robust_inverse(weighted_gram, INVALID_SENSITIVITY_MSG)
    m1 = -inverse_gram @ _jac.T @ _weights

    if names:
        m1 = pd.DataFrame(m1, index=names.get("params"), columns=names.get("moments"))

    return m1
def calculate_fundamental_sensitivity_to_noise(
    jac, weights, moments_cov, params_cov_opt
):
    """Calculate the fundamental sensitivity to noise.

    The sensitivity measure is calculated for each parameter wrt each moment.
    It answers the following question: How much precision would be lost if the
    kth moment was subject to a little additional noise if the optimal
    weighting matrix is used?

    Args:
        jac (np.ndarray or pandas.DataFrame): The jacobian of simulate_moments
            with respect to params, evaluated at the point estimates.
        weights (np.ndarray or pandas.DataFrame): The weighting matrix used for
            msm estimation.
        moments_cov (numpy.ndarray or pandas.DataFrame): The covariance matrix
            of the empirical moments.
        params_cov_opt (numpy.ndarray or pandas.DataFrame): The covariance
            matrix of the parameter estimates. Note that this needs to be the
            parameter covariance matrix using the formula for asymptotically
            optimal MSM.

    Returns:
        np.ndarray or pd.DataFrame: Sensitivity measure with shape
            (n_params, n_moments)

    """
    _jac, _weights, _moments_cov, _params_cov_opt, names = process_pandas_arguments(
        jac=jac, weights=weights, moments_cov=moments_cov, params_cov_opt=params_cov_opt
    )

    m2 = []

    for k in range(len(_weights)):
        # Mask that singles out the kth moment.
        mask_matrix_o = np.zeros(shape=_weights.shape)
        mask_matrix_o[k, k] = 1

        # Effect of noise in moment k on the parameter covariance.
        meat = _sandwich_plus(_jac, _weights, mask_matrix_o)

        m2k = _params_cov_opt @ meat @ _params_cov_opt
        m2k = np.diagonal(m2k)

        m2.append(m2k)

    # Stack per-moment rows and transpose to (n_params, n_moments).
    m2 = np.array(m2).T

    moments_variances = np.diagonal(_moments_cov)
    params_variances = np.diagonal(_params_cov_opt)

    # Rescale to elasticities: precision loss relative to the parameter
    # variance, per unit of moment variance.
    e2 = m2 / params_variances.reshape(-1, 1)
    e2 = e2 * moments_variances

    if names:
        e2 = pd.DataFrame(e2, index=names.get("params"), columns=names.get("moments"))

    return e2
def calculate_actual_sensitivity_to_noise(
    sensitivity_to_bias, weights, moments_cov, params_cov
):
    """Calculate the actual sensitivity to noise.

    The sensitivity measure is calculated for each parameter wrt each moment.
    It answers the following question: How much precision would be lost if the
    kth moment was subject to a little additional noise if "weights" is used
    as weighting matrix?

    Args:
        sensitivity_to_bias (np.ndarray or pandas.DataFrame): See
            ``calculate_sensitivity_to_bias`` for details.
        weights (np.ndarray or pandas.DataFrame): The weighting matrix used for
            msm estimation.
        moments_cov (numpy.ndarray or pandas.DataFrame): The covariance matrix
            of the empirical moments.
        params_cov (numpy.ndarray or pandas.DataFrame): The covariance matrix
            of the parameter estimates.

    Returns:
        np.ndarray or pd.DataFrame: Sensitivity measure with shape
            (n_params, n_moments)

    """
    # Work with the plain array; labels are recovered via `names` below.
    if isinstance(sensitivity_to_bias, pd.DataFrame):
        sensitivity_to_bias = sensitivity_to_bias.to_numpy()

    _weights, _moments_cov, _params_cov, names = process_pandas_arguments(
        weights=weights, moments_cov=moments_cov, params_cov=params_cov
    )

    m3 = []

    for k in range(len(_weights)):
        # Mask that singles out the kth moment.
        mask_matrix_o = np.zeros(shape=_weights.shape)
        mask_matrix_o[k, k] = 1

        m3k = _sandwich(sensitivity_to_bias.T, mask_matrix_o)
        m3k = np.diagonal(m3k)

        m3.append(m3k)

    # Stack per-moment rows and transpose to (n_params, n_moments).
    m3 = np.array(m3).T

    moments_variances = np.diagonal(_moments_cov)
    params_variances = np.diagonal(_params_cov)

    # Rescale to elasticities: precision loss relative to the parameter
    # variance, per unit of moment variance.
    e3 = m3 / params_variances.reshape(-1, 1)
    e3 = e3 * moments_variances

    if names:
        e3 = pd.DataFrame(e3, index=names.get("params"), columns=names.get("moments"))

    return e3
def calculate_actual_sensitivity_to_removal(jac, weights, moments_cov, params_cov):
    """Calculate the actual sensitivity to removal.

    The sensitivity measure is calculated for each parameter wrt each moment.
    It answers the following question: How much precision would be lost if the
    kth moment was excluded from the estimation if "weights" is used as
    weighting matrix?

    Args:
        jac (np.ndarray or pandas.DataFrame): The jacobian of simulate_moments
            with respect to params, evaluated at the point estimates.
        weights (np.ndarray or pandas.DataFrame): The weighting matrix used for
            msm estimation.
        moments_cov (numpy.ndarray or pandas.DataFrame): The covariance matrix
            of the empirical moments.
        params_cov (numpy.ndarray or pandas.DataFrame): The covariance matrix
            of the parameter estimates.

    Returns:
        np.ndarray or pd.DataFrame: Sensitivity measure with shape
            (n_params, n_moments)

    """
    m4 = []

    _jac, _weights, _moments_cov, _params_cov, names = process_pandas_arguments(
        jac=jac, weights=weights, moments_cov=moments_cov, params_cov=params_cov
    )

    for k in range(len(_weights)):
        # Zeroing out row and column k gives moment k zero weight, which is
        # equivalent to removing it from the estimation.
        weight_tilde_k = np.copy(_weights)
        weight_tilde_k[k, :] = 0
        weight_tilde_k[:, k] = 0

        sigma_tilde_k = cov_robust(_jac, weight_tilde_k, _moments_cov)

        # Precision loss is the increase of the variances over params_cov.
        m4k = sigma_tilde_k - _params_cov
        m4k = m4k.diagonal()

        m4.append(m4k)

    # Stack per-moment rows and transpose to (n_params, n_moments).
    m4 = np.array(m4).T

    params_variances = np.diagonal(_params_cov)

    # Rescale to precision loss relative to the parameter variance.
    e4 = m4 / params_variances.reshape(-1, 1)

    if names:
        e4 = pd.DataFrame(e4, index=names.get("params"), columns=names.get("moments"))

    return e4
def calculate_fundamental_sensitivity_to_removal(jac, moments_cov, params_cov_opt):
    """Calculate the fundamental sensitivity to removal.

    The sensitivity measure is calculated for each parameter wrt each moment.
    It answers the following question: How much precision would be lost if the
    kth moment was excluded from the estimation if the optimal weighting
    matrix is used?

    Args:
        jac (np.ndarray or pandas.DataFrame): The jacobian of simulate_moments
            with respect to params, evaluated at the point estimates.
        moments_cov (numpy.ndarray or pandas.DataFrame): The covariance matrix
            of the empirical moments.
        params_cov_opt (numpy.ndarray or pandas.DataFrame): The covariance
            matrix of the parameter estimates. Note that this needs to be the
            parameter covariance matrix using the formula for asymptotically
            optimal MSM.

    Returns:
        np.ndarray or pd.DataFrame: Sensitivity measure with shape
            (n_params, n_moments)

    """
    _jac, _moments_cov, _params_cov_opt, names = process_pandas_arguments(
        jac=jac,
        moments_cov=moments_cov,
        params_cov_opt=params_cov_opt,
    )

    m5 = []

    for k in range(len(_moments_cov)):
        # Drop the kth moment from the jacobian and from moments_cov.
        g_k = np.copy(_jac)
        g_k = np.delete(g_k, k, axis=0)

        s_k = np.copy(_moments_cov)
        s_k = np.delete(s_k, k, axis=0)
        s_k = np.delete(s_k, k, axis=1)

        # Optimal-weighting parameter covariance without the kth moment.
        sigma_k = _sandwich(g_k, robust_inverse(s_k, INVALID_SENSITIVITY_MSG))
        sigma_k = robust_inverse(sigma_k, INVALID_SENSITIVITY_MSG)

        # Precision loss is the increase of the variances over params_cov_opt.
        m5k = sigma_k - _params_cov_opt
        m5k = m5k.diagonal()

        m5.append(m5k)

    # Stack per-moment rows and transpose to (n_params, n_moments).
    m5 = np.array(m5).T

    params_variances = np.diagonal(_params_cov_opt)

    # Rescale to precision loss relative to the parameter variance.
    e5 = m5 / params_variances.reshape(-1, 1)

    if names:
        e5 = pd.DataFrame(e5, index=names.get("params"), columns=names.get("moments"))

    return e5
Returns: np.ndarray or pd.DataFrame: Sensitivity measure with shape (n_params, n_moments) """ _jac, _weights, _moments_cov, _params_cov, names = process_pandas_arguments( jac=jac, weights=weights, moments_cov=moments_cov, params_cov=params_cov ) gwg_inverse = _sandwich(_jac, _weights) gwg_inverse = robust_inverse(gwg_inverse, INVALID_SENSITIVITY_MSG) m6 = [] for k in range(len(_weights)): mask_matrix_o = np.zeros(shape=_weights.shape) mask_matrix_o[k, k] = 1 m6k_1 = gwg_inverse @ _sandwich(_jac, mask_matrix_o) @ _params_cov m6k_2 = ( gwg_inverse @ _jac.T @ mask_matrix_o @ _moments_cov @ _weights @ _jac @ gwg_inverse ) m6k_3 = ( gwg_inverse @ _jac.T @ _weights @ _moments_cov @ mask_matrix_o @ _jac @ gwg_inverse ) m6k_4 = _params_cov @ _sandwich(_jac, mask_matrix_o) @ gwg_inverse m6k = -m6k_1 + m6k_2 + m6k_3 - m6k_4 m6k = m6k.diagonal() m6.append(m6k) m6 = np.array(m6).T weights_diagonal = np.diagonal(_weights) params_variances = np.diagonal(_params_cov) e6 = m6 / params_variances.reshape(-1, 1) e6 = e6 * weights_diagonal if names: e6 = pd.DataFrame(e6, index=names.get("params"), columns=names.get("moments")) return e6 def _sandwich(a, b): """Calculate the sandwich product of two matrices: a.T * b * a.""" sandwich = a.T @ b @ a return sandwich def _sandwich_plus(a, b, c): """Calculate the sandwich product of three matrices: a.T * b.T * c * b * a.""" sandwich = a.T @ b.T @ c @ b @ a return sandwich ================================================ FILE: src/estimagic/msm_weighting.py ================================================ import functools import numpy as np import pandas as pd from pybaum import tree_just_flatten from scipy.linalg import block_diag from estimagic.bootstrap import bootstrap from optimagic.parameters.block_trees import block_tree_to_matrix, matrix_to_block_tree from optimagic.parameters.tree_registry import get_registry from optimagic.utilities import robust_inverse def get_moments_cov( data, calculate_moments, *, moment_kwargs=None, 
def get_moments_cov(
    data, calculate_moments, *, moment_kwargs=None, bootstrap_kwargs=None
):
    """Bootstrap the covariance matrix of the moment conditions.

    Args:
        data (pandas.DataFrame): DataFrame with empirical data.
        calculate_moments (callable): Function that takes data and
            moment_kwargs as arguments and returns a 1d numpy array or pandas
            Series with moment conditions.
        moment_kwargs (dict): Additional keyword arguments for
            calculate_moments.
        bootstrap_kwargs (dict): Additional keyword arguments that govern the
            bootstrapping. Allowed arguments are "n_draws", "seed", "n_cores",
            "batch_evaluator", "weight_by", "cluster_by" and "error_handling".
            For details see the bootstrap function.

    Returns:
        pandas.DataFrame or numpy.ndarray: The covariance matrix of the moment
            conditions for msm estimation.

    Raises:
        ValueError: If bootstrap_kwargs contains keys that are not understood
            by the bootstrap function.

    """
    moment_kwargs = {} if moment_kwargs is None else moment_kwargs
    bootstrap_kwargs = {} if bootstrap_kwargs is None else bootstrap_kwargs
    # Validate eagerly so typos fail before any bootstrapping work is done.
    valid_bs_kwargs = {
        "n_cores",
        "n_draws",
        "seed",
        "batch_evaluator",
        "weight_by",
        "cluster_by",
        "error_handling",
        "existing_result",
        "outcome_kwargs",
    }
    problematic = set(bootstrap_kwargs).difference(valid_bs_kwargs)
    if problematic:
        raise ValueError(f"Invalid bootstrap_kwargs: {problematic}")

    # One evaluation to capture the pytree structure of the moments.
    first_eval = calculate_moments(data, **moment_kwargs)

    registry = get_registry(extended=True)

    @functools.wraps(calculate_moments)
    def func(data, **kwargs):
        raw = calculate_moments(data, **kwargs)
        # Flatten pytree moments to a Series so bootstrap can compute a cov.
        out = pd.Series(
            tree_just_flatten(raw, registry=registry)
        )  # xxxx won't be necessary soon!
        return out

    cov_arr = bootstrap(
        data=data, outcome=func, outcome_kwargs=moment_kwargs, **bootstrap_kwargs
    ).cov()

    if isinstance(cov_arr, pd.DataFrame):
        cov_arr = cov_arr.to_numpy()  # xxxx won't be necessary soon

    # Re-impose the pytree structure of the moments on the flat covariance.
    cov = matrix_to_block_tree(cov_arr, first_eval, first_eval)

    return cov
def get_weighting_matrix(
    moments_cov, method, empirical_moments, clip_value=1e-6, return_type="pytree"
):
    """Calculate a weighting matrix from moments_cov.

    Args:
        moments_cov (pandas.DataFrame or numpy.ndarray): Square DataFrame or
            Array with the covariance matrix of the moment conditions for msm
            estimation.
        method (str): One of "optimal", "diagonal", or "identity".
        empirical_moments (pytree): Pytree containing empirical moments. Used
            to get the tree structure
        clip_value (float): Bound at which diagonal elements of the moments_cov
            are clipped to avoid dividing by zero.
        return_type (str): One of "pytree", "array" or "pytree_and_array"

    Returns:
        pandas.DataFrame or numpy.ndarray: Weighting matrix with the same
            shape as moments_cov.

    Raises:
        ValueError: If method is not one of the supported strings.

    """
    # A plain 2d array needs no pytree <-> matrix conversions.
    fast_path = isinstance(moments_cov, np.ndarray) and moments_cov.ndim == 2

    if fast_path:
        _internal_cov = moments_cov
    else:
        _internal_cov = block_tree_to_matrix(
            moments_cov,
            outer_tree=empirical_moments,
            inner_tree=empirical_moments,
        )

    if method == "optimal":
        array_weights = robust_inverse(_internal_cov)
    elif method == "diagonal":
        # Clip tiny variances so the reciprocal stays finite.
        clipped = np.clip(np.diagonal(_internal_cov), clip_value, np.inf)
        array_weights = np.diag(1 / clipped)
    elif method == "identity":
        array_weights = np.identity(_internal_cov.shape[0])
    else:
        raise ValueError(f"Invalid method: {method}")

    if return_type == "array" or (fast_path and "_and_" not in return_type):
        out = array_weights
    elif fast_path:
        out = (array_weights, array_weights)
    else:
        tree_weights = matrix_to_block_tree(
            array_weights,
            outer_tree=empirical_moments,
            inner_tree=empirical_moments,
        )
        out = tree_weights if return_type == "pytree" else (tree_weights, array_weights)

    return out
""" values = block_diag(*matrices) if all(isinstance(mat, pd.DataFrame) for mat in matrices): to_concat = [pd.Series(index=mat.index, dtype=float) for mat in matrices] combined_index = pd.concat(to_concat).index out = pd.DataFrame(values, index=combined_index, columns=combined_index) else: out = values return out ================================================ FILE: src/estimagic/py.typed ================================================ ================================================ FILE: src/estimagic/shared_covs.py ================================================ from typing import NamedTuple import numpy as np import pandas as pd import scipy from pybaum import tree_just_flatten, tree_unflatten from optimagic.parameters.block_trees import matrix_to_block_tree from optimagic.parameters.tree_registry import get_registry def transform_covariance( internal_params, internal_cov, converter, rng, n_samples, bounds_handling, ): """Transform the internal covariance matrix to an external one, given constraints. Args: internal_params (InternalParams): NamedTuple with entries: - value (np.ndarray): Internal parameter values. - lower_bounds (np.ndarray): Lower bounds on the internal params. - upper_bounds (np.ndarray): Upper bounds on the internal params. - soft_lower_bounds (np.ndarray): Soft lower bounds on the internal params. - soft_upper_bounds (np.ndarray): Soft upper bounds on the internal params. - name (list): List of names of the external parameters. - free_mask (np.ndarray): Boolean mask representing which external parameter is free. internal_cov (np.ndarray or pandas.DataFrame) with a covariance matrix of the internal parameter vector. For background information about internal and external params see :ref:`implementation_of_constraints`. constraints (list): List with constraint dictionaries. See :ref:`constraints`. rng (numpy.random.Generator): A random number generator. 
def transform_covariance(
    internal_params,
    internal_cov,
    converter,
    rng,
    n_samples,
    bounds_handling,
):
    """Transform the internal covariance matrix to an external one, given constraints.

    Args:
        internal_params (InternalParams): NamedTuple with entries:
            - value (np.ndarray): Internal parameter values.
            - lower_bounds (np.ndarray): Lower bounds on the internal params.
            - upper_bounds (np.ndarray): Upper bounds on the internal params.
            - soft_lower_bounds (np.ndarray): Soft lower bounds on the internal
              params.
            - soft_upper_bounds (np.ndarray): Soft upper bounds on the internal
              params.
            - name (list): List of names of the external parameters.
            - free_mask (np.ndarray): Boolean mask representing which external
              parameter is free.
        internal_cov (np.ndarray or pandas.DataFrame): Covariance matrix of the
            internal parameter vector. For background information about
            internal and external params see
            :ref:`implementation_of_constraints`.
        converter: Converter object with a ``has_transforming_constraints``
            attribute and a ``params_from_internal`` method.
        rng (numpy.random.Generator): A random number generator.
        n_samples (int): Number of samples used to transform the covariance
            matrix of the internal parameter vector into the covariance matrix
            of the external parameters.
        bounds_handling (str): One of "clip", "raise", "ignore". Determines how
            bounds are handled. If "clip", confidence intervals are clipped at
            the bounds. Standard errors are only adjusted if a sampling step is
            necessary due to additional constraints. If "raise" and any lower
            or upper bound is binding, we raise an error. If "ignore", boundary
            problems are simply ignored.

    Returns:
        pd.DataFrame: Quadratic DataFrame containing the covariance matrix of
            the free parameters. If parameters were fixed (explicitly or by
            other constraints), the index is a subset of params.index. The
            columns are the same as the index.

    Raises:
        ValueError: If bounds_handling is "raise" and a sampled parameter
            vector violates a bound.

    """
    if converter.has_transforming_constraints:
        _from_internal = converter.params_from_internal

        is_free = internal_params.free_mask
        lower_bounds = internal_params.lower_bounds
        upper_bounds = internal_params.upper_bounds

        sample = rng.multivariate_normal(
            mean=internal_params.values,
            cov=internal_cov,
            size=n_samples,
        )
        transformed_free = []
        for params in sample:
            if bounds_handling == "clip":
                x = np.clip(params, a_min=lower_bounds, a_max=upper_bounds)
            elif bounds_handling == "raise":
                if (params < lower_bounds).any() or (params > upper_bounds).any():
                    raise ValueError()
                # BUGFIX: x was never assigned in this branch, so the first
                # iteration raised UnboundLocalError (and later iterations
                # silently reused the previous draw).
                x = params
            else:
                x = params

            transformed = _from_internal(x=x, return_type="flat")
            transformed_free.append(transformed[is_free])

        # Empirical covariance of the transformed draws.
        free_cov = np.cov(
            np.array(transformed_free),
            rowvar=False,
        )

    else:
        # Without transforming constraints the internal cov is already the
        # cov of the free parameters.
        free_cov = internal_cov

    return free_cov
def calculate_summary_data_estimation(
    estimation_result,
    free_estimates,
    ci_level,
    method,
    n_samples,
    bounds_handling,
    seed,
):
    """Collect values and inference statistics for an estimation summary.

    Args:
        estimation_result: Result object providing ``params`` and the ``se``,
            ``ci`` and ``p_values`` methods.
        free_estimates: Object with a ``free_mask`` attribute that marks the
            free parameters.
        ci_level (float): Confidence level of the confidence interval.
        method (str): Inference method, forwarded to the result object.
        n_samples (int): Number of samples for sampling-based inference,
            forwarded to the result object.
        bounds_handling (str): How bounds are handled, forwarded to the result
            object.
        seed: Seed, forwarded to the result object.

    Returns:
        dict: Summary data with keys "value", "standard_error", "ci_lower",
            "ci_upper", "p_value" and "free".

    """
    shared_kwargs = {
        "method": method,
        "n_samples": n_samples,
        "bounds_handling": bounds_handling,
        "seed": seed,
    }
    standard_errors = estimation_result.se(**shared_kwargs)
    ci_lower, ci_upper = estimation_result.ci(ci_level=ci_level, **shared_kwargs)
    p_values = estimation_result.p_values(**shared_kwargs)

    return {
        "value": estimation_result.params,
        "standard_error": standard_errors,
        "ci_lower": ci_lower,
        "ci_upper": ci_upper,
        "p_value": p_values,
        "free": free_estimates.free_mask,
    }
""" # ================================================================================== # Flatten summary and construct data frame for flat estimates # ================================================================================== registry = get_registry(extended=True) flat_data = { key: tree_just_flatten(val, registry=registry) for key, val in summary_data.items() } df = pd.DataFrame(flat_data, index=names) stars = pd.cut( df.loc[free_names, "p_value"], bins=[-1, 0.01, 0.05, 0.1, 2], labels=["***", "**", "*", ""], ) df["stars"] = stars # ================================================================================== # Map summary data into params tree structure # ================================================================================== # create tree with values corresponding to indices of df indices = tree_unflatten(summary_data["value"], names, registry=registry) estimates_flat = tree_just_flatten(summary_data["value"]) indices_flat = tree_just_flatten(indices) # use index chunks in indices_flat to access the corresponding sub data frame of df, # and use the index information stored in estimates_flat to form the correct (multi) # index for the resulting leaf. 
def process_pandas_arguments(**kwargs):
    """Convert pandas objects to arrays and extract moment and parameter names.

    Works for any number of keyword arguments. The result is a tuple with the
    numpy arrays in the same order as the keyword arguments, followed by a
    dictionary of separated index objects as the last entry. That dictionary
    contains the entries "params" and/or "moments" whenever names could be
    identified.

    Names are taken from the keyword arguments "jac" (columns -> params,
    index -> moments), "hess" (index -> params), "weights" and "moments_cov"
    (index -> moments). All other keyword arguments are simply converted to
    numpy arrays.

    Raises:
        TypeError: If an argument is neither a DataFrame nor a numpy array.
        ValueError: If the identified names are ambiguous.

    """
    # Which attribute of which keyword argument carries parameter and moment
    # names, respectively.
    param_sources = {"jac": "columns", "hess": "index"}
    moment_sources = {"jac": "index", "weights": "index", "moments_cov": "index"}

    def _collect(sources):
        # Only DataFrames carry name information; plain arrays are skipped.
        candidates = {}
        for key, attr in sources.items():
            obj = kwargs.get(key)
            if isinstance(obj, pd.DataFrame):
                candidates[key] = getattr(obj, attr)
        return candidates

    param_candidates = _collect(param_sources)
    moment_candidates = _collect(moment_sources)

    names = {}
    if param_candidates:
        _check_names_coincide(param_candidates)
        names["params"] = next(iter(param_candidates.values()))
    if moment_candidates:
        _check_names_coincide(moment_candidates)
        names["moments"] = next(iter(moment_candidates.values()))

    # Keep the output order aligned with the keyword argument order; the names
    # dictionary always comes last.
    arrays = [_to_numpy(val, name=key) for key, val in kwargs.items()]
    return (*arrays, names)


def _to_numpy(df_or_array, name):
    """Return *df_or_array* as a numpy array; reject any other type."""
    if isinstance(df_or_array, np.ndarray):
        return df_or_array
    if isinstance(df_or_array, pd.DataFrame):
        return df_or_array.to_numpy()
    raise TypeError(
        f"{name} must be a DataFrame or numpy array, not {type(df_or_array)}."
    )


def _check_names_coincide(name_dict):
    """Raise a ValueError if the index objects in *name_dict* are not all equal.

    The first entry serves as the reference; every other entry is compared
    against it with ``Index.equals``.

    """
    if len(name_dict) < 2:
        return
    items = iter(name_dict.items())
    reference_key, reference_names = next(items)
    for key, candidate in items:
        if not reference_names.equals(candidate):
            msg = f"Ambiguous parameter or moment names from {reference_key} and {key}."
            raise ValueError(msg)
def get_derivative_case(derivative):
    """Classify how the derivative should be obtained.

    Args:
        derivative: A callable (closed-form derivative), False (skip the
            derivative-based computations), or anything else (fall back to
            numerical differentiation).

    Returns:
        str: One of "closed-form", "skip" or "numerical".

    """
    if callable(derivative):
        return "closed-form"
    if derivative is False:
        return "skip"
    return "numerical"


def calculate_ci(free_values, free_standard_errors, ci_level):
    """Compute normal-approximation confidence bounds for the free parameters.

    Args:
        free_values (np.ndarray): Point estimates of the free parameters.
        free_standard_errors (np.ndarray): Standard errors of the estimates.
        ci_level (float): Confidence level, e.g. 0.95.

    Returns:
        tuple: Arrays with the lower and upper confidence bounds.

    """
    tail_probability = (1 - ci_level) / 2
    critical_value = scipy.stats.norm.ppf(1 - tail_probability)
    margin = critical_value * free_standard_errors
    return free_values - margin, free_values + margin


def calculate_p_values(free_values, free_standard_errors):
    """Compute two-sided p-values based on a normal approximation.

    Standard errors are clipped at 1e-300 from below so a zero standard error
    yields a p-value of zero instead of a division-by-zero warning.

    Args:
        free_values (np.ndarray): Point estimates of the free parameters.
        free_standard_errors (np.ndarray): Standard errors of the estimates.

    Returns:
        np.ndarray: The two-sided p-values.

    """
    clipped_se = np.clip(free_standard_errors, 1e-300, np.inf)
    t_values = free_values / clipped_se
    return 2 * scipy.stats.norm.sf(np.abs(t_values))


def calculate_free_estimates(estimates, internal_estimates):
    """Select the free subset of the flattened external estimates.

    Args:
        estimates: Pytree of external parameter estimates.
        internal_estimates: Object with attributes ``free_mask`` (boolean
            array) and ``names`` (list of external parameter names).

    Returns:
        FreeParams: Values, mask and names of the free parameters.

    """
    mask = internal_estimates.free_mask
    all_names = internal_estimates.names
    registry = get_registry(extended=True)
    external_flat = np.array(tree_just_flatten(estimates, registry=registry))
    return FreeParams(
        values=external_flat[mask],
        free_mask=mask,
        all_names=all_names,
        free_names=np.array(all_names)[mask].tolist(),
    )


def transform_free_cov_to_cov(free_cov, free_params, params, return_type):
    """Fill non-free values and project to params block-tree.

    Entries belonging to non-free parameters are NaN in the output.

    Args:
        free_cov (np.ndarray): Covariance matrix of the free parameters.
        free_params (FreeParams): Mask and names of the free parameters.
        params: Pytree of parameters (used for the "pytree" return type).
        return_type (str): One of "array", "dataframe" or "pytree".

    Returns:
        The full covariance in the requested format.

    Raises:
        ValueError: If ``return_type`` is not recognized.

    """
    mask = free_params.free_mask
    dim = len(mask)
    cov = np.full((dim, dim), np.nan)
    cov[np.ix_(mask, mask)] = free_cov
    if return_type == "array":
        return cov
    if return_type == "dataframe":
        labels = free_params.all_names
        return pd.DataFrame(cov, columns=labels, index=labels)
    if return_type == "pytree":
        return matrix_to_block_tree(cov, params, params)
    raise ValueError(
        "return_type must be one of pytree, array, or dataframe, "
        f"not {return_type}."
    )


def transform_free_values_to_params_tree(values, free_params, params):
    """Fill non-free values and project to params tree structure.

    Non-free entries are NaN in the output tree.

    """
    mask = free_params.free_mask
    flat = np.full(len(mask), np.nan)
    # Boolean-mask assignment is equivalent to flat[np.ix_(mask)] = values.
    flat[mask] = values
    registry = get_registry(extended=True)
    return tree_unflatten(params, flat, registry=registry)


class FreeParams(NamedTuple):
    """Bundle of values, mask and names describing the free parameters."""

    values: np.ndarray  # free external parameter values
    free_mask: np.ndarray  # boolean mask to filter free params from external params
    free_names: list  # names of free external parameters
    all_names: list  # names of all external parameters
_robust_cholesky from optimagic.utilities import robust_inverse as _robust_inverse from optimagic.utilities import sdcorr_params_to_matrix as _sdcorr_params_to_matrix from optimagic.utilities import ( sdcorr_params_to_sds_and_corr as _sdcorr_params_to_sds_and_corr, ) from optimagic.utilities import sds_and_corr_to_cov as _sds_and_corr_to_cov from optimagic.utilities import to_pickle as _to_pickle MSG = ( "estimagic.utilities.{name} has been deprecated in version 0.5.0. Use optimagic." "utilities.{name} instead. This function will be removed in version 0.6.0." ) chol_params_to_lower_triangular_matrix = deprecated( _chol_params_to_lower_triangular_matrix, MSG.format(name="chol_params_to_lower_triangular_matrix"), ) cov_params_to_matrix = deprecated( _cov_params_to_matrix, MSG.format(name="cov_params_to_matrix") ) cov_matrix_to_params = deprecated( _cov_matrix_to_params, MSG.format(name="cov_matrix_to_params") ) sdcorr_params_to_sds_and_corr = deprecated( _sdcorr_params_to_sds_and_corr, MSG.format(name="sdcorr_params_to_sds_and_corr") ) sds_and_corr_to_cov = deprecated( _sds_and_corr_to_cov, MSG.format(name="sds_and_corr_to_cov") ) cov_to_sds_and_corr = deprecated( _cov_to_sds_and_corr, MSG.format(name="cov_to_sds_and_corr") ) sdcorr_params_to_matrix = deprecated( _sdcorr_params_to_matrix, MSG.format(name="sdcorr_params_to_matrix") ) cov_matrix_to_sdcorr_params = deprecated( _cov_matrix_to_sdcorr_params, MSG.format(name="cov_matrix_to_sdcorr_params") ) number_of_triangular_elements_to_dimension = deprecated( _number_of_triangular_elements_to_dimension, MSG.format(name="number_of_triangular_elements_to_dimension"), ) dimension_to_number_of_triangular_elements = deprecated( _dimension_to_number_of_triangular_elements, MSG.format(name="dimension_to_number_of_triangular_elements"), ) propose_alternatives = deprecated( _propose_alternatives, MSG.format(name="propose_alternatives") ) robust_cholesky = deprecated(_robust_cholesky, MSG.format(name="robust_cholesky")) 
robust_inverse = deprecated(_robust_inverse, MSG.format(name="robust_inverse")) hash_array = deprecated(_hash_array, MSG.format(name="hash_array")) calculate_trustregion_initial_radius = deprecated( _calculate_trustregion_initial_radius, MSG.format(name="calculate_trustregion_initial_radius"), ) to_pickle = deprecated(_to_pickle, MSG.format(name="to_pickle")) read_pickle = deprecated(_read_pickle, MSG.format(name="read_pickle")) isscalar = deprecated(_isscalar, MSG.format(name="isscalar")) get_rng = deprecated(_get_rng, MSG.format(name="get_rng")) __all__ = [ "chol_params_to_lower_triangular_matrix", "cov_params_to_matrix", "cov_matrix_to_params", "sdcorr_params_to_sds_and_corr", "sds_and_corr_to_cov", "cov_to_sds_and_corr", "sdcorr_params_to_matrix", "cov_matrix_to_sdcorr_params", "number_of_triangular_elements_to_dimension", "dimension_to_number_of_triangular_elements", "propose_alternatives", "robust_cholesky", "robust_inverse", "hash_array", "calculate_trustregion_initial_radius", "to_pickle", "read_pickle", "isscalar", "get_rng", ] ================================================ FILE: src/optimagic/__init__.py ================================================ from __future__ import annotations from optimagic import constraints, mark, sandbox, timing, utilities from optimagic.algorithms import algos from optimagic.benchmarking.benchmark_reports import ( convergence_report, rank_report, traceback_report, ) from optimagic.benchmarking.get_benchmark_problems import get_benchmark_problems from optimagic.benchmarking.run_benchmark import run_benchmark from optimagic.constraints import ( DecreasingConstraint, EqualityConstraint, FixedConstraint, FlatCovConstraint, FlatSDCorrConstraint, IncreasingConstraint, LinearConstraint, NonlinearConstraint, PairwiseEqualityConstraint, ProbabilityConstraint, ) from optimagic.differentiation.derivatives import first_derivative, second_derivative from optimagic.differentiation.numdiff_options import NumdiffOptions from 
optimagic.logging import ( ExistenceStrategy as ExistenceStrategy, ) from optimagic.logging import ( SQLiteLogOptions as SQLiteLogOptions, ) from optimagic.logging import ( SQLiteLogReader as SQLiteLogReader, ) from optimagic.logging.read_log import OptimizeLogReader from optimagic.optimization.fun_value import ( FunctionValue, LeastSquaresFunctionValue, LikelihoodFunctionValue, ScalarFunctionValue, ) from optimagic.optimization.history import History from optimagic.optimization.multistart_options import MultistartOptions from optimagic.optimization.optimize import maximize, minimize from optimagic.optimization.optimize_result import OptimizeResult from optimagic.optimizers import pygad from optimagic.parameters.bounds import Bounds from optimagic.parameters.constraint_tools import check_constraints, count_free_params from optimagic.parameters.scaling import ScalingOptions from optimagic.visualization.convergence_plot import convergence_plot from optimagic.visualization.history_plots import criterion_plot, params_plot from optimagic.visualization.profile_plot import profile_plot from optimagic.visualization.slice_plot import slice_plot try: from ._version import version as __version__ except ImportError: # broken installation, we don't even try unknown only works because we do poor mans # version compare __version__ = "unknown" __all__ = [ "maximize", "minimize", "utilities", "first_derivative", "second_derivative", "run_benchmark", "get_benchmark_problems", "profile_plot", "convergence_plot", "convergence_report", "rank_report", "traceback_report", "slice_plot", "criterion_plot", "params_plot", "count_free_params", "check_constraints", "OptimizeLogReader", "OptimizeResult", "Bounds", "mark", "ScalingOptions", "MultistartOptions", "NumdiffOptions", "FunctionValue", "LeastSquaresFunctionValue", "ScalarFunctionValue", "LikelihoodFunctionValue", "constraints", "FlatCovConstraint", "FlatSDCorrConstraint", "IncreasingConstraint", "DecreasingConstraint", 
"FixedConstraint", "NonlinearConstraint", "LinearConstraint", "ProbabilityConstraint", "PairwiseEqualityConstraint", "EqualityConstraint", "History", "__version__", "algos", "pygad", "timing", "sandbox", ] ================================================ FILE: src/optimagic/algorithms.py ================================================ """This code was auto-generated by a pre-commit hook and should not be changed. If you manually change this code, all of your changes will be overwritten the next time the pre-commit hook runs. Detailed information on the purpose of the code can be found here: https://optimagic.readthedocs.io/en/latest/development/ep-02-typing.html#algorithm-selection """ from dataclasses import dataclass from typing import Type, cast from optimagic.optimization.algorithm import Algorithm from optimagic.optimizers.bayesian_optimizer import BayesOpt from optimagic.optimizers.bhhh import BHHH from optimagic.optimizers.fides import Fides from optimagic.optimizers.gfo_optimizers import ( GFODifferentialEvolution, GFODownhillSimplex, GFOEvolutionStrategy, GFOGeneticAlgorithm, GFOHillClimbing, GFOParallelTempering, GFOParticleSwarmOptimization, GFOPowellsMethod, GFORepulsingHillClimbing, GFOSimulatedAnnealing, GFOSpiralOptimization, GFOStochasticHillClimbing, ) from optimagic.optimizers.iminuit_migrad import IminuitMigrad from optimagic.optimizers.ipopt import Ipopt from optimagic.optimizers.nag_optimizers import NagDFOLS, NagPyBOBYQA from optimagic.optimizers.neldermead import NelderMeadParallel from optimagic.optimizers.nevergrad_optimizers import ( NevergradBayesOptim, NevergradCGA, NevergradCMAES, NevergradDifferentialEvolution, NevergradEDA, NevergradEMNA, NevergradMeta, NevergradNGOpt, NevergradOnePlusOne, NevergradPSO, NevergradRandomSearch, NevergradSamplingSearch, NevergradTBPSA, ) from optimagic.optimizers.nlopt_optimizers import ( NloptBOBYQA, NloptCCSAQ, NloptCOBYLA, NloptCRS2LM, NloptDirect, NloptESCH, NloptISRES, NloptLBFGSB, NloptMMA, 
NloptNelderMead, NloptNEWUOA, NloptPRAXIS, NloptSbplx, NloptSLSQP, NloptTNewton, NloptVAR, ) from optimagic.optimizers.pounders import Pounders from optimagic.optimizers.pygad_optimizer import Pygad from optimagic.optimizers.pygmo_optimizers import ( PygmoBeeColony, PygmoCmaes, PygmoCompassSearch, PygmoDe, PygmoDe1220, PygmoGaco, PygmoGwo, PygmoIhs, PygmoMbh, PygmoPso, PygmoPsoGen, PygmoSade, PygmoSea, PygmoSga, PygmoSimulatedAnnealing, PygmoXnes, ) from optimagic.optimizers.pyswarms_optimizers import ( PySwarmsGeneralPSO, PySwarmsGlobalBestPSO, PySwarmsLocalBestPSO, ) from optimagic.optimizers.scipy_optimizers import ( ScipyBasinhopping, ScipyBFGS, ScipyBrute, ScipyCOBYLA, ScipyConjugateGradient, ScipyDifferentialEvolution, ScipyDirect, ScipyDualAnnealing, ScipyLBFGSB, ScipyLSDogbox, ScipyLSLM, ScipyLSTRF, ScipyNelderMead, ScipyNewtonCG, ScipyPowell, ScipySHGO, ScipySLSQP, ScipyTruncatedNewton, ScipyTrustConstr, ) from optimagic.optimizers.tao_optimizers import TAOPounders from optimagic.optimizers.tranquilo import Tranquilo, TranquiloLS @dataclass(frozen=True) class AlgoSelection: def _all(self) -> list[Type[Algorithm]]: raw = [field.default for field in self.__dataclass_fields__.values()] return cast(list[Type[Algorithm]], raw) def _available(self) -> list[Type[Algorithm]]: _all = self._all() return [ a for a in _all if a.algo_info.is_available # type: ignore ] @property def All(self) -> list[Type[Algorithm]]: return self._all() @property def Available(self) -> list[Type[Algorithm]]: return self._available() @property def AllNames(self) -> list[str]: return [str(a.name) for a in self._all()] @property def AvailableNames(self) -> list[str]: return [str(a.name) for a in self._available()] @property def _all_algorithms_dict(self) -> dict[str, Type[Algorithm]]: return {str(a.name): a for a in self._all()} @property def _available_algorithms_dict(self) -> dict[str, Type[Algorithm]]: return {str(a.name): a for a in self._available()} @dataclass(frozen=True) class 
BoundedGlobalGradientFreeNonlinearConstrainedParallelScalarAlgorithms( AlgoSelection ): scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution ) @dataclass(frozen=True) class BoundedGlobalGradientBasedNonlinearConstrainedScalarAlgorithms(AlgoSelection): scipy_shgo: Type[ScipySHGO] = ScipySHGO @dataclass(frozen=True) class BoundedGradientBasedLocalNonlinearConstrainedScalarAlgorithms(AlgoSelection): ipopt: Type[Ipopt] = Ipopt nlopt_mma: Type[NloptMMA] = NloptMMA nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr @dataclass(frozen=True) class BoundedGlobalGradientFreeNonlinearConstrainedScalarAlgorithms(AlgoSelection): nlopt_isres: Type[NloptISRES] = NloptISRES scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution ) @property def Parallel( self, ) -> BoundedGlobalGradientFreeNonlinearConstrainedParallelScalarAlgorithms: return BoundedGlobalGradientFreeNonlinearConstrainedParallelScalarAlgorithms() @dataclass(frozen=True) class BoundedGlobalGradientFreeNonlinearConstrainedParallelAlgorithms(AlgoSelection): scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution ) @property def Scalar( self, ) -> BoundedGlobalGradientFreeNonlinearConstrainedParallelScalarAlgorithms: return BoundedGlobalGradientFreeNonlinearConstrainedParallelScalarAlgorithms() @dataclass(frozen=True) class BoundedGlobalGradientFreeParallelScalarAlgorithms(AlgoSelection): nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim nevergrad_cga: Type[NevergradCGA] = NevergradCGA nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution nevergrad_eda: Type[NevergradEDA] = NevergradEDA nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA nevergrad_meta: Type[NevergradMeta] = NevergradMeta nevergrad_ngopt: 
Type[NevergradNGOpt] = NevergradNGOpt nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne nevergrad_pso: Type[NevergradPSO] = NevergradPSO nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA pygad: Type[Pygad] = Pygad pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution ) @property def NonlinearConstrained( self, ) -> BoundedGlobalGradientFreeNonlinearConstrainedParallelScalarAlgorithms: return BoundedGlobalGradientFreeNonlinearConstrainedParallelScalarAlgorithms() @dataclass(frozen=True) class GlobalGradientFreeNonlinearConstrainedParallelScalarAlgorithms(AlgoSelection): scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution ) @property def Bounded( self, ) -> BoundedGlobalGradientFreeNonlinearConstrainedParallelScalarAlgorithms: return BoundedGlobalGradientFreeNonlinearConstrainedParallelScalarAlgorithms() @dataclass(frozen=True) class BoundedGradientFreeLocalNonlinearConstrainedScalarAlgorithms(AlgoSelection): nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA @dataclass(frozen=True) class BoundedGradientFreeLocalParallelScalarAlgorithms(AlgoSelection): tranquilo: Type[Tranquilo] = Tranquilo @dataclass(frozen=True) class BoundedGradientFreeLeastSquaresLocalParallelAlgorithms(AlgoSelection): pounders: Type[Pounders] = Pounders tranquilo_ls: Type[TranquiloLS] = TranquiloLS @dataclass(frozen=True) class BoundedGradientFreeNonlinearConstrainedParallelScalarAlgorithms(AlgoSelection): 
scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution ) @property def Global( self, ) -> BoundedGlobalGradientFreeNonlinearConstrainedParallelScalarAlgorithms: return BoundedGlobalGradientFreeNonlinearConstrainedParallelScalarAlgorithms() @dataclass(frozen=True) class BoundedGlobalNonlinearConstrainedParallelScalarAlgorithms(AlgoSelection): scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution ) @property def GradientFree( self, ) -> BoundedGlobalGradientFreeNonlinearConstrainedParallelScalarAlgorithms: return BoundedGlobalGradientFreeNonlinearConstrainedParallelScalarAlgorithms() @dataclass(frozen=True) class BoundedGlobalGradientBasedNonlinearConstrainedAlgorithms(AlgoSelection): scipy_shgo: Type[ScipySHGO] = ScipySHGO @property def Scalar(self) -> BoundedGlobalGradientBasedNonlinearConstrainedScalarAlgorithms: return BoundedGlobalGradientBasedNonlinearConstrainedScalarAlgorithms() @dataclass(frozen=True) class BoundedGlobalGradientBasedScalarAlgorithms(AlgoSelection): scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping scipy_dual_annealing: Type[ScipyDualAnnealing] = ScipyDualAnnealing scipy_shgo: Type[ScipySHGO] = ScipySHGO @property def NonlinearConstrained( self, ) -> BoundedGlobalGradientBasedNonlinearConstrainedScalarAlgorithms: return BoundedGlobalGradientBasedNonlinearConstrainedScalarAlgorithms() @dataclass(frozen=True) class GlobalGradientBasedNonlinearConstrainedScalarAlgorithms(AlgoSelection): scipy_shgo: Type[ScipySHGO] = ScipySHGO @property def Bounded(self) -> BoundedGlobalGradientBasedNonlinearConstrainedScalarAlgorithms: return BoundedGlobalGradientBasedNonlinearConstrainedScalarAlgorithms() @dataclass(frozen=True) class BoundedGradientBasedLocalNonlinearConstrainedAlgorithms(AlgoSelection): ipopt: Type[Ipopt] = Ipopt nlopt_mma: Type[NloptMMA] = NloptMMA nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP 
scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr @property def Scalar(self) -> BoundedGradientBasedLocalNonlinearConstrainedScalarAlgorithms: return BoundedGradientBasedLocalNonlinearConstrainedScalarAlgorithms() @dataclass(frozen=True) class BoundedGradientBasedLocalScalarAlgorithms(AlgoSelection): fides: Type[Fides] = Fides iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad ipopt: Type[Ipopt] = Ipopt nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ nlopt_lbfgsb: Type[NloptLBFGSB] = NloptLBFGSB nlopt_mma: Type[NloptMMA] = NloptMMA nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP nlopt_tnewton: Type[NloptTNewton] = NloptTNewton nlopt_var: Type[NloptVAR] = NloptVAR scipy_lbfgsb: Type[ScipyLBFGSB] = ScipyLBFGSB scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP scipy_truncated_newton: Type[ScipyTruncatedNewton] = ScipyTruncatedNewton scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr @property def NonlinearConstrained( self, ) -> BoundedGradientBasedLocalNonlinearConstrainedScalarAlgorithms: return BoundedGradientBasedLocalNonlinearConstrainedScalarAlgorithms() @dataclass(frozen=True) class BoundedGradientBasedLeastSquaresLocalAlgorithms(AlgoSelection): scipy_ls_dogbox: Type[ScipyLSDogbox] = ScipyLSDogbox scipy_ls_trf: Type[ScipyLSTRF] = ScipyLSTRF @dataclass(frozen=True) class GradientBasedLocalNonlinearConstrainedScalarAlgorithms(AlgoSelection): ipopt: Type[Ipopt] = Ipopt nlopt_mma: Type[NloptMMA] = NloptMMA nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr @property def Bounded(self) -> BoundedGradientBasedLocalNonlinearConstrainedScalarAlgorithms: return BoundedGradientBasedLocalNonlinearConstrainedScalarAlgorithms() @dataclass(frozen=True) class BoundedGradientBasedNonlinearConstrainedScalarAlgorithms(AlgoSelection): ipopt: Type[Ipopt] = Ipopt nlopt_mma: Type[NloptMMA] = NloptMMA nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP scipy_shgo: Type[ScipySHGO] = ScipySHGO 
scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr @property def Global(self) -> BoundedGlobalGradientBasedNonlinearConstrainedScalarAlgorithms: return BoundedGlobalGradientBasedNonlinearConstrainedScalarAlgorithms() @property def Local(self) -> BoundedGradientBasedLocalNonlinearConstrainedScalarAlgorithms: return BoundedGradientBasedLocalNonlinearConstrainedScalarAlgorithms() @dataclass(frozen=True) class BoundedGlobalGradientFreeNonlinearConstrainedAlgorithms(AlgoSelection): nlopt_isres: Type[NloptISRES] = NloptISRES scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution ) @property def Parallel( self, ) -> BoundedGlobalGradientFreeNonlinearConstrainedParallelAlgorithms: return BoundedGlobalGradientFreeNonlinearConstrainedParallelAlgorithms() @property def Scalar(self) -> BoundedGlobalGradientFreeNonlinearConstrainedScalarAlgorithms: return BoundedGlobalGradientFreeNonlinearConstrainedScalarAlgorithms() @dataclass(frozen=True) class BoundedGlobalGradientFreeScalarAlgorithms(AlgoSelection): bayes_opt: Type[BayesOpt] = BayesOpt gfo_differential_evolution: Type[GFODifferentialEvolution] = ( GFODifferentialEvolution ) gfo_downhillsimplex: Type[GFODownhillSimplex] = GFODownhillSimplex gfo_evolution_strategy: Type[GFOEvolutionStrategy] = GFOEvolutionStrategy gfo_genetic_algorithm: Type[GFOGeneticAlgorithm] = GFOGeneticAlgorithm gfo_hillclimbing: Type[GFOHillClimbing] = GFOHillClimbing gfo_parallel_tempering: Type[GFOParallelTempering] = GFOParallelTempering gfo_pso: Type[GFOParticleSwarmOptimization] = GFOParticleSwarmOptimization gfo_powells_method: Type[GFOPowellsMethod] = GFOPowellsMethod gfo_repulsinghillclimbing: Type[GFORepulsingHillClimbing] = GFORepulsingHillClimbing gfo_simulatedannealing: Type[GFOSimulatedAnnealing] = GFOSimulatedAnnealing gfo_spiral_optimization: Type[GFOSpiralOptimization] = GFOSpiralOptimization gfo_stochastichillclimbing: 
Type[GFOStochasticHillClimbing] = ( GFOStochasticHillClimbing ) nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim nevergrad_cga: Type[NevergradCGA] = NevergradCGA nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution nevergrad_eda: Type[NevergradEDA] = NevergradEDA nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA nevergrad_meta: Type[NevergradMeta] = NevergradMeta nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne nevergrad_pso: Type[NevergradPSO] = NevergradPSO nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM nlopt_direct: Type[NloptDirect] = NloptDirect nlopt_esch: Type[NloptESCH] = NloptESCH nlopt_isres: Type[NloptISRES] = NloptISRES pygad: Type[Pygad] = Pygad pygmo_bee_colony: Type[PygmoBeeColony] = PygmoBeeColony pygmo_cmaes: Type[PygmoCmaes] = PygmoCmaes pygmo_compass_search: Type[PygmoCompassSearch] = PygmoCompassSearch pygmo_de: Type[PygmoDe] = PygmoDe pygmo_de1220: Type[PygmoDe1220] = PygmoDe1220 pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_gwo: Type[PygmoGwo] = PygmoGwo pygmo_ihs: Type[PygmoIhs] = PygmoIhs pygmo_mbh: Type[PygmoMbh] = PygmoMbh pygmo_pso: Type[PygmoPso] = PygmoPso pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen pygmo_sade: Type[PygmoSade] = PygmoSade pygmo_sea: Type[PygmoSea] = PygmoSea pygmo_sga: Type[PygmoSga] = PygmoSga pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: 
Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution ) scipy_direct: Type[ScipyDirect] = ScipyDirect @property def NonlinearConstrained( self, ) -> BoundedGlobalGradientFreeNonlinearConstrainedScalarAlgorithms: return BoundedGlobalGradientFreeNonlinearConstrainedScalarAlgorithms() @property def Parallel(self) -> BoundedGlobalGradientFreeParallelScalarAlgorithms: return BoundedGlobalGradientFreeParallelScalarAlgorithms() @dataclass(frozen=True) class BoundedGlobalGradientFreeParallelAlgorithms(AlgoSelection): nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim nevergrad_cga: Type[NevergradCGA] = NevergradCGA nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution nevergrad_eda: Type[NevergradEDA] = NevergradEDA nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA nevergrad_meta: Type[NevergradMeta] = NevergradMeta nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne nevergrad_pso: Type[NevergradPSO] = NevergradPSO nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA pygad: Type[Pygad] = Pygad pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution ) @property def NonlinearConstrained( self, ) -> BoundedGlobalGradientFreeNonlinearConstrainedParallelAlgorithms: return 
BoundedGlobalGradientFreeNonlinearConstrainedParallelAlgorithms()

    @property
    def Scalar(self) -> BoundedGlobalGradientFreeParallelScalarAlgorithms:
        return BoundedGlobalGradientFreeParallelScalarAlgorithms()


# NOTE(review): The AlgoSelection subclasses below appear to be auto-generated
# (see .tools/create_algo_selection_code.py in this repository) — edit the
# generator and regenerate rather than changing these classes by hand.
# Each frozen dataclass holds the algorithms that match one combination of
# selection criteria (Bounded / Global / GradientFree / ...); each property
# narrows the current selection by one additional criterion and returns the
# correspondingly named, more specific selection class.
@dataclass(frozen=True)
class GlobalGradientFreeNonlinearConstrainedScalarAlgorithms(AlgoSelection):
    nlopt_isres: Type[NloptISRES] = NloptISRES
    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (
        ScipyDifferentialEvolution
    )

    @property
    def Bounded(self) -> BoundedGlobalGradientFreeNonlinearConstrainedScalarAlgorithms:
        return BoundedGlobalGradientFreeNonlinearConstrainedScalarAlgorithms()

    @property
    def Parallel(
        self,
    ) -> GlobalGradientFreeNonlinearConstrainedParallelScalarAlgorithms:
        return GlobalGradientFreeNonlinearConstrainedParallelScalarAlgorithms()


@dataclass(frozen=True)
class GlobalGradientFreeNonlinearConstrainedParallelAlgorithms(AlgoSelection):
    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (
        ScipyDifferentialEvolution
    )

    @property
    def Bounded(
        self,
    ) -> BoundedGlobalGradientFreeNonlinearConstrainedParallelAlgorithms:
        return BoundedGlobalGradientFreeNonlinearConstrainedParallelAlgorithms()

    @property
    def Scalar(self) -> GlobalGradientFreeNonlinearConstrainedParallelScalarAlgorithms:
        return GlobalGradientFreeNonlinearConstrainedParallelScalarAlgorithms()


@dataclass(frozen=True)
class GlobalGradientFreeParallelScalarAlgorithms(AlgoSelection):
    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
    nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt
    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
    pygad: Type[Pygad] = Pygad
    pygmo_gaco: Type[PygmoGaco] = PygmoGaco
    pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
    pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO
    pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO
    pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO
    scipy_brute: Type[ScipyBrute] = ScipyBrute
    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (
        ScipyDifferentialEvolution
    )

    @property
    def Bounded(self) -> BoundedGlobalGradientFreeParallelScalarAlgorithms:
        return BoundedGlobalGradientFreeParallelScalarAlgorithms()

    @property
    def NonlinearConstrained(
        self,
    ) -> GlobalGradientFreeNonlinearConstrainedParallelScalarAlgorithms:
        return GlobalGradientFreeNonlinearConstrainedParallelScalarAlgorithms()


@dataclass(frozen=True)
class BoundedGradientFreeLocalNonlinearConstrainedAlgorithms(AlgoSelection):
    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA

    @property
    def Scalar(self) -> BoundedGradientFreeLocalNonlinearConstrainedScalarAlgorithms:
        return BoundedGradientFreeLocalNonlinearConstrainedScalarAlgorithms()


@dataclass(frozen=True)
class BoundedGradientFreeLocalScalarAlgorithms(AlgoSelection):
    nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
    nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA
    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA
    nlopt_newuoa: Type[NloptNEWUOA] = NloptNEWUOA
    nlopt_neldermead: Type[NloptNelderMead] = NloptNelderMead
    nlopt_sbplx: Type[NloptSbplx] = NloptSbplx
    scipy_neldermead: Type[ScipyNelderMead] = ScipyNelderMead
    scipy_powell: Type[ScipyPowell] = ScipyPowell
    tranquilo: Type[Tranquilo] = Tranquilo

    @property
    def NonlinearConstrained(
        self,
    ) -> BoundedGradientFreeLocalNonlinearConstrainedScalarAlgorithms:
        return BoundedGradientFreeLocalNonlinearConstrainedScalarAlgorithms()

    @property
    def Parallel(self) -> BoundedGradientFreeLocalParallelScalarAlgorithms:
        return BoundedGradientFreeLocalParallelScalarAlgorithms()


@dataclass(frozen=True)
class BoundedGradientFreeLeastSquaresLocalAlgorithms(AlgoSelection):
    nag_dfols: Type[NagDFOLS] = NagDFOLS
    pounders: Type[Pounders] = Pounders
    tao_pounders: Type[TAOPounders] = TAOPounders
    tranquilo_ls: Type[TranquiloLS] = TranquiloLS

    @property
    def Parallel(self) -> BoundedGradientFreeLeastSquaresLocalParallelAlgorithms:
        return BoundedGradientFreeLeastSquaresLocalParallelAlgorithms()


@dataclass(frozen=True)
class BoundedGradientFreeLocalParallelAlgorithms(AlgoSelection):
    pounders: Type[Pounders] = Pounders
    tranquilo: Type[Tranquilo] = Tranquilo
    tranquilo_ls: Type[TranquiloLS] = TranquiloLS

    @property
    def LeastSquares(self) -> BoundedGradientFreeLeastSquaresLocalParallelAlgorithms:
        return BoundedGradientFreeLeastSquaresLocalParallelAlgorithms()

    @property
    def Scalar(self) -> BoundedGradientFreeLocalParallelScalarAlgorithms:
        return BoundedGradientFreeLocalParallelScalarAlgorithms()


@dataclass(frozen=True)
class GradientFreeLocalNonlinearConstrainedScalarAlgorithms(AlgoSelection):
    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA
    scipy_cobyla: Type[ScipyCOBYLA] = ScipyCOBYLA

    @property
    def Bounded(self) -> BoundedGradientFreeLocalNonlinearConstrainedScalarAlgorithms:
        return BoundedGradientFreeLocalNonlinearConstrainedScalarAlgorithms()


@dataclass(frozen=True)
class GradientFreeLocalParallelScalarAlgorithms(AlgoSelection):
    neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel
    tranquilo: Type[Tranquilo] = Tranquilo

    @property
    def Bounded(self) -> BoundedGradientFreeLocalParallelScalarAlgorithms:
        return BoundedGradientFreeLocalParallelScalarAlgorithms()


@dataclass(frozen=True)
class GradientFreeLeastSquaresLocalParallelAlgorithms(AlgoSelection):
    pounders: Type[Pounders] = Pounders
    tranquilo_ls: Type[TranquiloLS] = TranquiloLS

    @property
    def Bounded(self) -> BoundedGradientFreeLeastSquaresLocalParallelAlgorithms:
        return BoundedGradientFreeLeastSquaresLocalParallelAlgorithms()


@dataclass(frozen=True)
class BoundedGradientFreeNonlinearConstrainedScalarAlgorithms(AlgoSelection):
    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA
    nlopt_isres: Type[NloptISRES] = NloptISRES
    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (
        ScipyDifferentialEvolution
    )

    @property
    def Global(self) -> BoundedGlobalGradientFreeNonlinearConstrainedScalarAlgorithms:
        return BoundedGlobalGradientFreeNonlinearConstrainedScalarAlgorithms()

    @property
    def Local(self) -> BoundedGradientFreeLocalNonlinearConstrainedScalarAlgorithms:
        return BoundedGradientFreeLocalNonlinearConstrainedScalarAlgorithms()

    @property
    def Parallel(
        self,
    ) -> BoundedGradientFreeNonlinearConstrainedParallelScalarAlgorithms:
        return BoundedGradientFreeNonlinearConstrainedParallelScalarAlgorithms()


@dataclass(frozen=True)
class BoundedGradientFreeNonlinearConstrainedParallelAlgorithms(AlgoSelection):
    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (
        ScipyDifferentialEvolution
    )

    @property
    def Global(self) -> BoundedGlobalGradientFreeNonlinearConstrainedParallelAlgorithms:
        return BoundedGlobalGradientFreeNonlinearConstrainedParallelAlgorithms()

    @property
    def Scalar(self) -> BoundedGradientFreeNonlinearConstrainedParallelScalarAlgorithms:
        return BoundedGradientFreeNonlinearConstrainedParallelScalarAlgorithms()


@dataclass(frozen=True)
class BoundedGradientFreeParallelScalarAlgorithms(AlgoSelection):
    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
    nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt
    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
    pygad: Type[Pygad] = Pygad
    pygmo_gaco: Type[PygmoGaco] = PygmoGaco
    pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
    pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO
    pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO
    pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO
    scipy_brute: Type[ScipyBrute] = ScipyBrute
    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (
        ScipyDifferentialEvolution
    )
    tranquilo: Type[Tranquilo] = Tranquilo

    @property
    def Global(self) -> BoundedGlobalGradientFreeParallelScalarAlgorithms:
        return BoundedGlobalGradientFreeParallelScalarAlgorithms()

    @property
    def Local(self) -> BoundedGradientFreeLocalParallelScalarAlgorithms:
        return BoundedGradientFreeLocalParallelScalarAlgorithms()

    @property
    def NonlinearConstrained(
        self,
    ) -> BoundedGradientFreeNonlinearConstrainedParallelScalarAlgorithms:
        return BoundedGradientFreeNonlinearConstrainedParallelScalarAlgorithms()


@dataclass(frozen=True)
class BoundedGradientFreeLeastSquaresParallelAlgorithms(AlgoSelection):
    pounders: Type[Pounders] = Pounders
    tranquilo_ls: Type[TranquiloLS] = TranquiloLS

    @property
    def Local(self) -> BoundedGradientFreeLeastSquaresLocalParallelAlgorithms:
        return BoundedGradientFreeLeastSquaresLocalParallelAlgorithms()


@dataclass(frozen=True)
class GradientFreeNonlinearConstrainedParallelScalarAlgorithms(AlgoSelection):
    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (
        ScipyDifferentialEvolution
    )

    # NOTE(review): the extraction of this chunk garbled the return annotation
    # of `Bounded` at a chunk boundary; reconstructed from the identical
    # pattern used by every sibling class — confirm against the generator.
    @property
    def Bounded(
        self,
    ) -> BoundedGradientFreeNonlinearConstrainedParallelScalarAlgorithms:
        return BoundedGradientFreeNonlinearConstrainedParallelScalarAlgorithms()

    @property
    def Global(self) -> GlobalGradientFreeNonlinearConstrainedParallelScalarAlgorithms:
        return GlobalGradientFreeNonlinearConstrainedParallelScalarAlgorithms()


@dataclass(frozen=True)
class BoundedGlobalNonlinearConstrainedScalarAlgorithms(AlgoSelection):
    nlopt_isres: Type[NloptISRES] = NloptISRES
    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (
        ScipyDifferentialEvolution
    )
    scipy_shgo: Type[ScipySHGO] = ScipySHGO

    @property
    def GradientBased(
        self,
    ) -> BoundedGlobalGradientBasedNonlinearConstrainedScalarAlgorithms:
        return BoundedGlobalGradientBasedNonlinearConstrainedScalarAlgorithms()

    @property
    def GradientFree(
        self,
    ) -> BoundedGlobalGradientFreeNonlinearConstrainedScalarAlgorithms:
        return BoundedGlobalGradientFreeNonlinearConstrainedScalarAlgorithms()

    @property
    def Parallel(self) -> BoundedGlobalNonlinearConstrainedParallelScalarAlgorithms:
        return BoundedGlobalNonlinearConstrainedParallelScalarAlgorithms()


@dataclass(frozen=True)
class BoundedGlobalNonlinearConstrainedParallelAlgorithms(AlgoSelection):
    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (
        ScipyDifferentialEvolution
    )

    @property
    def GradientFree(
        self,
    ) -> BoundedGlobalGradientFreeNonlinearConstrainedParallelAlgorithms:
        return BoundedGlobalGradientFreeNonlinearConstrainedParallelAlgorithms()

    @property
    def Scalar(self) -> BoundedGlobalNonlinearConstrainedParallelScalarAlgorithms:
        return BoundedGlobalNonlinearConstrainedParallelScalarAlgorithms()


@dataclass(frozen=True)
class BoundedGlobalParallelScalarAlgorithms(AlgoSelection):
    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
    nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt
    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
    pygad: Type[Pygad] = Pygad
    pygmo_gaco: Type[PygmoGaco] = PygmoGaco
    pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
    pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO
    pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO
    pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO
    scipy_brute: Type[ScipyBrute] = ScipyBrute
    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (
        ScipyDifferentialEvolution
    )

    @property
    def GradientFree(self) -> BoundedGlobalGradientFreeParallelScalarAlgorithms:
        return BoundedGlobalGradientFreeParallelScalarAlgorithms()

    @property
    def NonlinearConstrained(
        self,
    ) -> BoundedGlobalNonlinearConstrainedParallelScalarAlgorithms:
        return BoundedGlobalNonlinearConstrainedParallelScalarAlgorithms()


@dataclass(frozen=True)
class GlobalNonlinearConstrainedParallelScalarAlgorithms(AlgoSelection):
    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (
        ScipyDifferentialEvolution
    )

    @property
    def Bounded(self) -> BoundedGlobalNonlinearConstrainedParallelScalarAlgorithms:
        return BoundedGlobalNonlinearConstrainedParallelScalarAlgorithms()

    @property
    def GradientFree(
        self,
    ) -> GlobalGradientFreeNonlinearConstrainedParallelScalarAlgorithms:
        return GlobalGradientFreeNonlinearConstrainedParallelScalarAlgorithms()


@dataclass(frozen=True)
class BoundedLocalNonlinearConstrainedScalarAlgorithms(AlgoSelection):
    ipopt: Type[Ipopt] = Ipopt
    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA
    nlopt_mma: Type[NloptMMA] = NloptMMA
    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP
    scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP
    scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr

    @property
    def GradientBased(
        self,
    ) -> BoundedGradientBasedLocalNonlinearConstrainedScalarAlgorithms:
        return BoundedGradientBasedLocalNonlinearConstrainedScalarAlgorithms()

    @property
    def GradientFree(
        self,
    ) -> BoundedGradientFreeLocalNonlinearConstrainedScalarAlgorithms:
        return BoundedGradientFreeLocalNonlinearConstrainedScalarAlgorithms()


@dataclass(frozen=True)
class BoundedLocalParallelScalarAlgorithms(AlgoSelection):
    tranquilo: Type[Tranquilo] = Tranquilo

    @property
    def GradientFree(self) -> BoundedGradientFreeLocalParallelScalarAlgorithms:
        return BoundedGradientFreeLocalParallelScalarAlgorithms()


@dataclass(frozen=True)
class BoundedLeastSquaresLocalParallelAlgorithms(AlgoSelection):
    pounders: Type[Pounders] = Pounders
    tranquilo_ls: Type[TranquiloLS] = TranquiloLS

    @property
    def GradientFree(self) -> BoundedGradientFreeLeastSquaresLocalParallelAlgorithms:
        return BoundedGradientFreeLeastSquaresLocalParallelAlgorithms()


@dataclass(frozen=True)
class BoundedNonlinearConstrainedParallelScalarAlgorithms(AlgoSelection):
    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (
        ScipyDifferentialEvolution
    )

    @property
    def Global(self) -> BoundedGlobalNonlinearConstrainedParallelScalarAlgorithms:
        return BoundedGlobalNonlinearConstrainedParallelScalarAlgorithms()

    @property
    def GradientFree(
        self,
    ) -> BoundedGradientFreeNonlinearConstrainedParallelScalarAlgorithms:
        return BoundedGradientFreeNonlinearConstrainedParallelScalarAlgorithms()


@dataclass(frozen=True)
class BoundedGlobalGradientBasedAlgorithms(AlgoSelection):
    scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping
    scipy_dual_annealing: Type[ScipyDualAnnealing] = ScipyDualAnnealing
    scipy_shgo: Type[ScipySHGO] = ScipySHGO

    @property
    def NonlinearConstrained(
        self,
    ) -> BoundedGlobalGradientBasedNonlinearConstrainedAlgorithms:
        return BoundedGlobalGradientBasedNonlinearConstrainedAlgorithms()

    @property
    def Scalar(self) -> BoundedGlobalGradientBasedScalarAlgorithms:
        return BoundedGlobalGradientBasedScalarAlgorithms()


@dataclass(frozen=True)
class GlobalGradientBasedNonlinearConstrainedAlgorithms(AlgoSelection):
    scipy_shgo: Type[ScipySHGO] = ScipySHGO

    @property
    def Bounded(self) -> BoundedGlobalGradientBasedNonlinearConstrainedAlgorithms:
        return BoundedGlobalGradientBasedNonlinearConstrainedAlgorithms()

    @property
    def Scalar(self) -> GlobalGradientBasedNonlinearConstrainedScalarAlgorithms:
        return GlobalGradientBasedNonlinearConstrainedScalarAlgorithms()


@dataclass(frozen=True)
class GlobalGradientBasedScalarAlgorithms(AlgoSelection):
    scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping
    scipy_dual_annealing: Type[ScipyDualAnnealing] = ScipyDualAnnealing
    scipy_shgo: Type[ScipySHGO] = ScipySHGO

    @property
    def Bounded(self) -> BoundedGlobalGradientBasedScalarAlgorithms:
        return BoundedGlobalGradientBasedScalarAlgorithms()

    @property
    def NonlinearConstrained(
        self,
    ) -> GlobalGradientBasedNonlinearConstrainedScalarAlgorithms:
        return GlobalGradientBasedNonlinearConstrainedScalarAlgorithms()


@dataclass(frozen=True)
class BoundedGradientBasedLocalAlgorithms(AlgoSelection):
    fides: Type[Fides] = Fides
    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
    ipopt: Type[Ipopt] = Ipopt
    nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ
    nlopt_lbfgsb: Type[NloptLBFGSB] = NloptLBFGSB
    nlopt_mma: Type[NloptMMA] = NloptMMA
    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP
    nlopt_tnewton: Type[NloptTNewton] = NloptTNewton
    nlopt_var: Type[NloptVAR] = NloptVAR
    scipy_lbfgsb: Type[ScipyLBFGSB] = ScipyLBFGSB
    scipy_ls_dogbox: Type[ScipyLSDogbox] = ScipyLSDogbox
    scipy_ls_trf: Type[ScipyLSTRF] = ScipyLSTRF
    scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP
    scipy_truncated_newton: Type[ScipyTruncatedNewton] = ScipyTruncatedNewton
    scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr

    @property
    def LeastSquares(self) -> BoundedGradientBasedLeastSquaresLocalAlgorithms:
        return BoundedGradientBasedLeastSquaresLocalAlgorithms()

    @property
    def NonlinearConstrained(
        self,
    ) -> BoundedGradientBasedLocalNonlinearConstrainedAlgorithms:
        return BoundedGradientBasedLocalNonlinearConstrainedAlgorithms()

    @property
    def Scalar(self) -> BoundedGradientBasedLocalScalarAlgorithms:
        return BoundedGradientBasedLocalScalarAlgorithms()


@dataclass(frozen=True)
class GradientBasedLocalNonlinearConstrainedAlgorithms(AlgoSelection):
    ipopt: Type[Ipopt] = Ipopt
    nlopt_mma: Type[NloptMMA] = NloptMMA
    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP
    scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP
    scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr

    @property
    def Bounded(self) -> BoundedGradientBasedLocalNonlinearConstrainedAlgorithms:
        return BoundedGradientBasedLocalNonlinearConstrainedAlgorithms()

    @property
    def Scalar(self) -> GradientBasedLocalNonlinearConstrainedScalarAlgorithms:
        return GradientBasedLocalNonlinearConstrainedScalarAlgorithms()


@dataclass(frozen=True)
class GradientBasedLocalScalarAlgorithms(AlgoSelection):
    fides: Type[Fides] = Fides
    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
    ipopt: Type[Ipopt] = Ipopt
    nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ
    nlopt_lbfgsb: Type[NloptLBFGSB] = NloptLBFGSB
    nlopt_mma: Type[NloptMMA] = NloptMMA
    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP
    nlopt_tnewton: Type[NloptTNewton] = NloptTNewton
    nlopt_var: Type[NloptVAR] = NloptVAR
    scipy_bfgs: Type[ScipyBFGS] = ScipyBFGS
    scipy_conjugate_gradient: Type[ScipyConjugateGradient] = ScipyConjugateGradient
    scipy_lbfgsb: Type[ScipyLBFGSB] = ScipyLBFGSB
    scipy_newton_cg: Type[ScipyNewtonCG] = ScipyNewtonCG
    scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP
    scipy_truncated_newton: Type[ScipyTruncatedNewton] = ScipyTruncatedNewton
    scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr

    @property
    def Bounded(self) -> BoundedGradientBasedLocalScalarAlgorithms:
        return BoundedGradientBasedLocalScalarAlgorithms()

    @property
    def NonlinearConstrained(
        self,
    ) -> GradientBasedLocalNonlinearConstrainedScalarAlgorithms:
        return GradientBasedLocalNonlinearConstrainedScalarAlgorithms()


@dataclass(frozen=True)
class GradientBasedLeastSquaresLocalAlgorithms(AlgoSelection):
    scipy_ls_dogbox: Type[ScipyLSDogbox] = ScipyLSDogbox
    scipy_ls_lm: Type[ScipyLSLM] = ScipyLSLM
    scipy_ls_trf: Type[ScipyLSTRF] = ScipyLSTRF

    @property
    def Bounded(self) -> BoundedGradientBasedLeastSquaresLocalAlgorithms:
        return BoundedGradientBasedLeastSquaresLocalAlgorithms()


@dataclass(frozen=True)
class GradientBasedLikelihoodLocalAlgorithms(AlgoSelection):
    bhhh: Type[BHHH] = BHHH


@dataclass(frozen=True)
class BoundedGradientBasedNonlinearConstrainedAlgorithms(AlgoSelection):
    ipopt: Type[Ipopt] = Ipopt
    nlopt_mma: Type[NloptMMA] = NloptMMA
    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP
    scipy_shgo: Type[ScipySHGO] = ScipySHGO
    scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP
    scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr

    @property
    def Global(self) -> BoundedGlobalGradientBasedNonlinearConstrainedAlgorithms:
        return BoundedGlobalGradientBasedNonlinearConstrainedAlgorithms()

    @property
    def Local(self) -> BoundedGradientBasedLocalNonlinearConstrainedAlgorithms:
        return BoundedGradientBasedLocalNonlinearConstrainedAlgorithms()

    @property
    def Scalar(self) -> BoundedGradientBasedNonlinearConstrainedScalarAlgorithms:
        return BoundedGradientBasedNonlinearConstrainedScalarAlgorithms()


@dataclass(frozen=True)
class BoundedGradientBasedScalarAlgorithms(AlgoSelection):
    fides: Type[Fides] = Fides
    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
    ipopt: Type[Ipopt] = Ipopt
    nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ
    nlopt_lbfgsb: Type[NloptLBFGSB] = NloptLBFGSB
    nlopt_mma: Type[NloptMMA] = NloptMMA
    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP
    nlopt_tnewton: Type[NloptTNewton] = NloptTNewton
    nlopt_var: Type[NloptVAR] = NloptVAR
    scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping
    scipy_dual_annealing: Type[ScipyDualAnnealing] = ScipyDualAnnealing
    scipy_lbfgsb: Type[ScipyLBFGSB] = ScipyLBFGSB
    scipy_shgo: Type[ScipySHGO] = ScipySHGO
    scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP
    scipy_truncated_newton: Type[ScipyTruncatedNewton] = ScipyTruncatedNewton
    scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr

    @property
    def Global(self) -> BoundedGlobalGradientBasedScalarAlgorithms:
        return BoundedGlobalGradientBasedScalarAlgorithms()

    @property
    def Local(self) -> BoundedGradientBasedLocalScalarAlgorithms:
        return BoundedGradientBasedLocalScalarAlgorithms()

    @property
    def NonlinearConstrained(
        self,
    ) -> BoundedGradientBasedNonlinearConstrainedScalarAlgorithms:
        return BoundedGradientBasedNonlinearConstrainedScalarAlgorithms()


@dataclass(frozen=True)
class BoundedGradientBasedLeastSquaresAlgorithms(AlgoSelection):
    scipy_ls_dogbox: Type[ScipyLSDogbox] = ScipyLSDogbox
    scipy_ls_trf: Type[ScipyLSTRF] = ScipyLSTRF

    @property
    def Local(self) -> BoundedGradientBasedLeastSquaresLocalAlgorithms:
        return BoundedGradientBasedLeastSquaresLocalAlgorithms()


@dataclass(frozen=True)
class GradientBasedNonlinearConstrainedScalarAlgorithms(AlgoSelection):
    ipopt: Type[Ipopt] = Ipopt
    nlopt_mma: Type[NloptMMA] = NloptMMA
    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP
    scipy_shgo: Type[ScipySHGO] = ScipySHGO
    scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP
    scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr

    @property
    def Bounded(self) -> BoundedGradientBasedNonlinearConstrainedScalarAlgorithms:
        return BoundedGradientBasedNonlinearConstrainedScalarAlgorithms()

    @property
    def Global(self) -> GlobalGradientBasedNonlinearConstrainedScalarAlgorithms:
        return GlobalGradientBasedNonlinearConstrainedScalarAlgorithms()

    @property
    def Local(self) -> GradientBasedLocalNonlinearConstrainedScalarAlgorithms:
        return GradientBasedLocalNonlinearConstrainedScalarAlgorithms()


@dataclass(frozen=True)
class BoundedGlobalGradientFreeAlgorithms(AlgoSelection):
    bayes_opt: Type[BayesOpt] = BayesOpt
    gfo_differential_evolution: Type[GFODifferentialEvolution] = (
        GFODifferentialEvolution
    )
    gfo_downhillsimplex: Type[GFODownhillSimplex] = GFODownhillSimplex
    gfo_evolution_strategy: Type[GFOEvolutionStrategy] = GFOEvolutionStrategy
    gfo_genetic_algorithm: Type[GFOGeneticAlgorithm] = GFOGeneticAlgorithm
    gfo_hillclimbing: Type[GFOHillClimbing] = GFOHillClimbing
    gfo_parallel_tempering: Type[GFOParallelTempering] = GFOParallelTempering
    gfo_pso: Type[GFOParticleSwarmOptimization] = GFOParticleSwarmOptimization
    gfo_powells_method: Type[GFOPowellsMethod] = GFOPowellsMethod
    gfo_repulsinghillclimbing: Type[GFORepulsingHillClimbing] = GFORepulsingHillClimbing
    gfo_simulatedannealing: Type[GFOSimulatedAnnealing] = GFOSimulatedAnnealing
    gfo_spiral_optimization: Type[GFOSpiralOptimization] = GFOSpiralOptimization
    gfo_stochastichillclimbing: Type[GFOStochasticHillClimbing] = (
        GFOStochasticHillClimbing
    )
    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
    nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt
    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
    nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM
    nlopt_direct: Type[NloptDirect] = NloptDirect
    nlopt_esch: Type[NloptESCH] = NloptESCH
    nlopt_isres: Type[NloptISRES] = NloptISRES
    pygad: Type[Pygad] = Pygad
    pygmo_bee_colony: Type[PygmoBeeColony] = PygmoBeeColony
    pygmo_cmaes: Type[PygmoCmaes] = PygmoCmaes
    pygmo_compass_search: Type[PygmoCompassSearch] = PygmoCompassSearch
    pygmo_de: Type[PygmoDe] = PygmoDe
    pygmo_de1220: Type[PygmoDe1220] = PygmoDe1220
    pygmo_gaco: Type[PygmoGaco] = PygmoGaco
    pygmo_gwo: Type[PygmoGwo] = PygmoGwo
    pygmo_ihs: Type[PygmoIhs] = PygmoIhs
    pygmo_mbh: Type[PygmoMbh] = PygmoMbh
    pygmo_pso: Type[PygmoPso] = PygmoPso
    pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
    pygmo_sade: Type[PygmoSade] = PygmoSade
    pygmo_sea: Type[PygmoSea] = PygmoSea
    pygmo_sga: Type[PygmoSga] = PygmoSga
    pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing
    pygmo_xnes: Type[PygmoXnes] = PygmoXnes
    pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO
    pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO
    pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO
    scipy_brute: Type[ScipyBrute] = ScipyBrute
    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (
        ScipyDifferentialEvolution
    )
    scipy_direct: Type[ScipyDirect] = ScipyDirect

    @property
    def NonlinearConstrained(
        self,
    ) -> BoundedGlobalGradientFreeNonlinearConstrainedAlgorithms:
        return BoundedGlobalGradientFreeNonlinearConstrainedAlgorithms()

    @property
    def Parallel(self) -> BoundedGlobalGradientFreeParallelAlgorithms:
        return BoundedGlobalGradientFreeParallelAlgorithms()

    @property
    def Scalar(self) -> BoundedGlobalGradientFreeScalarAlgorithms:
        return BoundedGlobalGradientFreeScalarAlgorithms()


@dataclass(frozen=True)
class GlobalGradientFreeNonlinearConstrainedAlgorithms(AlgoSelection):
    nlopt_isres: Type[NloptISRES] = NloptISRES
    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (
        ScipyDifferentialEvolution
    )

    @property
    def Bounded(self) -> BoundedGlobalGradientFreeNonlinearConstrainedAlgorithms:
        return BoundedGlobalGradientFreeNonlinearConstrainedAlgorithms()

    @property
    def Parallel(self) -> GlobalGradientFreeNonlinearConstrainedParallelAlgorithms:
        return GlobalGradientFreeNonlinearConstrainedParallelAlgorithms()

    @property
    def Scalar(self) -> GlobalGradientFreeNonlinearConstrainedScalarAlgorithms:
        return GlobalGradientFreeNonlinearConstrainedScalarAlgorithms()


@dataclass(frozen=True)
class GlobalGradientFreeScalarAlgorithms(AlgoSelection):
    bayes_opt: Type[BayesOpt] = BayesOpt
    gfo_differential_evolution: Type[GFODifferentialEvolution] = (
        GFODifferentialEvolution
    )
    gfo_downhillsimplex: Type[GFODownhillSimplex] = GFODownhillSimplex
    gfo_evolution_strategy: Type[GFOEvolutionStrategy] = GFOEvolutionStrategy
    gfo_genetic_algorithm: Type[GFOGeneticAlgorithm] = GFOGeneticAlgorithm
    gfo_hillclimbing: Type[GFOHillClimbing] = GFOHillClimbing
    gfo_parallel_tempering: Type[GFOParallelTempering] = GFOParallelTempering
    gfo_pso: Type[GFOParticleSwarmOptimization] = GFOParticleSwarmOptimization
    gfo_powells_method: Type[GFOPowellsMethod] = GFOPowellsMethod
    gfo_repulsinghillclimbing: Type[GFORepulsingHillClimbing] = GFORepulsingHillClimbing
    gfo_simulatedannealing: Type[GFOSimulatedAnnealing] = GFOSimulatedAnnealing
    gfo_spiral_optimization: Type[GFOSpiralOptimization] = GFOSpiralOptimization
    gfo_stochastichillclimbing: Type[GFOStochasticHillClimbing] = (
        GFOStochasticHillClimbing
    )
    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
    nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt
    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
    nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM
    nlopt_direct: Type[NloptDirect] = NloptDirect
    nlopt_esch: Type[NloptESCH] = NloptESCH
    nlopt_isres: Type[NloptISRES] = NloptISRES
    pygad: Type[Pygad] = Pygad
    pygmo_bee_colony: Type[PygmoBeeColony] = PygmoBeeColony
    pygmo_cmaes: Type[PygmoCmaes] = PygmoCmaes
    pygmo_compass_search: Type[PygmoCompassSearch] = PygmoCompassSearch
    pygmo_de: Type[PygmoDe] = PygmoDe
    pygmo_de1220: Type[PygmoDe1220] = PygmoDe1220
    pygmo_gaco: Type[PygmoGaco] = PygmoGaco
    pygmo_gwo: Type[PygmoGwo] = PygmoGwo
    pygmo_ihs: Type[PygmoIhs] = PygmoIhs
    pygmo_mbh: Type[PygmoMbh] = PygmoMbh
    pygmo_pso: Type[PygmoPso] = PygmoPso
    pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
    pygmo_sade: Type[PygmoSade] = PygmoSade
    pygmo_sea: Type[PygmoSea] = PygmoSea
    pygmo_sga: Type[PygmoSga] = PygmoSga
    pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing
    pygmo_xnes: Type[PygmoXnes] = PygmoXnes
    pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO
    pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO
    pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO
    scipy_brute: Type[ScipyBrute] = ScipyBrute
    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (
        ScipyDifferentialEvolution
    )
    scipy_direct: Type[ScipyDirect] = ScipyDirect

    @property
    def Bounded(self) -> BoundedGlobalGradientFreeScalarAlgorithms:
        return BoundedGlobalGradientFreeScalarAlgorithms()

    @property
    def NonlinearConstrained(
        self,
    ) -> GlobalGradientFreeNonlinearConstrainedScalarAlgorithms:
        return GlobalGradientFreeNonlinearConstrainedScalarAlgorithms()

    @property
    def Parallel(self) -> GlobalGradientFreeParallelScalarAlgorithms:
        return GlobalGradientFreeParallelScalarAlgorithms()


@dataclass(frozen=True)
class GlobalGradientFreeParallelAlgorithms(AlgoSelection):
    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
    nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt
    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
    pygad: Type[Pygad] = Pygad
    pygmo_gaco: Type[PygmoGaco] = PygmoGaco
    pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
    pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO
    pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO
    pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO
    scipy_brute: Type[ScipyBrute] = ScipyBrute
    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (
        ScipyDifferentialEvolution
    )

    @property
    def Bounded(self) -> BoundedGlobalGradientFreeParallelAlgorithms:
        return BoundedGlobalGradientFreeParallelAlgorithms()

    @property
    def NonlinearConstrained(
        self,
    ) -> GlobalGradientFreeNonlinearConstrainedParallelAlgorithms:
        return GlobalGradientFreeNonlinearConstrainedParallelAlgorithms()

    @property
    def Scalar(self) -> GlobalGradientFreeParallelScalarAlgorithms:
        return GlobalGradientFreeParallelScalarAlgorithms()


@dataclass(frozen=True)
class BoundedGradientFreeLocalAlgorithms(AlgoSelection):
    nag_dfols: Type[NagDFOLS] = NagDFOLS
    nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
    nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA
    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA
    nlopt_newuoa: Type[NloptNEWUOA] = NloptNEWUOA
    nlopt_neldermead: Type[NloptNelderMead] = NloptNelderMead
    nlopt_sbplx: Type[NloptSbplx] = NloptSbplx
    pounders: Type[Pounders] = Pounders
    scipy_neldermead: Type[ScipyNelderMead] = ScipyNelderMead
    scipy_powell: Type[ScipyPowell] = ScipyPowell
    tao_pounders: Type[TAOPounders] = TAOPounders
    tranquilo: Type[Tranquilo] = Tranquilo
    tranquilo_ls: Type[TranquiloLS] = TranquiloLS

    @property
    def LeastSquares(self) -> BoundedGradientFreeLeastSquaresLocalAlgorithms:
        return BoundedGradientFreeLeastSquaresLocalAlgorithms()

    @property
    def NonlinearConstrained(
        self,
    ) -> BoundedGradientFreeLocalNonlinearConstrainedAlgorithms:
        return BoundedGradientFreeLocalNonlinearConstrainedAlgorithms()

    @property
    def Parallel(self) -> BoundedGradientFreeLocalParallelAlgorithms:
        return BoundedGradientFreeLocalParallelAlgorithms()

    @property
    def Scalar(self) -> BoundedGradientFreeLocalScalarAlgorithms:
        return BoundedGradientFreeLocalScalarAlgorithms()


@dataclass(frozen=True)
class GradientFreeLocalNonlinearConstrainedAlgorithms(AlgoSelection):
    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA
    scipy_cobyla: Type[ScipyCOBYLA] = ScipyCOBYLA

    @property
    def Bounded(self) -> BoundedGradientFreeLocalNonlinearConstrainedAlgorithms:
        return BoundedGradientFreeLocalNonlinearConstrainedAlgorithms()

    @property
    def Scalar(self) -> GradientFreeLocalNonlinearConstrainedScalarAlgorithms:
        return GradientFreeLocalNonlinearConstrainedScalarAlgorithms()


@dataclass(frozen=True)
class GradientFreeLocalScalarAlgorithms(AlgoSelection):
    nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
    neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel
    nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA
    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA
    nlopt_newuoa: Type[NloptNEWUOA] = NloptNEWUOA
    nlopt_neldermead: Type[NloptNelderMead] = NloptNelderMead
    nlopt_praxis: Type[NloptPRAXIS] = NloptPRAXIS
    nlopt_sbplx: Type[NloptSbplx] = NloptSbplx
    scipy_cobyla: Type[ScipyCOBYLA] = ScipyCOBYLA
    scipy_neldermead: Type[ScipyNelderMead] = ScipyNelderMead
    scipy_powell: Type[ScipyPowell] = ScipyPowell
    tranquilo: Type[Tranquilo] = Tranquilo

    @property
    def Bounded(self) -> BoundedGradientFreeLocalScalarAlgorithms:
        return BoundedGradientFreeLocalScalarAlgorithms()

    @property
    def NonlinearConstrained(
        self,
    ) -> GradientFreeLocalNonlinearConstrainedScalarAlgorithms:
        return GradientFreeLocalNonlinearConstrainedScalarAlgorithms()

    @property
    def Parallel(self) -> GradientFreeLocalParallelScalarAlgorithms:
        return GradientFreeLocalParallelScalarAlgorithms()


@dataclass(frozen=True)
class GradientFreeLeastSquaresLocalAlgorithms(AlgoSelection):
    nag_dfols: Type[NagDFOLS] = NagDFOLS
    pounders: Type[Pounders] = Pounders
    tao_pounders: Type[TAOPounders] = TAOPounders
    tranquilo_ls: Type[TranquiloLS] = TranquiloLS

    @property
    def Bounded(self) -> BoundedGradientFreeLeastSquaresLocalAlgorithms:
        return BoundedGradientFreeLeastSquaresLocalAlgorithms()

    @property
    def Parallel(self) -> GradientFreeLeastSquaresLocalParallelAlgorithms:
        return GradientFreeLeastSquaresLocalParallelAlgorithms()


@dataclass(frozen=True)
class GradientFreeLocalParallelAlgorithms(AlgoSelection):
    neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel
    pounders: Type[Pounders] = Pounders
    tranquilo: Type[Tranquilo] = Tranquilo
    tranquilo_ls: Type[TranquiloLS] = TranquiloLS

    @property
    def Bounded(self) -> BoundedGradientFreeLocalParallelAlgorithms:
        return BoundedGradientFreeLocalParallelAlgorithms()

    @property
    def LeastSquares(self) -> GradientFreeLeastSquaresLocalParallelAlgorithms:
        return GradientFreeLeastSquaresLocalParallelAlgorithms()

    @property
    def Scalar(self) -> GradientFreeLocalParallelScalarAlgorithms:
        return GradientFreeLocalParallelScalarAlgorithms()


@dataclass(frozen=True)
class BoundedGradientFreeNonlinearConstrainedAlgorithms(AlgoSelection):
    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA
    nlopt_isres: Type[NloptISRES] = NloptISRES
    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (
        ScipyDifferentialEvolution
    )

    @property
    def Global(self) -> BoundedGlobalGradientFreeNonlinearConstrainedAlgorithms:
        return BoundedGlobalGradientFreeNonlinearConstrainedAlgorithms()

    @property
    def Local(self) -> BoundedGradientFreeLocalNonlinearConstrainedAlgorithms:
        return BoundedGradientFreeLocalNonlinearConstrainedAlgorithms()

    @property
    def Parallel(self) -> BoundedGradientFreeNonlinearConstrainedParallelAlgorithms:
        return BoundedGradientFreeNonlinearConstrainedParallelAlgorithms()

    @property
    def Scalar(self) -> BoundedGradientFreeNonlinearConstrainedScalarAlgorithms:
        return BoundedGradientFreeNonlinearConstrainedScalarAlgorithms()


@dataclass(frozen=True)
class BoundedGradientFreeScalarAlgorithms(AlgoSelection):
    bayes_opt: Type[BayesOpt] = BayesOpt
    gfo_differential_evolution: Type[GFODifferentialEvolution] = (
        GFODifferentialEvolution
    )
    gfo_downhillsimplex: Type[GFODownhillSimplex] = GFODownhillSimplex
    gfo_evolution_strategy: Type[GFOEvolutionStrategy] = GFOEvolutionStrategy
    gfo_genetic_algorithm: Type[GFOGeneticAlgorithm] = GFOGeneticAlgorithm
    gfo_hillclimbing: Type[GFOHillClimbing] = GFOHillClimbing
    gfo_parallel_tempering: Type[GFOParallelTempering] = GFOParallelTempering
    gfo_pso: Type[GFOParticleSwarmOptimization] = GFOParticleSwarmOptimization
    gfo_powells_method: Type[GFOPowellsMethod] = GFOPowellsMethod
    gfo_repulsinghillclimbing: Type[GFORepulsingHillClimbing] = GFORepulsingHillClimbing
    gfo_simulatedannealing: Type[GFOSimulatedAnnealing] = GFOSimulatedAnnealing
    gfo_spiral_optimization: Type[GFOSpiralOptimization] = GFOSpiralOptimization
    gfo_stochastichillclimbing: Type[GFOStochasticHillClimbing] = (
        GFOStochasticHillClimbing
    )
    nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
    nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt
    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
    nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA
    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA
    nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM
    nlopt_direct: Type[NloptDirect] = NloptDirect
    nlopt_esch: Type[NloptESCH] = NloptESCH
    nlopt_isres: Type[NloptISRES] = NloptISRES
    nlopt_newuoa: Type[NloptNEWUOA] = NloptNEWUOA
    nlopt_neldermead: Type[NloptNelderMead] = NloptNelderMead
    nlopt_sbplx: Type[NloptSbplx] = NloptSbplx
    pygad: Type[Pygad] = Pygad
    pygmo_bee_colony: Type[PygmoBeeColony] = PygmoBeeColony
    pygmo_cmaes: Type[PygmoCmaes] = PygmoCmaes
    pygmo_compass_search: Type[PygmoCompassSearch] = PygmoCompassSearch
    pygmo_de: Type[PygmoDe] = PygmoDe
    pygmo_de1220: Type[PygmoDe1220] = PygmoDe1220
    pygmo_gaco: Type[PygmoGaco] = PygmoGaco
    pygmo_gwo: Type[PygmoGwo] = PygmoGwo
    pygmo_ihs: Type[PygmoIhs] = PygmoIhs
    pygmo_mbh: Type[PygmoMbh] = PygmoMbh
    pygmo_pso: Type[PygmoPso] = PygmoPso
    pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
    pygmo_sade: Type[PygmoSade] = PygmoSade
    pygmo_sea: Type[PygmoSea] = PygmoSea
    pygmo_sga: Type[PygmoSga] = PygmoSga
    pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing
    pygmo_xnes: Type[PygmoXnes] = PygmoXnes
    pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO
    pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO
    pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO
    scipy_brute: Type[ScipyBrute] = ScipyBrute
    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (
        ScipyDifferentialEvolution
    )
    scipy_direct: Type[ScipyDirect] = ScipyDirect
    scipy_neldermead: Type[ScipyNelderMead] = ScipyNelderMead
    scipy_powell: Type[ScipyPowell] = ScipyPowell
    tranquilo: Type[Tranquilo] = Tranquilo

    @property
    def Global(self) -> BoundedGlobalGradientFreeScalarAlgorithms:
        return BoundedGlobalGradientFreeScalarAlgorithms()

    @property
    def Local(self) ->
BoundedGradientFreeLocalScalarAlgorithms: return BoundedGradientFreeLocalScalarAlgorithms() @property def NonlinearConstrained( self, ) -> BoundedGradientFreeNonlinearConstrainedScalarAlgorithms: return BoundedGradientFreeNonlinearConstrainedScalarAlgorithms() @property def Parallel(self) -> BoundedGradientFreeParallelScalarAlgorithms: return BoundedGradientFreeParallelScalarAlgorithms() @dataclass(frozen=True) class BoundedGradientFreeLeastSquaresAlgorithms(AlgoSelection): nag_dfols: Type[NagDFOLS] = NagDFOLS pounders: Type[Pounders] = Pounders tao_pounders: Type[TAOPounders] = TAOPounders tranquilo_ls: Type[TranquiloLS] = TranquiloLS @property def Local(self) -> BoundedGradientFreeLeastSquaresLocalAlgorithms: return BoundedGradientFreeLeastSquaresLocalAlgorithms() @property def Parallel(self) -> BoundedGradientFreeLeastSquaresParallelAlgorithms: return BoundedGradientFreeLeastSquaresParallelAlgorithms() @dataclass(frozen=True) class BoundedGradientFreeParallelAlgorithms(AlgoSelection): nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim nevergrad_cga: Type[NevergradCGA] = NevergradCGA nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution nevergrad_eda: Type[NevergradEDA] = NevergradEDA nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA nevergrad_meta: Type[NevergradMeta] = NevergradMeta nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne nevergrad_pso: Type[NevergradPSO] = NevergradPSO nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA pounders: Type[Pounders] = Pounders pygad: Type[Pygad] = Pygad pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen pyswarms_general: Type[PySwarmsGeneralPSO] = 
PySwarmsGeneralPSO pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution ) tranquilo: Type[Tranquilo] = Tranquilo tranquilo_ls: Type[TranquiloLS] = TranquiloLS @property def Global(self) -> BoundedGlobalGradientFreeParallelAlgorithms: return BoundedGlobalGradientFreeParallelAlgorithms() @property def LeastSquares(self) -> BoundedGradientFreeLeastSquaresParallelAlgorithms: return BoundedGradientFreeLeastSquaresParallelAlgorithms() @property def Local(self) -> BoundedGradientFreeLocalParallelAlgorithms: return BoundedGradientFreeLocalParallelAlgorithms() @property def NonlinearConstrained( self, ) -> BoundedGradientFreeNonlinearConstrainedParallelAlgorithms: return BoundedGradientFreeNonlinearConstrainedParallelAlgorithms() @property def Scalar(self) -> BoundedGradientFreeParallelScalarAlgorithms: return BoundedGradientFreeParallelScalarAlgorithms() @dataclass(frozen=True) class GradientFreeNonlinearConstrainedScalarAlgorithms(AlgoSelection): nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA nlopt_isres: Type[NloptISRES] = NloptISRES scipy_cobyla: Type[ScipyCOBYLA] = ScipyCOBYLA scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution ) @property def Bounded(self) -> BoundedGradientFreeNonlinearConstrainedScalarAlgorithms: return BoundedGradientFreeNonlinearConstrainedScalarAlgorithms() @property def Global(self) -> GlobalGradientFreeNonlinearConstrainedScalarAlgorithms: return GlobalGradientFreeNonlinearConstrainedScalarAlgorithms() @property def Local(self) -> GradientFreeLocalNonlinearConstrainedScalarAlgorithms: return GradientFreeLocalNonlinearConstrainedScalarAlgorithms() @property def Parallel(self) -> GradientFreeNonlinearConstrainedParallelScalarAlgorithms: return 
GradientFreeNonlinearConstrainedParallelScalarAlgorithms() @dataclass(frozen=True) class GradientFreeNonlinearConstrainedParallelAlgorithms(AlgoSelection): scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution ) @property def Bounded(self) -> BoundedGradientFreeNonlinearConstrainedParallelAlgorithms: return BoundedGradientFreeNonlinearConstrainedParallelAlgorithms() @property def Global(self) -> GlobalGradientFreeNonlinearConstrainedParallelAlgorithms: return GlobalGradientFreeNonlinearConstrainedParallelAlgorithms() @property def Scalar(self) -> GradientFreeNonlinearConstrainedParallelScalarAlgorithms: return GradientFreeNonlinearConstrainedParallelScalarAlgorithms() @dataclass(frozen=True) class GradientFreeParallelScalarAlgorithms(AlgoSelection): neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim nevergrad_cga: Type[NevergradCGA] = NevergradCGA nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution nevergrad_eda: Type[NevergradEDA] = NevergradEDA nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA nevergrad_meta: Type[NevergradMeta] = NevergradMeta nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne nevergrad_pso: Type[NevergradPSO] = NevergradPSO nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA pygad: Type[Pygad] = Pygad pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: 
Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution ) tranquilo: Type[Tranquilo] = Tranquilo @property def Bounded(self) -> BoundedGradientFreeParallelScalarAlgorithms: return BoundedGradientFreeParallelScalarAlgorithms() @property def Global(self) -> GlobalGradientFreeParallelScalarAlgorithms: return GlobalGradientFreeParallelScalarAlgorithms() @property def Local(self) -> GradientFreeLocalParallelScalarAlgorithms: return GradientFreeLocalParallelScalarAlgorithms() @property def NonlinearConstrained( self, ) -> GradientFreeNonlinearConstrainedParallelScalarAlgorithms: return GradientFreeNonlinearConstrainedParallelScalarAlgorithms() @dataclass(frozen=True) class GradientFreeLeastSquaresParallelAlgorithms(AlgoSelection): pounders: Type[Pounders] = Pounders tranquilo_ls: Type[TranquiloLS] = TranquiloLS @property def Bounded(self) -> BoundedGradientFreeLeastSquaresParallelAlgorithms: return BoundedGradientFreeLeastSquaresParallelAlgorithms() @property def Local(self) -> GradientFreeLeastSquaresLocalParallelAlgorithms: return GradientFreeLeastSquaresLocalParallelAlgorithms() @dataclass(frozen=True) class BoundedGlobalNonlinearConstrainedAlgorithms(AlgoSelection): nlopt_isres: Type[NloptISRES] = NloptISRES scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution ) scipy_shgo: Type[ScipySHGO] = ScipySHGO @property def GradientBased(self) -> BoundedGlobalGradientBasedNonlinearConstrainedAlgorithms: return BoundedGlobalGradientBasedNonlinearConstrainedAlgorithms() @property def GradientFree(self) -> BoundedGlobalGradientFreeNonlinearConstrainedAlgorithms: return BoundedGlobalGradientFreeNonlinearConstrainedAlgorithms() @property def Parallel(self) -> BoundedGlobalNonlinearConstrainedParallelAlgorithms: return BoundedGlobalNonlinearConstrainedParallelAlgorithms() @property def Scalar(self) -> BoundedGlobalNonlinearConstrainedScalarAlgorithms: return 
BoundedGlobalNonlinearConstrainedScalarAlgorithms() @dataclass(frozen=True) class BoundedGlobalScalarAlgorithms(AlgoSelection): bayes_opt: Type[BayesOpt] = BayesOpt gfo_differential_evolution: Type[GFODifferentialEvolution] = ( GFODifferentialEvolution ) gfo_downhillsimplex: Type[GFODownhillSimplex] = GFODownhillSimplex gfo_evolution_strategy: Type[GFOEvolutionStrategy] = GFOEvolutionStrategy gfo_genetic_algorithm: Type[GFOGeneticAlgorithm] = GFOGeneticAlgorithm gfo_hillclimbing: Type[GFOHillClimbing] = GFOHillClimbing gfo_parallel_tempering: Type[GFOParallelTempering] = GFOParallelTempering gfo_pso: Type[GFOParticleSwarmOptimization] = GFOParticleSwarmOptimization gfo_powells_method: Type[GFOPowellsMethod] = GFOPowellsMethod gfo_repulsinghillclimbing: Type[GFORepulsingHillClimbing] = GFORepulsingHillClimbing gfo_simulatedannealing: Type[GFOSimulatedAnnealing] = GFOSimulatedAnnealing gfo_spiral_optimization: Type[GFOSpiralOptimization] = GFOSpiralOptimization gfo_stochastichillclimbing: Type[GFOStochasticHillClimbing] = ( GFOStochasticHillClimbing ) nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim nevergrad_cga: Type[NevergradCGA] = NevergradCGA nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution nevergrad_eda: Type[NevergradEDA] = NevergradEDA nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA nevergrad_meta: Type[NevergradMeta] = NevergradMeta nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne nevergrad_pso: Type[NevergradPSO] = NevergradPSO nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM nlopt_direct: Type[NloptDirect] = NloptDirect nlopt_esch: Type[NloptESCH] = NloptESCH 
nlopt_isres: Type[NloptISRES] = NloptISRES pygad: Type[Pygad] = Pygad pygmo_bee_colony: Type[PygmoBeeColony] = PygmoBeeColony pygmo_cmaes: Type[PygmoCmaes] = PygmoCmaes pygmo_compass_search: Type[PygmoCompassSearch] = PygmoCompassSearch pygmo_de: Type[PygmoDe] = PygmoDe pygmo_de1220: Type[PygmoDe1220] = PygmoDe1220 pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_gwo: Type[PygmoGwo] = PygmoGwo pygmo_ihs: Type[PygmoIhs] = PygmoIhs pygmo_mbh: Type[PygmoMbh] = PygmoMbh pygmo_pso: Type[PygmoPso] = PygmoPso pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen pygmo_sade: Type[PygmoSade] = PygmoSade pygmo_sea: Type[PygmoSea] = PygmoSea pygmo_sga: Type[PygmoSga] = PygmoSga pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution ) scipy_direct: Type[ScipyDirect] = ScipyDirect scipy_dual_annealing: Type[ScipyDualAnnealing] = ScipyDualAnnealing scipy_shgo: Type[ScipySHGO] = ScipySHGO @property def GradientBased(self) -> BoundedGlobalGradientBasedScalarAlgorithms: return BoundedGlobalGradientBasedScalarAlgorithms() @property def GradientFree(self) -> BoundedGlobalGradientFreeScalarAlgorithms: return BoundedGlobalGradientFreeScalarAlgorithms() @property def NonlinearConstrained(self) -> BoundedGlobalNonlinearConstrainedScalarAlgorithms: return BoundedGlobalNonlinearConstrainedScalarAlgorithms() @property def Parallel(self) -> BoundedGlobalParallelScalarAlgorithms: return BoundedGlobalParallelScalarAlgorithms() @dataclass(frozen=True) class BoundedGlobalParallelAlgorithms(AlgoSelection): nevergrad_bo: 
Type[NevergradBayesOptim] = NevergradBayesOptim nevergrad_cga: Type[NevergradCGA] = NevergradCGA nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution nevergrad_eda: Type[NevergradEDA] = NevergradEDA nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA nevergrad_meta: Type[NevergradMeta] = NevergradMeta nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne nevergrad_pso: Type[NevergradPSO] = NevergradPSO nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA pygad: Type[Pygad] = Pygad pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution ) @property def GradientFree(self) -> BoundedGlobalGradientFreeParallelAlgorithms: return BoundedGlobalGradientFreeParallelAlgorithms() @property def NonlinearConstrained( self, ) -> BoundedGlobalNonlinearConstrainedParallelAlgorithms: return BoundedGlobalNonlinearConstrainedParallelAlgorithms() @property def Scalar(self) -> BoundedGlobalParallelScalarAlgorithms: return BoundedGlobalParallelScalarAlgorithms() @dataclass(frozen=True) class GlobalNonlinearConstrainedScalarAlgorithms(AlgoSelection): nlopt_isres: Type[NloptISRES] = NloptISRES scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution ) scipy_shgo: Type[ScipySHGO] = ScipySHGO @property def Bounded(self) -> BoundedGlobalNonlinearConstrainedScalarAlgorithms: 
return BoundedGlobalNonlinearConstrainedScalarAlgorithms() @property def GradientBased(self) -> GlobalGradientBasedNonlinearConstrainedScalarAlgorithms: return GlobalGradientBasedNonlinearConstrainedScalarAlgorithms() @property def GradientFree(self) -> GlobalGradientFreeNonlinearConstrainedScalarAlgorithms: return GlobalGradientFreeNonlinearConstrainedScalarAlgorithms() @property def Parallel(self) -> GlobalNonlinearConstrainedParallelScalarAlgorithms: return GlobalNonlinearConstrainedParallelScalarAlgorithms() @dataclass(frozen=True) class GlobalNonlinearConstrainedParallelAlgorithms(AlgoSelection): scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution ) @property def Bounded(self) -> BoundedGlobalNonlinearConstrainedParallelAlgorithms: return BoundedGlobalNonlinearConstrainedParallelAlgorithms() @property def GradientFree(self) -> GlobalGradientFreeNonlinearConstrainedParallelAlgorithms: return GlobalGradientFreeNonlinearConstrainedParallelAlgorithms() @property def Scalar(self) -> GlobalNonlinearConstrainedParallelScalarAlgorithms: return GlobalNonlinearConstrainedParallelScalarAlgorithms() @dataclass(frozen=True) class GlobalParallelScalarAlgorithms(AlgoSelection): nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim nevergrad_cga: Type[NevergradCGA] = NevergradCGA nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution nevergrad_eda: Type[NevergradEDA] = NevergradEDA nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA nevergrad_meta: Type[NevergradMeta] = NevergradMeta nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne nevergrad_pso: Type[NevergradPSO] = NevergradPSO nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch nevergrad_tbpsa: 
Type[NevergradTBPSA] = NevergradTBPSA pygad: Type[Pygad] = Pygad pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution ) @property def Bounded(self) -> BoundedGlobalParallelScalarAlgorithms: return BoundedGlobalParallelScalarAlgorithms() @property def GradientFree(self) -> GlobalGradientFreeParallelScalarAlgorithms: return GlobalGradientFreeParallelScalarAlgorithms() @property def NonlinearConstrained( self, ) -> GlobalNonlinearConstrainedParallelScalarAlgorithms: return GlobalNonlinearConstrainedParallelScalarAlgorithms() @dataclass(frozen=True) class BoundedLocalNonlinearConstrainedAlgorithms(AlgoSelection): ipopt: Type[Ipopt] = Ipopt nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA nlopt_mma: Type[NloptMMA] = NloptMMA nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr @property def GradientBased(self) -> BoundedGradientBasedLocalNonlinearConstrainedAlgorithms: return BoundedGradientBasedLocalNonlinearConstrainedAlgorithms() @property def GradientFree(self) -> BoundedGradientFreeLocalNonlinearConstrainedAlgorithms: return BoundedGradientFreeLocalNonlinearConstrainedAlgorithms() @property def Scalar(self) -> BoundedLocalNonlinearConstrainedScalarAlgorithms: return BoundedLocalNonlinearConstrainedScalarAlgorithms() @dataclass(frozen=True) class BoundedLocalScalarAlgorithms(AlgoSelection): fides: Type[Fides] = Fides iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad ipopt: Type[Ipopt] = Ipopt nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA nlopt_ccsaq: 
Type[NloptCCSAQ] = NloptCCSAQ nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA nlopt_lbfgsb: Type[NloptLBFGSB] = NloptLBFGSB nlopt_mma: Type[NloptMMA] = NloptMMA nlopt_newuoa: Type[NloptNEWUOA] = NloptNEWUOA nlopt_neldermead: Type[NloptNelderMead] = NloptNelderMead nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP nlopt_sbplx: Type[NloptSbplx] = NloptSbplx nlopt_tnewton: Type[NloptTNewton] = NloptTNewton nlopt_var: Type[NloptVAR] = NloptVAR scipy_lbfgsb: Type[ScipyLBFGSB] = ScipyLBFGSB scipy_neldermead: Type[ScipyNelderMead] = ScipyNelderMead scipy_powell: Type[ScipyPowell] = ScipyPowell scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP scipy_truncated_newton: Type[ScipyTruncatedNewton] = ScipyTruncatedNewton scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr tranquilo: Type[Tranquilo] = Tranquilo @property def GradientBased(self) -> BoundedGradientBasedLocalScalarAlgorithms: return BoundedGradientBasedLocalScalarAlgorithms() @property def GradientFree(self) -> BoundedGradientFreeLocalScalarAlgorithms: return BoundedGradientFreeLocalScalarAlgorithms() @property def NonlinearConstrained(self) -> BoundedLocalNonlinearConstrainedScalarAlgorithms: return BoundedLocalNonlinearConstrainedScalarAlgorithms() @property def Parallel(self) -> BoundedLocalParallelScalarAlgorithms: return BoundedLocalParallelScalarAlgorithms() @dataclass(frozen=True) class BoundedLeastSquaresLocalAlgorithms(AlgoSelection): nag_dfols: Type[NagDFOLS] = NagDFOLS pounders: Type[Pounders] = Pounders scipy_ls_dogbox: Type[ScipyLSDogbox] = ScipyLSDogbox scipy_ls_trf: Type[ScipyLSTRF] = ScipyLSTRF tao_pounders: Type[TAOPounders] = TAOPounders tranquilo_ls: Type[TranquiloLS] = TranquiloLS @property def GradientBased(self) -> BoundedGradientBasedLeastSquaresLocalAlgorithms: return BoundedGradientBasedLeastSquaresLocalAlgorithms() @property def GradientFree(self) -> BoundedGradientFreeLeastSquaresLocalAlgorithms: return BoundedGradientFreeLeastSquaresLocalAlgorithms() @property def Parallel(self) -> 
BoundedLeastSquaresLocalParallelAlgorithms: return BoundedLeastSquaresLocalParallelAlgorithms() @dataclass(frozen=True) class BoundedLocalParallelAlgorithms(AlgoSelection): pounders: Type[Pounders] = Pounders tranquilo: Type[Tranquilo] = Tranquilo tranquilo_ls: Type[TranquiloLS] = TranquiloLS @property def GradientFree(self) -> BoundedGradientFreeLocalParallelAlgorithms: return BoundedGradientFreeLocalParallelAlgorithms() @property def LeastSquares(self) -> BoundedLeastSquaresLocalParallelAlgorithms: return BoundedLeastSquaresLocalParallelAlgorithms() @property def Scalar(self) -> BoundedLocalParallelScalarAlgorithms: return BoundedLocalParallelScalarAlgorithms() @dataclass(frozen=True) class LocalNonlinearConstrainedScalarAlgorithms(AlgoSelection): ipopt: Type[Ipopt] = Ipopt nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA nlopt_mma: Type[NloptMMA] = NloptMMA nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP scipy_cobyla: Type[ScipyCOBYLA] = ScipyCOBYLA scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr @property def Bounded(self) -> BoundedLocalNonlinearConstrainedScalarAlgorithms: return BoundedLocalNonlinearConstrainedScalarAlgorithms() @property def GradientBased(self) -> GradientBasedLocalNonlinearConstrainedScalarAlgorithms: return GradientBasedLocalNonlinearConstrainedScalarAlgorithms() @property def GradientFree(self) -> GradientFreeLocalNonlinearConstrainedScalarAlgorithms: return GradientFreeLocalNonlinearConstrainedScalarAlgorithms() @dataclass(frozen=True) class LocalParallelScalarAlgorithms(AlgoSelection): neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel tranquilo: Type[Tranquilo] = Tranquilo @property def Bounded(self) -> BoundedLocalParallelScalarAlgorithms: return BoundedLocalParallelScalarAlgorithms() @property def GradientFree(self) -> GradientFreeLocalParallelScalarAlgorithms: return GradientFreeLocalParallelScalarAlgorithms() @dataclass(frozen=True) class 
LeastSquaresLocalParallelAlgorithms(AlgoSelection): pounders: Type[Pounders] = Pounders tranquilo_ls: Type[TranquiloLS] = TranquiloLS @property def Bounded(self) -> BoundedLeastSquaresLocalParallelAlgorithms: return BoundedLeastSquaresLocalParallelAlgorithms() @property def GradientFree(self) -> GradientFreeLeastSquaresLocalParallelAlgorithms: return GradientFreeLeastSquaresLocalParallelAlgorithms() @dataclass(frozen=True) class BoundedNonlinearConstrainedScalarAlgorithms(AlgoSelection): ipopt: Type[Ipopt] = Ipopt nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA nlopt_isres: Type[NloptISRES] = NloptISRES nlopt_mma: Type[NloptMMA] = NloptMMA nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution ) scipy_shgo: Type[ScipySHGO] = ScipySHGO scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr @property def Global(self) -> BoundedGlobalNonlinearConstrainedScalarAlgorithms: return BoundedGlobalNonlinearConstrainedScalarAlgorithms() @property def GradientBased(self) -> BoundedGradientBasedNonlinearConstrainedScalarAlgorithms: return BoundedGradientBasedNonlinearConstrainedScalarAlgorithms() @property def GradientFree(self) -> BoundedGradientFreeNonlinearConstrainedScalarAlgorithms: return BoundedGradientFreeNonlinearConstrainedScalarAlgorithms() @property def Local(self) -> BoundedLocalNonlinearConstrainedScalarAlgorithms: return BoundedLocalNonlinearConstrainedScalarAlgorithms() @property def Parallel(self) -> BoundedNonlinearConstrainedParallelScalarAlgorithms: return BoundedNonlinearConstrainedParallelScalarAlgorithms() @dataclass(frozen=True) class BoundedNonlinearConstrainedParallelAlgorithms(AlgoSelection): scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution ) @property def Global(self) -> BoundedGlobalNonlinearConstrainedParallelAlgorithms: return 
BoundedGlobalNonlinearConstrainedParallelAlgorithms() @property def GradientFree(self) -> BoundedGradientFreeNonlinearConstrainedParallelAlgorithms: return BoundedGradientFreeNonlinearConstrainedParallelAlgorithms() @property def Scalar(self) -> BoundedNonlinearConstrainedParallelScalarAlgorithms: return BoundedNonlinearConstrainedParallelScalarAlgorithms() @dataclass(frozen=True) class BoundedParallelScalarAlgorithms(AlgoSelection): nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim nevergrad_cga: Type[NevergradCGA] = NevergradCGA nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution nevergrad_eda: Type[NevergradEDA] = NevergradEDA nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA nevergrad_meta: Type[NevergradMeta] = NevergradMeta nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne nevergrad_pso: Type[NevergradPSO] = NevergradPSO nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA pygad: Type[Pygad] = Pygad pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution ) tranquilo: Type[Tranquilo] = Tranquilo @property def Global(self) -> BoundedGlobalParallelScalarAlgorithms: return BoundedGlobalParallelScalarAlgorithms() @property def GradientFree(self) -> BoundedGradientFreeParallelScalarAlgorithms: return BoundedGradientFreeParallelScalarAlgorithms() @property def 
Local(self) -> BoundedLocalParallelScalarAlgorithms: return BoundedLocalParallelScalarAlgorithms() @property def NonlinearConstrained( self, ) -> BoundedNonlinearConstrainedParallelScalarAlgorithms: return BoundedNonlinearConstrainedParallelScalarAlgorithms() @dataclass(frozen=True) class BoundedLeastSquaresParallelAlgorithms(AlgoSelection): pounders: Type[Pounders] = Pounders tranquilo_ls: Type[TranquiloLS] = TranquiloLS @property def GradientFree(self) -> BoundedGradientFreeLeastSquaresParallelAlgorithms: return BoundedGradientFreeLeastSquaresParallelAlgorithms() @property def Local(self) -> BoundedLeastSquaresLocalParallelAlgorithms: return BoundedLeastSquaresLocalParallelAlgorithms() @dataclass(frozen=True) class NonlinearConstrainedParallelScalarAlgorithms(AlgoSelection): scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution ) @property def Bounded(self) -> BoundedNonlinearConstrainedParallelScalarAlgorithms: return BoundedNonlinearConstrainedParallelScalarAlgorithms() @property def Global(self) -> GlobalNonlinearConstrainedParallelScalarAlgorithms: return GlobalNonlinearConstrainedParallelScalarAlgorithms() @property def GradientFree(self) -> GradientFreeNonlinearConstrainedParallelScalarAlgorithms: return GradientFreeNonlinearConstrainedParallelScalarAlgorithms() @dataclass(frozen=True) class GlobalGradientBasedAlgorithms(AlgoSelection): scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping scipy_dual_annealing: Type[ScipyDualAnnealing] = ScipyDualAnnealing scipy_shgo: Type[ScipySHGO] = ScipySHGO @property def Bounded(self) -> BoundedGlobalGradientBasedAlgorithms: return BoundedGlobalGradientBasedAlgorithms() @property def NonlinearConstrained(self) -> GlobalGradientBasedNonlinearConstrainedAlgorithms: return GlobalGradientBasedNonlinearConstrainedAlgorithms() @property def Scalar(self) -> GlobalGradientBasedScalarAlgorithms: return GlobalGradientBasedScalarAlgorithms() @dataclass(frozen=True) class 
GradientBasedLocalAlgorithms(AlgoSelection):
    """Local algorithms that use gradient information.

    Generated selection container (see ``.tools/create_algo_selection_code.py``);
    do not edit by hand.  Each field maps an algorithm name to its class, and
    each property narrows the current selection by one additional category.
    """

    bhhh: Type[BHHH] = BHHH
    fides: Type[Fides] = Fides
    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
    ipopt: Type[Ipopt] = Ipopt
    nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ
    nlopt_lbfgsb: Type[NloptLBFGSB] = NloptLBFGSB
    nlopt_mma: Type[NloptMMA] = NloptMMA
    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP
    nlopt_tnewton: Type[NloptTNewton] = NloptTNewton
    nlopt_var: Type[NloptVAR] = NloptVAR
    scipy_bfgs: Type[ScipyBFGS] = ScipyBFGS
    scipy_conjugate_gradient: Type[ScipyConjugateGradient] = ScipyConjugateGradient
    scipy_lbfgsb: Type[ScipyLBFGSB] = ScipyLBFGSB
    scipy_ls_dogbox: Type[ScipyLSDogbox] = ScipyLSDogbox
    scipy_ls_lm: Type[ScipyLSLM] = ScipyLSLM
    scipy_ls_trf: Type[ScipyLSTRF] = ScipyLSTRF
    scipy_newton_cg: Type[ScipyNewtonCG] = ScipyNewtonCG
    scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP
    scipy_truncated_newton: Type[ScipyTruncatedNewton] = ScipyTruncatedNewton
    scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr

    @property
    def Bounded(self) -> BoundedGradientBasedLocalAlgorithms:
        return BoundedGradientBasedLocalAlgorithms()

    @property
    def LeastSquares(self) -> GradientBasedLeastSquaresLocalAlgorithms:
        return GradientBasedLeastSquaresLocalAlgorithms()

    @property
    def Likelihood(self) -> GradientBasedLikelihoodLocalAlgorithms:
        return GradientBasedLikelihoodLocalAlgorithms()

    @property
    def NonlinearConstrained(self) -> GradientBasedLocalNonlinearConstrainedAlgorithms:
        return GradientBasedLocalNonlinearConstrainedAlgorithms()

    @property
    def Scalar(self) -> GradientBasedLocalScalarAlgorithms:
        return GradientBasedLocalScalarAlgorithms()


@dataclass(frozen=True)
class BoundedGradientBasedAlgorithms(AlgoSelection):
    """Algorithms that support bounds and use gradient information.

    Generated selection container; do not edit by hand.
    """

    fides: Type[Fides] = Fides
    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
    ipopt: Type[Ipopt] = Ipopt
    nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ
    nlopt_lbfgsb: Type[NloptLBFGSB] = NloptLBFGSB
    nlopt_mma: Type[NloptMMA] = NloptMMA
    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP
    nlopt_tnewton: Type[NloptTNewton] = NloptTNewton
    nlopt_var: Type[NloptVAR] = NloptVAR
    scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping
    scipy_dual_annealing: Type[ScipyDualAnnealing] = ScipyDualAnnealing
    scipy_lbfgsb: Type[ScipyLBFGSB] = ScipyLBFGSB
    scipy_ls_dogbox: Type[ScipyLSDogbox] = ScipyLSDogbox
    scipy_ls_trf: Type[ScipyLSTRF] = ScipyLSTRF
    scipy_shgo: Type[ScipySHGO] = ScipySHGO
    scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP
    scipy_truncated_newton: Type[ScipyTruncatedNewton] = ScipyTruncatedNewton
    scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr

    @property
    def Global(self) -> BoundedGlobalGradientBasedAlgorithms:
        return BoundedGlobalGradientBasedAlgorithms()

    @property
    def LeastSquares(self) -> BoundedGradientBasedLeastSquaresAlgorithms:
        return BoundedGradientBasedLeastSquaresAlgorithms()

    @property
    def Local(self) -> BoundedGradientBasedLocalAlgorithms:
        return BoundedGradientBasedLocalAlgorithms()

    @property
    def NonlinearConstrained(
        self,
    ) -> BoundedGradientBasedNonlinearConstrainedAlgorithms:
        return BoundedGradientBasedNonlinearConstrainedAlgorithms()

    @property
    def Scalar(self) -> BoundedGradientBasedScalarAlgorithms:
        return BoundedGradientBasedScalarAlgorithms()


@dataclass(frozen=True)
class GradientBasedNonlinearConstrainedAlgorithms(AlgoSelection):
    """Algorithms that use gradient information and support nonlinear
    constraints.  Generated selection container; do not edit by hand."""

    ipopt: Type[Ipopt] = Ipopt
    nlopt_mma: Type[NloptMMA] = NloptMMA
    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP
    scipy_shgo: Type[ScipySHGO] = ScipySHGO
    scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP
    scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr

    @property
    def Bounded(self) -> BoundedGradientBasedNonlinearConstrainedAlgorithms:
        return BoundedGradientBasedNonlinearConstrainedAlgorithms()

    @property
    def Global(self) -> GlobalGradientBasedNonlinearConstrainedAlgorithms:
        return GlobalGradientBasedNonlinearConstrainedAlgorithms()

    @property
    def Local(self) -> GradientBasedLocalNonlinearConstrainedAlgorithms:
        return GradientBasedLocalNonlinearConstrainedAlgorithms()

    @property
    def Scalar(self) -> GradientBasedNonlinearConstrainedScalarAlgorithms:
        return GradientBasedNonlinearConstrainedScalarAlgorithms()


@dataclass(frozen=True)
class GradientBasedScalarAlgorithms(AlgoSelection):
    """Algorithms that use gradient information and solve scalar problems.

    Generated selection container; do not edit by hand.
    """

    fides: Type[Fides] = Fides
    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
    ipopt: Type[Ipopt] = Ipopt
    nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ
    nlopt_lbfgsb: Type[NloptLBFGSB] = NloptLBFGSB
    nlopt_mma: Type[NloptMMA] = NloptMMA
    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP
    nlopt_tnewton: Type[NloptTNewton] = NloptTNewton
    nlopt_var: Type[NloptVAR] = NloptVAR
    scipy_bfgs: Type[ScipyBFGS] = ScipyBFGS
    scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping
    scipy_conjugate_gradient: Type[ScipyConjugateGradient] = ScipyConjugateGradient
    scipy_dual_annealing: Type[ScipyDualAnnealing] = ScipyDualAnnealing
    scipy_lbfgsb: Type[ScipyLBFGSB] = ScipyLBFGSB
    scipy_newton_cg: Type[ScipyNewtonCG] = ScipyNewtonCG
    scipy_shgo: Type[ScipySHGO] = ScipySHGO
    scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP
    scipy_truncated_newton: Type[ScipyTruncatedNewton] = ScipyTruncatedNewton
    scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr

    @property
    def Bounded(self) -> BoundedGradientBasedScalarAlgorithms:
        return BoundedGradientBasedScalarAlgorithms()

    @property
    def Global(self) -> GlobalGradientBasedScalarAlgorithms:
        return GlobalGradientBasedScalarAlgorithms()

    @property
    def Local(self) -> GradientBasedLocalScalarAlgorithms:
        return GradientBasedLocalScalarAlgorithms()

    @property
    def NonlinearConstrained(self) -> GradientBasedNonlinearConstrainedScalarAlgorithms:
        return GradientBasedNonlinearConstrainedScalarAlgorithms()


@dataclass(frozen=True)
class GradientBasedLeastSquaresAlgorithms(AlgoSelection):
    """Algorithms that use gradient information and solve least-squares
    problems.  Generated selection container; do not edit by hand."""

    scipy_ls_dogbox: Type[ScipyLSDogbox] = ScipyLSDogbox
    scipy_ls_lm: Type[ScipyLSLM] = ScipyLSLM
    scipy_ls_trf: Type[ScipyLSTRF] = ScipyLSTRF

    @property
    def Bounded(self) -> BoundedGradientBasedLeastSquaresAlgorithms:
        return BoundedGradientBasedLeastSquaresAlgorithms()

    @property
    def Local(self) -> GradientBasedLeastSquaresLocalAlgorithms:
        return GradientBasedLeastSquaresLocalAlgorithms()


@dataclass(frozen=True)
class GradientBasedLikelihoodAlgorithms(AlgoSelection):
    """Algorithms that use gradient information and solve likelihood problems.

    Generated selection container; do not edit by hand.
    """

    bhhh: Type[BHHH] = BHHH

    @property
    def Local(self) -> GradientBasedLikelihoodLocalAlgorithms:
        return GradientBasedLikelihoodLocalAlgorithms()


@dataclass(frozen=True)
class GlobalGradientFreeAlgorithms(AlgoSelection):
    """Global algorithms that do not require gradient information.

    Generated selection container; do not edit by hand.
    """

    bayes_opt: Type[BayesOpt] = BayesOpt
    gfo_differential_evolution: Type[GFODifferentialEvolution] = (
        GFODifferentialEvolution
    )
    gfo_downhillsimplex: Type[GFODownhillSimplex] = GFODownhillSimplex
    gfo_evolution_strategy: Type[GFOEvolutionStrategy] = GFOEvolutionStrategy
    gfo_genetic_algorithm: Type[GFOGeneticAlgorithm] = GFOGeneticAlgorithm
    gfo_hillclimbing: Type[GFOHillClimbing] = GFOHillClimbing
    gfo_parallel_tempering: Type[GFOParallelTempering] = GFOParallelTempering
    gfo_pso: Type[GFOParticleSwarmOptimization] = GFOParticleSwarmOptimization
    gfo_powells_method: Type[GFOPowellsMethod] = GFOPowellsMethod
    gfo_repulsinghillclimbing: Type[GFORepulsingHillClimbing] = GFORepulsingHillClimbing
    gfo_simulatedannealing: Type[GFOSimulatedAnnealing] = GFOSimulatedAnnealing
    gfo_spiral_optimization: Type[GFOSpiralOptimization] = GFOSpiralOptimization
    gfo_stochastichillclimbing: Type[GFOStochasticHillClimbing] = (
        GFOStochasticHillClimbing
    )
    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
    nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt
    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM nlopt_direct: Type[NloptDirect] = NloptDirect nlopt_esch: Type[NloptESCH] = NloptESCH nlopt_isres: Type[NloptISRES] = NloptISRES pygad: Type[Pygad] = Pygad pygmo_bee_colony: Type[PygmoBeeColony] = PygmoBeeColony pygmo_cmaes: Type[PygmoCmaes] = PygmoCmaes pygmo_compass_search: Type[PygmoCompassSearch] = PygmoCompassSearch pygmo_de: Type[PygmoDe] = PygmoDe pygmo_de1220: Type[PygmoDe1220] = PygmoDe1220 pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_gwo: Type[PygmoGwo] = PygmoGwo pygmo_ihs: Type[PygmoIhs] = PygmoIhs pygmo_mbh: Type[PygmoMbh] = PygmoMbh pygmo_pso: Type[PygmoPso] = PygmoPso pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen pygmo_sade: Type[PygmoSade] = PygmoSade pygmo_sea: Type[PygmoSea] = PygmoSea pygmo_sga: Type[PygmoSga] = PygmoSga pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution ) scipy_direct: Type[ScipyDirect] = ScipyDirect @property def Bounded(self) -> BoundedGlobalGradientFreeAlgorithms: return BoundedGlobalGradientFreeAlgorithms() @property def NonlinearConstrained(self) -> GlobalGradientFreeNonlinearConstrainedAlgorithms: return GlobalGradientFreeNonlinearConstrainedAlgorithms() @property def Parallel(self) -> GlobalGradientFreeParallelAlgorithms: return GlobalGradientFreeParallelAlgorithms() @property def Scalar(self) -> GlobalGradientFreeScalarAlgorithms: return GlobalGradientFreeScalarAlgorithms() @dataclass(frozen=True) class 
GradientFreeLocalAlgorithms(AlgoSelection): nag_dfols: Type[NagDFOLS] = NagDFOLS nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA nlopt_newuoa: Type[NloptNEWUOA] = NloptNEWUOA nlopt_neldermead: Type[NloptNelderMead] = NloptNelderMead nlopt_praxis: Type[NloptPRAXIS] = NloptPRAXIS nlopt_sbplx: Type[NloptSbplx] = NloptSbplx pounders: Type[Pounders] = Pounders scipy_cobyla: Type[ScipyCOBYLA] = ScipyCOBYLA scipy_neldermead: Type[ScipyNelderMead] = ScipyNelderMead scipy_powell: Type[ScipyPowell] = ScipyPowell tao_pounders: Type[TAOPounders] = TAOPounders tranquilo: Type[Tranquilo] = Tranquilo tranquilo_ls: Type[TranquiloLS] = TranquiloLS @property def Bounded(self) -> BoundedGradientFreeLocalAlgorithms: return BoundedGradientFreeLocalAlgorithms() @property def LeastSquares(self) -> GradientFreeLeastSquaresLocalAlgorithms: return GradientFreeLeastSquaresLocalAlgorithms() @property def NonlinearConstrained(self) -> GradientFreeLocalNonlinearConstrainedAlgorithms: return GradientFreeLocalNonlinearConstrainedAlgorithms() @property def Parallel(self) -> GradientFreeLocalParallelAlgorithms: return GradientFreeLocalParallelAlgorithms() @property def Scalar(self) -> GradientFreeLocalScalarAlgorithms: return GradientFreeLocalScalarAlgorithms() @dataclass(frozen=True) class BoundedGradientFreeAlgorithms(AlgoSelection): bayes_opt: Type[BayesOpt] = BayesOpt gfo_differential_evolution: Type[GFODifferentialEvolution] = ( GFODifferentialEvolution ) gfo_downhillsimplex: Type[GFODownhillSimplex] = GFODownhillSimplex gfo_evolution_strategy: Type[GFOEvolutionStrategy] = GFOEvolutionStrategy gfo_genetic_algorithm: Type[GFOGeneticAlgorithm] = GFOGeneticAlgorithm gfo_hillclimbing: Type[GFOHillClimbing] = GFOHillClimbing gfo_parallel_tempering: Type[GFOParallelTempering] = GFOParallelTempering gfo_pso: Type[GFOParticleSwarmOptimization] = 
GFOParticleSwarmOptimization gfo_powells_method: Type[GFOPowellsMethod] = GFOPowellsMethod gfo_repulsinghillclimbing: Type[GFORepulsingHillClimbing] = GFORepulsingHillClimbing gfo_simulatedannealing: Type[GFOSimulatedAnnealing] = GFOSimulatedAnnealing gfo_spiral_optimization: Type[GFOSpiralOptimization] = GFOSpiralOptimization gfo_stochastichillclimbing: Type[GFOStochasticHillClimbing] = ( GFOStochasticHillClimbing ) nag_dfols: Type[NagDFOLS] = NagDFOLS nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim nevergrad_cga: Type[NevergradCGA] = NevergradCGA nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution nevergrad_eda: Type[NevergradEDA] = NevergradEDA nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA nevergrad_meta: Type[NevergradMeta] = NevergradMeta nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne nevergrad_pso: Type[NevergradPSO] = NevergradPSO nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM nlopt_direct: Type[NloptDirect] = NloptDirect nlopt_esch: Type[NloptESCH] = NloptESCH nlopt_isres: Type[NloptISRES] = NloptISRES nlopt_newuoa: Type[NloptNEWUOA] = NloptNEWUOA nlopt_neldermead: Type[NloptNelderMead] = NloptNelderMead nlopt_sbplx: Type[NloptSbplx] = NloptSbplx pounders: Type[Pounders] = Pounders pygad: Type[Pygad] = Pygad pygmo_bee_colony: Type[PygmoBeeColony] = PygmoBeeColony pygmo_cmaes: Type[PygmoCmaes] = PygmoCmaes pygmo_compass_search: Type[PygmoCompassSearch] = PygmoCompassSearch pygmo_de: Type[PygmoDe] = PygmoDe pygmo_de1220: 
Type[PygmoDe1220] = PygmoDe1220 pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_gwo: Type[PygmoGwo] = PygmoGwo pygmo_ihs: Type[PygmoIhs] = PygmoIhs pygmo_mbh: Type[PygmoMbh] = PygmoMbh pygmo_pso: Type[PygmoPso] = PygmoPso pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen pygmo_sade: Type[PygmoSade] = PygmoSade pygmo_sea: Type[PygmoSea] = PygmoSea pygmo_sga: Type[PygmoSga] = PygmoSga pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution ) scipy_direct: Type[ScipyDirect] = ScipyDirect scipy_neldermead: Type[ScipyNelderMead] = ScipyNelderMead scipy_powell: Type[ScipyPowell] = ScipyPowell tao_pounders: Type[TAOPounders] = TAOPounders tranquilo: Type[Tranquilo] = Tranquilo tranquilo_ls: Type[TranquiloLS] = TranquiloLS @property def Global(self) -> BoundedGlobalGradientFreeAlgorithms: return BoundedGlobalGradientFreeAlgorithms() @property def LeastSquares(self) -> BoundedGradientFreeLeastSquaresAlgorithms: return BoundedGradientFreeLeastSquaresAlgorithms() @property def Local(self) -> BoundedGradientFreeLocalAlgorithms: return BoundedGradientFreeLocalAlgorithms() @property def NonlinearConstrained(self) -> BoundedGradientFreeNonlinearConstrainedAlgorithms: return BoundedGradientFreeNonlinearConstrainedAlgorithms() @property def Parallel(self) -> BoundedGradientFreeParallelAlgorithms: return BoundedGradientFreeParallelAlgorithms() @property def Scalar(self) -> BoundedGradientFreeScalarAlgorithms: return BoundedGradientFreeScalarAlgorithms() @dataclass(frozen=True) class GradientFreeNonlinearConstrainedAlgorithms(AlgoSelection): nlopt_cobyla: Type[NloptCOBYLA] = 
NloptCOBYLA nlopt_isres: Type[NloptISRES] = NloptISRES scipy_cobyla: Type[ScipyCOBYLA] = ScipyCOBYLA scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution ) @property def Bounded(self) -> BoundedGradientFreeNonlinearConstrainedAlgorithms: return BoundedGradientFreeNonlinearConstrainedAlgorithms() @property def Global(self) -> GlobalGradientFreeNonlinearConstrainedAlgorithms: return GlobalGradientFreeNonlinearConstrainedAlgorithms() @property def Local(self) -> GradientFreeLocalNonlinearConstrainedAlgorithms: return GradientFreeLocalNonlinearConstrainedAlgorithms() @property def Parallel(self) -> GradientFreeNonlinearConstrainedParallelAlgorithms: return GradientFreeNonlinearConstrainedParallelAlgorithms() @property def Scalar(self) -> GradientFreeNonlinearConstrainedScalarAlgorithms: return GradientFreeNonlinearConstrainedScalarAlgorithms() @dataclass(frozen=True) class GradientFreeScalarAlgorithms(AlgoSelection): bayes_opt: Type[BayesOpt] = BayesOpt gfo_differential_evolution: Type[GFODifferentialEvolution] = ( GFODifferentialEvolution ) gfo_downhillsimplex: Type[GFODownhillSimplex] = GFODownhillSimplex gfo_evolution_strategy: Type[GFOEvolutionStrategy] = GFOEvolutionStrategy gfo_genetic_algorithm: Type[GFOGeneticAlgorithm] = GFOGeneticAlgorithm gfo_hillclimbing: Type[GFOHillClimbing] = GFOHillClimbing gfo_parallel_tempering: Type[GFOParallelTempering] = GFOParallelTempering gfo_pso: Type[GFOParticleSwarmOptimization] = GFOParticleSwarmOptimization gfo_powells_method: Type[GFOPowellsMethod] = GFOPowellsMethod gfo_repulsinghillclimbing: Type[GFORepulsingHillClimbing] = GFORepulsingHillClimbing gfo_simulatedannealing: Type[GFOSimulatedAnnealing] = GFOSimulatedAnnealing gfo_spiral_optimization: Type[GFOSpiralOptimization] = GFOSpiralOptimization gfo_stochastichillclimbing: Type[GFOStochasticHillClimbing] = ( GFOStochasticHillClimbing ) nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA neldermead_parallel: Type[NelderMeadParallel] = 
NelderMeadParallel nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim nevergrad_cga: Type[NevergradCGA] = NevergradCGA nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution nevergrad_eda: Type[NevergradEDA] = NevergradEDA nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA nevergrad_meta: Type[NevergradMeta] = NevergradMeta nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne nevergrad_pso: Type[NevergradPSO] = NevergradPSO nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM nlopt_direct: Type[NloptDirect] = NloptDirect nlopt_esch: Type[NloptESCH] = NloptESCH nlopt_isres: Type[NloptISRES] = NloptISRES nlopt_newuoa: Type[NloptNEWUOA] = NloptNEWUOA nlopt_neldermead: Type[NloptNelderMead] = NloptNelderMead nlopt_praxis: Type[NloptPRAXIS] = NloptPRAXIS nlopt_sbplx: Type[NloptSbplx] = NloptSbplx pygad: Type[Pygad] = Pygad pygmo_bee_colony: Type[PygmoBeeColony] = PygmoBeeColony pygmo_cmaes: Type[PygmoCmaes] = PygmoCmaes pygmo_compass_search: Type[PygmoCompassSearch] = PygmoCompassSearch pygmo_de: Type[PygmoDe] = PygmoDe pygmo_de1220: Type[PygmoDe1220] = PygmoDe1220 pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_gwo: Type[PygmoGwo] = PygmoGwo pygmo_ihs: Type[PygmoIhs] = PygmoIhs pygmo_mbh: Type[PygmoMbh] = PygmoMbh pygmo_pso: Type[PygmoPso] = PygmoPso pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen pygmo_sade: Type[PygmoSade] = PygmoSade pygmo_sea: Type[PygmoSea] = PygmoSea pygmo_sga: Type[PygmoSga] = PygmoSga pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: 
Type[PygmoXnes] = PygmoXnes pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_cobyla: Type[ScipyCOBYLA] = ScipyCOBYLA scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution ) scipy_direct: Type[ScipyDirect] = ScipyDirect scipy_neldermead: Type[ScipyNelderMead] = ScipyNelderMead scipy_powell: Type[ScipyPowell] = ScipyPowell tranquilo: Type[Tranquilo] = Tranquilo @property def Bounded(self) -> BoundedGradientFreeScalarAlgorithms: return BoundedGradientFreeScalarAlgorithms() @property def Global(self) -> GlobalGradientFreeScalarAlgorithms: return GlobalGradientFreeScalarAlgorithms() @property def Local(self) -> GradientFreeLocalScalarAlgorithms: return GradientFreeLocalScalarAlgorithms() @property def NonlinearConstrained(self) -> GradientFreeNonlinearConstrainedScalarAlgorithms: return GradientFreeNonlinearConstrainedScalarAlgorithms() @property def Parallel(self) -> GradientFreeParallelScalarAlgorithms: return GradientFreeParallelScalarAlgorithms() @dataclass(frozen=True) class GradientFreeLeastSquaresAlgorithms(AlgoSelection): nag_dfols: Type[NagDFOLS] = NagDFOLS pounders: Type[Pounders] = Pounders tao_pounders: Type[TAOPounders] = TAOPounders tranquilo_ls: Type[TranquiloLS] = TranquiloLS @property def Bounded(self) -> BoundedGradientFreeLeastSquaresAlgorithms: return BoundedGradientFreeLeastSquaresAlgorithms() @property def Local(self) -> GradientFreeLeastSquaresLocalAlgorithms: return GradientFreeLeastSquaresLocalAlgorithms() @property def Parallel(self) -> GradientFreeLeastSquaresParallelAlgorithms: return GradientFreeLeastSquaresParallelAlgorithms() @dataclass(frozen=True) class GradientFreeParallelAlgorithms(AlgoSelection): neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel nevergrad_bo: 
Type[NevergradBayesOptim] = NevergradBayesOptim nevergrad_cga: Type[NevergradCGA] = NevergradCGA nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution nevergrad_eda: Type[NevergradEDA] = NevergradEDA nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA nevergrad_meta: Type[NevergradMeta] = NevergradMeta nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne nevergrad_pso: Type[NevergradPSO] = NevergradPSO nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA pounders: Type[Pounders] = Pounders pygad: Type[Pygad] = Pygad pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution ) tranquilo: Type[Tranquilo] = Tranquilo tranquilo_ls: Type[TranquiloLS] = TranquiloLS @property def Bounded(self) -> BoundedGradientFreeParallelAlgorithms: return BoundedGradientFreeParallelAlgorithms() @property def Global(self) -> GlobalGradientFreeParallelAlgorithms: return GlobalGradientFreeParallelAlgorithms() @property def LeastSquares(self) -> GradientFreeLeastSquaresParallelAlgorithms: return GradientFreeLeastSquaresParallelAlgorithms() @property def Local(self) -> GradientFreeLocalParallelAlgorithms: return GradientFreeLocalParallelAlgorithms() @property def NonlinearConstrained( self, ) -> GradientFreeNonlinearConstrainedParallelAlgorithms: return GradientFreeNonlinearConstrainedParallelAlgorithms() 
@property def Scalar(self) -> GradientFreeParallelScalarAlgorithms: return GradientFreeParallelScalarAlgorithms() @dataclass(frozen=True) class BoundedGlobalAlgorithms(AlgoSelection): bayes_opt: Type[BayesOpt] = BayesOpt gfo_differential_evolution: Type[GFODifferentialEvolution] = ( GFODifferentialEvolution ) gfo_downhillsimplex: Type[GFODownhillSimplex] = GFODownhillSimplex gfo_evolution_strategy: Type[GFOEvolutionStrategy] = GFOEvolutionStrategy gfo_genetic_algorithm: Type[GFOGeneticAlgorithm] = GFOGeneticAlgorithm gfo_hillclimbing: Type[GFOHillClimbing] = GFOHillClimbing gfo_parallel_tempering: Type[GFOParallelTempering] = GFOParallelTempering gfo_pso: Type[GFOParticleSwarmOptimization] = GFOParticleSwarmOptimization gfo_powells_method: Type[GFOPowellsMethod] = GFOPowellsMethod gfo_repulsinghillclimbing: Type[GFORepulsingHillClimbing] = GFORepulsingHillClimbing gfo_simulatedannealing: Type[GFOSimulatedAnnealing] = GFOSimulatedAnnealing gfo_spiral_optimization: Type[GFOSpiralOptimization] = GFOSpiralOptimization gfo_stochastichillclimbing: Type[GFOStochasticHillClimbing] = ( GFOStochasticHillClimbing ) nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim nevergrad_cga: Type[NevergradCGA] = NevergradCGA nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution nevergrad_eda: Type[NevergradEDA] = NevergradEDA nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA nevergrad_meta: Type[NevergradMeta] = NevergradMeta nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne nevergrad_pso: Type[NevergradPSO] = NevergradPSO nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM nlopt_direct: Type[NloptDirect] = 
NloptDirect nlopt_esch: Type[NloptESCH] = NloptESCH nlopt_isres: Type[NloptISRES] = NloptISRES pygad: Type[Pygad] = Pygad pygmo_bee_colony: Type[PygmoBeeColony] = PygmoBeeColony pygmo_cmaes: Type[PygmoCmaes] = PygmoCmaes pygmo_compass_search: Type[PygmoCompassSearch] = PygmoCompassSearch pygmo_de: Type[PygmoDe] = PygmoDe pygmo_de1220: Type[PygmoDe1220] = PygmoDe1220 pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_gwo: Type[PygmoGwo] = PygmoGwo pygmo_ihs: Type[PygmoIhs] = PygmoIhs pygmo_mbh: Type[PygmoMbh] = PygmoMbh pygmo_pso: Type[PygmoPso] = PygmoPso pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen pygmo_sade: Type[PygmoSade] = PygmoSade pygmo_sea: Type[PygmoSea] = PygmoSea pygmo_sga: Type[PygmoSga] = PygmoSga pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution ) scipy_direct: Type[ScipyDirect] = ScipyDirect scipy_dual_annealing: Type[ScipyDualAnnealing] = ScipyDualAnnealing scipy_shgo: Type[ScipySHGO] = ScipySHGO @property def GradientBased(self) -> BoundedGlobalGradientBasedAlgorithms: return BoundedGlobalGradientBasedAlgorithms() @property def GradientFree(self) -> BoundedGlobalGradientFreeAlgorithms: return BoundedGlobalGradientFreeAlgorithms() @property def NonlinearConstrained(self) -> BoundedGlobalNonlinearConstrainedAlgorithms: return BoundedGlobalNonlinearConstrainedAlgorithms() @property def Parallel(self) -> BoundedGlobalParallelAlgorithms: return BoundedGlobalParallelAlgorithms() @property def Scalar(self) -> BoundedGlobalScalarAlgorithms: return BoundedGlobalScalarAlgorithms() 
@dataclass(frozen=True)
class GlobalNonlinearConstrainedAlgorithms(AlgoSelection):
    """Global algorithms that support nonlinear constraints.

    Generated selection container (see ``.tools/create_algo_selection_code.py``);
    do not edit by hand.  Each field maps an algorithm name to its class, and
    each property narrows the current selection by one additional category.
    """

    nlopt_isres: Type[NloptISRES] = NloptISRES
    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (
        ScipyDifferentialEvolution
    )
    scipy_shgo: Type[ScipySHGO] = ScipySHGO

    @property
    def Bounded(self) -> BoundedGlobalNonlinearConstrainedAlgorithms:
        return BoundedGlobalNonlinearConstrainedAlgorithms()

    @property
    def GradientBased(self) -> GlobalGradientBasedNonlinearConstrainedAlgorithms:
        return GlobalGradientBasedNonlinearConstrainedAlgorithms()

    @property
    def GradientFree(self) -> GlobalGradientFreeNonlinearConstrainedAlgorithms:
        return GlobalGradientFreeNonlinearConstrainedAlgorithms()

    @property
    def Parallel(self) -> GlobalNonlinearConstrainedParallelAlgorithms:
        return GlobalNonlinearConstrainedParallelAlgorithms()

    @property
    def Scalar(self) -> GlobalNonlinearConstrainedScalarAlgorithms:
        return GlobalNonlinearConstrainedScalarAlgorithms()


@dataclass(frozen=True)
class GlobalScalarAlgorithms(AlgoSelection):
    """Global algorithms that solve scalar problems.

    Generated selection container; do not edit by hand.
    """

    bayes_opt: Type[BayesOpt] = BayesOpt
    gfo_differential_evolution: Type[GFODifferentialEvolution] = (
        GFODifferentialEvolution
    )
    gfo_downhillsimplex: Type[GFODownhillSimplex] = GFODownhillSimplex
    gfo_evolution_strategy: Type[GFOEvolutionStrategy] = GFOEvolutionStrategy
    gfo_genetic_algorithm: Type[GFOGeneticAlgorithm] = GFOGeneticAlgorithm
    gfo_hillclimbing: Type[GFOHillClimbing] = GFOHillClimbing
    gfo_parallel_tempering: Type[GFOParallelTempering] = GFOParallelTempering
    gfo_pso: Type[GFOParticleSwarmOptimization] = GFOParticleSwarmOptimization
    gfo_powells_method: Type[GFOPowellsMethod] = GFOPowellsMethod
    gfo_repulsinghillclimbing: Type[GFORepulsingHillClimbing] = GFORepulsingHillClimbing
    gfo_simulatedannealing: Type[GFOSimulatedAnnealing] = GFOSimulatedAnnealing
    gfo_spiral_optimization: Type[GFOSpiralOptimization] = GFOSpiralOptimization
    gfo_stochastichillclimbing: Type[GFOStochasticHillClimbing] = (
        GFOStochasticHillClimbing
    )
    nevergrad_bo: Type[NevergradBayesOptim] =
NevergradBayesOptim nevergrad_cga: Type[NevergradCGA] = NevergradCGA nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution nevergrad_eda: Type[NevergradEDA] = NevergradEDA nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA nevergrad_meta: Type[NevergradMeta] = NevergradMeta nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne nevergrad_pso: Type[NevergradPSO] = NevergradPSO nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM nlopt_direct: Type[NloptDirect] = NloptDirect nlopt_esch: Type[NloptESCH] = NloptESCH nlopt_isres: Type[NloptISRES] = NloptISRES pygad: Type[Pygad] = Pygad pygmo_bee_colony: Type[PygmoBeeColony] = PygmoBeeColony pygmo_cmaes: Type[PygmoCmaes] = PygmoCmaes pygmo_compass_search: Type[PygmoCompassSearch] = PygmoCompassSearch pygmo_de: Type[PygmoDe] = PygmoDe pygmo_de1220: Type[PygmoDe1220] = PygmoDe1220 pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_gwo: Type[PygmoGwo] = PygmoGwo pygmo_ihs: Type[PygmoIhs] = PygmoIhs pygmo_mbh: Type[PygmoMbh] = PygmoMbh pygmo_pso: Type[PygmoPso] = PygmoPso pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen pygmo_sade: Type[PygmoSade] = PygmoSade pygmo_sea: Type[PygmoSea] = PygmoSea pygmo_sga: Type[PygmoSga] = PygmoSga pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping scipy_brute: Type[ScipyBrute] = ScipyBrute 
    # NOTE(review): this chunk appears to be auto-generated (see
    # .tools/create_algo_selection_code.py); docstrings below describe the
    # visible structure only.
    # NOTE(review): the fields/properties directly below continue a class whose
    # header lies above this chunk — judging by the property return types it is
    # the global+scalar selection class; confirm against the full file.
    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (
        ScipyDifferentialEvolution
    )
    scipy_direct: Type[ScipyDirect] = ScipyDirect
    scipy_dual_annealing: Type[ScipyDualAnnealing] = ScipyDualAnnealing
    scipy_shgo: Type[ScipySHGO] = ScipySHGO

    @property
    def Bounded(self) -> BoundedGlobalScalarAlgorithms:
        # Narrow the current selection by the additional "Bounded" filter.
        return BoundedGlobalScalarAlgorithms()

    @property
    def GradientBased(self) -> GlobalGradientBasedScalarAlgorithms:
        return GlobalGradientBasedScalarAlgorithms()

    @property
    def GradientFree(self) -> GlobalGradientFreeScalarAlgorithms:
        return GlobalGradientFreeScalarAlgorithms()

    @property
    def NonlinearConstrained(self) -> GlobalNonlinearConstrainedScalarAlgorithms:
        return GlobalNonlinearConstrainedScalarAlgorithms()

    @property
    def Parallel(self) -> GlobalParallelScalarAlgorithms:
        return GlobalParallelScalarAlgorithms()


@dataclass(frozen=True)
class GlobalParallelAlgorithms(AlgoSelection):
    """Selection of algorithms that are global and support parallelization.

    Each field maps an algorithm name to its algorithm class; the properties
    narrow the selection further by one additional filter.
    """

    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
    nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt
    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
    pygad: Type[Pygad] = Pygad
    pygmo_gaco: Type[PygmoGaco] = PygmoGaco
    pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
    pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO
    pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO
    pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO
    scipy_brute: Type[ScipyBrute] = ScipyBrute
    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (
        ScipyDifferentialEvolution
    )

    @property
    def Bounded(self) -> BoundedGlobalParallelAlgorithms:
        return BoundedGlobalParallelAlgorithms()

    @property
    def GradientFree(self) -> GlobalGradientFreeParallelAlgorithms:
        return GlobalGradientFreeParallelAlgorithms()

    @property
    def NonlinearConstrained(self) -> GlobalNonlinearConstrainedParallelAlgorithms:
        return GlobalNonlinearConstrainedParallelAlgorithms()

    @property
    def Scalar(self) -> GlobalParallelScalarAlgorithms:
        return GlobalParallelScalarAlgorithms()


@dataclass(frozen=True)
class BoundedLocalAlgorithms(AlgoSelection):
    """Selection of local algorithms that support bound constraints."""

    fides: Type[Fides] = Fides
    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
    ipopt: Type[Ipopt] = Ipopt
    nag_dfols: Type[NagDFOLS] = NagDFOLS
    nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
    nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA
    nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ
    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA
    nlopt_lbfgsb: Type[NloptLBFGSB] = NloptLBFGSB
    nlopt_mma: Type[NloptMMA] = NloptMMA
    nlopt_newuoa: Type[NloptNEWUOA] = NloptNEWUOA
    nlopt_neldermead: Type[NloptNelderMead] = NloptNelderMead
    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP
    nlopt_sbplx: Type[NloptSbplx] = NloptSbplx
    nlopt_tnewton: Type[NloptTNewton] = NloptTNewton
    nlopt_var: Type[NloptVAR] = NloptVAR
    pounders: Type[Pounders] = Pounders
    scipy_lbfgsb: Type[ScipyLBFGSB] = ScipyLBFGSB
    scipy_ls_dogbox: Type[ScipyLSDogbox] = ScipyLSDogbox
    scipy_ls_trf: Type[ScipyLSTRF] = ScipyLSTRF
    scipy_neldermead: Type[ScipyNelderMead] = ScipyNelderMead
    scipy_powell: Type[ScipyPowell] = ScipyPowell
    scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP
    scipy_truncated_newton: Type[ScipyTruncatedNewton] = ScipyTruncatedNewton
    scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr
    tao_pounders: Type[TAOPounders] = TAOPounders
    tranquilo: Type[Tranquilo] = Tranquilo
    tranquilo_ls: Type[TranquiloLS] = TranquiloLS

    @property
    def GradientBased(self) -> BoundedGradientBasedLocalAlgorithms:
        return BoundedGradientBasedLocalAlgorithms()

    @property
    def GradientFree(self) -> BoundedGradientFreeLocalAlgorithms:
        return BoundedGradientFreeLocalAlgorithms()

    @property
    def LeastSquares(self) -> BoundedLeastSquaresLocalAlgorithms:
        return BoundedLeastSquaresLocalAlgorithms()

    @property
    def NonlinearConstrained(self) -> BoundedLocalNonlinearConstrainedAlgorithms:
        return BoundedLocalNonlinearConstrainedAlgorithms()

    @property
    def Parallel(self) -> BoundedLocalParallelAlgorithms:
        return BoundedLocalParallelAlgorithms()

    @property
    def Scalar(self) -> BoundedLocalScalarAlgorithms:
        return BoundedLocalScalarAlgorithms()


@dataclass(frozen=True)
class LocalNonlinearConstrainedAlgorithms(AlgoSelection):
    """Selection of local algorithms that support nonlinear constraints."""

    ipopt: Type[Ipopt] = Ipopt
    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA
    nlopt_mma: Type[NloptMMA] = NloptMMA
    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP
    scipy_cobyla: Type[ScipyCOBYLA] = ScipyCOBYLA
    scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP
    scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr

    @property
    def Bounded(self) -> BoundedLocalNonlinearConstrainedAlgorithms:
        return BoundedLocalNonlinearConstrainedAlgorithms()

    @property
    def GradientBased(self) -> GradientBasedLocalNonlinearConstrainedAlgorithms:
        return GradientBasedLocalNonlinearConstrainedAlgorithms()

    @property
    def GradientFree(self) -> GradientFreeLocalNonlinearConstrainedAlgorithms:
        return GradientFreeLocalNonlinearConstrainedAlgorithms()

    @property
    def Scalar(self) -> LocalNonlinearConstrainedScalarAlgorithms:
        return LocalNonlinearConstrainedScalarAlgorithms()


@dataclass(frozen=True)
class LocalScalarAlgorithms(AlgoSelection):
    """Selection of local algorithms for scalar objective functions."""

    fides: Type[Fides] = Fides
    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
    ipopt: Type[Ipopt] = Ipopt
    nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
    neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel
    nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA
    nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ
    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA
    nlopt_lbfgsb: Type[NloptLBFGSB] = NloptLBFGSB
    nlopt_mma: Type[NloptMMA] = NloptMMA
    nlopt_newuoa: Type[NloptNEWUOA] = NloptNEWUOA
    nlopt_neldermead: Type[NloptNelderMead] = NloptNelderMead
    nlopt_praxis: Type[NloptPRAXIS] = NloptPRAXIS
    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP
    nlopt_sbplx: Type[NloptSbplx] = NloptSbplx
    nlopt_tnewton: Type[NloptTNewton] = NloptTNewton
    nlopt_var: Type[NloptVAR] = NloptVAR
    scipy_bfgs: Type[ScipyBFGS] = ScipyBFGS
    scipy_cobyla: Type[ScipyCOBYLA] = ScipyCOBYLA
    scipy_conjugate_gradient: Type[ScipyConjugateGradient] = ScipyConjugateGradient
    scipy_lbfgsb: Type[ScipyLBFGSB] = ScipyLBFGSB
    scipy_neldermead: Type[ScipyNelderMead] = ScipyNelderMead
    scipy_newton_cg: Type[ScipyNewtonCG] = ScipyNewtonCG
    scipy_powell: Type[ScipyPowell] = ScipyPowell
    scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP
    scipy_truncated_newton: Type[ScipyTruncatedNewton] = ScipyTruncatedNewton
    scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr
    tranquilo: Type[Tranquilo] = Tranquilo

    @property
    def Bounded(self) -> BoundedLocalScalarAlgorithms:
        return BoundedLocalScalarAlgorithms()

    @property
    def GradientBased(self) -> GradientBasedLocalScalarAlgorithms:
        return GradientBasedLocalScalarAlgorithms()

    @property
    def GradientFree(self) -> GradientFreeLocalScalarAlgorithms:
        return GradientFreeLocalScalarAlgorithms()

    @property
    def NonlinearConstrained(self) -> LocalNonlinearConstrainedScalarAlgorithms:
        return LocalNonlinearConstrainedScalarAlgorithms()

    @property
    def Parallel(self) -> LocalParallelScalarAlgorithms:
        return LocalParallelScalarAlgorithms()


@dataclass(frozen=True)
class LeastSquaresLocalAlgorithms(AlgoSelection):
    """Selection of local algorithms for least-squares objective functions."""

    nag_dfols: Type[NagDFOLS] = NagDFOLS
    pounders: Type[Pounders] = Pounders
    scipy_ls_dogbox: Type[ScipyLSDogbox] = ScipyLSDogbox
    scipy_ls_lm: Type[ScipyLSLM] = ScipyLSLM
    scipy_ls_trf: Type[ScipyLSTRF] = ScipyLSTRF
    tao_pounders: Type[TAOPounders] = TAOPounders
    tranquilo_ls: Type[TranquiloLS] = TranquiloLS

    @property
    def Bounded(self) -> BoundedLeastSquaresLocalAlgorithms:
        return BoundedLeastSquaresLocalAlgorithms()

    @property
    def GradientBased(self) -> GradientBasedLeastSquaresLocalAlgorithms:
        return GradientBasedLeastSquaresLocalAlgorithms()

    @property
    def GradientFree(self) -> GradientFreeLeastSquaresLocalAlgorithms:
        return GradientFreeLeastSquaresLocalAlgorithms()

    @property
    def Parallel(self) -> LeastSquaresLocalParallelAlgorithms:
        return LeastSquaresLocalParallelAlgorithms()


@dataclass(frozen=True)
class LikelihoodLocalAlgorithms(AlgoSelection):
    """Selection of local algorithms for likelihood objective functions."""

    bhhh: Type[BHHH] = BHHH

    @property
    def GradientBased(self) -> GradientBasedLikelihoodLocalAlgorithms:
        return GradientBasedLikelihoodLocalAlgorithms()


@dataclass(frozen=True)
class LocalParallelAlgorithms(AlgoSelection):
    """Selection of local algorithms that support parallelization."""

    neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel
    pounders: Type[Pounders] = Pounders
    tranquilo: Type[Tranquilo] = Tranquilo
    tranquilo_ls: Type[TranquiloLS] = TranquiloLS

    @property
    def Bounded(self) -> BoundedLocalParallelAlgorithms:
        return BoundedLocalParallelAlgorithms()

    @property
    def GradientFree(self) -> GradientFreeLocalParallelAlgorithms:
        return GradientFreeLocalParallelAlgorithms()

    @property
    def LeastSquares(self) -> LeastSquaresLocalParallelAlgorithms:
        return LeastSquaresLocalParallelAlgorithms()

    @property
    def Scalar(self) -> LocalParallelScalarAlgorithms:
        return LocalParallelScalarAlgorithms()


@dataclass(frozen=True)
class BoundedNonlinearConstrainedAlgorithms(AlgoSelection):
    """Selection of algorithms supporting bounds and nonlinear constraints."""

    ipopt: Type[Ipopt] = Ipopt
    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA
    nlopt_isres: Type[NloptISRES] = NloptISRES
    nlopt_mma: Type[NloptMMA] = NloptMMA
    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP
    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (
        ScipyDifferentialEvolution
    )
    scipy_shgo: Type[ScipySHGO] = ScipySHGO
    scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP
    scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr

    @property
    def Global(self) -> BoundedGlobalNonlinearConstrainedAlgorithms:
        return BoundedGlobalNonlinearConstrainedAlgorithms()

    @property
    def GradientBased(self) -> BoundedGradientBasedNonlinearConstrainedAlgorithms:
        return BoundedGradientBasedNonlinearConstrainedAlgorithms()

    @property
    def GradientFree(self) -> BoundedGradientFreeNonlinearConstrainedAlgorithms:
        return BoundedGradientFreeNonlinearConstrainedAlgorithms()

    @property
    def Local(self) -> BoundedLocalNonlinearConstrainedAlgorithms:
        return BoundedLocalNonlinearConstrainedAlgorithms()

    @property
    def Parallel(self) -> BoundedNonlinearConstrainedParallelAlgorithms:
        return BoundedNonlinearConstrainedParallelAlgorithms()

    @property
    def Scalar(self) -> BoundedNonlinearConstrainedScalarAlgorithms:
        return BoundedNonlinearConstrainedScalarAlgorithms()


@dataclass(frozen=True)
class BoundedScalarAlgorithms(AlgoSelection):
    """Selection of scalar-objective algorithms that support bounds."""

    bayes_opt: Type[BayesOpt] = BayesOpt
    fides: Type[Fides] = Fides
    gfo_differential_evolution: Type[GFODifferentialEvolution] = (
        GFODifferentialEvolution
    )
    gfo_downhillsimplex: Type[GFODownhillSimplex] = GFODownhillSimplex
    gfo_evolution_strategy: Type[GFOEvolutionStrategy] = GFOEvolutionStrategy
    gfo_genetic_algorithm: Type[GFOGeneticAlgorithm] = GFOGeneticAlgorithm
    gfo_hillclimbing: Type[GFOHillClimbing] = GFOHillClimbing
    gfo_parallel_tempering: Type[GFOParallelTempering] = GFOParallelTempering
    gfo_pso: Type[GFOParticleSwarmOptimization] = GFOParticleSwarmOptimization
    gfo_powells_method: Type[GFOPowellsMethod] = GFOPowellsMethod
    gfo_repulsinghillclimbing: Type[GFORepulsingHillClimbing] = GFORepulsingHillClimbing
    gfo_simulatedannealing: Type[GFOSimulatedAnnealing] = GFOSimulatedAnnealing
    gfo_spiral_optimization: Type[GFOSpiralOptimization] = GFOSpiralOptimization
    gfo_stochastichillclimbing: Type[GFOStochasticHillClimbing] = (
        GFOStochasticHillClimbing
    )
    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
    ipopt: Type[Ipopt] = Ipopt
    nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
    nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt
    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
    nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA
    nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ
    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA
    nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM
    nlopt_direct: Type[NloptDirect] = NloptDirect
    nlopt_esch: Type[NloptESCH] = NloptESCH
    nlopt_isres: Type[NloptISRES] = NloptISRES
    nlopt_lbfgsb: Type[NloptLBFGSB] = NloptLBFGSB
    nlopt_mma: Type[NloptMMA] = NloptMMA
    nlopt_newuoa: Type[NloptNEWUOA] = NloptNEWUOA
    nlopt_neldermead: Type[NloptNelderMead] = NloptNelderMead
    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP
    nlopt_sbplx: Type[NloptSbplx] = NloptSbplx
    nlopt_tnewton: Type[NloptTNewton] = NloptTNewton
    nlopt_var: Type[NloptVAR] = NloptVAR
    pygad: Type[Pygad] = Pygad
    pygmo_bee_colony: Type[PygmoBeeColony] = PygmoBeeColony
    pygmo_cmaes: Type[PygmoCmaes] = PygmoCmaes
    pygmo_compass_search: Type[PygmoCompassSearch] = PygmoCompassSearch
    pygmo_de: Type[PygmoDe] = PygmoDe
    pygmo_de1220: Type[PygmoDe1220] = PygmoDe1220
    pygmo_gaco: Type[PygmoGaco] = PygmoGaco
    pygmo_gwo: Type[PygmoGwo] = PygmoGwo
    pygmo_ihs: Type[PygmoIhs] = PygmoIhs
    pygmo_mbh: Type[PygmoMbh] = PygmoMbh
    pygmo_pso: Type[PygmoPso] = PygmoPso
    pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
    pygmo_sade: Type[PygmoSade] = PygmoSade
    pygmo_sea: Type[PygmoSea] = PygmoSea
    pygmo_sga: Type[PygmoSga] = PygmoSga
    pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing
    pygmo_xnes: Type[PygmoXnes] = PygmoXnes
    pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO
    pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO
    pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO
    scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping
    scipy_brute: Type[ScipyBrute] = ScipyBrute
    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (
        ScipyDifferentialEvolution
    )
    scipy_direct: Type[ScipyDirect] = ScipyDirect
    scipy_dual_annealing: Type[ScipyDualAnnealing] = ScipyDualAnnealing
    scipy_lbfgsb: Type[ScipyLBFGSB] = ScipyLBFGSB
    scipy_neldermead: Type[ScipyNelderMead] = ScipyNelderMead
    scipy_powell: Type[ScipyPowell] = ScipyPowell
    scipy_shgo: Type[ScipySHGO] = ScipySHGO
    scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP
    scipy_truncated_newton: Type[ScipyTruncatedNewton] = ScipyTruncatedNewton
    scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr
    tranquilo: Type[Tranquilo] = Tranquilo

    @property
    def Global(self) -> BoundedGlobalScalarAlgorithms:
        return BoundedGlobalScalarAlgorithms()

    @property
    def GradientBased(self) -> BoundedGradientBasedScalarAlgorithms:
        return BoundedGradientBasedScalarAlgorithms()

    @property
    def GradientFree(self) -> BoundedGradientFreeScalarAlgorithms:
        return BoundedGradientFreeScalarAlgorithms()

    @property
    def Local(self) -> BoundedLocalScalarAlgorithms:
        return BoundedLocalScalarAlgorithms()

    @property
    def NonlinearConstrained(self) -> BoundedNonlinearConstrainedScalarAlgorithms:
        return BoundedNonlinearConstrainedScalarAlgorithms()

    @property
    def Parallel(self) -> BoundedParallelScalarAlgorithms:
        return BoundedParallelScalarAlgorithms()


@dataclass(frozen=True)
class BoundedLeastSquaresAlgorithms(AlgoSelection):
    """Selection of least-squares algorithms that support bounds."""

    nag_dfols: Type[NagDFOLS] = NagDFOLS
    pounders: Type[Pounders] = Pounders
    scipy_ls_dogbox: Type[ScipyLSDogbox] = ScipyLSDogbox
    scipy_ls_trf: Type[ScipyLSTRF] = ScipyLSTRF
    tao_pounders: Type[TAOPounders] = TAOPounders
    tranquilo_ls: Type[TranquiloLS] = TranquiloLS

    @property
    def GradientBased(self) -> BoundedGradientBasedLeastSquaresAlgorithms:
        return BoundedGradientBasedLeastSquaresAlgorithms()

    @property
    def GradientFree(self) -> BoundedGradientFreeLeastSquaresAlgorithms:
        return BoundedGradientFreeLeastSquaresAlgorithms()

    @property
    def Local(self) -> BoundedLeastSquaresLocalAlgorithms:
        return BoundedLeastSquaresLocalAlgorithms()

    @property
    def Parallel(self) -> BoundedLeastSquaresParallelAlgorithms:
        return BoundedLeastSquaresParallelAlgorithms()


@dataclass(frozen=True)
class BoundedParallelAlgorithms(AlgoSelection):
    """Selection of parallelizable algorithms that support bounds."""

    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
    nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt
    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
    pounders: Type[Pounders] = Pounders
    pygad: Type[Pygad] = Pygad
    pygmo_gaco: Type[PygmoGaco] = PygmoGaco
    pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
    pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO
    pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO
    pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO
    scipy_brute: Type[ScipyBrute] = ScipyBrute
    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (
        ScipyDifferentialEvolution
    )
    tranquilo: Type[Tranquilo] = Tranquilo
    tranquilo_ls: Type[TranquiloLS] = TranquiloLS

    @property
    def Global(self) -> BoundedGlobalParallelAlgorithms:
        return BoundedGlobalParallelAlgorithms()

    @property
    def GradientFree(self) -> BoundedGradientFreeParallelAlgorithms:
        return BoundedGradientFreeParallelAlgorithms()

    @property
    def LeastSquares(self) -> BoundedLeastSquaresParallelAlgorithms:
        return BoundedLeastSquaresParallelAlgorithms()

    @property
    def Local(self) -> BoundedLocalParallelAlgorithms:
        return BoundedLocalParallelAlgorithms()

    @property
    def NonlinearConstrained(self) -> BoundedNonlinearConstrainedParallelAlgorithms:
        return BoundedNonlinearConstrainedParallelAlgorithms()

    @property
    def Scalar(self) -> BoundedParallelScalarAlgorithms:
        return BoundedParallelScalarAlgorithms()


@dataclass(frozen=True)
class NonlinearConstrainedScalarAlgorithms(AlgoSelection):
    """Selection of scalar-objective algorithms with nonlinear constraints."""

    ipopt: Type[Ipopt] = Ipopt
    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA
    nlopt_isres: Type[NloptISRES] = NloptISRES
    nlopt_mma: Type[NloptMMA] = NloptMMA
    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP
    scipy_cobyla: Type[ScipyCOBYLA] = ScipyCOBYLA
    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (
        ScipyDifferentialEvolution
    )
    scipy_shgo: Type[ScipySHGO] = ScipySHGO
    scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP
    scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr

    @property
    def Bounded(self) -> BoundedNonlinearConstrainedScalarAlgorithms:
        return BoundedNonlinearConstrainedScalarAlgorithms()

    @property
    def Global(self) -> GlobalNonlinearConstrainedScalarAlgorithms:
        return GlobalNonlinearConstrainedScalarAlgorithms()

    @property
    def GradientBased(self) -> GradientBasedNonlinearConstrainedScalarAlgorithms:
        return GradientBasedNonlinearConstrainedScalarAlgorithms()

    @property
    def GradientFree(self) -> GradientFreeNonlinearConstrainedScalarAlgorithms:
        return GradientFreeNonlinearConstrainedScalarAlgorithms()

    @property
    def Local(self) -> LocalNonlinearConstrainedScalarAlgorithms:
        return LocalNonlinearConstrainedScalarAlgorithms()

    @property
    def Parallel(self) -> NonlinearConstrainedParallelScalarAlgorithms:
        return NonlinearConstrainedParallelScalarAlgorithms()


@dataclass(frozen=True)
class NonlinearConstrainedParallelAlgorithms(AlgoSelection):
    """Selection of parallelizable algorithms with nonlinear constraints."""

    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (
        ScipyDifferentialEvolution
    )

    @property
    def Bounded(self) -> BoundedNonlinearConstrainedParallelAlgorithms:
        return BoundedNonlinearConstrainedParallelAlgorithms()

    @property
    def Global(self) -> GlobalNonlinearConstrainedParallelAlgorithms:
        return GlobalNonlinearConstrainedParallelAlgorithms()

    @property
    def GradientFree(self) -> GradientFreeNonlinearConstrainedParallelAlgorithms:
        return GradientFreeNonlinearConstrainedParallelAlgorithms()

    @property
    def Scalar(self) -> NonlinearConstrainedParallelScalarAlgorithms:
        return NonlinearConstrainedParallelScalarAlgorithms()


@dataclass(frozen=True)
class ParallelScalarAlgorithms(AlgoSelection):
    """Selection of parallelizable algorithms for scalar objectives."""

    neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel
    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
    nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt
    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
    pygad: Type[Pygad] = Pygad
    pygmo_gaco: Type[PygmoGaco] = PygmoGaco
    pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
    pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO
    pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO
    pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO
    scipy_brute: Type[ScipyBrute] = ScipyBrute
    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (
        ScipyDifferentialEvolution
    )
    tranquilo: Type[Tranquilo] = Tranquilo

    @property
    def Bounded(self) -> BoundedParallelScalarAlgorithms:
        return BoundedParallelScalarAlgorithms()

    @property
    def Global(self) -> GlobalParallelScalarAlgorithms:
        return GlobalParallelScalarAlgorithms()

    @property
    def GradientFree(self) -> GradientFreeParallelScalarAlgorithms:
        return GradientFreeParallelScalarAlgorithms()

    @property
    def Local(self) -> LocalParallelScalarAlgorithms:
        return LocalParallelScalarAlgorithms()

    @property
    def NonlinearConstrained(self) -> NonlinearConstrainedParallelScalarAlgorithms:
        return NonlinearConstrainedParallelScalarAlgorithms()


@dataclass(frozen=True)
class LeastSquaresParallelAlgorithms(AlgoSelection):
    """Selection of parallelizable least-squares algorithms."""

    pounders: Type[Pounders] = Pounders
    tranquilo_ls: Type[TranquiloLS] = TranquiloLS

    @property
    def Bounded(self) -> BoundedLeastSquaresParallelAlgorithms:
        return BoundedLeastSquaresParallelAlgorithms()

    @property
    def GradientFree(self) -> GradientFreeLeastSquaresParallelAlgorithms:
        return GradientFreeLeastSquaresParallelAlgorithms()

    @property
    def Local(self) -> LeastSquaresLocalParallelAlgorithms:
        return LeastSquaresLocalParallelAlgorithms()


@dataclass(frozen=True)
class GradientBasedAlgorithms(AlgoSelection):
    """Selection of gradient-based algorithms."""

    bhhh: Type[BHHH] = BHHH
    fides: Type[Fides] = Fides
    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
    ipopt: Type[Ipopt] = Ipopt
    nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ
    nlopt_lbfgsb: Type[NloptLBFGSB] = NloptLBFGSB
    nlopt_mma: Type[NloptMMA] = NloptMMA
    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP
    nlopt_tnewton: Type[NloptTNewton] = NloptTNewton
    nlopt_var: Type[NloptVAR] = NloptVAR
    scipy_bfgs: Type[ScipyBFGS] = ScipyBFGS
    scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping
    scipy_conjugate_gradient: Type[ScipyConjugateGradient] = ScipyConjugateGradient
    scipy_dual_annealing: Type[ScipyDualAnnealing] = ScipyDualAnnealing
    scipy_lbfgsb: Type[ScipyLBFGSB] = ScipyLBFGSB
    scipy_ls_dogbox: Type[ScipyLSDogbox] = ScipyLSDogbox
    scipy_ls_lm: Type[ScipyLSLM] = ScipyLSLM
    scipy_ls_trf: Type[ScipyLSTRF] = ScipyLSTRF
    scipy_newton_cg: Type[ScipyNewtonCG] = ScipyNewtonCG
    scipy_shgo: Type[ScipySHGO] = ScipySHGO
    scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP
    scipy_truncated_newton: Type[ScipyTruncatedNewton] = ScipyTruncatedNewton
    scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr

    @property
    def Bounded(self) -> BoundedGradientBasedAlgorithms:
        return BoundedGradientBasedAlgorithms()

    @property
    def Global(self) -> GlobalGradientBasedAlgorithms:
        return GlobalGradientBasedAlgorithms()

    @property
    def LeastSquares(self) -> GradientBasedLeastSquaresAlgorithms:
        return GradientBasedLeastSquaresAlgorithms()

    @property
    def Likelihood(self) -> GradientBasedLikelihoodAlgorithms:
        return GradientBasedLikelihoodAlgorithms()

    @property
    def Local(self) -> GradientBasedLocalAlgorithms:
        return GradientBasedLocalAlgorithms()

    @property
    def NonlinearConstrained(self) -> GradientBasedNonlinearConstrainedAlgorithms:
        return GradientBasedNonlinearConstrainedAlgorithms()

    @property
    def Scalar(self) -> GradientBasedScalarAlgorithms:
        return GradientBasedScalarAlgorithms()


@dataclass(frozen=True)
class GradientFreeAlgorithms(AlgoSelection):
    """Selection of derivative-free algorithms."""

    bayes_opt: Type[BayesOpt] = BayesOpt
    gfo_differential_evolution: Type[GFODifferentialEvolution] = (
        GFODifferentialEvolution
    )
    gfo_downhillsimplex: Type[GFODownhillSimplex] = GFODownhillSimplex
    gfo_evolution_strategy: Type[GFOEvolutionStrategy] = GFOEvolutionStrategy
    gfo_genetic_algorithm: Type[GFOGeneticAlgorithm] = GFOGeneticAlgorithm
    gfo_hillclimbing: Type[GFOHillClimbing] = GFOHillClimbing
    gfo_parallel_tempering: Type[GFOParallelTempering] = GFOParallelTempering
    gfo_pso: Type[GFOParticleSwarmOptimization] = GFOParticleSwarmOptimization
    gfo_powells_method: Type[GFOPowellsMethod] = GFOPowellsMethod
    gfo_repulsinghillclimbing: Type[GFORepulsingHillClimbing] = GFORepulsingHillClimbing
    gfo_simulatedannealing: Type[GFOSimulatedAnnealing] = GFOSimulatedAnnealing
    gfo_spiral_optimization: Type[GFOSpiralOptimization] = GFOSpiralOptimization
    gfo_stochastichillclimbing: Type[GFOStochasticHillClimbing] = (
        GFOStochasticHillClimbing
    )
    nag_dfols: Type[NagDFOLS] = NagDFOLS
    nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
    neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel
    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
    nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt
    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
    nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA
    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA
    nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM
    nlopt_direct: Type[NloptDirect] = NloptDirect
    nlopt_esch: Type[NloptESCH] = NloptESCH
    nlopt_isres: Type[NloptISRES] = NloptISRES
    nlopt_newuoa: Type[NloptNEWUOA] = NloptNEWUOA
    nlopt_neldermead: Type[NloptNelderMead] = NloptNelderMead
    nlopt_praxis: Type[NloptPRAXIS] = NloptPRAXIS
    nlopt_sbplx: Type[NloptSbplx] = NloptSbplx
    pounders: Type[Pounders] = Pounders
    pygad: Type[Pygad] = Pygad
    pygmo_bee_colony: Type[PygmoBeeColony] = PygmoBeeColony
    pygmo_cmaes: Type[PygmoCmaes] = PygmoCmaes
    pygmo_compass_search: Type[PygmoCompassSearch] = PygmoCompassSearch
    pygmo_de: Type[PygmoDe] = PygmoDe
    pygmo_de1220: Type[PygmoDe1220] = PygmoDe1220
    pygmo_gaco: Type[PygmoGaco] = PygmoGaco
    pygmo_gwo: Type[PygmoGwo] = PygmoGwo
    pygmo_ihs: Type[PygmoIhs] = PygmoIhs
    pygmo_mbh: Type[PygmoMbh] = PygmoMbh
    pygmo_pso: Type[PygmoPso] = PygmoPso
    pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
    pygmo_sade: Type[PygmoSade] = PygmoSade
    pygmo_sea: Type[PygmoSea] = PygmoSea
    pygmo_sga: Type[PygmoSga] = PygmoSga
    pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing
    pygmo_xnes: Type[PygmoXnes] = PygmoXnes
    pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO
    pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO
    pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO
    scipy_brute: Type[ScipyBrute] = ScipyBrute
    scipy_cobyla: Type[ScipyCOBYLA] = ScipyCOBYLA
    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (
        ScipyDifferentialEvolution
    )
    scipy_direct: Type[ScipyDirect] = ScipyDirect
    scipy_neldermead: Type[ScipyNelderMead] = ScipyNelderMead
    scipy_powell: Type[ScipyPowell] = ScipyPowell
    tao_pounders: Type[TAOPounders] = TAOPounders
    tranquilo: Type[Tranquilo] = Tranquilo
    tranquilo_ls: Type[TranquiloLS] = TranquiloLS

    @property
    def Bounded(self) -> BoundedGradientFreeAlgorithms:
        return BoundedGradientFreeAlgorithms()

    @property
    def Global(self) -> GlobalGradientFreeAlgorithms:
        return GlobalGradientFreeAlgorithms()

    @property
    def LeastSquares(self) -> GradientFreeLeastSquaresAlgorithms:
        return GradientFreeLeastSquaresAlgorithms()

    @property
    def Local(self) -> GradientFreeLocalAlgorithms:
        return GradientFreeLocalAlgorithms()

    @property
    def NonlinearConstrained(self) -> GradientFreeNonlinearConstrainedAlgorithms:
        return GradientFreeNonlinearConstrainedAlgorithms()

    @property
    def Parallel(self) -> GradientFreeParallelAlgorithms:
        return GradientFreeParallelAlgorithms()

    @property
    def Scalar(self) -> GradientFreeScalarAlgorithms:
        return GradientFreeScalarAlgorithms()


@dataclass(frozen=True)
class GlobalAlgorithms(AlgoSelection):
    """Selection of global optimization algorithms."""

    bayes_opt: Type[BayesOpt] = BayesOpt
    gfo_differential_evolution: Type[GFODifferentialEvolution] = (
        GFODifferentialEvolution
    )
    gfo_downhillsimplex: Type[GFODownhillSimplex] = GFODownhillSimplex
    gfo_evolution_strategy: Type[GFOEvolutionStrategy] = GFOEvolutionStrategy
    gfo_genetic_algorithm: Type[GFOGeneticAlgorithm] = GFOGeneticAlgorithm
    gfo_hillclimbing: Type[GFOHillClimbing] = GFOHillClimbing
    gfo_parallel_tempering: Type[GFOParallelTempering] = GFOParallelTempering
    gfo_pso: Type[GFOParticleSwarmOptimization] = GFOParticleSwarmOptimization
    gfo_powells_method: Type[GFOPowellsMethod] = GFOPowellsMethod
    gfo_repulsinghillclimbing: Type[GFORepulsingHillClimbing] = GFORepulsingHillClimbing
    gfo_simulatedannealing: Type[GFOSimulatedAnnealing] = GFOSimulatedAnnealing
    gfo_spiral_optimization: Type[GFOSpiralOptimization] = GFOSpiralOptimization
    gfo_stochastichillclimbing: Type[GFOStochasticHillClimbing] = (
        GFOStochasticHillClimbing
    )
    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
    nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt
    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
    nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM
    nlopt_direct: Type[NloptDirect] = NloptDirect
    nlopt_esch: Type[NloptESCH] = NloptESCH
    nlopt_isres: Type[NloptISRES] = NloptISRES
    pygad: Type[Pygad] = Pygad
    pygmo_bee_colony: Type[PygmoBeeColony] = PygmoBeeColony
    pygmo_cmaes: Type[PygmoCmaes] = PygmoCmaes
    pygmo_compass_search: Type[PygmoCompassSearch] = PygmoCompassSearch
    pygmo_de: Type[PygmoDe] = PygmoDe
    pygmo_de1220: Type[PygmoDe1220] = PygmoDe1220
    pygmo_gaco: Type[PygmoGaco] = PygmoGaco
    pygmo_gwo: Type[PygmoGwo] = PygmoGwo
    pygmo_ihs: Type[PygmoIhs] = PygmoIhs
    pygmo_mbh: Type[PygmoMbh] = PygmoMbh
    pygmo_pso: Type[PygmoPso] = PygmoPso
    pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
    pygmo_sade: Type[PygmoSade] = PygmoSade
    pygmo_sea: Type[PygmoSea] = PygmoSea
    pygmo_sga: Type[PygmoSga] = PygmoSga
    pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing
    pygmo_xnes: Type[PygmoXnes] = PygmoXnes
    pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO
    pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO
    pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO
    scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping
    scipy_brute: Type[ScipyBrute] = ScipyBrute
    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (
        ScipyDifferentialEvolution
    )
    scipy_direct: Type[ScipyDirect] = ScipyDirect
    scipy_dual_annealing: Type[ScipyDualAnnealing] = ScipyDualAnnealing
    scipy_shgo: Type[ScipySHGO] = ScipySHGO

    @property
    def Bounded(self) -> BoundedGlobalAlgorithms:
        return BoundedGlobalAlgorithms()

    @property
    def GradientBased(self) -> GlobalGradientBasedAlgorithms:
        return GlobalGradientBasedAlgorithms()

    @property
    def GradientFree(self) -> GlobalGradientFreeAlgorithms:
        return GlobalGradientFreeAlgorithms()

    @property
    def NonlinearConstrained(self) -> GlobalNonlinearConstrainedAlgorithms:
        return GlobalNonlinearConstrainedAlgorithms()

    @property
    def Parallel(self) -> GlobalParallelAlgorithms:
        return GlobalParallelAlgorithms()

    @property
    def Scalar(self) -> GlobalScalarAlgorithms:
        return GlobalScalarAlgorithms()


@dataclass(frozen=True)
class LocalAlgorithms(AlgoSelection):
    """Selection of local optimization algorithms."""

    bhhh: Type[BHHH] = BHHH
    fides: Type[Fides] = Fides
    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
    ipopt: Type[Ipopt] = Ipopt
    nag_dfols: Type[NagDFOLS] = NagDFOLS
    nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
    neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel
    nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA
    nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ
    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA
    nlopt_lbfgsb: Type[NloptLBFGSB] = NloptLBFGSB
    nlopt_mma: Type[NloptMMA] = NloptMMA
    nlopt_newuoa: Type[NloptNEWUOA] = NloptNEWUOA
    nlopt_neldermead: Type[NloptNelderMead] = NloptNelderMead
    nlopt_praxis: Type[NloptPRAXIS] = NloptPRAXIS
    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP
    nlopt_sbplx: Type[NloptSbplx] = NloptSbplx
    nlopt_tnewton: Type[NloptTNewton] = NloptTNewton
    nlopt_var: Type[NloptVAR] = NloptVAR
    pounders: Type[Pounders] = Pounders
    scipy_bfgs: Type[ScipyBFGS] = ScipyBFGS
    scipy_cobyla: Type[ScipyCOBYLA] = ScipyCOBYLA
    scipy_conjugate_gradient: Type[ScipyConjugateGradient] = ScipyConjugateGradient
    scipy_lbfgsb: Type[ScipyLBFGSB] = ScipyLBFGSB
    scipy_ls_dogbox: Type[ScipyLSDogbox] = ScipyLSDogbox
    scipy_ls_lm: Type[ScipyLSLM] = ScipyLSLM
    scipy_ls_trf: Type[ScipyLSTRF] = ScipyLSTRF
    scipy_neldermead: Type[ScipyNelderMead] = ScipyNelderMead
    scipy_newton_cg: Type[ScipyNewtonCG] = ScipyNewtonCG
    scipy_powell: Type[ScipyPowell] = ScipyPowell
    scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP
    scipy_truncated_newton: Type[ScipyTruncatedNewton] = ScipyTruncatedNewton
    scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr
    tao_pounders: Type[TAOPounders] = TAOPounders
    tranquilo: Type[Tranquilo] = Tranquilo
    tranquilo_ls: Type[TranquiloLS] = TranquiloLS

    @property
    def Bounded(self) -> BoundedLocalAlgorithms:
        return BoundedLocalAlgorithms()

    @property
    def GradientBased(self) -> GradientBasedLocalAlgorithms:
        return GradientBasedLocalAlgorithms()

    @property
    def GradientFree(self) -> GradientFreeLocalAlgorithms:
        return GradientFreeLocalAlgorithms()

    @property
    def LeastSquares(self) -> LeastSquaresLocalAlgorithms:
        return LeastSquaresLocalAlgorithms()

    @property
    def Likelihood(self) -> LikelihoodLocalAlgorithms:
        return LikelihoodLocalAlgorithms()

    @property
    def NonlinearConstrained(self) -> LocalNonlinearConstrainedAlgorithms:
        return LocalNonlinearConstrainedAlgorithms()

    @property
    def Parallel(self) -> LocalParallelAlgorithms:
        return LocalParallelAlgorithms()

    @property
    def Scalar(self) -> LocalScalarAlgorithms:
        return LocalScalarAlgorithms()


@dataclass(frozen=True)
class BoundedAlgorithms(AlgoSelection):
    """Selection of algorithms that support bound constraints."""

    bayes_opt: Type[BayesOpt] = BayesOpt
    fides: Type[Fides] = Fides
    gfo_differential_evolution: Type[GFODifferentialEvolution] = (
        GFODifferentialEvolution
    )
    gfo_downhillsimplex: Type[GFODownhillSimplex] = GFODownhillSimplex
    gfo_evolution_strategy: Type[GFOEvolutionStrategy] = GFOEvolutionStrategy
    gfo_genetic_algorithm: Type[GFOGeneticAlgorithm] = GFOGeneticAlgorithm
    gfo_hillclimbing: Type[GFOHillClimbing] = GFOHillClimbing
    gfo_parallel_tempering: Type[GFOParallelTempering] = GFOParallelTempering
    gfo_pso: Type[GFOParticleSwarmOptimization] = GFOParticleSwarmOptimization
    gfo_powells_method: Type[GFOPowellsMethod] = GFOPowellsMethod
    gfo_repulsinghillclimbing: Type[GFORepulsingHillClimbing] = GFORepulsingHillClimbing
    gfo_simulatedannealing: Type[GFOSimulatedAnnealing] = GFOSimulatedAnnealing
    gfo_spiral_optimization: Type[GFOSpiralOptimization] = GFOSpiralOptimization
    gfo_stochastichillclimbing: Type[GFOStochasticHillClimbing] = (
        GFOStochasticHillClimbing
    )
    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
    ipopt: Type[Ipopt] = Ipopt
    nag_dfols: Type[NagDFOLS] = NagDFOLS
    nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
    nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt
    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
    nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA
    nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ
    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA
    nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM
    nlopt_direct: Type[NloptDirect] = NloptDirect
    nlopt_esch: Type[NloptESCH] = NloptESCH
    nlopt_isres: Type[NloptISRES] = NloptISRES
    nlopt_lbfgsb: Type[NloptLBFGSB] = NloptLBFGSB
    nlopt_mma: Type[NloptMMA] = NloptMMA
    nlopt_newuoa: Type[NloptNEWUOA] = NloptNEWUOA
    nlopt_neldermead: Type[NloptNelderMead] = NloptNelderMead
    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP
    nlopt_sbplx: Type[NloptSbplx] = NloptSbplx
    nlopt_tnewton: Type[NloptTNewton] = NloptTNewton
    nlopt_var: Type[NloptVAR] = NloptVAR
    pounders: Type[Pounders] = Pounders
    pygad: Type[Pygad] = Pygad
    pygmo_bee_colony: Type[PygmoBeeColony] = PygmoBeeColony
    pygmo_cmaes: Type[PygmoCmaes] = PygmoCmaes
    pygmo_compass_search: Type[PygmoCompassSearch] = PygmoCompassSearch
    pygmo_de: Type[PygmoDe] = PygmoDe
    pygmo_de1220: Type[PygmoDe1220] = PygmoDe1220
    pygmo_gaco: Type[PygmoGaco] = PygmoGaco
    pygmo_gwo: Type[PygmoGwo] = PygmoGwo
    pygmo_ihs: Type[PygmoIhs] = PygmoIhs
    pygmo_mbh: Type[PygmoMbh] = PygmoMbh
    pygmo_pso: Type[PygmoPso] = PygmoPso
    pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
    pygmo_sade: Type[PygmoSade] = PygmoSade
    pygmo_sea: Type[PygmoSea] = PygmoSea
    pygmo_sga: Type[PygmoSga] = PygmoSga
    pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing
    pygmo_xnes: Type[PygmoXnes] = PygmoXnes
    pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO
    pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO
    pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO
    scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping
    scipy_brute: Type[ScipyBrute] = ScipyBrute
    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (
        ScipyDifferentialEvolution
    )
    scipy_direct: Type[ScipyDirect] = ScipyDirect
    scipy_dual_annealing: Type[ScipyDualAnnealing] = ScipyDualAnnealing
    scipy_lbfgsb: Type[ScipyLBFGSB] = ScipyLBFGSB
    scipy_ls_dogbox: Type[ScipyLSDogbox] = ScipyLSDogbox
    scipy_ls_trf: Type[ScipyLSTRF] = ScipyLSTRF
    scipy_neldermead: Type[ScipyNelderMead] = ScipyNelderMead
    scipy_powell: Type[ScipyPowell] = ScipyPowell
    scipy_shgo: Type[ScipySHGO] = ScipySHGO
    scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP
    scipy_truncated_newton: Type[ScipyTruncatedNewton] = ScipyTruncatedNewton
    scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr
    tao_pounders: Type[TAOPounders] = TAOPounders
    tranquilo: Type[Tranquilo] = Tranquilo
    tranquilo_ls: Type[TranquiloLS] = TranquiloLS

    @property
    def Global(self) -> BoundedGlobalAlgorithms:
        return BoundedGlobalAlgorithms()

    @property
    def GradientBased(self) -> BoundedGradientBasedAlgorithms:
        return BoundedGradientBasedAlgorithms()

    @property
    def GradientFree(self) -> BoundedGradientFreeAlgorithms:
        return BoundedGradientFreeAlgorithms()

    @property
    def LeastSquares(self) -> BoundedLeastSquaresAlgorithms:
        return BoundedLeastSquaresAlgorithms()

    @property
    def Local(self) -> BoundedLocalAlgorithms:
        return BoundedLocalAlgorithms()

    @property
    def NonlinearConstrained(self) -> BoundedNonlinearConstrainedAlgorithms:
        return BoundedNonlinearConstrainedAlgorithms()

    @property
    def Parallel(self) -> BoundedParallelAlgorithms:
        return BoundedParallelAlgorithms()

    @property
    def Scalar(self) -> BoundedScalarAlgorithms:
        return BoundedScalarAlgorithms()


@dataclass(frozen=True)
class NonlinearConstrainedAlgorithms(AlgoSelection):
    """Selection of algorithms that support nonlinear constraints.

    NOTE(review): this class is cut off at the end of the chunk; the remaining
    fields and properties continue below.
    """

    ipopt: Type[Ipopt] = Ipopt
    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA
    nlopt_isres: Type[NloptISRES] = NloptISRES
    nlopt_mma: Type[NloptMMA] = NloptMMA
    nlopt_slsqp:
Type[NloptSLSQP] = NloptSLSQP scipy_cobyla: Type[ScipyCOBYLA] = ScipyCOBYLA scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution ) scipy_shgo: Type[ScipySHGO] = ScipySHGO scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr @property def Bounded(self) -> BoundedNonlinearConstrainedAlgorithms: return BoundedNonlinearConstrainedAlgorithms() @property def Global(self) -> GlobalNonlinearConstrainedAlgorithms: return GlobalNonlinearConstrainedAlgorithms() @property def GradientBased(self) -> GradientBasedNonlinearConstrainedAlgorithms: return GradientBasedNonlinearConstrainedAlgorithms() @property def GradientFree(self) -> GradientFreeNonlinearConstrainedAlgorithms: return GradientFreeNonlinearConstrainedAlgorithms() @property def Local(self) -> LocalNonlinearConstrainedAlgorithms: return LocalNonlinearConstrainedAlgorithms() @property def Parallel(self) -> NonlinearConstrainedParallelAlgorithms: return NonlinearConstrainedParallelAlgorithms() @property def Scalar(self) -> NonlinearConstrainedScalarAlgorithms: return NonlinearConstrainedScalarAlgorithms() @dataclass(frozen=True) class ScalarAlgorithms(AlgoSelection): bayes_opt: Type[BayesOpt] = BayesOpt fides: Type[Fides] = Fides gfo_differential_evolution: Type[GFODifferentialEvolution] = ( GFODifferentialEvolution ) gfo_downhillsimplex: Type[GFODownhillSimplex] = GFODownhillSimplex gfo_evolution_strategy: Type[GFOEvolutionStrategy] = GFOEvolutionStrategy gfo_genetic_algorithm: Type[GFOGeneticAlgorithm] = GFOGeneticAlgorithm gfo_hillclimbing: Type[GFOHillClimbing] = GFOHillClimbing gfo_parallel_tempering: Type[GFOParallelTempering] = GFOParallelTempering gfo_pso: Type[GFOParticleSwarmOptimization] = GFOParticleSwarmOptimization gfo_powells_method: Type[GFOPowellsMethod] = GFOPowellsMethod gfo_repulsinghillclimbing: Type[GFORepulsingHillClimbing] = GFORepulsingHillClimbing gfo_simulatedannealing: Type[GFOSimulatedAnnealing] = 
GFOSimulatedAnnealing gfo_spiral_optimization: Type[GFOSpiralOptimization] = GFOSpiralOptimization gfo_stochastichillclimbing: Type[GFOStochasticHillClimbing] = ( GFOStochasticHillClimbing ) iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad ipopt: Type[Ipopt] = Ipopt nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim nevergrad_cga: Type[NevergradCGA] = NevergradCGA nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution nevergrad_eda: Type[NevergradEDA] = NevergradEDA nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA nevergrad_meta: Type[NevergradMeta] = NevergradMeta nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne nevergrad_pso: Type[NevergradPSO] = NevergradPSO nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM nlopt_direct: Type[NloptDirect] = NloptDirect nlopt_esch: Type[NloptESCH] = NloptESCH nlopt_isres: Type[NloptISRES] = NloptISRES nlopt_lbfgsb: Type[NloptLBFGSB] = NloptLBFGSB nlopt_mma: Type[NloptMMA] = NloptMMA nlopt_newuoa: Type[NloptNEWUOA] = NloptNEWUOA nlopt_neldermead: Type[NloptNelderMead] = NloptNelderMead nlopt_praxis: Type[NloptPRAXIS] = NloptPRAXIS nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP nlopt_sbplx: Type[NloptSbplx] = NloptSbplx nlopt_tnewton: Type[NloptTNewton] = NloptTNewton nlopt_var: Type[NloptVAR] = NloptVAR pygad: Type[Pygad] = Pygad pygmo_bee_colony: Type[PygmoBeeColony] = PygmoBeeColony pygmo_cmaes: 
Type[PygmoCmaes] = PygmoCmaes pygmo_compass_search: Type[PygmoCompassSearch] = PygmoCompassSearch pygmo_de: Type[PygmoDe] = PygmoDe pygmo_de1220: Type[PygmoDe1220] = PygmoDe1220 pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_gwo: Type[PygmoGwo] = PygmoGwo pygmo_ihs: Type[PygmoIhs] = PygmoIhs pygmo_mbh: Type[PygmoMbh] = PygmoMbh pygmo_pso: Type[PygmoPso] = PygmoPso pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen pygmo_sade: Type[PygmoSade] = PygmoSade pygmo_sea: Type[PygmoSea] = PygmoSea pygmo_sga: Type[PygmoSga] = PygmoSga pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_bfgs: Type[ScipyBFGS] = ScipyBFGS scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_cobyla: Type[ScipyCOBYLA] = ScipyCOBYLA scipy_conjugate_gradient: Type[ScipyConjugateGradient] = ScipyConjugateGradient scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution ) scipy_direct: Type[ScipyDirect] = ScipyDirect scipy_dual_annealing: Type[ScipyDualAnnealing] = ScipyDualAnnealing scipy_lbfgsb: Type[ScipyLBFGSB] = ScipyLBFGSB scipy_neldermead: Type[ScipyNelderMead] = ScipyNelderMead scipy_newton_cg: Type[ScipyNewtonCG] = ScipyNewtonCG scipy_powell: Type[ScipyPowell] = ScipyPowell scipy_shgo: Type[ScipySHGO] = ScipySHGO scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP scipy_truncated_newton: Type[ScipyTruncatedNewton] = ScipyTruncatedNewton scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr tranquilo: Type[Tranquilo] = Tranquilo @property def Bounded(self) -> BoundedScalarAlgorithms: return BoundedScalarAlgorithms() @property def Global(self) -> GlobalScalarAlgorithms: return GlobalScalarAlgorithms() @property def 
GradientBased(self) -> GradientBasedScalarAlgorithms: return GradientBasedScalarAlgorithms() @property def GradientFree(self) -> GradientFreeScalarAlgorithms: return GradientFreeScalarAlgorithms() @property def Local(self) -> LocalScalarAlgorithms: return LocalScalarAlgorithms() @property def NonlinearConstrained(self) -> NonlinearConstrainedScalarAlgorithms: return NonlinearConstrainedScalarAlgorithms() @property def Parallel(self) -> ParallelScalarAlgorithms: return ParallelScalarAlgorithms() @dataclass(frozen=True) class LeastSquaresAlgorithms(AlgoSelection): nag_dfols: Type[NagDFOLS] = NagDFOLS pounders: Type[Pounders] = Pounders scipy_ls_dogbox: Type[ScipyLSDogbox] = ScipyLSDogbox scipy_ls_lm: Type[ScipyLSLM] = ScipyLSLM scipy_ls_trf: Type[ScipyLSTRF] = ScipyLSTRF tao_pounders: Type[TAOPounders] = TAOPounders tranquilo_ls: Type[TranquiloLS] = TranquiloLS @property def Bounded(self) -> BoundedLeastSquaresAlgorithms: return BoundedLeastSquaresAlgorithms() @property def GradientBased(self) -> GradientBasedLeastSquaresAlgorithms: return GradientBasedLeastSquaresAlgorithms() @property def GradientFree(self) -> GradientFreeLeastSquaresAlgorithms: return GradientFreeLeastSquaresAlgorithms() @property def Local(self) -> LeastSquaresLocalAlgorithms: return LeastSquaresLocalAlgorithms() @property def Parallel(self) -> LeastSquaresParallelAlgorithms: return LeastSquaresParallelAlgorithms() @dataclass(frozen=True) class LikelihoodAlgorithms(AlgoSelection): bhhh: Type[BHHH] = BHHH @property def GradientBased(self) -> GradientBasedLikelihoodAlgorithms: return GradientBasedLikelihoodAlgorithms() @property def Local(self) -> LikelihoodLocalAlgorithms: return LikelihoodLocalAlgorithms() @dataclass(frozen=True) class ParallelAlgorithms(AlgoSelection): neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim nevergrad_cga: Type[NevergradCGA] = NevergradCGA nevergrad_cmaes: Type[NevergradCMAES] = 
NevergradCMAES nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution nevergrad_eda: Type[NevergradEDA] = NevergradEDA nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA nevergrad_meta: Type[NevergradMeta] = NevergradMeta nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne nevergrad_pso: Type[NevergradPSO] = NevergradPSO nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA pounders: Type[Pounders] = Pounders pygad: Type[Pygad] = Pygad pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution ) tranquilo: Type[Tranquilo] = Tranquilo tranquilo_ls: Type[TranquiloLS] = TranquiloLS @property def Bounded(self) -> BoundedParallelAlgorithms: return BoundedParallelAlgorithms() @property def Global(self) -> GlobalParallelAlgorithms: return GlobalParallelAlgorithms() @property def GradientFree(self) -> GradientFreeParallelAlgorithms: return GradientFreeParallelAlgorithms() @property def LeastSquares(self) -> LeastSquaresParallelAlgorithms: return LeastSquaresParallelAlgorithms() @property def Local(self) -> LocalParallelAlgorithms: return LocalParallelAlgorithms() @property def NonlinearConstrained(self) -> NonlinearConstrainedParallelAlgorithms: return NonlinearConstrainedParallelAlgorithms() @property def Scalar(self) -> ParallelScalarAlgorithms: return ParallelScalarAlgorithms() @dataclass(frozen=True) class Algorithms(AlgoSelection): bayes_opt: 
Type[BayesOpt] = BayesOpt bhhh: Type[BHHH] = BHHH fides: Type[Fides] = Fides gfo_differential_evolution: Type[GFODifferentialEvolution] = ( GFODifferentialEvolution ) gfo_downhillsimplex: Type[GFODownhillSimplex] = GFODownhillSimplex gfo_evolution_strategy: Type[GFOEvolutionStrategy] = GFOEvolutionStrategy gfo_genetic_algorithm: Type[GFOGeneticAlgorithm] = GFOGeneticAlgorithm gfo_hillclimbing: Type[GFOHillClimbing] = GFOHillClimbing gfo_parallel_tempering: Type[GFOParallelTempering] = GFOParallelTempering gfo_pso: Type[GFOParticleSwarmOptimization] = GFOParticleSwarmOptimization gfo_powells_method: Type[GFOPowellsMethod] = GFOPowellsMethod gfo_repulsinghillclimbing: Type[GFORepulsingHillClimbing] = GFORepulsingHillClimbing gfo_simulatedannealing: Type[GFOSimulatedAnnealing] = GFOSimulatedAnnealing gfo_spiral_optimization: Type[GFOSpiralOptimization] = GFOSpiralOptimization gfo_stochastichillclimbing: Type[GFOStochasticHillClimbing] = ( GFOStochasticHillClimbing ) iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad ipopt: Type[Ipopt] = Ipopt nag_dfols: Type[NagDFOLS] = NagDFOLS nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim nevergrad_cga: Type[NevergradCGA] = NevergradCGA nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution nevergrad_eda: Type[NevergradEDA] = NevergradEDA nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA nevergrad_meta: Type[NevergradMeta] = NevergradMeta nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne nevergrad_pso: Type[NevergradPSO] = NevergradPSO nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA 
nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM nlopt_direct: Type[NloptDirect] = NloptDirect nlopt_esch: Type[NloptESCH] = NloptESCH nlopt_isres: Type[NloptISRES] = NloptISRES nlopt_lbfgsb: Type[NloptLBFGSB] = NloptLBFGSB nlopt_mma: Type[NloptMMA] = NloptMMA nlopt_newuoa: Type[NloptNEWUOA] = NloptNEWUOA nlopt_neldermead: Type[NloptNelderMead] = NloptNelderMead nlopt_praxis: Type[NloptPRAXIS] = NloptPRAXIS nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP nlopt_sbplx: Type[NloptSbplx] = NloptSbplx nlopt_tnewton: Type[NloptTNewton] = NloptTNewton nlopt_var: Type[NloptVAR] = NloptVAR pounders: Type[Pounders] = Pounders pygad: Type[Pygad] = Pygad pygmo_bee_colony: Type[PygmoBeeColony] = PygmoBeeColony pygmo_cmaes: Type[PygmoCmaes] = PygmoCmaes pygmo_compass_search: Type[PygmoCompassSearch] = PygmoCompassSearch pygmo_de: Type[PygmoDe] = PygmoDe pygmo_de1220: Type[PygmoDe1220] = PygmoDe1220 pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_gwo: Type[PygmoGwo] = PygmoGwo pygmo_ihs: Type[PygmoIhs] = PygmoIhs pygmo_mbh: Type[PygmoMbh] = PygmoMbh pygmo_pso: Type[PygmoPso] = PygmoPso pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen pygmo_sade: Type[PygmoSade] = PygmoSade pygmo_sea: Type[PygmoSea] = PygmoSea pygmo_sga: Type[PygmoSga] = PygmoSga pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_bfgs: Type[ScipyBFGS] = ScipyBFGS scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_cobyla: Type[ScipyCOBYLA] = ScipyCOBYLA scipy_conjugate_gradient: Type[ScipyConjugateGradient] = ScipyConjugateGradient 
scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution ) scipy_direct: Type[ScipyDirect] = ScipyDirect scipy_dual_annealing: Type[ScipyDualAnnealing] = ScipyDualAnnealing scipy_lbfgsb: Type[ScipyLBFGSB] = ScipyLBFGSB scipy_ls_dogbox: Type[ScipyLSDogbox] = ScipyLSDogbox scipy_ls_lm: Type[ScipyLSLM] = ScipyLSLM scipy_ls_trf: Type[ScipyLSTRF] = ScipyLSTRF scipy_neldermead: Type[ScipyNelderMead] = ScipyNelderMead scipy_newton_cg: Type[ScipyNewtonCG] = ScipyNewtonCG scipy_powell: Type[ScipyPowell] = ScipyPowell scipy_shgo: Type[ScipySHGO] = ScipySHGO scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP scipy_truncated_newton: Type[ScipyTruncatedNewton] = ScipyTruncatedNewton scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr tao_pounders: Type[TAOPounders] = TAOPounders tranquilo: Type[Tranquilo] = Tranquilo tranquilo_ls: Type[TranquiloLS] = TranquiloLS @property def Bounded(self) -> BoundedAlgorithms: return BoundedAlgorithms() @property def Global(self) -> GlobalAlgorithms: return GlobalAlgorithms() @property def GradientBased(self) -> GradientBasedAlgorithms: return GradientBasedAlgorithms() @property def GradientFree(self) -> GradientFreeAlgorithms: return GradientFreeAlgorithms() @property def LeastSquares(self) -> LeastSquaresAlgorithms: return LeastSquaresAlgorithms() @property def Likelihood(self) -> LikelihoodAlgorithms: return LikelihoodAlgorithms() @property def Local(self) -> LocalAlgorithms: return LocalAlgorithms() @property def NonlinearConstrained(self) -> NonlinearConstrainedAlgorithms: return NonlinearConstrainedAlgorithms() @property def Parallel(self) -> ParallelAlgorithms: return ParallelAlgorithms() @property def Scalar(self) -> ScalarAlgorithms: return ScalarAlgorithms() algos = Algorithms() global_algos = GlobalAlgorithms() ALL_ALGORITHMS = algos._all_algorithms_dict AVAILABLE_ALGORITHMS = algos._available_algorithms_dict GLOBAL_ALGORITHMS = global_algos._available_algorithms_dict 
================================================ FILE: src/optimagic/batch_evaluators.py ================================================ """A collection of batch evaluators for process based parallelism. All batch evaluators have the same interface and any function with the same interface can be used used as batch evaluator in optimagic. """ from joblib import Parallel, delayed try: from pathos.pools import ProcessPool pathos_is_available = True except ImportError: pathos_is_available = False import threading from typing import Any, Callable, Literal, TypeVar, cast from optimagic import deprecations from optimagic.config import DEFAULT_N_CORES as N_CORES from optimagic.decorators import catch, unpack from optimagic.typing import BatchEvaluator, BatchEvaluatorLiteral, ErrorHandling T = TypeVar("T") def pathos_mp_batch_evaluator( func: Callable[..., T], arguments: list[Any], *, n_cores: int = N_CORES, error_handling: ErrorHandling | Literal["raise", "continue"] = ErrorHandling.CONTINUE, unpack_symbol: Literal["*", "**"] | None = None, ) -> list[T]: """Batch evaluator based on pathos.multiprocess.ProcessPool. This uses a patched but older version of python multiprocessing that replaces pickling with dill and can thus handle decorated functions. Args: func (Callable): The function that is evaluated. arguments (Iterable): Arguments for the functions. Their interperation depends on the unpack argument. n_cores (int): Number of cores used to evaluate the function in parallel. Value below one are interpreted as one. If only one core is used, the batch evaluator disables everything that could cause problems, i.e. in that case func and arguments are never pickled and func is executed in the main process. error_handling (str): Can take the values "raise" (raise the error and stop all tasks as soon as one task fails) and "continue" (catch exceptions and set the traceback of the raised exception. KeyboardInterrupt and SystemExit are always raised. unpack_symbol (str or None). 
Can be "**", "*" or None. If None, func just takes one argument. If "*", the elements of arguments are positional arguments for func. If "**", the elements of arguments are keyword arguments for func. Returns: list: The function evaluations. """ if not pathos_is_available: raise NotImplementedError( "To use the pathos_mp_batch_evaluator, install pathos with " "conda install -c conda-forge pathos." ) _check_inputs(func, arguments, n_cores, error_handling, unpack_symbol) n_cores = int(n_cores) reraise = error_handling in [ "raise", ErrorHandling.RAISE, ErrorHandling.RAISE_STRICT, ] @unpack(symbol=unpack_symbol) @catch(default="__traceback__", reraise=reraise) def internal_func(*args: Any, **kwargs: Any) -> T: return func(*args, **kwargs) if n_cores <= 1: res = [internal_func(arg) for arg in arguments] else: p = ProcessPool(nodes=n_cores) try: res = p.map(internal_func, arguments) except Exception as e: p.terminate() raise e return res def joblib_batch_evaluator( func: Callable[..., T], arguments: list[Any], *, n_cores: int = N_CORES, error_handling: ErrorHandling | Literal["raise", "continue"] = ErrorHandling.CONTINUE, unpack_symbol: Literal["*", "**"] | None = None, ) -> list[T]: """Batch evaluator based on joblib's Parallel. Args: func (Callable): The function that is evaluated. arguments (Iterable): Arguments for the functions. Their interperation depends on the unpack argument. n_cores (int): Number of cores used to evaluate the function in parallel. Value below one are interpreted as one. If only one core is used, the batch evaluator disables everything that could cause problems, i.e. in that case func and arguments are never pickled and func is executed in the main process. error_handling (str): Can take the values "raise" (raise the error and stop all tasks as soon as one task fails) and "continue" (catch exceptions and set the output of failed tasks to the traceback of the raised exception. KeyboardInterrupt and SystemExit are always raised. 
unpack_symbol (str or None). Can be "**", "*" or None. If None, func just takes one argument. If "*", the elements of arguments are positional arguments for func. If "**", the elements of arguments are keyword arguments for func. Returns: list: The function evaluations. """ _check_inputs(func, arguments, n_cores, error_handling, unpack_symbol) n_cores = int(n_cores) if int(n_cores) >= 2 else 1 reraise = error_handling in [ "raise", ErrorHandling.RAISE, ErrorHandling.RAISE_STRICT, ] @unpack(symbol=unpack_symbol) @catch(default="__traceback__", reraise=reraise) def internal_func(*args: Any, **kwargs: Any) -> T: return func(*args, **kwargs) if n_cores == 1: res = [internal_func(arg) for arg in arguments] else: res = Parallel(n_jobs=n_cores)(delayed(internal_func)(arg) for arg in arguments) return res def threading_batch_evaluator( func: Callable[..., T], arguments: list[Any], *, n_cores: int = N_CORES, error_handling: ErrorHandling | Literal["raise", "continue"] = ErrorHandling.CONTINUE, unpack_symbol: Literal["*", "**"] | None = None, ) -> list[T]: """Batch evaluator based on Python's threading. Args: func (Callable): The function that is evaluated. arguments (Iterable): Arguments for the functions. Their interperation depends on the unpack argument. n_cores (int): Number of threads used to evaluate the function in parallel. Value below one are interpreted as one. error_handling (str): Can take the values "raise" (raise the error and stop all tasks as soon as one task fails) and "continue" (catch exceptions and set the output of failed tasks to the traceback of the raised exception. KeyboardInterrupt and SystemExit are always raised. unpack_symbol (str or None). Can be "**", "*" or None. If None, func just takes one argument. If "*", the elements of arguments are positional arguments for func. If "**", the elements of arguments are keyword arguments for func. Returns: list: The function evaluations. 
""" _check_inputs(func, arguments, n_cores, error_handling, unpack_symbol) n_cores = int(n_cores) if int(n_cores) >= 2 else 1 reraise = error_handling in [ "raise", ErrorHandling.RAISE, ErrorHandling.RAISE_STRICT, ] @unpack(symbol=unpack_symbol) @catch(default="__traceback__", reraise=reraise) def internal_func(*args: Any, **kwargs: Any) -> T: return func(*args, **kwargs) if n_cores == 1: res = [internal_func(arg) for arg in arguments] else: results = [None] * len(arguments) threads = [] errors = [] error_lock = threading.Lock() def thread_func(index: int, arg: Any) -> None: try: results[index] = internal_func(arg) except Exception as e: with error_lock: errors.append(e) for i, arg in enumerate(arguments): thread = threading.Thread(target=thread_func, args=(i, arg)) threads.append(thread) thread.start() for thread in threads: thread.join() if errors: raise errors[0] res = cast(list[T], results) return res def _check_inputs( func: Callable[..., T], arguments: list[Any], n_cores: int, error_handling: ErrorHandling | Literal["raise", "continue"], unpack_symbol: Literal["*", "**"] | None, ) -> None: if not callable(func): raise TypeError("func must be callable.") try: arguments = list(arguments) except Exception as e: raise ValueError("arguments must be list like.") from e try: int(n_cores) except Exception as e: raise ValueError("n_cores must be an integer.") from e if unpack_symbol not in (None, "*", "**"): raise ValueError( f"unpack_symbol must be None, '*' or '**', not {unpack_symbol}" ) if error_handling not in [ "raise", "continue", ErrorHandling.RAISE, ErrorHandling.CONTINUE, ErrorHandling.RAISE_STRICT, ]: raise ValueError( "error_handling must be 'raise' or 'continue' or ErrorHandling not " f"{error_handling}" ) def process_batch_evaluator( batch_evaluator: BatchEvaluatorLiteral | BatchEvaluator = "joblib", ) -> BatchEvaluator: if batch_evaluator is None: deprecations.throw_none_valued_batch_evaluator_warning() batch_evaluator = "joblib" if 
callable(batch_evaluator): out = batch_evaluator elif isinstance(batch_evaluator, str): if batch_evaluator == "joblib": out = cast(BatchEvaluator, joblib_batch_evaluator) elif batch_evaluator == "pathos": out = cast(BatchEvaluator, pathos_mp_batch_evaluator) elif batch_evaluator == "threading": out = cast(BatchEvaluator, threading_batch_evaluator) else: raise ValueError( "Invalid batch evaluator requested. Currently only 'pathos', 'joblib', " "and 'threading' are supported." ) else: raise TypeError("batch_evaluator must be a callable or string.") return out ================================================ FILE: src/optimagic/benchmarking/__init__.py ================================================ ================================================ FILE: src/optimagic/benchmarking/benchmark_reports.py ================================================ import pandas as pd from optimagic.benchmarking.process_benchmark_results import ( process_benchmark_results, ) from optimagic.visualization.profile_plot import create_solution_times def convergence_report( problems, results, *, stopping_criterion="y", x_precision=1e-4, y_precision=1e-4 ): """Create a DataFrame with convergence information for a set of problems. Args: problems (dict): optimagic benchmarking problems dictionary. Keys are the problem names. Values contain information on the problem, including the solution value. results (dict): optimagic benchmarking results dictionary. Keys are tuples of the form (problem, algorithm), values are dictionaries of the collected information on the benchmark run, including 'criterion_history' and 'time_history'. stopping_criterion (str): one of "x_and_y", "x_or_y", "x", "y". Determines how convergence is determined from the two precisions. Default is "y". 
x_precision (float or None): how close an algorithm must have gotten to the true parameter values (as percent of the Euclidean distance between start and solution parameters) before the criterion for clipping and convergence is fulfilled. Default is 1e-4. y_precision (float or None): how close an algorithm must have gotten to the true criterion values (as percent of the distance between start and solution criterion value) before the criterion for clipping and convergence is fulfilled. Default is 1e-4. Returns: pandas.DataFrame: indexes are the problems, columns are the algorithms and the dimensionality of the benchmark problems. For the algorithms column, the values are strings that are either "success", "failed", or "error". For the dimensionality column, the values denote the number of dimensions of the problem. """ _, converged_info = process_benchmark_results( problems=problems, results=results, stopping_criterion=stopping_criterion, x_precision=x_precision, y_precision=y_precision, ) report = _get_success_info(results, converged_info) report["dimensionality"] = report.index.map(_get_problem_dimensions(problems)) return report def rank_report( problems, results, *, runtime_measure="n_evaluations", stopping_criterion="y", x_precision=1e-4, y_precision=1e-4, ): """Create a DataFrame with rank information for a set of problems. Args: problems (dict): optimagic benchmarking problems dictionary. Keys are the problem names. Values contain information on the problem, including the solution value. results (dict): optimagic benchmarking results dictionary. Keys are tuples of the form (problem, algorithm), values are dictionaries of the collected information on the benchmark run, including 'criterion_history' and 'time_history'. runtime_measure (str): "n_evaluations", "n_batches" or "walltime". This is the runtime until the desired convergence was reached by an algorithm. This is called performance measure by Moré and Wild (2009). Default is "n_evaluations". 
stopping_criterion (str): one of "x_and_y", "x_or_y", "x", "y". Determines how convergence is determined from the two precisions. x_precision (float or None): how close an algorithm must have gotten to the true parameter values (as percent of the Euclidean distance between start and solution parameters) before the criterion for clipping and convergence is fulfilled. Default is 1e-4. y_precision (float or None): how close an algorithm must have gotten to the true criterion values (as percent of the distance between start and solution criterion value) before the criterion for clipping and convergence is fulfilled. Default is 1e-4. Returns: pandas.DataFrame: indexes are the problems, columns are the algorithms and the dimensionality of the problems. The values are the ranks of the algorithms for each problem, where 0 means the algorithm was the fastest, 1 means it was the second fastest and so on. If an algorithm did not converge on a problem, the value is "failed". If an algorithm did encounter an error during optimization, the value is "error". """ histories, converged_info = process_benchmark_results( problems=problems, results=results, stopping_criterion=stopping_criterion, x_precision=x_precision, y_precision=y_precision, ) solution_times = create_solution_times( histories, runtime_measure, converged_info, return_tidy=False ) solution_times["rank"] = ( solution_times.groupby("problem")[runtime_measure].rank( method="dense", ascending=True ) - 1 ).astype("Int64") success_info = _get_success_info(results, converged_info) df_wide = solution_times.pivot(index="problem", columns="algorithm", values="rank") report = df_wide.astype(str) report.columns.name = None report[~converged_info] = success_info report["dimensionality"] = report.index.map(_get_problem_dimensions(problems)) return report def traceback_report(problems, results, return_type="dataframe"): """Create traceback report for all problems that have not been solved. 
    Args:
        problems (dict): optimagic benchmarking problems dictionary. Keys are the
            problem names. Values contain information on the problem, including
            the solution value. Only used to attach dimensionality information
            when ``return_type`` is "dataframe".
        results (dict): optimagic benchmarking results dictionary. Keys are
            tuples of the form (problem, algorithm), values are dictionaries of
            the collected information on the benchmark run, including
            'criterion_history' and 'time_history'.
        return_type (str): either "text", "markdown", "dict" or "dataframe". If
            "text", the traceback report is returned as a string. If "markdown",
            it is a markdown string. If "dict", it is returned as a dictionary.
            If "dataframe", it is a tidy pandas DataFrame, where indexes are the
            algorithm and problem names, the columns are the tracebacks and the
            dimensionality of the problem. Default is "dataframe".

    Returns:
        (list or str or dict or pandas.DataFrame): traceback report. If
            return_type is "text", the report is a list of strings. If
            "markdown", it is a formatted markdown string with algorithms and
            problem names as headers. If return_type is "dict", the report is a
            dictionary. If return_type is "dataframe", it is a tidy pandas
            DataFrame. In the latter case, indexes are the algorithm and problem
            names, the columns are the tracebacks and the dimensionality of the
            problems. The values are the tracebacks of the algorithms for
            problems where they stopped with an error.

    """
    # A run that errored stores the traceback string in result["solution"];
    # successful runs store a result object instead, hence the isinstance checks.
    if return_type == "text":
        report = []
        for result in results.values():
            if isinstance(result["solution"], str):
                report.append(result["solution"])

    elif return_type == "markdown":
        report = "```python"
        for (problem_name, algorithm_name), result in results.items():
            if isinstance(result["solution"], str):
                # Emit one "### algorithm" header per algorithm, then one
                # "#### problem" sub-header per failing problem.
                if f"### {algorithm_name}" not in report:
                    report += f"\n### {algorithm_name} \n"
                report += f"\n#### {problem_name} \n"
                report += f"\n{result['solution']} \n"
        report += "\n```"

    elif return_type == "dict":
        report = {}
        for (problem_name, algorithm_name), result in results.items():
            if isinstance(result["solution"], str):
                report[(problem_name, algorithm_name)] = result["solution"]

    elif return_type == "dataframe":
        tracebacks = {}
        for (problem_name, algorithm_name), result in results.items():
            if isinstance(result["solution"], str):
                tracebacks[algorithm_name] = tracebacks.setdefault(algorithm_name, {})
                tracebacks[algorithm_name][problem_name] = result["solution"]

        report = pd.DataFrame.from_dict(tracebacks, orient="index").stack().to_frame()
        report.index.set_names(["algorithm", "problem"], inplace=True)
        report.columns = ["traceback"]

        report["dimensionality"] = 0
        for problem_name, dim in _get_problem_dimensions(problems).items():
            if problem_name in report.index.get_level_values("problem"):
                report.loc[(slice(None), problem_name), "dimensionality"] = dim

    else:
        raise ValueError(
            f"return_type {return_type} is not supported. Must be one of "
            f"'text', 'markdown', 'dict' or 'dataframe'."
        )

    return report


def _get_success_info(results, converged_info):
    """Create a DataFrame with information on whether an algorithm succeeded or not.

    Args:
        results (dict): optimagic benchmarking results dictionary. Keys are
            tuples of the form (problem, algorithm), values are dictionaries of
            the collected information on the benchmark run, including
            'criterion_history' and 'time_history'.
        converged_info (pandas.DataFrame): columns are the algorithms, indexes
            are the problems.
            The values are boolean and True when the algorithm arrived at the
            solution with the desired precision.

    Returns:
        pandas.DataFrame: indexes are the problems, columns are the algorithms.
            values are strings that are either "success", "failed", or "error".

    """
    success_info = converged_info.replace({True: "success", False: "failed"})
    # Runs that stored a traceback string as their solution are marked "error";
    # key is a (problem, algorithm) tuple, i.e. (row, column) for .at.
    for key, value in results.items():
        if isinstance(value["solution"], str):
            success_info.at[key] = "error"
    return success_info


def _get_problem_dimensions(problems):
    """Get the dimension of each problem.

    Args:
        problems (dict): dictionary of problems. keys are problem names, values
            are dictionaries with the problem information.

    Returns:
        dict: keys are problem names, values are the dimension of the problem.

    """
    return {prob: len(problems[prob]["inputs"]["params"]) for prob in problems}



================================================
FILE: src/optimagic/benchmarking/cartis_roberts.py
================================================
"""Define the medium scale CUTEst Benchmark Set.

This benchmark set contains 60 test cases for nonlinear least squares solvers.
It was used to benchmark all modern model based non-linear derivative free
least squares solvers (e.g. POUNDERS, DFOGN, DFOLS). The parameter dimensions
are of medium scale, varying between 25 and 100.

The benchmark set is based on Table 3 in Cartis and Roberts (2019).
Implementation is based on
- the original SIF files: https://bitbucket.org/optrove/sif/src/master/
- on sources cited in the SIF files or,
- where available, on AMPL implementaions available here:
- https://vanderbei.princeton.edu/ampl/nlmodels/cute/index.html

"""

from functools import partial

import numpy as np

from optimagic import mark
from optimagic.config import IS_NUMBA_INSTALLED
from optimagic.parameters.bounds import Bounds

# Fall back to a no-op decorator when numba is not installed so the benchmark
# functions stay importable (just slower).
if IS_NUMBA_INSTALLED:
    from numba import njit
else:

    def njit(func):
        return func


from optimagic.benchmarking.more_wild import (
    brown_almost_linear,
    linear_full_rank,
    linear_rank_one,
    watson,
)


@mark.least_squares
def luksan11(x):
    """Residual vector of the LUKSAN11 problem."""
    dim_in = len(x)
    fvec = np.zeros(2 * (dim_in - 1))
    # Residuals come in pairs per consecutive parameter pair.
    fvec[::2] = 20 * x[:-1] / (1 + x[:-1] ** 2) - 10 * x[1:]
    fvec[1::2] = x[:-1] - 1
    return fvec


@mark.least_squares
def luksan12(x):
    """Residual vector of the LUKSAN12 problem."""
    dim_in = len(x)
    # Six residuals per overlapping block of five parameters (stride 3).
    n = (dim_in - 2) // 3
    i = np.arange(0, 3 * n, 3)
    fvec = np.zeros(6 * n)
    fvec[::6] = 10 * (x[i] ** 2 - x[i + 1])
    fvec[1::6] = x[i + 2] - 1
    fvec[2::6] = (x[i + 3] - 1) ** 2
    fvec[3::6] = (x[i + 4] - 1) ** 3
    fvec[4::6] = x[i] ** 2 * x[i + 3] + np.sin(x[i + 3] - x[i + 4]) - 10
    fvec[5::6] = x[i + 1] + (x[i + 2] ** 4) * (x[i + 3] ** 2) - 20
    return fvec


@mark.least_squares
def luksan13(x):
    """Residual vector of the LUKSAN13 problem."""
    dim_in = len(x)
    # Seven residuals per overlapping block of five parameters (stride 3).
    n = (dim_in - 2) // 3
    fvec = np.zeros(n * 7)
    i = np.arange(n)
    k = i * 7
    fvec[k] = 10 * (x[3 * i] ** 2 - x[3 * i + 1])
    fvec[k + 1] = 10 * (x[3 * i + 1] ** 2 - x[3 * i + 2])
    fvec[k + 2] = (x[3 * i + 2] - x[3 * i + 3]) ** 2
    fvec[k + 3] = (x[3 * i + 3] - x[3 * i + 4]) ** 2
    fvec[k + 4] = x[3 * i] + x[3 * i + 1] ** 2 + x[3 * i + 2] - 30
    fvec[k + 5] = x[3 * i + 1] - x[3 * i + 2] ** 2 + x[3 * i + 3] - 10
    fvec[k + 6] = x[3 * i + 1] * x[3 * i + 4] - 10
    return fvec


@mark.least_squares
def luksan14(x):
    """Residual vector of the LUKSAN14 problem."""
    dim_in = len(x)
    dim_out = 7 * (dim_in - 2) // 3
    fvec = np.zeros(dim_out, dtype=np.float64)
    for i in range(0, dim_in - 2, 3):
        k = (i // 3) * 7
        fvec[k : k + 7] = [
            10 * (x[i] ** 2 - x[i + 1]),
            x[i + 1] + x[i + 2] - 2,
            x[i + 3] - 1,
            x[i + 4] -
1, x[i] + 3 * x[i + 1], x[i + 2] + x[i + 3] - 2 * x[i + 4], 10 * (x[i + 1] ** 2 - x[i + 4]), ] return fvec @mark.least_squares def luksan15(x): dim_in = len(x) dim_out = (dim_in - 2) * 2 temp = np.zeros((dim_out, 3), dtype=np.float64) y = np.tile([35.8, 11.2, 6.2, 4.4], dim_out // 4) for p in range(1, 4): k = 0 for i in range(0, dim_in - 2, 2): for j in range(1, 5): temp[k, p - 1] = (p**2 / j) * np.abs( x[i] * (x[i + 1] ** 2) * (x[i + 2] ** 3) * (x[i + 3] ** 4) ) ** (1 / (p * j)) k += 1 fvec = y - np.sum(temp, axis=1) return fvec @mark.least_squares def luksan16(x): dim_in = len(x) dim_out = (dim_in - 2) * 2 temp = np.zeros((dim_out, 3), dtype=np.float64) y = np.tile([35.8, 11.2, 6.2, 4.4], dim_out // 4) for p in range(1, 4): k = 0 for i in range(0, dim_in - 2, 2): for j in range(1, 5): temp[k, p - 1] = (p**2 / j) * np.exp( (x[i] + 2 * x[i + 1] + 3 * x[i + 2] + 4 * x[i + 3]) * (1 / (p * j)) ) k += 1 fvec = y - np.sum(temp, axis=1) return fvec @mark.least_squares def luksan17(x): dim_in = len(x) dim_out = (dim_in - 2) * 2 temp = np.zeros((dim_out, 4), dtype=np.float64) y = np.tile([30.6, 72.2, 124.4, 187.4], dim_out // 4) for q in range(1, 5): k = 0 for i in range(-1, dim_in - 4, 2): for j in range(1, 5): temp[k, q - 1] += -j * q**2 * np.sin(x[i + q]) + j**2 * q * np.cos( x[i + q] ) k += 1 fvec = y - np.sum(temp, axis=1) return fvec @mark.least_squares def luksan21(x): dim_out = len(x) h = 1 / (dim_out + 1) fvec = np.zeros(dim_out, dtype=np.float64) fvec[0] = 2 * x[0] + 0.5 * h**2 * (x[0] + h + 1) ** 3 - x[1] + 1 for i in range(1, dim_out - 1): fvec[i] = ( 2 * x[i] + 0.5 * h**2 * (x[i] + h * (i + 1) + 1) ** 3 - x[i - 1] - x[i + 1] + 1 ) fvec[-1] = 2 * x[-1] + 0.5 * h**2 * (x[-1] + h * dim_out + 1) ** 3 - x[-2] + 1 return fvec @mark.least_squares def luksan22(x): dim_out = 2 * len(x) - 2 fvec = np.zeros(dim_out) fvec[0] = x[0] - 1 fvec[1:-1:2] = 10 * (x[:-2] ** 2 - x[1:-1]) fvec[2:-1:2] = 2 * np.exp(-((x[:-2] - x[1:-1]) ** 2)) + np.exp( -2 * (x[1:-1] - x[2:]) ** 2 ) 
fvec[-1] = -10 * (x[-2] ** 2) return fvec @mark.least_squares def morebvne(x): dim_in = len(x) h = 1 / (dim_in + 1) i = np.arange(1, dim_in + 1) fvec = np.zeros(dim_in) fvec[0] = 2 * x[0] - x[1] + h**2 / 2 * (x[0] + i[0] * h + 1) ** 3 fvec[1:-1] = ( 2 * x[1:-1] - x[:-2] - x[2:] + h**2 / 2 * (x[1:-1] + i[1:-1] * h + 1) ** 3 ) fvec[-1] = 2 * x[-2] - x[-2] + h**2 / 2 * (x[-1] + i[-1] * h + 1) ** 3 return fvec @mark.least_squares @njit def flosp2(x, a, b, ra=1.0e7): n = 5 xvec = np.ones((3, n, n), dtype=np.float64) xvec[0] = x[: n**2].reshape(n, n) xvec[1] = x[n**2 : 2 * n**2].reshape(n, n) xvec[2, 1:-1, 1:-1] = x[2 * n**2 :].reshape(n - 2, n - 2) h = 1 / 2 ax = 1.0 axx = ax**2 theta = 0.5 * np.pi pi1 = -0.5 * ax * ra * np.cos(theta) pi2 = 0.5 * ax * ra * np.sin(theta) fvec = np.empty(59, dtype=np.float64) temp = np.empty((n - 2, n - 2, n - 2), dtype=np.float64) for j in range(1, n - 1): for i in range(1, n - 1): temp[0, i - 1, j - 1] = ( xvec[0, i, j] * -2 * (1 / h) ** 2 + xvec[0, i + 1, j] * (1 / h) ** 2 + xvec[0, i - 1, j] * (1 / h) ** 2 + xvec[0, i, j] * -2 * axx * (1 / h) ** 2 + xvec[0, i, j + 1] * axx * (1 / h) ** 2 + xvec[0, i, j - 1] * ax * (1 / h) ** 2 + xvec[1, i + 1, j] * -pi1 / (2 * h) + xvec[1, i - 1, j] * pi1 / (2 * h) + xvec[1, i, j + 1] * -pi2 / (2 * h) + xvec[1, i, j - 1] * pi2 / (2 * h) ) temp[1, i - 1, j - 1] = ( xvec[2, i, j] * -2 * (1 / h) ** 2 + xvec[2, i + 1, j] * (1 / h) ** 2 + xvec[2, i - 1, j] * (1 / h) ** 2 + xvec[2, i, j] * -2 * axx * (1 / h) ** 2 + xvec[2, i, j + 1] * axx * (1 / h) ** 2 + xvec[2, i, j - 1] * axx * (1 / h) ** 2 + xvec[0, i, j] * axx * 0.25 ) temp[2, i - 1, j - 1] = ( xvec[1, i, j] * -2 * (1 / h) ** 2 + xvec[1, i + 1, j] * (1 / h) ** 2 + xvec[1, i - 1, j] * (1 / h) ** 2 + xvec[1, i, j] * -2 * axx * (1 / h) ** 2 + xvec[1, i, j + 1] * axx * (1 / h) ** 2 + xvec[1, i, j - 1] * axx * (1 / h) ** 2 - 0.25 * ax * (1 / h) ** 2 * (xvec[2, i, j + 1] - xvec[2, i, j - 1]) * (xvec[1, i + 1, j] - xvec[1, i - 1, j]) + 0.25 * ax * (1 / h) ** 
                2
                * (xvec[2, i + 1, j] - xvec[2, i - 1, j])
                * (xvec[1, i, j + 1] - xvec[1, i, j - 1])
            )
    fvec[:27] = temp.flatten()
    # Boundary-condition residuals for plane 1 (mixed Robin conditions).
    temp = np.zeros((n, n), dtype=np.float64)
    for k in range(n):
        temp[k, -1] = a[2]
        temp[k, 0] = b[2]
        temp[0, k] = 0
        temp[-1, -1] = 0
    for k in range(n):
        temp[k, -1] += (
            xvec[1, k, -1] * 2 * a[0] * (1 / h)
            + xvec[1, k, -2] * -2 * a[0] * (1 / h)
            + xvec[1, k, -1] * a[1]
        )
        temp[k, 0] += (
            xvec[1, k, 1] * 2 * b[0] * (1 / h)
            + xvec[1, k, 0] * -2 * b[0] * (1 / h)
            + xvec[1, k, 0] * b[1]
        )
        temp[-1, k] += xvec[1, -1, k] * 2 * (1 / (ax * h)) + xvec[1, -2, k] * -2 * (
            1 / (ax * h)
        )
        temp[0, k] += xvec[1, 1, k] * 2 * (1 / (ax * h)) + xvec[1, 0, k] * -2 * (
            1 / (ax * h)
        )
    fvec[27:32] = temp[0]
    fvec[32:37] = temp[-1]
    fvec[37:40] = temp[1:-1, 0]
    fvec[40:43] = temp[1:-1, -1]
    # Boundary-condition residuals for plane 2 (Neumann-type conditions).
    temp = np.zeros((n, n), dtype=np.float64)
    for k in range(n):
        temp[k, -1] += xvec[2, k, -1] * -2 * (1 / h) + xvec[2, k, -2] * 2 * (1 / h)
        temp[k, 0] += xvec[2, k, 1] * 2 * (1 / h) + xvec[2, k, 0] * -2 * (1 / h)
        temp[-1, k] += xvec[2, -1, k] * -2 * (1 / (ax * h)) + xvec[2, -2, k] * 2 * (
            1 / (ax * h)
        )
        temp[0, k] += xvec[2, 1, k] * 2 * (1 / (ax * h)) + xvec[2, 0, k] * -2 * (
            1 / (ax * h)
        )
    fvec[43:48] = temp[0]
    fvec[48:53] = temp[-1]
    fvec[53:56] = temp[1:-1, 0]
    fvec[56:] = temp[1:-1, -1]
    return fvec


@mark.least_squares
def oscigrne(x):
    """Residual vector of the OSCIGRNE problem."""
    dim_in = len(x)
    rho = 500
    fvec = np.zeros(dim_in)
    fvec[0] = 0.5 * x[0] - 0.5 - 4 * rho * (x[1] - 2.0 * x[0] ** 2 + 1.0) * x[0]
    fvec[1:-1] = (
        2 * rho * (x[1:-1] - 2.0 * x[:-2] ** 2 + 1.0)
        - 4 * rho * (x[2:] - 2.0 * x[:-2] ** 2 + 1.0) * x[2:]
    )
    fvec[-1] = 2 * rho * (x[-1] - 2.0 * x[-2] ** 2 + 1.0)
    return fvec


@mark.least_squares
def spmsqrt(x):
    """Residual vector of the SPMSQRT problem (tridiagonal matrix square root)."""
    m = (len(x) + 2) // 3
    # Pack the parameters into a tridiagonal matrix.
    xmat = np.diag(x[2:-1:3], -1) + np.diag(x[::3], 0) + np.diag(x[1:-2:3], 1)
    # Target matrix B with pseudo-random sin entries on the three diagonals.
    b = np.zeros((m, m), dtype=np.float64)
    b[0, 0] = np.sin(1)
    b[0, 1] = np.sin(4)
    k = 2
    for i in range(1, m - 1):
        k += 1
        b[i, i - 1] = np.sin(k**2)
        k += 1
        b[i, i] = np.sin(k**2)
        k += 1
        b[i, i + 1] = np.sin(k**2)
    k += 1
    b[-1, -2] = np.sin(k**2)
    k += 1
    b[-1, -1] = np.sin(k**2)
    # fmat = X @ X restricted to the pentadiagonal sparsity pattern.
    fmat = np.zeros((m, m), dtype=np.float64)
    fmat[0, 0] = xmat[0, 0] ** 2 + xmat[0, 1] * xmat[1, 0]
    fmat[0, 1] = xmat[0, 0] * xmat[0, 1] + xmat[0, 1] * xmat[1, 1]
    fmat[0, 2] = xmat[0, 1] * xmat[1, 2]
    fmat[1, 0] = xmat[1, 0] * xmat[0, 0] + xmat[1, 1] * xmat[1, 0]
    fmat[1, 1] = xmat[1, 0] * xmat[0, 1] + xmat[1, 1] ** 2 + xmat[1, 2] * xmat[2, 1]
    fmat[1, 2] = xmat[1, 1] * xmat[1, 2] + xmat[1, 2] * xmat[2, 2]
    fmat[1, 3] = xmat[1, 2] * xmat[2, 3]
    for i in range(2, m - 2):
        fmat[i, i - 2] = xmat[i, i - 1] * xmat[i - 1, i - 2]
        fmat[i, i - 1] = (
            xmat[i, i - 1] * xmat[i - 1, i - 1] + xmat[i, i] * xmat[i, i - 1]
        )
        fmat[i, i] = (
            xmat[i, i - 1] * xmat[i - 1, i]
            + xmat[i, i] ** 2
            + xmat[i, i + 1] * xmat[i + 1, i]
        )
        fmat[i, i + 1] = (
            xmat[i, i] * xmat[i, i + 1] + xmat[i, i + 1] * xmat[i + 1, i + 1]
        )
        fmat[i, i + 2] = xmat[i, i + 1] * xmat[i + 1, i + 2]
    fmat[-2, -4] = xmat[-2, -3] * xmat[-3, -4]
    fmat[-2, -3] = xmat[-2, -3] * xmat[-3, -3] + xmat[-2, -2] * xmat[-2, -3]
    fmat[-2, -2] = (
        xmat[-2, -3] * xmat[-3, -2] + xmat[-2, -2] ** 2 + xmat[-2, -1] * xmat[-1, -2]
    )
    fmat[-2, -1] = xmat[-2, -2] * xmat[-2, -1] + xmat[-2, -1] * xmat[-1, -1]
    fmat[-1, -3] = xmat[-1, -2] * xmat[-2, -3]
    fmat[-1, -2] = xmat[-1, -2] * xmat[-2, -2] + xmat[-1, -1] * xmat[-1, -2]
    fmat[-1, -1] = xmat[-1, -2] * xmat[-2, -1] + xmat[-1, -1] ** 2
    # Subtract the corresponding entries of B @ B.
    fmat[0, 0] -= b[0, 0] ** 2 + b[0, 1] * b[1, 0]
    for i in range(1, m - 1):
        fmat[i, i] -= (
            b[i, i] ** 2 + b[i - 1, i] * b[i, i - 1] + b[i + 1, i] * b[i, i + 1]
        )
    fmat[-1, -1] -= b[-1, -1] ** 2 + b[-2, -1] * b[-1, -2]
    for i in range(m - 1):
        fmat[i + 1, i] -= b[i + 1, i] * b[i, i] + b[i + 1, i + 1] * b[i + 1, i]
    for i in range(1, m):
        fmat[i - 1, i] -= b[i - 1, i] * b[i, i] + b[i - 1, i - 1] * b[i - 1, i]
    for i in range(1, m - 1):
        fmat[i + 1, i - 1] -= b[i + 1, i] * b[i, i - 1]
    for i in range(1, m - 1):
        fmat[i - 1, i + 1] -= b[i - 1, i] * b[i, i + 1]
    return fmat.flatten()


@mark.least_squares
def semicon2(x):
    """Residual vector of the SEMICON2 problem (semiconductor equation)."""
    n = len(x) // 1
    ln = 9 * n // \
        10
    lambda_ = 0.2
    a = -0.00009
    b = 0.00001
    ua = 0.0
    ub = 700.0
    ca = 1e12
    cb = 1e13
    beta = 40.0
    h = (b - a) / (n + 1)
    lb = lambda_ * beta
    lua = lambda_ * ua
    lub = lambda_ * ub
    # Extend the parameter vector with the fixed boundary values.
    xvec = np.zeros(n + 2, dtype=np.float64)
    xvec[0] = lua
    xvec[1:-1] = x
    xvec[-1] = lub
    fvec = np.zeros(n, dtype=np.float64)
    # The two loops differ only in which exponential carries the constant term.
    for i in range(1, ln + 1):
        fvec[i - 1] = (
            xvec[i - 1]
            - 2 * xvec[i]
            + xvec[i + 1]
            + lambda_ * (h**2) * ca * np.exp(-lb * (xvec[i] - lua))
            - lambda_ * (h**2) * cb * np.exp(lb * (xvec[i] - lub))
            - lambda_ * (h**2) * ca
        )
    for i in range(ln + 1, n + 1):
        fvec[i - 1] = (
            xvec[i - 1]
            - 2 * xvec[i]
            + xvec[i + 1]
            - lambda_ * (h**2) * cb * np.exp(lb * (xvec[i] - lub))
            + lambda_ * (h**2) * ca * np.exp(-lb * (xvec[i] - lua))
            + lambda_ * (h**2) * cb
        )
    return fvec


@mark.least_squares
def qr3d(x, m=5):
    """Residual vector of the QR3D problem (QR factorization residuals)."""
    q = x[: m**2].reshape(m, m)
    r = np.zeros((m, m), dtype=np.float64)
    r[np.triu_indices_from(r)] = x[m**2 :]
    # Target matrix A (tridiagonal with modified corners).
    a = (
        np.diag((1 - np.arange(2, m + 1)) / m, -1)
        + np.diag(2 * np.arange(1, m + 1) / m, 0)
        + np.diag((1 - np.arange(1, m)) / m, 1)
    )
    a[0, 1] = 0
    a[-1, -2] = (1 - m) / m
    a[-1, -1] = 2 * m
    omat = np.zeros((m, m), dtype=np.float64)  # triu
    fmat = np.zeros((m, m), dtype=np.float64)
    # Orthogonality residuals: Q @ Q.T - I (upper triangle only).
    for i in range(m):
        for j in range(i, m):
            for k in range(m):
                omat[i, j] += q[i, k] * q[j, k]
    # Factorization residuals: Q @ R - A.
    for i in range(m):
        for j in range(m):
            for k in range(j + 1):
                fmat[i, j] += q[i, k] * r[k, j]
    for i in range(m):
        omat[i, i] -= 1
    fmat[0, 0] -= a[0, 0]
    fmat[0, 1] -= a[0, 1]
    for i in range(1, m - 1):
        fmat[i, i - 1] -= a[i, i - 1]
        fmat[i, i] -= a[i, i]
        fmat[i, i + 1] -= a[i, i + 1]
    fmat[-1, -2] -= a[-1, -2]
    fmat[-1, -1] -= a[-1, -1]
    return np.concatenate((omat[np.triu_indices_from(omat)].flatten(), fmat.flatten()))


@mark.least_squares
def qr3dbd(x, m=5):
    """Residual vector of the QR3DBD problem (banded variant of QR3D)."""
    q = x[: m**2].reshape(m, m)
    # R is banded; unpack the band entries row by row.
    r = np.zeros((m, m), dtype=np.float64)
    r[0, :-2] = x[m**2 : -9]
    r[1, 1:-1] = x[-9:-6]
    r[2, 2:] = x[-6:-3]
    r[3, 3:] = x[-3:-1]
    r[4, 4] = x[-1]
    a = (
        np.diag((1 - np.arange(2, m + 1)) / m, -1)
        + np.diag(2 * np.arange(1, m + 1) / m, 0)
        + np.diag((1 - np.arange(1, m)) / m, 1)
    )
    a[0, 1] = 0
    a[-1, -2] = (1 - m) / m
    a[-1, -1] = 2 * m
    omat = np.zeros((m, m), dtype=np.float64)  # triu
    fmat = np.zeros((m, m), dtype=np.float64)
    for i in range(m):
        for j in range(i, m):
            for k in range(m):
                omat[i, j] += q[i, k] * q[j, k]
    # Banded Q @ R: only three r-entries contribute per column.
    for i in range(m):
        fmat[i, 0] += q[i, 0] * r[0, 0]
        fmat[i, 1] += q[i, 0] * r[0, 1] + q[i, 1] * r[1, 1]
        for j in range(2, m):
            for k in range(j - 2, j + 1):
                fmat[i, j] += q[i, k] * r[k, j]
    for i in range(m):
        omat[i, i] -= 1
    fmat[0, 0] -= a[0, 0]
    fmat[0, 1] -= a[0, 1]
    for i in range(1, m - 1):
        fmat[i, i - 1] -= a[i, i - 1]
        fmat[i, i] -= a[i, i]
        fmat[i, i + 1] -= a[i, i + 1]
    fmat[-1, -2] -= a[-1, -2]
    fmat[-1, -1] -= a[-1, -1]
    return np.concatenate((omat[np.triu_indices_from(omat)].flatten(), fmat.flatten()))


@mark.least_squares
def eigen(x, param):
    """Residual vector for the EIGEN problems: eigen-decomposition of `param`."""
    dim_in = int(np.sqrt(len(x) + 0.25))
    dvec = x[:dim_in]
    qmat = x[dim_in:].reshape(dim_in, dim_in)
    # NOTE(review): residuals use qmat @ ... @ qmat (not qmat.T) — matches the
    # SIF formulation where Q is symmetric at the solution; confirm against SIF.
    emat = qmat @ np.diag(dvec) @ qmat - param
    omat = qmat @ qmat - np.eye(dim_in)
    return np.concatenate((emat.flatten(), omat.flatten()))


@mark.least_squares
def powell_singular(x):
    """Residual vector of the extended Powell singular function."""
    dim_in = len(x)
    fvec = np.zeros(dim_in)
    fvec[::4] = x[::4] + 10 * x[1::4]
    fvec[1::4] = 5 * (x[2::4] - x[3::4])
    fvec[2::4] = (x[1::4] - 2 * x[2::4]) ** 2
    fvec[3::4] = 10 * (x[0::4] - x[3::4]) ** 2
    return fvec


@mark.least_squares
@njit
def hydcar(
    x_in,
    n,
    m,
    k,
):
    """Residual vector of the HYDCAR problems (distillation column model).

    Args:
        x_in: flat parameter vector holding mole fractions x (n*m), stage
            temperatures t (n) and vapor rates v (n-1, tail of the vector).
        n: number of stages.
        m: number of components (3).
        k: feed stage index.
    """
    x = x_in[: (n * m)].reshape((n, m))
    t = x_in[(n * m) : 4 * n]
    v = x_in[4 * n :]
    # Antoine-type vapor pressure coefficients and enthalpy constants.
    avec = np.array([9.647, 9.953, 9.466], dtype=np.float64)
    bvec = np.array([-2998, -3448.10, -3347.25], dtype=np.float64)
    cvec = np.array([230.66, 235.88, 215.31], dtype=np.float64)
    alp = np.array([37.6, 48.2, 45.4], dtype=np.float64)
    be = np.array([8425, 9395, 10466], dtype=np.float64)
    bep = np.array([24.2, 35.6, 31.9], dtype=np.float64)
    fl = np.array([30, 30, 40], dtype=np.float64)
    tf = 100.0
    b = 40.0
    d = 60.0
    q = 2500000.0
    out = np.empty(n * 5 - 1, dtype=np.float64)
    fvec1 = np.zeros(m, dtype=np.float64)
    fvec3 = np.zeros(m, dtype=np.float64)
    fvec2 = \
        np.zeros((n - 2, m), dtype=np.float64)
    fvec7 = np.zeros(n, dtype=np.float64)
    fvec8 = 0
    fvec9 = np.zeros(n - 2, dtype=np.float64)
    # Component mass balances at the bottom (fvec1) and top (fvec3) stages.
    for j in range(m):
        fvec1[j] += x[0, j] * b
        fvec3[j] += -x[n - 1, j]
    for j in range(m):
        fvec1[j] += -1 * x[1, j] * (v[0] + b)
        fvec1[j] += v[0] * x[0, j] * np.exp(avec[j] + (bvec[j] / (t[0] + cvec[j])))
        fvec3[j] += x[n - 2, j] * np.exp(avec[j] + (bvec[j] / (t[n - 2] + cvec[j])))
        # Energy balance at the reboiler.
        fvec8 += (
            (
                v[0]
                * x[0, j]
                * np.exp(avec[j] + (bvec[j] / (t[0] + cvec[j])))
                * (be[j] + bep[j] * t[0])
            )
            + b * x[0, j] * (alp[j] * t[0])
            - x[1, j] * (b + v[0]) * (alp[j] * t[1])
        )
        # Interior-stage mass (fvec2) and energy (fvec9) balance contributions.
        for i in range(1, n - 1):
            fvec2[i - 1, j] += (
                v[i - 1]
                * x[i - 1, j]
                * (-1)
                * np.exp(avec[j] + (bvec[j] / (t[i - 1] + cvec[j])))
            )
            fvec2[i - 1, j] += (
                v[i] * x[i, j] * np.exp(avec[j] + (bvec[j] / (t[i] + cvec[j])))
            )
            fvec9[i - 1] += (
                v[i]
                * x[i, j]
                * np.exp(avec[j] + (bvec[j] / (t[i] + cvec[j])))
                * (be[j] + bep[j] * t[i])
            )
            fvec9[i - 1] += (
                v[i - 1]
                * x[i - 1, j]
                * (-1)
                * np.exp(avec[j] + (bvec[j] / (t[i - 1] + cvec[j])))
                * (be[j] + bep[j] * t[i - 1])
            )
        # Summation-of-mole-fractions residuals per stage.
        for i in range(n):
            fvec7[i] += x[i, j] * np.exp(avec[j] + (bvec[j] / (t[i] + cvec[j])))
    # Liquid-flow terms; flows switch sign/offset at the feed stage k.
    for j in range(m):
        for i in range(1, k):
            fvec2[i - 1, j] += -1 * x[i + 1, j] * (v[i] + b)
            fvec2[i - 1, j] += x[i, j] * (v[i - 1] + b)
        fvec2[k - 1, j] += -1 * x[k + 1, j] * (v[k] - d)
        fvec2[k - 1, j] += x[k, j] * (v[k - 1] + b)
        for i in range(k + 1, n - 1):
            fvec2[i - 1, j] += -1 * x[i + 1, j] * (v[i] - d)
            fvec2[i - 1, j] += x[i, j] * (v[i - 1] - d)
    for j in range(m):
        for i in range(1, k):
            fvec9[i - 1] += 1 * x[i, j] * (v[i - 1] + b) * (alp[j] * t[i])
            fvec9[i - 1] += (-1) * x[i + 1, j] * (v[i] + b) * (alp[j] * t[i + 1])
        # NOTE(review): `t[i]` below reuses the loop variable left over from the
        # loop above (its final value), not `t[k]` — looks suspicious but is
        # kept as-is; confirm against the HYDCAR SIF file before changing.
        fvec9[k - 1] += 1 * x[k, j] * (v[k - 1] + b) * (alp[j] * t[i])
        fvec9[k - 1] += (-1) * x[k + 1, j] * (v[k] - d) * (alp[j] * t[k + 1])
        for i in range(k + 1, n - 1):
            fvec9[i - 1] += 1 * x[i, j] * (v[i - 1] - d) * (alp[j] * t[i])
            fvec9[i - 1] += (-1) * x[i + 1, j] * (v[i] - d) * (alp[j] * t[i + 1])
    # Feed-stage corrections and constant offsets.
    smallhf = 0
    for j in range(m):
        fvec2[k - 1, j] -= fl[j]
        smallhf += (tf * alp[j]) * fl[j]
    fvec7 -= 1
    fvec8 -= q
    fvec9[k - 1] -= smallhf
    # Assemble and scale the residual vector.
    out[:m] = fvec1 * 1e-2
    out[m : 2 * m] = fvec3
    out[2 * m : (n - 2) * m + 2 * m] = fvec2.flatten() * 1e-2
    out[(n - 2) * m + 2 * m : (n - 2) * m + 2 * m + n] = fvec7
    out[(n - 2) * m + 2 * m + n] = fvec8 * 1e-5
    out[-(n - 2) :] = fvec9 * 1e-5
    return out


@mark.least_squares
def methane(x):
    """Residual vector of the METHANE problem (31 hard-coded balance equations)."""
    fvec = np.zeros(31, dtype=np.float64)
    fvec[0] = 0.01 * (
        0.000826446280991736
        * x[24]
        * x[1]
        * np.exp(18.5751 - 3632.649 / (239.2 + x[0]))
        - x[4] * (693.37 + x[24])
        + 693.37 * x[1]
    )
    fvec[1] = (
        0.000869565217391304 * np.exp(18.5751 - 3632.649 / (239.2 + x[18])) * x[19]
        - x[22]
    )
    fvec[2] = 0.01 * (
        -0.000826446280991736
        * x[24]
        * x[1]
        * np.exp(18.5751 - 3632.649 / (239.2 + x[0]))
        - x[7] * (693.37 + x[25])
        + x[4] * (693.37 + x[24])
        + 0.000833333333333333
        * x[25]
        * x[4]
        * np.exp(18.5751 - 3632.649 / (239.2 + x[3]))
    )
    fvec[3] = -4.5125 + 0.01 * (
        -0.000833333333333333
        * x[25]
        * x[4]
        * np.exp(18.5751 - 3632.649 / (239.2 + x[3]))
        - x[10] * (-442.13 + x[26])
        + x[7] * (693.37 + x[25])
        + 0.000840336134453782
        * x[26]
        * x[7]
        * np.exp(18.5751 - 3632.649 / (239.2 + x[6]))
    )
    fvec[4] = 0.01 * (
        -0.000840336134453782
        * x[26]
        * x[7]
        * np.exp(18.5751 - 3632.649 / (239.2 + x[6]))
        - x[13] * (-442.13 + x[27])
        + x[10] * (-442.13 + x[26])
        + 0.000847457627118644
        * x[27]
        * x[10]
        * np.exp(18.5751 - 3632.649 / (239.2 + x[9]))
    )
    fvec[5] = 0.01 * (
        -0.000847457627118644
        * x[27]
        * x[10]
        * np.exp(18.5751 - 3632.649 / (239.2 + x[9]))
        - x[16] * (-442.13 + x[28])
        + x[13] * (-442.13 + x[27])
        + 0.000854700854700855
        * x[28]
        * x[13]
        * np.exp(18.5751 - 3632.649 / (239.2 + x[12]))
    )
    fvec[6] = 0.01 * (
        -0.000854700854700855
        * x[28]
        * x[13]
        * np.exp(18.5751 - 3632.649 / (239.2 + x[12]))
        - x[19] * (-442.13 + x[29])
        + x[16] * (-442.13 + x[28])
        + 0.000862068965517241
        * x[29]
        * x[16]
        * np.exp(18.5751 - 3632.649 / (239.2 + x[15]))
    )
    fvec[7] = 0.01 * (
        -0.000862068965517241
        * x[29]
        * x[16]
        * np.exp(18.5751 - 3632.649 / (239.2 + x[15]))
        -
        x[22] * (-442.13 + x[30])
        + x[19] * (-442.13 + x[29])
        + 0.000869565217391304
        * x[30]
        * x[19]
        * np.exp(18.5751 - 3632.649 / (239.2 + x[18]))
    )
    # Second-component mass balances (different Antoine constants).
    fvec[8] = 0.01 * (
        0.000826446280991736
        * x[24]
        * x[2]
        * np.exp(18.3443 - 3841.2203 / (228 + x[0]))
        - x[5] * (693.37 + x[24])
        + 693.37 * x[2]
    )
    fvec[9] = (
        0.000869565217391304 * np.exp(18.3443 - 3841.2203 / (228 + x[18])) * x[20]
        - x[23]
    )
    fvec[10] = 0.01 * (
        -0.000826446280991736
        * x[24]
        * x[2]
        * np.exp(18.3443 - 3841.2203 / (228 + x[0]))
        - x[8] * (693.37 + x[25])
        + x[5] * (693.37 + x[24])
        + 0.000833333333333333
        * x[25]
        * x[5]
        * np.exp(18.3443 - 3841.2203 / (228 + x[3]))
    )
    fvec[11] = -6.8425 + 0.01 * (
        -0.000833333333333333
        * x[25]
        * x[5]
        * np.exp(18.3443 - 3841.2203 / (228 + x[3]))
        - x[11] * (-442.13 + x[26])
        + x[8] * (693.37 + x[25])
        + 0.000840336134453782
        * x[26]
        * x[8]
        * np.exp(18.3443 - 3841.2203 / (228 + x[6]))
    )
    fvec[12] = 0.01 * (
        -0.000840336134453782
        * x[26]
        * x[8]
        * np.exp(18.3443 - 3841.2203 / (228 + x[6]))
        - x[14] * (-442.13 + x[27])
        + x[11] * (-442.13 + x[26])
        + 0.000847457627118644
        * x[27]
        * x[11]
        * np.exp(18.3443 - 3841.2203 / (228 + x[9]))
    )
    fvec[13] = 0.01 * (
        -0.000847457627118644
        * x[27]
        * x[11]
        * np.exp(18.3443 - 3841.2203 / (228 + x[9]))
        - x[17] * (-442.13 + x[28])
        + x[14] * (-442.13 + x[27])
        + 0.000854700854700855
        * x[28]
        * x[14]
        * np.exp(18.3443 - 3841.2203 / (228 + x[12]))
    )
    fvec[14] = 0.01 * (
        -0.000854700854700855
        * x[28]
        * x[14]
        * np.exp(18.3443 - 3841.2203 / (228 + x[12]))
        - x[20] * (-442.13 + x[29])
        + x[17] * (-442.13 + x[28])
        + 0.000862068965517241
        * x[29]
        * x[17]
        * np.exp(18.3443 - 3841.2203 / (228 + x[15]))
    )
    fvec[15] = 0.01 * (
        -0.000862068965517241
        * x[29]
        * x[17]
        * np.exp(18.3443 - 3841.2203 / (228 + x[15]))
        - x[23] * (-442.13 + x[30])
        + x[20] * (-442.13 + x[29])
        + 0.000869565217391304
        * x[30]
        * x[20]
        * np.exp(18.3443 - 3841.2203 / (228 + x[18]))
    )
    # Mole-fraction summation residuals, one per stage.
    fvec[16] = (
        -1
        + 0.000826446280991736 * np.exp(18.5751 - 3632.649 / (239.2 + x[0])) * x[1]
        + 0.000826446280991736 * np.exp(18.3443 - 3841.2203 / (228 + x[0])) * x[2]
    )
    fvec[17] = (
        -1
        + 0.000833333333333333 * np.exp(18.5751 - 3632.649 / (239.2 + x[3])) * x[4]
        + 0.000833333333333333 * np.exp(18.3443 - 3841.2203 / (228 + x[3])) * x[5]
    )
    fvec[18] = (
        -1
        + 0.000840336134453782 * np.exp(18.5751 - 3632.649 / (239.2 + x[6])) * x[7]
        + 0.000840336134453782 * np.exp(18.3443 - 3841.2203 / (228 + x[6])) * x[8]
    )
    fvec[19] = (
        -1
        + 0.000847457627118644 * np.exp(18.5751 - 3632.649 / (239.2 + x[9])) * x[10]
        + 0.000847457627118644 * np.exp(18.3443 - 3841.2203 / (228 + x[9])) * x[11]
    )
    fvec[20] = (
        -1
        + 0.000854700854700855 * np.exp(18.5751 - 3632.649 / (239.2 + x[12])) * x[13]
        + 0.000854700854700855 * np.exp(18.3443 - 3841.2203 / (228 + x[12])) * x[14]
    )
    fvec[21] = (
        -1
        + 0.000862068965517241 * np.exp(18.5751 - 3632.649 / (239.2 + x[15])) * x[16]
        + 0.000862068965517241 * np.exp(18.3443 - 3841.2203 / (228 + x[15])) * x[17]
    )
    fvec[22] = (
        -1
        + 0.000869565217391304 * np.exp(18.5751 - 3632.649 / (239.2 + x[18])) * x[19]
        + 0.000869565217391304 * np.exp(18.3443 - 3841.2203 / (228 + x[18])) * x[20]
    )
    fvec[23] = (
        -1
        + 0.00087719298245614 * np.exp(18.5751 - 3632.649 / (239.2 + x[21])) * x[22]
        + 0.00087719298245614 * np.exp(18.3443 - 3841.2203 / (228 + x[21])) * x[23]
    )
    # Energy balances, scaled by 1e-5.
    fvec[24] = -83.862 + 1e-5 * (
        0.000826446280991736
        * x[24]
        * x[1]
        * np.exp(18.5751 - 3632.649 / (239.2 + x[0]))
        * (9566.67 + 0.0422 * x[0] * x[0] - 1.59 * x[0])
        + 693.37 * (0.0422 * x[0] * x[0] + 15.97 * x[0]) * x[1]
        - x[4] * (693.37 + x[24]) * (0.0422 * x[3] * x[3] + 15.97 * x[3])
        + 0.000826446280991736
        * x[24]
        * x[2]
        * np.exp(18.3443 - 3841.2203 / (228 + x[0]))
        * (10834.67 + 8.74 * x[0])
        + 12549.997 * x[2] * x[0]
        - 18.1 * x[5] * (693.37 + x[24]) * x[3]
    )
    fvec[25] = 1e-5 * (
        0.000833333333333333
        * x[25]
        * x[4]
        * np.exp(18.5751 - 3632.649 / (239.2 + x[3]))
        * (9566.67 + 0.0422 * x[3] * x[3] - 1.59 * x[3])
        + x[4] * (693.37 + x[24]) * (0.0422 * x[3] * x[3] + 15.97 * x[3])
        - 0.000826446280991736
        * x[24]
        * x[1]
        * np.exp(18.5751 - 3632.649 / (239.2 + x[0]))
        * (9566.67 + 0.0422 * x[0] * x[0] - 1.59 * x[0])
        - x[7] * (693.37 + x[25]) * (0.0422 * x[6] * x[6] + 15.97 * x[6])
        + 0.000833333333333333
        * x[25]
        * x[5]
        * np.exp(18.3443 - 3841.2203 / (228 + x[3]))
        * (10834.67 + 8.74 * x[3])
        + 18.1 * x[5] * (693.37 + x[24]) * x[3]
        - 0.000826446280991736
        * x[24]
        * x[2]
        * np.exp(18.3443 - 3841.2203 / (228 + x[0]))
        * (10834.67 + 8.74 * x[0])
        - 18.1 * x[8] * (693.37 + x[25]) * x[6]
    )
    fvec[26] = -18.9447111025 + 1e-5 * (
        0.000840336134453782
        * x[26]
        * x[7]
        * np.exp(18.5751 - 3632.649 / (239.2 + x[6]))
        * (9566.67 + 0.0422 * x[6] * x[6] - 1.59 * x[6])
        + x[7] * (693.37 + x[25]) * (0.0422 * x[6] * x[6] + 15.97 * x[6])
        - 0.000833333333333333
        * x[25]
        * x[4]
        * np.exp(18.5751 - 3632.649 / (239.2 + x[3]))
        * (9566.67 + 0.0422 * x[3] * x[3] - 1.59 * x[3])
        - x[10] * (-442.13 + x[26]) * (0.0422 * x[9] * x[9] + 15.97 * x[9])
        + 0.000840336134453782
        * x[26]
        * x[8]
        * np.exp(18.3443 - 3841.2203 / (228 + x[6]))
        * (10834.67 + 8.74 * x[6])
        + 18.1 * x[8] * (693.37 + x[25]) * x[6]
        - 0.000833333333333333
        * x[25]
        * x[5]
        * np.exp(18.3443 - 3841.2203 / (228 + x[3]))
        * (10834.67 + 8.74 * x[3])
        - 18.1 * x[11] * (-442.13 + x[26]) * x[9]
    )
    fvec[27] = 1e-5 * (
        0.000847457627118644
        * x[27]
        * x[10]
        * np.exp(18.5751 - 3632.649 / (239.2 + x[9]))
        * (9566.67 + 0.0422 * x[9] * x[9] - 1.59 * x[9])
        + x[10] * (-442.13 + x[26]) * (0.0422 * x[9] * x[9] + 15.97 * x[9])
        - 0.000840336134453782
        * x[26]
        * x[7]
        * np.exp(18.5751 - 3632.649 / (239.2 + x[6]))
        * (9566.67 + 0.0422 * x[6] * x[6] - 1.59 * x[6])
        - x[13] * (-442.13 + x[27]) * (0.0422 * x[12] * x[12] + 15.97 * x[12])
        + 0.000847457627118644
        * x[27]
        * x[11]
        * np.exp(18.3443 - 3841.2203 / (228 + x[9]))
        * (10834.67 + 8.74 * x[9])
        + 18.1 * x[11] * (-442.13 + x[26]) * x[9]
        - 0.000840336134453782
        * x[26]
        * x[8]
        * np.exp(18.3443 - 3841.2203 / (228 + x[6]))
        * (10834.67 + 8.74 * x[6])
        - 18.1 * x[14] * (-442.13 + x[27]) * x[12]
    )
    fvec[28] = 1e-5 * (
        0.000854700854700855
        * x[28]
        * x[13]
        * np.exp(18.5751 - 3632.649 / (239.2 + x[12]))
        * (9566.67 + 0.0422 * x[12] * x[12] - 1.59 * x[12])
        + x[13] * (-442.13 + x[27]) * (0.0422 * x[12] * x[12] + 15.97 * x[12])
        - 0.000847457627118644
        * x[27]
        * x[10]
        * np.exp(18.5751 - 3632.649 / (239.2 + x[9]))
        * (9566.67 + 0.0422 * x[9] * x[9] - 1.59 * x[9])
        - x[16] * (-442.13 + x[28]) * (0.0422 * x[15] * x[15] + 15.97 * x[15])
        + 0.000854700854700855
        * x[28]
        * x[14]
        * np.exp(18.3443 - 3841.2203 / (228 + x[12]))
        * (10834.67 + 8.74 * x[12])
        + 18.1 * x[14] * (-442.13 + x[27]) * x[12]
        - 0.000847457627118644
        * x[27]
        * x[11]
        * np.exp(18.3443 - 3841.2203 / (228 + x[9]))
        * (10834.67 + 8.74 * x[9])
        - 18.1 * x[17] * (-442.13 + x[28]) * x[15]
    )
    fvec[29] = 1e-5 * (
        0.000862068965517241
        * x[29]
        * x[16]
        * np.exp(18.5751 - 3632.649 / (239.2 + x[15]))
        * (9566.67 + 0.0422 * x[15] * x[15] - 1.59 * x[15])
        + x[16] * (-442.13 + x[28]) * (0.0422 * x[15] * x[15] + 15.97 * x[15])
        - 0.000854700854700855
        * x[28]
        * x[13]
        * np.exp(18.5751 - 3632.649 / (239.2 + x[12]))
        * (9566.67 + 0.0422 * x[12] * x[12] - 1.59 * x[12])
        - x[19] * (-442.13 + x[29]) * (0.0422 * x[18] * x[18] + 15.97 * x[18])
        + 0.000862068965517241
        * x[29]
        * x[17]
        * np.exp(18.3443 - 3841.2203 / (228 + x[15]))
        * (10834.67 + 8.74 * x[15])
        + 18.1 * x[17] * (-442.13 + x[28]) * x[15]
        - 0.000854700854700855
        * x[28]
        * x[14]
        * np.exp(18.3443 - 3841.2203 / (228 + x[12]))
        * (10834.67 + 8.74 * x[12])
        - 18.1 * x[20] * (-442.13 + x[29]) * x[18]
    )
    fvec[30] = 1e-5 * (
        0.000869565217391304
        * x[30]
        * x[19]
        * np.exp(18.5751 - 3632.649 / (239.2 + x[18]))
        * (9566.67 + 0.0422 * x[18] * x[18] - 1.59 * x[18])
        + x[19] * (-442.13 + x[29]) * (0.0422 * x[18] * x[18] + 15.97 * x[18])
        - 0.000862068965517241
        * x[29]
        * x[16]
        * np.exp(18.5751 - 3632.649 / (239.2 + x[15]))
        * (9566.67 + 0.0422 * x[15] * x[15] - 1.59 * x[15])
        - x[22] * (-442.13 + x[30]) * (0.0422 * x[21] * x[21] + 15.97 * x[21])
        + 0.000869565217391304
        * x[30]
        * x[20]
        * np.exp(18.3443 - 3841.2203 / (228 + x[18]))
        * (10834.67 + 8.74 * x[18])
        + 18.1 * x[20] * (-442.13 + x[29]) * x[18]
        -
0.000862068965517241 * x[29] * x[17] * np.exp(18.3443 - 3841.2203 / (228 + x[15])) * (10834.67 + 8.74 * x[15]) - 18.1 * x[23] * (-442.13 + x[30]) * x[21] ) return fvec @mark.least_squares def argtrig(x): dim_in = len(x) fvec = ( dim_in - np.sum(np.cos(x)) + np.arange(1, dim_in + 1) * (1 - np.cos(x) - np.sin(x)) ) return fvec @mark.least_squares def artif(x): dim_in = len(x) xvec = np.zeros(dim_in + 2, dtype=np.float64) xvec[1:-1] = x fvec = np.zeros(dim_in, dtype=np.float64) for i in range(dim_in): fvec[i] = -0.05 * (xvec[i + 1] + xvec[i + 2] + xvec[i]) + np.arctan( np.sin(np.mod(i + 1, 100) * xvec[i + 1]) ) return fvec @mark.least_squares def arwhdne(x): dim_in = len(x) fvec = np.zeros(2 * (dim_in - 1)) fvec[: dim_in - 1] = x[:-1] ** 2 + x[-1] ** 2 fvec[dim_in - 1 :] = 4 * x[:-1] - 3 return fvec @mark.least_squares @njit def bdvalues(x): dim_in = len(x) h = 1 / (dim_in + 1) xvec = np.zeros(dim_in + 2, dtype=np.float64) for i in range(dim_in): xvec[i + 1] = x[i] fvec = np.zeros(dim_in, dtype=np.float64) for i in range(2, dim_in + 2): fvec[i - 2] = ( -xvec[i - 2] + 2 * xvec[i - 1] - xvec[i] + 0.5 * h**2 * (xvec[i - 1] + i * h + 1) ** 3 ) return fvec @mark.least_squares def bratu_2d(x, alpha): x = x.reshape((int(np.sqrt(len(x))), int(np.sqrt(len(x))))) p = x.shape[0] + 2 h = 1 / (p - 1) c = h**2 * alpha xvec = np.zeros((x.shape[0] + 2, x.shape[1] + 2), dtype=np.float64) xvec[1 : x.shape[0] + 1, 1 : x.shape[1] + 1] = x fvec = np.zeros(x.shape) for i in range(2, p): for j in range(2, p): fvec[i - 2, j - 2] = ( 4 * xvec[i - 1, j - 1] - xvec[i, j - 1] - xvec[i - 2, j - 1] - xvec[i - 1, j] - xvec[i - 1, j - 2] - c * np.exp(xvec[i - 1, j - 1]) ) return fvec.flatten() @mark.least_squares def bratu_3d(x, alpha): n = int(np.cbrt(len(x))) x = x.reshape((n, n, n)) p = x.shape[0] + 2 h = 1 / (p - 1) c = h**2 * alpha xvec = np.zeros((x.shape[0] + 2, x.shape[1] + 2, x.shape[2] + 2), dtype=np.float64) xvec[1 : x.shape[0] + 1, 1 : x.shape[1] + 1, 1 : x.shape[2] + 1] = x fvec = 
np.zeros(x.shape, dtype=np.float64) for i in range(2, p): for j in range(2, p): for k in range(2, p): fvec[i - 2, j - 2, k - 2] = ( 6 * xvec[i - 1, j - 1, k - 1] - xvec[i, j - 1, k - 1] - xvec[i - 2, j - 1, k - 1] - xvec[i - 1, j, k - 1] - xvec[i - 1, j - 2, k - 1] - xvec[i - 1, j - 1, k] - xvec[i - 1, j - 1, k - 2] - c * np.exp(xvec[i, j, k]) ) return fvec.flatten() @mark.least_squares def broydn_3d(x): kappa_1 = 2 kappa_2 = 1 fvec = np.zeros_like(x) fvec[0] = -2 * x[1] + kappa_2 + (3 - kappa_1 * x[0]) * x[0] fvec[1 : len(x) - 1] = ( -x[:-2] - 2 * x[2:] + kappa_2 + (3 - kappa_1 * x[1:-1]) * x[1:-1] ) fvec[-1] = -x[-2] + kappa_2 + (3 - kappa_1 * x[-1]) * x[-1] return fvec @mark.least_squares def broydn_bd(x): dim_in = len(x) fvec = np.zeros(dim_in, dtype=np.float64) for i in range(1, 1 + dim_in): ji = [] lb = max(1, i - 5) ub = min(dim_in, i + 1) for j in range(lb, ub + 1): if j != i: ji.append(j) fvec[i - 1] = x[i - 1] * (2 + 5 * x[i - 1] ** 2) - np.sum( x[np.array(ji) - 1] * (1 + x[np.array(ji) - 1]) ) return fvec @mark.least_squares def cbratu_2d(x): n = int(np.sqrt(len(x) / 2)) x = x.reshape((2, n, n)) xvec = np.zeros((x.shape[0], x.shape[1] + 2, x.shape[2] + 2), dtype=np.float64) xvec[0, 1 : x.shape[1] + 1, 1 : x.shape[2] + 1] = x[0, :, :] xvec[1, 1 : x.shape[1] + 1, 1 : x.shape[2] + 1] = x[1, :, :] p = x.shape[1] + 2 h = 1 / (p - 1) alpha = 5 c = h**2 * alpha fvec = np.zeros(x.shape, dtype=np.float64) for i in range(2, p): for j in range(2, p): fvec[0, i - 2, j - 2] = ( 4 * xvec[0, i - 1, j - 1] - xvec[0, i, j - 1] - xvec[0, i - 2, j - 1] - xvec[0, i - 1, j] - xvec[0, i - 1, j - 2] - c * np.exp(xvec[0, i - 1, j - 1]) * np.cos(xvec[0, i - 1, j - 1]) ) fvec[1, i - 2, j - 2] = ( 4 * xvec[1, i - 1, j - 1] - xvec[1, i, j - 1] - xvec[1, i - 2, j - 1] - xvec[1, i - 1, j] - xvec[1, i - 1, j - 2] - c * np.exp(xvec[1, i - 1, j - 1]) * np.sin(xvec[1, i - 1, j - 1]) ) return fvec.flatten() @mark.least_squares def chandheq(x): dim_in = len(x) constant = 1 w = 
np.ones(dim_in, dtype=np.int64) / dim_in
    # Uniform quadrature weights w; integer ones / dim_in yields float64.
    h = np.ones(dim_in, dtype=np.int64)
    fvec = np.zeros(dim_in, dtype=np.float64)
    for i in range(dim_in):
        # Discretized Chandrasekhar H-equation residual at node i; the
        # division broadcasts x[i] against the whole vector x.
        fvec[i] = (-0.5 * constant * w * x[i] / (x[i] + x) * h[i] * h + h[i] - 1).sum()
    return fvec


@mark.least_squares
@njit
def chemrcta(x):
    # Tubular chemical reactor residuals. The first half of x is one profile
    # (row 0 after the reshape), the second half is the other (row 1); plain
    # loops are kept so the function stays numba (njit) compatible.
    dim_in = int(len(x) / 2)
    x = x.reshape((2, dim_in))
    fvec = np.zeros(2 * dim_in, dtype=np.float64)
    # define some auxiliary params
    pem = 1.0
    peh = 5.0
    d = 0.135
    b = 0.5
    beta = 2.0
    gamma = 25.0
    h = 1 / (dim_in - 1)
    # Finite-difference coefficients for the two profiles.
    cu1 = -h * pem
    cui1 = 1 / (h**2 * pem) + 1 / h
    cui = -1 / h - 2 / (h**2 * pem)
    ct1 = -h * peh
    cti1 = 1 / (h**2 * peh) + 1 / h
    cti = -beta - 1 / h - 2 / (h**2 * peh)
    # Boundary residuals at the first node of each profile.
    fvec[0] = cu1 * x[0, 1] - x[0, 0] + h * pem
    fvec[1] = ct1 * x[1, 1] - x[1, 0] + h * peh
    for i in range(2, dim_in):
        # Interior nodes: exponential reaction term plus a three-point stencil.
        fvec[i] = (
            -d * x[0, i - 1] * np.exp(gamma - gamma / x[1, i - 1])
            + (cui1) * x[0, i - 2]
            + cui * x[0, i - 1]
            + x[0, i] / (h**2 * pem)
        )
        fvec[dim_in - 2 + i] = (
            b * d * x[0, i - 1] * np.exp(gamma - gamma / x[1, i - 1])
            + beta * x[1, i - 1]
            + cti1 * x[1, i - 2]
            + cti * x[1, i - 1]
            + x[1, i] / (h**2 * peh)
        )
    # Last-node residuals: difference of the two trailing values per profile.
    fvec[-2] = x[0, -1] - x[0, -2]
    fvec[-1] = x[1, -1] - x[1, -2]
    return fvec


@mark.least_squares
@njit
def chemrctb(x):
    # Single-profile variant of the reactor model above; same stencil
    # structure, kept verbatim for numba (njit) compatibility.
    dim_in = int(len(x))
    fvec = np.zeros(dim_in, dtype=np.float64)
    # define some auxiliary params
    pe = 5.0
    d = 0.135
    b = 0.5
    gamma = 25.0
    h = 1 / (dim_in - 1)
    ct1 = -h * pe
    cti1 = 1 / (h**2 * pe) + 1 / h
    cti = -1 / h - 2 / (h**2 * pe)
    fvec[0] = ct1 * x[1] - x[0] + h * pe
    for i in range(2, dim_in):
        fvec[i - 1] = (
            d * (b + 1 - x[i - 1]) * np.exp(gamma - gamma / x[i - 1])
            + cti1 * x[i - 2]
            + cti * x[i - 1]
            + x[i] / (h**2 * pe)
        )
    fvec[-1] = x[-1] - x[-2]
    return fvec


@mark.least_squares
def chnrsbne(x):
    # Chained Rosenbrock (CHNRSBNE); alfa holds the 50 per-term weights.
    alfa = np.array(
        [
            1.25,
            1.40,
            2.40,
            1.40,
            1.75,
            1.20,
            2.25,
            1.20,
            1.00,
            1.10,
            1.50,
            1.60,
            1.25,
            1.25,
            1.20,
            1.20,
            1.40,
            0.50,
            0.50,
            1.25,
            1.80,
            0.75,
            1.25,
            1.40,
            1.60,
            2.00,
            1.00,
            1.60,
            1.25,
            2.75,
            1.25,
            1.25,
            1.25,
            3.00,
            1.50,
            2.00,
            1.25,
            1.40,
            1.80,
            1.50,
            2.20,
            1.40,
            1.50,
            1.25,
            2.00,
            1.50,
            1.25,
1.40, 0.60, 1.50, ] ) dim_in = len(x) fvec = np.zeros(2 * (dim_in - 1)) fvec[: dim_in - 1] = 4 * alfa[1:] * (x[:-1] - x[1:] ** 2) fvec[dim_in - 1 :] = x[1:] - 1 return fvec @mark.least_squares @njit def drcavty(x, r): m = int(np.sqrt(len(x))) x = x.reshape((m, m)) h = 1 / (m + 2) xvec = np.zeros((m + 4, m + 4), dtype=np.float64) xvec[2 : m + 2, 2 : m + 2] = x xvec[-2, :] = -h / 2 xvec[-1, :] = h / 2 fvec = np.zeros(x.shape, dtype=np.float64) for i in range(m): for j in range(m): fvec[i, j] = ( 20 * xvec[i + 2, j + 2] - 8 * xvec[i + 1, j + 2] - 8 * xvec[i + 3, j + 2] - 8 * xvec[i + 2, j + 1] - 8 * xvec[i + 2, j + 3] + 2 * xvec[i + 1, j + 3] + 2 * xvec[i + 3, j + 2] + 2 * xvec[i + 1, j + 1] + 2 * xvec[i + 3, j + 3] + xvec[i, j + 2] + xvec[i + 4, j + 2] + xvec[i + 2, j] + xvec[i + 2, j + 4] + (r / 4) * (xvec[i + 2, j + 3] - xvec[i + 2, j + 1]) * ( xvec[i, j + 2] + xvec[i + 1, j + 1] + xvec[i + 1, j + 3] - 4 * xvec[i + 1, j + 2] - 4 * xvec[i + 3, j + 2] - xvec[i + 3, j + 2] - xvec[i + 3, j + 3] - xvec[i + 4, j + 2] ) - (r / 4) * (xvec[i + 3, j + 2] - xvec[i + 1, j + 2]) * ( xvec[i + 2, j] + xvec[i + 1, j + 1] + xvec[i + 3, j + 1] - 4 * xvec[i + 2, j + 1] - 4 * xvec[i + 2, j + 3] - xvec[i + 1, j + 3] - xvec[i + 3, j + 3] - xvec[i + 2, j + 4] ) ) return fvec.flatten() @mark.least_squares def freurone(x): dim_in = len(x) fvec = np.zeros((2, dim_in - 1), dtype=np.float64) for i in range(dim_in - 1): fvec[0, i] = (5.0 - x[i + 1]) * x[i + 1] ** 2 + x[i] - 2 * x[i + 1] - 13.0 fvec[1, i] = (1.0 + x[i + 1]) * x[i + 1] ** 2 + x[i] - 14 * x[i + 1] - 29.0 return fvec.flatten() @mark.least_squares def hatfldg(x): dim_in = len(x) fvec = np.zeros(dim_in, dtype=np.float64) for i in range(1, dim_in - 1): fvec[i - 1] = x[i] * (x[i - 1] - x[i + 1]) + x[i] - x[12] + 1 fvec[-2] = x[0] - x[12] + 1 - x[0] * x[1] fvec[-1] = x[-1] - x[12] + 1 + x[-2] * x[-1] return fvec @mark.least_squares def integreq(x): dim_in = len(x) h = 1 / (dim_in + 1) t = np.arange(1, dim_in + 1) * h xvec = 
np.zeros(dim_in + 2, dtype=np.float64) xvec[1:-1] = x fvec = np.zeros_like(x) for i in range(1, dim_in): fvec[i - 1] = ( xvec[i] + h * ( (1 - t[i - 1]) * (t[:i] * (xvec[1 : i + 1] + t[:i] + 1) ** 3).sum() + t[i - 1] * ((1 - t[i:]) * (xvec[i + 1 : -1] + t[i:] + 1) ** 3).sum() ) / 2 ) fvec[-1] = ( xvec[-2] + h * ( (1 - t[-1]) * (t * (xvec[1:-1] + t + 1) ** 3).sum() + t[-1] * ((1 - t[-1]) * (xvec[-2] + t[-1] + 1) ** 3) ) / 2 ) return fvec @mark.least_squares def msqrta(x): dim_in = int(np.sqrt(len(x))) xmat = x.reshape((dim_in, dim_in)) bmat = 5 * xmat amat = np.zeros((dim_in, dim_in), dtype=np.float64) for i in range(1, dim_in + 1): for j in range(1, dim_in + 1): amat[i - 1, j - 1] = (bmat[i - 1, :] * bmat[:, j - 1]).sum() fmat = np.zeros((dim_in, dim_in)) for i in range(1, dim_in + 1): for j in range(1, dim_in + 1): fmat[i - 1, j - 1] = (xmat[i - 1, :] * xmat[:, j - 1]).sum() - amat[ i - 1, j - 1 ] return fmat.flatten() @mark.least_squares def penalty_1(x, a=1e-5): fvec = np.sqrt(a) * (x - 2) fvec = np.concatenate([fvec, [x @ x - 1 / 4]]) return fvec @mark.least_squares def penalty_2(x, a=1e-10): dim_in = len(x) y = np.exp(np.arange(1, 2 * dim_in + 1) / 10) + np.exp(np.arange(2 * dim_in) / 10) fvec = np.zeros(2 * dim_in) fvec[0] = x[0] - 0.2 fvec[1:dim_in] = np.sqrt(a) * ( np.exp(x[1:] / 10) + np.exp(x[:-1] / 10) - y[1:dim_in] ) fvec[dim_in:-1] = np.sqrt(a) * (np.exp(x[1:] / 10) - np.exp(-1 / 10)) fvec[-1] = (np.arange(1, dim_in + 1)[::-1] * x**2).sum() - 1 return fvec @mark.least_squares def vardimne(x): dim_in = len(x) fvec = np.zeros(dim_in + 2) fvec[:-2] = x - 1 fvec[-2] = (np.arange(1, dim_in + 1) * (x - 1)).sum() fvec[-1] = ((np.arange(1, dim_in + 1) * (x - 1)).sum()) ** 2 return fvec @mark.least_squares def yatpsq_1(x, dim_in): xvec = x[: dim_in**2] xvec = xvec.reshape((dim_in, dim_in)) yvec = x[dim_in**2 : dim_in**2 + dim_in] zvec = x[dim_in**2 + dim_in : dim_in**2 + 2 * dim_in] fvec = np.zeros((dim_in, dim_in), dtype=np.float64) for i in range(dim_in): for 
j in range(dim_in): fvec[i, j] = ( xvec[i, j] ** 3 - 10 * xvec[i, j] ** 2 - (yvec[i] + zvec[j]) * (xvec[i, j] * np.cos(xvec[i, j]) - np.sin(xvec[i, j])) ) fvec = fvec.flatten() temp = (np.sin(xvec) / xvec).sum(axis=0) - 1 fvec = np.concatenate((fvec, temp)) temp = (np.sin(xvec) / xvec).sum(axis=1) - 1 fvec = np.concatenate((fvec, temp)) return fvec @mark.least_squares def yatpsq_2(x, dim_in): xvec = x[: dim_in**2] xvec = xvec.reshape((dim_in, dim_in)) yvec = x[dim_in**2 : dim_in**2 + dim_in] zvec = x[dim_in**2 + dim_in : dim_in**2 + 2 * dim_in] fvec = np.zeros((dim_in, dim_in), dtype=np.float64) for i in range(dim_in): for j in range(dim_in): fvec[i, j] = xvec[i, j] - (yvec[i] + zvec[j]) * (1 + np.cos(xvec[i, j])) - 1 fvec = fvec.flatten() temp = (np.sin(xvec) + xvec).sum(axis=0) - 1 fvec = np.concatenate((fvec, temp)) temp = (np.sin(xvec) + xvec).sum(axis=1) - 1 fvec = np.concatenate((fvec, temp)) return fvec def get_start_points_msqrta(dim_in, flag=1): bmat = np.zeros((dim_in, dim_in)) for i in range(1, dim_in + 1): for j in range(1, dim_in + 1): bmat[i - 1, j - 1] = np.sin(((i - 1) * dim_in + j) ** 2) if flag == 2: bmat[2, 0] = 0 xmat = 0.2 * bmat return xmat.flatten().tolist() def get_start_points_bdvalues(n, a=1): h = 1 / (n + 1) x = np.zeros(n) for i in range(n): x[i] = (i + 1) * h * ((i + 1) * h - 1) return (x * a).tolist() def get_start_points_spmsqrt(m): b = np.zeros((m, m)) b[0, 0] = np.sin(1) b[0, 1] = np.sin(4) k = 2 for i in range(1, m - 1): k += 1 b[i, i - 1] = np.sin(k**2) k += 1 b[i, i] = np.sin(k**2) k += 1 b[i, i + 1] = np.sin(k**2) k += 1 b[-1, -2] = np.sin(k**2) k += 1 b[-1, -1] = np.sin(k**2) x = np.zeros((m, m)) x[:, :2] = 0.2 * b[:, :2] x[1:-1, :-2] = 0.2 * b[1:-1, :-2] x[1:-1, 1:-1] = 0.2 * b[1:-1, 1:-1] x[1:-1, 2:] = 0.2 * b[1:-1, 2:] x[-1, -2:] = 0.2 * b[-1, -2:] x_out = x[x != 0] return x_out.tolist() def get_start_points_qr3d(m): r = np.diag(2 * np.arange(1, m + 1) / m, 0) + np.diag((1 - np.arange(1, m)) / m, 1) r[0, 1] = 0 r[-1, -1] = 2 
* m
    return np.concatenate([np.eye(m).flatten(), r[np.triu_indices_from(r)]]).tolist()


def get_start_points_qr3dbd(m):
    # Banded variant of get_start_points_qr3d: same R factor, but only the
    # in-band entries of rows 0-4 are kept. The fixed row indices appear to
    # assume m == 5 -- confirm against callers.
    r = np.diag(2 * np.arange(1, m + 1) / m, 0) + np.diag((1 - np.arange(1, m)) / m, 1)
    r[0, 1] = 0
    r[-1, -1] = 2 * m
    return np.concatenate(
        [np.eye(m).flatten(), r[0, :-2], r[1, 1:-1], r[2, 2:], r[3, 3:], [r[4, 4]]]
    ).tolist()


def get_start_points_hydcar20():
    # Starting point for the 20-stage hydrocarbon column: 20 triples of
    # composition values, then 20 values of 100 and 19 values of 300.
    x = [
        0.0, 0.3, 0.1,
        0.0, 0.3, 0.9,
        0.01, 0.3, 0.9,
        0.02, 0.4, 0.8,
        0.05, 0.4, 0.8,
        0.07, 0.45, 0.8,
        0.09, 0.5, 0.7,
        0.1, 0.5, 0.7,
        0.15, 0.5, 0.6,
        0.2, 0.5, 0.6,
        0.25, 0.6, 0.5,
        0.3, 0.6, 0.5,
        0.35, 0.6, 0.5,
        0.4, 0.6, 0.4,
        0.4, 0.7, 0.4,
        0.42, 0.7, 0.3,
        0.45, 0.75, 0.3,
        0.45, 0.75, 0.2,
        0.5, 0.8, 0.1,
        0.5, 0.8, 0.0,
    ]
    return x + [100] * 20 + [300] * 19


def get_start_points_hydcar6():
    # 6-stage variant: 6 triples, then 6 values of 100 and 5 values of 300.
    x = [
        0.0, 0.2, 0.9,
        0.0, 0.2, 0.8,
        0.05, 0.3, 0.8,
        0.1, 0.3, 0.6,
        0.3, 0.5, 0.3,
        0.6, 0.6, 0.0,
    ]
    return x + [100] * 6 + [300] * 5


def get_start_points_methanb8():
    # Fixed starting point for METHANB8: 8 triples (temperature plus two
    # compositions) followed by 7 flow values.
    return [
        107.47, 0.09203, 0.908,
        102.4, 0.1819, 0.8181,
        97.44, 0.284, 0.716,
        96.3, 0.3051, 0.6949,
        93.99, 0.3566, 0.6434,
        89.72, 0.468, 0.532,
        83.71, 0.6579, 0.3421,
        78.31, 0.8763, 0.1237,
        886.37, 910.01, 922.52, 926.46, 935.56, 952.83, 975.73,
    ]


def get_start_points_methanl8():
    # METHANL8 starting point; same layout as get_start_points_methanb8 with
    # different leading temperatures.
    return [
        120, 0.09203, 0.908,
        110, 0.1819, 0.8181,
        100, 0.284, 0.716,
        88, 0.3051, 0.6949,
        86, 0.3566, 0.6434,
        84, 0.468, 0.532,
        80, 0.6579, 0.3421,
        76, 0.8763, 0.1237,
        886.37, 910.01, 922.52, 926.46, 935.56, 952.83, 975.73,
    ]


# Reference solution vectors (regression values) for the problems above.
solution_x_bdvalues = [
    -0.00501717, -0.00998312, -0.01489709, -0.01975833, -0.02456605,
    -0.02931945, -0.03401771, -0.03866001, -0.0432455, -0.04777331,
    -0.05224255, -0.05665232, -0.0610017, -0.06528975, -0.06951549,
    -0.07367795, -0.07777612, -0.08180898, -0.08577546, -0.08967451,
    -0.09350501, -0.09726585, -0.10095589, -0.10457394, -0.10811881,
    -0.11158927, -0.11498406, -0.1183019, -0.12154147, -0.12470143,
    -0.1277804, -0.13077697, -0.13368969, -0.1365171, -0.13925766,
    -0.14190984, -0.14447205, -0.14694265, -0.14931997, -0.15160232,
    -0.15378794, -0.15587503,
-0.15786175, -0.15974621, -0.16152647, -0.16320056, -0.16476642, -0.16622197, -0.16756507, -0.1687935, -0.16990502, -0.17089728, -0.17176792, -0.17251447, -0.17313443, -0.1736252, -0.17398413, -0.17420848, -0.17429545, -0.17424214, -0.17404559, -0.17370274, -0.17321044, -0.17256546, -0.17176447, -0.17080403, -0.16968062, -0.16839059, -0.16693019, -0.16529558, -0.16348276, -0.16148763, -0.15930595, -0.15693338, -0.15436539, -0.15159735, -0.14862447, -0.14544178, -0.14204417, -0.13842638, -0.13458293, -0.13050819, -0.12619633, -0.12164132, -0.11683693, -0.1117767, -0.10645396, -0.10086179, -0.09499304, -0.0888403, -0.08239586, -0.07565179, -0.06859981, -0.06123136, -0.05353755, -0.04550917, -0.03713662, -0.02840998, -0.01931889, -0.00985262, ] solution_x_bratu_2d = [ 0.07234633, 0.11814877, 0.1459185, 0.15914495, 0.15914495, 0.1459185, 0.11814877, 0.07234633, 0.11814877, 0.19875438, 0.24923944, 0.27361473, 0.27361473, 0.24923944, 0.19875438, 0.11814877, 0.1459185, 0.24923944, 0.31530971, 0.34753593, 0.34753593, 0.31530971, 0.24923944, 0.1459185, 0.15914495, 0.27361473, 0.34753593, 0.3837784, 0.3837784, 0.34753593, 0.27361473, 0.15914495, 0.15914495, 0.27361473, 0.34753593, 0.3837784, 0.3837784, 0.34753593, 0.27361473, 0.15914495, 0.1459185, 0.24923944, 0.31530971, 0.34753593, 0.34753593, 0.31530971, 0.24923944, 0.1459185, 0.11814877, 0.19875438, 0.24923944, 0.27361473, 0.27361473, 0.24923944, 0.19875438, 0.11814877, 0.07234633, 0.11814877, 0.1459185, 0.15914495, 0.15914495, 0.1459185, 0.11814877, 0.07234633, ] solution_x_bratu_2d_t = [ 0.1933024, 0.33566336, 0.43355494, 0.48428111, 0.48428111, 0.43355494, 0.33566336, 0.1933024, 0.33566336, 0.59839893, 0.78485783, 0.88316504, 0.88316504, 0.78485783, 0.59839893, 0.33566336, 0.43355494, 0.78485783, 1.04056365, 1.17766089, 1.17766089, 1.04056365, 0.78485783, 0.43355494, 0.48428111, 0.88316504, 1.17766089, 1.33720634, 1.33720634, 1.17766089, 0.88316504, 0.48428111, 0.48428111, 0.88316504, 1.17766089, 1.33720634, 
1.33720634, 1.17766089, 0.88316504, 0.48428111, 0.43355494, 0.78485783, 1.04056365, 1.17766089, 1.17766089, 1.04056365, 0.78485783, 0.43355494, 0.33566336, 0.59839893, 0.78485783, 0.88316504, 0.88316504, 0.78485783, 0.59839893, 0.33566336, 0.1933024, 0.33566336, 0.43355494, 0.48428111, 0.48428111, 0.43355494, 0.33566336, 0.1933024, ] solution_x_bratu_3d = [ 0.24431369, 0.27785366, 0.19682155, 0.27785366, 0.32761664, 0.23878408, 0.19682155, 0.23878408, 0.18908409, 0.27785366, 0.32761664, 0.23878408, 0.32761664, 0.39611483, 0.29367471, 0.23878408, 0.29367471, 0.2314289, 0.19682155, 0.23878408, 0.18908409, 0.23878408, 0.29367471, 0.2314289, 0.18908409, 0.2314289, 0.18663237, ] solution_x_broydn_3d = [ -0.57076119, -0.68191013, -0.70248602, -0.70626058, -0.70695185, -0.70707842, -0.70710159, -0.70710583, -0.70710661, -0.70710675, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710678, -0.70710677, -0.70710675, -0.70710669, -0.70710654, -0.70710612, -0.70710498, -0.70710185, -0.70709332, -0.70707001, -0.70700634, -0.70683248, -0.70635771, -0.70506153, -0.7015252, -0.69189463, -0.66579752, -0.59603531, 
-0.4164123, ] solution_x_cbratu_2d = [ 0.16692195, 0.2529246, 0.2796211, 0.2529246, 0.16692195, 0.2529246, 0.39198662, 0.43607163, 0.39198662, 0.2529246, 0.2796211, 0.43607163, 0.48598608, 0.43607163, 0.2796211, 0.2529246, 0.39198662, 0.43607163, 0.39198662, 0.2529246, 0.16692195, 0.2529246, 0.2796211, 0.2529246, 0.16692195, ] solution_x_cbratu_2d = solution_x_cbratu_2d + [0] * 25 solution_x_broydn_bd = [ -0.00000000e00, -0.00000000e00, -0.00000000e00, 0.00000000e00, 0.00000000e00, 0.00000000e00, 0.00000000e00, 0.00000000e00, 0.00000000e00, -0.00000000e00, -0.00000000e00, -0.00000000e00, -0.00000000e00, -0.00000000e00, -0.00000000e00, 0.00000000e00, 0.00000000e00, 0.00000000e00, 0.00000000e00, 0.00000000e00, -0.00000000e00, -0.00000000e00, -0.00000000e00, -0.00000000e00, -0.00000000e00, -0.00000000e00, 0.00000000e00, 0.00000000e00, 0.00000000e00, 0.00000000e00, 0.00000000e00, 0.00000000e00, -0.00000000e00, -0.00000000e00, -1.00000000e-10, -2.00000000e-10, -2.00000000e-10, -1.00000000e-10, 4.00000000e-10, 1.40000000e-09, 3.00000000e-09, 4.60000000e-09, 4.70000000e-09, -1.00000000e-10, -1.43000000e-08, -4.23000000e-08, -8.25000000e-08, -1.17600000e-07, -1.00700000e-07, 5.54000000e-08, 4.68300000e-07, 1.22420000e-06, 2.22540000e-06, 2.92120000e-06, 1.96990000e-06, -2.95480000e-06, -1.47187000e-05, -3.48246000e-05, -5.90960000e-05, -7.05915000e-05, -3.15647000e-05, 1.19032300e-04, 4.48647900e-04, 9.73368600e-04, 1.53772420e-03, 1.63273940e-03, 2.14717600e-04, -4.30681910e-03, -1.36127680e-02, -2.81043041e-02, -4.39233903e-02, -4.73306566e-02, -8.45337580e-03, 1.04321937e-01, 2.74938066e-01, 4.54655029e-01, 6.22031184e-01, 7.74293819e-01, 9.11375485e-01, 1.03226579e00, 1.13635201e00, 1.22498498e00, 1.30019836e00, 1.36374913e00, 1.41711415e00, 1.46168952e00, 1.49882961e00, 1.52972625e00, 1.55537824e00, 1.57663224e00, 1.59421664e00, 1.60875287e00, 1.62076049e00, 1.63067143e00, 1.63884538e00, 1.64557503e00, 1.65102930e00, 1.65461538e00, 1.64858082e00, 1.55247986e00, ] 
solution_x_chemrctb = [ 0.05141945, 0.05203209, 0.05267567, 0.05335175, 0.05406197, 0.05480806, 0.05559182, 0.05641517, 0.05728009, 0.05818869, 0.05914317, 0.06014585, 0.06119916, 0.06230566, 0.06346804, 0.06468911, 0.06597184, 0.06731935, 0.0687349, 0.07022193, 0.07178406, 0.07342507, 0.07514894, 0.07695987, 0.07886224, 0.08086068, 0.08296004, 0.08516541, 0.08748215, 0.08991588, 0.09247251, 0.09515826, 0.09797963, 0.10094348, 0.104057, 0.10732776, 0.11076369, 0.11437313, 0.11816486, 0.12214807, 0.12633243, 0.13072811, 0.13534578, 0.14019665, 0.14529249, 0.15064569, 0.15626923, 0.16217677, 0.16838265, 0.17490194, 0.18175047, 0.18894487, 0.19650261, 0.20444203, 0.21278242, 0.22154402, 0.23074811, 0.24041703, 0.25057426, 0.26124447, 0.27245356, 0.28422875, 0.29659862, 0.30959322, 0.32324409, 0.33758438, 0.35264891, 0.36847425, 0.38509884, 0.40256304, 0.42090924, 0.440182, 0.46042812, 0.48169675, 0.50403953, 0.52751072, 0.55216731, 0.57806915, 0.60527915, 0.63386338, 0.66389124, 0.69543563, 0.72857315, 0.76338427, 0.79995348, 0.83836951, 0.87872535, 0.921118, 0.96564698, 1.01240975, 1.06148865, 1.11291774, 1.16660739, 1.22219286, 1.27878417, 1.33468684, 1.38740197, 1.43443791, 1.47507982, 1.51238643, ] solution_x_drcavty3 = [ 6.90580000e-06, -3.04054000e-05, -1.34595400e-04, -2.98301400e-04, -3.97564800e-04, -2.82615200e-04, -1.00791500e-04, 1.18693000e-05, 4.83418000e-05, 3.86272000e-05, -3.61169000e-05, -1.56090300e-04, -3.44522400e-04, -5.22159200e-04, -5.02848100e-04, -1.96532500e-04, 4.01814000e-05, 1.66926300e-04, 1.64254200e-04, 9.75942000e-05, -1.53179900e-04, -3.26999400e-04, -5.35655500e-04, -5.17594800e-04, -2.45473400e-04, 2.11398200e-04, 3.85544900e-04, 4.70161600e-04, 3.19836200e-04, 1.48115900e-04, -3.44263800e-04, -3.05706200e-04, -6.07866500e-04, -1.40639000e-04, 3.54345200e-04, 1.17906180e-03, 1.27587890e-03, 6.46781700e-04, 2.97807400e-04, 9.77706000e-05, -3.80139500e-04, 5.85784900e-04, -4.34699000e-04, 1.15040270e-03, 2.93253490e-03, 
5.19921130e-03, 3.26982700e-03, -1.15543100e-03, -3.31632400e-04, -9.65743000e-05, 8.88011200e-04, 4.55121760e-03, 1.59257740e-03, 4.02608170e-03, 5.27395750e-03, -2.05009960e-03, -7.90681200e-04, 1.29072190e-03, 3.92764700e-04, -7.23810000e-05, 1.10527329e-02, 1.14289463e-02, 1.01554380e-03, -4.10803130e-03, -1.39518580e-03, 1.43680550e-03, -2.32410100e-04, 3.02444440e-03, 1.54672000e-04, -3.88632200e-04, 4.87177720e-03, -1.17441400e-03, 6.05647400e-04, -6.18932200e-04, -1.81334350e-03, 5.15906690e-03, 1.41277700e-04, 6.31930020e-03, 8.67670500e-04, 1.30191470e-03, 2.96133460e-03, 3.64054300e-03, 2.00721890e-03, 5.74324870e-03, 2.01317600e-04, 5.60508670e-03, 1.15676060e-03, 8.20725550e-03, -9.88774500e-04, 1.46054681e-02, 4.93810300e-04, 3.65006800e-04, 6.47333900e-04, 7.25182800e-04, 1.71821900e-04, 2.96466900e-04, -7.95212300e-04, 1.80194150e-03, 8.79835000e-04, 1.17217338e-02, ] solution_x_drcavty2 = [ -8.30500000e-07, 1.79025100e-04, 4.69755400e-04, 6.91706100e-04, 7.63680500e-04, 6.99211100e-04, 5.59898000e-04, 4.13496000e-04, 2.89295400e-04, 1.58674600e-04, 1.44396300e-04, 6.45348200e-04, 1.19393250e-03, 1.48581000e-03, 1.49174680e-03, 1.30666740e-03, 1.04594130e-03, 7.90114600e-04, 5.54089300e-04, 2.86541200e-04, 4.86092200e-04, 1.31996230e-03, 1.90360630e-03, 2.06459340e-03, 1.95310610e-03, 1.71807210e-03, 1.41349750e-03, 1.06405470e-03, 6.90019300e-04, 2.91278200e-04, 1.00536400e-03, 1.98412810e-03, 2.39879170e-03, 2.47713040e-03, 2.38995890e-03, 2.11023730e-03, 1.58275540e-03, 9.59023300e-04, 3.65828300e-04, -2.52386000e-05, 1.70639510e-03, 2.47798200e-03, 2.96272640e-03, 3.23424450e-03, 2.92194380e-03, 1.83925430e-03, 5.17510800e-04, -2.72294500e-04, -8.42981300e-04, -6.44882600e-04, 3.47563010e-03, 3.68554070e-03, 4.85243520e-03, 4.30556650e-03, 2.59563830e-03, 1.23414300e-04, -1.12148630e-03, -1.16433340e-03, -1.76218150e-03, -5.08449600e-04, 1.02089465e-02, 9.19876750e-03, 7.38832940e-03, 8.91347900e-04, -2.02918160e-03, -4.35306900e-04, 
1.74552680e-03, -3.82299000e-04, -1.89595900e-03, 1.44318390e-03, 8.39182720e-03, 2.10036430e-03, -2.07708990e-03, -7.15986500e-04, -1.22269490e-03, 2.85020860e-03, 1.86361079e-02, -1.85665600e-04, 5.77159200e-04, 1.16139361e-02, 7.12641800e-03, 2.28174230e-03, -4.41730960e-03, 1.19527564e-02, 2.02136034e-02, 1.78591365e-02, 1.06707580e-01, 3.00444810e-03, 1.98001460e-02, 1.37005246e-01, 2.73846000e-03, 7.47556450e-03, 1.07964128e-02, 1.81864591e-02, -1.19626975e-02, -5.17858661e-02, -2.97147410e-02, -5.84116800e-03, 1.62672675e-01, 9.02415668e-01, ] solution_x_freurone = [ 12.26912153, -0.83186186, -1.50692279, -1.53467102, -1.53579843, -1.53584421, -1.53584607, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584616, -1.53584616, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584616, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584616, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584616, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584615, -1.53584616, -1.53584615, -1.53584616, -1.53584615, -1.53584615, -1.53584615, -1.53584617, -1.53584665, -1.53585846, -1.53614941, -1.54330584, ] solution_x_integreq = [ -0.0049257, -0.00980164, -0.01462709, -0.01940127, -0.02412341, -0.0287927, -0.03340834, 
-0.03796949, -0.04247531, -0.04692493, -0.05131748, -0.05565204, -0.0599277, -0.06414352, -0.06829854, -0.07239178, -0.07642224, -0.08038888, -0.08429068, -0.08812655, -0.09189541, -0.09559613, -0.09922759, -0.1027886, -0.10627797, -0.10969449, -0.11303691, -0.11630394, -0.11949428, -0.1226066, -0.12563951, -0.12859164, -0.13146152, -0.13424771, -0.1369487, -0.13956294, -0.14208886, -0.14452484, -0.14686924, -0.14912034, -0.15127642, -0.15333569, -0.15529633, -0.15715646, -0.15891416, -0.16056747, -0.16211435, -0.16355275, -0.16488053, -0.16609551, -0.16719544, -0.16817804, -0.16904092, -0.16978168, -0.17039781, -0.17088676, -0.17124589, -0.17147249, -0.1715638, -0.17151695, -0.171329, -0.17099693, -0.17051763, -0.16988789, -0.16910442, -0.16816384, -0.16706265, -0.16579725, -0.16436394, -0.16275891, -0.16097822, -0.15901783, -0.15687354, -0.15454105, -0.15201592, -0.14929355, -0.14636922, -0.14323804, -0.13989497, -0.13633479, -0.13255212, -0.1285414, -0.12429688, -0.11981262, -0.11508247, -0.11010007, -0.10485884, -0.09935198, -0.09357242, -0.08751287, -0.08116576, -0.07452324, -0.06757719, -0.06031917, -0.05274045, -0.04483194, -0.03658422, -0.02798751, -0.01903165, -0.01008278, ] solution_x_msqrta = [ 8.0e-10, -5.0e-10, -2.0e-10, -1.0e-10, 1.0e-10, -5.0e-10, -1.0e-10, -4.0e-10, -2.0e-10, 2.0e-10, 2.1e-09, -1.0e-09, -7.0e-10, -4.0e-10, 7.0e-10, -1.4e-09, -3.0e-10, -8.0e-10, 4.0e-10, 6.0e-10, 2.2e-09, -1.6e-09, 8.0e-10, -5.0e-10, -1.2e-09, -5.0e-10, -7.0e-10, -1.7e-09, -2.1e-09, 2.0e-10, -1.2e-09, -3.0e-10, 1.4e-09, -1.0e-10, -1.7e-09, 1.4e-09, 1.0e-10, -9.0e-10, -2.1e-09, -5.0e-10, -2.7e-09, 6.0e-10, -1.0e-09, 2.0e-10, 1.2e-09, -4.0e-10, 2.1e-09, 8.0e-10, 6.0e-10, 4.0e-10, -2.9e-09, 1.0e-09, -4.0e-10, 3.0e-10, 5.0e-10, 6.0e-10, 1.5e-09, 9.0e-10, 4.0e-10, -1.0e-10, 2.9e-09, -1.4e-09, 5.0e-10, -4.0e-10, -7.0e-10, -7.0e-10, -1.2e-09, -1.4e-09, -1.2e-09, 2.0e-10, -1.3e-09, 9.0e-10, -4.0e-10, 3.0e-10, 6.0e-10, 2.0e-10, 5.0e-10, 1.1e-09, 1.0e-09, -1.0e-10, 2.5e-09, 
-1.1e-09, 1.3e-09, -3.0e-10, -1.6e-09, 3.0e-10, -1.6e-09, -1.4e-09, -1.7e-09, -3.0e-10, -3.1e-09, 7.0e-10, 4.0e-10, 3.0e-10, -6.0e-10, 1.4e-09, 1.3e-09, 2.0e-10, -8.0e-10, -5.0e-10, ] solution_x_msqrtb = [ 1.2e-09, -3.0e-10, 2.0e-10, -4.0e-10, -3.0e-10, 6.0e-10, -2.0e-10, -8.0e-10, -6.0e-10, -0.0e00, 8.0e-10, -1.6e-09, -1.3e-09, -5.0e-10, -0.0e00, -2.0e-09, 3.0e-10, -9.0e-10, -1.0e-10, 1.0e-09, 2.3e-09, -0.0e00, 1.3e-09, -6.0e-10, -1.3e-09, 1.1e-09, -1.0e-09, -1.2e-09, -1.9e-09, 0.0e00, -1.1e-09, 1.9e-09, 1.9e-09, 7.0e-10, -5.0e-10, 1.0e-09, -8.0e-10, 1.3e-09, -4.0e-10, -6.0e-10, -3.1e-09, -1.0e-10, -2.2e-09, 7.0e-10, 2.2e-09, -7.0e-10, 1.8e-09, 1.3e-09, 3.1e-09, -4.0e-10, -1.6e-09, 1.7e-09, 1.3e-09, 8.0e-10, -0.0e00, 1.0e-09, -3.0e-10, 1.5e-09, 3.0e-10, -7.0e-10, 4.2e-09, -1.0e-09, 5.0e-10, -1.4e-09, -9.0e-10, 2.7e-09, -7.0e-10, -2.7e-09, -2.0e-09, -3.0e-10, -8.0e-10, 5.0e-10, -4.0e-10, 2.0e-10, 1.2e-09, 2.2e-09, 7.0e-10, 3.0e-10, 1.3e-09, -1.1e-09, 2.7e-09, 4.0e-10, 2.6e-09, -5.0e-10, -2.8e-09, -6.0e-10, -2.2e-09, -9.0e-10, -3.6e-09, 8.0e-10, -2.4e-09, 3.1e-09, 2.9e-09, 1.4e-09, -8.0e-10, 1.1e-09, -1.1e-09, 2.5e-09, -4.0e-10, -9.0e-10, ] solution_x_penalty2 = [ 1.00248452e-01, -1.60000000e-09, -1.40000000e-09, -1.50000000e-09, -1.00000000e-09, -9.00000000e-10, -8.00000000e-10, -6.00000000e-10, -1.20000000e-09, -8.00000000e-10, -7.00000000e-10, -6.00000000e-10, -4.00000000e-10, -4.00000000e-10, -2.00000000e-10, -2.00000000e-10, -3.00000000e-10, -3.00000000e-10, -1.00000000e-10, -0.00000000e00, 0.00000000e00, 1.00000000e-10, 1.00000000e-10, 0.00000000e00, 0.00000000e00, 1.00000000e-10, 1.00000000e-10, 1.00000000e-10, -1.00000000e-10, -4.00000000e-10, -4.00000000e-10, -1.00000000e-09, -7.00000000e-10, -6.00000000e-10, -8.00000000e-10, -8.00000000e-10, -1.20000000e-09, -9.00000000e-10, -6.00000000e-10, -4.00000000e-10, -1.00000000e-10, 2.00000000e-10, 5.00000000e-10, 1.00000000e-09, 5.40000000e-09, 2.20000000e-09, 7.20000000e-09, 8.50000000e-09, 9.60000000e-09, 
1.00000000e-08, 1.14000000e-08, 5.80000000e-09, 1.62000000e-08, 1.82000000e-08, 2.15000000e-08, 2.35000000e-08, 2.57000000e-08, 2.90000000e-08, 3.19000000e-08, 3.09000000e-08, 4.16000000e-08, 4.66000000e-08, 5.31000000e-08, 6.20000000e-08, 6.85000000e-08, 7.99000000e-08, 9.37000000e-08, 1.08300000e-07, 1.24300000e-07, 1.46700000e-07, 1.64900000e-07, 1.87800000e-07, 2.08200000e-07, 2.40000000e-07, 2.78900000e-07, 3.24200000e-07, 3.77700000e-07, 4.22400000e-07, 4.89200000e-07, 5.68200000e-07, 6.64900000e-07, 7.72800000e-07, 8.93500000e-07, 1.05180000e-06, 1.24180000e-06, 1.46270000e-06, 1.73540000e-06, 2.06050000e-06, 2.45940000e-06, 2.96690000e-06, 3.61230000e-06, 4.44100000e-06, 5.50230000e-06, 6.96620000e-06, 8.98120000e-06, 1.18847000e-05, 1.64570000e-05, 2.42465000e-05, 4.02062000e-05, 4.21655000e-05, ] solution_x_watson = [ -0.00000000e00, 1.00000000e00, -9.80000000e-09, 3.33333416e-01, 3.52440000e-06, 1.33262416e-01, 5.10786700e-04, 5.27159393e-02, -4.88557280e-03, 7.04208489e-02, -1.76310368e-01, 3.59652497e-01, -3.34930081e-01, -1.06954704e-01, 7.10806973e-01, -7.44769987e-01, 1.39770112e-01, 2.23466491e-01, -2.30955205e-02, -4.13852010e-03, -2.40655722e-01, 2.11825083e-01, -7.89005230e-02, 2.52472539e-02, 5.33065585e-02, 1.99652115e-01, -5.38278039e-01, 2.27228847e-01, 2.79127878e-01, -2.82103374e-01, 7.20994063e-02, ] solution_x_yatpsq_1 = [7.06817436] * 100 + [4011.71977601] * 10 + [-4045.83698215] * 10 solution_x_yatpsq_2 = [0.0500104219] * 100 + [31.74567612] * 10 + [-32.22096803] * 10 solution_x_arglble = [ 1.47780425, 1.95560851, 3.92938553, 2.91121702, 8.01627094, 6.85877107, 15.21786782, 4.82243403, 6.00860058, 15.03254187, 23.28986922, 12.71754213, -26.58525249, 29.43573564, 15.37597436, 8.64486806, -29.5319652, 11.01720116, -42.30702246, 29.06508375, -6.28404714, 45.57973843, -20.97480585, 24.43508427, -31.03091276, -54.17050498, -28.61801666, 57.87147127, 51.17010631, 29.75194872, -20.71188224, 16.28973612, 150.20741347, -60.0639304, 
-83.05993998, 21.03440232, 91.61798856, -85.61404492, 18.37256156, 57.1301675, 37.08398118, -13.56809428, 121.49145482, 90.15947687, 12.46203868, -42.9496117, -29.66369732, 47.87016853, 142.29920239, -63.06182553, 3.443138, -109.34100996, -194.46227627, -58.23603332, 74.84482245, 114.74294254, -32.27461444, 101.34021261, 48.55025892, 58.50389743, 66.92509996, -42.42376449, -96.64369487, 31.57947224, -83.36761354, 299.41482695, -67.29433552, -121.1278608, -35.15915764, -167.11987996, -88.92036545, 41.06880464, 242.22658666, 182.23597711, 72.43687845, -172.22808985, 156.51103949, 35.74512311, -129.889251, 113.260335, 61.76069205, 73.16796235, -226.09539309, -28.13618857, -140.01767085, 241.98290964, -188.31315279, 179.31895373, -102.19596978, 23.92407737, -20.6453831, -86.8992234, -260.60293104, -60.32739463, -187.46576175, 94.74033707, -26.40609501, 283.59840478, -81.00626161, -6.23327543, ] solution_x_morebvne = [ -0.00480171244711894, -0.009553656441466189, -0.014255063178091082, -0.018905148312847776, -0.02350311156607665, -0.028048136314090373, -0.03253938916802462, -0.03697601953959521, -0.04135715919328434, -0.04568192178445848, -0.04994940238289979, -0.054158676981210775, -0.058308801987528955, -0.06239881370196436, -0.06642772777614718, -0.07039453865524663, -0.07429821900179416, -0.07813771910061508, -0.08191196624414197, -0.0856198640973513, -0.08926029204153081, -0.09283210449604987, -0.09633413021726782, -0.09976517157367622, -0.10312400379632972, -0.10640937420357734, -0.10962000139906009, -0.11275457444189355, -0.11581175198790325, -0.11879016140072804, -0.12168839783155079, -0.12450502326615709, -0.12723856553796054, -0.12988751730556838, -0.13245033499339215, -0.13492543769373558, -0.13731120602871583, -0.13960598097029298, -0.14180806261659795, -0.1439157089226587, -0.14592713438352983, -0.14784050866773057, -0.1496539551987893, -0.15136554968258126, -0.1529733185780274, -0.15447523750859646, -0.15586922961191965, -0.15715316382468714, 
-0.15832485309984537, -0.1593820525529578, -0.16032245753442315, -0.1611437016240685, -0.16184335454444554, -0.16241891998896002, -0.16286783336074998, -0.16318745941800578, -0.16337508982118337, -0.16342794057730933, -0.1633431493763049, -0.16311777281396858, -0.16274878349595126, -0.16223306701673054, -0.1615674188072447, -0.16074854084447576, -0.15977303821587704, -0.15863741553111918, -0.15733807317317933, -0.15587130338031904, -0.15423328614998352, -0.15242008495510762, -0.15042764226272823, -0.14825177484417715, -0.145888168865457, -0.14333237474568616, -0.14057980177072898, -0.13762571244830293, -0.13446521658997057, -0.13109326510447508, -0.12750464348585883, -0.12369396497871003, -0.11965566340170586, -0.11538398560935402, -0.11087298357047293, -0.10611650604048345, -0.1011081898030038, -0.09584145045453528, -0.09030947270418657, -0.08450520015839938, -0.07842132455849246, -0.07205027443652223, -0.06538420315244962, -0.058414976272886505, -0.05113415824875308, -0.04353299834598826, -0.03560241577999431, -0.027332984000740478, -0.018714914071368417, -0.009738037078703543, -0.00039178550924665913, 0.009334826481040273, ] solution_x_oscigrne = [ -0.999903551150572, 1.000114247321587, 0.9998642692032601, 1.0001606899137698, 0.9998089690015318, 1.00022598434592, 0.999731099815014, 1.0003177559633685, 0.9996214137443762, 1.000446688078944, 0.9994668378325214, 1.0006277224039304, 0.999248858409288, 1.000881705203141, 0.9989411926230166, 1.0012376192059906, 0.998506407461758, 1.0017355588402022, 0.9978909678746266, 1.0024305943941474, 0.9970179188459485, 1.0033975851656824, 0.9957759910544586, 1.0047367560724156, 0.9940033482196368, 1.0065792892561265, 0.991463576275275, 1.0090910440981682, 0.9878113950447263, 1.0124704597271386, 0.9825476906558169, 1.016933474160337, 0.97497223558056, 1.0226745117037057, 0.9641665140655163, 1.0297915464178238, 0.949087499582646, 1.0381739331985491, 0.9289092394703534, 1.0473881814350956, 0.9037032879438561, 1.0566576332592028, 
0.8751912025461293, 1.0650493325263044, 0.8467346025073008, 1.071836505625808, 0.8219637719978666, 1.0767742560792348, 0.8030052123113574, 1.0800668731949534, 0.7899438210393462, 1.0821271313195384, 0.781606186399683, 1.0833627968298138, 0.7765461227304188, 1.0840845613674477, 0.7735701654509769, 1.0844995317623756, 0.7718524469999704, 1.0847359223290596, 0.7708717537908896, 1.084869871714956, 0.7703153481248035, 1.0849455446733298, 0.770000790359205, 1.084988222260974, 0.7698233176390366, 1.0850122686562047, 0.7697233024137095, 1.0850258120070317, 0.7696669756983013, 1.0850334441314584, 0.7696352680150108, 1.0850377667549869, 0.7696174327289053, 1.0850402906360344, 0.769607435938671, 1.0850420199210213, 0.7696019492096186, 1.0850440403901882, 0.7695993333871978, 1.085048727912368, 0.7695994609478275, 1.0850634025349974, 0.769604955537232, 1.0851124682328854, 0.7696264320469576, 1.0852784437585632, 0.7697008512420949, 1.0858411700848751, 0.7699541437010324, 1.0877518530579322, 0.7708145141755096, 1.0942649130343773, 0.7737450447320231, 1.1167550385396223, 0.7838340360674543, 1.197692695917145, 0.8197587877734469, 0.34400894026358253, ] solution_x_spmsqrt = [ 0.8414709848078964, -0.7568024953079281, 0.41211848524175654, -0.2879033166650651, -0.13235175009777303, -0.991778853443116, -0.9537526527594719, 0.9200260381967906, -0.6298879942744537, -0.5063656411097588, 0.9988152247235795, -0.4910215938984694, -0.6019998676776046, 0.9395300555699313, -0.9300948780045254, -0.9992080341070627, -0.026521020285755953, -0.40406521945636065, 0.2793865543595699, -0.8509193596391765, 0.923470012926003, 0.1935029667421232, 0.9364725475338365, -0.8859527784925296, 0.176016272833866, -0.5291338443628917, 0.14993681711330134, -0.9851359060614224, -0.8115681644677004, 0.9978032744219705, -0.32153677367579575, -0.15853338004399595, 0.9055399984980432, -0.10589758762554138, -0.21933702833760824, 0.9956757929363228, -0.6701396839379524, -0.9055272090161384, 0.45213333953209767, 
-0.8012247906768953, -0.24539810131000517, -0.9999908622413068, 0.9851203677373821, 0.7025150575473956, 0.97049168633502, -0.9905826083622151, -0.4442747122315391, -0.9365254011824229, 0.7333337958292518, -0.6501275235748957, -0.236456371968843, 0.7902854647755708, 0.4042582281073567, 0.5663064119145462, 0.346394965535536, 0.6369471771360007, 0.5590140193623636, 0.6017832141649304, 0.11508425966985522, -0.2620839590180966, 0.9766556656643753, -0.9660321335212897, -0.9201559227267819, -0.5946419876082146, 0.4278557468834321, 0.9835224135737828, 0.3296208750563675, -0.4117614029834671, -0.9965019983464922, -0.7736233386803075, 0.9509241545016164, 0.3635926207547267, 0.7570979728966365, -0.20259269090077123, 0.9997657290235363, 0.9835006076878136, -0.7274941973722288, 0.9535984876805766, 0.9745271031531163, -0.5444763096196569, 0.9767074399435044, 0.8369692092360629, 0.49052257006311906, -0.017099129324754637, -0.6155654443683672, 0.6372259975359011, -0.7853724073864938, 0.025888206258587287, -0.8648845336882347, 0.8272184413093554, -0.23598771211618058, 0.5221681399851388, -0.1941831363419754, 0.963594168151382, 0.714349265095485, -0.9904998832255181, 0.06994035488705914, -0.1506818641899401, -0.6954738915705097, -0.3056143888882522, ] solution_x_semicon2 = [ -6.349124087002204e-18, -2.720672286121828e-18, -2.0494594670589808e-18, 2.2161886201338454e-18, 1.2316133510181184e-18, -3.2705004691851026e-18, -3.807751676148402e-18, 3.4276320135398005e-18, 2.2326202305653343e-18, -7.019626524587994e-18, -5.858568597552852e-18, -9.211616078255237e-18, -7.71770563976305e-18, -7.876848461287058e-18, -1.6000476267872514e-18, -1.734489564328033e-18, 1.7775061650713925e-19, -5.073461303752388e-18, -7.328302750507605e-18, -3.1238828312289746e-18, 5.980268474857619e-19, -1.6975325642760375e-18, 4.937452418560979e-18, 1.3511147727766607e-17, 5.749609112964981e-17, 1.801648085519756e-16, 5.786887187854167e-16, 1.878507150702686e-15, 6.136128040698149e-15, 2.0030487111145893e-14, 
6.534292422120726e-14, 2.1313997714716273e-13, 6.952358252095008e-13, 2.2677726547784577e-12, 7.39723184163866e-12, 2.412907024826009e-11, 7.870673668386567e-11, 2.567338541527492e-10, 8.374412732822885e-10, 2.731653274793591e-09, 8.910391477241349e-09, 2.906484367660483e-08, 9.480673285799459e-08, 3.092504307341268e-07, 1.008744689447865e-06, 3.2904220583605265e-06, 1.073297189268266e-05, 3.500918292749676e-05, 0.00011418868707255388, 0.00037238836057455765, 0.001213800218967423, 0.003949812221062431, 0.012784144445260801, 0.040678780990535776, 0.12303542035650665, 0.32818258157808705, 0.7151930120566522, 1.2976206270033757, 2.0761013706015463, 3.050641312078585, 4.221240463432072, 5.587898824666944, 7.1506163957832, 8.90939317678084, 10.864229167659865, 13.015124368420274, 15.362078779062067, 17.905092399585243, 20.644165229989806, 23.57929727027575, 26.71048852044308, 30.037738980491792, 33.56104865042189, 37.28041753023337, 41.19584561992624, 45.30733291950049, 49.61487942895613, 54.118485148293146, 58.81815007751155, 63.71387421661134, 68.80565756559251, 74.09350012445506, 79.577401893199, 85.25736287182431, 91.13338306033101, 97.2054624587191, 103.47360106698856, 109.93779888513943, 116.59805591317166, 123.45437215108528, 130.5067475988803, 135.59853094786146, 138.72972219802878, 139.9003970309516, 139.9942331482723, 139.99967247706724, 139.9999814190815, 139.99999894593952, 139.99999994020595, 139.9999999966189, ] solution_x_qr3d = [ 0.8944271909999159, 0.39036002917941326, 0.18505699313910443, 0.095507370926703, 0.0652019862276467, -0.4472135954999579, 0.7807200583588265, 0.3701139862782089, 0.19101474185340606, 0.13040397245529342, 0, -0.4879500364742666, 0.7402279725564178, 0.3820294837068121, 0.26080794491058684, 0, 0, -0.5299359348983446, 0.7003873867958222, 0.47814789900274257, 0, 0, 0, -0.5638286020497468, 0.8258918255501917, 0.447213595499958, -0.35777087639996635, 0.08944271909999157, 0, 0, 0.8197560612767679, -0.7416840554408852, 
0.1951800145897066, 0, 1.132212330751066, -1.1439886848599183, 0.31796156093900735, 1.4188709070303882, -6.058518452574962, 7.972029516100272, ] solution_x_qr3dbd = [ 0.8944271909999159, 0.3903600291794133, 0.1850569931391044, 0.09550737092670301, 0.06520198622764671, -0.4472135954999579, 0.7807200583588265, 0.3701139862782088, 0.19101474185340603, 0.1304039724552934, 0, -0.48795003647426655, 0.7402279725564178, 0.38202948370681217, 0.26080794491058684, 0, 0, -0.5299359348983446, 0.7003873867958221, 0.4781478990027425, 0, 0, 0, -0.5638286020497468, 0.8258918255501917, 0.447213595499958, -0.3577708763999664, 0.08944271909999163, 0.819756061276768, -0.7416840554408851, 0.19518001458970657, 1.1322123307510663, -1.1439886848599186, 0.3179615609390067, 1.4188709070303884, -6.058518452574962, 7.972029516100272, ] solution_x_eigenb = [ 2.1880343004416827, 2.433369037415534, 2.4128467655642227, 2.4143168471412966, 2.4142066676219205, 2.414206682597956, 2.4143168514760567, 2.4128467763690584, 2.433369040912926, 2.1880343004587717, 0.963618832502572, -0.19358332415060756, -0.020571030789440843, -0.0042978748491313435, -0.0011189543814576976, -0.0003256602342870186, -0.00010143977363046447, -3.309302901033653e-05, -1.1092507161708885e-05, -4.10236648587512e-06, -9.758954956834986e-05, 0.9208420627147608, -0.19113674746578155, -0.019750356974082468, -0.004087291838641473, -0.0010576174015411869, -0.00030654777315460436, -9.524470464093356e-05, -3.082138926676854e-05, -1.1092450516891782e-05, -2.1507171255294457e-05, 3.236210199985498e-05, 0.9240994322190695, -0.19135572211934757, -0.019818514674832036, -0.004104765871306482, -0.0010626629889412406, -0.0003082323428087962, -9.524506106163913e-05, -3.30929923080829e-05, -5.86920342958478e-06, 8.343154992559033e-06, -1.7090950629164782e-06, 0.9238630466026214, -0.19134060715511186, -0.019813760575551793, -0.004103453696629484, -0.0010626616637818649, -0.0003065476688168684, -0.0001014390007803748, -1.7629336348662287e-06, 
2.414698832828228e-06, -4.1012794468935585e-07, 1.7073395190219743e-07, 0.9238806428059877, -0.19134186729883576, -0.019813762593004474, -0.004104766449316578, -0.001057618179089577, -0.00032566103285368623, -5.617848676115081e-07, 7.498487330424776e-07, -1.1366246620226694e-07, 4.233248357697225e-08, -1.8184553525805125e-08, 0.9238806405230912, -0.19134060742073378, -0.019818514337183922, -0.004087292358165013, -0.001118954298914211, -1.8565094044360727e-07, 2.461558460853512e-07, -2.9698560165568933e-08, 2.5691549193155624e-08, 4.188439547971367e-08, 1.7091126942261566e-07, 0.9238630462038411, -0.19135572096178424, -0.019750357283584624, -0.004297874115889032, -6.452367583021426e-08, 7.874956290802419e-08, -2.175478191008932e-08, -2.9611984365300305e-08, -1.125976562800656e-07, -4.101727958716196e-07, -1.7068431405865914e-06, 0.9240994307748498, -0.19113674668992328, -0.020571030343105964, -1.2720088883591045e-08, 5.7045828447717434e-08, 7.924431455583827e-08, 2.4584027025267894e-07, 7.494896735776724e-07, 2.41326518080458e-06, 8.34418638100877e-06, 3.236121345356557e-05, 0.9208420621116469, -0.19358332383274754, -9.410619198170363e-09, -1.324699653531768e-08, -6.476508265084364e-08, -1.8558263456164044e-07, -5.618026820278632e-07, -1.7630007182357942e-06, -5.868582984643352e-06, -2.1506490807524302e-05, -9.758904294524118e-05, 0.9636188326077207, ] solution_x_luksan12 = [ -2.6260067987163516, 6.896065319147662, 1.527892692614362, 1.5497440503992346, 2.2933547979396463, 1.6306208450523716, 1.5819176730889628, 2.32812470715209, 1.6291935486837537, 1.5831335363873629, 2.3316182375355337, 1.6290901468343557, 1.5831778731783577, 2.331746894500055, 1.6290866339965575, 1.5831791924745884, 2.3317508225088055, 1.6290863289641941, 1.5831794312419505, 2.3317513366797806, 1.6290862573533014, 1.5831793883878833, 2.3317513422592953, 1.629086261632123, 1.5831793726762426, 2.3317512664646154, 1.6290862864991247, 1.5831794068996972, 2.331751192772075, 1.629086292750654, 
1.5831793960520542, 2.331751254872966, 1.6290863143562366, 1.5831794727104964, 2.331751220286205, 1.6290863278083665, 1.5831794601591884, 2.3317513895695736, 1.6290862215089341, 1.583179580563746, 2.3317517718530487, 1.629086273471926, 1.5831794517394973, 2.3317514742466026, 1.629086326246853, 1.5831793647918584, 2.3317510961040324, 1.6290862893642488, 1.583179357650647, 2.3317509213902934, 1.629086379389047, 1.5831793466774813, 2.331751016773598, 1.6290862487394557, 1.5831795060981861, 2.331751350596469, 1.629086344775534, 1.583179237092884, 2.331750704884059, 1.6290861760511046, 1.583180031048655, 2.3317524781223806, 1.6290862227638085, 1.583179320520067, 2.3317511883963484, 1.62908615550431, 1.5831794393651841, 2.33175167156419, 1.6290861496589093, 1.583179491480598, 2.3317514438297446, 1.6290867286777644, 1.5831787218028068, 2.331749775035503, 1.629086341296889, 1.5831793785655173, 2.331751379257171, 1.6290862955034657, 1.5831794896813611, 2.331751501905547, 1.6290857775369425, 1.583179220493569, 2.331751008922027, 1.6290872482674832, 1.5831776309272405, 2.331747114910226, 1.6291104318632341, 1.5831327312118264, 2.3316489547138564, 1.629748307506696, 1.5818970811223332, 2.3289347056739356, 1.6468612471308757, 1.5492871064990756, 2.2555720960782186, -1.2840500262457888, 2.551876632332705, 0.9810837136085581, ] solution_x_luksan13 = [ 1.3086052241942845, 1.6216137111993358, 2.2886144747619968, 1.34232518154836, 1.6619515759503343, 2.3751337553163605, 1.343855296385397, 1.6630483896975998, 2.377485954810846, 1.3438990216611229, 1.6630791019372382, 2.3775518416632186, 1.3439002468996393, 1.6630799506592178, 2.3775536420160925, 1.3439002707105718, 1.6630799637087308, 2.377553663793456, 1.343900274442869, 1.6630799703922405, 2.377553677510941, 1.343900273735809, 1.663079968764641, 2.3775536820493994, 1.3439002754790357, 1.6630799760584611, 2.3775537070702106, 1.3439002833097773, 1.6630799799354958, 2.377553710130499, 1.343900279016161, 1.6630799791188497, 
2.377553709151317, 1.3439002771699002, 1.6630799775994893, 2.3775537046342916, 1.3439002881835356, 1.6630799956367304, 2.377553757775651, 1.3439002886986675, 1.663079991183404, 2.377553741510781, 1.3439002855650906, 1.6630799931177906, 2.3775537587908215, 1.3439002747672082, 1.6630799713724573, 2.377553681610918, 1.3439002854677142, 1.6630799870142512, 2.377553735404843, 1.3439002800895123, 1.6630799851565248, 2.3775537353686165, 1.3439002784725658, 1.663079982758718, 2.377553724300102, 1.3439002839408525, 1.663079984436528, 2.377553726731763, 1.3439002742018746, 1.6630799686940994, 2.377553673295027, 1.343900285055898, 1.663079986851812, 2.3775537287002404, 1.3439002797888209, 1.6630799728327539, 2.377553682977633, 1.3439002761687657, 1.6630799673959926, 2.3775536708099745, 1.3439002798986437, 1.6630799763732456, 2.3775537001976423, 1.3439002780805065, 1.6630799801142993, 2.3775537125819097, 1.3439002916123894, 1.6630800028052062, 2.3775537813761924, 1.3439003043419797, 1.6630800585945529, 2.3775539515552806, 1.3439012394354732, 1.6630827445655045, 2.3775621184439015, 1.3439344570959444, 1.6631783889211809, 2.37785290158486, 1.3451213454947373, 1.666597519847485, 2.38828410134482, 1.3886122940189465, 1.7939836213544151, 2.885811280705054, 4.723092522164855, 5.389923139518635, ] solution_x_luksan14 = [ -0.692015438465357, 0.4736192819939741, 1.2412079054782652, -0.4783782506182583, 0.23882842116612296, 1.094079486551747, -0.2641170714882838, 0.08143516139066553, 1.0679283525636625, -0.1494417718117012, 0.033925047353142675, 1.0809287152115696, -0.13657207013818243, 0.02960520381904979, 1.0827798919700173, -0.1362649003784561, 0.029450043690314048, 1.0828484754308758, -0.13625660575465595, 0.029445194398704543, 1.0828506279698982, -0.13625635947650283, 0.029445045430998882, 1.082850692128227, -0.13625634849900412, 0.029445040594224484, 1.0828506890538272, -0.13625634095713987, 0.029445038872369666, 1.0828506892763552, -0.1362563401019553, 0.029445038661562405, 
1.0828506865668028, -0.1362563352007162, 0.029445038297226032, 1.0828506897645718, -0.1362563399662581, 0.029445038930055314, 1.0828506855121347, -0.13625633491870875, 0.029445037517807717, 1.0828506970059786, -0.13625635176495168, 0.029445039882406293, 1.0828506971123373, -0.13625635316124712, 0.029445040472916795, 1.0828506840532126, -0.13625633315729457, 0.029445037711023858, 1.0828506931348947, -0.13625634492000505, 0.029445039530404147, 1.082850698256033, -0.1362563552104686, 0.029445040416000965, 1.0828506949203678, -0.13625634915587218, 0.02944504055043263, 1.0828506838415628, -0.13625633328649378, 0.029445037473531936, 1.0828506996527274, -0.1362563547825522, 0.029445040998217188, 1.0828506836226952, -0.13625633328336106, 0.029445037480123, 1.0828506844275172, -0.13625633160755746, 0.029445037363800216, 1.0828506910381746, -0.13625634142989135, 0.02944503900512941, 1.0828506970833573, -0.13625635262199126, 0.029445040274926663, 1.0828506858747455, -0.13625633628442352, 0.029445037869996665, 1.0828506843744086, -0.13625633006537194, 0.029445038276720837, 1.082850772934407, -0.13625645049722282, 0.029445066824156065, 1.0828531756767654, -0.13625960631817075, 0.029445905929758334, 1.082930785041219, -0.13636157993795717, 0.029472948037119334, 1.0853841906187118, -0.13957177103726676, 0.03033477911863786, 0.9997884952922044, 0.03012334667228848, 0.03001755818766747, ] solution_x_luksan15 = [ -2.8128120376543353, 1.2947106505184869, -2.81440005752448, 0.5152023646747894, -2.0089506145318516, 1.0515415361025104, -3.4386919581246973, 0.5351040934480955, -1.6697901903837928, 1.1273918094584376, -3.0058465049859246, 0.5987020506877548, -2.1273657980099876, 0.9192000873982219, -3.4535594653315687, 0.5623710862956811, -1.9386092963220432, 0.9821839081455959, -3.5093911936842477, 0.5501687807342822, -2.1778372107639, 0.9063872377904626, -3.424790911399904, 0.5665652947300606, -2.279179522364104, 0.8685030719682756, -3.145617106466628, 0.6099286765580767, 
-1.9284952155742618, 0.9691898205061202, -3.016417857801295, 0.6212423706928658, -1.9216427599092167, 0.9730403697373208, -3.285655214260605, 0.5820210274753043, -2.1458132202272386, 0.9058785725661358, -2.958575186535638, 0.6348101011039402, -1.9602973787877798, 0.952916227779147, -3.2178165098362, 0.5944426358988449, -1.72911123316871, 1.0594397548002128, -3.1978737322906863, 0.5844520814478443, -1.7679802490196521, 1.0524244273162242, -3.146733476316577, 0.5902404606592925, -2.128977747267094, 0.914704760106901, -3.2345320433357836, 0.5920343641247361, -2.2610082037405834, 0.8670334110534724, -3.4060367708426935, 0.576246511012118, -2.355877477880136, 0.8412164300993267, -3.081758650591148, 0.6241626014976729, -1.6525049806156873, 1.081267498611019, -2.963721310470893, 0.6194531536445858, -1.506685332675987, 1.1746494074889837, -2.9257857154408438, 0.6141113169233168, -2.1021910871806577, 0.9219352856117872, -3.3401105799593105, 0.5774971684592886, -1.7547078621368493, 1.0532221932141288, -3.3352090067149023, 0.5658942245431572, -2.353376910071777, 0.854027290091373, -3.2416948400866707, 0.5965555845011213, -1.9356880509818681, 0.9699374813403614, -3.292568634235838, 0.580974296002741, -1.6861988438404256, 1.0857879072493273, -3.1295933491980374, 0.5904364080984017, -1.9200181272009214, 0.9895807082139652, -3.2489040016986324, 0.582149002451738, -0.5601900925614515, 2.4870578822567175, -3.5588729390698672, -0.4666353059476335, ] solution_x_luksan16 = [ 10.23347504140921, 20.59876890029168, -35.3558957906405, 14.159724748767388, -6.192790944245422, 6.904260897808911, -12.426375656338006, 7.916405145745939, 9.883332945956017, -7.763552251913891, 9.829215962712826, -5.460412966230336, -2.0247375676154316, 2.292011784517189, -8.854644657757955, 6.501718109308879, -10.00818055426963, 6.969493641831156, 5.794385035460475, -4.827934342605883, -0.3589290720778674, 1.7351238328795198, -20.46786870905146, 15.073627999686598, -22.38633414526394, 14.87045990266218, 
-9.108444080860473, 5.493242761973028, -4.436790715837233, 3.35863879246109, -12.778497913669044, 9.51430783434134, 17.974890839174535, -14.543141451864756, -0.878903552061843, 3.937581796604095, -37.95441551229194, 27.217302740157244, 1.9384003997032608, -5.0732916754344854, -9.834240436833431, 9.928282181783743, 24.901369369122225, -20.6810518922668, 5.609710258509786, 0.40845702633880704, 7.517485147363342, -6.744213821972444, 6.283831933809041, -2.719582209866709, 1.1727545558714283, -0.5901766790421645, -10.83801088444533, 8.63096398021895, 4.262706411344401, -4.302452961158821, -1.1028476495022923, 2.413241731236865, -4.9611988982883455, 3.2905463368286787, -11.011449594878954, 8.354169868673353, 17.534906240840428, -14.074846099952074, 1.5433478836011245, 1.9967416934301132, -5.0398763810168425, 2.8962555845029465, -2.503087792307807, 2.18971326359533, -5.425121985743007, 4.100312921945939, -7.90674647105912, 5.736740005101778, -10.955014606035165, 7.825133686094932, 13.983943593069979, -11.161214770039159, 4.399884927901469, -0.7147360927864606, 16.135713247282684, -12.343832004737795, -6.450092935863761, 7.476113508806722, -22.97438911325564, 15.605814430808019, 24.62552916846743, -20.02790069712666, 32.528132112236335, -20.037974911481278, -1.5215783873530375, 3.528694334566693, 7.356145013974544, -6.400505214585759, -0.5849689371220534, 2.3004991729965987, -7.749666453881994, 5.308798604551641, -12.801456405933285, 9.38466573197068, ] solution_x_luksan17 = [ -0.8437781517444214, 5.211601301412691, -0.8255686986099557, -1.1224808830025863, -0.9115016416553087, -1.0449996789508758, -0.8370067076638087, -1.1138351780732039, -0.8829775814636974, -1.0662539739396077, -1.0011441498985942, -0.9971962219303508, -0.9921060603499269, -1.0107562326286939, -0.9812584988122448, -1.0142511875210711, -0.9884985874862595, -1.0098353738374723, -0.9872157109718606, -1.0112133037704427, -0.9864952703702744, -1.0113724963264314, -0.9870842717327896, -1.011035340568662, 
-0.9869391605540793, -1.0111661235119436, -0.9868977564988685, -1.0111670258624144, -0.9869449215955661, -1.0111419398459263, -0.9869301696083717, -1.0111537677001405, -0.9869283405594838, -1.0111528331131907, -0.986932002795563, -1.0111510466790732, -0.9869305922818844, -1.0111520809826104, -0.9869305749770071, -1.011151923374323, -0.9869308492975264, -1.0111518042563021, -0.9869307203053385, -1.0111518914759727, -0.9869307309805547, -1.0111518713966636, -0.9869307504505271, -1.011151864732727, -0.9869307391816526, -1.0111518711394447, -0.986930743051711, -1.0111518679059937, -0.9869307405857761, -1.011151871272733, -0.9869307406810994, -1.0111518690271912, -0.9869307442357795, -1.0111518679286133, -0.9869307403926516, -1.0111518706700777, -0.9869307426486206, -1.011151868044014, -0.9869307395007838, -1.0111518726198288, -0.9869307376627915, -1.0111518716425005, -0.9869307459735903, -1.0111518650959377, -0.9869307441838526, -1.011151868558413, -0.9869307370598098, -1.011151873493444, -0.9869307388844694, -1.011151870953494, -0.9869307414667764, -1.0111518699114064, -0.9869307413827505, -1.0111518693727195, -0.9869307428780796, -1.0111518683864686, -0.986930742414142, -1.0111518688900905, -0.9869307423425782, -1.0111518687612688, -0.9869307388492422, -1.0111518725840953, -0.9869307379963231, -1.0111518716476386, -0.9869307390986436, -1.0111518722894763, -0.9869307349748873, -1.011151874864258, -0.986930740183915, -1.0111518696551045, -0.9869307419219207, -1.011151869862698, -0.9869307393375487, -1.0111518711716971, -0.98693074172968, -1.0111518690786507, ] solution_x_luksan21 = [ -6.072657993990953, -11.151678306331249, -15.281680185264895, -18.553572785910436, -21.08878192552139, -23.018420220928494, -24.46705052798465, -25.543512880064885, -26.337637149910563, -26.920674289315414, -27.347575624380077, -27.65988732407303, -27.88857759276986, -28.05649817926434, -28.18038720009252, -28.272439283160423, -28.341505869674798, -28.39399571405599, -28.434542523545463, 
-28.466491445203996, -28.492253969229775, -28.51356599746666, -28.53167622506297, -28.54748213206502, -28.561629796383773, -28.574583635599762, -28.58667947877867, -28.59815838663125, -28.609194634285124, -28.61991238997938, -28.630402327046635, -28.640728722950577, -28.650938105649047, -28.661061977374047, -28.671123050731868, -28.68113826085573, -28.69112022311497, -28.701078262087883, -28.71101815506324, -28.72094477397387, -28.730862903992644, -28.740776507637953, -28.75068864243215, -28.760599670061964, -28.77051000987731, -28.780418360465934, -28.790323772732222, -28.80022409430249, -28.810119012418326, -28.82001016509087, -28.829900708980315, -28.839793590776342, -28.849689118959287, -28.859586440413942, -28.86948336412596, -28.879378174667828, -28.88926990461213, -28.8991581591508, -28.909043599093003, -28.918925344790882, -28.92880159055073, -28.938669244304233, -28.94852535832674, -28.95836634260835, -28.96818485329044, -28.97796943084888, -28.98770372929353, -28.997369201223027, -29.006941800115108, -29.016389375603524, -29.025665682937056, -29.03470507627482, -29.04341491397447, -29.051663075623473, -29.059265584091445, -29.065967527124695, -29.07141754001239, -29.07512741421742, -29.07641564980266, -29.074334399240477, -29.067568644255143, -29.054295689963208, -29.0319862391214, -28.997130480357225, -28.944864342494807, -28.86845848063996, -28.758620076438536, -28.602552441434927, -28.38270523359173, -28.075144666563702, -27.647479062397462, -27.056331868057303, -26.24447489795629, -25.137976027248023, -23.64410279764132, -21.651318090974016, -19.03331370504794, -15.65922569153753, -11.41085934966044, -6.2035977373817195, ] solution_x_luksan22 = [ 0.960000892597201, 0.9188402401000288, 0.8346999741225976, 0.6824138623323005, 0.44429684165068956, 0.16679643843737746, -0.023004295169824662, 0.015327433907712216, -0.009931088912543472, 0.006684223516831371, -0.004411575260245885, 0.0029571424982889867, -0.001966221180203038, 0.001316579307305194, 
-0.0008787716047566526, 0.0005864520202429219, -0.00039319743428295955, 0.000262756179445633, -0.0001760817809970661, 0.00011752859355313591, -7.938290213698181e-05, 5.4582276287576205e-05, -3.676956244840184e-05, 2.2892784894905886e-05, -1.4844959572369287e-05, 8.978467841981641e-06, -7.419590428825042e-06, 5.0134136153247085e-06, -4.8296331309899045e-06, 4.4148810133672814e-06, -5.451937781671956e-06, 5.28902886393822e-06, -5.486579973708986e-06, 4.589797533333856e-06, -3.686156736885388e-06, 1.3813327052080713e-06, -2.875286216624316e-07, -1.966618190117126e-07, -5.984127105225435e-08, -7.936593253722943e-07, 2.1703743349261694e-07, -8.894851154156264e-07, 3.573122556113253e-07, 8.966129390508038e-08, -1.3677877479650928e-06, 6.43379960693636e-07, -6.357331910140075e-07, -1.7198971213860295e-07, 7.720236116499906e-07, -1.5538185090359862e-06, 8.378707374504217e-07, -2.73224811890867e-07, 1.0658698182255397e-07, -7.054426255706567e-07, 5.652296338005435e-08, 2.3167064968509738e-07, -7.1526763080569e-07, 1.1597873712533682e-08, -2.863395818025362e-07, -8.633020826805924e-07, 4.2697411766659695e-07, -1.1541837705801389e-06, 4.7551953352402166e-07, 3.5199581610952056e-07, -2.5742042022428385e-06, 2.499335480925905e-06, -4.079663388392796e-06, 3.723178696281719e-06, -2.919751243874922e-06, 1.4829783071179273e-06, -1.1191190311723908e-06, 5.588506976522976e-07, -4.825768343696162e-07, 9.863313432267673e-07, -1.3577296762331155e-06, 1.0976659429101464e-06, -1.3266509165639996e-06, 8.804177825565544e-07, -1.59655791383051e-06, 2.2532672723180185e-07, -1.5600238860251166e-06, 1.7351280778268136e-06, -1.618517725020847e-06, 1.287719767348254e-06, -1.2077855625401572e-06, -1.226034002734674e-06, 1.2900045894163097e-06, -1.883605391514802e-06, -1.279989121101194e-06, 1.7054561610327153e-06, -4.501630088393006e-06, 3.38756209930697e-06, -5.221884694101911e-06, 6.8241273128299335e-06, -7.215782067624106e-06, 4.201540035034105e-06, -3.7683204911021953e-06, 
2.681881990832518e-07, 7.907138005741486e-07, 3.0687261099791043, ] solution_x_hydcar20 = [ 2.6759127057952494e-07, 0.0020075973345538593, 0.9979921350741772, 1.1074382518158015e-06, 0.003926319294260064, 0.9960725732674882, 4.473878508666662e-06, 0.007432799593801248, 0.9925627265276902, 1.791855353550396e-05, 0.013805819144958344, 0.9861762623015062, 7.125759668355649e-05, 0.025273323829558065, 0.9746554185737586, 0.0002803534439901862, 0.04553620774789967, 0.9541834388081104, 0.001082782951168712, 0.08018717893154169, 0.9187300381172898, 0.0040498954829321235, 0.13606692468745604, 0.859883179829612, 0.014346338806928278, 0.21713770052872539, 0.7685159606643465, 0.04646668013286012, 0.31340489138177297, 0.640128428485367, 0.05166573387892863, 0.42190983200015447, 0.526424434120917, 0.05782376960889372, 0.5508311256800345, 0.39134510471107226, 0.0640697514942442, 0.6757845188863061, 0.26014572961945, 0.07002653957502589, 0.7737845233677303, 0.15618893705724401, 0.07687228280882921, 0.836427017007432, 0.08670070018373897, 0.08838584185008973, 0.8661393596329539, 0.04547479851695662, 0.11319983773418042, 0.8640793740698467, 0.022720788195972993, 0.16949917403437423, 0.8198575022045703, 0.010643323761055593, 0.2889793132934084, 0.7066247193552677, 0.004395967351324038, 0.4999998216058201, 0.4986616017769641, 0.0013385766172157153, 138.21584043438, 138.13772749931923, 137.99511335767673, 137.73620245773773, 137.27057972483576, 136.44580451915726, 135.01759125081972, 132.61817941469988, 128.72481163454674, 122.65561152670969, 119.34453517365907, 115.7706674624033, 112.60284049804272, 110.23710719573587, 108.6126130079421, 107.35503374607997, 105.88999629459819, 103.3849389558914, 98.83590533616024, 92.07928040428027, 290.69252437645963, 290.65561780338373, 290.59025193849226, 290.4776378323827, 290.2922675253222, 290.0081841660331, 289.615549643397, 289.14599494303985, 288.7724567817456, 277.8716922412405, 280.6638306878377, 284.24018735028017, 287.7729960147694, 
290.52761603695006, 292.2929262974615, 293.2381872872007, 293.71591921265576, 294.59496871170194, 298.03881543410006, ] solution_x_hydcar6 = [ 0.005639001019299217, 0.13023671278902904, 0.8641242861916716, 0.020668138451243257, 0.22352172363375894, 0.7558101379149978, 0.06627260583120169, 0.3309386117749258, 0.6027887823938726, 0.11820814762835632, 0.4418686949212894, 0.43992315745035443, 0.24917284487890107, 0.500513105207918, 0.2503140499131811, 0.49624066598713407, 0.4131755248073142, 0.09058380920555179, 132.63808144202352, 127.84951651511513, 120.39649297520073, 113.59593052273564, 104.00971299398577, 93.2001658166645, 288.8739911256674, 288.5263717741471, 275.6104698527219, 280.00166220797604, 289.1545657231987, ] solution_x_methane = [ 107.7653795798063, 0.09225723236421929, 0.9077427676357807, 102.68498908598441, 0.1821995285312063, 0.8178004714687936, 97.71772476055263, 0.28421889854366617, 0.7157811014563338, 96.57726115013135, 0.30530731675985745, 0.6946926832401424, 94.26309926727772, 0.3566490103973165, 0.6433509896026832, 89.98899748591788, 0.46779111102816195, 0.5322088889718375, 83.97342066984532, 0.6573895966863115, 0.34261040331368847, 78.32157508655418, 0.8759450903481356, 0.12405490965186436, 886.7137742582912, 910.3656929177117, 922.1591291059583, 926.0766775727482, 935.1735260591255, 952.4236258294623, 975.0192103423753, ] CARTIS_ROBERTS_PROBLEMS = { "arglale": { # arglale is the same as linear_full_rank with specific settings "fun": partial(linear_full_rank, dim_out=400), "start_x": [1] * 100, "solution_x": [-0.99999952] * 100, "start_criterion": 700, "solution_criterion": 300, }, "arglble": { # arglble is the same as linear_rank_one with specific settings "fun": partial(linear_rank_one, dim_out=400), "start_x": [1] * 100, "solution_x": solution_x_arglble, "start_criterion": 5.460944e14, "solution_criterion": 99.62547, }, "argtrig": { "fun": argtrig, "start_x": [1 / 100] * 100, "solution_x": [0] * 100, "start_criterion": 32.99641, 
"solution_criterion": 0, }, "artif": { "fun": artif, "start_x": [1] * 100, "solution_x": None, "start_criterion": 36.59115, "solution_criterion": 0, }, "arwhdne": { "fun": arwhdne, "start_x": [1] * 100, "solution_x": [0.706011] * 99 + [0], "start_criterion": 495, "solution_criterion": 27.66203, }, "bdvalues": { "fun": bdvalues, "start_x": get_start_points_bdvalues(100, 1000), "solution_x": solution_x_bdvalues, "start_criterion": 1.943417e7, "solution_criterion": 0, }, "bratu_2d": { "fun": partial(bratu_2d, alpha=4), "start_x": [0] * 64, "solution_x": solution_x_bratu_2d, "start_criterion": 0.1560738, "solution_criterion": 0, }, "bratu_2d_t": { "fun": partial(bratu_2d, alpha=6.80812), "start_x": [0] * 64, "solution_x": solution_x_bratu_2d_t, "start_criterion": 0.4521311, "solution_criterion": 1.8534736e-05, }, "bratu_3d": { "fun": partial(bratu_3d, alpha=6.80812), "start_x": [0] * 27, "solution_x": solution_x_bratu_3d, "start_criterion": 4.888529, "solution_criterion": 0, }, "brownale": { "fun": brown_almost_linear, "start_x": [0.5] * 100, "solution_x": [1] * 100, "start_criterion": 2.524757e5, "solution_criterion": 0, }, "broydn_3d": { "fun": broydn_3d, "start_x": [-1] * 100, "solution_x": solution_x_broydn_3d, "start_criterion": 111, "solution_criterion": 0, }, "cbratu_2d": { "fun": cbratu_2d, "start_x": [0] * (2 * 5 * 5), "solution_x": solution_x_cbratu_2d, "start_criterion": 0.4822531, "solution_criterion": 0, }, "broydn_bd": { "fun": broydn_bd, "start_x": [1] * 100, "solution_x": solution_x_broydn_bd, "start_criterion": 2404, "solution_criterion": 0, }, "chandheq": { "fun": chandheq, "start_x": (np.arange(1, 101) / 100).tolist(), "solution_x": None, "start_criterion": 6.923365, "solution_criterion": 0, }, "chemrcta": { "fun": chemrcta, "start_x": [1] * 100, "solution_x": None, "start_criterion": 3.0935, "solution_criterion": 0, "bounds": Bounds(lower=np.concatenate([np.zeros(50), 1e-6 * np.ones(50)])), }, "chemrctb": { "fun": chemrctb, "start_x": [1] * 100, 
"solution_x": solution_x_chemrctb, "start_criterion": 1.446513, "solution_criterion": 1.404424e-3, "bounds": Bounds(lower=1e-6 * np.ones(100)), }, "chnrsbne": { "fun": chnrsbne, "start_x": [-1] * 50, "solution_x": [1] * 50, "start_criterion": 7635.84, "solution_criterion": 0, }, "drcavty1": { "fun": partial(drcavty, r=500), "start_x": [0] * 100, "solution_x": None, "start_criterion": 0.4513889, "solution_criterion": 0, }, "drcavty2": { "fun": partial(drcavty, r=1000), "start_x": [0] * 100, "solution_x": solution_x_drcavty2, "start_criterion": 0.4513889, "solution_criterion": 3.988378e-4, }, "drcavty3": { "fun": partial(drcavty, r=4500), "start_x": [0] * 100, "solution_x": solution_x_drcavty3, "start_criterion": 0.4513889, "solution_criterion": 0, }, "eigena": { "fun": partial(eigen, param=np.diag(np.arange(1, 11))), "start_x": [1] * 10 + np.eye(10).flatten().tolist(), "solution_x": [*np.arange(1, 11).tolist(), 1] + ([0] * 10 + [1]) * 9, "start_criterion": 285, "solution_criterion": 0, "bounds": Bounds(lower=np.zeros(110)), }, "eigenb": { "fun": partial( eigen, param=np.diag(2 * np.ones(10)) + np.diag(-np.ones(9), k=1) ), "start_x": [1] * 10 + np.eye(10).flatten().tolist(), "solution_x": solution_x_eigenb, "start_criterion": 19, "solution_criterion": 1.55654284, # we suspect a typo in Cartis and Roberts (2019); # according to table 3 in their paper, the minimum is at 0. 
}, "flosp2hh": { "fun": partial( flosp2, a=np.array([1, 0, -1], dtype=np.int64), b=np.array([1, 0, -1], dtype=np.int64), ra=1e7, ), "start_x": [0] * 59, "solution_x": None, # multiple argmins "start_criterion": 519, "solution_criterion": 1 / 3, }, "flosp2hl": { "fun": partial( flosp2, a=np.array([1, 0, -1], dtype=np.float64), b=np.array([1, 0, -1], dtype=np.float64), ra=1e3, ), "start_x": [0] * 59, "solution_x": None, # multiple argmins "start_criterion": 519, "solution_criterion": 1 / 3, }, "flosp2hm": { "fun": partial( flosp2, a=np.array([1, 0, -1], dtype=np.float64), b=np.array([1, 0, -1], dtype=np.float64), ra=1e5, ), "start_x": [0] * 59, "solution_x": None, # multiple argmins "start_criterion": 519, "solution_criterion": 1 / 3, }, "flosp2th": { "fun": partial( flosp2, a=np.array([0, 1, 0], dtype=np.float64), b=np.array([0, 1, 1], dtype=np.float64), ra=1e7, ), "start_x": [0] * 59, "solution_x": None, # multiple argmins "start_criterion": 516, "solution_criterion": 0, }, "flosp2tl": { "fun": partial( flosp2, a=np.array([0, 1, 0], dtype=np.float64), b=np.array([0, 1, 1], dtype=np.float64), ra=1e3, ), "start_x": [0] * 59, "solution_x": None, # multiple argmins "start_criterion": 516, "solution_criterion": 0, }, "flosp2tm": { "fun": partial( flosp2, a=np.array([0, 1, 0], dtype=np.float64), b=np.array([0, 1, 1], dtype=np.float64), ra=1e5, ), "start_x": [0] * 59, "solution_x": None, # multiple argmins "start_criterion": 516, "solution_criterion": 0, }, "freurone": { "fun": freurone, "start_x": [0.5, -2] + [0] * 98, "solution_x": solution_x_freurone, "start_criterion": 9.95565e4, "solution_criterion": 1.196458e4, }, "hatfldg": { "fun": hatfldg, "start_x": [1] * 25, "solution_x": [0] * 11 + [-1, 1] + [0] * 12, "start_criterion": 27, "solution_criterion": 0, }, "hydcar20": { "fun": partial(hydcar, n=20, m=3, k=9), "start_x": get_start_points_hydcar20(), "solution_x": solution_x_hydcar20, "start_criterion": 1341.663, "solution_criterion": 0, }, "hydcar6": { "fun": 
partial(hydcar, n=6, m=3, k=2), "start_x": get_start_points_hydcar6(), "solution_x": solution_x_hydcar6, "start_criterion": 704.1073, "solution_criterion": 0, }, "integreq": { "fun": integreq, "start_x": (np.arange(1, 101) / 101 * (np.arange(1, 101) / 101 - 1)).tolist(), "solution_x": solution_x_integreq, "start_criterion": 0.5730503, "solution_criterion": 0, }, "luksan11": { "fun": luksan11, "start_x": [-0.8] * 100, "solution_x": [1] * 100, "start_criterion": 626.0640, "solution_criterion": 0, }, "luksan12": { "fun": luksan12, "start_x": [-1] * 98, "solution_x": None, "start_criterion": 3.2160e4, "solution_criterion": None, # we found a lower minimum than Cartis and Roberts (2019) at 1651.837; # according to table 3 in their paper, the minimum is at 4292.197. # We suspect, however, that the true optimum is even lower. # That is why we disable this test function for the time being. }, "luksan13": { "fun": luksan13, "start_x": [-1] * 98, "solution_x": solution_x_luksan13, "start_criterion": 6.4352e4, "solution_criterion": 24949.67040503685711883, # we found a lower minimum than Cartis and Roberts (2019); # according to table 3 in their paper, the minimum is at 25188.86 }, "luksan14": { "fun": luksan14, "start_x": [-1] * 98, "solution_x": solution_x_luksan14, "start_criterion": 2.6880e4, "solution_criterion": 123.9235, }, "luksan15": { "fun": luksan15, "start_x": [-0.8, 1.2, -1.2, 0.8] * 25, "solution_x": solution_x_luksan15, "start_criterion": 2.701585e4, "solution_criterion": 3.569697, }, "luksan16": { "fun": luksan16, "start_x": [-0.8, 1.2, -1.2, 0.8] * 25, "solution_x": solution_x_luksan16, "start_criterion": 1.306848e4, "solution_criterion": 3.569697, }, "luksan17": { "fun": luksan17, "start_x": [-0.8, 1.2, -1.2, 0.8] * 25, "solution_x": None, # multiple argmins "start_criterion": 1.687370e6, "solution_criterion": 0.4931613, }, "luksan21": { "fun": luksan21, "start_x": [ih * (ih - 1) for ih in np.arange(1, 101) * (1 / 101)], "solution_x": solution_x_luksan21, 
"start_criterion": 99.98751, "solution_criterion": 0, }, "luksan22": { "fun": luksan22, "start_x": [-1.2 if i % 2 == 0 else 1 for i in range(100)], "solution_x": solution_x_luksan22, "start_criterion": 2.487686e4, "solution_criterion": 872.9230, }, "methanb8": { "fun": methane, "start_x": get_start_points_methanb8(), "solution_x": solution_x_methane, "start_criterion": 1.043105, "solution_criterion": 0, }, "methanl8": { "fun": methane, "start_x": get_start_points_methanl8(), "solution_x": solution_x_methane, "start_criterion": 4345.100, "solution_criterion": 0, }, "morebvne": { "fun": morebvne, "start_x": [t * (t - 1) for t in np.arange(1, 101) * (1 / 101)], "solution_x": solution_x_morebvne, "start_criterion": 3.633100e-4, "solution_criterion": 0, }, "msqrta": { "fun": msqrta, "start_x": get_start_points_msqrta(10), "solution_x": solution_x_msqrta, "start_criterion": 212.7162, "solution_criterion": 0, }, "msqrtb": { "fun": msqrta, "start_x": get_start_points_msqrta(10, flag=2), "solution_x": solution_x_msqrtb, "start_criterion": 205.0753, "solution_criterion": 0, }, "oscigrne": { "fun": oscigrne, "start_x": [-2] + [1] * 99, "solution_x": solution_x_oscigrne, "start_criterion": 6.120720e8, "solution_criterion": 0, }, "penalty_1": { "fun": penalty_1, "start_x": list(range(1, 101)), "solution_x": None, "start_criterion": 1.144806e11, "solution_criterion": 9.025000e-9, }, "penalty_2": { "fun": penalty_2, "start_x": [0.5] * 100, "solution_x": solution_x_penalty2, "start_criterion": 1.591383e6, "solution_criterion": 0.9809377, }, "powellse": { "fun": powell_singular, "start_x": [3.0, -1.0, 0.0, 1] * 25, "solution_x": [0] * 100, "start_criterion": 41875, "solution_criterion": 0, }, "qr3d": { "fun": partial(qr3d, m=5), "start_x": get_start_points_qr3d(5), "solution_x": solution_x_qr3d, "start_criterion": 1.2, "solution_criterion": 0, "bounds": Bounds( lower=[-np.inf] * 25 + [0 if i == j else -np.inf for i in range(5) for j in range(5)] ), }, "qr3dbd": { "fun": 
partial(qr3dbd, m=5), "start_x": get_start_points_qr3dbd(5), "solution_x": solution_x_qr3dbd, "start_criterion": 1.2, "solution_criterion": 0, "bounds": Bounds( lower=[-np.inf] * 25 + [0 if i == j else -np.inf for i in range(5) for j in range(5)] ), }, "spmsqrt": { "fun": spmsqrt, "start_x": get_start_points_spmsqrt(34), "solution_x": solution_x_spmsqrt, "start_criterion": 74.33542, "solution_criterion": 0, }, "semicn2u": { "fun": semicon2, "start_x": [0] * 100, "solution_x": solution_x_semicon2, "start_criterion": 2.025037e4, "solution_criterion": 0, }, "semicon2": { "fun": semicon2, "start_x": [0] * 100, "solution_x": solution_x_semicon2, "start_criterion": 2.025037e4, "solution_criterion": 0, "bounds": Bounds(lower=-5 * np.ones(100), upper=0.2 * 700 * np.ones(100)), }, "vardimne": { "fun": vardimne, "start_x": [1 - i / 100 for i in range(1, 101)], "solution_x": [1] * 100, "start_criterion": 1.310584e14, "solution_criterion": 0, }, "watsonne": { "fun": watson, "start_x": [0] * 31, "solution_x": solution_x_watson, "start_criterion": 30, "solution_criterion": 0, }, "yatpsq_1": { "fun": partial(yatpsq_1, dim_in=10), "start_x": [6] * 100 + [0] * 20, "solution_x": solution_x_yatpsq_1, "start_criterion": 2.073643e6, "solution_criterion": 0, }, "yatpsq_2": { "fun": partial(yatpsq_2, dim_in=10), "start_x": [10] * 100 + [0] * 20, "solution_x": solution_x_yatpsq_2, "start_criterion": 1.831687e5, "solution_criterion": 0, }, } ================================================ FILE: src/optimagic/benchmarking/get_benchmark_problems.py ================================================ from functools import partial, wraps import numpy as np from optimagic import mark from optimagic.benchmarking.cartis_roberts import CARTIS_ROBERTS_PROBLEMS from optimagic.benchmarking.more_wild import MORE_WILD_PROBLEMS from optimagic.benchmarking.noise_distributions import NOISE_DISTRIBUTIONS from optimagic.shared.process_user_function import infer_aggregation_level from optimagic.typing import 
AggregationLevel from optimagic.utilities import get_rng def get_benchmark_problems( name, *, additive_noise=False, additive_noise_options=None, multiplicative_noise=False, multiplicative_noise_options=None, scaling=False, scaling_options=None, seed=None, exclude=None, ): """Get a dictionary of test problems for a benchmark. Args: name (str): The name of the set of test problems. Currently "more_wild" is the only supported one. additive_noise (bool): Whether to add additive noise to the problem. Default False. additive_noise_options (dict or None): Specifies the amount and distribution of the addititve noise added to the problem. Has the entries: - distribition (str): One of "normal", "gumbel", "uniform", "logistic", "laplace". Default "normal". - std (float): The standard deviation of the noise. This works for all distributions, even if those distributions are normally not specified via a standard deviation (e.g. uniform). - correlation (float): Number between 0 and 1 that specifies the auto correlation of the noise. multiplicative_noise (bool): Whether to add multiplicative noise to the problem. Default False. multiplicative_noise_options (dict or None): Specifies the amount and distribition of the multiplicative noise added to the problem. Has entries: - distribition (str): One of "normal", "gumbel", "uniform", "logistic", "laplace". Default "normal". - std (float): The standard deviation of the noise. This works for all distributions, even if those distributions are normally not specified via a standard deviation (e.g. uniform). - correlation (float): Number between 0 and 1 that specifies the auto correlation of the noise. - clipping_value (float): A non-negative float. Multiplicative noise becomes zero if the function value is zero. To avoid this, we do not implement multiplicative noise as `f_noisy = f * epsilon` but by `f_noisy` = f + (epsilon - 1) * f_clipped` where f_clipped is bounded away from zero from both sides by the clipping value. 
scaling (bool): Whether the parameter space of the problem should be rescaled. scaling_options (dict): Dict containing the keys "min_scale", and "max_scale". If scaling is True, the parameters the optimizer sees are the standard parameters multiplied by np.linspace(min_scale, max_scale, len(params)). If min_scale and max_scale have very different orders of magnitude, the problem becomes harder to solve for many optimizers. seed (Union[None, int, numpy.random.Generator]): If seed is None or int the numpy.random.default_rng is used seeded with seed. If seed is already a Generator instance then that instance is used. exclude (str or List): Problems to exclude. Returns: dict: Nested dictionary with benchmark problems of the structure: {"name": {"inputs": {...}, "solution": {...}, "info": {...}}} where "inputs" are keyword arguments for ``minimize`` such as the criterion function and start parameters. "solution" contains the entries "params" and "value" and "info" might contain information about the test problem. 
""" if exclude is None: exclude = {} elif isinstance(exclude, str): exclude = [exclude] else: exclude = set(exclude) rng = get_rng(seed) raw_problems = _get_raw_problems(name) raw_problems = {k: v for k, v in raw_problems.items() if k not in exclude} is_noisy = bool(additive_noise or multiplicative_noise) if additive_noise: additive_options = _process_noise_options(additive_noise_options, False) else: additive_options = None if multiplicative_noise: multiplicative_options = _process_noise_options( multiplicative_noise_options, True ) else: multiplicative_options = None if scaling: scaling_options = scaling_options if scaling_options is not None else {} scaling_options = {"min_scale": 0.1, "max_scale": 10, **scaling_options} else: scaling_options = None problems = {} for prob_name, specification in raw_problems.items(): inputs = _create_problem_inputs( specification, additive_options=additive_options, multiplicative_options=multiplicative_options, scaling_options=scaling_options, rng=rng, ) problems[prob_name] = { "inputs": inputs, "noise_free_fun": specification["fun"], "solution": _create_problem_solution( specification, scaling_options=scaling_options ), "noisy": is_noisy, "info": specification.get("info", {}), "start_criterion": specification["start_criterion"], } return problems def _get_raw_problems(name): if name == "more_wild": raw_problems = MORE_WILD_PROBLEMS elif name == "cartis_roberts": raw_problems = CARTIS_ROBERTS_PROBLEMS elif name == "example": subset = { "rosenbrock_good_start", "helical_valley_good_start", "powell_singular_good_start", "freudenstein_roth_good_start", "bard_good_start", "box_3d", "brown_dennis_good_start", "chebyquad_6", "bdqrtic_8", "mancino_5_good_start", } raw_problems = {k: v for k, v in MORE_WILD_PROBLEMS.items() if k in subset} elif name == "estimagic": subset_mw = { "cube_8", "chebyquad_6", "bdqrtic_8", "linear_full_rank_bad_start", "chebyquad_7", "osborne_two_bad_start", "bdqrtic_10", "bdqrtic_11", "heart_eight_bad_start", 
"mancino_5_bad_start", "chebyquad_8", "cube_6", "cube_5", "bdqrtic_12", "chebyquad_10", "chebyquad_9", "chebyquad_11", "mancino_8", "mancino_10", "mancino_12_bad_start", } subset_cr = { "hatfldg", "bratu_3d", "cbratu_2d", "chnrsbne", "bratu_2d", "vardimne", "penalty_1", "arglale", "arglble", } subset_add_steps = { "rosenbrock_good_start", "cube_5", "chebyquad_10", } raw_problems = {} for k, v in MORE_WILD_PROBLEMS.items(): if k in subset_mw: raw_problems[k] = v if k in subset_add_steps: problem = v.copy() raw_func = problem["fun"] problem["fun"] = wraps(raw_func)(partial(_step_func, raw_func=raw_func)) raw_problems[f"{k}_with_steps"] = problem for k, v in CARTIS_ROBERTS_PROBLEMS.items(): if k in subset_cr: raw_problems[k] = v else: raise NotImplementedError() return raw_problems def _step_func(x, raw_func): return raw_func(x.round(3)) def _create_problem_inputs( specification, additive_options, multiplicative_options, scaling_options, rng ): _x = np.array(specification["start_x"]) if scaling_options is not None: scaling_factor = _get_scaling_factor(_x, scaling_options) _x = _x * scaling_factor else: scaling_factor = None problem_type = infer_aggregation_level(specification["fun"]) problem_type_to_marker = { AggregationLevel.SCALAR: mark.scalar, AggregationLevel.LIKELIHOOD: mark.likelihood, AggregationLevel.LEAST_SQUARES: mark.least_squares, } _criterion = partial( _internal_criterion_template, criterion=specification["fun"], additive_options=additive_options, multiplicative_options=multiplicative_options, scaling_factor=scaling_factor, rng=rng, ) _criterion = problem_type_to_marker[problem_type](_criterion) inputs = {"fun": _criterion, "params": _x} return inputs def _create_problem_solution(specification, scaling_options): _solution_x = specification.get("solution_x") if _solution_x is None: _solution_x = np.array(specification["start_x"]) * np.nan elif isinstance(_solution_x, list): _solution_x = np.array(_solution_x) _params = _solution_x if scaling_options is 
not None: _params = _params * _get_scaling_factor(_params, scaling_options) _value = specification["solution_criterion"] solution = { "params": _params, "value": _value, } return solution def _get_scaling_factor(x, options): return np.linspace(options["min_scale"], options["max_scale"], len(x)) def _internal_criterion_template( params, criterion, additive_options, multiplicative_options, scaling_factor, rng ): if scaling_factor is not None: params = params / scaling_factor critval = criterion(params) noise = _get_combined_noise( critval, additive_options=additive_options, multiplicative_options=multiplicative_options, rng=rng, ) noisy_critval = critval + noise return noisy_critval def _get_combined_noise(fval, additive_options, multiplicative_options, rng): size = len(np.atleast_1d(fval)) if multiplicative_options is not None: options = multiplicative_options.copy() std = options.pop("std") clipval = options.pop("clipping_value") scaled_std = std * _clip_away_from_zero(fval, clipval) multiplicative_noise = _sample_from_distribution( **options, std=scaled_std, size=size, rng=rng ) else: multiplicative_noise = 0 if additive_options is not None: additive_noise = _sample_from_distribution( **additive_options, size=size, rng=rng ) else: additive_noise = 0 return multiplicative_noise + additive_noise def _sample_from_distribution(distribution, mean, std, size, rng, correlation=0): sample = NOISE_DISTRIBUTIONS[distribution](size=size, rng=rng) dim = size if isinstance(size, int) else size[1] if correlation != 0 and dim > 1: chol = np.linalg.cholesky(np.diag(np.ones(dim) - correlation) + correlation) sample = (chol @ sample.T).T sample = sample / sample.std() sample *= std sample += mean return sample def _process_noise_options(options, is_multiplicative): options = {} if options is None else options defaults = {"std": 0.01, "distribution": "normal", "correlation": 0, "mean": 0} if is_multiplicative: defaults["clipping_value"] = 1 processed = { **defaults, **options, } 
distribution = processed["distribution"] if distribution not in NOISE_DISTRIBUTIONS: raise ValueError( f"Invalid distribution: {distribution}. " "Allowed are {list(NOISE_DISTRIBUTIONS)}" ) std = processed["std"] if std < 0: raise ValueError(f"std must be non-negative. Not: {std}") corr = processed["correlation"] if corr < 0: raise ValueError(f"corr must be non-negative. Not: {corr}") if is_multiplicative: clipping_value = processed["clipping_value"] if clipping_value < 0: raise ValueError( f"clipping_value must be non-negative. Not: {clipping_value}" ) return processed def _clip_away_from_zero(a, clipval): is_scalar = np.isscalar(a) a = np.atleast_1d(a) is_positive = a >= 0 clipped = np.where(is_positive, np.clip(a, clipval, np.inf), a) clipped = np.where(~is_positive, np.clip(clipped, -np.inf, -clipval), clipped) if is_scalar: clipped = clipped[0] return clipped ================================================ FILE: src/optimagic/benchmarking/more_wild.py ================================================ """Define the More-Wild Benchmark Set. This benchmark set is contains 53 test cases for nonlinear least squares solvers. The test cases are built out of 22 functions, originally derived from the CUTEr Problems. It was used to benchmark all modern model based non-linear derivative free least squares solvers (e.g. POUNDERS, DFOGN, DFOLS). The parameter dimensions are quite small, varying between 2 and 12. The benchmark set was first described In More and Wild, 2009. Fortran and Matlab Code is available here. 
We use the following sources of information to construct the benchmark set:

- https://www.mcs.anl.gov/~more/dfo/fortran/dfovec.f for the function
  implementation
- https://www.mcs.anl.gov/~more/dfo/fortran/dfoxs.f for the base starting points
- https://www.mcs.anl.gov/~more/dfo/fortran/dfo.dat for:
    - The mapping test cases to criterion functions (column 1)
    - The dimensionalities of parameter vectors (column 2)
    - The dimensionalities of the output (column 3)
    - Whether the base start vector is multiplied by a factor of ten or not
      (column 4).

"""

from functools import partial

import numpy as np

from optimagic import mark


@mark.least_squares
def linear_full_rank(x, dim_out):
    """Residuals of the linear (full rank) problem with ``dim_out`` outputs."""
    temp = 2 * x.sum() / dim_out + 1
    out = np.full(dim_out, -temp)
    out[: len(x)] += x
    return out


@mark.least_squares
def linear_rank_one(x, dim_out):
    """Residuals of the linear (rank 1) problem."""
    dim_in = len(x)
    sm = np.arange(1, dim_in + 1) @ x
    fvec = np.arange(1, dim_out + 1) * sm - 1.0
    return fvec


@mark.least_squares
def linear_rank_one_zero_columns_rows(x, dim_out):
    """Residuals of the linear (rank 1, zero columns and rows) problem."""
    dim_in = len(x)
    sm = (np.arange(2, dim_in) * x[1:-1]).sum()
    fvec = np.arange(dim_out) * sm - 1.0
    # Last residual is constant by construction.
    fvec[-1] = -1.0
    return fvec


@mark.least_squares
def rosenbrock(x):
    """Residuals of the two-dimensional Rosenbrock problem."""
    fvec = np.zeros(2)
    fvec[0] = 10 * (x[1] - x[0] ** 2)
    fvec[1] = 1.0 - x[0]
    return fvec


@mark.least_squares
def helical_valley(x):
    """Residuals of the helical valley problem."""
    # temp = 2 * pi, computed via arctan(1) = pi / 4.
    temp = 8 * np.arctan(1.0)
    temp1 = np.sign(x[1]) * 0.25
    if x[0] > 0:
        temp1 = np.arctan(x[1] / x[0]) / temp
    elif x[0] < 0:
        temp1 = np.arctan(x[1] / x[0]) / temp + 0.5
    temp2 = np.sqrt(x[0] ** 2 + x[1] ** 2)
    fvec = np.zeros(3)
    fvec[0] = 10 * (x[2] - 10 * temp1)
    fvec[1] = 10 * (temp2 - 1.0)
    fvec[2] = x[2]
    return fvec


@mark.least_squares
def powell_singular(x):
    """Residuals of the Powell singular problem (4 parameters)."""
    fvec = np.zeros(4)
    fvec[0] = x[0] + 10 * x[1]
    fvec[1] = np.sqrt(5.0) * (x[2] - x[3])
    fvec[2] = (x[1] - 2 * x[2]) ** 2
    fvec[3] = np.sqrt(10.0) * (x[0] - x[3]) ** 2
    return fvec


@mark.least_squares
def freudenstein_roth(x):
    """Residuals of the Freudenstein and Roth problem (2 parameters)."""
    fvec = np.zeros(2)
    fvec[0] = -13 + x[0] + ((5 - x[1]) * x[1] - 2) * x[1]
    fvec[1] = -29 + x[0] + ((1.0 + x[1]) * x[1] - 14) * x[1]
    return fvec


@mark.least_squares
def bard(x, y):
    """Residuals of the Bard data-fitting problem for observations ``y``."""
    fvec = np.zeros(len(y))
    for i in range(1, round(len(y) / 2) + 1):
        temp = len(y) + 1 - i
        fvec[i - 1] = y[i - 1] - (x[0] + i / (x[1] * temp + x[2] * i))
    for i in range(round(len(y) / 2) + 1, len(y) + 1):
        temp = len(y) + 1 - i
        fvec[i - 1] = y[i - 1] - (x[0] + i / (x[1] * temp + x[2] * temp))
    return fvec


@mark.least_squares
def kowalik_osborne(x, y1, y2):
    """Residuals of the Kowalik and Osborne problem for data ``y1``, ``y2``."""
    temp1 = y1 * (y1 + x[1])
    temp2 = y1 * (y1 + x[2]) + x[3]
    fvec = y2 - x[0] * temp1 / temp2
    return fvec


@mark.least_squares
def meyer(x, y):
    """Residuals of the Meyer data-fitting problem for observations ``y``."""
    temp = 5 * np.arange(1, len(y) + 1) + 45 + x[2]
    temp1 = x[1] / temp
    temp2 = np.exp(temp1)
    fvec = x[0] * temp2 - y
    return fvec


@mark.least_squares
def watson(x):
    """Residuals of the Watson problem (31 residuals, variable dimension)."""
    dim_in = len(x)
    fvec = np.zeros(31)
    for i in range(1, 30):
        temp = i / 29
        sum_1 = (np.arange(1, dim_in) * temp ** np.arange(dim_in - 1) * x[1:]).sum()
        sum_2 = (temp ** np.arange(dim_in) * x).sum()
        fvec[i - 1] = sum_1 - sum_2**2 - 1.0
    fvec[29] = x[0]
    fvec[30] = x[1] - x[0] ** 2 - 1.0
    return fvec


@mark.least_squares
def box_3d(x, dim_out):
    """Residuals of the Box 3-dimensional problem with ``dim_out`` outputs."""
    fvec = np.zeros(dim_out)
    for i in range(1, dim_out + 1):
        fvec[i - 1] = (
            np.exp(-i / 10 * x[0])
            - np.exp(-i / 10 * x[1])
            + (np.exp(-i) - np.exp(-i / 10)) * x[2]
        )
    return fvec


@mark.least_squares
def jennrich_sampson(x, dim_out):
    """Residuals of the Jennrich and Sampson problem."""
    fvec = (
        2 * (1.0 + np.arange(1, dim_out + 1))
        - np.exp(np.arange(1, dim_out + 1) * x[0])
        - np.exp(np.arange(1, dim_out + 1) * x[1])
    )
    return fvec


@mark.least_squares
def brown_dennis(x, dim_out):
    """Residuals of the Brown and Dennis problem (4 parameters)."""
    fvec = np.zeros(dim_out)
    for i in range(1, dim_out + 1):
        temp = i / 5
        temp_1 = x[0] + temp * x[1] - np.exp(temp)
        temp_2 = x[2] + np.sin(temp) * x[3] - np.cos(temp)
        fvec[i - 1] = temp_1**2 + temp_2**2
    return fvec


@mark.least_squares
def chebyquad(x, dim_out):
    """Residuals of the Chebyquad problem (Chebyshev polynomial recurrence)."""
    fvec = np.zeros(dim_out)
    dim_in = len(x)
    for i in range(1, dim_in + 1):
        # Three-term recurrence for Chebyshev polynomials evaluated at
        # 2 * x[i-1] - 1, accumulated over all output dimensions.
        temp_1 = 1.0
        temp_2 = 2 * x[i - 1] - 1.0
        temp_3 = 2 * temp_2
        for j in range(dim_out):
            fvec[j] = fvec[j] + temp_2
            temp_4 = temp_3 * temp_2 - temp_1
            temp_1 = temp_2
            temp_2 = temp_4
    for i in range(1, dim_out + 1):
        fvec[i - 1] = fvec[i - 1] / dim_in
        if i % 2 == 0:
            fvec[i - 1] = fvec[i - 1] + 1 / (i**2 - 1.0)
    return fvec


@mark.least_squares
def brown_almost_linear(x):
    """Residuals of the Brown almost-linear problem."""
    dim_in = len(x)
    sm = -(dim_in + 1) + x.sum()
    product = x.prod()
    fvec = x + sm
    fvec[dim_in - 1] = product - 1.0
    return fvec


@mark.least_squares
def osborne_one(x, y):
    """Residuals of the Osborne 1 data-fitting problem (5 parameters)."""
    temp = 10 * np.arange(len(y))
    temp_1 = np.exp(-x[3] * temp)
    temp_2 = np.exp(-x[4] * temp)
    fvec = y - (x[0] + x[1] * temp_1 + x[2] * temp_2)
    return fvec


@mark.least_squares
def osborne_two(x, y):
    """Residuals of the Osborne 2 data-fitting problem (11 parameters)."""
    temp_array = np.zeros((4, len(y)))
    temp = np.arange(len(y)) / 10
    temp_array[0] = np.exp(-x[4] * temp)
    temp_array[1] = np.exp(-x[5] * (temp - x[8]) ** 2)
    temp_array[2] = np.exp(-x[6] * (temp - x[9]) ** 2)
    temp_array[3] = np.exp(-x[7] * (temp - x[10]) ** 2)
    fvec = y - (temp_array.T * x[:4]).T.sum(axis=0)
    return fvec


@mark.least_squares
def bdqrtic(x):
    """Residuals of the BDQRTIC problem."""
    # the length of array x should be more than 5.
    dim_in = len(x)
    fvec = np.zeros(2 * (dim_in - 4))
    for i in range(dim_in - 4):
        fvec[i] = -4 * x[i] + 3
        fvec[dim_in - 4 + i] = (
            x[i] ** 2
            + 2 * x[i + 1] ** 2
            + 3 * x[i + 2] ** 2
            + 4 * x[i + 3] ** 2
            + 5 * x[dim_in - 1] ** 2
        )
    return fvec


@mark.least_squares
def cube(x):
    """Residuals of the Cube problem."""
    fvec = 10 * (x - np.roll(x, 1) ** 3)
    fvec[0] = x[0] - 1.0
    return fvec


@mark.least_squares
def mancino(x):
    """Residuals of the Mancino problem."""
    dim_in = len(x)
    fvec = np.zeros(dim_in)
    for i in range(dim_in):
        sm = 0
        for j in range(dim_in):
            temp = np.sqrt(x[i] ** 2 + (i + 1) / (j + 1))
            sm += temp * ((np.sin(np.log(temp))) ** 5 + (np.cos(np.log(temp))) ** 5)
        fvec[i] = 1400 * x[i] + (i + 1 - 50) ** 3 + sm
    return fvec


@mark.least_squares
def heart_eight(x, y):
    """Residuals of the heart dipole problem (8 parameters, data ``y``)."""
    dim_y = len(y)
    fvec = np.zeros(dim_y)
    fvec[0] = x[0] + x[1] - y[0]
    fvec[1] = x[2] + x[3] - y[1]
    fvec[2] = x[4] * x[0] + x[5] * x[1] - x[6] * x[2] - x[7] * x[3] - y[2]
    fvec[3] = x[6] * x[0] + x[7] * x[1] + x[4] * x[2] + x[5] * x[3] - y[3]
    fvec[4] = (
        x[0] * (x[4] ** 2 - x[6] ** 2)
        - 2 * x[2] * x[4] * x[6]
        + x[1] * (x[5] ** 2 - x[7] ** 2)
        - 2 * x[3] * x[5] * x[7]
        - y[4]
    )
    fvec[5] = (
        x[2] * (x[4] ** 2 - x[6] ** 2)
        + 2 * x[0] * x[4] * x[6]
        + x[3] * (x[5] ** 2 - x[7] ** 2)
        + 2 * x[1] * x[5] * x[7]
        - y[5]
    )
    fvec[6] = (
        x[0] * x[4] * (x[4] ** 2 - 3 * x[6] ** 2)
        + x[2] * x[6] * (x[6] ** 2 - 3 * x[4] ** 2)
        + x[1] * x[5] * (x[5] ** 2 - 3 * x[7] ** 2)
        + x[3] * x[7] * (x[7] ** 2 - 3 * x[5] ** 2)
        - y[6]
    )
    fvec[7] = (
        x[2] * x[4] * (x[4] ** 2 - 3 * x[6] ** 2)
        - x[0] * x[6] * (x[6] ** 2 - 3 * x[4] ** 2)
        + x[3] * x[5] * (x[5] ** 2 - 3 * x[7] ** 2)
        - x[1] * x[7] * (x[7] ** 2 - 3 * x[5] ** 2)
        - y[7]
    )
    return fvec


# NOTE(review): the @mark.least_squares decorator on this start-point helper
# looks accidental -- the function returns start parameters, not residuals.
# Confirm whether it can be removed without affecting callers.
@mark.least_squares
def get_start_points_mancino(n, a=1):
    """Return the standard start vector for the Mancino problem, scaled by ``a``."""
    x = np.zeros(n)
    for i in range(1, n + 1):
        sm = 0
        for j in range(1, n + 1):
            sm += np.sqrt(i / j) * (
                (np.sin(np.log(np.sqrt(i / j)))) ** 5
                + (np.cos(np.log(np.sqrt(i / j)))) ** 5
            )
        x[i - 1] = -8.7110e-04 * ((i - 50) ** 3 + sm)
    return (x * a).tolist()


# Data vectors for the data-fitting problems (from dfovec.f).
# y_vec is used by bard, v_vec / y2_vec by kowalik_osborne, y3_vec by meyer
# (see MORE_WILD_PROBLEMS below).
y_vec = np.array(
    [
        0.1400, 0.1800, 0.2200, 0.2500, 0.2900,
        0.3200, 0.3500, 0.3900, 0.3700, 0.5800,
        0.7300, 0.9600, 1.3400, 2.1000, 4.3900,
    ]
)
v_vec = np.array(
    [
        4.0000, 2.0000, 1.0000, 0.5000, 0.2500, 0.1670,
        0.1250, 0.1000, 0.0833, 0.0714, 0.0625,
    ]
)
y2_vec = np.array(
    [
        0.1957, 0.1947, 0.1735, 0.1600, 0.0844, 0.0627,
        0.0456, 0.0342, 0.0323, 0.0235, 0.0246,
    ]
)
y3_vec = np.array(
    [
        34780, 28610, 23650, 19630, 16370, 13720, 11540, 9744,
        8261, 7030, 6005, 5147, 4427, 3820, 3307, 2872,
    ]
)
# presumably the osborne_one data (33 points matches its 5-parameter model);
# verify against the problem definitions below.
y4_vec = np.array(
    [
        8.44e-1, 9.08e-1, 9.32e-1, 9.36e-1, 9.25e-1, 9.08e-1, 8.81e-1,
        8.5e-1, 8.18e-1, 7.84e-1, 7.51e-1, 7.18e-1, 6.85e-1, 6.58e-1,
        6.28e-1, 6.03e-1, 5.8e-1, 5.58e-1, 5.38e-1, 5.22e-1, 5.06e-1,
        4.9e-1, 4.78e-1, 4.67e-1, 4.57e-1, 4.48e-1, 4.38e-1, 4.31e-1,
        4.24e-1, 4.2e-1, 4.14e-1, 4.11e-1, 4.06e-1,
    ]
)
# presumably the osborne_two data (65 points); verify against the problem
# definitions below.
y5_vec = np.array(
    [
        1.366e0, 1.191e0, 1.112e0, 1.013e0, 9.91e-1, 8.85e-1, 8.31e-1,
        8.47e-1, 7.86e-1, 7.25e-1, 7.46e-1, 6.79e-1, 6.08e-1, 6.55e-1,
        6.16e-1, 6.06e-1, 6.02e-1, 6.26e-1, 6.51e-1, 7.24e-1, 6.49e-1,
        6.49e-1, 6.94e-1, 6.44e-1, 6.24e-1, 6.61e-1, 6.12e-1, 5.58e-1,
        5.33e-1, 4.95e-1, 5.0e-1, 4.23e-1, 3.95e-1, 3.75e-1, 3.72e-1,
        3.91e-1, 3.96e-1, 4.05e-1, 4.28e-1, 4.29e-1, 5.23e-1, 5.62e-1,
        6.07e-1, 6.53e-1, 6.72e-1, 7.08e-1, 6.33e-1, 6.68e-1, 6.45e-1,
        6.32e-1, 5.91e-1, 5.59e-1, 5.97e-1, 6.25e-1, 7.39e-1, 7.1e-1,
        7.29e-1, 7.2e-1, 6.36e-1, 5.81e-1, 4.28e-1, 2.92e-1, 1.62e-1,
        9.8e-2, 5.4e-2,
    ]
)

# Numerically determined argmin vectors (``solution_x``) for selected problems.
linear_full_rank_solution_x = [
    -0.9999999988839997,
    -1.0000000177422066,
    -1.0000000115935452,
    -1.0000000228208163,
    -1.0000000488884697,
    -0.9999999970458138,
    -0.999999957053959,
    -1.0000000040514776,
    -0.9999999708374043,
]
freudenstein_roth_solution_x = [11.4127789219781, -0.8968052599835741]
bard_solution_x = [0.08241056005476516, 1.1330360796060677, 2.3436951913379658]
kowalik_osborne_solution_x = [
    0.19280693401647758,
    0.19128233030789646,
    0.12305650338704374,
    0.1360623315234073,
]
meyer_solution_x = [0.005609636453940975, 6181.3463491557495, 345.22363473367955]
watson_6_solution_x = [
    -0.01572508595814696,
    1.0124348692251488,
    -0.23299161822960684,
    1.2604300607312298,
    -1.5137288869025518,
    0.9929964192277573,
]
# Note: only nlopt_neldermead got close to the correct optimal criterion value.
# Parameter values might be less precise than others but should be precise enough
# for all practical purposes.
watson_9_solution_x = [
    -1.5307729818292037e-05,
    0.9997897038761921,
    0.014763956456196943,
    0.14634240306061744,
    1.000820801996808,
    -2.617730533377693,
    4.104402503186126,
    -3.1436119083184844,
    1.0526263240326197,
]
# Note: only nlopt_bobyqa got close to the correct optimal criterion value.
# Parameter values might be less precise than others but should be precise enough
# for all practical purposes.
# Numerically determined argmin vectors (``solution_x``) for further problems.
watson_12_solution_x = [
    -1.257374334661004e-07,
    1.000009574359581,
    -0.0005801330054146337,
    0.339181153679104,
    -0.01717885040751319,
    0.1133023927390161,
    0.19016852711009063,
    -0.21697797575421524,
    -0.20528305553311146,
    0.9344814896242725,
    -0.8979508634897754,
    0.3182351206188577,
]
brown_dennis_solution_x = [
    -11.594439969349615,
    13.203630099554186,
    -0.40343943943781074,
    0.2367787758603151,
]
chebyquad_6_solution_x = [
    0.06687659094608964,
    0.2887406731194441,
    0.36668229924164747,
    0.6333177007583523,
    0.7112593268805555,
    0.9331234090539102,
]
chebyquad_7_solution_x = [
    0.0580691496209753,
    0.23517161235742137,
    0.3380440947400461,
    0.49999999999999983,
    0.6619559052599537,
    0.7648283876425783,
    0.9419308503790245,
]
chebyquad_8_solution_x = [
    0.043152760689960816,
    0.19309084165259105,
    0.2663287079773684,
    0.5000000016286815,
    0.5000000007226908,
    0.8069091602434582,
    0.7336712939109635,
    0.9568472402172841,
]
chebyquad_9_solution_x = [
    0.04420534613578318,
    0.19949067230988682,
    0.23561910847105574,
    0.4160469078926057,
    0.5839530921074088,
    0.4999999999999922,
    0.800509327690123,
    0.7643808915289372,
    0.9557946538642177,
]
chebyquad_10_solution_x = [
    0.07474816709152399,
    0.17151817795786592,
    0.28643415454482585,
    0.35964645053932914,
    0.4707505262783716,
    0.6167383355304029,
    0.6167383367837294,
    0.7998108031241883,
    0.844854641539109,
    0.9670066274628275,
]
chebyquad_11_solution_x = [
    0.02995874447661457,
    0.1373112070822553,
    0.18836638791417698,
    0.3588431173822416,
    0.3588431160884765,
    0.5000000000242054,
    0.6411568833224512,
    0.6411568815391566,
    0.8116336110470005,
    0.8626887929155374,
    0.9700412549151204,
]
osborne_one_solution_x = [
    0.37541005253870485,
    1.9358469347077125,
    -1.4646871598379403,
    0.012867534697214533,
    0.02212269960299629,
]
osborne_two_solution_x = [
    1.3099771555174913,
    0.4315537955622272,
    0.6336616986693765,
    0.5994305344293098,
    0.7541832304802704,
    0.9042885759622441,
    1.365811821857166,
    4.823698851312894,
    2.398684862961737,
    4.568874597996633,
    5.675341470445994,
]
# Numerically determined argmin vectors (``solution_x``) for the bdqrtic,
# mancino and heart_eight problems; the near-zero entries are numerical noise
# around exact zeros.
bdqrtic_8_solution_x = [
    0.616075443630495,
    0.4861767187980861,
    0.39190293828200784,
    0.32635052133139375,
    5.7665311977077046e-09,
    9.348707442258251e-09,
    7.066347917413364e-09,
    -2.030598138768078e-09,
]
bdqrtic_10_solution_x = [
    0.6255364749479968,
    0.4851009828850974,
    0.3671943518989714,
    0.28518847760113386,
    0.33016716122418716,
    0.37757199483645576,
    -3.24040819296658e-09,
    -1.8973118921921425e-08,
    -2.2244236071548075e-08,
    1.9263207246002433e-09,
]
bdqrtic_11_solution_x = [
    0.6251418193253757,
    0.4858196102070445,
    0.3712502347939938,
    0.28350403794642487,
    0.31694697562905494,
    0.33873300184720523,
    0.3759208995980027,
    -1.8942209640948616e-08,
    3.418631657404969e-08,
    -4.003185000628104e-09,
    3.166166094063382e-09,
]
bdqrtic_12_solution_x = [
    0.6248003622228653,
    0.48537650602979937,
    0.37165912289534886,
    0.2859718523039759,
    0.31552001728813406,
    0.3253724392486982,
    0.33781861543778574,
    0.37402021737899876,
    -4.429208872117422e-09,
    -1.008941638491605e-08,
    -2.5608732325955336e-08,
    4.485976896804288e-09,
]
mancino_5_solution_x = [
    84.28291101102532,
    79.20603967293438,
    74.3364141135311,
    69.6711474112178,
    65.20718113814442,
]
mancino_8_solution_x = [
    84.43334222593528,
    79.33454939399172,
    74.44387011026309,
    69.7592945870252,
    65.27853533617875,
    60.9988580578957,
    56.9169379354432,
    53.028761291567236,
]
mancino_10_solution_x = [
    84.53434289477315,
    79.42084435375007,
    74.51601545241338,
    69.81844699647671,
    65.32637991893166,
    61.03748806452533,
    56.94869518846038,
    53.056052319528746,
    49.35469508461959,
    45.83889035077595,
]
mancino_12_solution_x = [
    84.63591921594158,
    79.5076423105225,
    74.5885724920863,
    69.87791406833868,
    65.37444824684921,
    61.07626530788906,
    56.98054088428213,
    53.08338921660163,
    49.379816810523714,
    45.86378591833196,
    42.52838225939789,
    39.36606891417026,
]
heart_eight_solution_x = [
    -0.311626605565399,
    -0.37837339443458845,
    0.3282442301180765,
    -0.3722442301180588,
    -1.282227094270286,
    2.4943003120854743,
    1.5548658787873983,
    -1.384637842863253,
]

MORE_WILD_PROBLEMS = {
"linear_full_rank_good_start": { "fun": mark.least_squares(partial(linear_full_rank, dim_out=45)), "start_x": [1] * 9, "solution_x": linear_full_rank_solution_x, "start_criterion": 72, "solution_criterion": 36, }, "linear_full_rank_bad_start": { "fun": mark.least_squares(partial(linear_full_rank, dim_out=45)), "start_x": [10] * 9, "solution_x": linear_full_rank_solution_x, "start_criterion": 1125, "solution_criterion": 36, }, "linear_rank_one_good_start": { "fun": mark.least_squares(partial(linear_rank_one, dim_out=35)), "start_x": [1] * 7, # no unique solution "solution_x": None, "start_criterion": 1.165420e7, "solution_criterion": 8.380281690143324, }, "linear_rank_one_bad_start": { "fun": mark.least_squares(partial(linear_rank_one, dim_out=35)), "start_x": [10] * 7, # no unique solution "solution_x": None, "start_criterion": 1.168591e9, "solution_criterion": 8.380282, }, "linear_rank_one_zero_columns_rows_good_start": { "fun": mark.least_squares( partial(linear_rank_one_zero_columns_rows, dim_out=35) ), "start_x": [1] * 7, # no unique solution "solution_x": None, "start_criterion": 4.989195e6, "solution_criterion": 9.880597014926506, }, "linear_rank_one_zero_columns_rows_bad_start": { "fun": mark.least_squares( partial(linear_rank_one_zero_columns_rows, dim_out=35) ), "start_x": [10] * 7, # no unique solution "solution_x": None, "start_criterion": 5.009356e8, "solution_criterion": 9.880597014926506, }, "rosenbrock_good_start": { "fun": rosenbrock, "start_x": [-1.2, 1], "solution_x": [1, 1], "start_criterion": 24.2, "solution_criterion": 0, }, "rosenbrock_bad_start": { "fun": rosenbrock, "start_x": [-12, 10], "solution_x": [1, 1], "start_criterion": 1.795769e6, "solution_criterion": 0, }, "helical_valley_good_start": { "fun": helical_valley, "start_x": [-1, 0, 0], "solution_x": [1, 0, 0], "start_criterion": 2500, "solution_criterion": 0, }, "helical_valley_bad_start": { "fun": helical_valley, "start_x": [-10, 0, 0], "solution_x": [1, 0, 0], "start_criterion": 
10600, "solution_criterion": 0, }, "powell_singular_good_start": { "fun": powell_singular, "start_x": [3, -1, 0, 1], "solution_x": [0] * 4, "start_criterion": 215, "solution_criterion": 0, }, "powell_singular_bad_start": { "fun": powell_singular, "start_x": [30, -10, 0, 10], "solution_x": [0] * 4, "start_criterion": 1.615400e6, "solution_criterion": 0, }, "freudenstein_roth_good_start": { "fun": freudenstein_roth, "start_x": [0.5, -2], "solution_x": freudenstein_roth_solution_x, "start_criterion": 400.5, "solution_criterion": 48.984253679240013, }, "freudenstein_roth_bad_start": { "fun": freudenstein_roth, "start_x": [5, -20], "solution_x": freudenstein_roth_solution_x, "start_criterion": 1.545754e8, "solution_criterion": 48.984253679240013, }, "bard_good_start": { "fun": mark.least_squares(partial(bard, y=y_vec)), "start_x": [1] * 3, "solution_x": bard_solution_x, "start_criterion": 41.68170, "solution_criterion": 0.00821487730657897, }, "bard_bad_start": { "fun": mark.least_squares(partial(bard, y=y_vec)), "start_x": [10] * 3, "solution_x": bard_solution_x, "start_criterion": 1306.234, "solution_criterion": 0.00821487730657897, }, "kowalik_osborne": { "fun": mark.least_squares(partial)( kowalik_osborne, y1=v_vec, y2=y2_vec, ), "start_x": [0.25, 0.39, 0.415, 0.39], "solution_x": kowalik_osborne_solution_x, "start_criterion": 5.313172e-3, "solution_criterion": 0.00030750560384924, }, "meyer": { "fun": mark.least_squares(partial(meyer, y=y3_vec)), "start_x": [0.02, 4000, 250], "solution_x": meyer_solution_x, "start_criterion": 1.693608e9, "solution_criterion": 87.945855170395831, }, "watson_6_good_start": { "fun": watson, "start_x": [0.5] * 6, "solution_x": watson_6_solution_x, "start_criterion": 16.43083, "solution_criterion": 0.00228767005355236, }, "watson_6_bad_start": { "fun": watson, "start_x": [5] * 6, "solution_x": watson_6_solution_x, "start_criterion": 2.323367e6, "solution_criterion": 0.00228767005355236, }, "watson_9_good_start": { "fun": watson, 
"start_x": [0.5] * 9, "solution_x": watson_9_solution_x, "start_criterion": 26.90417, "solution_criterion": 1.399760e-6, }, "watson_9_bad_start": { "fun": watson, "start_x": [5] * 9, "solution_x": watson_9_solution_x, "start_criterion": 8.158877e6, "solution_criterion": 1.399760e-6, }, "watson_12_good_start": { "fun": watson, "start_x": [0.5] * 12, "solution_x": watson_12_solution_x, "start_criterion": 73.67821, "solution_criterion": 4.722381e-10, }, "watson_12_bad_start": { "fun": watson, "start_x": [5] * 12, "solution_x": watson_12_solution_x, "start_criterion": 2.059384e7, "solution_criterion": 4.722381e-10, }, "box_3d": { "fun": mark.least_squares(partial(box_3d, dim_out=10)), "start_x": [0, 10, 20], "solution_x": [1, 10, 1], "start_criterion": 1031.154, "solution_criterion": 0, }, "jennrich_sampson": { "fun": mark.least_squares(partial(jennrich_sampson, dim_out=10)), "start_x": [0.3, 0.4], "solution_x": [0.2578252135686162] * 2, "start_criterion": 4171.306, "solution_criterion": 124.3621823556148, }, "brown_dennis_good_start": { "fun": mark.least_squares(partial(brown_dennis, dim_out=20)), "start_x": [25, 5, -5, -1], "solution_x": brown_dennis_solution_x, "start_criterion": 7.926693e6, "solution_criterion": 85822.20162635, }, "brown_dennis_bad_start": { "fun": mark.least_squares(partial(brown_dennis, dim_out=20)), "start_x": [250, 50, -50, -10], "solution_x": brown_dennis_solution_x, "start_criterion": 3.081064e11, "solution_criterion": 85822.20162635, }, "chebyquad_6": { "fun": mark.least_squares(partial(chebyquad, dim_out=6)), "start_x": [i / 7 for i in range(1, 7)], "solution_x": chebyquad_6_solution_x, "start_criterion": 4.642817e-2, "solution_criterion": 0, }, "chebyquad_7": { "fun": mark.least_squares(partial(chebyquad, dim_out=7)), "start_x": [i / 8 for i in range(1, 8)], "solution_x": chebyquad_7_solution_x, "start_criterion": 3.377064e-2, "solution_criterion": 0, }, "chebyquad_8": { "fun": mark.least_squares(partial(chebyquad, dim_out=8)), "start_x": 
[i / 9 for i in range(1, 9)], "solution_x": chebyquad_8_solution_x, "start_criterion": 3.861770e-2, "solution_criterion": 0.003516873725677, }, "chebyquad_9": { "fun": mark.least_squares(partial(chebyquad, dim_out=9)), "start_x": [i / 10 for i in range(1, 10)], "solution_x": chebyquad_9_solution_x, "start_criterion": 2.888298e-2, "solution_criterion": 0, }, "chebyquad_10": { "fun": mark.least_squares(partial(chebyquad, dim_out=10)), "start_x": [i / 11 for i in range(1, 11)], "solution_x": chebyquad_10_solution_x, "start_criterion": 3.376327e-2, "solution_criterion": 0.00477271369637536, }, "chebyquad_11": { "fun": mark.least_squares(partial(chebyquad, dim_out=11)), "start_x": [i / 12 for i in range(1, 12)], "solution_x": chebyquad_11_solution_x, "start_criterion": 2.674060e-2, "solution_criterion": 0.00279976155186576, }, "brown_almost_linear": { "fun": brown_almost_linear, "start_x": [0.5] * 10, "solution_x": [1] * 10, "start_criterion": 273.2480, "solution_criterion": 0, }, "osborne_one": { "fun": mark.least_squares(partial(osborne_one, y=y4_vec)), "start_x": [0.5, 1.5, 1, 0.01, 0.02], "solution_x": osborne_one_solution_x, "start_criterion": 16.17411, "solution_criterion": 0.00005464894697483, }, "osborne_two_good_start": { "fun": mark.least_squares(partial(osborne_two, y=y5_vec)), "start_x": [1.3, 0.65, 0.65, 0.7, 0.6, 3, 5, 7, 2, 4.5, 5.5], "solution_x": osborne_two_solution_x, "start_criterion": 2.093420, "solution_criterion": 0.0401377362935477, }, "osborne_two_bad_start": { "fun": mark.least_squares(partial(osborne_two, y=y5_vec)), "start_x": [13, 6.5, 6.5, 7, 6, 30, 50, 70, 20, 45, 55], "solution_x": osborne_two_solution_x, "start_criterion": 199.6847, "solution_criterion": 0.0401377362935477, }, "bdqrtic_8": { "fun": bdqrtic, "start_x": [1] * 8, "solution_x": bdqrtic_8_solution_x, "start_criterion": 904, "solution_criterion": 10.2389734213174, }, "bdqrtic_10": { "fun": bdqrtic, "start_x": [1] * 10, "solution_x": bdqrtic_10_solution_x, "start_criterion": 
1356, "solution_criterion": 18.28116175359353, }, "bdqrtic_11": { "fun": bdqrtic, "start_x": [1] * 11, "solution_x": bdqrtic_11_solution_x, "start_criterion": 1582, "solution_criterion": 22.260591734883817, }, "bdqrtic_12": { "fun": bdqrtic, "start_x": [1] * 12, "solution_x": bdqrtic_12_solution_x, "start_criterion": 1808, "solution_criterion": 26.2727663967939, }, "cube_5": { "fun": cube, "start_x": [0.5] * 5, "solution_x": [1] * 5, "start_criterion": 56.5, "solution_criterion": 0, }, "cube_6": { "fun": cube, "start_x": [0.5] * 6, "solution_x": [1] * 6, "start_criterion": 70.5625, "solution_criterion": 0, }, "cube_8": { "fun": cube, "start_x": [0.5] * 8, "solution_x": [1] * 8, "start_criterion": 98.6875, "solution_criterion": 0, }, "mancino_5_good_start": { "fun": mancino, "start_x": get_start_points_mancino(5), "solution_x": mancino_5_solution_x, "start_criterion": 2.539084e9, "solution_criterion": 0, }, "mancino_5_bad_start": { "fun": mancino, "start_x": get_start_points_mancino(5, 10), "solution_x": mancino_5_solution_x, "start_criterion": 6.873795e12, "solution_criterion": 0, }, "mancino_8": { "fun": mancino, "start_x": get_start_points_mancino(8), "solution_x": mancino_8_solution_x, "start_criterion": 3.367961e9, "solution_criterion": 0, }, "mancino_10": { "fun": mancino, "start_x": get_start_points_mancino(10), "solution_x": mancino_10_solution_x, "start_criterion": 3.735127e9, "solution_criterion": 0, }, "mancino_12_good_start": { "fun": mancino, "start_x": get_start_points_mancino(12), "solution_x": mancino_12_solution_x, "start_criterion": 3.991072e9, "solution_criterion": 0, }, "mancino_12_bad_start": { "fun": mancino, "start_x": get_start_points_mancino(12, 10), "solution_x": mancino_12_solution_x, "start_criterion": 1.130015e13, "solution_criterion": 0, }, "heart_eight_good_start": { "fun": mark.least_squares( partial( heart_eight, y=np.array([-0.69, -0.044, -1.57, -1.31, -2.65, 2, -12.6, 9.48]), ) ), "start_x": [-0.3, -0.39, 0.3, -0.344, -1.2, 2.69, 
1.59, -1.5], "solution_x": heart_eight_solution_x, "start_criterion": 9.385672, "solution_criterion": 0, }, "heart_eight_bad_start": { "fun": mark.least_squares( partial( heart_eight, y=np.array([-0.69, -0.044, -1.57, -1.31, -2.65, 2, -12.6, 9.48]), ) ), "start_x": [-3, -3.9, 3, -3.44, -12, 26.9, 15.9, -15], "solution_x": heart_eight_solution_x, "start_criterion": 3.365815e10, "solution_criterion": 0, }, "brown_almost_linear_medium": { "fun": brown_almost_linear, "start_x": [0.5] * 100, "solution_x": [1] * 100, "start_criterion": 2.524757e5, "solution_criterion": 0, }, } ================================================ FILE: src/optimagic/benchmarking/noise_distributions.py ================================================ import numpy as np def _standard_logistic(size, rng): scale = np.sqrt(3) / np.pi return rng.logistic(loc=0, scale=scale, size=size) def _standard_uniform(size, rng): ub = np.sqrt(3) lb = -ub return rng.uniform(lb, ub, size=size) def _standard_normal(size, rng): return rng.normal(size=size) def _standard_gumbel(size, rng): gamma = 0.577215664901532 scale = np.sqrt(6) / np.pi loc = -scale * gamma return rng.gumbel(loc=loc, scale=scale, size=size) def _standard_laplace(size, rng): return rng.laplace(scale=np.sqrt(0.5), size=size) NOISE_DISTRIBUTIONS = { "normal": _standard_normal, "gumbel": _standard_gumbel, "logistic": _standard_logistic, "uniform": _standard_uniform, "laplace": _standard_laplace, } ================================================ FILE: src/optimagic/benchmarking/process_benchmark_results.py ================================================ import numpy as np import pandas as pd def process_benchmark_results( problems, results, stopping_criterion, x_precision=1e-4, y_precision=1e-4 ): """Create tidy DataFrame with all information needed for the benchmarking plots. Args: problems (dict): optimagic benchmarking problems dictionary. Keys are the problem names. Values contain information on the problem, including the solution value. 
results (dict): optimagic benchmarking results dictionary. Keys are tuples of the form (problem, algorithm), values are dictionaries of the collected information on the benchmark run, including 'criterion_history' and 'time_history'. stopping_criterion (str): one of "x_and_y", "x_or_y", "x", "y", or None. Determines how convergence is determined from the two precisions. If None, no convergence criterion is applied. x_precision (float): how close an algorithm must have gotten to the true parameter values (as percent of the Euclidean distance between start and solution parameters) before the criterion for clipping and convergence is fulfilled. Default is 1e-4. y_precision (float): how close an algorithm must have gotten to the true criterion values (as percent of the distance between start and solution criterion value) before the criterion for clipping and convergence is fulfilled. Default is 1e-4. Returns: pandas.DataFrame: tidy DataFrame with the following columns: - problem - algorithm - n_evaluations - walltime - criterion - criterion_normalized - monotone_criterion - monotone_criterion_normalized - parameter_distance - parameter_distance_normalized - monotone_parameter_distance - monotone_parameter_distance_normalized """ histories = [] infos = [] for (problem_name, algorithm_name), result in results.items(): history, is_converged = _process_one_result( problem=problems[problem_name], result=result, stopping_criterion=stopping_criterion, x_precision=x_precision, y_precision=y_precision, ) history["problem"] = problem_name history["algorithm"] = algorithm_name histories.append(history) info = { "problem": problem_name, "algorithm": algorithm_name, "is_converged": is_converged, } infos.append(info) histories = pd.concat(histories, ignore_index=True) infos = pd.DataFrame(infos).set_index(["problem", "algorithm"]).unstack() infos.columns = [tup[1] for tup in infos.columns] return histories, infos def _process_one_result( problem, result, stopping_criterion, 
x_precision, y_precision, ): # input processing assert isinstance(x_precision, float) assert isinstance(y_precision, float) # extract information _params_hist = result["params_history"] _solution_crit = problem["solution"]["value"] _start_crit = problem["start_criterion"] _solution_x = problem["solution"].get("params") _start_x = problem["inputs"]["params"] _needed_step = np.linalg.norm(_solution_x - _start_x) if isinstance(_solution_x, np.ndarray) and not np.isfinite(_solution_x).all(): _solution_x = None # calculate the different transformations of criterion values crit_hist = np.array(result["criterion_history"]) monotone_crit_hist = np.minimum.accumulate(crit_hist) normalized_crit_hist = (crit_hist - _solution_crit) / (_start_crit - _solution_crit) normalized_monotone_crit_hist = (monotone_crit_hist - _solution_crit) / ( _start_crit - _solution_crit ) # calculate the different versions of params distance if we have a solution if _solution_x is not None: params_dist = np.linalg.norm(np.array(_params_hist - _solution_x), axis=1) monotone_params_dist = np.minimum.accumulate(params_dist) params_dist_normalized = params_dist / _needed_step monotone_params_dist_normalized = monotone_params_dist / _needed_step else: params_dist = np.full(len(_params_hist), np.nan) monotone_params_dist = np.full(len(_params_hist), np.nan) params_dist_normalized = np.full(len(_params_hist), np.nan) monotone_params_dist_normalized = np.full(len(_params_hist), np.nan) # put everything together in a dict out_dict = { "n_evaluations": np.arange(len(crit_hist)), "n_batches": result["batches_history"], "walltime": result["time_history"], "criterion": crit_hist, "criterion_normalized": normalized_crit_hist, "monotone_criterion": monotone_crit_hist, "monotone_criterion_normalized": normalized_monotone_crit_hist, "parameter_distance": params_dist, "monotone_parameter_distance": monotone_params_dist, "parameter_distance_normalized": params_dist_normalized, 
"monotone_parameter_distance_normalized": monotone_params_dist_normalized, } # calculate at which iteration the problem has been solved if stopping_criterion is not None: is_converged_x, x_idx = _check_convergence(params_dist_normalized, x_precision) is_converged_y, y_idx = _check_convergence(normalized_crit_hist, y_precision) flag_aggregators = { "x": lambda x, y: x, "y": lambda x, y: y, "x_and_y": lambda x, y: x and y, "x_or_y": lambda x, y: x or y, } is_converged = flag_aggregators[stopping_criterion]( x=is_converged_x, y=is_converged_y ) if is_converged: idx_aggregators = { "x": lambda x, y: x, "y": lambda x, y: y, "x_and_y": _aggregate_idxs_with_and, "x_or_y": _aggregate_idxs_with_or, } solution_idx = idx_aggregators[stopping_criterion](x=x_idx, y=y_idx) if solution_idx is not None: out_dict = {k: v[: solution_idx + 1] for k, v in out_dict.items()} # create a DataFrame and add metadata out = pd.DataFrame(out_dict) return out, is_converged def _check_convergence(values, threshold): boo = values <= threshold if boo.any(): is_converged = True idx = np.argmax(boo) else: is_converged = False idx = None return is_converged, idx def _aggregate_idxs_with_and(x, y): if x is None or y is None: out = None else: out = max(x, y) return out def _aggregate_idxs_with_or(x, y): if x is None and y is None: out = None elif x is None: out = y elif y is None: out = x else: out = min(x, y) return out ================================================ FILE: src/optimagic/benchmarking/run_benchmark.py ================================================ """Functions to create, run and visualize optimization benchmarks. TO-DO: - Add other benchmark sets: - finish medium scale problems from https://arxiv.org/pdf/1710.11005.pdf, Page 34. - add scalar problems from https://github.com/AxelThevenot - Add option for deterministic noise or wiggle. 
""" import numpy as np from pybaum import tree_just_flatten from optimagic import batch_evaluators from optimagic.algorithms import AVAILABLE_ALGORITHMS from optimagic.optimization.optimize import minimize from optimagic.parameters.tree_registry import get_registry def run_benchmark( problems, optimize_options, *, batch_evaluator="joblib", n_cores=1, error_handling="continue", max_criterion_evaluations=1_000, disable_convergence=True, ): """Run problems with different optimize options. Args: problems (dict): Nested dictionary with benchmark problems of the structure: {"name": {"inputs": {...}, "solution": {...}, "info": {...}}} where "inputs" are keyword arguments for ``minimize`` such as the criterion function and start parameters. "solution" contains the entries "params" and "value" and "info" might contain information about the test problem. optimize_options (list or dict): Either a list of algorithms or a Nested dictionary that maps a name for optimizer settings (e.g. ``"lbfgsb_strict_criterion"``) to a dictionary of keyword arguments for arguments for ``minimize`` (e.g. ``{"algorithm": "scipy_lbfgsb", "algo_options": {"convergence.ftol_rel": 1e-12}}``). Alternatively, the values can just be an algorithm which is then benchmarked at default settings. batch_evaluator (str or callable): See :ref:`batch_evaluators`. n_cores (int): Number of optimizations that is run in parallel. Note that in addition to that an optimizer might parallelize. error_handling (str): One of "raise", "continue". max_criterion_evaluations (int): Shortcut to set the maximum number of criterion evaluations instead of passing them in via algo options. In case an optimizer does not support this stopping criterion, we also use this as max iterations. disable_convergence (bool): If True, we set extremely strict convergence convergence criteria by default, such that most optimizers will exploit their full computation budget set by max_criterion_evaluations. 
Returns: dict: Nested Dictionary with information on the benchmark run. The outer keys are tuples where the first entry is the name of the problem and the second the name of the optimize options. The values are dicts with the entries: "params_history", "criterion_history", "time_history" and "solution". """ if isinstance(batch_evaluator, str): batch_evaluator = getattr( batch_evaluators, f"{batch_evaluator}_batch_evaluator" ) opt_options = _process_optimize_options( optimize_options, max_evals=max_criterion_evaluations, disable_convergence=disable_convergence, ) minimize_arguments, keys = _get_optimization_arguments_and_keys( problems, opt_options ) raw_results = batch_evaluator( func=minimize, arguments=minimize_arguments, n_cores=n_cores, error_handling=error_handling, unpack_symbol="**", ) processing_arguments = [] for name, raw_result in zip(keys, raw_results, strict=False): processing_arguments.append( {"optimize_result": raw_result, "problem": problems[name[0]]} ) results = batch_evaluator( func=_process_one_result, arguments=processing_arguments, n_cores=n_cores, error_handling="raise", unpack_symbol="**", ) results = dict(zip(keys, results, strict=False)) return results def _process_optimize_options(raw_options, max_evals, disable_convergence): if not isinstance(raw_options, dict): dict_options = {} for option in raw_options: if isinstance(option, str): dict_options[option] = option else: dict_options[option.__name__] = option else: dict_options = raw_options default_algo_options = {} if max_evals is not None: default_algo_options["stopping.maxfun"] = max_evals default_algo_options["stopping.maxiter"] = max_evals if disable_convergence: default_algo_options["convergence.ftol_rel"] = 1e-14 default_algo_options["convergence.xtol_rel"] = 1e-14 default_algo_options["convergence.gtol_rel"] = 1e-14 out_options = {} for name, _option in dict_options.items(): if not isinstance(_option, dict): option = {"algorithm": _option} else: option = _option.copy() 
algo_options = {**default_algo_options, **option.get("algo_options", {})} algo_options = {k.replace(".", "_"): v for k, v in algo_options.items()} option["algo_options"] = algo_options if isinstance(option.get("algo_options"), dict): option["algo_options"] = {**default_algo_options, **option["algo_options"]} else: option["algo_options"] = default_algo_options out_options[name] = option return out_options def _get_optimization_arguments_and_keys(problems, opt_options): kwargs_list = [] names = [] for prob_name, problem in problems.items(): for option_name, options in opt_options.items(): algo = options["algorithm"] if isinstance(algo, str): if algo not in AVAILABLE_ALGORITHMS: raise ValueError(f"Invalid algorithm: {algo}") else: valid_options = set(AVAILABLE_ALGORITHMS[algo].__dataclass_fields__) else: valid_options = set(algo.__dataclass_fields__) algo_options = options["algo_options"] algo_options = {k: v for k, v in algo_options.items() if k in valid_options} kwargs = {**options, **problem["inputs"]} kwargs["algo_options"] = algo_options kwargs_list.append(kwargs) names.append((prob_name, option_name)) return kwargs_list, names def _process_one_result(optimize_result, problem): """Process the result of one optimization run. Args: optimize_result (OptimizeResult): Result of one optimization run. problem (dict): Problem specification. Returns: dict: Processed result. 
""" _registry = get_registry(extended=True) _criterion = problem["noise_free_fun"] _start_x = problem["inputs"]["params"] _start_crit_value = _criterion(_start_x) if isinstance(_start_crit_value, np.ndarray): _start_crit_value = (_start_crit_value**2).sum() _is_noisy = problem["noisy"] _solution_crit = problem["solution"]["value"] # This will happen if the optimization raised an error if isinstance(optimize_result, str): params_history_flat = [tree_just_flatten(_start_x, registry=_registry)] criterion_history = [_start_crit_value] time_history = [np.inf] batches_history = [0] else: history = optimize_result.history params_history = history.params params_history_flat = [ tree_just_flatten(p, registry=_registry) for p in params_history ] if _is_noisy: criterion_history = np.array([_criterion(p) for p in params_history]) if criterion_history.ndim == 2: criterion_history = (criterion_history**2).sum(axis=1) else: criterion_history = history.fun criterion_history = np.clip(criterion_history, _solution_crit, np.inf) batches_history = history.batches time_history = history.start_time return { "params_history": params_history_flat, "criterion_history": criterion_history, "time_history": time_history, "batches_history": batches_history, "solution": optimize_result, } ================================================ FILE: src/optimagic/config.py ================================================ import importlib.util from pathlib import Path import plotly.express as px DOCS_DIR = Path(__file__).parent.parent / "docs" OPTIMAGIC_ROOT = Path(__file__).parent PLOTLY_TEMPLATE = "simple_white" PLOTLY_PALETTE = px.colors.qualitative.Set2 # The hex strings are obtained from the Plotly D3 qualitative palette. 
DEFAULT_PALETTE = [ "#1F77B4", "#FF7F0E", "#2CA02C", "#D62728", "#9467BD", "#8C564B", "#E377C2", "#7F7F7F", "#BCBD22", "#17BECF", ] DEFAULT_N_CORES = 1 CRITERION_PENALTY_SLOPE = 0.1 CRITERION_PENALTY_CONSTANT = 100 def _is_installed(module_name: str) -> bool: """Return True if the given module is installed, otherwise False.""" return importlib.util.find_spec(module_name) is not None # ====================================================================================== # Check Available Optimization Packages # ====================================================================================== IS_PETSC4PY_INSTALLED = _is_installed("petsc4py") IS_NLOPT_INSTALLED = _is_installed("nlopt") IS_PYBOBYQA_INSTALLED = _is_installed("pybobyqa") IS_DFOLS_INSTALLED = _is_installed("dfols") IS_PYGMO_INSTALLED = _is_installed("pygmo") IS_CYIPOPT_INSTALLED = _is_installed("cyipopt") IS_FIDES_INSTALLED = _is_installed("fides") IS_JAX_INSTALLED = _is_installed("jax") IS_TRANQUILO_INSTALLED = _is_installed("tranquilo") IS_NUMBA_INSTALLED = _is_installed("numba") IS_IMINUIT_INSTALLED = _is_installed("iminuit") IS_NEVERGRAD_INSTALLED = _is_installed("nevergrad") # despite the similar names, the bayes_opt and bayes_optim packages are # completely unrelated. However, both of them are dependencies of nevergrad. IS_BAYESOPTIM_INSTALLED = _is_installed("bayes-optim") # Note: There is a dependancy conflict with nevergrad and bayesian_optimization # installing nevergrad pins bayesian_optimization to 1.4.0, # but "bayes_opt" requires bayesian_optimization>=2.0.0 to work. # so if nevergrad is installed, bayes_opt will not work and vice-versa. 
IS_BAYESOPT_INSTALLED_AND_VERSION_NEWER_THAN_2 = ( _is_installed("bayes_opt") and importlib.metadata.version("bayesian_optimization") > "2.0.0" ) IS_GRADIENT_FREE_OPTIMIZERS_INSTALLED = _is_installed("gradient_free_optimizers") IS_PYGAD_INSTALLED = _is_installed("pygad") IS_PYSWARMS_INSTALLED = _is_installed("pyswarms") # ====================================================================================== # Check Available Visualization Packages # ====================================================================================== IS_MATPLOTLIB_INSTALLED = _is_installed("matplotlib") IS_BOKEH_INSTALLED = _is_installed("bokeh") IS_ALTAIR_INSTALLED = _is_installed("altair") ================================================ FILE: src/optimagic/constraints.py ================================================ from __future__ import annotations from abc import ABC, abstractmethod from dataclasses import KW_ONLY, dataclass from typing import Any, Callable import numpy as np import pandas as pd from numpy.typing import ArrayLike from optimagic.exceptions import InvalidConstraintError from optimagic.optimization.algo_options import CONSTRAINTS_ABSOLUTE_TOLERANCE from optimagic.typing import PyTree class Constraint(ABC): """Base class for all constraints used for subtyping.""" @abstractmethod def _to_dict(self) -> dict[str, Any]: pass def identity_selector(x: PyTree) -> PyTree: return x @dataclass(frozen=True) class FixedConstraint(Constraint): """Constraint that fixes the selected parameters at their starting values. Attributes: selector: A function that takes as input the parameters and returns the subset of parameters to be constrained. By default, all parameters are constrained. Raises: InvalidConstraintError: If the selector is not callable. 
""" selector: Callable[[PyTree], PyTree] = identity_selector def _to_dict(self) -> dict[str, Any]: return {"type": "fixed", "selector": self.selector} def __post_init__(self) -> None: if not callable(self.selector): raise InvalidConstraintError("'selector' must be callable.") @dataclass(frozen=True) class IncreasingConstraint(Constraint): """Constraint that ensures the selected parameters are increasing. Attributes: selector: A function that takes as input the parameters and returns the subset of parameters to be constrained. By default, all parameters are constrained. Raises: InvalidConstraintError: If the selector is not callable. """ selector: Callable[[PyTree], PyTree] = identity_selector def _to_dict(self) -> dict[str, Any]: return {"type": "increasing", "selector": self.selector} def __post_init__(self) -> None: if not callable(self.selector): raise InvalidConstraintError("'selector' must be callable.") @dataclass(frozen=True) class DecreasingConstraint(Constraint): """Constraint that ensures that the selected parameters are decreasing. Attributes: selector: A function that takes as input the parameters and returns the subset of parameters to be constrained. By default, all parameters are constrained. Raises: InvalidConstraintError: If the selector is not callable. """ selector: Callable[[PyTree], PyTree] = identity_selector def _to_dict(self) -> dict[str, Any]: return {"type": "decreasing", "selector": self.selector} def __post_init__(self) -> None: if not callable(self.selector): raise InvalidConstraintError("'selector' must be callable.") @dataclass(frozen=True) class EqualityConstraint(Constraint): """Constraint that ensures that the selected parameters are equal. Attributes: selector: A function that takes as input the parameters and returns the subset of parameters to be constrained. By default, all parameters are constrained. Raises: InvalidConstraintError: If the selector is not callable. 
""" selector: Callable[[PyTree], PyTree] = identity_selector def _to_dict(self) -> dict[str, Any]: return {"type": "equality", "selector": self.selector} def __post_init__(self) -> None: if not callable(self.selector): raise InvalidConstraintError("'selector' must be callable.") @dataclass(frozen=True) class ProbabilityConstraint(Constraint): """Constraint that ensures that the selected parameters are probabilities. This constraint ensures that each of the selected parameters is positive and that the sum of the selected parameters is 1. Attributes: selector: A function that takes as input the parameters and returns the subset of parameters to be constrained. By default, all parameters are constrained. Raises: InvalidConstraintError: If the selector is not callable. """ selector: Callable[[PyTree], PyTree] = identity_selector def _to_dict(self) -> dict[str, Any]: return {"type": "probability", "selector": self.selector} def __post_init__(self) -> None: if not callable(self.selector): raise InvalidConstraintError("'selector' must be callable.") @dataclass(frozen=True) class PairwiseEqualityConstraint(Constraint): """Constraint that ensures that groups of selected parameters are equal. This constraint ensures that each pair between the selected parameters is equal. Attributes: selectors: A list of functions that take as input the parameters and return the subsets of parameters to be constrained. Raises: InvalidConstraintError: If the selector is not callable. 
""" selectors: list[Callable[[PyTree], PyTree]] def _to_dict(self) -> dict[str, Any]: return {"type": "pairwise_equality", "selectors": self.selectors} def __post_init__(self) -> None: if len(self.selectors) < 2: raise InvalidConstraintError("At least two selectors must be provided.") if not all(callable(s) for s in self.selectors): raise InvalidConstraintError("All selectors must be callable.") @dataclass(frozen=True) class FlatCovConstraint(Constraint): """Constraint that ensures the selected parameters are a valid covariance matrix. Attributes: selector: A function that takes as input the parameters and returns the subset of parameters to be constrained. By default, all parameters are constrained. regularization: Helps in guiding the optimization towards finding a positive definite covariance matrix instead of only a positive semi-definite matrix. Larger values correspond to a higher likelihood of positive definiteness. Defaults to 0. Raises: InvalidConstraintError: If the selector is not callable or regularization is not a non-negative float or int. """ selector: Callable[[PyTree], PyTree] = identity_selector _: KW_ONLY regularization: float = 0.0 def _to_dict(self) -> dict[str, Any]: return { "type": "covariance", "selector": self.selector, "regularization": self.regularization, } def __post_init__(self) -> None: if not callable(self.selector): raise InvalidConstraintError("'selector' must be callable.") if not isinstance(self.regularization, float | int) or self.regularization < 0: raise InvalidConstraintError( "'regularization' must be a non-negative float or int." ) @dataclass(frozen=True) class FlatSDCorrConstraint(Constraint): """Constraint that ensures the selected parameters are a valid correlation matrix. This constraint ensures that each of the selected parameters is positive and that the sum of the selected parameters is 1. Attributes: selector: A function that takes as input the parameters and returns the subset of parameters to be constrained. 
@dataclass(frozen=True)
class FlatSDCorrConstraint(Constraint):
    """Constraint that ensures the selected parameters are valid sdcorr parameters.

    The selected parameters are interpreted as standard deviations and
    correlations ("sdcorr" format). This constraint ensures that they imply a
    valid, i.e. positive (semi-)definite, covariance matrix.

    Attributes:
        selector: A function that takes as input the parameters and returns the
            subset of parameters to be constrained. By default, all parameters
            are constrained.
        regularization: Helps in guiding the optimization towards finding a
            positive definite covariance matrix instead of only a positive
            semi-definite matrix. Larger values correspond to a higher
            likelihood of positive definiteness. Defaults to 0.

    Raises:
        InvalidConstraintError: If the selector is not callable or
            regularization is not a non-negative float or int.

    """

    selector: Callable[[PyTree], PyTree] = identity_selector
    _: KW_ONLY
    regularization: float = 0.0

    def _to_dict(self) -> dict[str, Any]:
        # Internal dictionary representation used by the old constraint API.
        return {
            "type": "sdcorr",
            "selector": self.selector,
            "regularization": self.regularization,
        }

    def __post_init__(self) -> None:
        if not callable(self.selector):
            raise InvalidConstraintError("'selector' must be callable.")
        if not isinstance(self.regularization, float | int) or self.regularization < 0:
            raise InvalidConstraintError(
                "'regularization' must be a non-negative float or int."
            )
""" selector: Callable[[PyTree], ArrayLike | "pd.Series[float]" | float | int] = ( identity_selector ) _: KW_ONLY weights: ArrayLike | "pd.Series[float]" | float | int | None = None lower_bound: float | int | None = None upper_bound: float | int | None = None value: float | int | None = None def _to_dict(self) -> dict[str, Any]: return { "type": "linear", "selector": self.selector, "weights": self.weights, **_select_non_none( lower_bound=self.lower_bound, upper_bound=self.upper_bound, value=self.value, ), } def __post_init__(self) -> None: if not callable(self.selector): raise InvalidConstraintError("'selector' must be callable.") if _all_none(self.lower_bound, self.upper_bound, self.value): raise InvalidConstraintError( "At least one of 'lower_bound', 'upper_bound', or 'value' must be " "non-None." ) if self.value is not None and not _all_none(self.lower_bound, self.upper_bound): raise InvalidConstraintError( "'value' cannot be used with 'lower_bound' or 'upper_bound'." ) if not isinstance(self.weights, np.ndarray | list | pd.Series | float | int): raise InvalidConstraintError( "'weights' must be an array-like, a pandas Series, a float, or an int." ) if self.lower_bound is not None and not isinstance( self.lower_bound, float | int ): raise InvalidConstraintError("'lower_bound' must be a float or an int.") if self.upper_bound is not None and not isinstance( self.upper_bound, float | int ): raise InvalidConstraintError("'upper_bound' must be a float or an int.") if self.value is not None and not isinstance(self.value, float | int): raise InvalidConstraintError("'value' must be a float or an int.") @dataclass(frozen=True) class NonlinearConstraint(Constraint): """Constraint that bounds a nonlinear function of the selected parameters. This constraint ensures that a nonlinear function of the selected parameters is either equal to 'value', or is bounded by 'lower_bound' and 'upper_bound'. 
@dataclass(frozen=True)
class NonlinearConstraint(Constraint):
    """Bound a nonlinear function of the selected parameters.

    The constraint function applied to the selected parameters is either fixed
    to `value` or restricted to lie between `lower_bound` and `upper_bound`.

    Attributes:
        selector: Function mapping the full parameter pytree to the subset of
            parameters that is constrained. Defaults to selecting all
            parameters.
        func: The constraint function which is applied to the selected
            parameters.
        derivative: The derivative of the constraint function with respect to
            the selected parameters. Defaults to None.
        lower_bound: Lower bound on the constraint function. Can be a scalar or
            of the same structure as the output of func. Defaults to None.
        upper_bound: Upper bound on the constraint function. Can be a scalar or
            of the same structure as the output of func. Defaults to None.
        value: Fixed value of the constraint function. Can be a scalar or of
            the same structure as the output of func. Defaults to None.
        tol: Tolerance for the constraint function. Defaults to
            `optimagic.optimization.algo_options.CONSTRAINTS_ABSOLUTE_TOLERANCE`.

    Raises:
        InvalidConstraintError: If the selector is not callable, or if func,
            derivative, lower_bound, upper_bound, or value are not valid.

    """

    selector: Callable[[PyTree], PyTree] = identity_selector
    _: KW_ONLY
    func: Callable[[PyTree], ArrayLike | "pd.Series[float]" | float] | None = None
    derivative: Callable[[PyTree], PyTree] | None = None
    lower_bound: ArrayLike | "pd.Series[float]" | float | None = None
    upper_bound: ArrayLike | "pd.Series[float]" | float | None = None
    value: ArrayLike | "pd.Series[float]" | float | None = None
    tol: float = CONSTRAINTS_ABSOLUTE_TOLERANCE

    def __post_init__(self) -> None:
        # Validation order matters: it determines which error a caller sees.
        if not callable(self.selector):
            raise InvalidConstraintError("'selector' must be callable.")
        if _all_none(self.lower_bound, self.upper_bound, self.value):
            raise InvalidConstraintError(
                "At least one of 'lower_bound', 'upper_bound', or 'value' must be "
                "non-None."
            )
        if self.value is not None and not _all_none(self.lower_bound, self.upper_bound):
            raise InvalidConstraintError(
                "'value' cannot be used with 'lower_bound' or 'upper_bound'."
            )
        if self.tol is not None and (
            not isinstance(self.tol, float | int) or self.tol < 0
        ):
            raise InvalidConstraintError("'tol' must be non-negative.")
        # func defaults to None but is required; catch the missing case here.
        if self.func is None or not callable(self.func):
            raise InvalidConstraintError("'func' must be callable.")
        if self.derivative is not None and not callable(self.derivative):
            raise InvalidConstraintError("'derivative' must be callable.")

    def _to_dict(self) -> dict[str, Any]:
        # Internal dictionary representation used by the old constraint API.
        # Note: the dict keys are pluralized (lower_bounds / upper_bounds).
        return {
            "type": "nonlinear",
            "selector": self.selector,
            **_select_non_none(
                func=self.func,
                derivative=self.derivative,
                lower_bounds=self.lower_bound,
                upper_bounds=self.upper_bound,
                value=self.value,
                tol=self.tol,
            ),
        }
def catch(
    func=None,
    *,
    exception=Exception,
    exclude=(KeyboardInterrupt, SystemExit),
    onerror=None,
    default=None,
    warn=True,
    reraise=False,
):
    """Catch and handle exceptions raised by the decorated function.

    Can be used both as ``@catch`` and as ``@catch(...)`` with arguments.

    Args:
        func: The decorated function. Only set when the decorator is applied
            without parentheses.
        exception (Exception or tuple): One or several exceptions that are
            caught and handled. By default all Exceptions are caught.
        exclude (Exception or tuple): One or several exceptions that are never
            caught. By default those are KeyboardInterrupt and SystemExit.
        onerror (None or Callable): Callable that takes an Exception as only
            argument. It is called whenever an exception occurs.
        default: Value returned in place of the function output when an
            exception occurs. Can be a constant, the string "__traceback__"
            (a string with the traceback is returned), or a callable with the
            same signature as func.
        warn (bool): If True, the exception is converted to a warning.
        reraise (bool): If True, the exception is raised after handling it.

    """

    def _decorate(fn):
        @functools.wraps(fn)
        def _guarded(*args, **kwargs):
            try:
                return fn(*args, **kwargs)
            except exclude:
                # Excluded exceptions always propagate untouched.
                raise
            except exception as err:
                if onerror is not None:
                    onerror(err)
                if reraise:
                    raise err
                traceback_str = get_traceback()
                if warn:
                    msg = f"The following exception was caught:\n\n{traceback_str}"
                    warnings.warn(msg)
                if default == "__traceback__":
                    return traceback_str
                if callable(default):
                    return default(*args, **kwargs)
                return default

        return _guarded

    # Support both the bare-decorator and the decorator-factory call forms.
    return _decorate(func) if callable(func) else _decorate
def throw_criterion_future_warning():
    """Warn that the deprecated `criterion` argument was used instead of `fun`."""
    message = (
        "To align optimagic with scipy.optimize, the `criterion` argument has been "
        "renamed to `fun`. Please use `fun` instead of `criterion`. Using `criterion` "
        " will become an error in optimagic version 0.6.0 and later."
    )
    warnings.warn(message, FutureWarning)
For more details " "see the documentation: " "https://optimagic.readthedocs.io/en/latest/how_to/how_to_derivatives.html" ) warnings.warn(msg, FutureWarning) def throw_derivative_kwargs_future_warning(): msg = ( "To align optimagic with scipy.optimize, the `derivative_kwargs` argument has " "been renamed to `jac_kwargs`. Please use `jac_kwargs` instead of " "`derivative_kwargs`. Using `derivative_kwargs` will become an error in " "optimagic version 0.6.0 and later. For more details see the documentation: " "https://optimagic.readthedocs.io/en/latest/how_to/how_to_derivatives.html" ) warnings.warn(msg, FutureWarning) def throw_criterion_and_derivative_future_warning(): msg = ( "To align optimagic with scipy.optimize, the `criterion_and_derivative` " "argument has been renamed to `fun_and_jac`. Please use `fun_and_jac` " "instead of `criterion_and_derivative`. Using `criterion_and_derivative` " "will become an error in optimagic version 0.6.0 and later. For more details " "see the documentation: " "https://optimagic.readthedocs.io/en/latest/how_to/how_to_derivatives.html" ) warnings.warn(msg, FutureWarning) def throw_criterion_and_derivative_kwargs_future_warning(): msg = ( "To align optimagic with scipy.optimize, the `criterion_and_derivative_kwargs` " "argument has been renamed to `fun_and_jac_kwargs`. Please use " "`fun_and_jac_kwargs` instead of `criterion_and_derivative_kwargs`. Using " "`criterion_and_derivative_kwargs` will become an error in optimagic version " "0.6.0 and later. For more details see the documentation: " "https://optimagic.readthedocs.io/en/latest/how_to/how_to_derivatives.html" ) warnings.warn(msg, FutureWarning) def throw_scaling_options_future_warning(): msg = ( "Specifying scaling options via the argument `scaling_options` is deprecated " "and will be removed in optimagic version 0.6.0 and later. You can pass these " "options directly to the `scaling` argument instead." 
def throw_multistart_options_future_warning():
    """Warn that the deprecated `multistart_options` argument was used."""
    message = (
        "Specifying multistart options via the argument `multistart_options` is "
        "deprecated and will be removed in optimagic version 0.6.0 and later. You can "
        "pass these options directly to the `multistart` argument instead. For more "
        "details see the documentation: "
        "https://optimagic.readthedocs.io/en/latest/how_to/how_to_multistart.html"
    )
    warnings.warn(message, category=FutureWarning)
def throw_dict_access_future_warning(attribute, obj_name):
    """Warn that dict-style access on a result object is deprecated.

    Args:
        attribute: Name of the attribute accessed via ``obj["attribute"]``.
        obj_name: Name of the object, used in the suggested replacement.

    """
    warnings.warn(
        f"The dictionary access for '{attribute}' is deprecated and will be removed "
        "in optimagic version 0.6.0. Please use the new attribute access instead: "
        f"`{obj_name}.{attribute}`.",
        FutureWarning,
    )
def replace_and_warn_about_deprecated_algo_options(algo_options):
    """Translate deprecated `algo_options` keys to their new names.

    Dotted keys are normalized to underscore keys first. For each deprecated
    key that is present, a FutureWarning is emitted and its value is moved to
    the new key (overriding the new key if both are present).

    Args:
        algo_options: Dictionary of algorithm options. Any non-dict input is
            returned unchanged.

    Returns:
        The options dictionary with deprecated keys replaced, or the unchanged
        input if it was not a dict.

    """
    if not isinstance(algo_options, dict):
        return algo_options

    normalized = {key.replace(".", "_"): val for key, val in algo_options.items()}

    replacements = {
        "stopping_max_criterion_evaluations": "stopping_maxfun",
        "stopping_max_iterations": "stopping_maxiter",
        "convergence_absolute_criterion_tolerance": "convergence_ftol_abs",
        "convergence_relative_criterion_tolerance": "convergence_ftol_rel",
        "convergence_scaled_criterion_tolerance": "convergence_ftol_scaled",
        "convergence_absolute_params_tolerance": "convergence_xtol_abs",
        "convergence_relative_params_tolerance": "convergence_xtol_rel",
        "convergence_absolute_gradient_tolerance": "convergence_gtol_abs",
        "convergence_relative_gradient_tolerance": "convergence_gtol_rel",
        "convergence_scaled_gradient_tolerance": "convergence_gtol_scaled",
    }

    deprecated_keys = sorted(set(normalized) & set(replacements))

    if deprecated_keys:
        msg = (
            "The following keys in `algo_options` are deprecated and will be removed "
            "in optimagic version 0.6.0 and later. Please replace them as follows:\n"
        )
        for key in deprecated_keys:
            msg += f" {key} -> {replacements[key]}\n"
        warnings.warn(msg, FutureWarning)

    updated = {k: v for k, v in normalized.items() if k not in deprecated_keys}
    updated.update({replacements[k]: normalized[k] for k in deprecated_keys})
    return updated
def is_dict_output(candidate):
    """Return True if candidate is a dict using the deprecated special keys.

    The special keys of the old dict output format are "value",
    "contributions" and "root_contributions".

    """
    if not isinstance(candidate, dict):
        return False
    return not candidate.keys().isdisjoint(
        {"value", "contributions", "root_contributions"}
    )
P = ParamSpec("P")


def replace_dict_output(func: Callable[P, Any]) -> Callable[P, Any]:
    """Wrap func so deprecated dict outputs become FunctionValue objects.

    Has no effect unless func returns a dictionary with at least one of the
    special keys "value", "contributions" or "root_contributions", or a tuple
    whose first entry is such a dictionary (the fun-and-jac case).

    No warning is added here because the wrapped function is evaluated many
    times during an optimization and the warning would pop up too often.

    """

    @wraps(func)
    def wrapper(*args: P.args, **kwargs: P.kwargs) -> Any:
        raw = func(*args, **kwargs)
        if isinstance(raw, tuple):
            # fun_and_jac case: only the function value can be a dict output.
            return (convert_dict_to_function_value(raw[0]), raw[1])
        return convert_dict_to_function_value(raw)

    return wrapper
) warnings.warn(msg, FutureWarning) def throw_dict_constraints_future_warning_if_required( constraints: list[dict[str, Any]] | dict[str, Any], ) -> None: replacements = { "fixed": "optimagic.FixedConstraint", "increasing": "optimagic.IncreasingConstraint", "decreasing": "optimagic.DecreasingConstraint", "equality": "optimagic.EqualityConstraint", "probability": "optimagic.ProbabilityConstraint", "pairwise_equality": "optimagic.PairwiseEqualityConstraint", "covariance": "optimagic.FlatCovConstraint", "sdcorr": "optimagic.FlatSDCorrConstraint", "linear": "optimagic.LinearConstraint", "nonlinear": "optimagic.NonlinearConstraint", } if not isinstance(constraints, list): constraints = [constraints] types_or_none = [ constraint.get("type", None) if isinstance(constraint, dict) else None for constraint in constraints ] types = [t for t in types_or_none if t is not None] if types: msg = ( "Specifying constraints as a dictionary is deprecated and will be removed " "in optimagic version 0.6.0. Please replace them using the new optimagic " "constraint objects:\n" ) for t in types: msg += f" {{'type': '{t}', ...}} -> {replacements[t]}(...)\n" msg += ( "\nFor more details see the documentation: " "https://optimagic.readthedocs.io/en/latest/how_to/how_to_constraints.html" ) warnings.warn(msg, FutureWarning) def replace_and_warn_about_deprecated_multistart_options(options): """Replace deprecated multistart options and warn about them. Args: options (MultistartOptions): The multistart options to replace. Returns: MultistartOptions: The replaced multistart options. """ replacements = {} if options.share_optimization is not None: msg = ( "The `share_optimization` option is deprecated and will be removed in " "version 0.6.0. Use `stopping_maxopt` instead to specify the number of " "optimizations directly. 
For more details see the documentation: " "https://optimagic.readthedocs.io/en/latest/how_to/how_to_multistart.html" ) warnings.warn(msg, FutureWarning) if options.convergence_relative_params_tolerance is not None: msg = ( "The `convergence_relative_params_tolerance` option is deprecated and will " "be removed in version 0.6.0. Use `convergence_xtol_rel` instead. For more " "details see the documentation: " "https://optimagic.readthedocs.io/en/latest/how_to/how_to_multistart.html" ) warnings.warn(msg, FutureWarning) if options.convergence_xtol_rel is None: replacements["convergence_xtol_rel"] = ( options.convergence_relative_params_tolerance ) if options.optimization_error_handling is not None: msg = ( "The `optimization_error_handling` option is deprecated and will be " "removed in version 0.6.0. Setting this attribute also sets the error " "handling for exploration. Use the new `error_handling` option to set the " "error handling for both optimization and exploration. For more details " "see the documentation: " "https://optimagic.readthedocs.io/en/latest/how_to/how_to_multistart.html" ) warnings.warn(msg, FutureWarning) if options.error_handling is None: replacements["error_handling"] = options.optimization_error_handling if options.exploration_error_handling is not None: msg = ( "The `exploration_error_handling` option is deprecated and will be " "removed in version 0.6.0. Setting this attribute also sets the error " "handling for exploration. Use the new `error_handling` option to set the " "error handling for both optimization and exploration. 
For more details " "see the documentation: " "https://optimagic.readthedocs.io/en/latest/how_to/how_to_multistart.html" ) warnings.warn(msg, FutureWarning) if options.error_handling is None: replacements["error_handling"] = options.exploration_error_handling return replace(options, **replacements) def replace_and_warn_about_deprecated_base_steps( step_size, base_steps, ): if base_steps is not None: msg = ( "The `base_steps` argument is deprecated and will be removed alongside " "Richardson extrapolation in optimagic version 0.6.0. To specify the " "step size use the `step_size` argument instead. For more details see the " "documentation: " "https://optimagic.readthedocs.io/en/latest/how_to/how_to_step_size.html" ) warnings.warn(msg, FutureWarning) if step_size is None: step_size = base_steps return step_size def replace_and_warn_about_deprecated_derivatives(candidate, name): msg = ( f"Specifying a dictionary of {name} functions is deprecated and will be " "removed in optimagic version 0.6.0. Please specify a single function that " "returns the correct derivative for your optimizer or a list of functions that " "are decorated with the `mark.scalar`, `mark.likelihood` or " "`mark.least_squares` decorators. For more details see the documentation: " "https://optimagic.readthedocs.io/en/latest/how_to/how_to_derivatives.html" ) warnings.warn(msg, FutureWarning) key_to_marker = { "value": mark.scalar, "contributions": mark.likelihood, "root_contributions": mark.least_squares, } out = [] for key, func in candidate.items(): if key in key_to_marker: out.append(key_to_marker[key](func)) return out def handle_log_options_throw_deprecated_warning( log_options: dict[str, Any], logger: str | Path | LogOptions | None ) -> str | Path | LogOptions | None: msg = ( "Usage of the parameter log_options is deprecated " "and will be removed in a future version. " "Provide a LogOptions instance for the parameter `logging`, if you need to " "configure the logging." 
def pre_process_constraints(
    constraints: list[Constraint | dict[str, Any]] | Constraint | dict[str, Any] | None,
) -> list[dict[str, Any]]:
    """Convert all supported constraint formats to a list of dictionaries.

    For the optimagic release 0.5.0 only the new constraint API was
    implemented while the internal representation of constraints still uses
    the old format. Therefore all ways of specifying constraints — and in
    particular the new constraint objects — are converted to the old format,
    i.e. a list of dictionaries. Once the internal representation is
    refactored, the conversion will go the other way.

    Args:
        constraints: A single constraint object or dict, a list of those, or
            None.

    Returns:
        A list of dictionaries in the old constraint format.

    Raises:
        InvalidConstraintError: If the argument (or any list entry) is neither
            a Constraint object nor a dict.

    """
    if constraints is None:
        return []

    # Normalize a single constraint to a one-element list.
    if isinstance(constraints, dict | Constraint):
        constraints = [constraints]

    if not isinstance(constraints, list):
        msg = (
            f"Invalid constraint type: {type(constraints)}. Must be a constraint "
            "object or list thereof imported from `optimagic`. For more details see "
            "the documentation: "
            "https://optimagic.readthedocs.io/en/latest/how_to/how_to_constraints.html"
        )
        raise InvalidConstraintError(msg)

    converted = []
    bad_types: list[type] = []
    for candidate in constraints:
        if isinstance(candidate, Constraint):
            converted.append(candidate._to_dict())
        elif isinstance(candidate, dict):
            converted.append(candidate)
        else:
            bad_types.append(type(candidate))

    if bad_types:
        msg = (
            f"Invalid constraint types: {set(bad_types)}. Must be a constraint "
            "object imported from `optimagic`."
        )
        raise InvalidConstraintError(msg)

    return converted
@dataclass(frozen=True)
class NumdiffResult:
    """Result of a numerical differentiation.

    The following relationship holds for vector-valued functions with vector-valued
    parameters:

    First Derivative:
    -----------------

    - f: R -> R leads to shape (1,), usually called derivative
    - f: R^m -> R leads to shape (m, ), usually called Gradient
    - f: R -> R^n leads to shape (n, 1), usually called Jacobian
    - f: R^m -> R^n leads to shape (n, m), usually called Jacobian

    Second Derivative:
    ------------------

    - f: R -> R leads to shape (1,), usually called second derivative
    - f: R^m -> R leads to shape (m, m), usually called Hessian
    - f: R -> R^n leads to shape (n,), usually called Hessian
    - f: R^m -> R^n leads to shape (n, m, m), usually called Hessian tensor

    Attributes:
        derivative: The estimated derivative at the parameters. The structure of the
            derivative depends on the input parameters and the output of the function.
        func_value: The value of the function at the parameters.

    """

    derivative: PyTree
    func_value: PyTree | None = None

    # deprecated
    # Private storage for deprecated result data; exposed via the warning
    # properties below until the accessors are removed in optimagic 0.6.0.
    _func_evals: pd.DataFrame | dict[str, pd.DataFrame | None] | None = None
    _derivative_candidates: pd.DataFrame | None = None

    @property
    def func_evals(self) -> pd.DataFrame | dict[str, pd.DataFrame | None] | None:
        # Deprecated accessor: emits a FutureWarning, then returns the stored data.
        deprecations.throw_numdiff_result_func_evals_future_warning()
        return self._func_evals

    @property
    def derivative_candidates(self) -> pd.DataFrame | None:
        # Deprecated accessor: emits a FutureWarning, then returns the stored data.
        deprecations.throw_numdiff_result_derivative_candidates_future_warning()
        return self._derivative_candidates

    def __getitem__(self, key: str) -> Any:
        # Backwards compatibility for the old dict-style result access; warns
        # and forwards to regular attribute access.
        deprecations.throw_dict_access_future_warning(key, obj_name=type(self).__name__)
        return getattr(self, key)
    By default the resulting Jacobian will be returned as a block-pytree.

    For a detailed description of all options that influence the step size as well as
    an explanation of how steps are adjusted to bounds in case of a conflict, see
    :func:`~optimagic.differentiation.generate_steps.generate_steps`.

    Args:
        func: Function of which the derivative is calculated.
        params: A pytree. See :ref:`params`.
        bounds: Lower and upper bounds on the parameters. The most general and
            preferred way to specify bounds is an `optimagic.Bounds` object that
            collects lower, upper, soft_lower and soft_upper bounds. The soft bounds
            are not used during numerical differentiation. Each bound type mirrors the
            structure of params. Check our how-to guide on bounds for examples. If
            params is a flat numpy array, you can also provide bounds via any format
            that is supported by scipy.optimize.minimize.
        func_kwargs: Additional keyword arguments for func, optional.
        method: One of ["central", "forward", "backward"], default "central".
        step_size: 1d array of the same length as params. step_size * scaling_factor
            is the absolute value of the first (and possibly only) step used in the
            finite differences approximation of the derivative. If step_size *
            scaling_factor conflicts with bounds, the actual steps will be adjusted.
            If step_size is not provided, it will be determined according to a rule of
            thumb as long as this does not conflict with min_steps.
        scaling_factor: Scaling factor which is applied to step_size. If it is a
            numpy.ndarray, it needs to be as long as params. scaling_factor is useful
            if you want to increase or decrease the base_step relative to the
            rule-of-thumb or user provided base_step, for example to benchmark the
            effect of the step size. Default 1.
        min_steps: Minimal possible step sizes that can be chosen to accommodate
            bounds. Must have same length as params. By default min_steps is equal to
            step_size, i.e step size is not decreased beyond what is optimal according
            to the rule of thumb.
        f0: 1d numpy array with func(x), optional.
        n_cores: Number of processes used to parallelize the function evaluations.
            Default 1.
        error_handling: One of "continue" (catch errors and continue to calculate
            derivative estimates. In this case, some derivative estimates can be
            missing but no errors are raised), "raise" (catch errors and continue to
            calculate derivative estimates at first but raise an error if all
            evaluations for one parameter failed) and "raise_strict" (raise an error
            as soon as a function evaluation fails).
        batch_evaluator (str or callable): Name of a pre-implemented batch evaluator
            (currently 'joblib' and 'pathos_mp') or Callable with the same interface
            as the optimagic batch_evaluators.
        unpacker: A callable that takes the output of func and returns the part of the
            output that is needed for the derivative calculation. If None, the output
            of func is used as is. Default None.

    Returns:
        NumdiffResult: A numerical differentiation result.

    """
    # ==================================================================================
    # handle deprecations
    # ==================================================================================
    bounds = replace_and_warn_about_deprecated_bounds(
        lower_bounds=lower_bounds,
        upper_bounds=upper_bounds,
        bounds=bounds,
    )

    step_size = replace_and_warn_about_deprecated_base_steps(
        step_size=step_size,
        base_steps=base_steps,
    )

    if key is not None:
        deprecations.throw_key_warning_in_derivatives()
        if unpacker is None:
            # emulate the deprecated `key` behavior through an unpacker
            unpacker = lambda x: x[key]

    if step_ratio is not None:
        deprecations.throw_derivatives_step_ratio_future_warning()
    else:
        step_ratio = 2

    if n_steps is not None:
        deprecations.throw_derivatives_n_steps_future_warning()
    else:
        n_steps = 1

    if return_info is not None:
        deprecations.throw_derivatives_return_info_future_warning()
    else:
        return_info = False

    if return_func_value is not None:
        deprecations.throw_derivatives_return_func_value_future_warning()
    else:
        return_func_value = True

    # ==================================================================================
    bounds = pre_process_bounds(bounds)
    unpacker = _process_unpacker(unpacker)

    # ==================================================================================
    # Convert scalar | pytree arguments to 1d arrays of floats
    # ==================================================================================
    registry = get_registry(extended=True)

    # fast path: params already is a 1d numpy array, so no pytree handling is needed
    is_fast_path = _is_1d_array(params)

    if not is_fast_path:
        x, params_treedef = tree_flatten(params, registry=registry)
        x = np.array(x, dtype=np.float64)

        if scaling_factor is not None and not np.isscalar(scaling_factor):
            scaling_factor = np.array(
                tree_just_flatten(scaling_factor, registry=registry)
            )
        if min_steps is not None and not np.isscalar(min_steps):
            min_steps = np.array(tree_just_flatten(min_steps, registry=registry))
        if step_size is not None and not np.isscalar(step_size):
            step_size = np.array(tree_just_flatten(step_size, registry=registry))
    else:
        x = params.astype(np.float64)
        scaling_factor = _process_scalar_or_array_argument(
            scaling_factor, x, "scaling_factor"
        )
        min_steps = _process_scalar_or_array_argument(min_steps, x, "min_steps")
        step_size = _process_scalar_or_array_argument(step_size, x, "step_size")
    # ==================================================================================

    if np.isnan(x).any():
        raise ValueError("The parameter vector must not contain NaNs.")

    internal_lb, internal_ub = get_internal_bounds(params, bounds=bounds)

    # handle kwargs
    func_kwargs = {} if func_kwargs is None else func_kwargs
    partialed_func = functools.partial(func, **func_kwargs)

    implemented_methods = {"forward", "backward", "central"}
    if method not in implemented_methods:
        raise ValueError(f"Method has to be in {implemented_methods}.")

    # generate the step array
    step_size = generate_steps(
        x=x,
        method=method,
        n_steps=n_steps,
        target="first_derivative",
        base_steps=step_size,
        scaling_factor=scaling_factor,
        bounds=Bounds(lower=internal_lb, upper=internal_ub),
        step_ratio=step_ratio,
        min_steps=min_steps,
    )
    step_size = cast(NDArray[np.float64], step_size)

    # generate parameter vectors at which func has to be evaluated as numpy arrays
    evaluation_points = []
    for step_arr in step_size:
        for i, j in product(range(n_steps), range(len(x))):
            if np.isnan(step_arr[i, j]):
                # NaN steps mark points that cannot be evaluated (bound conflicts);
                # they are skipped later by the nan-skipping batch evaluator
                evaluation_points.append(np.nan)
            else:
                point = x.copy()
                point[j] += step_arr[i, j]
                evaluation_points.append(point)

    # convert the numpy arrays to whatever is needed by func
    if not is_fast_path:
        evaluation_points = [
            # entries are either a numpy.ndarray or np.nan
            _unflatten_if_not_nan(p, params_treedef, registry)
            for p in evaluation_points
        ]

    # we always evaluate f0, so we can fall back to one-sided derivatives if
    # two-sided derivatives fail. The extra cost is negligible in most cases.
    if f0 is None:
        evaluation_points.append(params)

    # do the function evaluations, including error handling
    batch_error_handling = "raise" if error_handling == "raise_strict" else "continue"
    raw_evals = _nan_skipping_batch_evaluator(
        func=partialed_func,
        arguments=evaluation_points,
        n_cores=n_cores,
        error_handling=batch_error_handling,
        batch_evaluator=batch_evaluator,
    )

    # extract information on exceptions that occurred during function evaluations
    exc_info = "\n\n".join([val for val in raw_evals if isinstance(val, str)])
    raw_evals = [val if not isinstance(val, str) else np.nan for val in raw_evals]

    # store full function value at params as func_value and a processed version of it
    # that we need to calculate derivatives as f0
    if f0 is None:
        f0 = raw_evals[-1]
        raw_evals = raw_evals[:-1]
    func_value = f0

    f0_tree = unpacker(f0)
    scalar_out = np.isscalar(f0_tree)
    vector_out = isinstance(f0_tree, np.ndarray) and f0_tree.ndim == 1

    if scalar_out:
        f0 = np.array([f0_tree], dtype=float)
    elif vector_out:
        f0 = f0_tree.astype(float)
    else:
        f0 = tree_leaves(f0_tree, registry=registry)
        f0 = np.array(f0, dtype=np.float64)

    # convert the raw evaluations to numpy arrays
    raw_evals_arr = _convert_evals_to_numpy(
        raw_evals=raw_evals,
        unpacker=unpacker,
        registry=registry,
        is_scalar_out=scalar_out,
        is_vector_out=vector_out,
    )

    # apply finite difference formulae
    evals_data = np.array(raw_evals_arr).reshape(2, n_steps, len(x), -1)
    evals_data_transposed = np.transpose(evals_data, axes=(0, 1, 3, 2))
    evals = Evals(pos=evals_data_transposed[0], neg=evals_data_transposed[1])

    jac_candidates = {}
    for m in ["forward", "backward", "central"]:
        jac_candidates[m] = finite_differences.jacobian(evals, step_size, f0, m)

    # get the best derivative estimate out of all derivative estimates that could be
    # calculated, given the function evaluations.
    orders = {
        "central": ["central", "forward", "backward"],
        "forward": ["forward", "backward"],
        "backward": ["backward", "forward"],
    }

    if n_steps == 1:
        jac = _consolidate_one_step_derivatives(jac_candidates, orders[method])
        updated_candidates = None
    else:
        richardson_candidates = _compute_richardson_candidates(
            jac_candidates, step_size, n_steps
        )
        jac, updated_candidates = _consolidate_extrapolated(richardson_candidates)

    # raise error if necessary
    if error_handling in ("raise", "raise_strict") and np.isnan(jac).any():
        raise Exception(exc_info)

    # results processing
    if is_fast_path and vector_out:
        derivative = jac
    elif is_fast_path and scalar_out:
        derivative = jac.flatten()
    else:
        derivative = matrix_to_block_tree(jac, f0_tree, params)

    result = {"derivative": derivative}
    if return_func_value:
        result["func_value"] = func_value
    if return_info:
        info = _collect_additional_info(
            step_size, evals, updated_candidates, target="first_derivative"
        )
        result = {**result, **info}
    return NumdiffResult(**result)


def second_derivative(
    func: Callable[[PyTree], PyTree],
    params: PyTree,
    *,
    bounds: Bounds | None = None,
    func_kwargs: dict[str, Any] | None = None,
    method: Literal[
        "forward", "backward", "central_average", "central_cross"
    ] = "central_cross",
    step_size: float | PyTree | None = None,
    scaling_factor: float | PyTree = 1,
    min_steps: float |
PyTree | None = None,
    f0: PyTree | None = None,
    n_cores: int = DEFAULT_N_CORES,
    error_handling: Literal["continue", "raise", "raise_strict"] = "continue",
    batch_evaluator: BatchEvaluatorLiteral | Callable = "joblib",
    unpacker: Callable[[Any], PyTree] | None = None,
    # deprecated
    lower_bounds: PyTree | None = None,
    upper_bounds: PyTree | None = None,
    base_steps: PyTree | None = None,
    step_ratio: float | None = None,
    n_steps: int | None = None,
    return_info: bool | None = None,
    return_func_value: bool | None = None,
    key: str | None = None,
) -> NumdiffResult:
    """Evaluate second derivative of func at params according to method and step
    options.

    Internally, the function is converted such that it maps from a 1d array to a 1d
    array. Then the Hessians of that function are calculated. The resulting derivative
    estimate is always a :class:`numpy.ndarray`.

    The parameters and the function output can be pandas objects (Series or DataFrames
    with value column). In that case the output of second_derivative is also a pandas
    object and with appropriate index and columns.

    Detailed description of all options that influence the step size as well as an
    explanation of how steps are adjusted to bounds in case of a conflict, see
    :func:`~optimagic.differentiation.generate_steps.generate_steps`.

    Args:
        func: Function of which the derivative is calculated.
        params: 1d numpy array or :class:`pandas.DataFrame` with parameters at which
            the derivative is calculated. If it is a DataFrame, it can contain the
            columns "lower_bound" and "upper_bound" for bounds. See :ref:`params`.
        bounds: Lower and upper bounds on the parameters. The most general and
            preferred way to specify bounds is an `optimagic.Bounds` object that
            collects lower, upper, soft_lower and soft_upper bounds. The soft bounds
            are not used during numerical differentiation. Each bound type mirrors the
            structure of params. Check our how-to guide on bounds for examples. If
            params is a flat numpy array, you can also provide bounds via any format
            that is supported by scipy.optimize.minimize.
        func_kwargs: Additional keyword arguments for func, optional.
        method: One of {"forward", "backward", "central_average", "central_cross"}
            These correspond to the finite difference approximations defined in
            equations [7, x, 8, 9] in Rideout [2009], where ("backward", x) is not
            found in Rideout [2009] but is the natural extension of equation 7 to the
            backward case. Default "central_cross".
        step_size: 1d array of the same length as params. step_size * scaling_factor
            is the absolute value of the first (and possibly only) step used in the
            finite differences approximation of the derivative. If step_size *
            scaling_factor conflicts with bounds, the actual steps will be adjusted.
            If step_size is not provided, it will be determined according to a rule of
            thumb as long as this does not conflict with min_steps.
        scaling_factor: Scaling factor which is applied to step_size. If it is a
            numpy.ndarray, it needs to be as long as params. scaling_factor is useful
            if you want to increase or decrease the base_step relative to the
            rule-of-thumb or user provided base_step, for example to benchmark the
            effect of the step size. Default 1.
        min_steps: Minimal possible step sizes that can be chosen to accommodate
            bounds. Must have same length as params. By default min_steps is equal to
            step_size, i.e step size is not decreased beyond what is optimal according
            to the rule of thumb.
        f0: 1d numpy array with func(x), optional.
        n_cores: Number of processes used to parallelize the function evaluations.
            Default 1.
        error_handling: One of "continue" (catch errors and continue to calculate
            derivative estimates. In this case, some derivative estimates can be
            missing but no errors are raised), "raise" (catch errors and continue to
            calculate derivative estimates at first but raise an error if all
            evaluations for one parameter failed) and "raise_strict" (raise an error
            as soon as a function evaluation fails).
        batch_evaluator: Name of a pre-implemented batch evaluator (currently 'joblib'
            and 'pathos_mp') or Callable with the same interface as the optimagic
            batch_evaluators.
        unpacker: A callable that takes the output of func and returns the part of the
            output that is needed for the derivative calculation. If None, the output
            of func is used as is. Default None.

    Returns:
        NumdiffResult: A numerical differentiation result.

    """
    # ==================================================================================
    # handle deprecations
    # ==================================================================================
    bounds = replace_and_warn_about_deprecated_bounds(
        lower_bounds=lower_bounds,
        upper_bounds=upper_bounds,
        bounds=bounds,
    )

    step_size = replace_and_warn_about_deprecated_base_steps(
        step_size=step_size,
        base_steps=base_steps,
    )

    if step_ratio is not None:
        deprecations.throw_derivatives_step_ratio_future_warning()
    else:
        step_ratio = 2

    if n_steps is not None:
        deprecations.throw_derivatives_n_steps_future_warning()
    else:
        n_steps = 1

    if return_info is not None:
        deprecations.throw_derivatives_return_info_future_warning()
    else:
        return_info = False

    if return_func_value is not None:
        deprecations.throw_derivatives_return_func_value_future_warning()
    else:
        return_func_value = True

    if key is not None:
        deprecations.throw_key_warning_in_derivatives()
        if unpacker is None:
            # emulate the deprecated `key` behavior through an unpacker
            unpacker = lambda x: x[key]

    # ==================================================================================
    bounds = pre_process_bounds(bounds)
    unpacker = _process_unpacker(unpacker)

    # ==================================================================================
    # Convert scalar | pytree arguments to 1d arrays of floats
    # ==================================================================================
    registry = get_registry(extended=True)

    # fast path: params already is a 1d numpy array, so no pytree handling is needed
    is_fast_path = _is_1d_array(params)

    if not is_fast_path:
        x, params_treedef = tree_flatten(params, registry=registry)
        x = np.array(x, dtype=np.float64)

        if scaling_factor is not None and not np.isscalar(scaling_factor):
            scaling_factor = np.array(
                tree_just_flatten(scaling_factor, registry=registry)
            )
        if min_steps is not None and not np.isscalar(min_steps):
            min_steps = np.array(tree_just_flatten(min_steps, registry=registry))
        if step_size is not None and not np.isscalar(step_size):
            step_size = np.array(tree_just_flatten(step_size, registry=registry))
    else:
        x = params.astype(np.float64)
        scaling_factor = _process_scalar_or_array_argument(
            scaling_factor, x, "scaling_factor"
        )
        min_steps = _process_scalar_or_array_argument(min_steps, x, "min_steps")
        step_size = _process_scalar_or_array_argument(step_size, x, "step_size")
    # ==================================================================================

    # NOTE(review): `unpacker` was already processed above; this second call wraps the
    # already-wrapped unpacker. Looks redundant but harmless — confirm before removing.
    unpacker = _process_unpacker(unpacker)

    internal_lb, internal_ub = get_internal_bounds(params, bounds=bounds)

    # handle kwargs
    func_kwargs = {} if func_kwargs is None else func_kwargs
    partialed_func = functools.partial(func, **func_kwargs)

    implemented_methods = {"forward", "backward", "central_average", "central_cross"}
    if method not in implemented_methods:
        raise ValueError(f"Method has to be in {implemented_methods}.")

    # generate the step array; both central variants share the "central" step layout
    step_size = generate_steps(
        x=x,
        method=("central" if "central" in method else method),
        n_steps=n_steps,
        target="second_derivative",
        base_steps=step_size,
        scaling_factor=scaling_factor,
        bounds=Bounds(lower=internal_lb, upper=internal_ub),
        step_ratio=step_ratio,
        min_steps=min_steps,
    )
    step_size = cast(NDArray[np.float64], step_size)

    # generate parameter vectors at which func has to be evaluated as numpy arrays
    evaluation_points = {  # type: ignore
        "one_step": [],
        "two_step": [],
        "cross_step": [],
    }
    for step_arr in step_size:
        # single direction steps
        for i, j in product(range(n_steps), range(len(x))):
            if np.isnan(step_arr[i, j]):
                evaluation_points["one_step"].append(np.nan)
            else:
                point = x.copy()
                point[j] += step_arr[i, j]
                evaluation_points["one_step"].append(point)
        # two and cross direction steps; only j <= k is evaluated, the rest is
        # recovered later by symmetry in the reshape helpers
        for i, j, k in product(range(n_steps), range(len(x)), range(len(x))):
            if j > k or np.isnan(step_arr[i, j]) or np.isnan(step_arr[i, k]):
                evaluation_points["two_step"].append(np.nan)
                evaluation_points["cross_step"].append(np.nan)
            else:
                point = x.copy()
                point[j] += step_arr[i, j]
                point[k] += step_arr[i, k]
                evaluation_points["two_step"].append(point)
                if j == k:
                    # on the diagonal the cross step would equal x0; f0 is used instead
                    evaluation_points["cross_step"].append(np.nan)
                else:
                    point = x.copy()
                    point[j] += step_arr[i, j]
                    point[k] -= step_arr[i, k]
                    evaluation_points["cross_step"].append(point)

    # convert the numpy arrays to whatever is needed by func
    if not is_fast_path:
        evaluation_points = {
            # entries are either a numpy.ndarray or np.nan, we unflatten only
            step_type: [
                _unflatten_if_not_nan(p, params_treedef, registry) for p in points
            ]
            for step_type, points in evaluation_points.items()
        }

    # we always evaluate f0, so we can fall back to one-sided derivatives if
    # two-sided derivatives fail. The extra cost is negligible in most cases.
    if f0 is None:
        evaluation_points["one_step"].append(params)

    # do the function evaluations for one and two step, including error handling
    batch_error_handling = "raise" if error_handling == "raise_strict" else "continue"
    raw_evals = _nan_skipping_batch_evaluator(
        func=partialed_func,
        arguments=list(itertools.chain.from_iterable(evaluation_points.values())),
        n_cores=n_cores,
        error_handling=batch_error_handling,
        batch_evaluator=batch_evaluator,
    )

    # extract information on exceptions that occurred during function evaluations
    exc_info = "\n\n".join([val for val in raw_evals if isinstance(val, str)])
    raw_evals = [val if not isinstance(val, str) else np.nan for val in raw_evals]

    # split the flat list of results back into the three step types
    n_one_step, n_two_step, n_cross_step = map(len, evaluation_points.values())
    raw_evals = {
        "one_step": raw_evals[:n_one_step],
        "two_step": raw_evals[n_one_step : n_two_step + n_one_step],
        "cross_step": raw_evals[n_two_step + n_one_step :],
    }

    # store full function value at params as func_value and a processed version of it
    # that we need to calculate derivatives as f0
    if f0 is None:
        f0 = raw_evals["one_step"][-1]
        raw_evals["one_step"] = raw_evals["one_step"][:-1]
    func_value = f0

    f0_tree = unpacker(f0)
    f0 = tree_leaves(f0_tree, registry=registry)
    f0 = np.array(f0, dtype=np.float64)

    # convert the raw evaluations to numpy arrays
    raw_evals = {
        step_type: _convert_evals_to_numpy(
            raw_evals=evals, unpacker=unpacker, registry=registry
        )
        for step_type, evals in raw_evals.items()
    }

    # reshape arrays into dimension (n_steps, dim_f, dim_x) or (n_steps, dim_f, dim_x,
    # dim_x) for finite differences
    evals = {}
    evals["one_step"] = _reshape_one_step_evals(raw_evals["one_step"], n_steps, len(x))
    evals["two_step"] = _reshape_two_step_evals(raw_evals["two_step"], n_steps, len(x))
    evals["cross_step"] = _reshape_cross_step_evals(
        raw_evals["cross_step"], n_steps, len(x), f0
    )

    # apply finite difference formulae
    hess_candidates = {}
    for m in ["forward", "backward", "central_average", "central_cross"]:
        hess_candidates[m] = finite_differences.hessian(evals, step_size, f0, m)

    # get the best derivative estimate out of all derivative estimates that could be
    # calculated, given the function evaluations.
    orders = {
        "central_cross": ["central_cross", "central_average", "forward", "backward"],
        "central_average": ["central_average", "central_cross", "forward", "backward"],
        "forward": ["forward", "backward", "central_average", "central_cross"],
        "backward": ["backward", "forward", "central_average", "central_cross"],
    }

    if n_steps == 1:
        hess = _consolidate_one_step_derivatives(hess_candidates, orders[method])
        updated_candidates = None
    else:
        raise ValueError(
            "Richardson extrapolation is not implemented for the second derivative yet."
        )

    # raise error if necessary
    if error_handling in ("raise", "raise_strict") and np.isnan(hess).any():
        raise Exception(exc_info)

    # results processing
    derivative = hessian_to_block_tree(hess, f0_tree, params)

    result = {"derivative": derivative}
    if return_func_value:
        result["func_value"] = func_value
    if return_info:
        info = _collect_additional_info(
            step_size, evals, updated_candidates, target="second_derivative"
        )
        result = {**result, **info}
    return NumdiffResult(**result)


def _is_1d_array(candidate: Any) -> bool:
    # True iff candidate is a one-dimensional numpy array (the "fast path" input).
    return isinstance(candidate, np.ndarray) and candidate.ndim == 1


def _reshape_one_step_evals(raw_evals_one_step, n_steps, dim_x):
    """Reshape raw_evals for evaluation points with one step.

    Returned object is a namedtuple with entries 'pos' and 'neg' corresponding to
    positive and negative steps. Each entry will be a numpy array with dimension
    (n_steps, dim_f, dim_x).
    Mathematical:

        evals.pos = (f(x0 + delta_jl e_j))
        evals.neg = (f(x0 - delta_jl e_j))

        for j=1,...,dim_x and l=1,...,n_steps

    """
    evals = np.array(raw_evals_one_step).reshape(2, n_steps, dim_x, -1)
    evals = evals.swapaxes(2, 3)
    evals = Evals(pos=evals[0], neg=evals[1])
    return evals


def _process_unpacker(
    unpacker: None | Callable[[Any], PyTree],
) -> Callable[[Any], PyTree]:
    """Process the user provided unpacker function.

    If the unpacker was None, we set it to the identity.

    """
    if unpacker is None:
        unpacker = lambda x: x
    else:
        raw_unpacker = unpacker

        def unpacker(x):
            # pass scalar NaNs through untouched; they mark skipped evaluations
            if isinstance(x, float) and np.isnan(x):
                return x
            return raw_unpacker(x)

    return unpacker


def _process_scalar_or_array_argument(candidate, x, name):
    # Broadcast a scalar to the length of x, or validate that a user-provided array
    # is 1d with the same length as x. `name` is only used in error messages.
    if candidate is None:
        return None
    if np.isscalar(candidate):
        return np.full_like(x, candidate, dtype=np.float64)
    else:
        try:
            candidate = np.asarray(candidate, dtype=np.float64)
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception as e:
            msg = f"{name} must be a scalar or have the same structure as params."
            raise ValueError(msg) from e
        if len(candidate) != len(x) or candidate.ndim != 1:
            msg = f"{name} must be a scalar or have the same structure as params."
            raise ValueError(msg)
    return candidate


def _reshape_two_step_evals(raw_evals_two_step, n_steps, dim_x):
    """Reshape raw_evals for evaluation points with two steps.

    Returned object is a namedtuple with entries 'pos' and 'neg' corresponding to
    positive and negative steps. Each entry will be a numpy array with dimension
    (n_steps, dim_f, dim_x, dim_x). Since the array is, by definition, symmetric over
    the last two dimensions, the function is not evaluated on both sides to save
    computation time and the information is simply copied here.

    Mathematical:

        evals.pos = (f(x0 + delta_jl e_j + delta_kl e_k))
        evals.neg = (f(x0 - delta_jl e_j - delta_kl e_k))

        for j,k=1,...,dim_x and l=1,...,n_steps

    """
    tril_idx = np.tril_indices(dim_x, -1)
    evals = np.array(raw_evals_two_step).reshape(2, n_steps, dim_x, dim_x, -1)
    evals = evals.transpose(0, 1, 4, 2, 3)
    # fill the (skipped) lower triangular from the upper triangular by symmetry
    evals[..., tril_idx[0], tril_idx[1]] = evals[..., tril_idx[1], tril_idx[0]]
    evals = Evals(pos=evals[0], neg=evals[1])
    return evals


def _reshape_cross_step_evals(raw_evals_cross_step, n_steps, dim_x, f0):
    """Reshape raw_evals for evaluation points with cross steps.

    Returned object is a namedtuple with entries 'pos' and 'neg' corresponding to
    positive and negative steps. Each entry will be a numpy array with dimension
    (n_steps, dim_f, dim_x, dim_x). Since the array is, by definition, symmetric over
    the last two dimensions, the function is not evaluated on both sides to save
    computation time and the information is simply copied here. In comparison to the
    two_step case, however, this symmetry holds only over the dimension 'pos' and
    'neg'. That is, the lower triangular of the last two dimensions of 'pos' must
    equal the upper triangular of the last two dimensions of 'neg'. Further, the
    diagonal of the last two dimensions must be equal to f0.

    Mathematical:

        evals.pos = (f(x0 + delta_jl e_j - delta_kl e_k))
        evals.neg = (f(x0 - delta_jl e_j + delta_kl e_k))

        for j,k=1,...,dim_x and l=1,...,n_steps

    """
    tril_idx = np.tril_indices(dim_x, -1)
    diag_idx = np.diag_indices(dim_x)
    evals = np.array(raw_evals_cross_step).reshape(2, n_steps, dim_x, dim_x, -1)
    evals = evals.transpose(0, 1, 4, 2, 3)
    # copy the skipped lower triangular of 'pos' from the upper triangular of 'neg'
    evals[0][..., tril_idx[0], tril_idx[1]] = evals[1][..., tril_idx[1], tril_idx[0]]
    # the diagonal cross step equals the point x0 itself, so use f0 there
    evals[0][..., diag_idx[0], diag_idx[1]] = np.atleast_2d(f0).T[np.newaxis, ...]
    evals = Evals(pos=evals[0], neg=evals[0].swapaxes(2, 3))
    return evals


def _convert_evaluation_data_to_frame(steps, evals):
    """Convert evaluation data to (tidy) data frame.
    Args:
        steps (namedtuple): Namedtuple with field names pos and neg. Is generated by
            :func:`~optimagic.differentiation.generate_steps.generate_steps`.
        evals (namedtuple): Namedtuple with field names pos and neg. Contains function
            evaluation corresponding to steps.

    Returns:
        df (pandas.DataFrame): Tidy data frame with index (sign, step_number, dim_x,
            dim_f), where sign corresponds to pos or neg in steps and evals,
            step_number indexes the step, dim_x is the dimension of the input vector
            and dim_f is the dimension of the function output. The data is given by
            the two columns step and eval. The data frame has 2 * n_steps * dim_x *
            dim_f rows.

    """
    n_steps, dim_f, dim_x = evals.pos.shape

    dfs = []
    for direction, step_arr, eval_arr in zip((1, -1), steps, evals, strict=False):
        df_steps = pd.DataFrame(step_arr, columns=range(dim_x))
        df_steps = df_steps.reset_index()
        df_steps = df_steps.rename(columns={"index": "step_number"})
        df_steps = df_steps.melt(
            id_vars="step_number", var_name="dim_x", value_name="step"
        )
        df_steps = df_steps.sort_values("step_number")
        df_steps = df_steps.reset_index(drop=True)
        # store step magnitudes; the direction is encoded in the "sign" column below
        df_steps = df_steps.apply(lambda col: col.abs() if col.name == "step" else col)

        reshaped_eval_arr = np.transpose(eval_arr, (0, 2, 1)).reshape(-1, dim_f)
        df_evals = pd.concat((df_steps, pd.DataFrame(reshaped_eval_arr)), axis=1)
        df_evals = df_evals.melt(
            id_vars=["step_number", "dim_x", "step"],
            var_name="dim_f",
            value_name="eval",
        )
        df_evals = df_evals.assign(sign=direction)
        df_evals = df_evals.set_index(["sign", "step_number", "dim_x", "dim_f"])
        df_evals = df_evals.sort_index()

        dfs.append(df_evals)

    df = pd.concat(dfs).astype({"step": float, "eval": float})
    return df


def _convert_richardson_candidates_to_frame(jac, err):
    """Convert (richardson) jacobian candidates and errors to pandas data frame.

    Args:
        jac (dict): Dict with richardson jacobian candidates.
        err (dict): Dict with errors corresponding to richardson jacobian candidates.

    Returns:
        df (pandas.DataFrame): Frame with column "der" and "err" and index
            ["method", "num_term", "dim_x", "dim_f"] with respective meaning: type of
            method used, e.g. central or forward; kind of value, e.g. derivative or
            error.

    """
    dim_f, dim_x = jac["forward1"].shape
    dfs = []
    for key, value in jac.items():
        # keys look like "forward1": method name followed by the number of terms
        method, num_term = _split_into_str_and_int(key)
        df = pd.DataFrame(value.T, columns=range(dim_f))
        df = df.assign(dim_x=range(dim_x))
        df = df.melt(id_vars="dim_x", var_name="dim_f", value_name="der")
        df = df.assign(method=method, num_term=num_term, err=err[key].T.flatten())
        dfs.append(df)

    df = pd.concat(dfs)
    df = df.set_index(["method", "num_term", "dim_x", "dim_f"])
    return df


def _convert_evals_to_numpy(
    raw_evals, unpacker, registry, is_scalar_out=False, is_vector_out=False
):
    """Harmonize the output of the function evaluations.

    The raw_evals might contain dictionaries of which we only need one entry, scalar
    np.nan where we need arrays filled with np.nan or pandas objects. The processed
    evals only contain numpy arrays.

    """
    # get rid of additional output
    evals = [unpacker(val) for val in raw_evals]

    # convert pytrees to arrays; scalar NaNs (failed/skipped evaluations) are kept
    # as-is and handled below
    if is_scalar_out:
        evals = [
            np.array([val], dtype=float) if not _is_scalar_nan(val) else val
            for val in evals
        ]
    elif is_vector_out:
        evals = [val.astype(float) if not _is_scalar_nan(val) else val for val in evals]
    else:
        evals = [
            (
                np.array(tree_leaves(val, registry=registry), dtype=np.float64)
                if not _is_scalar_nan(val)
                else val
            )
            for val in evals
        ]

    # find out the correct output shape from the first successful evaluation
    try:
        array = next(x for x in evals if hasattr(x, "shape") or isinstance(x, dict))
        out_shape = array.shape
    except StopIteration:
        # every evaluation failed or the output is scalar
        out_shape = "scalar"

    # convert to correct output shape: replace scalar NaNs with NaN arrays of the
    # right shape so downstream reshapes work
    if out_shape == "scalar":
        evals = [np.atleast_1d(val) for val in evals]
    else:
        for i in range(len(evals)):
            if isinstance(evals[i], float) and np.isnan(evals[i]):
                evals[i] = np.full(out_shape, np.nan)

    return evals


def _consolidate_one_step_derivatives(candidates, preference_order):
    """Replace missing derivative estimates of preferred method with others.

    Args:
        candidates (dict): Dictionary with derivative estimates from different methods.
        preference_order (list): Order on (a subset of) the keys in candidates. Earlier
            entries are preferred.

    Returns:
        consolidated (np.ndarray): Array of same shape as input derivative estimates.

    """
    preferred, others = preference_order[0], preference_order[1:]
    consolidated = candidates[preferred].copy()
    for other in others:
        # fill NaN entries of the preferred estimate with the next-best method
        consolidated = np.where(np.isnan(consolidated), candidates[other], consolidated)

    return consolidated.reshape(consolidated.shape[1:])


def _consolidate_extrapolated(candidates):
    """Get the best possible derivative estimate, given an error estimate.

    Going through ``candidates`` select the best derivative estimate element-wise using
    the estimated candidates, where best is defined as minimizing the error estimate
    from the Richardson extrapolation.
See https://tinyurl.com/ubn3nv5 for corresponding code in numdifftools and https://tinyurl.com/snle7mb for an explanation of how errors of Richardson extrapolated derivative estimates can be estimated. Args: candidates (dict): Dictionary containing different derivative estimates and their error estimates. Returns: consolidated (np.ndarray): Array of same shape as input derivative estimates. candidate_der_dict (dict): Best derivative estimate given method. candidate_err_dict (dict): Errors corresponding to best derivatives given method """ # first find minimum over steps for each method candidate_der_dict = {} candidate_err_dict = {} for key in candidates: _der = candidates[key]["derivative"] _err = candidates[key]["error"] derivative, error = _select_minimizer_along_axis(_der, _err) candidate_der_dict[key] = derivative candidate_err_dict[key] = error # second find minimum over methods candidate_der = np.stack(list(candidate_der_dict.values())) candidate_err = np.stack(list(candidate_err_dict.values())) consolidated, _ = _select_minimizer_along_axis(candidate_der, candidate_err) updated_candidates = (candidate_der_dict, candidate_err_dict) return consolidated, updated_candidates def _compute_richardson_candidates(jac_candidates, steps, n_steps): """Compute derivative candidates using Richardson extrapolation. Args: jac_candidates (dict): Dictionary with (traditional) derivative estimates from different methods. steps (namedtuple): Namedtuple with the field names pos and neg. Each field contains a numpy array of shape (n_steps, len(x)) with the steps in the corresponding direction. The steps are always symmetric, in the sense that steps.neg[i, j] = - steps.pos[i, j] unless one of them is NaN. n_steps (int): Number of steps needed. For central methods, this is the number of steps per direction. It is 1 if no Richardson extrapolation is used. Returns: richardson_candidates (dict): Dictionary with derivative estimates and error estimates from different methods. 
def _compute_richardson_candidates(jac_candidates, steps, n_steps):
    """Compute derivative candidates using Richardson extrapolation.

    Args:
        jac_candidates (dict): Dictionary with (traditional) derivative
            estimates from different methods.
        steps (namedtuple): Namedtuple with the field names pos and neg. Each
            field contains a numpy array of shape (n_steps, len(x)) with the
            steps in the corresponding direction. The steps are always
            symmetric, in the sense that steps.neg[i, j] = - steps.pos[i, j]
            unless one of them is NaN.
        n_steps (int): Number of steps needed. For central methods, this is
            the number of steps per direction. It is 1 if no Richardson
            extrapolation is used.

    Returns:
        richardson_candidates (dict): Keys are method name plus number of
            Richardson terms (e.g. "central1"); values are dicts with the
            extrapolated "derivative" and its "error" estimate.

    """
    richardson_candidates = {}
    for method in ("forward", "backward", "central"):
        # one candidate per possible number of extrapolation terms
        for num_terms in range(1, n_steps):
            derivative, error = richardson_extrapolation(
                jac_candidates[method], steps, method, num_terms
            )
            richardson_candidates[f"{method}{num_terms}"] = {
                "derivative": derivative,
                "error": error,
            }

    return richardson_candidates
""" if derivative.shape[0] == 1: jac_minimal = np.squeeze(derivative, axis=0) error_minimal = np.squeeze(errors, axis=0) else: minimizer = np.nanargmin(errors, axis=0) jac_minimal = np.take_along_axis(derivative, minimizer[np.newaxis, :], axis=0) jac_minimal = np.squeeze(jac_minimal, axis=0) error_minimal = np.nanmin(errors, axis=0) return jac_minimal, error_minimal def _nan_skipping_batch_evaluator( func, arguments, n_cores, error_handling, batch_evaluator ): """Evaluate func at each entry in arguments, skipping np.nan entries. The function is only evaluated at inputs that are not a scalar np.nan. The outputs corresponding to skipped inputs as well as for inputs on which func returns np.nan are np.nan. Args: func (function): Python function that returns a numpy array. The shape of the output of func has to be the same for all elements in arguments. arguments (list): List with inputs for func. n_cores (int): Number of processes. Returns: evaluations (list): The function evaluations, same length as arguments. """ # extract information nan_indices = { i for i, arg in enumerate(arguments) if isinstance(arg, float) and np.isnan(arg) } real_args = [arg for i, arg in enumerate(arguments) if i not in nan_indices] # get the batch evaluator if it was provided as string if not callable(batch_evaluator): batch_evaluator = getattr( batch_evaluators, f"{batch_evaluator}_batch_evaluator" ) # evaluate functions evaluations = batch_evaluator( func=func, arguments=real_args, n_cores=n_cores, error_handling=error_handling ) # combine results evaluations = iter(evaluations) results = [] for i in range(len(arguments)): if i in nan_indices: results.append(np.nan) else: results.append(next(evaluations)) return results def _split_into_str_and_int(s): """Splits string in str and int parts. Args: s (str): The string. Returns: str_part (str): The str part. int_part (int): The int part. 
Example: >>> s = "forward1" >>> _split_into_str_and_int(s) ('forward', 1) """ str_part, int_part = re.findall(r"(\w+?)(\d+)", s)[0] return str_part, int(int_part) def _collect_additional_info(steps, evals, updated_candidates, target): """Combine additional information in dict if return_info is True.""" info = {} # save function evaluations to accessible data frame if target == "first_derivative": func_evals = _convert_evaluation_data_to_frame(steps, evals) info["_func_evals"] = func_evals else: one_step = _convert_evaluation_data_to_frame(steps, evals["one_step"]) info["_func_evals"] = { "one_step": one_step, "two_step": None, "cross_step": None, } if updated_candidates is not None: # combine derivative candidates in accessible data frame derivative_candidates = _convert_richardson_candidates_to_frame( *updated_candidates ) info["_derivative_candidates"] = derivative_candidates return info def _is_scalar_nan(value): return isinstance(value, float) and np.isnan(value) def _unflatten_if_not_nan(leaves, treedef, registry): if isinstance(leaves, np.ndarray): out = tree_unflatten(treedef, leaves, registry=registry) else: out = leaves return out ================================================ FILE: src/optimagic/differentiation/finite_differences.py ================================================ """Finite difference formulae for jacobians and hessians. All functions in this module should not only work for the simple case of one positive and/or one negative step, but also for the Richardson Extrapolation case with several positive and/or several negative steps. Since steps and evals contain NaNs, we have to make sure that the functions do not raise warnings or errors for that case. """ from typing import NamedTuple import numpy as np class Evals(NamedTuple): pos: np.ndarray neg: np.ndarray def jacobian(evals, steps, f0, method): """Calculate a Jacobian estimate with finite differences according to method. Notation: f:R^dim_x -> R^dim_f. 
def jacobian(evals, steps, f0, method):
    """Calculate a Jacobian estimate with finite differences according to method.

    Notation: f:R^dim_x -> R^dim_f. We compute the derivative at x0, with
    f0 = f(x0).

    Args:
        evals (namedtuple): Fields pos and neg hold evaluations with positive
            and negative steps, respectively. Each field is a numpy array of
            shape (n_steps, dim_f, dim_x). It contains np.nan for evaluations
            that failed or were not attempted because a one-sided derivative
            rule was chosen.
        steps (namedtuple): Fields pos and neg, each a numpy array of shape
            (n_steps, dim_x) with the steps in the corresponding direction.
            The steps are symmetric: steps.neg[i, j] = - steps.pos[i, j]
            unless one of them is NaN.
        f0 (numpy.ndarray): Numpy array of length dim_f with the output of
            the function at the user supplied parameters.
        method (str): One of ["forward", "backward", "central"]

    Returns:
        jac (numpy.ndarray): Numpy array of shape (n_steps, dim_f, dim_x)
            with estimated Jacobians, one per step size.

    Raises:
        ValueError: If method is not a supported difference rule.

    """
    n_steps, dim_f, dim_x = evals.pos.shape
    # broadcast f0 against the (n_steps, dim_f, dim_x) evaluation arrays
    f0_col = f0.reshape(1, dim_f, 1)

    if method == "forward":
        jac = (evals.pos - f0_col) / steps.pos.reshape(n_steps, 1, dim_x)
    elif method == "backward":
        jac = (evals.neg - f0_col) / steps.neg.reshape(n_steps, 1, dim_x)
    elif method == "central":
        deltas = steps.pos - steps.neg
        jac = (evals.pos - evals.neg) / deltas.reshape(n_steps, 1, dim_x)
    else:
        raise ValueError("Method has to be 'forward', 'backward' or 'central'.")

    return jac
class Evals(NamedTuple):
    # evaluations with positive / negative steps; shape (n_steps, dim_f, dim_x)
    pos: np.ndarray
    neg: np.ndarray


def hessian(evals, steps, f0, method):
    """Calculate a Hessian estimate with finite differences according to method.

    Notation: f:R^dim_x -> R^dim_f. We compute the derivative at x0, with
    f0 = f(x0).

    The formulae in Rideout [2009] which are implemented here use three types
    of function evaluations:

    1. f(theta + delta_j e_j)
    2. f(theta + delta_j e_j + delta_k e_k)
    3. f(theta + delta_j e_j - delta_k e_k)

    Called here ``evals_one``, ``evals_two`` and ``evals_cross``, corresponding
    to moving in one direction, in two directions and in two cross directions
    (opposite signs). theta denotes x0, delta_j the step size for the j-th
    variable and e_j the j-th standard basis vector. The bracketing in the
    formulae is not arbitrary but improves numerical accuracy, see
    Rideout [2009].

    Args:
        evals (dict[namedtuple]): Dictionary with keys "one_step" for function
            evals in a single step direction, "two_step" for evals in two steps
            in the same direction, and "cross_step" for evals in two steps in
            the opposite direction. Each dict item has the fields pos and neg
            for evaluations with positive and negative steps, respectively.
            Each field is a numpy array of shape (n_steps, dim_f, dim_x). It
            contains np.nan for evaluations that failed or were not attempted
            because a one-sided derivative rule was chosen.
        steps (namedtuple): Fields pos and neg, each a numpy array of shape
            (n_steps, dim_x) with the steps in the corresponding direction.
            The steps are symmetric: steps.neg[i, j] = - steps.pos[i, j]
            unless one of them is NaN.
        f0 (numpy.ndarray): Numpy array of length dim_f with the output of the
            function at the user supplied parameters.
        method (str): One of {"forward", "backward", "central_average",
            "central_cross"}. These correspond to the finite difference
            approximations defined in equations [7, x, 8, 9] in Rideout
            [2009], where ("backward", x) is not found in Rideout [2009] but
            is the natural extension of equation 7 to the backward case.

    Returns:
        hess (numpy.ndarray): Numpy array of shape
            (n_steps, dim_f, dim_x, dim_x) with estimated Hessians, one per
            step size.

    Raises:
        ValueError: If method is not a supported difference rule.

    """
    n_steps, dim_f, dim_x = evals["one_step"].pos.shape
    f0 = f0.reshape(1, dim_f, 1, 1)

    # add a trailing axis so one-step evals broadcast against the
    # (n_steps, dim_f, dim_x, dim_x) shaped two-step evals
    evals_one = Evals(
        pos=np.expand_dims(evals["one_step"].pos, axis=3),
        neg=np.expand_dims(evals["one_step"].neg, axis=3),
    )
    evals_two = evals["two_step"]
    evals_cross = evals["cross_step"]

    if method == "forward":
        outer_product_steps = _calculate_outer_product_steps(steps.pos, n_steps, dim_x)
        diffs = (evals_two.pos - evals_one.pos.swapaxes(2, 3)) - (evals_one.pos - f0)
        hess = diffs / outer_product_steps
    elif method == "backward":
        outer_product_steps = _calculate_outer_product_steps(steps.neg, n_steps, dim_x)
        diffs = (evals_two.neg - evals_one.neg.swapaxes(2, 3)) - (evals_one.neg - f0)
        hess = diffs / outer_product_steps
    elif method == "central_average":
        outer_product_steps = _calculate_outer_product_steps(steps.pos, n_steps, dim_x)
        forward = (evals_two.pos - evals_one.pos.swapaxes(2, 3)) - (evals_one.pos - f0)
        backward = (evals_two.neg - evals_one.neg.swapaxes(2, 3)) - (evals_one.neg - f0)
        hess = (forward + backward) / (2 * outer_product_steps)
    elif method == "central_cross":
        outer_product_steps = _calculate_outer_product_steps(steps.pos, n_steps, dim_x)
        diffs = (evals_two.pos - evals_cross.pos) - (evals_cross.neg - evals_two.neg)
        hess = diffs / (4 * outer_product_steps)
    else:
        # BUG FIX: previously two comma-separated strings were passed to
        # ValueError, which produced a tuple-valued exception message. They
        # are now joined into one string via implicit concatenation.
        raise ValueError(
            "Method has to be 'forward', 'backward', 'central_average' or "
            "'central_cross'."
        )

    return hess


def _calculate_outer_product_steps(signed_steps, n_steps, dim_x):
    """Calculate array of outer product of steps.

    Args:
        signed_steps (np.ndarray): Square array with either pos or neg steps
            returned by
            :func:`~optimagic.differentiation.generate_steps.generate_steps`
        n_steps (int): Number of steps needed. For central methods, this is
            the number of steps per direction. It is 1 if no Richardson
            extrapolation is used.
        dim_x (int): Dimension of input vector x.

    Returns:
        outer_product_steps (np.ndarray): Array with outer product of steps.
            Has dimension (n_steps, 1, dim_x, dim_x).

    """
    outer_product_steps = np.array(
        [np.outer(signed_steps[j], signed_steps[j]) for j in range(n_steps)]
    ).reshape(n_steps, 1, dim_x, dim_x)
    return outer_product_steps
class Steps(NamedTuple):
    # steps in positive / negative direction; shape (n_steps, len(x)) each
    pos: np.ndarray
    neg: np.ndarray


def generate_steps(
    x,
    method,
    n_steps,
    target,
    base_steps,
    scaling_factor,
    bounds,
    step_ratio,
    min_steps,
):
    """Generate steps for finite differences with or without Richardson Extrapolation.

    steps can be used to construct x-vectors at which the function has to be
    evaluated for finite difference formulae. How the vectors are constructed
    from the steps differs between first and second derivative. Note that both
    positive and negative steps are returned, even for one-sided methods,
    because bounds might make it necessary to flip the direction of the
    method.

    The rule of thumb for the generation of base_steps is:
    - first_derivative: `np.finfo(float).eps ** (1 / 2) * np.maximum(np.abs(x), 0.1)`
    - second_derivative: `np.finfo(float).eps ** (1 / 3) * np.maximum(np.abs(x), 0.1)`
    Where `np.finfo(float).eps` is machine accuracy. This rule of thumb is
    also used in statsmodels and scipy.

    The step generation is bound aware and will try to find a good solution if
    any step would violate a bound. For this, we use the following rules until
    no bounds are violated:

    1. If a one sided method is used, flip to the direction with more distance
       to the bound.
    2. Decrease the base_steps, unless this would mean to go below min_steps.
       By default min_steps is equal to base_steps, so no squeezing happens
       unless explicitly requested by setting a smaller min_step.
    3. Set the conflicting steps to NaN, which means that this step won't be
       usable in the calculation of derivatives. All derivative functions can
       handle NaNs and will produce the best possible derivative estimate
       given the remaining steps. If all steps of one parameter are set to
       NaN, no derivative estimate will be produced for that parameter.

    Args:
        x (numpy.ndarray): 1d array at which the derivative is calculated.
        method (str): One of ["central", "forward", "backward"]
        n_steps (int): Number of steps needed. For central methods, this is
            the number of steps per direction. It is 1 if no Richardson
            extrapolation is used.
        target (str): One of ["first_derivative", "second_derivative"]. This
            is used to choose the appropriate rule of thumb for the
            base_steps.
        base_steps (numpy.ndarray, optional): 1d array of the same length as
            x. base_steps * scaling_factor is the absolute value of the first
            (and possibly only) step used in the finite differences
            approximation of the derivative. If base_steps * scaling_factor
            conflicts with bounds, the actual steps will be adjusted. If
            base_steps is not provided, it will be determined according to a
            rule of thumb as long as this does not conflict with min_steps.
        scaling_factor (numpy.ndarray or float): Scaling factor which is
            applied to base_steps. If it is an numpy.ndarray, it needs to have
            the same shape as x. scaling_factor is useful if you want to
            increase or decrease the base_step relative to the rule-of-thumb
            or user provided base_step, for example to benchmark the effect of
            the step size.
        bounds: Object with attributes ``lower`` and ``upper``, each either
            None or a 1d array of the same length as x with the bounds for
            each parameter.
        step_ratio (float or array): Ratio between two consecutive Richardson
            extrapolation steps in the same direction. default 2.0. Has to be
            larger than one. step ratio is only used if n_steps > 1.
        min_steps (numpy.ndarray): Minimal possible step sizes that can be
            chosen to accommodate bounds. Needs to have same length as x. By
            default min_steps is equal to base_steps, i.e step size is not
            decreased beyond what is optimal according to the rule of thumb.

    Returns:
        steps (namedtuple): Namedtuple with the field names pos and neg. Each
            field contains a numpy array of shape (n_steps, len(x)) with the
            steps in the corresponding direction. The steps are always
            symmetric, in the sense that
            steps.neg[i, j] = - steps.pos[i, j] unless one of them is NaN.

    """
    base_steps = _calculate_or_validate_base_steps(
        base_steps, x, target, min_steps, scaling_factor
    )
    # by default, steps may not be squeezed below the rule-of-thumb size
    min_steps = base_steps if min_steps is None else min_steps

    lower_bounds = bounds.lower
    upper_bounds = bounds.upper

    # None-valued bounds are handled by instantiating them as an -inf and inf
    # array. In the future, this should be handled more gracefully.
    if lower_bounds is None:
        lower_bounds = fast_numpy_full(len(x), fill_value=-np.inf)
    if upper_bounds is None:
        upper_bounds = fast_numpy_full(len(x), fill_value=np.inf)

    assert (upper_bounds - lower_bounds >= 2 * min_steps).all(), (
        "min_steps is too large to fit into bounds."
    )

    # bounds expressed relative to x, i.e. as bounds on the steps themselves
    upper_step_bounds = upper_bounds - x
    lower_step_bounds = lower_bounds - x

    # (len(x), n_steps) array: per parameter, steps grow by step_ratio
    pos = step_ratio ** np.arange(n_steps) * base_steps.reshape(-1, 1)
    neg = -pos.copy()

    if method in ["forward", "backward"]:
        pos, neg = _set_unused_side_to_nan(
            x, pos, neg, method, lower_step_bounds, upper_step_bounds
        )

    if np.isfinite(lower_bounds).any() or np.isfinite(upper_bounds).any():
        pos, neg = _rescale_to_accomodate_bounds(
            base_steps, pos, neg, lower_step_bounds, upper_step_bounds, min_steps
        )

    with warnings.catch_warnings():
        # comparisons with NaN steps raise RuntimeWarnings we deliberately ignore
        warnings.simplefilter("ignore", category=RuntimeWarning)
        # steps that still violate a bound after rescaling are disabled (NaN)
        pos[pos > upper_step_bounds.reshape(-1, 1)] = np.nan
        neg[neg < lower_step_bounds.reshape(-1, 1)] = np.nan

    steps = Steps(pos=pos.T, neg=neg.T)
    return steps
Args: base_steps (numpy.ndarray, optional): 1d array of the same length as x. base_steps * scaling_factor is the absolute value of the first (and possibly only) step used in the finite differences approximation of the derivative. x (numpy.ndarray): 1d array at which the derivative is evaluated target (str): One of ["first_derivative", "second_derivative"]. This is used to choose the appropriate rule of thumb for the base_steps. min_steps (numpy.ndarray or None): Minimal possible step sizes that can be chosen to accommodate bounds. Needs to have same length as x. scaling_factor (numpy.ndarray or float): Scaling factor which is applied to base_steps. If it is an :class:`numpy.ndarray`, it needs to have the same shape as x. Returns: base_steps (numpy.ndarray): 1d array of the same length as x with the absolute value of the first step. """ if np.any(scaling_factor <= 0): raise ValueError("Scaling factor must be strictly positive.") if base_steps is not None: if np.isscalar(base_steps): base_steps = np.full(len(x), base_steps) if base_steps.shape != x.shape: raise ValueError("base_steps has to have the same shape as x.") base_steps = base_steps * scaling_factor if np.isscalar(min_steps): min_steps = np.full(len(x), min_steps) if min_steps is not None and (base_steps <= min_steps).any(): raise ValueError( "scaling_factor * base_steps must be larger than min_steps." ) else: eps = np.finfo(float).eps if target == "first_derivative": base_steps = eps ** (1 / 2) * np.maximum(np.abs(x), 0.1) * scaling_factor elif target == "second_derivative": base_steps = eps ** (1 / 3) * np.maximum(np.abs(x), 0.1) * scaling_factor else: raise ValueError(f"Invalid target: {target}.") if min_steps is not None: base_steps = np.clip(base_steps, a_min=min_steps, a_max=None) return base_steps def _set_unused_side_to_nan( x, # noqa: ARG001 pos, neg, method, lower_step_bounds, upper_step_bounds, ): """Set unused side (i.e. pos or neg) to np.nan. 
A side is not used if: - It was not requested due to one sided derivatives. - It was requested but a side switch was better due to bounds. This function does not yet guarantee that all bounds are fulfilled. It only switches to the side that has more space to the bound if there is a bound violation. Args: x (numpy.ndarray): 1d array with parameters. pos (numpy.ndarray): Array with positive steps of shape (n_steps, len(x)) neg (numpy.ndarray): Array with negative steps of shape (n_steps, len(x)) method (str): One of ["forward", "backward"] lower_step_bounds (numpy.ndarray): Lower bounds for steps. upper_step_bounds (numpy.ndarray): Upper bounds for steps. Returns: pos (numpy.ndarray): Copy of pos with additional NaNs neg (numpy.ndarray): Copy of neg with additional NaNs """ pos = pos.copy() neg = neg.copy() better_side = np.where(upper_step_bounds >= -lower_step_bounds, 1, -1) max_abs_step = pos[:, -1] if method == "forward": used_side = np.where(upper_step_bounds >= max_abs_step, 1, better_side) elif method == "backward": used_side = np.where(-lower_step_bounds >= max_abs_step, -1, better_side) else: raise ValueError("This function only works for forward or backward method.") pos[used_side == -1] = np.nan neg[used_side == 1] = np.nan return pos, neg def _rescale_to_accomodate_bounds( base_steps, pos, neg, lower_step_bounds, upper_step_bounds, min_steps ): """Rescale steps to make them compatible with bounds unless this violates min_steps. Args: base_steps (np.ndarray, optional): 1d array of the same length as x. base_steps * scaling_factor is the absolute value of the first (and possibly only) step used in the finite differences approximation of the derivative. pos (np.ndarray): Array with positive steps of shape (n_steps, len(x)) neg (np.ndarray): Array with negative steps of shape (n_steps, len(x)) lower_step_bounds (np.ndarray): Lower bounds for steps. upper_step_bounds (np.ndarray): Upper bounds for steps. 
min_steps (np.ndarray): Minimal possible step sizes that can be chosen to accomodate bounds. Needs to have same length as x. Returns: pos (np.ndarray): Copy of pos with rescaled steps. neg (np.ndarray): Copy of neg with rescaled steps. """ with warnings.catch_warnings(): warnings.simplefilter("ignore", category=RuntimeWarning) pos_needed_scaling = _fillna( upper_step_bounds / np.nanmax(pos, axis=1), 1 ).clip(0, 1) neg_needed_scaling = _fillna( lower_step_bounds / np.nanmin(neg, axis=1), 1 ).clip(0, 1) needed_scaling = np.minimum(pos_needed_scaling, neg_needed_scaling) min_possible_scaling = min_steps / base_steps scaling = np.maximum(needed_scaling, min_possible_scaling).reshape(-1, 1) pos = pos * scaling neg = neg * scaling return pos, neg def _fillna(x, val): return np.where(np.isnan(x), val, x) ================================================ FILE: src/optimagic/differentiation/numdiff_options.py ================================================ from dataclasses import dataclass from enum import Enum from typing import Callable, Literal, TypedDict from typing_extensions import NotRequired from optimagic.batch_evaluators import process_batch_evaluator from optimagic.config import DEFAULT_N_CORES from optimagic.exceptions import InvalidNumdiffOptionsError from optimagic.typing import BatchEvaluatorLiteral @dataclass(frozen=True) class NumdiffOptions: """Options for numerical differentiation. Attributes: method: The method to use for numerical differentiation. Can be "central", "forward", or "backward". step_size: The step size to use for numerical differentiation. If None, the default step size will be used. scaling_factor: The scaling factor to use for numerical differentiation. min_steps: The minimum step size to use for numerical differentiation. If None, the default minimum step size will be used. n_cores: The number of cores to use for numerical differentiation. batch_evaluator: The evaluator to use for batch evaluation. 
Allowed are "joblib", "pathos", and "threading", or a custom callable. Raises: InvalidNumdiffError: If the numdiff options cannot be processed, e.g. because they do not have the correct type. """ method: Literal[ "central", "forward", "backward", "central_cross", "central_average" ] = "central" step_size: float | None = None scaling_factor: float = 1 min_steps: float | None = None n_cores: int = DEFAULT_N_CORES batch_evaluator: BatchEvaluatorLiteral | Callable = "joblib" # type: ignore def __post_init__(self) -> None: _validate_attribute_types_and_values(self) class NumdiffOptionsDict(TypedDict): method: NotRequired[ Literal["central", "forward", "backward", "central_cross", "central_average"] ] step_size: NotRequired[float | None] scaling_factor: NotRequired[float] min_steps: NotRequired[float | None] n_cores: NotRequired[int] batch_evaluator: NotRequired[BatchEvaluatorLiteral | Callable] # type: ignore def pre_process_numdiff_options( numdiff_options: NumdiffOptions | NumdiffOptionsDict | None, ) -> NumdiffOptions | None: """Convert all valid types of Numdiff options to optimagic.NumdiffOptions class. This just harmonizes multiple ways of specifying numdiff options into a single format. It performs runtime type checks, but it does not check whether numdiff options are consistent with other option choices. Args: numdiff_options: The user provided numdiff options. Returns: The numdiff options in the optimagic format. Raises: InvalidNumdiffOptionsError: If numdiff options cannot be processed, e.g. because they do not have the correct type. """ if isinstance(numdiff_options, NumdiffOptions) or numdiff_options is None: pass else: try: numdiff_options = NumdiffOptions(**numdiff_options) except (KeyboardInterrupt, SystemExit): raise except Exception as e: if isinstance(e, InvalidNumdiffOptionsError): raise e raise InvalidNumdiffOptionsError( f"Invalid numdiff options of type: {type(numdiff_options)}. 
Numdiff " "options must be of type optimagic.NumdiffOptions, a dictionary with a" "subset of the keys {'method', 'step_size', 'scaling_factor', " "'min_steps', 'n_cores', 'batch_evaluator'}, or None." ) from e return numdiff_options def _validate_attribute_types_and_values(options: NumdiffOptions) -> None: if options.method not in { "central", "forward", "backward", "central_cross", "central_average", }: raise InvalidNumdiffOptionsError( f"Invalid numdiff `method`: {options.method}. Numdiff `method` must be " "one of 'central', 'forward', 'backward', 'central_cross', or " "'central_average'." ) if options.step_size is not None and ( not isinstance(options.step_size, float) or options.step_size <= 0 ): raise InvalidNumdiffOptionsError( f"Invalid numdiff `step_size`: {options.step_size}. Step size must be a " "float greater than 0." ) if ( not isinstance(options.scaling_factor, int | float) or options.scaling_factor <= 0 ): raise InvalidNumdiffOptionsError( f"Invalid numdiff `scaling_factor`: {options.scaling_factor}. Scaling " "factor must be an integer or float greater than 0." ) if options.min_steps is not None and ( not isinstance(options.min_steps, float) or options.min_steps <= 0 ): raise InvalidNumdiffOptionsError( f"Invalid numdiff `min_steps`: {options.min_steps}. Minimum step " "size must be a float greater than 0." ) if not isinstance(options.n_cores, int) or options.n_cores <= 0: raise InvalidNumdiffOptionsError( f"Invalid numdiff `n_cores`: {options.n_cores}. Number of cores " "must be an integer greater than 0." ) try: process_batch_evaluator(options.batch_evaluator) except Exception as e: raise InvalidNumdiffOptionsError( f"Invalid batch evaluator: {options.batch_evaluator}." 
class NumdiffPurpose(str, Enum):
    # the context in which numerical derivatives are computed
    OPTIMIZE = "optimize"
    ESTIMATE_JACOBIAN = "estimate_jacobian"
    ESTIMATE_HESSIAN = "estimate_hessian"


def get_default_numdiff_options(
    purpose: NumdiffPurpose,
) -> NumdiffOptions:
    """Get default numerical derivatives options for a given purpose.

    Args:
        purpose: For what purpose the numdiff options are used.

    Returns:
        The numdiff options with defaults filled in.

    """
    overrides: NumdiffOptionsDict = {}

    # cheap one-sided differences are good enough inside an optimizer loop,
    # while estimation targets use the more accurate central rules
    if purpose == NumdiffPurpose.OPTIMIZE:
        overrides["method"] = "forward"
    elif purpose == NumdiffPurpose.ESTIMATE_JACOBIAN:
        overrides["method"] = "central"
    elif purpose == NumdiffPurpose.ESTIMATE_HESSIAN:
        overrides["method"] = "central_cross"
        overrides["scaling_factor"] = 2

    return NumdiffOptions(**overrides)
def richardson_extrapolation(sequence, steps, method="central", num_terms=None):
    """Apply Richardson extrapolation to sequence.

    Suppose you have a series expansion

        L = g(h) + a0*(h**p_0) + a1*(h**p_1) + a2*(h**p_2) + ...,

    where p_i = order + exponentiation_step * i and g(h) -> L as h -> 0, but
    g(0) != L. If we evaluate the right hand side for different stepsizes h we
    can fit a polynomial to that sequence of approximations and use the
    estimated intercept as a better approximation for L. Further, we can
    compute estimation errors of our approximation.

    Args:
        sequence (np.ndarray): The sequence of which we want to approximate
            the limit. Has dimension (k x n x m), where k denotes the number
            of sequence elements and an element ``sequence[l, :, :]`` denotes
            the (n x m) dimensional element
        steps (namedtuple): Namedtuple with the field names pos and neg. Each
            field contains a numpy array of shape (n_steps, len(x)) with the
            steps in the corresponding direction. The steps are always
            symmetric, in the sense that
            steps.neg[i, j] = - steps.pos[i, j] unless one of them is NaN.
        method (str): One of ["central", "forward", "backward"], default
            "central".
        num_terms (int): Number of terms needed to construct one estimate.
            Default is ``steps.shape[0] - 1``.

    Returns:
        limit (np.ndarray): The refined limit.
        error (np.ndarray): The error approximation of ``limit``.

    """
    seq_len = sequence.shape[0]
    # only the positive steps are needed; symmetry covers the negative side
    steps = steps.pos
    n_steps = steps.shape[0]
    # NOTE(review): this default sets num_terms = n_steps, but the docstring
    # says the default is n_steps - 1, and the assertion below
    # (seq_len - 1 >= num_terms) then always fails for the default. Confirm
    # that all callers pass num_terms explicitly.
    num_terms = n_steps if num_terms is None else num_terms

    assert seq_len == n_steps, (
        "Length of ``steps`` must coincide with length of ``sequence``."
    )
    assert num_terms > 0, "``num_terms`` must be greater than zero."
    assert seq_len - 1 >= num_terms, (
        "``num_terms`` cannot be greater than ``seq_len`` - 1."
    )

    step_ratio = _compute_step_ratio(steps)
    order, exponentiation_step = _get_order_and_exponentiation_step(method)

    richardson_coef = _richardson_coefficients(
        num_terms,
        step_ratio,
        exponentiation_step,
        order,
    )

    # convolve flips the weights, hence the [::-1]; origin aligns the output
    # with the start of each num_terms-long window
    new_sequence = convolve1d(
        input=sequence, weights=richardson_coef[::-1], axis=0, origin=num_terms // 2
    )

    # only the first m estimates use a full window of valid sequence elements
    m = seq_len - num_terms
    mm = m + 1 if num_terms >= 2 else seq_len
    abserr = _estimate_error(new_sequence[:mm], sequence, richardson_coef)

    limit = new_sequence[:m]
    error = abserr[:m]

    return limit, error
1/(s)**(2*(o+n)) ], R = [1 1/(s**2)**(2*o) ... 1/(s**2)**(2*(o+n)) ], [... ... ... ], [1 1/(s**(n+1))**(2*o) ... 1/(s**(n+1))**(2*(o+n)) ]] which is the weighting matrix in equation 24 in https://tinyurl.com/ybtfj4pm. We then return the first row of R^{-1} as the coefficients, as can be seen in equation 25 in https://tinyurl.com/ybtfj4pm. Args: num_terms (int): Number of terms needed to construct one estimate. Default is ``steps.shape[0] - 1``. step_ratio (float): Ratio between two consecutive steps. Order is chosen such that ``step_ratio`` >= 1. exponentiation_step (int): Step representing the growth of the exponent in the series expansions of the limit. For central difference derivative approximation ``exponentiation_step`` = 2. order (int): Initial order of the approximation error of sequence elements. For central difference derivative approximation ``order`` = 2. Returns: coef (np.ndarray): Richardson coefficients, array has length num_terms + 1. Example: >>> import numpy as np >>> num_terms = 2 >>> step_ratio = 2. >>> exponentiation_step = 2 >>> order = 2 >>> _richardson_coefficients(num_terms, step_ratio, exponentiation_step, order) array([ 0.02222222, -0.44444444, 1.42222222]) """ rows, cols = np.ogrid[: num_terms + 1, :num_terms] coef_mat = np.ones((num_terms + 1, num_terms + 1)) coef_mat[:, 1:] = (1.0 / step_ratio) ** ( rows @ (exponentiation_step * cols + order) ) coef = pinv(coef_mat)[0] return coef def _estimate_error(new_seq, old_seq, richardson_coef): """Estimate error of multiple Richardson limit approximation. Args: new_seq (np.ndarray): Multiple estimates of the limit of ``old_seq``. The last two dimensions coincide with those of ``old_seq``. The first dimensions denotes the number of different estimates. old_seq (np.ndarray): The sequence of which we want to approximate the limit. 
Has dimension (k x n x m), where k denotes the number of sequence elements and an element ``sequence[l, :, :]`` denotes the (n x m) dimensional element richardson_coef (np.ndarray): Richardson coefficients. See function ``_richardson_coefficient`` for details. Returns: abserr (np.ndarray): The error estimate for each limit approximation in ``new_seq``. """ eps = np.finfo(float).eps t_quantile = stats.t(df=1).ppf(0.975) # 12.7062047361747 in numdifftools new_seq_len = new_seq.shape[0] unnormalized_covariance = np.sum(richardson_coef**2) fact = np.maximum(t_quantile * np.sqrt(unnormalized_covariance), eps * 10.0) if new_seq_len <= 1: delta = np.diff(old_seq, axis=0) tol = np.maximum(np.abs(old_seq[:-1]), np.abs(old_seq[1:])) * fact err = np.abs(delta) converged = err <= tol abserr = err[-new_seq_len:] + np.where( converged[-new_seq_len:], tol[-new_seq_len:] * 10, abs(new_seq - old_seq[-new_seq_len:]) * fact, ) else: err = np.abs(np.diff(new_seq, axis=0)) * fact tol = np.maximum(np.abs(new_seq[1:]), np.abs(new_seq[:-1])) * eps * fact converged = err <= tol abserr = err + np.where( converged, tol * 10, abs(new_seq[:-1] - old_seq[-new_seq_len + 1 :]) * fact, ) return abserr def _get_order_and_exponentiation_step(method): """Return order and exponentiation step given ``method``. Given ``method`` we return the initial order of the approximation error of the sequence under consideration (order) as well as the step size representing the growth of the exponent in the series expansion of the limit (exponentiation_step). See function ``richardson_extrapolation`` for more details. For different methods, different values of order and exponentiation step apply. Consider the following examples, where we continue the notation from function ``richardson_extrapolation`` and use O() to denote the Big O Laundau symbol. Central Differences. 
Derivative approximation via central difference is given by g(h) := [f(x + h) - f(x - h)] / 2h = f'(x) + r(x, h), where r(x, h) denotes the remainder term. If we expand the remainder term r(x, h) we get r(x, h) = a0*(h**2) + a1*(h**4) + a2*(h**6) + ... with a0 = f''(x) / 2!, a1 = f'''(x) / 3! etc. Rearanging terms we can write L := f'(x) = g(h) - r(x, h) = g(h) + O(h**2) and we notice that order = 2 and exponentiation_step = 2. Forward Differences. Derivative approximation via forward difference is given by g(h) := [f(x + h) - f(x)] / h = f'(x) + r(x, h), where again r(x, h) denotes the remainder term. If we expand the remainder term r(x, h) we get r(x, h) = a0*(h**1) + a1*(h**2) + a2*(h**3) + ... with a0 = f''(x) / 2!, a1 = f'''(x) / 3! etc. Rearanging terms we can write L := f'(x) = g(h) - r(x, h) = g(h) + O(h) and we notice that order = 1 and exponentiation_step = 1. Backward Differences. Analogous to forward differences. Args: method (str): One of ["central", "forward", "backward"], default "central". Returns: order (int): Initial order of the approximation error of sequence elements. exponentiation_step (int): Step representing the growth of the exponent in the series expansions of the limit. Example: >>> _get_order_and_exponentiation_step('central') (2, 2) """ lookup = { "central": (2, 2), "forward": (1, 1), "backward": (1, 1), } order, exponentiation_step = lookup[method] return order, exponentiation_step def _compute_step_ratio(steps): """Compute the step ratio used in producing ``steps``. Args: steps (np.ndarray): Array of shape (n_steps, len(x)) with the steps in the corresponding direction. Returns: step_ratio (float): The step ratio used in producing ``steps``. 
Example: >>> import numpy as np >>> steps = np.array([[2., np.nan, 2], [4, 4, 4], [8, 8, np.nan]]) >>> _compute_step_ratio(steps) 2.0 """ ratios = steps[1:, :] / steps[:-1, :] finite_ratios = ratios[np.isfinite(ratios)] step_ratio = finite_ratios.item(0) return step_ratio ================================================ FILE: src/optimagic/examples/__init__.py ================================================ ================================================ FILE: src/optimagic/examples/criterion_functions.py ================================================ """Import common objective functions in several optimagic compatible versions. All implemented functions accept arbitrary pytrees as parameters. If possible they are implemented as scalar and least-squares versions. """ from __future__ import annotations import numpy as np import pandas as pd from numpy.typing import NDArray from pybaum import tree_just_flatten, tree_unflatten from optimagic import mark from optimagic.optimization.fun_value import ( FunctionValue, ) from optimagic.parameters.block_trees import matrix_to_block_tree from optimagic.parameters.tree_registry import get_registry from optimagic.typing import PyTree REGISTRY = get_registry(extended=True) @mark.scalar def trid_scalar(params: PyTree) -> float: """Implement Trid function: https://www.sfu.ca/~ssurjano/trid.html.""" x = _get_x(params) return ((x - 1) ** 2).sum() - (x[1:] * x[:-1]).sum() @mark.scalar def trid_gradient(params: PyTree) -> PyTree: """Calculate gradient of trid function.""" x = _get_x(params) l1 = np.insert(x, 0, 0) l1 = np.delete(l1, [-1]) l2 = np.append(x, 0) l2 = np.delete(l2, [0]) flat = 2 * (x - 1) - l1 - l2 return _unflatten_gradient(flat, params) @mark.scalar def trid_fun_and_gradient(params: PyTree) -> tuple[float, PyTree]: """Implement Trid function and calculate gradient.""" val = trid_scalar(params) grad = trid_gradient(params) return val, grad @mark.scalar def rhe_scalar(params: PyTree) -> float: """Implement Rotated 
@mark.scalar
def rhe_scalar(params: PyTree) -> float:
    """Implement Rotated Hyper Ellipsoid function.

    Function description: https://www.sfu.ca/~ssurjano/rothyp.html.

    """
    return (rhe_ls(params) ** 2).sum()


@mark.scalar
def rhe_gradient(params: PyTree) -> PyTree:
    """Calculate gradient of rotated_hyper_ellipsoid function."""
    x = _get_x(params)
    # Entry j enters the criterion with weight (dim - j), so the gradient is
    # 2 * (dim - j) * x_j; np.arange(2 * dim, 0, -2) produces those weights.
    flat = np.arange(2 * len(x), 0, -2) * x
    return _unflatten_gradient(flat, params)


@mark.scalar
def rhe_fun_and_gradient(params: PyTree) -> tuple[float, PyTree]:
    """Implement Rotated Hyper Ellipsoid function and calculate gradient."""
    val = rhe_scalar(params)
    grad = rhe_gradient(params)
    return val, grad


@mark.least_squares
def rhe_ls(params: PyTree) -> NDArray[np.float64]:
    """Compute least-squares version of the Rotated Hyper Ellipsoid function."""
    x = _get_x(params)
    # Fix: use the flattened length, not len(params). For pytrees with array
    # leaves, len(params) counts only top-level entries and would truncate
    # the residual vector (the module claims arbitrary pytree support).
    dim = len(x)
    out = np.zeros(dim)
    for i in range(dim):
        out[i] = np.sqrt((x[: i + 1] ** 2).sum())
    return out


@mark.least_squares
def rhe_function_value(params: PyTree) -> FunctionValue:
    """FunctionValue version of Rotated Hyper Ellipsoid function."""
    contribs = rhe_ls(params)
    return FunctionValue(contribs)


@mark.scalar
def rosenbrock_scalar(params: PyTree) -> float:
    """Rosenbrock function: https://www.sfu.ca/~ssurjano/rosen.html."""
    return (rosenbrock_ls(params) ** 2).sum()


@mark.scalar
def rosenbrock_gradient(params: PyTree) -> PyTree:
    """Calculate gradient of rosenbrock function."""
    x = _get_x(params)
    l1 = np.append(np.delete(x, [-1]), 0)
    l2 = np.delete(np.insert(x, 0, 0), [1])
    l3 = np.delete(np.insert(x, 0, 0), [-1])
    l4 = np.append(np.delete(x, [0]), 0)
    l5 = np.append(np.full((len(x) - 1), 2), 0)
    flat = 100 * (4 * (l1**3) + 2 * l2 - 2 * (l3**2) - 4 * (l4 * x)) + 2 * l1 - l5
    return _unflatten_gradient(flat, params)


@mark.scalar
def rosenbrock_fun_and_gradient(params: PyTree) -> tuple[float, PyTree]:
    """Implement rosenbrock function and calculate gradient."""
    return rosenbrock_scalar(params), rosenbrock_gradient(params)
@mark.least_squares
def rosenbrock_ls(params: PyTree) -> NDArray[np.float64]:
    """Least-squares version of the rosenbrock function."""
    x = _get_x(params)
    # Fix: use the flattened length, not len(params), so pytrees with array
    # leaves yield the full residual vector (the last residual is always 0).
    dim = len(x)
    out = np.zeros(dim)
    for i in range(dim - 1):
        out[i] = np.sqrt(((x[i + 1] - x[i] ** 2) ** 2) * 100 + ((x[i] - 1) ** 2))
    return out


@mark.least_squares
def rosenbrock_function_value(params: PyTree) -> FunctionValue:
    """FunctionValue version of the rosenbrock function."""
    return FunctionValue(rosenbrock_ls(params))


@mark.least_squares
def sos_ls(params: PyTree) -> NDArray[np.float64]:
    """Least-squares version of the sum of squares or sphere function."""
    return _get_x(params)


@mark.least_squares
def sos_ls_with_pd_objects(params: PyTree) -> pd.Series[float]:
    """Least-squares version of the sphere function returning pandas objects."""
    return pd.Series(sos_ls(params))


@mark.scalar
def sos_scalar(params: PyTree) -> float:
    """Sum of squares or sphere function."""
    return (_get_x(params) ** 2).sum()


@mark.scalar
def sos_gradient(params: PyTree) -> PyTree:
    """Calculate the gradient of the sum of squares function."""
    flat = 2 * _get_x(params)
    return _unflatten_gradient(flat, params)


@mark.likelihood
def sos_likelihood(params: PyTree) -> NDArray[np.float64]:
    """Likelihood version of the sum of squares function."""
    return _get_x(params) ** 2


@mark.likelihood
def sos_likelihood_jacobian(params: PyTree) -> PyTree:
    """Calculate the likelihood Jacobian of the sum of squares function."""
    x = _get_x(params)
    out_mat = np.diag(2 * x)
    return matrix_to_block_tree(out_mat, x, params)


@mark.least_squares
def sos_ls_jacobian(params: PyTree) -> PyTree:
    """Calculate the least-squares Jacobian of the sum of squares function."""
    x = _get_x(params)
    out_mat = np.eye(len(x))
    return matrix_to_block_tree(out_mat, x, params)


@mark.scalar
def sos_fun_and_gradient(params: PyTree) -> tuple[float, PyTree]:
    """Calculate sum of squares criterion value and gradient."""
    return sos_scalar(params), sos_gradient(params)
and Jacobian.""" return sos_likelihood(params), sos_likelihood_jacobian(params) @mark.least_squares def sos_ls_fun_and_jac( params: PyTree, ) -> tuple[NDArray[np.float64], PyTree]: """Calculate sum of squares criterion value and Jacobian.""" return sos_ls(params), sos_ls_jacobian(params) sos_derivatives = [sos_gradient, sos_likelihood_jacobian, sos_ls_jacobian] def _get_x(params: PyTree) -> NDArray[np.float64]: if isinstance(params, np.ndarray) and params.ndim == 1: x = params.astype(float) else: registry = get_registry(extended=True) x = np.array(tree_just_flatten(params, registry=registry), dtype=np.float64) return x def _unflatten_gradient(flat: NDArray[np.float64], params: PyTree) -> PyTree: out = tree_unflatten(params, flat.tolist(), registry=REGISTRY) return out ================================================ FILE: src/optimagic/examples/numdiff_functions.py ================================================ """Functions with known gradients, jacobians or hessians. All functions take a numpy array with parameters as their first argument. Example inputs for the binary choice functions are in binary_choice_inputs.pickle. They come from the statsmodels documentation: https://tinyurl.com/y4x67vwl We pickled them so we don't need statsmodels as a dependency. 
""" import numpy as np from scipy.stats import norm FLOAT_EPS = np.finfo(float).eps # ====================================================================================== # Logit # ====================================================================================== def logit_loglike(params, y, x): return logit_loglikeobs(params, y, x).sum() def logit_loglikeobs(params, y, x): q = 2 * y - 1 return np.log(1 / (1 + np.exp(-(q * np.dot(x, params))))) def logit_loglike_gradient(params, y, x): c = 1 / (1 + np.exp(-(np.dot(x, params)))) return np.dot(y - c, x) def logit_loglikeobs_jacobian(params, y, x): c = 1 / (1 + np.exp(-(np.dot(x, params)))) return (y - c).reshape(-1, 1) * x def logit_loglike_hessian(params, y, x): # noqa: ARG001 c = 1 / (1 + np.exp(-(np.dot(x, params)))) return -np.dot(c * (1 - c) * x.T, x) # ====================================================================================== # Probit # ====================================================================================== def probit_loglike(params, y, x): return probit_loglikeobs(params, y, x).sum() def probit_loglikeobs(params, y, x): q = 2 * y - 1 return np.log(np.clip(norm.cdf(q * np.dot(x, params)), FLOAT_EPS, 1)) def probit_loglike_gradient(params, y, x): xb = np.dot(x, params) q = 2 * y - 1 c = q * norm.pdf(q * xb) / np.clip(norm.cdf(q * xb), FLOAT_EPS, 1 - FLOAT_EPS) return np.dot(c, x) def probit_loglikeobs_jacobian(params, y, x): xb = np.dot(x, params) q = 2 * y - 1 c = q * norm.pdf(q * xb) / np.clip(norm.cdf(q * xb), FLOAT_EPS, 1 - FLOAT_EPS) return c.reshape(-1, 1) * x def probit_loglike_hessian(params, y, x): xb = np.dot(x, params) q = 2 * y - 1 c = q * norm.pdf(q * xb) / norm.cdf(q * xb) return np.dot(-c * (c + xb) * x.T, x) ================================================ FILE: src/optimagic/exceptions.py ================================================ import sys from traceback import format_exception class OptimagicError(Exception): """Base exception for optimagic which should 
class OptimagicError(Exception):
    """Base exception for optimagic which should be inherited by all exceptions."""


class TableExistsError(OptimagicError):
    """Exception for database tables that should not exist but do."""


class InvalidFunctionError(OptimagicError):
    """Exception for invalid user provided functions.

    This includes user functions that do not comply with interfaces, raise errors or
    produce NaNs.

    """


class UserFunctionRuntimeError(OptimagicError):
    """Exception that is raised when user provided functions raise errors."""


class MissingInputError(OptimagicError):
    """Exception for missing user provided input."""


class AliasError(OptimagicError):
    """Exception for aliasing errors."""


class InvalidKwargsError(OptimagicError):
    """Exception for invalid user provided keyword arguments."""


class InvalidParamsError(OptimagicError):
    """Exception for invalid user provided parameters."""


class InvalidConstraintError(OptimagicError):
    """Exception for invalid user provided constraints."""


class InvalidBoundsError(OptimagicError):
    """Exception for invalid user provided bounds."""


class IncompleteBoundsError(OptimagicError):
    """Exception when user provided bounds are incomplete."""


class InvalidScalingError(OptimagicError):
    """Exception for invalid user provided scaling."""


class InvalidMultistartError(OptimagicError):
    """Exception for invalid user provided multistart options."""


class InvalidNumdiffOptionsError(OptimagicError):
    """Exception for invalid user provided numdiff options."""


class NotInstalledError(OptimagicError):
    """Exception when optional dependencies are needed but not installed."""


class NotAvailableError(OptimagicError):
    """Exception when something is not available, e.g. because a calculation failed."""
because a calculation failed.""" class InvalidAlgoOptionError(OptimagicError): """Exception for invalid user provided algorithm options.""" class InvalidAlgoInfoError(OptimagicError): """Exception for invalid user provided algorithm information.""" class InvalidPlottingBackendError(OptimagicError): """Exception for invalid user provided plotting backend.""" class StopOptimizationError(OptimagicError): def __init__(self, message, current_status): super().__init__(message) self.message = message self.current_status = current_status def __reduce__(self): """Taken from here: https://tinyurl.com/y6eeys2f.""" return (StopOptimizationError, (self.message, self.current_status)) def get_traceback(): tb = format_exception(*sys.exc_info()) if isinstance(tb, list): tb = "".join(tb) return tb INVALID_INFERENCE_MSG = ( "Taking the inverse of the information matrix failed. Only ever use this " "covariance matrix or standard errors based on it for diagnostic purposes, not for " "drawing conclusions." ) INVALID_SENSITIVITY_MSG = ( "Taking inverse failed during the calculation of sensitvity measures. Interpret " "them with caution." 
) ================================================ FILE: src/optimagic/logging/__init__.py ================================================ from .logger import ( SQLiteLogOptions as SQLiteLogOptions, ) from .logger import ( SQLiteLogReader as SQLiteLogReader, ) from .types import ExistenceStrategy as ExistenceStrategy ================================================ FILE: src/optimagic/logging/base.py ================================================ import io import warnings from abc import ABC, abstractmethod from dataclasses import asdict, fields, is_dataclass from typing import Any, Generic, Type, TypeVar import cloudpickle import pandas as pd from optimagic.exceptions import get_traceback from optimagic.typing import DictLikeAccess InputType = TypeVar("InputType", bound=DictLikeAccess) OutputType = TypeVar("OutputType", bound=DictLikeAccess) class _KeyValueStore(Generic[InputType, OutputType], ABC): """Generic abstract base class for a key-value store. This class defines the basic interface for key-value stores that support insertion and selection of items based on a primary key. Args: input_type: The type of input data that can be stored. output_type: The type of output data that can be retrieved. primary_key: The primary key used to uniquely identify items in the store. Raises: ValueError: If input_type or output_type is not a dataclass, or if the primary key is not found in output_type fields. 
""" def __init__( self, input_type: Type[InputType], output_type: Type[OutputType], primary_key: str, ): if not (is_dataclass(input_type) and is_dataclass(output_type)): raise ValueError("Arguments input_type and output_type must by dataclasses") output_fields = {f.name for f in fields(output_type)} if primary_key not in output_fields: raise ValueError( f"Primary key {primary_key} not found in output_type fields " f"{fields(output_type)}" ) self._output_type = output_type self._input_type = input_type self._primary_key = primary_key self._supported_fields = {f.name for f in fields(input_type)} @property def primary_key(self) -> str: """Get the primary key of the store. Returns: The primary key field name. """ return self._primary_key @abstractmethod def insert(self, value: InputType) -> None: """Implement this method to insert a new value into the key-value store. Make sure an auto-increment logic is implemented for the insertion. """ @abstractmethod def _select_by_key(self, key: int) -> list[OutputType]: """Implement this method to select a value from the store by its primary key.""" @abstractmethod def _select_all(self) -> list[OutputType]: """Implement this method to select all values from the store.""" def select(self, key: int | None = None) -> list[OutputType]: """Select items from the store. Args: key: Optional; the primary key of the item to be selected. If not provided, all items will be selected. Returns: A list of output items. """ if key is None: return self._select_all() return self._select_by_key(key) @abstractmethod def select_last_rows(self, n_rows: int) -> list[OutputType]: """Implement this to select the last `n_rows` from the store. Args: n_rows: The number of rows to select. Returns: A list of the last `n_rows` output items. """ def to_df(self) -> pd.DataFrame: """Convert the store's data to a Pandas DataFrame. Returns: A DataFrame containing all items in the store. 
""" items = self._select_all() return pd.DataFrame([asdict(item) for item in items]) class UpdatableKeyValueStore(_KeyValueStore[InputType, OutputType], ABC): """Generic abstract base class for an updatable key-value store. This class extends `KeyValueStore` to add support for updating existing items in the store. """ def update(self, key: int, value: InputType | dict[str, Any]) -> None: """Update an existing item in the store. Args: key: The primary key of the item to be updated. value: The updated item, or a dictionary of fields to update. Raises: ValueError: If any fields in `value` are not supported by the store. """ self._check_fields(value) self._update(key, value) @abstractmethod def _update(self, key: int, value: InputType | dict[str, Any]) -> None: """Implement the internal method to update an existing item in the store.""" def _check_fields(self, value: InputType | dict[str, Any]) -> None: if isinstance(value, dict): not_supported_fields = set(value.keys()).difference(self._supported_fields) if not_supported_fields: raise ValueError( f"Not supported fields {not_supported_fields}. " f"Only supports fields {self._supported_fields}" ) class NonUpdatableKeyValueStore(_KeyValueStore[InputType, OutputType], ABC): def __getattr__(self, name: str) -> Any: if name == "update": msg = ( f"'{self.__class__.__name__}' object does not allow to update items in" f"the store" ) else: msg = f"'{self.__class__.__name__}' object has no attribute '{name}'" raise AttributeError(msg) class RobustPickler: @staticmethod def loads( data: Any, fix_imports: bool = True, # noqa: ARG004 encoding: str = "ASCII", # noqa: ARG004 errors: str = "strict", # noqa: ARG004 buffers: Any = None, # noqa: ARG004 ) -> Any: """Robust pickle loading. We first try to unpickle the object with pd.read_pickle. This makes no difference for non-pandas objects but makes the de-serialization of pandas objects more robust across pandas versions. If that fails, we use cloudpickle. 
If that fails, we return None but do not raise an error. See: https://github.com/pandas-dev/pandas/issues/16474 """ try: res = pd.read_pickle(io.BytesIO(data), compression=None) except (KeyboardInterrupt, SystemExit): raise except Exception: try: res = cloudpickle.loads(data) except (KeyboardInterrupt, SystemExit): raise except Exception: res = None tb = get_traceback() warnings.warn( f"Unable to read PickleType column from database:\n{tb}\n " "The entry was replaced by None." ) return res @staticmethod def dumps( obj: Any, protocol: str | None = None, *, fix_imports: bool = True, # noqa: ARG001 buffer_callback: Any = None, # noqa: ARG004 ) -> Any: return cloudpickle.dumps(obj, protocol=protocol) ================================================ FILE: src/optimagic/logging/logger.py ================================================ from __future__ import annotations import os from abc import ABC, abstractmethod from pathlib import Path from typing import Any, Generic, Type, TypeVar, cast import numpy as np import pandas as pd import sqlalchemy as sql from sqlalchemy.engine import Engine from optimagic.logging.base import ( NonUpdatableKeyValueStore, UpdatableKeyValueStore, ) from optimagic.logging.sqlalchemy import ( IterationStore, ProblemStore, SQLAlchemyConfig, StepStore, ) from optimagic.logging.types import ( ExistenceStrategy, ExistenceStrategyLiteral, IterationState, IterationStateWithId, ProblemInitialization, ProblemInitializationWithId, StepResult, StepResultWithId, StepType, ) from optimagic.typing import ( Direction, DirectionLiteral, IterationHistory, MultiStartIterationHistory, PyTree, ) class LogOptions: """Base class for defining different log options. Serves as a registry for implemented option classes for better discoverability. 
""" _subclass_registry: list[Type[LogOptions]] = [] def __init_subclass__( cls: Type[LogOptions], abstract: bool = False, **kwargs: dict[Any, Any] ): if not abstract: LogOptions._subclass_registry.append(cls) super().__init_subclass__(**kwargs) @classmethod def available_option_types(cls) -> list[Type[LogOptions]]: return cls._subclass_registry _LogOptionsType = TypeVar("_LogOptionsType", bound=LogOptions) class LogReader(Generic[_LogOptionsType], ABC): """A class that manages the retrieving of optimization and exploration data. This class exposes methods to retrieve optimization logging data from stores. """ _step_store: UpdatableKeyValueStore[StepResult, StepResultWithId] _iteration_store: NonUpdatableKeyValueStore[IterationState, IterationStateWithId] _problem_store: UpdatableKeyValueStore[ ProblemInitialization, ProblemInitializationWithId ] @property def problem_df(self) -> pd.DataFrame: return self._problem_store.to_df() @classmethod def from_options(cls, log_options: LogOptions) -> LogReader[_LogOptionsType]: log_reader_class = _LOG_OPTION_LOG_READER_REGISTRY.get(type(log_options), None) if log_reader_class is None: raise ValueError( f"No LogReader implementation found for type " f"{type(log_options)}. Available option types: " f"\n {list(_LOG_OPTION_LOG_READER_REGISTRY.keys())}" ) return log_reader_class._create(log_options) @classmethod @abstractmethod def _create(cls, log_options: _LogOptionsType) -> LogReader[_LogOptionsType]: pass def read_iteration(self, iteration: int) -> IterationStateWithId: """Read a specific iteration from the iteration store. Args: iteration: The iteration number to read. Negative values read from the end. Returns: A `CriterionEvaluationWithId` object containing the iteration data. Raises: IndexError: If the iteration is invalid or the store is empty. 
""" if iteration >= 0: rowid = iteration + 1 else: try: last_row = self._iteration_store.select_last_rows(1) highest_rowid = last_row[0].rowid except IndexError as e: raise IndexError( "Invalid iteration request, iteration store is empty" ) from e # iteration is negative here! assert highest_rowid is not None rowid = highest_rowid + iteration + 1 row_list = self._iteration_store.select(rowid) if len(row_list) == 0: raise IndexError(f"Invalid iteration requested: {iteration}") else: data = row_list[0] return data def read_history(self) -> IterationHistory: """Read the entire iteration history from the iteration store. Returns: An `IterationHistory` object containing the parameters, criterion values, and runtimes. """ raw_res = self._iteration_store.select() params_list = [] criterion_list = [] runtime_list = [] for data in raw_res: if data.scalar_fun is not None: params_list.append(data.params) criterion_list.append(data.scalar_fun) runtime_list.append(data.timestamp) times = np.array(runtime_list) times -= times[0] return IterationHistory(params_list, criterion_list, times) @staticmethod def _normalize_direction( direction: Direction | DirectionLiteral, ) -> Direction: if isinstance(direction, str): direction = Direction(direction) return direction def _build_history_dataframe(self) -> pd.DataFrame: steps = self._step_store.to_df() raw_res = self._iteration_store.select() history: dict[str, list[Any]] = { "params": [], "fun": [], "time": [], "step": [], } for data in raw_res: if data.scalar_fun is not None: history["params"].append(data.params) history["fun"].append(data.scalar_fun) history["time"].append(data.timestamp) history["step"].append(data.step) times = np.array(history["time"]) times -= times[0] # For numpy arrays with ndim = 0, tolist() returns a scalar, which violates the # type hinting list[Any] from above. As history["time"] is always a list, this # case is safe to ignore. 
history["time"] = times.tolist() df = pd.DataFrame(history) df = df.merge( steps[[f"{self._step_store.primary_key}", "type"]], left_on="step", right_on=f"{self._step_store.primary_key}", ) return df.drop(columns=f"{self._step_store.primary_key}") @staticmethod def _split_exploration_and_optimization( df: pd.DataFrame, ) -> tuple[pd.DataFrame | None, pd.DataFrame]: exploration = df.query(f"type == '{StepType.EXPLORATION.value}'").drop( columns=["step", "type"] ) histories = df.query(f"type == '{StepType.OPTIMIZATION.value}'") histories = histories.drop(columns="type").set_index("step", append=True) return None if exploration.empty else exploration, histories @staticmethod def _sort_exploration( exploration: pd.DataFrame | None, optimization_type: Direction ) -> IterationHistory | None: if exploration is not None: is_minimization = optimization_type is Direction.MINIMIZE exploration = exploration.sort_values(by="fun", ascending=is_minimization) exploration_dict = cast(dict[str, Any], exploration.to_dict(orient="list")) return IterationHistory(**exploration_dict) return exploration @staticmethod def _extract_best_history( histories: pd.DataFrame, optimization_type: Direction ) -> tuple[IterationHistory, list[IterationHistory] | None]: groupby_step_criterion = histories["fun"].groupby(level="step") if optimization_type is Direction.MINIMIZE: best_idx = groupby_step_criterion.min().idxmin() else: best_idx = groupby_step_criterion.max().idxmax() remaining_indices = ( histories.index.get_level_values("step").unique().difference([best_idx]) ) best_history: pd.DataFrame | pd.Series[Any] = histories.xs( best_idx, level="step" ) def _to_dict(pandas_obj: pd.DataFrame | pd.Series) -> dict[str, Any]: if isinstance(pandas_obj, pd.DataFrame): result = pandas_obj.to_dict(orient="list") else: result = best_history.to_dict() return cast(dict[str, Any], result) best_history_dict = _to_dict(best_history) local_histories = [ _to_dict(histories.xs(idx, level="step")) for idx in 
remaining_indices ] if len(local_histories) == 0: remaining_histories = None else: remaining_histories = [ IterationHistory(**history_dict) for history_dict in local_histories ] return IterationHistory(**best_history_dict), remaining_histories def read_multistart_history( self, direction: Direction | DirectionLiteral ) -> MultiStartIterationHistory: """Read and the multistart optimization history. Args: direction: The optimization direction, either as an enum or string. Returns: A `MultiStartIterationHistory` object containing the best history, local histories, and exploration history. """ optimization_type = self._normalize_direction(direction) history_df = self._build_history_dataframe() exploration, optimization_history = self._split_exploration_and_optimization( history_df ) exploration_history = self._sort_exploration(exploration, optimization_type) best_history, remaining_histories = self._extract_best_history( optimization_history, optimization_type ) return MultiStartIterationHistory( best_history, local_histories=remaining_histories, exploration=exploration_history, ) def read_start_params(self) -> PyTree: """Read the start parameters form the problem store. Returns: A pytree object representing the start parameter. """ return self._problem_store.select(1)[0].params _LogReaderType = TypeVar("_LogReaderType", bound=LogReader[Any]) class LogStore(Generic[_LogOptionsType, _LogReaderType], ABC): """A class that manages the logging of optimization and exploration data. This class handles storing iterations, steps, and problem initialization data using various stores. Args: iteration_store: A non-updatable store for iteration data. step_store: An updatable store for step data. problem_store: An updatable store for problem initialization data. 
""" def __init__( self, iteration_store: NonUpdatableKeyValueStore[ IterationState, IterationStateWithId ], step_store: UpdatableKeyValueStore[StepResult, StepResultWithId], problem_store: UpdatableKeyValueStore[ ProblemInitialization, ProblemInitializationWithId ], ): self.step_store = step_store self.iteration_store = iteration_store self.problem_store = problem_store @classmethod def from_options( cls, log_options: LogOptions ) -> LogStore[_LogOptionsType, _LogReaderType]: logger_class = _LOG_OPTION_LOGGER_REGISTRY.get(type(log_options), None) if logger_class is None: raise ValueError( f"No Logger implementation found for type " f"{type(log_options)}. Available option types: " f"\n {list(_LOG_OPTION_LOGGER_REGISTRY.keys())}" ) return logger_class.create(log_options) @classmethod @abstractmethod def create( cls, log_options: _LogOptionsType ) -> LogStore[_LogOptionsType, _LogReaderType]: pass class SQLiteLogOptions(SQLAlchemyConfig, LogOptions): """Configuration class for setting up an SQLite database with SQLAlchemy. This class extends the `SQLAlchemyConfig` class to configure an SQLite database. It handles the creation of the database engine, manages database files, and applies various optimizations for logging performance. Args: path (str | Path): The file path to the SQLite database. fast_logging (bool): A boolean that determines if “unsafe” settings are used to speed up write processes to the database. This should only be used for very short running criterion functions where the main purpose of the log is a real-time dashboard, and it would not be catastrophic to get a corrupted database in case of a sudden system shutdown. If one evaluation of the criterion function (and gradient if applicable) takes more than 100 ms, the logging overhead is negligible. if_database_exists (ExistenceStrategy): Strategy for handling an existing database file. One of “extend”, “replace”, “raise”. 
""" def __init__( self, path: str | Path, fast_logging: bool = True, if_database_exists: ExistenceStrategy | ExistenceStrategyLiteral = ExistenceStrategy.RAISE, ): url = f"sqlite:///{path}" self._fast_logging = fast_logging self._path = path if isinstance(if_database_exists, str): if_database_exists = ExistenceStrategy(if_database_exists) self.if_database_exists = if_database_exists super().__init__(url) @property def path(self) -> str | Path: return self._path def create_engine(self) -> Engine: engine = sql.create_engine(self.url) self._configure_engine(engine) return engine def _configure_engine(self, engine: Engine) -> None: """Configure the sqlite engine. The two functions that configure the emission of the `begin` statement are taken from the sqlalchemy documentation the documentation: https://tinyurl.com/u9xea5z and are the recommended way of working around a bug in the pysqlite driver. The other function speeds up the write process. If fast_logging is False, it does so using only completely safe optimizations. Of fast_logging is True, it also uses unsafe optimizations. """ @sql.event.listens_for(engine, "connect") def do_connect(dbapi_connection: Any, connection_record: Any) -> None: # noqa: ARG001 # disable pysqlite's emitting of the BEGIN statement entirely. # also stops it from emitting COMMIT before absolutely necessary. 
dbapi_connection.isolation_level = None @sql.event.listens_for(engine, "begin") def do_begin(conn: Any) -> None: # emit our own BEGIN conn.exec_driver_sql("BEGIN DEFERRED") @sql.event.listens_for(engine, "connect") def set_sqlite_pragma(dbapi_connection: Any, connection_record: Any) -> None: # noqa: ARG001 cursor = dbapi_connection.cursor() cursor.execute("PRAGMA journal_mode = WAL") if self._fast_logging: cursor.execute("PRAGMA synchronous = OFF") else: cursor.execute("PRAGMA synchronous = NORMAL") cursor.close() class SQLiteLogReader(LogReader[SQLiteLogOptions]): """A class that manages the retrieving of optimization and exploration data from a SQLite database. This class exposes methods to retrieve optimization logging data from stores. Args: path (str | Path): The path to the SQLite database file. """ def __init__(self, path: str | Path): if not os.path.exists(path): raise FileNotFoundError(f"No file found at {path=}") log_options = SQLiteLogOptions( path, fast_logging=True, if_database_exists=ExistenceStrategy.EXTEND ) self._iteration_store = IterationStore(log_options) self._step_store = StepStore(log_options) self._problem_store = ProblemStore(log_options) @classmethod def _create(cls, log_options: SQLiteLogOptions) -> SQLiteLogReader: """Create an instance of SQLiteLogReader using the provided log options. Args: log_options (SQLiteLogOptions): Configuration options for the SQLite log. Returns: SQLiteLogReader: An instance of SQLiteLogReader initialized with the provided log options. """ return cls(log_options.path) class _SQLiteLogStore(LogStore[SQLiteLogOptions, SQLiteLogReader]): """A logger class that stores and manages optimization and exploration data using SQLite. It supports different strategies for handling existing databases, such as extending, replacing, or raising an error. 
""" @staticmethod def _handle_existing_database( path: str | Path, if_database_exists: ExistenceStrategy | ExistenceStrategyLiteral, ) -> None: if isinstance(if_database_exists, str): if_database_exists = ExistenceStrategy(if_database_exists) database_exists = os.path.exists(path) if database_exists: if if_database_exists is ExistenceStrategy.RAISE: raise FileExistsError( f"The database at {path} already exists. To reuse and extend " f"the existing database, set if_database_exists to " f"ExistenceStrategy.EXTEND." ) elif if_database_exists is ExistenceStrategy.REPLACE: try: os.remove(path) except PermissionError as e: msg = ( f"Failed to remove file {path}. " f"In particular, this can happen on Windows " f"machines, when a different process is accessing the file, " f"which results in a PermissionError. In this case, delete" f"the file manually." ) raise RuntimeError(msg) from e @classmethod def create(cls, log_options: SQLiteLogOptions) -> _SQLiteLogStore: cls._handle_existing_database(log_options.path, log_options.if_database_exists) iteration_store = IterationStore(log_options) step_store = StepStore(log_options) problem_store = ProblemStore(log_options) return cls(iteration_store, step_store, problem_store) _LOG_OPTION_LOGGER_REGISTRY: dict[Type[LogOptions], Type[LogStore[Any, Any]]] = { SQLiteLogOptions: _SQLiteLogStore } _LOG_OPTION_LOG_READER_REGISTRY: dict[Type[LogOptions], Type[LogReader[Any]]] = { SQLiteLogOptions: SQLiteLogReader } ================================================ FILE: src/optimagic/logging/read_log.py ================================================ """Deprecated module: Functions to read data from the database used for logging. The functions in the module are meant for end users of optimagic. They do not require any knowledge of databases. When using them internally, make sure to supply a database to path_or_database. Otherwise, the functions may be very slow. 
""" from __future__ import annotations import warnings from dataclasses import dataclass from optimagic.logging.logger import SQLiteLogOptions, SQLiteLogReader @dataclass class OptimizeLogReader: def __new__(cls, *args, **kwargs): # type: ignore warnings.warn( "OptimizeLogReader is deprecated and will be removed in a future " "version. Please use optimagic.logging.SQLiteLogReader instead.", FutureWarning, ) sqlite_options = SQLiteLogOptions(*args, **kwargs) return SQLiteLogReader.from_options(sqlite_options) ================================================ FILE: src/optimagic/logging/sqlalchemy.py ================================================ from __future__ import annotations import traceback import warnings from dataclasses import asdict, dataclass from functools import cached_property from typing import Any, Sequence, Type, cast import sqlalchemy as sql from sqlalchemy import Column, Integer, PickleType, String from sqlalchemy.engine.base import Engine from sqlalchemy.sql.base import Executable from sqlalchemy.sql.schema import MetaData from optimagic.logging.base import ( InputType, NonUpdatableKeyValueStore, OutputType, RobustPickler, UpdatableKeyValueStore, ) from optimagic.logging.types import ( IterationState, IterationStateWithId, ProblemInitialization, ProblemInitializationWithId, StepResult, StepResultWithId, ) class SQLAlchemyConfig: """Configuration class for setting up an SQLAlchemy engine and metadata. This class manages the connection URL, engine creation, and metadata reflection for an SQLAlchemy database connection. Args: url: The database URL to connect to. """ def __init__( self, url: str, ): self.url = url @cached_property def metadata(self) -> MetaData: """Get the metadata object. Returns: The SQLAlchemy MetaData object reflecting the database schema. 
""" engine = self.create_engine() metadata = MetaData() self._configure_reflect() metadata.reflect(engine) return metadata def create_engine(self) -> Engine: """Create and return an SQLAlchemy engine. Returns: An SQLAlchemy Engine object. """ return sql.create_engine(self.url) @staticmethod def _configure_reflect() -> None: """Mark all BLOB dtypes as PickleType with our custom pickle reader. Code ist taken from the documentation: https://tinyurl.com/y7q287jr """ @sql.event.listens_for(sql.Table, "column_reflect") def _setup_pickletype( inspector: Any, table: sql.Table, column_info: dict[str, Any] ) -> None: # noqa: ARG001 if isinstance(column_info["type"], sql.BLOB): column_info["type"] = sql.PickleType(pickler=RobustPickler) # type:ignore @dataclass class TableConfig: """Configuration for creating and managing SQLAlchemy tables. This class defines the schema for an SQLAlchemy table, including its name, columns, primary key, and strategy for handling existing tables. Args: table_name: The name of the table. columns: A list of SQLAlchemy Column objects defining the table schema. primary_key: The name of the primary key column. """ table_name: str columns: list[sql.Column[Any]] primary_key: str @property def column_names(self) -> list[str]: return [c.name for c in self.columns] def create_table(self, metadata: MetaData, engine: Engine) -> sql.Table: """Create or reflect the table in the database. Args: metadata: The SQLAlchemy MetaData object. engine: The SQLAlchemy Engine object. Returns: The SQLAlchemy Table object representing the created or reflected table. """ metadata.reflect(engine) table = sql.Table( self.table_name, metadata, *self.columns, extend_existing=True ) metadata.create_all(engine) return table class _SQLAlchemyStoreMixin: """Mixin class for common SQLAlchemy store operations. This class provides common methods for selecting, inserting, and executing SQL statements in an SQLAlchemy-based key-value store. 
Args: db_config: The SQLAlchemyConfig object for database configuration. table_config: The TableConfig object for table configuration. """ def __init__(self, db_config: SQLAlchemyConfig, table_config: TableConfig): self._db_config = db_config self._engine = db_config.create_engine() self._table_config = table_config self._table = table_config.create_table(db_config.metadata, self._engine) @property def column_names(self) -> list[str]: return self._table_config.column_names @property def table_name(self) -> str: return self._table_config.table_name @property def table(self) -> sql.Table: return self._table @property def engine(self) -> Engine: return self._engine def _select_row_by_key(self, key: int) -> list[Any]: stmt = self._table.select().where( getattr(self._table.c, self._table_config.primary_key) == key ) return self._execute_read_statement(stmt) def _select_all_rows(self) -> list[Any]: stmt = self._table.select() return self._execute_read_statement(stmt) def _select_last_rows(self, n_rows: int) -> list[Any]: stmt = ( self._table.select() .order_by(getattr(self._table.c, self._table_config.primary_key).desc()) .limit(n_rows) ) result = self._execute_read_statement(stmt) return result[::-1] def _insert(self, insert_values: dict[str, Any]) -> None: stmt = self._table.insert().values(**insert_values) self._execute_write_statement(stmt) def _execute_read_statement(self, statement: Executable) -> list[Any]: with self._engine.connect() as connection: return connection.execute(statement).fetchall() def _execute_write_statement(self, statement: Executable) -> None: try: with self._engine.begin() as connection: connection.execute(statement) except (KeyboardInterrupt, SystemExit): raise except Exception: exception_info = traceback.format_exc() warnings.warn( f"Unable to write to database. 
The traceback was:\n\n{exception_info}" ) class SQLAlchemySimpleStore( NonUpdatableKeyValueStore[InputType, OutputType], _SQLAlchemyStoreMixin, ): """A simple SQLAlchemy-based key-value store that does not support updates. This class provides basic key-value storage functionality using SQLAlchemy, where values are serialized and stored as BLOBs. The store does not support updating existing entries. Args: table_name: The name of the table. primary_key: The primary key column name. db_config: The SQLAlchemyConfig object for database configuration. """ _value_column: str = "serialized_value" def __init__( self, table_name: str, primary_key: str, db_config: SQLAlchemyConfig, input_type: Type[InputType], output_type: Type[OutputType], ): super().__init__(input_type, output_type, primary_key) columns = [ sql.Column(primary_key, sql.Integer, primary_key=True, autoincrement=True), sql.Column(self._value_column, sql.PickleType(pickler=RobustPickler)), # type:ignore ] table_config = TableConfig(table_name, columns, self.primary_key) _SQLAlchemyStoreMixin.__init__(self, db_config, table_config) def __reduce__( self, ) -> tuple[ Type[SQLAlchemySimpleStore[Any, Any]], tuple[str, str, SQLAlchemyConfig, Type[Any], Type[Any]], ]: return SQLAlchemySimpleStore, ( self.table_name, self.primary_key, self._db_config, self._input_type, self._output_type, ) def insert(self, value: InputType) -> None: """Insert a new value into the store. Args: value: The value to insert into the store. """ self._insert({self._value_column: value}) def _select_by_key(self, key: int) -> list[OutputType]: result = self._select_row_by_key(key) return self._post_process(result) def _select_all(self) -> list[OutputType]: result = self._select_all_rows() return self._post_process(result) def select_last_rows(self, n_rows: int) -> list[OutputType]: """Select the last `n_rows` values from the store. Args: n_rows: The number of rows to select. Returns: A list of the last `n_rows` output values. 
""" result = self._select_last_rows(n_rows) return self._post_process(result) def _post_process(self, results: Sequence[sql.Row]) -> list[OutputType]: # type:ignore output_list = [] for row in results: row_dict = {self.primary_key: row[0]} row_dict.update(asdict(row[-1])) output_list.append(self._output_type(**row_dict)) return output_list class SQLAlchemyTableStore( UpdatableKeyValueStore[InputType, OutputType], _SQLAlchemyStoreMixin ): """An SQLAlchemy-based key-value store that supports updates. This class provides key-value storage functionality using SQLAlchemy, allowing for insertion, updating, and selection of data. Args: table_config: The TableConfig object defining the table schema. db_config: The SQLAlchemyConfig object for database configuration. input_type: The type of input data. output_type: The type of output data. """ def __init__( self, table_config: TableConfig, db_config: SQLAlchemyConfig, input_type: Type[InputType], output_type: Type[OutputType], ): _SQLAlchemyStoreMixin.__init__(self, db_config, table_config) super().__init__(input_type, output_type, self._table_config.primary_key) def __reduce__( self, ) -> tuple[ Type[SQLAlchemyTableStore[Any, Any]], tuple[TableConfig, SQLAlchemyConfig, Type[Any], Type[Any]], ]: return SQLAlchemyTableStore, ( self._table_config, self._db_config, self._input_type, self._output_type, ) def insert(self, value: InputType) -> None: """Insert a new value into the store. Args: value: The value to insert into the store. 
""" self._insert(asdict(value)) def _update(self, key: int, value: InputType | dict[str, Any]) -> None: if not isinstance(value, dict): update_values = asdict(value) else: update_values = value stmt = ( self._table.update() .where(getattr(self._table.c, self.primary_key) == key) .values(**update_values) ) self._execute_write_statement(stmt) def _select_by_key(self, key: int) -> list[OutputType]: result = self._select_row_by_key(key) return self._post_process(result) def _select_all(self) -> list[OutputType]: result = self._select_all_rows() return self._post_process(result) def select_last_rows(self, n_rows: int) -> list[OutputType]: """Select the last `n_rows` values from the store. Args: n_rows: The number of rows to select. Returns: A list of the last `n_rows` output values. """ result = self._select_last_rows(n_rows) return self._post_process(result) def _post_process(self, results: Sequence[sql.Row]) -> list[OutputType]: # type:ignore return [ self._output_type(**dict(zip(self.column_names, row, strict=False))) for row in results ] class IterationStore(SQLAlchemySimpleStore[IterationState, IterationStateWithId]): """Store for managing iteration data in an SQLite database. Args: db_config (SQLiteConfig): The SQLiteConfig object for database configuration. """ _TABLE_NAME = "optimization_iterations" _PRIMARY_KEY = "rowid" def __init__( self, db_config: SQLAlchemyConfig, ): super().__init__( self._TABLE_NAME, self._PRIMARY_KEY, db_config, IterationState, IterationStateWithId, ) class StepStore(SQLAlchemyTableStore[StepResult, StepResultWithId]): """Store for managing step data in an SQLite database. Args: db_config (SQLiteConfig): The SQLiteConfig object for database configuration. """ _TABLE_NAME = "steps" _PRIMARY_KEY = "rowid" def __init__( self, db_config: SQLAlchemyConfig, ): columns = [ Column(self._PRIMARY_KEY, Integer, primary_key=True, autoincrement=True), Column("type", String), # e.g. optimization Column("status", String), # e.g. 
running Column("n_iterations", Integer), # optional Column("name", String), # e.g. "optimization-1", "exploration", not unique ] table_config = TableConfig( self._TABLE_NAME, cast(list[Column[Any]], columns), self._PRIMARY_KEY, ) super().__init__( table_config, db_config, StepResult, StepResultWithId, ) class ProblemStore( SQLAlchemyTableStore[ProblemInitialization, ProblemInitializationWithId] ): """Store for managing optimization problem initialization data in an SQLite database. Args: db_config (SQLiteConfig): The SQLiteConfig object for database configuration. """ _TABLE_NAME = "optimization_problem" _PRIMARY_KEY = "rowid" def __init__( self, db_config: SQLAlchemyConfig, ): columns = [ Column(self._PRIMARY_KEY, Integer, primary_key=True, autoincrement=True), Column("direction", String), Column("params", PickleType(pickler=RobustPickler)), # type:ignore ] table_config = TableConfig( self._TABLE_NAME, cast(list[Column[Any]], columns), self._PRIMARY_KEY, ) super().__init__( table_config, db_config, ProblemInitialization, ProblemInitializationWithId, ) ================================================ FILE: src/optimagic/logging/types.py ================================================ from dataclasses import dataclass from enum import Enum from typing import Literal from optimagic.optimization.fun_value import SpecificFunctionValue from optimagic.typing import ( DictLikeAccess, Direction, DirectionLiteral, PyTree, ) class StepStatus(str, Enum): """Status of a step in a process. Attributes: SCHEDULED: Indicates that the step is scheduled but not yet started. RUNNING: Indicates that the step is currently in progress. COMPLETE: Indicates that the step has completed successfully. SKIPPED: Indicates that the step was skipped. """ SCHEDULED = "scheduled" RUNNING = "running" COMPLETE = "complete" SKIPPED = "skipped" StepStatusLiteral = Literal["scheduled", "running", "complete", "skipped"] class StepType(str, Enum): """Type of step in a process. 
Attributes: OPTIMIZATION: Represents an optimization step. EXPLORATION: Represents an exploration step. """ OPTIMIZATION = "optimization" EXPLORATION = "exploration" StepTypeLiteral = Literal["optimization", "exploration"] class ExistenceStrategy(str, Enum): """Strategies to handle the existence of a database or table. Attributes: RAISE: Raise an error if the database resp. table exists. EXTEND: Extend the existing database or table. REPLACE: Replace the existing database or table. """ RAISE = "raise" EXTEND = "extend" REPLACE = "replace" ExistenceStrategyLiteral = Literal["raise", "extend", "replace"] @dataclass(frozen=True) class IterationState(DictLikeAccess): """Result of a criterion evaluation. Attributes: params: The parameters used in the evaluation. timestamp: The time at which the evaluation was performed. value: The result value of the evaluation. valid: Indicates if the evaluation is valid. criterion_eval: Optional, additional evaluation information. internal_derivative: Optional, derivative information used internally. step: Optional, step number associated with the evaluation. exceptions: Optional, exceptions encountered during evaluation. hash: Optional, hash of the evaluation for identification purposes. """ params: PyTree timestamp: float scalar_fun: float | None valid: bool raw_fun: SpecificFunctionValue | None step: int | None exceptions: str | None def combine(self, other: "IterationState") -> "IterationState": """Combine two iteration states. Args: other (IterationState): The second iteration state. Returns: IterationState: The combined iteration state. 
""" raw = [e for e in [self.exceptions, other.exceptions] if e is not None] exceptions: str | None = None if raw: exceptions = "\n\n".join(raw) new = IterationState( # one of the values must be None params=self.params, timestamp=min(self.timestamp, other.timestamp), scalar_fun=self.scalar_fun or other.scalar_fun, valid=self.valid and other.valid, # one of the values must be None raw_fun=self.raw_fun or other.raw_fun, step=self.step, exceptions=exceptions, ) return new @dataclass(frozen=True) class IterationStateWithId(IterationState): """Criterion evaluation result with an ID. Attributes: rowid: The unique ID associated with the evaluation result. Raises: ValueError: If `rowid` is None. """ rowid: int | None = None def __post_init__(self) -> None: if self.rowid is None: raise ValueError("rowid must not be None") @dataclass(frozen=True) class StepResult(DictLikeAccess): """Result of a process step. Attributes: name: The name of the step. type: The type of the step, either as `StepType` or string. status: The status of the step, either as `StepStatus` or string. n_iterations: Optional, the number of iterations performed in the step. """ name: str type: StepType | StepTypeLiteral status: StepStatus | StepStatusLiteral n_iterations: int | None = None def __post_init__(self) -> None: if isinstance(self.type, str): object.__setattr__(self, "type", StepType(self.type)) if isinstance(self.status, str): object.__setattr__(self, "status", StepStatus(self.status)) @dataclass(frozen=True) class StepResultWithId(StepResult): """Step result with an ID. Attributes: rowid: The unique ID associated with the step result. Raises: ValueError: If `rowid` is None. """ rowid: int | None = None def __post_init__(self) -> None: if self.rowid is None: raise ValueError("rowid must not be None") super().__post_init__() @dataclass(frozen=True) class ProblemInitialization(DictLikeAccess): """Start characteristics of an optimization problem. 
Attributes: direction: The direction of optimization, either as `Direction` or string literal. params: The parameters for the initialization. """ direction: Direction | DirectionLiteral params: PyTree @dataclass(frozen=True) class ProblemInitializationWithId(ProblemInitialization): """Problem initialization with an ID. Attributes: rowid: The unique ID associated with the problem initialization. Raises: ValueError: If `rowid` is None. """ rowid: int | None = None def __post_init__(self) -> None: if self.rowid is None: raise ValueError("rowid must not be None") ================================================ FILE: src/optimagic/mark.py ================================================ from functools import wraps from typing import Any, Callable, ParamSpec, TypeVar from optimagic.optimization.algorithm import AlgoInfo from optimagic.typing import AggregationLevel P = ParamSpec("P") ScalarFuncT = TypeVar("ScalarFuncT", bound=Callable[..., Any]) VectorFuncT = TypeVar("VectorFuncT", bound=Callable[..., Any]) def scalar(func: ScalarFuncT) -> ScalarFuncT: """Mark a function as a scalar function.""" wrapper = func try: wrapper._problem_type = AggregationLevel.SCALAR # type: ignore except (KeyboardInterrupt, SystemExit): raise except Exception: @wraps(func) def wrapper(*args, **kwargs): # type: ignore return func(*args, **kwargs) wrapper._problem_type = AggregationLevel.SCALAR # type: ignore return wrapper def least_squares(func: VectorFuncT) -> VectorFuncT: """Mark a function as a least squares function.""" wrapper = func try: wrapper._problem_type = AggregationLevel.LEAST_SQUARES # type: ignore except (KeyboardInterrupt, SystemExit): raise except Exception: @wraps(func) def wrapper(*args, **kwargs): # type: ignore return func(*args, **kwargs) wrapper._problem_type = AggregationLevel.LEAST_SQUARES # type: ignore return wrapper def likelihood(func: VectorFuncT) -> VectorFuncT: """Mark a function as a likelihood function.""" wrapper = func try: wrapper._problem_type = 
AggregationLevel.LIKELIHOOD # type: ignore except (KeyboardInterrupt, SystemExit): raise except Exception: @wraps(func) def wrapper(*args, **kwargs): # type: ignore return func(*args, **kwargs) wrapper._problem_type = AggregationLevel.LIKELIHOOD # type: ignore return wrapper # TODO: I get an error when adding bound=Algorithm to AlgorithmSubclass. Why? AlgorithmSubclass = TypeVar("AlgorithmSubclass") def minimizer( name: str, solver_type: AggregationLevel, is_available: bool, is_global: bool, needs_jac: bool, needs_hess: bool, needs_bounds: bool, supports_parallelism: bool, supports_bounds: bool, supports_infinite_bounds: bool, supports_linear_constraints: bool, supports_nonlinear_constraints: bool, disable_history: bool = False, experimental: bool = False, ) -> Callable[[AlgorithmSubclass], AlgorithmSubclass]: """Mark an algorithm as a optimagic minimizer and add AlgoInfo. Args: name: The name of the algorithm as a string. Used in error messages, warnings and the OptimizeResult. solver_type: The type of optimization problem the algorithm solves. Used to distinguish between scalar, least-squares and likelihood optimizers. Can take the values AggregationLevel.SCALAR, AggregationLevel.LEAST_SQUARES and AggregationLevel.LIKELIHOOD. is_available: Whether the algorithm is installed. is_global: Whether the algorithm is a global optimizer. needs_jac: Whether the algorithm needs some kind of first derivative. This needs to be True if the algorithm uses `jac` or `fun_and_jac`. needs_hess: Whether the algorithm needs some kind of second derivative. This is not yet implemented and will be False for all currently wrapped algorithms. needs_bounds: Whether the algorithm needs bounds to run. This is different from supports_bounds in that algorithms that support bounds can run without requiring them. supports_parallelism: Whether the algorithm supports parallelism. This needs to be True if the algorithm previously took `n_cores` and/or `batch_evaluator` as arguments. 
supports_bounds: Whether the algorithm supports bounds. This needs to be True if the algorithm previously took `lower_bounds` and/or `upper_bounds` as arguments. supports_infinite_bounds: Whether the algorithm supports infinite values in bounds. supports_linear_constraints: Whether the algorithm supports linear constraints. This is not yet implemented and will be False for all currently wrapped algorithms. supports_nonlinear_constraints: Whether the algorithm supports nonlinear constraints. This needs to be True if the algorithm previously took `nonlinear_constraints` as an argument. disable_history: Whether the algorithm should disable history collection. experimental: Whether the algorithm is experimental and should skip tests. """ def decorator(cls: AlgorithmSubclass) -> AlgorithmSubclass: algo_info = AlgoInfo( name=name, solver_type=solver_type, is_available=is_available, is_global=is_global, needs_jac=needs_jac, needs_hess=needs_hess, needs_bounds=needs_bounds, supports_parallelism=supports_parallelism, supports_bounds=supports_bounds, supports_infinite_bounds=supports_infinite_bounds, supports_linear_constraints=supports_linear_constraints, supports_nonlinear_constraints=supports_nonlinear_constraints, disable_history=disable_history, experimental=experimental, ) cls.__algo_info__ = algo_info # type: ignore return cls return decorator ================================================ FILE: src/optimagic/optimization/__init__.py ================================================ ================================================ FILE: src/optimagic/optimization/algo_options.py ================================================ import numpy as np CONVERGENCE_FTOL_REL = 2e-9 """float: Stop when the relative improvement between two iterations is below this. The exact definition of relative improvement depends on the optimizer and should be documented there. To disable it, set it to 0. The default value is inspired by scipy L-BFGS-B defaults, but rounded. 
""" CONVERGENCE_FTOL_ABS = 0 """float: Stop when the absolute improvement between two iterations is below this. Disabled by default because it is very problem specific. """ CONVERGENCE_GTOL_ABS = 1e-5 """float: Stop when the gradient are smaller than this. For some algorithms this criterion refers to all entries, for others to some norm. For bound constrained optimizers this typically refers to a projected gradient. The exact definition should be documented for each optimizer. The default is the same as scipy. To disable it, set it to zero. """ CONVERGENCE_GTOL_REL = 1e-8 """float: Stop when the gradient, divided by the absolute value of the criterion function is smaller than this. For some algorithms this criterion refers to all entries, for others to some norm.For bound constrained optimizers this typically refers to a projected gradient. The exact definition should be documented for each optimizer. To disable it, set it to zero. """ CONVERGENCE_GTOL_SCALED = 1e-8 """float: Stop when all entries (or for some algorithms the norm) of the gradient, divided by the norm of the gradient at start parameters is smaller than this. For bound constrained optimizers this typically refers to a projected gradient. The exact definition should be documented for each optimizer. To disable it, set it to zero. """ CONVERGENCE_XTOL_REL = 1e-5 """float: Stop when the relative change in parameters is smaller than this. The exact definition of relative change and whether this refers to the maximum change or the average change depends on the algorithm and should be documented there. To disable it, set it to zero. The default is the same as in scipy. """ CONVERGENCE_XTOL_ABS = 0 """float: Stop when the absolute change in parameters between two iterations is smaller than this. Whether this refers to the maximum change or the average change depends on the algorithm and should be documented there. Disabled by default because it is very problem specific. 
To enable it, set it to a value larger than zero. """ STOPPING_MAXFUN = 1_000_000 """int: If the maximum number of function evaluation is reached, the optimization stops but we do not count this as successful convergence. The function evaluations used to evaluate a numerical gradient do not count for this. """ STOPPING_MAXFUN_GLOBAL = 1_000 """int: If the maximum number of function evaluation is reached, the optimization stops but we do not count this as successful convergence. The function evaluations used to evaluate a numerical gradient do not count for this. Set to a lower number than STOPPING_MAX_CRITERION_EVALUATIONS for global optimizers. """ STOPPING_MAXITER = 1_000_000 """int: If the maximum number of iterations is reached, the optimization stops, but we do not count this as successful convergence. The difference to ``max_criterion_evaluations`` is that one iteration might need several criterion evaluations, for example in a line search or to determine if the trust region radius has to be shrunk. """ CONVERGENCE_SECOND_BEST_FTOL_ABS = 1e-08 """float: absolute criterion tolerance optimagic requires if no other stopping criterion apart from max iterations etc. is available this is taken from scipy (SLSQP's value, smaller than Nelder-Mead). """ CONVERGENCE_SECOND_BEST_XTOL_ABS = 1e-08 """float: The absolute parameter tolerance optimagic requires if no other stopping criterion apart from max iterations etc. is available. This is taken from pybobyqa. """ CONVERGENCE_TARGET_VALUE = None """float or None: Stop when the criterion value is better than or equal to this target. The definition of "better" depends on the optimization direction. - Minimization: criterion <= target - Maximization: criterion >= target Used in population-based algorithms like genetic algorithms. To disable, set to None. """ CONVERGENCE_GENERATIONS_NOIMPROVE = None """int or None: Stop when the best criterion value has not improved for this many consecutive generations. 
Used in population-based algorithms like genetic algorithms. To disable, set to None. """ MAX_LINE_SEARCH_STEPS = 20 """int: Inspired by scipy L-BFGS-B.""" LIMITED_MEMORY_STORAGE_LENGTH = 10 """int: Taken from scipy L-BFGS-B.""" CONSTRAINTS_ABSOLUTE_TOLERANCE = 1e-5 """float: Allowed tolerance of the equality and inequality constraints for values to be considered 'feasible'. """ N_RESTARTS = 1 """int: Number of times to restart the optimizer if convergence is not reached. This parameter controls how many times the optimization process is restarted in an attempt to achieve convergence. - A value of 1 (the default) indicates that the optimizer will only run once, disabling the restart feature. - Values greater than 1 specify the maximum number of restart attempts. Note: This is distinct from `STOPPING_MAXITER`, which limits the number of iterations within a single optimizer run, not the number of restarts. """ def get_population_size(population_size, x, lower_bound=10): """Default population size for genetic algorithms.""" if population_size is None: population_size = int(np.clip(10 * (len(x) + 1), lower_bound, np.inf)) else: population_size = int(population_size) return population_size ================================================ FILE: src/optimagic/optimization/algorithm.py ================================================ import typing import warnings from abc import ABC, ABCMeta, abstractmethod from dataclasses import dataclass, replace from typing import Any import numpy as np from numpy.typing import NDArray from typing_extensions import Self from optimagic.exceptions import InvalidAlgoInfoError, InvalidAlgoOptionError from optimagic.logging.types import StepStatus from optimagic.optimization.history import History from optimagic.optimization.internal_optimization_problem import ( InternalOptimizationProblem, ) from optimagic.type_conversion import TYPE_CONVERTERS from optimagic.typing import AggregationLevel @dataclass(frozen=True) class AlgoInfo: name: str 
@dataclass(frozen=True)
class AlgoInfo:
    """Static metadata describing the properties and capabilities of an algorithm."""

    name: str
    solver_type: AggregationLevel
    is_available: bool
    is_global: bool
    needs_jac: bool
    needs_hess: bool
    needs_bounds: bool
    supports_parallelism: bool
    supports_bounds: bool
    supports_infinite_bounds: bool
    supports_linear_constraints: bool
    supports_nonlinear_constraints: bool
    disable_history: bool = False
    experimental: bool = False

    def __post_init__(self) -> None:
        """Validate field types, collecting all violations into one error.

        Raises:
            InvalidAlgoInfoError: If any field has an invalid type.

        """
        report: list[str] = []
        if not isinstance(self.name, str):
            report.append("name must be a string")
        # FIX: the message previously said "problem_type" although the field is
        # called solver_type.
        if not isinstance(self.solver_type, AggregationLevel):
            report.append("solver_type must be an AggregationLevel")
        # All remaining fields are plain bools; validate them uniformly.
        # FIX: `experimental` was previously not validated at all.
        bool_fields = (
            "is_available",
            "is_global",
            "needs_jac",
            "needs_hess",
            "needs_bounds",
            "supports_parallelism",
            "supports_bounds",
            "supports_infinite_bounds",
            "supports_linear_constraints",
            "supports_nonlinear_constraints",
            "disable_history",
            "experimental",
        )
        for field_name in bool_fields:
            if not isinstance(getattr(self, field_name), bool):
                report.append(f"{field_name} must be a bool")

        if report:
            msg = (
                "The following arguments to AlgoInfo or `mark.minimizer` are "
                "invalid:\n" + "\n".join(report)
            )
            raise InvalidAlgoInfoError(msg)
@dataclass(frozen=True)
class InternalOptimizeResult:
    """Internal representation of the result of an optimization problem.

    Args:
        x: The optimal parameters.
        fun: The function value at the optimal parameters.
        success: Whether the optimization was successful.
        message: A message from the optimizer.
        status: The status of the optimization.
        n_fun_evals: The number of function evaluations.
        n_jac_evals: The number of gradient or jacobian evaluations.
        n_hess_evals: The number of Hessian evaluations.
        n_iterations: The number of iterations.
        jac: The Jacobian of the objective function at the optimal parameters.
        hess: The Hessian of the objective function at the optimal parameters.
        hess_inv: The inverse of the Hessian of the objective function at the
            optimal parameters.
        max_constraint_violation: The maximum constraint violation.
        info: Additional information from the optimizer.

    """

    x: NDArray[np.float64]
    fun: float | NDArray[np.float64]
    success: bool | None = None
    message: str | None = None
    status: int | None = None
    n_fun_evals: int | None = None
    n_jac_evals: int | None = None
    n_hess_evals: int | None = None
    n_iterations: int | None = None
    jac: NDArray[np.float64] | None = None
    hess: NDArray[np.float64] | None = None
    hess_inv: NDArray[np.float64] | None = None
    max_constraint_violation: float | None = None
    info: dict[str, typing.Any] | None = None
    history: History | None = None
    multistart_info: dict[str, typing.Any] | None = None

    def __post_init__(self) -> None:
        # Collect all type violations first so the user sees them in one error
        # instead of fixing them one at a time.
        report: list[str] = []
        if not isinstance(self.x, np.ndarray):
            report.append("x must be a numpy array")
        if not (isinstance(self.fun, np.ndarray) or np.isscalar(self.fun)):
            report.append("fun must be a numpy array or scalar")
        if self.success is not None and not isinstance(self.success, bool):
            report.append("success must be a bool or None")
        if self.message is not None and not isinstance(self.message, str):
            report.append("message must be a string or None")
        if self.n_fun_evals is not None and not isinstance(self.n_fun_evals, int):
            report.append("n_fun_evals must be an int or None")
        if self.n_jac_evals is not None and not isinstance(self.n_jac_evals, int):
report.append("n_jac_evals must be an int or None") if self.n_hess_evals is not None and not isinstance(self.n_hess_evals, int): report.append("n_hess_evals must be an int or None") if self.n_iterations is not None and not isinstance(self.n_iterations, int): report.append("n_iterations must be an int or None") if self.jac is not None and not isinstance(self.jac, np.ndarray): report.append("jac must be a numpy array or None") if self.hess is not None and not isinstance(self.hess, np.ndarray): report.append("hess must be a numpy array or None") if self.hess_inv is not None and not isinstance(self.hess_inv, np.ndarray): report.append("hess_inv must be a numpy array or None") if self.max_constraint_violation is not None and not np.isscalar( self.max_constraint_violation ): report.append("max_constraint_violation must be a scalar or None") if self.info is not None and not isinstance(self.info, dict): report.append("info must be a dictionary or None") if self.status is not None and not isinstance(self.status, int): report.append("status must be an int or None") if self.max_constraint_violation and not isinstance( self.max_constraint_violation, float ): report.append("max_constraint_violation must be a float or None") if report: msg = ( "The following arguments to InternalOptimizeResult are invalid:\n" + "\n".join(report) ) raise TypeError(msg) class AlgorithmMeta(ABCMeta): """Metaclass to get repr, algo_info and name for classes, not just instances.""" def __repr__(self) -> str: if hasattr(self, "__algo_info__") and self.__algo_info__ is not None: out = f"om.algos.{self.__algo_info__.name}" else: out = self.__class__.__name__ return out @property def name(self) -> str: if hasattr(self, "__algo_info__") and self.__algo_info__ is not None: out = self.__algo_info__.name else: out = self.__class__.__name__ return out @property def algo_info(self) -> AlgoInfo: if not hasattr(self, "__algo_info__") or self.__algo_info__ is None: msg = ( f"The algorithm {self.name} does not 
have have the __algo_info__ " "attribute. Use the `mark.minimizer` decorator to add this attribute." ) raise AttributeError(msg) return self.__algo_info__ @dataclass(frozen=True) class Algorithm(ABC, metaclass=AlgorithmMeta): """Base class for all optimization algorithms in optimagic. To add an optimizer to optimagic you need to subclass Algorithm and overide the ``_solve_internal_problem`` method. """ @abstractmethod def _solve_internal_problem( self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] ) -> InternalOptimizeResult: pass def __post_init__(self) -> None: for field in self.__dataclass_fields__: raw_value = getattr(self, field) target_type = typing.cast(type, self.__dataclass_fields__[field].type) if target_type in TYPE_CONVERTERS: try: value = TYPE_CONVERTERS[target_type](raw_value) except (KeyboardInterrupt, SystemExit): raise except Exception as e: msg = ( f"Could not convert the value of the field {field} to the " f"expected type {target_type}." ) raise InvalidAlgoOptionError(msg) from e object.__setattr__(self, field, value) def with_option(self, **kwargs: Any) -> Self: """Create a modified copy with the given options.""" valid_keys = set(self.__dataclass_fields__) - {"__algo_info__"} invalid = set(kwargs) - valid_keys if invalid: raise InvalidAlgoOptionError( f"The keyword arguments {invalid} are not valid options for " f"the algorithm {self.name}" ) return replace(self, **kwargs) def with_stopping(self, **kwargs: Any) -> Self: """Create a modified copy with the given stopping options.""" options = {} for k, v in kwargs.items(): if k.startswith("stopping_"): options[k] = v else: options[f"stopping_{k}"] = v return self.with_option(**options) def with_convergence(self, **kwargs: Any) -> Self: """Create a modified copy with the given convergence options.""" options = {} for k, v in kwargs.items(): if k.startswith("convergence_"): options[k] = v else: options[f"convergence_{k}"] = v return self.with_option(**options) def 
solve_internal_problem( self, problem: InternalOptimizationProblem, x0: NDArray[np.float64], step_id: int, ) -> InternalOptimizeResult: """Solve the internal optimization problem. This method is called internally by `minimize` or `maximize` to solve the internal optimization problem and process the results. """ problem = problem.with_new_history().with_step_id(step_id) if problem.logger: problem.logger.step_store.update( step_id, {"status": str(StepStatus.RUNNING.value)} ) result = self._solve_internal_problem(problem, x0) if (not self.algo_info.disable_history) and (result.history is None): result = replace(result, history=problem.history) if problem.logger: problem.logger.step_store.update( step_id, {"status": str(StepStatus.COMPLETE.value)} ) return result def with_option_if_applicable(self, **kwargs: Any) -> Self: """Call with_option only with applicable keyword arguments.""" valid_keys = set(self.__dataclass_fields__) - {"__algo_info__"} invalid = set(kwargs) - valid_keys if invalid: msg = ( "The following algo_options were ignored because they are not " f"compatible with {self.name}:\n\n {invalid}" ) warnings.warn(msg) kwargs = {k: v for k, v in kwargs.items() if k in valid_keys} return self.with_option(**kwargs) @property def name(self) -> str: """The name of the algorithm.""" # cannot call algo_info here because it would be an infinite recursion if hasattr(self, "__algo_info__") and self.__algo_info__ is not None: return self.__algo_info__.name return self.__class__.__name__ @property def algo_info(self) -> AlgoInfo: """Information about the algorithm.""" if not hasattr(self, "__algo_info__") or self.__algo_info__ is None: msg = ( f"The algorithm {self.name} does not have have the __algo_info__ " "attribute. Use the `mark.minimizer` decorator to add this attribute." 
            )
            raise AttributeError(msg)
        return self.__algo_info__


================================================
FILE: src/optimagic/optimization/convergence_report.py
================================================

import numpy as np
from numpy.typing import NDArray

from optimagic.optimization.history import History


def get_convergence_report(history: History) -> dict[str, dict[str, float]] | None:
    """Summarize recent criterion and parameter changes of accepted evaluations.

    Returns None if fewer than two accepted evaluations are available. Otherwise
    returns a dict with keys "one_step" and "five_steps", each mapping the four
    change measures (relative/absolute criterion/params change) to floats.
    """
    is_accepted = history.is_accepted

    # Restrict to accepted evaluations only.
    critvals = np.array(history.fun, dtype=np.float64)[is_accepted]
    params = np.array(history.flat_params, dtype=np.float64)[is_accepted]

    if len(critvals) < 2:
        out = None
    else:
        out = {}
        for name, n_entries in [("one_step", 2), ("five_steps", min(6, len(critvals)))]:
            # Look at the trailing window of the accepted history.
            relevant_critvals = critvals[-n_entries:]
            relevant_params = params[-n_entries:]

            max_f_rel, max_f_abs = _get_max_f_changes(relevant_critvals)
            max_x_rel, max_x_abs = _get_max_x_changes(relevant_params)

            col_dict = {
                "relative_criterion_change": max_f_rel,
                "relative_params_change": max_x_rel,
                "absolute_criterion_change": max_f_abs,
                "absolute_params_change": max_x_abs,
            }
            out[name] = col_dict
    return out


def _get_max_f_changes(critvals: NDArray[np.float64]) -> tuple[float, float]:
    """Return (relative, absolute) criterion change over the window.

    Compares the most recent value against the first value in the window.
    """
    best_val = critvals[-1]
    worst_val = critvals[0]

    max_change_abs = np.abs(best_val - worst_val)
    # Bound the denominator away from zero so the relative change is finite.
    denom = max(np.abs(best_val), 0.1)

    max_change_rel = max_change_abs / denom

    return max_change_rel, max_change_abs


def _get_max_x_changes(params: NDArray[np.float64]) -> tuple[float, float]:
    """Return (relative, absolute) maximal parameter distance from the last point."""
    best_x = params[-1]
    diffs = params - best_x
    # Per-parameter denominator, bounded away from zero.
    denom = np.clip(np.abs(best_x), 0.1, np.inf)

    distances_abs = np.linalg.norm(diffs, axis=1)
    max_change_abs = distances_abs.max()

    scaled = diffs / denom
    distances_rel = np.linalg.norm(scaled, axis=1)
    max_change_rel = distances_rel.max()
    return max_change_rel, max_change_abs


================================================
FILE: src/optimagic/optimization/create_optimization_problem.py
================================================

import warnings
from dataclasses import dataclass
from pathlib import Path
import Path from typing import Any, Callable, Type from optimagic import deprecations from optimagic.algorithms import ALL_ALGORITHMS from optimagic.deprecations import ( handle_log_options_throw_deprecated_warning, replace_and_warn_about_deprecated_algo_options, replace_and_warn_about_deprecated_bounds, ) from optimagic.differentiation.numdiff_options import ( NumdiffOptions, NumdiffPurpose, get_default_numdiff_options, pre_process_numdiff_options, ) from optimagic.exceptions import ( AliasError, InvalidFunctionError, MissingInputError, ) from optimagic.logging.logger import LogOptions, SQLiteLogOptions from optimagic.optimization.algorithm import AlgoInfo, Algorithm from optimagic.optimization.fun_value import ( SpecificFunctionValue, convert_fun_output_to_function_value, enforce_return_type, enforce_return_type_with_jac, ) from optimagic.optimization.multistart_options import ( MultistartOptions, pre_process_multistart, ) from optimagic.optimization.scipy_aliases import ( map_method_to_algorithm, split_fun_and_jac, ) from optimagic.parameters.bounds import Bounds, pre_process_bounds from optimagic.parameters.scaling import ScalingOptions, pre_process_scaling from optimagic.shared.process_user_function import ( get_kwargs_from_args, infer_aggregation_level, partial_func_of_params, ) from optimagic.typing import AggregationLevel, Direction, ErrorHandling, PyTree from optimagic.utilities import propose_alternatives @dataclass(frozen=True) class OptimizationProblem: """Collect everything that defines the optimization problem. The attributes are very close to the arguments of `maximize` and `minimize` but they are converted to stricter types. For example, the bounds argument that can be a sequence of tuples, a scipy.optimize.Bounds object or an optimagic.Bounds when calling `maximize` or `minimize` is converted to an optimagic.Bounds object. All deprecated arguments are removed and all scipy aliases are replaced by their optimagic counterparts. 
All user provided functions are partialled if corresponding `kwargs` dictionaries were provided. # TODO: Document attributes after other todos are resolved. """ fun: Callable[[PyTree], SpecificFunctionValue] params: PyTree algorithm: Algorithm bounds: Bounds | None # TODO: Only allow list[Constraint] or Constraint constraints: list[dict[str, Any]] jac: Callable[[PyTree], PyTree] | None fun_and_jac: Callable[[PyTree], tuple[SpecificFunctionValue, PyTree]] | None numdiff_options: NumdiffOptions # TODO: logging will become None | Logger and log_options will be removed error_handling: ErrorHandling logging: LogOptions | None error_penalty: dict[str, Any] | None scaling: ScalingOptions | None multistart: MultistartOptions | None collect_history: bool skip_checks: bool direction: Direction fun_eval: SpecificFunctionValue def create_optimization_problem( direction, fun, params, algorithm, *, bounds, fun_kwargs, constraints, algo_options, jac, jac_kwargs, fun_and_jac, fun_and_jac_kwargs, numdiff_options, logging, error_handling, error_penalty, scaling, multistart, collect_history, skip_checks, # scipy aliases x0, method, args, # scipy arguments that are not yet supported hess, hessp, callback, # scipy arguments that will never be supported options, tol, # deprecated arguments criterion, criterion_kwargs, derivative, derivative_kwargs, criterion_and_derivative, criterion_and_derivative_kwargs, lower_bounds, log_options, upper_bounds, soft_lower_bounds, soft_upper_bounds, scaling_options, multistart_options, ): # ================================================================================== # error handling needed as long as fun is an optional argument # ================================================================================== if fun_and_jac is None and fun is None and criterion is None: msg = ( "Missing objective function. Please provide an objective function as the " "first positional argument or as the keyword argument `fun` or " " with `fun_and_jac`." 
) raise MissingInputError(msg) if params is None and x0 is None: msg = ( "Missing start parameters. Please provide start parameters as the second " "positional argument or as the keyword argument `params`." ) raise MissingInputError(msg) if algorithm is None and method is None: msg = ( "Missing algorithm. Please provide an algorithm as the third positional " "argument or as the keyword argument `algorithm`." ) raise MissingInputError(msg) if fun_and_jac is not None and fun is None and criterion is None: if isinstance(fun_and_jac, list): raise NotImplementedError( "If `fun_and_jac` is a list of callables, `fun` is not optional. " ) fun = split_fun_and_jac(fun_and_jac, target="fun") # ================================================================================== # deprecations # ================================================================================== if log_options is not None: logging = handle_log_options_throw_deprecated_warning(log_options, logging) if criterion is not None: deprecations.throw_criterion_future_warning() fun = criterion if fun is None else fun if criterion_kwargs is not None: deprecations.throw_criterion_kwargs_future_warning() fun_kwargs = criterion_kwargs if fun_kwargs is None else fun_kwargs if derivative is not None: deprecations.throw_derivative_future_warning() jac = derivative if jac is None else jac if derivative_kwargs is not None: deprecations.throw_derivative_kwargs_future_warning() jac_kwargs = derivative_kwargs if jac_kwargs is None else jac_kwargs if criterion_and_derivative is not None: deprecations.throw_criterion_and_derivative_future_warning() fun_and_jac = criterion_and_derivative if fun_and_jac is None else fun_and_jac if criterion_and_derivative_kwargs is not None: deprecations.throw_criterion_and_derivative_kwargs_future_warning() fun_and_jac_kwargs = ( criterion_and_derivative_kwargs if fun_and_jac_kwargs is None else fun_and_jac_kwargs ) if scaling_options is not None: 
deprecations.throw_scaling_options_future_warning() if scaling is True and scaling_options is not None: scaling = scaling_options if multistart_options is not None: deprecations.throw_multistart_options_future_warning() if multistart is True and multistart_options is not None: multistart = multistart_options deprecations.throw_dict_constraints_future_warning_if_required(constraints) algo_options = replace_and_warn_about_deprecated_algo_options(algo_options) bounds = replace_and_warn_about_deprecated_bounds( lower_bounds=lower_bounds, upper_bounds=upper_bounds, bounds=bounds, soft_lower_bounds=soft_lower_bounds, soft_upper_bounds=soft_upper_bounds, ) if isinstance(jac, dict): jac = deprecations.replace_and_warn_about_deprecated_derivatives(jac, "jac") if isinstance(fun_and_jac, dict): fun_and_jac = deprecations.replace_and_warn_about_deprecated_derivatives( fun_and_jac, "fun_and_jac" ) # ================================================================================== # handle scipy aliases # ================================================================================== if x0 is not None: if params is not None: msg = ( "x0 is an alias for params (for better compatibility with scipy). " "Do not use both x0 and params." ) raise AliasError(msg) else: params = x0 if method is not None: if algorithm is not None: msg = ( "method is an alias for algorithm to select the scipy optimizers under " "their original name. Do not use both method and algorithm." ) raise AliasError(msg) else: algorithm = map_method_to_algorithm(method) if args is not None: if ( fun_kwargs is not None or jac_kwargs is not None or fun_and_jac_kwargs is not None ): msg = ( "args is an alternative to fun_kwargs, jac_kwargs and " "fun_and_jac_kwargs that optimagic supports for compatibility " "with scipy. Do not use args in conjunction with any of the other " "arguments." 
) raise AliasError(msg) else: kwargs = get_kwargs_from_args(args, fun, offset=1) fun_kwargs, jac_kwargs, fun_and_jac_kwargs = kwargs, kwargs, kwargs # jac is not an alias but we need to handle the case where `jac=True`, i.e. fun is # actually fun_and_jac. This is not recommended in optimagic because then optimizers # cannot evaluate fun in isolation but we can easily support it for compatibility. if jac is True: jac = None if fun_and_jac is None: fun_and_jac = fun fun = split_fun_and_jac(fun_and_jac, target="fun") # ================================================================================== # Handle scipy arguments that are not yet implemented # ================================================================================== if hess is not None: msg = ( "The hess argument is not yet supported in optimagic. Creat an issue on " "https://github.com/optimagic-dev/optimagic/ if you have urgent need " "for this feature." ) raise NotImplementedError(msg) if hessp is not None: msg = ( "The hessp argument is not yet supported in optimagic. Creat an issue on " "https://github.com/optimagic-dev/optimagic/ if you have urgent need " "for this feature." ) raise NotImplementedError(msg) if callback is not None: msg = ( "The callback argument is not yet supported in optimagic. Creat an issue " "on https://github.com/optimagic-dev/optimagic/ if you have urgent " "need for this feature." ) raise NotImplementedError(msg) # ================================================================================== # Handle scipy arguments that will never be supported # ================================================================================== if options is not None: # TODO: Add link to a how-to guide or tutorial for this msg = ( "The options argument is not supported in optimagic. Please use the " "algo_options argument instead." 
) raise NotImplementedError(msg) if tol is not None: # TODO: Add link to a how-to guide or tutorial for this msg = ( "The tol argument is not supported in optimagic. Please use " "algo_options or configured algorithms instead to set convergence criteria " "for your optimizer." ) raise NotImplementedError(msg) # ================================================================================== # Convert literals to enums # ================================================================================== error_handling = ErrorHandling(error_handling) # ================================================================================== # Set default values and check options # ================================================================================== bounds = pre_process_bounds(bounds) scaling = pre_process_scaling(scaling) multistart = pre_process_multistart(multistart) numdiff_options = pre_process_numdiff_options(numdiff_options) constraints = deprecations.pre_process_constraints(constraints) if numdiff_options is None: numdiff_options = get_default_numdiff_options(purpose=NumdiffPurpose.OPTIMIZE) fun_kwargs = {} if fun_kwargs is None else fun_kwargs constraints = [] if constraints is None else constraints algo_options = {} if algo_options is None else algo_options jac_kwargs = {} if jac_kwargs is None else jac_kwargs fun_and_jac_kwargs = {} if fun_and_jac_kwargs is None else fun_and_jac_kwargs error_penalty = {} if error_penalty is None else error_penalty if isinstance(logging, str) or isinstance(logging, Path): log_path = Path(logging) logging = SQLiteLogOptions(log_path) # ================================================================================== # evaluate fun for the first time # ================================================================================== fun = partial_func_of_params( func=fun, kwargs=fun_kwargs, name="criterion", skip_checks=skip_checks, ) # This should be done as late as possible; It has to be done here to infer the # 
problem type until the decorator approach becomes mandatory. # TODO: Move this into `_optimize` as soon as we reach 0.6.0 try: fun_eval = fun(params) except (KeyboardInterrupt, SystemExit): raise except Exception as e: msg = "Error while evaluating fun at start params." raise InvalidFunctionError(msg) from e if deprecations.is_dict_output(fun_eval): deprecations.throw_dict_output_warning() # ================================================================================== # infer the problem type # ================================================================================== if deprecations.is_dict_output(fun_eval): problem_type = deprecations.infer_problem_type_from_dict_output(fun_eval) else: problem_type = infer_aggregation_level(fun) if ( problem_type == AggregationLevel.LEAST_SQUARES and direction == Direction.MAXIMIZE ): raise InvalidFunctionError("Least-squares problems cannot be maximized.") # ================================================================================== # process the fun_eval; Can be removed once the first evaluation gets moved to # a later point where the `enforce` decorator has already been applied. 
# ================================================================================== if deprecations.is_dict_output(fun_eval): fun_eval = deprecations.convert_dict_to_function_value(fun_eval) fun = deprecations.replace_dict_output(fun) else: fun_eval = convert_fun_output_to_function_value(fun_eval, problem_type) fun = enforce_return_type(problem_type)(fun) # ================================================================================== # Process the user provided algorithm # ================================================================================== algorithm = pre_process_user_algorithm(algorithm) algorithm = algorithm.with_option_if_applicable(**algo_options) if algorithm.algo_info.solver_type == AggregationLevel.LIKELIHOOD: if problem_type not in [ AggregationLevel.LIKELIHOOD, AggregationLevel.LEAST_SQUARES, ]: raise InvalidFunctionError( "Likelihood solvers can only be used with likelihood or least-squares " "problems." ) elif algorithm.algo_info.solver_type == AggregationLevel.LEAST_SQUARES: if problem_type != AggregationLevel.LEAST_SQUARES: raise InvalidFunctionError( "Least-squares solvers can only be used with least-squares problems." 
) # ================================================================================== # select the correct derivative functions # ================================================================================== if jac is not None: jac = pre_process_derivatives( candidate=jac, name="jac", solver_type=algorithm.algo_info.solver_type ) if fun_and_jac is not None: fun_and_jac = pre_process_derivatives( candidate=fun_and_jac, name="fun_and_jac", solver_type=algorithm.algo_info.solver_type, ) # ================================================================================== # partial the kwargs into corresponding functions # ================================================================================== if jac is not None: jac = partial_func_of_params( func=jac, kwargs=jac_kwargs, name="derivative", skip_checks=skip_checks, ) if fun_and_jac is not None: fun_and_jac = partial_func_of_params( func=fun_and_jac, kwargs=fun_and_jac_kwargs, name="criterion_and_derivative", skip_checks=skip_checks, ) fun_and_jac = deprecations.replace_dict_output(fun_and_jac) fun_and_jac = enforce_return_type_with_jac(algorithm.algo_info.solver_type)( fun_and_jac ) # ================================================================================== # Check types of arguments # ================================================================================== if not skip_checks: if params is None: raise ValueError("params cannot be None") if not isinstance(fun, Callable): raise ValueError("fun must be a callable") if not isinstance(algorithm, Algorithm): raise ValueError("algorithm must be an Algorithm object.") if not isinstance(algo_options, dict | None): raise ValueError("algo_options must be a dictionary or None") if not isinstance(algorithm.algo_info, AlgoInfo): raise ValueError("algo_info must be an AlgoInfo object") if not isinstance(bounds, Bounds | None): raise ValueError("bounds must be a Bounds object or None") if not all(isinstance(c, dict) for c in constraints): # TODO: Only 
allow list[Constraint] raise ValueError("constraints must be a list of dictionaries") if not isinstance(jac, Callable | None): raise ValueError("jac must be a callable or None") if not isinstance(fun_and_jac, Callable | None): raise ValueError("fun_and_jac must be a callable or None") if not isinstance(numdiff_options, NumdiffOptions): raise ValueError("numdiff_options must be a NumdiffOptions object") if not isinstance(logging, bool | Path | LogOptions | None): raise ValueError( "logging must be a boolean, a path, a LogOptions instance or None" ) if not isinstance(log_options, dict | None): raise ValueError("log_options must be a dictionary or None") if not isinstance(error_penalty, dict | None): raise ValueError("error_penalty must be a dictionary or None") if not isinstance(scaling, ScalingOptions | None): raise ValueError("scaling must be a ScalingOptions object or None") if not isinstance(multistart, MultistartOptions | None): raise ValueError("multistart must be a MultistartOptions object or None") if not isinstance(collect_history, bool): raise ValueError("collect_history must be a boolean") # ================================================================================== # create the problem object # ================================================================================== problem = OptimizationProblem( fun=fun, params=params, algorithm=algorithm, bounds=bounds, constraints=constraints, jac=jac, fun_and_jac=fun_and_jac, numdiff_options=numdiff_options, logging=logging, error_handling=error_handling, error_penalty=error_penalty, scaling=scaling, multistart=multistart, collect_history=collect_history, skip_checks=skip_checks, direction=direction, fun_eval=fun_eval, ) return problem def pre_process_derivatives(candidate, name, solver_type): if callable(candidate): candidate = [candidate] out = None for func in candidate: if not callable(func): raise ValueError(f"{name} must be a callable or sequence of callables.") problem_type = 
infer_aggregation_level(func) if problem_type == solver_type: out = func if out is None: msg = ( f"You used the `{name}` argument but none of the callables you provided " "has the correct aggregation level for your selected optimization " "algorithm. Falling back to numerical derivatives." ) warnings.warn(msg) return out def pre_process_user_algorithm( algorithm: str | Algorithm | Type[Algorithm], ) -> Algorithm: """Process the user specfied algorithm.""" if isinstance(algorithm, str): try: # Use ALL_ALGORITHMS and not just AVAILABLE_ALGORITHMS such that the # algorithm specific error message with installation instruction will be # reached if an optional dependency is not installed. algorithm = ALL_ALGORITHMS[algorithm]() except KeyError: proposed = propose_alternatives(algorithm, list(ALL_ALGORITHMS)) raise ValueError( f"Invalid algorithm: {algorithm}. Did you mean {proposed}?" ) from None elif isinstance(algorithm, type) and issubclass(algorithm, Algorithm): algorithm = algorithm() return algorithm ================================================ FILE: src/optimagic/optimization/error_penalty.py ================================================ from typing import Callable import numpy as np from numpy.typing import NDArray from optimagic.config import CRITERION_PENALTY_CONSTANT, CRITERION_PENALTY_SLOPE from optimagic.optimization.fun_value import ( LeastSquaresFunctionValue, LikelihoodFunctionValue, ScalarFunctionValue, SpecificFunctionValue, ) from optimagic.typing import AggregationLevel, Direction def _scalar_penalty( x: NDArray[np.float64], constant: float | NDArray[np.float64], slope: float | NDArray[np.float64], x0: NDArray[np.float64], dim_out: int | None = None, ) -> tuple[ScalarFunctionValue, NDArray[np.float64]]: # noqa: ARG001 value = constant + slope * np.linalg.norm(x - x0) jac = slope * (x - x0) / np.linalg.norm(x - x0) return ScalarFunctionValue(value=value), jac def _likelihood_penalty( x: NDArray[np.float64], constant: float | NDArray[np.float64], 
    slope: float | NDArray[np.float64],
    x0: NDArray[np.float64],
    dim_out: int,
) -> tuple[LikelihoodFunctionValue, NDArray[np.float64]]:  # noqa: ARG001
    # Spread the scalar penalty evenly across the dim_out likelihood
    # contributions so that their sum equals the scalar penalty value.
    factor = (constant + slope * np.linalg.norm(x - x0)) / dim_out
    contrib = np.ones(dim_out) * factor
    # Every contribution is identical, so every Jacobian row is the same
    # gradient, scaled down by dim_out.
    row = slope * (x - x0) / (dim_out * np.linalg.norm(x - x0))
    jac = np.full((dim_out, len(x)), row)
    return LikelihoodFunctionValue(value=contrib), jac


def _penalty_residuals(
    x: NDArray[np.float64],
    constant: float | NDArray[np.float64],
    slope: float | NDArray[np.float64],
    x0: NDArray[np.float64],
    dim_out: int,
) -> tuple[LeastSquaresFunctionValue, NDArray[np.float64]]:
    """Return penalty residuals and their Jacobian for least-squares solvers.

    The residuals are chosen such that the sum of their squares equals the
    scalar penalty value.
    """
    factor = np.sqrt((constant + slope * np.linalg.norm(x - x0)) / dim_out)
    contrib = np.ones(dim_out) * factor

    # Chain rule: d/dx sqrt(p(x) / dim_out) with p the scalar penalty.
    scalar_penalty, _ = _scalar_penalty(x, constant, slope, x0)
    inner_deriv = slope * (x - x0) / np.linalg.norm(x - x0)
    outer_deriv = 0.5 / np.sqrt(scalar_penalty.value * dim_out)
    row = outer_deriv * inner_deriv
    jac = np.full((dim_out, len(x)), row)
    return LeastSquaresFunctionValue(value=contrib), jac
_likelihood_penalty elif solver_type == AggregationLevel.LEAST_SQUARES: _penalty = _penalty_residuals def penalty( x: NDArray[np.float64], ) -> tuple[SpecificFunctionValue, NDArray[np.float64]]: out = _penalty( x=x, constant=constant, slope=slope, x0=start_x, dim_out=dim_out, ) return out return penalty def _process_error_penalty( error_penalty: dict[str, float] | None, first_value: float | NDArray[np.float64], direction: Direction, ) -> tuple[float | NDArray[np.float64], float | NDArray[np.float64]]: """Add default options to error_penalty options.""" if error_penalty is not None: error_penalty = error_penalty.copy() else: error_penalty = {} if direction == Direction.MINIMIZE: default_constant = ( first_value + np.abs(first_value) + CRITERION_PENALTY_CONSTANT ) default_slope = CRITERION_PENALTY_SLOPE elif direction == Direction.MAXIMIZE: default_constant = ( first_value - np.abs(first_value) - CRITERION_PENALTY_CONSTANT ) default_slope = -CRITERION_PENALTY_SLOPE else: raise ValueError() constant = error_penalty.get("constant", default_constant) slope = error_penalty.get("slope", default_slope) return constant, slope ================================================ FILE: src/optimagic/optimization/fun_value.py ================================================ import functools from abc import ABC, abstractmethod from dataclasses import dataclass from typing import Any, Callable, ParamSpec import numpy as np from numpy.typing import NDArray from pybaum import tree_just_flatten from optimagic.exceptions import InvalidFunctionError from optimagic.parameters.tree_registry import get_registry from optimagic.typing import AggregationLevel, PyTree, Scalar from optimagic.utilities import isscalar @dataclass(frozen=True) class FunctionValue: value: float | PyTree info: dict[str, Any] | None = None class SpecificFunctionValue(FunctionValue, ABC): @abstractmethod def internal_value( self, solver_type: AggregationLevel ) -> float | NDArray[np.float64]: pass 
@dataclass(frozen=True)
class ScalarFunctionValue(SpecificFunctionValue):
    """Function value of a scalar objective function."""

    value: Scalar
    info: dict[str, Any] | None = None

    def __post_init__(self) -> None:
        # Fail early if the user returned a non-scalar from a scalar objective.
        if not isscalar(self.value):
            raise InvalidFunctionError(
                f"Scalar objective values need to be scalars, not: {type(self.value)}. "
                "If you meant to provide a scalar objective function, make sure it "
                "returns a scalar value. If you meant to provide a least_squares or "
                "likelihood function, use the mark.least_squares or mark.likelihood "
                "decorators."
            )

    def internal_value(self, solver_type: AggregationLevel) -> float:
        """Return the scalar value; only scalar solvers are supported."""
        if solver_type != AggregationLevel.SCALAR:
            raise InvalidFunctionError(
                f"You are using a {solver_type.value} optimizer but provided a "
                "scalar objective function."
            )
        return float(self.value)


@dataclass(frozen=True)
class LeastSquaresFunctionValue(SpecificFunctionValue):
    """Function value of a least-squares objective function (residuals)."""

    value: PyTree
    info: dict[str, Any] | None = None

    def __post_init__(self) -> None:
        # A least-squares objective must return a vector of residuals.
        if isscalar(self.value):
            raise InvalidFunctionError(
                "Least squares objective values cannot be scalars. Your value has "
                f"scalar type: {type(self.value)}. If you meant to provide a least "
                "squares objective function, make sure it does not have a scalar value."
                " If you meant to provide a scalar function, use the mark.scalar "
                "decorator."
            )

    def internal_value(
        self, solver_type: AggregationLevel
    ) -> float | NDArray[np.float64]:
        """Convert the residuals to the representation needed by the solver.

        Residuals are compatible with every aggregation level: they are passed
        through for least-squares solvers, squared for likelihood solvers, and
        aggregated to a sum of squares for scalar solvers.
        """
        resid = _get_flat_value(self.value)
        converted: float | NDArray[np.float64]
        if solver_type == AggregationLevel.LEAST_SQUARES:
            converted = resid
        elif solver_type == AggregationLevel.LIKELIHOOD:
            converted = resid**2
        else:
            converted = float(resid @ resid)
        return converted


@dataclass(frozen=True)
class LikelihoodFunctionValue(SpecificFunctionValue):
    """Function value of a likelihood objective function (log-likelihoods)."""

    value: PyTree
    info: dict[str, Any] | None = None

    def __post_init__(self) -> None:
        # A likelihood objective must return one contribution per observation.
        if isscalar(self.value):
            raise InvalidFunctionError(
                "Likelihood objective values cannot be scalars. Your value has scalar "
                f"type: {type(self.value)}. If you meant to provide a likelihood "
                "objective function, make sure it does not have a scalar value. If you "
                "meant to provide a scalar function, use the mark.scalar decorator."
            )

    def internal_value(
        self, solver_type: AggregationLevel
    ) -> float | NDArray[np.float64]:
        """Convert the contributions to the representation needed by the solver.

        Likelihood contributions can be passed through to likelihood solvers or
        summed for scalar solvers, but cannot be used with least-squares
        solvers.
        """
        loglikes = _get_flat_value(self.value)
        converted: float | NDArray[np.float64]
        if solver_type == AggregationLevel.LIKELIHOOD:
            converted = loglikes
        elif solver_type == AggregationLevel.SCALAR:
            converted = float(np.sum(loglikes))
        else:
            raise InvalidFunctionError(
                "You are using a least_squares optimizer but provided a "
                "likelihood objective function."
            )
        return converted
def _get_flat_value(value: PyTree) -> NDArray[np.float64]:
    """Flatten a PyTree value to a 1d float64 numpy array.

    Uses fast paths for numpy arrays (the common case) and falls back to the
    extended pytree registry for arbitrary pytrees.
    """
    if isinstance(value, np.ndarray):
        # Fast paths: 1d arrays are used directly, nd arrays are flattened.
        flat = value if value.ndim == 1 else value.flatten()
    else:
        registry = get_registry(extended=True)
        flat = tree_just_flatten(value, registry=registry)
    return np.asarray(flat, dtype=np.float64)


def convert_fun_output_to_function_value(
    raw: Scalar | PyTree | FunctionValue, problem_type: AggregationLevel
) -> SpecificFunctionValue:
    """Wrap a raw user function output in the SpecificFunctionValue subclass
    matching the problem type."""
    converted: FunctionValue
    if problem_type == AggregationLevel.SCALAR:
        converted = _convert_output_to_scalar_function_value(raw)
    elif problem_type == AggregationLevel.LEAST_SQUARES:
        converted = _convert_output_to_least_squares_function_value(raw)
    elif problem_type == AggregationLevel.LIKELIHOOD:
        converted = _convert_output_to_likelihood_function_value(raw)
    return converted


def _convert_output_to_scalar_function_value(
    raw: Scalar | FunctionValue,
) -> ScalarFunctionValue:
    """Coerce raw output to ScalarFunctionValue, preserving any info dict."""
    if isinstance(raw, ScalarFunctionValue):
        return raw
    if isinstance(raw, FunctionValue):
        return ScalarFunctionValue(value=raw.value, info=raw.info)
    return ScalarFunctionValue(value=raw)


def _convert_output_to_least_squares_function_value(
    raw: PyTree | FunctionValue,
) -> LeastSquaresFunctionValue:
    """Coerce raw output to LeastSquaresFunctionValue, preserving any info
    dict."""
    if isinstance(raw, LeastSquaresFunctionValue):
        return raw
    if isinstance(raw, FunctionValue):
        return LeastSquaresFunctionValue(value=raw.value, info=raw.info)
    return LeastSquaresFunctionValue(value=raw)
def _convert_output_to_likelihood_function_value(
    raw: PyTree | FunctionValue,
) -> LikelihoodFunctionValue:
    """Coerce raw output to LikelihoodFunctionValue, preserving any info
    dict."""
    if isinstance(raw, LikelihoodFunctionValue):
        return raw
    if isinstance(raw, FunctionValue):
        return LikelihoodFunctionValue(value=raw.value, info=raw.info)
    return LikelihoodFunctionValue(value=raw)


P = ParamSpec("P")


def enforce_return_type(
    problem_type: AggregationLevel,
) -> Callable[
    [Callable[P, Scalar | PyTree | FunctionValue]], Callable[P, SpecificFunctionValue]
]:
    """Enforce a strict return type for objective functions based on
    problem_type.

    This has no effect if the function already returns the strictest possible
    type for the problem_type but converts everything else to that type.
    """

    def decorator_enforce(
        func: Callable[P, Scalar | PyTree | FunctionValue],
    ) -> Callable[P, SpecificFunctionValue]:
        # One wrapper per problem type so the return annotation is precise.
        if problem_type == AggregationLevel.SCALAR:

            @functools.wraps(func)
            def wrapper_enforce(
                *args: P.args, **kwargs: P.kwargs
            ) -> ScalarFunctionValue:
                raw = func(*args, **kwargs)
                return _convert_output_to_scalar_function_value(raw)

        elif problem_type == AggregationLevel.LEAST_SQUARES:

            @functools.wraps(func)
            def wrapper_enforce(
                *args: P.args, **kwargs: P.kwargs
            ) -> LeastSquaresFunctionValue:
                raw = func(*args, **kwargs)
                return _convert_output_to_least_squares_function_value(raw)

        elif problem_type == AggregationLevel.LIKELIHOOD:

            @functools.wraps(func)
            def wrapper_enforce(
                *args: P.args, **kwargs: P.kwargs
            ) -> LikelihoodFunctionValue:
                raw = func(*args, **kwargs)
                return _convert_output_to_likelihood_function_value(raw)

        return wrapper_enforce

    return decorator_enforce


def enforce_return_type_with_jac(
    problem_type: AggregationLevel,
) -> Callable[
    [Callable[P, tuple[Scalar | PyTree | FunctionValue, PyTree]]],
    Callable[P, tuple[SpecificFunctionValue, PyTree]],
]:
    """Enforce a strict return type for fun_and_jac based on problem_type.

    This has no effect if the first return value of the function already has
    the strictest possible type for the problem_type but converts everything
    else to that type. The second return value stays unchanged.
    """

    def decorator_enforce(
        func: Callable[P, tuple[Scalar | PyTree | FunctionValue, PyTree]],
    ) -> Callable[P, tuple[SpecificFunctionValue, PyTree]]:
        # Only the function value is converted; the jacobian is passed through.
        if problem_type == AggregationLevel.SCALAR:

            @functools.wraps(func)
            def wrapper_enforce(
                *args: P.args, **kwargs: P.kwargs
            ) -> tuple[ScalarFunctionValue, PyTree]:
                raw = func(*args, **kwargs)
                return (_convert_output_to_scalar_function_value(raw[0]), raw[1])

        elif problem_type == AggregationLevel.LEAST_SQUARES:

            @functools.wraps(func)
            def wrapper_enforce(
                *args: P.args, **kwargs: P.kwargs
            ) -> tuple[LeastSquaresFunctionValue, PyTree]:
                raw = func(*args, **kwargs)
                return (
                    _convert_output_to_least_squares_function_value(raw[0]),
                    raw[1],
                )

        elif problem_type == AggregationLevel.LIKELIHOOD:

            @functools.wraps(func)
            def wrapper_enforce(
                *args: P.args, **kwargs: P.kwargs
            ) -> tuple[LikelihoodFunctionValue, PyTree]:
                raw = func(*args, **kwargs)
                return (
                    _convert_output_to_likelihood_function_value(raw[0]),
                    raw[1],
                )

        return wrapper_enforce

    return decorator_enforce
# ================================================
# FILE: src/optimagic/optimization/history.py
# ================================================
import warnings
from dataclasses import dataclass
from functools import partial
from typing import Any, Callable, Iterable, Literal

import numpy as np
import pandas as pd
from numpy.typing import NDArray
from pybaum import leaf_names, tree_just_flatten

from optimagic.parameters.tree_registry import get_registry
from optimagic.timing import CostModel
from optimagic.typing import Direction, EvalTask, PyTree


@dataclass(frozen=True)
class HistoryEntry:
    # One evaluation record: parameters, function value (None for pure
    # jacobian evaluations), wall-clock timestamps, and the evaluation task.
    params: PyTree
    fun: float | None
    start_time: float
    stop_time: float
    task: EvalTask


class History:
    # TODO: add counters for the relevant evaluations
    def __init__(
        self,
        direction: Direction,
        params: list[PyTree] | None = None,
        fun: list[float | None] | None = None,
        start_time: list[float] | None = None,
        stop_time: list[float] | None = None,
        batches: list[int] | None = None,
        task: list[EvalTask] | None = None,
    ) -> None:
        """Initialize a history.

        The history must know the direction of the optimization problem in
        order to correctly return monotone sequences. The history can be
        initialized empty, for example for usage during an optimization
        process, or with data, for example to recover a history from a log.

        """
        _validate_args_are_all_none_or_lists_of_same_length(
            params, fun, start_time, stop_time, batches, task
        )

        self.direction = direction
        self._params = params if params is not None else []
        self._fun = fun if fun is not None else []
        self._start_time = start_time if start_time is not None else []
        self._stop_time = stop_time if stop_time is not None else []
        self._batches = batches if batches is not None else []
        self._task = task if task is not None else []

    # ==================================================================================
    # Methods to add entries to the history
    # ==================================================================================

    def add_entry(self, entry: HistoryEntry, batch_id: int | None = None) -> None:
        """Append a single evaluation to the history."""
        if batch_id is None:
            batch_id = self._get_next_batch_id()
        self._params.append(entry.params)
        self._fun.append(entry.fun)
        self._start_time.append(entry.start_time)
        self._stop_time.append(entry.stop_time)
        self._batches.append(batch_id)
        self._task.append(entry.task)

    def add_batch(
        self, batch: list[HistoryEntry], batch_size: int | None = None
    ) -> None:
        """Append several evaluations, assigning batch ids in groups.

        The naming is complicated here: `batch` refers to the entries to be
        added to the history in one go, while `batch_size` is a property of a
        parallelizing algorithm that influences how the batch ids are
        assigned. It is not the same as the length of `batch`.

        """
        if batch_size is None:
            batch_size = len(batch)

        start = self._get_next_batch_id()
        n_batches = int(np.ceil(len(batch) / batch_size))
        ids = np.repeat(np.arange(start, start + n_batches), batch_size)[: len(batch)]

        for entry, assigned_id in zip(batch, ids, strict=False):
            self.add_entry(entry, assigned_id)

    def _get_next_batch_id(self) -> int:
        # Batch ids are consecutive; an empty history starts at 0.
        return 0 if not self._batches else self._batches[-1] + 1

    # ==================================================================================
    # Properties and methods to access the history
    # ==================================================================================

    # Function data, function value, and monotone function value
    # ----------------------------------------------------------------------------------

    def fun_data(self, cost_model: CostModel, monotone: bool = False) -> pd.DataFrame:
        """Return the function value data.

        Args:
            cost_model: The cost model that is used to calculate the time
                measure.
            monotone: Whether to return the monotone function values. Defaults
                to False.

        Returns:
            pd.DataFrame: The function value data with columns 'fun', 'time'
                and 'task'. If monotone is False, 'fun' is the raw function
                value, otherwise the monotone function value.

        """
        if monotone:
            fun = self.monotone_fun
        else:
            fun = np.array(self.fun, dtype=np.float64)  # converts None to nan

        timings = self._get_total_timings(cost_model)
        task = _task_to_categorical(self.task)

        if not self._is_serial():
            # In the non-serial case, we take the batching into account and
            # reduce timings and fun to one value per batch.
            timings = _apply_reduction_to_batches(
                data=timings,
                batch_ids=self.batches,
                reduction_function=cost_model.aggregate_batch_time,
            )

            min_or_max = (
                np.nanmin if self.direction == Direction.MINIMIZE else np.nanmax
            )
            fun = _apply_reduction_to_batches(
                data=fun,
                batch_ids=self.batches,
                reduction_function=min_or_max,  # type: ignore[arg-type]
            )

            # Verify that tasks are homogeneous in each batch, and select first
            # if true.
            tasks_and_batches = pd.DataFrame({"task": task, "batches": self.batches})
            grouped_tasks = tasks_and_batches.groupby("batches")["task"]
            if not grouped_tasks.nunique().eq(1).all():
                raise ValueError("Tasks are not homogeneous in each batch.")
            task = grouped_tasks.first().reset_index(drop=True)

        time = np.cumsum(timings)
        return pd.DataFrame({"fun": fun, "time": time, "task": task})

    @property
    def fun(self) -> list[float | None]:
        return self._fun

    @property
    def monotone_fun(self) -> NDArray[np.float64]:
        """The monotone function value of the history.

        If the value is None, the output at that position is nan.

        """
        return _calculate_monotone_sequence(self.fun, direction=self.direction)

    # Acceptance
    # ----------------------------------------------------------------------------------

    @property
    def is_accepted(self) -> NDArray[np.bool_]:
        """Boolean indicator whether a function value is accepted.

        A function value is accepted if it is smaller (or equal) than the
        monotone function value counterpart in the case of minimization, or
        larger (or equal) in the case of maximization. If the value is None,
        the output at that position is False.

        """
        fun_arr = np.array(self.fun, dtype=np.float64)
        if self.direction == Direction.MINIMIZE:
            return fun_arr <= self.monotone_fun
        elif self.direction == Direction.MAXIMIZE:
            return fun_arr >= self.monotone_fun

    # Parameter data, params, flat params, and flat params names
    # ----------------------------------------------------------------------------------

    def params_data(
        self, dropna: bool = False, collapse_batches: bool = False
    ) -> pd.DataFrame:
        """Return the parameter data.

        Args:
            dropna: Whether to drop rows with missing function values. These
                correspond to parameters that were used to calculate pure
                jacobians. Defaults to False.
            collapse_batches: Whether to collapse the batches and only keep the
                parameters that led to the minimal (or maximal) function value
                in each batch. Defaults to False.

        Returns:
            pd.DataFrame: The parameter data. The columns are: 'name' (the
                parameter names), 'value' (the parameter values), 'task' (the
                task for which the parameter was used), and 'counter' (a
                counter that is unique for each row).

        """
        wide = pd.DataFrame(self.flat_params, columns=self.flat_param_names)
        wide["task"] = _task_to_categorical(self.task)
        wide["fun"] = self.fun

        # If requested, we collapse the batches and only keep the parameters
        # that led to the minimal (or maximal) function value in each batch.
        if collapse_batches and not self._is_serial():
            wide["batches"] = self.batches

            # Verify that tasks are homogeneous in each batch
            if not wide.groupby("batches")["task"].nunique().eq(1).all():
                raise ValueError("Tasks are not homogeneous in each batch.")

            # We fill nans with inf or -inf to make sure that the idxmin/idxmax
            # is well-defined, since there is the possibility that all fun
            # values are nans in a batch.
            if self.direction == Direction.MINIMIZE:
                loc = (
                    wide.assign(fun_without_nan=wide["fun"].fillna(np.inf))
                    .groupby("batches")["fun_without_nan"]
                    .idxmin()
                )
            elif self.direction == Direction.MAXIMIZE:
                loc = (
                    wide.assign(fun_without_nan=wide["fun"].fillna(-np.inf))
                    .groupby("batches")["fun_without_nan"]
                    .idxmax()
                )

            wide = wide.loc[loc].drop(columns="batches")

        # We drop rows with missing values if requested. These correspond to
        # parameters that were used to calculate pure jacobians. This step must
        # be done before dropping the fun column and before setting the
        # counter.
        if dropna:
            wide = wide.dropna(subset="fun")

        wide["counter"] = np.arange(len(wide))

        long = pd.melt(
            wide,
            var_name="name",
            value_name="value",
            id_vars=["task", "counter", "fun"],
        )
        data = long.reindex(columns=["counter", "name", "value", "task", "fun"])
        return data.set_index(["counter", "name"]).sort_index()

    @property
    def params(self) -> list[PyTree]:
        return self._params

    @property
    def flat_params(self) -> list[list[float]]:
        return _get_flat_params(self._params)

    @property
    def flat_param_names(self) -> list[str]:
        return _get_flat_param_names(param=self._params[0])

    # Time
    # ----------------------------------------------------------------------------------

    def _get_total_timings(
        self, cost_model: CostModel | Literal["wall_time"]
    ) -> NDArray[np.float64]:
        """Return the total timings across all tasks.

        Args:
            cost_model: The cost model that is used to calculate the time
                measure. If "wall_time", the wall time is returned.

        Returns:
            np.ndarray: The sum of the timings across all tasks.

        """
        if not isinstance(cost_model, CostModel) and cost_model != "wall_time":
            raise TypeError("cost_model must be a CostModel or 'wall_time'.")

        if cost_model == "wall_time":
            return np.array(self.stop_time, dtype=np.float64) - self.start_time[0]

        fun_time = self._get_timings_per_task(
            task=EvalTask.FUN, cost_factor=cost_model.fun
        )
        jac_time = self._get_timings_per_task(
            task=EvalTask.JAC, cost_factor=cost_model.jac
        )
        fun_and_jac_time = self._get_timings_per_task(
            task=EvalTask.FUN_AND_JAC, cost_factor=cost_model.fun_and_jac
        )
        return fun_time + jac_time + fun_and_jac_time

    def _get_timings_per_task(
        self, task: EvalTask, cost_factor: float | None
    ) -> NDArray[np.float64]:
        """Return the time measure per task.

        Args:
            task: The task for which the time is calculated.
            cost_factor: The cost factor used to calculate the time. If None,
                the time is the difference between the start and stop time,
                otherwise the time is given by the cost factor.

        Returns:
            np.ndarray: The time per task. For entries where the task is not
                the requested task, the time is 0.

        """
        task_mask = np.array([1 if t == task else 0 for t in self.task])
        factor: float | NDArray[np.float64]
        if cost_factor is None:
            factor = np.array(self.stop_time, dtype=np.float64) - np.array(
                self.start_time, dtype=np.float64
            )
        else:
            factor = cost_factor
        return factor * task_mask

    @property
    def start_time(self) -> list[float]:
        return self._start_time

    @property
    def stop_time(self) -> list[float]:
        return self._stop_time

    # Batches and fast_path
    # ----------------------------------------------------------------------------------

    @property
    def batches(self) -> list[int]:
        return self._batches

    def _is_serial(self) -> bool:
        # Serial histories have exactly one entry per batch id 0, 1, 2, ...
        return np.array_equal(self.batches, np.arange(len(self.batches)))

    # Tasks
    # ----------------------------------------------------------------------------------

    @property
    def task(self) -> list[EvalTask]:
        return self._task

    # ==================================================================================
    # Add deprecated dict access
    # ==================================================================================

    @property
    def time(self) -> list[float]:
        msg = (
            "The attribute `time` of History will be deprecated soon. Use the "
            "`start_time` method instead."
        )
        warnings.warn(msg, FutureWarning)
        arr = np.array(self._start_time)
        return (arr - arr[0]).tolist()

    @property
    def criterion(self) -> list[float | None]:
        msg = "The attribute `criterion` of History is deprecated. Use `fun` instead."
        warnings.warn(msg, FutureWarning)
        return self.fun

    @property
    def runtime(self) -> list[float]:
        msg = (
            "The attribute `runtime` of History will be deprecated soon. Use the "
            "`start_time` method instead."
        )
        warnings.warn(msg, FutureWarning)
        return self.time

    def __getitem__(self, key: str) -> Any:
        msg = "dict-like access to History is deprecated. Use attribute access instead."
        warnings.warn(msg, FutureWarning)
        return getattr(self, key)


# ======================================================================================
# Functions directly used in History methods
# ======================================================================================


def _get_flat_params(params: list[PyTree]) -> list[list[float]]:
    """Flatten each entry of params, using a fast path for 1d arrays."""
    fast_path = len(params) > 0 and _is_1d_array(params[0])
    if fast_path:
        flatten = lambda x: x.tolist()
    else:
        registry = get_registry(extended=True)
        flatten = partial(tree_just_flatten, registry=registry)
    return [flatten(p) for p in params]


def _get_flat_param_names(param: PyTree) -> list[str]:
    """Return flat parameter names, using positional names for 1d arrays."""
    fast_path = _is_1d_array(param)
    if fast_path:
        # Mypy raises an error here because .tolist() returns a str for
        # zero-dimensional arrays, but the fast path is only taken for 1d
        # arrays, so it can be ignored.
        return np.arange(param.size).astype(str).tolist()
    registry = get_registry(extended=True)
    return leaf_names(param, registry=registry)


def _is_1d_array(param: PyTree) -> bool:
    return isinstance(param, np.ndarray) and param.ndim == 1


def _calculate_monotone_sequence(
    sequence: list[float | None], direction: Direction
) -> NDArray[np.float64]:
    """Return the running best value of sequence; None entries become nan."""
    sequence_arr = np.array(sequence, dtype=np.float64)  # converts None to nan
    nan_mask = np.isnan(sequence_arr)

    if direction == Direction.MINIMIZE:
        sequence_arr[nan_mask] = np.inf
        out = np.minimum.accumulate(sequence_arr)
    elif direction == Direction.MAXIMIZE:
        sequence_arr[nan_mask] = -np.inf
        out = np.maximum.accumulate(sequence_arr)

    out[nan_mask] = np.nan
    return out


# ======================================================================================
# Misc
# ======================================================================================


def _validate_args_are_all_none_or_lists_of_same_length(
    *args: list[Any] | None,
) -> None:
    """Raise ValueError unless all args are None or equally long lists."""
    all_none = all(arg is None for arg in args)
    all_list = all(isinstance(arg, list) for arg in args)

    if not all_none:
        if all_list:
            unique_list_lengths = set(map(len, args))  # type: ignore[arg-type]
            if len(unique_list_lengths) != 1:
                raise ValueError("All list arguments must have the same length.")
        else:
            raise ValueError("All arguments must be lists of the same length or None.")
def _task_to_categorical(task: list[EvalTask]) -> "pd.Series[str]":
    """Encode the task list as a pandas categorical series over all EvalTask
    values."""
    EvalTaskDtype = pd.CategoricalDtype(categories=[t.value for t in EvalTask])
    return pd.Series([t.value for t in task], dtype=EvalTaskDtype)


def _apply_reduction_to_batches(
    data: NDArray[np.float64],
    batch_ids: list[int],
    reduction_function: Callable[[Iterable[float]], float],
) -> NDArray[np.float64]:
    """Apply a reduction operator on batches of data.

    This function assumes that batch_ids are non-empty and sorted.

    Args:
        data: 1d array with data.
        batch_ids: A list with batch ids whose length is equal to the size of
            data. Values need to be sorted and can be repeated.
        reduction_function: A reduction function that takes an iterable of
            floats as input (e.g., a numpy.ndarray or list of floats) and
            returns a scalar. The function must be able to handle NaN's.

    Returns:
        The transformed data. Has one entry per unique batch id, equal to the
        result of applying the reduction function to the data of that batch.

    """
    batch_starts, batch_stops = _get_batch_starts_and_stops(batch_ids)

    batch_results: list[float] = []
    for start, stop in zip(batch_starts, batch_stops, strict=True):
        batch_data = data[start:stop]
        batch_id = batch_ids[start]

        try:
            # All-NaN batches are mapped to NaN directly; many reductions
            # (e.g. np.nanmin) warn or fail on all-NaN input.
            if np.isnan(batch_data).all():
                reduced = np.nan
            else:
                reduced = reduction_function(batch_data)
        except Exception as e:
            msg = (
                f"Calling function {reduction_function.__name__} on batch {batch_id} "
                "of the History raised an Exception. Please verify that "
                f"{reduction_function.__name__} is well-defined, takes an iterable of "
                "floats as input and returns a scalar. The function must be able to "
                "handle NaN's."
            )
            raise ValueError(msg) from e

        if not np.isscalar(reduced):
            msg = (
                f"Function {reduction_function.__name__} did not return a scalar for "
                f"batch {batch_id}. Please verify that {reduction_function.__name__} "
                "returns a scalar when called on an iterable of floats. The function "
                "must be able to handle NaN's."
            )
            raise ValueError(msg)

        batch_results.append(float(reduced))  # type: ignore[arg-type,unused-ignore]

    return np.array(batch_results, dtype=np.float64)
def _get_batch_starts_and_stops(batch_ids: list[int]) -> tuple[list[int], list[int]]:
    """Get start and stop indices of batches.

    This function assumes that batch_ids are non-empty and sorted.

    """
    # Boundaries are the positions where consecutive batch ids differ.
    ids_arr = np.array(batch_ids, dtype=np.int64)
    indices = np.where(ids_arr[:-1] != ids_arr[1:])[0] + 1
    list_indices: list[int] = indices.tolist()
    starts = [0, *list_indices]
    stops = [*starts[1:], len(batch_ids)]
    return starts, stops


# ================================================
# FILE: src/optimagic/optimization/internal_optimization_problem.py
# ================================================
import time
import warnings
from copy import copy
from dataclasses import asdict, dataclass, replace
from typing import Any, Callable, Literal, cast

import numpy as np
from numpy.typing import NDArray
from typing_extensions import Self

from optimagic.batch_evaluators import process_batch_evaluator
from optimagic.differentiation.derivatives import first_derivative
from optimagic.differentiation.numdiff_options import NumdiffOptions
from optimagic.exceptions import UserFunctionRuntimeError, get_traceback
from optimagic.logging.logger import LogStore
from optimagic.logging.types import IterationState
from optimagic.optimization.fun_value import (
    LeastSquaresFunctionValue,
    LikelihoodFunctionValue,
    ScalarFunctionValue,
    SpecificFunctionValue,
)
from optimagic.optimization.history import History, HistoryEntry
from optimagic.parameters.bounds import Bounds
from optimagic.parameters.conversion import Converter
from optimagic.typing import (
    AggregationLevel,
    BatchEvaluator,
    Direction,
    ErrorHandling,
    EvalTask,
    PyTree,
)


@dataclass(frozen=True)
class InternalBounds(Bounds):
    # Internal bounds are flat numpy arrays (or None); soft bounds are not
    # used internally.
    lower: NDArray[np.float64] | None
    upper: NDArray[np.float64] | None
    soft_lower: None = None
    soft_upper: None = None
class InternalOptimizationProblem:
    """Optimization problem in its internal representation, as seen by
    optimizers."""

    def __init__(
        self,
        fun: Callable[[PyTree], SpecificFunctionValue],
        jac: Callable[[PyTree], PyTree] | None,
        fun_and_jac: Callable[[PyTree], tuple[SpecificFunctionValue, PyTree]] | None,
        converter: Converter,
        solver_type: AggregationLevel,
        direction: Direction,
        bounds: InternalBounds,
        numdiff_options: NumdiffOptions,
        error_handling: ErrorHandling,
        error_penalty_func: Callable[
            [NDArray[np.float64]],
            tuple[SpecificFunctionValue, NDArray[np.float64]],
        ],
        batch_evaluator: BatchEvaluator,
        linear_constraints: list[dict[str, Any]] | None,
        nonlinear_constraints: list[dict[str, Any]] | None,
        logger: LogStore[Any, Any] | None,
        # TODO: add hess and hessp
    ):
        self._fun = fun
        self._jac = jac
        self._fun_and_jac = fun_and_jac
        self._converter = converter
        self._solver_type = solver_type
        self._direction = direction
        self._bounds = bounds
        self._numdiff_options = numdiff_options
        self._error_handling = error_handling
        self._error_penalty_func = error_penalty_func
        self._batch_evaluator = batch_evaluator
        # A fresh history is created per problem instance.
        self._history = History(direction)
        self._linear_constraints = linear_constraints
        self._nonlinear_constraints = nonlinear_constraints
        self._logger = logger
        self._step_id: int | None = None

    # ==================================================================================
    # Public methods used by optimizers
    # ==================================================================================

    def fun(self, x: NDArray[np.float64]) -> float | NDArray[np.float64]:
        """Evaluate the objective function at x.

        Args:
            x: The parameter vector at which to evaluate the objective
                function.

        Returns:
            The function value at x. This is a scalar for scalar problems and
            an array for least squares or likelihood problems.

        """
        fun_value, hist_entry = self._evaluate_fun(x)
        self._history.add_entry(hist_entry)
        return fun_value
""" fun_value, hist_entry = self._evaluate_fun(x) self._history.add_entry(hist_entry) return fun_value def jac(self, x: NDArray[np.float64]) -> NDArray[np.float64]: """Evaluate the first derivative at x. Args: x: The parameter vector at which to evaluate the first derivative. Returns: The first derivative at x. This is a 1d array for scalar problems (the gradient) and a 2d array for least squares or likelihood problems (the Jacobian). """ jac_value, hist_entry = self._evaluate_jac(x) self._history.add_entry(hist_entry) return jac_value def fun_and_jac( self, x: NDArray[np.float64] ) -> tuple[float | NDArray[np.float64], NDArray[np.float64]]: """Simultaneously evaluate the objective function and its first derivative. See .fun and .jac for details. """ fun_and_jac_value, hist_entry = self._evaluate_fun_and_jac(x) self._history.add_entry(hist_entry) return fun_and_jac_value def batch_fun( self, x_list: list[NDArray[np.float64]], n_cores: int, batch_size: int | None = None, ) -> list[float | NDArray[np.float64]]: """Parallelized batch version of .fun. Args: x_list: A list of parameter vectors at which to evaluate the objective function. n_cores: The number of cores to use for the parallel evaluation. batch_size: Batch size that can be used by some algorithms to simulate the behavior under parallelization on more cores than are actually available. Only used by `criterion_plots` and benchmark plots. Returns: A list of function values at the points in x_list. See .fun for details. 
""" batch_size = n_cores if batch_size is None else batch_size batch_result = self._batch_evaluator( func=self._evaluate_fun, arguments=x_list, n_cores=n_cores, # This should always be raise because errors are already handled error_handling="raise", ) fun_values = [result[0] for result in batch_result] hist_entries = [result[1] for result in batch_result] self._history.add_batch(hist_entries, batch_size) return fun_values def batch_jac( self, x_list: list[NDArray[np.float64]], n_cores: int, batch_size: int | None = None, ) -> list[NDArray[np.float64]]: """Parallelized batch version of .jac. Args: x_list: A list of parameter vectors at which to evaluate the first derivative. n_cores: The number of cores to use for the parallel evaluation. batch_size: Batch size that can be used by some algorithms to simulate the behavior under parallelization on more cores than are actually available. Only used by `criterion_plots` and benchmark plots. Returns: A list of first derivatives at the points in x_list. See .jac for details. """ batch_size = n_cores if batch_size is None else batch_size batch_result = self._batch_evaluator( func=self._evaluate_jac, arguments=x_list, n_cores=n_cores, # This should always be raise because errors are already handled error_handling="raise", ) jac_values = [result[0] for result in batch_result] hist_entries = [result[1] for result in batch_result] self._history.add_batch(hist_entries, batch_size) return jac_values def batch_fun_and_jac( self, x_list: list[NDArray[np.float64]], n_cores: int, batch_size: int | None = None, ) -> list[tuple[float | NDArray[np.float64], NDArray[np.float64]]]: """Parallelized batch version of .fun_and_jac. Args: x_list: A list of parameter vectors at which to evaluate the objective function and its first derivative. n_cores: The number of cores to use for the parallel evaluation. 
batch_size: Batch size that can be used by some algorithms to simulate the behavior under parallelization on more cores than are actually available. Only used by `criterion_plots` and benchmark plots. Returns: A list of tuples containing the function value and the first derivative at the points in x_list. See .fun_and_jac for details. """ batch_size = n_cores if batch_size is None else batch_size batch_result = self._batch_evaluator( func=self._evaluate_fun_and_jac, arguments=x_list, n_cores=n_cores, # This should always be raise because errors are already handled error_handling="raise", ) fun_and_jac_values = [result[0] for result in batch_result] hist_entries = [result[1] for result in batch_result] self._history.add_batch(hist_entries, batch_size) return fun_and_jac_values def exploration_fun( self, x_list: list[NDArray[np.float64]], n_cores: int, batch_size: int | None = None, ) -> list[float]: batch_size = n_cores if batch_size is None else batch_size batch_result = self._batch_evaluator( func=self._evaluate_exploration_fun, arguments=x_list, n_cores=n_cores, # This should always be raise because errors are already handled error_handling="raise", ) fun_values = [result[0] for result in batch_result] hist_entries = [result[1] for result in batch_result] self._history.add_batch(hist_entries, batch_size) return fun_values def with_new_history(self) -> Self: new = copy(self) new._history = History(self.direction) return new def with_error_handling(self, error_handling: ErrorHandling) -> Self: new = copy(self) new._error_handling = error_handling return new def with_step_id(self, step_id: int) -> Self: new = copy(self) new._step_id = step_id return new # ================================================================================== # Public attributes # ================================================================================== @property def bounds(self) -> InternalBounds: """Bounds of the optimization problem.""" return self._bounds @property def 
converter(self) -> Converter: """Converter between external and internal parameter representation. The converter transforms parameters between their user-provided representation (the external representation) and the flat numpy array used by the optimizer (the internal representation). This transformation includes: - Flattening and unflattening of pytree structures. - Applying parameter constraints via reparametrizations. - Scaling and unscaling of parameter values. The Converter object provides the following main attributes: - ``params_to_internal``: Callable that converts a pytree of external parameters to a flat numpy array of internal parameters. - ``params_from_internal``: Callable that converts a flat numpy array of internal parameters to a pytree of external parameters. - ``derivative_to_internal``: Callable that converts the derivative from the external parameter space to the internal space. - ``has_transforming_constraints``: Boolean that is True if the conversion involves constraints that are handled by reparametrization. Examples: The converter is particularly useful for algorithms that require initial values in the internal (flat) parameter space, while allowing the user to specify these values in the more convenient external (pytree) format. Here's how an optimization algorithm might use the converter internally to prepare parameters for the optimizer: >>> from optimagic.optimization.internal_optimization_problem import ( ... SphereExampleInternalOptimizationProblem ... ) >>> import numpy as np >>> >>> # Optimization problem instance. >>> problem = SphereExampleInternalOptimizationProblem() >>> >>> # User provided parameters in external format. >>> user_params = np.array([1.0, 2.0, 3.0]) >>> >>> # Convert to internal format for optimization algorithms. 
>>> internal_params = problem.converter.params_to_internal(user_params) >>> internal_params array([1., 2., 3.]) """ return self._converter @property def linear_constraints(self) -> list[dict[str, Any]] | None: # TODO: write a docstring as soon as we actually use this return self._linear_constraints @property def nonlinear_constraints(self) -> list[dict[str, Any]] | None: """Internal representation of nonlinear constraints. Compared to the user provided constraints, we have done the following transformations: 1. The constraint a <= g(x) <= b is transformed to h(x) >= 0, where h(x) is - h(x) = g(x), if a == 0 and b == inf - h(x) = g(x) - a, if a != 0 and b == inf - h(x) = (g(x) - a, -g(x) + b) >= 0, if a != 0 and b != inf. 2. The equality constraint g(x) = v is transformed to h(x) >= 0, where h(x) = (g(x) - v, -g(x) + v). 3. Vector constraints are transformed to a list of scalar constraints. g(x) = (g1(x), g2(x), ...) >= 0 is transformed to (g1(x) >= 0, g2(x) >= 0, ...). 4. The constraint function (defined on a selection of user-facing parameters) is transformed to be evaluated on the internal parameters. """ return self._nonlinear_constraints @property def direction(self) -> Direction: """Direction of the optimization problem.""" return self._direction @property def history(self) -> History: """History container for the optimization problem.""" return self._history @property def logger(self) -> LogStore[Any, Any] | None: """Logger for the optimization problem.""" return self._logger # ================================================================================== # Implementation of the public functions; The main difference is that the lower- # level implementations return a history entry instead of adding it to the history # directly so they can be called in parallel! 
# ================================================================================== def _evaluate_fun( self, x: NDArray[np.float64] ) -> tuple[float | NDArray[np.float64], HistoryEntry]: fun_value, hist_entry, log_entry = self._pure_evaluate_fun(x) if self._logger: self._logger.iteration_store.insert(log_entry) return fun_value, hist_entry def _evaluate_jac( self, x: NDArray[np.float64] ) -> tuple[NDArray[np.float64], HistoryEntry]: if self._jac is not None: jac_value, hist_entry, log_entry = self._pure_evaluate_jac(x) else: if self._fun_and_jac is not None: (_, jac_value), hist_entry, log_entry = self._pure_evaluate_fun_and_jac( x ) else: (_, jac_value), hist_entry, log_entry = ( self._pure_evaluate_numerical_fun_and_jac(x) ) hist_entry = replace(hist_entry, task=EvalTask.JAC) if self._logger: self._logger.iteration_store.insert(log_entry) return jac_value, hist_entry def _evaluate_exploration_fun( self, x: NDArray[np.float64] ) -> tuple[float, HistoryEntry]: fun_value, hist_entry, log_entry = self._pure_exploration_fun(x) if self._logger: self._logger.iteration_store.insert(log_entry) return fun_value, hist_entry def _evaluate_fun_and_jac( self, x: NDArray[np.float64] ) -> tuple[tuple[float | NDArray[np.float64], NDArray[np.float64]], HistoryEntry]: if self._fun_and_jac is not None: (fun_value, jac_value), hist_entry, log_entry = ( self._pure_evaluate_fun_and_jac(x) ) elif self._jac is not None: fun_value, hist_entry, log_entry_fun = self._pure_evaluate_fun(x) jac_value, _, log_entry_jac = self._pure_evaluate_jac(x) hist_entry = replace(hist_entry, task=EvalTask.FUN_AND_JAC) log_entry = log_entry_fun.combine(log_entry_jac) else: (fun_value, jac_value), hist_entry, log_entry = ( self._pure_evaluate_numerical_fun_and_jac(x) ) if self._logger: self._logger.iteration_store.insert(log_entry) return (fun_value, jac_value), hist_entry # ================================================================================== # Atomic evaluations of user provided functions or 
numerical derivatives # ================================================================================== def _pure_evaluate_fun( self, x: NDArray[np.float64] ) -> tuple[float | NDArray[np.float64], HistoryEntry, IterationState]: """Evaluate fun and handle exceptions. This function does all the conversions from x to params and from SpecificFunctionValue to the internal value, including a sign flip for maximization. If any exception occurs during the evaluation of fun and error handling is set to CONTINUE, the fun value is replaced by a penalty value and a warning is issued. """ start_time = time.perf_counter() params = self._converter.params_from_internal(x) traceback: None | str = None try: fun_value = self._fun(params) except (KeyboardInterrupt, SystemExit): raise except Exception as e: if self._error_handling in ( ErrorHandling.RAISE, ErrorHandling.RAISE_STRICT, ): msg = "An error occurred when evaluating fun during optimization." raise UserFunctionRuntimeError(msg) from e else: traceback = get_traceback() msg = ( "The following exception was caught when evaluating fun during " "optimization. 
The fun value was replaced by a penalty value to " f"continue with the optimization.:\n\n{traceback}" ) warnings.warn(msg) fun_value, _ = self._error_penalty_func(x) algo_fun_value, hist_fun_value = _process_fun_value( value=fun_value, solver_type=self._solver_type, direction=self._direction ) stop_time = time.perf_counter() hist_entry = HistoryEntry( params=params, fun=hist_fun_value, start_time=start_time, stop_time=stop_time, task=EvalTask.FUN, ) log_entry = IterationState( params=params, timestamp=start_time, scalar_fun=hist_fun_value, valid=not bool(traceback), raw_fun=fun_value, step=self._step_id, exceptions=traceback, ) return algo_fun_value, hist_entry, log_entry def _pure_evaluate_jac( self, x: NDArray[np.float64] ) -> tuple[NDArray[np.float64], HistoryEntry, IterationState]: if self._jac is None: raise ValueError("The jac function is not defined.") start_time = time.perf_counter() traceback: None | str = None params = self._converter.params_from_internal(x) try: jac_value = self._jac(params) except (KeyboardInterrupt, SystemExit): raise except Exception as e: if self._error_handling in ( ErrorHandling.RAISE, ErrorHandling.RAISE_STRICT, ): msg = "An error occurred when evaluating jac during optimization." raise UserFunctionRuntimeError(msg) from e else: traceback = get_traceback() msg = ( "The following exception was caught when evaluating jac during " "optimization. 
The jac value was replaced by a penalty value to " f"continue with the optimization.:\n\n{traceback}" ) warnings.warn(msg) _, jac_value = self._error_penalty_func(x) out_jac = _process_jac_value( value=jac_value, direction=self._direction, converter=self._converter, x=x ) _assert_finite_jac( out_jac=out_jac, jac_value=jac_value, params=params, origin="jac" ) stop_time = time.perf_counter() hist_entry = HistoryEntry( params=params, fun=None, start_time=start_time, stop_time=stop_time, task=EvalTask.JAC, ) log_entry = IterationState( params=params, timestamp=start_time, scalar_fun=None, valid=not bool(traceback), raw_fun=None, step=self._step_id, exceptions=traceback, ) return out_jac, hist_entry, log_entry def _pure_evaluate_numerical_fun_and_jac( self, x: NDArray[np.float64] ) -> tuple[ tuple[float | NDArray[np.float64], NDArray[np.float64]], HistoryEntry, IterationState, ]: start_time = time.perf_counter() traceback: None | str = None def func(x: NDArray[np.float64]) -> SpecificFunctionValue: p = self._converter.params_from_internal(x) return self._fun(p) try: numdiff_res = first_derivative( func, x, bounds=self._bounds, **asdict(self._numdiff_options), unpacker=lambda x: x.internal_value(self._solver_type), error_handling="raise_strict", ) fun_value = numdiff_res.func_value jac_value = numdiff_res.derivative except (KeyboardInterrupt, SystemExit): raise except Exception as e: if self._error_handling in ( ErrorHandling.RAISE, ErrorHandling.RAISE_STRICT, ): msg = ( "An error occurred when evaluating a numerical derivative " "during optimization." ) raise UserFunctionRuntimeError(msg) from e else: traceback = get_traceback() msg = ( "The following exception was caught when calculating a " "numerical derivative during optimization. The jac value was " "replaced by a penalty value to continue with the optimization." 
f":\n\n{traceback}" ) warnings.warn(msg) fun_value, jac_value = self._error_penalty_func(x) _assert_finite_jac( out_jac=jac_value, jac_value=jac_value, params=self._converter.params_from_internal(x), origin="numerical", ) algo_fun_value, hist_fun_value = _process_fun_value( value=fun_value, # type: ignore solver_type=self._solver_type, direction=self._direction, ) if self._direction == Direction.MAXIMIZE: jac_value = -jac_value stop_time = time.perf_counter() hist_entry = HistoryEntry( params=self._converter.params_from_internal(x), fun=hist_fun_value, start_time=start_time, stop_time=stop_time, task=EvalTask.FUN_AND_JAC, ) log_entry = IterationState( params=self._converter.params_from_internal(x), timestamp=start_time, scalar_fun=hist_fun_value, valid=not bool(traceback), raw_fun=fun_value, step=self._step_id, exceptions=traceback, ) return (algo_fun_value, jac_value), hist_entry, log_entry def _pure_exploration_fun( self, x: NDArray[np.float64] ) -> tuple[float, HistoryEntry, IterationState]: start_time = time.perf_counter() params = self._converter.params_from_internal(x) traceback: None | str = None try: fun_value = self._fun(params) except (KeyboardInterrupt, SystemExit): raise except Exception: traceback = get_traceback() msg = ( "The following exception was caught when evaluating fun during the " "exploration phase of a multistart optimization. 
The fun value was " "replaced by a penalty value to continue with the " f"optimization.:\n\n{traceback}" ) warnings.warn(msg) fun_value, _ = self._error_penalty_func(x) if not traceback: algo_fun_value, hist_fun_value = _process_fun_value( value=fun_value, # For exploration we always need a scalar value solver_type=AggregationLevel.SCALAR, direction=self._direction, ) else: algo_fun_value = -np.inf hist_fun_value = -np.inf if self._direction == Direction.MAXIMIZE: hist_fun_value = np.inf stop_time = time.perf_counter() hist_entry = HistoryEntry( params=params, fun=hist_fun_value, start_time=start_time, stop_time=stop_time, task=EvalTask.EXPLORATION, ) log_entry = IterationState( params=params, timestamp=start_time, scalar_fun=hist_fun_value, valid=not bool(traceback), raw_fun=fun_value, step=self._step_id, exceptions=traceback, ) return cast(float, algo_fun_value), hist_entry, log_entry def _pure_evaluate_fun_and_jac( self, x: NDArray[np.float64] ) -> tuple[ tuple[float | NDArray[np.float64], NDArray[np.float64]], HistoryEntry, IterationState, ]: if self._fun_and_jac is None: raise ValueError("The fun_and_jac function is not defined.") start_time = time.perf_counter() traceback: None | str = None params = self._converter.params_from_internal(x) try: fun_value, jac_value = self._fun_and_jac(params) except (KeyboardInterrupt, SystemExit): raise except Exception as e: if self._error_handling in ( ErrorHandling.RAISE, ErrorHandling.RAISE_STRICT, ): msg = ( "An error occurred when evaluating fun_and_jac during optimization." ) raise UserFunctionRuntimeError(msg) from e else: traceback = get_traceback() msg = ( "The following exception was caught when evaluating fun_and_jac " "during optimization. 
The fun and jac values were replaced by " f"penalty values to continue with the optimization.:\n\n{traceback}" ) warnings.warn(msg) fun_value, jac_value = self._error_penalty_func(x) algo_fun_value, hist_fun_value = _process_fun_value( value=fun_value, solver_type=self._solver_type, direction=self._direction ) if traceback: out_jac = jac_value else: out_jac = self._converter.derivative_to_internal(jac_value, x) if self._direction == Direction.MAXIMIZE: out_jac = -out_jac _assert_finite_jac( out_jac=out_jac, jac_value=jac_value, params=params, origin="fun_and_jac" ) stop_time = time.perf_counter() hist_entry = HistoryEntry( params=params, fun=hist_fun_value, start_time=start_time, stop_time=stop_time, task=EvalTask.FUN_AND_JAC, ) log_entry = IterationState( params=params, timestamp=start_time, scalar_fun=hist_fun_value, valid=not bool(traceback), raw_fun=fun_value, step=self._step_id, exceptions=traceback, ) return (algo_fun_value, out_jac), hist_entry, log_entry def _assert_finite_jac( out_jac: NDArray[np.float64], jac_value: PyTree, params: PyTree, origin: Literal["numerical", "jac", "fun_and_jac"], ) -> None: """Check for infinite and NaN values in the Jacobian and raise an error if found. Args: out_jac: internal processed Jacobian to check for finiteness. jac_value: original Jacobian value as returned by the user function, params: user-facing parameter representation at evaluation point. origin: Source of Jacobian calculation, for the error message. Raises: UserFunctionRuntimeError: If any infinite or NaN values are found in the Jacobian. """ if not np.all(np.isfinite(out_jac)): if origin == "jac" or "fun_and_jac": msg = ( "The optimization failed because the derivative provided via " f"{origin} contains infinite or NaN values." "\nPlease validate the derivative function." ) elif origin == "numerical": msg = ( "The optimization failed because the numerical derivative " "(computed using fun) contains infinite or NaN values." 
"\nPlease validate the criterion function or try a different optimizer." ) msg += ( f"\nParameters at evaluation point: {params}\nJacobian values: {jac_value}" ) raise UserFunctionRuntimeError(msg) def _process_fun_value( value: SpecificFunctionValue, solver_type: AggregationLevel, direction: Direction, ) -> tuple[float | NDArray[np.float64], float]: """Post-process a function value for use by the algorithm and as history entry. The sign flip for maximization is only applied to the value that will be passed to the algorithm. Args: value: The function value. solver_type: The aggregation level of the solver. direction: The direction of optimization. Returns: A tuple of the function value for the algorithm and the function value for the history entry. """ algo_value = value.internal_value(solver_type) history_value = cast(float, value.internal_value(AggregationLevel.SCALAR)) if direction == Direction.MAXIMIZE: algo_value = -algo_value return algo_value, history_value def _process_jac_value( value: SpecificFunctionValue, direction: Direction, converter: Converter, x: NDArray[np.float64], ) -> NDArray[np.float64]: """Post-process a for use by the algorithm. Args: value: The Jacobian value. direction: The direction of optimization. converter: The converter object. Returns: The Jacobian value for the algorithm. """ out_value = converter.derivative_to_internal(value, x) if direction == Direction.MAXIMIZE: out_value = -out_value return out_value class SphereExampleInternalOptimizationProblem(InternalOptimizationProblem): """Super simple example of an internal optimization problem. This can be used to test algorithm wrappers or to familiarize yourself with the internal optimization problem interface. 
Args: """ def __init__( self, solver_type: AggregationLevel = AggregationLevel.SCALAR, binding_bounds: bool = False, ) -> None: _fun_dict = { AggregationLevel.SCALAR: lambda x: ScalarFunctionValue(x @ x), AggregationLevel.LIKELIHOOD: lambda x: LikelihoodFunctionValue(x**2), AggregationLevel.LEAST_SQUARES: lambda x: LeastSquaresFunctionValue(x), # noqa: PLW0108 } _jac_dict = { AggregationLevel.SCALAR: lambda x: 2 * x, AggregationLevel.LIKELIHOOD: lambda x: 2 * x, AggregationLevel.LEAST_SQUARES: lambda x: np.eye(len(x)), } fun = _fun_dict[solver_type] jac = _jac_dict[solver_type] fun_and_jac = lambda x: (fun(x), jac(x)) converter = Converter( params_to_internal=lambda x: x, params_from_internal=lambda x: x, derivative_to_internal=lambda x, x0: x, has_transforming_constraints=False, ) direction = Direction.MINIMIZE if binding_bounds: lb = np.arange(10, dtype=np.float64) - 7.0 ub = np.arange(10, dtype=np.float64) - 3.0 self._x_opt = np.array([-3, -2, -1, 0, 0, 0, 0, 0, 1, 2.0]) else: lb = np.full(10, -10, dtype=np.float64) ub = np.full(10, 10, dtype=np.float64) self._x_opt = np.zeros(10) bounds = InternalBounds(lb, ub) numdiff_options = NumdiffOptions() error_handling = ErrorHandling.RAISE error_penalty_func = fun_and_jac batch_evaluator = process_batch_evaluator("joblib") linear_constraints = None nonlinear_constraints = None logger = None super().__init__( fun=fun, jac=jac, fun_and_jac=fun_and_jac, converter=converter, solver_type=solver_type, direction=direction, bounds=bounds, numdiff_options=numdiff_options, error_handling=error_handling, error_penalty_func=error_penalty_func, batch_evaluator=batch_evaluator, linear_constraints=linear_constraints, nonlinear_constraints=nonlinear_constraints, logger=logger, ) class SphereExampleInternalOptimizationProblemWithConverter( InternalOptimizationProblem ): """Super simple example of an internal optimization problem with PyTree Converter. Note: params should be a dict with key-value pairs `"x{i}" : val . eg. 
`{'x0': 1, 'x1': 2, ...}`. The converter.params_to_internal method converts tree like `{'x0': 1, 'x1': 2, 'x2': 3 ...}` to flat array `[1,2,3 ...]` . The converter.params_from_internal method converts flat array `[1,2,3 ...]` to tree like `{'x0': 1, 'x1': 2, 'x2': 3 ...}`. The converter.derivative_to_internal converts derivative trees {'x0': 2,'x1': 4, } to flat arrays [2,4] and jacobian tree `{ "x0": {"x0": 1, "x1": 0, }, "x1": {"x0": 0, "x1": 1, }` to NDArray [[1, 0,], [0, 1, ],]. }. This can be used to test algorithm wrappers or to familiarize yourself with the internal optimization problem interface. Args: """ def __init__( self, solver_type: AggregationLevel = AggregationLevel.SCALAR, binding_bounds: bool = False, ) -> None: def sphere(params: PyTree) -> SpecificFunctionValue: out = sum([params[f"x{i}"] ** 2 for i in range(len(params))]) return ScalarFunctionValue(out) def ls_sphere(params: PyTree) -> SpecificFunctionValue: out = [params[f"x{i}"] for i in range(len(params))] return LeastSquaresFunctionValue(out) def likelihood_sphere(params: PyTree) -> SpecificFunctionValue: out = [params[f"x{i}"] ** 2 for i in range(len(params))] return LikelihoodFunctionValue(out) _fun_dict = { AggregationLevel.SCALAR: sphere, AggregationLevel.LIKELIHOOD: likelihood_sphere, AggregationLevel.LEAST_SQUARES: ls_sphere, } def sphere_gradient(params: PyTree) -> PyTree: return {f"x{i}": 2 * v for i, v in enumerate(params.values())} def likelihood_sphere_gradient(params: PyTree) -> PyTree: return {f"x{i}": 2 * v for i, v in enumerate(params.values())} def ls_sphere_jac(params: PyTree) -> PyTree: return { f"x{i}": {f"x{j}": 1 if i == j else 0 for j in range(len(params))} for i in range(len(params)) } _jac_dict = { AggregationLevel.SCALAR: sphere_gradient, AggregationLevel.LIKELIHOOD: likelihood_sphere_gradient, AggregationLevel.LEAST_SQUARES: ls_sphere_jac, } fun = _fun_dict[solver_type] jac = _jac_dict[solver_type] fun_and_jac = lambda x: (fun(x), jac(x)) def params_flatten(params: 
PyTree) -> NDArray[np.float64]: return np.array([v for v in params.values()]).astype(float) def params_unflatten(x: NDArray[np.float64]) -> PyTree: return {f"x{i}": v for i, v in enumerate(x)} def derivative_flatten(tree: PyTree, x: NDArray[np.float64]) -> Any: if solver_type == AggregationLevel.LEAST_SQUARES: out = [list(row.values()) for row in tree.values()] return np.array(out) else: return params_flatten(tree) converter = Converter( params_to_internal=params_flatten, params_from_internal=params_unflatten, derivative_to_internal=derivative_flatten, has_transforming_constraints=False, ) direction = Direction.MINIMIZE if binding_bounds: lb = np.arange(10, dtype=np.float64) - 7.0 ub = np.arange(10, dtype=np.float64) - 3.0 self._x_opt = { f"x{i}": x for i, x in enumerate(np.array([-3, -2, -1, 0, 0, 0, 0, 0, 1, 2.0])) } else: lb = np.full(10, -10, dtype=np.float64) ub = np.full(10, 10, dtype=np.float64) self._x_opt = {f"x{i}": x for i, x in enumerate(np.zeros(10))} bounds = InternalBounds(lb, ub) numdiff_options = NumdiffOptions() error_handling = ErrorHandling.RAISE error_penalty_func = fun_and_jac batch_evaluator = process_batch_evaluator("joblib") linear_constraints = None nonlinear_constraints = None logger = None super().__init__( fun=fun, jac=jac, fun_and_jac=fun_and_jac, converter=converter, solver_type=solver_type, direction=direction, bounds=bounds, numdiff_options=numdiff_options, error_handling=error_handling, error_penalty_func=error_penalty_func, batch_evaluator=batch_evaluator, linear_constraints=linear_constraints, nonlinear_constraints=nonlinear_constraints, logger=logger, ) ================================================ FILE: src/optimagic/optimization/multistart.py ================================================ """Functions for multi start optimization a la TikTak. TikTak (`Arnoud, Guvenen, and Kleineberg `_) is an algorithm for solving global optimization problems. 
It performs local searches from a set of carefully-selected points in the parameter space. First implemented in Python by Alisdair McKay ( `GitHub Repository `_) """ import warnings from dataclasses import dataclass, replace from typing import Literal import numpy as np from numpy.typing import NDArray from scipy.stats import qmc, triang from optimagic.logging.logger import LogStore from optimagic.logging.types import StepStatus from optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult from optimagic.optimization.internal_optimization_problem import ( InternalBounds, InternalOptimizationProblem, ) from optimagic.optimization.multistart_options import InternalMultistartOptions from optimagic.optimization.optimization_logging import ( log_scheduled_steps_and_get_ids, ) from optimagic.typing import AggregationLevel, ErrorHandling from optimagic.utilities import get_rng def run_multistart_optimization( local_algorithm: Algorithm, internal_problem: InternalOptimizationProblem, x: NDArray[np.float64], sampling_bounds: InternalBounds, options: InternalMultistartOptions, logger: LogStore | None, error_handling: ErrorHandling, ) -> InternalOptimizeResult: steps = determine_steps(options.n_samples, stopping_maxopt=options.stopping_maxopt) scheduled_steps = log_scheduled_steps_and_get_ids( steps=steps, logger=logger, ) if options.sample is not None: sample = options.sample else: sample = _draw_exploration_sample( x=x, lower=sampling_bounds.lower, upper=sampling_bounds.upper, # -1 because we add start parameters n_samples=options.n_samples - 1, distribution=options.sampling_distribution, method=options.sampling_method, seed=options.seed, ) sample = np.vstack([x.reshape(1, -1), sample]) if logger: logger.step_store.update( scheduled_steps[0], {"status": StepStatus.RUNNING.value} ) exploration_res = run_explorations( internal_problem=internal_problem, sample=sample, n_cores=options.n_cores, step_id=scheduled_steps[0], ) if logger: logger.step_store.update( 
scheduled_steps[0], {"status": StepStatus.COMPLETE.value} ) scheduled_steps = scheduled_steps[1:] sorted_sample = exploration_res.sorted_sample sorted_values = exploration_res.sorted_values stopping_maxopt = options.stopping_maxopt if stopping_maxopt > len(sorted_sample): n_skipped_steps = stopping_maxopt - len(sorted_sample) stopping_maxopt = len(sorted_sample) warnings.warn( "There are less valid starting points than requested optimizations. " "The number of optimizations has been reduced from " f"{options.stopping_maxopt} to {len(sorted_sample)}." ) skipped_steps = scheduled_steps[-n_skipped_steps:] scheduled_steps = scheduled_steps[:-n_skipped_steps] if logger: for step in skipped_steps: new_status = StepStatus.SKIPPED.value logger.step_store.update(step, {"status": new_status}) batched_sample = get_batched_optimization_sample( sorted_sample=sorted_sample, stopping_maxopt=stopping_maxopt, batch_size=options.batch_size, ) state = { "best_x": sorted_sample[0], "best_y": sorted_values[0], "best_res": None, "x_history": [], "y_history": [], "result_history": [], "start_history": [], } convergence_criteria = { "xtol": options.convergence_xtol_rel, "max_discoveries": options.convergence_max_discoveries, } batch_evaluator = options.batch_evaluator def single_optimization(x0, step_id): """Closure for running a single optimization, given a starting point.""" problem = internal_problem.with_error_handling(error_handling) res = local_algorithm.solve_internal_problem(problem, x0, step_id) return res opt_counter = 0 for batch in batched_sample: weight = options.weight_func(opt_counter, stopping_maxopt) starts = [weight * state["best_x"] + (1 - weight) * x for x in batch] arguments = [ {"x0": x, "step_id": id_} for x, id_ in zip(starts, scheduled_steps[: len(batch)], strict=False) ] scheduled_steps = scheduled_steps[len(batch) :] batch_results = batch_evaluator( func=single_optimization, arguments=arguments, unpack_symbol="**", n_cores=options.n_cores, 
error_handling=options.error_handling, ) state, is_converged = update_convergence_state( current_state=state, starts=starts, results=batch_results, convergence_criteria=convergence_criteria, solver_type=local_algorithm.algo_info.solver_type, ) opt_counter += len(batch) if is_converged: if logger: for step in scheduled_steps: new_status = StepStatus.SKIPPED.value logger.step_store.update(step, {"status": new_status}) break multistart_info = { "start_parameters": state["start_history"], "local_optima": state["result_history"], "exploration_sample": sorted_sample, "exploration_results": sorted_values, } raw_res = state["best_res"] res = replace(raw_res, multistart_info=multistart_info) return res def determine_steps(n_samples, stopping_maxopt): """Determine the number and type of steps for the multistart optimization. This is mainly used to write them to the log. The number of steps is also used if logging is False. Args: n_samples (int): Number of exploration points for the multistart optimization. stopping_maxopt (int): Number of local optimizations. Returns: list: List of dictionaries with information on each step. """ exploration_step = { "type": "exploration", "status": "running", "name": "exploration", "n_iterations": n_samples, } steps = [exploration_step] for i in range(stopping_maxopt): optimization_step = { "type": "optimization", "status": "scheduled", "name": f"optimization_{i}", } steps.append(optimization_step) return steps def _draw_exploration_sample( x: NDArray[np.float64], lower: NDArray[np.float64] | None, upper: NDArray[np.float64] | None, n_samples: int, distribution: Literal["uniform", "triangular"], method: Literal["sobol", "random", "halton", "latin_hypercube"], seed: int | np.random.Generator | None, ) -> NDArray[np.float64]: """Get a sample of parameter values for the first stage of the tiktak algorithm. The sample is created randomly or using a low discrepancy sequence. Different distributions are available. 
Args: x: Internal parameter vector of shape (n_params,). lower: Vector of internal lower bounds of shape (n_params,). upper: Vector of internal upper bounds of shape (n_params,). n_samples: Number of sample points. distribution: The distribution from which the exploration sample is drawn. Allowed are "uniform" and "triangular". Defaults to "uniform". method: The method used to draw the exploration sample. Allowed are "sobol", "random", "halton", and "latin_hypercube". Defaults to "sobol". seed: Random number seed or generator. Returns: Array of shape (n_samples, n_params). Each row represents a vector of parameter values. """ if lower is None or upper is None: raise ValueError("lower and upper bounds must be provided for multistart.") for name, bound in zip(["lower", "upper"], [lower, upper], strict=False): if not np.isfinite(bound).all(): raise ValueError( f"multistart optimization requires finite {name}_bounds or " f"soft_{name}_bounds for all parameters." ) if method == "sobol": # Draw `n` points from the open interval (lower, upper)^d. # Note that scipy uses the half-open interval [lower, upper)^d internally. # We apply a burn-in phase of 1, i.e. we skip the first point in the sequence # and thus exclude the lower bound. 
sampler = qmc.Sobol(d=len(lower), scramble=False, seed=seed) _ = sampler.fast_forward(1) sample_unscaled = sampler.random(n=n_samples) elif method == "halton": sampler = qmc.Halton(d=len(lower), scramble=False, seed=seed) sample_unscaled = sampler.random(n=n_samples) elif method == "latin_hypercube": sampler = qmc.LatinHypercube(d=len(lower), strength=1, seed=seed) sample_unscaled = sampler.random(n=n_samples) elif method == "random": rng = get_rng(seed) sample_unscaled = rng.uniform(size=(n_samples, len(lower))) if distribution == "uniform": sample_scaled = qmc.scale(sample_unscaled, lower, upper) elif distribution == "triangular": sample_scaled = triang.ppf( sample_unscaled, c=(x - lower) / (upper - lower), loc=lower, scale=upper - lower, ) return sample_scaled @dataclass(frozen=True) class _InternalExplorationResult: """Exploration result of the multistart optimization. Attributes: sorted_values: List of sorted function values. sorted_sample: 2d numpy array where each row is the internal parameter vector corresponding to the sorted function values. """ sorted_values: list[float] sorted_sample: NDArray[np.float64] def run_explorations( internal_problem: InternalOptimizationProblem, sample: NDArray[np.float64], n_cores: int, step_id: int, ) -> _InternalExplorationResult: """Do the function evaluations for the exploration phase. Args: internal_problem: The internal optimization problem. sample: 2d numpy array where each row is a sampled internal parameter vector. batch_evaluator: See :ref:`batch_evaluators`. n_cores: Number of cores. step_id: The identifier of the exploration step. Returns: A data object containing - sorted_values: List of sorted function values. Invalid function values are excluded. - sorted_sample: 2d numpy array where each row is the internal parameter vector corresponding to the sorted function values. 
""" internal_problem = internal_problem.with_step_id(step_id) x_list: list[NDArray[np.float64]] = list(sample) raw_values = np.asarray( internal_problem.exploration_fun(x_list, n_cores=n_cores), dtype=np.float64 ) is_valid = np.isfinite(raw_values) if not is_valid.any(): raise RuntimeError( "All function evaluations of the exploration phase in a multistart " "optimization are invalid. Check your code or the sampling bounds." ) valid_values = raw_values[is_valid] valid_sample = sample[is_valid] # this sorts from low to high values; internal criterion and derivative took care # of the sign switch. sorting_indices = np.argsort(valid_values) out = _InternalExplorationResult( sorted_values=valid_values[sorting_indices].tolist(), sorted_sample=valid_sample[sorting_indices], ) return out def get_batched_optimization_sample(sorted_sample, stopping_maxopt, batch_size): """Create a batched sample of internal parameters for the optimization phase. Note that in the end the optimizations will not be started from those parameter vectors but from a convex combination of that parameter vector and the best parameter vector at the time when the optimization is started. Args: sorted_sample (np.ndarray): 2d numpy array with containing sorted internal parameter vectors. stopping_maxopt (int): Number of optimizations to run. If sample is shorter than that, optimizations are run on all entries of the sample. batch_size (int): Batch size. Returns: list: Nested list of parameter vectors from which an optimization is run. The inner lists have length ``batch_size`` or shorter. """ n_batches = int(np.ceil(stopping_maxopt / batch_size)) start = 0 batched = [] for _ in range(n_batches): stop = min(start + batch_size, len(sorted_sample), stopping_maxopt) batched.append(list(sorted_sample[start:stop])) start = stop return batched def update_convergence_state( current_state, starts, results, convergence_criteria, solver_type ): """Update the state of all quantities related to convergence. 
    Args:
        current_state (dict): Dictionary with the entries:
            - "best_x": The currently best parameter vector
            - "best_y": The currently best function value
            - "best_res": The currently best optimization result
            - "x_history": The history of locally optimal parameters
            - "y_history": The history of locally optimal function values.
            - "result_history": The history of local optimization results
            - "start_history": The history of start parameters
        starts (list): List of starting points for local optimizations.
        results (list): List of results from local optimizations.
        convergence_criteria (dict): Dict with the entries "xtol" and
            "max_discoveries"
        solver_type: The aggregation level of the local optimizer. Needed to
            interpret the output of the internal criterion function.

    Returns:
        dict: The updated state, same entries as current_state.
        bool: A bool that indicates if the optimizer has converged.

    """
    # ==============================================================================
    # unpack some variables
    # ==============================================================================
    xtol = convergence_criteria["xtol"]
    max_discoveries = convergence_criteria["max_discoveries"]
    best_x = current_state["best_x"]
    best_y = current_state["best_y"]
    best_res = current_state["best_res"]
    # ==============================================================================
    # filter out optimizations that raised errors
    # ==============================================================================
    # get indices of local optimizations that did not fail
    valid_indices = [i for i, res in enumerate(results) if not isinstance(res, str)]

    # If all local optimizations failed, return early so we don't have to worry about
    # index errors later.
    if not valid_indices:
        return current_state, False

    # ==============================================================================
    # reduce everything to valid optimizations
    # ==============================================================================
    valid_results = [results[i] for i in valid_indices]
    valid_starts = [starts[i] for i in valid_indices]
    valid_new_x = [res.x for res in valid_results]
    valid_new_y = []

    # make the criterion output scalar if a least squares optimizer returns an
    # array as solution_criterion.
    # NOTE(review): if res.fun is non-scalar and solver_type is neither LIKELIHOOD
    # nor LEAST_SQUARES, `fun` would be unbound here — presumably such results
    # cannot occur; confirm against the callers.
    for res in valid_results:
        if np.isscalar(res.fun):
            fun = float(res.fun)
        elif solver_type == AggregationLevel.LIKELIHOOD:
            fun = float(np.sum(res.fun))
        elif solver_type == AggregationLevel.LEAST_SQUARES:
            fun = np.dot(res.fun, res.fun)

        valid_new_y.append(fun)

    # ==============================================================================
    # accept new best point if we find a new lowest function value
    # ==============================================================================
    best_index = np.argmin(valid_new_y)
    if valid_new_y[best_index] <= best_y:
        best_x = valid_new_x[best_index]
        best_y = valid_new_y[best_index]
        best_res = valid_results[best_index]
    # handle the case that the global optimum was found in the exploration sample and
    # due to floating point imprecisions the result of the optimization that started at
    # the global optimum is slightly worse
    elif best_res is None:
        best_res = valid_results[best_index]

    # ==============================================================================
    # update history and state
    # ==============================================================================
    new_x_history = current_state["x_history"] + valid_new_x
    all_x = np.array(new_x_history)
    # Relative distances to the current best; the clip guards against division
    # by (near-)zero entries of best_x.
    relative_diffs = (all_x - best_x) / np.clip(best_x, 0.1, np.inf)
    distances = np.linalg.norm(relative_diffs, axis=1)
    n_close = (distances <= xtol).sum()

    # Converged once the best optimum has been (re-)discovered often enough.
    is_converged = n_close >= max_discoveries

    new_state = {
        "best_x": best_x,
        "best_y": best_y,
        "best_res": best_res,
        "x_history": new_x_history,
        "y_history": current_state["y_history"] + valid_new_y,
        "result_history": current_state["result_history"] + valid_results,
        "start_history": current_state["start_history"] + valid_starts,
    }

    return new_state, is_converged


================================================
FILE: src/optimagic/optimization/multistart_options.py
================================================
from dataclasses import dataclass
from functools import partial
from typing import Callable, Literal, Sequence, TypedDict, cast

import numpy as np
from numpy.typing import NDArray
from typing_extensions import NotRequired

from optimagic.batch_evaluators import process_batch_evaluator
from optimagic.deprecations import replace_and_warn_about_deprecated_multistart_options
from optimagic.exceptions import InvalidMultistartError
from optimagic.typing import BatchEvaluator, BatchEvaluatorLiteral, PyTree

# ======================================================================================
# Public Options
# ======================================================================================


@dataclass(frozen=True)
class MultistartOptions:
    """Multistart options in optimization problems.

    Attributes:
        n_samples: The number of points at which the objective function is
            evaluated during the exploration phase. If None, n_samples is set to
            100 times the number of parameters.
        stopping_maxopt: The maximum number of local optimizations to run. Defaults
            to 10% of n_samples. This number may not be reached if multistart
            converges earlier.
        sampling_distribution: The distribution from which the exploration sample
            is drawn. Allowed are "uniform" and "triangular". Defaults to
            "uniform".
        sampling_method: The method used to draw the exploration sample. Allowed
            are "sobol", "random", "halton", and "latin_hypercube". Defaults to
            "random".
        sample: A sequence of PyTrees or None. If None, a sample is drawn from the
            sampling distribution.
        mixing_weight_method: The method used to determine the mixing weight, i.e.,
            how start parameters for local optimizations are calculated. Allowed
            are "tiktak" and "linear", or a custom callable. Defaults to "tiktak".
        mixing_weight_bounds: The lower and upper bounds for the mixing weight.
            Defaults to (0.1, 0.995).
        convergence_max_discoveries: The maximum number of discoveries for
            convergence. Determines after how many re-discoveries of the currently
            best local optima the multistart algorithm stops. Defaults to 2.
        convergence_xtol_rel: The relative tolerance in parameters
            for convergence. Determines the maximum relative distance two
            parameter vectors can have to be considered equal. Defaults to 0.01.
        n_cores: The number of cores to use for parallelization. Defaults to 1.
        batch_evaluator: The evaluator to use for batch evaluation. Allowed are
            "joblib", "pathos", and "threading", or a custom callable.
        batch_size: The batch size for batch evaluation. Must be larger than
            n_cores or None.
        seed: The seed for the random number generator.
        error_handling: The error handling for exploration and optimization errors.
            Allowed are "raise" and "continue".

    Raises:
        InvalidMultistartError: If the multistart options cannot be processed, e.g.
            because they do not have the correct type.

    """

    n_samples: int | None = None
    stopping_maxopt: int | None = None
    sampling_distribution: Literal["uniform", "triangular"] = "uniform"
    sampling_method: Literal["sobol", "random", "halton", "latin_hypercube"] = "random"
    sample: Sequence[PyTree] | None = None
    mixing_weight_method: (
        Literal["tiktak", "linear"] | Callable[[int, int, float, float], float]
    ) = "tiktak"
    mixing_weight_bounds: tuple[float, float] = (0.1, 0.995)
    convergence_xtol_rel: float | None = None
    convergence_max_discoveries: int = 2
    n_cores: int = 1
    batch_evaluator: BatchEvaluatorLiteral | BatchEvaluator = "joblib"
    batch_size: int | None = None
    seed: int | np.random.Generator | None = None
    error_handling: Literal["raise", "continue"] | None = None

    # Deprecated attributes
    share_optimization: float | None = None
    convergence_relative_params_tolerance: float | None = None
    optimization_error_handling: Literal["raise", "continue"] | None = None
    exploration_error_handling: Literal["raise", "continue"] | None = None

    def __post_init__(self) -> None:
        # Fail fast on invalid user input; raises InvalidMultistartError.
        _validate_attribute_types_and_values(self)


class MultistartOptionsDict(TypedDict):
    # TypedDict mirror of MultistartOptions; every key is optional so users can
    # pass a plain dict with any subset of the options.
    n_samples: NotRequired[int | None]
    stopping_maxopt: NotRequired[int | None]
    sampling_distribution: NotRequired[Literal["uniform", "triangular"]]
    sampling_method: NotRequired[
        Literal["sobol", "random", "halton", "latin_hypercube"]
    ]
    sample: NotRequired[Sequence[PyTree] | None]
    mixing_weight_method: NotRequired[
        Literal["tiktak", "linear"] | Callable[[int, int, float, float], float]
    ]
    mixing_weight_bounds: NotRequired[tuple[float, float]]
    convergence_xtol_rel: NotRequired[float | None]
    convergence_max_discoveries: NotRequired[int]
    n_cores: NotRequired[int]
    batch_evaluator: NotRequired[BatchEvaluatorLiteral | BatchEvaluator]
    batch_size: NotRequired[int | None]
    seed: NotRequired[int | np.random.Generator | None]
    error_handling: NotRequired[Literal["raise", "continue"] | None]

    # Deprecated attributes
    share_optimization: NotRequired[float | None]
    convergence_relative_params_tolerance: NotRequired[float | None]
    optimization_error_handling: NotRequired[Literal["raise", "continue"] | None]
    exploration_error_handling: NotRequired[Literal["raise", "continue"] | None]


def pre_process_multistart(
    multistart: bool | MultistartOptions | MultistartOptionsDict | None,
) -> MultistartOptions | None:
    """Convert all valid types of multistart to a optimagic.MultistartOptions.

    This just harmonizes multiple ways of specifying multistart options into a
    single format. It performs runtime type checks, but it does not check whether
    multistart options are consistent with other option choices.

    Args:
        multistart: The user provided multistart options.

    Returns:
        The multistart options in the optimagic format.

    Raises:
        InvalidMultistartError: If the multistart options cannot be processed, e.g.
            because they do not have the correct type.

    """
    if isinstance(multistart, bool):
        multistart = MultistartOptions() if multistart else None
    elif isinstance(multistart, MultistartOptions) or multistart is None:
        pass
    else:
        try:
            # Treat anything else as a dict of keyword arguments.
            multistart = MultistartOptions(**multistart)
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception as e:
            # Re-raise validation errors from __post_init__ unchanged; wrap
            # everything else in a uniform error type.
            if isinstance(e, InvalidMultistartError):
                raise e
            raise InvalidMultistartError(
                f"Invalid multistart options of type: {type(multistart)}. Multistart "
                "options must be of type optimagic.MultistartOptions, a dictionary "
                "with valid keys, None, or a boolean."
            ) from e

    if multistart is not None:
        multistart = replace_and_warn_about_deprecated_multistart_options(multistart)
        # The replace and warn function cannot be typed due to circular imports, but
        # we know that the return type is MultistartOptions
        multistart = cast(MultistartOptions, multistart)

    return multistart


def _validate_attribute_types_and_values(options: MultistartOptions) -> None:
    # Runtime validation of a MultistartOptions instance; every failure raises
    # InvalidMultistartError with a message naming the offending attribute.
    if options.n_samples is not None and (
        not isinstance(options.n_samples, int) or options.n_samples < 1
    ):
        raise InvalidMultistartError(
            f"Invalid number of samples: {options.n_samples}. Number of samples "
            "must be a positive integer or None."
        )

    if options.stopping_maxopt is not None and (
        not isinstance(options.stopping_maxopt, int) or options.stopping_maxopt < 0
    ):
        raise InvalidMultistartError(
            f"Invalid number of optimizations: {options.stopping_maxopt}. Number of "
            "optimizations must be a positive integer or None."
        )

    if (
        options.n_samples is not None
        and options.stopping_maxopt is not None
        and options.n_samples < options.stopping_maxopt
    ):
        raise InvalidMultistartError(
            f"Invalid number of samples: {options.n_samples}. Number of samples "
            "must be at least as large as the number of optimizations."
        )

    if options.sampling_distribution not in ("uniform", "triangular"):
        raise InvalidMultistartError(
            f"Invalid sampling distribution: {options.sampling_distribution}. Sampling "
            f"distribution must be one of ('uniform', 'triangular')."
        )

    if options.sampling_method not in ("sobol", "random", "halton", "latin_hypercube"):
        raise InvalidMultistartError(
            f"Invalid sampling method: {options.sampling_method}. Sampling method "
            f"must be one of ('sobol', 'random', 'halton', 'latin_hypercube')."
        )

    if not isinstance(options.sample, Sequence | None):
        raise InvalidMultistartError(
            f"Invalid sample: {options.sample}. Sample must be a sequence of "
            "parameters."
        )

    if not callable(
        options.mixing_weight_method
    ) and options.mixing_weight_method not in ("tiktak", "linear"):
        raise InvalidMultistartError(
            f"Invalid mixing weight method: {options.mixing_weight_method}. Mixing "
            "weight method must be Callable or one of ('tiktak', 'linear')."
        )

    if (
        not isinstance(options.mixing_weight_bounds, tuple)
        or len(options.mixing_weight_bounds) != 2
        or not set(type(x) for x in options.mixing_weight_bounds) <= {int, float}
    ):
        raise InvalidMultistartError(
            f"Invalid mixing weight bounds: {options.mixing_weight_bounds}. Mixing "
            "weight bounds must be a tuple of two numbers."
        )

    if options.convergence_xtol_rel is not None and (
        not isinstance(options.convergence_xtol_rel, int | float)
        or options.convergence_xtol_rel < 0
    ):
        # NOTE(review): the two string fragments join without a space, so the
        # message reads "tolerance:<value>" — consider adding a space.
        raise InvalidMultistartError(
            "Invalid relative params tolerance:"
            f"{options.convergence_xtol_rel}. Relative params "
            "tolerance must be a number."
        )

    if (
        not isinstance(options.convergence_max_discoveries, int | float)
        or options.convergence_max_discoveries < 1
    ):
        raise InvalidMultistartError(
            f"Invalid max discoveries: {options.convergence_max_discoveries}. Max "
            "discoveries must be a positive integer or infinity."
        )

    if not isinstance(options.n_cores, int) or options.n_cores < 1:
        raise InvalidMultistartError(
            f"Invalid number of cores: {options.n_cores}. Number of cores "
            "must be a positive integer."
        )

    try:
        # A batch evaluator is valid exactly if it can be processed.
        process_batch_evaluator(options.batch_evaluator)
    except Exception as e:
        raise InvalidMultistartError(
            f"Invalid batch evaluator: {options.batch_evaluator}."
        ) from e

    if options.batch_size is not None and (
        not isinstance(options.batch_size, int) or options.batch_size < options.n_cores
    ):
        raise InvalidMultistartError(
            f"Invalid batch size: {options.batch_size}. Batch size "
            "must be a positive integer larger than n_cores, or None."
        )

    if not isinstance(options.seed, int | np.random.Generator | None):
        raise InvalidMultistartError(
            f"Invalid seed: {options.seed}. Seed "
            "must be an integer, a numpy random generator, or None."
        )

    if options.error_handling is not None and options.error_handling not in (
        "raise",
        "continue",
    ):
        raise InvalidMultistartError(
            f"Invalid error handling: {options.error_handling}. Error handling must be "
            "'raise' or 'continue'."
        )


# ======================================================================================
# Internal Options
# ======================================================================================


def _tiktak_weights(
    iteration: int, n_iterations: int, min_weight: float, max_weight: float
) -> float:
    # TikTak weight: square-root schedule, clipped to [min_weight, max_weight].
    return np.clip(np.sqrt(iteration / n_iterations), min_weight, max_weight)


def _linear_weights(
    iteration: int, n_iterations: int, min_weight: float, max_weight: float
) -> float:
    # Linear interpolation between min_weight and max_weight over the iterations.
    unscaled = iteration / n_iterations
    span = max_weight - min_weight
    return min_weight + unscaled * span


# Maps the public `mixing_weight_method` literals to their implementations.
WEIGHT_FUNCTIONS = {
    "tiktak": _tiktak_weights,
    "linear": _linear_weights,
}


@dataclass(frozen=True)
class InternalMultistartOptions:
    """Multistart options used internally in optimagic.

    Compared to `MultistartOptions`, this data class has stricter types and combines
    some of the attributes. It is generated at runtime using a `MultistartOptions`
    instance and the function `get_internal_multistart_options_from_public`.
    """

    n_samples: int
    weight_func: Callable[[int, int], float]
    convergence_xtol_rel: float
    convergence_max_discoveries: int
    sampling_distribution: Literal["uniform", "triangular"]
    sampling_method: Literal["sobol", "random", "halton", "latin_hypercube"]
    sample: NDArray[np.float64] | None
    seed: int | np.random.Generator | None
    n_cores: int
    batch_evaluator: BatchEvaluator
    batch_size: int
    error_handling: Literal["raise", "continue"]
    stopping_maxopt: int

    def __post_init__(self) -> None:
        # Internal consistency checks; all defaults have been resolved by now,
        # so None is no longer allowed for these attributes.
        must_be_at_least_1 = [
            "n_samples",
            "stopping_maxopt",
            "n_cores",
            "batch_size",
            "convergence_max_discoveries",
        ]

        for attr in must_be_at_least_1:
            if getattr(self, attr) < 1:
                raise InvalidMultistartError(f"{attr} must be at least 1.")

        if self.batch_size < self.n_cores:
            raise InvalidMultistartError("batch_size must be at least n_cores.")

        if self.convergence_xtol_rel < 0:
            raise InvalidMultistartError("convergence_xtol_rel must be at least 0.")


def get_internal_multistart_options_from_public(
    options: MultistartOptions,
    params: PyTree,
    params_to_internal: Callable[[PyTree], NDArray[np.float64]],
) -> InternalMultistartOptions:
    """Get internal multistart options from public multistart options.

    Args:
        options: The pre-processed multistart options.
        params: The parameters of the optimization problem.
        params_to_internal: A function that converts parameters to internal
            parameters.

    Returns:
        InternalMultistartOptions: The updated options with runtime defaults.

    """
    x = params_to_internal(params)

    if options.sample is not None:
        # Convert a user-provided sample of pytrees to internal parameter vectors.
        sample = np.array([params_to_internal(x) for x in list(options.sample)])
        n_samples = len(options.sample)
    else:
        sample = None
        n_samples = options.n_samples  # type: ignore

    batch_size = options.n_cores if options.batch_size is None else options.batch_size
    batch_evaluator = process_batch_evaluator(options.batch_evaluator)

    if callable(options.mixing_weight_method):
        weight_func = options.mixing_weight_method
    else:
        _weight_method = WEIGHT_FUNCTIONS[options.mixing_weight_method]
        # Freeze the bounds so the internal weight function only takes
        # (iteration, n_iterations).
        weight_func = partial(
            _weight_method,
            min_weight=options.mixing_weight_bounds[0],
            max_weight=options.mixing_weight_bounds[1],
        )

    if n_samples is None:
        if options.stopping_maxopt is None:
            # Default: 100 sample points per internal parameter.
            n_samples = 100 * len(x)
        else:
            n_samples = 10 * options.stopping_maxopt

    if options.share_optimization is None:
        share_optimization = 0.1
    else:
        share_optimization = options.share_optimization

    if options.stopping_maxopt is None:
        stopping_maxopt = max(1, int(share_optimization * n_samples))
    else:
        stopping_maxopt = options.stopping_maxopt

    # Set defaults resulting from deprecated attributes
    if options.error_handling is not None:
        error_handling = options.error_handling
    else:
        error_handling = "continue"

    if options.convergence_xtol_rel is not None:
        convergence_xtol_rel = options.convergence_xtol_rel
    else:
        convergence_xtol_rel = 0.01

    return InternalMultistartOptions(
        # Attributes taken directly from MultistartOptions
        convergence_max_discoveries=options.convergence_max_discoveries,
        n_cores=options.n_cores,
        sampling_distribution=options.sampling_distribution,
        sampling_method=options.sampling_method,
        seed=options.seed,
        # Updated attributes
        sample=sample,
        n_samples=n_samples,
        weight_func=weight_func,
        error_handling=error_handling,
        convergence_xtol_rel=convergence_xtol_rel,
        stopping_maxopt=stopping_maxopt,
        batch_evaluator=batch_evaluator,
        batch_size=batch_size,
    )


================================================
FILE:
src/optimagic/optimization/optimization_logging.py
================================================
from typing import Any, cast

from optimagic.logging.logger import LogStore
from optimagic.logging.types import StepResult, StepStatus


def log_scheduled_steps_and_get_ids(
    steps: list[dict[str, Any]], logger: LogStore | None
) -> list[int]:
    """Add scheduled steps to the steps table of the database and get their ids.

    The ids are only determined once the steps are written to the database and the
    ids of all previously existing steps are known.

    Args:
        steps: List of dicts with entries for the steps table.
        logger: The log store to write to, or None if logging is disabled.

    Returns:
        list: List of integers with the step ids.

    """
    default_row = {"status": StepStatus.SCHEDULED.value}
    if logger:
        for row in steps:
            # Merge defaults with the row; row entries win on key collisions.
            data = StepResult(**{**default_row, **row})
            logger.step_store.insert(data)

        # The database assigns rowids, so read them back after inserting.
        last_steps = logger.step_store.select_last_rows(len(steps))
        step_ids = cast(list[int], [row.rowid for row in last_steps])
    else:
        # Without logging, fall back to positional ids.
        step_ids = list(range(len(steps)))

    return step_ids


================================================
FILE: src/optimagic/optimization/optimize.py
================================================
"""Public functions for optimization.

This module defines the public functions `maximize` and `minimize` that will be called
by users.

Internally, `maximize` and `minimize` just call `create_optimization_problem` with all
arguments and add the `direction`. In `create_optimization_problem`, the user input is
consolidated and converted to stricter types. The resulting `OptimizationProblem` is
then passed to `_optimize` which handles the optimization logic.

`_optimize` processes the optimization problem and performs the actual optimization.
"""

from __future__ import annotations

from pathlib import Path
from typing import Any, Callable, Sequence, Type, cast

import numpy as np
from scipy.optimize import Bounds as ScipyBounds

from optimagic.batch_evaluators import process_batch_evaluator
from optimagic.constraints import Constraint
from optimagic.differentiation.numdiff_options import NumdiffOptions, NumdiffOptionsDict
from optimagic.exceptions import (
    IncompleteBoundsError,
    InvalidFunctionError,
)
from optimagic.logging.logger import LogReader, LogStore
from optimagic.logging.types import ProblemInitialization
from optimagic.optimization.algorithm import Algorithm
from optimagic.optimization.create_optimization_problem import (
    OptimizationProblem,
    create_optimization_problem,
)
from optimagic.optimization.error_penalty import get_error_penalty_function
from optimagic.optimization.fun_value import FunctionValue
from optimagic.optimization.internal_optimization_problem import (
    InternalBounds,
    InternalOptimizationProblem,
)
from optimagic.optimization.multistart import (
    run_multistart_optimization,
)
from optimagic.optimization.multistart_options import (
    MultistartOptions,
    MultistartOptionsDict,
    get_internal_multistart_options_from_public,
)
from optimagic.optimization.optimization_logging import log_scheduled_steps_and_get_ids
from optimagic.optimization.optimize_result import OptimizeResult
from optimagic.optimization.process_results import (
    ExtraResultFields,
    process_multistart_result,
    process_single_result,
)
from optimagic.parameters.bounds import Bounds
from optimagic.parameters.conversion import (
    get_converter,
)
from optimagic.parameters.nonlinear_constraints import process_nonlinear_constraints
from optimagic.parameters.scaling import ScalingOptions, ScalingOptionsDict
from optimagic.typing import (
    AggregationLevel,
    Direction,
    ErrorHandling,
    ErrorHandlingLiteral,
    NonNegativeFloat,
    PyTree,
)

# Type aliases for the public signatures of maximize/minimize.
FunType = Callable[..., float | PyTree | FunctionValue]
AlgorithmType = str | Algorithm | Type[Algorithm]
ConstraintsType = Constraint | list[Constraint] | dict[str, Any] | list[dict[str, Any]]
JacType = Callable[..., PyTree]
FunAndJacType = Callable[..., tuple[float | PyTree | FunctionValue, PyTree]]
HessType = Callable[..., PyTree]
# TODO: refine this type
CallbackType = Callable[..., Any]

# Deprecated criterion-based aliases.
CriterionType = Callable[..., float | dict[str, Any]]
CriterionAndDerivativeType = Callable[..., tuple[float | dict[str, Any], PyTree]]

# NOTE(review): import placed after the type aliases in the original; kept as-is.
from optimagic.logging.logger import LogOptions


def maximize(
    fun: FunType | CriterionType | None = None,
    params: PyTree | None = None,
    algorithm: AlgorithmType | None = None,
    *,
    bounds: Bounds | ScipyBounds | Sequence[tuple[float, float]] | None = None,
    constraints: ConstraintsType | None = None,
    fun_kwargs: dict[str, Any] | None = None,
    algo_options: dict[str, Any] | None = None,
    jac: JacType | list[JacType] | None = None,
    jac_kwargs: dict[str, Any] | None = None,
    fun_and_jac: FunAndJacType | CriterionAndDerivativeType | None = None,
    fun_and_jac_kwargs: dict[str, Any] | None = None,
    numdiff_options: NumdiffOptions | NumdiffOptionsDict | None = None,
    # TODO: add typed-dict support?
    logging: bool | str | Path | LogOptions | dict[str, Any] | None = None,
    error_handling: ErrorHandling | ErrorHandlingLiteral = ErrorHandling.RAISE,
    error_penalty: dict[str, float] | None = None,
    scaling: bool | ScalingOptions | ScalingOptionsDict = False,
    multistart: bool | MultistartOptions | MultistartOptionsDict = False,
    collect_history: bool = True,
    skip_checks: bool = False,
    # scipy aliases
    x0: PyTree | None = None,
    method: str | None = None,
    args: tuple[Any] | None = None,
    # scipy arguments that are not yet supported
    hess: HessType | None = None,
    hessp: HessType | None = None,
    callback: CallbackType | None = None,
    # scipy arguments that will never be supported
    options: dict[str, Any] | None = None,
    tol: NonNegativeFloat | None = None,
    # deprecated arguments
    criterion: CriterionType | None = None,
    criterion_kwargs: dict[str, Any] | None = None,
    derivative: JacType | None = None,
    derivative_kwargs: dict[str, Any] | None = None,
    criterion_and_derivative: CriterionAndDerivativeType | None = None,
    criterion_and_derivative_kwargs: dict[str, Any] | None = None,
    log_options: dict[str, Any] | None = None,
    lower_bounds: PyTree | None = None,
    upper_bounds: PyTree | None = None,
    soft_lower_bounds: PyTree | None = None,
    soft_upper_bounds: PyTree | None = None,
    scaling_options: dict[str, Any] | None = None,
    multistart_options: dict[str, Any] | None = None,
) -> OptimizeResult:
    """Maximize fun using algorithm subject to constraints.

    Args:
        fun: The objective function of a scalar, least-squares or likelihood
            optimization problem. Non-scalar objective functions have to be marked
            with the `mark.likelihood` or `mark.least_squares` decorators. `fun`
            maps params and fun_kwargs to an objective value. See :ref:`how-to-fun`
            for details and examples.
        params: The start parameters for the optimization. Params can be numpy
            arrays, dictionaries, pandas.Series, pandas.DataFrames, NamedTuples,
            floats, lists, and any nested combination thereof. See :ref:`params`
            for details and examples.
        algorithm: The optimization algorithm to use. Can be a string, subclass of
            :class:`optimagic.Algorithm` or an instance of a subclass of
            :class:`optimagic.Algorithm`. For guidelines on how to choose an
            algorithm see :ref:`how-to-select-algorithms`. For examples of
            specifying and configuring algorithms see :ref:`specify-algorithm`.
        bounds: Lower and upper bounds on the parameters. The most general and
            preferred way to specify bounds is an :class:`optimagic.Bounds` object
            that collects lower, upper, soft_lower and soft_upper bounds. The soft
            bounds are used for sampling based optimizers but are not enforced
            during optimization. Each bound type mirrors the structure of params.
            See :ref:`how-to-bounds` for details and examples. If params is a flat
            numpy array, you can also provide bounds via any format that is
            supported by scipy.optimize.minimize.
        constraints: Constraints for the optimization problem. Constraints can be
            specified as a single :class:`optimagic.Constraint` object, a list of
            Constraint objects. For details and examples check :ref:`constraints`.
        fun_kwargs: Additional keyword arguments for the objective function.
        algo_options: Additional options for the optimization algorithm.
            `algo_options` is an alternative to configuring algorithm objects
            directly. See :ref:`list_of_algorithms` for supported options of each
            algorithm.
        jac: The first derivative of `fun`. Providing a closed form derivative can
            be a great way to speed up your optimization. The easiest way to get a
            derivative for your objective function are autodiff frameworks like
            JAX. For details and examples see :ref:`how-to-jac`.
        jac_kwargs: Additional keyword arguments for `jac`.
        fun_and_jac: A function that returns both the objective value and the
            derivative. This can be used to exploit synergies in the calculation
            of the function value and its derivative. For details and examples see
            :ref:`how-to-jac`.
        fun_and_jac_kwargs: Additional keyword arguments for `fun_and_jac`.
        numdiff_options: Options for numerical differentiation. Can be a
            dictionary or an instance of :class:`optimagic.NumdiffOptions`.
        logging: If None, no logging is used. If a str or pathlib.Path is
            provided, it is interpreted as path to an sqlite3 file (which
            typically has the file extension ``.db``). If the file does not
            exist, it will be created, and the optimization history will be
            stored in that database. For more customization, provide LogOptions.
            For details and examples see :ref:`how-to-logging`.
        error_handling: If "raise" or ErrorHandling.RAISE, exceptions that occur
            during the optimization are raised and the optimization is stopped. If
            "continue" or ErrorHandling.CONTINUE, exceptions are caught and the
            function value and its derivative are replaced by penalty values. The
            penalty values are constructed such that the optimizer is guided back
            towards the start parameters until a feasible region is reached and
            then continues the optimization from there. For details see
            :ref:`how-to-errors`.
        error_penalty: A dictionary with the keys "slope" and "constant" that
            influences the magnitude of the penalty values. For maximization
            problems both should be negative. For details see :ref:`how-to-errors`.
        scaling: If None or False, the parameter space is not rescaled. If True, a
            heuristic is used to improve the conditioning of the optimization
            problem. To choose which heuristic is used and to customize the
            scaling, provide a dictionary or an instance of
            :class:`optimagic.ScalingOptions`. For details and examples see
            :ref:`scaling`.
        multistart: If None or False, no multistart approach is used. If True, the
            optimization is restarted from multiple starting points. Note that
            this requires finite bounds or soft bounds for all parameters. To
            customize the multistart approach, provide a dictionary or an instance
            of :class:`optimagic.MultistartOptions`. For details and examples see
            :ref:`how-to-multistart`.
        collect_history: If True, the optimization history is collected and
            returned in the OptimizeResult. This is required to create
            `criterion_plot` or `params_plot` from an OptimizeResult.
        skip_checks: If True, some checks are skipped to speed up the
            optimization. This is only relevant if your objective function is very
            fast, i.e. runs in a few microseconds.
        x0: Alias for params for scipy compatibility.
        method: Alternative to algorithm for scipy compatibility. With `method`
            you can select scipy optimizers via their original scipy name.
        args: Alternative to fun_kwargs for scipy compatibility.
        hess: Not yet supported.
        hessp: Not yet supported.
        callback: Not yet supported.
        options: Not yet supported.
        tol: Not yet supported.
        criterion: Deprecated. Use fun instead.
        criterion_kwargs: Deprecated. Use fun_kwargs instead.
        derivative: Deprecated. Use jac instead.
        derivative_kwargs: Deprecated. Use jac_kwargs instead.
        criterion_and_derivative: Deprecated. Use fun_and_jac instead.
        criterion_and_derivative_kwargs: Deprecated. Use fun_and_jac_kwargs
            instead.
        log_options: Deprecated. Use logging instead.
        lower_bounds: Deprecated. Use bounds instead.
        upper_bounds: Deprecated. Use bounds instead.
        soft_lower_bounds: Deprecated. Use bounds instead.
        soft_upper_bounds: Deprecated. Use bounds instead.
        scaling_options: Deprecated. Use scaling instead.
        multistart_options: Deprecated. Use multistart instead.

    Returns:
        The optimization result.

    """
    # Consolidate all user input into a strictly-typed problem; the only thing
    # maximize adds over minimize is the direction.
    problem = create_optimization_problem(
        direction=Direction.MAXIMIZE,
        fun=fun,
        params=params,
        bounds=bounds,
        algorithm=algorithm,
        fun_kwargs=fun_kwargs,
        constraints=constraints,
        algo_options=algo_options,
        jac=jac,
        jac_kwargs=jac_kwargs,
        fun_and_jac=fun_and_jac,
        fun_and_jac_kwargs=fun_and_jac_kwargs,
        numdiff_options=numdiff_options,
        logging=logging,
        log_options=log_options,
        error_handling=error_handling,
        error_penalty=error_penalty,
        scaling=scaling,
        multistart=multistart,
        collect_history=collect_history,
        skip_checks=skip_checks,
        # scipy aliases
        x0=x0,
        method=method,
        args=args,
        # scipy arguments that are not yet supported
        hess=hess,
        hessp=hessp,
        callback=callback,
        # scipy arguments that will never be supported
        options=options,
        tol=tol,
        # deprecated arguments
        criterion=criterion,
        criterion_kwargs=criterion_kwargs,
        derivative=derivative,
        derivative_kwargs=derivative_kwargs,
        criterion_and_derivative=criterion_and_derivative,
        criterion_and_derivative_kwargs=criterion_and_derivative_kwargs,
        lower_bounds=lower_bounds,
        upper_bounds=upper_bounds,
        soft_lower_bounds=soft_lower_bounds,
        soft_upper_bounds=soft_upper_bounds,
        scaling_options=scaling_options,
        multistart_options=multistart_options,
    )
    return _optimize(problem)


def minimize(
    fun: FunType | CriterionType | None = None,
    params: PyTree | None = None,
    algorithm: AlgorithmType | None = None,
    *,
    bounds: Bounds | ScipyBounds | Sequence[tuple[float, float]] | None = None,
    constraints: ConstraintsType | None = None,
    fun_kwargs: dict[str, Any] | None = None,
    algo_options: dict[str, Any] | None = None,
    jac: JacType | list[JacType] | None = None,
    jac_kwargs: dict[str, Any] | None = None,
    fun_and_jac: FunAndJacType | CriterionAndDerivativeType | None = None,
    fun_and_jac_kwargs: dict[str, Any] | None = None,
    numdiff_options: NumdiffOptions | NumdiffOptionsDict | None = None,
    # TODO: add typed-dict support?
logging: bool | str | Path | LogOptions | dict[str, Any] | None = None, error_handling: ErrorHandling | ErrorHandlingLiteral = ErrorHandling.RAISE, error_penalty: dict[str, float] | None = None, scaling: bool | ScalingOptions | ScalingOptionsDict = False, multistart: bool | MultistartOptions | MultistartOptionsDict = False, collect_history: bool = True, skip_checks: bool = False, # scipy aliases x0: PyTree | None = None, method: str | None = None, args: tuple[Any] | None = None, # scipy arguments that are not yet supported hess: HessType | None = None, hessp: HessType | None = None, callback: CallbackType | None = None, # scipy arguments that will never be supported options: dict[str, Any] | None = None, tol: NonNegativeFloat | None = None, # deprecated arguments criterion: CriterionType | None = None, criterion_kwargs: dict[str, Any] | None = None, derivative: JacType | None = None, derivative_kwargs: dict[str, Any] | None = None, criterion_and_derivative: CriterionAndDerivativeType | None = None, criterion_and_derivative_kwargs: dict[str, Any] | None = None, log_options: dict[str, Any] | None = None, lower_bounds: PyTree | None = None, upper_bounds: PyTree | None = None, soft_lower_bounds: PyTree | None = None, soft_upper_bounds: PyTree | None = None, scaling_options: dict[str, Any] | None = None, multistart_options: dict[str, Any] | None = None, ) -> OptimizeResult: """Minimize criterion using algorithm subject to constraints. Args: fun: The objective function of a scalar, least-squares or likelihood optimization problem. Non-scalar objective functions have to be marked with the `mark.likelihood` or `mark.least_squares` decorators. `fun` maps params and fun_kwargs to an objective value. See :ref:`how-to-fun` for details and examples. params: The start parameters for the optimization. Params can be numpy arrays, dictionaries, pandas.Series, pandas.DataFrames, NamedTuples, floats, lists, and any nested combination thereof. 
See :ref:`params` for details and examples. algorithm: The optimization algorithm to use. Can be a string, subclass of :class:`optimagic.Algorithm` or an instance of a subclass of :class:`optimagic.Algorithm`. For guidelines on how to choose an algorithm see :ref:`how-to-select-algorithms`. For examples of specifying and configuring algorithms see :ref:`specify-algorithm`. bounds: Lower and upper bounds on the parameters. The most general and preferred way to specify bounds is an :class:`optimagic.Bounds` object that collects lower, upper, soft_lower and soft_upper bounds. The soft bounds are used for sampling based optimizers but are not enforced during optimization. Each bound type mirrors the structure of params. See :ref:`how-to-bounds` for details and examples. If params is a flat numpy array, you can also provide bounds via any format that is supported by scipy.optimize.minimize. constraints: Constraints for the optimization problem. Constraints can be specified as a single :class:`optimagic.Constraint` object, a list of Constraint objects. For details and examples check :ref:`constraints`. fun_kwargs: Additional keyword arguments for the objective function. algo_options: Additional options for the optimization algorithm. `algo_options` is an alternative to configuring algorithm objects directly. See :ref:`list_of_algorithms` for supported options of each algorithm. jac: The first derivative of `fun`. Providing a closed form derivative can be a great way to speed up your optimization. The easiest way to get a derivative for your objective function are autodiff frameworks like JAX. For details and examples see :ref:`how-to-jac`. jac_kwargs: Additional keyword arguments for `jac`. fun_and_jac: A function that returns both the objective value and the derivative. This can be used do exploit synergies in the calculation of the function value and its derivative. For details and examples see :ref:`how-to-jac`. 
fun_and_jac_kwargs: Additional keyword arguments for `fun_and_jac`. numdiff_options: Options for numerical differentiation. Can be a dictionary or an instance of :class:`optimagic.NumdiffOptions`. logging: If None, no logging is used. If a str or pathlib.Path is provided, it is interpreted as path to an sqlite3 file (which typically has the file extension ``.db``. If the file does not exist, it will be created. and the optimization history will be stored in that database. For more customization, provide LogOptions. For details and examples see :ref:`how-to-logging`. error_handling: If "raise" or ErrorHandling.RAISE, exceptions that occur during the optimization are raised and the optimization is stopped. If "continue" or ErrorHandling.CONTINUE, exceptions are caught and the function value and its derivative are replaced by penalty values. The penalty values are constructed such that the optimizer is guided back towards the start parameters until a feasible region is reached and then continues the optimization from there. For details see :ref:`how-to-errors`. error_penalty: A dictionary with the keys "slope" and "constant" that influences the magnitude of the penalty values. For minimization problems both should be positive. For details see :ref:`how-to-errors`. scaling: If None or False, the parameter space is not rescaled. If True, a heuristic is used to improve the conditioning of the optimization problem. To choose which heuristic is used and to customize the scaling, provide a dictionary or an instance of :class:`optimagic.ScalingOptions`. For details and examples see :ref:`scaling`. multistart: If None or False, no multistart approach is used. If True, the optimization is restarted from multiple starting points. Note that this requires finite bounds or soft bounds for all parameters. To customize the multistart approach, provide a dictionary or an instance of :class:`optimagic.MultistartOptions`. For details and examples see :ref:`how-to-multistart`. 
collect_history: If True, the optimization history is collected and returned in the OptimizeResult. This is required to create `criterion_plot` or `params_plot` from an OptimizeResult. skip_checks: If True, some checks are skipped to speed up the optimization. This is only relevant if your objective function is very fast, i.e. runs in a few microseconds. x0: Alias for params for scipy compatibility. method: Alternative to algorithm for scipy compatibility. With `method` you can select scipy optimizers via their original scipy name. args: Alternative to fun_kwargs for scipy compatibility. hess: Not yet supported. hessp: Not yet supported. callback: Not yet supported. options: Not yet supported. tol: Not yet supported. criterion: Deprecated. Use fun instead. criterion_kwargs: Deprecated. Use fun_kwargs instead. derivative: Deprecated. Use jac instead. derivative_kwargs: Deprecated. Use jac_kwargs instead. criterion_and_derivative: Deprecated. Use fun_and_jac instead. criterion_and_derivative_kwargs: Deprecated. Use fun_and_jac_kwargs instead. lower_bounds: Deprecated. Use bounds instead. upper_bounds: Deprecated. Use bounds instead. soft_lower_bounds: Deprecated. Use bounds instead. soft_upper_bounds: Deprecated. Use bounds instead. scaling_options: Deprecated. Use scaling instead. multistart_options: Deprecated. Use multistart instead. 
""" problem = create_optimization_problem( direction=Direction.MINIMIZE, fun=fun, params=params, algorithm=algorithm, bounds=bounds, fun_kwargs=fun_kwargs, constraints=constraints, algo_options=algo_options, jac=jac, jac_kwargs=jac_kwargs, fun_and_jac=fun_and_jac, fun_and_jac_kwargs=fun_and_jac_kwargs, numdiff_options=numdiff_options, logging=logging, error_handling=error_handling, error_penalty=error_penalty, scaling=scaling, multistart=multistart, collect_history=collect_history, skip_checks=skip_checks, # scipy aliases x0=x0, method=method, args=args, # scipy arguments that are not yet supported hess=hess, hessp=hessp, callback=callback, # scipy arguments that will never be supported options=options, tol=tol, # deprecated arguments criterion=criterion, criterion_kwargs=criterion_kwargs, derivative=derivative, derivative_kwargs=derivative_kwargs, criterion_and_derivative=criterion_and_derivative, criterion_and_derivative_kwargs=criterion_and_derivative_kwargs, lower_bounds=lower_bounds, log_options=log_options, upper_bounds=upper_bounds, soft_lower_bounds=soft_lower_bounds, soft_upper_bounds=soft_upper_bounds, scaling_options=scaling_options, multistart_options=multistart_options, ) return _optimize(problem) def _optimize(problem: OptimizationProblem) -> OptimizeResult: """Solve an optimization problem.""" # ================================================================================== # Split constraints into nonlinear and reparametrization parts # ================================================================================== constraints = problem.constraints nonlinear_constraints = [c for c in constraints if c["type"] == "nonlinear"] if nonlinear_constraints: if not problem.algorithm.algo_info.supports_nonlinear_constraints: raise ValueError( f"Algorithm {problem.algorithm.name} does not support " "nonlinear constraints." 
            )

    # the following constraints will be handled via reparametrization
    constraints = [c for c in constraints if c["type"] != "nonlinear"]

    # ==================================================================================
    # Do first evaluation of user provided functions
    # ==================================================================================
    first_crit_eval = problem.fun_eval

    # do first derivative evaluation (if given)
    if problem.jac is not None:
        try:
            first_deriv_eval = problem.jac(problem.params)
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception as e:
            msg = "Error while evaluating derivative at start params."
            raise InvalidFunctionError(msg) from e

    if problem.fun_and_jac is not None:
        try:
            first_crit_and_deriv_eval = problem.fun_and_jac(problem.params)
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception as e:
            msg = "Error while evaluating criterion_and_derivative at start params."
            raise InvalidFunctionError(msg) from e

    # prefer the dedicated jac evaluation over the combined one
    if problem.jac is not None:
        used_deriv = first_deriv_eval
    elif problem.fun_and_jac is not None:
        used_deriv = first_crit_and_deriv_eval[1]
    else:
        used_deriv = None

    # ==================================================================================
    # Get the converter (for tree flattening, constraints and scaling)
    # ==================================================================================
    converter, internal_params = get_converter(
        params=problem.params,
        constraints=constraints,
        bounds=problem.bounds,
        func_eval=first_crit_eval.value,
        solver_type=problem.algorithm.algo_info.solver_type,
        scaling=problem.scaling,
        derivative_eval=used_deriv,
        add_soft_bounds=problem.multistart is not None,
    )

    # ==================================================================================
    # initialize the log database
    # ==================================================================================
    logger: LogStore[Any, Any] | None

    if problem.logging:
        logger = LogStore.from_options(problem.logging)
        problem_data = ProblemInitialization(problem.direction, problem.params)
        logger.problem_store.insert(problem_data)
    else:
        logger = None

    # ==================================================================================
    # Strict checking if bounds are required and infinite values in bounds
    # ==================================================================================
    if problem.algorithm.algo_info.supports_bounds:
        bounds_missing = (
            internal_params.lower_bounds is None
            or internal_params.upper_bounds is None
        )

        # Check for infinite values in bounds arrays (only possible in mixed cases now)
        infinite_values_in_bounds = False
        if internal_params.lower_bounds is not None:
            infinite_values_in_bounds |= np.isinf(internal_params.lower_bounds).any()
        if internal_params.upper_bounds is not None:
            infinite_values_in_bounds |= np.isinf(internal_params.upper_bounds).any()

        # Case 1: Algorithm needs bounds but none provided
        if problem.algorithm.algo_info.needs_bounds and bounds_missing:
            raise IncompleteBoundsError(
                f"Algorithm {problem.algorithm.name} requires bounds for all "
                "parameters. Please provide finite lower and upper bounds."
            )

        # Case 2: Algorithm doesn't support infinite bounds but they are present
        if (
            not problem.algorithm.algo_info.supports_infinite_bounds
            and infinite_values_in_bounds
        ):
            raise IncompleteBoundsError(
                f"Algorithm {problem.algorithm.name} does not support infinite bounds. "
                "Please provide finite bounds for all parameters."
            )

    # ==================================================================================
    # Do some things that require internal parameters or bounds
    # ==================================================================================
    if converter.has_transforming_constraints and problem.multistart is not None:
        raise NotImplementedError(
            "multistart optimizations are not yet compatible with transforming "
            "constraints."
        )

    # get error penalty function
    error_penalty_func = get_error_penalty_function(
        start_x=internal_params.values,
        start_criterion=first_crit_eval,
        error_penalty=problem.error_penalty,
        solver_type=problem.algorithm.algo_info.solver_type,
        direction=problem.direction,
    )

    # process nonlinear constraints:
    internal_nonlinear_constraints = process_nonlinear_constraints(
        nonlinear_constraints=nonlinear_constraints,
        params=problem.params,
        bounds=problem.bounds,
        converter=converter,
        numdiff_options=problem.numdiff_options,
        skip_checks=problem.skip_checks,
    )

    x = internal_params.values
    internal_bounds = InternalBounds(
        lower=internal_params.lower_bounds,
        upper=internal_params.upper_bounds,
    )

    # ==================================================================================
    # Create a batch evaluator
    # ==================================================================================
    # TODO: Make batch evaluator an argument of maximize and minimize and move this
    # to create_optimization_problem
    batch_evaluator = process_batch_evaluator("joblib")

    # ==================================================================================
    # Create the InternalOptimizationProblem
    # ==================================================================================
    internal_problem = InternalOptimizationProblem(
        fun=problem.fun,
        jac=problem.jac,
        fun_and_jac=problem.fun_and_jac,
        converter=converter,
        solver_type=problem.algorithm.algo_info.solver_type,
        direction=problem.direction,
        bounds=internal_bounds,
        numdiff_options=problem.numdiff_options,
        error_handling=problem.error_handling,
        error_penalty_func=error_penalty_func,
        batch_evaluator=batch_evaluator,
        # TODO: Actually pass through linear constraints if possible
        linear_constraints=None,
        nonlinear_constraints=internal_nonlinear_constraints,
        logger=logger,
    )

    # ==================================================================================
    # Do actual optimization
    # ==================================================================================
    if problem.multistart is None:
        steps = [{"type": "optimization", "name": "optimization"}]

        # TODO: Actually use the step ids
        step_id = log_scheduled_steps_and_get_ids(  # noqa: F841
            steps=steps,
            logger=logger,
        )[0]

        raw_res = problem.algorithm.solve_internal_problem(internal_problem, x, step_id)

    else:
        multistart_options = get_internal_multistart_options_from_public(
            options=problem.multistart,
            params=problem.params,
            params_to_internal=converter.params_to_internal,
        )

        # multistart sampling uses the soft bounds, not the hard bounds
        sampling_bounds = InternalBounds(
            lower=internal_params.soft_lower_bounds,
            upper=internal_params.soft_upper_bounds,
        )

        raw_res = run_multistart_optimization(
            local_algorithm=problem.algorithm,
            internal_problem=internal_problem,
            x=x,
            sampling_bounds=sampling_bounds,
            options=multistart_options,
            logger=logger,
            error_handling=problem.error_handling,
        )

    # ==================================================================================
    # Process the result
    # ==================================================================================
    _scalar_start_criterion = cast(
        float, first_crit_eval.internal_value(AggregationLevel.SCALAR)
    )

    log_reader: LogReader[Any] | None

    extra_fields = ExtraResultFields(
        start_fun=_scalar_start_criterion,
        start_params=problem.params,
        algorithm=problem.algorithm.algo_info.name,
        direction=problem.direction,
        n_free=internal_params.free_mask.sum(),
    )

    if problem.multistart is None:
        res = process_single_result(
            raw_res=raw_res,
            converter=converter,
            solver_type=problem.algorithm.algo_info.solver_type,
            extra_fields=extra_fields,
        )
    else:
        res = process_multistart_result(
            raw_res=raw_res,
            converter=converter,
            solver_type=problem.algorithm.algo_info.solver_type,
            extra_fields=extra_fields,
        )

    if logger is not None:
        assert problem.logging is not None
        log_reader = LogReader.from_options(problem.logging)
    else:
        log_reader = None

    res.logger = log_reader

    return res


================================================
FILE:
src/optimagic/optimization/optimize_result.py
================================================
import warnings
from dataclasses import dataclass
from typing import Any, Dict, Optional

import numpy as np
import pandas as pd

from optimagic import deprecations
from optimagic.logging.logger import LogReader
from optimagic.optimization.history import History
from optimagic.shared.compat import pd_df_map
from optimagic.typing import PyTree
from optimagic.utilities import to_pickle


@dataclass
class OptimizeResult:
    """Optimization result object.

    **Attributes**

    Attributes:
        params: The optimal parameters.
        fun: The optimal criterion value.
        start_fun: The criterion value at the start parameters.
        start_params: The start parameters.
        algorithm: The algorithm used for the optimization.
        direction: Maximize or minimize.
        n_free: Number of free parameters.
        message: Message returned by the underlying algorithm.
        success: Whether the optimization was successful.
        n_fun_evals: Number of criterion evaluations.
        n_jac_evals: Number of derivative evaluations.
        n_hess_evals: Number of hessian evaluations.
        n_iterations: Number of iterations until termination.
        status: Status code, if reported by the underlying algorithm.
        jac: Derivative at the optimum, if reported by the underlying algorithm.
        hess: Hessian at the optimum, if reported by the underlying algorithm.
        hess_inv: Inverse of the Hessian at the optimum, if reported by the
            underlying algorithm.
        max_constraint_violation: Maximum constraint violation, if reported by
            the underlying algorithm.
        history: Optimization history.
        convergence_report: The convergence report.
        multistart_info: Multistart information.
        algorithm_output: Additional algorithm specific information.
        logger: Reader for the optimization log, if logging was enabled.

    """

    params: Any
    fun: float
    start_fun: float
    start_params: Any
    algorithm: str
    direction: str
    n_free: int
    message: str | None = None
    success: bool | None = None
    n_fun_evals: int | None = None
    n_jac_evals: int | None = None
    n_hess_evals: int | None = None
    n_iterations: int | None = None
    status: int | None = None
    jac: PyTree | None = None
    hess: PyTree | None = None
    hess_inv: PyTree | None = None
    max_constraint_violation: float | None = None
    history: History | None = None
    convergence_report: Dict | None = None
    multistart_info: Optional["MultistartInfo"] = None
    algorithm_output: Dict[str, Any] | None = None
    logger: LogReader | None = None

    # ==================================================================================
    # Deprecations
    # ==================================================================================

    @property
    def criterion(self) -> float:
        msg = "The criterion attribute is deprecated. Use the fun attribute instead."
        warnings.warn(msg, FutureWarning)
        return self.fun

    @property
    def start_criterion(self) -> float:
        msg = (
            "The start_criterion attribute is deprecated. Use the start_fun attribute "
            "instead."
        )
        warnings.warn(msg, FutureWarning)
        return self.start_fun

    @property
    def n_criterion_evaluations(self) -> int | None:
        msg = (
            "The n_criterion_evaluations attribute is deprecated. Use the n_fun_evals "
            "attribute instead."
        )
        warnings.warn(msg, FutureWarning)
        return self.n_fun_evals

    @property
    def n_derivative_evaluations(self) -> int | None:
        msg = (
            "The n_derivative_evaluations attribute is deprecated. Use the n_jac_evals "
            "attribute instead."
        )
        warnings.warn(msg, FutureWarning)
        return self.n_jac_evals

    # ==================================================================================
    # Scipy aliases
    # ==================================================================================

    @property
    def x(self) -> PyTree:
        # scipy name for the optimal parameters
        return self.params

    @property
    def x0(self) -> PyTree:
        # scipy name for the start parameters
        return self.start_params

    @property
    def nfev(self) -> int | None:
        # scipy name for the number of function evaluations
        return self.n_fun_evals

    @property
    def nit(self) -> int | None:
        # scipy name for the number of iterations
        return self.n_iterations

    @property
    def njev(self) -> int | None:
        # scipy name for the number of derivative evaluations
        return self.n_jac_evals

    @property
    def nhev(self) -> int | None:
        # scipy name for the number of hessian evaluations
        return self.n_hess_evals

    # Enable attribute access using dictionary-style notation for scipy compatibility
    def __getitem__(self, key):
        return getattr(self, key)

    def __repr__(self) -> str:
        """Build a human readable summary of the optimization result."""
        first_line = (
            f"{self.direction.title()} with {self.n_free} free parameters terminated"
        )

        if self.success is not None:
            snippet = "successfully" if self.success else "unsuccessfully"
            first_line += f" {snippet}"

        counters = [
            ("criterion evaluations", self.n_fun_evals),
            ("derivative evaluations", self.n_jac_evals),
            ("iterations", self.n_iterations),
        ]
        # drop counters that were not reported by the algorithm
        counters = [(n, v) for n, v in counters if v is not None]

        if counters:
            name, val = counters[0]
            counter_msg = f"after {val} {name}"
            if len(counters) >= 2:
                # join remaining counters as ", <v> <n>, ... and <v> <n>"
                for name, val in counters[1:-1]:
                    counter_msg += f", {val} {name}"
                name, val = counters[-1]
                counter_msg += f" and {val} {name}"
            first_line += f" {counter_msg}"

        first_line += "."

        if self.message:
            message = f"The {self.algorithm} algorithm reported: {self.message}"
        else:
            message = None

        if self.start_fun is not None and self.fun is not None:
            improvement = (
                f"The value of criterion improved from {self.start_fun} to {self.fun}."
            )
        else:
            improvement = None

        if self.convergence_report is not None:
            convergence = _format_convergence_report(
                self.convergence_report, self.algorithm
            )
        else:
            convergence = None

        # only include the sections that have content
        sections = [first_line, improvement, message, convergence]
        sections = [sec for sec in sections if sec is not None]

        msg = "\n\n".join(sections)

        return msg

    def to_pickle(self, path):
        """Save the OptimizeResult object to pickle.

        Args:
            path (str, pathlib.Path): A str or pathlib.path ending in .pkl or
                .pickle.

        """
        to_pickle(self, path=path)


@dataclass(frozen=True)
class MultistartInfo:
    """Information about the multistart optimization.

    Attributes:
        start_parameters: List of start parameters for each optimization.
        local_optima: List of optimization results.
        exploration_sample: List of parameters used for exploration.
        exploration_results: List of function values corresponding to
            exploration.
        n_optimizations: Number of local optimizations that were run.

    """

    start_parameters: list[PyTree]
    local_optima: list[OptimizeResult]
    exploration_sample: list[PyTree]
    exploration_results: list[float]

    def __getitem__(self, key):
        # dict-style access is deprecated; warn but keep it working
        deprecations.throw_dict_access_future_warning(key, obj_name=type(self).__name__)
        return getattr(self, key)

    @property
    def n_optimizations(self) -> int:
        return len(self.local_optima)


def _format_convergence_report(report, algorithm):
    """Render the convergence report dict as a human readable table."""
    report = pd.DataFrame.from_dict(report)
    columns = ["one_step", "five_steps"]

    table = pd_df_map(report[columns], _format_float).astype(str)

    # append a star rating to each formatted value
    for col in "one_step", "five_steps":
        table[col] = table[col] + _create_stars(report[col])

    table = table.to_string(justify="center")

    introduction = (
        f"Independent of the convergence criteria used by {algorithm}, "
        "the strength of convergence can be assessed by the following criteria:"
    )

    explanation = (
        "(***: change <= 1e-10, **: change <= 1e-8, *: change <= 1e-5. "
        "Change refers to a change between accepted steps. The first column only "
        "considers the last step. The second column considers the last five steps.)"
    )

    out = "\n\n".join([introduction, table, explanation])

    return out


def _create_stars(sr):
    # map change magnitudes to star ratings; padded labels keep the rendered
    # table columns aligned
    stars = pd.cut(
        sr,
        bins=[-np.inf, 1e-10, 1e-8, 1e-5, np.inf],
        labels=["***", "** ", "* ", " "],
    ).astype("str")

    return stars


def _format_float(number):
    """Round to four significant digits."""
    return f"{number:.4g}"


================================================
FILE: src/optimagic/optimization/process_results.py
================================================
from dataclasses import dataclass, replace
from typing import Any

import numpy as np

from optimagic.optimization.algorithm import InternalOptimizeResult
from optimagic.optimization.convergence_report import get_convergence_report
from optimagic.optimization.history import History
from optimagic.optimization.optimize_result import MultistartInfo, OptimizeResult
from optimagic.parameters.conversion import Converter
from optimagic.typing import AggregationLevel, Direction, EvalTask, PyTree
from optimagic.utilities import isscalar


@dataclass(frozen=True)
class ExtraResultFields:
    """Fields for OptimizeResult that are not part of InternalOptimizeResult."""

    start_fun: float
    start_params: PyTree
    algorithm: str
    direction: Direction
    n_free: int


def process_single_result(
    raw_res: InternalOptimizeResult,
    converter: Converter,
    solver_type: AggregationLevel,
    extra_fields: ExtraResultFields,
) -> OptimizeResult:
    """Process an internal optimizer result."""
    params = converter.params_from_internal(raw_res.x)
    # aggregate the raw function value to a scalar, depending on the solver type
    if isscalar(raw_res.fun):
        fun = float(raw_res.fun)
    elif solver_type == AggregationLevel.LIKELIHOOD:
        fun = float(np.sum(raw_res.fun))
    elif solver_type == AggregationLevel.LEAST_SQUARES:
        fun = np.dot(raw_res.fun, raw_res.fun)
    # NOTE(review): if raw_res.fun is non-scalar and solver_type is SCALAR, no
    # branch assigns `fun` and the code below raises UnboundLocalError — confirm
    # this combination cannot occur upstream.

    # internal optimization always minimizes; flip the sign back for maximization
    if extra_fields.direction == Direction.MAXIMIZE:
        fun = -fun

    if raw_res.history is not None:
        conv_report = get_convergence_report(raw_res.history)
    else:
        conv_report = None

    out = OptimizeResult(
        params=params,
        fun=fun,
        start_fun=extra_fields.start_fun,
start_params=extra_fields.start_params, algorithm=extra_fields.algorithm, direction=extra_fields.direction.value, n_free=extra_fields.n_free, message=raw_res.message, success=raw_res.success, n_fun_evals=raw_res.n_fun_evals, n_jac_evals=raw_res.n_jac_evals, n_hess_evals=raw_res.n_hess_evals, n_iterations=raw_res.n_iterations, status=raw_res.status, jac=raw_res.jac, hess=raw_res.hess, hess_inv=raw_res.hess_inv, max_constraint_violation=raw_res.max_constraint_violation, history=raw_res.history, algorithm_output=raw_res.info, convergence_report=conv_report, ) return out def process_multistart_result( raw_res: InternalOptimizeResult, converter: Converter, solver_type: AggregationLevel, extra_fields: ExtraResultFields, ) -> OptimizeResult: """Process results of internal optimizers. Args: res (dict): Results dictionary of an internal optimizer or multistart optimizer. """ if raw_res.multistart_info is None: raise ValueError("Multistart info is missing.") if isinstance(raw_res, str): res = _dummy_result_from_traceback(raw_res, extra_fields) else: res = process_single_result( raw_res=raw_res, converter=converter, solver_type=solver_type, extra_fields=extra_fields, ) info = _process_multistart_info( raw_res.multistart_info, converter=converter, solver_type=solver_type, extra_fields=extra_fields, ) # ============================================================================== # create a convergence report for the multistart optimization; This is not # the same as the convergence report for the individual local optimizations. 
# ============================================================================== report_history = History( direction=extra_fields.direction, fun=[opt.fun for opt in info.local_optima], params=[opt.params for opt in info.local_optima], start_time=len(info.local_optima) * [np.nan], stop_time=len(info.local_optima) * [np.nan], batches=list(range(len(info.local_optima))), task=len(info.local_optima) * [EvalTask.FUN], ) conv_report = get_convergence_report(report_history) res.convergence_report = conv_report res.algorithm = f"multistart_{res.algorithm}" res.n_iterations = _sum_or_none([opt.n_iterations for opt in info.local_optima]) res.n_fun_evals = _sum_or_none([opt.n_fun_evals for opt in info.local_optima]) res.n_jac_evals = _sum_or_none([opt.n_jac_evals for opt in info.local_optima]) res.multistart_info = info return res def _process_multistart_info( info: dict[str, Any], converter: Converter, solver_type: AggregationLevel, extra_fields: ExtraResultFields, ) -> MultistartInfo: # The `info` dictionary is obtained from the `multistart_info` field of the # InternalOptimizeResult returned by `run_multistart_optimization` function. 
starts = [converter.params_from_internal(x) for x in info["start_parameters"]] optima = [] for res, start in zip(info["local_optima"], starts, strict=False): replacements = { "start_params": start, "start_fun": None, } processed = process_single_result( res, converter=converter, solver_type=solver_type, extra_fields=replace(extra_fields, **replacements), ) optima.append(processed) sample = [converter.params_from_internal(x) for x in info["exploration_sample"]] if extra_fields.direction == Direction.MINIMIZE: exploration_res = info["exploration_results"] else: exploration_res = [-res for res in info["exploration_results"]] return MultistartInfo( start_parameters=starts, local_optima=optima, exploration_sample=sample, exploration_results=exploration_res, ) def _dummy_result_from_traceback( candidate: str, extra_fields: ExtraResultFields ) -> OptimizeResult: out = OptimizeResult( params=extra_fields.start_params, fun=extra_fields.start_fun, start_fun=extra_fields.start_fun, start_params=extra_fields.start_params, algorithm=extra_fields.algorithm, direction=extra_fields.direction.value, n_free=extra_fields.n_free, message=candidate, ) return out def _sum_or_none(summands: list[int | None | float]) -> int | None: if any(s is None for s in summands): out = None else: out = int(np.array(summands).sum()) return out ================================================ FILE: src/optimagic/optimization/scipy_aliases.py ================================================ import functools from optimagic.exceptions import InvalidFunctionError from optimagic.utilities import propose_alternatives def map_method_to_algorithm(method): implemented = { "Nelder-Mead": "scipy_neldermead", "Powell": "scipy_powell", "CG": "scipy_conjugate_gradient", "BFGS": "scipy_bfgs", "Newton-CG": "scipy_newton_cg", "L-BFGS-B": "scipy_lbfgsb", "TNC": "scipy_truncated_newton", "COBYLA": "scipy_cobyla", "SLSQP": "scipy_slsqp", "trust-constr": "scipy_trust_constr", } not_implemented = { "dogleg": "scipy_dogleg", 
"trust-ncg": "scipy_trust_ncg", "trust-exact": "scipy_trust_exact", "trust-krylov": "scipy_trust_krylov", "COBYQA": "scipy_cobyqa", } if method in implemented: algo = implemented[method] elif method in not_implemented: msg = ( f"The method {method} is not yet wrapped in optimagic. Create an issue on " "https://github.com/optimagic-dev/optimagic/ if you have urgent need " "for this method." ) raise NotImplementedError(msg) else: alt = propose_alternatives(method, list(implemented) + list(not_implemented)) msg = ( "method is an alias for algorithm to select the scipy optimizers under " f"their original name. {method} is not a valid scipy algorithm name. " f"Did you mean {alt}?" ) raise ValueError(msg) return algo def split_fun_and_jac(fun_and_jac, target="fun"): index = 0 if target == "fun" else 1 @functools.wraps(fun_and_jac) def fun(*args, **kwargs): raw = fun_and_jac(*args, **kwargs) try: out = raw[index] except TypeError as e: msg = ( "If you set `jac=True`, `fun` needs to return a tuple where the first " "entry is the value of your objective function and the second entry " "is its derivative." ) raise InvalidFunctionError(msg) from e return out return fun ================================================ FILE: src/optimagic/optimizers/__init__.py ================================================ ================================================ FILE: src/optimagic/optimizers/_pounders/__init__.py ================================================ ================================================ FILE: src/optimagic/optimizers/_pounders/_conjugate_gradient.py ================================================ """Implementation of the Conjugate Gradient algorithm.""" import numpy as np def minimize_trust_cg( model_gradient, model_hessian, trustregion_radius, *, gtol_abs=1e-8, gtol_rel=1e-6 ): """Minimize the quadratic subproblem via (standard) conjugate gradient. Solve the trust-region quadratic subproblem: min_x g.T @ x + 0.5 * x.T @ H @ x s.t. 
||x|| <= trustregion_radius

    approximately, where g denotes the gradient and H the hessian of the
    quadratic model (i.e. the linear terms and square_terms), respectively.

    Args:
        model_gradient (np.ndarray): 1d array of shape (n,) containing the
            gradient (i.e. linear terms) of the quadratic model.
        model_hessian (np.ndarray): 2d array of shape (n, n) containing the
            hessian (i.e .square terms) of the quadratic model.
        trustregion_radius (float): Radius of the trust-region.
        gtol_abs (float): Convergence tolerance for the absolute gradient norm.
        gtol_rel (float): Convergence tolerance for the relative gradient norm.

    Returns:
        np.ndarray: Solution vector of shape (n,).
    """
    n = len(model_gradient)
    max_iter = n * 2
    x_candidate = np.zeros(n)

    # For the quadratic model, the residual of H @ x + g at x = 0 is g itself.
    residual = model_gradient
    direction = -model_gradient

    gradient_norm = np.linalg.norm(residual)
    stop_tol = max(gtol_abs, gtol_rel * gradient_norm)

    for _ in range(max_iter):
        if gradient_norm <= stop_tol:
            break

        square_terms = direction.T @ model_hessian @ direction

        distance_to_boundary = _get_distance_to_trustregion_boundary(
            x_candidate, direction, trustregion_radius
        )

        # avoid divide by zero warning
        if square_terms > 0:
            step_size = (residual @ residual) / square_terms
        else:
            step_size = np.inf

        # Negative curvature, or the step would leave the trust-region:
        # move to the boundary and stop.
        if square_terms <= 0 or step_size > distance_to_boundary:
            x_candidate = x_candidate + distance_to_boundary * direction
            break

        x_candidate, residual, direction = _update_vectors_for_next_iteration(
            x_candidate, residual, direction, model_hessian, step_size
        )
        gradient_norm = np.linalg.norm(residual)

    return x_candidate


def _update_vectors_for_next_iteration(
    x_candidate, residual, direction, hessian, alpha
):
    """Update candidate, residual, and direction vectors for the next iteration.

    Args:
        x_candidate (np.ndarray): Candidate vector of shape (n,).
        residual (np.ndarray): Array of residuals of shape (n,). The residual
            vector is defined as `r = Ax - b`, where `A` denotes the hessian
            matrix and `b` the gradient vector of the quadratic trust-region
            subproblem.
`r` is equivalent to the first derivative of the quadratic subproblem.
        direction (np.ndarray): Direction vector of shape (n,).
        hessian (np.ndarray): 2d array of shape (n, n) with the model hessian.
        alpha (float): Conjugate gradient step size.

    Returns:
        (tuple) Tuple containing:
            - x_candidate (np.ndarray): Updated candidate vector of shape (n,).
            - residual (np.ndarray): Updated array of residuals of shape (n,).
            - direction (np.ndarray): Updated direction vector of shape (n,).
    """
    residual_old = residual

    x_candidate = x_candidate + alpha * direction
    residual = residual_old + alpha * (hessian @ direction)

    # Standard CG update: ratio of squared residual norms.
    beta = (residual @ residual) / (residual_old @ residual_old)
    direction = -residual + beta * direction

    return x_candidate, residual, direction


def _get_distance_to_trustregion_boundary(candidate, direction, radius):
    """Compute the distance of the candidate vector to trustregion boundary.

    The positive distance sigma is defined in Euclidean norm, as follows:

        || x + sigma * d || = radius

    where x denotes the candidate vector, and d the direction vector.

    Args:
        candidate (np.ndarray): Candidate vector of shape (n,).
        direction (np.ndarray): Direction vector of shape (n,).
        radius (float): Radius of the trust-region.

    Returns:
        float: The candidate vector's distance to the trustregion boundary.
    """
    cc = candidate @ candidate
    cd = candidate @ direction
    dd = direction @ direction

    # Positive root of the quadratic ||x + sigma * d||^2 = radius^2 in sigma.
    sigma = -cd + np.sqrt(cd * cd + dd * (radius**2 - cc))
    sigma /= dd

    return sigma


================================================ FILE: src/optimagic/optimizers/_pounders/_steihaug_toint.py ================================================
"""Implementation of the Steihaug-Toint Conjugate Gradient algorithm."""

import numpy as np


def minimize_trust_stcg(model_gradient, model_hessian, trustregion_radius):
    """Minimize the quadratic subproblem via Steihaug-Toint conjugate gradient.

    Solve the quadratic trust-region subproblem:
      min_x   g.T @ x + 0.5 * x.T @ hess @ x
        s.t.   ||x|| <= trustregion_radius

    approximately, where g denotes the gradient and hess the hessian of the
    quadratic model (i.e.
the linear terms and square_terms), respectively.

    The Steihaug-Toint conjugate gradient method is based on Steihaug
    (:cite:`Steihaug1983`) and Toint (:cite:`Toint1981`).

    Args:
        model_gradient (np.ndarray): 1d array of shape (n,) containing the
            gradient (i.e. linear terms) of the quadratic model.
        model_hessian (np.ndarray): 2d array of shape (n, n) containing the
            hessian (i.e .square terms) of the quadratic model.
        trustregion_radius (float): Radius of the trust-region.

    Returns:
        np.ndarray: Solution vector of shape (n,).
    """
    abstol = 1e-50
    rtol = 1e-5
    divtol = 10_000

    n = len(model_gradient)
    radius_sq = trustregion_radius**2

    residual = -model_gradient
    rr = residual.T @ residual

    x_candidate = np.zeros(n)

    max_iter = min(n, 10_000)

    # pinv is used instead of inv so that singular hessians are handled.
    z = np.linalg.pinv(model_hessian) @ residual
    rz = residual @ residual

    n_iter = 0
    diverged = False
    converged = False

    norm_r = np.sqrt(rr)
    norm_r0 = norm_r
    ttol = max(rtol * norm_r0, abstol)

    converged, diverged = _check_convergence(
        norm_r, norm_r0, abstol, ttol, divtol, converged, diverged
    )

    p = model_hessian @ z
    z = model_hessian @ p
    n_iter += 1

    kappa = p @ z

    dp = 0
    norm_d = 0
    norm_p = p @ p

    if kappa <= 0:
        # Negative curvature in the very first direction: step to the boundary.
        converged = True
        x_candidate, z, n_iter = _update_candidate_vector_and_iteration_number(
            x_candidate,
            residual,
            p,
            z,
            model_gradient,
            model_hessian,
            rr,
            trustregion_radius,
            norm_p,
            n_iter,
        )

    for _ in range(max_iter):
        alpha = rz / kappa
        norm_dp1 = norm_d + alpha * (2 * dp + alpha * norm_p)

        if trustregion_radius != 0 and norm_dp1 >= radius_sq:
            # The next iterate would leave the trust-region: stop at boundary.
            converged = True
            if norm_p > 0:
                x_candidate = _take_step_to_trustregion_boundary(
                    x_candidate, p, dp, radius_sq, norm_d, norm_p
                )
            break

        x_candidate = x_candidate + alpha * p
        residual = residual - alpha * (model_hessian @ p)

        norm_d = x_candidate @ x_candidate

        rzm1 = rz
        rz = residual @ residual

        norm_r = np.linalg.norm(residual)

        converged, diverged = _check_convergence(
            norm_r, norm_r0, abstol, ttol, divtol, converged, diverged
        )
        if converged or diverged:
            break

        beta = rz / rzm1
        if abs(beta) <= 0:
            diverged =
True
            break

        if n_iter >= max_iter:
            diverged = True
            break

        p = residual + beta * p

        dp = x_candidate @ p
        norm_p = p @ p

        z = model_hessian @ p
        kappa = p @ z
        n_iter += 1

        if kappa <= 0:
            # Negative curvature: step to the boundary and stop.
            converged = True
            if trustregion_radius != 0 and norm_p > 0:
                x_candidate = _take_step_to_trustregion_boundary(
                    x_candidate, p, dp, radius_sq, norm_d, norm_p
                )
            break

    return x_candidate


def _update_candidate_vector_and_iteration_number(
    x_candidate,
    residual,
    p,
    z,
    model_gradient,
    model_hessian,
    rr,
    radius,
    norm_p,
    n_iter,
):
    """Update candidate, z vector, and iteration number."""
    radius_sq = radius**2

    if radius != 0 and norm_p > 0:
        # Take step to boundary
        step = np.sqrt(radius_sq / norm_p)
        x_candidate = x_candidate + step * p
    elif radius != 0:
        if radius_sq >= rr:
            alpha = 1.0
        else:
            alpha = np.sqrt(radius_sq / rr)

        x_candidate = x_candidate + alpha * residual
        z = model_gradient - 0.5 * (model_hessian @ x_candidate)

    n_iter += 1

    return x_candidate, z, n_iter


def _take_step_to_trustregion_boundary(x_candidate, p, dp, radius_sq, norm_d, norm_p):
    """Take step to trust-region boundary."""
    # Positive root of ||x + step * p||^2 = radius_sq in step.
    step = (np.sqrt(dp * dp + norm_p * (radius_sq - norm_d)) - dp) / norm_p
    x_candidate = x_candidate + step * p

    return x_candidate


def _check_convergence(
    rnorm,
    rnorm0,
    abstol,  # noqa: ARG001
    ttol,
    divtol,
    converged,
    diverged,
):
    """Check for convergence."""
    if rnorm <= ttol:
        converged = True
    elif rnorm >= divtol * rnorm0:
        diverged = True

    return converged, diverged


================================================ FILE: src/optimagic/optimizers/_pounders/_trsbox.py ================================================
"""Implementation of the quadratic trustregion solver TRSBOX."""

import numpy as np


def minimize_trust_trsbox(
    model_gradient,
    model_hessian,
    trustregion_radius,
    *,
    lower_bounds,
    upper_bounds,
):
    """Minimize a quadratic trust-region subproblem using the trsbox algorithm.

    Solve the quadratic trust-region subproblem:
      min_x   g.T @ x + 0.5 * x.T @ hess @ x
        s.t.
||x|| <= trustregion_radius
               lower_bounds <= x <= upper_bounds

    approximately, using an active-set approach, where g denotes the gradient
    and hess the hessian of the quadratic model (i.e. the linear terms and
    square_terms), respectively.
    The subproblem is assumed to be centered, i.e. ``x_center`` is the zero
    vector. The trsbox algorithm applies a conjugate gradient step in its main
    loop.

    This implementation of the quadratic trsbox algorithm is based on
    M. J. D. Powell (2009) "The BOBYQA algorithm for bound constrained
    optimization without derivatives." (cite:`Powell2009`).
    Some modifications to the termination conditions are taken from the
    DFBOLS method by Zhang et al. (:cite:`Zhang2010`).

    Args:
        model_gradient (np.ndarray): 1d array of shape (n,) containing the
            gradient (i.e. linear terms) of the quadratic model.
        model_hessian (np.ndarray): 2d array of shape (n, n) containing the
            hessian (i.e .square terms) of the quadratic model.
        lower_bounds (np.ndarray): 1d array of shape (n,) with lower bounds
            for the parameter vector x.
        upper_bounds (np.ndarray): 1d array of shape (n,) with upper bounds
            for the parameter vector x.
        trustregion_radius (float): Radius of the trust-region.

    Returns:
        np.ndarray: Solution vector for the quadratic trust-region subproblem
            of shape (n,).
    """
    n = len(model_gradient)
    x_center = np.zeros(n)

    n_iter = 0
    n_fixed_variables = 0

    # -1 / 1 marks variables fixed at their lower / upper bound; 0 means free.
    x_bounded = np.zeros(n)
    x_bounded[(x_center <= lower_bounds) & (model_gradient >= 0.0)] = -1
    x_bounded[(x_center >= upper_bounds) & (model_gradient <= 0.0)] = 1

    x_candidate = np.zeros(n)
    gradient_projected = np.zeros(n)
    gradient_candidate = model_gradient

    total_reduction = 0
    delta_sq = trustregion_radius**2
    curve_min = -1.0
    beta = 0

    need_alt_trust_step = False
    max_iter = 100 * n**2

    # Main Conjugate Gradient loop
    for _ in range(max_iter):
        # Project the search direction onto the set of free variables.
        gradient_projected[x_bounded != 0] = 0
        if beta == 0:
            gradient_projected[x_bounded == 0] = -gradient_candidate[x_bounded == 0]
        else:
            gradient_projected[x_bounded == 0] = (
                beta * gradient_projected[x_bounded == 0]
                - gradient_candidate[x_bounded == 0]
            )
        gradient_projected_sumsq = gradient_projected @ gradient_projected

        if gradient_projected_sumsq == 0:
            need_alt_trust_step = False
            break

        if beta == 0:
            gradient_sumsq = gradient_projected_sumsq
            max_iter = n_iter + n - n_fixed_variables

        if n_iter == 0:
            gradient_sumsq_initial = gradient_sumsq

        # Termination conditions following Zhang et al. (DFBOLS).
        if gradient_sumsq <= min(
            1.0e-6 * gradient_sumsq_initial, 1.0e-18
        ) or gradient_sumsq * delta_sq <= min(1.0e-6 * total_reduction**2, 1.0e-18):
            need_alt_trust_step = False
            break

        hess_g = model_hessian @ gradient_projected

        g_x = gradient_projected[x_bounded == 0] @ x_candidate[x_bounded == 0]
        g_hess_g = gradient_projected[x_bounded == 0] @ hess_g[x_bounded == 0]

        raw_distance = (
            delta_sq - x_candidate[x_bounded == 0] @ x_candidate[x_bounded == 0]
        )

        if raw_distance <= 0:
            need_alt_trust_step = True
            break

        step_len, distance_to_boundary = _take_unconstrained_step_up_to_boundary(
            raw_distance, gradient_sumsq, gradient_projected_sumsq, g_x, g_hess_g
        )

        if step_len <= 1.0e-30:
            need_alt_trust_step = False
            break

        # Shrink the step where a simple bound would be violated.
        step_len, index_bound_active = _take_constrained_step_up_to_boundary(
            x_candidate, gradient_projected, step_len, lower_bounds, upper_bounds
        )

        current_reduction = 0
        if step_len > 0:
            n_iter += 1
            (
                x_candidate,
gradient_candidate,
                current_reduction,
                total_reduction,
                curve_min,
                gradient_sumsq,
                gradient_sumsq_old,
            ) = _update_candidate_vectors_and_reduction(
                x_candidate,
                x_bounded,
                gradient_candidate,
                gradient_projected,
                step_len,
                total_reduction,
                curve_min,
                index_bound_active,
                gradient_projected_sumsq,
                gradient_sumsq,
                g_hess_g,
                hess_g,
            )

        if index_bound_active is not None:
            # A bound was hit: fix that variable and restart the CG recursion.
            n_fixed_variables += 1
            if gradient_projected[index_bound_active] >= 0:
                x_bounded[index_bound_active] = 1
            else:
                x_bounded[index_bound_active] = -1

            delta_sq = delta_sq - x_candidate[index_bound_active] ** 2
            if delta_sq <= 0:
                need_alt_trust_step = True
                break

            beta = 0
            continue

        if step_len >= distance_to_boundary:
            need_alt_trust_step = True
            break

        if n_iter == max_iter or current_reduction <= 1.0e-6 * total_reduction:
            need_alt_trust_step = False
            break

        beta = gradient_sumsq / gradient_sumsq_old
        continue

    if need_alt_trust_step:
        curve_min = 0
        x_candidate = _perform_alternative_trustregion_step(
            x_candidate=x_candidate,
            x_bounded=x_bounded,
            gradient_candidate=gradient_candidate,
            model_hessian=model_hessian,
            lower_bounds=lower_bounds,
            upper_bounds=upper_bounds,
            n_fixed_variables=n_fixed_variables,
            total_reduction=total_reduction,
        )
    else:
        x_candidate = _apply_bounds_to_candidate_vector(
            x_candidate, x_bounded, lower_bounds, upper_bounds
        )

    return x_candidate


def _perform_alternative_trustregion_step(
    x_candidate,
    x_bounded,
    gradient_candidate,
    model_hessian,
    lower_bounds,
    upper_bounds,
    n_fixed_variables,
    total_reduction,
):
    """Perform the alternative trust-region step."""
    n = len(x_candidate)
    max_iter = 100 * n**2

    for _ in range(max_iter):
        if n_fixed_variables >= n - 1:
            # At most one free variable left: nothing to rotate, just clip.
            x_candidate = _apply_bounds_to_candidate_vector(
                x_candidate, x_bounded, lower_bounds, upper_bounds
            )
            break

        search_direction = np.zeros(n)
        search_direction[x_bounded == 0] = x_candidate[x_bounded == 0]

        x_reduced = x_candidate[x_bounded == 0] @ x_candidate[x_bounded == 0]
        x_grad = x_candidate[x_bounded == 0] @ gradient_candidate[x_bounded == 0]
gradient_reduced = (
            gradient_candidate[x_bounded == 0] @ gradient_candidate[x_bounded == 0]
        )

        hess_s = model_hessian @ search_direction
        hessian_reduced = hess_s

        restart_alt_loop = False

        for _ in range(max_iter):
            raw_reduction = gradient_reduced * x_reduced - x_grad**2
            if raw_reduction <= 1.0e-4 * total_reduction**2:
                restart_alt_loop = False
                break

            search_direction, s_norm = _compute_new_search_direction_and_norm(
                x_candidate,
                x_bounded,
                x_reduced,
                gradient_candidate,
                x_grad,
                raw_reduction,
            )

            (
                x_bounded,
                index_active_bound,
                n_fixed_variables,
                active_bound,
                bound_on_tangent,
                free_variable_reached_bound,
            ) = _calc_upper_bound_on_tangent(
                x_candidate,
                search_direction,
                x_bounded,
                lower_bounds,
                upper_bounds,
                n_fixed_variables,
            )

            if free_variable_reached_bound:
                # A free variable hit a bound: fix it and restart the loop.
                restart_alt_loop = True
                break

            hess_s = model_hessian @ search_direction

            s_hess_s = np.sum(search_direction[x_bounded == 0] * hess_s[x_bounded == 0])
            x_hess_s = np.sum(x_candidate[x_bounded == 0] * hess_s[x_bounded == 0])
            x_hess_x = np.sum(
                x_candidate[x_bounded == 0] * hessian_reduced[x_bounded == 0]
            )

            (
                previous_reduction,
                next_reduction,
                max_reduction,
                tangent,
                index_angle_greatest_reduction,
                n_angles,
            ) = _calc_greatest_criterion_reduction(
                bound_on_tangent, s_hess_s, x_hess_s, x_hess_x, x_grad, s_norm
            )

            if index_angle_greatest_reduction == -1:
                # No angle yields a reduction; stop the alternative iteration.
                restart_alt_loop = False
                break

            if index_angle_greatest_reduction < n_angles - 1:
                # Refine the best angle by quadratic interpolation.
                tangent = _update_tangent(
                    index_angle_greatest_reduction,
                    bound_on_tangent,
                    n_angles,
                    next_reduction,
                    previous_reduction,
                    max_reduction,
                )

            cosine = (1.0 - tangent**2) / (1.0 + tangent**2)
            sine = 2.0 * tangent / (1.0 + tangent**2)

            current_reduction = _calc_new_reduction(
                tangent, sine, s_hess_s, x_hess_x, x_hess_s, x_grad, s_norm
            )
            if current_reduction <= 0.0:
                restart_alt_loop = False
                break

            (
                x_candidate,
                gradient_candidate,
                x_grad,
                gradient_reduced,
                hessian_reduced,
            ) = _update_candidate_vectors_and_reduction_alt_step(
                x_candidate,
                search_direction,
                x_bounded,
                gradient_candidate,
                cosine,
sine,
                hess_s,
                hessian_reduced,
            )
            total_reduction = total_reduction + current_reduction

            if (
                index_active_bound is not None
                and index_angle_greatest_reduction == n_angles - 1
            ):
                # The rotation was limited by a bound: fix that variable.
                n_fixed_variables += 1
                x_bounded[index_active_bound] = active_bound

                restart_alt_loop = True
                break

            if current_reduction <= 0.01 * total_reduction:
                restart_alt_loop = False
                break

            continue

        if restart_alt_loop:
            continue
        else:
            break

    x_candidate = _apply_bounds_to_candidate_vector(
        x_candidate, x_bounded, lower_bounds, upper_bounds
    )

    return x_candidate


def _apply_bounds_to_candidate_vector(
    x_candidate,
    x_bounded,
    lower_bounds,
    upper_bounds,
):
    """Force candidate vector to lie within bounds."""
    x_candidate_new = np.clip(lower_bounds, x_candidate, upper_bounds)
    # Variables flagged as fixed are snapped exactly onto their bound.
    x_candidate_new[x_bounded == -1] = lower_bounds[x_bounded == -1]
    x_candidate_new[x_bounded == 1] = upper_bounds[x_bounded == 1]

    return x_candidate_new


def _take_unconstrained_step_up_to_boundary(
    raw_distance, gradient_sumsq, gradient_projected_sumsq, g_x, g_hess_g
):
    """Take unconstrained step, ignoring bounds, up to boundary."""
    temp = np.sqrt(gradient_projected_sumsq * raw_distance + g_x**2)
    # The two branches are algebraically equivalent roots; the split avoids
    # cancellation depending on the sign of g_x.
    if g_x >= 0:
        distance_to_boundary = raw_distance / (temp + g_x)
    else:
        distance_to_boundary = (temp - g_x) / gradient_projected_sumsq

    if g_hess_g <= 0:
        # Non-positive curvature: go all the way to the boundary.
        step_len = distance_to_boundary
    else:
        step_len = min(distance_to_boundary, gradient_sumsq / g_hess_g)

    return step_len, distance_to_boundary


def _update_candidate_vectors_and_reduction(
    x_candidate,
    x_bounded,
    gradient_candidate,
    gradient_projected,
    step_len,
    total_reduction,
    curve_min,
    index_bound_active,
    gradient_projected_sumsq,
    gradient_sumsq,
    g_hess_g,
    hess_g,
):
    """Update candidate vectors and the associated criterion reduction."""
    current_min = g_hess_g / gradient_projected_sumsq
    if index_bound_active is None and current_min > 0:
        # Track the smallest curvature seen; -1.0 means "not yet set".
        if curve_min != -1.0:
            curve_min = min(curve_min, current_min)
        else:
            curve_min = current_min

    gradient_sumsq_old = gradient_sumsq

    gradient_candidate = gradient_candidate +
step_len * hess_g
    x_candidate = x_candidate + step_len * gradient_projected

    gradient_sumsq = (
        gradient_candidate[x_bounded == 0] @ gradient_candidate[x_bounded == 0]
    )

    current_reduction = max(
        step_len * (gradient_sumsq_old - 0.5 * step_len * g_hess_g), 0
    )
    total_reduction = total_reduction + current_reduction

    return (
        x_candidate,
        gradient_candidate,
        current_reduction,
        total_reduction,
        curve_min,
        gradient_sumsq,
        gradient_sumsq_old,
    )


def _take_constrained_step_up_to_boundary(
    x_candidate, gradient_projected, step_len, lower_bounds, upper_bounds
):
    """Reduce step length, where boundary is hit, to preserve simple bounds."""
    index_bound_active = None
    for i in range(len(x_candidate)):
        if gradient_projected[i] != 0:
            # Distance to the bound that lies in the direction of movement.
            if gradient_projected[i] > 0:
                step_len_constr = (
                    upper_bounds[i] - x_candidate[i]
                ) / gradient_projected[i]
            else:
                step_len_constr = (
                    lower_bounds[i] - x_candidate[i]
                ) / gradient_projected[i]
            if step_len_constr < step_len:
                step_len = step_len_constr
                index_bound_active = i

    return step_len, index_bound_active


def _calc_upper_bound_on_tangent(
    x_candidate,
    search_direction,
    x_bounded,
    lower_bounds,
    upper_bounds,
    n_fixed_variables,
):
    """Calculate upper bound on tangent of half the angle to the boundary."""
    bound_on_tangent = 1
    free_variable_reached_bound = False
    index_active_bound = None
    active_bound = None

    for i in range(len(x_candidate)):
        if x_bounded[i] == 0:
            lower_bound_centered = x_candidate[i] - lower_bounds[i]
            upper_bound_centered = upper_bounds[i] - x_candidate[i]

            if lower_bound_centered <= 0.0:
                # The free variable already sits on its lower bound: fix it.
                n_fixed_variables += 1
                x_bounded[i] = -1

                free_variable_reached_bound = True
                break

            elif upper_bound_centered <= 0.0:
                # The free variable already sits on its upper bound: fix it.
                n_fixed_variables += 1
                x_bounded[i] = 1

                free_variable_reached_bound = True
                break

            ssq = x_candidate[i] ** 2 + search_direction[i] ** 2

            ssq_lower = ssq - lower_bounds[i] ** 2
            if ssq_lower > 0.0:
                ssq_lower = np.sqrt(ssq_lower) - search_direction[i]
                if bound_on_tangent * ssq_lower > lower_bound_centered:
                    bound_on_tangent = lower_bound_centered /
ssq_lower
                    index_active_bound = i
                    active_bound = -1

            ssq_upper = ssq - upper_bounds[i] ** 2
            if ssq_upper > 0.0:
                ssq_upper = np.sqrt(ssq_upper) + search_direction[i]
                if bound_on_tangent * ssq_upper > upper_bound_centered:
                    bound_on_tangent = upper_bound_centered / ssq_upper
                    index_active_bound = i
                    active_bound = 1

    return (
        x_bounded,
        index_active_bound,
        n_fixed_variables,
        active_bound,
        bound_on_tangent,
        free_variable_reached_bound,
    )


def _calc_greatest_criterion_reduction(
    bound_on_tangent, s_hess_s, x_hess_s, x_hess_x, x_grad, s_norm
):
    """Calculate the greatest feasible reduction in the criterion function.

    The largest reduction is found by looking at a range of equally spaced
    values of ``tangent`` in the interval [0, ``bound_on_tangent``], where
    ``tangent`` is the tangent of half the angle to the trust-region boundary.
    """
    previous_reduction = None
    next_reduction = None

    max_reduction = 0
    index_angle_greatest_reduction = -1
    old_reduction = 0

    n_angles = int(17 * bound_on_tangent + 3.1)
    for i in range(n_angles):
        tangent = bound_on_tangent * (i + 1) / n_angles
        sine = 2.0 * tangent / (1.0 + tangent**2)

        new_reduction = _calc_new_reduction(
            tangent, sine, s_hess_s, x_hess_x, x_hess_s, x_grad, s_norm
        )

        if new_reduction > max_reduction:
            max_reduction = new_reduction
            index_angle_greatest_reduction = i
            # Remember the neighbors of the maximum for later interpolation.
            previous_reduction = old_reduction
        elif i == index_angle_greatest_reduction + 1:
            next_reduction = new_reduction

        old_reduction = new_reduction

    return (
        previous_reduction,
        next_reduction,
        max_reduction,
        tangent,
        index_angle_greatest_reduction,
        n_angles,
    )


def _update_candidate_vectors_and_reduction_alt_step(
    x_candidate,
    search_direction,
    x_bounded,
    gradient_candidate,
    cosine,
    sine,
    hess_s,
    hessian_reduced,
):
    """Update candidate vectors and the associated criterion reduction.

    If the angle of the alternative iteration is restricted by a bound on a
    free variable, that variable is fixed at the bound.
""" gradient_candidate_new = ( gradient_candidate + (cosine - 1.0) * hessian_reduced + sine * hess_s ) x_candidate_new = np.copy(x_candidate) x_candidate_new[x_bounded == 0] = ( cosine * x_candidate[x_bounded == 0] + sine * search_direction[x_bounded == 0] ) x_grad = x_candidate_new[x_bounded == 0] @ gradient_candidate_new[x_bounded == 0] gradient_reduced = ( gradient_candidate_new[x_bounded == 0] @ gradient_candidate_new[x_bounded == 0] ) hessian_reduced = cosine * hessian_reduced + sine * hess_s return ( x_candidate_new, gradient_candidate_new, x_grad, gradient_reduced, hessian_reduced, ) def _compute_new_search_direction_and_norm( x_candidate, x_bounded, x_reduced, gradient_candidate, x_grad, raw_reduction ): """Compute the new search direction and its norm.""" raw_reduction = np.sqrt(raw_reduction) search_direction = np.zeros_like(x_candidate) search_direction[x_bounded == 0] = ( x_grad * x_candidate[x_bounded == 0] - x_reduced * gradient_candidate[x_bounded == 0] ) / raw_reduction s_norm = -raw_reduction return search_direction, s_norm def _calc_new_reduction(tangent, sine, s_hess_s, x_hess_x, x_hess_s, x_grad, s_norm): """Calculate the new reduction in the criterion function.""" raw_reduction = s_hess_s + tangent * (tangent * x_hess_x - 2.0 * x_hess_s) current_reduction = sine * (tangent * x_grad - s_norm - 0.5 * sine * raw_reduction) return current_reduction def _update_tangent( index_angle_greatest_reduction, bound_on_tangent, n_angles, next_reduction, previous_reduction, max_reduction, ): """Update the tangent of half the angle to the trust-region boundary.""" raw_reduction = (next_reduction - previous_reduction) / ( 2.0 * max_reduction - previous_reduction - next_reduction ) tangent = ( bound_on_tangent * ((index_angle_greatest_reduction + 1) + 0.5 * raw_reduction) / n_angles ) return tangent ================================================ FILE: src/optimagic/optimizers/_pounders/bntr.py ================================================ """Auxiliary 
functions for the quadratic BNTR trust-region subsolver."""

from functools import reduce
from typing import NamedTuple

import numpy as np

from optimagic.optimizers._pounders._conjugate_gradient import (
    minimize_trust_cg,
)
from optimagic.optimizers._pounders._steihaug_toint import (
    minimize_trust_stcg,
)
from optimagic.optimizers._pounders._trsbox import minimize_trust_trsbox

# Machine-precision based tolerance used throughout the subsolver.
EPSILON = np.finfo(float).eps ** (2 / 3)


class ActiveBounds(NamedTuple):
    """Index sets describing which bounds are binding at the candidate."""

    lower: np.ndarray | None = None
    upper: np.ndarray | None = None
    fixed: np.ndarray | None = None
    active: np.ndarray | None = None
    inactive: np.ndarray | None = None


def bntr(
    model,
    lower_bounds,
    upper_bounds,
    x_candidate,
    *,
    conjugate_gradient_method,
    maxiter,
    maxiter_gradient_descent,
    gtol_abs,
    gtol_rel,
    gtol_scaled,
    gtol_abs_conjugate_gradient,
    gtol_rel_conjugate_gradient,
):
    """Minimize a bounded trust-region subproblem via Newton Conjugate Gradient method.

    The BNTR (Bounded Newton Trust Region) algorithm uses an active-set
    approach to solve the symmetric system of equations:

        hessian @ x = - gradient

    only for the inactive parameters of x that lie within the bounds. The
    active-set estimation employed here is based on Bertsekas
    (:cite:`Bertsekas1982`).

    In the main loop, BNTR globalizes the Newton step using a trust-region
    method based on the predicted versus actual reduction in the criterion
    function. The trust-region radius is increased only if the accepted step
    is at the trust-region boundary.

    Args:
        model (NamedTuple): NamedTuple containing the parameters of the
            main model, i.e.:
            - ``linear_terms`` (np.ndarray): 1d array of shape (n,)
            - ``square_terms`` (np.ndarray): 2d array of shape (n,n).
        lower_bounds (np.ndarray): 1d array of shape (n,) with lower bounds
            for the parameter vector x.
        upper_bounds (np.ndarray): 1d array of shape (n,) with upper bounds
            for the parameter vector x.
        x_candidate (np.ndarray): Initial guess for the solution of the
            subproblem.
conjugate_gradient_method (str): Method for computing the conjugate
            gradient step. Available conjugate gradient methods are:
            - "cg"
            - "steihaug_toint"
            - "trsbox" (default)
        maxiter (int): Maximum number of iterations. If reached, terminate.
        maxiter_gradient_descent (int): Maximum number of steepest descent
            iterations to perform when the trust-region subsolver BNTR is used.
        gtol_abs (float): Convergence tolerance for the absolute gradient norm.
        gtol_rel (float): Convergence tolerance for the relative gradient norm.
        gtol_scaled (float): Convergence tolerance for the scaled gradient norm.
        gtol_abs_conjugate_gradient (float): Convergence tolerance for the
            absolute gradient norm in the conjugate gradient step of the
            trust-region subproblem ("BNTR").
        gtol_rel_conjugate_gradient (float): Convergence tolerance for the
            relative gradient norm in the conjugate gradient step of the
            trust-region subproblem ("BNTR").

    Returns:
        (dict): Result dictionary containing the following keys:
            - ``x`` (np.ndarray): Solution vector of the subproblem of
              shape (n,)
            - ``criterion`` (float): Minimum function value associated with
              the solution.
            - ``n_iterations`` (int): Number of iterations the algorithm ran
              before termination.
            - ``success`` (bool): Boolean indicating whether a solution has
              been found before reaching maxiter.
    """
    options_update_radius = {
        "eta1": 1.0e-4,
        "eta2": 0.25,
        "eta3": 0.50,
        "eta4": 0.90,
        "alpha1": 0.25,
        "alpha2": 0.50,
        "alpha3": 1.00,
        "alpha4": 2.00,
        "alpha5": 4.00,
        "min_radius": 1e-10,
        "max_radius": 1e10,
        "default_radius": 100.00,
    }

    (
        x_candidate,
        f_candidate,
        gradient_unprojected,
        hessian_bounds_inactive,
        trustregion_radius,
        active_bounds_info,
        converged,
        convergence_reason,
    ) = _take_preliminary_gradient_descent_step_and_check_for_solution(
        x_candidate,
        model,
        lower_bounds,
        upper_bounds,
        maxiter_gradient_descent,
        gtol_abs,
        gtol_rel,
        gtol_scaled,
    )

    for niter in range(maxiter + 1):
        if converged:
            break

        x_old = x_candidate
        f_old = f_candidate

        accept_step = False

        # Shrink the radius and retry until a step is accepted or we converge.
        while not accept_step and not converged:
            gradient_bounds_inactive = gradient_unprojected[active_bounds_info.inactive]
            hessian_bounds_inactive = _find_hessian_submatrix_where_bounds_inactive(
                model, active_bounds_info
            )
            (
                conjugate_gradient_step,
                conjugate_gradient_step_inactive_bounds,
                cg_step_norm,
            ) = _compute_conjugate_gradient_step(
                x_candidate,
                gradient_bounds_inactive,
                hessian_bounds_inactive,
                lower_bounds,
                upper_bounds,
                active_bounds_info,
                trustregion_radius,
                conjugate_gradient_method=conjugate_gradient_method,
                gtol_abs_conjugate_gradient=gtol_abs_conjugate_gradient,
                gtol_rel_conjugate_gradient=gtol_rel_conjugate_gradient,
                options_update_radius=options_update_radius,
            )

            x_unbounded = x_candidate + conjugate_gradient_step
            x_candidate = _apply_bounds_to_x_candidate(
                x_unbounded, lower_bounds, upper_bounds
            )

            predicted_reduction = (
                _compute_predicted_reduction_from_conjugate_gradient_step(
                    conjugate_gradient_step,
                    conjugate_gradient_step_inactive_bounds,
                    gradient_unprojected,
                    gradient_bounds_inactive,
                    hessian_bounds_inactive,
                    active_bounds_info,
                )
            )

            f_candidate = _evaluate_model_criterion(
                x_candidate, model.linear_terms, model.square_terms
            )
            actual_reduction = f_old - f_candidate

            trustregion_radius_old = trustregion_radius

            (
                trustregion_radius,
                accept_step,
            ) =
_update_trustregion_radius_conjugate_gradient(
                f_candidate,
                predicted_reduction,
                actual_reduction,
                cg_step_norm,
                trustregion_radius,
                options_update_radius,
            )

            if accept_step:
                gradient_unprojected = (
                    model.linear_terms + model.square_terms @ x_candidate
                )
                active_bounds_info = _get_information_on_active_bounds(
                    x_candidate,
                    gradient_unprojected,
                    lower_bounds,
                    upper_bounds,
                )
            else:
                # Reject: restore the previous iterate. If the radius did not
                # shrink, no further progress is possible.
                x_candidate = x_old
                f_candidate = f_old

                if trustregion_radius == trustregion_radius_old:
                    converged = True
                    break

        converged, convergence_reason = _check_for_convergence(
            x_candidate,
            f_candidate,
            gradient_unprojected,
            model,
            lower_bounds,
            upper_bounds,
            converged,
            convergence_reason,
            niter,
            maxiter=maxiter,
            gtol_abs=gtol_abs,
            gtol_rel=gtol_rel,
            gtol_scaled=gtol_scaled,
        )

    result = {
        "x": x_candidate,
        "criterion": f_candidate,
        "n_iterations": niter,
        "success": converged,
        "message": convergence_reason,
    }

    return result


def _take_preliminary_gradient_descent_step_and_check_for_solution(
    x_candidate,
    model,
    lower_bounds,
    upper_bounds,
    maxiter_gradient_descent,
    gtol_abs,
    gtol_rel,
    gtol_scaled,
):
    """Take a preliminary gradient descent step and check if we found a solution."""
    options_update_radius = {
        "mu1": 0.35,
        "mu2": 0.50,
        "gamma1": 0.0625,
        "gamma2": 0.5,
        "gamma3": 2.0,
        "gamma4": 5.0,
        "theta": 0.25,
        "min_radius": 1e-10,
        "max_radius": 1e10,
        "default_radius": 100.0,
    }

    converged = False
    convergence_reason = "Continue iterating."
criterion_candidate = _evaluate_model_criterion(
        x_candidate, model.linear_terms, model.square_terms
    )

    active_bounds_info = _get_information_on_active_bounds(
        x_candidate,
        model.linear_terms,
        lower_bounds,
        upper_bounds,
    )

    gradient_unprojected = model.linear_terms + model.square_terms @ x_candidate
    gradient_projected = _project_gradient_onto_feasible_set(
        gradient_unprojected, active_bounds_info
    )

    converged, convergence_reason = _check_for_convergence(
        x_candidate,
        criterion_candidate,
        gradient_unprojected,
        model,
        lower_bounds,
        upper_bounds,
        converged,
        convergence_reason,
        niter=None,
        maxiter=None,
        gtol_abs=gtol_abs,
        gtol_rel=gtol_rel,
        gtol_scaled=gtol_scaled,
    )

    if converged:
        # Already at a solution; no gradient descent needed.
        hessian_inactive = model.square_terms
        trustregion_radius = options_update_radius["default_radius"]
    else:
        hessian_inactive = _find_hessian_submatrix_where_bounds_inactive(
            model, active_bounds_info
        )

        (
            x_candidate_gradient_descent,
            f_min_gradient_descent,
            step_size_gradient_descent,
            trustregion_radius,
            radius_lower_bound,
        ) = _perform_gradient_descent_step(
            x_candidate,
            criterion_candidate,
            gradient_projected,
            hessian_inactive,
            model,
            lower_bounds,
            upper_bounds,
            active_bounds_info,
            maxiter_gradient_descent,
            options_update_radius,
        )

        if f_min_gradient_descent < criterion_candidate:
            # The gradient descent step improved the criterion: adopt it and
            # refresh all bound/gradient information at the new point.
            criterion_candidate = f_min_gradient_descent

            x_unbounded = (
                x_candidate_gradient_descent
                - step_size_gradient_descent * gradient_projected
            )
            x_candidate = _apply_bounds_to_x_candidate(
                x_unbounded, lower_bounds, upper_bounds
            )

            gradient_unprojected = model.linear_terms + model.square_terms @ x_candidate
            active_bounds_info = _get_information_on_active_bounds(
                x_candidate,
                gradient_unprojected,
                lower_bounds,
                upper_bounds,
            )

            gradient_projected = _project_gradient_onto_feasible_set(
                gradient_unprojected, active_bounds_info
            )
            hessian_inactive = _find_hessian_submatrix_where_bounds_inactive(
                model, active_bounds_info
            )

        converged, convergence_reason = _check_for_convergence(
            x_candidate,
            criterion_candidate,
            gradient_projected,
model, lower_bounds, upper_bounds, converged, convergence_reason, niter=None, maxiter=None, gtol_abs=gtol_abs, gtol_rel=gtol_rel, gtol_scaled=gtol_scaled, ) if not converged: trustregion_radius = np.clip( max(trustregion_radius, radius_lower_bound), options_update_radius["min_radius"], options_update_radius["max_radius"], ) return ( x_candidate, criterion_candidate, gradient_unprojected, hessian_inactive, trustregion_radius, active_bounds_info, converged, convergence_reason, ) def _compute_conjugate_gradient_step( x_candidate, gradient_inactive, hessian_inactive, lower_bounds, upper_bounds, active_bounds_info, trustregion_radius, *, conjugate_gradient_method, gtol_abs_conjugate_gradient, gtol_rel_conjugate_gradient, options_update_radius, ): """Compute the bounded Conjugate Gradient trust-region step.""" conjugate_gradient_step = np.zeros_like(x_candidate) if active_bounds_info.inactive.size == 0: # Save some computation and return an adjusted zero step step_inactive = _apply_bounds_to_x_candidate( x_candidate, lower_bounds, upper_bounds ) step_norm = np.linalg.norm(step_inactive) conjugate_gradient_step = _apply_bounds_to_conjugate_gradient_step( step_inactive, x_candidate, lower_bounds, upper_bounds, active_bounds_info, ) else: if conjugate_gradient_method == "cg": step_inactive = minimize_trust_cg( gradient_inactive, hessian_inactive, trustregion_radius, gtol_abs=gtol_abs_conjugate_gradient, gtol_rel=gtol_rel_conjugate_gradient, ) step_norm = np.linalg.norm(step_inactive) elif conjugate_gradient_method == "steihaug_toint": step_inactive = minimize_trust_stcg( gradient_inactive, hessian_inactive, trustregion_radius, ) step_norm = np.linalg.norm(step_inactive) elif conjugate_gradient_method == "trsbox": step_inactive = minimize_trust_trsbox( gradient_inactive, hessian_inactive, trustregion_radius, lower_bounds=lower_bounds[active_bounds_info.inactive], upper_bounds=upper_bounds[active_bounds_info.inactive], ) step_norm = np.linalg.norm(step_inactive) else: raise 
ValueError( "Invalid method: {conjugate_gradient_method}. " "Must be one of cg, steihaug_toint, trsbox." ) if trustregion_radius == 0: if step_norm > 0: # Accept trustregion_radius = np.clip( step_norm, options_update_radius["min_radius"], options_update_radius["max_radius"], ) else: # Re-solve trustregion_radius = np.clip( options_update_radius["default_radius"], options_update_radius["min_radius"], options_update_radius["max_radius"], ) if conjugate_gradient_method == "cg": step_inactive = minimize_trust_cg( gradient_inactive, hessian_inactive, trustregion_radius, gtol_abs=gtol_abs_conjugate_gradient, gtol_rel=gtol_rel_conjugate_gradient, ) step_norm = np.linalg.norm(step_inactive) elif conjugate_gradient_method == "steihaug_toint": step_inactive = minimize_trust_stcg( gradient_inactive, hessian_inactive, trustregion_radius, ) step_norm = np.linalg.norm(step_inactive) elif conjugate_gradient_method == "trsbox": step_inactive = minimize_trust_trsbox( gradient_inactive, hessian_inactive, trustregion_radius, lower_bounds=lower_bounds[active_bounds_info.inactive], upper_bounds=upper_bounds[active_bounds_info.inactive], ) step_norm = np.linalg.norm(step_inactive) if step_norm == 0: raise ValueError("Initial direction is zero.") conjugate_gradient_step = _apply_bounds_to_conjugate_gradient_step( step_inactive, x_candidate, lower_bounds, upper_bounds, active_bounds_info, ) return ( conjugate_gradient_step, step_inactive, step_norm, ) def _compute_predicted_reduction_from_conjugate_gradient_step( conjugate_gradient_step, conjugate_gradient_step_inactive, gradient_unprojected, gradient_inactive, hessian_inactive, active_bounds_info, ): """Compute predicted reduction induced by the Conjugate Gradient step.""" if active_bounds_info.active.size > 0: # Projection changed the step, so we have to recompute the step # and the predicted reduction. Leave the rust radius unchanged. 
        cg_step_recomp = conjugate_gradient_step[active_bounds_info.inactive]
        gradient_inactive_recomp = gradient_unprojected[active_bounds_info.inactive]

        predicted_reduction = _evaluate_model_criterion(
            cg_step_recomp, gradient_inactive_recomp, hessian_inactive
        )
    else:
        # Step did not change, so we can just recover the
        # pre-computed prediction
        predicted_reduction = _evaluate_model_criterion(
            conjugate_gradient_step_inactive,
            gradient_inactive,
            hessian_inactive,
        )

    # The model value of a descent step is negative; flip the sign so a
    # positive number means a predicted decrease.
    return -predicted_reduction


def _perform_gradient_descent_step(
    x_candidate,
    f_candidate_initial,
    gradient_projected,
    hessian_inactive,
    model,
    lower_bounds,
    upper_bounds,
    active_bounds_info,
    maxiter_steepest_descent,
    options_update_radius,
):
    """Perform gradient descent step and update trust-region radius.

    Repeatedly takes projected steepest-descent steps of length
    ``trustregion_radius / ||gradient||`` and adapts the radius after each
    step via ``_update_trustregion_radius_and_gradient_descent``.

    Returns:
        Tuple:
        - x_candidate (np.ndarray): Last candidate vector (bounded).
        - f_min (float): Best model criterion value found.
        - step_size_accepted (float): Step size of the last improving step
            (0 if no step improved on ``f_candidate_initial``).
        - trustregion_radius (float): Updated trust-region radius.
        - radius_lower_bound (float): Updated lower bound for the radius.

    """
    f_min = f_candidate_initial
    gradient_norm = np.linalg.norm(gradient_projected)

    trustregion_radius = options_update_radius["default_radius"]
    radius_lower_bound = 0
    step_size_accepted = 0

    for _ in range(maxiter_steepest_descent):
        x_old = x_candidate

        step_size_candidate = trustregion_radius / gradient_norm
        x_candidate = x_old - step_size_candidate * gradient_projected

        x_candidate = _apply_bounds_to_x_candidate(
            x_candidate, lower_bounds, upper_bounds
        )
        f_candidate = _evaluate_model_criterion(
            x_candidate, model.linear_terms, model.square_terms
        )

        x_diff = x_candidate - x_old

        if f_candidate < f_min:
            f_min = f_candidate
            step_size_accepted = step_size_candidate

        # Predicted reduction uses only the inactive (free) coordinates of
        # the actually-taken (bounded) step.
        x_inactive = x_diff[active_bounds_info.inactive]
        square_terms = x_inactive.T @ hessian_inactive @ x_inactive

        predicted_reduction = trustregion_radius * (
            gradient_norm
            - 0.5 * trustregion_radius * square_terms / (gradient_norm**2)
        )
        actual_reduction = f_candidate_initial - f_candidate

        (
            trustregion_radius,
            radius_lower_bound,
        ) = _update_trustregion_radius_and_gradient_descent(
            trustregion_radius,
            radius_lower_bound,
            predicted_reduction,
            actual_reduction,
            gradient_norm,
            options_update_radius,
        )

    return (
        x_candidate,
        f_min,
        step_size_accepted,
        trustregion_radius,
        radius_lower_bound,
    )


def _update_trustregion_radius_conjugate_gradient(
    f_candidate,
    predicted_reduction,
    actual_reduction,
    x_norm_cg,
    trustregion_radius,
    options,
):
    """Update the trust-region radius based on predicted and actual reduction.

    Rejects the step (shrinking the radius by ``alpha1``) when the predicted
    reduction is negative/non-finite, the actual reduction is non-finite, or
    the agreement ratio kappa falls below ``eta1``. Otherwise the step is
    accepted and — only if the step hit the trust-region boundary — the
    radius is rescaled by ``alpha2``..``alpha5`` depending on kappa.

    Returns:
        Tuple:
        - trustregion_radius (float): Updated radius, clipped to
            [min_radius, max_radius].
        - accept_step (bool): Whether the step should be accepted.

    """
    accept_step = False

    if predicted_reduction < 0 or ~np.isfinite(predicted_reduction):
        # Reject and start over
        trustregion_radius = options["alpha1"] * min(trustregion_radius, x_norm_cg)

    else:
        if ~np.isfinite(actual_reduction):
            trustregion_radius = options["alpha1"] * min(trustregion_radius, x_norm_cg)
        else:
            # Treat both reductions as equal when they are at noise level.
            if abs(actual_reduction) <= max(1, abs(f_candidate) * EPSILON) and abs(
                predicted_reduction
            ) <= max(1, abs(f_candidate) * EPSILON):
                kappa = 1
            else:
                kappa = actual_reduction / predicted_reduction

            if kappa < options["eta1"]:
                # Reject the step
                trustregion_radius = options["alpha1"] * min(
                    trustregion_radius, x_norm_cg
                )
            else:
                accept_step = True

                # Update the trust-region radius only if the computed step is at the
                # trust-radius boundary
                if x_norm_cg == trustregion_radius:
                    if kappa < options["eta2"]:
                        # Marginal bad step
                        trustregion_radius = options["alpha2"] * trustregion_radius
                    elif kappa < options["eta3"]:
                        # Reasonable step
                        trustregion_radius = options["alpha3"] * trustregion_radius
                    elif kappa < options["eta4"]:
                        trustregion_radius = options["alpha4"] * trustregion_radius
                    else:
                        # Very good step
                        trustregion_radius = options["alpha5"] * trustregion_radius

    trustregion_radius = np.clip(
        trustregion_radius, options["min_radius"], options["max_radius"]
    )

    return trustregion_radius, accept_step


def _get_information_on_active_bounds(
    x,
    gradient_unprojected,
    lower_bounds,
    upper_bounds,
):
    """Return the index set of active bounds.

    A lower (upper) bound counts as active only when the gradient pushes the
    parameter further out of the feasible region, i.e. the gradient is
    positive (negative) at the bound. Parameters with equal lower and upper
    bounds are treated as fixed and always active.
    """
    active_lower = np.where((x <= lower_bounds) & (gradient_unprojected > 0))[0]
    active_upper = np.where((x >= upper_bounds) & (gradient_unprojected < 0))[0]
    active_fixed = np.where(lower_bounds == upper_bounds)[0]
    active_all = reduce(np.union1d, (active_fixed, active_lower, active_upper))
    inactive = np.setdiff1d(np.arange(len(x)), active_all)

    active_bounds_info = ActiveBounds(
        lower=active_lower,
        upper=active_upper,
        fixed=active_fixed,
        active=active_all,
        inactive=inactive,
    )

    return active_bounds_info


def _find_hessian_submatrix_where_bounds_inactive(model, active_bounds_info):
    """Find the submatrix of the initial hessian where bounds are inactive."""
    # Fancy-index rows and columns simultaneously to get the square submatrix.
    hessian_inactive = model.square_terms[
        active_bounds_info.inactive[:, np.newaxis], active_bounds_info.inactive
    ]

    return hessian_inactive


def _check_for_convergence(
    x_candidate,
    f_candidate,
    gradient_candidate,
    model,
    lower_bounds,
    upper_bounds,
    converged,
    reason,
    niter,
    *,
    maxiter,
    gtol_abs,
    gtol_rel,
    gtol_scaled,
):
    """Check if we have found a solution.

    The gradient norm is measured on the Fischer-Burmeister direction vector,
    which accounts for the bound constraints. ``converged`` and ``reason``
    are passed through unchanged if no criterion fires.
    """
    direction_fischer_burmeister = _get_fischer_burmeister_direction_vector(
        x_candidate, gradient_candidate, lower_bounds, upper_bounds
    )
    gradient_norm = np.linalg.norm(direction_fischer_burmeister)
    gradient_norm_initial = np.linalg.norm(model.linear_terms)

    if gradient_norm < gtol_abs:
        converged = True
        reason = "Norm of the gradient is less than absolute_gradient_tolerance."
    elif f_candidate != 0 and abs(gradient_norm / f_candidate) < gtol_rel:
        converged = True
        reason = (
            "Norm of the gradient relative to the criterion value is less than "
            "relative_gradient_tolerance."
        )
    elif (
        gradient_norm_initial != 0
        and gradient_norm / gradient_norm_initial < gtol_scaled
    ):
        converged = True
        reason = (
            "Norm of the gradient divided by norm of the gradient at the "
            "initial parameters is less than scaled_gradient_tolerance."
        )
    elif gradient_norm_initial != 0 and gradient_norm == 0 and gtol_scaled == 0:
        converged = True
        reason = (
            "Norm of the gradient divided by norm of the gradient at the "
            "initial parameters is less than scaled_gradient_tolerance."
        )
    elif f_candidate <= -np.inf:
        converged = True
        reason = "Criterion value is negative infinity."
    elif niter is not None and niter == maxiter:
        reason = "Maximum number of iterations reached."
    return converged, reason


def _apply_bounds_to_x_candidate(x, lower_bounds, upper_bounds, bound_tol=0):
    """Apply upper and lower bounds to the candidate vector.

    Values within ``bound_tol`` of a bound are snapped exactly onto it.
    """
    x = np.where(x <= lower_bounds + bound_tol, lower_bounds, x)
    x = np.where(x >= upper_bounds - bound_tol, upper_bounds, x)

    return x


def _project_gradient_onto_feasible_set(gradient_unprojected, active_bounds_info):
    """Project gradient onto feasible set, where search directions unconstrained.

    Entries at active bounds are zeroed out; inactive entries are kept.
    """
    gradient_projected = np.zeros_like(gradient_unprojected)
    gradient_projected[active_bounds_info.inactive] = gradient_unprojected[
        active_bounds_info.inactive
    ]

    return gradient_projected


def _apply_bounds_to_conjugate_gradient_step(
    step_inactive,
    x_candidate,
    lower_bounds,
    upper_bounds,
    active_bounds_info,
):
    """Apply lower and upper bounds to the Conjugate Gradient step.

    Inactive coordinates take the computed step; coordinates at an active
    bound step exactly onto that bound; fixed coordinates do not move.
    """
    cg_step = np.zeros_like(x_candidate)
    cg_step[active_bounds_info.inactive] = step_inactive

    if active_bounds_info.lower.size > 0:
        x_active_lower = x_candidate[active_bounds_info.lower]
        lower_bound_active = lower_bounds[active_bounds_info.lower]
        cg_step[active_bounds_info.lower] = lower_bound_active - x_active_lower

    if active_bounds_info.upper.size > 0:
        x_active_upper = x_candidate[active_bounds_info.upper]
        upper_bound_active = upper_bounds[active_bounds_info.upper]
        cg_step[active_bounds_info.upper] = upper_bound_active - x_active_upper

    if active_bounds_info.fixed.size > 0:
        cg_step[active_bounds_info.fixed] = 0

    return cg_step


def _update_trustregion_radius_and_gradient_descent(
    trustregion_radius,
    radius_lower_bound,
    predicted_reduction,
    actual_reduction,
    gradient_norm,
    options,
):
    """Update the trust-region radius and its lower bound.

    The scaling factor tau is chosen from the interpolation quantities
    ``tau_1`` and ``tau_2`` depending on how well the actual reduction
    agrees with the predicted reduction (ratio kappa).
    """
    if abs(actual_reduction) <= EPSILON and abs(predicted_reduction) <= EPSILON:
        kappa = 1
    else:
        kappa = actual_reduction / predicted_reduction

    tau_1 = (
        options["theta"]
        * gradient_norm
        * trustregion_radius
        / (
            options["theta"] * gradient_norm * trustregion_radius
            + (1 - options["theta"]) * predicted_reduction
            - actual_reduction
        )
    )
    tau_2 = (
        options["theta"]
        * gradient_norm
        * trustregion_radius
        / (
            options["theta"] * gradient_norm * trustregion_radius
            - (1 + options["theta"]) * predicted_reduction
            + actual_reduction
        )
    )

    tau_min = min(tau_1, tau_2)
    tau_max = max(tau_1, tau_2)

    if abs(kappa - 1) <= options["mu1"]:
        # Great agreement
        radius_lower_bound = max(radius_lower_bound, trustregion_radius)

        if tau_max < 1:
            tau = options["gamma3"]
        elif tau_max > options["gamma4"]:
            tau = options["gamma4"]
        else:
            tau = tau_max

    elif abs(kappa - 1) <= options["mu2"]:
        # Good agreement
        radius_lower_bound = max(radius_lower_bound, trustregion_radius)

        if tau_max < options["gamma2"]:
            tau = options["gamma2"]
        elif tau_max > options["gamma3"]:
            tau = options["gamma3"]
        else:
            tau = tau_max

    else:
        # Not good agreement
        if tau_min > 1:
            tau = options["gamma2"]
        elif tau_max < options["gamma1"]:
            tau = options["gamma1"]
        elif (tau_min < options["gamma1"]) and (tau_max >= 1):
            tau = options["gamma1"]
        elif (
            (tau_1 >= options["gamma1"])
            and (tau_1 < 1.0)
            and ((tau_2 < options["gamma1"]) or (tau_2 >= 1.0))
        ):
            tau = tau_1
        elif (
            (tau_2 >= options["gamma1"])
            and (tau_2 < 1.0)
            # NOTE(review): by symmetry with the branch above, the second
            # disjunct presumably should test tau_1 (not tau_2, which is
            # already known to be < 1.0 here) — confirm against the
            # reference implementation before changing.
            and ((tau_1 < options["gamma1"]) or (tau_2 >= 1.0))
        ):
            tau = tau_2
        else:
            tau = tau_max

    trustregion_radius = trustregion_radius * tau

    return trustregion_radius, radius_lower_bound


def _get_fischer_burmeister_direction_vector(x, gradient, lower_bounds, upper_bounds):
    """Compute the constrained direction vector via the Fischer-Burmeister function.

    The scalar Fischer-Burmeister function is folded over the three terms
    (distance to upper bound, negative gradient, distance to lower bound);
    fixed parameters get the direction back onto their bound.
    """
    fischer_vec = np.vectorize(_get_fischer_burmeister_scalar)

    fischer_burmeister = reduce(
        fischer_vec, (upper_bounds - x, -gradient, x - lower_bounds)
    )
    direction = np.where(
        lower_bounds == upper_bounds, lower_bounds - x, fischer_burmeister
    )

    return direction


def _get_fischer_burmeister_scalar(a, b):
    """Get the value of the Fischer-Burmeister function for two scalar inputs.

    This method was suggested by Bob Vanderbei.
    Since the Fischer-Burmeister is symmetric, the order of the scalar inputs
    does not matter.

    Args:
        a (float): First input.
        b (float): Second input.

    Returns:
        float: Value of the Fischer-Burmeister function for inputs a and b.

    """
    # The two branches are algebraically equivalent forms of
    # sqrt(a**2 + b**2) - (a + b); the branch is chosen for numerical
    # stability depending on the sign of (a + b).
    if a + b <= 0:
        fischer_burmeister = np.sqrt(a**2 + b**2) - (a + b)
    else:
        fischer_burmeister = -2 * a * b / (np.sqrt(a**2 + b**2) + (a + b))

    return fischer_burmeister


def _evaluate_model_criterion(
    x,
    gradient,
    hessian,
):
    """Evaluate the criterion function value of the main model.

    Args:
        x (np.ndarray): Parameter vector of shape (n,).
        gradient (np.ndarray): Gradient of shape (n,) for which the main model
            shall be evaluated.
        hessian (np.ndarray): Hessian of shape (n, n) for which the main model
            shall be evaluated.

    Returns:
        float: Criterion value of the main model.

    """
    return gradient.T @ x + 0.5 * x.T @ hessian @ x


================================================
FILE: src/optimagic/optimizers/_pounders/gqtpar.py
================================================
"""Auxiliary functions for the quadratic GQTPAR trust-region subsolver."""

from typing import NamedTuple

import numpy as np
from scipy.linalg import cho_solve, solve_triangular
from scipy.linalg.lapack import dpotrf as compute_cholesky_factorization
from scipy.optimize._trustregion_exact import estimate_smallest_singular_value


class HessianInfo(NamedTuple):
    # H + lambda * I for the current damping factor lambda.
    hessian_plus_lambda: np.ndarray | None = None  # shape (n_params, n_params)
    # Upper triangular Cholesky factor of ``hessian_plus_lambda``.
    upper_triangular: np.ndarray | None = None  # shape (n_params, n_params)
    # Whether the factorization is already valid for the current iteration.
    already_factorized: bool = False


class DampingFactors(NamedTuple):
    # Current candidate for the damping factor lambda and its bracket.
    candidate: float | None = None
    lower_bound: float | None = None
    upper_bound: float | None = None


def gqtpar(model, x_candidate, *, k_easy=0.1, k_hard=0.2, maxiter=200):
    """Solve the quadratic trust-region subproblem via nearly exact iterative method.

    This subproblem solver is mainly based on Conn et al. (2000) "Trust region
    methods" (:cite:`Conn2000`), pp. 169-200.

    But ideas from Nocedal and Wright (2006) "Numerical optimization"
    (:cite:`Nocedal2006`), pp. 83-91, who implement a similar algorithm,
    were also used.

    The original algorithm was developed by More and Sorensen (1983)
    (:cite:`More1983`) and is known as "GQTPAR".

    The vector x* is a global solution to the quadratic subproblem:

        min_x f + g @ x + 0.5 * x.T @ H @ x,

        if and only if ||x|| <= trustregion_radius
        and if there is a scalar lambda >= 0, such that:

    1) (H + lambda * I(n)) x* = -g
    2) lambda (trustregion_radius - ||x*||) = 0
    3) H + lambda * I is positive definite

    where g denotes the gradient and H the hessian of the quadratic model,
    respectively.

    k_easy and k_hard are stopping criteria for the iterative subproblem solver.
    See pp. 194-197 in :cite:`Conn2000` for a more detailed description.

    Args:
        model (NamedTuple): NamedTuple containing the parameters of the
            main model, i.e.:
            - ``linear_terms``, a np.ndarray of shape (n,) and
            - ``square_terms``, a np.ndarray of shape (n,n).
        x_candidate (np.ndarray): Initial guess for the solution of the
            subproblem.
        k_easy (float): Stopping criterion for the "easy" case.
        k_hard (float): Stopping criterion for the "hard" case.
        maxiter (int): Maximum number of iterations to perform. If reached,
            terminate.

    Returns:
        (dict): Result dictionary containing the following keys:
            - ``x`` (np.ndarray): Solution vector of the subproblem of shape (n,)
            - ``criterion`` (float): Minimum function value associated with the
                solution.

    """
    hessian_info = HessianInfo()

    # Small floating point number signaling that for vectors smaller
    # than that backward substitution is not reliable.
    # See Golub, G. H., Van Loan, C. F. (2013), "Matrix computations", p.165.
    zero_threshold = (
        model.square_terms.shape[0]
        * np.finfo(float).eps
        * np.linalg.norm(model.square_terms, np.inf)
    )
    stopping_criteria = {
        "k_easy": k_easy,
        "k_hard": k_hard,
    }

    gradient_norm = np.linalg.norm(model.linear_terms)
    lambdas = _get_initial_guess_for_lambdas(model)

    converged = False

    # NOTE(review): ``_niter`` is referenced after the loop, so maxiter < 1
    # would raise NameError — confirm callers always pass maxiter >= 1.
    for _niter in range(maxiter):
        if hessian_info.already_factorized:
            # Reuse the factorization computed in the previous iteration.
            hessian_info = hessian_info._replace(already_factorized=False)
        else:
            hessian_info, factorization_info = add_lambda_and_factorize_hessian(
                model, hessian_info, lambdas
            )

        if factorization_info == 0 and gradient_norm > zero_threshold:
            (
                x_candidate,
                hessian_info,
                lambdas,
                converged,
            ) = _find_new_candidate_and_update_parameters(
                model,
                hessian_info,
                lambdas,
                stopping_criteria,
                converged,
            )

        elif factorization_info == 0 and gradient_norm <= zero_threshold:
            (
                x_candidate,
                lambdas,
                converged,
            ) = _check_for_interior_convergence_and_update(
                x_candidate,
                hessian_info,
                lambdas,
                stopping_criteria,
                converged,
            )

        else:
            # Factorization failed: the leading submatrix reported by LAPACK
            # is not positive definite, so increase lambda.
            lambdas = _update_lambdas_when_factorization_unsuccessful(
                hessian_info,
                lambdas,
                factorization_info,
            )

        if converged:
            break

    f_min = (
        model.linear_terms.T @ x_candidate
        + 0.5 * x_candidate.T @ model.square_terms @ x_candidate
    )
    result = {
        "x": x_candidate,
        "criterion": f_min,
        "n_iterations": _niter,
        "success": converged,
    }

    return result


def _get_initial_guess_for_lambdas(
    main_model,
):
    """Return good initial guesses for lambda, its lower and upper bound.

    Given a trust-region radius, good initial guesses for the damping factor
    lambda, along with its lower bound and upper bound, are computed.

    The values are chosen accordingly to the guidelines on
    section 7.3.8 (p. 192) from :cite:`Conn2000`.

    Args:
        main_model (NamedTuple): Named tuple containing the parameters of the
            main model, i.e.:
            - ``linear_terms``, a np.ndarray of shape (n,) and
            - ``square_terms``, a np.ndarray of shape (n,n).

    Returns:
        (dict): Dictionary containing the initial guess for the damping
            factor lambda, along with its lower and upper bound.
        The respective keys are:
        - "candidate"
        - "upper_bound"
        - "lower_bound"

    """
    gradient_norm = np.linalg.norm(main_model.linear_terms)
    model_hessian = main_model.square_terms

    hessian_infinity_norm = np.linalg.norm(model_hessian, np.inf)
    hessian_frobenius_norm = np.linalg.norm(model_hessian, "fro")

    hessian_gershgorin_lower, hessian_gershgorin_upper = _compute_gershgorin_bounds(
        main_model
    )

    # Bracket lambda using the Gershgorin, Frobenius and infinity norms of
    # the hessian (Conn et al. 2000, section 7.3.8).
    lambda_lower_bound = max(
        0,
        -min(model_hessian.diagonal()),
        gradient_norm
        - min(hessian_gershgorin_upper, hessian_frobenius_norm, hessian_infinity_norm),
    )
    lambda_upper_bound = max(
        0,
        gradient_norm
        + min(-hessian_gershgorin_lower, hessian_frobenius_norm, hessian_infinity_norm),
    )

    if lambda_lower_bound == 0:
        lambda_candidate = 0
    else:
        lambda_candidate = _get_new_lambda_candidate(
            lower_bound=lambda_lower_bound, upper_bound=lambda_upper_bound
        )

    lambdas = DampingFactors(
        candidate=lambda_candidate,
        lower_bound=lambda_lower_bound,
        upper_bound=lambda_upper_bound,
    )

    return lambdas


def add_lambda_and_factorize_hessian(main_model, hessian_info, lambdas):
    """Add lambda to hessian and factorize it into its upper triangular matrix.

    Args:
        main_model (NamedTuple): Named tuple containing the parameters of the
            main model, i.e.:
            - ``linear_terms``, a np.ndarray of shape (n,) and
            - ``square_terms``, a np.ndarray of shape (n,n).
        hessian_info (NamedTuple): Named tuple containing transformations of the
            hessian, i.e. square_terms, from the main model. The keys are:
            - ``hessian_plus_lambda`` (np.ndarray): The square terms of the main
                model plus the identity matrix times lambda. 2d array of
                shape (n, n).
            - ``upper_triangular`` (np.ndarray): Factorization of the hessian from
                the main model into its upper triangular matrix. The diagonal is
                filled and the lower triangle contains zeros. 2d array of
                shape (n, n).
            - ``info_already_factorized`` (bool): Boolean indicating whether the
                hessian has already been factorized for the current iteration.

    Returns:
        Tuple:
        - hessian_info (dict): Named tuple containing the updated transformations
            of the hessian, i.e. square_terms, from the main model. See above.
        - factorization_info (int): Non-negative integer k indicating whether the
            factorization of the hessian into its upper triangular matrix has
            been successful.
            If k = 0, the factorization has been successful.
            A value k > 0 means that the leading k by k submatrix constitutes the
            first non-positive definite leading submatrix of the hessian.

    """
    n = main_model.square_terms.shape[0]

    hessian_plus_lambda = main_model.square_terms + lambdas.candidate * np.eye(n)
    hessian_upper_triangular, factorization_info = compute_cholesky_factorization(
        hessian_plus_lambda,
        lower=False,
        overwrite_a=False,
        clean=True,
    )

    hessian_info_new = hessian_info._replace(
        hessian_plus_lambda=hessian_plus_lambda,
        upper_triangular=hessian_upper_triangular,
    )

    return hessian_info_new, factorization_info


def _find_new_candidate_and_update_parameters(
    main_model,
    hessian_info,
    lambdas,
    stopping_criteria,
    converged,
):
    """Find new candidate vector and update transformed hessian and lambdas."""
    # Solve (H + lambda I) x = -g via the Cholesky factor.
    x_candidate = cho_solve(
        (hessian_info.upper_triangular, False), -main_model.linear_terms
    )
    x_norm = np.linalg.norm(x_candidate)

    # Interior solution with zero damping is already optimal.
    if x_norm <= 1 and lambdas.candidate == 0:
        converged = True

    w = solve_triangular(hessian_info.upper_triangular, x_candidate, trans="T")
    w_norm = np.linalg.norm(w)

    newton_step = _compute_newton_step(lambdas, x_norm, w_norm)

    if x_norm < 1:
        (
            x_candidate,
            hessian_info,
            lambdas_new,
            converged,
        ) = _update_candidate_and_parameters_when_candidate_within_trustregion(
            x_candidate,
            main_model,
            hessian_info,
            lambdas,
            newton_step,
            stopping_criteria,
            converged,
        )
    else:
        lambdas_new, converged = _update_lambdas_when_candidate_outside_trustregion(
            lambdas,
            newton_step,
            x_norm,
            stopping_criteria,
            converged,
        )

    return (
        x_candidate,
        hessian_info,
        lambdas_new,
        converged,
    )


def _check_for_interior_convergence_and_update(
    x_candidate,
    hessian_info,
    lambdas,
    stopping_criteria,
    converged,
):
    """Check for interior convergence, update candidate vector and lambdas."""
    if lambdas.candidate == 0:
        x_candidate = np.zeros_like(x_candidate)
        converged = True

    s_min, z_min = estimate_smallest_singular_value(hessian_info.upper_triangular)
    step_len = 2

    if step_len**2 * s_min**2 <= stopping_criteria["k_hard"] * lambdas.candidate:
        x_candidate = step_len * z_min
        converged = True

    lambda_lower_bound = max(lambdas.lower_bound, lambdas.upper_bound - s_min**2)
    lambda_new_candidate = _get_new_lambda_candidate(
        lower_bound=lambda_lower_bound, upper_bound=lambdas.candidate
    )

    lambdas_new = lambdas._replace(
        candidate=lambda_new_candidate,
        lower_bound=lambda_lower_bound,
        upper_bound=lambdas.candidate,
    )

    return x_candidate, lambdas_new, converged


def _update_lambdas_when_factorization_unsuccessful(
    hessian_info, lambdas, factorization_info
):
    """Update lambdas in the case that factorization of hessian not successful."""
    delta, v = _compute_terms_to_make_leading_submatrix_singular(
        hessian_info,
        factorization_info,
    )
    v_norm = np.linalg.norm(v)

    lambda_lower_bound = max(lambdas.lower_bound, lambdas.candidate + delta / v_norm**2)
    lambda_new_candidate = _get_new_lambda_candidate(
        lower_bound=lambda_lower_bound, upper_bound=lambdas.upper_bound
    )

    lambdas_new = lambdas._replace(
        candidate=lambda_new_candidate,
        lower_bound=lambda_lower_bound,
    )

    return lambdas_new


def _get_new_lambda_candidate(lower_bound, upper_bound):
    """Update current lambda so that it lies within its bounds.

    Args:
        lower_bound (float): lower bound of the current candidate damping factor.
        upper_bound (float): upper bound of the current candidate damping factor.

    Returns:
        float: New candidate for the damping factor lambda.
""" lambda_new_candidate = max( np.sqrt(np.clip(lower_bound * upper_bound, 0, np.inf)), lower_bound + 0.01 * (upper_bound - lower_bound), ) return lambda_new_candidate def _compute_gershgorin_bounds(main_model): """Compute upper and lower Gregoshgorin bounds for a square matrix. The Gregoshgorin bounds are the upper and lower bounds for the eigenvalues of the square hessian matrix (i.e. the square terms of the main model). See :cite:`Conn2000`. Args: main_model (NamedTuple): Named tuple containing the parameters of the main model, i.e.: - ``linear_terms``, a np.ndarray of shape (n,) and - ``square_terms``, a np.ndarray of shape (n,n). Returns: Tuple: - lower_bound (float): Lower Gregoshgorin bound. - upper_bound (float): Upper Gregoshgorin bound. """ model_hessian = main_model.square_terms hessian_diag = np.diag(model_hessian) hessian_diag_abs = np.abs(hessian_diag) hessian_row_sums = np.sum(np.abs(model_hessian), axis=1) lower_gershgorin = np.min(hessian_diag + hessian_diag_abs - hessian_row_sums) upper_gershgorin = np.max(hessian_diag - hessian_diag_abs + hessian_row_sums) return lower_gershgorin, upper_gershgorin def _compute_newton_step(lambdas, p_norm, w_norm): """Compute the Newton step. Args: lambdas (NamedTuple): Named tuple containing the current candidate value for the damping factor lambda, its lower bound and upper bound. p_norm (float): Frobenius (i.e. L2-norm) of the candidate vector. w_norm (float): Frobenius (i.e. L2-norm) of vector w, which is the solution to the following triangular system: U.T w = p. Returns: float: Newton step computed according to formula (4.44) p.87 from Nocedal and Wright (2006). 
""" return lambdas.candidate + (p_norm / w_norm) ** 2 * (p_norm - 1) def _update_candidate_and_parameters_when_candidate_within_trustregion( x_candidate, main_model, hessian_info, lambdas, newton_step, stopping_criteria, converged, ): """Update candidate vector, hessian, and lambdas when x outside trust-region.""" n = len(x_candidate) s_min, z_min = estimate_smallest_singular_value(hessian_info.upper_triangular) step_len = _compute_smallest_step_len_for_candidate_vector(x_candidate, z_min) quadratic_term = x_candidate.T @ hessian_info.hessian_plus_lambda @ x_candidate relative_error = (step_len**2 * s_min**2) / (quadratic_term + lambdas.candidate) if relative_error <= stopping_criteria["k_hard"]: x_candidate = x_candidate + step_len * z_min converged = True lambda_new_lower_bound = max(lambdas.lower_bound, lambdas.candidate - s_min**2) hessian_plus_lambda = main_model.square_terms + newton_step * np.eye(n) _, factorization_unsuccessful = compute_cholesky_factorization( hessian_plus_lambda, lower=False, overwrite_a=False, clean=True, ) if factorization_unsuccessful == 0: hessian_already_factorized = True lambda_new_candidate = newton_step else: hessian_already_factorized = hessian_info.already_factorized lambda_new_lower_bound = max(lambda_new_lower_bound, newton_step) lambda_new_candidate = _get_new_lambda_candidate( lower_bound=lambda_new_lower_bound, upper_bound=lambdas.candidate ) hessian_info_new = hessian_info._replace( hessian_plus_lambda=hessian_plus_lambda, already_factorized=hessian_already_factorized, ) lambdas_new = lambdas._replace( candidate=lambda_new_candidate, lower_bound=lambda_new_lower_bound, upper_bound=lambdas.candidate, ) return x_candidate, hessian_info_new, lambdas_new, converged def _update_lambdas_when_candidate_outside_trustregion( lambdas, newton_step, p_norm, stopping_criteria, converged ): """Update lambas in the case that candidate vector lies outside trust-region.""" relative_error = abs(p_norm - 1) if relative_error <= 
stopping_criteria["k_easy"]: converged = True lambdas_new = lambdas._replace(candidate=newton_step, lower_bound=lambdas.candidate) return lambdas_new, converged def _compute_smallest_step_len_for_candidate_vector(x_candidate, z_min): """Compute the smallest step length for the candidate vector. Choose step_length with the smallest magnitude. The reason for this choice is explained at p. 6 in :cite:`More1983`, just before the formula for tau. Args: x_candidate (np.ndarray): Candidate vector of shape (n,). z_min (float): Smallest singular value of the hessian matrix. Returns: float: Step length with the smallest magnitude. """ ta, tb = _solve_scalar_quadratic_equation(x_candidate, z_min) step_len = min([ta, tb], key=abs) return step_len def _solve_scalar_quadratic_equation(z, d): """Return the sorted values that solve the scalar quadratic equation. Solve the scalar quadratic equation ||z + t d|| == trustregion_radius. This is like a line-sphere intersection. Computation of the ``aux`` step, ``ta`` and ``tb`` is mathematically equivalent to equivalent the following calculation: ta = (-b - sqrt_discriminant) / (2*a) tb = (-b + sqrt_discriminant) / (2*a) but produces smaller round-off errors. For more details, look at "Matrix Computation" p.97. Args: z (np.ndarray): Eigenvector of the upper triangular hessian matrix. d (float): Smallest singular value of the upper triangular of the hessian matrix. Returns: Tuple: The two values of t, sorted from low to high. - (float) Lower value of t. - (float) Higher value of t. """ a = d.T @ d b = 2 * z.T @ d c = z.T @ z - 1 sqrt_discriminant = np.sqrt(b * b - 4 * a * c) aux = b + np.copysign(sqrt_discriminant, b) ta = -aux / (2 * a) tb = -2 * c / aux return sorted([ta, tb]) def _compute_terms_to_make_leading_submatrix_singular(hessian_info, k): """Compute terms that make the leading submatrix of the hessian singular. 
The "hessian" here refers to the matrix H + lambda * I(n), where H is the initial hessian, lambda is the current damping factor, I the identity matrix, and m the number of rows/columns of the symmetric hessian matrix. Args: hessian (np.ndarray): Symmetric k by k hessian matrix, which is not positive definite. upper_triangular (np.ndarray) Upper triangular matrix resulting of an incomplete Cholesky decomposition of the hessian matrix. k (int): Positive integer such that the leading k by k submatrix from hessian is the first non-positive definite leading submatrix. Returns: Tuple: - delta(float): Amount that should be added to the element (k, k) of the leading k by k submatrix of the hessian to make it singular. - v (np.ndarray): A vector such that ``v.T B v = 0``. Where B is the hessian after ``delta`` is added to its element (k, k). """ hessian_plus_lambda = hessian_info.hessian_plus_lambda upper_triangular = hessian_info.upper_triangular n = len(hessian_plus_lambda) delta = ( np.sum(upper_triangular[: k - 1, k - 1] ** 2) - hessian_plus_lambda[k - 1, k - 1] ) v = np.zeros(n) v[k - 1] = 1 if k != 1: v[: k - 1] = solve_triangular( upper_triangular[: k - 1, : k - 1], -upper_triangular[: k - 1, k - 1] ) return delta, v ================================================ FILE: src/optimagic/optimizers/_pounders/linear_subsolvers.py ================================================ """Collection of linear trust-region subsolvers.""" from typing import NamedTuple import numpy as np class LinearModel(NamedTuple): intercept: float | None = None linear_terms: np.ndarray | None = None # shape (n_params, n_params) def minimize_trsbox_linear( linear_model, lower_bounds, upper_bounds, trustregion_radius, *, zero_treshold=1e-14 ): """Minimize a linear trust-region subproblem using the trsbox algorithm. Solve the linear subproblem: min_x g.T @ x s.t. lower_bound <= x <= upper_bound ||x||**2 <= trustregion_radius**2 using an active-set approach. 
This algorithm is an implementation of the TRSBOX routine from M. J. D. Powell (2009) "The BOBYQA algorithm for bound constrained optimization without derivatives." (cite:`Powell2009`). Args: linear_model (NamedTuple): Named tuple containing the parameters of the linear model, i.e.: - ``intercept`` (float): Intercept of the linear model. - ``linear_terms`` (np.ndarray): 1d array of shape (n,) with the linear terms of the mdoel. lower_bounds (np.ndarray): 1d array of shape (n,) with lower bounds for the parameter vector x. upper_bounds (np.ndarray): 1d array of shape (n,) with upper bounds for the parameter vector x. trustregion_radius (float): Radius of the trust-region. zero_treshold (float): Treshold for treating numerical values as zero. Numbers smaller than this are considered zero up to machine precision. Returns: (np.ndarray): Solution vector for the linear trust-region subproblem. Array of shape (n,). """ lower_bounds_internal = np.minimum(lower_bounds, -zero_treshold) upper_bounds_internal = np.maximum(upper_bounds, zero_treshold) model_gradient = linear_model.linear_terms n = len(model_gradient) x_candidate = np.zeros(n) direction = -model_gradient indices_inactive_directions = np.where(np.abs(direction) < zero_treshold)[0] direction[indices_inactive_directions] = 0 active_directions = np.setdiff1d(np.arange(n), indices_inactive_directions) set_active_directions = iter(active_directions) for _ in range(n): if np.linalg.norm(direction) < zero_treshold: break x_candidate_unconstr = _take_unconstrained_step_up_to_boundary( x_candidate, direction, trustregion_radius, zero_treshold=zero_treshold ) active_bound, index_active_bound = _find_next_active_bound( x_candidate_unconstr, lower_bounds_internal, upper_bounds_internal, set_active_directions, ) if active_bound is None: x_candidate = x_candidate_unconstr break else: x_candidate, direction = _take_constrained_step_up_to_boundary( x_candidate, direction, active_bound, index_active_bound, ) return x_candidate 
def improve_geomtery_trsbox_linear(
    x_center,
    linear_model,
    lower_bounds,
    upper_bounds,
    trustregion_radius,
    *,
    zero_treshold=1e-14,
):
    """Maximize a Lagrange polynomial of degree one to improve geometry of the model.

    Let a Lagrange polynomial of degree one be defined by:
        L(x) = c + g.T @ (x - x_center),

    where c and g denote the constant term and the linear terms (gradient)
    of the linear model, respectively.

    In order to maximize L(x), we maximize the absolute value of L(x) in a
    trust-region setting. I.e. we solve:

        max_x  abs(c + g.T @ (x - x_center))
            s.t. lower_bound <= x <= upper_bound
                 ||x - x_center|| <= trustregion_radius

    In order to find the solution x*, we first minimize and then maximize
    g.T @ (x - center). The resulting candidate vectors are then plugged into
    the objective function L(x) to check which one yields the largest absolute
    value of the Lagrange polynomial.

    Note: the "geomtery" spelling in the name is a historical typo that is kept
    because callers import it under this name.

    Args:
        x_center (np.ndarray): 1d array of shape (n,) containing the center of
            the parameter vector.
        linear_model (NamedTuple): Named tuple containing the parameters of the
            linear model that form the Lagrange polynomial, including:
            - ``intercept`` (float): Intercept of the linear model.
            - ``linear_terms`` (np.ndarray): 1d array of shape (n,) with the
                linear terms of the model.
        lower_bounds (np.ndarray): 1d array of shape (n,) with lower bounds
            for the parameter vector x.
        upper_bounds (np.ndarray): 1d array of shape (n,) with upper bounds
            for the parameter vector x.
        trustregion_radius (float): Radius of the trust-region.
        zero_treshold (float): Threshold for treating numerical values as zero.
            Numbers smaller than this are considered zero up to machine
            precision.

    Returns:
        np.ndarray: Solution vector of shape (n,) that maximizes the Lagrange
            polynomial.

    """
    if np.any(lower_bounds > x_center + zero_treshold):
        raise ValueError("x_center violates lower bound.")
    if np.any(x_center - zero_treshold > upper_bounds):
        raise ValueError("x_center violates upper bound.")

    # Minimize and maximize g.T @ (x - x_center), respectively.
    # Maximization is done by minimizing the model with negated gradient.
    linear_model_to_minimize = linear_model
    linear_model_to_maximize = linear_model._replace(
        linear_terms=-linear_model.linear_terms
    )

    # Both subproblems are solved in coordinates centered at x_center.
    x_candidate_min = minimize_trsbox_linear(
        linear_model_to_minimize,
        lower_bounds - x_center,
        upper_bounds - x_center,
        trustregion_radius,
        zero_treshold=zero_treshold,
    )
    x_candidate_max = minimize_trsbox_linear(
        linear_model_to_maximize,
        lower_bounds - x_center,
        upper_bounds - x_center,
        trustregion_radius,
        zero_treshold=zero_treshold,
    )

    # Absolute value of the Lagrange polynomial in centered coordinates.
    lagrange_polynomial = lambda x: abs(
        linear_model.intercept + linear_model.linear_terms.T @ x
    )

    # Pick the candidate with the larger |L(x)| and shift back to the
    # original (uncentered) coordinates.
    if lagrange_polynomial(x_candidate_min) >= lagrange_polynomial(x_candidate_max):
        x_lagrange = x_candidate_min + x_center
    else:
        x_lagrange = x_candidate_max + x_center

    return x_lagrange


def _find_next_active_bound(
    x_candidate_unconstr,
    lower_bounds,
    upper_bounds,
    set_active_directions,
):
    """Find the next active bound and return its index.

    A (lower or upper) bound is considered active if
        x_candidate <= lower_bounds
        x_candidate >= upper_bounds

    Args:
        x_candidate_unconstr (np.ndarray): Unconstrained candidate vector of
            shape (n,), which ignores bound constraints.
        lower_bounds (np.ndarray): 1d array of shape (n,) with lower bounds
            for the parameter vector x.
        upper_bounds (np.ndarray): 1d array of shape (n,) with upper bounds
            for the parameter vector x.
        set_active_directions (iterator): Iterator over the indices of active
            search directions, i.e. directions that are not zero.

    Returns:
        Tuple:
        - active_bound (float or None): The next active bound. It can be a
            lower or upper bound. If None, there are no more active bounds
            left in the set of active search directions.
        - index_bound_active (int or None): Index where an active lower or
            upper bound has been found. None, if no active bound has been
            detected.

    """
    index_active = next(set_active_directions)

    # Walk through the remaining active directions until one hits a bound;
    # the iterator is shared with the caller, so consumed indices are never
    # revisited in later iterations of the active-set loop.
    while True:
        if x_candidate_unconstr[index_active] >= upper_bounds[index_active]:
            active_bound = upper_bounds[index_active]
            break
        elif x_candidate_unconstr[index_active] <= lower_bounds[index_active]:
            active_bound = lower_bounds[index_active]
            break
        else:
            try:
                index_active = next(set_active_directions)
            except StopIteration:
                # No direction hits a bound anymore.
                active_bound = None
                break

    return active_bound, index_active


def _take_constrained_step_up_to_boundary(
    x_candidate, direction, active_bound, index_bound_active
):
    """Take largest constrained step possible until trust-region boundary is hit.

    Args:
        x_candidate (np.ndarray): Current candidate vector of shape (n,).
        direction (np.ndarray): Direction vector of shape (n,).
        active_bound (float): The active (lower or upper) bound.
        index_bound_active (int): Index where an active lower or upper bound
            has been found.

    Returns:
        Tuple:
        - x_candidate (np.ndarray): New candidate vector of shape (n,).
        - direction (np.ndarray): New direction vector of shape (n,), where
            the search direction of the active_bound has been set to zero.

    """
    # Step length that moves the active coordinate exactly onto its bound.
    step_size_constr = (active_bound - x_candidate[index_bound_active]) / direction[
        index_bound_active
    ]

    x_candidate = x_candidate + step_size_constr * direction
    # Pin exactly to the bound to avoid floating-point drift.
    x_candidate[index_bound_active] = active_bound

    # Do not search in this direction anymore
    direction[index_bound_active] = 0

    return x_candidate, direction


def _take_unconstrained_step_up_to_boundary(
    x_candidate, direction, trustregion_radius, zero_treshold
):
    """Take largest unconstrained step possible until trust-region boundary is hit.

    Args:
        x_candidate (np.ndarray): Current candidate vector of shape (n,).
        direction (np.ndarray): Direction vector of shape (n,).
        trustregion_radius (float): Radius of the trust-region.
        zero_treshold (float): Threshold for treating numerical values as zero.
            Numbers smaller than this are considered zero up to machine
            precision.

    Returns:
        np.ndarray: Updated, unconstrained candidate vector of shape (n,).

    """
    step_size_unconstr = _get_distance_to_trustregion_boundary(
        x_candidate, direction, trustregion_radius, zero_treshold
    )
    x_candidate_unconstr = x_candidate + step_size_unconstr * direction

    return x_candidate_unconstr


def _get_distance_to_trustregion_boundary(
    x, direction, trustregion_radius, zero_treshold
):
    """Compute the candidate vector's distance to the trustregion boundary.

    Given the candidate vector, find the largest step alpha in direction g
    that satisfies ||x|| <= trustregion_radius,
    where g denotes the direction vector.

    To find alpha, i.e. the candidate's distance to the trust-region boundary,
    solve:
      ||x + alpha * g||**2 = trustregion_radius**2
        s.t. alpha >= 0

    Using this method, the solution exists whenever
    ||x|| <= trustregion_radius.

    Choose alpha = 0, if the direction vector is zero everywhere.

    Args:
        x (np.ndarray): Candidate vector of shape (n,).
        direction (np.ndarray): Direction vector of shape (n,).
        trustregion_radius (float): Radius of the trust-region.
        zero_treshold (float): Threshold for treating numerical values as zero.
            Numbers smaller than this are considered zero up to machine
            precision.

    Returns:
        float: Distance between the candidate vector and the trust-region
            boundary.
    """
    g_dot_x = direction.T @ x
    g_sumsq = direction @ direction
    x_sumsq = x @ x

    l2_norm = np.sqrt(g_sumsq)

    if l2_norm < zero_treshold:
        # Zero direction: no step can be taken.
        distance_to_boundary = 0
    else:
        # Positive root of the quadratic ||x + alpha * g||^2 = radius^2;
        # the max(0, .) guards against tiny negative discriminants caused by
        # floating-point rounding when x is already (nearly) on the boundary.
        distance_to_boundary = (
            np.sqrt(
                np.maximum(0, g_dot_x**2 + g_sumsq * (trustregion_radius**2 - x_sumsq))
            )
            - g_dot_x
        ) / g_sumsq

    return distance_to_boundary


================================================
FILE: src/optimagic/optimizers/_pounders/pounders_auxiliary.py
================================================
"""Auxiliary functions for the pounders algorithm."""

from typing import NamedTuple

import numpy as np
from scipy.linalg import qr_multiply

from optimagic.optimizers._pounders.bntr import (
    bntr,
)
from optimagic.optimizers._pounders.gqtpar import (
    gqtpar,
)


class ResidualModel(NamedTuple):
    # Quadratic model of each residual:
    # r_j(x) = intercepts[j] + linear_terms[:, j] @ x + 0.5 x.T @ square_terms[j] @ x
    intercepts: np.ndarray | None = None  # shape (n_residuals,)
    linear_terms: np.ndarray | None = None  # shape (n_residuals, n_params)
    square_terms: np.ndarray | None = None  # shape (n_residuals, n_params, n_params)


class MainModel(NamedTuple):
    # Quadratic model of the scalar criterion (sum of squared residuals).
    linear_terms: np.ndarray | None = None  # shape (n_params,)
    square_terms: np.ndarray | None = None  # shape (n_params, n_params)


def create_initial_residual_model(history, accepted_index, delta):
    """Update linear and square terms of the initial residual model.

    Args:
        history (LeastSquaresHistory): Class storing history of xs, residuals,
            and critvals.
        accepted_index (int): Index in history pointing to the currently
            accepted candidate vector.
        delta (float): Trust-region radius.

    Returns:
        ResidualModel: Residual model containing the initial parameters for
            ``linear_terms`` and ``square_terms``.

    """
    center_info = {
        "x": history.get_best_x(),
        "residuals": history.get_best_residuals(),
        "radius": delta,
    }
    n_params = len(center_info["x"])
    n_residuals = center_info["residuals"].shape[0]

    # All initial sample points except the accepted (best) one; the model is
    # interpolated through them relative to the center.
    indices_not_min = [i for i in range(n_params + 1) if i != accepted_index]

    x_candidate, residuals_candidate, _ = history.get_centered_entries(
        center_info=center_info,
        index=indices_not_min,
    )

    # Exactly-determined linear interpolation; curvature starts at zero.
    linear_terms = np.linalg.solve(x_candidate, residuals_candidate)
    square_terms = np.zeros((n_residuals, n_params, n_params))

    residual_model = ResidualModel(
        intercepts=history.get_best_residuals(),
        linear_terms=linear_terms,
        square_terms=square_terms,
    )

    return residual_model


def update_residual_model(residual_model, coefficients_to_add, delta, delta_old):
    """Update linear and square terms of the residual model.

    Args:
        residual_model (ResidualModel): Residual model with the following
            parameters: ``intercepts``, ``linear_terms``, and ``square terms``.
        coefficients_to_add (dict): Coefficients used for updating the
            parameters of the residual model.
        delta (float): Trust region radius of the current iteration.
        delta_old (float): Trust region radius of the previous iteration.

    Returns:
        ResidualModel: Residual model containing the updated parameters
            ``linear_terms`` and ``square_terms``.

    """
    # The delta / delta_old factors rescale the old coefficients from the
    # previous trust-region radius to the current one (linear terms scale
    # linearly, square terms quadratically).
    linear_terms_new = (
        coefficients_to_add["linear_terms"]
        + (delta / delta_old) * residual_model.linear_terms
    )

    square_terms_new = (
        coefficients_to_add["square_terms"]
        + (delta / delta_old) ** 2 * residual_model.square_terms
    )

    residual_model_updated = residual_model._replace(
        linear_terms=linear_terms_new, square_terms=square_terms_new
    )

    return residual_model_updated


def create_main_from_residual_model(
    residual_model, multiply_square_terms_with_intercepts=True
):
    """Update linear and square terms of the main model via the residual model.

    Args:
        residual_model (ResidualModel): Residual model with the following
            parameters: ``intercepts``, ``linear_terms``, and ``square terms``.
        multiply_square_terms_with_intercepts (bool): Indicator whether we
            multiply the main model's ``square terms`` with the intercepts
            of the residual model.

    Returns:
        MainModel: Main model containing the updated parameters
            ``linear_terms`` and ``square terms``.

    """
    # Gauss-Newton style approximation of the criterion (= sum of squared
    # residuals): gradient J.T @ r and Hessian J.T @ J ...
    linear_terms_main_model = residual_model.linear_terms @ residual_model.intercepts
    square_terms_main_model = (
        residual_model.linear_terms @ residual_model.linear_terms.T
    )

    # ... optionally augmented by the second-order residual curvature term.
    if multiply_square_terms_with_intercepts is True:
        square_terms_main_model = (
            square_terms_main_model
            + residual_model.square_terms.T @ residual_model.intercepts
        )

    main_model = MainModel(
        linear_terms=linear_terms_main_model, square_terms=square_terms_main_model
    )

    return main_model


def update_main_model_with_new_accepted_x(main_model, x_candidate):
    """Use accepted candidate to update the linear terms of the residual model.

    Args:
        main_model (MainModel): Main model with the following parameters:
            ``linear_terms`` and ``square terms``.
        x_candidate (np.ndarray): Vector of centered x candidates of shape
            (n_params,).

    Returns:
        MainModel: Main model containing the updated ``linear_terms``.

    """
    # Re-center the gradient of the quadratic model at the new accepted point.
    linear_terms_new = main_model.linear_terms + main_model.square_terms @ x_candidate
    main_model_updated = main_model._replace(linear_terms=linear_terms_new)

    return main_model_updated


def update_residual_model_with_new_accepted_x(residual_model, x_candidate):
    """Use accepted candidate to update residual model.

    Args:
        residual_model (ResidualModel): Residual model containing the
            parameters of the residual model, i.e. ``intercepts``,
            ``linear_terms``, and ``square terms``.
        x_candidate (np.ndarray): Vector of centered x candidates of shape
            (n_params,).

    Returns:
        ResidualModel: Residual model containing the updated parameters
            ``intercepts`` and ``linear_terms``.

    """
    # Evaluate each residual's quadratic model at the new center ...
    intercepts_new = (
        residual_model.intercepts
        + x_candidate @ residual_model.linear_terms
        + 0.5 * (x_candidate.T @ residual_model.square_terms @ x_candidate)
    )
    # ... and shift the gradients accordingly.
    linear_terms_new = (
        residual_model.linear_terms + (residual_model.square_terms @ x_candidate).T
    )

    residual_model_updated = residual_model._replace(
        intercepts=intercepts_new, linear_terms=linear_terms_new
    )

    return residual_model_updated


def solve_subproblem(
    x_accepted,
    main_model,
    lower_bounds,
    upper_bounds,
    delta,
    solver,
    *,
    conjugate_gradient_method,
    maxiter,
    maxiter_gradient_descent,
    gtol_abs,
    gtol_rel,
    gtol_scaled,
    gtol_abs_conjugate_gradient,
    gtol_rel_conjugate_gradient,
    k_easy,
    k_hard,
):
    """Solve the quadratic subproblem.

    Args:
        x_accepted (np.ndarray): Currently accepted candidate vector of shape
            (n_params,).
        main_model (MainModel): Main model with the following parameters:
            ``linear_terms`` and ``square terms``.
        lower_bounds (np.ndarray): 1d array of shape (n_params,) with lower
            bounds for the parameter vector x.
        upper_bounds (np.ndarray): 1d array of shape (n_params,) with upper
            bounds for the parameter vector x.
        delta (float): Current trust-region radius.
        solver (str): Trust-region subsolver to use. Currently, two internal
            solvers are supported:
            - "bntr" (default, supports bound constraints)
            - "gqtpar" (does not support bound constraints)
        conjugate_gradient_method (str): Method for computing the conjugate
            gradient step. Available conjugate gradient methods are:
            - "cg"
            - "steihaug_toint"
            - "trsbox" (default)
        maxiter (int): Maximum number of iterations to perform when solving
            the trust-region subproblem.
        maxiter_gradient_descent (int): Maximum number of gradient descent
            iterations to perform when the trust-region subsolver "bntr" is
            used.
        gtol_abs (float): Convergence tolerance for the absolute gradient norm
            in the trust-region subproblem ("bntr").
gtol_rel (float): Convergence tolerance for the relative gradient norm in the trust-region subproblem ("bntr"). gtol_scaled (float): Convergence tolerance for the scaled gradient norm in the trust-region subproblem ("bntr"). gtol_abs_conjugate_gradient (float): Convergence tolerance for the absolute gradient norm in the conjugate gradient step of the trust-region subproblem ("bntr"). gtol_rel_conjugate_gradient (float): Convergence tolerance for the relative gradient norm in the conjugate gradient step of the trust-region subproblem ("bntr"). k_easy (float): Stopping criterion for the "easy" case in the trust-region subproblem ("gqtpar"). k_hard (float): Stopping criterion for the "hard" case in the trust-region subproblem ("gqtpar"). Returns: (dict): Result dictionary containing the followng keys: - "x" (np.ndarray): The solution vector of shape (n_params,) - "criterion" (float): The value of the criterion functions associated with the solution - "n_iterations" (int): Number of iterations performed before termination. - "success" (bool): Boolean indicating whether a solution has been found before reaching maxiter. """ x0 = np.zeros_like(x_accepted) # Normalize bounds. 
If none provided, use unit cube [-1, 1] if lower_bounds is not None: lower_bounds = (lower_bounds - x_accepted) / delta lower_bounds[lower_bounds < -1] = -1 else: lower_bounds = -np.ones_like(x_accepted) if upper_bounds is not None: upper_bounds = (upper_bounds - x_accepted) / delta upper_bounds[upper_bounds > 1] = 1 else: upper_bounds = np.ones_like(x_accepted) # Check if bounds are valid if np.max(lower_bounds - upper_bounds) > 1e-10: raise ValueError("Upper bounds < lower bounds in subproblem.") if np.max(lower_bounds - x0) > 1e-10: raise ValueError("Initial guess < lower bounds in subproblem.") if np.max(x0 - upper_bounds) > 1e-10: raise ValueError("Initial guess > upper bounds in subproblem.") if solver == "bntr": options = { "conjugate_gradient_method": conjugate_gradient_method, "maxiter": maxiter, "maxiter_gradient_descent": maxiter_gradient_descent, "gtol_abs": gtol_abs, "gtol_rel": gtol_rel, "gtol_scaled": gtol_scaled, "gtol_abs_conjugate_gradient": gtol_abs_conjugate_gradient, "gtol_rel_conjugate_gradient": gtol_rel_conjugate_gradient, } result = bntr(main_model, lower_bounds, upper_bounds, x_candidate=x0, **options) elif solver == "gqtpar": result = gqtpar( main_model, x_candidate=x0, k_easy=k_easy, k_hard=k_hard, maxiter=maxiter, ) else: raise ValueError( "Invalid subproblem solver: {solver}. Must be one of bntr, gqtpar." ) # Test bounds post-solution if np.max(lower_bounds - result["x"]) > 1e-5: raise ValueError("Subproblem solution < lower bounds.") if np.max(result["x"] - upper_bounds) > 1e-5: raise ValueError("Subproblem solution > upper bounds.") return result def find_affine_points( history, x_accepted, model_improving_points, project_x_onto_null, delta, theta1, c, model_indices, n_modelpoints, ): """Find affine points. Args: history (LeastSquaresHistory): Class storing history of xs, residuals, and critvals. x_accepted (np.ndarray): Accepted solution vector of the subproblem. Shape (n_params,). 
        model_improving_points (np.ndarray): Array of shape (n_params,
            n_params) including points to improve the main model, i.e. make
            the main model fully linear, i.e. just-identified.
            If *project_x_onto_null* is False, it is an array filled with
            zeros.
        project_x_onto_null (int): Indicator whether to calculate the QR
            decomposition of *model_improving_points* and multiply it
            with vector *x_projected*.
        delta (float): Delta, current trust-region radius.
        theta1 (float): Threshold for adding the current x candidate to the
            model.
        c (float): Threshold for acceptance of the norm of our current
            x candidate.
        model_indices (np.ndarray): Indices related to the candidates of x
            that are currently in the main model. Shape (2 * n_params + 1,).
        n_modelpoints (int): Current number of model points.

    Returns:
        Tuple:
        - model_improving_points (np.ndarray): Array of shape (n_params,
            n_params) including points to improve the main model, i.e. make
            the main model fully linear, i.e. just-identified.
        - model_indices (np.ndarray): Indices related to the candidates of x
            that are currently in the main model. Shape (2 *n_params* + 1,).
        - n_modelpoints (int): Current number of model points.
        - project_x_onto_null (int): Indicator whether to calculate the QR
            decomposition of *model_improving_points* and multiply it
            with vector *x_projected*.
            Relevant for next call of *find_affine_points()*.

    """
    n_params = len(x_accepted)

    # Scan the history from the most recent point backwards.
    for i in range(history.get_n_fun() - 1, -1, -1):
        center_info = {"x": x_accepted, "radius": delta}
        x_candidate = history.get_centered_xs(center_info, index=i)
        candidate_norm = np.linalg.norm(x_candidate)

        x_projected = x_candidate

        # Only consider points close enough to the current center.
        if candidate_norm <= c:
            if project_x_onto_null is True:
                # Project onto the null space of the points collected so far,
                # so only the genuinely new direction component is measured.
                x_projected, _ = qr_multiply(model_improving_points, x_projected)

            proj = np.linalg.norm(x_projected[n_modelpoints:])

            # Add this index to the model
            if proj >= theta1:
                model_indices[n_modelpoints] = i
                model_improving_points[:, n_modelpoints] = x_candidate
                project_x_onto_null = True
                n_modelpoints += 1

            if n_modelpoints == n_params:
                # The model is fully linear (just-identified).
                break

    return model_improving_points, model_indices, n_modelpoints, project_x_onto_null


def add_geomtery_points_to_make_main_model_fully_linear(
    history,
    main_model,
    model_improving_points,
    model_indices,
    x_accepted,
    n_modelpoints,
    delta,
    criterion,
    lower_bounds,
    upper_bounds,
    batch_fun,
    n_cores,
):
    """Add points until main model is fully linear.

    Args:
        history (LeastSquaresHistory): Class storing history of xs, residuals,
            and critvals.
        main_model (MainModel): Main model with the following parameters:
            ``linear_terms`` and ``square terms``.
        model_improving_points (np.ndarray): Array of shape (n_params,
            n_params) including points to improve the main model.
        model_indices (np.ndarray): Indices of the candidates of x that are
            currently in the main model. Shape (2 * n_params + 1,).
        x_accepted (np.ndarray): Accepted solution vector of the subproblem.
            Shape (n_params,).
        n_modelpoints (int): Current number of model points.
        delta (float): Delta, current trust-region radius.
        criterion (callable): Criterion function. NOTE(review): this parameter
            is not used in the body; evaluations go through ``batch_fun``.
        lower_bounds (np.ndarray): Lower bounds. Must have same length as
            the initial guess of the parameter vector. Equal to -1 if not
            provided by the user.
        upper_bounds (np.ndarray): Upper bounds. Must have same length as
            the initial guess of the parameter vector. Equal to 1 if not
            provided by the user.
        batch_fun (str or callable): Function that takes a list of parameter
            vectors and evaluates the objective function on each of them.
        n_cores (int): Number of processes used to parallelize the function
            evaluations.

    Returns:
        Tuple:
        - history (class): Class storing history of xs, residuals, and
            critvals.
        - model_indices (np.ndarray): Indices of the candidates of x that
            are currently in the main model. Shape (2 * n_params + 1,).

    """
    n_params = len(x_accepted)
    current_history = history.get_n_fun()

    x_candidate = np.zeros_like(x_accepted)
    x_candidates_list = []
    # NOTE(review): this initialization is dead code - the variable is
    # overwritten unconditionally by the batch_fun call below.
    criterion_candidates_list = []

    # Orthonormalize the improving directions via QR.
    model_improving_points, _ = qr_multiply(model_improving_points, np.eye(n_params))

    for i in range(n_modelpoints, n_params):
        change_direction = model_improving_points[:, i] @ main_model.linear_terms

        # Flip the direction so it is a descent direction of the main model.
        if change_direction > 0:
            model_improving_points[:, i] *= -1

        x_candidate = delta * model_improving_points[:, i] + x_accepted

        # Project into feasible region
        if lower_bounds is not None and upper_bounds is not None:
            # Elementwise median of (lower, candidate, upper) clips the
            # candidate into the box.
            x_candidate = np.median(
                np.stack([lower_bounds, x_candidate, upper_bounds]), axis=0
            )
        x_candidates_list.append(x_candidate)
        model_indices[i] = current_history + i - n_modelpoints

    # Evaluate all new geometry points in one (possibly parallel) batch.
    criterion_candidates_list = batch_fun(x_list=x_candidates_list, n_cores=n_cores)

    history.add_entries(x_candidates_list, criterion_candidates_list)

    return history, model_indices


def evaluate_residual_model(
    centered_xs,
    centered_residuals,
    residual_model,
):
    """Compute the difference between observed and predicted model evaluations.

    We use a quadratic model of the form:
        f(x) = a + x.T @ b + 0.5 x.T @ C @ x ,
    where C is lower triangular.

    Note the connection of b and C to the gradient:
        f'(x) = b + (C + C.T) @ x,
    and the Hessian:
        f''(x) = C + C.T.

    Args:
        residual_model (ResidualModel): The residual model.
            Has entries:
            - ``intercept``: corresponds to 'a' in the above equation
            - ``linear_terms``: corresponds to 'b' in the above equation
            - ``square_terms``: corresponds to 'C' in the above equation
        centered_xs (np.ndarray): Centered x sample. Shape (n_modelpoints,
            n_params).
        centered_residuals (np.ndarray): Centered residuals, i.e. the
            observed model evaluations. Shape (n_maxinterp, n_residuals).

    Returns:
        np.ndarray: Observed minus predicted model evaluations, has shape
            (n_modelpoints, n_residuals).

    """
    n_residuals = centered_residuals.shape[1]
    n_modelpoints = centered_xs.shape[0]

    y_residuals = np.empty((n_modelpoints, n_residuals), dtype=np.float64)

    # For each residual j, subtract the model prediction (without intercept,
    # the residuals are already centered) from the observed value.
    for j in range(n_residuals):
        x_dot_square_terms = centered_xs @ residual_model.square_terms[j, :, :]

        for i in range(n_modelpoints):
            y_residuals[i, j] = (
                centered_residuals[i, j]
                - residual_model.linear_terms[:, j] @ centered_xs[i, :]
                - 0.5 * (x_dot_square_terms[i, :] @ centered_xs[i, :])
            )

    return y_residuals


def get_feature_matrices_residual_model(
    history, x_accepted, model_indices, delta, c2, theta2, n_maxinterp
):
    """Obtain the feature matrices for fitting the residual model.

    Pounders uses underdetermined sample sets, with at most n_maxinterp
    points in the model. Hence, the fitting method is interpolation, where
    the solution represents the quadratic whose Hessian matrix is of
    minimum Frobenius norm. For a mathematical exposition see
    :cite:`Wild2008`, p. 3-5.

    Args:
        history (LeastSquaresHistory): Class storing history of xs, residuals,
            and critvals.
        x_accepted (np.ndarray): Accepted solution vector of the subproblem.
            Shape (n_params,).
        model_indices (np.ndarray): Indices of the candidates of x that are
            currently in the model. Shape (2 * n_params + 1,).
        delta (float): Delta, current trust-region radius.
        c2 (int): Threshold for acceptance of the norm of our current
            x candidate. Equal to 10 by default.
        theta2 (float): Threshold for adding the current x candidate to the
            model.
        n_maxinterp (int): Maximum number of interpolation points. By
            default, 2 * n_params + 1 points.

    Returns:
        Tuple:
        - m_mat (np.ndarray): Polynomial feature matrix of the linear terms.
            Shape(n_params + 1, n_params + 1).
        - n_mat (np.ndarray): Polynomial feature matrix of the square terms.
            Shape(n_modelpoints, n_poly_features).
        - z_mat (np.ndarray): Basis for the null space of m_mat.
            Shape(n_modelpoints, n_modelpoints - n_params - 1).
        - n_z_mat (np.ndarray): Lower triangular matrix of xs that form
            the monomial basis.
            Shape(n_poly_features, n_modelpoints - n_params - 1).
        - n_modelpoints (int): Current number of model points.

    """
    n_params = len(x_accepted)
    n_poly_features = n_params * (n_params + 1) // 2

    # Linear feature matrix [1, x]; padded square copy for the QR calls below.
    m_mat = np.zeros((n_maxinterp, n_params + 1))
    m_mat[:, 0] = 1
    m_mat_pad = np.zeros((n_maxinterp, n_maxinterp))
    m_mat_pad[:n_maxinterp, : n_params + 1] = m_mat
    # Quadratic (monomial) feature matrix.
    n_mat = np.zeros((n_maxinterp, n_poly_features))

    center_info = {"x": x_accepted, "radius": delta}
    # The first n_params + 1 model points are already fixed by model_indices.
    for i in range(n_params + 1):
        m_mat[i, 1:] = history.get_centered_xs(center_info, index=model_indices[i])
        n_mat[i, :] = _get_monomial_basis(m_mat[i, 1:])

    point = history.get_n_fun() - 1
    n_modelpoints = n_params + 1

    # Greedily add further history points (newest first) while they improve
    # the conditioning of the interpolation system.
    while (n_modelpoints < n_maxinterp) and (point >= 0):
        reject = False

        # Reject any points already in the model
        for i in range(n_params + 1):
            if point == model_indices[i]:
                reject = True
                break

        if reject is False:
            candidate_x = history.get_centered_xs(center_info, index=point)
            candidate_norm = np.linalg.norm(candidate_x)

            # Reject points too far away from the trust-region center.
            if candidate_norm > c2:
                reject = True

        if reject is True:
            point -= 1
            continue

        # Tentatively append the candidate point to the feature matrices.
        m_mat[n_modelpoints, 1:] = history.get_centered_xs(center_info, index=point)
        n_mat[n_modelpoints, :] = _get_monomial_basis(m_mat[n_modelpoints, 1:])

        m_mat_pad = np.zeros((n_maxinterp, n_maxinterp))
        m_mat_pad[:n_maxinterp, : n_params + 1] = m_mat

        # Project the monomial features onto the null space of m_mat; the
        # smallest relevant singular value below measures how much new
        # information the candidate adds.
        _n_z_mat, _ = qr_multiply(
            m_mat_pad[: n_modelpoints + 1, :],
            n_mat.T[:n_poly_features, : n_modelpoints + 1],
        )
        beta = np.linalg.svd(_n_z_mat.T[n_params + 1 :], compute_uv=False)

        if beta[min(n_modelpoints - n_params, n_poly_features) - 1] > theta2:
            # Accept point
            model_indices[n_modelpoints] = point
            n_z_mat = _n_z_mat

            n_modelpoints += 1

        point -= 1

    # Null-space basis of the linear feature matrix.
    z_mat, _ = qr_multiply(
        m_mat_pad[:n_modelpoints, :],
        np.eye(n_maxinterp)[:, :n_modelpoints],
    )

    # Just-identified case
    if n_modelpoints == (n_params + 1):
        n_z_mat = np.zeros((n_maxinterp, n_poly_features))
        n_z_mat[:n_params, :n_params] = np.eye(n_params)

    return (
        m_mat[: n_params + 1, : n_params + 1],
        n_mat[:n_modelpoints],
        z_mat[:n_modelpoints, n_params + 1 : n_modelpoints],
        n_z_mat[:, n_params + 1 : n_modelpoints],
        n_modelpoints,
    )


def fit_residual_model(
    m_mat,
    n_mat,
    z_mat,
    n_z_mat,
    y_residuals,
    n_modelpoints,
):
    """Fit a linear model using the pounders fitting method.

    Pounders uses underdetermined sample sets, with at most
    2 * n_params + 1 points in the model. Hence, the fitting method is
    interpolation, where the solution represents the quadratic whose
    Hessian matrix is of minimum Frobenius norm. For a mathematical
    exposition, see :cite:`Wild2008`, p. 3-5.

    Args:
        m_mat (np.ndarray): Polynomial feature matrix of the linear terms.
            Shape(n_params + 1, n_params + 1).
        n_mat (np.ndarray): Polynomial feature matrix of the square terms.
            Shape(n_modelpoints, n_poly_features).
        z_mat (np.ndarray): Basis for the null space of m_mat.
            Shape(n_modelpoints, n_modelpoints - n_params - 1).
        n_z_mat (np.ndarray): Lower triangular matrix of xs that form
            the monomial basis.
            Shape(n_poly_features, n_modelpoints - n_params - 1).
        y_residuals (np.ndarray): The dependent variable. Observed minus
            predicted evaluations of the residual model.
            Shape (n_modelpoints, n_residuals).
        n_modelpoints (int): Current number of model points.

    Returns:
        dict: The coefficients of the residual model.
    """
    n_params = m_mat.shape[1] - 1
    n_residuals = y_residuals.shape[1]
    n_poly_terms = n_params * (n_params + 1) // 2

    # With exactly n_params + 1 points the quadratic part cannot be
    # identified; only the linear interpolation is fitted.
    _is_just_identified = n_modelpoints == (n_params + 1)

    coeffs_linear = np.empty((n_residuals, n_params))
    coeffs_square = np.empty((n_residuals, n_params, n_params))

    if _is_just_identified:
        coeffs_first_stage = np.zeros(n_params)
        beta = np.zeros(n_poly_terms)
    else:
        n_z_mat_square = n_z_mat.T @ n_z_mat

    # Fit each residual separately.
    for k in range(n_residuals):
        if not _is_just_identified:
            # First stage: solve for the null-space coefficients, then map
            # them back to monomial (quadratic) coefficients beta.
            z_y_vec = np.dot(z_mat.T, y_residuals[:, k])
            coeffs_first_stage = np.linalg.solve(
                np.atleast_2d(n_z_mat_square),
                np.atleast_1d(z_y_vec),
            )

            beta = np.atleast_2d(n_z_mat) @ coeffs_first_stage

        # Second stage: solve for intercept and linear terms given beta.
        rhs = y_residuals[:, k] - n_mat @ beta

        alpha = np.linalg.solve(m_mat, rhs[: n_params + 1])
        coeffs_linear[k, :] = alpha[1 : (n_params + 1)]

        # Unpack the packed monomial coefficients beta into a symmetric
        # square-terms matrix; off-diagonals undo the sqrt(2) scaling used
        # in the monomial basis.
        num = 0
        for i in range(n_params):
            coeffs_square[k, i, i] = beta[num]
            num += 1
            for j in range(i + 1, n_params):
                coeffs_square[k, j, i] = beta[num] / np.sqrt(2)
                coeffs_square[k, i, j] = beta[num] / np.sqrt(2)
                num += 1

    coef = {
        "linear_terms": coeffs_linear.T,
        "square_terms": coeffs_square,
    }

    return coef


def update_trustregion_radius(
    result_subproblem,
    rho,
    model_is_valid,
    delta,
    delta_min,
    delta_max,
    eta1,
    gamma0,
    gamma1,
):
    """Update the trust-region radius.

    Expand the radius (up to delta_max) after a successful step that used
    most of the trust region; shrink it (down to delta_min) when the model
    is valid but the step was unsuccessful.
    """
    norm_x_sub = np.sqrt(np.sum(result_subproblem["x"] ** 2))

    if rho >= eta1 and norm_x_sub > 0.5 * delta:
        delta = min(delta * gamma1, delta_max)
    elif model_is_valid is True:
        delta = max(delta * gamma0, delta_min)

    return delta


def get_last_model_indices_and_check_for_repeated_model(
    model_indices, last_model_indices, n_modelpoints, n_last_modelpoints
):
    """Get the last model_indices and check if we have reused the same model.

    The model is considered repeated when the number of model points AND
    every index match the previous iteration. ``last_model_indices`` is
    updated in place to the current indices.
    """
    if n_modelpoints == n_last_modelpoints:
        same_model_used = True
    else:
        same_model_used = False

    for i in range(n_modelpoints):
        if same_model_used:
            if model_indices[i] == last_model_indices[i]:
                same_model_used = True
            else:
                same_model_used = False
        last_model_indices[i] = model_indices[i]

    n_last_modelpoints = n_modelpoints

    return last_model_indices, n_last_modelpoints, same_model_used


def add_accepted_point_to_residual_model(model_indices, accepted_index, n_modelpoints):
    """Add accepted point to the residual model.

    Shifts the existing indices one slot to the right and stores the
    accepted index in front. ``model_indices`` is modified in place.
    """
    model_indices[1 : n_modelpoints + 1] = model_indices[:n_modelpoints]
    model_indices[0] = accepted_index

    return model_indices


def _get_monomial_basis(x):
    """Get the monomial basis (basis for quadratic functions) of x.

    Monomial basis = .5*[x(1)^2  sqrt(2)*x(1)*x(2) ... sqrt(2)*x(1)*x(n_params)
        ... x(2)^2  sqrt(2)*x(2)*x(3) .. x(n_params)^2]

    Args:
        x (np.ndarray): Parameter vector of shape (n_params,).

    Returns:
        np.ndarray: Monomial basis of x of shape
            (n_params * (n_params + 1) / 2,).

    """
    n_params = len(x)
    monomial_basis = np.zeros(int(n_params * (n_params + 1) / 2))

    j = 0
    for i in range(n_params):
        # Diagonal term, then the off-diagonal cross terms of row i.
        monomial_basis[j] = 0.5 * x[i] ** 2
        j += 1

        for k in range(i + 1, n_params):
            monomial_basis[j] = x[i] * x[k] / np.sqrt(2)
            j += 1

    return monomial_basis


================================================
FILE: src/optimagic/optimizers/_pounders/pounders_history.py
================================================
"""History class for pounders and similar optimizers."""

import numpy as np


class LeastSquaresHistory:
    """Container to save and retrieve history entries for a least-square optimizer.

    These entries are:
    - xs
    - residuals
    - critvals

    The class automatically determines the 'best' entries, i.e. entries
    related to the x that yield the smallest critval - given all xs stored
    so far.

    Xs and residuals can be both saved and accessed in their centered and
    uncentered form. 'Centered' meaning that they are scaled by their
    corresponding 'best' entry. 'Uncentered' simply being the raw entries.

    Critvals don't need to be added explicitly, as they are computed
    internally as the sum of squares of the residuals whenever new entries
    are added.
""" def __init__(self): self.xs = None self.best_x = None self.residuals = None self.best_residuals = None self.critvals = None self.n_fun = 0 self.best_index = 0 self.best_critval = np.inf def add_entries(self, xs, residuals): """Add new parameter vectors and residuals to the history. Args: xs (np.ndarray or list): 1d or 2d array or list of 1d arrays with parameter vectors. residuals (np.ndarray or list): 1d or 2d array or list of 1d arrays with least square residuals. """ xs = np.atleast_2d(xs) residuals = np.atleast_2d(residuals) critvals = np.atleast_1d((residuals**2).sum(axis=-1)) argmin_candidate = critvals.argmin() min_candidate = critvals[argmin_candidate] if min_candidate < self.best_critval: self.best_index = argmin_candidate + self.n_fun self.best_x = xs[argmin_candidate] self.best_residuals = residuals[argmin_candidate] if len(xs) != len(residuals): raise ValueError() self.xs = _add_entries_to_array(self.xs, xs, self.n_fun) self.residuals = _add_entries_to_array(self.residuals, residuals, self.n_fun) self.critvals = _add_entries_to_array(self.critvals, critvals, self.n_fun) self.n_fun += len(xs) def add_centered_entries(self, xs, residuals, center_info): """Add new parameter vectors and residuals to the history. Args: xs (np.ndarray or list): 1d or 2d array or list of 1d arrays with parameter vectors. residuals (np.ndarray or list): 1d or 2d array or list of 1d arrays with least square residuals. center_info (dict): Dictionary with the entries "x", "residuals" and "radius". The information is used to uncenter parameters and residuals before adding them to the history. """ xs = np.atleast_2d(xs) residuals = np.atleast_2d(residuals) xs_uncentered = xs * center_info["radius"] + center_info["x"] residuals_uncentered = residuals + center_info["residuals"] self.add_entries(xs_uncentered, residuals_uncentered) def get_entries(self, index=None): """Retrieve xs, residuals and critvals from the history. 
Args: index (None, int or np.ndarray): Specifies the subset of rows that will be returned. Returns: np.ndarray: 1d or 2d array with parameter vectors. np.ndarray: 1d or 2d array with residuals. np.ndarray: Float or 1d array with criterion values. """ names = ["xs", "residuals", "critvals"] out = (getattr(self, name)[: self.n_fun] for name in names) # Reducing arrays to length n_fun ensures that invalid indices raise IndexError if index is not None: out = [arr[index] for arr in out] return tuple(out) def get_xs(self, index=None): """Retrieve xs from history. Args: index (None, int or np.ndarray): Specifies the subset of rows that will be returned. Returns: np.ndarray: 1d or 2d array with parameter vectors """ out = self.xs[: self.n_fun] out = out[index] if index is not None else out return out def get_residuals(self, index=None): """Retrieve residuals from history. Args: index (None, int or np.ndarray): Specifies the subset of rows that will be returned. Returns: np.ndarray: 1d or 2d array with residuals. """ out = self.residuals[: self.n_fun] out = out[index] if index is not None else out return out def get_critvals(self, index=None): """Retrieve critvals from history. Args: index (None, int or np.ndarray): Specifies the subset of rows that will be returned. Returns: np.ndarray: Float or 1d array with criterion values. """ out = self.critvals[: self.n_fun] out = out[index] if index is not None else out return out def get_centered_entries(self, center_info, index=None): """Retrieve xs, residuals and critvals from the history. Args: center_info (dict): Dictionary with the entries "x", "residuals" and "radius". The information is used to center parameters, residuals and critvals. index (None, int or np.ndarray): Specifies the subset of rows that will be returned. Returns: np.ndarray: 1d or 2d array with centered parameter vectors np.ndarray: 1d or 2d array with centered residuals np.ndarray: Float or 1d array with centered criterion values. 
""" xs_unc, residuals_unc, _ = self.get_entries(index=index) xs = (xs_unc - center_info["x"]) / center_info["radius"] residuals = residuals_unc - center_info["residuals"] critvals = (residuals**2).sum(axis=-1) return xs, residuals, critvals def get_centered_xs(self, center_info, index=None): """Retrieve centered xs from the history. Args: center_info (dict): Dictionary with the entries "x" and "radius". The information is used to center parameters. index (None, int or np.ndarray): Specifies the subset of rows that will be returned. Returns: np.ndarray: 1d or 2d array with centered parameter vectors. """ xs_unc = self.get_xs(index=index) xs = (xs_unc - center_info["x"]) / center_info["radius"] return xs def get_centered_residuals(self, center_info, index=None): """Retrieve centered residuals from the history. Args: center_info (dict): Dictionary with the entry "residuals". The information is used to center residuals. index (None, int or np.ndarray): Specifies the subset of rows that will be returned. Returns: np.ndarray: 1d or 2d array with centered residuals. """ residuals_unc = self.get_residuals(index=index) residuals = residuals_unc - center_info["residuals"] return residuals def get_centered_critvals(self, center_info, index=None): """Retrieve centered critvals from the history. Args: center_info (dict): Dictionary with the entry"residuals". The information is used to center critvals. index (None, int or np.ndarray): Specifies the subset of rows that will be returned. Returns: np.ndarray: Float or 1d array with centered criterion values. 
""" residuals_unc = self.get_residuals(index=index) residuals = residuals_unc - center_info["residuals"] critvals = (residuals**2).sum(axis=-1) return critvals def get_n_fun(self): return self.n_fun def get_best_index(self): return self.best_index def get_best_entries(self): return self.get_entries(index=self.best_index) def get_best_x(self): return self.get_xs(index=self.best_index) def get_best_residuals(self): return self.get_residuals(index=self.best_index) def get_best_critval(self): return self.get_critvals(index=self.best_index) def get_best_centered_entries(self, center_info): return self.get_centered_entries(self, center_info, index=self.best_index) def _add_entries_to_array(arr, new, position): if arr is None: shape = 100_000 if new.ndim == 1 else (100_000, new.shape[1]) arr = np.full(shape, np.nan) if len(arr) - position - len(new) < 0: n_extend = max(len(arr), len(new)) if arr.ndim == 2: extension_shape = (n_extend, arr.shape[1]) arr = np.vstack([arr, np.full(extension_shape, np.nan)]) else: arr = np.hstack([arr, np.full(n_extend, np.nan)]) arr[position : position + len(new)] = new return arr ================================================ FILE: src/optimagic/optimizers/bayesian_optimizer.py ================================================ """Implement Bayesian optimization using bayes_opt.""" from __future__ import annotations from dataclasses import dataclass from typing import TYPE_CHECKING, Any, Literal, Type import numpy as np from numpy.typing import NDArray from scipy.optimize import NonlinearConstraint from optimagic import mark from optimagic.config import IS_BAYESOPT_INSTALLED_AND_VERSION_NEWER_THAN_2 from optimagic.exceptions import NotInstalledError from optimagic.optimization.algo_options import N_RESTARTS from optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult from optimagic.optimization.internal_optimization_problem import ( InternalBounds, InternalOptimizationProblem, ) from optimagic.typing import ( 
if TYPE_CHECKING:
    from bayes_opt import BayesianOptimization
    from bayes_opt.acquisition import AcquisitionFunction


@mark.minimizer(
    name="bayes_opt",
    solver_type=AggregationLevel.SCALAR,
    is_available=IS_BAYESOPT_INSTALLED_AND_VERSION_NEWER_THAN_2,
    is_global=True,
    needs_jac=False,
    needs_hess=False,
    needs_bounds=True,
    supports_parallelism=False,
    supports_bounds=True,
    supports_infinite_bounds=False,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=False,  # temp
    disable_history=False,
)
@dataclass(frozen=True)
class BayesOpt(Algorithm):
    """Minimize a scalar function using Bayesian Optimization with Gaussian Process.

    This optimizer wraps the BayesianOptimization package :cite:`Nogueira2014`,
    which implements a surrogate model-based global optimization algorithm. It
    works by constructing a posterior distribution over the objective function
    via a Gaussian process that best approximates it. Instead of directly
    optimizing the expensive original function, it uses a proxy optimization
    problem by finding the maximum of an acquisition function, which is
    computationally cheaper than evaluating the original function.

    The algorithm starts by sampling a few initial points (init_points) to
    gather observations of the objective function. These observations are used
    to fit a Gaussian process surrogate model that learns about the function's
    behavior. The optimizer then uses an acquisition function to iteratively
    select promising new points to evaluate, updates its model, and this
    continues for stopping_maxiter iterations.

    This optimizer is well-suited for expensive functions where each evaluation
    is costly (simulations, experiments, model training), black-box
    optimization where gradients are unavailable, and problems with a limited
    evaluation budget.

    Default parameter values match those of the underlying
    BayesianOptimization package where appropriate.

    Nonlinear constraints are currently not supported.

    """

    init_points: PositiveInt = 5
    """Number of random points sampled before optimization.

    More points improve initial GP fit but increase evaluation cost. Default = 5.

    """

    stopping_maxiter: PositiveInt = 25
    """Number of Bayesian optimization iterations to perform after initial
    exploration."""

    verbose: Literal[0, 1, 2] = 0
    """Verbosity level (0 for silent, 1 for brief, 2 for detailed output)."""

    kappa: NonNegativeFloat = 2.576
    """Exploration-exploitation trade-off parameter for Upper Confidence Bound
    acquisition.

    Controls the balance between exploration and exploitation when using the
    Upper Confidence Bound (UCB) acquisition function. Higher values favor
    exploration over exploitation . This parameter is only used when the
    acquisition function is "ucb" or "upper_confidence_bound". The default
    value of 2.576 corresponds to a 99% confidence interval.

    """

    xi: PositiveFloat = 0.01
    """Exploration-exploitation trade-off parameter for Expected/Probability of
    Improvement.

    Controls the balance between exploration and exploitation for Expected
    Improvement (EI) and Probability of Improvement (POI) acquisition
    functions. Higher values favor exploration over exploitation . This
    parameter is only used when the acquisition function is "ei",
    "expected_improvement", "poi", or "probability_of_improvement". The default
    value is 0.01.

    """

    exploration_decay: UnitIntervalFloat | None = None
    """Rate at which exploration decays over time during optimization.

    If specified, the exploration parameters (kappa or xi) are multiplied by
    this factor after each iteration, gradually shifting from exploration to
    exploitation. Must be between 0 and 1 (range: (0, 1]) If None, no decay is
    applied and exploration remains constant.

    """

    exploration_decay_delay: NonNegativeInt | None = None
    """Number of iterations to delay before applying exploration decay.

    If specified, exploration decay only begins after this many iterations have
    completed. If None, decay is applied from the first iteration.

    """

    seed: int | None = None
    """Random seed for reproducible results."""

    acquisition_function: (
        str | AcquisitionFunction | Type[AcquisitionFunction] | None
    ) = None
    """Strategy for selecting the next evaluation point during optimization.

    The acquisition function determines how to balance exploration and
    exploitation when selecting the next point to evaluate. Supported options:

    - String: "ucb"/"upper_confidence_bound", "ei"/"expected_improvement",
      "poi"/"probability_of_improvement"
    - AcquisitionFunction instance: Pre-configured acquisition function object
    - AcquisitionFunction class: Class that will be instantiated with default
      parameters
    - None: Uses package default (UCB for unconstrained, EI for constrained
      problems)

    """

    allow_duplicate_points: bool = False
    """Whether to allow repeated evaluation of the same point."""

    enable_sdr: bool = False
    """Enable Sequential Domain Reduction (SDR).

    When True, the search domain is iteratively shrunk around promising regions
    using SDR parameters (`sdr_gamma_osc`, `sdr_gamma_pan`, `sdr_eta`,
    `sdr_minimum_window`).

    """

    sdr_gamma_osc: float = 0.7
    """Oscillation shrinkage parameter for SDR.

    Controls how aggressively the search space shrinks in oscillating fashion.
    Only used when enable_sdr is True. Typical range: [0.5, 0.7]. Default = 0.7.

    """

    sdr_gamma_pan: float = 1.0
    """Panning parameter for SDR.

    Controls the panning behavior during domain reduction. Only used when
    enable_sdr is True. Typical value: 1.0. Default = 1.0.

    """

    sdr_eta: float = 0.9
    """Zoom parameter for SDR.

    Only used when enable_sdr is True. Default = 0.9.

    """

    sdr_minimum_window: NonNegativeFloat = 0.0
    """Minimum window size for Sequential Domain Reduction.

    Only used when enable_sdr is True. Default = 0.0.

    """

    alpha: float = 1e-6
    """Noise parameter for the Gaussian Process model.

    Controls the amount of noise assumed in the objective function
    observations. Default is 1e-6.

    """

    n_restarts: int = N_RESTARTS
    """Number of times to restart the optimization."""

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        """Run bayes_opt on the internal problem and convert its result.

        Raises:
            NotInstalledError: If bayes_opt (>= 2) is not installed.

        """
        if not IS_BAYESOPT_INSTALLED_AND_VERSION_NEWER_THAN_2:
            raise NotInstalledError(
                "To use the 'bayes_opt' optimizer you need to install bayes_opt. "
                "Use 'pip install bayesian-optimization'. "
                "Check the documentation for more details: "
                "https://bayesian-optimization.github.io/BayesianOptimization/index.html"
            )

        from bayes_opt import BayesianOptimization

        pbounds = _process_bounds(problem.bounds)

        acq = _process_acquisition_function(
            acquisition_function=self.acquisition_function,
            kappa=self.kappa,
            xi=self.xi,
            exploration_decay=self.exploration_decay,
            exploration_decay_delay=self.exploration_decay_delay,
            random_seed=self.seed,
        )

        # NOTE(review): the first assignment is immediately overwritten and
        # could be dropped; _process_constraints currently always returns None.
        constraint = None
        constraint = self._process_constraints(problem.nonlinear_constraints)

        def objective(**kwargs: dict[str, float]) -> float:
            # bayes_opt passes each parameter as a keyword argument; rebuild
            # the parameter vector before calling the internal criterion.
            x = _extract_params_from_kwargs(kwargs)
            return -float(
                problem.fun(x)
            )  # Negate to convert minimization to maximization

        bounds_transformer = None
        if self.enable_sdr:
            from bayes_opt import SequentialDomainReductionTransformer

            bounds_transformer = SequentialDomainReductionTransformer(
                gamma_osc=self.sdr_gamma_osc,
                gamma_pan=self.sdr_gamma_pan,
                eta=self.sdr_eta,
                minimum_window=self.sdr_minimum_window,
            )

        optimizer = BayesianOptimization(
            f=objective,
            pbounds=pbounds,
            acquisition_function=acq,
            constraint=constraint,
            random_state=self.seed,
            verbose=self.verbose,
            bounds_transformer=bounds_transformer,
            allow_duplicate_points=self.allow_duplicate_points,
        )

        # Set Gaussian Process parameters
        optimizer.set_gp_params(alpha=self.alpha, n_restarts_optimizer=self.n_restarts)

        # Use initial point as first probe
        probe_params = {f"param{i}": float(val) for i, val in enumerate(x0)}
        optimizer.probe(
            params=probe_params,
            lazy=True,
        )

        optimizer.maximize(
            init_points=self.init_points,
            n_iter=self.stopping_maxiter,
        )

        res = _process_bayes_opt_result(optimizer=optimizer, x0=x0, problem=problem)

        return res

    def _process_constraints(
        self, constraints: list[dict[str, Any]] | None
    ) -> NonlinearConstraint | None:
        """Temporarily skip processing of nonlinear constraints.

        Args:
            constraints: List of constraint dictionaries from the problem

        Returns:
            None. Nonlinear constraint processing is deferred.

        """
        # TODO: Implement proper handling of nonlinear constraints in future.
        return None


def _process_bounds(bounds: InternalBounds) -> dict[str, tuple[float, float]]:
    """Process bounds for bayesian optimization.

    Args:
        bounds: Internal bounds object.

    Returns:
        Dictionary mapping parameter names to (lower, upper) bound tuples.

    Raises:
        ValueError: If bounds are None or infinite.

    """
    if not (
        bounds.lower is not None
        and bounds.upper is not None
        and np.all(np.isfinite(bounds.lower))
        and np.all(np.isfinite(bounds.upper))
    ):
        raise ValueError(
            "Bayesian optimization requires finite bounds for all parameters. "
            "Bounds cannot be None or infinite."
        )

    # Parameter names "param0", "param1", ... must match the names used when
    # probing x0 and when extracting the result.
    return {
        f"param{i}": (lower, upper)
        for i, (lower, upper) in enumerate(zip(bounds.lower, bounds.upper, strict=True))
    }


def _extract_params_from_kwargs(params_dict: dict[str, Any]) -> NDArray[np.float64]:
    """Extract parameters from kwargs dictionary.

    Args:
        params_dict: Dictionary with parameter values.

    Returns:
        Array of parameter values.

    """
    # NOTE(review): relies on dict insertion order matching the parameter
    # order "param0", "param1", ... used elsewhere in this module.
    return np.array(list(params_dict.values()))


def _process_acquisition_function(
    acquisition_function: (
        str | AcquisitionFunction | Type[AcquisitionFunction] | None
    ),
    kappa: NonNegativeFloat,
    xi: PositiveFloat,
    exploration_decay: float | None,
    exploration_decay_delay: NonNegativeInt | None,
    random_seed: int | None,
) -> AcquisitionFunction | None:
    """Create and return the appropriate acquisition function.

    Args:
        acquisition_function: The acquisition function specification.
            Can be one of the following:
            - A string: "upper_confidence_bound" (or "ucb"),
              "expected_improvement" (or "ei"),
              "probability_of_improvement" (or "poi")
            - An instance of `AcquisitionFunction`
            - A class inheriting from `AcquisitionFunction`
            - None (uses the default acquisition function from the bayes_opt
              package)
        kappa: Exploration-exploitation trade-off parameter for Upper
            Confidence Bound acquisition function. Higher values favor
            exploration over exploitation.
        xi: Exploration-exploitation trade-off parameter for Expected
            Improvement and Probability of Improvement acquisition functions.
            Higher values favor exploration over exploitation.
        exploration_decay: Rate at which exploration parameters (kappa or xi)
            decay over time. None means no decay is applied.
        exploration_decay_delay: Number of iterations before starting the
            decay. None means decay is applied from the start.
        random_seed: Random seed for reproducibility.

    Returns:
        The configured acquisition function instance or None for default.

    Raises:
        ValueError: If acquisition_function is an invalid string.
        TypeError: If acquisition_function is not a string, an
            AcquisitionFunction instance, a class inheriting from
            AcquisitionFunction, or None.

    """
    from bayes_opt import acquisition

    # Aliases map both short and long user-facing names to canonical keys.
    acquisition_function_aliases = {
        "ucb": "ucb",
        "upper_confidence_bound": "ucb",
        "ei": "ei",
        "expected_improvement": "ei",
        "poi": "poi",
        "probability_of_improvement": "poi",
    }

    if acquisition_function is None:
        return None

    elif isinstance(acquisition_function, str):
        acq_name = acquisition_function.lower()
        if acq_name not in acquisition_function_aliases:
            raise ValueError(
                f"Invalid acquisition_function string: '{acquisition_function}'. "
                f"Must be one of: {', '.join(acquisition_function_aliases.keys())}"
            )

        canonical_name = acquisition_function_aliases[acq_name]

        if canonical_name == "ucb":
            return acquisition.UpperConfidenceBound(
                kappa=kappa,
                exploration_decay=exploration_decay,
                exploration_decay_delay=exploration_decay_delay,
                random_state=random_seed,
            )
        elif canonical_name == "ei":
            return acquisition.ExpectedImprovement(
                xi=xi,
                exploration_decay=exploration_decay,
                exploration_decay_delay=exploration_decay_delay,
                random_state=random_seed,
            )
        elif canonical_name == "poi":
            return acquisition.ProbabilityOfImprovement(
                xi=xi,
                exploration_decay=exploration_decay,
                exploration_decay_delay=exploration_decay_delay,
                random_state=random_seed,
            )
        else:
            # Defensive: unreachable as long as the alias table only maps to
            # the three canonical names above.
            raise ValueError(f"Unhandled canonical name: {canonical_name}")

    # If acquisition_function is an instance of AcquisitionFunction class
    elif isinstance(acquisition_function, acquisition.AcquisitionFunction):
        return acquisition_function

    # If acquisition_function is a class inheriting from AcquisitionFunction
    elif isinstance(acquisition_function, type) and issubclass(
        acquisition_function, acquisition.AcquisitionFunction
    ):
        # Instantiate with the matching trade-off parameter for known
        # subclasses; unknown subclasses are instantiated with no arguments.
        if issubclass(
            acquisition_function, acquisition.ExpectedImprovement
        ) or issubclass(acquisition_function, acquisition.ProbabilityOfImprovement):
            return acquisition_function(
                xi=xi,
                exploration_decay=exploration_decay,
                exploration_decay_delay=exploration_decay_delay,
                random_state=random_seed,
            )
        elif issubclass(acquisition_function, acquisition.UpperConfidenceBound):
            return acquisition_function(
                kappa=kappa,
                exploration_decay=exploration_decay,
                exploration_decay_delay=exploration_decay_delay,
                random_state=random_seed,
            )
        else:
            return acquisition_function()

    else:
        raise TypeError(
            "acquisition_function must be None, a string, "
            "an AcquisitionFunction instance, or a class inheriting from "
            "AcquisitionFunction. "
            f"Got type: {type(acquisition_function).__name__}"
        )


def _process_bayes_opt_result(
    optimizer: BayesianOptimization,
    x0: NDArray[np.float64],
    problem: InternalOptimizationProblem,
) -> InternalOptimizeResult:
    """Convert BayesianOptimization result to InternalOptimizeResult format.

    Args:
        optimizer: The BayesianOptimization instance after optimization
        x0: Initial parameter values
        problem: The internal optimization problem

    Returns:
        InternalOptimizeResult with processed results

    """
    # Every registered point counts as one evaluation; the same count is
    # reported for iterations and function evaluations below.
    n_evals = len(optimizer.space)

    if optimizer.max is not None:
        best_params = optimizer.max["params"]
        best_x = _extract_params_from_kwargs(best_params)
        best_y = -optimizer.max["target"]  # Un-negate the result
        success = True
        message = "Optimization succeeded"
    else:
        # No maximum found: fall back to evaluating and returning x0.
        best_x = x0
        best_y = float(problem.fun(x0))
        success = False
        message = (
            "Optimization did not succeed "
            "returning the initial point as the best available result."
        )

    return InternalOptimizeResult(
        x=best_x,
        fun=best_y,
        success=success,
        message=message,
        n_iterations=n_evals,
        n_fun_evals=n_evals,
        n_jac_evals=0,
    )


================================================
FILE: src/optimagic/optimizers/bhhh.py
================================================
"""Implement Berndt-Hall-Hall-Hausman (BHHH) algorithm."""

from dataclasses import dataclass
from typing import Callable, cast

import numpy as np
from numpy.typing import NDArray

from optimagic import mark
from optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult
from optimagic.optimization.internal_optimization_problem import (
    InternalOptimizationProblem,
)
from optimagic.typing import AggregationLevel, NonNegativeFloat, PositiveInt


@mark.minimizer(
    name="bhhh",
    solver_type=AggregationLevel.LIKELIHOOD,
    is_available=True,
    is_global=False,
    needs_jac=True,
    needs_hess=False,
    needs_bounds=False,
    supports_parallelism=False,
    supports_bounds=False,
    supports_infinite_bounds=False,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=False,
    disable_history=False,
)
@dataclass(frozen=True)
class BHHH(Algorithm):
    """Berndt-Hall-Hall-Hausman (BHHH) optimizer for likelihood problems."""

    # NOTE(review): "converence" is a typo for "convergence". The field name is
    # part of the public algo_options interface, so renaming would break
    # existing callers; confirm before changing.
    converence_gtol_abs: NonNegativeFloat = 1e-8

    # TODO: Why is this 200?
    stopping_maxiter: PositiveInt = 200

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        res = bhhh_internal(
            fun_and_jac=cast(
                Callable[[NDArray[np.float64]], NDArray[np.float64]],
                problem.fun_and_jac,
            ),
            x=x0,
            gtol_abs=self.converence_gtol_abs,
            maxiter=self.stopping_maxiter,
        )

        return res


def bhhh_internal(
    fun_and_jac: Callable[[NDArray[np.float64]], NDArray[np.float64]],
    x: NDArray[np.float64],
    gtol_abs: NonNegativeFloat,
    maxiter: PositiveInt,
) -> InternalOptimizeResult:
    """Minimize a likelihood function using the BHHH algorithm.

    Args:
        fun_and_jac: Callable returning the vector of likelihood contributions
            and the Jacobian of contributions at a parameter vector.
        x: Initial guess of the parameter vector (starting points).
        gtol_abs: Stopping criterion for the gradient tolerance.
        maxiter: Maximum number of iterations. If reached, terminate.

    Returns:
        InternalOptimizeResult: The result of the optimization.

    """
    criterion_accepted, gradient = fun_and_jac(x)
    x_accepted = x

    # BHHH approximates the Hessian by the outer product of the score
    # contributions (J'J) instead of computing second derivatives.
    hessian_approx = np.dot(gradient.T, gradient)
    gradient_sum = np.sum(gradient, axis=0)
    direction = np.linalg.solve(hessian_approx, gradient_sum)
    gtol = np.dot(gradient_sum, direction)

    initial_step_size = 1.0
    step_size = initial_step_size

    niter = 1
    while niter < maxiter:
        niter += 1

        x_candidate = x_accepted + step_size * direction
        criterion_candidate, gradient = fun_and_jac(x_candidate)

        # If previous step was accepted
        if step_size == initial_step_size:
            hessian_approx = np.dot(gradient.T, gradient)
        else:
            # NOTE(review): this re-evaluates fun_and_jac at the identical
            # x_candidate already evaluated above, costing one extra function
            # evaluation per halved step; looks removable — confirm.
            criterion_candidate, gradient = fun_and_jac(x_candidate)

        # Line search
        if np.sum(criterion_candidate) > np.sum(criterion_accepted):
            step_size /= 2

            if step_size <= 0.01:
                # Accept step
                x_accepted = x_candidate
                criterion_accepted = criterion_candidate

                # Reset step size
                step_size = initial_step_size

        # If decrease in likelihood, calculate new direction vector
        else:
            # Accept step
            x_accepted = x_candidate
            criterion_accepted = criterion_candidate

            gradient_sum = np.sum(gradient, axis=0)
            direction = np.linalg.solve(hessian_approx, gradient_sum)
            gtol = np.dot(gradient_sum, direction)

            # A non-descent direction signals a stale Hessian approximation;
            # rebuild it from the current gradient and re-solve.
            if gtol < 0:
                hessian_approx = np.dot(gradient.T, gradient)
                direction = np.linalg.solve(hessian_approx, gradient_sum)

            # Reset stepsize
            step_size = initial_step_size

        if gtol < gtol_abs:
            break

    res = InternalOptimizeResult(
        x=x_accepted,
        fun=criterion_accepted,
        message="Under development",
        n_iterations=niter,
    )

    return res
@mark.minimizer(
    name="fides",
    solver_type=AggregationLevel.SCALAR,
    is_available=IS_FIDES_INSTALLED,
    is_global=False,
    needs_jac=True,
    needs_hess=False,
    needs_bounds=False,
    supports_parallelism=False,
    supports_bounds=True,
    supports_infinite_bounds=True,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=False,
    disable_history=False,
)
@dataclass(frozen=True)
class Fides(Algorithm):
    """Trust-region optimizer wrapping the fides package.

    All options are forwarded to ``fides_internal`` unchanged.

    """

    # How the (approximate) Hessian is updated between iterations.
    hessian_update_strategy: Literal[
        "bfgs",
        "bb",
        "bg",
        "dfp",
        "sr1",
    ] = "bfgs"
    convergence_ftol_abs: NonNegativeFloat = CONVERGENCE_FTOL_ABS
    convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL
    convergence_xtol_abs: NonNegativeFloat = CONVERGENCE_XTOL_ABS
    convergence_gtol_abs: NonNegativeFloat = CONVERGENCE_GTOL_ABS
    convergence_gtol_rel: NonNegativeFloat = CONVERGENCE_GTOL_REL
    stopping_maxiter: PositiveInt = STOPPING_MAXITER
    stopping_max_seconds: float = np.inf
    trustregion_initial_radius: PositiveFloat = 1.0
    # Strategy for stepping back into the feasible region at the bounds.
    trustregion_stepback_strategy: Literal[
        "truncate",
        "reflect",
        "reflect_single",
        "mixed",
    ] = "truncate"
    trustregion_subspace_dimension: Literal[
        "full",
        "2D",
        "scg",
    ] = "full"
    trustregion_max_stepback_fraction: float = 0.95
    trustregion_decrease_threshold: float = 0.25
    trustregion_increase_threshold: float = 0.75
    trustregion_decrease_factor: float = 0.25
    trustregion_increase_factor: float = 2.0

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        res = fides_internal(
            fun_and_jac=cast(
                Callable[[NDArray[np.float64]], NDArray[np.float64]],
                problem.fun_and_jac,
            ),
            x=x0,
            lower_bounds=problem.bounds.lower,
            upper_bounds=problem.bounds.upper,
            hessian_update_strategy=self.hessian_update_strategy,
            convergence_ftol_abs=self.convergence_ftol_abs,
            convergence_ftol_rel=self.convergence_ftol_rel,
            convergence_xtol_abs=self.convergence_xtol_abs,
            convergence_gtol_abs=self.convergence_gtol_abs,
            convergence_gtol_rel=self.convergence_gtol_rel,
            stopping_maxiter=self.stopping_maxiter,
            stopping_max_seconds=self.stopping_max_seconds,
            trustregion_initial_radius=self.trustregion_initial_radius,
            trustregion_stepback_strategy=self.trustregion_stepback_strategy,
            trustregion_subspace_dimension=self.trustregion_subspace_dimension,
            trustregion_max_stepback_fraction=self.trustregion_max_stepback_fraction,
            trustregion_decrease_threshold=self.trustregion_decrease_threshold,
            trustregion_increase_threshold=self.trustregion_increase_threshold,
            trustregion_decrease_factor=self.trustregion_decrease_factor,
            trustregion_increase_factor=self.trustregion_increase_factor,
        )

        return res


def fides_internal(
    fun_and_jac: Callable[[NDArray[np.float64]], NDArray[np.float64]],
    x: NDArray[np.float64],
    lower_bounds: NDArray[np.float64] | None,
    upper_bounds: NDArray[np.float64] | None,
    hessian_update_strategy: Literal[
        "bfgs",
        "bb",
        "bg",
        "dfp",
        "sr1",
    ],
    convergence_ftol_abs: NonNegativeFloat,
    convergence_ftol_rel: NonNegativeFloat,
    convergence_xtol_abs: NonNegativeFloat,
    convergence_gtol_abs: NonNegativeFloat,
    convergence_gtol_rel: NonNegativeFloat,
    stopping_maxiter: PositiveInt,
    stopping_max_seconds: float,
    trustregion_initial_radius: PositiveFloat,
    trustregion_stepback_strategy: Literal[
        "truncate",
        "reflect",
        "reflect_single",
        "mixed",
    ],
    trustregion_subspace_dimension: Literal[
        "full",
        "2D",
        "scg",
    ],
    trustregion_max_stepback_fraction: float,
    trustregion_decrease_threshold: float,
    trustregion_increase_threshold: float,
    trustregion_decrease_factor: float,
    trustregion_increase_factor: float,
) -> InternalOptimizeResult:
    """Minimize a scalar function using the Fides Optimizer.

    For details see :ref: `fides_algorithm`.

    Raises:
        NotInstalledError: If the fides package is not installed.

    """
    if not IS_FIDES_INSTALLED:
        raise NotInstalledError(
            "The 'fides' algorithm requires the fides package to be installed. "
            "You can install it with `pip install fides>=0.7.4`."
        )

    from fides import Optimizer

    # Translate optimagic option names to the fides option keys.
    fides_options = {
        "delta_init": trustregion_initial_radius,
        "eta": trustregion_increase_threshold,
        "fatol": convergence_ftol_abs,
        "frtol": convergence_ftol_rel,
        "gamma1": trustregion_decrease_factor,
        "gamma2": trustregion_increase_factor,
        "gatol": convergence_gtol_abs,
        "grtol": convergence_gtol_rel,
        "maxiter": stopping_maxiter,
        "maxtime": stopping_max_seconds,
        "mu": trustregion_decrease_threshold,
        "stepback_strategy": trustregion_stepback_strategy,
        "subspace_solver": trustregion_subspace_dimension,
        "theta_max": trustregion_max_stepback_fraction,
        "xtol": convergence_xtol_abs,
    }

    hessian_instance = _create_hessian_updater_from_user_input(hessian_update_strategy)

    # fides expects explicit (possibly infinite) bounds, not None.
    lower_bounds = np.full(len(x), -np.inf) if lower_bounds is None else lower_bounds
    upper_bounds = np.full(len(x), np.inf) if upper_bounds is None else upper_bounds

    opt = Optimizer(
        fun=fun_and_jac,
        lb=lower_bounds,
        ub=upper_bounds,
        verbose=logging.ERROR,
        options=fides_options,
        funargs=None,
        hessian_update=hessian_instance,
        resfun=False,
    )
    raw_res = opt.minimize(x)
    res = _process_fides_res(raw_res, opt)

    out = InternalOptimizeResult(
        x=res["solution_x"],
        fun=res["solution_criterion"],
        jac=res["solution_derivative"],
        hess=res["solution_hessian"],
        success=res["success"],
        message=res["message"],
        n_iterations=res["n_iterations"],
    )

    return out


def _process_fides_res(raw_res, opt):
    """Create an optimagic results dictionary from the Fides output.

    Args:
        raw_res (tuple): Tuple containing the Fides result
        opt (fides.Optimizer): Fides Optimizer after minimize has been called
            on it.

    Returns:
        dict: Result dictionary with solution, convergence and message entries.

    """
    fval, x, grad, hess = raw_res
    res = {
        "solution_criterion": fval,
        "solution_x": x,
        "solution_derivative": grad,
        "solution_hessian": hess,
        "success": opt.converged,
        "n_iterations": opt.iteration,
        "message": _process_exitflag(opt.exitflag),
    }
    return res


def _process_exitflag(exitflag):
    """Map a fides exit flag (an enum member) to a human-readable message.

    Returns None for exit flag names not listed below.

    """
    messages = {
        "DID_NOT_RUN": "The optimizer did not run",
        "MAXITER": "Reached maximum number of allowed iterations",
        "MAXTIME": "Expected to reach maximum allowed time in next iteration",
        "NOT_FINITE": "Encountered non-finite fval/grad/hess",
        "EXCEEDED_BOUNDARY": "Exceeded specified boundaries",
        "DELTA_TOO_SMALL": "Trust Region Radius too small to proceed",
        "FTOL": "Converged according to fval difference",
        "XTOL": "Converged according to x difference",
        "GTOL": "Converged according to gradient norm",
    }

    out = messages.get(exitflag.name)

    return out


def _create_hessian_updater_from_user_input(hessian_update_strategy):
    """Turn the user-provided Hessian update strategy into a fides instance.

    Accepts either a string naming a fides HessianApproximation subclass or an
    already constructed HessianApproximation instance.

    Raises:
        ValueError: For the "broyden" string (requires the phi parameter).
        NotImplementedError: For strategies that need residuals.
        TypeError: For any other input type.

    """
    from fides import hessian_approximation

    # These approximations need residual information, which this wrapper
    # does not provide.
    hessians_needing_residuals = (
        hessian_approximation.FX,
        hessian_approximation.SSM,
        hessian_approximation.TSSM,
        hessian_approximation.GNSBFGS,
    )
    unsupported_hess_msg = (
        f"{hessian_update_strategy} not supported because it requires "
        "residuals. Choose one of 'BB', 'BFGS', 'BG', 'DFP' or 'SR1' or pass "
        "an instance of the fides.hessian_approximation.HessianApproximation "
        "class."
    )

    if hessian_update_strategy in ("broyden", "Broyden", "BROYDEN"):
        raise ValueError(
            "You cannot use the Broyden update strategy without specifying the "
            "interpolation parameter phi. Import the Broyden class from "
            "`fides.hessian_approximation`, create an instance of it with your "
            "desired value of phi and pass this instance instead."
        )
    elif isinstance(hessian_update_strategy, str):
        if hessian_update_strategy.lower() in ["fx", "ssm", "tssm", "gnsbfgs"]:
            raise NotImplementedError(unsupported_hess_msg)
        else:
            # Strategy strings map to class names by uppercasing, e.g.
            # "bfgs" -> hessian_approximation.BFGS.
            hessian_name = hessian_update_strategy.upper()
            hessian_class = getattr(hessian_approximation, hessian_name)
            hessian_instance = hessian_class()
    elif isinstance(
        hessian_update_strategy, hessian_approximation.HessianApproximation
    ):
        hessian_instance = hessian_update_strategy
        if isinstance(hessian_instance, hessians_needing_residuals):
            raise NotImplementedError(unsupported_hess_msg)
    else:
        raise TypeError(
            "You must provide a hessian_update_strategy that is either a string or an "
            "instance of the fides.hessian_approximation.HessianApproximation class."
        )

    return hessian_instance
    n_grid_points: PositiveInt | PyTree = 201
    """Number of grid points per dimension.

    If an integer is provided, it will be used for all dimensions.

    """

    n_init: PositiveInt = 20
    """Number of initialization steps to run.

    Accordingly, N//2 positions will be initialized in a grid like pattern and
    remaining initialized at the vertices and randomly in the search space.

    """

    stopping_maxiter: PositiveInt = STOPPING_MAXITER
    """Maximum number of iterations."""

    stopping_maxtime: NonNegativeFloat | None = None
    """Maximum time in seconds before termination."""

    convergence_target_value: float | None = None
    """Stop the optimization if the objective function is less than this value."""

    convergence_iter_noimprove: PositiveInt = 1000000  # do not want to trigger this
    """Number of iterations without improvement before termination."""

    convergence_ftol_abs: NonNegativeFloat | None = (
        CONVERGENCE_FTOL_ABS  # set to zero, so disabled
    )
    """Converge if the absolute change in the objective function is less than this
    value."""

    convergence_ftol_rel: NonNegativeFloat | None = None
    """Converge if the relative change in the objective function is less than this
    value."""

    caching: bool = True
    """Whether to cache evaluated param and function values in a dictionary for
    lookup."""

    extra_start_params: list[PyTree] | None = None
    """List of additional start points for the optimization run.

    In case of population based optimizers, the initial_population can be provided
    via `extra_start_params`

    """

    warm_start: pd.DataFrame | None = None
    """Pandas dataframe that contains score and parameter information that will be
    automatically loaded into the memory.

    example:
        score   x1   x2   x...
        0.756   0.1  0.2  ...
        0.823   0.3  0.1  ...
        ...     ...  ...  ...

    """

    verbosity: Literal["progress_bar", "print_results", "print_times"] | bool = False
    """Determines what part of the optimization information will be printed."""

    seed: int | None = None
    """Random seed for reproducibility."""

    rand_rest_p: ProbabilityFloat = 0
    """Probability for the optimization algorithm to jump to a random position in an
    iteration step."""


# ==================================================================================
# Local optimizers
# ==================================================================================


@mark.minimizer(
    name="gfo_hillclimbing",
    solver_type=AggregationLevel.SCALAR,
    is_available=IS_GRADIENT_FREE_OPTIMIZERS_INSTALLED,
    is_global=True,
    needs_jac=False,
    needs_hess=False,
    needs_bounds=True,
    supports_parallelism=False,
    supports_bounds=True,
    supports_infinite_bounds=False,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=False,
    disable_history=False,
)
@dataclass(frozen=True)
class GFOHillClimbing(GFOCommonOptions, Algorithm):
    # NOTE(review): every sibling GFO optimizer in this module inherits as
    # (Algorithm, GFOCommonOptions); confirm whether the reversed base order
    # here is intentional — it changes the MRO and the dataclass field order.
    """Minimize a scalar function using the HillClimbing algorithm.

    This algorithm is a Python implementation of the HillClimbing algorithm through
    the gradient_free_optimizers package.

    Hill climbing is a local search algorithm suited for exploring combinatorial
    search spaces. It starts at an initial point, which is the best point chosen
    from `n_init` initialization runs, and continues to move to positions within its
    neighbourhood with a better solution. It has no method against getting stuck in
    local optima.

    """

    epsilon: PositiveFloat = 0.03
    """The step-size of the hill climbing algorithm.

    If step_size is too large the newly selected positions will be at the edge of
    the search space. If its value is very low it might not find new positions.

    """

    distribution: Literal["normal", "laplace", "logistic", "gumbel"] = "normal"
    """The mathematical distribution the algorithm draws samples from.

    All available distributions are taken from the numpy-package.

    """
""" n_neighbours: PositiveInt = 3 """The number of positions the algorithm explores from its current postion before setting its current position to the best of those neighbour positions. If the value of n_neighbours is large the hill-climbing-based algorithm will take a lot of time to choose the next position to move to, but the choice will probably be a good one. It might be a prudent approach to increase n_neighbours of the search- space has a lot of dimensions, because there are more possible directions to move to. """ def _solve_internal_problem( self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] ) -> InternalOptimizeResult: import gradient_free_optimizers as gfo opt = gfo.HillClimbingOptimizer optimizer = partial( opt, epsilon=self.epsilon, distribution=self.distribution, n_neighbours=self.n_neighbours, ) res = _gfo_internal( common_options=self, problem=problem, x0=x0, optimizer=optimizer, ) return res @mark.minimizer( name="gfo_stochastichillclimbing", solver_type=AggregationLevel.SCALAR, is_available=IS_GRADIENT_FREE_OPTIMIZERS_INSTALLED, is_global=True, needs_jac=False, needs_hess=False, needs_bounds=True, supports_parallelism=False, supports_bounds=True, supports_infinite_bounds=False, supports_linear_constraints=False, supports_nonlinear_constraints=False, disable_history=False, ) @dataclass(frozen=True) class GFOStochasticHillClimbing(Algorithm, GFOCommonOptions): """Minimize a scalar function using the Stochastic Hill Climbing algorithm. This algorithm is a Python implementation of the StochasticHillClimbing algorithm through the gradient_free_optimizers package. Stochastic hill climbing extends the normal hill climbing by accepting worse positions with a probability `p_accept` as a next position helping against getting stuck in local optima. """ epsilon: PositiveFloat = 0.03 """The step-size of the hill climbing algorithm. If step_size is too large the newly selected positions will be at the edge of the search space. 
If its value is very low it might not find new positions. """ distribution: Literal["normal", "laplace", "logistic", "gumbel"] = "normal" """The mathematical distribution the algorithm draws samples from. All available distributions are taken from the numpy-package. """ n_neighbours: PositiveInt = 3 """The number of positions the algorithm explores from its current postion before setting its current position to the best of those neighbour positions. If the value of n_neighbours is large the hill-climbing-based algorithm will take a lot of time to choose the next position to move to, but the choice will probably be a good one. It might be a prudent approach to increase n_neighbours of the search- space has a lot of dimensions, because there are more possible directions to move to. """ p_accept: ProbabilityFloat = 0.5 """The probability factor used in the equation to calculate if a worse position is accepted as the new position. If the new score is not better than the previous one the algorithm accepts worse positions with probability p_accept. .. math:: score_{normalized} = norm * \\frac{score_{current} - score_{new}} {score_{current} + score_{new}} .. math:: p = \\exp^{-score_{normalized}} If p is less than p_accept the new position gets accepted anyways. 
""" def _solve_internal_problem( self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] ) -> InternalOptimizeResult: import gradient_free_optimizers as gfo opt = gfo.StochasticHillClimbingOptimizer optimizer = partial( opt, epsilon=self.epsilon, distribution=self.distribution, n_neighbours=self.n_neighbours, p_accept=self.p_accept, ) res = _gfo_internal( common_options=self, problem=problem, x0=x0, optimizer=optimizer, ) return res @mark.minimizer( name="gfo_repulsinghillclimbing", solver_type=AggregationLevel.SCALAR, is_available=IS_GRADIENT_FREE_OPTIMIZERS_INSTALLED, is_global=True, needs_jac=False, needs_hess=False, needs_bounds=True, supports_parallelism=False, supports_bounds=True, supports_infinite_bounds=False, supports_linear_constraints=False, supports_nonlinear_constraints=False, disable_history=False, ) @dataclass(frozen=True) class GFORepulsingHillClimbing(Algorithm, GFOCommonOptions): """Minimize a scalar function using the Repulsing Hill Climbing algorithm. This algorithm is a Python implementation of the Repulsing Hill Climbing algorithm through the gradient_free_optimizers package. The algorithm inherits from the Hill climbing which is a local search algorithm but always activates its methods to escape local optima. """ epsilon: PositiveFloat = 0.03 """The step-size of the hill climbing algorithm. If step_size is too large the newly selected positions will be at the edge of the search space. If its value is very low it might not find new positions. """ distribution: Literal["normal", "laplace", "logistic", "gumbel"] = "normal" """The mathematical distribution the algorithm draws samples from. All available distributions are taken from the numpy-package. 
""" n_neighbours: PositiveInt = 3 """The number of positions the algorithm explores from its current position before setting its current position to the best of those neighbour positions.""" repulsion_factor: PositiveFloat = 5 """The algorithm increases the step size by multiplying it with the repulsion_factor for the next iteration. This way the algorithm escapes the region that does not offer better positions. .. math:: \\epsilon = \\epsilon * {repulsion factor} """ def _solve_internal_problem( self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] ) -> InternalOptimizeResult: import gradient_free_optimizers as gfo opt = gfo.RepulsingHillClimbingOptimizer optimizer = partial( opt, epsilon=self.epsilon, distribution=self.distribution, n_neighbours=self.n_neighbours, repulsion_factor=self.repulsion_factor, ) res = _gfo_internal( common_options=self, problem=problem, x0=x0, optimizer=optimizer, ) return res @mark.minimizer( name="gfo_simulatedannealing", solver_type=AggregationLevel.SCALAR, is_available=IS_GRADIENT_FREE_OPTIMIZERS_INSTALLED, is_global=True, needs_jac=False, needs_hess=False, needs_bounds=True, supports_parallelism=False, supports_bounds=True, supports_infinite_bounds=False, supports_linear_constraints=False, supports_nonlinear_constraints=False, disable_history=False, experimental=True, ) @dataclass(frozen=True) class GFOSimulatedAnnealing(Algorithm, GFOCommonOptions): """Minimize a scalar function using the Simulated Annealing algorithm. This algorithm is a Python implementation of Simulated Annealing through the gradient_free_optimizers package. Simulated annealing chooses its next possible position similar to hill climbing, but it accepts worse results with a probability that decreases with time. It simulates a temperature that decreases with each iteration, similar to a material cooling down. """ epsilon: PositiveFloat = 0.03 """The step-size of the algorithm. 
    distribution: Literal["normal", "laplace", "logistic", "gumbel"] = "normal"
    """The mathematical distribution the algorithm draws samples from.

    All available distributions are taken from the numpy-package.

    """

    n_neighbours: PositiveInt = 3
    """The number of positions the algorithm explores from its current position
    before setting its current position to the best of those neighbour positions."""

    start_temp: PositiveFloat = 1
    """The start_temp is a factor for the probability p of accepting a worse
    position.

    .. math::
        p = \\exp^{-\\frac{score_{normalized}}{temp}}

    """

    annealing_rate: PositiveFloat = 0.97
    """Rate at which the temperature value of the algorithm decreases.

    An annealing rate above 1 increases the temperature over time.

    .. math::
        start\\_temp \\leftarrow start\\_temp * annealing\\_rate

    """

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        """Solve the internal problem with GFO's SimulatedAnnealingOptimizer."""
        import gradient_free_optimizers as gfo

        opt = gfo.SimulatedAnnealingOptimizer
        optimizer = partial(
            opt,
            epsilon=self.epsilon,
            distribution=self.distribution,
            n_neighbours=self.n_neighbours,
            start_temp=self.start_temp,
            annealing_rate=self.annealing_rate,
        )
        res = _gfo_internal(
            common_options=self,
            problem=problem,
            x0=x0,
            optimizer=optimizer,
        )
        return res


@mark.minimizer(
    name="gfo_downhillsimplex",
    solver_type=AggregationLevel.SCALAR,
    is_available=IS_GRADIENT_FREE_OPTIMIZERS_INSTALLED,
    is_global=True,
    needs_jac=False,
    needs_hess=False,
    needs_bounds=True,
    supports_parallelism=False,
    supports_bounds=True,
    supports_infinite_bounds=False,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=False,
    disable_history=False,
    experimental=True,
)
@dataclass(frozen=True)
class GFODownhillSimplex(Algorithm, GFOCommonOptions):
    """Minimize a scalar function using the Downhill Simplex algorithm.

    This algorithm is a Python implementation of the Downhill Simplex algorithm
    through the gradient_free_optimizers package.

    The Downhill simplex or Nelder mead algorithm works by grouping `number of
    dimensions + 1` positions into a simplex, which can explore the search-space by
    changing shape. The simplex changes shape by reflecting, expanding, contracting
    or shrinking via the alpha, gamma, beta or sigma parameters. It needs at least
    `number of dimensions + 1` initial positions to form a simplex in the
    search-space and the movement of the positions in the simplex are affected by
    each other.

    """

    simplex_reflection: PositiveFloat = 1
    """The reflection parameter of the simplex algorithm."""

    simplex_expansion: PositiveFloat = 2
    """The expansion parameter of the simplex algorithm."""

    simplex_contraction: PositiveFloat = 0.5
    """The contraction parameter of the simplex algorithm."""

    simplex_shrinking: PositiveFloat = 0.5
    """The shrinking parameter of the simplex algorithm."""

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        """Solve the internal problem with GFO's DownhillSimplexOptimizer."""
        import gradient_free_optimizers as gfo

        opt = gfo.DownhillSimplexOptimizer
        # Map the descriptive optimagic names to GFO's greek-letter parameters.
        optimizer = partial(
            opt,
            alpha=self.simplex_reflection,
            gamma=self.simplex_expansion,
            beta=self.simplex_contraction,
            sigma=self.simplex_shrinking,
        )
        res = _gfo_internal(
            common_options=self,
            problem=problem,
            x0=x0,
            optimizer=optimizer,
        )
        return res


@mark.minimizer(
    name="gfo_powells_method",
    solver_type=AggregationLevel.SCALAR,
    is_available=IS_GRADIENT_FREE_OPTIMIZERS_INSTALLED,
    is_global=True,
    needs_jac=False,
    needs_hess=False,
    needs_bounds=True,
    supports_parallelism=False,
    supports_bounds=True,
    supports_infinite_bounds=False,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=False,
    disable_history=False,
)
@dataclass(frozen=True)
class GFOPowellsMethod(Algorithm, GFOCommonOptions):
    """Minimize a scalar function using Powell's Method.

    This algorithm is a Python implementation of the Powell's Method algorithm
    through the gradient_free_optimizers package.

    This powell's method implementation works by optimizing each search space
    dimension at a time with the hill climbing algorithm. It works by setting the
    search space range for all dimensions except one to a single value. The hill
    climbing algorithm searches the best position within this dimension. After
    `iters_p_dim` iterations the next dimension is searched, while the search space
    range from the previously searched dimension is set to the best position. This
    way the algorithm finds new best positions one dimension at a time.

    """

    iters_p_dim: PositiveInt = 10
    """Number of iterations the algorithm will let the hill-climbing algorithm
    search to find the best position before it changes to the next dimension of the
    search space.

    Typical range: 5 to 15.

    """

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        """Solve the internal problem with GFO's PowellsMethod."""
        import gradient_free_optimizers as gfo

        opt = gfo.PowellsMethod
        optimizer = partial(
            opt,
            iters_p_dim=self.iters_p_dim,
        )
        res = _gfo_internal(
            common_options=self,
            problem=problem,
            x0=x0,
            optimizer=optimizer,
        )
        return res


# ==================================================================================
# Population Based
# ==================================================================================


@mark.minimizer(
    name="gfo_pso",
    solver_type=AggregationLevel.SCALAR,
    is_available=IS_GRADIENT_FREE_OPTIMIZERS_INSTALLED,
    is_global=True,
    needs_jac=False,
    needs_hess=False,
    needs_bounds=True,
    supports_parallelism=False,
    supports_bounds=True,
    supports_infinite_bounds=False,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=False,
    disable_history=False,
)
@dataclass(frozen=True)
class GFOParticleSwarmOptimization(Algorithm, GFOCommonOptions):
    r"""Minimize a scalar function using the Particle Swarm Optimization algorithm.

    This algorithm is a Python implementation of the Particle Swarm Optimization
    algorithm through the gradient_free_optimizers package.

    Particle Swarm Optimization is a global population based algorithm. The
    algorithm simulates a swarm of particles which move according to their own
    inertia across the search space. Each particle adjusts its position based on its
    own experience (cognitive weight) and the experiences of its neighbors or the
    swarm (social weight), using velocity updates. The algorithm iteratively guides
    the swarm toward promising regions of the search space.

    The velocity of a particle is calculated by the following equation:

    .. math::
        v_{n+1} = \\omega \\cdot v_n + c_k \\cdot r_1 \\cdot (p_{best}-p_n)
        + c_s \\cdot r_2 \\cdot (g_{best} - p_n)

    """

    population_size: PositiveInt | None = None
    """Size of the population."""

    # Defaults follow the common SPSO-2011 style parameterization.
    inertia: NonNegativeFloat = 0.5 / math.log(2.0)
    """The inertia of the movement of the individual particles in the population."""

    cognitive_weight: NonNegativeFloat = 0.5 + math.log(2.0)
    """A factor of the movement towards the personal best position of the individual
    particles in the population."""

    social_weight: NonNegativeFloat = 0.5 + math.log(2.0)
    """A factor of the movement towards the global best position of the individual
    particles in the population."""

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        """Solve the internal problem with GFO's ParticleSwarmOptimizer."""
        import gradient_free_optimizers as gfo

        population_size = get_population_size(
            population_size=self.population_size, x=x0, lower_bound=10
        )
        opt = gfo.ParticleSwarmOptimizer
        optimizer = partial(
            opt,
            population=population_size,
            inertia=self.inertia,
            cognitive_weight=self.cognitive_weight,
            social_weight=self.social_weight,
            rand_rest_p=self.rand_rest_p,
        )
        res = _gfo_internal(
            common_options=self,
            problem=problem,
            x0=x0,
            optimizer=optimizer,
        )
        return res
@mark.minimizer(
    name="gfo_parallel_tempering",
    solver_type=AggregationLevel.SCALAR,
    is_available=IS_GRADIENT_FREE_OPTIMIZERS_INSTALLED,
    is_global=True,
    needs_jac=False,
    needs_hess=False,
    needs_bounds=True,
    supports_parallelism=False,
    supports_bounds=True,
    supports_infinite_bounds=False,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=False,
    disable_history=False,
)
@dataclass(frozen=True)
class GFOParallelTempering(Algorithm, GFOCommonOptions):
    r"""Minimize a scalar function using the Parallel Tempering algorithm.

    This algorithm is a Python implementation of the Parallel Tempering algorithm
    through the gradient_free_optimizers package.

    Parallel Tempering is a global optimization algorithm that is inspired by
    metallurgical annealing. It runs multiple optimizer instances at different
    "starting temperatures" in parallel. Periodically, swaps between these runs are
    attempted. Swaps between optimization runs at different temperatures allow the
    optimizer to overcome local optima.

    The probability of swapping temperatures for any combination of optimizer
    instances is given by.

    .. math::
        p = \\min \\left( 1, \\exp\\left[{(\\text{score}_i-
        \\text{score}_j)\\left(\\frac{1}{T_i}-\\frac{1}{T_j}\\right)}\\right]
        \\right)

    """

    population_size: PositiveInt | None = None
    """Size of the population."""

    n_iter_swap: PositiveInt = 10
    """The number of iterations the algorithm performs before switching temperatures
    of the individual optimizers in the population."""

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        """Solve the internal problem with GFO's ParallelTemperingOptimizer."""
        import gradient_free_optimizers as gfo

        population_size = get_population_size(
            population_size=self.population_size, x=x0, lower_bound=10
        )
        opt = gfo.ParallelTemperingOptimizer
        optimizer = partial(
            opt,
            population=population_size,
            n_iter_swap=self.n_iter_swap,
            rand_rest_p=self.rand_rest_p,
        )
        res = _gfo_internal(
            common_options=self,
            problem=problem,
            x0=x0,
            optimizer=optimizer,
        )
        return res


@mark.minimizer(
    name="gfo_spiral_optimization",
    solver_type=AggregationLevel.SCALAR,
    is_available=IS_GRADIENT_FREE_OPTIMIZERS_INSTALLED,
    is_global=True,
    needs_jac=False,
    needs_hess=False,
    needs_bounds=True,
    supports_parallelism=False,
    supports_bounds=True,
    supports_infinite_bounds=False,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=False,
    disable_history=False,
)
@dataclass(frozen=True)
class GFOSpiralOptimization(Algorithm, GFOCommonOptions):
    r"""Minimize a scalar function using the Spiral Optimization algorithm.

    This algorithm is a Python implementation of the Spiral Optimization algorithm
    through the gradient_free_optimizers package.

    Spiral Optimization is a population-based algorithm, in which a number of
    particles move in a spiral-like pattern to explore the search space and converge
    to the best known position as the spiral decays.

    The position of each particle is updated according to the following equation:

    .. math::
        x_i (k+1) = x^* (k) + r(k) \\cdot R(\\theta) \\cdot (x_i(k)- x^*(k))

    where:

    - `k` = k-th iteration
    - `x_i(k)` = current position.
    - `x*(k)` = center position (known best position of all particles)
    - `r(k)` = decay rate ,
    - `R` = rotation matrix.

    and rotation matrix R is given by

    .. math::
        R(\\theta) = \\begin{bmatrix} 0^{\\top}_{n-1} & -1 \\\\
        I_{n-1} & 0_{n-1} \\end{bmatrix}

    """

    population_size: PositiveInt | None = None
    """Size of the population."""

    decay_rate: NonNegativeFloat = 0.99
    """The decay rate `r` is a factor, by which the radius of the spiral movement of
    the particles decays during their spiral movement.

    Lower values accelerate the convergence of the particles to the best known
    position, while values above 1 eventually lead to a movement where the particles
    spiral away from each other. Typical range: 0.85 to 1.15.

    """

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        """Solve the internal problem with GFO's SpiralOptimization."""
        import gradient_free_optimizers as gfo

        population_size = get_population_size(
            population_size=self.population_size, x=x0, lower_bound=10
        )
        opt = gfo.SpiralOptimization
        optimizer = partial(
            opt,
            population=population_size,
            decay_rate=self.decay_rate,
        )
        res = _gfo_internal(
            common_options=self,
            problem=problem,
            x0=x0,
            optimizer=optimizer,
        )
        return res


@mark.minimizer(
    name="gfo_genetic_algorithm",
    solver_type=AggregationLevel.SCALAR,
    is_available=IS_GRADIENT_FREE_OPTIMIZERS_INSTALLED,
    is_global=True,
    needs_jac=False,
    needs_hess=False,
    needs_bounds=True,
    supports_parallelism=False,
    supports_bounds=True,
    supports_infinite_bounds=False,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=False,
    disable_history=False,
)
@dataclass(frozen=True)
class GFOGeneticAlgorithm(Algorithm, GFOCommonOptions):
    """Minimize a scalar function using the Genetic Algorithm.

    This algorithm is a Python implementation of the Genetic Algorithm through the
    gradient_free_optimizers package.

    The Genetic Algorithm is an evolutionary algorithm inspired by the process of
    natural selection. It evolves a population of candidate solutions over
    generations using mechanisms like selection, crossover, and mutation of
    genes(bits) to find the best solution.

    """

    population_size: PositiveInt | None = None
    """Size of the population."""

    mutation_rate: ProbabilityFloat = 0.5
    """Probability of a mutation event occurring in an individual of the population.

    Mutation helps in maintaining genetic diversity within the population and
    prevents the algorithm from getting stuck in local optima. Bits are randomly
    altered with.

    .. math::
        x'_i = \\begin{cases} x_i & \\text{if } \\text{rand} > p_m \\\\
        1 - x_i & \\text{if } \\text{rand} \\leq p_m \\end{cases}

    where p_m is mutation_rate.

    """

    crossover_rate: ProbabilityFloat = 0.5
    """Probability of a crossover event occurring between two parents.

    A higher crossover rate increases the diversity of the offspring, which can help
    in exploring the search space more effectively. Crossover happens with.

    .. math::
        u_{i,j}^{(g)} = \\begin{cases} v_{i,j}^{(g)} & \\text{if } \\text{rand}_j
        \\leq C_r \\text{ or } j = j_{\\text{rand}} \\\\
        x_{i,j}^{(g)} & \\text{otherwise} \\end{cases}

    where C_r is crossover_rate .

    """

    n_parents: PositiveInt = 2
    """The number of parents selected from the current population to participate in
    the crossover process to produce offspring.

    By default, pairs of parents are selected to generate new offspring.

    """

    n_offsprings: PositiveInt = 10
    """The number of offsprings generated in each generation through the processes
    of crossover and mutation.

    Typically, the number of offspring is equal to the population size, ensuring
    that the population size remains constant over generations.

    """
""" def _solve_internal_problem( self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] ) -> InternalOptimizeResult: import gradient_free_optimizers as gfo population_size = get_population_size( population_size=self.population_size, x=x0, lower_bound=10 ) opt = gfo.GeneticAlgorithmOptimizer optimizer = partial( opt, population=population_size, mutation_rate=self.mutation_rate, crossover_rate=self.crossover_rate, n_parents=self.n_parents, offspring=self.n_offsprings, ) res = _gfo_internal( common_options=self, problem=problem, x0=x0, optimizer=optimizer, ) return res @mark.minimizer( name="gfo_evolution_strategy", solver_type=AggregationLevel.SCALAR, is_available=IS_GRADIENT_FREE_OPTIMIZERS_INSTALLED, is_global=True, needs_jac=False, needs_hess=False, needs_bounds=True, supports_parallelism=False, supports_bounds=True, supports_infinite_bounds=False, supports_linear_constraints=False, supports_nonlinear_constraints=False, disable_history=False, ) @dataclass(frozen=True) class GFOEvolutionStrategy(Algorithm, GFOCommonOptions): r"""Minimize a scalar function using the Evolution Strategy algorithm. This algorithm is a Python implementation of the Evolution Strategy algorithm through the gradient_free_optimizers package. Evolution Strategy is a evolutionary algorithm inspired by natural evolution and work by iteratively improving a population of candidate solutions through mutation, crossover, and selection. A population of parents generates offspring, and only the fittest individuals from both parents and offspring are selected to form the next generation. The algorithm uses both mutation and crossover to create new candidate solutions. The choice between mutation and crossover is determined probabilistically based on their respective rates in the following way. .. math:: \\text{total_rate} = \\text{mutation_rate} + \\text{crossover_rate} .. math:: R = \\text{random_float} (0 ... \\text{total_rate}) .. 
code-block:: if R <= mutation-rate: do mutation else: do crossover """ population_size: PositiveInt | None = None """Size of the population.""" stopping_maxiter: PositiveInt = STOPPING_MAXFUN_GLOBAL """Maximum number of iterations.""" mutation_rate: ProbabilityFloat = 0.7 """Probability of a mutation event occurring in an individual.""" crossover_rate: ProbabilityFloat = 0.3 """Probability of an individual to perform a crossover with the best individual in the population.""" def _solve_internal_problem( self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] ) -> InternalOptimizeResult: import gradient_free_optimizers as gfo population_size = get_population_size( population_size=self.population_size, x=x0, lower_bound=10 ) opt = gfo.EvolutionStrategyOptimizer optimizer = partial( opt, population=population_size, mutation_rate=self.mutation_rate, crossover_rate=self.crossover_rate, rand_rest_p=self.rand_rest_p, ) res = _gfo_internal( common_options=self, problem=problem, x0=x0, optimizer=optimizer, ) return res @mark.minimizer( name="gfo_differential_evolution", solver_type=AggregationLevel.SCALAR, is_available=IS_GRADIENT_FREE_OPTIMIZERS_INSTALLED, is_global=True, needs_jac=False, needs_hess=False, needs_bounds=True, supports_parallelism=False, supports_bounds=True, supports_infinite_bounds=False, supports_linear_constraints=False, supports_nonlinear_constraints=False, disable_history=False, ) @dataclass(frozen=True) class GFODifferentialEvolution(Algorithm, GFOCommonOptions): r"""Minimize a scalar function using the Differential Evolution algorithm. This algorithm is a Python implementation of the Differential Evolution algorithm through the gradient_free_optimizers package. Differential Evolution is a population-based optimization algorithm that creates iteratively improves a population of candidate solutions by combining and perturbing them based on their differences. 
class GFODifferentialEvolution(Algorithm, GFOCommonOptions):
    r"""Minimize a scalar function using the Differential Evolution algorithm.

    This algorithm is a Python implementation of the Differential Evolution
    algorithm through the gradient_free_optimizers package.

    Differential Evolution is a population-based optimization algorithm that
    iteratively improves a population of candidate solutions by combining and
    perturbing them based on their differences. It creates new positions in the
    search space by adding the weighted difference between two individuals in the
    population to a third individual creating trial solutions that are evaluated for
    their fitness and if a trial solution is better than the target it replaces,
    ensures continual improvement.

    A new trial solution is generated according to:

    .. math::
        x_{trial} = x_{r1} + F \\cdot (x_{r2} - x_{r3})

    where :math:`r1, r2, r3` are random individuals from the population, and
    :math:`F` is the differential weight or mutation_rate.

    """

    population_size: PositiveInt | None = None
    """Size of the population."""

    mutation_rate: ProbabilityFloat = 0.9
    r"""Probability of a mutation event occurring in an individual.

    The mutation rate influences the algorithm's ability to explore the search
    space. A higher value of mutation_rate also called the differential weight `F`
    increases the diversity of the mutant individuals, leading to broader
    exploration, while a lower value encourages convergence by making smaller
    adjustments.

    .. math::
        \mathbf{v}_{i,G+1} = \mathbf{x}_{r1,G} + F \cdot (\mathbf{x}_{r2,G} -
        \mathbf{x}_{r3,G})

    """

    crossover_rate: ProbabilityFloat = 0.9
    """Probability of a crossover event occurring between two parents.

    It determines how much of the trial vector inherits its components from the
    mutant individual versus the target individual. A high crossover rate means that
    more components will come from the mutant individual, promoting exploration of
    new solutions. Conversely, a low crossover rate results in more components being
    taken from the target individual, which can help maintain existing solutions and
    refine them.

    .. math::
        u_{i,j,G+1} = \\begin{cases} v_{i,j,G+1} & \\text{if } \\text{rand}_j(0,1)
        \\leq CR \\text{ or } j = j_{\\text{rand}} \\\\
        x_{i,j,G} & \\text{otherwise} \\end{cases}

    """

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        """Solve the internal problem with GFO's DifferentialEvolutionOptimizer."""
        import gradient_free_optimizers as gfo

        population_size = get_population_size(
            population_size=self.population_size, x=x0, lower_bound=10
        )
        opt = gfo.DifferentialEvolutionOptimizer
        optimizer = partial(
            opt,
            population=population_size,
            mutation_rate=self.mutation_rate,
            crossover_rate=self.crossover_rate,
        )
        res = _gfo_internal(
            common_options=self,
            problem=problem,
            x0=x0,
            optimizer=optimizer,
        )
        return res


# ==================================================================================
# Helper functions
# ==================================================================================


def _gfo_internal(
    common_options: GFOCommonOptions,
    problem: InternalOptimizationProblem,
    x0: NDArray[np.float64],
    optimizer: BaseOptimizer,
) -> InternalOptimizeResult:
    """Internal helper function.

    Define the search space and initial params, define the objective function and
    run optimization.

    """
    # Use common options from GFOCommonOptions
    common = common_options

    # set early stopping criterion
    early_stopping = {
        "n_iter_no_change": common.convergence_iter_noimprove,
        "tol_abs": common.convergence_ftol_abs,
        "tol_rel": common.convergence_ftol_rel,
    }

    # define search space, initial params, initial_population and constraints
    opt = optimizer(
        search_space=_get_search_space_gfo(
            problem.bounds,
            common.n_grid_points,
            problem.converter,
        ),
        initialize=_get_initialize_gfo(
            x0, common.n_init, common.extra_start_params, problem.converter
        ),
        constraints=_get_gfo_constraints(),
        random_state=common.seed,
    )

    # define objective function, negate to perform minimize
    # (GFO maximizes its objective, so the criterion is negated here and the
    # target value below is negated to match)
    def objective_function(para: dict[str, float]) -> float | NDArray[np.float64]:
        x = np.array(opt.conv.para2value(para))
        return -problem.fun(x)

    # negate in case of minimize
    convergence_target_value = (
        -1 * common.convergence_target_value
        if common.convergence_target_value is not None
        else None
    )

    # run optimization
    opt.search(
        objective_function=objective_function,
        n_iter=common.stopping_maxiter,
        max_time=common.stopping_maxtime,
        max_score=convergence_target_value,
        early_stopping=early_stopping,
        memory=common.caching,
        memory_warm_start=common.warm_start,
        verbosity=common.verbosity,
    )

    return _process_result_gfo(opt)


def _get_search_space_gfo(
    bounds: InternalBounds, n_grid_points: PositiveInt | PyTree, converter: Converter
) -> dict[str, NDArray[np.float64]]:
    """Create search space.
Args: bounds: Internal Bounds n_grid_points: number of grid points in each dimension Returns: dict: search_space dictionary """ search_space = {} if bounds.lower is not None and bounds.upper is not None: dim = len(bounds.lower) upper = bounds.upper lower = bounds.lower if isinstance(n_grid_points, int): n_grid_points = [n_grid_points] * dim else: n_grid_points = list(map(int, converter.params_to_internal(n_grid_points))) for i in range(dim): search_space[f"x{i}"] = np.linspace(lower[i], upper[i], n_grid_points[i]) return search_space def _get_gfo_constraints() -> list[Any]: """Process constraints.""" return [] def _get_initialize_gfo( x0: NDArray[np.float64], n_init: PositiveInt, extra_start_points: list[PyTree] | None, converter: Converter, ) -> dict[str, Any]: """Set initial params x0, additional start params for the optimization run or the initial_population. Here, warm_start is actually extra_start_params. Args: x0: initial param Returns: dict: initialize dictionary with initial parameters set """ init = _value2para(x0) x_list = [init] if extra_start_points is not None: internal_values = [converter.params_to_internal(x) for x in extra_start_points] extra_start_points = [_value2para(x) for x in internal_values] x_list += extra_start_points initialize = { "warm_start": x_list, "vertices": n_init // 2, "grid": n_init // 2, } return initialize def _process_result_gfo(opt: "BaseOptimizer") -> InternalOptimizeResult: """Process result. Args: opt: Optimizer instance after optimization run is complete Returns: InternalOptimizeResult: Internal optimization result. """ res = InternalOptimizeResult( x=np.array(opt.best_value), fun=-opt.best_score, # negate once again success=True, n_fun_evals=len(opt.eval_times), n_jac_evals=0, n_hess_evals=0, n_iterations=opt.n_iter_search, ) return res def _value2para(x: NDArray[np.float64]) -> dict[str, float]: """Convert values to dict. 
Args: x: Array of parameter values Returns: dict: Dictionary of parameter values with key-value pair as { x{i} : x[i]} """ para = {} for i in range(len(x)): para[f"x{i}"] = x[i] return para ================================================ FILE: src/optimagic/optimizers/iminuit_migrad.py ================================================ """Implement the MIGRAD algorithm from iminuit.""" from __future__ import annotations from dataclasses import dataclass from typing import TYPE_CHECKING, Optional import numpy as np from numpy.typing import NDArray from optimagic import mark from optimagic.config import IS_IMINUIT_INSTALLED from optimagic.exceptions import NotInstalledError from optimagic.optimization.algo_options import ( N_RESTARTS, STOPPING_MAXFUN, ) from optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult from optimagic.optimization.internal_optimization_problem import ( InternalOptimizationProblem, ) from optimagic.typing import AggregationLevel if TYPE_CHECKING: from iminuit import Minuit @mark.minimizer( name="iminuit_migrad", solver_type=AggregationLevel.SCALAR, is_available=IS_IMINUIT_INSTALLED, is_global=False, needs_jac=True, needs_hess=False, needs_bounds=False, supports_parallelism=False, supports_bounds=True, supports_infinite_bounds=True, supports_linear_constraints=False, supports_nonlinear_constraints=False, disable_history=False, ) @dataclass(frozen=True) class IminuitMigrad(Algorithm): r"""Minimize a scalar differentiable function using the MIGRAD algorithm from iminuit. This optimizer wraps the MIGRAD algorithm from the iminuit package, which provides a Python interface to the Minuit2 C++ library developed and maintained by CERN. MIGRAD is a local optimization method in the quasi-Newton family. It iteratively builds an approximation of the inverse Hessian matrix using the DFP variable-metric method to efficiently navigate optimization landscapes. 
At each iteration, the algorithm attempts a Newton step, using gradient and Hessian approximations to move toward the function’s minimum. If this step fails to reduce the objective function, MIGRAD conducts a line search along the gradient direction to maintain progress. This continues until the convergence criteria, such as the Estimated Distance to Minimum (EDM) are met, that is, they fall below preset thresholds. MIGRAD is designed for statistical optimization problems where accurate parameter uncertainty estimates are essential. It excels at maximum-likelihood and least- squares fits common in scientific computing, and is best suited for smooth, differentiable cost functions. For best performance, supply analytical gradients. Convergence and solution will depend on your starting values. Bound constraints (limits) supported. """ stopping_maxfun: int = STOPPING_MAXFUN """Maximum number of function evaluations.""" n_restarts: int = N_RESTARTS """Number of times to restart the optimizer if convergence is not reached. A value of 1 (the default) indicates that the optimizer will only run once, disabling the restart feature. Values greater than 1 specify the maximum number of restart attempts. """ def _solve_internal_problem( self, problem: InternalOptimizationProblem, params: NDArray[np.float64] ) -> InternalOptimizeResult: if not IS_IMINUIT_INSTALLED: raise NotInstalledError( # pragma: no cover "To use the 'iminuit_migrad` optimizer you need to install iminuit. " "Use 'pip install iminuit' or 'conda install -c conda-forge iminuit'. 
" "Check the iminuit documentation for more details: " "https://scikit-hep.org/iminuit/install.html" ) from iminuit import Minuit def wrapped_objective(x: NDArray[np.float64]) -> float: return float(problem.fun(x)) m = Minuit(wrapped_objective, params, grad=problem.jac) bounds = _convert_bounds_to_minuit_limits( problem.bounds.lower, problem.bounds.upper ) for i, (lower, upper) in enumerate(bounds): if lower is not None or upper is not None: m.limits[i] = (lower, upper) m.migrad( ncall=self.stopping_maxfun, iterate=self.n_restarts, ) res = _process_minuit_result(m) return res def _process_minuit_result(minuit_result: Minuit) -> InternalOptimizeResult: """Convert iminuit result to optimagic's internal result format.""" res = InternalOptimizeResult( x=np.array(minuit_result.values), fun=minuit_result.fval, success=minuit_result.valid, message=repr(minuit_result.fmin), n_fun_evals=minuit_result.nfcn, n_jac_evals=minuit_result.ngrad, n_hess_evals=None, n_iterations=minuit_result.nfcn, status=None, jac=None, hess=None, hess_inv=np.array(minuit_result.covariance), max_constraint_violation=None, info=None, history=None, ) return res def _convert_bounds_to_minuit_limits( lower_bounds: Optional[NDArray[np.float64]], upper_bounds: Optional[NDArray[np.float64]], ) -> list[tuple[Optional[float], Optional[float]]]: """Convert optimization bounds to Minuit-compatible limit format. Transforms numpy arrays of bounds into List of tuples as expected by iminuit. Handles special values like np.inf, -np.inf, and np.nan by converting them to None where appropriate, as required by Minuit's limits API. Parameters ---------- lower_bounds : Optional[NDArray[np.float64]] Array of lower bounds for parameters. upper_bounds : Optional[NDArray[np.float64]] Array of upper bounds for parameters. 
Returns: ------- list[tuple[Optional[float], Optional[float]]] List of (lower, upper) limit tuples in Minuit format, where: - None indicates unbounded (equivalent to infinity) - Float values represent actual bounds Notes: ----- Minuit expects bounds as tuples of (lower, upper) where: - `None` indicates no bound (equivalent to -inf or +inf) - A finite float value indicates a specific bound - Bounds can be asymmetric (e.g., one side bounded, one side not) """ if lower_bounds is None or upper_bounds is None: return [] return [ ( None if np.isneginf(lower) or np.isnan(lower) else float(lower), None if np.isposinf(upper) or np.isnan(upper) else float(upper), ) for lower, upper in zip(lower_bounds, upper_bounds, strict=True) ] ================================================ FILE: src/optimagic/optimizers/ipopt.py ================================================ """Implement cyipopt's Interior Point Optimizer.""" from dataclasses import dataclass from typing import Any, Literal import numpy as np from numpy.typing import NDArray from scipy.optimize import Bounds as ScipyBounds from optimagic import mark from optimagic.config import IS_CYIPOPT_INSTALLED from optimagic.exceptions import NotInstalledError from optimagic.optimization.algo_options import ( CONVERGENCE_FTOL_REL, STOPPING_MAXITER, ) from optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult from optimagic.optimization.internal_optimization_problem import ( InternalBounds, InternalOptimizationProblem, ) from optimagic.optimizers.scipy_optimizers import process_scipy_result from optimagic.typing import ( AggregationLevel, GtOneFloat, NonNegativeFloat, NonNegativeInt, PositiveFloat, PositiveInt, YesNoBool, ) @mark.minimizer( name="ipopt", solver_type=AggregationLevel.SCALAR, is_available=IS_CYIPOPT_INSTALLED, is_global=False, needs_jac=True, needs_hess=False, needs_bounds=False, supports_parallelism=False, supports_bounds=True, supports_infinite_bounds=True, supports_linear_constraints=False, 
    supports_nonlinear_constraints=True,
    disable_history=False,
)
@dataclass(frozen=True)
class Ipopt(Algorithm):
    """Minimize a scalar function with Ipopt's interior point method via cyipopt.

    The fields below mirror Ipopt's own options and are grouped by the same
    sections as in the Ipopt options reference; see that reference for the exact
    meaning and valid range of each option.
    """

    # convergence criteria
    convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL
    dual_inf_tol: PositiveFloat = 1.0
    constr_viol_tol: PositiveFloat = 0.0001
    compl_inf_tol: PositiveFloat = 0.0001
    s_max: float = 100
    mu_target: NonNegativeFloat = 0.0
    # stopping criteria
    stopping_maxiter: PositiveInt = STOPPING_MAXITER
    stopping_max_wall_time_seconds: PositiveFloat = 1e20
    stopping_max_cpu_time: PositiveFloat = 1e20
    # acceptable criteria
    acceptable_iter: NonNegativeInt = 15
    acceptable_tol: PositiveFloat = 1e-6
    acceptable_dual_inf_tol: PositiveFloat = 1e-10
    acceptable_constr_viol_tol: PositiveFloat = 0.01
    acceptable_compl_inf_tol: PositiveFloat = 0.01
    acceptable_obj_change_tol: PositiveFloat = 1e20
    diverging_iterates_tol: PositiveFloat = 1e20
    nlp_lower_bound_inf: float = -1e19
    nlp_upper_bound_inf: float = 1e19
    fixed_variable_treatment: Literal[
        "make_parameter",
        "make_parameter_nodual",
        "relax_bounds",
        "make_constraint",
    ] = "make_parameter"
    dependency_detector: Literal["none", "mumps", "wsmp", "ma28"] | None = None
    dependency_detection_with_rhs: YesNoBool = False
    # bounds
    kappa_d: NonNegativeFloat = 1e-5
    bound_relax_factor: NonNegativeFloat = 1e-8
    honor_original_bounds: YesNoBool = False
    # derivatives
    check_derivatives_for_naninf: YesNoBool = False
    # not sure if we should support the following:
    jac_c_constant: YesNoBool = False
    jac_d_constant: YesNoBool = False
    hessian_constant: YesNoBool = False
    # scaling
    nlp_scaling_method: (
        Literal[
            "none",
            "user-scaling",
            "gradient-based",
            "equilibration-based",
        ]
        | None
    ) = "gradient-based"
    obj_scaling_factor: float = 1
    nlp_scaling_max_gradient: PositiveFloat = 100
    nlp_scaling_obj_target_gradient: NonNegativeFloat = 0.0
    nlp_scaling_constr_target_gradient: NonNegativeFloat = 0.0
    nlp_scaling_min_value: NonNegativeFloat = 1e-8
    # initialization
    bound_push: PositiveFloat = 0.01
    # TODO: refine type to fix the range (0,0.5]
    bound_frac: PositiveFloat = 0.01
    slack_bound_push: PositiveFloat = 0.01
    # TODO: refine type to fix the range (0,0.5]
    slack_bound_frac: PositiveFloat = 0.01
    constr_mult_init_max: NonNegativeFloat = 1000
    bound_mult_init_val: PositiveFloat = 1
    bound_mult_init_method: Literal[
        "constant",
        "mu-based",
    ] = "constant"
    least_square_init_primal: YesNoBool = False
    least_square_init_duals: YesNoBool = False
    # warm start
    warm_start_init_point: YesNoBool = False
    warm_start_same_structure: YesNoBool = False
    warm_start_bound_push: PositiveFloat = 0.001
    warm_start_bound_frac: PositiveFloat = 0.001
    warm_start_slack_bound_push: PositiveFloat = 0.001
    # TODO: refine type to fix the range (0,0.5])
    warm_start_slack_bound_frac: PositiveFloat = 0.001
    warm_start_mult_bound_push: PositiveFloat = 0.001
    warm_start_mult_init_max: float = 1e6
    warm_start_entire_iterate: YesNoBool = False
    warm_start_target_mu: float = 0.0
    # miscellaneous
    option_file_name: str = ""
    replace_bounds: YesNoBool = False
    skip_finalize_solution_call: YesNoBool = False
    timing_statistics: YesNoBool = False
    # barrier parameter update
    mu_max_fact: PositiveFloat = 1000
    mu_max: PositiveFloat = 100_000
    mu_min: PositiveFloat = 1e-11
    adaptive_mu_globalization: Literal[
        "obj-constr-filter",
        "kkt-error",
        "never-monotone-mode",
    ] = "obj-constr-filter"
    adaptive_mu_kkterror_red_iters: NonNegativeInt = 4
    # TODO: refine type to fix the range (0,1)
    adaptive_mu_kkterror_red_fact: PositiveFloat = 0.9999
    # TODO: refine type to fix the range (0,1)
    filter_margin_fact: PositiveFloat = 1e-5
    filter_max_margin: PositiveFloat = 1
    adaptive_mu_restore_previous_iterate: YesNoBool = False
    adaptive_mu_monotone_init_factor: PositiveFloat = 0.8
    adaptive_mu_kkt_norm_type: Literal[
        "max-norm",
        "2-norm-squared",
        "1-norm",
        "2-norm",
    ] = "2-norm-squared"
    mu_strategy: Literal["monotone", "adaptive"] = "monotone"
    mu_oracle: Literal[
        "probing",
        "quality-function",
        "loqo",
    ] = "quality-function"
    fixed_mu_oracle: Literal[
        "probing",
        "loqo",
        "quality-function",
        "average_compl",
    ] = "average_compl"
    mu_init: PositiveFloat = 0.1
    barrier_tol_factor: PositiveFloat = 10
    # TODO: refine type to fix the range (0,1)
    mu_linear_decrease_factor: PositiveFloat = 0.2
    # TODO: refine type to fix the range (1,2)
    mu_superlinear_decrease_power: GtOneFloat = 1.5
    mu_allow_fast_monotone_decrease: YesNoBool = True
    # TODO: refine type to fix the range (0,1)
    tau_min: PositiveFloat = 0.99
    sigma_max: PositiveFloat = 100
    sigma_min: NonNegativeFloat = 1e-6
    quality_function_norm_type: Literal[
        "max-norm",
        "2-norm-squared",
        "1-norm",
        "2-norm",
    ] = "2-norm-squared"
    quality_function_centrality: (
        Literal[
            "none",
            "reciprocal",
            "log",
            "cubed-reciprocal",
        ]
        | None
    ) = None
    quality_function_balancing_term: Literal["none", "cubic"] | None = None
    quality_function_max_section_steps: NonNegativeInt = 8
    # TODO: refine type to fix the range [0,1)
    quality_function_section_sigma_tol: NonNegativeFloat = 0.01
    # TODO: refine type to fix the range [0,1)
    quality_function_section_qf_tol: NonNegativeFloat = 0.0
    # line search
    line_search_method: Literal[
        "filter",
        "penalty",
        "cg-penalty",
    ] = "filter"
    # TODO: refine type to fix the range (0,1)
    alpha_red_factor: PositiveFloat = 0.5
    accept_every_trial_step: YesNoBool = False
    accept_after_max_steps: Literal[-1] | NonNegativeInt = -1
    alpha_for_y: Literal[
        "primal",
        "bound-mult",
        "min",
        "max",
        "full",
        "min-dual-infeas",
        "safer-min-dual-infeas",
        "primal-and-full",
        "dual-and-full",
        "acceptor",
    ] = "primal"
    alpha_for_y_tol: NonNegativeFloat = 10
    tiny_step_tol: NonNegativeFloat = 2.22045 * 1e-15
    tiny_step_y_tol: NonNegativeFloat = 0.01
    watchdog_shortened_iter_trigger: NonNegativeInt = 10
    watchdog_trial_iter_max: PositiveInt = 3
    theta_max_fact: PositiveFloat = 10_000
    theta_min_fact: PositiveFloat = 0.0001
    # TODO: refine type to fix the range (0,0.5)
    eta_phi: PositiveFloat = 1e-8
    delta: PositiveFloat = 1
    s_phi: GtOneFloat = 2.3
    s_theta: GtOneFloat = 1.1
    # TODO: refine type to fix the range (0,1)
    gamma_phi: PositiveFloat = 1e-8
    # TODO: refine type to fix the range (0,1)
    gamma_theta: PositiveFloat = 1e-5
    # TODO: refine type to fix the range (0,1)
    alpha_min_frac: PositiveFloat = 0.05
    max_soc: NonNegativeInt = 4
    kappa_soc: PositiveFloat = 0.99
    obj_max_inc: float = 5.0
    max_filter_resets: NonNegativeInt = 5
    filter_reset_trigger: PositiveInt = 5
    corrector_type: (
        Literal[
            "none",
            "affine",
            "primal-dual",
        ]
        | None
    ) = None
    skip_corr_if_neg_curv: YesNoBool = True
    skip_corr_in_monotone_mode: YesNoBool = True
    corrector_compl_avrg_red_fact: PositiveFloat = 1
    soc_method: Literal[0, 1] = 0
    nu_init: PositiveFloat = 1e-6
    nu_inc: PositiveFloat = 0.0001
    # TODO: refine type to fix the range (0,1)
    rho: PositiveFloat = 0.1
    kappa_sigma: PositiveFloat = 1e10
    recalc_y: YesNoBool = False
    recalc_y_feas_tol: PositiveFloat = 1e-6
    slack_move: NonNegativeFloat = 1.81899 * 1e-12
    constraint_violation_norm_type: Literal[
        "1-norm",
        "2-norm",
        "max-norm",
    ] = "1-norm"
    # step calculation
    mehrotra_algorithm: YesNoBool = False
    fast_step_computation: YesNoBool = False
    min_refinement_steps: NonNegativeInt = 1
    max_refinement_steps: NonNegativeInt = 10
    residual_ratio_max: PositiveFloat = 1e-10
    residual_ratio_singular: PositiveFloat = 1e-5
    residual_improvement_factor: PositiveFloat = 1
    neg_curv_test_tol: NonNegativeFloat = 0
    neg_curv_test_reg: YesNoBool = True
    max_hessian_perturbation: PositiveFloat = 1e20
    min_hessian_perturbation: NonNegativeFloat = 1e-20
    perturb_inc_fact_first: GtOneFloat = 100
    perturb_inc_fact: GtOneFloat = 8
    # TODO: refine type to fix the range (0,1)
    perturb_dec_fact: PositiveFloat = 0.333333
    first_hessian_perturbation: PositiveFloat = 0.0001
    jacobian_regularization_value: NonNegativeFloat = 1e-8
    jacobian_regularization_exponent: NonNegativeFloat = 0.25
    perturb_always_cd: YesNoBool = False
    # restoration phase
    expect_infeasible_problem: YesNoBool = False
    expect_infeasible_problem_ctol: NonNegativeFloat = 0.001
    expect_infeasible_problem_ytol: PositiveFloat = 1e8
    start_with_resto: YesNoBool = False
    soft_resto_pderror_reduction_factor: NonNegativeFloat = 0.9999
    max_soft_resto_iters: NonNegativeInt = 10
    # TODO: refine type to fix the range [0,1)
    required_infeasibility_reduction: NonNegativeFloat = 0.9
    max_resto_iter: NonNegativeInt = 3_000_000
    evaluate_orig_obj_at_resto_trial: YesNoBool = True
    resto_penalty_parameter: PositiveFloat = 1000
    resto_proximity_weight: NonNegativeFloat = 1
    bound_mult_reset_threshold: NonNegativeFloat = 1000
    constr_mult_reset_threshold: NonNegativeFloat = 0
    resto_failure_feasibility_threshold: NonNegativeFloat | None = None
    # hessian approximation
    limited_memory_aug_solver: Literal[
        "sherman-morrison",
        "extended",
    ] = "sherman-morrison"
    limited_memory_max_history: NonNegativeInt = 6
    limited_memory_update_type: Literal[
        "bfgs",
        "sr1",
    ] = "bfgs"
    limited_memory_initialization: Literal[
        "scalar1",
        "scalar2",
        "scalar3",
        "scalar4",
        "constant",
    ] = "scalar1"
    limited_memory_init_val: PositiveFloat = 1
    limited_memory_init_val_max: PositiveFloat = 1e8
    limited_memory_init_val_min: PositiveFloat = 1e-8
    limited_memory_max_skipping: PositiveInt = 2
    limited_memory_special_for_resto: YesNoBool = False
    hessian_approximation: Literal[
        "limited-memory",
        "exact",
    ] = "limited-memory"
    hessian_approximation_space: Literal[
        "nonlinear-variables",
        "all-variables",
    ] = "nonlinear-variables"
    # linear solver
    linear_solver: Literal[
        "mumps", "ma27", "ma57", "ma77", "ma86", "ma97", "pardiso", "custom"
    ] = "mumps"
    linear_solver_options: dict[str, Any] | None = None

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        if not IS_CYIPOPT_INSTALLED:
            raise NotInstalledError(
                "The 'ipopt' algorithm requires the cyipopt package to be installed.\n"
                "You can install it with: `conda install -c conda-forge cyipopt`."
            )
        import cyipopt

        # The acceptable-heuristic tolerance must be looser than the main one,
        # otherwise the heuristic would fire before regular convergence.
        if self.acceptable_tol <= self.convergence_ftol_rel:
            raise ValueError(
                "The acceptable tolerance must be larger than the desired tolerance."
) if self.mu_strategy not in ["monotone", "adaptive"]: raise ValueError( f"Unknown barrier strategy: {self.mu_strategy}." " It must be 'monotone' or 'adaptive'." ) if self.nlp_upper_bound_inf < 0: raise ValueError("nlp_upper_bound_inf should be > 0.") if self.nlp_lower_bound_inf > 0: raise ValueError("nlp_lower_bound_inf should be < 0.") linear_solver_options = ( {} if self.linear_solver_options is None else self.linear_solver_options ) if self.resto_failure_feasibility_threshold is None: resto_failure_feasibility_threshold = 1e2 * self.convergence_ftol_rel else: resto_failure_feasibility_threshold = ( self.resto_failure_feasibility_threshold ) # convert None to str none section linear_solver_options_with_none = [ "ma86_scaling", "ma97_scaling", "ma97_scaling1", "ma97_scaling2", "ma97_scaling3", "spral_scaling", "spral_scaling_1", "spral_scaling_2", "spral_scaling_3", "linear_system_scaling", ] for key, val in linear_solver_options.items(): if key in linear_solver_options_with_none: linear_solver_options[key] = _convert_none_to_str(val) boolean_linear_solver_options = [ "linear_scaling_on_demand" "ma27_skip_inertia_check" "ma27_ignore_singularity" "ma57_automatic_scaling" "ma97_solve_blas3" "pardiso_redo_symbolic_fact_only_if_inertia_wrong" "pardiso_repeated_perturbation_means_singular" "pardiso_skip_inertia_check" "pardiso_iterative" "pardisomkl_redo_symbolic_fact_only_if_inertia_wrong" "pardisomkl_repeated_perturbation_means_singular" "pardisomkl_skip_inertia_check" "spral_ignore_numa" "spral_use_gpu" "wsmp_skip_inertia_check" "wsmp_no_pivoting" ] for key, val in linear_solver_options.items(): if key in boolean_linear_solver_options: linear_solver_options[key] = _convert_bool_to_str(val, key) convert_bool_to_str_options = { "dependency_detection_with_rhs": self.dependency_detection_with_rhs, "check_derivatives_for_naninf": self.check_derivatives_for_naninf, "jac_c_constant": self.jac_c_constant, "jac_d_constant": self.jac_d_constant, "hessian_constant": 
                self.hessian_constant,
            "least_square_init_primal": self.least_square_init_primal,
            "least_square_init_duals": self.least_square_init_duals,
            "warm_start_init_point": self.warm_start_init_point,
            "warm_start_same_structure": self.warm_start_same_structure,
            "warm_start_entire_iterate": self.warm_start_entire_iterate,
            "replace_bounds": self.replace_bounds,
            "skip_finalize_solution_call": self.skip_finalize_solution_call,
            "timing_statistics": self.timing_statistics,
            "adaptive_mu_restore_previous_iterate": (
                self.adaptive_mu_restore_previous_iterate
            ),
            "mu_allow_fast_monotone_decrease": self.mu_allow_fast_monotone_decrease,
            "accept_every_trial_step": self.accept_every_trial_step,
            "skip_corr_if_neg_curv": self.skip_corr_if_neg_curv,
            "skip_corr_in_monotone_mode": self.skip_corr_in_monotone_mode,
            "recalc_y": self.recalc_y,
            "mehrotra_algorithm": self.mehrotra_algorithm,
            "fast_step_computation": self.fast_step_computation,
            "neg_curv_test_reg": self.neg_curv_test_reg,
            "perturb_always_cd": self.perturb_always_cd,
            "expect_infeasible_problem": self.expect_infeasible_problem,
            "start_with_resto": self.start_with_resto,
            "evaluate_orig_obj_at_resto_trial": self.evaluate_orig_obj_at_resto_trial,
            "limited_memory_special_for_resto": self.limited_memory_special_for_resto,
            "honor_original_bounds": self.honor_original_bounds,
        }
        converted_bool_to_str_options = {
            key: _convert_bool_to_str(val, key)
            for key, val in convert_bool_to_str_options.items()
        }

        # Assemble the full Ipopt option dict; names and grouping follow the
        # Ipopt options reference.
        options = {
            # disable verbosity
            "print_level": 0,
            "ma77_print_level": -1,
            "ma86_print_level": -1,
            "ma97_print_level": -1,
            "pardiso_msglvl": 0,
            # disable derivative checker
            "derivative_test": "none",
            "s_max": float(self.s_max),
            "max_iter": self.stopping_maxiter,
            "max_wall_time": float(self.stopping_max_wall_time_seconds),
            "max_cpu_time": self.stopping_max_cpu_time,
            "dual_inf_tol": self.dual_inf_tol,
            "constr_viol_tol": self.constr_viol_tol,
            "compl_inf_tol": self.compl_inf_tol,
            # acceptable heuristic
            "acceptable_iter": self.acceptable_iter,
            "acceptable_tol": self.acceptable_tol,
            "acceptable_dual_inf_tol": self.acceptable_dual_inf_tol,
            "acceptable_constr_viol_tol": self.acceptable_constr_viol_tol,
            "acceptable_compl_inf_tol": self.acceptable_compl_inf_tol,
            "acceptable_obj_change_tol": self.acceptable_obj_change_tol,
            # bounds and more
            "diverging_iterates_tol": self.diverging_iterates_tol,
            "nlp_lower_bound_inf": self.nlp_lower_bound_inf,
            "nlp_upper_bound_inf": self.nlp_upper_bound_inf,
            "fixed_variable_treatment": self.fixed_variable_treatment,
            "dependency_detector": _convert_none_to_str(self.dependency_detector),
            "kappa_d": self.kappa_d,
            "bound_relax_factor": self.bound_relax_factor,
            "honor_original_bounds": self.honor_original_bounds,
            # scaling
            "nlp_scaling_method": _convert_none_to_str(self.nlp_scaling_method),
            "obj_scaling_factor": float(self.obj_scaling_factor),
            "nlp_scaling_max_gradient": float(self.nlp_scaling_max_gradient),
            "nlp_scaling_obj_target_gradient": float(
                self.nlp_scaling_obj_target_gradient
            ),
            "nlp_scaling_constr_target_gradient": float(
                self.nlp_scaling_constr_target_gradient
            ),
            "nlp_scaling_min_value": float(self.nlp_scaling_min_value),
            # initialization
            "bound_push": self.bound_push,
            "bound_frac": self.bound_frac,
            "slack_bound_push": self.slack_bound_push,
            "slack_bound_frac": self.slack_bound_frac,
            "constr_mult_init_max": float(self.constr_mult_init_max),
            "bound_mult_init_val": float(self.bound_mult_init_val),
            "bound_mult_init_method": self.bound_mult_init_method,
            # warm start
            "warm_start_bound_push": self.warm_start_bound_push,
            "warm_start_bound_frac": self.warm_start_bound_frac,
            "warm_start_slack_bound_push": self.warm_start_slack_bound_push,
            "warm_start_slack_bound_frac": self.warm_start_slack_bound_frac,
            "warm_start_mult_bound_push": self.warm_start_mult_bound_push,
            "warm_start_mult_init_max": self.warm_start_mult_init_max,
            "warm_start_target_mu": self.warm_start_target_mu,
            # more miscellaneous
            "option_file_name": self.option_file_name,
            # barrier parameter update
            "mu_target": float(self.mu_target),
            "mu_max_fact": float(self.mu_max_fact),
            "mu_max": float(self.mu_max),
            "mu_min": float(self.mu_min),
            "adaptive_mu_globalization": self.adaptive_mu_globalization,
            "adaptive_mu_kkterror_red_iters": self.adaptive_mu_kkterror_red_iters,
            "adaptive_mu_kkterror_red_fact": self.adaptive_mu_kkterror_red_fact,
            "filter_margin_fact": float(self.filter_margin_fact),
            "filter_max_margin": float(self.filter_max_margin),
            "adaptive_mu_monotone_init_factor": self.adaptive_mu_monotone_init_factor,
            "adaptive_mu_kkt_norm_type": self.adaptive_mu_kkt_norm_type,
            "mu_strategy": self.mu_strategy,
            "mu_oracle": self.mu_oracle,
            "fixed_mu_oracle": self.fixed_mu_oracle,
            "mu_init": self.mu_init,
            "barrier_tol_factor": float(self.barrier_tol_factor),
            "mu_linear_decrease_factor": self.mu_linear_decrease_factor,
            "mu_superlinear_decrease_power": self.mu_superlinear_decrease_power,
            "tau_min": self.tau_min,
            "sigma_max": float(self.sigma_max),
            "sigma_min": float(self.sigma_min),
            "quality_function_norm_type": self.quality_function_norm_type,
            "quality_function_centrality": _convert_none_to_str(
                self.quality_function_centrality
            ),
            "quality_function_balancing_term": _convert_none_to_str(
                self.quality_function_balancing_term
            ),
            "quality_function_max_section_steps": (
                self.quality_function_max_section_steps
            ),
            "quality_function_section_sigma_tol": (
                self.quality_function_section_sigma_tol
            ),
            "quality_function_section_qf_tol": self.quality_function_section_qf_tol,
            # linear search
            "line_search_method": self.line_search_method,
            "alpha_red_factor": self.alpha_red_factor,
            "accept_after_max_steps": self.accept_after_max_steps,
            "alpha_for_y": self.alpha_for_y,
            "alpha_for_y_tol": float(self.alpha_for_y_tol),
            "tiny_step_tol": self.tiny_step_tol,
            "tiny_step_y_tol": self.tiny_step_y_tol,
            "watchdog_shortened_iter_trigger": self.watchdog_shortened_iter_trigger,
            "watchdog_trial_iter_max": self.watchdog_trial_iter_max,
            "theta_max_fact": float(self.theta_max_fact),
            "theta_min_fact": self.theta_min_fact,
            "eta_phi": self.eta_phi,
            "delta": float(self.delta),
            "s_phi": self.s_phi,
            "s_theta": self.s_theta,
            "gamma_phi": self.gamma_phi,
            "gamma_theta": self.gamma_theta,
            "alpha_min_frac": self.alpha_min_frac,
            "max_soc": self.max_soc,
            "kappa_soc": self.kappa_soc,
            "obj_max_inc": float(self.obj_max_inc),
            "max_filter_resets": self.max_filter_resets,
            "filter_reset_trigger": self.filter_reset_trigger,
            "corrector_type": _convert_none_to_str(self.corrector_type),
            "corrector_compl_avrg_red_fact": float(self.corrector_compl_avrg_red_fact),
            "soc_method": self.soc_method,
            "nu_init": self.nu_init,
            "nu_inc": self.nu_inc,
            "rho": self.rho,
            "kappa_sigma": self.kappa_sigma,
            "recalc_y_feas_tol": self.recalc_y_feas_tol,
            "slack_move": self.slack_move,
            "constraint_violation_norm_type": self.constraint_violation_norm_type,
            # step calculation
            "min_refinement_steps": self.min_refinement_steps,
            "max_refinement_steps": self.max_refinement_steps,
            "residual_ratio_max": self.residual_ratio_max,
            "residual_ratio_singular": self.residual_ratio_singular,
            "residual_improvement_factor": float(self.residual_improvement_factor),
            "neg_curv_test_tol": float(self.neg_curv_test_tol),
            "max_hessian_perturbation": self.max_hessian_perturbation,
            "min_hessian_perturbation": self.min_hessian_perturbation,
            "perturb_inc_fact_first": float(self.perturb_inc_fact_first),
            "perturb_inc_fact": float(self.perturb_inc_fact),
            "perturb_dec_fact": float(self.perturb_dec_fact),
            "first_hessian_perturbation": float(self.first_hessian_perturbation),
            "jacobian_regularization_value": float(self.jacobian_regularization_value),
            "jacobian_regularization_exponent": float(
                self.jacobian_regularization_exponent
            ),
            # restoration phase
            "expect_infeasible_problem_ctol": self.expect_infeasible_problem_ctol,
            "expect_infeasible_problem_ytol": self.expect_infeasible_problem_ytol,
            "soft_resto_pderror_reduction_factor": (
                self.soft_resto_pderror_reduction_factor
            ),
            "max_soft_resto_iters": self.max_soft_resto_iters,
            "required_infeasibility_reduction": float(
                self.required_infeasibility_reduction
            ),
            "max_resto_iter": self.max_resto_iter,
            "resto_penalty_parameter": float(self.resto_penalty_parameter),
            "resto_proximity_weight": float(self.resto_proximity_weight),
            "bound_mult_reset_threshold": float(self.bound_mult_reset_threshold),
            "constr_mult_reset_threshold": float(self.constr_mult_reset_threshold),
            "resto_failure_feasibility_threshold": float(
                resto_failure_feasibility_threshold
            ),
            # hessian approximation
            "limited_memory_aug_solver": self.limited_memory_aug_solver,
            "limited_memory_max_history": self.limited_memory_max_history,
            "limited_memory_update_type": self.limited_memory_update_type,
            "limited_memory_initialization": self.limited_memory_initialization,
            "limited_memory_init_val": float(self.limited_memory_init_val),
            "limited_memory_init_val_max": self.limited_memory_init_val_max,
            "limited_memory_init_val_min": self.limited_memory_init_val_min,
            "limited_memory_max_skipping": self.limited_memory_max_skipping,
            "hessian_approximation": self.hessian_approximation,
            "hessian_approximation_space": self.hessian_approximation_space,
            # linear solver
            "linear_solver": self.linear_solver,
            **linear_solver_options,
            **converted_bool_to_str_options,
        }

        raw_res = cyipopt.minimize_ipopt(
            fun=problem.fun,
            x0=x0,
            bounds=_get_scipy_bounds(problem.bounds),
            jac=problem.jac,
            constraints=problem.nonlinear_constraints,
            tol=self.convergence_ftol_rel,
            options=options,
        )
        res = process_scipy_result(raw_res)
        return res


def _get_scipy_bounds(bounds: InternalBounds) -> ScipyBounds:
    # Translate optimagic's internal bounds into scipy's Bounds object, which
    # cyipopt.minimize_ipopt accepts.
    return ScipyBounds(lb=bounds.lower, ub=bounds.upper)


def _convert_bool_to_str(var, name):
    """Convert input to either 'yes' or 'no' and check the output is yes or no.

    Args:
        var (str or bool): user input
        name (str): name of the variable.

    Returns:
        out (str): "yes" or "no".

    """
    if var is True:
        out = "yes"
    elif var is False:
        out = "no"
    else:
        out = var
    if out not in {"yes", "no"}:
        raise ValueError(
            f"{name} must be 'yes', 'no', True or False. You specified {var}."
) return out def _convert_none_to_str(var): out = "none" if var is None else var return out ================================================ FILE: src/optimagic/optimizers/nag_optimizers.py ================================================ """Implement algorithms by the (Numerical Algorithms Group)[https://www.nag.com/]. The following arguments are not supported as ``algo_options``: - ``scaling_within_bounds`` - ``init.run_in_parallel`` - ``do_logging``, ``print_progress`` and all their advanced options. """ import warnings from dataclasses import dataclass from typing import Any, Callable, Literal, cast import numpy as np from numpy.typing import NDArray from optimagic import mark from optimagic.config import IS_DFOLS_INSTALLED, IS_PYBOBYQA_INSTALLED from optimagic.exceptions import NotInstalledError from optimagic.optimization.algo_options import STOPPING_MAXFUN from optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult from optimagic.optimization.internal_optimization_problem import ( InternalOptimizationProblem, ) from optimagic.typing import ( AggregationLevel, NonNegativeFloat, NonNegativeInt, PositiveInt, ) from optimagic.utilities import calculate_trustregion_initial_radius CONVERGENCE_MINIMAL_TRUSTREGION_RADIUS_TOLERANCE = 1e-8 """float: Stop when the lower trust region radius falls below this value.""" CONVERGENCE_SLOW_PROGRESS = { "threshold_to_characterize_as_slow": 1e-8, "max_insufficient_improvements": None, "comparison_period": 5, } """dict: Specification of when to terminate or reset the optimization because of only slow improvements. This is similar to an absolute criterion tolerance only that instead of a single improvement the average over several iterations must be small. Possible entries are: threshold_to_characterize_as_slow (float): Threshold whether an improvement is insufficient. Note: the improvement is divided by the ``comparison_period``. 
So this is the required average improvement per iteration over the comparison period. max_insufficient_improvements (int): Number of consecutive insufficient improvements before termination (or reset). Default is ``20 * len(x)``. comparison_period (int): How many iterations to go back to calculate the improvement. For example 5 would mean that each criterion evaluation is compared to the criterion value from 5 iterations before. """ THRESHOLD_FOR_SAFETY_STEP = 0.5 r"""float: Threshold for when to call the safety step (:math:`\gamma_s`). :math:`\text{proposed step} \leq \text{threshold_for_safety_step} \cdot \text{current_lower_trustregion_radius}`. """ CONVERGENCE_NOISE_CORRECTED_FTOL = 1.0 """float: Stop when the evaluations on the set of interpolation points all fall within this factor of the noise level. The default is 1, i.e. when all evaluations are within the noise level. If you want to not use this criterion but still flag your criterion function as noisy, set this tolerance to 0.0. .. warning:: Very small values, as in most other tolerances don't make sense here. """ TRUSTREGION_THRESHOLD_SUCCESSFUL = 0.1 """float: Share of the predicted improvement that has to be achieved for a trust region iteration to count as successful. """ TRUSTREGION_THRESHOLD_VERY_SUCCESSFUL = 0.7 """float: Share of predicted improvement that has to be achieved for a trust region iteration to count as very successful.``criterion_noisy`` """ TRUSTREGION_SHRINKING_FACTOR_NOT_SUCCESSFUL = None """float: Ratio by which to shrink the upper trust region radius when realized improvement does not match the ``threshold_successful``. The default is 0.98 if the criterion is noisy and 0.5 else. """ TRUSTREGION_EXPANSION_FACTOR_SUCCESSFUL = 2.0 r"""float: Ratio by which to expand the upper trust region radius :math:`\Delta_k` in very successful iterations (:math:`\gamma_{inc}` in the notation of the paper). 
""" TRUSTREGION_EXPANSION_FACTOR_VERY_SUCCESSFUL = 4.0 r"""float: Ratio of the proposed step ($\|s_k\|$) by which to expand the upper trust region radius (:math:`\Delta_k`) in very successful iterations (:math:`\overline{\gamma}_{inc}` in the notation of the paper). """ TRUSTREGION_SHRINKING_FACTOR_LOWER_RADIUS = None r"""float: Ratio by which to shrink the lower trust region radius (:math:`\rho_k`) (:math:`\alpha_1` in the notation of the paper). Default is 0.9 if the criterion is noisy and 0.1 else. """ TRUSTREGION_SHRINKING_FACTOR_UPPER_RADIUS = None r"""float: Ratio of the current lower trust region (:math:`\rho_k`) by which to shrink the upper trust region radius (:math:`\Delta_k`) when the lower one is shrunk (:math:`\alpha_2` in the notation of the paper). Default is 0.95 if the criterion is noisy and 0.5 else.""" RANDOM_DIRECTIONS_ORTHOGONAL = True """bool: Whether to make randomly drawn initial directions orthogonal.""" INTERPOLATION_ROUNDING_ERROR = 0.1 r"""float: Internally, all the NAG algorithms store interpolation points with respect to a base point :math:`x_b`; that is, we store :math:`\{y_t-x_b\}`, which reduces the risk of roundoff errors. We shift :math:`x_b` to :math:`x_k` when :math:`\text{proposed step} \leq \text{interpolation_rounding_error} \cdot \|x_k-x_b\|`. 
""" CLIP_CRITERION_IF_OVERFLOWING = True """bool: Whether to clip the criterion to avoid ``OverflowError``.""" TRUSTREGION_PRECONDITION_INTERPOLATION = True """bool: whether to scale the interpolation linear system to improve conditioning.""" RESET_OPTIONS = { "use_resets": None, "minimal_trustregion_radius_tolerance_scaling_at_reset": 1.0, "reset_type": "soft", "move_center_at_soft_reset": True, "reuse_criterion_value_at_hard_reset": True, "max_iterations_without_new_best_after_soft_reset": None, "auto_detect": True, "auto_detect_history": 30, "auto_detect_min_jacobian_increase": 0.015, "auto_detect_min_correlations": 0.1, "points_to_replace_at_soft_reset": 3, "max_consecutive_unsuccessful_resets": 10, # just bobyqa "max_unsuccessful_resets": None, "trust_region_scaling_at_unsuccessful_reset": None, # just dfols "max_interpolation_points": None, "n_extra_interpolation_points_per_soft_reset": 0, "n_extra_interpolation_points_per_hard_reset": 0, "n_additional_extra_points_to_replace_per_reset": 0, } r"""dict: Options for reseting the optimization. Possible entries are: use_resets (bool): Whether to do resets when the lower trust region radius (:math:`\rho_k`) reaches the stopping criterion (:math:`\rho_{end}`), or (optionally) when all interpolation points are within noise level. Default is ``True`` if the criterion is noisy. minimal_trustregion_radius_tolerance_scaling_at_reset (float): Factor with which the trust region stopping criterion is multiplied at each reset. reset_type (str): Whether to use "soft" or "hard" resets. Default is "soft". move_center_at_soft_reset (bool): Whether to move the trust region center ($x_k$) to the best new point evaluated in stead of keeping it constant. points_to_replace_at_soft_reset (int): Number of interpolation points to move at each soft reset. reuse_criterion_value_at_hard_reset (bool): Whether or not to recycle the criterion value at the best iterate found when performing a hard reset. This saves one criterion evaluation. 
max_iterations_without_new_best_after_soft_reset (int): The maximum number of successful steps in a given run where the new criterion value is worse than the best value found in previous runs before terminating. Default is ``max_criterion_evaluations``. auto_detect (bool): Whether or not to automatically determine when to reset. This is an additional condition and resets can still be triggered by small upper trust region radius, etc. There are two criteria used: upper trust region radius shrinkage (no increases over the history, more decreases than no changes) and changes in the model Jacobian (consistently increasing trend as measured by slope and correlation coefficient of the line of best fit). auto_detect_history (int): How many iterations of model changes and trust region radii to store. auto_detect_min_jacobian_increase (float): Minimum rate of increase of the Jacobian over past iterations to cause a reset. auto_detect_min_correlations (float): Minimum correlation of the Jacobian data set required to cause a reset. max_consecutive_unsuccessful_resets (int): maximum number of consecutive unsuccessful resets allowed (i.e. resets which did not outperform the best known value from earlier runs). Only used when using nag_bobyqa: max_unsuccessful_resets (int): number of total unsuccessful resets allowed. Default is 20 if ``seek_global_optimum`` and else unrestricted. trust_region_scaling_at_unsuccessful_reset (float): Factor by which to expand the initial lower trust region radius (:math:`\rho_{beg}`) after unsuccessful resets. Default is 1.1 if ``seek_global_optimum`` else 1. Only used when using nag_dfols: max_interpolation_points (int): Maximum allowed value of the number of interpolation points. This is useful if the number of interpolation points increases with each reset, e.g. when ``n_extra_interpolation_points_per_soft_reset > 0``. The default is ``n_interpolation_points``. 
n_extra_interpolation_points_per_soft_reset (int): Number of points to add to the interpolation set with each soft reset. n_extra_interpolation_points_per_hard_reset (int): Number of points to add to the interpolation set with each hard reset. n_additional_extra_points_to_replace_per_reset (int): This parameter modifies ``n_extra_points_to_replace_successful``. With each reset ``n_extra_points_to_replace_successful`` is increased by this number. """ TRUSTREGION_FAST_START_OPTIONS = { "min_inital_points": None, "method": "auto", "scale_of_trustregion_step_perturbation": None, "scale_of_jacobian_components_perturbation": 1e-2, # the following will be growing.full_rank.min_sing_val # but it not supported yet by DF-OLS. "floor_of_jacobian_singular_values": 1, "jacobian_max_condition_number": 1e8, "geometry_improving_steps": False, "safety_steps": True, "shrink_upper_radius_in_safety_steps": False, "full_geometry_improving_step": False, "reset_trustregion_radius_after_fast_start": False, "reset_min_trustregion_radius_after_fast_start": False, "shrinking_factor_not_successful": None, "n_extra_search_directions_per_iteration": 0, } r"""dict: Options to start the optimization while building the full trust region model. To activate this, set the number of interpolation points at which to evaluate the criterion before doing the first step, `min_initial_points`, to something smaller than the number of parameters. The following options can be specified: min_initial_points (int): Number of initial interpolation points in addition to the start point. This should only be changed to a value less than ``len(x)``, and only if the default setup cost of ``len(x) + 1`` evaluations of the criterion is impractical. If this is set to be less than the default, the input value of ``n_interpolation_points`` should be set to ``len(x)``. If the default is used, all the other parameters have no effect. Default is ``n_interpolation_points - 1``. 
If the default setup costs of the evaluations are very large, DF-OLS can start with less than ``len(x)`` interpolation points and add points to the trust region model with every iteration. method ("jacobian", "trustregion" or "auto"): When there are less interpolation points than ``len(x)`` the model is underdetermined. This can be fixed in two ways: If "jacobian", the interpolated Jacobian is perturbed to have full rank, allowing the trust region step to include components in the full search space. This is the default if ``len(x) \geq number of root contributions``. If "trustregion_step", the trust region step is perturbed by an orthogonal direction not yet searched. It is the default if ``len(x) < number of root contributions``. scale_of_trustregion_step_perturbation (float): When adding new search directions, the length of the step is the trust region radius multiplied by this value. The default is 0.1 if ``method == "trustregion"`` else 1. scale_of_jacobian_components_perturbation (float): Magnitude of extra components added to the Jacobian. Default is 1e-2. floor_of_jacobian_singular_values (float): Floor singular values of the Jacobian at this factor of the last non zero value. As of version 1.2.1 this option is not yet supported by DF-OLS! scale_of_jacobian_singular_value_floor (float): Floor singular values of the Jacobian at this factor of the last nonzero value. jacobian_max_condition_number (float): Cap on the condition number of Jacobian after applying floors to singular values (effectively another floor on the smallest singular value, since the largest singular value is fixed). geometry_improving_steps (bool): Whether to do geometry-improving steps in the trust region algorithm, as per the usual algorithm during the fast start. safety_steps (bool): Whether to perform safety steps. shrink_upper_radius_in_safety_steps (bool): During the fast start whether to shrink the upper trust region radius in safety steps. 
full_geometry_improving_step (bool): During the fast start whether to do a full geometry-improving step within safety steps (the same as the post fast start phase of the algorithm). Since this involves reducing the upper trust region radius, this can only be `True` if `shrink_upper_radius_in_safety_steps == False`. reset_trustregion_radius_after_fast_start (bool): Whether or not to reset the upper trust region radius to its initial value at the end of the fast start phase. reset_min_trustregion_radius_after_fast_start (bool): Whether or not to reset the minimum trust region radius (:math:`\rho_k`) to its initial value at the end of the fast start phase. shrinking_factor_not_successful (float): Ratio by which to shrink the trust region radius when realized improvement does not match the ``threshold_for_successful_iteration`` during the fast start phase. By default it is the same as ``reduction_when_not_successful``. n_extra_search_directions_per_iteration (int): Number of new search directions to add with each iteration where we do not have a full set of search directions. This approach is not recommended! Default is 0. 
""" @mark.minimizer( name="nag_dfols", solver_type=AggregationLevel.LEAST_SQUARES, is_available=IS_DFOLS_INSTALLED, is_global=False, needs_jac=False, needs_hess=False, needs_bounds=False, supports_parallelism=False, supports_bounds=True, supports_infinite_bounds=True, supports_linear_constraints=False, supports_nonlinear_constraints=False, disable_history=False, ) @dataclass(frozen=True) class NagDFOLS(Algorithm): clip_criterion_if_overflowing: bool = CLIP_CRITERION_IF_OVERFLOWING convergence_minimal_trustregion_radius_tolerance: NonNegativeFloat = ( CONVERGENCE_MINIMAL_TRUSTREGION_RADIUS_TOLERANCE # noqa: E501 ) convergence_noise_corrected_criterion_tolerance: NonNegativeFloat = ( CONVERGENCE_NOISE_CORRECTED_FTOL # noqa: E501 ) convergence_ftol_scaled: NonNegativeFloat = 0.0 convergence_slow_progress: dict[str, Any] | None = None initial_directions: Literal[ "coordinate", "random", ] = "coordinate" interpolation_rounding_error: float = INTERPOLATION_ROUNDING_ERROR noise_additive_level: float | None = None noise_multiplicative_level: float | None = None noise_n_evals_per_point: NonNegativeInt | None = None random_directions_orthogonal: bool = RANDOM_DIRECTIONS_ORTHOGONAL stopping_maxfun: PositiveInt = STOPPING_MAXFUN threshold_for_safety_step: NonNegativeFloat = THRESHOLD_FOR_SAFETY_STEP trustregion_expansion_factor_successful: NonNegativeFloat = ( TRUSTREGION_EXPANSION_FACTOR_SUCCESSFUL ) trustregion_expansion_factor_very_successful: NonNegativeFloat = ( TRUSTREGION_EXPANSION_FACTOR_VERY_SUCCESSFUL # noqa: E501 ) trustregion_fast_start_options: dict[str, Any] | None = None trustregion_initial_radius: NonNegativeFloat | None = None trustregion_method_to_replace_extra_points: ( Literal["geometry_improving", "momentum"] | None ) = "geometry_improving" trustregion_n_extra_points_to_replace_successful: NonNegativeInt = 0 trustregion_n_interpolation_points: NonNegativeInt | None = None trustregion_precondition_interpolation: bool = ( 
TRUSTREGION_PRECONDITION_INTERPOLATION ) trustregion_reset_options: dict[str, Any] | None = None trustregion_shrinking_factor_not_successful: NonNegativeFloat | None = ( TRUSTREGION_SHRINKING_FACTOR_NOT_SUCCESSFUL # noqa: E501 ) trustregion_shrinking_factor_lower_radius: NonNegativeFloat | None = ( TRUSTREGION_SHRINKING_FACTOR_LOWER_RADIUS ) trustregion_shrinking_factor_upper_radius: NonNegativeFloat | None = ( TRUSTREGION_SHRINKING_FACTOR_UPPER_RADIUS ) trustregion_threshold_successful: float = TRUSTREGION_THRESHOLD_SUCCESSFUL trustregion_threshold_very_successful: float = TRUSTREGION_THRESHOLD_VERY_SUCCESSFUL def _solve_internal_problem( self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] ) -> InternalOptimizeResult: res = nag_dfols_internal( criterion=problem.fun, x=x0, lower_bounds=problem.bounds.lower, upper_bounds=problem.bounds.upper, clip_criterion_if_overflowing=self.clip_criterion_if_overflowing, convergence_minimal_trustregion_radius_tolerance=self.convergence_minimal_trustregion_radius_tolerance, # noqa: E501 convergence_noise_corrected_criterion_tolerance=self.convergence_noise_corrected_criterion_tolerance, # noqa: E501 convergence_ftol_scaled=self.convergence_ftol_scaled, convergence_slow_progress=self.convergence_slow_progress, initial_directions=self.initial_directions, interpolation_rounding_error=self.interpolation_rounding_error, noise_additive_level=self.noise_additive_level, noise_multiplicative_level=self.noise_multiplicative_level, noise_n_evals_per_point=self.noise_n_evals_per_point, random_directions_orthogonal=self.random_directions_orthogonal, stopping_maxfun=self.stopping_maxfun, threshold_for_safety_step=self.threshold_for_safety_step, trustregion_expansion_factor_successful=self.trustregion_expansion_factor_successful, trustregion_expansion_factor_very_successful=self.trustregion_expansion_factor_very_successful, # noqa: E501 trustregion_fast_start_options=self.trustregion_fast_start_options, 
trustregion_initial_radius=self.trustregion_initial_radius, trustregion_method_to_replace_extra_points=self.trustregion_method_to_replace_extra_points, trustregion_n_extra_points_to_replace_successful=self.trustregion_n_extra_points_to_replace_successful, trustregion_n_interpolation_points=self.trustregion_n_interpolation_points, trustregion_precondition_interpolation=self.trustregion_precondition_interpolation, trustregion_reset_options=self.trustregion_reset_options, trustregion_shrinking_factor_not_successful=self.trustregion_shrinking_factor_not_successful, trustregion_shrinking_factor_lower_radius=self.trustregion_shrinking_factor_lower_radius, trustregion_shrinking_factor_upper_radius=self.trustregion_shrinking_factor_upper_radius, trustregion_threshold_successful=self.trustregion_threshold_successful, trustregion_threshold_very_successful=self.trustregion_threshold_very_successful, ) return res def nag_dfols_internal( criterion, x, lower_bounds, upper_bounds, clip_criterion_if_overflowing, convergence_minimal_trustregion_radius_tolerance, # noqa: E501 convergence_noise_corrected_criterion_tolerance, # noqa: E501 convergence_ftol_scaled, convergence_slow_progress, initial_directions, interpolation_rounding_error, noise_additive_level, noise_multiplicative_level, noise_n_evals_per_point, random_directions_orthogonal, stopping_maxfun, threshold_for_safety_step, trustregion_expansion_factor_successful, trustregion_expansion_factor_very_successful, # noqa: E501 trustregion_fast_start_options, trustregion_initial_radius, trustregion_method_to_replace_extra_points, trustregion_n_extra_points_to_replace_successful, trustregion_n_interpolation_points, trustregion_precondition_interpolation, trustregion_reset_options, trustregion_shrinking_factor_not_successful, # noqa: E501 trustregion_shrinking_factor_lower_radius, trustregion_shrinking_factor_upper_radius, trustregion_threshold_successful, trustregion_threshold_very_successful, ): r"""Minimize a function with least 
squares structure using DFO-LS. For details see :ref: `list_of_nag_algorithms`. """ if not IS_DFOLS_INSTALLED: raise NotInstalledError( "The 'nag_dfols' algorithm requires the DFO-LS package to be installed." "You can install it with 'pip install DFO-LS'. " "For additional installation instructions visit: ", r"https://numericalalgorithmsgroup.github.io/dfols/build/html/install.html", ) import dfols if trustregion_method_to_replace_extra_points == "momentum": trustregion_use_momentum = True elif trustregion_method_to_replace_extra_points in ["geometry_improving", None]: trustregion_use_momentum = False else: raise ValueError( "trustregion_method_to_replace_extra_points must be " "'geometry_improving', 'momentum' or None." ) advanced_options, trustregion_reset_options = _create_nag_advanced_options( x=x, noise_multiplicative_level=noise_multiplicative_level, noise_additive_level=noise_additive_level, noise_n_evals_per_point=noise_n_evals_per_point, convergence_noise_corrected_criterion_tolerance=convergence_noise_corrected_criterion_tolerance, # noqa: E501 trustregion_initial_radius=trustregion_initial_radius, trustregion_reset_options=trustregion_reset_options, convergence_slow_progress=convergence_slow_progress, interpolation_rounding_error=interpolation_rounding_error, threshold_for_safety_step=threshold_for_safety_step, clip_criterion_if_overflowing=clip_criterion_if_overflowing, initial_directions=initial_directions, random_directions_orthogonal=random_directions_orthogonal, trustregion_precondition_interpolation=trustregion_precondition_interpolation, trustregion_threshold_successful=trustregion_threshold_successful, trustregion_threshold_very_successful=trustregion_threshold_very_successful, trustregion_shrinking_factor_not_successful=trustregion_shrinking_factor_not_successful, # noqa: E501 trustregion_expansion_factor_successful=trustregion_expansion_factor_successful, trustregion_expansion_factor_very_successful=trustregion_expansion_factor_very_successful, 
# noqa: E501 trustregion_shrinking_factor_lower_radius=trustregion_shrinking_factor_lower_radius, # noqa: E501 trustregion_shrinking_factor_upper_radius=trustregion_shrinking_factor_upper_radius, # noqa: E501 ) fast_start = _build_options_dict( user_input=trustregion_fast_start_options, default_options=TRUSTREGION_FAST_START_OPTIONS, ) if fast_start["floor_of_jacobian_singular_values"] != 1: warnings.warn( "Setting the `floor_of_jacobian_singular_values` is not supported by " "DF-OLS as of version 1.2.1." ) if ( fast_start["shrink_upper_radius_in_safety_steps"] and fast_start["full_geometry_improving_step"] ): raise ValueError( "full_geometry_improving_step of the trustregion_fast_start_options can " "only be True if shrink_upper_radius_in_safety_steps is False." ) ( faststart_jac, faststart_step, ) = _get_fast_start_method(fast_start["method"]) if ( trustregion_reset_options["n_extra_interpolation_points_per_soft_reset"] < trustregion_reset_options["n_extra_interpolation_points_per_soft_reset"] ): raise ValueError( "In the trustregion_reset_options " "'n_extra_interpolation_points_per_soft_reset' must " "be larger or the same as 'n_extra_interpolation_points_per_hard_reset'." 
) dfols_options = { "growing.full_rank.use_full_rank_interp": faststart_jac, "growing.perturb_trust_region_step": faststart_step, "restarts.hard.use_old_rk": trustregion_reset_options[ "reuse_criterion_value_at_hard_reset" ], "restarts.auto_detect.min_chgJ_slope": trustregion_reset_options[ "auto_detect_min_jacobian_increase" ], "restarts.max_npt": trustregion_reset_options["max_interpolation_points"], "restarts.increase_npt": trustregion_reset_options[ "n_extra_interpolation_points_per_soft_reset" ] > 0, "restarts.increase_npt_amt": trustregion_reset_options[ "n_extra_interpolation_points_per_soft_reset" ], "restarts.hard.increase_ndirs_initial_amt": trustregion_reset_options[ "n_extra_interpolation_points_per_hard_reset" ] - trustregion_reset_options["n_extra_interpolation_points_per_soft_reset"], "model.rel_tol": convergence_ftol_scaled, "regression.num_extra_steps": trustregion_n_extra_points_to_replace_successful, "regression.momentum_extra_steps": trustregion_use_momentum, "regression.increase_num_extra_steps_with_restart": trustregion_reset_options[ "n_additional_extra_points_to_replace_per_reset" ], "growing.ndirs_initial": fast_start["min_inital_points"], "growing.delta_scale_new_dirns": fast_start[ "scale_of_trustregion_step_perturbation" ], "growing.full_rank.scale_factor": fast_start[ "scale_of_jacobian_components_perturbation" ], "growing.full_rank.svd_max_jac_cond": fast_start[ "jacobian_max_condition_number" ], "growing.do_geom_steps": fast_start["geometry_improving_steps"], "growing.safety.do_safety_step": fast_start["safety_steps"], "growing.safety.reduce_delta": fast_start[ "shrink_upper_radius_in_safety_steps" ], "growing.safety.full_geom_step": fast_start["full_geometry_improving_step"], "growing.reset_delta": fast_start["reset_trustregion_radius_after_fast_start"], "growing.reset_rho": fast_start[ "reset_min_trustregion_radius_after_fast_start" ], "growing.gamma_dec": fast_start["shrinking_factor_not_successful"], 
"growing.num_new_dirns_each_iter": fast_start[ "n_extra_search_directions_per_iteration" ], "logging.save_diagnostic_info": True, "logging.save_xk": True, } advanced_options.update(dfols_options) raw_res = dfols.solve( criterion, x0=x, bounds=(lower_bounds, upper_bounds), maxfun=stopping_maxfun, rhobeg=trustregion_initial_radius, npt=trustregion_n_interpolation_points, rhoend=convergence_minimal_trustregion_radius_tolerance, nsamples=noise_n_evals_per_point, objfun_has_noise=noise_additive_level or noise_multiplicative_level, scaling_within_bounds=False, do_logging=False, print_progress=False, user_params=advanced_options, ) res = _process_nag_result(raw_res, len(x)) out = InternalOptimizeResult( x=res["solution_x"], fun=res["solution_criterion"], success=res["success"], message=res["message"], n_iterations=res["n_iterations"], n_fun_evals=res["n_fun_evals"], ) return out @mark.minimizer( name="nag_pybobyqa", solver_type=AggregationLevel.SCALAR, is_available=IS_PYBOBYQA_INSTALLED, is_global=False, needs_jac=False, needs_hess=False, needs_bounds=False, supports_parallelism=False, supports_bounds=True, supports_infinite_bounds=True, supports_linear_constraints=False, supports_nonlinear_constraints=False, disable_history=False, ) @dataclass(frozen=True) class NagPyBOBYQA(Algorithm): clip_criterion_if_overflowing: bool = CLIP_CRITERION_IF_OVERFLOWING convergence_minimal_trustregion_radius_tolerance: NonNegativeFloat = ( CONVERGENCE_MINIMAL_TRUSTREGION_RADIUS_TOLERANCE # noqa: E501 ) convergence_noise_corrected_criterion_tolerance: NonNegativeFloat = ( CONVERGENCE_NOISE_CORRECTED_FTOL # noqa: E501 ) convergence_criterion_value: float | None = None convergence_slow_progress: dict[str, Any] | None = None initial_directions: Literal[ "coordinate", "random", ] = "coordinate" interpolation_rounding_error: float = INTERPOLATION_ROUNDING_ERROR noise_additive_level: float | None = None noise_multiplicative_level: float | None = None noise_n_evals_per_point: NonNegativeInt | 
    None = None
    random_directions_orthogonal: bool = RANDOM_DIRECTIONS_ORTHOGONAL
    seek_global_optimum: bool = False
    stopping_max_criterion_evaluations: PositiveInt = STOPPING_MAXFUN
    threshold_for_safety_step: NonNegativeFloat = THRESHOLD_FOR_SAFETY_STEP
    # Trust-region management knobs; None means "use the solver default".
    trustregion_expansion_factor_successful: NonNegativeFloat = (
        TRUSTREGION_EXPANSION_FACTOR_SUCCESSFUL
    )
    trustregion_expansion_factor_very_successful: NonNegativeFloat = (
        TRUSTREGION_EXPANSION_FACTOR_VERY_SUCCESSFUL  # noqa: E501
    )
    trustregion_initial_radius: NonNegativeFloat | None = None
    # NOTE(review): "hession" (sic) is the established public field name;
    # renaming it would break user code that passes algo options.
    trustregion_minimum_change_hession_for_underdetermined_interpolation: bool = True
    trustregion_n_interpolation_points: NonNegativeInt | None = None
    trustregion_precondition_interpolation: bool = (
        TRUSTREGION_PRECONDITION_INTERPOLATION
    )
    trustregion_reset_options: dict[str, Any] | None = None
    trustregion_shrinking_factor_not_successful: NonNegativeFloat | None = (
        TRUSTREGION_SHRINKING_FACTOR_NOT_SUCCESSFUL  # noqa: E501
    )
    trustregion_shrinking_factor_lower_radius: NonNegativeFloat | None = (
        TRUSTREGION_SHRINKING_FACTOR_LOWER_RADIUS
    )
    trustregion_shrinking_factor_upper_radius: NonNegativeFloat | None = (
        TRUSTREGION_SHRINKING_FACTOR_UPPER_RADIUS
    )
    trustregion_threshold_successful: float = TRUSTREGION_THRESHOLD_SUCCESSFUL
    trustregion_threshold_very_successful: float = TRUSTREGION_THRESHOLD_VERY_SUCCESSFUL

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        """Forward the problem and all dataclass fields to ``nag_pybobyqa_internal``."""
        res = nag_pybobyqa_internal(
            # Py-BOBYQA minimizes a scalar criterion; the cast only narrows the
            # static type of problem.fun for the type checker.
            criterion=cast(
                Callable[[NDArray[np.float64]], NDArray[np.float64]],
                problem.fun,
            ),
            x=x0,
            lower_bounds=problem.bounds.lower,
            upper_bounds=problem.bounds.upper,
            clip_criterion_if_overflowing=self.clip_criterion_if_overflowing,
            convergence_minimal_trustregion_radius_tolerance=self.convergence_minimal_trustregion_radius_tolerance,  # noqa: E501
            convergence_noise_corrected_criterion_tolerance=self.convergence_noise_corrected_criterion_tolerance,  # noqa: E501
            convergence_slow_progress=self.convergence_slow_progress,
            convergence_criterion_value=self.convergence_criterion_value,
            initial_directions=self.initial_directions,
            interpolation_rounding_error=self.interpolation_rounding_error,
            noise_additive_level=self.noise_additive_level,
            noise_multiplicative_level=self.noise_multiplicative_level,
            noise_n_evals_per_point=self.noise_n_evals_per_point,
            random_directions_orthogonal=self.random_directions_orthogonal,
            seek_global_optimum=self.seek_global_optimum,
            stopping_max_criterion_evaluations=self.stopping_max_criterion_evaluations,
            threshold_for_safety_step=self.threshold_for_safety_step,
            trustregion_expansion_factor_successful=self.trustregion_expansion_factor_successful,
            trustregion_expansion_factor_very_successful=self.trustregion_expansion_factor_very_successful,  # noqa: E501
            trustregion_initial_radius=self.trustregion_initial_radius,
            trustregion_minimum_change_hession_for_underdetermined_interpolation=self.trustregion_minimum_change_hession_for_underdetermined_interpolation,  # noqa: E501
            trustregion_n_interpolation_points=self.trustregion_n_interpolation_points,
            trustregion_precondition_interpolation=self.trustregion_precondition_interpolation,
            trustregion_reset_options=self.trustregion_reset_options,
            trustregion_shrinking_factor_not_successful=self.trustregion_shrinking_factor_not_successful,
            trustregion_shrinking_factor_lower_radius=self.trustregion_shrinking_factor_lower_radius,
            trustregion_shrinking_factor_upper_radius=self.trustregion_shrinking_factor_upper_radius,
            trustregion_threshold_successful=self.trustregion_threshold_successful,
            trustregion_threshold_very_successful=self.trustregion_threshold_very_successful,
        )
        return res


def nag_pybobyqa_internal(
    criterion,
    x,
    lower_bounds,
    upper_bounds,
    clip_criterion_if_overflowing,
    convergence_criterion_value,
    convergence_minimal_trustregion_radius_tolerance,  # noqa: E501
    convergence_noise_corrected_criterion_tolerance,  # noqa: E501
    convergence_slow_progress,
    initial_directions,
    interpolation_rounding_error,
    noise_additive_level,
    noise_multiplicative_level,
    noise_n_evals_per_point,
    random_directions_orthogonal,
    seek_global_optimum,
    stopping_max_criterion_evaluations,
    threshold_for_safety_step,
    trustregion_expansion_factor_successful,
    trustregion_expansion_factor_very_successful,  # noqa: E501
    trustregion_initial_radius,
    trustregion_minimum_change_hession_for_underdetermined_interpolation,
    trustregion_n_interpolation_points,
    trustregion_precondition_interpolation,
    trustregion_reset_options,
    trustregion_shrinking_factor_not_successful,  # noqa: E501
    trustregion_shrinking_factor_lower_radius,
    trustregion_shrinking_factor_upper_radius,
    trustregion_threshold_successful,
    trustregion_threshold_very_successful,
):
    r"""Minimize a function using the BOBYQA algorithm.

    Translates the flat optimagic option names into the nested Py-BOBYQA
    ``user_params`` names and calls ``pybobyqa.solve``.

    For details see :ref:`list_of_nag_algorithms`.

    Raises:
        NotInstalledError: If the Py-BOBYQA package is not installed.

    """
    if not IS_PYBOBYQA_INSTALLED:
        raise NotInstalledError(
            "The 'nag_pybobyqa' algorithm requires the Py-BOBYQA package to be "
            "installed. You can install it with 'pip install Py-BOBYQA. "
            "For additional installation instructions visit: ",
            r"https://numericalalgorithmsgroup.github.io/pybobyqa/build/html/"
            "install.html",
        )
    import pybobyqa

    # None means "no absolute objective target": disable the abs_tol criterion.
    if convergence_criterion_value is None:
        convergence_criterion_value = -np.inf

    advanced_options, trustregion_reset_options = _create_nag_advanced_options(
        x=x,
        noise_multiplicative_level=noise_multiplicative_level,
        noise_additive_level=noise_additive_level,
        trustregion_initial_radius=trustregion_initial_radius,
        noise_n_evals_per_point=noise_n_evals_per_point,
        convergence_noise_corrected_criterion_tolerance=convergence_noise_corrected_criterion_tolerance,  # noqa: E501
        trustregion_reset_options=trustregion_reset_options,
        convergence_slow_progress=convergence_slow_progress,
        interpolation_rounding_error=interpolation_rounding_error,
        threshold_for_safety_step=threshold_for_safety_step,
        clip_criterion_if_overflowing=clip_criterion_if_overflowing,
        initial_directions=initial_directions,
        random_directions_orthogonal=random_directions_orthogonal,
        trustregion_precondition_interpolation=trustregion_precondition_interpolation,
        trustregion_threshold_successful=trustregion_threshold_successful,
        trustregion_threshold_very_successful=trustregion_threshold_very_successful,
        trustregion_shrinking_factor_not_successful=trustregion_shrinking_factor_not_successful,  # noqa: E501
        trustregion_expansion_factor_successful=trustregion_expansion_factor_successful,
        trustregion_expansion_factor_very_successful=trustregion_expansion_factor_very_successful,  # noqa: E501
        trustregion_shrinking_factor_lower_radius=trustregion_shrinking_factor_lower_radius,  # noqa: E501
        trustregion_shrinking_factor_upper_radius=trustregion_shrinking_factor_upper_radius,  # noqa: E501
    )
    # Py-BOBYQA-specific advanced options (nested user_params names).
    pybobyqa_options = {
        "model.abs_tol": convergence_criterion_value,
        "interpolation.minimum_change_hessian": trustregion_minimum_change_hession_for_underdetermined_interpolation,  # noqa: E501
        "restarts.max_unsuccessful_restarts_total": trustregion_reset_options[
            "max_unsuccessful_resets"
], "restarts.rhobeg_scale_after_unsuccessful_restart": trustregion_reset_options[ "trust_region_scaling_at_unsuccessful_reset" ], "restarts.hard.use_old_fk": trustregion_reset_options[ "reuse_criterion_value_at_hard_reset" ], "restarts.auto_detect.min_chg_model_slope": trustregion_reset_options[ "auto_detect_min_jacobian_increase" ], "logging.save_diagnostic_info": True, "logging.save_xk": True, } advanced_options.update(pybobyqa_options) raw_res = pybobyqa.solve( criterion, x0=x, bounds=(lower_bounds, upper_bounds), maxfun=stopping_max_criterion_evaluations, rhobeg=trustregion_initial_radius, user_params=advanced_options, scaling_within_bounds=False, do_logging=False, print_progress=False, objfun_has_noise=noise_additive_level or noise_multiplicative_level, nsamples=noise_n_evals_per_point, npt=trustregion_n_interpolation_points, rhoend=convergence_minimal_trustregion_radius_tolerance, seek_global_minimum=seek_global_optimum, ) res = _process_nag_result(raw_res, len(x)) out = InternalOptimizeResult( x=res["solution_x"], fun=res["solution_criterion"], success=res["success"], message=res["message"], n_iterations=res["n_iterations"], ) return out def _process_nag_result(nag_result_obj, len_x): """Convert the NAG result object to our result dictionary. Args: nag_result_obj: NAG result object len_x (int): length of the supplied parameters, i.e. the dimensionality of the problem. Returns: results (dict): See :ref:`internal_optimizer_output` for details. 
""" if hasattr(nag_result_obj, "f"): solution_fun = nag_result_obj.f else: solution_fun = nag_result_obj.obj processed = { "solution_criterion": solution_fun, "n_fun_evals": nag_result_obj.nx, "message": nag_result_obj.msg, "success": nag_result_obj.flag == nag_result_obj.EXIT_SUCCESS, "reached_convergence_criterion": None, "diagnostic_info": nag_result_obj.diagnostic_info, } try: n_iterations = int(nag_result_obj.diagnostic_info["iters_total"].iloc[-1]) processed["n_iterations"] = n_iterations except (KeyboardInterrupt, SystemExit): raise except Exception: processed["n_iterations"] = None if hasattr(nag_result_obj, "states"): processed.update({"states": nag_result_obj.states}) if hasattr(nag_result_obj, "history_params"): processed.update({"history_params": nag_result_obj.history_params}) if nag_result_obj.x is not None: processed["solution_x"] = nag_result_obj.x else: processed["solution_x"] = np.array([np.nan] * len_x) return processed def _create_nag_advanced_options( x, noise_multiplicative_level, noise_additive_level, trustregion_initial_radius, noise_n_evals_per_point, convergence_noise_corrected_criterion_tolerance, trustregion_reset_options, convergence_slow_progress, interpolation_rounding_error, threshold_for_safety_step, clip_criterion_if_overflowing, initial_directions, random_directions_orthogonal, trustregion_precondition_interpolation, trustregion_threshold_successful, trustregion_threshold_very_successful, trustregion_shrinking_factor_not_successful, trustregion_expansion_factor_successful, trustregion_expansion_factor_very_successful, trustregion_shrinking_factor_lower_radius, trustregion_shrinking_factor_upper_radius, ): if noise_multiplicative_level is not None and noise_additive_level is not None: raise ValueError("You cannot specify both multiplicative and additive noise.") if trustregion_initial_radius is None: trustregion_initial_radius = calculate_trustregion_initial_radius(x) # -np.inf as a default leads to errors when building the 
documentation with sphinx. noise_n_evals_per_point = _change_evals_per_point_interface(noise_n_evals_per_point) trustregion_reset_options = _build_options_dict( user_input=trustregion_reset_options, default_options=RESET_OPTIONS, ) if trustregion_reset_options["reset_type"] not in ["soft", "hard"]: raise ValueError( "reset_type in the trustregion_reset_options must be soft or hard." ) if initial_directions not in ["coordinate", "random"]: raise ValueError("inital_directions must be either 'coordinate' or 'random'.") convergence_slow_progress = _build_options_dict( user_input=convergence_slow_progress, default_options=CONVERGENCE_SLOW_PROGRESS, ) is_noisy = bool(noise_additive_level or noise_multiplicative_level) advanced_options = { "general.rounding_error_constant": interpolation_rounding_error, "general.safety_step_thresh": threshold_for_safety_step, "general.check_objfun_for_overflow": clip_criterion_if_overflowing, "tr_radius.eta1": trustregion_threshold_successful, "tr_radius.eta2": trustregion_threshold_very_successful, "tr_radius.gamma_dec": trustregion_shrinking_factor_not_successful, "tr_radius.gamma_inc": trustregion_expansion_factor_successful, "tr_radius.gamma_inc_overline": trustregion_expansion_factor_very_successful, "tr_radius.alpha1": trustregion_shrinking_factor_lower_radius, "tr_radius.alpha2": trustregion_shrinking_factor_upper_radius, "init.random_initial_directions": initial_directions == "random", "init.random_directions_make_orthogonal": random_directions_orthogonal, "slow.thresh_for_slow": convergence_slow_progress[ "threshold_to_characterize_as_slow" ], "slow.max_slow_iters": convergence_slow_progress[ "max_insufficient_improvements" ], "slow.history_for_slow": convergence_slow_progress["comparison_period"], "noise.multiplicative_noise_level": noise_multiplicative_level, "noise.additive_noise_level": noise_additive_level, "noise.quit_on_noise_level": ( convergence_noise_corrected_criterion_tolerance > 0 ) and is_noisy, 
"noise.scale_factor_for_quit": convergence_noise_corrected_criterion_tolerance, "interpolation.precondition": trustregion_precondition_interpolation, "restarts.use_restarts": trustregion_reset_options["use_resets"], "restarts.max_unsuccessful_restarts": trustregion_reset_options[ "max_consecutive_unsuccessful_resets" ], "restarts.rhoend_scale": trustregion_reset_options[ "minimal_trustregion_radius_tolerance_scaling_at_reset" ], "restarts.use_soft_restarts": trustregion_reset_options["reset_type"] == "soft", "restarts.soft.move_xk": trustregion_reset_options["move_center_at_soft_reset"], "restarts.soft.max_fake_successful_steps": trustregion_reset_options[ "max_iterations_without_new_best_after_soft_reset" ], "restarts.auto_detect": trustregion_reset_options["auto_detect"], "restarts.auto_detect.history": trustregion_reset_options[ "auto_detect_history" ], "restarts.auto_detect.min_correl": trustregion_reset_options[ "auto_detect_min_correlations" ], "restarts.soft.num_geom_steps": trustregion_reset_options[ "points_to_replace_at_soft_reset" ], } return advanced_options, trustregion_reset_options def _change_evals_per_point_interface(func): """Change the interface of the user supplied function to the one expected by NAG. Args: func (callable or None): function mapping from our names to noise_n_evals_per_point. Returns: adjusted_noise_n_evals_per_point (callable): function mapping from the argument names expected by pybobyqa and df-ols to noise_n_evals_per_point. """ if func is not None: def adjusted_noise_n_evals_per_point(delta, rho, iter, nrestarts): # noqa: A002 return func( upper_trustregion_radius=delta, lower_trustregion_radius=rho, n_iterations=iter, n_resets=nrestarts, ) return adjusted_noise_n_evals_per_point def _build_options_dict(user_input, default_options): """Create the full dictionary of trust region fast start options from user input. Args: user_input (dict or None): dictionary to update the default options with. 
May only contain keys present in the default options. default_options (dict): the default values. Returns: full_options (dict) """ full_options = default_options.copy() user_input = {} if user_input is None else user_input invalid = [x for x in user_input if x not in full_options] if len(invalid) > 0: raise ValueError( f"You specified illegal options {', '.join(invalid)}. Allowed are: , ".join( full_options.keys() ) ) full_options.update(user_input) return full_options def _get_fast_start_method(user_value): """Get fast start method arguments from user value.""" allowed_values = ["auto", "jacobian", "trustregion"] if user_value not in allowed_values: raise ValueError( "`perturb_jacobian_or_trustregion_step` must be one of " f"{allowed_values}. You provided {user_value}." ) if user_value == "auto": faststart_jac = None faststart_step = None else: faststart_jac = user_value == "jacobian" faststart_step = not faststart_jac return faststart_jac, faststart_step ================================================ FILE: src/optimagic/optimizers/neldermead.py ================================================ """Implementation of parallelosation of Nelder-Mead algorithm.""" from dataclasses import dataclass from typing import Callable, Literal, cast import numpy as np from numpy.typing import NDArray from optimagic import mark from optimagic.batch_evaluators import process_batch_evaluator from optimagic.optimization.algo_options import ( CONVERGENCE_SECOND_BEST_FTOL_ABS, CONVERGENCE_SECOND_BEST_XTOL_ABS, STOPPING_MAXITER, ) from optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult from optimagic.optimization.internal_optimization_problem import ( InternalOptimizationProblem, ) from optimagic.typing import AggregationLevel, NonNegativeFloat, PositiveInt InitSimplexLiteral = Literal["pfeffer", "nash", "gao_han", "varadhan_borchers"] InitSimplexCallable = Callable[[NDArray[np.float64]], NDArray[np.float64]] from optimagic.typing import BatchEvaluator, 
BatchEvaluatorLiteral


@mark.minimizer(
    name="neldermead_parallel",
    solver_type=AggregationLevel.SCALAR,
    is_available=True,
    is_global=False,
    needs_jac=False,
    needs_hess=False,
    needs_bounds=False,
    supports_parallelism=True,
    supports_bounds=False,
    supports_infinite_bounds=False,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=False,
    disable_history=True,
)
@dataclass(frozen=True)
class NelderMeadParallel(Algorithm):
    r"""Parallel Nelder-Mead algorithm following Lee D., Wiswall M., A parallel
    implementation of the simplex function minimization routine, Computational
    Economics, 2007.

    Attributes:
        init_simplex_method: Name of the method used to create the initial
            simplex ("pfeffer", "nash", "gao_han" or "varadhan_borchers") or a
            callable that maps the start vector of length j to a (j + 1, j)
            simplex array. Default "gao_han".
        n_cores: Degree of parallelization. Default 1 (no parallelization).
        adaptive: Adjust the parameters of the Nelder-Mead algorithm to account
            for the simplex size. Default True.
        stopping_maxiter: Maximum number of algorithm iterations.
        convergence_ftol_abs: Maximal difference between function values
            evaluated on simplex points.
        convergence_xtol_abs: Maximal distance between points in the simplex.
        batch_evaluator: See :ref:`batch_evaluators` for details.
            Default "joblib".

    """

    init_simplex_method: InitSimplexLiteral | InitSimplexCallable = "gao_han"
    n_cores: PositiveInt = 1
    adaptive: bool = True
    stopping_maxiter: PositiveInt = STOPPING_MAXITER
    convergence_ftol_abs: NonNegativeFloat = CONVERGENCE_SECOND_BEST_FTOL_ABS
    convergence_xtol_abs: NonNegativeFloat = CONVERGENCE_SECOND_BEST_XTOL_ABS
    batch_evaluator: BatchEvaluator | BatchEvaluatorLiteral = "joblib"

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        raw = neldermead_parallel(
            criterion=cast(
                Callable[[NDArray[np.float64]], float],
                problem.fun,
            ),
            x=x0,
            init_simplex_method=self.init_simplex_method,
            n_cores=self.n_cores,
            adaptive=self.adaptive,
            stopping_maxiter=self.stopping_maxiter,
            convergence_ftol_abs=self.convergence_ftol_abs,
            convergence_xtol_abs=self.convergence_xtol_abs,
            batch_evaluator=self.batch_evaluator,
        )
        res = InternalOptimizeResult(
            x=raw["solution_x"],
            fun=raw["solution_criterion"],
            n_iterations=raw["n_iterations"],
            success=raw["success"],
            message=raw["reached_convergence_criterion"],
        )
        return res


def neldermead_parallel(
    criterion,
    x,
    *,
    init_simplex_method="gao_han",
    n_cores=1,
    adaptive=True,
    stopping_maxiter=STOPPING_MAXITER,
    convergence_ftol_abs=CONVERGENCE_SECOND_BEST_FTOL_ABS,
    convergence_xtol_abs=CONVERGENCE_SECOND_BEST_XTOL_ABS,
    batch_evaluator="joblib",
):
    if x.ndim >= 1:
        x = x.ravel()  # flatten so the initial values are one-dimensional
    j = len(x)  # size of the parameter vector
    if n_cores <= 1:
        p = 1  # if number of cores is nonpositive, set it to 1
    else:
        if n_cores >= j:
            # degree of parallelization cannot be bigger than
            # the number of parameters minus 1
            p = int(j - 1)
        else:
            p = int(n_cores)

    # set parameters of Nelder-Mead algorithm
    # for a discussion about Nelder-Mead parameters see Gao F., Han L.,
    # Implementing the Nelder-Mead simplex algorithm with adaptive parameters,
    # Computational Optimization and Applications, 2012
    alpha, gamma, beta, tau = _init_algo_params(adaptive,
        j)

    # construct initial simplex using one of feasible methods
    # see Wessing, Simon, Proper initialization is crucial for
    # the Nelder-Mead simplex search, Optimization Letters, 2019
    # for a discussion about the choice of initialization
    if not callable(init_simplex_method):
        # look up the module-level helper, e.g. "_gao_han"
        s = globals()["_" + init_simplex_method](x)
    else:
        s = init_simplex_method(x)

    batch_evaluator = process_batch_evaluator(batch_evaluator)

    # calculate criterion values for the initial simplex (as a column vector)
    f_s = np.array(batch_evaluator(func=criterion, arguments=s, n_cores=n_cores))[
        :, None
    ]

    # parallelized function: processes one reflection point and returns the
    # replacement vertex, its criterion value, and a shrink flag (0/1)
    def func_parallel(args):
        criterion, s_j, s_j_r, f_s_0, f_s_j, f_s_j_1, m = args  # read arguments

        f_s_j_r = criterion(
            s_j_r
        )  # calculate value of the criterion at the reflection point

        if f_s_j_r < f_s_0:  # if the reflection point is better than the best point
            s_j_e = m + gamma * (s_j_r - m)  # calculate expansion point
            f_s_j_e = criterion(
                s_j_e
            )  # calculate value of the criterion at the expansion point

            if f_s_j_e < f_s_0:
                # the expansion point is better than the best point:
                # return the expansion point as a new point
                return np.hstack([s_j_e, f_s_j_e, 0])
            else:
                # the expansion point is worse than the best point:
                # return the reflection point as a new point
                return np.hstack([s_j_r, f_s_j_r, 0])
        elif (
            f_s_j_r < f_s_j_1
        ):  # if reflection point is better than the next worst point
            return np.hstack(
                [s_j_r, f_s_j_r, 0]
            )  # return reflection point as a new point
        else:  # if the reflection point is worse than the next worst point
            if (
                f_s_j_r < f_s_j
            ):  # if value of the criterion at reflection point is better than
                # value of the criterion at initial point
                s_j_c = m + beta * (s_j_r - m)  # calculate outside contraction point
            else:
                s_j_c = m - beta * (s_j_r - m)  # calculate inside contraction point

            f_s_j_c = criterion(
                s_j_c
            )  # calculate a value of the criterion at contraction point
            if f_s_j_c < np.minimum(
                f_s_j, f_s_j_r
            ):  # if the value of the criterion at contraction point is better
                # than original and reflection point
                return np.hstack(
                    [s_j_c, f_s_j_c, 0]
                )  # return contraction point as a new point
            else:
                if f_s_j_r < f_s_j:
                    return np.hstack(
                        [s_j_r, f_s_j_r, 1]
                    )  # return reflection point as a new point
                else:
                    # if value of the criterion at contraction point is worse
                    # than the value of the criterion at the reflection
                    # and the initial points
                    return np.hstack(
                        [s_j, f_s_j, 1]
                    )  # return the old point as a new point

    optimal = False  # optimization condition, if True stop the algorithm
    iterations = 0  # number of criterion evaluations

    while not optimal:
        iterations += 1  # new iteration

        # sort points and arguments increasing
        row = np.argsort(f_s.ravel())
        s = np.take(s, row, axis=0)
        f_s = np.take(f_s, row, axis=0)

        # calculate centroid of the j - p + 1 best points
        m = (s[:-p, :].sum(axis=0)) / (j - p + 1)

        # calculate reflection points
        s_j_r = m + alpha * (m - s[-p:, :])

        # calculate new points of simplex: each worker returns
        # [new point, criterion value, shrink flag]; split the stacked
        # result into those three parts
        s[-p:, :], f_s[-p:, :], shrink_count = np.split(
            np.vstack(
                batch_evaluator(
                    func=func_parallel,
                    arguments=tuple(
                        (
                            criterion,
                            s[j + 1 - p + i, :],
                            s_j_r[i, :],
                            f_s[0, :],
                            f_s[j + 1 - p + i, :],
                            f_s[j - p + i, :],
                            m,
                        )
                        for i in range(p)
                    ),
                    n_cores=p,
                )
            ),
            [-2, -1],
            axis=1,
        )

        # shrink simplex if there is no improvement in every process
        if shrink_count.sum() == p:
            s = (
                tau * s[0:1, :] + (1 - tau) * s
            )  # new simplex is a linear combination of the best point
            # and remaining points
            # evaluate function at new simplex
            f_s = np.array(
                batch_evaluator(
                    func=criterion,
                    arguments=s,
                    n_cores=n_cores,
                )
            )[:, None]

        # termination criteria
        if (
            np.max(np.abs(f_s[0, :] - f_s[1:, :])) <= convergence_ftol_abs
            and np.max(np.abs(s[0, :] - s[1:,])) <= convergence_xtol_abs
        ):
            optimal = True
            converge = True
            # NOTE(review): "codition" is a typo in a user-visible message;
            # left unchanged here to preserve behavior.
            reason_to_stop = "Termination codition satisfied"
        elif (
            iterations >= stopping_maxiter
        ):  # if maximum amount of iteration is exceeded
            optimal = True
            converge = False
            # NOTE(review): "interation" is a typo in a user-visible message;
            # left unchanged here to preserve behavior.
            reason_to_stop = "Maximum number of interation exceeded"

    # save results
    result = {
        "solution_x": s[np.nonzero(f_s == f_s.min())[0][0], :],
"solution_criterion": f_s.min(), "n_iterations": iterations, "success": converge, "reached_convergence_criterion": reason_to_stop, } return result # set parameters of Nelder-Mead algorithm # for a discussion about Nlder-Mead parameters see Gao F., Han L., Implementing the # Nelder-Mead siplex algorithm with adaptive parameters, Computational Optimization # and Applications, 2012 def _init_algo_params(adaptive, j): if adaptive: # algorithem parameters alla Gao-Han (adaptive) return ( 1, 1 + 2 / j, 0.75 - 1 / (2 * j), 1 - 1 / j, ) else: # standard setting of Nelder-Mead return ( 1, 2, 0.5, 0.5, ) # initial structure of the simplex def _init_simplex(x): s = np.vstack( [ x, ] * (len(x) + 1) ).astype(np.float64) return s # initilize due to L. Pfeffer at Standford (Matlab fminsearch and SciPy default option) def _pfeffer(x): s = _init_simplex(x) # method parameters c_p = 1.05 # initial simplex np.fill_diagonal(s[1:, :], x * c_p * (x != 0) + 0.00025 * (x == 0)) return s # adopted from Nash (R default option) # see Nash, J.C.: Compact numerical methods for computers: linear algebra and # function minimisation, 2nd edn. Adam Hilger Ltd., Bristol (1990) for details def _nash(x): s = _init_simplex(x) # method parameters c_n = 0.1 # initial simplex np.fill_diagonal(s[1:, :], (x != 0) * (np.max(x) * c_n + x) + c_n * (x == 0)) return s # adopted from Gao F., Han L., Implementing the # Nelder-Mead siplex algorithm with adaptive parameters, Computational Optimizatio def _gao_han(x): s = _init_simplex(x) # method parameters c_h = np.minimum(np.maximum(np.max(x), 1), 10) j = len(x) # initial simplex s = ( s + np.vstack( [ np.array([[(1 - (j + 1) ** 0.5) / j]]) * np.ones([1, j]), np.eye(j), ] ) * c_h ) return s # adopted by Varadhan and Borchers for the R package dfoptim # see Varadhan, R., Borchers, H.W.: Dfoptim: derivative-free optimization (2016). # https://CRAN.R-project. org/package=dfoptim. 
R package version 2016.7-1 for details def _varadhan_borchers(x): s = _init_simplex(x) # method parameters j = len(x) c_s = np.maximum(1, ((x**2).sum()) ** 0.5) beta1 = c_s / (j * 2**0.5) * ((j + 1) ** 0.5 + j - 1) beta2 = c_s / (j * 2**0.5) * ((j + 1) ** 0.5 - 1) # initial simplex s[1:, :] = s[1:, :] + np.full([j, j], beta2) + np.eye(j) * (beta1 - beta2) return s ================================================ FILE: src/optimagic/optimizers/nevergrad_optimizers.py ================================================ """Implement optimizers from the nevergrad package.""" from __future__ import annotations import math from dataclasses import dataclass from typing import TYPE_CHECKING, Any, Literal import numpy as np from numpy.typing import NDArray from optimagic import mark from optimagic.config import IS_BAYESOPTIM_INSTALLED, IS_NEVERGRAD_INSTALLED from optimagic.exceptions import NotInstalledError from optimagic.optimization.algo_options import ( CONVERGENCE_FTOL_ABS, CONVERGENCE_FTOL_REL, CONVERGENCE_XTOL_ABS, STOPPING_MAXFUN_GLOBAL, STOPPING_MAXITER, ) from optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult from optimagic.optimization.internal_optimization_problem import ( InternalOptimizationProblem, ) from optimagic.typing import ( AggregationLevel, NonNegativeFloat, NonNegativeInt, PositiveFloat, PositiveInt, ) if TYPE_CHECKING: from nevergrad.optimization.base import ConfiguredOptimizer NEVERGRAD_NOT_INSTALLED_ERROR = ( "This optimizer requires the 'nevergrad' package to be installed. " "You can install it with `pip install nevergrad`. " "Visit https://facebookresearch.github.io/nevergrad/getting_started.html " "for more detailed installation instructions." 
)


@mark.minimizer(
    name="nevergrad_pso",
    solver_type=AggregationLevel.SCALAR,
    is_available=IS_NEVERGRAD_INSTALLED,
    is_global=True,
    needs_jac=False,
    needs_hess=False,
    needs_bounds=False,
    supports_parallelism=True,
    supports_bounds=True,
    supports_infinite_bounds=False,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=False,
    disable_history=False,
)
@dataclass(frozen=True)
class NevergradPSO(Algorithm):
    """Minimize a scalar function using the Particle Swarm Optimization algorithm.

    The Particle Swarm Optimization algorithm was originally proposed by
    :cite:`Kennedy1995`. The implementation in Nevergrad is based on
    :cite:`Zambrano2013`.

    PSO solves an optimization problem by evolving a swarm of particles
    (candidate solutions) across the search space. Each particle adjusts its
    position based on its own experience (cognitive component) and the
    experiences of its neighbors or the swarm (social component), using
    velocity updates. The algorithm iteratively guides the swarm toward
    promising regions of the search space.

    """

    transform: Literal["arctan", "gaussian", "identity"] = "arctan"
    """The transform used to map from PSO optimization space to real space."""

    population_size: int | None = None
    """The number of particles in the swarm."""

    n_cores: int = 1
    """The number of CPU cores to use for parallel computation."""

    seed: int | None = None
    """Random seed for reproducibility."""

    stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL
    """Maximum number of function evaluations."""

    inertia: float = 0.5 / math.log(2.0)
    r"""Inertia weight ω.

    Controls the influence of a particle's previous velocity. Must be less than
    1 to avoid divergence.
    """

    cognitive: float = 0.5 + math.log(2.0)
    r"""Cognitive coefficient :math:`\phi_p`.

    Controls the influence of a particle's own best known position. Typical
    values: 1.0 to 3.0.
    """

    social: float = 0.5 + math.log(2.0)
    r"""Social coefficient. Denoted by :math:`\phi_g`.

    Controls the influence of the swarm's best known position. Typical values:
    1.0 to 3.0.
    """

    quasi_opp_init: bool = False
    """Whether to use quasi-opposition initialization.

    Default is False.
    """

    speed_quasi_opp_init: bool = False
    """Whether to apply quasi-opposition initialization to speed.

    Default is False.
    """

    special_speed_quasi_opp_init: bool = False
    """Whether to use special quasi-opposition initialization for speed.

    Default is False.
    """

    sigma: float | None = None
    """Standard deviation for sampling initial population from N(0, σ²) in case
    bounds are not provided."""

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        if not IS_NEVERGRAD_INSTALLED:
            raise NotInstalledError(NEVERGRAD_NOT_INSTALLED_ERROR)
        # Local import keeps nevergrad an optional dependency.
        import nevergrad as ng

        configured_optimizer = ng.optimizers.ConfPSO(
            transform=self.transform,
            popsize=self.population_size,
            omega=self.inertia,
            phip=self.cognitive,
            phig=self.social,
            qo=self.quasi_opp_init,
            sqo=self.speed_quasi_opp_init,
            so=self.special_speed_quasi_opp_init,
        )

        res = _nevergrad_internal(
            problem=problem,
            x0=x0,
            configured_optimizer=configured_optimizer,
            stopping_maxfun=self.stopping_maxfun,
            n_cores=self.n_cores,
            seed=self.seed,
            sigma=self.sigma,
            nonlinear_constraints=problem.nonlinear_constraints,
        )
        return res


@mark.minimizer(
    name="nevergrad_cmaes",
    solver_type=AggregationLevel.SCALAR,
    is_available=IS_NEVERGRAD_INSTALLED,
    is_global=True,
    needs_jac=False,
    needs_hess=False,
    needs_bounds=False,
    supports_parallelism=True,
    supports_bounds=True,
    supports_infinite_bounds=False,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=False,
    disable_history=False,
)
@dataclass(frozen=True)
class NevergradCMAES(Algorithm):
    """Minimize a scalar function using the Covariance Matrix Adaptation Evolution
    Strategy (CMA-ES) algorithm.

    The CMA-ES is a state-of-the-art evolutionary algorithm for difficult
    non-linear, non-convex, black-box optimization problems in continuous domains.
    It is typically applied to unconstrained or bounded problems with
    dimensionality between 3 and 100. CMA-ES adapts a multivariate normal
    distribution to approximate the objective function's shape by estimating a
    positive-definite covariance matrix, akin to the inverse Hessian in
    convex-quadratic problems, but without requiring derivatives. This
    implementation is a python wrapper over the original code. Original paper
    can be accessed at `cma-es `_.

    """

    scale: NonNegativeFloat = 1.0
    """Scale of the search."""

    elitist: bool = False
    """Whether to switch to elitist mode (also known as (μ,λ)-CMA-ES).

    In elitist mode, the best point in the population is always retained.
    """

    population_size: int | None = None
    """Population size."""

    diagonal: bool = False
    """Use the diagonal version of CMA, which is more efficient for
    high-dimensional problems."""

    high_speed: bool = False
    """Use a metamodel for recommendation to speed up optimization."""

    fast_cmaes: bool = False
    """Use the fast CMA-ES implementation.

    Cannot be used with diagonal=True. Produces equivalent results and is
    preferable for high dimensions or when objective function evaluations are
    fast.
    """

    random_init: bool = False
    """If True, initialize the optimizer with random parameters."""

    n_cores: PositiveInt = 1
    """Number of cores to use for parallel function evaluation."""

    step_size_adaptive: bool | str = True
    """Whether to adapt the step size.

    Can be a boolean or a string specifying the adaptation strategy.
    """

    CSA_dampfac: PositiveFloat = 1.0
    """Damping factor for step size adaptation."""

    CMA_dampsvec_fade: PositiveFloat = 0.1
    """Damping rate for step size adaptation."""

    CSA_squared: bool = False
    """Whether to use squared step sizes in updates."""

    CMA_on: float = 1.0
    """Learning rate for the covariance matrix update."""

    CMA_rankone: float = 1.0
    """Multiplier for the rank-one update learning rate of the covariance matrix."""

    CMA_rankmu: float = 1.0
    """Multiplier for the rank-mu update learning rate of the covariance matrix."""

    CMA_cmean: float = 1.0
    """Learning rate for the mean update."""

    CMA_diagonal_decoding: float = 0.0
    """Learning rate for the diagonal update."""

    num_parents: int | None = None
    """Number of parents (μ) for recombination."""

    CMA_active: bool = True
    """Whether to use negative updates for the covariance matrix."""

    CMA_mirrormethod: Literal[0, 1, 2] = 2
    """Strategy for mirror sampling.

    0: Unconditional, 1: Selective, 2: Selective with delay.
    """

    CMA_const_trace: bool | Literal["arithm", "geom", "aeig", "geig"] = False
    """How to normalize the trace of the covariance matrix.

    False: No normalization, True: Normalize to 1. Other options: 'arithm',
    'geom', 'aeig', 'geig'.
    """

    CMA_diagonal: int | bool = False
    """Number of iterations to use diagonal covariance matrix before switching
    to full matrix.

    If False, always use full matrix.
    """

    stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL
    """Maximum number of function evaluations before termination."""

    stopping_maxiter: PositiveInt = STOPPING_MAXITER
    """Maximum number of iterations before termination."""

    stopping_maxtime: PositiveFloat = float("inf")
    """Maximum time in seconds before termination."""

    stopping_cov_mat_cond: NonNegativeFloat = 1e14
    """Maximum condition number of the covariance matrix before termination."""

    convergence_ftol_abs: NonNegativeFloat = CONVERGENCE_FTOL_ABS
    """Absolute tolerance on function value changes for convergence."""

    convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL
    """Relative tolerance on function value changes for convergence."""

    convergence_xtol_abs: NonNegativeFloat = CONVERGENCE_XTOL_ABS
    """Absolute tolerance on parameter changes for convergence."""

    convergence_iter_noimprove: PositiveInt | None = None
    """Number of iterations without improvement before termination."""

    invariant_path: bool = False
    """Whether evolution path (pc) should be invariant to transformations."""

    eval_final_mean: bool = True
    """Whether to evaluate the final mean solution."""

    seed: int | None = None
    """Seed used by the internal random number generator for reproducibility."""

    sigma: float | None = None
    """Standard deviation for sampling initial population from N(0, σ²)in case
    bounds are not provided."""

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        if not IS_NEVERGRAD_INSTALLED:
            raise NotInstalledError(NEVERGRAD_NOT_INSTALLED_ERROR)
        # Local import keeps nevergrad an optional dependency.
        import nevergrad as ng

        # Map our option names onto the option keys of the underlying cma
        # package (passed through nevergrad via ``inopts``).
        cma_options = {
            "AdaptSigma": self.step_size_adaptive,
            "CSA_dampfac": self.CSA_dampfac,
            "CMA_dampsvec_fade": self.CMA_dampsvec_fade,
            "CSA_squared": self.CSA_squared,
            "CSA_invariant_path": self.invariant_path,
            "CMA_on": self.CMA_on,
            "CMA_rankone": self.CMA_rankone,
            "CMA_rankmu": self.CMA_rankmu,
            "CMA_cmean": self.CMA_cmean,
            "CMA_diagonal_decoding": self.CMA_diagonal_decoding,
            "CMA_mu":
self.num_parents, "CMA_active": self.CMA_active, "CMA_mirrormethod": self.CMA_mirrormethod, "CMA_const_trace": self.CMA_const_trace, "CMA_diagonal": self.CMA_diagonal, "maxfevals": self.stopping_maxfun, "maxiter": self.stopping_maxiter, "timeout": self.stopping_maxtime, "tolconditioncov": self.stopping_cov_mat_cond, "tolfun": self.convergence_ftol_abs, "tolfunrel": self.convergence_ftol_rel, "tolx": self.convergence_xtol_abs, "tolstagnation": self.convergence_iter_noimprove, "eval_final_mean": self.eval_final_mean, } configured_optimizer = ng.optimizers.ParametrizedCMA( scale=self.scale, popsize=self.population_size, elitist=self.elitist, diagonal=self.diagonal, high_speed=self.high_speed, fcmaes=self.fast_cmaes, inopts=cma_options, ) res = _nevergrad_internal( problem=problem, x0=x0, configured_optimizer=configured_optimizer, stopping_maxfun=self.stopping_maxfun, n_cores=self.n_cores, seed=self.seed, sigma=self.sigma, nonlinear_constraints=problem.nonlinear_constraints, ) return res @mark.minimizer( name="nevergrad_oneplusone", solver_type=AggregationLevel.SCALAR, is_available=IS_NEVERGRAD_INSTALLED, is_global=True, needs_jac=False, needs_hess=False, needs_bounds=False, supports_parallelism=True, supports_bounds=True, supports_infinite_bounds=False, supports_linear_constraints=False, supports_nonlinear_constraints=False, disable_history=False, ) @dataclass(frozen=True) class NevergradOnePlusOne(Algorithm): """Minimize a scalar function using the One-Plus-One Evolutionary algorithm. The One-Plus-One evolutionary algorithm iterates to find a set of parameters that minimizes the loss function. It does this by perturbing, or mutating, the parameters from the last iteration (the parent). If the new (child) parameters yield a better result, the child becomes the new parent whose parameters are perturbed, perhaps more aggressively. If the parent yields a better result, it remains the parent and the next perturbation is less aggressive. 
    Originally proposed by :cite:`Rechenberg1973`. The implementation in Nevergrad
    is based on the one-fifth adaptation rule from :cite:`Schumer1968`.

    """

    noise_handling: (
        Literal["random", "optimistic"]
        | tuple[Literal["random", "optimistic"], float]
        | None
    ) = None
    """Method for handling noise.

    'random' reevaluates a random point, while 'optimistic' reevaluates the best
    optimistic point. A float coefficient can be provided to tune the regularity of
    these reevaluations.

    """

    mutation: Literal[
        "gaussian",
        "cauchy",
        "discrete",
        "fastga",
        "rls",
        "doublefastga",
        "adaptive",
        "coordinatewise_adaptive",
        "portfolio",
        "discreteBSO",
        "lengler",
        "lengler2",
        "lengler3",
        "lenglerhalf",
        "lenglerfourth",
        "doerr",
        "lognormal",
        "xlognormal",
        "xsmalllognormal",
        "tinylognormal",
        "smalllognormal",
        "biglognormal",
        "hugelognormal",
    ] = "gaussian"
    """Type of mutation to apply.

    'gaussian' is the default. Other options include 'cauchy', 'discrete', 'fastga',
    'rls', and 'portfolio'.

    """

    annealing: (
        Literal[
            "none", "Exp0.9", "Exp0.99", "Exp0.9Auto", "Lin100.0", "Lin1.0", "LinAuto"
        ]
        | None
    ) = None
    """Annealing schedule for mutation amplitude.

    Can be 'none', exponential (e.g., 'Exp0.9'), or linear (e.g., 'Lin100.0').

    """

    sparse: bool = False
    """Whether to apply random mutations that set variables to zero."""

    super_radii: bool = False
    """Whether to apply extended radii beyond standard bounds for candidate
    generation, enabling broader exploration."""

    smoother: bool = False
    """Whether to suggest smooth mutations."""

    roulette_size: PositiveInt = 64
    """Size of the roulette wheel used for selection, affecting sampling diversity
    from past candidates."""

    antismooth: NonNegativeInt = 4
    """Degree of anti-smoothing to prevent premature convergence by penalizing
    overly smooth improvements."""

    crossover: bool = False
    """Whether to include a genetic crossover step every other iteration."""

    crossover_type: (
        Literal["none", "rand", "max", "min", "onepoint", "twopoint"] | None
    ) = None
    """Method for genetic crossover.

    Options include 'rand', 'onepoint', and 'twopoint'.

    """

    tabu_length: NonNegativeInt = 1000
    """Length of the tabu list to prevent revisiting recent candidates and help
    escape local minima."""

    rotation: bool = False
    """Whether to apply rotational transformations to the search space to enhance
    search performance."""

    seed: int | None = None
    """Seed for the random number generator for reproducibility."""

    stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL
    """Maximum number of function evaluations."""

    n_cores: PositiveInt = 1
    """Number of cores to use for parallel computation."""

    sigma: float | None = None
    """Standard deviation for sampling initial population from N(0, σ²) if bounds
    are not provided."""

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        # Import guard: nevergrad is an optional dependency.
        if not IS_NEVERGRAD_INSTALLED:
            raise NotInstalledError(NEVERGRAD_NOT_INSTALLED_ERROR)
        import nevergrad as ng

        configured_optimizer = ng.optimizers.ParametrizedOnePlusOne(
            noise_handling=self.noise_handling,
            mutation=self.mutation,
            crossover=self.crossover,
            rotation=self.rotation,
            # nevergrad expects the string "none" rather than None here.
            annealing=self.annealing or "none",
            sparse=self.sparse,
            smoother=self.smoother,
            super_radii=self.super_radii,
            roulette_size=self.roulette_size,
            antismooth=self.antismooth,
            # nevergrad expects the string "none" rather than None here.
            crossover_type=self.crossover_type or "none",
        )
        res = _nevergrad_internal(
            problem=problem,
            x0=x0,
            configured_optimizer=configured_optimizer,
            stopping_maxfun=self.stopping_maxfun,
            n_cores=self.n_cores,
            seed=self.seed,
            sigma=self.sigma,
            nonlinear_constraints=problem.nonlinear_constraints,
        )
        return res


@mark.minimizer(
    name="nevergrad_de",
    solver_type=AggregationLevel.SCALAR,
    is_available=IS_NEVERGRAD_INSTALLED,
    is_global=True,
    needs_jac=False,
    needs_hess=False,
    needs_bounds=False,
    supports_parallelism=True,
    supports_bounds=True,
    supports_infinite_bounds=False,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=False,
    disable_history=False,
)
@dataclass(frozen=True)
class NevergradDifferentialEvolution(Algorithm):
    """Minimize a scalar function using the Differential Evolution optimizer.

    Differential Evolution is typically used for continuous optimization. It uses
    differences between points in the population for performing mutations in
    fruitful directions. It is a kind of covariance adaptation without any explicit
    covariance, making it very fast in high dimensions.

    """

    initialization: Literal["parametrization", "LHS", "QR", "QO", "SO"] = (
        "parametrization"
    )
    """Algorithm for initialization.

    'LHS' is Latin Hypercube Sampling, 'QR' is Quasi-Random.

    """

    scale: float | str = 1.0
    """Scale of random component of updates."""

    recommendation: Literal["pessimistic", "optimistic", "mean", "noisy"] = (
        "pessimistic"
    )
    """Criterion for selecting the best point to recommend."""

    crossover: (
        float
        | Literal[
            "dimension",
            "random",
            "onepoint",
            "twopoints",
            "rotated_twopoints",
            "parametrization",
        ]
    ) = 0.5
    """Crossover rate or strategy.

    Can be a float, 'dimension' (1/dim), 'random', 'onepoint', or 'twopoints'.

    """

    F1: PositiveFloat = 0.8
    """Differential weight #1 (scaling factor)."""

    F2: PositiveFloat = 0.8
    """Differential weight #2 (scaling factor)."""

    population_size: int | Literal["standard", "dimension", "large"] = "standard"
    """Population size.

    Can be an integer or a string like 'standard', 'dimension', or 'large' to set it
    automatically.

    """

    high_speed: bool = False
    """If True, uses a metamodel for recommendations to speed up optimization."""

    stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL
    """Maximum number of function evaluations before termination."""

    n_cores: PositiveInt = 1
    """Number of cores to use for parallel function evaluation."""

    seed: int | None = None
    """Seed for the random number generator for reproducibility."""

    sigma: float | None = None
    """Standard deviation for sampling initial population from N(0, σ²) if bounds
    are not provided."""

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        # Import guard: nevergrad is an optional dependency.
        if not IS_NEVERGRAD_INSTALLED:
            raise NotInstalledError(NEVERGRAD_NOT_INSTALLED_ERROR)
        import nevergrad as ng

        configured_optimizer = ng.optimizers.DifferentialEvolution(
            initialization=self.initialization,
            scale=self.scale,
            recommendation=self.recommendation,
            crossover=self.crossover,
            F1=self.F1,
            F2=self.F2,
            popsize=self.population_size,
            high_speed=self.high_speed,
        )
        res = _nevergrad_internal(
            problem=problem,
            x0=x0,
            configured_optimizer=configured_optimizer,
            stopping_maxfun=self.stopping_maxfun,
            n_cores=self.n_cores,
            seed=self.seed,
            sigma=self.sigma,
            nonlinear_constraints=problem.nonlinear_constraints,
        )
        return res


@mark.minimizer(
    name="nevergrad_bo",
    solver_type=AggregationLevel.SCALAR,
    # Requires both optional dependencies: nevergrad and bayes_optim.
    is_available=IS_NEVERGRAD_INSTALLED and IS_BAYESOPTIM_INSTALLED,
    is_global=True,
    needs_jac=False,
    needs_hess=False,
    needs_bounds=False,
    supports_parallelism=True,
    supports_bounds=True,
    supports_infinite_bounds=False,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=False,
disable_history=False, ) @dataclass(frozen=True) class NevergradBayesOptim(Algorithm): """Minimize a scalar function using the Bayesian Optimization (BO) algorithm. This wrapper uses the BO and PCA-BO algorithms from the `bayes_optim` package :cite:`bayesoptimimpl`. PCA-BO (Principal Component Analysis for Bayesian Optimization) is a dimensionality reduction technique for black-box optimization. It applies PCA to the input space before performing Bayesian optimization, improving efficiency in high dimensions by focusing on directions of greatest variance. """ init_budget: int | None = None """Number of initialization algorithm steps.""" pca: bool = False """Whether to use the PCA transformation, defining PCA-BO rather than standard BO.""" n_components: NonNegativeFloat = 0.95 """Number of principal axes, representing the percentage of explained variance (e.g., 0.95 means 95% variance retained).""" prop_doe_factor: NonNegativeFloat | None = 1 """Percentage of the initial budget used for Design of Experiments (DoE).""" stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL """Maximum number of function evaluations before termination.""" n_cores: PositiveInt = 1 """Number of cores to use for parallel function evaluation.""" seed: int | None = None """Seed for the random number generator for reproducibility.""" sigma: int | None = None """Standard deviation for sampling initial population from N(0, σ²)in case bounds are not provided.""" def _solve_internal_problem( self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] ) -> InternalOptimizeResult: if not IS_NEVERGRAD_INSTALLED: raise NotInstalledError(NEVERGRAD_NOT_INSTALLED_ERROR) import nevergrad as ng configured_optimizer = ng.optimizers.BayesOptim( init_budget=self.init_budget, pca=self.pca, n_components=self.n_components, prop_doe_factor=self.prop_doe_factor, ) res = _nevergrad_internal( problem=problem, x0=x0, configured_optimizer=configured_optimizer, stopping_maxfun=self.stopping_maxfun, 
n_cores=self.n_cores, seed=self.seed, sigma=self.sigma, nonlinear_constraints=problem.nonlinear_constraints, ) return res @mark.minimizer( name="nevergrad_emna", solver_type=AggregationLevel.SCALAR, is_available=IS_NEVERGRAD_INSTALLED, is_global=True, needs_jac=False, needs_hess=False, needs_bounds=False, supports_parallelism=True, supports_bounds=True, supports_infinite_bounds=False, supports_linear_constraints=False, supports_nonlinear_constraints=False, disable_history=False, ) @dataclass(frozen=True) class NevergradEMNA(Algorithm): """Minimize a scalar function using the Estimation of Multivariate Normal Algorithm. EMNA is a distribution-based evolutionary algorithm that models the search space using a multivariate Gaussian. It learns the full covariance matrix, resulting in a cubic time complexity with respect to each sampling. It is efficient in parallel settings but other methods should be considered first. See :cite:`emnaimpl`. """ isotropic: bool = True """If True, uses an isotropic (identity covariance) Gaussian. If False, uses a separable (diagonal covariance) Gaussian. """ noise_handling: bool = True """If True, returns the best individual found. If False (recommended for noisy problems), returns the average of the final population. """ population_size_adaptation: bool = False """If True, the population size is adjusted automatically based on the optimization landscape and noise level.""" initial_popsize: int | None = None """Initial population size. Defaults to 4 times the problem dimension. 
""" stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL """Maximum number of function evaluations before termination.""" n_cores: PositiveInt = 1 """Number of cores to use for parallel function evaluation.""" seed: int | None = None """Seed for the random number generator for reproducibility.""" sigma: float | None = None """Standard deviation for sampling initial population from N(0, σ²)in case bounds are not provided.""" def _solve_internal_problem( self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] ) -> InternalOptimizeResult: if not IS_NEVERGRAD_INSTALLED: raise NotInstalledError(NEVERGRAD_NOT_INSTALLED_ERROR) import nevergrad as ng configured_optimizer = ng.optimizers.EMNA( isotropic=self.isotropic, naive=self.noise_handling, population_size_adaptation=self.population_size_adaptation, initial_popsize=self.initial_popsize, ) res = _nevergrad_internal( problem=problem, x0=x0, configured_optimizer=configured_optimizer, stopping_maxfun=self.stopping_maxfun, n_cores=self.n_cores, seed=self.seed, sigma=self.sigma, nonlinear_constraints=problem.nonlinear_constraints, ) return res @mark.minimizer( name="nevergrad_cga", solver_type=AggregationLevel.SCALAR, is_available=IS_NEVERGRAD_INSTALLED, is_global=True, needs_jac=False, needs_hess=False, needs_bounds=False, supports_parallelism=True, supports_bounds=True, supports_infinite_bounds=False, supports_linear_constraints=False, supports_nonlinear_constraints=False, disable_history=False, ) @dataclass(frozen=True) class NevergradCGA(Algorithm): """Minimize a scalar function using the Compact Genetic Algorithm. The Compact Genetic Algorithm (cGA) is a memory-efficient genetic algorithm that represents the population as a probability vector over gene values. It simulates the behavior of a simple GA with uniform crossover by updating probabilities instead of maintaining an explicit population. See :cite:`cgaimpl`. 
""" stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL """Maximum number of function evaluations before termination.""" n_cores: PositiveInt = 1 """Number of cores to use for parallel function evaluation.""" seed: int | None = None """Seed for the random number generator for reproducibility.""" sigma: float | None = None """Standard deviation for sampling initial population from N(0, σ²)in case bounds are not provided.""" def _solve_internal_problem( self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] ) -> InternalOptimizeResult: if not IS_NEVERGRAD_INSTALLED: raise NotInstalledError(NEVERGRAD_NOT_INSTALLED_ERROR) import nevergrad as ng configured_optimizer = ng.optimizers.cGA res = _nevergrad_internal( problem=problem, x0=x0, configured_optimizer=configured_optimizer, stopping_maxfun=self.stopping_maxfun, n_cores=self.n_cores, seed=self.seed, sigma=self.sigma, nonlinear_constraints=problem.nonlinear_constraints, ) return res @mark.minimizer( name="nevergrad_eda", solver_type=AggregationLevel.SCALAR, is_available=IS_NEVERGRAD_INSTALLED, is_global=True, needs_jac=False, needs_hess=False, needs_bounds=False, supports_parallelism=True, supports_bounds=True, supports_infinite_bounds=False, supports_linear_constraints=False, supports_nonlinear_constraints=False, disable_history=False, ) @dataclass(frozen=True) class NevergradEDA(Algorithm): """Minimize a scalar function using the Estimation of Distribution Algorithm. Estimation of Distribution Algorithms (EDAs) optimize by building and sampling a probabilistic model of promising solutions. Instead of using traditional variation operators like crossover or mutation, EDAs update a distribution based on selected individuals and sample new candidates from it. Refer to :cite:`edaimpl`. 
""" stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL """Maximum number of function evaluations before termination.""" n_cores: PositiveInt = 1 """Number of cores to use for parallel function evaluation.""" seed: int | None = None """Seed for the random number generator for reproducibility.""" sigma: float | None = None """Standard deviation for sampling initial population from N(0, σ²)in case bounds are not provided.""" def _solve_internal_problem( self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] ) -> InternalOptimizeResult: if not IS_NEVERGRAD_INSTALLED: raise NotInstalledError(NEVERGRAD_NOT_INSTALLED_ERROR) import nevergrad as ng configured_optimizer = ng.optimizers.EDA res = _nevergrad_internal( problem=problem, x0=x0, configured_optimizer=configured_optimizer, stopping_maxfun=self.stopping_maxfun, n_cores=self.n_cores, seed=self.seed, sigma=self.sigma, nonlinear_constraints=problem.nonlinear_constraints, ) return res @mark.minimizer( name="nevergrad_tbpsa", solver_type=AggregationLevel.SCALAR, is_available=IS_NEVERGRAD_INSTALLED, is_global=True, needs_jac=False, needs_hess=False, needs_bounds=False, supports_parallelism=True, supports_bounds=True, supports_infinite_bounds=False, supports_linear_constraints=False, supports_nonlinear_constraints=False, disable_history=False, ) @dataclass(frozen=True) class NevergradTBPSA(Algorithm): r"""Minimize a scalar function using the Test-based Population Size Adaptation algorithm. TBPSA adapts population size based on fitness trend detection using linear regression. If no significant improvement is found (via hypothesis testing), the population size is increased to improve robustness, making it effective for noisy optimization problems. For more details, refer to :cite:`tbpsaimpl`. """ noise_handling: bool = True """If True, returns the best individual. If False (recommended for noisy problems), returns the average of the final population to reduce noise. 
""" initial_popsize: int | None = None """Initial population size. If not specified, defaults to 4 times the problem dimension. """ stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL """Maximum number of function evaluations before termination.""" n_cores: PositiveInt = 1 """Number of cores to use for parallel function evaluation.""" seed: int | None = None """Seed for the random number generator for reproducibility.""" sigma: float | None = None """Standard deviation for sampling initial population from N(0, σ²)in case bounds are not provided.""" def _solve_internal_problem( self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] ) -> InternalOptimizeResult: if not IS_NEVERGRAD_INSTALLED: raise NotInstalledError(NEVERGRAD_NOT_INSTALLED_ERROR) import nevergrad as ng configured_optimizer = ng.optimizers.ParametrizedTBPSA( naive=self.noise_handling, initial_popsize=self.initial_popsize, ) res = _nevergrad_internal( problem=problem, x0=x0, configured_optimizer=configured_optimizer, stopping_maxfun=self.stopping_maxfun, n_cores=self.n_cores, seed=self.seed, sigma=self.sigma, nonlinear_constraints=problem.nonlinear_constraints, ) return res @mark.minimizer( name="nevergrad_randomsearch", solver_type=AggregationLevel.SCALAR, is_available=IS_NEVERGRAD_INSTALLED, is_global=True, needs_jac=False, needs_hess=False, needs_bounds=False, supports_parallelism=True, supports_bounds=True, supports_infinite_bounds=False, supports_linear_constraints=False, supports_nonlinear_constraints=False, disable_history=False, ) @dataclass(frozen=True) class NevergradRandomSearch(Algorithm): """Minimize a scalar function using the Random Search algorithm. This is a one-shot optimization method that provides random suggestions and serves as a simple baseline for other optimizers. 
""" middle_point: bool = False """Enforces that the first suggested point is the zero vector.""" opposition_mode: Literal["opposite", "quasi"] | None = None """Symmetrizes exploration with respect to the center. 'opposite' enables full symmetry, while 'quasi' applies randomized symmetry. """ sampler: Literal["parametrization", "gaussian", "cauchy"] = "parametrization" """The probability distribution for sampling points. 'gaussian' and 'cauchy' are available alternatives. """ scale: PositiveFloat | Literal["random", "auto", "autotune"] = "auto" """Scalar used to multiply suggested point values. Can be a float or a string for auto-scaling ('random', 'auto', 'autotune'). """ recommendation_rule: Literal[ "average_of_best", "pessimistic", "average_of_exp_best" ] = "pessimistic" """Specifies how the final recommendation is chosen, e.g., 'pessimistic' (default) or 'average_of_best'.""" stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL """Maximum number of function evaluations before termination.""" n_cores: PositiveInt = 1 """Number of cores to use for parallel function evaluation.""" sigma: float | None = None """Standard deviation for sampling initial population from N(0, σ²)in case bounds are not provided.""" def _solve_internal_problem( self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] ) -> InternalOptimizeResult: if not IS_NEVERGRAD_INSTALLED: raise NotInstalledError(NEVERGRAD_NOT_INSTALLED_ERROR) import nevergrad as ng configured_optimizer = ng.optimizers.RandomSearchMaker( stupid=False, middle_point=self.middle_point, opposition_mode=self.opposition_mode, sampler=self.sampler, scale=self.scale, recommendation_rule=self.recommendation_rule, ) res = _nevergrad_internal( problem=problem, x0=x0, configured_optimizer=configured_optimizer, stopping_maxfun=self.stopping_maxfun, n_cores=self.n_cores, seed=None, sigma=self.sigma, nonlinear_constraints=problem.nonlinear_constraints, ) return res @mark.minimizer( name="nevergrad_samplingsearch", 
solver_type=AggregationLevel.SCALAR, is_available=IS_NEVERGRAD_INSTALLED, is_global=True, needs_jac=False, needs_hess=False, needs_bounds=False, supports_parallelism=True, supports_bounds=True, supports_infinite_bounds=False, supports_linear_constraints=False, supports_nonlinear_constraints=False, disable_history=False, ) @dataclass(frozen=True) class NevergradSamplingSearch(Algorithm): """Minimize a scalar function using SamplingSearch. This is a one-shot optimization method that is better than random search because it uses low-discrepancy sequences to ensure more uniform coverage of the search space. It is recommended to use "Hammersley" as the sampler if the budget is known, and to set `scrambled=True` in high dimensions. """ sampler: Literal["Halton", "LHS", "Hammersley"] = "Halton" """Choice of the low-discrepancy sampler used for generating points. 'LHS' is Latin Hypercube Sampling. """ scrambled: bool = False """If True, adds scrambling to the search sequence, which is highly recommended for high-dimensional problems.""" middle_point: bool = False """If True, the first suggested point is the zero vector, useful for initializing at the center of the search space.""" cauchy: bool = False """If True, uses the inverse Cauchy distribution instead of Gaussian when projecting samples to a real-valued space.""" scale: bool | NonNegativeFloat = 1.0 """A float multiplier to scale all generated points.""" rescaled: bool = False """If True, rescales the sampling pattern to ensure better coverage of the boundaries.""" recommendation_rule: Literal["average_of_best", "pessimistic"] = "pessimistic" """How the final recommendation is chosen. 'pessimistic' is the default. 
""" stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL """Maximum number of function evaluations before termination.""" n_cores: PositiveInt = 1 """Number of cores to use for parallel function evaluation.""" seed: int | None = None """Seed for the random number generator for reproducibility.""" sigma: float | None = None """Standard deviation for sampling initial population from N(0, σ²)in case bounds are not provided.""" def _solve_internal_problem( self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] ) -> InternalOptimizeResult: if not IS_NEVERGRAD_INSTALLED: raise NotInstalledError(NEVERGRAD_NOT_INSTALLED_ERROR) import nevergrad as ng configured_optimizer = ng.optimizers.SamplingSearch( sampler=self.sampler, scrambled=self.scrambled, middle_point=self.middle_point, cauchy=self.cauchy, scale=self.scale, rescaled=self.rescaled, recommendation_rule=self.recommendation_rule, ) res = _nevergrad_internal( problem=problem, x0=x0, configured_optimizer=configured_optimizer, stopping_maxfun=self.stopping_maxfun, n_cores=self.n_cores, seed=self.seed, sigma=self.sigma, nonlinear_constraints=problem.nonlinear_constraints, ) return res @mark.minimizer( name="nevergrad_ngopt", solver_type=AggregationLevel.SCALAR, is_available=IS_NEVERGRAD_INSTALLED, is_global=True, needs_jac=False, needs_hess=False, needs_bounds=False, supports_parallelism=True, supports_bounds=True, supports_infinite_bounds=False, supports_linear_constraints=False, supports_nonlinear_constraints=False, disable_history=False, ) @dataclass(frozen=True) class NevergradNGOpt(Algorithm): """Minimize a scalar function using a Meta Optimizer from Nevergrad. These are meta-optimizers that intelligently combine multiple different optimization algorithms to solve a problem. The specific portfolio of optimizers can be selected via the `optimizer` parameter. 
""" optimizer: Literal[ "NGOpt", "NGOpt4", "NGOpt8", "NGOpt10", "NGOpt12", "NGOpt13", "NGOpt14", "NGOpt15", "NGOpt16", "NGOpt21", "NGOpt36", "NGOpt38", "NGOpt39", "NGOptRW", "NGOptF", "NGOptF2", "NGOptF3", "NGOptF5", "NgIoh2", "NgIoh3", "NgIoh4", "NgIoh5", "NgIoh6", "NgIoh7", "NgIoh11", "NgIoh14", "NgIoh13", "NgIoh15", "NgIoh12", "NgIoh16", "NgIoh17", "NgIoh21", "NgIoh20", "NgIoh19", "NgIoh18", "NgIoh10", "NgIoh9", "NgIoh8", "NgIoh12b", "NgIoh13b", "NgIoh14b", "NgIoh15b", "NgDS", "NgDS2", "NGDSRW", "NGO", "NgIohRW2", "NgIohTuned", "CSEC", "CSEC10", "CSEC11", "Wiz", ] = "NGOpt" """The specific Nevergrad meta-optimizer to use. Each option is a portfolio of different algorithms. """ stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL """Maximum number of function evaluations before termination.""" n_cores: PositiveInt = 1 """Number of cores to use for parallel function evaluation.""" seed: int | None = None """Seed for the random number generator for reproducibility.""" sigma: float | None = None """Standard deviation for sampling initial population from N(0, σ²)in case bounds are not provided.""" def _solve_internal_problem( self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] ) -> InternalOptimizeResult: if not IS_NEVERGRAD_INSTALLED: raise NotInstalledError(NEVERGRAD_NOT_INSTALLED_ERROR) import nevergrad as ng configured_optimizer = getattr(ng.optimizers, self.optimizer) res = _nevergrad_internal( problem=problem, x0=x0, configured_optimizer=configured_optimizer, stopping_maxfun=self.stopping_maxfun, n_cores=self.n_cores, seed=self.seed, sigma=self.sigma, nonlinear_constraints=problem.nonlinear_constraints, ) return res @mark.minimizer( name="nevergrad_meta", solver_type=AggregationLevel.SCALAR, is_available=IS_NEVERGRAD_INSTALLED, is_global=True, needs_jac=False, needs_hess=False, needs_bounds=False, supports_parallelism=True, supports_bounds=True, supports_infinite_bounds=False, supports_linear_constraints=False, 
    supports_nonlinear_constraints=False,
    disable_history=False,
)
@dataclass(frozen=True)
class NevergradMeta(Algorithm):
    """Minimize a scalar function using a Meta Optimizer from Nevergrad.

    This algorithm utilizes a combination of local and global optimizers to find
    the best solution. The specific portfolio of optimizers can be selected via
    the `optimizer` parameter.

    """

    optimizer: Literal[
        "MultiBFGSPlus",
        "LogMultiBFGSPlus",
        "SqrtMultiBFGSPlus",
        "MultiCobylaPlus",
        "MultiSQPPlus",
        "BFGSCMAPlus",
        "LogBFGSCMAPlus",
        "SqrtBFGSCMAPlus",
        "SQPCMAPlus",
        "LogSQPCMAPlus",
        "SqrtSQPCMAPlus",
        "MultiBFGS",
        "LogMultiBFGS",
        "SqrtMultiBFGS",
        "MultiCobyla",
        "ForceMultiCobyla",
        "MultiSQP",
        "BFGSCMA",
        "LogBFGSCMA",
        "SqrtBFGSCMA",
        "SQPCMA",
        "LogSQPCMA",
        "SqrtSQPCMA",
        "FSQPCMA",
        "F2SQPCMA",
        "F3SQPCMA",
        "MultiDiscrete",
        "CMandAS2",
        "CMandAS3",
        "MetaCMA",
        "CMA",
        "PCEDA",
        "MPCEDA",
        "MEDA",
        "NoisyBandit",
        "Shiwa",
        "Carola3",
    ] = "Shiwa"
    """The specific Nevergrad meta-optimizer to use.

    Each option is a portfolio of different local and global algorithms.

    """

    stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL
    """Maximum number of function evaluations before termination."""

    n_cores: PositiveInt = 1
    """Number of cores to use for parallel function evaluation."""

    seed: int | None = None
    """Seed for the random number generator for reproducibility."""

    sigma: float | None = None
    """Standard deviation for sampling initial population from N(0, σ²) in case
    bounds are not provided."""

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        # Import guard: nevergrad is an optional dependency.
        if not IS_NEVERGRAD_INSTALLED:
            raise NotInstalledError(NEVERGRAD_NOT_INSTALLED_ERROR)
        import nevergrad as ng

        # Look up the selected meta-optimizer class by name.
        configured_optimizer = getattr(ng.optimizers, self.optimizer)
        res = _nevergrad_internal(
            problem=problem,
            x0=x0,
            configured_optimizer=configured_optimizer,
            stopping_maxfun=self.stopping_maxfun,
            n_cores=self.n_cores,
            seed=self.seed,
            sigma=self.sigma,
            nonlinear_constraints=problem.nonlinear_constraints,
        )
        return res


def _nevergrad_internal(
    problem: InternalOptimizationProblem,
    x0: NDArray[np.float64],
    n_cores: int,
    configured_optimizer: ConfiguredOptimizer,
    stopping_maxfun: int,
    seed: int | None,
    sigma: float | None,
    nonlinear_constraints: list[dict[str, Any]] | None,
) -> InternalOptimizeResult:
    """Internal helper function for nevergrad.

    Handle the optimization loop.

    Args:
        problem (InternalOptimizationProblem): Internal optimization problem to
            solve.
        x0 (np.ndarray): Initial parameter vector of shape (n_params,).
        n_cores (int): Number of processes used to parallelize the function
            evaluations.
        configured_optimizer (ConfiguredOptimizer): Nevergrad optimizer instance
            configured with options.
        stopping_maxfun (int): Maximum number of function evaluations.
        seed (int): Random seed for reproducibility. Defaults to None.
        sigma (float): Standard deviation used to sample the initial population
            when no bounds are provided. Defaults to None.
        nonlinear_constraints (list): Nonlinear constraints of the problem.
            Currently ignored (see the commented-out handling below).
    Returns:
        InternalOptimizeResult: Internal optimization result

    """
    import nevergrad as ng

    param = ng.p.Array(
        init=x0,
        lower=problem.bounds.lower,
        upper=problem.bounds.upper,
    )
    instrum = ng.p.Instrumentation(param)

    # In case bounds are not provided, the initial population is sampled
    # from a gaussian with mean = 0 and sd = 1,
    # which can be set through this method.
    param.set_mutation(sigma=sigma)

    if seed is not None:
        instrum.random_state.seed(seed)

    optimizer = configured_optimizer(
        parametrization=instrum, budget=stopping_maxfun, num_workers=n_cores
    )

    ### Skip handling of non_linear constraints until improve constraint handling.
    # if nonlinear_constraints:
    #     constraints = _process_nonlinear_constraints(nonlinear_constraints)
    ###

    # optimization loop using the ask-and-tell interface
    while optimizer.num_ask < stopping_maxfun:
        # Ask for at most n_cores candidates, never exceeding the remaining budget.
        x_list = [
            optimizer.ask()
            for _ in range(min(n_cores, stopping_maxfun - optimizer.num_ask))
        ]
        # x.value[0][0] unwraps the Array argument from the Instrumentation.
        losses = problem.batch_fun([x.value[0][0] for x in x_list], n_cores=n_cores)
        if not nonlinear_constraints:
            for x, loss in zip(x_list, losses, strict=True):
                optimizer.tell(x, loss)
        ### Skip handling of non_linear constraints until improve constraint handling.
        # else:
        #     constraint_violations = _batch_constraint_evaluations(
        #         constraints, [x.value[0][0] for x in x_list], n_cores
        #     )
        #     for x, loss, cv in zip(x_list, losses, constraint_violations, strict=True):
        #         optimizer.tell(x, loss, cv)
        ###

    recommendation = optimizer.provide_recommendation()
    best_x = recommendation.value[0][0]
    loss = recommendation.loss

    # In case of CMA, loss is not provided by the optimizer, in that case,
    # evaluate it manually using problem.fun
    if loss is None:
        loss = problem.fun(best_x)

    result = InternalOptimizeResult(
        x=best_x,
        fun=loss,
        success=True,
        n_fun_evals=optimizer.num_ask,
        n_jac_evals=0,
        n_hess_evals=0,
    )

    return result


### Skip handling of non_linear constraints until improve constraint handling.
# def _process_nonlinear_constraints( # constraints: list[dict[str, Any]], # ) -> list[dict[str, Any]]: # """Process stacked inequality constraints as single constraints. # Returns a list of single constraints. # """ # processed_constraints = [] # for c in constraints: # new = _vector_to_list_of_scalar(c) # processed_constraints.extend(new) # return processed_constraints # def _get_constraint_evaluations( # constraints: list[dict[str, Any]], x: NDArray[np.float64] # ) -> list[NDArray[np.float64]]: # """In optimagic, inequality constraints are internally defined as g(x) >= 0. # Nevergrad uses h(x) <= 0 hence a sign flip is required. Passed equality # constraints are treated as inequality constraints with lower bound equal to # value. Return a list of constraint evaluations at x. # """ # results = [-c["fun"](x) for c in constraints] # results = [np.atleast_1d(i) for i in results] # return results # def _batch_constraint_evaluations( # constraints: list[dict[str, Any]], x_list: list[Any], n_cores: int # ) -> list[list[NDArray[np.float64]]]: # """Batch version of _get_constraint_evaluations.""" # batch = process_batch_evaluator("joblib") # func = partial(_get_constraint_evaluations, constraints) # results = batch(func=func, arguments=[x for x in x_list], n_cores=n_cores) # return results ### ================================================ FILE: src/optimagic/optimizers/nlopt_optimizers.py ================================================ """Implement `nlopt` algorithms. The documentation is heavily based on (nlopt documentation)[nlopt.readthedocs.io]. 
""" from dataclasses import dataclass import numpy as np from numpy.typing import NDArray from optimagic import mark from optimagic.config import IS_NLOPT_INSTALLED from optimagic.optimization.algo_options import ( CONVERGENCE_FTOL_ABS, CONVERGENCE_FTOL_REL, CONVERGENCE_XTOL_ABS, CONVERGENCE_XTOL_REL, STOPPING_MAXFUN, STOPPING_MAXFUN_GLOBAL, ) from optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult from optimagic.optimization.internal_optimization_problem import ( InternalOptimizationProblem, ) from optimagic.parameters.nonlinear_constraints import ( equality_as_inequality_constraints, ) from optimagic.typing import ( AggregationLevel, NonNegativeFloat, PositiveInt, ) if IS_NLOPT_INSTALLED: import nlopt @mark.minimizer( name="nlopt_bobyqa", solver_type=AggregationLevel.SCALAR, is_available=IS_NLOPT_INSTALLED, is_global=False, needs_jac=False, needs_hess=False, needs_bounds=False, supports_parallelism=False, supports_bounds=True, supports_infinite_bounds=True, supports_linear_constraints=False, supports_nonlinear_constraints=False, disable_history=False, ) @dataclass(frozen=True) class NloptBOBYQA(Algorithm): convergence_xtol_rel: NonNegativeFloat = CONVERGENCE_XTOL_REL convergence_xtol_abs: NonNegativeFloat = CONVERGENCE_XTOL_ABS convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL convergence_ftol_abs: NonNegativeFloat = CONVERGENCE_FTOL_ABS stopping_maxfun: PositiveInt = STOPPING_MAXFUN def _solve_internal_problem( self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] ) -> InternalOptimizeResult: res = _minimize_nlopt( problem=problem, x0=x0, is_global=self.algo_info.is_global, convergence_xtol_rel=self.convergence_xtol_rel, convergence_xtol_abs=self.convergence_xtol_abs, convergence_ftol_rel=self.convergence_ftol_rel, convergence_ftol_abs=self.convergence_ftol_abs, stopping_max_eval=self.stopping_maxfun, algorithm=nlopt.LN_BOBYQA, ) return res @mark.minimizer( name="nlopt_neldermead", 
solver_type=AggregationLevel.SCALAR,
    is_available=IS_NLOPT_INSTALLED,
    is_global=False,
    needs_jac=False,
    needs_hess=False,
    needs_bounds=False,
    supports_parallelism=False,
    supports_bounds=True,
    supports_infinite_bounds=True,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=False,
    disable_history=False,
)
@dataclass(frozen=True)
class NloptNelderMead(Algorithm):
    """Local derivative-free simplex optimizer wrapping nlopt's LN_NELDERMEAD."""

    # NOTE(review): unlike the other nlopt wrappers in this file, this class
    # exposes no convergence_ftol_rel option — confirm this is intentional.
    convergence_xtol_rel: NonNegativeFloat = CONVERGENCE_XTOL_REL
    convergence_xtol_abs: NonNegativeFloat = CONVERGENCE_XTOL_ABS
    convergence_ftol_abs: NonNegativeFloat = CONVERGENCE_FTOL_ABS
    stopping_maxfun: PositiveInt = STOPPING_MAXFUN

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        """Delegate to the shared nlopt driver with the Nelder-Mead algorithm."""
        res = _minimize_nlopt(
            problem=problem,
            x0=x0,
            is_global=self.algo_info.is_global,
            convergence_xtol_rel=self.convergence_xtol_rel,
            convergence_xtol_abs=self.convergence_xtol_abs,
            convergence_ftol_abs=self.convergence_ftol_abs,
            stopping_max_eval=self.stopping_maxfun,
            algorithm=nlopt.LN_NELDERMEAD,
        )
        return res


@mark.minimizer(
    name="nlopt_praxis",
    solver_type=AggregationLevel.SCALAR,
    is_available=IS_NLOPT_INSTALLED,
    is_global=False,
    needs_jac=False,
    needs_hess=False,
    needs_bounds=False,
    supports_parallelism=False,
    supports_bounds=False,
    supports_infinite_bounds=False,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=False,
    disable_history=False,
)
@dataclass(frozen=True)
class NloptPRAXIS(Algorithm):
    """Local derivative-free optimizer wrapping nlopt's LN_PRAXIS (no bounds support)."""

    convergence_xtol_rel: NonNegativeFloat = CONVERGENCE_XTOL_REL
    convergence_xtol_abs: NonNegativeFloat = CONVERGENCE_XTOL_ABS
    convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL
    convergence_ftol_abs: NonNegativeFloat = CONVERGENCE_FTOL_ABS
    stopping_maxfun: PositiveInt = STOPPING_MAXFUN

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        """Delegate to the shared nlopt driver with the PRAXIS algorithm."""
        res = _minimize_nlopt(
            problem=problem,
            x0=x0,
            is_global=self.algo_info.is_global,
convergence_xtol_rel=self.convergence_xtol_rel,
            convergence_xtol_abs=self.convergence_xtol_abs,
            convergence_ftol_rel=self.convergence_ftol_rel,
            convergence_ftol_abs=self.convergence_ftol_abs,
            stopping_max_eval=self.stopping_maxfun,
            algorithm=nlopt.LN_PRAXIS,
        )
        return res


@mark.minimizer(
    name="nlopt_cobyla",
    solver_type=AggregationLevel.SCALAR,
    is_available=IS_NLOPT_INSTALLED,
    is_global=False,
    needs_jac=False,
    needs_hess=False,
    needs_bounds=False,
    supports_parallelism=False,
    supports_bounds=True,
    supports_infinite_bounds=True,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=True,
    disable_history=False,
)
@dataclass(frozen=True)
class NloptCOBYLA(Algorithm):
    """Local derivative-free optimizer wrapping nlopt's LN_COBYLA.

    Supports nonlinear constraints (see the decorator flags above).
    """

    convergence_xtol_rel: NonNegativeFloat = CONVERGENCE_XTOL_REL
    convergence_xtol_abs: NonNegativeFloat = CONVERGENCE_XTOL_ABS
    convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL
    convergence_ftol_abs: NonNegativeFloat = CONVERGENCE_FTOL_ABS
    stopping_maxfun: PositiveInt = STOPPING_MAXFUN

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        """Delegate to the shared nlopt driver with the COBYLA algorithm."""
        res = _minimize_nlopt(
            problem=problem,
            x0=x0,
            is_global=self.algo_info.is_global,
            convergence_xtol_rel=self.convergence_xtol_rel,
            convergence_xtol_abs=self.convergence_xtol_abs,
            convergence_ftol_rel=self.convergence_ftol_rel,
            convergence_ftol_abs=self.convergence_ftol_abs,
            stopping_max_eval=self.stopping_maxfun,
            algorithm=nlopt.LN_COBYLA,
        )
        return res


@mark.minimizer(
    name="nlopt_sbplx",
    solver_type=AggregationLevel.SCALAR,
    is_available=IS_NLOPT_INSTALLED,
    is_global=False,
    needs_jac=False,
    needs_hess=False,
    needs_bounds=False,
    supports_parallelism=False,
    supports_bounds=True,
    supports_infinite_bounds=True,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=False,
    disable_history=False,
)
@dataclass(frozen=True)
class NloptSbplx(Algorithm):
    """Local derivative-free optimizer wrapping nlopt's LN_SBPLX (Subplex)."""

    convergence_xtol_rel: NonNegativeFloat = CONVERGENCE_XTOL_REL
    convergence_xtol_abs: NonNegativeFloat = CONVERGENCE_XTOL_ABS
convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL
    convergence_ftol_abs: NonNegativeFloat = CONVERGENCE_FTOL_ABS
    stopping_maxfun: PositiveInt = STOPPING_MAXFUN

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        """Delegate to the shared nlopt driver with the Subplex algorithm."""
        res = _minimize_nlopt(
            problem=problem,
            x0=x0,
            is_global=self.algo_info.is_global,
            convergence_xtol_rel=self.convergence_xtol_rel,
            convergence_xtol_abs=self.convergence_xtol_abs,
            convergence_ftol_rel=self.convergence_ftol_rel,
            convergence_ftol_abs=self.convergence_ftol_abs,
            stopping_max_eval=self.stopping_maxfun,
            algorithm=nlopt.LN_SBPLX,
        )
        return res


@mark.minimizer(
    name="nlopt_newuoa",
    solver_type=AggregationLevel.SCALAR,
    is_available=IS_NLOPT_INSTALLED,
    is_global=False,
    needs_jac=False,
    needs_hess=False,
    needs_bounds=False,
    supports_parallelism=False,
    supports_bounds=True,
    supports_infinite_bounds=True,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=False,
    disable_history=False,
)
@dataclass(frozen=True)
class NloptNEWUOA(Algorithm):
    """Local derivative-free optimizer wrapping nlopt's NEWUOA.

    Picks LN_NEWUOA_BOUND when any finite bound is present, otherwise the
    unconstrained LN_NEWUOA.
    """

    convergence_xtol_rel: NonNegativeFloat = CONVERGENCE_XTOL_REL
    convergence_xtol_abs: NonNegativeFloat = CONVERGENCE_XTOL_ABS
    convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL
    convergence_ftol_abs: NonNegativeFloat = CONVERGENCE_FTOL_ABS
    stopping_maxfun: PositiveInt = STOPPING_MAXFUN

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        """Choose the bounded or unbounded NEWUOA variant, then solve."""
        # No bound arrays at all -> plain NEWUOA.
        if problem.bounds.lower is None or problem.bounds.upper is None:
            algo = nlopt.LN_NEWUOA
        # Any finite bound entry -> the bound-constrained variant.
        elif np.any(np.isfinite(problem.bounds.lower)) or np.any(
            np.isfinite(problem.bounds.upper)
        ):
            algo = nlopt.LN_NEWUOA_BOUND
        else:
            algo = nlopt.LN_NEWUOA
        res = _minimize_nlopt(
            problem=problem,
            x0=x0,
            is_global=self.algo_info.is_global,
            convergence_xtol_rel=self.convergence_xtol_rel,
            convergence_xtol_abs=self.convergence_xtol_abs,
            convergence_ftol_rel=self.convergence_ftol_rel,
convergence_ftol_abs=self.convergence_ftol_abs,
            stopping_max_eval=self.stopping_maxfun,
            algorithm=algo,
        )
        return res


@mark.minimizer(
    name="nlopt_tnewton",
    solver_type=AggregationLevel.SCALAR,
    is_available=IS_NLOPT_INSTALLED,
    is_global=False,
    needs_jac=True,
    needs_hess=False,
    needs_bounds=False,
    supports_parallelism=False,
    supports_bounds=True,
    supports_infinite_bounds=True,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=False,
    disable_history=False,
)
@dataclass(frozen=True)
class NloptTNewton(Algorithm):
    """Gradient-based local optimizer wrapping nlopt's LD_TNEWTON."""

    convergence_xtol_rel: NonNegativeFloat = CONVERGENCE_XTOL_REL
    convergence_xtol_abs: NonNegativeFloat = CONVERGENCE_XTOL_ABS
    convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL
    convergence_ftol_abs: NonNegativeFloat = CONVERGENCE_FTOL_ABS
    stopping_maxfun: PositiveInt = STOPPING_MAXFUN

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        """Delegate to the shared nlopt driver with the truncated-Newton algorithm."""
        res = _minimize_nlopt(
            problem=problem,
            x0=x0,
            is_global=self.algo_info.is_global,
            convergence_xtol_rel=self.convergence_xtol_rel,
            convergence_xtol_abs=self.convergence_xtol_abs,
            convergence_ftol_rel=self.convergence_ftol_rel,
            convergence_ftol_abs=self.convergence_ftol_abs,
            stopping_max_eval=self.stopping_maxfun,
            algorithm=nlopt.LD_TNEWTON,
        )
        return res


@mark.minimizer(
    name="nlopt_lbfgsb",
    solver_type=AggregationLevel.SCALAR,
    is_available=IS_NLOPT_INSTALLED,
    is_global=False,
    needs_jac=True,
    needs_hess=False,
    needs_bounds=False,
    supports_parallelism=False,
    supports_bounds=True,
    supports_infinite_bounds=True,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=False,
    disable_history=False,
)
@dataclass(frozen=True)
class NloptLBFGSB(Algorithm):
    """Gradient-based local optimizer wrapping nlopt's LD_LBFGS."""

    convergence_xtol_rel: NonNegativeFloat = CONVERGENCE_XTOL_REL
    convergence_xtol_abs: NonNegativeFloat = CONVERGENCE_XTOL_ABS
    convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL
    convergence_ftol_abs: NonNegativeFloat = CONVERGENCE_FTOL_ABS
    stopping_maxfun: PositiveInt =
STOPPING_MAXFUN

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        """Delegate to the shared nlopt driver with the LBFGS algorithm."""
        res = _minimize_nlopt(
            problem=problem,
            x0=x0,
            is_global=self.algo_info.is_global,
            convergence_xtol_rel=self.convergence_xtol_rel,
            convergence_xtol_abs=self.convergence_xtol_abs,
            convergence_ftol_rel=self.convergence_ftol_rel,
            convergence_ftol_abs=self.convergence_ftol_abs,
            stopping_max_eval=self.stopping_maxfun,
            algorithm=nlopt.LD_LBFGS,
        )
        return res


@mark.minimizer(
    name="nlopt_ccsaq",
    solver_type=AggregationLevel.SCALAR,
    is_available=IS_NLOPT_INSTALLED,
    is_global=False,
    needs_jac=True,
    needs_hess=False,
    needs_bounds=False,
    supports_parallelism=False,
    supports_bounds=True,
    supports_infinite_bounds=True,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=False,
    disable_history=False,
)
@dataclass(frozen=True)
class NloptCCSAQ(Algorithm):
    """Gradient-based local optimizer wrapping nlopt's LD_CCSAQ."""

    convergence_xtol_rel: NonNegativeFloat = CONVERGENCE_XTOL_REL
    convergence_xtol_abs: NonNegativeFloat = CONVERGENCE_XTOL_ABS
    convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL
    convergence_ftol_abs: NonNegativeFloat = CONVERGENCE_FTOL_ABS
    stopping_maxfun: PositiveInt = STOPPING_MAXFUN

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        """Delegate to the shared nlopt driver with the CCSAQ algorithm."""
        res = _minimize_nlopt(
            problem=problem,
            x0=x0,
            is_global=self.algo_info.is_global,
            convergence_xtol_rel=self.convergence_xtol_rel,
            convergence_xtol_abs=self.convergence_xtol_abs,
            convergence_ftol_rel=self.convergence_ftol_rel,
            convergence_ftol_abs=self.convergence_ftol_abs,
            stopping_max_eval=self.stopping_maxfun,
            algorithm=nlopt.LD_CCSAQ,
        )
        return res


@mark.minimizer(
    name="nlopt_mma",
    solver_type=AggregationLevel.SCALAR,
    is_available=IS_NLOPT_INSTALLED,
    is_global=False,
    needs_jac=True,
    needs_hess=False,
    needs_bounds=False,
    supports_parallelism=False,
    supports_bounds=True,
    supports_infinite_bounds=True,
    supports_linear_constraints=False,
supports_nonlinear_constraints=True,
    disable_history=False,
)
@dataclass(frozen=True)
class NloptMMA(Algorithm):
    """Gradient-based local optimizer wrapping nlopt's LD_MMA.

    Equality constraints are converted to pairs of inequality constraints
    before being handed to nlopt (MMA only handles inequalities).
    """

    convergence_xtol_rel: NonNegativeFloat = CONVERGENCE_XTOL_REL
    convergence_xtol_abs: NonNegativeFloat = CONVERGENCE_XTOL_ABS
    convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL
    convergence_ftol_abs: NonNegativeFloat = CONVERGENCE_FTOL_ABS
    stopping_maxfun: PositiveInt = STOPPING_MAXFUN

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        """Convert equality constraints, then run MMA via the shared driver."""
        nonlinear_constraints = equality_as_inequality_constraints(
            problem.nonlinear_constraints
        )
        res = _minimize_nlopt(
            problem=problem,
            x0=x0,
            is_global=self.algo_info.is_global,
            convergence_xtol_rel=self.convergence_xtol_rel,
            convergence_xtol_abs=self.convergence_xtol_abs,
            convergence_ftol_rel=self.convergence_ftol_rel,
            convergence_ftol_abs=self.convergence_ftol_abs,
            stopping_max_eval=self.stopping_maxfun,
            algorithm=nlopt.LD_MMA,
            nonlinear_constraints=nonlinear_constraints,
        )
        return res


@mark.minimizer(
    name="nlopt_var",
    solver_type=AggregationLevel.SCALAR,
    is_available=IS_NLOPT_INSTALLED,
    is_global=False,
    needs_jac=True,
    needs_hess=False,
    needs_bounds=False,
    supports_parallelism=False,
    supports_bounds=True,
    supports_infinite_bounds=True,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=False,
    disable_history=False,
)
@dataclass(frozen=True)
class NloptVAR(Algorithm):
    """Gradient-based local optimizer wrapping nlopt's shifted limited-memory
    variable-metric methods (LD_VAR1 / LD_VAR2)."""

    convergence_xtol_rel: NonNegativeFloat = CONVERGENCE_XTOL_REL
    convergence_xtol_abs: NonNegativeFloat = CONVERGENCE_XTOL_ABS
    convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL
    convergence_ftol_abs: NonNegativeFloat = CONVERGENCE_FTOL_ABS
    stopping_maxfun: PositiveInt = STOPPING_MAXFUN
    # True -> rank-1 update (LD_VAR1); False -> rank-2 update (LD_VAR2).
    rank_1_update: bool = True

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        """Pick the rank-1 or rank-2 variant, then solve via the shared driver."""
        if self.rank_1_update:
            algo = nlopt.LD_VAR1
        else:
            algo = nlopt.LD_VAR2
        res = _minimize_nlopt(
            problem=problem,
            x0=x0,
is_global=self.algo_info.is_global,
            convergence_xtol_rel=self.convergence_xtol_rel,
            convergence_xtol_abs=self.convergence_xtol_abs,
            convergence_ftol_rel=self.convergence_ftol_rel,
            convergence_ftol_abs=self.convergence_ftol_abs,
            stopping_max_eval=self.stopping_maxfun,
            algorithm=algo,
        )
        return res


@mark.minimizer(
    name="nlopt_slsqp",
    solver_type=AggregationLevel.SCALAR,
    is_available=IS_NLOPT_INSTALLED,
    is_global=False,
    needs_jac=True,
    needs_hess=False,
    needs_bounds=False,
    supports_parallelism=False,
    supports_bounds=True,
    supports_infinite_bounds=True,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=True,
    disable_history=False,
)
@dataclass(frozen=True)
class NloptSLSQP(Algorithm):
    """Gradient-based local optimizer wrapping nlopt's LD_SLSQP.

    Supports nonlinear constraints (see the decorator flags above).
    """

    convergence_xtol_rel: NonNegativeFloat = CONVERGENCE_XTOL_REL
    convergence_xtol_abs: NonNegativeFloat = CONVERGENCE_XTOL_ABS
    convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL
    convergence_ftol_abs: NonNegativeFloat = CONVERGENCE_FTOL_ABS
    stopping_maxfun: PositiveInt = STOPPING_MAXFUN

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        """Delegate to the shared nlopt driver with the SLSQP algorithm."""
        res = _minimize_nlopt(
            problem=problem,
            x0=x0,
            is_global=self.algo_info.is_global,
            convergence_xtol_rel=self.convergence_xtol_rel,
            convergence_xtol_abs=self.convergence_xtol_abs,
            convergence_ftol_rel=self.convergence_ftol_rel,
            convergence_ftol_abs=self.convergence_ftol_abs,
            stopping_max_eval=self.stopping_maxfun,
            algorithm=nlopt.LD_SLSQP,
        )
        return res


@mark.minimizer(
    name="nlopt_direct",
    solver_type=AggregationLevel.SCALAR,
    is_available=IS_NLOPT_INSTALLED,
    is_global=True,
    needs_jac=False,
    needs_hess=False,
    needs_bounds=True,
    supports_parallelism=False,
    supports_bounds=True,
    supports_infinite_bounds=False,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=False,
    disable_history=False,
)
@dataclass(frozen=True)
class NloptDirect(Algorithm):
    """Global optimizer wrapping nlopt's DIRECT family of algorithms."""

    convergence_xtol_rel: NonNegativeFloat = CONVERGENCE_XTOL_REL
    convergence_xtol_abs: NonNegativeFloat =
CONVERGENCE_XTOL_ABS convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL convergence_ftol_abs: NonNegativeFloat = CONVERGENCE_FTOL_ABS stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL locally_biased: bool = False random_search: bool = False unscaled_bounds: bool = False def _solve_internal_problem( self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] ) -> InternalOptimizeResult: if ( not self.locally_biased and not self.random_search and not self.unscaled_bounds ): algo = nlopt.GN_DIRECT elif ( self.locally_biased and not self.random_search and not self.unscaled_bounds ): algo = nlopt.GN_DIRECT_L elif self.locally_biased and not self.random_search and self.unscaled_bounds: algo = nlopt.GN_DIRECT_L_NOSCAL elif self.locally_biased and self.random_search and not self.unscaled_bounds: algo = nlopt.GN_DIRECT_L_RAND elif self.locally_biased and self.random_search and self.unscaled_bounds: algo = nlopt.GN_DIRECT_L_RAND_NOSCAL elif ( not self.locally_biased and not self.random_search and self.unscaled_bounds ): algo = nlopt.GN_DIRECT_NOSCAL res = _minimize_nlopt( problem=problem, x0=x0, is_global=self.algo_info.is_global, convergence_xtol_rel=self.convergence_xtol_rel, convergence_xtol_abs=self.convergence_xtol_abs, convergence_ftol_rel=self.convergence_ftol_rel, convergence_ftol_abs=self.convergence_ftol_abs, stopping_max_eval=self.stopping_maxfun, algorithm=algo, ) return res @mark.minimizer( name="nlopt_esch", solver_type=AggregationLevel.SCALAR, is_available=IS_NLOPT_INSTALLED, is_global=True, needs_jac=False, needs_hess=False, needs_bounds=True, supports_parallelism=False, supports_bounds=True, supports_infinite_bounds=False, supports_linear_constraints=False, supports_nonlinear_constraints=False, disable_history=False, ) @dataclass(frozen=True) class NloptESCH(Algorithm): convergence_xtol_rel: NonNegativeFloat = CONVERGENCE_XTOL_REL convergence_xtol_abs: NonNegativeFloat = CONVERGENCE_XTOL_ABS convergence_ftol_rel: NonNegativeFloat = 
CONVERGENCE_FTOL_REL
    convergence_ftol_abs: NonNegativeFloat = CONVERGENCE_FTOL_ABS
    stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        """Delegate to the shared nlopt driver with the ESCH algorithm."""
        res = _minimize_nlopt(
            problem=problem,
            x0=x0,
            is_global=self.algo_info.is_global,
            convergence_xtol_rel=self.convergence_xtol_rel,
            convergence_xtol_abs=self.convergence_xtol_abs,
            convergence_ftol_rel=self.convergence_ftol_rel,
            convergence_ftol_abs=self.convergence_ftol_abs,
            stopping_max_eval=self.stopping_maxfun,
            algorithm=nlopt.GN_ESCH,
        )
        return res


@mark.minimizer(
    name="nlopt_isres",
    solver_type=AggregationLevel.SCALAR,
    is_available=IS_NLOPT_INSTALLED,
    is_global=True,
    needs_jac=False,
    needs_hess=False,
    needs_bounds=True,
    supports_parallelism=False,
    supports_bounds=True,
    supports_infinite_bounds=False,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=True,
    disable_history=False,
)
@dataclass(frozen=True)
class NloptISRES(Algorithm):
    """Global stochastic optimizer wrapping nlopt's GN_ISRES.

    Supports nonlinear constraints (see the decorator flags above).
    """

    convergence_xtol_rel: NonNegativeFloat = CONVERGENCE_XTOL_REL
    convergence_xtol_abs: NonNegativeFloat = CONVERGENCE_XTOL_ABS
    convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL
    convergence_ftol_abs: NonNegativeFloat = CONVERGENCE_FTOL_ABS
    stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        """Delegate to the shared nlopt driver with the ISRES algorithm."""
        res = _minimize_nlopt(
            problem=problem,
            x0=x0,
            is_global=self.algo_info.is_global,
            convergence_xtol_rel=self.convergence_xtol_rel,
            convergence_xtol_abs=self.convergence_xtol_abs,
            convergence_ftol_rel=self.convergence_ftol_rel,
            convergence_ftol_abs=self.convergence_ftol_abs,
            stopping_max_eval=self.stopping_maxfun,
            algorithm=nlopt.GN_ISRES,
        )
        return res


@mark.minimizer(
    name="nlopt_crs2_lm",
    solver_type=AggregationLevel.SCALAR,
    is_available=IS_NLOPT_INSTALLED,
    is_global=True,
    needs_jac=False,
    needs_hess=False,
    needs_bounds=True,
supports_parallelism=False,
    supports_bounds=True,
    supports_infinite_bounds=False,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=False,
    disable_history=False,
)
@dataclass(frozen=True)
class NloptCRS2LM(Algorithm):
    """Global optimizer wrapping nlopt's controlled random search GN_CRS2_LM."""

    convergence_xtol_rel: NonNegativeFloat = CONVERGENCE_XTOL_REL
    convergence_xtol_abs: NonNegativeFloat = CONVERGENCE_XTOL_ABS
    convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL
    convergence_ftol_abs: NonNegativeFloat = CONVERGENCE_FTOL_ABS
    stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL
    # None -> use the default of 10 * (n_params + 1), computed at solve time.
    population_size: PositiveInt | None = None

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        """Resolve the population size default, then solve via the shared driver."""
        if self.population_size is None:
            population_size = 10 * (len(x0) + 1)
        else:
            population_size = self.population_size
        res = _minimize_nlopt(
            problem=problem,
            x0=x0,
            is_global=self.algo_info.is_global,
            convergence_xtol_rel=self.convergence_xtol_rel,
            convergence_xtol_abs=self.convergence_xtol_abs,
            convergence_ftol_rel=self.convergence_ftol_rel,
            convergence_ftol_abs=self.convergence_ftol_abs,
            stopping_max_eval=self.stopping_maxfun,
            algorithm=nlopt.GN_CRS2_LM,
            population_size=population_size,
        )
        return res


def _minimize_nlopt(
    problem,
    x0,
    algorithm,
    is_global,
    *,
    convergence_xtol_rel=None,
    convergence_xtol_abs=None,
    convergence_ftol_rel=None,
    convergence_ftol_abs=None,
    stopping_max_eval=None,
    population_size=None,
    nonlinear_constraints=None,
):
    """Run the actual nlopt optimization and set the relevant attributes.

    Shared driver for all wrappers in this module. ``None`` options are simply
    not set on the nlopt object, leaving nlopt's own defaults in place.
    """

    def func(x, grad):
        # nlopt passes a writable ``grad`` array; a non-empty one signals that
        # a gradient is requested for this evaluation.
        if grad.size > 0:
            fun, jac = problem.fun_and_jac(x)
            grad[:] = jac
        else:
            fun = problem.fun(x)
        return fun

    if nonlinear_constraints is None:
        nonlinear_constraints = problem.nonlinear_constraints
    opt = nlopt.opt(algorithm, x0.shape[0])
    if convergence_ftol_rel is not None:
        opt.set_ftol_rel(convergence_ftol_rel)
    if convergence_ftol_abs is not None:
        opt.set_ftol_abs(convergence_ftol_abs)
    if convergence_xtol_rel is not None:
opt.set_xtol_rel(convergence_xtol_rel)
    if convergence_xtol_abs is not None:
        opt.set_xtol_abs(convergence_xtol_abs)
    if problem.bounds.lower is not None:
        opt.set_lower_bounds(problem.bounds.lower)
    if problem.bounds.upper is not None:
        opt.set_upper_bounds(problem.bounds.upper)
    if stopping_max_eval is not None:
        opt.set_maxeval(stopping_max_eval)
    if population_size is not None:
        opt.set_population(population_size)
    if nonlinear_constraints:
        # Register equality and inequality constraints separately; nlopt uses
        # vector-valued ("m") constraints with per-component tolerances.
        for constr in _get_nlopt_constraints(nonlinear_constraints, filter_type="eq"):
            opt.add_equality_mconstraint(constr["fun"], constr["tol"])
        for constr in _get_nlopt_constraints(nonlinear_constraints, filter_type="ineq"):
            opt.add_inequality_mconstraint(constr["fun"], constr["tol"])
    opt.set_min_objective(func)
    solution_x = opt.optimize(x0)
    return _process_nlopt_results(opt, solution_x, is_global)


def _process_nlopt_results(nlopt_obj, solution_x, is_global):
    """Translate an nlopt result object into an InternalOptimizeResult.

    Maps nlopt's integer return codes to human-readable messages. For global
    optimizers an unsuccessful code is reported as ``success=None`` rather
    than ``False``, since global algorithms commonly stop on budget limits.
    """
    messages = {
        1: "Convergence achieved ",
        2: (
            "Optimizer stopped because maximum value of criterion function was reached"
        ),
        3: (
            "Optimizer stopped because convergence_ftol_rel or "
            "convergence_ftol_abs was reached"
        ),
        4: (
            "Optimizer stopped because convergence_xtol_rel or "
            "convergence_xtol_abs was reached"
        ),
        5: "Optimizer stopped because max_criterion_evaluations was reached",
        6: "Optimizer stopped because max running time was reached",
        -1: "Optimizer failed",
        -2: "Invalid arguments were passed",
        -3: "Memory error",
        -4: "Halted because roundoff errors limited progress",
        -5: "Halted because of user specified forced stop",
    }
    success = nlopt_obj.last_optimize_result() in [1, 2, 3, 4]
    if is_global and not success:
        success = None
    processed = InternalOptimizeResult(
        x=solution_x,
        fun=nlopt_obj.last_optimum_value(),
        n_fun_evals=nlopt_obj.get_numevals(),
        success=success,
        message=messages[nlopt_obj.last_optimize_result()],
    )
    return processed


def _get_nlopt_constraints(constraints, filter_type):
    """Transform internal nonlinear constraints to NLOPT readable format."""
    filtered = [c for c in
constraints if c["type"] == filter_type] nlopt_constraints = [_internal_to_nlopt_constaint(c) for c in filtered] return nlopt_constraints def _internal_to_nlopt_constaint(c): """Sign flip description: In optimagic, inequality constraints are internally defined as g(x) >= 0. NLOPT uses h(x) <= 0, which is why we need to flip the sign. """ tol = c["tol"] if np.isscalar(tol): tol = np.tile(tol, c["n_constr"]) def _constraint(result, x, grad): result[:] = -c["fun"](x) # see docstring for sign flip if grad.size > 0: grad[:] = -c["jac"](x) # see docstring for sign flip new_constr = { "fun": _constraint, "tol": tol, } return new_constr ================================================ FILE: src/optimagic/optimizers/pounders.py ================================================ """Implement the POUNDERS algorithm.""" import warnings from dataclasses import dataclass from typing import Any, Literal import numpy as np from numpy.typing import NDArray from optimagic import mark from optimagic.config import DEFAULT_N_CORES from optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult from optimagic.optimization.internal_optimization_problem import ( InternalOptimizationProblem, ) from optimagic.optimizers._pounders.pounders_auxiliary import ( add_accepted_point_to_residual_model, add_geomtery_points_to_make_main_model_fully_linear, create_initial_residual_model, create_main_from_residual_model, evaluate_residual_model, find_affine_points, fit_residual_model, get_feature_matrices_residual_model, get_last_model_indices_and_check_for_repeated_model, solve_subproblem, update_main_model_with_new_accepted_x, update_residual_model, update_residual_model_with_new_accepted_x, update_trustregion_radius, ) from optimagic.optimizers._pounders.pounders_history import LeastSquaresHistory from optimagic.typing import ( AggregationLevel, NonNegativeFloat, PositiveFloat, PositiveInt, ) @mark.minimizer( name="pounders", solver_type=AggregationLevel.LEAST_SQUARES, 
is_available=True,
    is_global=False,
    needs_jac=False,
    needs_hess=False,
    needs_bounds=False,
    supports_parallelism=True,
    supports_bounds=True,
    supports_infinite_bounds=True,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=False,
    disable_history=False,
)
@dataclass(frozen=True)
class Pounders(Algorithm):
    """Derivative-free trust-region optimizer for nonlinear least squares
    (POUNDERS)."""

    convergence_gtol_abs: NonNegativeFloat = 1e-8
    convergence_gtol_rel: NonNegativeFloat = 1e-8
    # TODO: Why can this be a bool?
    convergence_gtol_scaled: NonNegativeFloat | bool = False
    # None -> 2 * n_params + 1, resolved in _solve_internal_problem.
    max_interpolation_points: PositiveInt | None = None
    # TODO: Why is this not higher?
    stopping_maxiter: PositiveInt = 2_000
    trustregion_initial_radius: PositiveFloat = 0.1
    trustregion_minimal_radius: PositiveFloat = 1e-6
    trustregion_maximal_radius: PositiveFloat = 1e6
    trustregion_shrinking_factor_not_successful: PositiveFloat = 0.5
    trustregion_expansion_factor_successful: PositiveFloat = 2
    theta1: PositiveFloat = 1e-5
    theta2: PositiveFloat = 1e-4
    trustregion_threshold_acceptance: NonNegativeFloat = 0
    trustregion_threshold_successful: NonNegativeFloat = 0.1
    # None -> sqrt(n_params), resolved in _solve_internal_problem.
    c1: NonNegativeFloat | None = None
    c2: NonNegativeFloat = 10
    trustregion_subproblem_solver: Literal[
        "bntr",
        "gqtpar",
    ] = "bntr"
    # User-provided entries override the defaults defined below.
    trustregion_subsolver_options: dict[str, Any] | None = None
    n_cores: PositiveInt = DEFAULT_N_CORES

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        """Resolve option defaults and dispatch to internal_solve_pounders."""
        if self.max_interpolation_points is None:
            max_interpolation_points = 2 * len(x0) + 1
        else:
            max_interpolation_points = self.max_interpolation_points
        if self.c1 is None:
            c1 = np.sqrt(x0.shape[0])
        else:
            c1 = self.c1
        if self.trustregion_subsolver_options is None:
            trustregion_subsolver_options = {}
        else:
            trustregion_subsolver_options = self.trustregion_subsolver_options
        default_options = {
            "conjugate_gradient_method": "trsbox",
            "maxiter": 50,
            "maxiter_gradient_descent": 5,
            "gtol_abs": 1e-8,
            "gtol_rel": 1e-8,
            "gtol_scaled": 0,
            "gtol_abs_cg": 1e-8,
            "gtol_rel_cg": 1e-6,
            "k_easy": 0.1,
            "k_hard": 0.2,
        }
        trustregion_subsolver_options = {
            **default_options,
            **trustregion_subsolver_options,
        }
        result = internal_solve_pounders(
            criterion=problem.fun,
            x0=x0,
            lower_bounds=problem.bounds.lower,
            upper_bounds=problem.bounds.upper,
            gtol_abs=self.convergence_gtol_abs,
            gtol_rel=self.convergence_gtol_rel,
            gtol_scaled=self.convergence_gtol_scaled,
            maxinterp=max_interpolation_points,
            maxiter=self.stopping_maxiter,
            delta=self.trustregion_initial_radius,
            delta_min=self.trustregion_minimal_radius,
            delta_max=self.trustregion_maximal_radius,
            gamma0=self.trustregion_shrinking_factor_not_successful,
            gamma1=self.trustregion_expansion_factor_successful,
            theta1=self.theta1,
            theta2=self.theta2,
            eta0=self.trustregion_threshold_acceptance,
            eta1=self.trustregion_threshold_successful,
            c1=c1,
            c2=self.c2,
            solver_sub=self.trustregion_subproblem_solver,
            conjugate_gradient_method_sub=trustregion_subsolver_options[
                "conjugate_gradient_method"
            ],
            maxiter_sub=trustregion_subsolver_options["maxiter"],
            maxiter_gradient_descent_sub=trustregion_subsolver_options[
                "maxiter_gradient_descent"
            ],
            gtol_abs_sub=trustregion_subsolver_options["gtol_abs"],
            gtol_rel_sub=trustregion_subsolver_options["gtol_rel"],
            gtol_scaled_sub=trustregion_subsolver_options["gtol_scaled"],
            gtol_abs_conjugate_gradient_sub=trustregion_subsolver_options[
                "gtol_abs_cg"
            ],
            gtol_rel_conjugate_gradient_sub=trustregion_subsolver_options[
                "gtol_rel_cg"
            ],
            k_easy_sub=trustregion_subsolver_options["k_easy"],
            k_hard_sub=trustregion_subsolver_options["k_hard"],
            batch_fun=problem.batch_fun,
            n_cores=self.n_cores,
        )
        return result


def internal_solve_pounders(
    criterion,
    x0,
    lower_bounds,
    upper_bounds,
    gtol_abs,
    gtol_rel,
    gtol_scaled,
    maxinterp,
    maxiter,
    delta,
    delta_min,
    delta_max,
    gamma0,
    gamma1,
    theta1,
    theta2,
    eta0,
    eta1,
    c1,
    c2,
    solver_sub,
    conjugate_gradient_method_sub,
    maxiter_sub,
    maxiter_gradient_descent_sub,
    gtol_abs_sub,
    gtol_rel_sub,
    gtol_scaled_sub,
    gtol_abs_conjugate_gradient_sub,
    gtol_rel_conjugate_gradient_sub,
k_easy_sub, k_hard_sub, batch_fun, n_cores, ): """Find the local minimum to a non-linear least-squares problem using POUNDERS. Args: criterion (callable): Function that returns criterion. x0 (np.ndarray): Initial guess for the parameter vector (starting points). lower_bounds (np.ndarray): 1d array of shape (n,) with lower bounds for the parameter vector x. upper_bounds (np.ndarray): 1d array of shape (n,) with upper bounds for the parameter vector x. gtol_abs (float): Convergence tolerance for the absolute gradient norm. Stop if norm of the gradient is less than this. gtol_rel (float): Convergence tolerance for the relative gradient norm. Stop if norm of the gradient relative to the criterion value is less than this. gtol_scaled (float): Convergence tolerance for the scaled gradient norm. Stop if norm of the gradient divided by norm of the gradient at the initial parameters is less than this. maxinterp (int): Maximum number of interpolation points. Default is `2 * n + 1`, where `n` is the length of the parameter vector. maxiter (int): Maximum number of iterations. If reached, terminate. delta (float): Delta, initial trust-region radius. delta_min (float): Minimal trust-region radius. delta_max (float): Maximal trust-region radius. gamma0 (float): Shrinking factor of the trust-region radius in case the solution vector of the suproblem is not accepted, but the model is fully linar (i.e. "valid"). gamma1 (float): Expansion factor of the trust-region radius in case the solution vector of the suproblem is accepted. theta1 (float): Threshold for adding the current candidate vector to the model. Function argument to find_affine_points(). theta2 (float): Threshold for adding the current candidate vector to the model. Argument to get_interpolation_matrices_residual_model(). eta0 (float): Threshold for accepting the solution vector of the trust-region subproblem as the best candidate. 
eta1 (float): Threshold for successfully accepting the solution vector of the trust-region subproblem as the best candidate. c1 (float): Treshold for accepting the norm of our current x candidate. Equal to sqrt(n) by default. Argument to find_affine_points() in case the input array *model_improving_points* is zero. c2 (int)): Treshold for accepting the norm of our current candidate vector. Equal to 10 by default. Argument to find_affine_points() in case the input array *model_improving_points* is not zero. solver_sub (str): Solver to use for the trust-region subproblem. Two internal solvers are supported: - "bntr": Bounded Newton Trust-Region (default, supports bound constraints) - "gqtpar": (does not support bound constraints) conjugate_gradient_method_sub (str): Method for computing the conjugate gradient step ("bntr"). Available conjugate gradient methods are: - "cg" - "steihaug_toint" - "trsbox" (default) maxiter_sub (int): Maximum number of iterations in the trust-region subproblem. maxiter_gradient_descent_sub (int): Maximum number of gradient descent iterations to perform ("bntr"). gtol_abs_sub (float): Convergence tolerance for the absolute gradient norm in the trust-region subproblem ("bntr"). gtol_rel_sub (float): Convergence tolerance for the relative gradient norm in the trust-region subproblem ("bntr"). gtol_scaled_sub (float): Convergence tolerance for the scaled gradient norm in the trust-region subproblem ("bntr"). gtol_abs_conjugate_gradient_sub (float): Convergence tolerance for the absolute gradient norm in the conjugate gradient step of the trust-region subproblem if "cg" is used as ``conjugate_gradient_method_sub`` ("bntr"). gtol_rel_conjugate_gradient_sub (float): Convergence tolerance for the relative gradient norm in the conjugate gradient step of the trust-region subproblem if "cg" is used as ``conjugate_gradient_method_sub`` ("bntr"). k_easy_sub (float): Stopping criterion for the "easy" case in the trust-region subproblem ("gqtpar"). 
k_hard_sub (float): Stopping criterion for the "hard" case in the trust-region subproblem ("gqtpar"). batch_evaluator (str or callable): Name of a pre-implemented batch evaluator (currently 'joblib' and 'pathos_mp') or callable with the same interface as the optimagic batch_evaluators. n_cores (int): Number of processes used to parallelize the function evaluations. Default is 1. Returns: (dict) Result dictionary containing: - solution_x (np.ndarray): Solution vector of shape (n,). - solution_criterion (np.ndarray): Values of the criterion function at the solution vector. Shape (n_obs,). - history_x (np.ndarray): Entire history of x. Shape (history.get_n_fun(), n). - history_criterion (np.ndarray): Entire history of the criterion function evaluations. Shape (history.get_n_fun(), n_obs) - n_iterations (int): Number of iterations the algorithm ran before finding a solution vector or reaching maxiter. - success (bool): Boolean indicating whether a solution has been found before reaching maxiter. """ history = LeastSquaresHistory() n = len(x0) model_indices = np.zeros(maxinterp, dtype=int) n_last_modelpoints = 0 if lower_bounds is not None and upper_bounds is not None: if np.max(x0 + delta - upper_bounds) > 1e-10: raise ValueError("Starting points + delta > upper bounds.") xs = [x0] for i in range(n): x1 = x0.copy() x1[i] += delta xs.append(x1) residuals = batch_fun(x_list=xs, n_cores=n_cores) history.add_entries(xs, residuals) accepted_index = history.get_best_index() residual_model = create_initial_residual_model( history=history, accepted_index=accepted_index, delta=delta ) main_model = create_main_from_residual_model( residual_model=residual_model, multiply_square_terms_with_intercepts=False ) x_accepted = history.get_best_x() gradient_norm_initial = np.linalg.norm(main_model.linear_terms) gradient_norm_initial *= delta valid = True n_modelpoints = n + 1 last_model_indices = np.zeros(maxinterp, dtype=int) converged = False convergence_reason = "Continue iterating." 
for niter in range(maxiter + 1): result_sub = solve_subproblem( x_accepted=x_accepted, main_model=main_model, lower_bounds=lower_bounds, upper_bounds=upper_bounds, delta=delta, solver=solver_sub, conjugate_gradient_method=conjugate_gradient_method_sub, maxiter=maxiter_sub, maxiter_gradient_descent=maxiter_gradient_descent_sub, gtol_abs=gtol_abs_sub, gtol_rel=gtol_rel_sub, gtol_scaled=gtol_scaled_sub, gtol_abs_conjugate_gradient=gtol_abs_conjugate_gradient_sub, gtol_rel_conjugate_gradient=gtol_rel_conjugate_gradient_sub, k_easy=k_easy_sub, k_hard=k_hard_sub, ) x_candidate = x_accepted + result_sub["x"] * delta residuals_candidate = criterion(x_candidate) history.add_entries(x_candidate, residuals_candidate) predicted_reduction = history.get_critvals( accepted_index ) - history.get_critvals(-1) actual_reduction = -result_sub["criterion"] with warnings.catch_warnings(): warnings.simplefilter("ignore", category=RuntimeWarning) rho = np.divide(predicted_reduction, actual_reduction) if (rho >= eta1) or (rho > eta0 and valid): residual_model = residual_model._replace( intercepts=history.get_residuals(index=accepted_index) ) center_info = {"x": history.get_best_x(), "radius": delta} x_candidate = history.get_centered_xs(center_info, index=-1) residual_model = update_residual_model_with_new_accepted_x( residual_model=residual_model, x_candidate=x_candidate ) main_model = update_main_model_with_new_accepted_x( main_model=main_model, x_candidate=x_candidate ) x_accepted = history.get_best_x() accepted_index = history.get_best_index() critval_accepted = history.get_critvals(index=accepted_index) # The model is deemend "not valid" if it has less than n model points. # Otherwise, if the model has n points, it is considered "valid" or # "fully linear" or "just identified". 
# Note: valid is True in the first iteration if not valid: ( model_improving_points, model_indices, n_modelpoints, project_x_onto_null, ) = find_affine_points( history=history, x_accepted=x_accepted, model_improving_points=np.zeros((n, n)), project_x_onto_null=False, delta=delta, theta1=theta1, c=c1, model_indices=model_indices, n_modelpoints=0, ) if n_modelpoints < n: ( history, model_indices, ) = add_geomtery_points_to_make_main_model_fully_linear( history=history, main_model=main_model, model_improving_points=model_improving_points, model_indices=model_indices, x_accepted=x_accepted, n_modelpoints=n_modelpoints, delta=delta, criterion=criterion, lower_bounds=lower_bounds, upper_bounds=upper_bounds, batch_fun=batch_fun, n_cores=n_cores, ) n_modelpoints = n delta_old = delta delta = update_trustregion_radius( result_subproblem=result_sub, rho=rho, model_is_valid=valid, delta=delta, delta_min=delta_min, delta_max=delta_max, eta1=eta1, gamma0=gamma0, gamma1=gamma1, ) ( model_improving_points, model_indices, n_modelpoints, project_x_onto_null, ) = find_affine_points( history=history, x_accepted=x_accepted, model_improving_points=np.zeros((n, n)), project_x_onto_null=False, delta=delta, theta1=theta1, c=c1, model_indices=model_indices, n_modelpoints=0, ) if n_modelpoints == n: valid = True else: valid = False ( model_improving_points, model_indices, n_modelpoints, project_x_onto_null, ) = find_affine_points( history=history, x_accepted=x_accepted, model_improving_points=model_improving_points, project_x_onto_null=project_x_onto_null, delta=delta, theta1=theta1, c=c2, model_indices=model_indices, n_modelpoints=n_modelpoints, ) if n_modelpoints < n: ( history, model_indices, ) = add_geomtery_points_to_make_main_model_fully_linear( history=history, main_model=main_model, model_improving_points=model_improving_points, model_indices=model_indices, x_accepted=x_accepted, n_modelpoints=n_modelpoints, delta=delta, criterion=criterion, lower_bounds=lower_bounds, 
upper_bounds=upper_bounds, batch_fun=batch_fun, n_cores=n_cores, ) model_indices = add_accepted_point_to_residual_model( model_indices, accepted_index, n_modelpoints ) ( x_sample_monomial_basis, monomial_basis, basis_null_space, lower_triangular, n_modelpoints, ) = get_feature_matrices_residual_model( history=history, x_accepted=x_accepted, model_indices=model_indices, delta=delta, c2=c2, theta2=theta2, n_maxinterp=maxinterp, ) center_info = {"x": x_accepted, "radius": delta_old} centered_xs = history.get_centered_xs( center_info, index=model_indices[:n_modelpoints] ) center_info = {"residuals": residual_model.intercepts} centered_residuals = history.get_centered_residuals( center_info, index=model_indices ) y_residuals = evaluate_residual_model( centered_xs=centered_xs, centered_residuals=centered_residuals, residual_model=residual_model, ) coefficients_residual_model = fit_residual_model( m_mat=x_sample_monomial_basis, n_mat=monomial_basis, z_mat=basis_null_space, n_z_mat=lower_triangular, y_residuals=y_residuals, n_modelpoints=n_modelpoints, ) residual_model = residual_model._replace( intercepts=history.get_residuals(index=accepted_index) ) residual_model = update_residual_model( residual_model=residual_model, coefficients_to_add=coefficients_residual_model, delta=delta, delta_old=delta_old, ) main_model = create_main_from_residual_model(residual_model) gradient_norm = np.linalg.norm(main_model.linear_terms) gradient_norm *= delta ( last_model_indices, n_last_modelpoints, same_model_used, ) = get_last_model_indices_and_check_for_repeated_model( model_indices=model_indices, last_model_indices=last_model_indices, n_modelpoints=n_modelpoints, n_last_modelpoints=n_last_modelpoints, ) converged, convergence_reason = _check_for_convergence( gradient_norm=gradient_norm, gradient_norm_initial=gradient_norm_initial, critval=critval_accepted, delta=delta, delta_old=delta_old, same_model_used=same_model_used, converged=converged, reason=convergence_reason, niter=niter, 
gtol_abs=gtol_abs, gtol_rel=gtol_rel, gtol_scaled=gtol_scaled, maxiter=maxiter, ) if converged: break result = InternalOptimizeResult( x=history.get_xs(index=accepted_index), fun=history.get_best_residuals(), n_iterations=niter, success=converged, message=convergence_reason, ) return result def _check_for_convergence( gradient_norm, gradient_norm_initial, critval, delta, delta_old, same_model_used, converged, reason, niter, *, gtol_abs, gtol_rel, gtol_scaled, maxiter, ): """Check for convergence.""" if same_model_used and delta == delta_old: converged = True reason = "Identical model used in successive iterations." elif gradient_norm < gtol_abs: converged = True reason = "Norm of the gradient is less than absolute_gradient_tolerance." elif critval != 0 and abs(gradient_norm / critval) < gtol_rel: converged = True reason = ( "Norm of the gradient relative to the criterion value is less than " "relative_gradient_tolerance." ) elif ( gradient_norm_initial != 0 and gradient_norm / gradient_norm_initial < gtol_scaled ): converged = True reason = ( "Norm of the gradient divided by norm of the gradient at the " "initial parameters is less than scaled_gradient_tolerance." ) elif gradient_norm_initial != 0 and gradient_norm == 0 and gtol_scaled == 0: converged = True reason = ( "Norm of the gradient divided by norm of the gradient at the " "initial parameters is less than scaled_gradient_tolerance." ) elif critval <= -np.inf: converged = True reason = "Criterion value is negative infinity." elif niter == maxiter: reason = "Maximum number of iterations reached." return converged, reason ================================================ FILE: src/optimagic/optimizers/pygad/__init__.py ================================================ """PyGAD optimizer configuration classes and utilities. This module provides easy access to PyGAD mutation classes and Protocols. Example: # >>> import optimagic as om # >>> mutation = om.optimizers.pygad.RandomMutation( # ... 
probability=0.15, # ... by_replacement=True, # ... ) # >>> result = om.minimize( # ... ..., # ... algorithm=om.algos.pygad(mutation=mutation), # ... ) """ from optimagic.optimizers.pygad_optimizer import ( AdaptiveMutation as _AdaptiveMutation, ) from optimagic.optimizers.pygad_optimizer import ( CrossoverFunction, GeneConstraintFunction, MutationFunction, ParentSelectionFunction, ) from optimagic.optimizers.pygad_optimizer import ( InversionMutation as _InversionMutation, ) from optimagic.optimizers.pygad_optimizer import ( RandomMutation as _RandomMutation, ) from optimagic.optimizers.pygad_optimizer import ( ScrambleMutation as _ScrambleMutation, ) from optimagic.optimizers.pygad_optimizer import ( SwapMutation as _SwapMutation, ) RandomMutation = _RandomMutation AdaptiveMutation = _AdaptiveMutation SwapMutation = _SwapMutation InversionMutation = _InversionMutation ScrambleMutation = _ScrambleMutation __all__ = [ "RandomMutation", "AdaptiveMutation", "SwapMutation", "InversionMutation", "ScrambleMutation", "MutationFunction", "CrossoverFunction", "ParentSelectionFunction", "GeneConstraintFunction", ] ================================================ FILE: src/optimagic/optimizers/pygad_optimizer.py ================================================ """Implement PyGAD genetic algorithm optimizer.""" from __future__ import annotations import warnings from dataclasses import dataclass from typing import ( Any, Callable, ClassVar, Literal, Protocol, runtime_checkable, ) import numpy as np from numpy.typing import NDArray from optimagic import mark from optimagic.config import IS_PYGAD_INSTALLED from optimagic.exceptions import NotInstalledError from optimagic.optimization.algo_options import ( CONVERGENCE_GENERATIONS_NOIMPROVE, CONVERGENCE_TARGET_VALUE, STOPPING_MAXITER, get_population_size, ) from optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult from optimagic.optimization.internal_optimization_problem import ( InternalOptimizationProblem, ) 
from optimagic.typing import ( AggregationLevel, Direction, PositiveFloat, PositiveInt, ProbabilityFloat, PyTree, ) @runtime_checkable class ParentSelectionFunction(Protocol): """Protocol for user-defined parent selection functions. Args: fitness: Array of fitness values for all solutions in the population. num_parents: Number of parents to select. ga_instance: The PyGAD GA instance. Returns: Tuple of (selected_parents, parent_indices) where: - selected_parents: 2D array of selected parent solutions - parent_indices: 1D array of indices of selected parents """ def __call__( self, fitness: NDArray[np.float64], num_parents: int, ga_instance: Any ) -> tuple[NDArray[np.float64], NDArray[np.int_]]: ... @runtime_checkable class CrossoverFunction(Protocol): """Protocol for user-defined crossover functions. Args: parents: 2D array of parent solutions selected for mating. offspring_size: Tuple (num_offspring, num_genes) specifying the shape of the offspring population to be generated. ga_instance: The PyGAD GA instance. Returns: 2D array of offspring solutions generated from the parents. """ def __call__( self, parents: NDArray[np.float64], offspring_size: tuple[int, int], ga_instance: Any, ) -> NDArray[np.float64]: ... @runtime_checkable class MutationFunction(Protocol): """Protocol for user-defined mutation functions. Args: offspring: 2D array of offspring solutions to be mutated. ga_instance: The PyGAD GA instance. Returns: 2D array of mutated offspring solutions. """ def __call__( self, offspring: NDArray[np.float64], ga_instance: Any ) -> NDArray[np.float64]: ... @runtime_checkable class GeneConstraintFunction(Protocol): """Protocol for user-defined gene constraint functions. Gene constraint functions are applied to individual genes to enforce specific constraints on their values. Each function receives the current solution and a list of candidate values, then returns the constrained values. Args: solution: Current solution array containing all gene values. 
values: List or array of candidate values for the gene being constrained. Returns: Constrained values as a list or array, ensuring they satisfy the gene's specific constraints. """ def __call__( self, solution: NDArray[np.float64], values: list[float] | NDArray[np.float64], ) -> list[float] | NDArray[np.float64]: ... @dataclass(frozen=True) class _BuiltinMutation: """Base class for all built-in PyGAD mutation configurations. Note: This is an internal base class. Users should not inherit from it directly. To configure a built-in mutation, use one of its subclasses (e.g., `RandomMutation`, `AdaptiveMutation`). To define a custom mutation, provide a function that conforms to the `MutationFunction` protocol. """ mutation_type: ClassVar[str] = "random" def to_pygad_params(self) -> dict[str, Any]: """Convert mutation configuration to PyGAD parameters. Default implementation that works for simple mutations. Complex mutations (RandomMutation, AdaptiveMutation) should override this. Returns: Dictionary of PyGAD mutation parameters. """ return { "mutation_type": self.mutation_type, "mutation_probability": None, "mutation_percent_genes": "default", "mutation_num_genes": None, "mutation_by_replacement": False, } @dataclass(frozen=True) class RandomMutation(_BuiltinMutation): """Configuration for the random mutation in PyGAD. The random mutation selects a subset of genes in each solution and either replaces each selected gene with a new random value or adds a random value to it. The exact behavior depends on the `by_replacement` parameter: If `by_replacement` is True, the selected genes are replaced with new values; if False, random values are added to the existing gene values. The mutation rate is determined by the mutation probability, the number of genes, or the percentage of genes (with priority: probability > num_genes > percent_genes). """ mutation_type: ClassVar[str] = "random" probability: ProbabilityFloat | None = None """Probability of mutating each gene. 
If specified, takes precedence over num_genes and percent_genes. Range [0, 1]. """ num_genes: PositiveInt | None = None """Number of genes to mutate per solution. Takes precedence over percent_genes but is ignored if probability is specified. """ percent_genes: PositiveFloat | str = "default" """Percentage of genes to mutate in each solution. - "default": Uses 10% of genes (PyGAD default) - Numeric value: Percentage (0-100) Ignored if probability or num_genes are specified. """ by_replacement: bool = False """If True, replace gene values with random values. If False, add random values to existing gene values. """ def to_pygad_params(self) -> dict[str, Any]: """Convert RandomMutation configuration to PyGAD parameters.""" return { "mutation_type": self.mutation_type, "mutation_probability": self.probability, "mutation_percent_genes": self.percent_genes, "mutation_num_genes": self.num_genes, "mutation_by_replacement": self.by_replacement, } @dataclass(frozen=True) class SwapMutation(_BuiltinMutation): """Configuration for the swap mutation in PyGAD. The swap mutation selects two random genes and exchanges their values. This operation maintains all gene values, altering only their positions within the chromosome. No additional parameters are required for this mutation type. """ mutation_type: ClassVar[str] = "swap" @dataclass(frozen=True) class InversionMutation(_BuiltinMutation): """Configuration for the inversion mutation in PyGAD. The inversion mutation selects a contiguous segment of genes and reverses their order. All gene values remain unchanged; only the ordering within the selected segment is altered. No additional parameters are required for this mutation type. """ mutation_type: ClassVar[str] = "inversion" @dataclass(frozen=True) class ScrambleMutation(_BuiltinMutation): """Configuration for the scramble mutation in PyGAD. The scramble mutation randomly shuffles the genes within a contiguous segment. 
This preserves gene values but changes their order within the chosen segment. No additional parameters are required for this mutation type. """ mutation_type: ClassVar[str] = "scramble" @dataclass(frozen=True) class AdaptiveMutation(_BuiltinMutation): """Configuration for the adaptive mutation in PyGAD. The adaptive mutation dynamically adjusts the mutation rate based on solution quality. Solutions whose objective value is worse than the current population median receive a higher mutation rate to encourage exploration, while better-than-median solutions receive a lower rate to preserve promising traits. If no mutation rate parameters are specified, this mutation defaults to using probabilities, with a 10% rate for bad solutions (`probability_bad=0.1`) and a 5% rate for good solutions (`probability_good=0.05`). **Parameter Precedence:** The mutation rate is determined by the first set of parameters found, in the following order of priority: 1. `probability_bad` and `probability_good` 2. `num_genes_bad` and `num_genes_good` 3. `percent_genes_bad` and `percent_genes_good` """ mutation_type: ClassVar[str] = "adaptive" probability_bad: ProbabilityFloat | None = None """Probability of mutating each gene for below-average fitness solutions. If specified, takes precedence over num_genes_bad and percent_genes_bad. Range [0, 1]. If no mutation rate parameters are provided at all, this defaults to 0.1 (10% mutation rate for bad fitness solutions). """ probability_good: ProbabilityFloat | None = None """Probability of mutating each gene for above-average fitness solutions. If specified, takes precedence over num_genes_good and percent_genes_good. Range [0, 1]. If no mutation rate parameters are provided at all, this defaults to 0.05 (5% mutation rate for good fitness solutions). """ num_genes_bad: PositiveInt | None = None """Number of genes to mutate for below-average fitness solutions. Takes precedence over percent_genes_bad but is ignored if probability_bad is specified. 
""" num_genes_good: PositiveInt | None = None """Number of genes to mutate for above-average fitness solutions. Takes precedence over percent_genes_good but is ignored if probability_good is specified. """ percent_genes_bad: PositiveFloat | None = None """Percentage of genes to mutate for below-average fitness solutions. Ignored if probability_bad or num_genes_bad are specified. """ percent_genes_good: PositiveFloat | None = None """Percentage of genes to mutate for above-average fitness solutions. Ignored if probability_good or num_genes_good are specified. """ by_replacement: bool = False """If True, replace gene values with random values. If False, add random values to existing gene values. """ def to_pygad_params(self) -> dict[str, Any]: """Convert AdaptiveMutation configuration to PyGAD parameters.""" mutation_probability: list[float] | None = None mutation_num_genes: list[int] | None = None mutation_percent_genes: list[float] | str | None = None if self.probability_bad is not None and self.probability_good is not None: mutation_probability = [self.probability_bad, self.probability_good] elif self.num_genes_bad is not None and self.num_genes_good is not None: mutation_num_genes = [self.num_genes_bad, self.num_genes_good] elif self.percent_genes_bad is not None and self.percent_genes_good is not None: mutation_percent_genes = [self.percent_genes_bad, self.percent_genes_good] else: mutation_probability = [ self.probability_bad or 0.1, self.probability_good or 0.05, ] return { "mutation_type": self.mutation_type, "mutation_probability": mutation_probability, "mutation_percent_genes": mutation_percent_genes, "mutation_num_genes": mutation_num_genes, "mutation_by_replacement": self.by_replacement, } @mark.minimizer( name="pygad", solver_type=AggregationLevel.SCALAR, is_available=IS_PYGAD_INSTALLED, is_global=True, needs_jac=False, needs_hess=False, needs_bounds=True, supports_parallelism=True, supports_bounds=True, supports_infinite_bounds=False, 
supports_linear_constraints=False, supports_nonlinear_constraints=False, disable_history=False, ) @dataclass(frozen=True) class Pygad(Algorithm): """Minimize a scalar function using the PyGAD genetic algorithm. This optimizer wraps the PyGAD genetic algorithm package :cite:`gad2023pygad`, a population-based evolutionary method for global optimization. It maintains a population of candidate solutions and evolves them over generations using biologically inspired operations: selection (choosing parents based on fitness), crossover (combining genes from parents), and mutation (introducing random variations). The algorithm is well-suited for global optimization problems with multiple local optima, black-box optimization where gradients are unavailable or difficult to compute. All variables must have finite bounds. Parallel fitness evaluation is supported via batch processing. For more details, see the `PyGAD documentation `_. """ population_size: PositiveInt | None = None """Number of solutions in each generation. Larger populations explore the search space more thoroughly but require more fitness evaluations per generation. If None, optimagic sets this to ``max(10, 10 * (problem_dimension + 1))``. """ num_parents_mating: PositiveInt | None = 10 """Number of parents selected for mating in each generation. Higher values can speed up convergence but may risk premature convergence. If None, defaults to ``max(2, population_size // 2)``. """ num_generations: PositiveInt | None = 50 """Number of generations to evolve the population.""" stopping_maxiter: PositiveInt = STOPPING_MAXITER """Maximum number of iterations (generations) to run. This corresponds to PyGAD's num_generations parameter. """ initial_population: list[PyTree] | None = None """Optional initial population as a list of parameter PyTrees. If None, the population is initialized randomly within parameter bounds. 
""" parent_selection_type: ( Literal["sss", "rws", "sus", "rank", "random", "tournament"] | ParentSelectionFunction ) = "sss" """Parent selection strategy used to choose parents for crossover. Available methods: * ``"sss"``: Steady-State Selection (selects the best individuals to continue) * ``"rws"``: Roulette Wheel Selection (probabilistic, fitness-proportional) * ``"sus"``: Stochastic Universal Sampling (even sampling across population) * ``"rank"``: Rank Selection (selects based on rank order) * ``"random"``: Random Selection * ``"tournament"``: Tournament Selection (best from K randomly chosen individuals) Alternatively, provide a custom function with signature ``(fitness, num_parents, ga_instance) -> tuple[NDArray, NDArray]``. """ keep_parents: int = -1 """Number of best parents to keep in the next generation. Only used if ``keep_elitism = 0``. Values: * ``-1``: Keep all parents in the next generation (default) * ``0``: Keep no parents in the next generation * Positive integer: Keep the specified number of best parents """ keep_elitism: int = 1 """Number of elite (best) solutions preserved each generation. Range: 0 to population_size. If greater than 0, takes precedence over ``keep_parents``. When 0, elitism is disabled and ``keep_parents`` controls parent retention. """ K_tournament: PositiveInt = 3 """Tournament size for parent selection when ``parent_selection_type="tournament"``.""" crossover_type: ( Literal["single_point", "two_points", "uniform", "scattered"] | CrossoverFunction | None ) = "single_point" """Crossover operator for generating offspring. Available methods: * ``"single_point"``: Single-point crossover * ``"two_points"``: Two-point crossover * ``"uniform"``: Uniform crossover (randomly mixes genes) * ``"scattered"``: Scattered crossover (random mask) Or provide a custom function with signature ``(parents, offspring_size, ga_instance) -> NDArray``. 
""" crossover_probability: ProbabilityFloat | None = None """Probability of applying crossover to selected parents. Range [0, 1]. If None, uses PyGAD's default. """ mutation: ( Literal["random", "swap", "inversion", "scramble", "adaptive"] | type[_BuiltinMutation] | _BuiltinMutation | MutationFunction | None ) = "random" """Mutation operator for introducing genetic diversity. Available options: **String values for default configurations:** * ``"random"``: Random mutation with default parameters * ``"swap"``: Swap mutation with default parameters * ``"inversion"``: Inversion mutation with default parameters * ``"scramble"``: Scramble mutation with default parameters * ``"adaptive"``: Adaptive random mutation with default parameters **Mutation classes for default configurations:** * Any mutation class (e.g., ``RandomMutation``, ``SwapMutation``, ``AdaptiveMutation``, etc.) * All classes can be used without parameters for default behavior **Configured mutation instances:** * Any mutation instance (e.g., ``RandomMutation(...)``, ``SwapMutation()``, etc.) * All mutation classes inherit from ``_BuiltinMutation`` **Custom function:** * Custom function with signature ``(offspring, ga_instance) -> NDArray`` **Disable mutation:** * ``None`` to disable mutation """ allow_duplicate_genes: bool = True """If True, duplicate gene values are allowed within a solution.""" gene_constraint: list[GeneConstraintFunction | None] | None = None """Optional list of per-gene constraint functions. Each with signature ``(solution, values) -> list[float] | NDArray``. """ sample_size: PositiveInt = 100 """Number of values to sample when enforcing uniqueness or gene constraints.""" batch_size: PositiveInt | None = None """Number of solutions to evaluate in parallel batches. If None and ``n_cores > 1``, automatically set to ``n_cores``. """ convergence_target_value: PositiveFloat | None = CONVERGENCE_TARGET_VALUE """Target value for early stopping. Default: None. 
""" convergence_generations_noimprove: PositiveInt | None = ( CONVERGENCE_GENERATIONS_NOIMPROVE ) """Maximum generations without fitness improvement before stopping. Default: None. """ n_cores: PositiveInt = 1 """Number of CPU cores for parallel fitness evaluation.""" seed: int | None = None """Random seed for reproducibility.""" def _solve_internal_problem( self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] ) -> InternalOptimizeResult: if not IS_PYGAD_INSTALLED: raise NotInstalledError( "The 'pygad' algorithm requires the pygad package to be " "installed. You can install it with 'pip install pygad'." ) _validate_user_defined_functions( parent_selection_type=self.parent_selection_type, crossover_type=self.crossover_type, gene_constraint=self.gene_constraint, ) import pygad if ( problem.bounds.lower is None or problem.bounds.upper is None or not np.isfinite(problem.bounds.lower).all() or not np.isfinite(problem.bounds.upper).all() ): raise ValueError("pygad requires finite bounds for all parameters.") # Determine effective batch_size for parallel processing effective_batch_size = _determine_effective_batch_size( self.batch_size, self.n_cores ) if ( effective_batch_size is not None and effective_batch_size > 1 and self.n_cores > 1 ): def _fitness_func_batch( _ga_instance: Any, batch_solutions: NDArray[np.float64], _batch_indices: list[int] | NDArray[np.int_], ) -> list[float]: solutions_list: list[NDArray[np.float64]] = [ np.asarray(batch_solutions[i]) for i in range(batch_solutions.shape[0]) ] batch_results = problem.batch_fun( solutions_list, n_cores=self.n_cores, batch_size=effective_batch_size, ) return [-float(result) for result in batch_results] fitness_function: Any = _fitness_func_batch else: def _fitness_func_single( _ga_instance: Any, solution: NDArray[np.float64], _solution_idx: int ) -> float: return -float(problem.fun(solution)) fitness_function = _fitness_func_single population_size = get_population_size( 
population_size=self.population_size, x=x0, lower_bound=10 ) num_parents_mating = ( self.num_parents_mating if self.num_parents_mating is not None else max(2, population_size // 2) ) if self.initial_population is not None: initial_population = np.array( [ problem.converter.params_to_internal(params) for params in self.initial_population ] ) else: num_genes = len(x0) initial_population = np.random.uniform( problem.bounds.lower, problem.bounds.upper, size=(population_size, num_genes), ) initial_population[0] = x0 gene_space = [ {"low": problem.bounds.lower[i], "high": problem.bounds.upper[i]} for i in range(len(x0)) ] # Convert mutation parameter to PyGAD parameters mutation_params = _convert_mutation_to_pygad_params(self.mutation) # Build stop criteria from convergence parameters stop_criteria = _build_stop_criteria( self.convergence_target_value, self.convergence_generations_noimprove, direction=problem.direction, ) ga_instance = pygad.GA( num_generations=self.num_generations, num_parents_mating=num_parents_mating, fitness_func=fitness_function, fitness_batch_size=effective_batch_size, initial_population=initial_population, gene_space=gene_space, parent_selection_type=self.parent_selection_type, keep_parents=self.keep_parents, keep_elitism=self.keep_elitism, K_tournament=self.K_tournament, crossover_type=self.crossover_type, crossover_probability=self.crossover_probability, mutation_type=mutation_params["mutation_type"], mutation_probability=mutation_params["mutation_probability"], mutation_by_replacement=mutation_params["mutation_by_replacement"], mutation_percent_genes=mutation_params["mutation_percent_genes"], mutation_num_genes=mutation_params["mutation_num_genes"], allow_duplicate_genes=self.allow_duplicate_genes, gene_constraint=self.gene_constraint, sample_size=self.sample_size, stop_criteria=stop_criteria, parallel_processing=None, random_seed=self.seed, ) ga_instance.run() result = _process_pygad_result(ga_instance) return result def 
_convert_mutation_to_pygad_params(mutation: Any) -> dict[str, Any]: """Convert the mutation parameter to PyGAD mutation parameters. Handles strings, classes, instances, and custom functions using the new mutation dataclass system with built-in conversion methods. Returns: Dictionary of PyGAD mutation parameters. """ params: dict[str, Any] if mutation is None: params = _get_default_mutation_params(mutation_type=None) elif isinstance(mutation, str): mutation_instance = _create_mutation_from_string(mutation) params = mutation_instance.to_pygad_params() elif isinstance(mutation, type) and issubclass(mutation, _BuiltinMutation): mutation_instance = mutation() params = mutation_instance.to_pygad_params() elif isinstance(mutation, _BuiltinMutation): params = mutation.to_pygad_params() elif isinstance(mutation, MutationFunction): params = _get_default_mutation_params(mutation_type=mutation) else: raise ValueError(f"Unsupported mutation type: {type(mutation)}") return params def _get_default_mutation_params(mutation_type: Any = "random") -> dict[str, Any]: """Get default PyGAD mutation parameters.""" return { "mutation_type": mutation_type, "mutation_probability": None, "mutation_percent_genes": None if mutation_type is None else "default", "mutation_num_genes": None, "mutation_by_replacement": None if mutation_type is None else False, } def _create_mutation_from_string(mutation_type: str) -> _BuiltinMutation: """Create a mutation instance from a string type. Args: mutation_type: String mutation type (e.g., "random", "swap", etc.) Returns: Appropriate mutation instance. Raises: ValueError: If mutation_type is not supported. 
""" mutation_map = { "random": RandomMutation, "swap": SwapMutation, "inversion": InversionMutation, "scramble": ScrambleMutation, "adaptive": AdaptiveMutation, } if mutation_type not in mutation_map: raise ValueError(f"Unsupported mutation type: {mutation_type}") return mutation_map[mutation_type]() def _determine_effective_batch_size(batch_size: int | None, n_cores: int) -> int | None: """Determine the effective batch_size for parallel processing. Behavior: - If `batch_size` is explicitly provided: - The value is returned unchanged. - A warning is issued if it is less than `n_cores`, as this may underutilize available cores. - If `batch_size` is `None`: - If `n_cores` > 1, defaults to `n_cores`. - Otherwise, returns None (i.e., single-threaded evaluation). Args: batch_size: User-specified batch size or None n_cores: Number of cores for parallel processing Returns: Effective batch size for PyGAD, or None for single-threaded processing """ result = None if batch_size is not None: if batch_size < n_cores: warnings.warn( f"batch_size ({batch_size}) is smaller than " f"n_cores ({n_cores}). This may reduce parallel efficiency. " f"Consider setting batch_size >= n_cores." ) result = batch_size elif n_cores > 1: result = n_cores return result def _build_stop_criteria( target_criterion: float | None, saturate_generations: int | None, direction: Direction, ) -> str | list[str] | None: """Build PyGAD stop criteria from optimagic convergence parameters. Args: target_criterion: Target value that the objective function should reach. saturate_generations: Max generations without improvement before stopping. direction: Direction of optimization (Direction.MINIMIZE or Direction.MAXIMIZE). Returns: PyGAD stop criteria string, list of strings, or None. 
""" criteria = [] if target_criterion is not None: pygad_target_fitness = ( -target_criterion if direction is Direction.MINIMIZE else target_criterion ) criteria.append(f"reach_{pygad_target_fitness}") if saturate_generations is not None: criteria.append(f"saturate_{saturate_generations}") return criteria[0] if len(criteria) == 1 else (criteria or None) def _validate_user_defined_functions( parent_selection_type: str | Callable[..., object] | None, crossover_type: str | Callable[..., object] | None, gene_constraint: list[GeneConstraintFunction | None] | None, ) -> None: """Validate user-provided functions for selection, crossover, and constraints.""" if parent_selection_type is None: pass elif isinstance(parent_selection_type, str): _validate_string_choice( parent_selection_type, ["sss", "rws", "sus", "rank", "random", "tournament"], "parent_selection_type", ) elif callable(parent_selection_type): _validate_protocol_function( parent_selection_type, ParentSelectionFunction, "parent_selection_type", ) else: raise ValueError( "parent_selection_type must be a string, callable, or None, " f"got {type(parent_selection_type)}" ) if crossover_type is None: pass elif isinstance(crossover_type, str): _validate_string_choice( crossover_type, ["single_point", "two_points", "uniform", "scattered"], "crossover_type", ) elif callable(crossover_type): _validate_protocol_function( crossover_type, CrossoverFunction, "crossover_type", ) else: raise ValueError( "crossover_type must be a string, callable, or None, " f"got {type(crossover_type)}" ) if gene_constraint is not None: if not isinstance(gene_constraint, list): raise ValueError( f"gene_constraint must be a list or None, got {type(gene_constraint)}" ) for i, constraint_func in enumerate(gene_constraint): if constraint_func is not None: if not callable(constraint_func): raise TypeError( f"gene_constraint[{i}] must be callable, or None, " f"got {type(constraint_func)}" ) _validate_protocol_function( constraint_func, 
GeneConstraintFunction, f"gene_constraint[{i}]", ) def _validate_string_choice(value: str, valid_choices: list[str], name: str) -> None: """Ensure a string parameter is one of the allowed choices.""" if value not in valid_choices: raise ValueError(f"{name} must be one of {valid_choices}, got '{value}'.") def _validate_protocol_function( func: Callable[..., Any], protocol: Any, name: str ) -> None: """Ensure a callable satisfies the expected protocol interface.""" if not isinstance(func, protocol): raise TypeError(f"{name} must implement {protocol.__name__}.") def _process_pygad_result(ga_instance: Any) -> InternalOptimizeResult: """Process PyGAD result into InternalOptimizeResult. Args: ga_instance: The PyGAD instance after running the optimization Returns: InternalOptimizeResult: Processed optimization results """ best_solution, best_fitness, _ = ga_instance.best_solution() best_criterion = -best_fitness completed_generations = ga_instance.generations_completed success = ga_instance.run_completed if success: message = ( "Optimization terminated successfully.\n" f"Generations completed: {completed_generations}" ) else: message = ( "Optimization failed to complete.\n" f"Generations completed: {completed_generations}" ) return InternalOptimizeResult( x=best_solution, fun=best_criterion, success=success, message=message, n_fun_evals=ga_instance.generations_completed * ga_instance.pop_size[0], ) ================================================ FILE: src/optimagic/optimizers/pygmo_optimizers.py ================================================ """Implement pygmo optimizers. Notes for converting to the new algorithm interface: - `create_algo_options` is not needed anymore because the only thing it did was mixing options that are supported by all optimizers (e.g. population_size, seed) with specific options. Then later they had to be taken appart again. Instead you need to pass a few more arguments directly to `_minimize_pygmo`. 
@mark.minimizer(
    name="pygmo_gaco",
    solver_type=AggregationLevel.SCALAR,
    is_available=IS_PYGMO_INSTALLED,
    is_global=True,
    needs_jac=False,
    needs_hess=False,
    needs_bounds=True,
    supports_parallelism=True,
    supports_bounds=True,
    supports_infinite_bounds=False,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=False,
    disable_history=False,
)
@dataclass(frozen=True)
class PygmoGaco(Algorithm):
    """pygmo's ``gaco`` (ant-colony style) global optimizer.

    Options are forwarded to pygmo under the names shown in the mapping in
    ``_solve_internal_problem``.
    """

    # If None, a heuristic based on len(x0) with lower bound 64 is used.
    population_size: int | None = None
    n_cores: int = 1
    seed: int | None = None
    # Forwarded to _minimize_pygmo; presumably controls whether x0 is
    # inserted into the initial population — confirm in _minimize_pygmo.
    discard_start_params: bool = False
    stopping_maxiter: PositiveInt = STOPPING_MAX_ITERATIONS_GENETIC  # pygmo "gen"
    kernel_size: PositiveInt = 63  # pygmo "ker"
    speed_parameter_q: PositiveFloat = 1.0  # pygmo "q"
    oracle: float = 0.0
    accuracy: PositiveFloat = 0.01  # pygmo "acc"
    threshold: PositiveInt = 1
    speed_of_std_values_convergence: int = 7  # pygmo "n_gen_mark"
    stopping_max_n_without_improvements: PositiveInt = 100000  # pygmo "impstop"
    stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL  # pygmo "evalstop"
    focus: NonNegativeFloat = 0.0
    cache: bool = False  # pygmo "memory"

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        """Run pygmo's gaco on the internal problem starting from x0."""
        population_size = get_population_size(
            population_size=self.population_size, x=x0, lower_bound=64
        )
        # Translate optimagic option names to pygmo's gaco keyword names.
        algo_specific_options = {
            "gen": self.stopping_maxiter,
            "ker": self.kernel_size,
            "q": self.speed_parameter_q,
            "oracle": self.oracle,
            "acc": self.accuracy,
            "threshold": self.threshold,
            "n_gen_mark": self.speed_of_std_values_convergence,
            "impstop": self.stopping_max_n_without_improvements,
            "evalstop": self.stopping_maxfun,
            "focus": self.focus,
            "memory": self.cache,
        }
        res = _minimize_pygmo(
            problem=problem,
            x0=x0,
            method="gaco",
            specific_options=algo_specific_options,
            population_size=population_size,
            n_cores=self.n_cores,
            seed=self.seed,
            discard_start_params=self.discard_start_params,
        )
        return res
@mark.minimizer(
    name="pygmo_bee_colony",
    solver_type=AggregationLevel.SCALAR,
    is_available=IS_PYGMO_INSTALLED,
    is_global=True,
    needs_jac=False,
    needs_hess=False,
    needs_bounds=True,
    supports_parallelism=False,
    supports_bounds=True,
    supports_infinite_bounds=False,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=False,
    disable_history=False,
)
@dataclass(frozen=True)
class PygmoBeeColony(Algorithm):
    """pygmo's ``bee_colony`` global optimizer."""

    stopping_maxiter: PositiveInt = STOPPING_MAX_ITERATIONS_GENETIC  # pygmo "gen"
    seed: int | None = None
    discard_start_params: bool = False
    max_n_trials: PositiveInt = 1  # pygmo "limit"
    # If None, a heuristic based on len(x0) with lower bound 20 is used.
    population_size: int | None = None

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        """Run pygmo's bee_colony on the internal problem starting from x0."""
        population_size = get_population_size(
            population_size=self.population_size, x=x0, lower_bound=20
        )
        algo_specific_options = {
            "limit": self.max_n_trials,
            "gen": self.stopping_maxiter,
        }
        res = _minimize_pygmo(
            problem=problem,
            x0=x0,
            method="bee_colony",
            specific_options=algo_specific_options,
            population_size=population_size,
            n_cores=1,
            seed=self.seed,
            discard_start_params=self.discard_start_params,
        )
        return res


@mark.minimizer(
    name="pygmo_de",
    solver_type=AggregationLevel.SCALAR,
    is_available=IS_PYGMO_INSTALLED,
    is_global=True,
    needs_jac=False,
    needs_hess=False,
    needs_bounds=True,
    supports_parallelism=False,
    supports_bounds=True,
    supports_infinite_bounds=False,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=False,
    disable_history=False,
)
@dataclass(frozen=True)
class PygmoDe(Algorithm):
    """pygmo's differential evolution (``de``) global optimizer."""

    population_size: int | None = None
    seed: int | None = None
    discard_start_params: bool = False
    stopping_maxiter: PositiveInt = STOPPING_MAX_ITERATIONS_GENETIC  # pygmo "gen"
    # TODO: Refine type to fix range [0,2]
    weight_coefficient: NonNegativeFloat = 0.8  # pygmo "F"
    # TODO: Probably refine type to fix range [0,1]
    crossover_probability: NonNegativeFloat = 0.9  # pygmo "CR"
    mutation_variant: Literal[
        "best/1/exp",
        "rand/1/exp",
        "rand-to-best/1/exp",
        "best/2/exp",
        "rand/2/exp",
        "best/1/bin",
        "rand/1/bin",
        "rand-to-best/1/bin",
        "best/2/bin",
        "rand/2/bin",
    ] = "rand/1/exp"
    convergence_criterion_tolerance: NonNegativeFloat = 1e-6  # pygmo "ftol"
    convergence_relative_params_tolerance: NonNegativeFloat = CONVERGENCE_XTOL_REL

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        """Run pygmo's de on the internal problem starting from x0."""
        population_size = get_population_size(
            population_size=self.population_size, x=x0, lower_bound=10
        )
        # support both integer and string specification of the mutation variant
        mutation_variant_str_to_int = {
            "best/1/exp": 1,
            "rand/1/exp": 2,
            "rand-to-best/1/exp": 3,
            "best/2/exp": 4,
            "rand/2/exp": 5,
            "best/1/bin": 6,
            "rand/1/bin": 7,
            "rand-to-best/1/bin": 8,
            "best/2/bin": 9,
            "rand/2/bin": 10,
        }
        mutation_variant = _convert_str_to_int(
            str_to_int=mutation_variant_str_to_int, value=self.mutation_variant
        )
        algo_specific_options = {
            "gen": self.stopping_maxiter,
            "F": self.weight_coefficient,
            "CR": self.crossover_probability,
            "variant": mutation_variant,
            "ftol": self.convergence_criterion_tolerance,
            "xtol": self.convergence_relative_params_tolerance,
        }
        res = _minimize_pygmo(
            problem=problem,
            x0=x0,
            method="de",
            specific_options=algo_specific_options,
            population_size=population_size,
            n_cores=1,
            seed=self.seed,
            discard_start_params=self.discard_start_params,
        )
        return res


@mark.minimizer(
    name="pygmo_sea",
    solver_type=AggregationLevel.SCALAR,
    is_available=IS_PYGMO_INSTALLED,
    is_global=True,
    needs_jac=False,
    needs_hess=False,
    needs_bounds=True,
    supports_parallelism=False,
    supports_bounds=True,
    supports_infinite_bounds=False,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=False,
    disable_history=False,
)
@dataclass(frozen=True)
class PygmoSea(Algorithm):
    """pygmo's simple evolutionary algorithm (``sea``)."""

    population_size: int | None = None
    seed: int | None = None
    discard_start_params: bool = False
    stopping_maxiter: PositiveInt = (
        10_000  # Each generation will compute the objective once
    )

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        """Run pygmo's sea on the internal problem starting from x0."""
        population_size = get_population_size(
            population_size=self.population_size, x=x0, lower_bound=10
        )
        algo_specific_options = {
            "gen": self.stopping_maxiter,
        }
        res = _minimize_pygmo(
            problem=problem,
            x0=x0,
            method="sea",
            specific_options=algo_specific_options,
            population_size=population_size,
            n_cores=1,
            seed=self.seed,
            discard_start_params=self.discard_start_params,
        )
        return res
@mark.minimizer(
    name="pygmo_sga",
    solver_type=AggregationLevel.SCALAR,
    is_available=IS_PYGMO_INSTALLED,
    is_global=True,
    needs_jac=False,
    needs_hess=False,
    needs_bounds=True,
    supports_parallelism=False,
    supports_bounds=True,
    supports_infinite_bounds=False,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=False,
    disable_history=False,
)
@dataclass(frozen=True)
class PygmoSga(Algorithm):
    """pygmo's simple genetic algorithm (``sga``).

    The strategy-specific parameters (``eta_c``, ``mutation_*``,
    ``selection_*``) only take effect when the matching strategy is chosen;
    otherwise a warning is issued and the value is ignored.
    """

    population_size: int | None = None
    seed: int | None = None
    discard_start_params: bool = False
    stopping_maxiter: PositiveInt = STOPPING_MAX_ITERATIONS_GENETIC  # pygmo "gen"
    # TODO: Refine type to fix range [0,1]
    crossover_probability: NonNegativeFloat = 0.9  # pygmo "cr"
    crossover_strategy: Literal[
        "exponential",
        "sbx",
        "single",
        "binomial",
    ] = "exponential"
    # Distribution index for sbx crossover; only used when
    # crossover_strategy == "sbx".
    # TODO: Refine type to fix range [1,100]
    eta_c: PositiveFloat | None = None
    # TODO: Refine type to fix range [0,1]
    mutation_probability: NonNegativeFloat = 0.02  # pygmo "m"
    # BUGFIX: "gaussian" was missing from the Literal although the method
    # below explicitly handles the gaussian branch (mutation_gaussian_width).
    mutation_strategy: Literal["uniform", "gaussian", "polynomial"] = "polynomial"
    # TODO: Refine type to fix range [0,1]
    mutation_polynomial_distribution_index: NonNegativeFloat | None = None
    # TODO: Refine type to fix range [0,1]
    mutation_gaussian_width: NonNegativeFloat | None = None
    selection_strategy: Literal["tournament", "truncated"] = "tournament"
    # TODO: Check if should be NonNegativeInt
    selection_truncated_n_best: int | None = None
    # TODO Check if should be NonNegativeInt
    selection_tournament_size: int | None = None

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        """Run pygmo's sga on the internal problem starting from x0."""
        population_size = get_population_size(
            population_size=self.population_size, x=x0, lower_bound=64
        )
        if self.eta_c is not None and self.crossover_strategy != "sbx":
            # BUGFIX: added the missing space before "and eta_c".
            warnings.warn(
                f"You specified crossover strategy {self.crossover_strategy} "
                "and eta_c. However, eta_c is ignored because it is only used when "
                "the crossover_strategy is set to sbx."
            )
        eta_c = 1.0 if self.eta_c is None else self.eta_c

        if (
            self.mutation_polynomial_distribution_index is not None
        ) and self.mutation_strategy != "polynomial":
            # BUGFIX: added the missing space before "did not choose".
            warnings.warn(
                "You specified a mutation_polynomial_distribution_index but "
                "did not choose polynomial as your mutation_strategy. Thus, "
                "mutation_polynomial_distribution_index will be ignored."
            )
        if (
            self.mutation_gaussian_width is not None
            and self.mutation_strategy != "gaussian"
        ):
            # BUGFIX: fixed the "gaussion" typo.
            warnings.warn(
                "You specified a mutation_gaussian_width but "
                "did not choose gaussian as your mutation_strategy. "
                "Thus, mutation_gaussian_width will be ignored."
            )
        if (
            self.selection_strategy != "truncated"
            and self.selection_truncated_n_best is not None
        ):
            warnings.warn(
                "You specified selection_truncated_n_best but "
                "did not specify truncated as your selection strategy. "
                "Therefore, selection_truncated_n_best is ignored."
            )
        if (
            self.selection_strategy != "tournament"
            and self.selection_tournament_size is not None
        ):
            warnings.warn(
                "You specified selection_tournament_size but "
                "did not specify tournament as your selection strategy. "
                "Therefore, selection_tournament_size is ignored."
            )

        # pygmo's "param_m" is overloaded: gaussian width for gaussian
        # mutation, distribution index for polynomial mutation.
        if (
            self.mutation_strategy == "gaussian"
            and self.mutation_gaussian_width is not None
        ):
            param_m = self.mutation_gaussian_width
        elif (
            self.mutation_strategy == "polynomial"
            and self.mutation_polynomial_distribution_index is not None
        ):
            param_m = self.mutation_polynomial_distribution_index
        else:
            param_m = 1.0

        # pygmo's "param_s" is overloaded analogously for selection.
        if (
            self.selection_strategy == "truncated"
            and self.selection_truncated_n_best is not None
        ):
            param_s = self.selection_truncated_n_best
        elif (
            self.selection_strategy == "tournament"
            and self.selection_tournament_size is not None
        ):
            param_s = self.selection_tournament_size
        else:
            param_s = 2

        algo_specific_options = {
            "gen": self.stopping_maxiter,
            "cr": self.crossover_probability,
            "eta_c": eta_c,
            "m": self.mutation_probability,
            "param_m": param_m,
            "crossover": self.crossover_strategy,
            "mutation": self.mutation_strategy,
            "selection": self.selection_strategy,
            "param_s": param_s,
        }
        res = _minimize_pygmo(
            problem=problem,
            x0=x0,
            method="sga",
            specific_options=algo_specific_options,
            population_size=population_size,
            n_cores=1,
            seed=self.seed,
            discard_start_params=self.discard_start_params,
        )
        return res
@mark.minimizer(
    name="pygmo_sade",
    solver_type=AggregationLevel.SCALAR,
    is_available=IS_PYGMO_INSTALLED,
    is_global=True,
    needs_jac=False,
    needs_hess=False,
    needs_bounds=True,
    supports_parallelism=False,
    supports_bounds=True,
    supports_infinite_bounds=False,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=False,
    disable_history=False,
)
@dataclass(frozen=True)
class PygmoSade(Algorithm):
    """pygmo's self-adaptive differential evolution (``sade``)."""

    population_size: int | None = None
    seed: int | None = None
    discard_start_params: bool = False
    # If True, the jDE adaptation variant is used, otherwise iDE
    # (pygmo "variant_adptv": 1 vs. 2).
    jde: bool = True
    stopping_maxiter: PositiveInt = STOPPING_MAX_ITERATIONS_GENETIC  # pygmo "gen"
    mutation_variant: Literal[
        "best/1/exp",
        "rand/1/exp",
        "rand-to-best/1/exp",
        "best/2/exp",
        "rand/2/exp",
        "best/1/bin",
        "rand/1/bin",
        "rand-to-best/1/bin",
        "best/2/bin",
        "rand/2/bin",
        "rand/3/exp",
        "rand/3/bin",
        "best/3/exp",
        "best/3/bin",
        "rand-to-current/2/exp",
        "rand-to-current/2/bin",
        "rand-to-best-and-current/2/exp",
        "rand-to-best-and-current/2/bin",
    ] = "rand/1/exp"
    keep_adapted_params: bool = False  # pygmo "memory"
    ftol: NonNegativeFloat = 1e-6
    xtol: NonNegativeFloat = 1e-6

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        """Run pygmo's sade on the internal problem starting from x0."""
        population_size = get_population_size(
            population_size=self.population_size, x=x0, lower_bound=64
        )
        # Map the string variant names onto pygmo's integer codes.
        mutation_variant_str_to_int = {
            "best/1/exp": 1,
            "rand/1/exp": 2,
            "rand-to-best/1/exp": 3,
            "best/2/exp": 4,
            "rand/2/exp": 5,
            "best/1/bin": 6,
            "rand/1/bin": 7,
            "rand-to-best/1/bin": 8,
            "best/2/bin": 9,
            "rand/2/bin": 10,
            "rand/3/exp": 11,
            "rand/3/bin": 12,
            "best/3/exp": 13,
            "best/3/bin": 14,
            "rand-to-current/2/exp": 15,
            "rand-to-current/2/bin": 16,
            "rand-to-best-and-current/2/exp": 17,
            "rand-to-best-and-current/2/bin": 18,
        }
        mutation_variant = _convert_str_to_int(
            str_to_int=mutation_variant_str_to_int, value=self.mutation_variant
        )
        algo_specific_options = {
            "gen": self.stopping_maxiter,
            "variant": mutation_variant,
            "variant_adptv": 1 if self.jde else 2,
            "ftol": self.ftol,
            "xtol": self.xtol,
            "memory": self.keep_adapted_params,
        }
        res = _minimize_pygmo(
            problem=problem,
            x0=x0,
            method="sade",
            specific_options=algo_specific_options,
            population_size=population_size,
            n_cores=1,
            seed=self.seed,
            discard_start_params=self.discard_start_params,
        )
        return res
name="pygmo_cmaes", solver_type=AggregationLevel.SCALAR, is_available=IS_PYGMO_INSTALLED, is_global=True, needs_jac=False, needs_hess=False, needs_bounds=True, supports_parallelism=False, supports_bounds=True, supports_infinite_bounds=False, supports_linear_constraints=False, supports_nonlinear_constraints=False, disable_history=False, ) @dataclass(frozen=True) class PygmoCmaes(Algorithm): population_size: int | None = None seed: int | None = None discard_start_params: bool = False stopping_maxiter: PositiveInt = STOPPING_MAX_ITERATIONS_GENETIC # TODO: Refine type to fix range [0,1] backward_horizon: NonNegativeFloat | None = None # TODO: Refine type to fix range [0,1] variance_loss_compensation: NonNegativeFloat | None = None # TODO: Refine type to fix range [0,1] learning_rate_rank_one_update: NonNegativeFloat | None = None # TODO: Refine type to fix range [0,1] learning_rate_rank_mu_update: NonNegativeFloat | None = None # TODO: Check if should be NonNegativeFloat initial_step_size: float = 0.5 ftol: NonNegativeFloat = 1e-6 xtol: NonNegativeFloat = 1e-6 keep_adapted_params: bool = False def _solve_internal_problem( self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] ) -> InternalOptimizeResult: population_size = get_population_size( population_size=self.population_size, x=x0, lower_bound=64 ) algo_specific_options = { "gen": self.stopping_maxiter, "cc": self.backward_horizon if self.backward_horizon is not None else -1.0, "cs": self.variance_loss_compensation if self.variance_loss_compensation is not None else -1.0, "c1": self.learning_rate_rank_one_update if self.learning_rate_rank_one_update is not None else -1.0, "cmu": self.learning_rate_rank_mu_update if self.learning_rate_rank_mu_update is not None else -1.0, "sigma0": self.initial_step_size, "ftol": self.ftol, "xtol": self.xtol, "memory": self.keep_adapted_params, "force_bounds": True, } res = _minimize_pygmo( problem=problem, x0=x0, method="cmaes", specific_options=algo_specific_options, 
population_size=population_size, n_cores=1, seed=self.seed, discard_start_params=self.discard_start_params, ) return res @mark.minimizer( name="pygmo_simulated_annealing", solver_type=AggregationLevel.SCALAR, is_available=IS_PYGMO_INSTALLED, is_global=True, needs_jac=False, needs_hess=False, needs_bounds=True, supports_parallelism=False, supports_bounds=True, supports_infinite_bounds=False, supports_linear_constraints=False, supports_nonlinear_constraints=False, disable_history=False, ) @dataclass(frozen=True) class PygmoSimulatedAnnealing(Algorithm): population_size: int | None = None seed: int | None = None discard_start_params: bool = False start_temperature: PositiveFloat = 10.0 # TODO: Check if type should be same as start_temperature end_temperature: float = 0.01 # TODO: Check if type should be NonNegativeInt n_temp_adjustments: int = 10 # TODO: Check if type should be NonNegativeInt n_range_adjustments: int = 10 # TODO: Check if type should be NonNegativeInt bin_size: int = 10 # TODO: Refine type to fix range [0,1] start_range: NonNegativeFloat = 1.0 def _solve_internal_problem( self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] ) -> InternalOptimizeResult: population_size = get_population_size( population_size=self.population_size, x=x0, lower_bound=64 ) algo_specific_options = { "Ts": self.start_temperature, "Tf": self.end_temperature, "n_T_adj": self.n_temp_adjustments, "n_range_adj": self.n_range_adjustments, "bin_size": self.bin_size, "start_range": self.start_range, } res = _minimize_pygmo( problem=problem, x0=x0, method="simulated_annealing", specific_options=algo_specific_options, population_size=population_size, n_cores=1, seed=self.seed, discard_start_params=self.discard_start_params, ) return res @mark.minimizer( name="pygmo_pso", solver_type=AggregationLevel.SCALAR, is_available=IS_PYGMO_INSTALLED, is_global=True, needs_jac=False, needs_hess=False, needs_bounds=True, supports_parallelism=False, supports_bounds=True, 
@mark.minimizer(
    name="pygmo_pso",
    solver_type=AggregationLevel.SCALAR,
    is_available=IS_PYGMO_INSTALLED,
    is_global=True,
    needs_jac=False,
    needs_hess=False,
    needs_bounds=True,
    supports_parallelism=False,
    supports_bounds=True,
    supports_infinite_bounds=False,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=False,
    disable_history=False,
)
@dataclass(frozen=True)
class PygmoPso(Algorithm):
    """pygmo's particle swarm optimization (``pso``)."""

    population_size: int | None = None
    seed: int | None = None
    discard_start_params: bool = False
    stopping_maxiter: PositiveInt = STOPPING_MAX_ITERATIONS_GENETIC  # pygmo "gen"
    # TODO: Refine type to fix range [0,1]
    omega: NonNegativeFloat = 0.7298
    # TODO: Refine type to fix range [0,4]
    force_of_previous_best: NonNegativeFloat = 2.05  # pygmo "eta1"
    # TODO: Refine type to fix range [0,4]
    force_of_best_in_neighborhood: NonNegativeFloat = 2.05  # pygmo "eta2"
    # TODO: Refine type to fix range [0,1]
    max_velocity: NonNegativeFloat = 0.5  # pygmo "max_vel"
    algo_variant: Literal[
        "canonical_inertia",
        "social_and_cog_rand",
        "all_components_rand",
        "one_rand",
        "canonical_constriction",
        "fips",
    ] = "canonical_constriction"
    neighbor_definition: Literal[
        "gbest",
        "lbest",
        "Von Neumann",
        "Adaptive random",
    ] = "lbest"
    # Only used by neighbor definitions other than "gbest" / "Von Neumann".
    neighbor_param: int | None = None
    keep_velocities: bool = False  # pygmo "memory"

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        """Run pygmo's pso on the internal problem starting from x0."""
        if (
            self.neighbor_definition in ["gbest", "Von Neumann"]
            and self.neighbor_param is not None
        ):
            warnings.warn(
                "You gave a neighbor parameter but selected a neighbor_definition "
                "that ignores this parameter."
            )
        neighbor_param = 4 if self.neighbor_param is None else self.neighbor_param
        population_size = get_population_size(
            population_size=self.population_size, x=x0, lower_bound=10
        )
        # Map string choices to pygmo's integer codes.
        neighbor_definition_str_to_int = {
            "gbest": 1,
            "lbest": 2,
            "Von Neumann": 3,
            "Adaptive random": 4,
        }
        algo_variant_str_to_int = {
            "canonical_inertia": 1,
            "social_and_cog_rand": 2,
            "all_components_rand": 3,
            "one_rand": 4,
            "canonical_constriction": 5,
            "fips": 6,
        }
        algo_specific_options = {
            "gen": self.stopping_maxiter,
            "omega": self.omega,
            "eta1": self.force_of_previous_best,
            "eta2": self.force_of_best_in_neighborhood,
            "max_vel": self.max_velocity,
            "variant": _convert_str_to_int(algo_variant_str_to_int, self.algo_variant),
            "neighb_type": _convert_str_to_int(
                neighbor_definition_str_to_int, self.neighbor_definition
            ),
            "neighb_param": neighbor_param,
            "memory": self.keep_velocities,
        }
        res = _minimize_pygmo(
            problem=problem,
            x0=x0,
            method="pso",
            specific_options=algo_specific_options,
            population_size=population_size,
            n_cores=1,
            seed=self.seed,
            discard_start_params=self.discard_start_params,
        )
        return res
@mark.minimizer(
    name="pygmo_pso_gen",
    solver_type=AggregationLevel.SCALAR,
    is_available=IS_PYGMO_INSTALLED,
    is_global=True,
    needs_jac=False,
    needs_hess=False,
    needs_bounds=True,
    supports_parallelism=True,
    supports_bounds=True,
    supports_infinite_bounds=False,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=False,
    disable_history=False,
)
@dataclass(frozen=True)
class PygmoPsoGen(Algorithm):
    """pygmo's generational particle swarm optimization (``pso_gen``).

    Unlike ``pygmo_pso``, this variant supports parallel fitness
    evaluation via ``n_cores``.
    """

    population_size: int | None = None
    n_cores: PositiveInt = 1
    seed: int | None = None
    discard_start_params: bool = False
    stopping_maxiter: PositiveInt = STOPPING_MAX_ITERATIONS_GENETIC  # pygmo "gen"
    # TODO: Refine type to fix range [0,1]
    omega: NonNegativeFloat = 0.7298
    # TODO: Refine type to fix range [0,4]
    force_of_previous_best: NonNegativeFloat = 2.05  # pygmo "eta1"
    # TODO: Refine type to fix range [0,4]
    force_of_best_in_neighborhood: NonNegativeFloat = 2.05  # pygmo "eta2"
    # TODO: Refine type to fix range [0,1]
    max_velocity: NonNegativeFloat = 0.5  # pygmo "max_vel"
    algo_variant: Literal[
        "canonical_inertia",
        "social_and_cog_rand",
        "all_components_rand",
        "one_rand",
        "canonical_constriction",
        "fips",
    ] = "canonical_constriction"
    neighbor_definition: Literal[
        "gbest",
        "lbest",
        "Von Neumann",
        "Adaptive random",
    ] = "lbest"
    # Only used by neighbor definitions other than "gbest" / "Von Neumann".
    neighbor_param: int | None = None
    keep_velocities: bool = False  # pygmo "memory"

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        """Run pygmo's pso_gen on the internal problem starting from x0."""
        if (
            self.neighbor_definition in ["gbest", "Von Neumann"]
            and self.neighbor_param is not None
        ):
            warnings.warn(
                "You gave a neighbor parameter but selected a neighbor_definition "
                "that ignores this parameter."
            )
        neighbor_param = 4 if self.neighbor_param is None else self.neighbor_param
        # Map string choices to pygmo's integer codes.
        neighbor_str_to_int = {
            "gbest": 1,
            "lbest": 2,
            "Von Neumann": 3,
            "Adaptive random": 4,
        }
        neighbor_type = _convert_str_to_int(
            neighbor_str_to_int, self.neighbor_definition
        )
        algo_variant_str_to_int = {
            "canonical_inertia": 1,
            "social_and_cog_rand": 2,
            "all_components_rand": 3,
            "one_rand": 4,
            "canonical_constriction": 5,
            "fips": 6,
        }
        algo_variant = _convert_str_to_int(algo_variant_str_to_int, self.algo_variant)
        population_size = get_population_size(
            population_size=self.population_size, x=x0, lower_bound=10
        )
        algo_specific_options = {
            "gen": self.stopping_maxiter,
            "omega": self.omega,
            "eta1": self.force_of_previous_best,
            "eta2": self.force_of_best_in_neighborhood,
            "max_vel": self.max_velocity,
            "variant": algo_variant,
            "neighb_type": neighbor_type,
            "neighb_param": neighbor_param,
            "memory": self.keep_velocities,
        }
        res = _minimize_pygmo(
            problem=problem,
            x0=x0,
            method="pso_gen",
            specific_options=algo_specific_options,
            population_size=population_size,
            n_cores=self.n_cores,
            seed=self.seed,
            discard_start_params=self.discard_start_params,
        )
        return res
@mark.minimizer(
    name="pygmo_mbh",
    solver_type=AggregationLevel.SCALAR,
    is_available=IS_PYGMO_INSTALLED,
    is_global=True,
    needs_jac=False,
    needs_hess=False,
    needs_bounds=True,
    supports_parallelism=False,
    supports_bounds=True,
    supports_infinite_bounds=False,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=False,
    disable_history=False,
)
@dataclass(frozen=True)
class PygmoMbh(Algorithm):
    """pygmo's monotonic basin hopping (``mbh``) meta-algorithm.

    Wraps an inner pygmo algorithm (passed as pygmo's "algo" option) and
    restarts it from perturbed points.
    """

    population_size: int | None = None
    seed: int | None = None
    discard_start_params: bool = False
    # Inner pygmo algorithm instance; None lets pygmo use its default.
    inner_algorithm: pg.algorithm | None = None
    # this is 30 instead of 5 in pygmo for our sum of squares test to pass
    stopping_max_inner_runs_without_improvement: PositiveInt = 30  # pygmo "stop"
    perturbation: float = 0.01  # pygmo "perturb"

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        """Run pygmo's mbh on the internal problem starting from x0."""
        # the min default population size is this large to pass our sum of
        # squares tests.
        population_size = get_population_size(
            population_size=self.population_size, x=x0, lower_bound=250
        )
        algo_specific_options = {
            "algo": self.inner_algorithm,
            "stop": self.stopping_max_inner_runs_without_improvement,
            "perturb": self.perturbation,
        }
        res = _minimize_pygmo(
            problem=problem,
            x0=x0,
            method="mbh",
            specific_options=algo_specific_options,
            population_size=population_size,
            seed=self.seed,
            discard_start_params=self.discard_start_params,
            n_cores=1,
        )
        return res
@mark.minimizer(
    name="pygmo_xnes",
    solver_type=AggregationLevel.SCALAR,
    is_available=IS_PYGMO_INSTALLED,
    is_global=True,
    needs_jac=False,
    needs_hess=False,
    needs_bounds=True,
    supports_parallelism=False,
    supports_bounds=True,
    supports_infinite_bounds=False,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=False,
    disable_history=False,
)
@dataclass(frozen=True)
class PygmoXnes(Algorithm):
    """pygmo's exponential natural evolution strategies (``xnes``).

    Learning-rate options left at None are passed to pygmo as -1,
    presumably pygmo's sentinel for "choose automatically" — confirm
    against the pygmo xnes documentation.
    """

    # Annotation fixed from `float | None`: the value is an individual count,
    # handled as an int like in all sibling classes.
    population_size: int | None = None
    seed: int | None = None
    discard_start_params: bool = False
    stopping_maxiter: PositiveInt = STOPPING_MAX_ITERATIONS_GENETIC  # pygmo "gen"
    # TODO: Refine type to fix range [0,1]
    learning_rate_mean_update: NonNegativeFloat | None = 1.0  # pygmo "eta_mu"
    # TODO: Refine type to fix range [0,1]
    learning_rate_step_size_update: NonNegativeFloat | None = None  # pygmo "eta_sigma"
    # TODO: Refine type to fix range [0,1]
    learning_rate_cov_matrix_update: NonNegativeFloat | None = None  # pygmo "eta_b"
    # TODO: Refine type to fix range [0,1]
    # NOTE(review): unlike the eta_* options, a None here is forwarded to
    # pygmo's "sigma0" unmapped — confirm pygmo accepts that.
    initial_search_share: NonNegativeFloat | None = 1.0
    ftol: NonNegativeFloat = 1e-6
    xtol: NonNegativeFloat = 1e-6
    keep_adapted_params: bool = False  # pygmo "memory"

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        """Run pygmo's xnes on the internal problem starting from x0."""
        population_size = get_population_size(
            population_size=self.population_size, x=x0, lower_bound=64
        )
        eta_mu = (
            -1
            if self.learning_rate_mean_update is None
            else self.learning_rate_mean_update
        )
        eta_sigma = (
            -1
            if self.learning_rate_step_size_update is None
            else self.learning_rate_step_size_update
        )
        eta_b = (
            -1
            if self.learning_rate_cov_matrix_update is None
            else self.learning_rate_cov_matrix_update
        )
        algo_specific_options = {
            "gen": self.stopping_maxiter,
            "eta_mu": eta_mu,
            "eta_sigma": eta_sigma,
            "eta_b": eta_b,
            "sigma0": self.initial_search_share,
            "ftol": self.ftol,
            "xtol": self.xtol,
            "memory": self.keep_adapted_params,
            "force_bounds": True,
        }
        res = _minimize_pygmo(
            problem=problem,
            x0=x0,
            method="xnes",
            specific_options=algo_specific_options,
            population_size=population_size,
            n_cores=1,
            seed=self.seed,
            discard_start_params=self.discard_start_params,
        )
        return res
@mark.minimizer(
    name="pygmo_gwo",
    solver_type=AggregationLevel.SCALAR,
    is_available=IS_PYGMO_INSTALLED,
    is_global=True,
    needs_jac=False,
    needs_hess=False,
    needs_bounds=True,
    supports_parallelism=False,
    supports_bounds=True,
    supports_infinite_bounds=False,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=False,
    disable_history=False,
)
@dataclass(frozen=True)
class PygmoGwo(Algorithm):
    """pygmo's grey wolf optimizer (``gwo``).

    Takes only the generic population/stopping options; the number of
    generations is forwarded as pygmo's "gen".
    """

    population_size: int | None = None
    seed: int | None = None
    discard_start_params: bool = False
    stopping_maxiter: PositiveInt = STOPPING_MAX_ITERATIONS_GENETIC

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        """Run pygmo's gwo on the internal problem starting from x0."""
        effective_population_size = get_population_size(
            population_size=self.population_size, x=x0, lower_bound=64
        )
        return _minimize_pygmo(
            problem=problem,
            x0=x0,
            method="gwo",
            specific_options={"gen": self.stopping_maxiter},
            population_size=effective_population_size,
            n_cores=1,
            seed=self.seed,
            discard_start_params=self.discard_start_params,
        )
) population_size = self.population_size else: # if discard_start_params is False population_size - 1 # must still be positive population_size = 100 algo_specific_options = { "max_fevals": self.stopping_maxfun, "start_range": self.start_range, "stop_range": self.stop_range, "reduction_coeff": self.reduction_coeff, } res = _minimize_pygmo( problem=problem, x0=x0, method="compass_search", specific_options=algo_specific_options, population_size=population_size, n_cores=1, seed=self.seed, discard_start_params=self.discard_start_params, ) return res @mark.minimizer( name="pygmo_ihs", solver_type=AggregationLevel.SCALAR, is_available=IS_PYGMO_INSTALLED, is_global=True, needs_jac=False, needs_hess=False, needs_bounds=True, supports_parallelism=False, supports_bounds=True, supports_infinite_bounds=False, supports_linear_constraints=False, supports_nonlinear_constraints=False, disable_history=False, ) @dataclass(frozen=True) class PygmoIhs(Algorithm): population_size: int | None = None seed: int | None = None discard_start_params: bool = False stopping_maxiter: PositiveInt = STOPPING_MAX_ITERATIONS_GENETIC # TODO: Probably refine type to fix range [0,1] choose_from_memory_probability: NonNegativeFloat = 0.85 # TODO: Refine type to fix range [0,1] min_pitch_adjustment_rate: NonNegativeFloat = 0.35 # TODO: Refine type to fix range [0,1] max_pitch_adjustment_rate: NonNegativeFloat = 0.99 min_distance_bandwidth: PositiveFloat = 1e-5 max_distance_bandwidth: PositiveFloat = 1.0 def _solve_internal_problem( self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] ) -> InternalOptimizeResult: if self.population_size is not None: warnings.warn("The population size has no effect on IHS' performance.") population_size = get_population_size( population_size=self.population_size, x=x0, lower_bound=1 ) algo_specific_options = { "gen": self.stopping_maxiter, "phmcr": self.choose_from_memory_probability, "ppar_min": self.min_pitch_adjustment_rate, "ppar_max": 
self.max_pitch_adjustment_rate, "bw_min": self.min_distance_bandwidth, "bw_max": self.max_distance_bandwidth, } res = _minimize_pygmo( problem=problem, x0=x0, method="ihs", specific_options=algo_specific_options, population_size=population_size, n_cores=1, seed=self.seed, discard_start_params=self.discard_start_params, ) return res @mark.minimizer( name="pygmo_de1220", solver_type=AggregationLevel.SCALAR, is_available=IS_PYGMO_INSTALLED, is_global=True, needs_jac=False, needs_hess=False, needs_bounds=True, supports_parallelism=False, supports_bounds=True, supports_infinite_bounds=False, supports_linear_constraints=False, supports_nonlinear_constraints=False, disable_history=False, ) @dataclass(frozen=True) class PygmoDe1220(Algorithm): population_size: int | None = None seed: int | None = None discard_start_params: bool = False jde: bool = True stopping_maxiter: PositiveInt = STOPPING_MAX_ITERATIONS_GENETIC allowed_variants: List[str] | None = None keep_adapted_params: bool = False ftol: NonNegativeFloat = 1e-6 xtol: NonNegativeFloat = 1e-6 def _solve_internal_problem( self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] ) -> InternalOptimizeResult: variant_str_to_int = { "best/1/exp": 1, "rand/1/exp": 2, "rand-to-best/1/exp": 3, "best/2/exp": 4, "rand/2/exp": 5, "best/1/bin": 6, "rand/1/bin": 7, "rand-to-best/1/bin": 8, "best/2/bin": 9, "rand/2/bin": 10, "rand/3/exp": 11, "rand/3/bin": 12, "best/3/exp": 13, "best/3/bin": 14, "rand-to-current/2/exp": 15, "rand-to-current/2/bin": 16, "rand-to-best-and-current/2/exp": 17, "rand-to-best-and-current/2/bin": 18, } if self.allowed_variants is None: allowed_variant_codes = [2, 3, 7, 10, 13, 14, 15, 16] else: allowed_variant_codes = [ _convert_str_to_int(variant_str_to_int, variant) for variant in self.allowed_variants ] population_size = get_population_size( population_size=self.population_size, x=x0, lower_bound=64 ) algo_specific_options = { "gen": self.stopping_maxiter, "variant_adptv": 1 if self.jde 
else 2, "ftol": self.ftol, "xtol": self.xtol, "memory": self.keep_adapted_params, "allowed_variants": allowed_variant_codes, } res = _minimize_pygmo( problem=problem, x0=x0, method="de1220", specific_options=algo_specific_options, population_size=population_size, n_cores=1, seed=self.seed, discard_start_params=self.discard_start_params, ) return res # ==================================================================================== def _minimize_pygmo( problem: InternalOptimizationProblem, x0: NDArray[np.float64], method: str, specific_options: dict[str, Any], population_size: PositiveInt, n_cores: int, seed: int | None, discard_start_params: bool, ) -> InternalOptimizeResult: if not IS_PYGMO_INSTALLED: raise NotInstalledError( f"The {method} algorithm requires the pygmo package to be installed. " "You can install it with 'conda install -c conda-forge pygmo'. Visit " "https://esa.github.io/pygmo2/install.html for more detailed installation " "instructions." ) bounds = problem.bounds if bounds is None or bounds.lower is None or bounds.upper is None: raise ValueError(f"{method} requires finitel bounds for all parameters.") elif not np.isfinite(bounds.lower).all() or not np.isfinite(bounds.upper).all(): raise ValueError(f"{method} requires finite bounds for all parameters.") pygmo_problem = _create_pygmo_problem(problem, len(x0), n_cores) algo = _create_algorithm(method, specific_options, n_cores) pop = _create_population( problem=pygmo_problem, population_size=population_size, x=x0, seed=seed, discard_start_params=discard_start_params, ) evolved = algo.evolve(pop) result = _process_pygmo_result(evolved) return result def _create_pygmo_problem( problem: InternalOptimizationProblem, dim: int, n_cores: int ) -> pg.problem: import pygmo as pg class Problem: def fitness(self, x): return [problem.fun(x)] def get_bounds(self): return (problem.bounds.lower, problem.bounds.upper) def gradient(self, dv): # noqa: ARG002 raise ValueError("No pygmo optimizer should use a 
gradient.") def batch_fitness(self, dvs): x_list = list(dvs.reshape(-1, dim)) eval_list = problem.batch_fun(x_list, n_cores=n_cores) evals = np.array(eval_list) return evals pygmo_problem = pg.problem(Problem()) return pygmo_problem def _create_algorithm( method: str, algo_options: dict[str, Any], n_cores: int ) -> pg.algorithm: """Create a pygmo algorithm.""" import pygmo as pg pygmo_uda = getattr(pg, method) algo = pygmo_uda(**algo_options) try: algo.set_bfe(pg.bfe()) except AttributeError: if n_cores >= 2: warnings.warn( f"Your specified algorithm {method} does not support parallelization. " "Choose another algorithm such as pygmo_gaco to parallelize." ) out = pg.algorithm(algo) return out def _create_population( problem: InternalOptimizationProblem, population_size: int, x: NDArray[np.float64], seed: int | None, discard_start_params: bool, ) -> pg.population: import pygmo as pg if not discard_start_params: population_size = population_size - 1 pop = pg.population( problem, size=population_size, seed=seed, b=pg.bfe(), ) if not discard_start_params: pop.push_back(x) return pop def _process_pygmo_result(evolved: pg.population) -> InternalOptimizeResult: result = InternalOptimizeResult( x=evolved.champion_x, fun=evolved.champion_f[0], success=True, message="Number of generations reached.", n_fun_evals=evolved.problem.get_fevals(), n_jac_evals=evolved.problem.get_gevals(), ) return result def _convert_str_to_int(str_to_int, value): if value in str_to_int: out = str_to_int[value] elif value not in str_to_int.values(): raise ValueError( f"You specified {value} as value. " f"It must be one of {', '.join(str_to_int.keys())}" ) else: out = value return out ================================================ FILE: src/optimagic/optimizers/pyswarms_optimizers.py ================================================ """Implement PySwarms particle swarm optimization algorithms. 
This module provides optimagic-compatible wrappers for PySwarms particle swarm optimization algorithms including global best, local best, and general PSO variants with support for different topologies. """ from __future__ import annotations import warnings from dataclasses import dataclass from typing import Any, Callable, Literal import numpy as np from numpy.typing import NDArray from optimagic import mark from optimagic.config import IS_PYSWARMS_INSTALLED from optimagic.exceptions import NotInstalledError from optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult from optimagic.optimization.internal_optimization_problem import ( InternalBounds, InternalOptimizationProblem, ) from optimagic.typing import ( AggregationLevel, NonNegativeFloat, PositiveFloat, PositiveInt, PyTree, ) PYSWARMS_NOT_INSTALLED_ERROR = ( "This optimizer requires the 'pyswarms' package to be installed. " "You can install it with `pip install pyswarms`. " "Visit https://pyswarms.readthedocs.io/en/latest/installation.html " "for more detailed installation instructions." ) # ====================================================================================== # 1. Topology Dataclasses # ====================================================================================== @dataclass(frozen=True) class Topology: """Base class for all topology configurations.""" @dataclass(frozen=True) class StarTopology(Topology): """Star topology configuration. All particles are connected to the global best. """ @dataclass(frozen=True) class RingTopology(Topology): """Ring topology configuration. Particles are connected in a ring structure. """ k_neighbors: PositiveInt = 3 """Number of neighbors for each particle.""" p_norm: Literal[1, 2] = 2 """Distance metric for neighbor selection: 1 (Manhattan), 2 (Euclidean).""" static: bool = False """Whether to use a static or dynamic ring topology. When True, the neighborhood structure is fixed throughout optimization. 
When False, neighbors are recomputed at each iteration based on current particle positions. """ @dataclass(frozen=True) class VonNeumannTopology(Topology): """Von Neumann topology configuration. Particles are arranged on a 2D grid. """ p_norm: Literal[1, 2] = 2 """Distance metric for neighbor selection: 1 (Manhattan), 2 (Euclidean).""" range_param: PositiveInt = 1 r"""Range parameter :math:`r` for neighborhood size.""" @dataclass(frozen=True) class PyramidTopology(Topology): """Pyramid topology configuration.""" static: bool = False """Whether to use a static or dynamic pyramid topology. When True, the neighborhood structure is fixed throughout optimization. When False, neighbors are recomputed at each iteration based on current particle positions. """ @dataclass(frozen=True) class RandomTopology(Topology): """Random topology configuration. Particles are connected to random neighbors. """ k_neighbors: PositiveInt = 3 """Number of neighbors for each particle.""" static: bool = False """Whether to use a static or dynamic random topology. When True, the neighborhood structure is fixed throughout optimization. When False, neighbors are recomputed at each iteration based on current particle positions. 
""" # ====================================================================================== # Common PSO Options # ====================================================================================== @dataclass(frozen=True) class PSOCommonOptions: """Common options for PySwarms optimizers.""" n_particles: PositiveInt = 10 """Number of particles in the swarm.""" cognitive_parameter: PositiveFloat = 0.5 """Cognitive parameter (c1) - attraction to personal best.""" social_parameter: PositiveFloat = 0.3 """Social parameter (c2) - attraction to neighborhood/global best.""" inertia_weight: PositiveFloat = 0.9 """Inertia weight (w) - momentum control.""" stopping_maxiter: PositiveInt = 1000 """Maximum number of iterations.""" initial_positions: list[PyTree] | None = None """Option to set the initial particle positions. If None, positions are generated randomly within the given bounds, or within [0, 1] if bounds are not specified. """ oh_strategy: dict[str, str] | None = None """Dictionary of strategies for time-varying options.""" boundary_strategy: Literal[ "periodic", "reflective", "shrink", "random", "intermediate" ] = "periodic" """Strategy for handling out-of-bounds particles. Available options: periodic (default), reflective, shrink, random, intermediate. """ velocity_strategy: Literal["unmodified", "adjust", "invert", "zero"] = "unmodified" """Strategy for handling out-of-bounds velocities. Available options: unmodified (default), adjust, invert, zero. 
""" velocity_clamp_min: float | None = None """Minimum velocity limit for particles.""" velocity_clamp_max: float | None = None """Maximum velocity limit for particles.""" convergence_ftol_rel: NonNegativeFloat = 0 """Stop when relative change in objective function is less than this value.""" convergence_ftol_iter: PositiveInt = 1 """Number of iterations to check for convergence.""" n_cores: PositiveInt = 1 """Number of cores for parallel evaluation.""" center_init: PositiveFloat = 1.0 """Scaling factor for initial particle positions.""" verbose: bool = False """Enable or disable the logs and progress bar.""" seed: int | None = None """Random seed for initial positions. For full reproducibility, set a global seed with `np.random.seed()`. """ # ====================================================================================== # Algorithm Classes # ====================================================================================== @mark.minimizer( name="pyswarms_global_best", solver_type=AggregationLevel.SCALAR, is_available=IS_PYSWARMS_INSTALLED, is_global=True, needs_jac=False, needs_hess=False, needs_bounds=False, supports_parallelism=True, supports_bounds=True, supports_infinite_bounds=False, supports_linear_constraints=False, supports_nonlinear_constraints=False, disable_history=False, ) @dataclass(frozen=True) class PySwarmsGlobalBestPSO(Algorithm, PSOCommonOptions): r"""Minimize a scalar function using Global Best Particle Swarm Optimization. A population-based stochastic, global optimization optimization algorithm that simulates the social behavior of bird flocking or fish schooling. Particles (candidate solutions) move through the search space, adjusting their positions based on their own experience (cognitive component) and the experience of their neighbors or the entire swarm (social component). 
This implementation uses a star topology where all particles are connected to each other, making each particle aware of the global best solution found by the entire swarm. The position update follows: .. math:: x_{i}(t+1) = x_{i}(t) + v_{i}(t+1) The velocity update follows: .. math:: v_{ij}(t+1) = w \cdot v_{ij}(t) + c_1 r_{1j}(t)[y_{ij}(t) - x_{ij}(t)] + c_2 r_{2j}(t)[\hat{y}_j(t) - x_{ij}(t)] Where: - :math:`w`: inertia weight controlling momentum - :math:`c_1`: cognitive parameter for attraction to personal best - :math:`c_2`: social parameter for attraction to global best - :math:`r_{1j}, r_{2j}`: random numbers in [0,1] - :math:`y_{ij}(t)`: personal best position of particle i - :math:`\hat{y}_j(t)`: global best position This algorithm is an adaptation of the original Particle Swarm Optimization method by :cite:`Kennedy1995` """ def _solve_internal_problem( self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] ) -> InternalOptimizeResult: if not IS_PYSWARMS_INSTALLED: raise NotInstalledError(PYSWARMS_NOT_INSTALLED_ERROR) import pyswarms as ps pso_options_dict = { "c1": self.cognitive_parameter, "c2": self.social_parameter, "w": self.inertia_weight, } optimizer_kwargs = {"options": pso_options_dict} res = _pyswarms_internal( problem=problem, x0=x0, optimizer_class=ps.single.GlobalBestPSO, optimizer_kwargs=optimizer_kwargs, algo_options=self, ) return res @mark.minimizer( name="pyswarms_local_best", solver_type=AggregationLevel.SCALAR, is_available=IS_PYSWARMS_INSTALLED, is_global=True, needs_jac=False, needs_hess=False, needs_bounds=False, supports_parallelism=True, supports_bounds=True, supports_infinite_bounds=False, supports_linear_constraints=False, supports_nonlinear_constraints=False, disable_history=False, ) @dataclass(frozen=True) class PySwarmsLocalBestPSO(Algorithm, PSOCommonOptions): r"""Minimize a scalar function using Local Best Particle Swarm Optimization. 
A variant of PSO that uses local neighborhoods instead of a single global best. Each particle is influenced only by the best position found within its local neighborhood, which is determined by the k-nearest neighbors using distance metrics. This approach uses a ring topology where particles are connected to their local neighbors, making each particle aware of only the best solution found within its neighborhood. The position update follows: .. math:: x_{i}(t+1) = x_{i}(t) + v_{i}(t+1) The velocity update follows: .. math:: v_{ij}(t+1) = w \cdot v_{ij}(t) + c_1 r_{1j}(t)[y_{ij}(t) - x_{ij}(t)] + c_2 r_{2j}(t)[\hat{y}_{lj}(t) - x_{ij}(t)] Where: - :math:`w`: inertia weight controlling momentum - :math:`c_1`: cognitive parameter for attraction to personal best - :math:`c_2`: social parameter for attraction to local best - :math:`r_{1j}, r_{2j}`: random numbers in [0,1] - :math:`y_{ij}(t)`: personal best position of particle i - :math:`\hat{y}_{lj}(t)`: local best position in particle i's neighborhood The algorithm is based on the original Particle Swarm Optimization method by :cite:`Kennedy1995` and the local best concept introduced in :cite:`EberhartKennedy1995`. """ topology: RingTopology = RingTopology() """Configuration for the Ring topology. This algorithm uses a fixed ring topology where particles are connected to their local neighbors. This parameter allows customization of the number of neighbors, distance metric, and whether the topology remains static throughout optimization. 
""" def _solve_internal_problem( self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] ) -> InternalOptimizeResult: if not IS_PYSWARMS_INSTALLED: raise NotInstalledError(PYSWARMS_NOT_INSTALLED_ERROR) import pyswarms as ps pso_options_dict = { "c1": self.cognitive_parameter, "c2": self.social_parameter, "w": self.inertia_weight, "k": self.topology.k_neighbors, "p": self.topology.p_norm, } optimizer_kwargs = { "options": pso_options_dict, "static": self.topology.static, } res = _pyswarms_internal( problem=problem, x0=x0, optimizer_class=ps.single.LocalBestPSO, optimizer_kwargs=optimizer_kwargs, algo_options=self, ) return res @mark.minimizer( name="pyswarms_general", solver_type=AggregationLevel.SCALAR, is_available=IS_PYSWARMS_INSTALLED, is_global=True, needs_jac=False, needs_hess=False, needs_bounds=False, supports_parallelism=True, supports_bounds=True, supports_infinite_bounds=False, supports_linear_constraints=False, supports_nonlinear_constraints=False, disable_history=False, ) @dataclass(frozen=True) class PySwarmsGeneralPSO(Algorithm, PSOCommonOptions): r"""Minimize a scalar function using General Particle Swarm Optimization with custom topologies. A flexible PSO implementation that allows selection of different neighborhood topologies, providing control over the balance between exploration and exploitation. The topology determines how particles communicate and share information, directly affecting the algorithm's search behavior. The position update follows: .. math:: x_{i}(t+1) = x_{i}(t) + v_{i}(t+1) The velocity update follows: .. 
math:: v_{ij}(t+1) = w \cdot v_{ij}(t) + c_1 r_{1j}(t)[y_{ij}(t) - x_{ij}(t)] + c_2 r_{2j}(t)[\hat{y}_{nj}(t) - x_{ij}(t)] Where: - :math:`w`: inertia weight controlling momentum - :math:`c_1`: cognitive parameter for attraction to personal best - :math:`c_2`: social parameter for attraction to neighborhood best - :math:`r_{1j}, r_{2j}`: random numbers in [0,1] - :math:`y_{ij}(t)`: personal best position of particle i - :math:`\hat{y}_{nj}(t)`: neighborhood best position This algorithm is based on the original Particle Swarm Optimization method by :cite:`Kennedy1995` with configurable topology structures. For topology references, see :cite:`Lane2008SpatialPSO, Ni2013`. """ topology: Literal["star", "ring", "vonneumann", "random", "pyramid"] | Topology = ( "star" ) """Topology structure for particle communication. The `topology` can be specified in two ways: 1. **By name (string):** e.g., ``"star"``, ``"ring"``. This uses the default parameter values for that topology. 2. **By dataclass instance:** e.g., ``RingTopology(k_neighbors=5, p_norm=1)``. This allows for detailed configuration of topology-specific parameters. Available topologies: ``StarTopology``, ``RingTopology``, ``VonNeumannTopology``, ``RandomTopology``, ``PyramidTopology``. 
""" def _solve_internal_problem( self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] ) -> InternalOptimizeResult: if not IS_PYSWARMS_INSTALLED: raise NotInstalledError(PYSWARMS_NOT_INSTALLED_ERROR) import pyswarms as ps pyswarms_topology, topology_options = _resolve_topology_config(self.topology) base_options = { "c1": self.cognitive_parameter, "c2": self.social_parameter, "w": self.inertia_weight, } pso_options_dict = {**base_options, **topology_options} optimizer_kwargs = { "options": pso_options_dict, "topology": pyswarms_topology, } res = _pyswarms_internal( problem=problem, x0=x0, optimizer_class=ps.single.GeneralOptimizerPSO, optimizer_kwargs=optimizer_kwargs, algo_options=self, ) return res def _pyswarms_internal( problem: InternalOptimizationProblem, x0: NDArray[np.float64], optimizer_class: Any, optimizer_kwargs: dict[str, Any], algo_options: PSOCommonOptions, ) -> InternalOptimizeResult: """Internal function for PySwarms optimization. Args: problem: Internal optimization problem. x0: Initial parameter vector. optimizer_class: PySwarms optimizer class to use. optimizer_kwargs: Arguments for optimizer class. algo_options: The PySwarms common options. Returns: InternalOptimizeResult: Internal optimization result. """ if algo_options.seed is not None: warnings.warn( "The 'seed' parameter only makes initial particle positions reproducible. " "PySwarms still uses NumPy's global random functions for generating " "velocities, updating coefficients, and handling other stochastic " "operations. 
For fully deterministic results, set a global seed with " "'np.random.seed()' before running the optimizer.", UserWarning, ) rng = np.random.default_rng(algo_options.seed) velocity_clamp = _build_velocity_clamp( algo_options.velocity_clamp_min, algo_options.velocity_clamp_max ) bounds = _get_pyswarms_bounds(problem.bounds) if algo_options.initial_positions is not None: init_pos = np.array( [ problem.converter.params_to_internal(position) for position in algo_options.initial_positions ] ) else: init_pos = _create_initial_positions( x0=x0, n_particles=algo_options.n_particles, bounds=bounds, center=algo_options.center_init, rng=rng, ) optimizer = optimizer_class( n_particles=algo_options.n_particles, dimensions=len(x0), bounds=bounds, init_pos=init_pos, velocity_clamp=velocity_clamp, oh_strategy=algo_options.oh_strategy, bh_strategy=algo_options.boundary_strategy, vh_strategy=algo_options.velocity_strategy, ftol=algo_options.convergence_ftol_rel, ftol_iter=algo_options.convergence_ftol_iter, **optimizer_kwargs, ) objective_wrapper = _create_batch_objective(problem, algo_options.n_cores) result = optimizer.optimize( objective_func=objective_wrapper, iters=algo_options.stopping_maxiter, verbose=algo_options.verbose, ) res = _process_pyswarms_result(result=result, optimizer=optimizer) return res def _resolve_topology_config( config: Literal["star", "ring", "vonneumann", "random", "pyramid"] | Topology, ) -> tuple[Any, dict[str, float | int]]: """Resolves the topology config into a pyswarms topology instance and options dict. 
""" from pyswarms.backend.topology import Pyramid, Random, Ring, Star, VonNeumann if isinstance(config, str): default_topologies = { "star": StarTopology(), "ring": RingTopology(), "vonneumann": VonNeumannTopology(), "random": RandomTopology(), "pyramid": PyramidTopology(), } if config not in default_topologies: raise ValueError(f"Unknown topology string: '{config}'") config = default_topologies[config] topology_instance: Any options: dict[str, float | int] = {} if isinstance(config, StarTopology): topology_instance = Star() elif isinstance(config, RingTopology): topology_instance = Ring(static=config.static) options = {"k": config.k_neighbors, "p": config.p_norm} elif isinstance(config, VonNeumannTopology): topology_instance = VonNeumann() options = {"p": config.p_norm, "r": config.range_param} elif isinstance(config, RandomTopology): topology_instance = Random(static=config.static) options = {"k": config.k_neighbors} elif isinstance(config, PyramidTopology): topology_instance = Pyramid(static=config.static) else: raise TypeError(f"Unsupported topology configuration type: {type(config)}") return topology_instance, options def _build_velocity_clamp( velocity_clamp_min: float | None, velocity_clamp_max: float | None ) -> tuple[float, float] | None: """Build velocity clamp tuple.""" clamp = None if velocity_clamp_min is not None and velocity_clamp_max is not None: clamp = (velocity_clamp_min, velocity_clamp_max) return clamp def _get_pyswarms_bounds( bounds: InternalBounds, ) -> tuple[NDArray[np.float64], NDArray[np.float64]] | None: """Convert optimagic bounds to PySwarms format.""" pyswarms_bounds = None if bounds.lower is not None and bounds.upper is not None: if not np.all(np.isfinite(bounds.lower)) or not np.all( np.isfinite(bounds.upper) ): raise ValueError("PySwarms does not support infinite bounds.") pyswarms_bounds = (bounds.lower, bounds.upper) return pyswarms_bounds def _create_initial_positions( x0: NDArray[np.float64], n_particles: int, bounds: 
tuple[NDArray[np.float64], NDArray[np.float64]] | None, center: float, rng: np.random.Generator, ) -> NDArray[np.float64]: """Create an initial swarm positions. Args: x0: Initial parameter vector. n_particles: Number of particles in the swarm. bounds: Tuple of (lower_bounds, upper_bounds) arrays or None. center: Scaling factor for initial particle positions around bounds. rng: NumPy random number generator instance. Returns: Initial positions array of shape (n_particles, n_dimensions) where each row represents one particle's starting position. """ n_dimensions = len(x0) if bounds is None: lower_bounds: NDArray[np.float64] = np.zeros(n_dimensions, dtype=np.float64) upper_bounds: NDArray[np.float64] = np.ones(n_dimensions, dtype=np.float64) else: lower_bounds, upper_bounds = bounds # Generate random initial positions within the bounds, scaled by center init_pos = center * rng.uniform( low=lower_bounds, high=upper_bounds, size=(n_particles, n_dimensions) ) init_pos[0] = x0 init_pos = np.clip(init_pos, lower_bounds, upper_bounds) return init_pos def _create_batch_objective( problem: InternalOptimizationProblem, n_cores: int, ) -> Callable[[NDArray[np.float64]], NDArray[np.float64]]: """Return an batch objective function.""" def batch_objective(positions: NDArray[np.float64]) -> NDArray[np.float64]: """Compute objective values for all particles in positions. Args: positions: 2D array of shape (n_particles, n_dimensions) with particle positions. Returns: 1D array of shape (n_particles,) with objective values. 
""" arguments = [position for position in positions] results = problem.batch_fun(arguments, n_cores=n_cores) return np.array(results) return batch_objective def _process_pyswarms_result( result: tuple[float, NDArray[np.float64]], optimizer: Any ) -> InternalOptimizeResult: """Convert PySwarms result to optimagic format.""" best_cost, best_position = result n_iterations = len(optimizer.cost_history) n_particles = optimizer.n_particles return InternalOptimizeResult( x=best_position, fun=best_cost, success=True, message="PySwarms optimization completed", n_fun_evals=n_particles * n_iterations, n_jac_evals=0, n_hess_evals=0, n_iterations=n_iterations, ) ================================================ FILE: src/optimagic/optimizers/scipy_optimizers.py ================================================ """Implement scipy algorithms. The following ``scipy`` algorithms are not supported because they require the specification of the Hessian: - dogleg - trust-ncg - trust-exact - trust-krylov The following arguments are not supported as part of ``algo_options``: - ``disp`` If set to True would print a convergence message. In optimagic it's always set to its default False. Refer to optimagic's result dictionary's "success" entry for the convergence message. - ``return_all`` If set to True, a list of the best solution at each iteration is returned. In optimagic it's always set to its default False. - ``tol`` This argument of minimize (not an options key) is passed as different types of tolerance (gradient, parameter or criterion, as well as relative or absolute) depending on the selected algorithm. We require the user to explicitely input the tolerance criteria or use our defaults instead. - ``args`` This argument of minimize (not an options key) is partialed into the function for the user. Specify ``criterion_kwargs`` in ``maximize`` or ``minimize`` to achieve the same behavior. 
- ``callback`` This argument would be called after each iteration and the algorithm would terminate if it returned True. """ from __future__ import annotations import functools from dataclasses import dataclass from typing import Any, Callable, List, Literal, SupportsInt, Tuple import numpy as np import scipy import scipy.optimize from numpy.typing import NDArray from scipy.optimize import Bounds as ScipyBounds from scipy.optimize import NonlinearConstraint from scipy.optimize import OptimizeResult as ScipyOptimizeResult from optimagic import mark from optimagic.batch_evaluators import process_batch_evaluator from optimagic.optimization.algo_options import ( CONVERGENCE_FTOL_ABS, CONVERGENCE_FTOL_REL, CONVERGENCE_GTOL_ABS, CONVERGENCE_GTOL_REL, CONVERGENCE_SECOND_BEST_FTOL_ABS, CONVERGENCE_SECOND_BEST_XTOL_ABS, CONVERGENCE_XTOL_ABS, CONVERGENCE_XTOL_REL, LIMITED_MEMORY_STORAGE_LENGTH, MAX_LINE_SEARCH_STEPS, STOPPING_MAXFUN, STOPPING_MAXFUN_GLOBAL, STOPPING_MAXITER, ) from optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult from optimagic.optimization.internal_optimization_problem import ( InternalBounds, InternalOptimizationProblem, ) from optimagic.parameters.nonlinear_constraints import ( equality_as_inequality_constraints, vector_as_list_of_scalar_constraints, ) from optimagic.typing import ( AggregationLevel, BatchEvaluator, BatchEvaluatorLiteral, NegativeFloat, NonNegativeFloat, NonNegativeInt, PositiveFloat, PositiveInt, ) from optimagic.utilities import calculate_trustregion_initial_radius @mark.minimizer( name="scipy_lbfgsb", solver_type=AggregationLevel.SCALAR, is_available=True, is_global=False, needs_jac=True, needs_hess=False, needs_bounds=False, supports_parallelism=False, supports_bounds=True, supports_infinite_bounds=True, supports_linear_constraints=False, supports_nonlinear_constraints=False, disable_history=False, ) @dataclass(frozen=True) class ScipyLBFGSB(Algorithm): """Minimize a scalar differentiable function using the 
L-BFGS-B algorithm. The optimizer is taken from scipy, which calls the Fortran code written by the original authors of the algorithm. The Fortran code includes the corrections and improvements that were introduced in a follow up paper. lbfgsb is a limited memory version of the original bfgs algorithm, that deals with lower and upper bounds via an active set approach. The lbfgsb algorithm is well suited for differentiable scalar optimization problems with up to several hundred parameters. It is a quasi-newton line search algorithm. At each trial point it evaluates the criterion function and its gradient to find a search direction. It then approximates the hessian using the stored history of gradients and uses the hessian to calculate a candidate step size. Then it uses a gradient based line search algorithm to determine the actual step length. Since the algorithm always evaluates the gradient and criterion function jointly, the user should provide a ``fun_and_jac`` function that exploits the synergies in the calculation of criterion and gradient. The lbfgsb algorithm is almost perfectly scale invariant. Thus, it is not necessary to scale the parameters. """ convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL r"""Converge if the relative change in the objective function is less than this value. More formally, this is expressed as. .. math:: \frac{f^k - f^{k+1}}{\max\{{|f^k|, |f^{k+1}|, 1}\}} \leq \textsf{convergence_ftol_rel}. """ convergence_gtol_abs: NonNegativeFloat = CONVERGENCE_GTOL_ABS """Converge if the absolute values in the gradient of the objective function are less than this value.""" stopping_maxfun: PositiveInt = STOPPING_MAXFUN """Maximum number of function evaluations.""" stopping_maxiter: PositiveInt = STOPPING_MAXITER """Maximum number of iterations.""" limited_memory_storage_length: PositiveInt = LIMITED_MEMORY_STORAGE_LENGTH """The maximum number of variable metric corrections used to define the limited memory matrix. 
This is the 'maxcor' parameter in the SciPy documentation. The default value is taken from SciPy's L-BFGS-B implementation. Larger values use more memory but may converge faster for some problems. """ max_line_search_steps: PositiveInt = MAX_LINE_SEARCH_STEPS """The maximum number of line search steps. This is the 'maxls' parameter in the SciPy documentation. The default value is taken from SciPy's L-BFGS-B implementation. """ def _solve_internal_problem( self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] ) -> InternalOptimizeResult: options = { "maxcor": self.limited_memory_storage_length, "ftol": self.convergence_ftol_rel, "gtol": self.convergence_gtol_abs, "maxfun": self.stopping_maxfun, "maxiter": self.stopping_maxiter, "maxls": self.max_line_search_steps, } raw_res = scipy.optimize.minimize( fun=problem.fun_and_jac, x0=x0, method="L-BFGS-B", jac=True, bounds=_get_scipy_bounds(problem.bounds), options=options, ) res = process_scipy_result(raw_res) return res @mark.minimizer( name="scipy_slsqp", solver_type=AggregationLevel.SCALAR, is_available=True, is_global=False, needs_jac=True, needs_hess=False, needs_bounds=False, supports_parallelism=False, supports_bounds=True, supports_infinite_bounds=True, supports_linear_constraints=False, supports_nonlinear_constraints=True, disable_history=False, ) @dataclass(frozen=True) class ScipySLSQP(Algorithm): convergence_ftol_abs: NonNegativeFloat = CONVERGENCE_SECOND_BEST_FTOL_ABS stopping_maxiter: PositiveInt = STOPPING_MAXITER display: bool = False def _solve_internal_problem( self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] ) -> InternalOptimizeResult: options = { "maxiter": self.stopping_maxiter, "ftol": self.convergence_ftol_abs, "disp": self.display, } raw_res = scipy.optimize.minimize( fun=problem.fun_and_jac, x0=x0, method="SLSQP", jac=True, bounds=_get_scipy_bounds(problem.bounds), constraints=problem.nonlinear_constraints, options=options, ) res = process_scipy_result(raw_res) 
return res @mark.minimizer( name="scipy_neldermead", solver_type=AggregationLevel.SCALAR, is_available=True, is_global=False, needs_jac=False, needs_hess=False, needs_bounds=False, supports_parallelism=False, supports_bounds=True, supports_infinite_bounds=True, supports_linear_constraints=False, supports_nonlinear_constraints=False, disable_history=False, ) @dataclass(frozen=True) class ScipyNelderMead(Algorithm): stopping_maxiter: PositiveInt = STOPPING_MAXITER stopping_maxfun: PositiveInt = STOPPING_MAXFUN convergence_ftol_abs: NonNegativeFloat = CONVERGENCE_SECOND_BEST_FTOL_ABS convergence_xtol_abs: NonNegativeFloat = CONVERGENCE_SECOND_BEST_XTOL_ABS adaptive: bool = False display: bool = False def _solve_internal_problem( self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] ) -> InternalOptimizeResult: options = { "maxiter": self.stopping_maxiter, "maxfev": self.stopping_maxfun, "xatol": self.convergence_xtol_abs, "fatol": self.convergence_ftol_abs, # TODO: Benchmark if adaptive = True works better "adaptive": self.adaptive, "disp": self.display, } raw_res = scipy.optimize.minimize( fun=problem.fun, x0=x0, bounds=_get_scipy_bounds(problem.bounds), method="Nelder-Mead", options=options, ) res = process_scipy_result(raw_res) return res @mark.minimizer( name="scipy_powell", solver_type=AggregationLevel.SCALAR, is_available=True, is_global=False, needs_jac=False, needs_hess=False, needs_bounds=False, supports_parallelism=False, supports_bounds=True, supports_infinite_bounds=True, supports_linear_constraints=False, supports_nonlinear_constraints=False, disable_history=False, ) @dataclass(frozen=True) class ScipyPowell(Algorithm): convergence_xtol_rel: NonNegativeFloat = CONVERGENCE_XTOL_REL convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL stopping_maxfun: PositiveInt = STOPPING_MAXFUN stopping_maxiter: PositiveInt = STOPPING_MAXITER display: bool = False def _solve_internal_problem( self, problem: InternalOptimizationProblem, x0: 
@mark.minimizer(
    name="scipy_powell",
    solver_type=AggregationLevel.SCALAR,
    is_available=True,
    is_global=False,
    needs_jac=False,
    needs_hess=False,
    needs_bounds=False,
    supports_parallelism=False,
    supports_bounds=True,
    supports_infinite_bounds=True,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=False,
    disable_history=False,
)
@dataclass(frozen=True)
class ScipyPowell(Algorithm):
    """Minimize a scalar function using scipy's derivative-free Powell
    method."""

    # Relative convergence tolerance on the parameters.
    convergence_xtol_rel: NonNegativeFloat = CONVERGENCE_XTOL_REL
    # Relative convergence tolerance on the criterion value.
    convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL
    # Maximum number of criterion evaluations.
    stopping_maxfun: PositiveInt = STOPPING_MAXFUN
    # Maximum number of iterations.
    stopping_maxiter: PositiveInt = STOPPING_MAXITER
    # Whether scipy prints convergence messages.
    display: bool = False

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        scipy_options = {
            "xtol": self.convergence_xtol_rel,
            "ftol": self.convergence_ftol_rel,
            "maxfev": self.stopping_maxfun,
            "maxiter": self.stopping_maxiter,
            "disp": self.display,
        }

        raw = scipy.optimize.minimize(
            fun=problem.fun,
            x0=x0,
            method="Powell",
            bounds=_get_scipy_bounds(problem.bounds),
            options=scipy_options,
        )

        return process_scipy_result(raw)


@mark.minimizer(
    name="scipy_bfgs",
    solver_type=AggregationLevel.SCALAR,
    is_available=True,
    is_global=False,
    needs_jac=True,
    needs_hess=False,
    needs_bounds=False,
    supports_parallelism=False,
    supports_bounds=False,
    supports_infinite_bounds=False,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=False,
    disable_history=False,
)
@dataclass(frozen=True)
class ScipyBFGS(Algorithm):
    """Minimize a scalar differentiable function using scipy's BFGS
    quasi-newton algorithm (unbounded)."""

    # Absolute convergence tolerance on the gradient norm.
    convergence_gtol_abs: NonNegativeFloat = CONVERGENCE_GTOL_ABS
    # Maximum number of iterations.
    stopping_maxiter: PositiveInt = STOPPING_MAXITER
    # Order of the norm used in the gradient convergence check.
    norm: NonNegativeFloat = np.inf
    # Relative convergence tolerance on the parameters.
    convergence_xtol_rel: NonNegativeFloat = CONVERGENCE_XTOL_REL
    # Whether scipy prints convergence messages.
    display: bool = False
    # Wolfe line search parameters ('c1' and 'c2' in scipy).
    armijo_condition: NonNegativeFloat = 1e-4
    curvature_condition: NonNegativeFloat = 0.9

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        scipy_options = {
            "gtol": self.convergence_gtol_abs,
            "maxiter": self.stopping_maxiter,
            "norm": self.norm,
            "xrtol": self.convergence_xtol_rel,
            "disp": self.display,
            "c1": self.armijo_condition,
            "c2": self.curvature_condition,
        }

        raw = scipy.optimize.minimize(
            fun=problem.fun_and_jac,
            x0=x0,
            method="BFGS",
            jac=True,
            options=scipy_options,
        )

        return process_scipy_result(raw)


@mark.minimizer(
    name="scipy_conjugate_gradient",
    solver_type=AggregationLevel.SCALAR,
    is_available=True,
    is_global=False,
    needs_jac=True,
    needs_hess=False,
    needs_bounds=False,
    supports_parallelism=False,
    supports_bounds=False,
    supports_infinite_bounds=False,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=False,
    disable_history=False,
)
@dataclass(frozen=True)
class ScipyConjugateGradient(Algorithm):
    """Minimize a scalar differentiable function using scipy's nonlinear
    conjugate gradient method (unbounded)."""

    # Absolute convergence tolerance on the gradient norm.
    convergence_gtol_abs: NonNegativeFloat = CONVERGENCE_GTOL_ABS
    # Maximum number of iterations.
    stopping_maxiter: PositiveInt = STOPPING_MAXITER
    # Order of the norm used in the gradient convergence check.
    norm: NonNegativeFloat = np.inf
    # Whether scipy prints convergence messages.
    display: bool = False

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        scipy_options = {
            "gtol": self.convergence_gtol_abs,
            "maxiter": self.stopping_maxiter,
            "norm": self.norm,
            "disp": self.display,
        }

        raw = scipy.optimize.minimize(
            fun=problem.fun_and_jac,
            x0=x0,
            method="CG",
            jac=True,
            options=scipy_options,
        )

        return process_scipy_result(raw)


@mark.minimizer(
    name="scipy_newton_cg",
    solver_type=AggregationLevel.SCALAR,
    is_available=True,
    is_global=False,
    needs_jac=True,
    needs_hess=False,
    needs_bounds=False,
    supports_parallelism=False,
    supports_bounds=False,
    supports_infinite_bounds=False,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=False,
    disable_history=False,
)
@dataclass(frozen=True)
class ScipyNewtonCG(Algorithm):
    """Minimize a scalar differentiable function using scipy's Newton
    conjugate gradient method (unbounded)."""

    # Relative convergence tolerance on the parameters.
    convergence_xtol_rel: NonNegativeFloat = CONVERGENCE_XTOL_REL
    # Maximum number of iterations.
    stopping_maxiter: PositiveInt = STOPPING_MAXITER
    # Whether scipy prints convergence messages.
    display: bool = False

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        scipy_options = {
            "xtol": self.convergence_xtol_rel,
            "maxiter": self.stopping_maxiter,
            "disp": self.display,
        }

        raw = scipy.optimize.minimize(
            fun=problem.fun_and_jac,
            x0=x0,
            method="Newton-CG",
            jac=True,
            options=scipy_options,
        )

        return process_scipy_result(raw)
@mark.minimizer(
    name="scipy_cobyla",
    solver_type=AggregationLevel.SCALAR,
    is_available=True,
    is_global=False,
    needs_jac=False,
    needs_hess=False,
    needs_bounds=False,
    supports_parallelism=False,
    supports_bounds=False,
    supports_infinite_bounds=False,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=True,
    disable_history=False,
)
@dataclass(frozen=True)
class ScipyCOBYLA(Algorithm):
    """Minimize a scalar function using scipy's derivative-free COBYLA method,
    which supports nonlinear inequality constraints."""

    # Relative convergence tolerance on the parameters (scipy's `tol`).
    convergence_xtol_rel: NonNegativeFloat = CONVERGENCE_XTOL_REL
    # Maximum number of iterations.
    stopping_maxiter: PositiveInt = STOPPING_MAXITER
    # Initial trust region radius; computed from x0 if None.
    trustregion_initial_radius: PositiveFloat | None = None
    # Whether scipy prints convergence messages.
    display: bool = False

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        # TODO: Maybe we should leave the radius at their default
        radius = (
            calculate_trustregion_initial_radius(x0)
            if self.trustregion_initial_radius is None
            else self.trustregion_initial_radius
        )

        scipy_options = {
            "maxiter": self.stopping_maxiter,
            "rhobeg": radius,
            "disp": self.display,
        }

        # cannot handle equality constraints
        nonlinear_constraints = equality_as_inequality_constraints(
            problem.nonlinear_constraints
        )

        raw = scipy.optimize.minimize(
            fun=problem.fun,
            x0=x0,
            method="COBYLA",
            constraints=nonlinear_constraints,
            options=scipy_options,
            tol=self.convergence_xtol_rel,
        )

        return process_scipy_result(raw)


@mark.minimizer(
    name="scipy_ls_trf",
    solver_type=AggregationLevel.LEAST_SQUARES,
    is_available=True,
    is_global=False,
    needs_jac=True,
    needs_hess=False,
    needs_bounds=False,
    supports_parallelism=False,
    supports_bounds=True,
    supports_infinite_bounds=True,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=False,
    disable_history=False,
)
@dataclass(frozen=True)
class ScipyLSTRF(Algorithm):
    """Solve a nonlinear least squares problem with scipy's trust region
    reflective (trf) method."""

    # Relative convergence tolerance on the criterion value.
    convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL
    # Relative convergence tolerance on the gradient norm.
    convergence_gtol_rel: NonNegativeFloat = CONVERGENCE_GTOL_REL
    # Maximum number of criterion evaluations.
    stopping_maxfun: PositiveInt = STOPPING_MAXFUN
    # Relative step size for finite difference approximation (scipy's
    # `diff_step`); scipy picks a default if None.
    relative_step_size_diff_approx: NonNegativeFloat | None = None
    # Method for solving the trust region subproblems; scipy chooses if None.
    tr_solver: Literal["exact", "lsmr"] | None = None
    # Keyword options passed on to the trust region solver.
    tr_solver_options: dict[str, Any] | None = None

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        tr_options = (
            {} if self.tr_solver_options is None else self.tr_solver_options
        )

        lower = -np.inf if problem.bounds.lower is None else problem.bounds.lower
        upper = np.inf if problem.bounds.upper is None else problem.bounds.upper

        raw = scipy.optimize.least_squares(
            fun=problem.fun,
            x0=x0,
            # This optimizer does not work with fun_and_jac
            jac=problem.jac,
            bounds=(lower, upper),
            method="trf",
            max_nfev=self.stopping_maxfun,
            ftol=self.convergence_ftol_rel,
            gtol=self.convergence_gtol_rel,
            diff_step=self.relative_step_size_diff_approx,
            tr_solver=self.tr_solver,
            tr_options=tr_options,
        )

        return process_scipy_result(raw)


@mark.minimizer(
    name="scipy_ls_dogbox",
    solver_type=AggregationLevel.LEAST_SQUARES,
    is_available=True,
    is_global=False,
    needs_jac=True,
    needs_hess=False,
    needs_bounds=False,
    supports_parallelism=False,
    supports_bounds=True,
    supports_infinite_bounds=True,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=False,
    disable_history=False,
)
@dataclass(frozen=True)
class ScipyLSDogbox(Algorithm):
    """Solve a nonlinear least squares problem with scipy's dogleg (dogbox)
    method with rectangular trust regions."""

    # Relative convergence tolerance on the criterion value.
    convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL
    # Relative convergence tolerance on the gradient norm.
    convergence_gtol_rel: NonNegativeFloat = CONVERGENCE_GTOL_REL
    # Maximum number of criterion evaluations.
    stopping_maxfun: PositiveInt = STOPPING_MAXFUN
    # Relative step size for finite difference approximation (scipy's
    # `diff_step`); scipy picks a default if None.
    relative_step_size_diff_approx: NonNegativeFloat | None = None
    # Method for solving the trust region subproblems; scipy chooses if None.
    tr_solver: Literal["exact", "lsmr"] | None = None
    # Keyword options passed on to the trust region solver.
    tr_solver_options: dict[str, Any] | None = None

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        tr_options = (
            {} if self.tr_solver_options is None else self.tr_solver_options
        )

        lower = -np.inf if problem.bounds.lower is None else problem.bounds.lower
        upper = np.inf if problem.bounds.upper is None else problem.bounds.upper

        raw = scipy.optimize.least_squares(
            fun=problem.fun,
            x0=x0,
            # This optimizer does not work with fun_and_jac
            jac=problem.jac,
            bounds=(lower, upper),
            method="dogbox",
            max_nfev=self.stopping_maxfun,
            ftol=self.convergence_ftol_rel,
            gtol=self.convergence_gtol_rel,
            diff_step=self.relative_step_size_diff_approx,
            tr_solver=self.tr_solver,
            tr_options=tr_options,
        )

        return process_scipy_result(raw)
@mark.minimizer(
    name="scipy_ls_lm",
    solver_type=AggregationLevel.LEAST_SQUARES,
    is_available=True,
    is_global=False,
    needs_jac=True,
    needs_hess=False,
    needs_bounds=False,
    supports_parallelism=False,
    supports_bounds=False,
    supports_infinite_bounds=False,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=False,
    disable_history=False,
)
@dataclass(frozen=True)
class ScipyLSLM(Algorithm):
    """Solve a nonlinear least squares problem with scipy's
    Levenberg-Marquardt method (unbounded)."""

    # Relative convergence tolerance on the criterion value.
    convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL
    # Relative convergence tolerance on the gradient norm.
    convergence_gtol_rel: NonNegativeFloat = CONVERGENCE_GTOL_REL
    # Maximum number of criterion evaluations.
    stopping_maxfun: PositiveInt = STOPPING_MAXFUN
    # Relative step size for finite difference approximation (scipy's
    # `diff_step`); scipy picks a default if None.
    relative_step_size_diff_approx: NonNegativeFloat | None = None

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        raw = scipy.optimize.least_squares(
            fun=problem.fun,
            x0=x0,
            # This optimizer does not work with fun_and_jac
            jac=problem.jac,
            method="lm",
            max_nfev=self.stopping_maxfun,
            ftol=self.convergence_ftol_rel,
            gtol=self.convergence_gtol_rel,
            diff_step=self.relative_step_size_diff_approx,
        )

        return process_scipy_result(raw)


@mark.minimizer(
    name="scipy_truncated_newton",
    solver_type=AggregationLevel.SCALAR,
    is_available=True,
    is_global=False,
    needs_jac=True,
    needs_hess=False,
    needs_bounds=False,
    supports_parallelism=False,
    supports_bounds=True,
    supports_infinite_bounds=True,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=False,
    disable_history=False,
)
@dataclass(frozen=True)
class ScipyTruncatedNewton(Algorithm):
    """Minimize a scalar differentiable function with scipy's truncated
    newton (TNC) algorithm."""

    # Absolute convergence tolerance on the criterion value.
    convergence_ftol_abs: NonNegativeFloat = CONVERGENCE_FTOL_ABS
    # Absolute convergence tolerance on the parameters.
    convergence_xtol_abs: NonNegativeFloat = CONVERGENCE_XTOL_ABS
    # Absolute convergence tolerance on the gradient.
    convergence_gtol_abs: NonNegativeFloat = CONVERGENCE_GTOL_ABS
    # Maximum number of criterion evaluations.
    stopping_maxfun: PositiveInt = STOPPING_MAXFUN
    # Max hessian-vector evaluations per main iteration ('maxCGit'); -1 lets
    # scipy choose.
    max_hess_evaluations_per_iteration: int = -1
    # Maximum step for the line search ('stepmx'); 0 lets scipy choose.
    max_step_for_line_search: NonNegativeFloat = 0
    # Severity of the line search ('eta'); -1 lets scipy choose.
    line_search_severity: float = -1
    # Relative precision for finite difference calculations ('accuracy');
    # 0 lets scipy choose.
    finite_difference_precision: NonNegativeFloat = 0
    # Scaling factor applied to the criterion ('rescale'); -1 lets scipy
    # choose.
    criterion_rescale_factor: float = -1
    # TODO: Check type hint for `func_min_estimate`
    # Estimate of the minimum criterion value ('minfev').
    func_min_estimate: float = 0
    # Whether scipy prints convergence messages.
    display: bool = False

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        scipy_options = {
            "ftol": self.convergence_ftol_abs,
            "xtol": self.convergence_xtol_abs,
            "gtol": self.convergence_gtol_abs,
            "maxfun": self.stopping_maxfun,
            "maxCGit": self.max_hess_evaluations_per_iteration,
            "stepmx": self.max_step_for_line_search,
            "minfev": self.func_min_estimate,
            "eta": self.line_search_severity,
            "accuracy": self.finite_difference_precision,
            "rescale": self.criterion_rescale_factor,
            "disp": self.display,
        }

        raw = scipy.optimize.minimize(
            fun=problem.fun_and_jac,
            x0=x0,
            method="TNC",
            jac=True,
            bounds=_get_scipy_bounds(problem.bounds),
            options=scipy_options,
        )

        return process_scipy_result(raw)


@mark.minimizer(
    name="scipy_trust_constr",
    solver_type=AggregationLevel.SCALAR,
    is_available=True,
    is_global=False,
    needs_jac=True,
    needs_hess=False,
    needs_bounds=False,
    supports_parallelism=False,
    supports_bounds=True,
    supports_infinite_bounds=True,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=True,
    disable_history=False,
)
@dataclass(frozen=True)
class ScipyTrustConstr(Algorithm):
    """Minimize a scalar differentiable function with scipy's trust region
    constrained optimizer, which supports nonlinear constraints."""

    # TODO: Check if can be set to CONVERGENCE_GTOL_ABS
    # Absolute convergence tolerance on the gradient norm.
    convergence_gtol_abs: NonNegativeFloat = 1e-08
    # Relative convergence tolerance on the parameters.
    convergence_xtol_rel: NonNegativeFloat = CONVERGENCE_XTOL_REL
    # Maximum number of iterations.
    stopping_maxiter: PositiveInt = STOPPING_MAXITER
    # Initial trust region radius; computed from x0 if None.
    trustregion_initial_radius: PositiveFloat | None = None
    # Whether scipy prints convergence messages.
    display: bool = False

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        radius = (
            calculate_trustregion_initial_radius(x0)
            if self.trustregion_initial_radius is None
            else self.trustregion_initial_radius
        )

        scipy_options = {
            "gtol": self.convergence_gtol_abs,
            "maxiter": self.stopping_maxiter,
            "xtol": self.convergence_xtol_rel,
            "initial_tr_radius": radius,
            "disp": self.display,
        }

        # cannot handle equality constraints
        nonlinear_constraints = equality_as_inequality_constraints(
            problem.nonlinear_constraints
        )

        raw = scipy.optimize.minimize(
            fun=problem.fun_and_jac,
            jac=True,
            x0=x0,
            method="trust-constr",
            bounds=_get_scipy_bounds(problem.bounds),
            constraints=_get_scipy_constraints(nonlinear_constraints),
            options=scipy_options,
        )

        return process_scipy_result(raw)
trustregion_initial_radius, "disp": self.display, } # cannot handle equality constraints nonlinear_constraints = equality_as_inequality_constraints( problem.nonlinear_constraints ) raw_res = scipy.optimize.minimize( fun=problem.fun_and_jac, jac=True, x0=x0, method="trust-constr", bounds=_get_scipy_bounds(problem.bounds), constraints=_get_scipy_constraints(nonlinear_constraints), options=options, ) res = process_scipy_result(raw_res) return res def process_scipy_result(scipy_res: ScipyOptimizeResult) -> InternalOptimizeResult: res = InternalOptimizeResult( x=scipy_res.x, fun=scipy_res.fun, success=bool(scipy_res.success), message=str(scipy_res.message), n_fun_evals=_int_if_not_none(scipy_res.get("nfev")), n_jac_evals=_int_if_not_none(scipy_res.get("njev")), n_hess_evals=_int_if_not_none(scipy_res.get("nhev")), n_iterations=_int_if_not_none(scipy_res.get("nit")), # TODO: Pass on more things once we can convert them to external status=None, jac=None, hess=None, hess_inv=None, max_constraint_violation=None, info=None, history=None, ) return res def _int_if_not_none(value: SupportsInt | None) -> int | None: if value is None: return None return int(value) def _get_scipy_constraints(constraints): """Transform internal nonlinear constraints to scipy readable format. This format is currently only used by scipy_trust_constr. 
""" scipy_constraints = [_internal_to_scipy_constraint(c) for c in constraints] return scipy_constraints def _internal_to_scipy_constraint(c): new_constr = NonlinearConstraint( fun=c["fun"], lb=np.zeros(c["n_constr"]), ub=np.tile(np.inf, c["n_constr"]), jac=c["jac"], ) return new_constr @mark.minimizer( name="scipy_basinhopping", solver_type=AggregationLevel.SCALAR, is_available=True, is_global=True, needs_jac=True, needs_hess=False, needs_bounds=False, supports_parallelism=False, supports_bounds=True, supports_infinite_bounds=True, supports_linear_constraints=False, supports_nonlinear_constraints=False, disable_history=False, ) @dataclass(frozen=True) class ScipyBasinhopping(Algorithm): local_algorithm: ( Literal[ "Nelder-Mead", "Powell", "CG", "BFGS", "Newton-CG", "L-BFGS-B", "TNC", "COBYLA", "SLSQP", "trust-constr", "dogleg", "trust-ncg", "trust-exact", "trust-krylov", ] | Callable ) = "L-BFGS-B" n_local_optimizations: PositiveInt = 100 temperature: NonNegativeFloat = 1.0 stepsize: NonNegativeFloat = 0.5 local_algo_options: dict[str, Any] | None = None take_step: Callable | None = None accept_test: Callable | None = None interval: PositiveInt = 50 convergence_n_unchanged_iterations: PositiveInt | None = None seed: int | np.random.Generator | np.random.RandomState | None = None target_accept_rate: NonNegativeFloat = 0.5 stepwise_factor: NonNegativeFloat = 0.9 def _solve_internal_problem( self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] ) -> InternalOptimizeResult: n_local_optimizations = max(1, self.n_local_optimizations - 1) if self.local_algo_options is None: local_algo_options = {} else: local_algo_options = self.local_algo_options minimizer_kwargs = { "method": self.local_algorithm, "bounds": _get_scipy_bounds(problem.bounds), "jac": problem.jac, } minimizer_kwargs = {**minimizer_kwargs, **local_algo_options} res = scipy.optimize.basinhopping( func=problem.fun, x0=x0, minimizer_kwargs=minimizer_kwargs, niter=n_local_optimizations, 
@mark.minimizer(
    name="scipy_brute",
    solver_type=AggregationLevel.SCALAR,
    is_available=True,
    is_global=True,
    needs_jac=False,
    needs_hess=False,
    needs_bounds=True,
    supports_parallelism=True,
    supports_bounds=True,
    supports_infinite_bounds=False,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=False,
    disable_history=True,
)
@dataclass(frozen=True)
class ScipyBrute(Algorithm):
    """Minimize a scalar function over a grid via scipy's brute force
    search, optionally polished by a local optimizer."""

    # Number of grid points per parameter dimension.
    n_grid_points: PositiveInt = 20
    # Local optimizer applied to the best grid point; None disables polishing.
    polishing_function: Callable | None = None
    # Number of cores used for parallel grid evaluation.
    n_cores: PositiveInt = 1
    batch_evaluator: BatchEvaluatorLiteral | BatchEvaluator = "joblib"

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        workers = _get_workers(self.n_cores, self.batch_evaluator)

        if problem.bounds.lower is None or problem.bounds.upper is None:
            raise ValueError(
                """Global algorithms like scipy_brute need finite bounds for all parameters"""
            )

        raw = scipy.optimize.brute(
            func=problem.fun,
            ranges=tuple(
                zip(problem.bounds.lower, problem.bounds.upper, strict=True)
            ),
            Ns=self.n_grid_points,
            full_output=True,
            finish=self.polishing_function,
            workers=workers,
        )

        # raw = (best_x, best_fun, grid, grid_values); the grid size equals
        # the number of criterion evaluations during the grid search.
        grid_values = raw[2]
        return InternalOptimizeResult(
            x=raw[0],
            fun=raw[1],
            n_fun_evals=grid_values.size,
            n_iterations=grid_values.size,
            success=True,
            message="brute force optimization terminated successfully",
        )
@dataclass(frozen=True)
class ScipyDifferentialEvolution(Algorithm):
    """Find the global minimum of a scalar function with scipy's differential
    evolution algorithm."""

    # Differential evolution strategy to use.
    strategy: (
        Literal[
            "best1bin",
            "best1exp",
            "rand1exp",
            "randtobest1exp",
            "currenttobest1exp",
            "best2exp",
            "rand2exp",
            "randtobest1bin",
            "currenttobest1bin",
            "best2bin",
            "rand2bin",
            "rand1bin",
        ]
        | Callable
    ) = "best1bin"
    # Maximum number of generations.
    stopping_maxiter: PositiveInt = STOPPING_MAXFUN_GLOBAL
    # Population size is this multiplier times the number of parameters.
    population_size_multiplier: NonNegativeInt = 15
    # Relative convergence tolerance on the criterion value.
    convergence_ftol_rel: NonNegativeFloat = 0.01
    # TODO: Refine type to add ranges [0,2] if float.
    mutation_constant: NonNegativeFloat | Tuple[NonNegativeFloat, NonNegativeFloat] = (
        0.5,
        1,
    )
    # TODO: Refine type to add ranges [0,1].
    recombination_constant: NonNegativeFloat = 0.7
    seed: int | np.random.Generator | np.random.RandomState | None = None
    # Whether to polish the best member with L-BFGS-B at the end.
    polish: bool = True
    # How the initial population is generated.
    sampling_method: (
        Literal["latinhypercube", "random", "sobol", "halton"] | NDArray[np.float64]
    ) = "latinhypercube"
    # Absolute convergence tolerance on the criterion value.
    convergence_ftol_abs: NonNegativeFloat = CONVERGENCE_SECOND_BEST_FTOL_ABS
    # Number of cores used for parallel evaluation of the population.
    n_cores: PositiveInt = 1
    batch_evaluator: BatchEvaluatorLiteral | BatchEvaluator = "joblib"

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        workers = _get_workers(self.n_cores, self.batch_evaluator)

        raw = scipy.optimize.differential_evolution(
            func=problem.fun,
            bounds=_get_scipy_bounds(problem.bounds),
            strategy=self.strategy,
            maxiter=self.stopping_maxiter,
            popsize=self.population_size_multiplier,
            tol=self.convergence_ftol_rel,
            mutation=self.mutation_constant,
            recombination=self.recombination_constant,
            seed=self.seed,
            polish=self.polish,
            init=self.sampling_method,
            atol=self.convergence_ftol_abs,
            # "deferred" is required for parallel evaluation via workers.
            updating="deferred",
            workers=workers,
            constraints=_get_scipy_constraints(problem.nonlinear_constraints),
        )

        return process_scipy_result(raw)
@mark.minimizer(
    name="scipy_shgo",
    solver_type=AggregationLevel.SCALAR,
    is_available=True,
    is_global=True,
    needs_jac=True,
    needs_hess=False,
    needs_bounds=False,
    supports_parallelism=False,
    supports_bounds=True,
    supports_infinite_bounds=True,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=True,
    disable_history=False,
)
@dataclass(frozen=True)
class ScipySHGO(Algorithm):
    """Find the global minimum of a scalar function with scipy's simplicial
    homology global optimization (SHGO) algorithm."""

    # Local optimizer used to refine candidate minima.
    local_algorithm: (
        Literal[
            "Nelder-Mead",
            "Powell",
            "CG",
            "BFGS",
            "Newton-CG",
            "L-BFGS-B",
            "TNC",
            "COBYLA",
            "SLSQP",
            "trust-constr",
            "dogleg",
            "trust-ncg",
            "trust-exact",
            "trust-krylov",
        ]
        | Callable
    ) = "L-BFGS-B"
    # Options passed on to the local optimizer.
    local_algo_options: dict[str, Any] | None = None
    # Number of sampling points per iteration.
    n_sampling_points: PositiveInt = 128
    # Number of iterations of the simplicial complex construction.
    n_simplex_iterations: PositiveInt = 1
    sampling_method: Literal["simplicial", "halton", "sobol"] | Callable = "simplicial"
    # Maximum number of sampling evaluations ('maxfev').
    max_sampling_evaluations: PositiveInt | None = None
    # Stop once this criterion value is reached ('f_min').
    convergence_minimum_criterion_value: float | None = None
    # Tolerance for the criterion value stopping check ('f_tol').
    convergence_minimum_criterion_tolerance: NonNegativeFloat = 1e-4
    stopping_maxiter: PositiveInt | None = None
    stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL
    # Wall-time limit in seconds ('maxtime').
    stopping_max_processing_time: PositiveFloat | None = None
    # Minimum required difference in the homology group rank ('minhgrd').
    minimum_homology_group_rank_differential: PositiveInt | None = None
    symmetry: List | bool = False
    minimize_every_iteration: bool = True
    max_local_minimizations_per_iteration: PositiveInt | bool = False
    infinity_constraints: bool = True

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        # Start from the problem's constraints so the name is always bound,
        # also for local algorithms other than COBYLA.
        nonlinear_constraints = problem.nonlinear_constraints
        if self.local_algorithm == "COBYLA":
            # COBYLA cannot handle equality constraints and needs scalar
            # constraint functions. The conversions must be chained: the
            # second one operates on the output of the first. (Previously the
            # second call re-read `problem.nonlinear_constraints`, silently
            # discarding the equality-to-inequality conversion.)
            nonlinear_constraints = equality_as_inequality_constraints(
                nonlinear_constraints
            )
            nonlinear_constraints = vector_as_list_of_scalar_constraints(
                nonlinear_constraints
            )

        local_algo_options = (
            {} if self.local_algo_options is None else self.local_algo_options
        )
        default_minimizer_kwargs = {
            "method": self.local_algorithm,
            "bounds": _get_scipy_bounds(problem.bounds),
            "jac": problem.jac,
        }
        minimizer_kwargs = {**default_minimizer_kwargs, **local_algo_options}

        options = {
            "maxfev": self.max_sampling_evaluations,
            "f_min": self.convergence_minimum_criterion_value,
            "f_tol": self.convergence_minimum_criterion_tolerance,
            "maxiter": self.stopping_maxiter,
            "maxev": self.stopping_maxfun,
            "maxtime": self.stopping_max_processing_time,
            "minhgrd": self.minimum_homology_group_rank_differential,
            "symmetry": self.symmetry,
            "jac": problem.jac,
            "minimize_every_iter": self.minimize_every_iteration,
            "local_iter": self.max_local_minimizations_per_iteration,
            "infty_constraints": self.infinity_constraints,
        }
        # Only pass an options dict if at least one entry is truthy,
        # otherwise let scipy fall back to its internal defaults.
        options_used = options if any(options.values()) else None

        res = scipy.optimize.shgo(
            func=problem.fun,
            bounds=_get_scipy_bounds(problem.bounds),
            constraints=nonlinear_constraints,
            minimizer_kwargs=minimizer_kwargs,
            n=self.n_sampling_points,
            iters=self.n_simplex_iterations,
            sampling_method=self.sampling_method,
            options=options_used,
        )

        return process_scipy_result(res)
@dataclass(frozen=True)
class ScipyDualAnnealing(Algorithm):
    """Find the global minimum of a scalar function with scipy's dual
    annealing algorithm (generalized simulated annealing with optional local
    search)."""

    # Maximum number of global iterations.
    stopping_maxiter: PositiveInt = STOPPING_MAXFUN_GLOBAL
    # Local optimizer used between annealing steps.
    local_algorithm: (
        Literal[
            "Nelder-Mead",
            "Powell",
            "CG",
            "BFGS",
            "Newton-CG",
            "L-BFGS-B",
            "TNC",
            "COBYLA",
            "SLSQP",
            "trust-constr",
            "dogleg",
            "trust-ncg",
            "trust-exact",
            "trust-krylov",
        ]
        | Callable
    ) = "L-BFGS-B"
    # Options passed on to the local optimizer.
    local_algo_options: dict[str, Any] | None = None
    # TODO: Refine type to add ranges (0.01, 5e4]
    initial_temperature: PositiveFloat = 5230.0
    # TODO: Refine type to add ranges (0,1)
    restart_temperature_ratio: PositiveFloat = 2e-05
    # TODO: Refine type to add ranges (1, 3]
    visit: PositiveFloat = 2.62
    # TODO: Refine type to add ranges (-1e4, -5]
    accept: NegativeFloat = -5.0
    # Maximum number of criterion evaluations.
    stopping_maxfun: PositiveInt = STOPPING_MAXFUN
    seed: int | np.random.Generator | np.random.RandomState | None = None
    # If True, skip the local search phase entirely.
    no_local_search: bool = False

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        minimizer_kwargs = {
            "method": self.local_algorithm,
            "bounds": _get_scipy_bounds(problem.bounds),
            "jac": problem.jac,
            **(self.local_algo_options or {}),
        }

        raw = scipy.optimize.dual_annealing(
            func=problem.fun,
            bounds=_get_scipy_bounds(problem.bounds),
            maxiter=self.stopping_maxiter,
            minimizer_kwargs=minimizer_kwargs,
            initial_temp=self.initial_temperature,
            restart_temp_ratio=self.restart_temperature_ratio,
            visit=self.visit,
            accept=self.accept,
            maxfun=self.stopping_maxfun,
            seed=self.seed,
            no_local_search=self.no_local_search,
            x0=x0,
        )

        return process_scipy_result(raw)


@mark.minimizer(
    name="scipy_direct",
    solver_type=AggregationLevel.SCALAR,
    is_available=True,
    is_global=True,
    needs_jac=False,
    needs_hess=False,
    needs_bounds=True,
    supports_parallelism=False,
    supports_bounds=True,
    supports_infinite_bounds=False,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=False,
    disable_history=False,
)
@dataclass(frozen=True)
class ScipyDirect(Algorithm):
    """Find the global minimum of a scalar function with scipy's DIRECT
    (DIviding RECTangles) algorithm."""

    # Relative criterion tolerance used in the rectangle selection ('eps').
    convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL
    # Maximum number of criterion evaluations.
    stopping_maxfun: PositiveInt = STOPPING_MAXFUN
    # Maximum number of iterations.
    stopping_maxiter: PositiveInt = STOPPING_MAXFUN_GLOBAL
    # Whether to use the locally biased DIRECT_L variant.
    locally_biased: bool = True
    # Stop once this criterion value is reached ('f_min').
    convergence_minimum_criterion_value: float = -np.inf
    # TODO: must be between 0 and 1
    convergence_minimum_criterion_tolerance: NonNegativeFloat = 1e-4
    # TODO: must be between 0 and 1
    volume_hyperrectangle_tolerance: NonNegativeFloat = 1e-16
    # TODO: must be between 0 and 1
    length_hyperrectangle_tolerance: NonNegativeFloat = 1e-6

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        raw = scipy.optimize.direct(
            func=problem.fun,
            bounds=_get_scipy_bounds(problem.bounds),
            eps=self.convergence_ftol_rel,
            maxfun=self.stopping_maxfun,
            maxiter=self.stopping_maxiter,
            locally_biased=self.locally_biased,
            f_min=self.convergence_minimum_criterion_value,
            f_min_rtol=self.convergence_minimum_criterion_tolerance,
            vol_tol=self.volume_hyperrectangle_tolerance,
            len_tol=self.length_hyperrectangle_tolerance,
        )

        return process_scipy_result(raw)
maxfun=self.stopping_maxfun, maxiter=self.stopping_maxiter, locally_biased=self.locally_biased, f_min=self.convergence_minimum_criterion_value, f_min_rtol=self.convergence_minimum_criterion_tolerance, vol_tol=self.volume_hyperrectangle_tolerance, len_tol=self.length_hyperrectangle_tolerance, ) return process_scipy_result(res) def _get_workers(n_cores, batch_evaluator): batch_evaluator = process_batch_evaluator(batch_evaluator) out = functools.partial( batch_evaluator, n_cores=n_cores, error_handling="raise", ) return out def _get_scipy_bounds(bounds: InternalBounds) -> ScipyBounds | None: if bounds.lower is None and bounds.upper is None: return None lower = bounds.lower if bounds.lower is not None else -np.inf upper = bounds.upper if bounds.upper is not None else np.inf return ScipyBounds(lb=lower, ub=upper) def process_scipy_result_old(scipy_results_obj): # using get with defaults to access dict elements is just a safety measure raw_res = {**scipy_results_obj} processed = { "solution_x": raw_res.get("x"), "solution_criterion": raw_res.get("fun"), "solution_derivative": raw_res.get("jac"), "solution_hessian": raw_res.get("hess"), "n_fun_evals": raw_res.get("nfev"), "n_jac_evals": raw_res.get("njac") or raw_res.get("njev"), "n_iterations": raw_res.get("nit"), "success": raw_res.get("success"), "reached_convergence_criterion": None, "message": raw_res.get("message"), } return processed ================================================ FILE: src/optimagic/optimizers/tao_optimizers.py ================================================ """This module implements the POUNDERs algorithm.""" import functools from dataclasses import dataclass import numpy as np from numpy.typing import NDArray from optimagic import mark from optimagic.config import IS_PETSC4PY_INSTALLED from optimagic.exceptions import NotInstalledError from optimagic.optimization.algo_options import ( CONVERGENCE_GTOL_ABS, CONVERGENCE_GTOL_REL, CONVERGENCE_GTOL_SCALED, STOPPING_MAXITER, ) from 
from optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult
from optimagic.optimization.internal_optimization_problem import (
    InternalOptimizationProblem,
)
from optimagic.typing import AggregationLevel, NonNegativeFloat, PositiveInt
from optimagic.utilities import calculate_trustregion_initial_radius


@mark.minimizer(
    name="tao_pounders",
    solver_type=AggregationLevel.LEAST_SQUARES,
    is_available=IS_PETSC4PY_INSTALLED,
    is_global=False,
    needs_jac=False,
    needs_hess=False,
    needs_bounds=False,
    supports_parallelism=False,
    supports_bounds=True,
    supports_infinite_bounds=True,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=False,
    disable_history=False,
)
@dataclass(frozen=True)
class TAOPounders(Algorithm):
    """Implement the POUNDERs algorithm."""

    # Absolute convergence tolerance on the gradient norm.
    convergence_gtol_abs: NonNegativeFloat = CONVERGENCE_GTOL_ABS
    # Relative convergence tolerance on the gradient norm.
    convergence_gtol_rel: NonNegativeFloat = CONVERGENCE_GTOL_REL
    # Scaled convergence tolerance on the gradient norm.
    convergence_gtol_scaled: NonNegativeFloat = CONVERGENCE_GTOL_SCALED
    # Initial trust region radius; computed from x0 if None.
    trustregion_initial_radius: NonNegativeFloat | None = None
    # Maximum number of iterations.
    stopping_maxiter: PositiveInt = STOPPING_MAXITER

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        raw = tao_pounders(
            criterion=problem.fun,
            x=x0,
            lower_bounds=problem.bounds.lower,
            upper_bounds=problem.bounds.upper,
            convergence_gtol_abs=self.convergence_gtol_abs,
            convergence_gtol_rel=self.convergence_gtol_rel,
            convergence_gtol_scaled=self.convergence_gtol_scaled,
            trustregion_initial_radius=self.trustregion_initial_radius,
            stopping_maxiter=self.stopping_maxiter,
        )

        solver_info = {
            "gradient_norm": raw["gradient_norm"],
            "criterion_norm": raw["criterion_norm"],
            "convergence_code": raw["convergence_code"],
            "convergence_reason": raw["reached_convergence_criterion"],
        }

        return InternalOptimizeResult(
            x=raw["solution_x"],
            fun=raw["solution_criterion"],
            success=raw["success"],
            message=raw["message"],
            n_fun_evals=raw["n_fun_evals"],
            n_jac_evals=0,
            n_hess_evals=0,
            n_iterations=raw["n_iterations"],
            info=solver_info,
        )
) return res def tao_pounders( criterion, x, lower_bounds, upper_bounds, *, convergence_gtol_abs=CONVERGENCE_GTOL_ABS, convergence_gtol_rel=CONVERGENCE_GTOL_REL, convergence_gtol_scaled=CONVERGENCE_GTOL_SCALED, trustregion_initial_radius=None, stopping_maxiter=STOPPING_MAXITER, ): r"""Minimize a function using the POUNDERs algorithm. For details see :ref: `tao_algorithm`. """ if not IS_PETSC4PY_INSTALLED: raise NotInstalledError( "The 'tao_pounders' algorithm requires the petsc4py package to be " "installed. If you are using Linux or MacOS, install the package with " "'conda install -c conda-forge petsc4py'. The package is not available on " "Windows. Windows users can use optimagics 'pounders' algorithm instead." ) from petsc4py import PETSc first_eval = criterion(x) n_errors = len(first_eval) _x = _initialise_petsc_array(x) # We need to know the number of contributions of the criterion value to allocate the # array. residuals_out = _initialise_petsc_array(n_errors) # Create the solver object. tao = PETSc.TAO().create(PETSc.COMM_WORLD) # Set the solver type. tao.setType("pounders") tao.setFromOptions() def func_tao(tao, x, resid_out): # noqa: ARG001 """Evaluate objective and attach result to an petsc object f. This is required to use the pounders solver from tao. Args: tao: The tao object we created for the optimization task. x (PETSc.array): Current parameter values. f: Petsc object in which we save the current function value. """ resid_out.array = criterion(x.array) # Set the procedure for calculating the objective. This part has to be changed if we # want more than pounders. tao.setResidual(func_tao, residuals_out) if trustregion_initial_radius is None: trustregion_initial_radius = calculate_trustregion_initial_radius(_x) elif trustregion_initial_radius <= 0: raise ValueError("The initial trust region radius must be > 0.") tao.setInitialTrustRegionRadius(trustregion_initial_radius) # Add bounds if provided. 
if lower_bounds is not None or upper_bounds is not None: if lower_bounds is None: lower_bounds = np.full(len(x), -np.inf) if upper_bounds is None: upper_bounds = np.full(len(x), np.inf) lower_bounds = _initialise_petsc_array(lower_bounds) upper_bounds = _initialise_petsc_array(upper_bounds) tao.setVariableBounds(lower_bounds, upper_bounds) # Put the starting values into the container and pass them to the optimizer. tao.setInitial(_x) # Obtain tolerances for the convergence criteria. Since we can not create # scaled_gradient_tolerance manually we manually set absolute_gradient_tolerance and # or relative_gradient_tolerance to zero once a subset of these two is turned off # and scaled_gradient_tolerance is still turned on. default_gatol = convergence_gtol_abs if convergence_gtol_abs else -1 default_gttol = convergence_gtol_scaled if convergence_gtol_scaled else -1 default_grtol = convergence_gtol_rel if convergence_gtol_rel else -1 # Set tolerances for default convergence tests. tao.setTolerances( gatol=default_gatol, grtol=default_grtol, gttol=default_gttol, ) # Set user defined convergence tests. Beware that specifying multiple tests could # overwrite others or lead to unclear behavior. if stopping_maxiter is not None: tao.setConvergenceTest(functools.partial(_max_iters, stopping_maxiter)) elif convergence_gtol_scaled is False and convergence_gtol_abs is False: tao.setConvergenceTest(functools.partial(_grtol_conv, convergence_gtol_rel)) elif convergence_gtol_rel is False and convergence_gtol_scaled is False: tao.setConvergenceTest(functools.partial(_gatol_conv, convergence_gtol_abs)) elif convergence_gtol_scaled is False: tao.setConvergenceTest( functools.partial( _grtol_gatol_conv, convergence_gtol_rel, convergence_gtol_abs, ) ) # Run the problem. tao.solve() results = _process_pounders_results(residuals_out, tao) # Destroy petsc objects for memory reasons. 
petsc_bounds = [b for b in (lower_bounds, upper_bounds) if b is not None] for obj in [tao, _x, residuals_out, *petsc_bounds]: obj.destroy() return results def _initialise_petsc_array(len_or_array): """Initialize an empty array or fill in provided values. Args: len_or_array (int or numpy.ndarray): If the value is an integer, allocate an empty array with the given length. If the value is an array, allocate an array of equal length and fill in the values. """ from petsc4py import PETSc length = len_or_array if isinstance(len_or_array, int) else len(len_or_array) array = PETSc.Vec().create(PETSc.COMM_WORLD) array.setSizes(length) array.setFromOptions() if isinstance(len_or_array, np.ndarray): array.array = len_or_array return array def _max_iters(max_iterations, tao): if tao.getSolutionStatus()[0] < max_iterations: return 0 elif tao.getSolutionStatus()[0] >= max_iterations: tao.setConvergedReason(8) def _gatol_conv(absolute_gradient_tolerance, tao): if tao.getSolutionStatus()[2] >= absolute_gradient_tolerance: return 0 elif tao.getSolutionStatus()[2] < absolute_gradient_tolerance: tao.setConvergedReason(3) def _grtol_conv(relative_gradient_tolerance, tao): if ( tao.getSolutionStatus()[2] / tao.getSolutionStatus()[1] >= relative_gradient_tolerance ): return 0 elif ( tao.getSolutionStatus()[2] / tao.getSolutionStatus()[1] < relative_gradient_tolerance ): tao.setConvergedReason(4) def _grtol_gatol_conv(relative_gradient_tolerance, absolute_gradient_tolerance, tao): if ( tao.getSolutionStatus()[2] / tao.getSolutionStatus()[1] >= relative_gradient_tolerance ): return 0 elif ( tao.getSolutionStatus()[2] / tao.getSolutionStatus()[1] < relative_gradient_tolerance ): tao.setConvergedReason(4) elif tao.getSolutionStatus()[2] < absolute_gradient_tolerance: tao.setConvergedReason(3) def _translate_tao_convergence_reason(tao_resaon): mapping = { 3: "absolute_gradient_tolerance below critical value", 4: "relative_gradient_tolerance below critical value", 5: 
"scaled_gradient_tolerance below critical value", 6: "step size small", 7: "objective below min value", 8: "user defined", -2: "maxits reached", -4: "numerical problems", -5: "max funcevals reached", -6: "line search failure", -7: "trust region failure", -8: "user defined", } return mapping[tao_resaon] def _process_pounders_results(residuals_out, tao): convergence_code = tao.getConvergedReason() convergence_reason = _translate_tao_convergence_reason(convergence_code) results = { "solution_x": tao.solution.array, "solution_criterion": tao.function, "solution_derivative": None, "solution_hessian": None, "n_fun_evals": tao.getIterationNumber(), "n_jac_evals": None, "n_iterations": None, "success": bool(convergence_code >= 0), "reached_convergence_criterion": ( convergence_reason if convergence_code >= 0 else None ), "message": convergence_reason, # Further results. "solution_criterion_values": residuals_out.array, "gradient_norm": tao.gnorm, "criterion_norm": tao.cnorm, "convergence_code": convergence_code, } return results ================================================ FILE: src/optimagic/optimizers/tranquilo.py ================================================ from __future__ import annotations from dataclasses import dataclass from typing import TYPE_CHECKING, Callable, Literal import numpy as np from numpy.typing import NDArray from packaging import version from optimagic import mark from optimagic.config import IS_TRANQUILO_INSTALLED from optimagic.exceptions import NotInstalledError from optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult from optimagic.optimization.internal_optimization_problem import ( InternalOptimizationProblem, ) from optimagic.typing import ( AggregationLevel, NonNegativeFloat, NonNegativeInt, PositiveFloat, PositiveInt, ) if TYPE_CHECKING: from tranquilo.options import ( AcceptanceOptions, FilterOptions, FitterOptions, NoiseAdaptationOptions, RadiusOptions, SamplerOptions, StagnationOptions, SubsolverOptions, 
        VarianceEstimatorOptions,
    )

if IS_TRANQUILO_INSTALLED:
    import tranquilo

    # The internal entry point used below only exists from tranquilo 0.1.0 on.
    IS_TRANQUILO_VERSION_NEWER_OR_EQUAL_TO_0_1_0 = version.parse(
        tranquilo.__version__
    ) >= version.parse("0.1.0")
else:
    IS_TRANQUILO_VERSION_NEWER_OR_EQUAL_TO_0_1_0 = False

TRANQUILO_INSTALLATION_INSTRUCTIONS = (
    "The 'tranquilo' algorithm requires the tranquilo package version 0.1.0 or newer "
    "to be installed. Install it with 'conda -c conda-forge install tranquilo>=0.1.0'."
)


@mark.minimizer(
    name="tranquilo",
    solver_type=AggregationLevel.SCALAR,
    is_available=IS_TRANQUILO_INSTALLED,
    is_global=False,
    needs_jac=False,
    needs_hess=False,
    needs_bounds=False,
    supports_parallelism=True,
    supports_bounds=True,
    supports_infinite_bounds=True,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=False,
    disable_history=False,
)
@dataclass(frozen=True)
class Tranquilo(Algorithm):
    """Scalar (non least-squares) variant of the tranquilo trust-region optimizer.

    All fields are forwarded unchanged to ``tranquilo.tranquilo._tranquilo``;
    None values mean "let tranquilo pick its internal default".
    """

    # function type
    functype: Literal["scalar"] = "scalar"

    # basic options
    noisy: bool = False

    # convergence options
    disable_convergence: bool = False
    convergence_ftol_abs: NonNegativeFloat = 0.0
    convergence_gtol_abs: NonNegativeFloat = 0.0
    convergence_xtol_abs: NonNegativeFloat = 0.0
    convergence_ftol_rel: NonNegativeFloat = 2e-9
    convergence_gtol_rel: NonNegativeFloat = 1e-8
    convergence_xtol_rel: NonNegativeFloat = 1e-8
    convergence_min_trust_region_radius: NonNegativeFloat = 0.0

    # stopping options
    stopping_maxfun: PositiveInt = 2_000
    stopping_maxiter: PositiveInt = 200
    stopping_maxtime: NonNegativeFloat = np.inf

    # single advanced options
    batch_evaluator: Literal[
        "joblib",
        "pathos",
    ] = "joblib"
    n_cores: PositiveInt = 1
    batch_size: PositiveInt | None = None
    sample_size: PositiveInt | None = None
    model_type: (
        Literal[
            "quadratic",
            "linear",
        ]
        | None
    ) = None
    search_radius_factor: PositiveFloat | None = None
    n_evals_per_point: NonNegativeInt | None = None
    n_evals_at_start: NonNegativeInt | None = None
    seed: int | None = 925408

    # bundled advanced options
    radius_options: RadiusOptions | None = None
    stagnation_options: StagnationOptions | None = None
    noise_adaptation_options: NoiseAdaptationOptions | None = None

    # component names and related options
    sampler: (
        Literal[
            "optimal_hull",
            "random_hull",
            "random_interior",
        ]
        | Callable
    ) = "optimal_hull"
    sampler_options: SamplerOptions | None = None
    sample_filter: (
        Literal[
            "discard_all",
            "keep_all",
            "clustering",
            "drop_excess",
        ]
        | Callable
        | None
    ) = None
    sample_filter_options: FilterOptions | None = None
    model_fitter: (
        Literal[
            "ols",
            "ridge",
            "powell",
            "tranquilo",
        ]
        | Callable
        | None
    ) = None
    model_fitter_options: FitterOptions | None = None
    cube_subsolver: (
        Literal[
            "bntr",
            "bntr_fast",
            "fallback_cube",
            "fallback_multistart",
        ]
        | Callable
    ) = "bntr_fast"
    sphere_subsolver: (
        Literal[
            "gqtpar",
            "gqtpar_fast",
            "fallback_reparametrized",
            "fallback_inscribed_cube",
            "fallback_norm_constraint",
        ]
        | Callable
    ) = "gqtpar_fast"
    retry_subproblem_with_fallback: bool = True
    subsolver_options: SubsolverOptions | None = None
    acceptance_decider: (
        Literal[
            "classic",
            "naive_noisy",
            "classic_line_search",
            "noisy",
        ]
        | Callable
        | None
    ) = None
    acceptance_decider_options: AcceptanceOptions | None = None
    variance_estimator: Literal["classic"] | Callable = "classic"
    variance_estimator_options: VarianceEstimatorOptions | None = None
    infinity_handler: Literal["relative"] | Callable = "relative"
    residualize: bool | None = None

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        # The availability check is deferred to call time so the class can be
        # constructed (e.g. for documentation) without tranquilo installed.
        if not IS_TRANQUILO_VERSION_NEWER_OR_EQUAL_TO_0_1_0:
            raise NotInstalledError(TRANQUILO_INSTALLATION_INSTRUCTIONS)

        from tranquilo.tranquilo import _tranquilo

        # Forward every dataclass field to tranquilo's internal entry point,
        # translating optimagic option names to tranquilo's naming scheme.
        raw_res = _tranquilo(
            functype="scalar",
            batch_fun=problem.batch_fun,
            x=x0,
            lower_bounds=problem.bounds.lower,
            upper_bounds=problem.bounds.upper,
            noisy=self.noisy,
            disable_convergence=self.disable_convergence,
            convergence_absolute_criterion_tolerance=self.convergence_ftol_abs,
            convergence_absolute_gradient_tolerance=self.convergence_gtol_abs,
            convergence_absolute_params_tolerance=self.convergence_xtol_abs,
            convergence_relative_criterion_tolerance=self.convergence_ftol_rel,
            convergence_relative_gradient_tolerance=self.convergence_gtol_rel,
            convergence_relative_params_tolerance=self.convergence_xtol_rel,
            convergence_min_trust_region_radius=self.convergence_min_trust_region_radius,
            stopping_max_criterion_evaluations=self.stopping_maxfun,
            stopping_max_iterations=self.stopping_maxiter,
            stopping_max_time=self.stopping_maxtime,
            n_cores=self.n_cores,
            batch_size=self.batch_size,
            sample_size=self.sample_size,
            model_type=self.model_type,
            search_radius_factor=self.search_radius_factor,
            n_evals_per_point=self.n_evals_per_point,
            n_evals_at_start=self.n_evals_at_start,
            seed=self.seed,
            radius_options=self.radius_options,
            stagnation_options=self.stagnation_options,
            noise_adaptation_options=self.noise_adaptation_options,
            sampler=self.sampler,
            sampler_options=self.sampler_options,
            sample_filter=self.sample_filter,
            sample_filter_options=self.sample_filter_options,
            model_fitter=self.model_fitter,
            model_fitter_options=self.model_fitter_options,
            cube_subsolver=self.cube_subsolver,
            sphere_subsolver=self.sphere_subsolver,
            retry_subproblem_with_fallback=self.retry_subproblem_with_fallback,
            subsolver_options=self.subsolver_options,
            acceptance_decider=self.acceptance_decider,
            acceptance_decider_options=self.acceptance_decider_options,
            variance_estimator=self.variance_estimator,
            variance_estimator_options=self.variance_estimator_options,
            infinity_handler=self.infinity_handler,
            residualize=self.residualize,
        )

        res = InternalOptimizeResult(
            x=raw_res["solution_x"],
            fun=raw_res["solution_criterion"],
            message=raw_res["message"],
            info={"states": raw_res["states"]},
        )

        return res


@mark.minimizer(
    name="tranquilo_ls",
    solver_type=AggregationLevel.LEAST_SQUARES,
    is_available=IS_TRANQUILO_INSTALLED,
    is_global=False,
    needs_jac=False,
    needs_hess=False,
    needs_bounds=False,
    supports_parallelism=True,
    supports_bounds=True,
    supports_infinite_bounds=True,
    supports_linear_constraints=False,
    supports_nonlinear_constraints=False,
    disable_history=False,
)
@dataclass(frozen=True)
class TranquiloLS(Algorithm):
    """Least-squares variant of the tranquilo trust-region optimizer.

    Mirrors ``Tranquilo`` field for field (without ``functype``); all options
    are forwarded unchanged to ``tranquilo.tranquilo._tranquilo`` and None
    values mean "let tranquilo pick its internal default".
    """

    # basic options
    noisy: bool = False

    # convergence options
    disable_convergence: bool = False
    convergence_ftol_abs: NonNegativeFloat = 0.0
    convergence_gtol_abs: NonNegativeFloat = 0.0
    convergence_xtol_abs: NonNegativeFloat = 0.0
    convergence_ftol_rel: NonNegativeFloat = 2e-9
    convergence_gtol_rel: NonNegativeFloat = 1e-8
    convergence_xtol_rel: NonNegativeFloat = 1e-8
    convergence_min_trust_region_radius: NonNegativeFloat = 0.0

    # stopping options
    stopping_maxfun: PositiveInt = 2_000
    stopping_maxiter: PositiveInt = 200
    stopping_maxtime: NonNegativeFloat = np.inf

    # single advanced options
    batch_evaluator: Literal[
        "joblib",
        "pathos",
    ] = "joblib"
    n_cores: PositiveInt = 1
    batch_size: PositiveInt | None = None
    sample_size: PositiveInt | None = None
    model_type: (
        Literal[
            "quadratic",
            "linear",
        ]
        | None
    ) = None
    search_radius_factor: PositiveFloat | None = None
    n_evals_per_point: NonNegativeInt | None = None
    n_evals_at_start: NonNegativeInt | None = None
    seed: int | None = 925408

    # bundled advanced options
    radius_options: RadiusOptions | None = None
    stagnation_options: StagnationOptions | None = None
    noise_adaptation_options: NoiseAdaptationOptions | None = None

    # component names and related options
    sampler: (
        Literal[
            "optimal_hull",
            "random_hull",
            "random_interior",
        ]
        | Callable
    ) = "optimal_hull"
    sampler_options: SamplerOptions | None = None
    sample_filter: (
        Literal[
            "discard_all",
            "keep_all",
            "clustering",
            "drop_excess",
        ]
        | Callable
        | None
    ) = None
    sample_filter_options: FilterOptions | None = None
    model_fitter: (
        Literal[
            "ols",
            "ridge",
            "powell",
            "tranquilo",
        ]
        | Callable
        | None
    ) = None
    model_fitter_options: FitterOptions | None = None
    cube_subsolver: (
        Literal[
            "bntr",
            "bntr_fast",
            "fallback_cube",
            "fallback_multistart",
        ]
        | Callable
    ) = "bntr_fast"
    sphere_subsolver: (
        Literal[
            "gqtpar",
            "gqtpar_fast",
            "fallback_reparametrized",
            "fallback_inscribed_cube",
            "fallback_norm_constraint",
        ]
        | Callable
    ) = "gqtpar_fast"
    retry_subproblem_with_fallback: bool = True
    subsolver_options: SubsolverOptions | None = None
    acceptance_decider: (
        Literal[
            "classic",
            "naive_noisy",
            "classic_line_search",
            "noisy",
        ]
        | Callable
        | None
    ) = None
    acceptance_decider_options: AcceptanceOptions | None = None
    variance_estimator: Literal["classic"] | Callable = "classic"
    variance_estimator_options: VarianceEstimatorOptions | None = None
    infinity_handler: Literal["relative"] | Callable = "relative"
    residualize: bool | None = None

    def _solve_internal_problem(
        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
    ) -> InternalOptimizeResult:
        # The availability check is deferred to call time so the class can be
        # constructed (e.g. for documentation) without tranquilo installed.
        if not IS_TRANQUILO_VERSION_NEWER_OR_EQUAL_TO_0_1_0:
            raise NotInstalledError(TRANQUILO_INSTALLATION_INSTRUCTIONS)

        from tranquilo.tranquilo import _tranquilo

        # Identical forwarding as in Tranquilo except functype="least_squares".
        raw_res = _tranquilo(
            functype="least_squares",
            batch_fun=problem.batch_fun,
            x=x0,
            lower_bounds=problem.bounds.lower,
            upper_bounds=problem.bounds.upper,
            noisy=self.noisy,
            disable_convergence=self.disable_convergence,
            convergence_absolute_criterion_tolerance=self.convergence_ftol_abs,
            convergence_absolute_gradient_tolerance=self.convergence_gtol_abs,
            convergence_absolute_params_tolerance=self.convergence_xtol_abs,
            convergence_relative_criterion_tolerance=self.convergence_ftol_rel,
            convergence_relative_gradient_tolerance=self.convergence_gtol_rel,
            convergence_relative_params_tolerance=self.convergence_xtol_rel,
            convergence_min_trust_region_radius=self.convergence_min_trust_region_radius,
            stopping_max_criterion_evaluations=self.stopping_maxfun,
            stopping_max_iterations=self.stopping_maxiter,
            stopping_max_time=self.stopping_maxtime,
            n_cores=self.n_cores,
            batch_size=self.batch_size,
            sample_size=self.sample_size,
            model_type=self.model_type,
            search_radius_factor=self.search_radius_factor,
            n_evals_per_point=self.n_evals_per_point,
            n_evals_at_start=self.n_evals_at_start,
            seed=self.seed,
            radius_options=self.radius_options,
            stagnation_options=self.stagnation_options,
            noise_adaptation_options=self.noise_adaptation_options,
            sampler=self.sampler,
            sampler_options=self.sampler_options,
            sample_filter=self.sample_filter,
            sample_filter_options=self.sample_filter_options,
            model_fitter=self.model_fitter,
            model_fitter_options=self.model_fitter_options,
            cube_subsolver=self.cube_subsolver,
            sphere_subsolver=self.sphere_subsolver,
            retry_subproblem_with_fallback=self.retry_subproblem_with_fallback,
            subsolver_options=self.subsolver_options,
            acceptance_decider=self.acceptance_decider,
            acceptance_decider_options=self.acceptance_decider_options,
            variance_estimator=self.variance_estimator,
            variance_estimator_options=self.variance_estimator_options,
            infinity_handler=self.infinity_handler,
            residualize=self.residualize,
        )

        res = InternalOptimizeResult(
            x=raw_res["solution_x"],
            fun=raw_res["solution_criterion"],
            message=raw_res["message"],
            info={"states": raw_res["states"]},
        )

        return res


================================================
FILE: src/optimagic/parameters/__init__.py
================================================


================================================
FILE: src/optimagic/parameters/block_trees.py
================================================
"""Functions to convert between array and block-tree representations of a matrix."""

import numpy as np
import pandas as pd
from pybaum import tree_flatten, tree_unflatten
from pybaum import tree_just_flatten as tree_leaves

from optimagic.parameters.tree_registry import get_registry


def matrix_to_block_tree(matrix, outer_tree, inner_tree):
    """Convert a matrix (2-dimensional array) to block-tree.

    A block tree most often arises when one applies an operation to a function that
    maps between two trees. For certain functions this results in a 2-dimensional data
    array.
    Two main examples are the Jacobian of the function f : inner_tree -> outer_tree,
    which results in a block tree structure, or the covariance matrix of a tree, in
    which case outer_tree = inner_tree.

    Args:
        matrix (numpy.ndarray): 2d representation of the block tree. Has shape (m, n).
        outer_tree: A pytree. If flattened to scalars has length m.
        inner_tree: A pytree. If flattened to scalars has length n.

    Returns:
        block_tree: A (block) pytree.

    """
    _check_dimensions_matrix(matrix, outer_tree, inner_tree)

    flat_outer, treedef_outer = tree_flatten(outer_tree)
    flat_inner, treedef_inner = tree_flatten(inner_tree)

    # Pandas leaves are converted to numpy so that np.shape / np.prod see the
    # underlying array dimensions; non-pandas leaves pass through unchanged.
    flat_outer_np = [_convert_to_numpy(leaf, only_pandas=True) for leaf in flat_outer]
    flat_inner_np = [_convert_to_numpy(leaf, only_pandas=True) for leaf in flat_inner]

    shapes_outer = [np.shape(a) for a in flat_outer_np]
    shapes_inner = [np.shape(a) for a in flat_inner_np]

    # Cumulative leaf sizes give the split points that partition the matrix
    # rows/columns into one slice per leaf.
    block_bounds_outer = np.cumsum([int(np.prod(s)) for s in shapes_outer[:-1]])
    block_bounds_inner = np.cumsum([int(np.prod(s)) for s in shapes_inner[:-1]])

    blocks = []
    for leaf_outer, s1, submat in zip(
        flat_outer,
        shapes_outer,
        np.split(matrix, block_bounds_outer, axis=0),
        strict=False,
    ):
        row = []
        for leaf_inner, s2, block_values in zip(
            flat_inner,
            shapes_inner,
            np.split(submat, block_bounds_inner, axis=1),
            strict=False,
        ):
            # Each block gets the concatenated shapes of its outer and inner
            # leaf and is re-wrapped in pandas if either leaf was pandas.
            raw_block = block_values.reshape((*s1, *s2))
            block = _convert_raw_block_to_pandas(raw_block, leaf_outer, leaf_inner)
            row.append(block)

        blocks.append(row)

    # Unflatten rows with the inner treedef, then the rows with the outer one.
    block_tree = tree_unflatten(
        treedef_outer, [tree_unflatten(treedef_inner, row) for row in blocks]
    )

    return block_tree


def hessian_to_block_tree(hessian, f_tree, params_tree):
    """Convert a Hessian array to block-tree format.

    Remark: In comparison to Jax we need this formatting function because we calculate
    the second derivative using second-order finite differences. Jax computes the
    second derivative by applying their jacobian function twice, which produces the
    desired block-tree shape of the Hessian automatically. If we apply our first
    derivative function twice we get the same block-tree shape.

    Args:
        hessian (np.ndarray): The Hessian, 2- or 3-dimensional array representation of
            the resulting block-tree.
        f_tree (pytree): The function evaluated at params_tree.
        params_tree (pytree): The params_tree.

    Returns:
        hessian_block_tree (pytree): The pytree

    """
    _check_dimensions_hessian(hessian, f_tree, params_tree)

    # Promote a 2d Hessian (scalar-valued f) to 3d so one code path handles
    # both the scalar and the multidimensional case.
    if hessian.ndim == 2:
        hessian = hessian[np.newaxis]

    flat_f, treedef_f = tree_flatten(f_tree)
    flat_p, treedef_p = tree_flatten(params_tree)
    flat_f_np = [_convert_to_numpy(leaf, only_pandas=True) for leaf in flat_f]
    flat_p_np = [_convert_to_numpy(leaf, only_pandas=True) for leaf in flat_p]

    shapes_f = [np.shape(a) for a in flat_f_np]
    shapes_p = [np.shape(a) for a in flat_p_np]

    # Split points per leaf along the f axis (axis 0) and both params axes.
    block_bounds_f = np.cumsum([int(np.prod(s)) for s in shapes_f[:-1]])
    block_bounds_p = np.cumsum([int(np.prod(s)) for s in shapes_p[:-1]])

    sub_block_trees = []
    for s0, subarr in zip(
        shapes_f, np.split(hessian, block_bounds_f, axis=0), strict=False
    ):
        blocks = []
        for leaf_outer, s1, submat in zip(
            flat_p, shapes_p, np.split(subarr, block_bounds_p, axis=1), strict=False
        ):
            row = []
            for leaf_inner, s2, block_values in zip(
                flat_p, shapes_p, np.split(submat, block_bounds_p, axis=2), strict=False
            ):
                # Drop singleton dimensions so e.g. a scalar f leaf does not
                # inflate the block's rank.
                _shape = [k for k in (*s0, *s1, *s2) if k != 1]
                raw_block = block_values.reshape(_shape)
                block = _convert_raw_block_to_pandas(raw_block, leaf_outer, leaf_inner)
                row.append(block)
            blocks.append(row)
        block_tree = tree_unflatten(
            treedef_p, [tree_unflatten(treedef_p, row) for row in blocks]
        )
        sub_block_trees.append(block_tree)

    hessian_block_tree = tree_unflatten(treedef_f, sub_block_trees)
    return hessian_block_tree


def block_tree_to_matrix(block_tree, outer_tree, inner_tree):
    """Convert a block tree to a matrix.

    A block tree most often arises when one applies an operation to a function that
    maps between two trees.
    Two main examples are the Jacobian of the function f : inner_tree -> outer_tree,
    which results in a block tree structure, or the covariance matrix of a tree, in
    which case outer_tree = inner_tree.

    Args:
        block_tree: A (block) pytree, must match dimensions of outer_tree and
            inner_tree
        outer_tree: A pytree.
        inner_tree: A pytree.

    Returns:
        matrix (np.ndarray): 2d array containing information stored in block_tree.

    """
    flat_outer = tree_leaves(outer_tree)
    flat_inner = tree_leaves(inner_tree)
    flat_block_tree = tree_leaves(block_tree)

    flat_outer_np = [_convert_to_numpy(leaf, only_pandas=True) for leaf in flat_outer]
    flat_inner_np = [_convert_to_numpy(leaf, only_pandas=True) for leaf in flat_inner]

    size_outer = [np.size(a) for a in flat_outer_np]
    size_inner = [np.size(a) for a in flat_inner_np]

    n_blocks_outer = len(size_outer)
    n_blocks_inner = len(size_inner)

    # The flattened block tree lists blocks row-major: one run of
    # n_blocks_inner blocks per outer leaf.
    block_rows_raw = [
        flat_block_tree[n_blocks_inner * i : n_blocks_inner * (i + 1)]
        for i in range(n_blocks_outer)
    ]

    block_rows = []
    for s1, row in zip(size_outer, block_rows_raw, strict=False):
        shapes = [(s1, s2) for s2 in size_inner]
        row_np = [_convert_to_numpy(leaf, only_pandas=False) for leaf in row]
        row_reshaped = _reshape_list(row_np, shapes)
        row_concatenated = np.concatenate(row_reshaped, axis=1)
        block_rows.append(row_concatenated)

    matrix = np.concatenate(block_rows, axis=0)

    _check_dimensions_matrix(matrix, flat_outer, flat_inner)
    return matrix


def block_tree_to_hessian(block_hessian, f_tree, params_tree):
    """Convert a block tree to a Hessian array.

    Remark: In comparison to Jax we need this formatting function because we calculate
    the second derivative using second-order finite differences. Jax computes the
    second derivative by applying their jacobian function twice, which produces the
    desired block-tree shape of the Hessian automatically. If we apply our first
    derivative function twice we get the same block-tree shape.

    Args:
        block_hessian: A (block) pytree, must match dimensions of f_tree and
            params_tree
        f_tree (pytree): The function evaluated at params_tree.
        params_tree (pytree): The params_tree.

    Returns:
        matrix (np.ndarray): 2d array containing information stored in block_tree.

    """
    flat_f = tree_leaves(f_tree)
    flat_p = tree_leaves(params_tree)
    flat_block_tree = tree_leaves(block_hessian)

    flat_f_np = [_convert_to_numpy(leaf, only_pandas=True) for leaf in flat_f]
    flat_p_np = [_convert_to_numpy(leaf, only_pandas=True) for leaf in flat_p]

    size_f = [np.size(a) for a in flat_f_np]
    size_p = [np.size(a) for a in flat_p_np]

    n_blocks_f = len(size_f)
    n_blocks_p = len(size_p)

    # Each f leaf owns an n_blocks_p x n_blocks_p grid of inner blocks, i.e.
    # n_blocks_p ** 2 consecutive entries of the flattened block tree.
    outer_blocks = [
        flat_block_tree[(n_blocks_p**2) * i : (n_blocks_p**2) * (i + 1)]
        for i in range(n_blocks_f)
    ]

    inner_matrices = []
    for outer_block_dim, list_inner_blocks in zip(size_f, outer_blocks, strict=False):
        block_rows_raw = [
            list_inner_blocks[n_blocks_p * i : n_blocks_p * (i + 1)]
            for i in range(n_blocks_p)
        ]
        block_rows = []
        for s1, row in zip(size_p, block_rows_raw, strict=False):
            shapes = [(outer_block_dim, s1, s2) for s2 in size_p]
            row_np = [_convert_to_numpy(leaf, only_pandas=False) for leaf in row]
            # Ensure 3d blocks so that axis-2 concatenation is well defined.
            row_np_3d = [leaf[np.newaxis] if leaf.ndim < 3 else leaf for leaf in row_np]
            row_reshaped = _reshape_list(row_np_3d, shapes)
            row_concatenated = np.concatenate(row_reshaped, axis=2)
            block_rows.append(row_concatenated)
        inner_matrix = np.concatenate(block_rows, axis=1)
        inner_matrices.append(inner_matrix)

    hessian = np.concatenate(inner_matrices, axis=0)
    _check_dimensions_hessian(hessian, f_tree, params_tree)
    return hessian


def _convert_to_numpy(obj, only_pandas=True):
    # only_pandas=True converts pandas leaves but leaves scalars etc. untouched;
    # only_pandas=False coerces everything into an ndarray.
    if only_pandas:
        out = _convert_pandas_objects_to_numpy(obj)
    else:
        out = np.asarray(obj)
    return out


def _convert_pandas_objects_to_numpy(obj):
    """Return the numpy values of a pandas leaf; pass anything else through.

    A DataFrame with a "value" column is reduced to that column (the params
    DataFrame convention); other DataFrames are converted wholesale.
    """
    if not isinstance(obj, (pd.Series, pd.DataFrame)):
        return obj
    elif isinstance(obj, pd.Series):
        out = obj.to_numpy()
    elif "value" in obj.columns:
        out = obj["value"].to_numpy()
    else:
        out = obj.to_numpy()
    return out


def _convert_raw_block_to_pandas(raw_block, leaf_outer, leaf_inner):
    """Re-wrap a raw numpy block in pandas if either source leaf was pandas."""
    if np.ndim(raw_block) not in (1, 2):
        return raw_block

    if not _is_pd_object(leaf_outer) and not _is_pd_object(leaf_inner):
        return raw_block

    index1 = None if not _is_pd_object(leaf_outer) else leaf_outer.index
    index2 = None if not _is_pd_object(leaf_inner) else leaf_inner.index

    # can only happen if one leaf is a scalar and the other a pandas
    # object that is interpreted as one-dimensional. We want to convert
    # the block to a series wtih the index of the pandas object
    if np.ndim(raw_block) == 1:
        out = pd.Series(raw_block, index=_select_non_none(index1, index2))

    # can happen in two cases
    elif np.ndim(raw_block) == 2:
        # case 1: one leaf is scalar and the other is a DataFrame
        # without value column. We want to convert the block to a DataFrame
        # with same index and columns as original DataFrame
        if np.isscalar(leaf_outer) or np.isscalar(leaf_inner):
            if np.isscalar(leaf_outer):
                index, columns = leaf_inner.index, leaf_inner.columns
            elif np.isscalar(leaf_inner):
                index, columns = leaf_outer.index, leaf_outer.columns
            out = pd.DataFrame(raw_block, index=index, columns=columns)
        # case 2: both 1d Data structures and at least one of them is
        # a pandas object. We want to convert the result to a DataFrame
        # with index=index1 and columns=index2
        else:
            out = pd.DataFrame(raw_block, index=index1, columns=index2)

    return out


def _select_non_none(first, second):
    """Return whichever argument is not None; exactly one must be None."""
    if first is None and second is None:
        raise ValueError()
    elif first is not None and second is not None:
        raise ValueError()
    elif first is None:
        out = second
    elif second is None:
        out = first
    return out


def _reshape_list(list_to_reshape, shapes):
    """Reshape list of numpy.ndarray according to list of shapes.

    Args:
        list_to_reshape (list): List containing numpy.ndarray's.
        shapes (list): List of shape tuples.

    Returns:
        reshaped (list): List containing the reshaped numpy.ndarray's.

    """
    if len(list_to_reshape) != len(shapes):
        raise ValueError("Arguments must have the same number of elements.")
    reshaped = [
        a.reshape(shape) for a, shape in zip(list_to_reshape, shapes, strict=False)
    ]
    return reshaped


def _is_pd_object(obj):
    # True for pandas Series and DataFrame leaves.
    return isinstance(obj, (pd.Series, pd.DataFrame))


def _check_dimensions_matrix(matrix, outer_tree, inner_tree):
    """Validate that the matrix shape matches the flattened tree lengths."""
    extended_registry = get_registry(extended=True)
    flat_outer = tree_leaves(outer_tree, registry=extended_registry)
    flat_inner = tree_leaves(inner_tree, registry=extended_registry)

    if matrix.shape[0] != len(flat_outer):
        raise ValueError("First dimension of matrix does not match that of outer_tree.")
    if matrix.shape[1] != len(flat_inner):
        raise ValueError(
            "Second dimension of matrix does not match that of inner_tree."
        )


def _check_dimensions_hessian(hessian, f_tree, params_tree):
    """Validate the Hessian's shape against the flattened f and params trees."""
    extended_registry = get_registry(extended=True)
    flat_f = tree_leaves(f_tree, registry=extended_registry)
    flat_p = tree_leaves(params_tree, registry=extended_registry)

    if len(flat_f) == 1:
        # consider only dimensions with non trivial size (larger than 1)
        relevant_hessian_shape = tuple(k for k in hessian.shape if k != 1)
        if len(relevant_hessian_shape) == 0 and len(flat_p) != 1:
            # scalar f and scalar params -> scalar hessian
            raise ValueError("Hessian dimension does not match those of params.")
        if len(relevant_hessian_shape) == 2:
            # scalar f and vector params -> matrix hessian
            if relevant_hessian_shape != (len(flat_p), len(flat_p)):
                raise ValueError("Hessian dimension does not match those of params.")
        if len(relevant_hessian_shape) > 2:
            raise ValueError("Hessian must be 0- or 2-d if f is scalar-valued.")
    else:
        if hessian.ndim != 3:
            raise ValueError("Hessian must be 3d if f is multidimensional.")
        if hessian.shape[0] != len(flat_f):
            raise ValueError("First Hessian dimension does not match that of f.")
        if hessian.shape[1:] != (len(flat_p), len(flat_p)):
            raise ValueError(
                "Last two Hessian dimensions do not match those of params."
@dataclass(frozen=True)
class Bounds:
    """Bundle of hard and soft lower/upper bounds on the parameters."""

    lower: PyTree | None = None
    upper: PyTree | None = None
    soft_lower: PyTree | None = None
    soft_upper: PyTree | None = None


def pre_process_bounds(
    bounds: None | Bounds | ScipyBounds | Sequence[tuple[float, float]],
) -> Bounds | None:
    """Convert all valid types of specifying bounds to optimagic.Bounds.

    This just harmonizes multiple ways of specifying bounds into a single format.
    It does not check that bounds are valid or compatible with params.

    Args:
        bounds: The user provided bounds.

    Returns:
        The bounds in the optimagic format.

    Raises:
        InvalidBoundsError: If bounds cannot be processed, e.g. because they do not
            have the correct type.

    """
    if bounds is None or isinstance(bounds, Bounds):
        return bounds
    if isinstance(bounds, ScipyBounds):
        return Bounds(lower=bounds.lb, upper=bounds.ub)
    try:
        return _process_bounds_sequence(bounds)
    except (KeyboardInterrupt, SystemExit):
        raise
    except Exception as e:
        raise InvalidBoundsError(
            f"Invalid bounds of type: {type(bounds)}. Bounds must be "
            "optimagic.Bounds, scipy.optimize.Bounds or a Sequence of tuples with "
            "lower and upper bounds."
        ) from e


def _process_bounds_sequence(bounds: Sequence[tuple[float, float]]) -> Bounds:
    """Convert a sequence of (lower, upper) tuples to Bounds.

    None entries are interpreted as "no bound" and become -inf / inf.
    """
    n = len(bounds)
    lower = fast_numpy_full(n, fill_value=-np.inf)
    upper = fast_numpy_full(n, fill_value=np.inf)

    for pos, (low, high) in enumerate(bounds):
        if low is not None:
            lower[pos] = low
        if high is not None:
            upper[pos] = high
    return Bounds(lower=lower, upper=upper)


def get_internal_bounds(
    params: PyTree,
    bounds: Bounds | None = None,
    registry: PyTreeRegistry | None = None,
    add_soft_bounds: bool = False,
) -> tuple[NDArray[np.float64] | None, NDArray[np.float64] | None]:
    """Create consolidated and flattened bounds for params.

    If params is a DataFrame with value column, the user provided bounds are
    extended with bounds from the params DataFrame. If no bounds are available at
    all, we return None. If some bounds are available the missing entries are set
    to -np.inf for the lower bound and np.inf for the upper bound.

    The bounds provided in `bounds` override bounds provided in params if both are
    specified (in the case where params is a DataFrame with bounds as a column).

    Args:
        params: The parameter pytree.
        bounds: The lower and upper bounds.
        registry: pybaum registry.
        add_soft_bounds: If True, the element-wise maximum (minimum) of the lower
            and soft_lower (upper and soft_upper) bounds are taken. If False, the
            lower (upper) bounds are returned.

    Returns:
        Consolidated and flattened lower_bounds.
        Consolidated and flattened upper_bounds.

    """
    bounds = bounds if bounds is not None else Bounds()

    if _is_fast_path(params=params, bounds=bounds, add_soft_bounds=add_soft_bounds):
        return _get_fast_path_bounds(bounds)

    # Handling of None-valued bounds in the slow path needs to be improved.
    # Currently, None-valued bounds are replaced with arrays of np.inf and -np.inf,
    # and then translated back to None if all entries are non-finite.
    if registry is None:
        registry = get_registry(extended=True)
    n_params = len(tree_leaves(params, registry=registry))

    # Fill leaves with np.nan. If params contains a data frame with bounds as a
    # column, that column is NOT overwritten (as long as an extended registry is
    # used).
    nan_tree = tree_map(lambda leaf: np.nan, params, registry=registry)  # noqa: ARG005

    lower = _update_bounds_and_flatten(nan_tree, bounds.lower, kind="lower_bound")
    upper = _update_bounds_and_flatten(nan_tree, bounds.upper, kind="upper_bound")

    if len(lower) != n_params:
        raise InvalidBoundsError("lower_bounds do not match dimension of params.")
    if len(upper) != n_params:
        raise InvalidBoundsError("upper_bounds do not match dimension of params.")

    # missing entries are non-binding
    lower[np.isnan(lower)] = -np.inf
    upper[np.isnan(upper)] = np.inf

    if add_soft_bounds:
        soft_lower = _update_bounds_and_flatten(
            nan_tree, bounds.soft_lower, kind="soft_lower_bound"
        )
        soft_lower[np.isnan(soft_lower)] = -np.inf
        lower = np.maximum(lower, soft_lower)

        soft_upper = _update_bounds_and_flatten(
            nan_tree, bounds.soft_upper, kind="soft_upper_bound"
        )
        soft_upper[np.isnan(soft_upper)] = np.inf
        upper = np.minimum(upper, soft_upper)

    if (lower > upper).any():
        msg = "Invalid bounds. Some lower bounds are larger than upper bounds."
        raise InvalidBoundsError(msg)

    # fully non-binding bounds are represented as None
    if np.isinf(lower).all():
        lower = None  # type: ignore[assignment]
    if np.isinf(upper).all():
        upper = None  # type: ignore[assignment]

    return lower, upper


def _update_bounds_and_flatten(
    nan_tree: PyTree,
    bounds: PyTree,
    kind: Literal["lower_bound", "upper_bound", "soft_lower_bound", "soft_upper_bound"],
) -> NDArray[np.float64]:
    """Flatten bounds array and update it with bounds from params.

    Args:
        nan_tree: Pytree with the same structure as params, filled with nans.
        bounds: The candidate bounds to be updated and flattened.
        kind: One of "lower_bound", "upper_bound", "soft_lower_bound",
            "soft_upper_bound".

    Returns:
        np.ndarray: The updated and flattened bounds.

    Raises:
        InvalidBoundsError: If a bounds leaf cannot be matched to params.

    """
    registry = get_registry(extended=True, data_col=kind)
    flat_nan = tree_leaves(nan_tree, registry=registry)

    if bounds is None:
        return np.array(flat_nan, dtype=np.float64)

    registry = get_registry(extended=True)
    flat_bounds = tree_leaves(bounds, registry=registry)

    separator = 10 * "$"
    params_names = leaf_names(nan_tree, registry=registry, separator=separator)
    bounds_names = leaf_names(bounds, registry=registry, separator=separator)

    updated = dict(zip(params_names, flat_nan, strict=False))

    unmatched_names = []
    unmatched_values = []
    for name, leaf in zip(bounds_names, flat_bounds, strict=False):
        # a None bounds leaf means the corresponding subtree of params has no bounds
        if leaf is None:
            continue
        if name in updated:
            updated[name] = leaf
        else:
            unmatched_names.append(name)
            unmatched_values.append(leaf)

    if unmatched_values:
        msg = (
            f"{kind} could not be matched to params pytree. The bounds "
            f"{unmatched_values} with names {unmatched_names} are not part of "
            "params."
        )
        raise InvalidBoundsError(msg)

    return np.array(list(updated.values()), dtype=np.float64)
def _is_fast_path(params: PyTree, bounds: Bounds, add_soft_bounds: bool) -> bool:
    """Return True if bounds can be consolidated without flattening pytrees.

    The fast path applies only when params is already a 1d numpy array and every
    specified bound is either None or a 1d numpy array. Soft bounds always force
    the slow path because they must be merged element-wise with the hard bounds.
    (Improvement: early returns instead of flag accumulation; the original kept
    evaluating all checks after the answer was already known.)
    """
    if add_soft_bounds:
        return False
    if not _is_1d_array(params):
        return False
    return all(b is None or _is_1d_array(b) for b in (bounds.lower, bounds.upper))


def _is_1d_array(candidate: Any) -> bool:
    """Return True if candidate is a one-dimensional numpy array."""
    return isinstance(candidate, np.ndarray) and candidate.ndim == 1


def _get_fast_path_bounds(
    bounds: Bounds,
) -> tuple[NDArray[np.float64] | None, NDArray[np.float64] | None]:
    """Cast 1d array bounds to float and validate them.

    Bounds that are entirely non-binding (all -inf / all inf) are mapped to None.

    Raises:
        InvalidBoundsError: If any lower bound is larger than the corresponding
            upper bound.

    """
    lower_bounds = _cast_or_none(bounds.lower)
    upper_bounds = _cast_or_none(bounds.upper)

    if (
        lower_bounds is not None
        and upper_bounds is not None
        and (lower_bounds > upper_bounds).any()
    ):
        msg = "Invalid bounds. Some lower bounds are larger than upper bounds."
        raise InvalidBoundsError(msg)

    return lower_bounds, upper_bounds


def _cast_or_none(
    bound: NDArray[np.float64] | None,
) -> NDArray[np.float64] | None:
    """Cast a bound array to float; map all-infinite (non-binding) bounds to None."""
    if bound is None:
        return None
    out = bound.astype(float)
    return None if np.isinf(out).all() else out
def check_constraints_are_satisfied(flat_constraints, param_values, param_names):
    """Check that params satisfies all constraints.

    This should be called before the more specialized constraints are rewritten to
    linear constraints in order to get better error messages!

    We let the checks pass if all "values" are np.nan. This way
    `process_constraints` can be used on empty params DataFrames which is useful
    to construct templates for start parameters that can be filled out by the
    user.

    Args:
        flat_constraints (list): List of constraints with processed selectors.
        param_values (np.ndarray): 1d array with parameter values.
        param_names (list): Names of the parameters. Used for error messages.

    Raises:
        InvalidParamsError: If a constraint is not satisfied at the start params.

    """
    # skip the check if all parameters are NaN (e.g. params templates)
    if not np.isfinite(param_values).any():
        return

    for constraint in flat_constraints:
        ctype = constraint["type"]
        values = param_values[constraint["index"]]
        problems = []
        _msg = partial(_get_message, constraint, param_names)

        if ctype in ("covariance", "sdcorr"):
            matrix = (
                cov_params_to_matrix(values)
                if ctype == "covariance"
                else sdcorr_params_to_matrix(values)
            )
            eigenvalues, _ = np.linalg.eigh(matrix)
            # allow a tiny negative tolerance for numerical noise
            if not np.all(eigenvalues > -1e-8):
                problems.append(_msg())
        elif ctype == "probability":
            if not np.isclose(values.sum(), 1, rtol=0.01):
                problems.append(_msg("Probabilities do not sum to 1."))
            if np.any(values < 0):
                problems.append(_msg("There are negative Probabilities."))
            if np.any(values > 1):
                problems.append(_msg("There are probabilities larger than 1."))
        elif ctype == "fixed":
            if "value" in constraint and not np.allclose(values, constraint["value"]):
                problems.append(
                    _msg(
                        "Fixing parameters to different values than their start "
                        "values was allowed in earlier versions of optimagic but "
                        "is forbidden now. "
                    )
                )
        elif ctype == "increasing":
            if np.any(np.diff(values) < 0):
                problems.append(_msg())
        elif ctype == "decreasing":
            if np.any(np.diff(values) > 0):
                problems.append(_msg())
        elif ctype == "linear":
            wsum = values.dot(constraint["weights"])
            if "lower_bound" in constraint and wsum < constraint["lower_bound"]:
                problems.append(_msg("Lower bound of linear constraint is violated."))
            elif "upper_bound" in constraint and wsum > constraint["upper_bound"]:
                problems.append(_msg("Upper bound of linear constraint violated"))
            elif "value" in constraint and not np.isclose(wsum, constraint["value"]):
                problems.append(_msg("Equality condition of linear constraint violated"))
        elif ctype == "equality":
            if len(set(values.tolist())) > 1:
                problems.append(_msg())

        report = "\n".join(problems)
        if report != "":
            raise InvalidParamsError(f"Violated constraint at start params:\n{report}")


def _get_message(constraint, param_names, explanation=""):
    """Assemble the error message for a violated constraint.

    Args:
        constraint (dict): The violated constraint (with processed selector).
        param_names (list): Names of all parameters.
        explanation (str): Optional detail on why the constraint is violated.

    Returns:
        str: The full error message.

    """
    intro = (
        f"A constraint of type '{constraint['type']}' is not fulfilled in params, "
        "please make sure that it holds for the starting values. The problem arose "
        "because:"
    )
    if explanation:
        # normalize trailing punctuation, then re-add a single ". "
        explanation = f" {explanation.rstrip('. ')}. "
    involved = [param_names[i] for i in constraint["index"]]
    outro = (
        f"The names of the involved parameters are:\n{involved}\n"
        "The relevant constraint is:\n"
        f"{constraint}."
    )
    return intro + explanation + outro


def check_types(constraints):
    """Check that no invalid constraint types are requested.

    Args:
        constraints (list): List of constraints.

    Raises:
        InvalidConstraintError: If an invalid constraint type is encountered.

    """
    valid_types = {
        "covariance",
        "sdcorr",
        "linear",
        "probability",
        "increasing",
        "decreasing",
        "equality",
        "pairwise_equality",
        "fixed",
    }
    invalid = [c["type"] for c in constraints if c["type"] not in valid_types]
    if invalid:
        raise InvalidConstraintError(
            "Invalid constraint_type: {}".format(invalid[0]),
        )


def check_for_incompatible_overlaps(transformations, parnames):
    """Check that there are no overlaps between constraints that transform parameters.

    Since the constraints are already consolidated such that only those that
    transform a parameter are left and all equality constraints are already
    plugged in, this boils down to checking that no parameter appears more than
    once.

    Args:
        transformations (list): Processed transforming constraints.
        parnames (list): List of parameter names.

    Raises:
        InvalidConstraintError: If two transforming constraints overlap.

    """
    occupied = [i for constr in transformations for i in constr["index"]]
    if len(set(occupied)) == len(occupied):
        return

    msg = (
        "Transforming constraints such as 'covariance', 'sdcorr', 'probability' "
        "and 'linear' cannot overlap. This includes overlaps induced by equality "
        "constraints. This was violated for the following parameters:\n{}"
    )
    unique, counts = np.unique(occupied, return_counts=True)
    duplicated = unique[counts >= 2]
    names = [parnames[i] for i in duplicated]
    raise InvalidConstraintError(msg.format(names))
def check_fixes_and_bounds(constr_info, transformations, parnames):
    """Check fixes.

    Warn the user if he fixes a parameter to a value even though that parameter
    has a different non-nan value in params. Check that fixes are compatible with
    other constraints.

    Args:
        constr_info (dict): Dict of 1d numpy arrays with info about constraints.
        transformations (list): Processed transforming constraints.
        parnames (list): List of parameter names.

    Raises:
        InvalidConstraintError: If fixes or bounds are incompatible with a
            transforming constraint, or if a lower bound is not strictly smaller
            than its upper bound.

    """
    # shallow copy so the caller's dict is not mutated by adding "index"
    constr_info = constr_info.copy()
    constr_info["index"] = parnames

    prob_msg = (
        "{} constraints are incompatible with fixes or bounds. "
        "This is violated for:\n{}"
    )
    cov_msg = (
        "{} constraints are incompatible with fixes or bounds except for the first "
        "parameter. This is violated for:\n{}"
    )

    for constr in transformations:
        ctype = constr["type"]
        if ctype in ("covariance", "sdcorr"):
            # the first parameter (a variance / standard deviation) may be fixed
            # or bounded, hence the [1:]
            relevant = _iloc(dictionary=constr_info, positions=constr["index"][1:])
            fixed_mask = relevant["is_fixed_to_value"]
            if fixed_mask.any():
                raise InvalidConstraintError(
                    cov_msg.format(ctype, relevant["index"][fixed_mask])
                )
            bounded = np.isfinite(relevant["lower_bounds"]) | np.isfinite(
                relevant["upper_bounds"]
            )
            if bounded.any():
                raise InvalidConstraintError(
                    prob_msg.format(ctype, relevant["index"][bounded])
                )
        elif ctype == "probability":
            relevant = _iloc(dictionary=constr_info, positions=constr["index"])
            fixed_mask = relevant["is_fixed_to_value"]
            if fixed_mask.any():
                raise InvalidConstraintError(
                    prob_msg.format(ctype, relevant["index"][fixed_mask])
                )
            bounded = np.isfinite(relevant["lower_bounds"]) | np.isfinite(
                relevant["upper_bounds"]
            )
            if bounded.any():
                raise InvalidConstraintError(
                    prob_msg.format(ctype, relevant["index"][bounded])
                )

    invalid_mask = constr_info["lower_bounds"] >= constr_info["upper_bounds"]
    if invalid_mask.any():
        info = pd.DataFrame(
            {
                "names": np.array(parnames)[invalid_mask],
                "lower_bounds": constr_info["lower_bounds"][invalid_mask],
                "upper_bounds": constr_info["upper_bounds"][invalid_mask],
            }
        )
        msg = (
            "lower_bound must be strictly smaller than upper_bound. "
            f"This is violated for:\n{info}"
        )
        raise InvalidConstraintError(msg)


def _iloc(dictionary, positions):
    """Substitute function for DataFrame.iloc that works for a dictionary of arrays.

    It creates a subset of the input dictionary based on the given positions and
    returns this subset as a dictionary.

    Args:
        dictionary (dict): Dictionary of arrays (or lists).
        positions (list | slice | np.ndarray): Positions to select.

    Returns:
        dict: The subset dictionary.

    """
    return {
        key: [value[pos] for pos in positions]
        if isinstance(value, list) and not isinstance(positions, slice)
        else value[positions]
        for key, value in dictionary.items()
    }
""" subset = {} for key, value in dictionary.items(): if isinstance(value, list) and not isinstance(positions, slice): subset[key] = [value[i] for i in positions] else: subset[key] = value[positions] return subset ================================================ FILE: src/optimagic/parameters/consolidate_constraints.py ================================================ """Functions to consolidate user provided constraints. Consolidation means that redundant constraints are dropped and other constraints are collected in meaningful bundles. Check the module docstring of process_constraints for naming conventions. """ import numpy as np import pandas as pd from optimagic.exceptions import InvalidConstraintError from optimagic.utilities import ( fast_numpy_full, number_of_triangular_elements_to_dimension, ) def consolidate_constraints( constraints, parvec, lower_bounds, upper_bounds, param_names ): """Consolidate constraints with each other and remove redundant ones. Args: constraints (list): List with constraint dictionaries. It is assumed that the selectors are already processed, increasing and decreasing constraints have been rewritten as linear constraints and pairwise_equality constraints have been rewritten as equality constraints. parvec (np.ndarray): 1d numpy array with parameters. lower_bounds (np.ndarray | None): 1d numpy array with lower_bounds upper_bounds (np.ndarray | None): 1d numpy array with upper_bounds param_names (list): Names of parameters. Used for error messages. Returns: list: This contains processed version of all constraints that require an actual kernel transformation. The information on all other constraints is subsumed in pp. dict: Dict of 1d numpy arrays with information about non-transforming constraints. """ # None-valued bounds are handled by instantiating them as an -inf and inf array. In # the future, this should be handled more gracefully. 
if lower_bounds is None: lower_bounds = fast_numpy_full(len(parvec), fill_value=-np.inf) if upper_bounds is None: upper_bounds = fast_numpy_full(len(parvec), fill_value=np.inf) raw_eq, other_constraints = _split_constraints(constraints, "equality") equality_constraints = _consolidate_equality_constraints(raw_eq) fixed_constraints, other_constraints = _split_constraints( other_constraints, "fixed" ) fixed_value = _consolidate_fixes_with_equality_constraints( fixed_constraints, equality_constraints, parvec ) constr_info = { "fixed_values": fixed_value, "is_fixed_to_value": np.isfinite(fixed_value), } other_constraints = [ c for c in other_constraints if not constr_info["is_fixed_to_value"][c["index"]].all() ] ( other_constraints, lower_bounds, upper_bounds, ) = simplify_covariance_and_sdcorr_constraints( constraints=other_constraints, lower_bounds=lower_bounds, upper_bounds=upper_bounds, is_fixed_to_value=constr_info["is_fixed_to_value"], fixed_value=constr_info["fixed_values"], ) lower_bounds, upper_bounds = _consolidate_bounds_with_equality_constraints( equality_constraints, lower_bounds=lower_bounds, upper_bounds=upper_bounds, ) constr_info["lower_bounds"] = lower_bounds constr_info["upper_bounds"] = upper_bounds ( other_constraints, post_replacements, is_fixed_to_other, ) = _plug_equality_constraints_into_selectors( equality_constraints, other_constraints, n_params=len(parvec) ) constr_info["post_replacements"] = post_replacements constr_info["is_fixed_to_other"] = is_fixed_to_other linear_constraints, other_constraints = _split_constraints( other_constraints, "linear" ) if len(linear_constraints) > 0: linear_constraints = _consolidate_linear_constraints( params_vec=parvec, linear_constraints=linear_constraints, constr_info=constr_info, param_names=param_names, ) constraints = other_constraints + linear_constraints return constraints, constr_info def _consolidate_equality_constraints(equality_constraints): """Consolidate equality constraints as far as possible. 
def _join_overlapping_lists(candidates):
    """Bundle all candidates with non-empty intersection.

    Args:
        candidates (list): List of potentially overlapping lists (or sets).

    Returns:
        list: List of sorted lists where all overlapping candidates have been
            joined.

    """
    bundles = []
    remaining = list(candidates)
    while remaining:
        merged = _unite_first_with_all_intersecting_elements(remaining)
        if len(merged) == len(remaining):
            # nothing was absorbed in this pass, so the first bundle is complete
            bundles.append(sorted(merged[0]))
            remaining = remaining[1:]
        else:
            remaining = merged
    return bundles


def _unite_first_with_all_intersecting_elements(indices):
    """Union the first element with every later one that intersects it.

    Note that intersection is tested against the *original* first element, so
    candidates that only overlap transitively are merged in a later pass of the
    caller's loop.

    Args:
        indices (list): A list of lists (or sets) with indices.

    Returns:
        list: The union as first element, followed by all disjoint candidates.

    """
    anchor = set(indices[0])
    union = anchor
    disjoint = []
    for candidate in indices[1:]:
        if anchor & set(candidate):
            union = union | set(candidate)
        else:
            disjoint.append(candidate)
    return [union, *disjoint]
""" first = set(indices[0]) new_first = first new_others = [] for idx in indices[1:]: if len(first.intersection(idx)) > 0: new_first = new_first.union(idx) else: new_others.append(idx) return [new_first, *new_others] def _consolidate_fixes_with_equality_constraints( fixed_constraints, equality_constraints, parvec ): """Consolidate fixes with equality constraints. If any equality constrained parameter is fixed, all of the parameters that are equal to it have to be fixed to the same value. Args: fixed_constraints (list): List of constrains of type "fixed". equality_constraints (list): List of constraints of type "equality". parvec (np.ndarray): 1d numpy array with parameters. Returns: fixed_value (pd.Series): Series with the fixed value for all parameters that are fixed and np.nan everywhere else. Has the same index as params. """ fixed_value = np.full(len(parvec), np.nan) for fix in fixed_constraints: fixed_value[fix["index"]] = fix.get("value", parvec[fix["index"]]) for eq in equality_constraints: if np.isfinite(fixed_value[eq["index"]]).any(): valcounts = _unique_values(fixed_value[eq["index"]]) assert len(valcounts) == 1, ( "Equality constrained parameters cannot be fixed to different values." ) fixed_value[eq["index"]] = valcounts[0] return fixed_value def _consolidate_bounds_with_equality_constraints( equality_constraints, lower_bounds, upper_bounds ): """Consolidate bounds with equality constraints. Check that there are no incompatible bounds on equality constrained parameters and set the bounds for equal parameters to the strictest bound encountered on any of them. Args: equality_constraints (list): List of constraints of type "equality". lower_bounds (np.ndarray): Lower bounds for parameters. upper_bounds (np.ndarray): Upper bounds for parameters. 
Returns: np.ndarray: 1d array with lower bounds np.ndarray: 1d array with upper bounds """ lower = lower_bounds.copy() upper = upper_bounds.copy() for eq in equality_constraints: lower[eq["index"]] = lower[eq["index"]].max() upper[eq["index"]] = upper[eq["index"]].min() return lower, upper def _split_constraints(constraints, type_): """Split list of constraints in two list. The first list contains all constraints of type and the second the rest. """ filtered = [c for c in constraints if c["type"] == type_] rest = [c for c in constraints if c["type"] != type_] return filtered, rest def simplify_covariance_and_sdcorr_constraints( constraints, lower_bounds, upper_bounds, is_fixed_to_value, fixed_value, ): """Enforce covariance and sdcorr constraints by bounds if possible. This is possible if the dimension is <= 2 or all covariances are fexd to 0. """ cov_constraints, others = _split_constraints(constraints, "covariance") sdcorr_constraints, others = _split_constraints(others, "sdcorr") to_simplify = cov_constraints + sdcorr_constraints lower = lower_bounds.copy() upper = upper_bounds.copy() not_simplifyable = [] for constr in to_simplify: dim = number_of_triangular_elements_to_dimension(len(constr["index"])) if constr["type"] == "covariance": diag_positions = [0, *np.cumsum(range(2, dim + 1)).tolist()] diag_indices = np.array(constr["index"])[diag_positions].tolist() off_indices = [i for i in constr["index"] if i not in diag_positions] if constr["type"] == "sdcorr": diag_indices = constr["index"][:dim] off_indices = constr["index"][dim:] uncorrelated = False if is_fixed_to_value[off_indices].all(): if (fixed_value[off_indices] == 0).all(): uncorrelated = True if uncorrelated: lower[diag_indices] = np.maximum(0, lower[diag_indices]) elif dim <= 2 and constr["type"] == "sdcorr": lower[diag_indices] = np.maximum(0, lower[diag_indices]) lower[off_indices] = -1 upper[off_indices] = 1 else: not_simplifyable.append(constr) return others + not_simplifyable, lower, upper def 
def _plug_equality_constraints_into_selectors(
    equality_constraints, other_constraints, n_params
):
    """Rewrite all constraints in terms of free parameters.

    Only one parameter from a set of equality constrained parameters will
    actually be free. Which one is not important. We take the one with the lowest
    iloc. Then all other constraints have to be rewritten in terms of the free
    parameters. Once that is done, redundant constraints can be filtered out.

    Args:
        equality_constraints (list): List of constraints of type "equality".
        other_constraints (list): All other constraints.
        n_params (int): Number of parameters.

    Returns:
        list: List of processed non-equality constraints.
        np.ndarray: post_replacements
        np.ndarray: is_fixed_to_other

    """
    is_equal_to = np.full(n_params, -1)
    for eq in equality_constraints:
        # hoist the double sorted() call of the original implementation
        ordered = sorted(eq["index"])
        is_equal_to[ordered[1:]] = ordered[0]

    post_replacements = is_equal_to.astype(int)
    is_fixed_to_other = is_equal_to >= 0

    helper = pd.Series(post_replacements)
    replace_dict = helper[helper >= 0].to_dict()

    plugged_in = []
    for constr in other_constraints:
        new = constr.copy()
        new["index"] = pd.Series(constr["index"]).replace(replace_dict).tolist()
        plugged_in.append(new)

    # non-linear constraints that became exact duplicates are redundant; linear
    # constraints are deduplicated later during their own consolidation
    linear_constraints, others = _split_constraints(plugged_in, "linear")
    pc = []
    for constr in others:
        if not _is_redundant(constr, pc):
            pc.append(constr)
    pc += linear_constraints

    return pc, post_replacements, is_fixed_to_other


def _consolidate_linear_constraints(
    params_vec, linear_constraints, constr_info, param_names
):
    """Consolidate linear constraints.

    Consolidation entails the following steps:
    - Plugging fixes and equality constraints into the linear constraints
    - Collect weights of those constraints that overlap into weight DataFrames
    - Collect corresponding right hand sides (bounds or values) in DataFrames
    - Express box constraints of parameters involved in linear constraints as
      additional linear constraints.
    - Rescale the weights for easier detection of linear dependence
    - Drop redundant constraints
    - Check compatibility of constraints
    - Construct a list of consolidated constraint dictionaries that contain all
      matrices needed for the kernel transformations.

    Args:
        params_vec (np.ndarray): 1d numpy array with parameters.
        linear_constraints (list): Linear constraints that already have processed
            weights and selector fields.
        constr_info (dict): Dict with information about constraints.
        param_names (list): Parameter names. Used for error messages.

    Returns:
        list: Processed and consolidated linear constraints.

    """
    weights, right_hand_side = _transform_linear_constraints_to_pandas_objects(
        linear_constraints, n_params=len(params_vec)
    )
    weights = _plug_equality_constraints_into_linear_weights(
        weights, constr_info["post_replacements"]
    )
    weights, right_hand_side = _plug_fixes_into_linear_weights_and_rhs(
        weights,
        right_hand_side,
        constr_info["is_fixed_to_value"],
        constr_info["fixed_values"],
    )
    involved_parameters = [set(w[w != 0].index) for _, w in weights.iterrows()]
    bundled_indices = _join_overlapping_lists(involved_parameters)

    pc = []
    # renamed loop variable to avoid shadowing involved_parameters
    for involved in bundled_indices:
        w = weights[involved][(weights[involved] != 0).any(axis=1)].copy(deep=True)
        rhs = right_hand_side.loc[w.index].copy(deep=True)
        w, rhs = _express_bounds_as_linear_constraints(
            w, rhs, constr_info["lower_bounds"], constr_info["upper_bounds"]
        )
        w, rhs = _rescale_linear_constraints(w, rhs)
        w, rhs = _drop_redundant_linear_constraints(w, rhs)
        _check_consolidated_weights(w, param_names=param_names)
        to_internal, from_internal = _get_kernel_transformation_matrices(w)
        constr = {
            "index": list(w.columns),
            "type": "linear",
            "to_internal": to_internal,
            "from_internal": from_internal,
            "right_hand_side": rhs,
        }
        pc.append(constr)

    return pc


def _transform_linear_constraints_to_pandas_objects(linear_constraints, n_params):
    """Collect info from the linear constraint dictionaries into pandas objects.

    Note: the parameter was previously misspelled ``linear_constranits``; it is
    only called positionally, so the rename is safe.

    Args:
        linear_constraints (list): List of constraints of type "linear".
        n_params (int): Number of parameters.

    Returns:
        weights (pd.DataFrame): DataFrame with one row per constraint and one
            column per parameter. Column names are the ilocs of the parameters in
            params.
        rhs (pd.DataFrame): DataFrame with the columns "value", "lower_bound" and
            "upper_bound" that collects the right hand sides of the constraints.

    """
    all_weights, all_values, all_lbs, all_ubs = [], [], [], []
    for constr in linear_constraints:
        all_weights.append(constr["weights"])
        all_values.append(constr.get("value", np.nan))
        all_lbs.append(constr.get("lower_bound", -np.inf))
        all_ubs.append(constr.get("upper_bound", np.inf))

    weights = pd.concat(all_weights, axis=1).T.reset_index()
    # reindexing to the full parameter range drops the helper "index" column and
    # gives parameters without weights a zero weight
    weights = weights.reindex(columns=np.arange(n_params)).fillna(0)

    values = pd.Series(all_values, name="value")
    lbs = pd.Series(all_lbs, name="lower_bound")
    ubs = pd.Series(all_ubs, name="upper_bound")
    rhs = pd.concat([values, lbs, ubs], axis=1)
    return weights, rhs
def _plug_equality_constraints_into_linear_weights(weights, post_replacements):
    """Sum the weights of equality constrained parameters.

    The sum of the weights becomes the weight of the equality constrained
    parameter that is actually free. The weights of the other parameters are set
    to zero.

    Args:
        weights (pd.DataFrame): Weight matrices for linear constraints.
        post_replacements (np.ndarray): The _post_replacements column of pp.

    Returns:
        plugged_weights (pd.DataFrame)

    """
    transposed = weights.T
    targets = pd.Series(post_replacements)
    # parameters that are not replaced map to themselves
    targets = targets.where(targets >= 0, np.arange(len(targets)))
    transposed["plugged_iloc"] = targets
    summed = transposed.groupby("plugged_iloc").sum()
    plugged_weights = summed.reindex(transposed.index).fillna(0).T
    return plugged_weights


def _plug_fixes_into_linear_weights_and_rhs(
    weights, rhs, is_fixed_to_value, fixed_value
):
    """Set weights of fixed parameters to 0 and adjust right hand sides accordingly.

    Args:
        weights (pd.DataFrame): Weight matrix for linear constraints.
        rhs (pd.DataFrame): Right hand side of the linear constraints.
        is_fixed_to_value (np.ndarray): Bool array, True where a parameter is fixed.
        fixed_value (np.ndarray): The fixed values (np.nan where not fixed).

    Returns:
        new_weights (pd.DataFrame)
        new_rhs (pd.DataFrame)

    """
    all_ilocs = np.arange(len(fixed_value))
    fixed_ilocs = all_ilocs[is_fixed_to_value].tolist()

    new_weights = weights.copy()
    new_rhs = rhs.copy()

    if fixed_ilocs:
        # the fixed parameters contribute a constant to each weighted sum, which
        # is moved to the right hand side
        contribution = weights[fixed_ilocs] @ fixed_value[fixed_ilocs]
        for column in ("lower_bound", "upper_bound", "value"):
            new_rhs[column] = new_rhs[column] - contribution
        for iloc in fixed_ilocs:
            new_weights[iloc] = 0

    return new_weights, new_rhs


def _express_bounds_as_linear_constraints(weights, rhs, lower, upper):
    """Express bounds of linearly constrained params as linear constraints.

    In general it is easier to keep bounds separate from the constraints, but for
    linearly constrained parameters we need to express them as additional linear
    constraints to check compatibility and to choose the correct
    reparametrization.

    Args:
        weights (pd.DataFrame): The weight matrix of the linear constraint.
        rhs (pd.DataFrame): The right hand side of the linear constraint.
        lower (np.ndarray): Lower bounds.
        upper (np.ndarray): Upper bounds.

    Returns:
        extended_weights (pd.DataFrame)
        extended_rhs (pd.DataFrame)

    """
    extra = []
    for pos in weights.columns:
        spec = {}
        if np.isfinite(lower[pos]):
            spec["lower_bound"] = lower[pos]
        if np.isfinite(upper[pos]):
            spec["upper_bound"] = upper[pos]
        if spec:
            # a box constraint is a linear constraint with a single unit weight
            spec["weights"] = pd.Series([1], name="w", index=[pos])
            extra.append(spec)

    if not extra:
        return weights, rhs

    add_weights, add_rhs = _transform_linear_constraints_to_pandas_objects(
        extra, len(lower)
    )
    add_weights = add_weights[weights.columns]
    extended_weights = pd.concat([weights, add_weights]).reset_index(drop=True)
    extended_rhs = pd.concat([rhs, add_rhs]).reset_index(drop=True)
    return extended_weights, extended_rhs
def _rescale_linear_constraints(weights, rhs):
    """Rescale rows in weights such that the first nonzero element equals one.

    This makes it easier to detect redundant rows.

    Args:
        weights (pd.DataFrame): The weight matrix of the linear constraint.
        rhs (pd.DataFrame): The right hand side of the linear constraint.

    Returns:
        new_weights (pd.DataFrame)
        new_rhs (pd.DataFrame)

    """
    first_nonzero = weights.replace(0, np.nan).bfill(axis=1).iloc[:, 0]
    scaling_factor = 1 / first_nonzero.to_numpy().reshape(-1, 1)
    new_weights = scaling_factor * weights
    scaled_rhs = scaling_factor * rhs

    # a negative scaling factor flips the direction of the inequalities, so the
    # lower and upper bounds have to be swapped for those rows
    new_rhs = scaled_rhs.copy()
    new_rhs["lower_bound"] = scaled_rhs["lower_bound"].where(
        scaling_factor.flatten() > 0, scaled_rhs["upper_bound"]
    )
    new_rhs["upper_bound"] = scaled_rhs["upper_bound"].where(
        scaling_factor.flatten() > 0, scaled_rhs["lower_bound"]
    )
    return new_weights, new_rhs


def _drop_redundant_linear_constraints(weights, rhs):
    """Drop linear constraints that are implied by other linear constraints.

    This is not yet very smart. We just check for rows with identical (rescaled)
    weights and merge their right hand sides.

    Note: mutates ``weights`` and ``rhs`` in place (adds a ``dupl_group``
    column / index); callers discard the inputs afterwards.

    Args:
        weights (pd.DataFrame): The weight matrix of the linear constraint.
        rhs (pd.DataFrame): The right hand side of the linear constraint.

    Returns:
        new_weights (pd.DataFrame)
        new_rhs (pd.DataFrame)

    Raises:
        ValueError: If duplicate constraints are fixed to different values.

    """
    weights["dupl_group"] = weights.groupby(list(weights.columns)).ngroup()
    rhs["dupl_group"] = weights["dupl_group"]
    weights.set_index("dupl_group", inplace=True)
    new_weights = weights.drop_duplicates()

    def _consolidate_fix(x):
        vc = x.value_counts(dropna=True)
        if len(vc) == 0:
            return np.nan
        elif len(vc) == 1:
            return vc.index[0]
        else:
            # improvement: give the previously bare ValueError a message
            raise ValueError(
                "Duplicate linear constraints are fixed to different values."
            )

    # merge right hand sides within each duplicate group: strictest bounds win
    ub = rhs.groupby("dupl_group")["upper_bound"].min()
    lb = rhs.groupby("dupl_group")["lower_bound"].max()
    fix = rhs.groupby("dupl_group")["value"].apply(_consolidate_fix)
    # remove the bounds for fixed parameters
    ub = ub.where(fix.isnull(), np.inf)
    lb = lb.where(fix.isnull(), -np.inf)
    # the column names come from the Series names; the former ``names`` argument
    # of pd.concat was a no-op because no ``keys`` were passed
    new_rhs = pd.concat([lb, ub, fix], axis=1)
    new_rhs = new_rhs.reindex(new_weights.index)
    return new_weights, new_rhs


def _check_consolidated_weights(weights, param_names):
    """Check the rank condition on the linear weights.

    Args:
        weights (pd.DataFrame): Consolidated weight matrix.
        param_names (list): Names of all parameters. Used for error messages.

    Raises:
        InvalidConstraintError: If there are more constraints than involved
            parameters or the weights are linearly dependent.

    """
    n_constraints, n_params = weights.shape
    # bugfix: the two adjacent string literals used to concatenate to
    # "...constraintsas involved..." (missing space)
    msg_too_many = (
        "Too many linear constraints. There can be at most as many linear "
        "constraints as involved parameters with non-zero weights.\n"
    )
    msg_rank = "The weights for linear constraints must be linearly independent.\n"
    msg_general = (
        "The error occurred for constraints on the following parameters:\n{}\n with "
        "weighting matrix:\n{}\nIt is possible that you did not specify those "
        "constraints as linear constraints but as bounds, fixes, increasing or "
        "decreasing constraints."
    )
    relevant_names = [param_names[i] for i in weights.columns]
    if n_constraints > n_params:
        raise InvalidConstraintError(
            msg_too_many + msg_general.format(relevant_names, weights)
        )
    if np.linalg.matrix_rank(weights) < n_constraints:
        raise InvalidConstraintError(
            msg_rank + msg_general.format(relevant_names, weights)
        )


def _get_kernel_transformation_matrices(weights):
    """Construct the m matrix for the kernel transformations.

    See :ref:`linear_constraint_implementation` for details.

    Args:
        weights (pd.DataFrame): Weight matrix of a linear constraint.

    Returns:
        np.ndarray: to_internal transformation matrix.
        np.ndarray: from_internal transformation matrix (its inverse).

    """
    n_constraints, n_params = weights.shape
    identity = np.eye(n_params)
    i = 0
    filled_weights = weights
    # fill up with unit vectors until the matrix is square and invertible
    while len(filled_weights) < n_params:
        candidate = np.vstack([identity[i], filled_weights])
        if np.linalg.matrix_rank(candidate) == len(candidate):
            filled_weights = candidate
        i += 1

    # reverse the filled-up part so the original constraints come last in the
    # same order in which they were stacked
    k = n_params - n_constraints
    filled_weights[:k] = filled_weights[:k][::-1]
    to_internal = filled_weights
    from_internal = np.linalg.inv(to_internal)
    return to_internal, from_internal
Args: weights (pd.DataFrame): Weight matrix of a linear constraint. """ n_constraints, n_params = weights.shape identity = np.eye(n_params) i = 0 filled_weights = weights while len(filled_weights) < n_params: candidate = np.vstack([identity[i], filled_weights]) if np.linalg.matrix_rank(candidate) == len(candidate): filled_weights = candidate i += 1 k = n_params - n_constraints filled_weights[:k] = filled_weights[:k][::-1] to_internal = filled_weights from_internal = np.linalg.inv(to_internal) return to_internal, from_internal def _is_redundant(candidate, others): """Check if a constraint is redundant given other constraints. Applicable to all but linear constraints. """ assert candidate["type"] != "linear" if len(others) == 0: is_redundant = False else: same_type, _ = _split_constraints(others, candidate["type"]) duplicates = [c for c in same_type if c["index"] == candidate["index"]] is_redundant = len(duplicates) > 0 return is_redundant def _unique_values(arr, dropna=True): if dropna: arr = arr[np.isfinite(arr)] return list(set(arr.tolist())) ================================================ FILE: src/optimagic/parameters/constraint_tools.py ================================================ from optimagic import deprecations from optimagic.parameters.bounds import pre_process_bounds from optimagic.parameters.conversion import get_converter def count_free_params( params, constraints=None, bounds=None, # deprecated lower_bounds=None, upper_bounds=None, ): """Count the (free) parameters of an optimization problem. Args: params (pytree): The parameters. constraints (list): The constraints for the optimization problem. If constraints are provided, only the free parameters are counted. bounds: Lower and upper bounds on the parameters. The most general and preferred way to specify bounds is an `optimagic.Bounds` object that collects lower, upper, soft_lower and soft_upper bounds. 
The soft bounds are used for sampling based optimizers but are not enforced during optimization. Each bound type mirrors the structure of params. Check our how-to guide on bounds for examples. If params is a flat numpy array, you can also provide bounds via any format that is supported by scipy.optimize.minimize. Returns: int: Number of (free) parameters """ bounds = deprecations.replace_and_warn_about_deprecated_bounds( bounds=bounds, lower_bounds=lower_bounds, upper_bounds=upper_bounds, ) deprecations.throw_dict_constraints_future_warning_if_required(constraints) bounds = pre_process_bounds(bounds) constraints = deprecations.pre_process_constraints(constraints) _, internal_params = get_converter( params=params, constraints=constraints, bounds=bounds, func_eval=3, solver_type="value", ) return int(internal_params.free_mask.sum()) def check_constraints( params, constraints, bounds=None, # deprecated lower_bounds=None, upper_bounds=None, ): """Raise an error if constraints are invalid or not satisfied in params. Args: params (pytree): The parameters. constraints (list): The constraints for the optimization problem. bounds: Lower and upper bounds on the parameters. The most general and preferred way to specify bounds is an `optimagic.Bounds` object that collects lower, upper, soft_lower and soft_upper bounds. The soft bounds are used for sampling based optimizers but are not enforced during optimization. Each bound type mirrors the structure of params. Check our how-to guide on bounds for examples. If params is a flat numpy array, you can also provide bounds via any format that is supported by scipy.optimize.minimize. Raises: InvalidParamsError: If constraints are valid but not satisfied. InvalidConstraintError: If constraints are invalid. 
""" bounds = deprecations.replace_and_warn_about_deprecated_bounds( bounds=bounds, lower_bounds=lower_bounds, upper_bounds=upper_bounds, ) deprecations.throw_dict_constraints_future_warning_if_required(constraints) bounds = pre_process_bounds(bounds) constraints = deprecations.pre_process_constraints(constraints) get_converter( params=params, constraints=constraints, bounds=bounds, func_eval=3, solver_type="value", ) ================================================ FILE: src/optimagic/parameters/conversion.py ================================================ """Aggregate the multiple parameter and function output conversions into on.""" from dataclasses import dataclass, replace from typing import Callable import numpy as np from optimagic.parameters.process_selectors import process_selectors from optimagic.parameters.scale_conversion import get_scale_converter from optimagic.parameters.space_conversion import InternalParams, get_space_converter from optimagic.parameters.tree_conversion import get_tree_converter from optimagic.typing import AggregationLevel def get_converter( params, constraints, bounds, func_eval, solver_type, scaling=None, derivative_eval=None, add_soft_bounds=False, ): """Get a converter between external and internal params and internal params. This combines the following conversions: - Flattening parameters provided as pytrees (tree_conversion) - Enforcing constraints via reparametrizations (space_conversion) - Scaling of the parameter space (scale_conversion) The resulting converter can transform parameters, function outputs and derivatives. If possible, fast paths for some or all transformations are chosen. Args: params (pytree): The user provided parameters. constraints (list): The user provided constraints. bounds (Bounds): The user provided bounds. func_eval (float or pytree): An evaluation of ``func`` at ``params``. Used to flatten the derivative output. 
        solver_type: Used to determine how the derivative output has to be
            transformed for the optimizer.
        scaling (ScalingOptions | None): Scaling options. If None, no scaling is
            performed.
        derivative_eval (dict, pytree or None): Evaluation of the derivative of
            func at params. Used for consistency checks.
        add_soft_bounds (bool): Whether soft bounds should be added to the
            internal_params

    Returns:
        Converter: NamedTuple with methods to convert between internal and external
            parameters, derivatives and function outputs.
        InternalParams: NamedTuple with internal parameter values, lower_bounds and
            upper_bounds.

    """
    # the fast path skips all three conversions for unconstrained flat arrays
    fast_path = _is_fast_path(
        params=params,
        constraints=constraints,
        solver_type=solver_type,
        scaling=scaling,
        derivative_eval=derivative_eval,
        add_soft_bounds=add_soft_bounds,
    )
    if fast_path:
        return _get_fast_path_converter(
            params=params,
            bounds=bounds,
            solver_type=solver_type,
        )

    tree_converter, internal_params = get_tree_converter(
        params=params,
        bounds=bounds,
        func_eval=func_eval,
        derivative_eval=derivative_eval,
        solver_type=solver_type,
        add_soft_bounds=add_soft_bounds,
    )

    flat_constraints = process_selectors(
        constraints=constraints,
        params=params,
        tree_converter=tree_converter,
        param_names=internal_params.names,
    )

    space_converter, internal_params = get_space_converter(
        internal_params=internal_params, internal_constraints=flat_constraints
    )

    scale_converter, scaled_params = get_scale_converter(
        internal_params=internal_params,
        scaling=scaling,
    )

    def _params_to_internal(params):
        # conversion order: flatten tree -> constraint space -> scaling
        x_flat = tree_converter.params_flatten(params)
        x_internal = space_converter.params_to_internal(x_flat)
        x_scaled = scale_converter.params_to_internal(x_internal)
        return x_scaled

    def _params_from_internal(x, return_type="tree"):
        # inverse conversion order: unscale -> undo constraint space -> unflatten
        x_unscaled = scale_converter.params_from_internal(x)
        x_external = space_converter.params_from_internal(x_unscaled)
        x_tree = tree_converter.params_unflatten(x_external)
        if return_type == "tree":
            out = x_tree
        elif return_type == "tree_and_flat":
            out = x_tree, x_external
        elif return_type == "flat":
            out = x_external
        else:
            msg = (
                f"Invalid return type: {return_type}. Must be one of 'tree', 'flat', "
                "'tree_and_flat'"
            )
            raise ValueError(msg)
        return out

    def _derivative_to_internal(derivative_eval, x, jac_is_flat=False):
        if jac_is_flat:
            jacobian = derivative_eval
        else:
            jacobian = tree_converter.derivative_flatten(derivative_eval)
        # the space conversion needs the unscaled internal parameter vector
        x_unscaled = scale_converter.params_from_internal(x)
        jac_with_space_conversion = space_converter.derivative_to_internal(
            jacobian, x_unscaled
        )
        jac_with_unscaling = scale_converter.derivative_to_internal(
            jac_with_space_conversion
        )
        return jac_with_unscaling

    # keep the pre-scaling free_mask on the scaled parameters
    internal_params = replace(scaled_params, free_mask=internal_params.free_mask)

    converter = Converter(
        params_to_internal=_params_to_internal,
        params_from_internal=_params_from_internal,
        derivative_to_internal=_derivative_to_internal,
        has_transforming_constraints=space_converter.has_transforming_constraints,
    )

    return converter, internal_params


@dataclass(frozen=True)
class Converter:
    # bundle of conversion callables between external and internal representations
    params_to_internal: Callable
    params_from_internal: Callable
    derivative_to_internal: Callable
    has_transforming_constraints: bool


def _fast_params_from_internal(x, return_type="tree"):
    """Identity conversion used on the fast path (flat array in, flat array out)."""
    x = x.astype(float)
    if return_type == "tree_and_flat":
        return x, x
    else:
        return x


def _get_fast_path_converter(params, bounds, solver_type):
    """Build a converter that skips tree, space and scale conversions."""

    def _fast_derivative_to_internal(
        derivative_eval,
        x,  # noqa: ARG001
        jac_is_flat=True,  # noqa: ARG001
    ):
        # make signature compatible with non-fast path
        return derivative_eval

    converter = Converter(
        params_to_internal=lambda params: params.astype(float),
        params_from_internal=_fast_params_from_internal,
        derivative_to_internal=_fast_derivative_to_internal,
        has_transforming_constraints=False,
    )

    if bounds is None or bounds.lower is None:
        lower_bounds = None
    else:
        lower_bounds = bounds.lower.astype(float)

    if bounds is None or bounds.upper is None:
        upper_bounds = None
    else:
        upper_bounds = bounds.upper.astype(float)

    internal_params = InternalParams(
values=params.astype(float), lower_bounds=lower_bounds, upper_bounds=upper_bounds, free_mask=np.full(len(params), True), names=[str(i) for i in range(len(params))], ) return converter, internal_params def _is_fast_path( params, constraints, solver_type, scaling, derivative_eval, add_soft_bounds, ): if not _is_1d_arr(params): return False if constraints: return False if scaling is not None: return False if not _is_fast_deriv_eval(derivative_eval, solver_type): return False if add_soft_bounds: return False return True def _is_fast_deriv_eval(d, solver_type): # this is the case if no or closed form derivatives are used if d is None: return True if solver_type == AggregationLevel.SCALAR: if not _is_1d_arr(d): return False else: if not _is_2d_arr(d): return False return True def _is_1d_arr(candidate): return isinstance(candidate, np.ndarray) and candidate.ndim == 1 def _is_2d_arr(candidate): return isinstance(candidate, np.ndarray) and candidate.ndim == 2 ================================================ FILE: src/optimagic/parameters/kernel_transformations.py ================================================ r"""Functions and derivatives thereof to transform external and internal params. Remarks on the mathematical notation: ------------------------------------- We let :math:`X` denote the Cholesky factor of some covariance matrix :math:`S`. That is :math:`X X^\top = S`. We write :math:`\text{vec}(A)` for the column-wise vectorization of the matrix :math:`A` and we write :math:`\text{vech}(A)` for the row-wise half vectorization of :math:`A`. We denote the elimination matrix by :math:`L`, which fulfills :math:`L \text{vec}(A) = \text{vech}(A)`. For lower-triangular matrices :math:`A` we define the "lower-triangular" duplication matrix :math:`D`, which is not to be confused with the standard duplication matrix, and fulfills :math:`D \text{vech}(A) = \text{vec}(A)`. 
At last we define the so called commutation matrix :math:`K` which is given by the
property that :math:`K \text{vec}(A) = \text{vec}(A^\top)`.

Remarks on reference literature:
--------------------------------

The solutions on how to compute the jacobians implemented here can be found using
matrix calculus. See for example 'Matrix Differential Calculus with Applications in
Statistics and Econometrics' by Magnus and Neudecker. In specific cases we refer to
posts on math.stackexchange.com.

.. rubric:: References

.. _post_mathoverflow:
   https://google.github.io/styleguide/pyguide.html

"""

import numpy as np

from optimagic.utilities import (
    chol_params_to_lower_triangular_matrix,
    cov_matrix_to_sdcorr_params,
    cov_params_to_matrix,
    dimension_to_number_of_triangular_elements,
    robust_cholesky,
    sdcorr_params_to_matrix,
)


def covariance_to_internal(external_values, constr):
    """Do a cholesky reparametrization."""
    cov = cov_params_to_matrix(external_values)
    chol = robust_cholesky(cov)
    return chol[np.tril_indices(len(cov))]


def covariance_to_internal_jacobian(external_values, constr):
    r"""Jacobian of ``covariance_to_internal``.

    For reference see docstring of ``jacobian_covariance_from_internal``. In
    comparison to that function, however, here we want to differentiate the
    reverse graph

        external --> cov --> cholesky --> internal

    Again use the vectors :math:`c` and :math:`x` to denote the external and
    internal values, respectively. To solve for the jacobian we make use of the
    identity

    .. math:: \frac{\mathrm{d}x}{\mathrm{d}c} =
        (\frac{\mathrm{d}c}{\mathrm{d}x})^{-1}

    Args:
        external_values (np.ndarray): Row-wise half-vectorized covariance matrix

    Returns:
        deriv: The Jacobian matrix.

    """
    cov = cov_params_to_matrix(external_values)
    chol = robust_cholesky(cov)
    internal = chol[np.tril_indices(len(chol))]

    # invert the Jacobian of the reverse direction via the pseudo-inverse
    deriv = covariance_from_internal_jacobian(internal, constr=None)
    deriv = np.linalg.pinv(deriv)
    return deriv


def covariance_from_internal(internal_values, constr):
    """Undo a cholesky reparametrization."""
    chol = chol_params_to_lower_triangular_matrix(internal_values)
    cov = chol @ chol.T
    return cov[np.tril_indices(len(chol))]


def covariance_from_internal_jacobian(internal_values, constr):
    r"""Jacobian of ``covariance_from_internal``.

    The following result is motivated by https://tinyurl.com/y4pbfxst, which is
    shortly presented again here. For notation see the explanation at the
    beginning of the module.

    Explanation of the result
    -------------------------

    We want to differentiate the graph

        internal --> cholesky --> cov --> external

    Define :math:`x' := \text{vec}(X)` and :math:`c' := \text{vec}(S)`, where
    :math:`X` denotes the Cholesky factor of the covariance matrix :math:`S`.

    We then first differentiate the part "cholesky --> cov" using the result
    stated in the tinyurl above to get

    .. math:: J' := \frac{\mathrm{d}c'}{\mathrm{d}x'} = (I + K)(X \otimes I) \,,

    where :math:`K` denotes the commutation matrix. Using this intermediate
    result we can compute the jacobian as

    .. math:: \frac{\mathrm{d}c}{\mathrm{d}x} = L J' D \,,

    where :math:`c := \text{external}` and :math:`x := \text{internal}`.

    Args:
        internal_values (np.ndarray): Cholesky factors stored in an "internal"
            format.

    Returns:
        deriv: The Jacobian matrix.
""" chol = chol_params_to_lower_triangular_matrix(internal_values) dim = len(chol) K = _commutation_matrix(dim) L = _elimination_matrix(dim) left = np.eye(dim**2) + K right = np.kron(chol, np.eye(dim)) intermediate = left @ right deriv = L @ intermediate @ L.T return deriv def sdcorr_to_internal(external_values, constr): """Convert sdcorr to cov and do a cholesky reparametrization.""" cov = sdcorr_params_to_matrix(external_values) chol = robust_cholesky(cov) return chol[np.tril_indices(len(cov))] def sdcorr_to_internal_jacobian(external_values, constr): r"""Derivative of ``sdcorr_to_internal``. For reference see docstring of ``jacobian_sdcorr_from_internal``. In comparison to that function, however, here we want to differentiate the reverse graph external --> mod. corr-mat --> corr-mat --> cov --> cholesky --> internal Again use the vectors :math:`p` and :math:`x` to denote the external and internal values, respectively. To solve for the jacobian we make use of the identity .. math:: \frac{\mathrm{d}x}{\mathrm{d}p} = (\frac{\mathrm{d}p}{\mathrm{d}x})^{-1} Args: external_values (np.ndarray): Row-wise half-vectorized modified correlation matrix. Returns: deriv: The Jacobian matrix. """ cov = sdcorr_params_to_matrix(external_values) chol = robust_cholesky(cov) internal = chol[np.tril_indices(len(chol))] deriv = sdcorr_from_internal_jacobian(internal, constr=None) deriv = np.linalg.pinv(deriv) return deriv def sdcorr_from_internal(internal_values, constr): """Undo a cholesky reparametrization.""" chol = chol_params_to_lower_triangular_matrix(internal_values) cov = chol @ chol.T return cov_matrix_to_sdcorr_params(cov) def sdcorr_from_internal_jacobian(internal_values, constr): r"""Derivative of ``sdcorr_from_internal``. The following result is motivated by https://tinyurl.com/y6ytlyd9; however since the question was formulated with an error the result here is adjusted slightly. 
In particular, in the answer by user 'greg', the matrix :math:`A` should have been defined as :math:`A = \text{diag}(||x_1||, \dots, ||x_n||)` , where :math:`||x_i||` denotes the euclidian norm of the the i-th row of :math:`X` (the Cholesky factor). For notation see the explaination at the beginning of the module or the question on the tinyurl. The variable names in this function are chosen to be consistent with the tinyurl link. Explaination on the result -------------------------- We want to differentiate the graph internal --> cholesky --> cov --> corr-mat --> mod. corr-mat --> external where mod. corr-mat denotes the modified correlation matrix which has the standard deviations stored on its diagonal. Let :math:`x := \text{internal}` and :math:`p := \text{external}`. Then we want to compute the quantity .. math:: \frac{\mathrm{d} p}{\mathrm{d} x} . As before we consider an intermediate result first. Namely we define :math:`A` as above, :math:`V := A^{-1}` and :math:`P := V S V + A - I`. The attentive reader might now notice that :math:`P` is the modified correlation matrix. At last we write :math:`x' := \text{vec}(X)` and :math:`p' := \text{vec}(P)`. Using the result stated in the tinyurl above, adjusted for the different matrix :math:`A`, we can compute the quantity :math:`(\mathrm{d} p'/ \mathrm{d} x')`. Finally, since we can define transformation matrices :math:`T` and :math:`L` to get :math:`p = T p'` and :math:`x = L x'` (where :math:`L` denotes the elimination matrix with corresponding duplication matrix :math:`D`), we can get our final result as .. math:: \frac{\mathrm{d}p}{\mathrm{d}x} = T \frac{\mathrm{d}p'}{\mathrm{d}x'} D Args: internal_values (np.ndarray): Cholesky factors stored in an "internal" format. Returns: deriv: The Jacobian matrix. 
""" X = chol_params_to_lower_triangular_matrix(internal_values) dim = len(X) identity = np.eye(dim) S = X @ X.T # the wrong formulation in the tinyurl stated: A = np.multiply(I, X) A = np.sqrt(np.multiply(identity, S)) V = np.linalg.inv(A) K = _commutation_matrix(dim) Y = np.diag(identity.ravel("F")) # with the wrong formulation in the tinyurl we would have had U = Y norms = np.sqrt((X**2).sum(axis=1).reshape(-1, 1)) XX = X / norms U = Y @ np.kron(identity, XX) @ K N = np.kron(identity, X) @ K + np.kron(X, identity) VS = V @ S B = np.kron(V, V) H = np.kron(VS, identity) J = np.kron(identity, VS) intermediate = U + B @ N - (H + J) @ B @ U T = _transformation_matrix(dim) D = _duplication_matrix(dim) deriv = T @ intermediate @ D return deriv def probability_to_internal(external_values, constr): """Reparametrize probability constrained parameters to internal.""" return external_values / external_values[-1] def probability_to_internal_jacobian(external_values, constr): r"""Jacobian of ``probability_to_internal``. Let :math:`x = \text{external}`. The function ``probability_to_internal`` has the following structure .. math:: f: \mathbb{R}^m \to \mathbb{R}^m, x \mapsto \frac{1}{x_m} x where :math:`e_k` denotes the m-dimensional k-th standard basis vector. The jacobian can then be computed as .. math:: J(f)(x) = \frac{1}{x_m} \sum_{k=1}^{m-1} e_k e_k^\top - \frac{1}{x_m^2} [0, \dots, 0, \left ( \begin{matrix} x_{1:m-1} \\ 0 \end{matrix} \right ) ] Args: external_values (np.ndarray): Array of probabilities; sums to one. Returns: deriv: The Jacobian matrix. 
""" dim = len(external_values) deriv = np.eye(dim) / external_values[-1] deriv[:, -1] -= external_values / (external_values[-1] ** 2) deriv[-1, -1] = 0 return deriv def probability_from_internal(internal_values, constr): """Reparametrize probability constrained parameters from internal.""" return internal_values / internal_values.sum() def probability_from_internal_jacobian(internal_values, constr): r"""Jacobian of ``probability_from_internal``. Let :math:`x := \text{internal}`. The function ``probability_from_internal`` has the following structure .. math::`f: \mathbb{R}^m \to \mathbb{R}^m, x \mapsto \frac{1}{x^\top 1} x` where :math:`1` denotes a vector of all ones and :math:`I_m` the identity matrix. The jacobian can be computed as .. math:: J(f)(x) = \frac{1}{\sigma} I_m - \frac{1}{\sigma^2} 1 x^\top Args: internal_values (np.ndarray): Internal (positive) values. Returns: deriv: The Jacobian matrix. """ dim = len(internal_values) sigma = np.sum(internal_values) left = np.eye(dim) right = (np.ones((dim, dim)) * (internal_values / sigma)).T deriv = (left - right) / sigma return deriv def linear_to_internal(external_values, constr): """Reparametrize linear constraint to internal.""" return constr["to_internal"] @ external_values def linear_to_internal_jacobian(external_values, constr): return constr["to_internal"] def linear_from_internal(internal_values, constr): """Reparametrize linear constraint from internal.""" return constr["from_internal"] @ internal_values def linear_from_internal_jacobian(internal_values, constr): return constr["from_internal"] def _elimination_matrix(dim): r"""Construct (row-wise) elimination matrix. Let :math:`A` be a quadratic matrix. Let :math:`\text{vec}(A)` be the column-wise vectorization of :math:`A`. Let :math:`\text{vech}(A)` be the row-wise half-vectorization of :math:`A`. Then the corresponding elimination matrix :math:`L` has the property .. 
math:: L \text{vec}(A) = \text{vech}(A) See the wiki entry https://tinyurl.com/yy4sdr43 for further information, but note that here we are using :math:`\text{vech}` as the row-wise and not column-wise half-vectorization. Args: dim (int): The dimension. Returns: eliminator (np.ndarray): The elimination matrix. Examples: >>> import numpy as np >>> from numpy.testing import assert_array_almost_equal >>> dim = 10 >>> A = np.random.default_rng().normal(size=(dim, dim)) >>> vectorized = A.ravel('F') >>> half_vectorized = A[np.tril_indices(dim)] >>> L = _elimination_matrix(dim) >>> assert_array_almost_equal(L @ vectorized, half_vectorized) """ n = dimension_to_number_of_triangular_elements(dim) counter = np.zeros((dim, dim), int) - 1 counter[np.tril_indices(dim)] = np.arange(n, dtype=int) columns = [_unit_vector_or_zeros(i, n) for i in counter.ravel("F")] eliminator = np.column_stack(columns) return eliminator def _duplication_matrix(dim): r"""Return duplication matrix. Let :math:`A` be a lower-triangular quadratic matrix. Let :math:`\text{vec}(A)` be the column-wise vectorization of :math:`A`. Let :math:`\text{vech}(A)` be the row-wise half-vectorization of :math:`A`. Then the corresponding elimination matrix :math:`D` has the property .. math:: D \text{vech}(A) = \text{vec}(A) In particular note that here :math:`D = L^\top`. See the wiki entry https://tinyurl.com/yy4sdr43 for further information, but note that here we are using :math:`\text{vech}` as the row-wise and not column-wise half-vectorization, and that we are using this operator on a lower-triangular matrix and not a symmetric matrix, which allows for the identity :math:`D = L^\top`. Args: dim (int): The dimension. Returns: duplicator (np.ndarray): The duplication matrix. 
Example: >>> import numpy as np >>> from numpy.testing import assert_array_almost_equal >>> dim = 10 >>> A = np.tril(np.random.default_rng().normal(size=(dim, dim))) >>> vectorized = A.ravel('F') >>> half_vectorized = A[np.tril_indices(dim)] >>> D = _duplication_matrix(dim) >>> assert_array_almost_equal(D @ half_vectorized, vectorized) """ duplicator = _elimination_matrix(dim).T return duplicator def _transformation_matrix(dim): r"""Return transformation matrix. Let :math:`A` be a quadratic matrix of dimension :math:`m \times m`. Define the :math:`m-1 \times m-1` matrix :math:`B` as the lower-triangular matrix with entries given by the lower-triangular part of :math:`A` without the diagonal. Set :math:`a := \text{diag}(A)`. We define the special vectorization operator :math:`\bar{\text{vec}}` as the operator that maps the diagonal of a matrix to the first entries of the vector and then proceeds to map the remaining lower part of the matrix using a row-wise half-vectorization scheme. That is, we would have .. math:: \bar{\text{vec}}(A) = (a^\top, \text{vech}(A)^\top)^\top Then the transformation matrix :math:`T` is defined by the property that .. math:: T \text{vec}(A) = \bar{\text{vec}}(A) We use this transformation when we map the vectorization of the modified correlation matrix to the externally stored ``sdcorr_params``. Args: dim (int): The dimension. Returns: transformer (np.ndarray): The transformation matrix. 
Example: >>> import numpy as np >>> from numpy.testing import assert_array_almost_equal >>> from optimagic.utilities import cov_matrix_to_sdcorr_params >>> from optimagic.utilities import cov_to_sds_and_corr >>> cov = np.cov(np.random.default_rng().normal(size=(10, 4))) >>> sds, corr = cov_to_sds_and_corr(cov) >>> corr[np.diag_indices(len(cov))] = sds >>> vectorized = corr.ravel('F') >>> sdcorr_params = cov_matrix_to_sdcorr_params(cov) >>> T = _transformation_matrix(len(cov)) >>> assert_array_almost_equal(T @ vectorized, sdcorr_params) """ n = dimension_to_number_of_triangular_elements(dim) counter = np.zeros((dim, dim)) + np.nan counter[np.diag_indices(dim)] = np.arange(dim, dtype=int) counter[np.tril_indices(dim, k=-1)] = np.arange(dim, n, dtype=int) m = counter.ravel("F") num_na = np.count_nonzero(np.isnan(m)) indices = m.argsort()[:-num_na] rows = [_unit_vector_or_zeros(i, dim**2) for i in indices] transformer = np.vstack(rows) return transformer def _commutation_matrix(dim): r"""Return commutation matrix. Let :math:`A` be a quadratic matrix. Let :math:`\text{vec}(A)` be the column-wise vectorization of :math:`A`. Then the corresponding commutation matrix :math:`K` has the property .. math:: K \text{vec}(A) = \text{vec}(A^\top) See the wiki entry https://tinyurl.com/yydgq2z4 for further information. Args: dim (int): The dimension. Returns: cummuter (np.ndarrary): The cummutation matrix. 
Example: >>> import numpy as np >>> from numpy.testing import assert_array_almost_equal >>> dim = 10 >>> A = np.random.default_rng().normal(size=(dim, dim)) >>> vectorized = A.ravel('F') >>> vectorized_transposed = A.T.ravel('F') >>> K = _commutation_matrix(dim) >>> assert_array_almost_equal(K @ vectorized, vectorized_transposed) """ row = np.arange(dim**2) col = row.reshape((dim, dim), order="F").ravel() commuter = np.zeros((dim**2, dim**2), dtype=np.int8) commuter[row, col] = 1 return commuter def _unit_vector_or_zeros(index, size): """Return unit vector or vector of all zeroes. Args: index (int): On which index to set a 1. If it is set to -1 a vector of all zeros will be returned. size (int): Dimension of the resulting vector. Returns: u (np.ndarray): The unit or zero vector. Example: >>> import numpy as np >>> _unit_vector_or_zeros(1, 2) array([0, 1]) >>> _unit_vector_or_zeros(-1, 2) array([0, 0]) """ u = np.zeros(size, int) if index != -1: u[index] = 1 return u ================================================ FILE: src/optimagic/parameters/nonlinear_constraints.py ================================================ import itertools from dataclasses import asdict from functools import partial import numpy as np import pandas as pd from pybaum import tree_flatten, tree_just_flatten, tree_unflatten from optimagic.differentiation.derivatives import first_derivative from optimagic.exceptions import InvalidConstraintError, InvalidFunctionError from optimagic.optimization.algo_options import CONSTRAINTS_ABSOLUTE_TOLERANCE from optimagic.parameters.block_trees import block_tree_to_matrix from optimagic.parameters.tree_registry import get_registry def process_nonlinear_constraints( nonlinear_constraints, params, bounds, converter, numdiff_options, skip_checks, ): """Process and prepare nonlinear constraints for internal use. 
def process_nonlinear_constraints(
    nonlinear_constraints,
    params,
    bounds,
    converter,
    numdiff_options,
    skip_checks,
):
    """Process and prepare nonlinear constraints for internal use.

    A user-provided nonlinear constraint consists of a function that is evaluated
    on a selection of parameters and returns a scalar or vector. The result must
    either equal a fixed value (equality constraint) or lie between a lower and
    an upper bound (inequality constraint).

    Processing performs the following transformations:

    1. a <= g(x) <= b becomes h(x) >= 0, where h is g itself (a == 0, b == inf),
       g shifted by a (a != 0, b == inf), or the stacked pair
       (g(x) - a, -g(x) + b) when b is finite.
    2. The equality constraint g(x) = v becomes h(x) = g(x) - v = 0.
    3. Vector constraints are later expandable into scalar constraints.
    4. Constraint functions defined on user-facing parameters are wrapped so
       they can be evaluated on internal parameters.

    Args:
        nonlinear_constraints (list[dict]): List of dictionaries, each
            representing a nonlinear constraint.
        params (pytree): The parameters with respect to which the criterion is
            optimized. See :ref:`params` for examples.
        bounds (Bounds): Bounds object with information on parameter bounds.
            See :ref:`bounds` for details.
        converter (Converter): NamedTuple with methods to convert between
            internal and external parameters, derivatives and function outputs.
        numdiff_options (NumdiffOptions): Options for numerical derivatives. See
            :ref:`first_derivative` for details.
        skip_checks (bool): Whether checks on the inputs are skipped. This makes
            the optimization faster, especially for very fast constraint
            functions. Default False.

    Returns:
        list[dict]: List of processed constraints.

    """
    # Validate every constraint first so that malformed input fails fast,
    # before any expensive processing happens.
    evaluations = [
        _check_validity_and_return_evaluation(constr, params, skip_checks)
        for constr in nonlinear_constraints
    ]

    return [
        _process_nonlinear_constraint(
            constr,
            constraint_eval=evaluation,
            params=params,
            bounds=bounds,
            converter=converter,
            numdiff_options=numdiff_options,
        )
        for constr, evaluation in zip(
            nonlinear_constraints, evaluations, strict=False
        )
    ]
def _process_nonlinear_constraint(
    c, constraint_eval, params, bounds, converter, numdiff_options
):
    """Process a single nonlinear constraint.

    Args:
        c (dict): Dictionary describing the constraint. Contains "func" and
            either "value" (equality) or "lower_bounds"/"upper_bounds"
            (inequality); may contain "derivative", "tol" and a selector field.
        constraint_eval: Evaluation of the constraint function at params, or
            None if checks were skipped (it is then computed here).
        params (pytree): User-facing parameter pytree.
        bounds (Bounds | None): Bounds of the full parameter pytree.
        converter (Converter): Converts between internal and external
            parameters, derivatives and function outputs.
        numdiff_options (NumdiffOptions): Options for numerical derivatives.

    Returns:
        dict: Internal representation with keys "n_constr", "type", "fun",
            "jac" and "tol".

    """
    # ==================================================================================
    # Process selector and evaluate functions if necessary
    # ==================================================================================
    external_selector = _process_selector(c)  # functional selector

    constraint_func = c["func"]

    if constraint_eval is None:
        selected = external_selector(params)
        constraint_eval = constraint_func(selected)

    if bounds is not None:
        # TODO: use bounds for numerical derivative; For this to work we need to
        # extend bounds to the full params pytree before passing them to
        # process_nonlinear_constraints.
        # constraint_bounds = replace(
        #     bounds,
        #     lower=external_selector(bounds.lower),
        #     upper=external_selector(bounds.upper),
        # )
        constraint_bounds = None
    else:
        constraint_bounds = None

    # number of scalar constraints before any transformation
    _n_constr = len(np.atleast_1d(constraint_eval))

    # ==================================================================================
    # Consolidate and transform jacobian
    # ==================================================================================

    # process numdiff_options for numerical derivative
    if "derivative" in c:
        if not callable(c["derivative"]):
            msg = "Jacobian of constraints needs to be callable."
            raise ValueError(msg)
        jacobian = c["derivative"]
    else:
        # use finite-differences if no closed-form jacobian is defined
        def jacobian(p):
            return first_derivative(
                constraint_func,
                p,
                bounds=constraint_bounds,
                error_handling="raise_strict",
                **asdict(numdiff_options),
            ).derivative

    # To define the internal Jacobian we need to know which parameters enter the
    # constraint function.
    selection_indices, n_params = _get_selection_indices(params, external_selector)

    def _internal_jacobian(x):
        """Return Jacobian of constraint at internal parameters.

        The constraint function is written to be evaluated on a selection of the
        external parameters. The optimizer, however, only works on internal
        parameters. These can be significantly different from the external
        parameters, due to optimagic's reparametrization features. In this function
        we compute the Jacobian of the constraint at the internal parameters using
        information on the Jacobian of the constraint at the selected external
        parameters.

        """
        params = converter.params_from_internal(x)
        selected = external_selector(params)
        jac = jacobian(selected)
        jac_matrix = block_tree_to_matrix(jac, constraint_eval, selected)
        # fill zero columns for the non-selected parameters
        jac_extended = _extend_jacobian(jac_matrix, selection_indices, n_params)
        jac_internal = converter.derivative_to_internal(
            jac_extended, x, jac_is_flat=True
        )
        return np.atleast_2d(jac_internal)

    # ==================================================================================
    # Transform constraint function and derive bounds
    # ==================================================================================
    _type = "eq" if "value" in c else "ineq"

    if _type == "eq":
        # ==============================================================================
        # Equality constraints
        #
        # We define the internal constraint function to be satisfied if it is equal
        # to zero, by subtracting the fixed value.

        _value = np.atleast_1d(np.array(c["value"], dtype=float))

        def internal_constraint_func(x):
            params = converter.params_from_internal(x)
            select = external_selector(params)
            out = np.atleast_1d(constraint_func(select)) - _value
            return out

        jacobian_from_internal = _internal_jacobian
        n_constr = _n_constr
    else:
        # ==============================================================================
        # Inequality constraints
        #
        # We define the internal constraint function to be satisfied if it is
        # greater than or equal to zero (positivity constraint). If the bounds already
        # satisfy this condition we do not change anything, otherwise we need to
        # perform a transformation.

        def _internal_constraint_func(x):
            params = converter.params_from_internal(x)
            select = external_selector(params)
            return np.atleast_1d(constraint_func(select))

        lower_bounds = c.get("lower_bounds", 0)
        upper_bounds = c.get("upper_bounds", np.inf)

        transformation = _get_transformation(lower_bounds, upper_bounds)

        internal_constraint_func = _compose_funcs(
            _internal_constraint_func, transformation["func"]
        )

        jacobian_from_internal = _compose_funcs(
            _internal_jacobian, transformation["derivative"]
        )

        # "stack" duplicates each scalar constraint (lower and upper side)
        n_constr = 2 * _n_constr if transformation["name"] == "stack" else _n_constr

    internal_constr = {
        "n_constr": n_constr,
        "type": _type,
        "fun": internal_constraint_func,  # internal name for 'func'
        "jac": jacobian_from_internal,  # internal name for 'derivative'
        "tol": c.get("tol", CONSTRAINTS_ABSOLUTE_TOLERANCE),
    }

    return internal_constr


def equality_as_inequality_constraints(nonlinear_constraints):
    """Return constraints where equality constraints are converted to inequality."""
    # each equality constraint is replaced by the pair (g(x), -g(x)) >= 0
    constraints = [_equality_to_inequality(c) for c in nonlinear_constraints]
    return constraints
Hence, the number of constraints doubles, and the constraint functions itself as well as the derivative need to be updated. """ if c["type"] == "eq": def transform(x, func): value = func(x) return np.concatenate((value, -value), axis=0) out = { "fun": partial(transform, func=c["fun"]), "jac": partial(transform, func=c["jac"]), "n_constr": 2 * c["n_constr"], "tol": c["tol"], "type": "ineq", } else: out = c return out def vector_as_list_of_scalar_constraints(nonlinear_constraints): """Return constraints where vector constraints are converted to scalar constraints. This is necessary for internal optimizers that only support scalar constraints. """ list_of_constraints_lists = [ _vector_to_list_of_scalar(c) for c in nonlinear_constraints ] constraints = list(itertools.chain.from_iterable(list_of_constraints_lists)) return constraints def _vector_to_list_of_scalar(constraint): if constraint["n_constr"] > 1: out = [] for k in range(constraint["n_constr"]): c = constraint.copy() fun, jac = _get_components(constraint["fun"], constraint["jac"], idx=k) c["fun"] = fun c["jac"] = jac c["n_constr"] = 1 out.append(c) else: out = [constraint] return out def _get_components(fun, jac, idx): """Return function and derivative for a single component of a vector function. Args: fun (callable): Function that returns a vector. jac (callable): Derivative of the function that returns a matrix. idx (int): Index of the component. Returns: callable: Component function at index idx. callable: Jacobian of the component function. 
""" fun_component = lambda x: fun(x)[idx] jac_component = lambda x: jac(x)[idx] return fun_component, jac_component # ====================================================================================== # Helper Functions # ====================================================================================== def _process_selector(c): if "selector" in c: selector = c["selector"] elif "loc" in c: def selector(params): return params.loc[c["loc"]] elif "query" in c: def selector(params): return params.query(c["query"]) else: selector = _identity return selector def _compose_funcs(f, g): return lambda x: g(f(x)) def _identity(x): return x # ====================================================================================== # Jacobian helper functions # ====================================================================================== def _extend_jacobian(jac_mat, selection_indices, n_params): """Extend Jacobian on selected parameters to full params. Jacobian of constraints is defined on a selection of the parameters, however, we need the Jacobian on the full params. Since the Jacobian is trivially zero at the non-selected params we can simply fill a zero matrix. 
""" jac_extended = np.zeros((jac_mat.shape[0], n_params)) jac_extended[:, selection_indices] = jac_mat return jac_extended def _get_selection_indices(params, selector): """Get index of selected flat params and number of flat params.""" registry = get_registry(extended=True) flat_params, params_treedef = tree_flatten(params, registry=registry) n_params = len(flat_params) indices = np.arange(n_params, dtype=int) params_indices = tree_unflatten(params_treedef, indices, registry=registry) selected = selector(params_indices) selection_indices = np.array( tree_just_flatten(selected, registry=registry), dtype=int ) return selection_indices, n_params # ====================================================================================== # Transformation helper functions # ====================================================================================== def _get_transformation(lower_bounds, upper_bounds): """Get transformation given bounds. The internal inequality constraint is defined as h(x) >= 0. However, the user can specify: a <= g(x) <= b. To get the internal represenation we need to transform the constraint. 
""" transformation_type = _get_transformation_type(lower_bounds, upper_bounds) if transformation_type == "identity": transformer = {"func": _identity, "derivative": _identity} elif transformation_type == "subtract_lb": transformer = { "func": lambda v: v - lower_bounds, "derivative": _identity, } elif transformation_type == "stack": transformer = { "func": lambda v: np.concatenate( (v - lower_bounds, upper_bounds - v), axis=0 ), "derivative": lambda v: np.concatenate((v, -v), axis=0), } transformer["name"] = transformation_type return transformer def _get_transformation_type(lower_bounds, upper_bounds): lb_is_zero = not np.count_nonzero(lower_bounds) ub_is_inf = np.all(np.isposinf(upper_bounds)) if lb_is_zero and ub_is_inf: # the external constraint is already in the correct format _transformation_type = "identity" elif ub_is_inf: # the external constraint can be transformed by subtraction _transformation_type = "subtract_lb" else: # the external constraint can only be transformed by duplication (stacking) _transformation_type = "stack" return _transformation_type # ====================================================================================== # Checks # ====================================================================================== def _check_validity_and_return_evaluation(c, params, skip_checks): """Check that nonlinear constraints are valid. Returns: constaint_eval: Evaluation of constraint at params, if skip_checks if False, else None. """ # ================================================================================== # check functions # ================================================================================== if "func" not in c: raise InvalidConstraintError( "Constraint needs to have entry 'fun', representing the constraint " "function." ) if not callable(c["func"]): raise InvalidConstraintError( "Entry 'fun' in nonlinear constraints has be callable." 
) if "derivative" in c and not callable(c["derivative"]): raise InvalidConstraintError( "Entry 'jac' in nonlinear constraints has be callable." ) # ================================================================================== # check bounds # ================================================================================== is_equality_constraint = "value" in c if is_equality_constraint: if "lower_bounds" in c or "upper_bounds" in c: raise InvalidConstraintError( "Only one of 'value' or ('lower_bounds', 'upper_bounds') can be " "passed to a nonlinear constraint." ) if not is_equality_constraint: if "lower_bounds" not in c and "upper_bounds" not in c: raise InvalidConstraintError( "For inequality constraint at least one of ('lower_bounds', " "'upper_bounds') has to be passed to the nonlinear constraint." ) if "lower_bounds" in c and "upper_bounds" in c: if not np.all(np.array(c["lower_bounds"]) <= np.array(c["upper_bounds"])): raise InvalidConstraintError( "If lower bounds need to less than or equal to upper bounds." ) # ================================================================================== # check selector # ================================================================================== if "selector" in c: if not callable(c["selector"]): raise InvalidConstraintError( f"'selector' entry needs to be callable in constraint {c}." ) else: try: c["selector"](params) except Exception as e: raise InvalidFunctionError( "Error when calling 'selector' function on params in constraint " f" {c}" ) from e elif "loc" in c: if not isinstance(params, (pd.Series, pd.DataFrame)): raise InvalidConstraintError( "params needs to be pd.Series or pd.DataFrame to use 'loc' selector in " f"in consrtaint {c}." 
) try: params.loc[c["loc"]] except (KeyError, IndexError) as e: raise InvalidConstraintError("'loc' string is invalid.") from e elif "query" in c: if not isinstance(params, pd.DataFrame): raise InvalidConstraintError( "params needs to be pd.DataFrame to use 'query' selector in " f"constraints {c}." ) try: params.query(c["query"]) except Exception as e: raise InvalidConstraintError( f"'query' string is invalid in constraint {c}." ) from e # ================================================================================== # check that constraints can be evaluated # ================================================================================== constraint_eval = None if not skip_checks: selector = _process_selector(c) try: constraint_eval = c["func"](selector(params)) except Exception as e: raise InvalidFunctionError( f"Error when evaluating function of constraint {c}." ) from e return constraint_eval ================================================ FILE: src/optimagic/parameters/process_constraints.py ================================================ """Process the user provided pc for use during the optimization. The main purpose of this module is to convert the user provided constraints into inputs for fast reparametrization functions. In the process, the constraints are checked and consolidated. Consolidation means that redundant constraints are dropped and other constraints are collected in meaningful bundles. To improve readability, the actual code for checking and consolidation are in separate modules. Calls to functions doing checking are scattered across the module. This is in order to perform each check as soon as it becomes possible, which allows errors to be raised at a point where constraints still look similar to what users wrote. However, some checks can only be done after consolidation. 
def process_constraints(
    constraints,
    params_vec,
    lower_bounds,
    upper_bounds,
    param_names,
):
    """Process, consolidate and check constraints.

    Args:
        constraints (list): List of constraints where the fields that select
            parameters have already been consolidated into an ``"index"`` field
            that selects the same parameters from the flattened_parameter vector.
        params_vec (np.ndarray): Flattened version of params.
        lower_bounds (np.ndarray | None): Lower bounds for params_vec.
        upper_bounds (np.ndarray | None): Upper bounds for params_vec.
        param_names (list): Names of the flattened parameters. Only used to
            produce good error messages.

    Returns:
        transformations (list): A processed version of those constraints that
            entail actual transformations and not just fixing parameters.
        constr_info (dict): Dict of 1d numpy arrays of length n_params (or None)
            with information that is needed for the reparametrizations.
            - lower_bounds: Lower bounds for the internal parameter vector. Those
              are derived from the original lower bounds and additional bounds
              implied by other constraints.
            - upper_bounds: As lower_bounds but for upper bounds.
            - internal_free: Boolean column that is true for those parameters
              over which the optimizer will actually optimize.
            - pre_replacements: The j_th element indicates the position of the
              internal parameter that has to be copied into the j_th position of
              the external parameter vector when reparametrizing from_internal,
              before any transformations are applied. Negative if no element has
              to be copied.
            - post_replacements: As pre_replacements, but applied after the
              transformations are done.
            - internal_fixed_values: Contains transformed versions of the fixed
              values that will become equal to the external fixed values after
              the kernel transformations are applied.

    """
    # defensive copy: checks below must not mutate the caller's params
    params_vec = params_vec.copy()
    check_types(constraints)

    # rewrite compound constraint types into the basic types handled later
    constraints = _replace_pairwise_equality_by_equality(constraints)
    constraints = _process_linear_weights(constraints)
    check_constraints_are_satisfied(constraints, params_vec, param_names)
    constraints = _replace_increasing_and_decreasing_by_linear(constraints)
    # process newly generated linear constraints
    constraints = _process_linear_weights(constraints)

    transformations, constr_info = consolidate_constraints(
        constraints=constraints,
        parvec=params_vec,
        lower_bounds=lower_bounds,
        upper_bounds=upper_bounds,
        param_names=param_names,
    )

    # these checks are only possible after consolidation
    check_for_incompatible_overlaps(transformations, param_names)
    check_fixes_and_bounds(constr_info, transformations, param_names)

    is_fixed_to_value = constr_info.pop("is_fixed_to_value")
    is_fixed_to_other = constr_info.pop("is_fixed_to_other")

    int_lower, int_upper = _create_internal_bounds(
        constr_info["lower_bounds"], constr_info["upper_bounds"], transformations
    )
    constr_info["internal_free"] = _create_internal_free(
        is_fixed_to_value=is_fixed_to_value,
        is_fixed_to_other=is_fixed_to_other,
        constraints=transformations,
    )
    # reduce the bounds to the free internal parameters
    constr_info["lower_bounds"] = int_lower[constr_info["internal_free"]]
    constr_info["upper_bounds"] = int_upper[constr_info["internal_free"]]

    constr_info["pre_replacements"] = _create_pre_replacements(
        constr_info["internal_free"]
    )

    constr_info["internal_fixed_values"] = _create_internal_fixed_value(
        constr_info["fixed_values"], transformations
    )
    del constr_info["fixed_values"]

    return transformations, constr_info
Returns: list: List of processed constraints. """ pairwise_constraints = [c for c in constraints if c["type"] == "pairwise_equality"] constraints = [c for c in constraints if c["type"] != "pairwise_equality"] for constr in pairwise_constraints: equality_constraints = [ {"index": list(elements), "type": "equality"} for elements in zip(*constr["indices"], strict=False) ] constraints += equality_constraints return constraints def _process_linear_weights(constraints): """Harmonize the weights of linear constraints. Args: constraints (list): Constraints where the selectors have already been processed. Returns: list: Constraints where all weights are Series. """ processed = [] for constr in constraints: if constr["type"] == "linear": raw_weights = constr["weights"] if isinstance(raw_weights, (np.ndarray, list, tuple, pd.Series)): if len(raw_weights) != len(constr["index"]): msg = ( f"weights of length {len(raw_weights)} could not be aligned " f"with selected parameters of length {len(constr['index'])}." ) raise ValueError(msg) weights = np.asarray(raw_weights) elif isinstance(raw_weights, (float, int)): weights = np.full(len(constr["index"]), float(raw_weights)) else: raise TypeError(f"Invalid type for linear weights {type(raw_weights)}.") new_constr = constr.copy() weights_sr = pd.Series(weights, index=constr["index"]) new_constr["weights"] = weights_sr processed.append(new_constr) else: processed.append(constr) return processed def _replace_increasing_and_decreasing_by_linear(constraints): """Write increasing and decreasing constraints as linear constraints. Args: constraints (list): Constraints where the selectors have already been processed. Returns: list: Processed constraints. 
""" increasing_ilocs, other_constraints = [], [] for constr in constraints: if constr["type"] == "increasing": increasing_ilocs.append(constr["index"]) elif constr["type"] == "decreasing": increasing_ilocs.append(constr["index"][::-1]) else: other_constraints.append(constr) linear_constraints = [] for iloc in increasing_ilocs: for smaller, larger in zip(iloc, iloc[1:], strict=False): linear_constr = { "index": [smaller, larger], "type": "linear", "weights": np.array([-1, 1]), "lower_bound": 0, } linear_constraints.append(linear_constr) processed = linear_constraints + other_constraints return processed def _create_internal_bounds(lower, upper, constraints): """Create bounds for the internal parameter vector. The resulting arrays have the length of the flat external params and will be reduced later. Args: lower (np.ndarray): Processed and consolidated external lower bounds. upper (np.ndarray): Processed and consolidated external upper bounds. constraints (pd.DataFrame): Processed and consolidated constraints. Returns: int_lower (np.ndarray): Lower bound of internal parameters. int_upper (np.ndarray): Upper bound of internal parameters. """ int_lower, int_upper = lower.copy(), upper.copy() for constr in constraints: if constr["type"] in ["covariance", "sdcorr"]: # Note that the diagonal positions are the same for covariance and sdcorr # because the internal params contains the Cholesky factor of the implied # covariance matrix in both cases. 
dim = number_of_triangular_elements_to_dimension(len(constr["index"])) diag_positions = [0, *np.cumsum(range(2, dim + 1)).tolist()] diag_indices = np.array(constr["index"])[diag_positions].tolist() bd = constr.get("regularization", 0) bd = np.sqrt(bd) if constr["type"] == "covariance" else bd int_lower[diag_indices] = np.maximum(int_lower[diag_indices], bd) elif constr["type"] == "probability": int_lower[constr["index"]] = 0 elif constr["type"] == "linear": int_lower[constr["index"]] = -np.inf int_upper[constr["index"]] = np.inf relevant_index = constr["index"][-len(constr["right_hand_side"]) :] int_lower[relevant_index] = constr["right_hand_side"]["lower_bound"] int_upper[relevant_index] = constr["right_hand_side"]["upper_bound"] else: raise TypeError("Invalid constraint type {}".format(constr["type"])) return int_lower, int_upper def _create_internal_free(is_fixed_to_value, is_fixed_to_other, constraints): """Boolean array that is True for parameters over which the optimizer optimizes. Args: is_fixed_to_value (np.ndarray): boolean array is_fixed_to_other (np.ndarray): boolean array Returns: np.ndarray """ int_fixed = is_fixed_to_value | is_fixed_to_other for constr in constraints: if constr["type"] == "probability": int_fixed[constr["index"][-1]] = True elif constr["type"] == "linear": int_fixed[constr["index"]] = False relevant_index = constr["index"][-len(constr["right_hand_side"]) :] int_fixed[relevant_index] = np.isfinite(constr["right_hand_side"]["value"]) int_free = ~int_fixed return int_free def _create_pre_replacements(internal_free): """Create an array with internal position of parameters. The j_th element indicates the position of the internal parameter that has to be copied into the j_th position of the external parameter vector when reparametrizing from_internal, before any transformations are applied. Negative if no element has to be copied. 
This will be used to copy the free internal parameters into a parameter vector that has the same length as all params. Args: internal_free (np.ndarray): The _internal_free column of the processed params. """ pre_replacements = np.full(len(internal_free), -1) pre_replacements[internal_free] = np.arange(internal_free.sum()) return pre_replacements def _create_internal_fixed_value(fixed_values, constraints): """Create and array with the values to which internal parameters are fixed. This contains additional fixes used to enforce other constraints and (potentially transformed) user specified fixed values. Args: fixed_value (np.ndarray): The (external) _fixed_value column of pp. constraints (list): Processed and consolidated params. """ int_fix = fixed_values.copy() for constr in constraints: if constr["type"] == "probability": int_fix[constr["index"][-1]] = 1 elif constr["type"] in ["covariance", "sdcorr"]: int_fix[constr["index"][0]] = np.sqrt(int_fix[constr["index"][0]]) elif constr["type"] == "linear": int_fix[constr["index"]] = np.nan relevant_index = constr["index"][-len(constr["right_hand_side"]) :] int_fix[relevant_index] = constr["right_hand_side"]["value"].to_numpy() return int_fix ================================================ FILE: src/optimagic/parameters/process_selectors.py ================================================ import warnings from collections import Counter import numpy as np import pandas as pd from pybaum import tree_just_flatten from optimagic.constraints import Constraint from optimagic.exceptions import InvalidConstraintError from optimagic.parameters.tree_registry import get_registry def process_selectors(constraints, params, tree_converter, param_names): """Process and harmonize the selector fields of constraints. By selector fields we mean loc, locs, query, queries, selector and selectors entries in constraints. 
The processed selector fields are called "index" and are integer numpy arrays with positions of parameters in a flattened parameter vector. Args: constraints (list): User provided constraints. params (pytree): User provided params. tree_converter (TreeConverter): NamedTuple with methods to convert between flattend and unflattend parameters. param_names (list): Names of flattened parameters. Used for error messages. Returns: list: List of constraints with additional "index" entry. """ # fast path if constraints in (None, []): return [] if isinstance(constraints, dict): constraints = [constraints] registry = get_registry(extended=True) n_params = len(tree_converter.params_flatten(params)) helper = tree_converter.params_unflatten(np.arange(n_params)) params_case = _get_params_case(params) flat_constraints = [] for constr in constraints: selector_case = _get_selector_case(constr) field = _get_selection_field( constraint=constr, selector_case=selector_case, params_case=params_case, ) evaluator = _get_selection_evaluator( field=field, constraint=constr, params_case=params_case, registry=registry, ) try: with warnings.catch_warnings(): warnings.simplefilter("ignore", category=pd.errors.PerformanceWarning) selected = evaluator(helper) except (KeyboardInterrupt, SystemExit): raise except Exception as e: msg = ( "An error occurred when trying to select parameters for the following " f"constraint:\n{constr}" ) raise InvalidConstraintError(msg) from e if selector_case == "one selector": if np.isscalar(selected): selected = [selected] selected = np.array(selected).astype(int) _fail_if_duplicates(selected, constr, param_names) else: selected = [[sel] if np.isscalar(sel) else sel for sel in selected] _fail_if_selections_are_incompatible(selected, constr) selected = [np.array(sel).astype(int) for sel in selected] for sel in selected: _fail_if_duplicates(sel, constr, param_names) new_constr = constr.copy() if selector_case == "one selector": new_constr["index"] = selected else: 
new_constr["indices"] = selected if selector_case == "one selector": if len(new_constr["index"]) > 0: flat_constraints.append(new_constr) else: if len(new_constr["indices"][0]) > 0: flat_constraints.append(new_constr) return flat_constraints def _get_selection_field(constraint, selector_case, params_case): """Get the relevant selection field of a constraint.""" selector_case = _get_selector_case(constraint) valid = { "multiple selectors": { "dataframe": {"locs", "queries", "selectors"}, "numpy array": {"locs", "selectors"}, "pytree": {"selectors"}, "series": {"locs", "selectors"}, }, "one selector": { "dataframe": {"loc", "query", "selector"}, "numpy array": {"loc", "selector"}, "pytree": {"selector"}, "series": {"loc", "selector"}, }, } valid = valid[selector_case][params_case] present = set(constraint).intersection(valid) if not present: msg = ( "No valid parameter selection field in constraint. Valid selection fields " f"are {valid}. The constraint is:\n{constraint}" ) raise InvalidConstraintError(msg) elif len(present) > 1: msg = ( f"Too many parameter selection fields in constraint: {present}. " "Constraints must have exactly one parameter selection field. 
The " f"constraint was:\n{constraint}" ) raise InvalidConstraintError(msg) field = list(present)[0] return field def _get_selection_evaluator(field, constraint, params_case, registry): if field == "selector": def evaluator(params): raw = constraint["selector"](params) flat = tree_just_flatten(raw, registry=registry) return flat elif field == "selectors": def evaluator(params): raw = [sel(params) for sel in constraint["selectors"]] flat = [tree_just_flatten(r, registry=registry) for r in raw] return flat elif field == "loc": if params_case == "dataframe": def evaluator(params): return params.loc[constraint["loc"], "value"].tolist() else: def evaluator(params): return params[constraint["loc"]].tolist() elif field == "locs": if params_case == "dataframe": def evaluator(params): return [params.loc[lo, "value"].tolist() for lo in constraint["locs"]] else: def evaluator(params): return [params[lo].tolist() for lo in constraint["locs"]] elif field == "query": def evaluator(params): return params.query(constraint["query"])["value"].tolist() elif field == "queries": def evaluator(params): return [params.query(q)["value"].tolist() for q in constraint["queries"]] else: raise ValueError(f"Invalid parameter selection field: {field}") return evaluator def _get_params_case(params): if isinstance(params, pd.DataFrame) and "value" in params: params_case = "dataframe" elif isinstance(params, pd.Series): params_case = "series" elif isinstance(params, np.ndarray): params_case = "numpy array" else: params_case = "pytree" return params_case def _get_selector_case(constraint): if constraint["type"] == "pairwise_equality": selector_case = "multiple selectors" else: selector_case = "one selector" return selector_case def _fail_if_duplicates( selected: list[int], constraint: Constraint, param_names: list[str] ) -> None: duplicates = _find_duplicates(selected) if duplicates: names = [param_names[i] for i in duplicates] msg = ( "Error while processing constraints. 
There are duplicates in selected " "parameters. The parameters that were selected more than once are " f"{names}. The problematic constraint is:\n{constraint}" ) raise InvalidConstraintError(msg) def _fail_if_selections_are_incompatible(selected, constraint): if len(selected) <= 1: msg = ( "pairwise equality constraints require mutliple sets of selected " "parameters but there is just one in the following constraint:\n" f"{constraint}" ) raise InvalidConstraintError(msg) lengths = [len(sel) for sel in selected] if len(set(lengths)) != 1: msg = ( "All sets of selected parameters for pairwise equality constraints need " f"to have the same length. You have lengths {lengths} in constraint:\n" f"{constraint}" ) raise InvalidConstraintError(msg) def _find_duplicates(list_): return [item for item, count in Counter(list_).items() if count > 1] ================================================ FILE: src/optimagic/parameters/scale_conversion.py ================================================ from dataclasses import dataclass import numpy as np from numpy.typing import NDArray from optimagic.parameters.scaling import ScalingOptions from optimagic.parameters.space_conversion import InternalParams @dataclass(frozen=True) class ScaleConverter: factor: NDArray[np.float64] | None offset: NDArray[np.float64] | None def params_to_internal(self, vec: NDArray[np.float64]) -> NDArray[np.float64]: """Scale a parameter vector from external scale to internal one.""" if self.offset is not None: vec = vec - self.offset if self.factor is not None: vec = vec / self.factor return vec def params_from_internal(self, vec: NDArray[np.float64]) -> NDArray[np.float64]: """Scale a parameter vector from internal scale to external one.""" if self.factor is not None: vec = vec * self.factor if self.offset is not None: vec = vec + self.offset return vec def derivative_to_internal( self, derivative: NDArray[np.float64] ) -> NDArray[np.float64]: """Scale a derivative vector from external scale to internal 
one.""" if self.factor is not None: derivative = derivative * self.factor return derivative def derivative_from_internal( self, derivative: NDArray[np.float64] ) -> NDArray[np.float64]: """Scale a derivative vector from internal scale to external one.""" if self.factor is not None: derivative = derivative / self.factor return derivative def get_scale_converter( internal_params: InternalParams, scaling: ScalingOptions | None, ) -> tuple[ScaleConverter, InternalParams]: """Get a converter between scaled and unscaled parameters. Args: internal_params: NamedTuple of internal and possibly reparametrized but not yet scaled parameter values and bounds. scaling: Scaling options. If None, no scaling is performed. Returns: ScaleConverter: Dataclass with methods to convert between scaled and unscaled internal parameters and derivatives. InternalParams: Dataclass with internal parameter values and bounds. """ # fast path if scaling is None: return ScaleConverter(factor=None, offset=None), internal_params factor, offset = calculate_scaling_factor_and_offset( internal_params=internal_params, options=scaling, ) converter = ScaleConverter(factor=factor, offset=offset) if internal_params.soft_lower_bounds is not None: _soft_lower = converter.params_to_internal(internal_params.soft_lower_bounds) else: _soft_lower = None if internal_params.soft_upper_bounds is not None: _soft_upper = converter.params_to_internal(internal_params.soft_upper_bounds) else: _soft_upper = None if internal_params.lower_bounds is not None: _lower_bounds = converter.params_to_internal(internal_params.lower_bounds) else: _lower_bounds = None if internal_params.upper_bounds is not None: _upper_bounds = converter.params_to_internal(internal_params.upper_bounds) else: _upper_bounds = None params = InternalParams( values=converter.params_to_internal(internal_params.values), lower_bounds=_lower_bounds, upper_bounds=_upper_bounds, names=internal_params.names, soft_lower_bounds=_soft_lower, 
soft_upper_bounds=_soft_upper, ) return converter, params def calculate_scaling_factor_and_offset( internal_params: InternalParams, options: ScalingOptions, ) -> tuple[NDArray[np.float64], NDArray[np.float64] | None]: x = internal_params.values lower_bounds = internal_params.lower_bounds upper_bounds = internal_params.upper_bounds if options.method == "start_values": raw_factor = np.clip(np.abs(x), options.clipping_value, np.inf) scaling_offset = None elif options.method == "bounds": if ( lower_bounds is None or np.isinf(lower_bounds).any() or upper_bounds is None or np.isinf(upper_bounds).any() ): raise ValueError( "To use the 'bounds' scaling method, all bounds must be finite." ) raw_factor = upper_bounds - lower_bounds scaling_offset = lower_bounds else: raise ValueError(f"Invalid scaling method: {options.method}") scaling_factor = raw_factor / options.magnitude return scaling_factor, scaling_offset ================================================ FILE: src/optimagic/parameters/scaling.py ================================================ from dataclasses import dataclass from typing import Literal, TypedDict from typing_extensions import NotRequired from optimagic.exceptions import InvalidScalingError @dataclass(frozen=True) class ScalingOptions: """Scaling options in optimization problems. Attributes: method: The method used for scaling. Can be "start_values" or "bounds". Default is "start_values". clipping_value: The minimum value to which elements are clipped to avoid division by zero. Must be a positive number. Default is 0.1. magnitude: A factor by which the scaled parameters are multiplied to adjust their magnitude. Must be a positive number. Default is 1.0. Raises: InvalidScalingError: If scaling options cannot be processed, e.g. because they do not have the correct type. 
""" method: Literal["start_values", "bounds"] = "start_values" clipping_value: float = 0.1 magnitude: float = 1.0 def __post_init__(self) -> None: _validate_attribute_types_and_values(self) class ScalingOptionsDict(TypedDict): method: NotRequired[Literal["start_values", "bounds"]] clipping_value: NotRequired[float] magnitude: NotRequired[float] def pre_process_scaling( scaling: bool | ScalingOptions | ScalingOptionsDict | None, ) -> ScalingOptions | None: """Convert all valid types of scaling options to optimagic.ScalingOptions. This just harmonizes multiple ways of specifying scaling options into a single format. It performs runtime type checks, but it does not check whether scaling options are consistent with other option choices. Args: scaling: The user provided scaling options. Returns: The scaling options in the optimagic format. Raises: InvalidScalingError: If scaling options cannot be processed, e.g. because they do not have the correct type. """ if isinstance(scaling, bool): scaling = ScalingOptions() if scaling else None elif isinstance(scaling, ScalingOptions) or scaling is None: pass else: try: scaling = ScalingOptions(**scaling) except (KeyboardInterrupt, SystemExit): raise except Exception as e: if isinstance(e, InvalidScalingError): raise e raise InvalidScalingError( f"Invalid scaling options of type: {type(scaling)}. Scaling options " "must be of type optimagic.ScalingOptions, a dictionary with a subset " "of the keys {'method', 'clipping_value', 'magnitude'}, None, or a " "boolean." ) from e return scaling def _validate_attribute_types_and_values(options: ScalingOptions) -> None: if options.method not in ("start_values", "bounds"): raise InvalidScalingError( f"Invalid scaling method: {options.method}. Valid methods are " "'start_values' and 'bounds'." ) if ( not isinstance(options.clipping_value, int | float) or options.clipping_value <= 0 ): raise InvalidScalingError( f"Invalid clipping value: {options.clipping_value}. 
Clipping value " "must be a positive number." ) if not isinstance(options.magnitude, int | float) or options.magnitude <= 0: raise InvalidScalingError( f"Invalid scaling magnitude: {options.magnitude}. Scaling magnitude " "must be a positive number." ) ================================================ FILE: src/optimagic/parameters/space_conversion.py ================================================ """Handle constraints by reparametrizations. The functions in this module allow to convert between internal and external parameter vectors. An external parameter vector is a possibly flattened version of the parameter vector as it was specified by the user. This external parameter vector might be subject to constraints, such as the condition that the first two parameters are equal. An internal parameter vector is an internal representation of the parameters in a different space. The internal parameters are meaningless and have no direct interpretation. However, the internal parameter vector has two important properties: 1. It is only subject to box constraints 2. `reparametrize_from_internal(internal_parameter)` always produces a valid external parameter vector (i.e. one that fulfills all constraints. For more background see :ref:`implementation_of_constraints`. The reparametrization from internal can be broken down into three separate steps: - Writing values from the internal parameter vector into an array that is as long as the external parameters and contains NaNs or values to which parameters have been fixed. We call this step `pre_replace`. - Transforming slices of the resulting vector with kernel transformations. Note that this step does not change the length. All kernel transformations have as many input as output parameters and are invertible. We call this step `transformation`. The resulting vector might still contrain NaNs. - Fill the NaNs by duplicating values of the transformed parameter vector. 
We call this step `post_replace` In the following, let n_external be the length of th external parameter vector and n_internal the length of the internal parameter vector. """ from dataclasses import dataclass from functools import partial from typing import Callable import numpy as np import optimagic.parameters.kernel_transformations as kt from optimagic.parameters.process_constraints import process_constraints def get_space_converter( internal_params, internal_constraints, ): """Get functions to convert between in-/external space of params and derivatives. In the internal parameter space the optimization problem is unconstrained except for bounds. Args: internal_params (InternalParams): Dataclass with internal parameter values and bounds. internal_constraints (list): List of constraints with processed selector fields. Returns: SpaceConverter: The space converter. InternalParams: Dataclass with entries: - value (np.ndarray): Internal parameter values. - lower_bounds (np.ndarray | None): Lower bounds on the internal params. - upper_bounds (np.ndarray | None): Upper bounds on the internal params. - soft_lower_bounds (np.ndarray | None): Soft lower bounds on the internal params. - soft_upper_bounds (np.ndarray | None): Soft upper bounds on the internal params. - name (list): List of names of the external parameters. - free_mask (np.ndarray): Boolean mask representing which external parameter is free. 
""" transformations, constr_info = process_constraints( constraints=internal_constraints, params_vec=internal_params.values, lower_bounds=internal_params.lower_bounds, upper_bounds=internal_params.upper_bounds, param_names=internal_params.names, ) _params_to_internal = partial( reparametrize_to_internal, internal_free=constr_info["internal_free"], transformations=transformations, ) _params_from_internal = partial( reparametrize_from_internal, fixed_values=constr_info["internal_fixed_values"], pre_replacements=constr_info["pre_replacements"], transformations=transformations, post_replacements=constr_info["post_replacements"], ) _dim_internal = int(constr_info["internal_free"].sum()) _pre_replace_jac = pre_replace_jacobian( pre_replacements=constr_info["pre_replacements"], dim_in=_dim_internal ) _post_replace_jac = post_replace_jacobian( post_replacements=constr_info["post_replacements"] ) _derivative_to_internal = partial( convert_external_derivative_to_internal, fixed_values=constr_info["internal_fixed_values"], pre_replacements=constr_info["pre_replacements"], transformations=transformations, pre_replace_jac=_pre_replace_jac, post_replace_jac=_post_replace_jac, ) _has_transforming_constraints = bool(transformations) converter = SpaceConverter( params_to_internal=_params_to_internal, params_from_internal=_params_from_internal, derivative_to_internal=_derivative_to_internal, has_transforming_constraints=_has_transforming_constraints, ) free_mask = constr_info["internal_free"] if ( internal_params.soft_lower_bounds is not None and not _has_transforming_constraints ): _soft_lower = internal_params.soft_lower_bounds[free_mask] else: _soft_lower = None if ( internal_params.soft_upper_bounds is not None and not _has_transforming_constraints ): _soft_upper = internal_params.soft_upper_bounds[free_mask] else: _soft_upper = None params = InternalParams( values=converter.params_to_internal(internal_params.values), lower_bounds=constr_info["lower_bounds"], 
upper_bounds=constr_info["upper_bounds"], names=internal_params.names, free_mask=free_mask, soft_lower_bounds=_soft_lower, soft_upper_bounds=_soft_upper, ) return converter, params @dataclass(frozen=True) class SpaceConverter: params_to_internal: Callable params_from_internal: Callable derivative_to_internal: Callable has_transforming_constraints: bool def reparametrize_to_internal( external, internal_free, transformations, ): """Convert a params DataFrame into a numpy array of internal parameters. Args: external (np.ndarray or pandas.DataFrmae): 1d array with of external parameter values or params DataFrame. internal_free (np.ndarray): 1d array of lenth n_external that determines which parameters are free. transformations (list): Processed transforming constraints. Returns: internal_params (numpy.ndarray): 1d numpy array of free reparametrized parameters. """ with_internal_values = external.copy() for constr in transformations: func = getattr(kt, f"{constr['type']}_to_internal") with_internal_values[constr["index"]] = func(external[constr["index"]], constr) internal = with_internal_values[internal_free] return internal def reparametrize_from_internal( internal, fixed_values, pre_replacements, transformations, post_replacements, ): """Convert a numpy array of internal parameters to a params DataFrame. Args: internal (numpy.ndarray): 1d numpy array with internal parameters fixed_values (numpy.ndarray): 1d numpy array of length n_external. It contains NaN for parameters that are not fixed and an internal representation of the value to which a parameter has been fixed for all others. pre_replacements (numpy.ndarray): 1d numpy of length n_external. The i_th element in array contains the position of the internal parameter that has to be copied to the i_th position of the external parameter vector or -1 if no value has to be copied. transformations (list): Processed transforming constraints. post_replacements (numpy.ndarray): 1d numpy array of lenth n_external. 
The i_th element contains the position a parameter in the transformed parameter vector that has to be copied to duplicated and copied to the i_th position of the external parameter vector. Returns: numpy.ndarray: Array with external parameters """ # do pre-replacements external_values = pre_replace(internal, fixed_values, pre_replacements) # do transformations for constr in transformations: func = getattr(kt, f"{constr['type']}_from_internal") external_values[constr["index"]] = func( external_values[constr["index"]], constr ) # do post-replacements external_values = post_replace(external_values, post_replacements) return external_values def convert_external_derivative_to_internal( external_derivative, internal_values, fixed_values, pre_replacements, transformations, post_replacements=None, pre_replace_jac=None, post_replace_jac=None, ): r"""Compute the derivative of the criterion utilizing an external derivative. Denote by :math:`c` the criterion function which is evaluated on the full parameter set. Denote by :math:`g` the paramater transform which maps an internal to an external paramter, i.e :math:`g: x \mapsto g(x)`, with :math:`x` denoting the internal paramter vector and :math:`g(x)` the respective external parameter frame. We are interested in the derivative of the composition :math:`f := c \circ g` which maps an internal vector to the criterion value. The derivative can be computed using the chain rule, as .. math:: \frac{\mathrm{d}f}{\mathrm{d}x}(x) = \frac{\mathrm{d}c}{\mathrm{d}g}(g(x)) \times \frac{\mathrm{d}g}{\mathrm{d}x}(x) We assume that the user provides the first part of the above product. The second part denotes the derivative of the parameter transform from inner to external. Args: external_derivative (numpy.ndarray): The external derivative evaluated at external values mapped from ``internal_values``. internal_values (numpy.ndarray): 1d numpy array with internal parameters fixed_values (numpy.ndarray): 1d numpy array of length n_external. 
It contains NaN for parameters that are not fixed and an internal representation of the value to which a parameter has been fixed for all others. pre_replacements (numpy.ndarray): 1d numpy of length n_external. The i_th element in array contains the position of the internal parameter that has to be copied to the i_th position of the external parameter vector or -1 if no value has to be copied. transformations (list): Processed transforming constraints. post_replacements (numpy.ndarray): 1d numpy array of lenth n_external. The i_th element contains the position a parameter in the transformed parameter vector that has to be copied to duplicated and copied to the i_th position of the external parameter vector. pre_replace_jac (np.ndarray): 2d Array with the jacobian of pre_replace post_replacment_jacobian (np.ndarray): 2d Array with the jacobian post_replace Returns: deriv (numpy.ndarray): The gradient or Jacobian. """ dim_in = len(internal_values) pre_replaced = pre_replace(internal_values, fixed_values, pre_replacements) if post_replacements is None and post_replace_jac is None: raise ValueError( "either post_replacements or post_replace_jac must be specified." ) if pre_replace_jac is None: pre_replace_jac = pre_replace_jacobian(pre_replacements, dim_in) if post_replace_jac is None: post_replace_jac = post_replace_jacobian(post_replacements) transform_jac = transformation_jacobian(transformations, pre_replaced) external_derivative = np.atleast_2d(external_derivative) tall_external = external_derivative.shape[0] > external_derivative.shape[1] mat_list = [ external_derivative, post_replace_jac, transform_jac, pre_replace_jac, ] if tall_external: deriv = _multiply_from_right(mat_list) else: deriv = _multiply_from_left(mat_list) # return gradient with shape (len(params),) if deriv.shape[0] == 1: deriv = deriv.flatten() return deriv def _multiply_from_left(mat_list): """Multiply all matrices in the list, starting from the left. 
Note that this only affects the order in which the pairwise multiplications happen, not the actual result. """ out = mat_list[0] for mat in mat_list[1:]: out = out @ mat return out def _multiply_from_right(mat_list): """Multiply all matrices in the list, starting from the right. Note that this only affects the order in which the pairwise multiplications happen, not the actual result. """ out = mat_list[-1] for mat in reversed(mat_list[:-1]): out = mat @ out return out def pre_replace(internal_values, fixed_values, pre_replacements): """Return pre-replaced parameters. Args: internal (numpy.ndarray): 1d numpy array with internal parameter. fixed_values (numpy.ndarray): 1d numpy array of length n_external. It contains NaN for parameters that are not fixed and an internal representation of the value to which a parameter has been fixed for all others. pre_replacements (numpy.ndarray): 1d numpy of length n_external. The i_th element in array contains the position of the internal parameter that has to be copied to the i_th position of the external parameter vector or -1 if no value has to be copied. Returns: pre_replaced (numpy.ndarray): 1d numpy array with pre-replaced params. Examples: >>> internal_values = np.array([1., 2.]) >>> fixed_values = np.array([np.nan, 0, np.nan]) >>> pre_replacements = np.array([1, -1, 0]) >>> pre_replace(internal_values, fixed_values, pre_replacements) array([2., 0., 1.]) """ pre_replaced = fixed_values.copy() mask = pre_replacements >= 0 positions = pre_replacements[mask] pre_replaced[mask] = internal_values[positions] return pre_replaced def pre_replace_jacobian(pre_replacements, dim_in): """Return Jacobian of pre-replacement step. Remark. The function ``pre_replace`` can have ``np.nan`` in its output. In this case we know from the underlying structure that the derivative of this output with respect to any of the inputs is zero. 
Here we use this additional knowledge; however, when the derivative is computed using a numerical differentiation technique this will not be the case. Thus the numerical derivative can differ from the derivative here in these cases. Args: pre_replacements (numpy.ndarray): 1d numpy of length n_external. The i_th element in array contains the position of the internal parameter that has to be copied to the i_th position of the external parameter vector or -1 if no value has to be copied. dim_in (int): Dimension of the internal parameters. Returns: jacobian (np.ndarray): The jacobian. Examples: >>> # Note: The example is the same as in the doctest of pre_replace >>> pre_replacements = np.array([1, -1, 0]) >>> pre_replace_jacobian(pre_replacements, 2) array([[0., 1.], [0., 0.], [1., 0.]]) """ dim_out = len(pre_replacements) mask = pre_replacements >= 0 position_in = pre_replacements[mask] position_out = np.arange(dim_out)[mask] jacobian = np.zeros((dim_out, dim_in)) jacobian[position_out, position_in] = 1 return jacobian def transformation_jacobian(transformations, pre_replaced): """Return Jacobian of constraint transformation step. The Jacobian of the constraint transformation step is build as a block matrix of either identity matrices, in the case when the external parameter equals the internal parameter, or, of the Jacobians of the specific kernel transforms, in case the external paramater is a transformed version of the internal. Args: transformations (list): Processed transforming constraints. pre_replaced (numpy.ndarray): 1d numpy array with pre-replaced params. dim (int): The dimension of the external parameters. Returns: jacobian (numpy.ndarray): The Jacobian. 
""" dim = len(pre_replaced) jacobian = np.eye(dim) for constr in transformations: block_indices = constr["index"] jacobian_func = getattr(kt, f"{constr['type']}_from_internal_jacobian") jac = jacobian_func(pre_replaced[block_indices], constr) jacobian[np.ix_(block_indices, block_indices)] = jac return jacobian def post_replace(external_values, post_replacements): """Return post-replaed parameters. Args: external_values (numpy.ndarray): 1d numpy array of external params. post_replacements (numpy.ndarray): 1d numpy array of lenth n_external. The i_th element contains the position a parameter in the transformed parameter vector that has to be copied to duplicated and copied to the i_th position of the external parameter vector. Returns: post_replaced (numpy.ndarray): 1d numpy array with post-replaced params. Examples: >>> external_values = np.array([3., 4., np.nan]) >>> post_replacements = np.array([-1, -1, 1]) >>> post_replace(external_values, post_replacements) array([3., 4., 4.]) """ post_replaced = external_values.copy() mask = post_replacements >= 0 positions = post_replacements[mask] post_replaced[mask] = post_replaced[positions] return post_replaced def post_replace_jacobian(post_replacements): """Return Jacobian of post-replacement step. Args: post_replacements (numpy.ndarray): 1d numpy array of lenth n_external. The i_th element contains the position a parameter in the transformed parameter vector that has to be copied to duplicated and copied to the i_th position of the external parameter vector. dim (int): The dimension of the external parameters. Returns: jacobian (np.ndarray): The Jacobian. 
Examples: >>> # Note: the example is the same as in the doctest of post_replace >>> post_replacements = np.array([-1, -1, 1]) >>> post_replace_jacobian(post_replacements) array([[1., 0., 0.], [0., 1., 0.], [0., 1., 0.]]) """ dim = len(post_replacements) mask = post_replacements >= 0 positions_in = post_replacements[mask] positions_out = np.arange(dim)[mask] jacobian = np.eye(dim) jacobian[positions_out, :] *= 0 jacobian[positions_out, positions_in] = 1 return jacobian @dataclass(frozen=True) class InternalParams: values: np.ndarray lower_bounds: np.ndarray | None upper_bounds: np.ndarray | None soft_lower_bounds: np.ndarray | None = None soft_upper_bounds: np.ndarray | None = None names: list | None = None free_mask: np.ndarray | None = None ================================================ FILE: src/optimagic/parameters/tree_conversion.py ================================================ from typing import Callable, NamedTuple import numpy as np from pybaum import leaf_names, tree_flatten, tree_just_flatten, tree_unflatten from optimagic.exceptions import InvalidFunctionError from optimagic.parameters.block_trees import block_tree_to_matrix from optimagic.parameters.bounds import get_internal_bounds from optimagic.parameters.tree_registry import get_registry from optimagic.typing import AggregationLevel def get_tree_converter( params, bounds, func_eval, solver_type, derivative_eval=None, add_soft_bounds=False, ): """Get flatten and unflatten functions for criterion and its derivative. The function creates a converter with methods to convert parameters, derivatives and the output of the criterion function between the user provided pytree structure and flat representations. The main motivation for bundling all of this together (as opposed to handling parameters, derivatives and function outputs separately) is that the derivative conversion needs to know about the structure of params and the criterion output. Args: params (pytree): The user provided parameters. 
lower_bounds (pytree): The user provided lower_bounds upper_bounds (pytree): The user provided upper bounds solver_type: Used to determine how derivative output has to be transformed for the optimizer. derivative_eval (dict, pytree or None): Evaluation of the derivative of func at params. Used for consistency checks. soft_lower_bounds (pytree): As lower_bounds soft_upper_bounds (pytree): As upper_bounds add_soft_bounds (bool): Whether soft bounds should be added to the flat_params Returns: TreeConverter: NamedTuple with flatten and unflatten methods. FlatParams: NamedTuple of 1d arrays with flattened bounds and param names. """ _registry = get_registry(extended=True) _params_vec, _params_treedef = tree_flatten(params, registry=_registry) _params_vec = np.array(_params_vec).astype(float) _lower, _upper = get_internal_bounds( params=params, bounds=bounds, registry=_registry, ) if add_soft_bounds: _soft_lower, _soft_upper = get_internal_bounds( params=params, bounds=bounds, registry=_registry, add_soft_bounds=add_soft_bounds, ) else: _soft_lower, _soft_upper = None, None _param_names = leaf_names(params, registry=_registry) flat_params = FlatParams( values=_params_vec, lower_bounds=_lower, upper_bounds=_upper, names=_param_names, soft_lower_bounds=_soft_lower, soft_upper_bounds=_soft_upper, ) _params_flatten = _get_params_flatten(registry=_registry) _params_unflatten = _get_params_unflatten( registry=_registry, treedef=_params_treedef ) _derivative_flatten = _get_derivative_flatten( registry=_registry, solver_type=solver_type, params=params, func_eval=func_eval, derivative_eval=derivative_eval, ) converter = TreeConverter( params_flatten=_params_flatten, params_unflatten=_params_unflatten, derivative_flatten=_derivative_flatten, ) return converter, flat_params def _get_params_flatten(registry): def params_flatten(params): return np.array(tree_just_flatten(params, registry=registry)).astype(float) return params_flatten def _get_params_unflatten(registry, treedef): def 
params_unflatten(x): return tree_unflatten(treedef=treedef, leaves=list(x), registry=registry) return params_unflatten def _get_best_key_and_aggregator(needed_key, available_keys): if needed_key in available_keys: key = needed_key if needed_key == "value": aggregate = lambda x: float(x[0]) else: aggregate = lambda x: np.array(x).astype(float) elif needed_key == "contributions" and "root_contributions" in available_keys: key = "root_contributions" aggregate = lambda x: np.array(x).astype(float) ** 2 elif needed_key == "value" and "contributions" in available_keys: key = "contributions" aggregate = lambda x: float(np.sum(x)) elif needed_key == "value" and "root_contributions" in available_keys: key = "root_contributions" aggregate = lambda x: float((np.array(x) ** 2).sum()) else: msg = ( "The optimizer you requested requires a criterion function that returns " f"a dictionary with the entry '{needed_key}'. Your function returns a " f"dictionary that only contains the entries {available_keys}." ) raise InvalidFunctionError(msg) return key, aggregate def _get_derivative_flatten(registry, solver_type, params, func_eval, derivative_eval): # gradient case if solver_type == AggregationLevel.SCALAR: def derivative_flatten(derivative_eval): flat = np.array( tree_just_flatten(derivative_eval, registry=registry) ).astype(float) return flat # jacobian case else: def derivative_flatten(derivative_eval): flat = block_tree_to_matrix( derivative_eval, outer_tree=func_eval, inner_tree=params, ) return flat if derivative_eval is not None: try: derivative_flatten(derivative_eval) except (KeyboardInterrupt, SystemExit): raise except Exception as e: msg = "The output of derivative and criterion cannot be aligned." 
raise InvalidFunctionError(msg) from e return derivative_flatten class TreeConverter(NamedTuple): params_flatten: Callable params_unflatten: Callable derivative_flatten: Callable class FlatParams(NamedTuple): values: np.ndarray lower_bounds: np.ndarray | None upper_bounds: np.ndarray | None soft_lower_bounds: np.ndarray | None = None soft_upper_bounds: np.ndarray | None = None names: list | None = None free_mask: np.ndarray | None = None ================================================ FILE: src/optimagic/parameters/tree_registry.py ================================================ """Wrapper around pybaum get_registry to tailor it to optimagic.""" from functools import partial from itertools import product import numpy as np import pandas as pd from pybaum import get_registry as get_pybaum_registry def get_registry(extended=False, data_col="value"): """Return pytree registry. Special Rules ------------- If extended is True the registry contains pd.DataFrame. In optimagic a data frame can represent a 1d object with extra information, instead of a 2d object. This is only allowed for params data frames, in which case they contain a 'value' column. The extra information of such an object can be accessed using the data_col argument. By default the 'value' column is extracted. If data_col is not 'value' but the data frame contains a 'value' column, a list of np.nan is returned. Args: extended (bool): If True appends types 'numpy.ndarray', 'pandas.Series' and 'pandas.DataFrame' to the registry. data_col (str): This column is used as the data source in a data frame when flattening and unflattening a pytree. Defaults to 'value'; see special rules above for behavior with non-default values. Returns: dict: The pytree registry. 
""" types = ( ["numpy.ndarray", "pandas.Series", "jax.numpy.ndarray"] if extended else None ) registry = get_pybaum_registry(types=types) if extended: registry[pd.DataFrame] = { "flatten": partial(_flatten_df, data_col=data_col), "unflatten": partial(_unflatten_df, data_col=data_col), "names": _get_df_names, } return registry def _flatten_df(df, data_col): is_value_df = "value" in df if is_value_df: flat = df.get(data_col, default=np.full(len(df), np.nan)).tolist() else: flat = df.to_numpy().flatten().tolist() aux_data = { "is_value_df": is_value_df, "df": df, } return flat, aux_data def _unflatten_df(aux_data, leaves, data_col): if aux_data["is_value_df"]: out = aux_data["df"].assign(**{data_col: leaves}) else: out = pd.DataFrame( data=np.array(leaves).reshape(aux_data["df"].shape), columns=aux_data["df"].columns, index=aux_data["df"].index, ) return out def _get_df_names(df): index_strings = list(df.index.map(_index_element_to_string)) if "value" in df: out = index_strings else: out = ["_".join([loc, col]) for loc, col in product(index_strings, df.columns)] return out def _index_element_to_string(element): if isinstance(element, (tuple, list)): as_strings = [str(entry) for entry in element] res_string = "_".join(as_strings) else: res_string = str(element) return res_string ================================================ FILE: src/optimagic/py.typed ================================================ ================================================ FILE: src/optimagic/sandbox.py ================================================ from optimagic.visualization.slice_plot_3d import slice_plot_3d __all__ = ["slice_plot_3d"] ================================================ FILE: src/optimagic/shared/__init__.py ================================================ ================================================ FILE: src/optimagic/shared/check_option_dicts.py ================================================ """Check option dictionaries for minimize, maximize.""" def 
check_optimization_options(options, usage, algorithm_mandatory=True): """Check optimize_options or maximize_options for usage in estimation functions.""" options = {} if options is None else options if algorithm_mandatory: if not isinstance(options, dict) or "algorithm" not in options: raise ValueError( "optimize_options or maximize_options must be a dict containing at " "least the entry 'algorithm'" ) else: if not isinstance(options, dict): raise ValueError( "optimize_options or maximize_options must be a dict or None." ) criterion_options = { "criterion", "criterion_kwargs", "derivative", "derivative_kwargs", } invalid_criterion = criterion_options.intersection(options) if invalid_criterion: msg = ( "Entries related to the criterion function, its derivatives or keyword " "arguments of those functions are not valid entries of optimize_options " f"or maximize_options for {usage}. Remove: {invalid_criterion}" ) raise ValueError(msg) general_options = {"logging", "log_options", "constraints"} invalid_general = general_options.intersection(options) if invalid_general: msg = ( "The following are not valid entries of optimize_options because they are " "not only relevant for minimization but also for inference: " f"{invalid_general}" ) raise ValueError(msg) ================================================ FILE: src/optimagic/shared/compat.py ================================================ """Compatibility module. Contains wrapper functions to handle compatibility issues between different versions of external libraries. """ def pd_df_map(df, func, na_action=None, **kwargs): """Apply a function to a Dataframe elementwise. pandas has depricated the .applymap() function with version 2.1.0. This function calls either .map() (if pandas version is greater or equal to 2.1.0) or .applymap() (if pandas version is smaller than 2.1.0). Args: df (pd.DataFrame): A pandas DataFrame. func (callable): Python function, returns a single value from a single value. 
        na_action (str): If 'ignore', propagate NaN values, without passing them to
            func. If None, pass NaN values to func. Default is None.
        **kwargs: Additional keyword arguments to pass as keywords arguments to func.

    Returns:
        pd.DataFrame: Transformed DataFrame.

    """
    # NOTE(review): despite the docstring, only .map() is called here; the
    # .applymap() fallback for pandas < 2.1.0 is not visible in this chunk —
    # confirm against the full file.
    return df.map(func, na_action=na_action, **kwargs)


================================================ FILE: src/optimagic/shared/process_user_function.py ================================================
"""Process user provided functions."""

import inspect
from functools import partial, update_wrapper

from optimagic.exceptions import InvalidFunctionError, InvalidKwargsError
from optimagic.optimization.fun_value import (
    LeastSquaresFunctionValue,
    LikelihoodFunctionValue,
    ScalarFunctionValue,
)
from optimagic.typing import AggregationLevel
from optimagic.utilities import propose_alternatives


def partial_func_of_params(func, kwargs, name="your function", skip_checks=False):
    """Partial ``kwargs`` into ``func`` and check one free (params) argument remains.

    Args:
        func (callable): User provided function.
        kwargs (dict or None): Keyword arguments to be partialled in.
        name (str): Human readable name of func, used in error messages.
        skip_checks (bool): If True, skip the signature checks.

    Raises:
        InvalidKwargsError: if kwargs are incompatible with func's signature or
            too few / too many arguments remain after partialling.
        InvalidFunctionError: if func has no free argument at all.

    """
    # fast path
    if skip_checks and kwargs in (None, {}):
        return func

    kept, ignored = filter_kwargs(func, kwargs)

    if ignored:
        # Suggest the closest valid argument name for each invalid kwarg.
        possibilities = [p for p in inspect.signature(func).parameters if p != "params"]
        proposals = [propose_alternatives(arg, possibilities, 1)[0] for arg in ignored]

        msg = (
            "The following user provided keyword arguments are not compatible with "
            f"{name}:\n\n"
        )
        for arg, prop in zip(ignored, proposals, strict=False):
            msg += f"{arg}: Did you mean {prop}?"

        raise InvalidKwargsError(msg)

    # update_wrapper preserves static fields that might have been added to the function
    # via mark decorators.
    out = update_wrapper(partial(func, **kept), func)

    if not skip_checks:
        unpartialled_args = get_unpartialled_arguments(out)
        no_default_args = get_arguments_without_default(out)

        no_free_argument_left = len(unpartialled_args) < 1

        if no_free_argument_left and kept:
            raise InvalidKwargsError(
                f"Too many keyword arguments for {name}. After applying all keyword "
                "arguments there must be at least one free argument (the params) left."
            )
        elif no_free_argument_left:
            raise InvalidFunctionError(f"{name} must have at least one free argument.")

        required_args = unpartialled_args.intersection(no_default_args)
        too_many_required_arguments = len(required_args) > 1

        # Try to discover if we have a jax calculated jacobian that has a weird
        # signature that would not pass this test:
        skip_because_of_jax = required_args == {"args", "kwargs"}

        if too_many_required_arguments and not skip_because_of_jax:
            raise InvalidKwargsError(
                f"Too few keyword arguments for {name}. After applying all keyword "
                "arguments at most one required argument (the params) should remain. "
                "in your case the following required arguments remain: "
                f"{required_args}."
            )

    return out


def filter_kwargs(func, kwargs):
    """Split kwargs into those accepted by func and those it would reject."""
    valid = get_unpartialled_arguments(func)
    kept = {key: val for key, val in kwargs.items() if key in valid}
    ignored = {key: val for key, val in kwargs.items() if key not in valid}
    return kept, ignored


def get_unpartialled_arguments(func):
    """Return the set of parameter names not yet bound via functools.partial."""
    unpartialled = set(inspect.signature(func).parameters)
    if isinstance(func, partial):
        partialed_in = set(func.keywords)
        unpartialled = unpartialled - partialed_in
    return unpartialled


def get_arguments_without_default(func):
    """Return the set of parameter names that have no default value."""
    args = dict(inspect.signature(func).parameters)
    no_default = []
    for name, arg in args.items():
        # Skip defaults with a __len__ (e.g. arrays), for which comparing with
        # `==` would not give a scalar boolean.
        if not hasattr(arg.default, "__len__"):
            if arg.default == inspect.Parameter.empty:
                no_default.append(name)
    no_default = set(no_default)
    return no_default


def get_kwargs_from_args(args, func, offset=0):
    """Convert positional arguments to a dict of keyword arguments.

    Args:
        args (list, tuple): Positional arguments.
        func (callable): Function to be called.
        offset (int, optional): Number of arguments to skip. Defaults to 0.

    Returns:
        dict: Keyword arguments.
""" names = list(inspect.signature(func).parameters)[offset:] kwargs = {name: arg for name, arg in zip(names, args, strict=False)} return kwargs def infer_aggregation_level(func): """Infer the problem type from type hints or attributes left by mark decorators. The problem type is either inferred from a `._problem_type` attribute or from type hints. If neither is present, we assume the problem type is scalar. This assumption is motivated by compatibility with the `scipy.optimize` interface. """ return_type = inspect.signature(func).return_annotation if hasattr(func, "_problem_type"): out = func._problem_type elif return_type in (ScalarFunctionValue, float): out = AggregationLevel.SCALAR elif return_type == LeastSquaresFunctionValue: out = AggregationLevel.LEAST_SQUARES elif return_type == LikelihoodFunctionValue: out = AggregationLevel.LIKELIHOOD else: out = AggregationLevel.SCALAR return out ================================================ FILE: src/optimagic/timing.py ================================================ from dataclasses import dataclass from typing import Callable, Iterable @dataclass(frozen=True) class CostModel: fun: float | None jac: float | None fun_and_jac: float | None label: str aggregate_batch_time: Callable[[Iterable[float]], float] def __post_init__(self) -> None: if not callable(self.aggregate_batch_time): raise ValueError( "aggregate_batch_time must be a callable, got " f"{self.aggregate_batch_time}" ) evaluation_time = CostModel( fun=None, jac=None, fun_and_jac=None, label="Function time (seconds)", aggregate_batch_time=sum, ) fun_evaluations = CostModel( fun=1, jac=0, fun_and_jac=1, label="Number of criterion evaluations", aggregate_batch_time=sum, ) fun_batches = CostModel( fun=1, jac=0, fun_and_jac=1, label="Number of batches", aggregate_batch_time=max ) wall_time = "wall_time" TIMING_REGISTRY = { "evaluation_time": evaluation_time, "fun_evaluations": fun_evaluations, "fun_batches": fun_batches, "wall_time": wall_time, } 
================================================ FILE: src/optimagic/type_conversion.py ================================================
from typing import Any

from optimagic.typing import (
    GtOneFloat,
    NonNegativeFloat,
    NonNegativeInt,
    PositiveFloat,
    PositiveInt,
)


def _process_float_like(value: Any) -> float:
    """Process a value that should be converted to a float."""
    return float(value)


def _process_int_like(value: Any) -> int:
    """Process a value that should be converted to an int."""
    if isinstance(value, int):
        return value
    elif isinstance(value, str):
        # Going through float also accepts strings like "1.0", not just "1".
        return int(float(value))
    else:
        return int(value)


def _process_positive_int_like(value: Any) -> PositiveInt:
    """Process a value that should be converted to a positive int."""
    out = _process_int_like(value)
    if out <= 0:
        raise ValueError(f"Value must be positive, got {out}")
    return out


def _process_non_negative_int_like(value: Any) -> NonNegativeInt:
    """Process a value that should be converted to a non-negative int."""
    out = _process_int_like(value)
    if out < 0:
        raise ValueError(f"Value must be non-negative, got {out}")
    return out


def _process_positive_float_like(value: Any) -> PositiveFloat:
    """Process a value that should be converted to a positive float."""
    out = _process_float_like(value)
    if out <= 0:
        raise ValueError(f"Value must be positive, got {out}")
    return out


def _process_non_negative_float_like(value: Any) -> NonNegativeFloat:
    """Process a value that should be converted to a non-negative float."""
    out = _process_float_like(value)
    if out < 0:
        raise ValueError(f"Value must be non-negative, got {out}")
    return out


def _process_gt_one_float_like(value: Any) -> GtOneFloat:
    """Process a value that should be converted to a float greater than one."""
    out = _process_float_like(value)
    if out <= 1:
        raise ValueError(f"Value must be greater than one, got {out}")
    return out


def _process_bool_like(value: Any) -> bool:
    """Process a value that should be converted to a bool."""
    if isinstance(value, bool):
        return value
    elif isinstance(value, str):
        if value.lower() in {"true", "1", "yes"}:
            return True
        elif value.lower() in {"false", "0", "no"}:
            return False
    # NOTE(review): unrecognized strings fall through here and any non-empty
    # string is truthy, so e.g. "nope" maps to True — confirm this is intended.
    return bool(value)


# Maps declared option types to their converter/validator.
TYPE_CONVERTERS = {
    float: _process_float_like,
    int: _process_int_like,
    bool: _process_bool_like,
    PositiveInt: _process_positive_int_like,
    NonNegativeInt: _process_non_negative_int_like,
    PositiveFloat: _process_positive_float_like,
    NonNegativeFloat: _process_non_negative_float_like,
    GtOneFloat: _process_gt_one_float_like,
}


================================================ FILE: src/optimagic/typing.py ================================================
from dataclasses import dataclass, fields
from enum import Enum
from typing import (
    Annotated,
    Any,
    Callable,
    ItemsView,
    Iterator,
    KeysView,
    Literal,
    Protocol,
    TypeVar,
    ValuesView,
)

import numpy as np
from annotated_types import Ge, Gt, Le, Lt
from numpy._typing import NDArray

# A pytree can be any (nested) Python object; the registry defines the semantics.
PyTree = Any
PyTreeRegistry = dict[type | str, dict[str, Callable[[Any], Any]]]
Scalar = Any

T = TypeVar("T")


class AggregationLevel(Enum):
    """Enum to specify the aggregation level of objective functions and solvers."""

    SCALAR = "scalar"
    LEAST_SQUARES = "least_squares"
    LIKELIHOOD = "likelihood"


class Direction(str, Enum):
    """Enum to specify the direction of optimization."""

    MINIMIZE = "minimize"
    MAXIMIZE = "maximize"


@dataclass(frozen=True)
class DictLikeAccess:
    r"""Useful base class for replacing string-based dictionaries with dataclass
    instances and keeping backward compatibility regarding read access to the
    data structure.
""" def __getitem__(self, key: str) -> Any: if key in self.__dict__: return getattr(self, key) else: raise KeyError(f"{key} not found in {self.__class__.__name__}") def __iter__(self) -> Iterator[str]: return iter(self._dict_repr()) def _dict_repr(self) -> dict[str, Any]: return {field.name: getattr(self, field.name) for field in fields(self)} def keys(self) -> KeysView[str]: return self._dict_repr().keys() def items(self) -> ItemsView[str, Any]: return self._dict_repr().items() def values(self) -> ValuesView[str]: return self._dict_repr().values() @dataclass(frozen=True) class TupleLikeAccess: r"""Useful base class for replacing tuples with dataclass instances and keeping backward compatability regarding read access to the data structure. """ def __getitem__(self, index: int | slice) -> Any: field_values = [getattr(self, field.name) for field in fields(self)] return field_values[index] def __len__(self) -> int: return len(fields(self)) def __iter__(self) -> Iterator[str]: for field in fields(self): yield getattr(self, field.name) class ErrorHandling(Enum): """Enum to specify the error handling strategy of the optimization algorithm.""" RAISE = "raise" RAISE_STRICT = "raise_strict" CONTINUE = "continue" class EvalTask(Enum): """Enum to specify the task of the evaluation function.""" FUN = "fun" JAC = "jac" FUN_AND_JAC = "fun_and_jac" EXPLORATION = "exploration" class BatchEvaluator(Protocol): def __call__( self, func: Callable[..., T], arguments: list[Any], n_cores: int = 1, error_handling: ErrorHandling | Literal["raise", "continue"] = ErrorHandling.CONTINUE, unpack_symbol: Literal["*", "**"] | None = None, ) -> list[T]: pass PositiveInt = Annotated[int, Gt(0)] """Type alias for positive integers (greater than 0).""" NonNegativeInt = Annotated[int, Ge(0)] """Type alias for non-negative integers (greater than or equal to 0).""" PositiveFloat = Annotated[float, Gt(0)] """Type alias for positive floats (greater than 0).""" NonNegativeFloat = Annotated[float, Ge(0)] 
"""Type alias for non-negative floats (greater than or equal to 0).""" ProbabilityFloat = Annotated[float, Ge(0), Le(1)] """Type alias for probability floats (between 0 and 1, inclusive).""" NegativeFloat = Annotated[float, Lt(0)] """Type alias for negative floats (less than 0).""" GtOneFloat = Annotated[float, Gt(1)] """Type alias for floats greater than 1.""" UnitIntervalFloat = Annotated[float, Gt(0), Le(1)] """Type alias for floats in (0, 1].""" YesNoBool = Literal["yes", "no"] | bool """Type alias for boolean values represented as 'yes' or 'no' strings or as boolean values.""" DirectionLiteral = Literal["minimize", "maximize"] """Type alias for optimization direction, either 'minimize' or 'maximize'.""" BatchEvaluatorLiteral = Literal["joblib", "pathos", "threading"] """Type alias for batch evaluator types, can be 'joblib', 'pathos', or 'threading'.""" ErrorHandlingLiteral = Literal["raise", "continue"] """Type alias for error handling strategies, can be 'raise' or 'continue'.""" @dataclass(frozen=True) class IterationHistory(DictLikeAccess): """History of iterations in a process. Attributes: params: A list of parameters used in each iteration. criterion: A list of criterion values obtained in each iteration. runtime: A list or array of runtimes associated with each iteration. """ params: list[PyTree] fun: list[float] time: list[float] | NDArray[np.float64] @dataclass(frozen=True) class MultiStartIterationHistory(TupleLikeAccess): """History of multiple start iterations. Attributes: history: The main iteration history, representing the best end value. local_histories: Optional, a list of local iteration histories. exploration: Optional, iteration history for exploration steps. 
""" history: IterationHistory local_histories: list[IterationHistory] | None = None exploration: IterationHistory | None = None ================================================ FILE: src/optimagic/utilities.py ================================================ import difflib import warnings from hashlib import sha1 import cloudpickle import numpy as np import pandas as pd from numpy.typing import NDArray from scipy.linalg import ldl, qr with warnings.catch_warnings(): warnings.simplefilter("ignore", category=UserWarning) def fast_numpy_full(length: int, fill_value: float) -> NDArray[np.float64]: """Return a new array of given length, filled with fill_value. Empirically, this is faster than using np.full for small arrays. """ if length < 18: return np.array([fill_value] * length, dtype=np.float64) else: return np.full(length, fill_value=fill_value, dtype=np.float64) def chol_params_to_lower_triangular_matrix(params): dim = number_of_triangular_elements_to_dimension(len(params)) mat = np.zeros((dim, dim)) mat[np.tril_indices(dim)] = params return mat def cov_params_to_matrix(cov_params): """Build covariance matrix from 1d array with its lower triangular elements. 
    Args:
        cov_params (np.array): 1d array with the lower triangular elements of a
            covariance matrix (in C-order)

    Returns:
        cov (np.array): a covariance matrix

    """
    lower = chol_params_to_lower_triangular_matrix(cov_params)
    # Mirror the strictly-lower part to obtain a symmetric matrix.
    cov = lower + np.tril(lower, k=-1).T
    return cov


def cov_matrix_to_params(cov):
    """Inverse of cov_params_to_matrix: extract the lower triangle as a 1d array."""
    return cov[np.tril_indices(len(cov))]


def sdcorr_params_to_sds_and_corr(sdcorr_params):
    """Split sdcorr params into standard deviations and a correlation matrix."""
    dim = number_of_triangular_elements_to_dimension(len(sdcorr_params))
    sds = np.array(sdcorr_params[:dim])
    corr = np.eye(dim)
    # Fill the strictly-lower triangle, then mirror to make corr symmetric.
    corr[np.tril_indices(dim, k=-1)] = sdcorr_params[dim:]
    corr += np.tril(corr, k=-1).T
    return sds, corr


def sds_and_corr_to_cov(sds, corr):
    """Build a covariance matrix from standard deviations and correlations."""
    diag = np.diag(sds)
    return diag @ corr @ diag


def cov_to_sds_and_corr(cov):
    """Split a covariance matrix into standard deviations and correlations."""
    sds = np.sqrt(np.diagonal(cov))
    diag = np.diag(1 / sds)
    corr = diag @ cov @ diag
    return sds, corr


def sdcorr_params_to_matrix(sdcorr_params):
    """Build covariance matrix out of standard deviations and correlations.

    Args:
        sdcorr_params (np.array): 1d array with parameters. The dimensions of the
            covariance matrix are inferred automatically. The first dim parameters
            are assumed to be the standard deviations. The remainder are the lower
            triangular elements (excluding the diagonal) of a correlation matrix.

    Returns:
        cov (np.array): a covariance matrix

    """
    return sds_and_corr_to_cov(*sdcorr_params_to_sds_and_corr(sdcorr_params))


def cov_matrix_to_sdcorr_params(cov):
    """Inverse of sdcorr_params_to_matrix."""
    dim = len(cov)
    sds, corr = cov_to_sds_and_corr(cov)
    correlations = corr[np.tril_indices(dim, k=-1)]
    return np.hstack([sds, correlations])


def number_of_triangular_elements_to_dimension(num):
    """Calculate the dimension of a square matrix from number of triangular elements.

    Args:
        num (int): The number of upper or lower triangular elements in the matrix.
    Examples:
        >>> number_of_triangular_elements_to_dimension(6)
        3
        >>> number_of_triangular_elements_to_dimension(10)
        4

    """
    # Inverse of dim * (dim + 1) / 2, solved with the quadratic formula.
    return int(np.sqrt(8 * num + 1) / 2 - 0.5)


def dimension_to_number_of_triangular_elements(dim):
    """Calculate number of triangular elements from the dimension of a square matrix.

    Args:
        dim (int): Dimension of a square matrix.

    """
    return int(dim * (dim + 1) / 2)


def propose_alternatives(requested, possibilities, number=3):
    """Propose possible alternatives based on similarity to requested.

    Args:
        requested (str): The requested algorithm.
        possibilities (list(str)): List of available algorithms.
        number (int): Number of proposals.

    Returns:
        proposals (list(str)): List of proposed algorithms.

    Example:
        >>> possibilities = ["scipy_lbfgsb", "scipy_slsqp", "nlopt_lbfgsb"]
        >>> propose_alternatives("scipy_L-BFGS-B", possibilities, number=1)
        ['scipy_slsqp']
        >>> propose_alternatives("L-BFGS-B", possibilities, number=2)
        ['scipy_slsqp', 'scipy_lbfgsb']

    """
    number = min(number, len(possibilities))
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=UserWarning)
        # cutoff=0 guarantees we always return `number` proposals.
        proposals = difflib.get_close_matches(
            requested, possibilities, n=number, cutoff=0
        )
    return proposals


def robust_cholesky(matrix, threshold=None, return_info=False):
    """Lower triangular cholesky factor of *matrix*.

    Args:
        matrix (np.array): Square, symmetric and (almost) positive semi-definite
            matrix
        threshold (float): Small negative number. Diagonal elements of D from the LDL
            decomposition between threshold and zero are set to zero. Default is
            minus machine accuracy.
        return_info (bool): If True, also return a dictionary with 'method'. Method
            can take the values 'np.linalg.cholesky' and 'Eigenvalue QR'.

    Returns:
        chol (np.array): Cholesky factor of matrix
        info (float, optional): see return_info.

    Raises:
        np.linalg.LinalgError if an eigenvalue of *matrix* is below *threshold*.
    In contrast to a regular cholesky decomposition, this function will also work for
    matrices that are only positive semi-definite or even indefinite. For speed and
    precision reasons we first try a regular cholesky decomposition. If it fails we
    switch to more robust methods.

    """
    try:
        chol = np.linalg.cholesky(matrix)
        method = "np.linalg.cholesky"
    except np.linalg.LinAlgError:
        method = "LDL cholesky"
        # Default tolerance: allow diagonal entries down to -machine epsilon.
        threshold = threshold if threshold is not None else -np.finfo(float).eps
        chol = _internal_robust_cholesky(matrix, threshold)

    # Normalize signs so the factor is unique (positive diagonal convention).
    chol_unique = _make_cholesky_unique(chol)
    info = {"method": method}

    out = (chol_unique, info) if return_info else chol_unique
    return out


def robust_inverse(matrix, msg=""):
    """Calculate the inverse or pseudo-inverse of a matrix.

    The difference to calling a pseudo inverse directly is that this function will
    emit a warning if the matrix is singular.

    Args:
        matrix (np.ndarray): Square matrix to be inverted.
        msg (str): Extra text appended to the warning on fallback.

    """
    header = (
        "Standard matrix inversion failed due to LinAlgError described below. "
        "A pseudo inverse was calculated instead. "
    )
    if len(matrix.shape) != 2 or matrix.shape[0] != matrix.shape[1]:
        raise ValueError("Matrix must be square.")
    try:
        out = np.linalg.inv(matrix)
    except np.linalg.LinAlgError:
        # Fall back to the pseudo-inverse but warn, since results may differ.
        out = np.linalg.pinv(matrix)
        warnings.warn(header + msg)
    except Exception:
        # NOTE(review): this handler only re-raises; it is a no-op and could
        # be removed.
        raise

    return out


def _internal_robust_cholesky(matrix, threshold):
    """Lower triangular cholesky factor of *matrix* using an LDL decomposition and QR
    factorization.

    Args:
        matrix (np.array): Square, symmetric and (almost) positive semi-definite
            matrix
        threshold (float): Small negative number. Diagonal elements of D from the LDL
            decomposition between threshold and zero are set to zero. Default is
            minus machine accuracy.

    Returns:
        chol (np.array): Cholesky factor of matrix.

    Raises:
        np.linalg.LinalgError if diagonal entry in D from LDL decomposition is below
        *threshold*.
""" lu, d, _ = ldl(matrix) diags = np.diagonal(d).copy() for i in range(len(diags)): if diags[i] >= 0: diags[i] = np.sqrt(diags[i]) elif diags[i] > threshold: diags[i] = 0 else: raise np.linalg.LinAlgError( "Diagonal entry below threshold in D from LDL decomposition." ) candidate = lu * diags.reshape(1, len(diags)) is_triangular = (candidate[np.triu_indices(len(matrix), k=1)] == 0).all() if is_triangular: chol = candidate else: _, r = qr(candidate.T) chol = r.T return chol def _make_cholesky_unique(chol): """Make a lower triangular cholesky factor unique. Cholesky factors are only unique with the additional requirement that all diagonal elements are positive. This is done automatically by np.linalg.cholesky. Since we calucate cholesky factors by QR decompositions we have to do it manually. It is obvious from that this is admissible because: chol sign_swither sign_switcher.T chol.T = chol chol.T """ sign_switcher = np.sign(np.diagonal(chol)) return chol * sign_switcher def hash_array(arr): """Create a hashsum for fast comparison of numpy arrays.""" # make sure array can be represented exactly in floating point numbers arr = 1 + arr - 1 return sha1(arr.tobytes()).hexdigest() def calculate_trustregion_initial_radius(x): r"""Calculate the initial trust region radius. It is calculated as :math:`0.1\\max(|x|_{\\infty}, 1)`. Args: x (np.ndarray): the start parameter values. Returns: trust_radius (float): initial trust radius """ x_norm = np.linalg.norm(x, ord=np.inf) return 0.1 * max(x_norm, 1) def to_pickle(obj, path): with open(path, "wb") as buffer: cloudpickle.dump(obj, buffer) def read_pickle(path): return pd.read_pickle(path) def isscalar(element): """Jax aware replacement for np.isscalar.""" if np.isscalar(element): return True # call anything a scalar that says it has 0 dimensions return getattr(element, "ndim", -1) == 0 def get_rng(seed): """Construct a random number generator. 
    Args:
        seed (Union[None, int, numpy.random.Generator]): If seed is None or int the
            numpy.random.default_rng is used seeded with seed. If seed is already a
            Generator instance then that instance is used.

    Returns:
        numpy.random.Generator: The random number generator.

    """
    if isinstance(seed, np.random.Generator):
        rng = seed
    elif seed is None or isinstance(seed, int):
        rng = np.random.default_rng(seed)
    else:
        raise TypeError("seed type must be in {None, int, numpy.random.Generator}.")
    return rng


def list_of_dicts_to_dict_of_lists(list_of_dicts):
    """Convert a list of dicts to a dict of lists.

    Args:
        list_of_dicts (list): List of dictionaries. All dictionaries have the same
            keys.

    Returns:
        dict

    Examples:
        >>> list_of_dicts_to_dict_of_lists([{"a": 1, "b": 2}, {"a": 3, "b": 4}])
        {'a': [1, 3], 'b': [2, 4]}

    """
    # The first dict determines the keys and their order.
    return {k: [dic[k] for dic in list_of_dicts] for k in list_of_dicts[0]}


def dict_of_lists_to_list_of_dicts(dict_of_lists):
    """Convert a dict of lists to a list of dicts.

    Args:
        dict_of_lists (dict): Dictionary of lists where all lists have the same
            length.
    Returns:
        list

    Examples:
        >>> dict_of_lists_to_list_of_dicts({'a': [1, 3], 'b': [2, 4]})
        [{'a': 1, 'b': 2}, {'a': 3, 'b': 4}]

    """
    # Iterating the dict yields its keys; zipping the values transposes the table.
    return [
        dict(zip(dict_of_lists, t, strict=False))
        for t in zip(*dict_of_lists.values(), strict=False)
    ]


================================================ FILE: src/optimagic/visualization/__init__.py ================================================


================================================ FILE: src/optimagic/visualization/backends.py ================================================
import itertools
from typing import TYPE_CHECKING, Any, Literal, Protocol, overload, runtime_checkable

import numpy as np
import plotly.graph_objects as go

from optimagic.config import (
    IS_ALTAIR_INSTALLED,
    IS_BOKEH_INSTALLED,
    IS_MATPLOTLIB_INSTALLED,
)
from optimagic.exceptions import InvalidPlottingBackendError, NotInstalledError
from optimagic.visualization.plotting_utilities import LineData, MarkerData

if TYPE_CHECKING:
    # Optional plotting backends are only imported for type checking.
    import altair as alt
    import bokeh
    import matplotlib.pyplot as plt


@runtime_checkable
class LinePlotFunction(Protocol):
    """Structural type of a backend's single-axes line-plot function."""

    def __call__(
        self,
        lines: list[LineData],
        *,
        title: str | None,
        xlabel: str | None,
        xrange: tuple[float, float] | None,
        ylabel: str | None,
        yrange: tuple[float, float] | None,
        template: str | None,
        height: int | None,
        width: int | None,
        legend_properties: dict[str, Any] | None,
        margin_properties: dict[str, Any] | None,
        horizontal_line: float | None,
        marker: MarkerData | None,
        subplot: Any | None = None,
    ) -> Any:
        """Protocol of the line_plot function used for type checking.

        Args:
            ...: All other argument descriptions can be found in the docstring of the
                `line_plot` function.
            subplot: The subplot to which the lines should be plotted. The type of
                this argument depends on the backend used. If not provided, a new
                figure is created.

        """
        ...
@runtime_checkable
class GridLinePlotFunction(Protocol):
    """Structural type of a backend's grid-of-line-plots function."""

    def __call__(
        self,
        lines_list: list[list[LineData]],
        *,
        n_rows: int,
        n_cols: int,
        titles: list[str] | None,
        xlabels: list[str] | None,
        xrange: tuple[float, float] | None,
        share_x: bool,
        ylabels: list[str] | None,
        yrange: tuple[float, float] | None,
        share_y: bool,
        template: str | None,
        height: int | None,
        width: int | None,
        legend_properties: dict[str, Any] | None,
        margin_properties: dict[str, Any] | None,
        plot_title: str | None,
        marker_list: list[MarkerData] | None,
        make_subplot_kwargs: dict[str, Any] | None = None,
    ) -> Any:
        """Protocol of the grid_line_plot function used for type checking.

        Args:
            ...: All other argument descriptions can be found in the docstring of the
                `grid_line_plot` function.

        """
        ...


def _line_plot_plotly(
    lines: list[LineData],
    *,
    title: str | None,
    xlabel: str | None,
    xrange: tuple[float, float] | None,
    ylabel: str | None,
    yrange: tuple[float, float] | None,
    template: str | None,
    height: int | None,
    width: int | None,
    legend_properties: dict[str, Any] | None,
    margin_properties: dict[str, Any] | None,
    horizontal_line: float | None,
    marker: MarkerData | None,
    subplot: tuple[go.Figure, int, int] | None = None,
) -> go.Figure:
    """Create a line plot using Plotly.

    Args:
        ...: All other argument descriptions can be found in the docstring of the
            `line_plot` function.
        subplot: A tuple specifying the subplot to which the lines should be plotted.
            The tuple contains the Plotly `Figure` object, the row index, and the
            column index of the subplot. If not provided, a new `Figure` object is
            created.

    Returns:
        A Plotly Figure object.

    """
    if template is None:
        template = "simple_white"

    if subplot is None:
        fig = go.Figure()
        row, col = None, None
    else:
        # Draw into an existing subplot cell created by make_subplots.
        fig, row, col = subplot

    fig.update_layout(
        title=title,
        template=template,
        height=height,
        width=width,
        legend=legend_properties,
        margin=margin_properties,
    )
    fig.update_xaxes(
        # NOTE(review): the `linebreak` string below appears garbled by
        # extraction (other backends pass "\n"; plotly axis titles normally use
        # "<br>") — confirm against the repository.
        title=xlabel.format(linebreak="
") if xlabel else None,
        range=xrange,
        row=row,
        col=col,
    )
    fig.update_yaxes(
        title=ylabel.format(linebreak="
") if ylabel else None,
        range=yrange,
        row=row,
        col=col,
    )

    if horizontal_line is not None:
        fig.add_hline(
            y=horizontal_line,
            # Match the axis line width so the reference line looks native.
            line_width=fig.layout.yaxis.linewidth or 1,
            opacity=1.0,
            row=row,
            col=col,
        )

    for line in lines:
        trace = go.Scatter(
            x=line.x,
            y=line.y,
            name=line.name,
            line_color=line.color,
            mode="lines",
            showlegend=line.show_in_legend,
            # Grouping by name keeps one legend entry per logical line.
            legendgroup=line.name,
        )
        fig.add_trace(trace, row=row, col=col)

    if marker is not None:
        trace = go.Scatter(
            x=[marker.x],
            y=[marker.y],
            name=marker.name,
            marker_color=marker.color,
            showlegend=False,
        )
        fig.add_trace(trace, row=row, col=col)

    return fig


def _grid_line_plot_plotly(
    lines_list: list[list[LineData]],
    *,
    n_rows: int,
    n_cols: int,
    titles: list[str] | None,
    xlabels: list[str] | None,
    xrange: tuple[float, float] | None,
    share_x: bool,
    ylabels: list[str] | None,
    yrange: tuple[float, float] | None,
    share_y: bool,
    template: str | None,
    height: int | None,
    width: int | None,
    legend_properties: dict[str, Any] | None,
    margin_properties: dict[str, Any] | None,
    plot_title: str | None,
    marker_list: list[MarkerData] | None,
    make_subplot_kwargs: dict[str, Any] | None = None,
) -> go.Figure:
    """Create a grid of line plots using Plotly.

    Args:
        ...: All other argument descriptions can be found in the docstring of the
            `grid_line_plot` function.

    Returns:
        A Plotly Figure object.
""" from plotly.subplots import make_subplots subplot_kwargs = dict( rows=n_rows, cols=n_cols, subplot_titles=titles, shared_yaxes=share_y, shared_xaxes=share_x, horizontal_spacing=0.3 / n_cols, ) subplot_kwargs.update(make_subplot_kwargs or {}) fig = make_subplots(**subplot_kwargs) for i, (row, col) in enumerate( itertools.product(range(1, n_rows + 1), range(1, n_cols + 1)) ): if i >= len(lines_list): break _line_plot_plotly( lines_list[i], title=None, xlabel=xlabels[i] if xlabels else None, xrange=xrange, ylabel=ylabels[i] if ylabels else None, yrange=yrange, template=template, height=height, width=width, legend_properties=legend_properties, margin_properties=margin_properties, horizontal_line=None, marker=marker_list[i] if marker_list else None, subplot=(fig, row, col), ) if plot_title is not None: fig.update_layout(title=plot_title) return fig def _line_plot_matplotlib( lines: list[LineData], *, title: str | None, xlabel: str | None, xrange: tuple[float, float] | None, ylabel: str | None, yrange: tuple[float, float] | None, template: str | None, height: int | None, width: int | None, legend_properties: dict[str, Any] | None, margin_properties: dict[str, Any] | None, horizontal_line: float | None, marker: MarkerData | None, subplot: "plt.Axes | None" = None, ) -> "plt.Axes": """Create a line plot using Matplotlib. Args: ...: All other argument descriptions can be found in the docstring of the `line_plot` function. subplot: A Matplotlib `Axes` object to which the lines should be plotted. If provided, the plot is drawn on the given `Axes`. If not provided, a new `Figure` and `Axes` are created. Returns: A Matplotlib Axes object. """ import matplotlib.pyplot as plt # In interactive environments (like Jupyter), explicitly enable matplotlib's # interactive mode. If it is not enabled, matplotlib's context manager will # revert to non-interactive mode after creating the first figure, causing # subsequent figures to not display inline. 
# See: https://github.com/matplotlib/matplotlib/issues/26716 if plt.get_backend() == "module://matplotlib_inline.backend_inline": plt.ion() if template is None: template = "default" with plt.style.context(template): if subplot is None: px = 1 / plt.rcParams["figure.dpi"] # pixel in inches fig, ax = plt.subplots( figsize=(width * px, height * px) if width and height else None, layout="constrained", ) else: ax = subplot for line in lines: ax.plot( line.x, line.y, label=line.name if line.show_in_legend else None, color=line.color, ) if horizontal_line is not None: ax.axhline( y=horizontal_line, color=ax.spines["left"].get_edgecolor() or "gray", linewidth=ax.spines["left"].get_linewidth() or 1.0, ) if marker is not None: ax.scatter( [marker.x], [marker.y], color=marker.color, label=None, ) ax.set( title=title, xlabel=xlabel.format(linebreak="\n") if xlabel else None, xlim=xrange, ylabel=ylabel.format(linebreak="\n") if ylabel else None, ylim=yrange, ) if subplot is None and legend_properties is not None: fig.legend(**legend_properties) return ax def _grid_line_plot_matplotlib( lines_list: list[list[LineData]], *, n_rows: int, n_cols: int, titles: list[str] | None, xlabels: list[str] | None, xrange: tuple[float, float] | None, share_x: bool, ylabels: list[str] | None, yrange: tuple[float, float] | None, share_y: bool, template: str | None, height: int | None, width: int | None, legend_properties: dict[str, Any] | None, margin_properties: dict[str, Any] | None, plot_title: str | None, marker_list: list[MarkerData] | None, make_subplot_kwargs: dict[str, Any] | None = None, ) -> np.ndarray: """Create a grid of line plots using Matplotlib. Args: ...: All other argument descriptions can be found in the docstring of the `grid_line_plot` function. Returns: A 2D numpy array of Matplotlib Axes objects. 
""" import matplotlib.pyplot as plt px = 1 / plt.rcParams["figure.dpi"] # pixel in inches fig, axes = plt.subplots( nrows=n_rows, ncols=n_cols, squeeze=False, # always return a 2D array of axes figsize=(width * px, height * px) if width and height else None, layout="constrained", ) for i, (row, col) in enumerate(itertools.product(range(n_rows), range(n_cols))): if i >= len(lines_list): axes[row, col].set_visible(False) continue if share_x and row < n_rows - 1: # Share x-axis with bottom subplot in the same column axes[row, col].sharex(axes[-1, col]) axes[row, col].xaxis.set_tick_params(labelbottom=False) if share_y and col > 0: # Share y-axis with left subplot in the same row axes[row, col].sharey(axes[row, 0]) axes[row, col].yaxis.set_tick_params(labelleft=False) _line_plot_matplotlib( lines_list[i], title=titles[i] if titles else None, xlabel=xlabels[i] if xlabels else None, xrange=xrange, ylabel=ylabels[i] if ylabels else None, yrange=yrange, template=template, height=None, width=None, legend_properties=None, margin_properties=None, horizontal_line=None, marker=marker_list[i] if marker_list else None, subplot=axes[row, col], ) if legend_properties is not None: fig.legend(**legend_properties) if plot_title is not None: fig.suptitle(plot_title) return axes def _line_plot_bokeh( lines: list[LineData], *, title: str | None, xlabel: str | None, xrange: tuple[float, float] | None, ylabel: str | None, yrange: tuple[float, float] | None, template: str | None, height: int | None, width: int | None, legend_properties: dict[str, Any] | None, margin_properties: dict[str, Any] | None, horizontal_line: float | None, marker: MarkerData | None, subplot: "bokeh.plotting.figure | None" = None, ) -> "bokeh.plotting.figure": """Create a line plot using Bokeh. Args: ...: All other argument descriptions can be found in the docstring of the `line_plot` function. subplot: A Bokeh `Figure` object to which the lines should be plotted. If provided, the plot is drawn on the given `Figure`. 
If not provided, a new `Figure` is created. Returns: A Bokeh Figure object. """ from bokeh import themes from bokeh.io import curdoc from bokeh.models import Range1d from bokeh.models.annotations import Legend, LegendItem, Span, Title from bokeh.plotting import figure if template is None: template = "light_minimal" curdoc().theme = themes.built_in_themes[template] if subplot is not None: p = subplot else: p = figure() if title is not None: p.title = Title(text=title) if xlabel is not None: p.xaxis.axis_label = xlabel.format(linebreak="\n") if xrange is not None: p.x_range = Range1d(*xrange) if ylabel is not None: p.yaxis.axis_label = ylabel.format(linebreak="\n") if yrange is not None: p.y_range = Range1d(*yrange) if height is not None: p.height = height if width is not None: p.width = width _legend_items = [] for line in lines: glyph = p.line( line.x, line.y, line_color=line.color, line_width=2, ) if line.show_in_legend: _legend_items.append(LegendItem(label=line.name, renderers=[glyph])) # type: ignore[list-item] if horizontal_line is not None: span = Span( location=horizontal_line, dimension="width", line_color=p.yaxis.axis_line_color or "gray", line_width=p.yaxis.axis_line_width or 2, ) p.add_layout(span) if marker is not None: p.scatter( x=[marker.x], y=[marker.y], marker="circle", fill_color=marker.color, line_color=marker.color, size=10, ) if _legend_items: legend_kwargs = legend_properties.copy() if legend_properties else {} place = legend_kwargs.pop("place", "center") text = legend_kwargs.pop("title", None) legend = Legend(items=_legend_items, **(legend_kwargs)) p.add_layout(legend, place=place) p.legend.title = text return p def _grid_line_plot_bokeh( lines_list: list[list[LineData]], *, n_rows: int, n_cols: int, titles: list[str] | None, xlabels: list[str] | None, xrange: tuple[float, float] | None, share_x: bool, ylabels: list[str] | None, yrange: tuple[float, float] | None, share_y: bool, template: str | None, height: int | None, width: int | None, 
legend_properties: dict[str, Any] | None, margin_properties: dict[str, Any] | None, plot_title: str | None, marker_list: list[MarkerData] | None, make_subplot_kwargs: dict[str, Any] | None = None, ) -> "bokeh.models.GridPlot": """Create a grid of line plots using Bokeh. Args: ...: All other argument descriptions can be found in the docstring of the `grid_line_plot` function. Returns: A Bokeh gridplot object. """ from bokeh.layouts import gridplot from bokeh.plotting import figure plots: list[list[figure]] = [] for row in range(n_rows): subplot_row: list[Any] = [] for col in range(n_cols): idx = row * n_cols + col if idx >= len(lines_list): break p = figure() _line_plot_bokeh( lines_list[idx], title=titles[idx] if titles else None, xlabel=xlabels[idx] if xlabels else None, xrange=xrange, ylabel=ylabels[idx] if ylabels else None, yrange=yrange, template=template, height=None, width=None, legend_properties=legend_properties, margin_properties=None, horizontal_line=None, marker=marker_list[idx] if marker_list else None, subplot=p, ) if share_x: if row > 0: # Share x-range with the top-most subplot in the same column p.x_range = plots[0][col].x_range if row < n_rows - 1: # Hide tick labels except for subplots in the last row p.xaxis.major_label_text_font_size = "0pt" if share_y: if col > 0: # Share y-range with the left-most subplot in the same row p.y_range = subplot_row[0].y_range # Hide tick labels except for subplots in the first column p.yaxis.major_label_text_font_size = "0pt" subplot_row.append(p) plots.append(subplot_row) grid = gridplot( # type: ignore[call-overload] plots, height=height // n_rows if height else None, width=width // n_cols if width else None, toolbar_location="right", ) return grid def _line_plot_altair( lines: list[LineData], *, title: str | None, xlabel: str | None, xrange: tuple[float, float] | None, ylabel: str | None, yrange: tuple[float, float] | None, template: str | None, height: int | None, width: int | None, legend_properties: 
dict[str, Any] | None, margin_properties: dict[str, Any] | None, horizontal_line: float | None, marker: MarkerData | None, subplot: None = None, ) -> "alt.Chart": """Create a line plot using Altair. Args: ...: All other argument descriptions can be found in the docstring of the `line_plot` function. subplot: Unused by Altair. Returns: An Altair Chart object. """ import altair as alt import pandas as pd alt.data_transformers.disable_max_rows() if template is None: template = "default" alt.theme.enable(template) dfs = [] for line in lines: df = pd.DataFrame( {"x": line.x, "y": line.y, "name": line.name, "color": line.color} ) dfs.append(df) source = pd.concat(dfs) figure_properties: dict[str, str | int] = {} if title is not None: figure_properties["title"] = title if width is not None: figure_properties["width"] = width if height is not None: figure_properties["height"] = height chart = ( alt.Chart(source) .mark_line() .encode( x=alt.X( "x", title=xlabel.split("{linebreak}") if xlabel else None, scale=alt.Scale(domain=list(xrange)) if xrange else alt.Undefined, ), y=alt.Y( "y", title=ylabel.split("{linebreak}") if ylabel else None, scale=alt.Scale(domain=list(yrange)) if yrange else alt.Undefined, ), color=alt.Color("color:N", scale=None), detail="name:N", ) .properties(**figure_properties) ) if any(line.show_in_legend for line in lines): legend = ( alt.Chart(source) .mark_line() .encode( color=alt.Color( "name:N", title=None, legend=alt.Legend(**(legend_properties or {})), scale=alt.Scale( domain=[line.name for line in lines if line.show_in_legend], range=[ line.color or "" for line in lines if line.show_in_legend ], ), ) ) ) chart = chart + legend if horizontal_line is not None: hline = ( alt.Chart(pd.DataFrame({"y": [horizontal_line]})).mark_rule().encode(y="y") ) chart = chart + hline if marker is not None: marker_chart = ( alt.Chart(pd.DataFrame({"x": [marker.x], "y": [marker.y]})) .mark_point(size=100, shape="circle", color=marker.color, filled=True) 
.encode(x="x", y="y") ) chart = chart + marker_chart return chart.interactive() def _grid_line_plot_altair( lines_list: list[list[LineData]], *, n_rows: int, n_cols: int, titles: list[str] | None, xlabels: list[str] | None, xrange: tuple[float, float] | None, share_x: bool, ylabels: list[str] | None, yrange: tuple[float, float] | None, share_y: bool, template: str | None, height: int | None, width: int | None, legend_properties: dict[str, Any] | None, margin_properties: dict[str, Any] | None, plot_title: str | None, marker_list: list[MarkerData] | None, make_subplot_kwargs: dict[str, Any] | None = None, ) -> "alt.Chart | alt.HConcatChart | alt.VConcatChart": """Create a grid of line plots using Altair. Args: ...: All other argument descriptions can be found in the docstring of the `grid_line_plot` function. Returns: An Altair Chart if the grid contains only one subplot, an Altair HConcatChart if 'n_rows' is 1, otherwise an Altair VConcatChart. """ import altair as alt subplot_height = height // n_rows if height else None subplot_width = width // n_cols if width else None charts = [] for row_idx in range(n_rows): chart_row = [] for col_idx in range(n_cols): i = row_idx * n_cols + col_idx if i >= len(lines_list): break chart = _line_plot_altair( lines_list[i], title=titles[i] if titles else None, xlabel=xlabels[i] if xlabels else None, xrange=xrange, ylabel=ylabels[i] if ylabels else None, yrange=yrange, template=template, height=subplot_height, width=subplot_width, legend_properties=legend_properties, margin_properties=None, horizontal_line=None, marker=marker_list[i] if marker_list else None, subplot=None, ) chart_row.append(chart) charts.append(chart_row) row_selections = [ alt.selection_interval( bind="scales", encodings=["y"], name=f"share_y_row{row_idx}" ) for row_idx in range(n_rows) ] col_selections = [ alt.selection_interval( bind="scales", encodings=["x"], name=f"share_x_col{col_idx}" ) for col_idx in range(n_cols) ] for row_idx, row in enumerate(charts): 
for col_idx in range(len(row)): chart = row[col_idx] params = [] if share_y: # Share y-axis for all subplots in the same row params.append(row_selections[row_idx]) else: # Use independent y-axes for each subplot params.append( alt.selection_interval( bind="scales", encodings=["y"], name=f"ind_y_row{row_idx}_col{col_idx}", ) ) if share_x: # Share x-axis for all subplots in the same column params.append(col_selections[col_idx]) else: # Use independent x-axes for each subplot params.append( alt.selection_interval( bind="scales", encodings=["x"], name=f"ind_x_row{row_idx}_col{col_idx}", ) ) chart = chart.add_params(*params) if share_y and col_idx > 0: # Hide y-axis ticklabels for all subplots except the leftmost column chart = chart.encode(y=alt.Y(axis=alt.Axis(labels=False))) if share_x and row_idx < n_rows - 1: # Hide x-axis ticklabels for all subplots except the bottom row chart = chart.encode(x=alt.X(axis=alt.Axis(labels=False))) charts[row_idx][col_idx] = chart row_charts = [] for row in charts: row_chart: alt.Chart | alt.HConcatChart if len(row) == 1: row_chart = row[0] else: row_chart = alt.hconcat(*row) row_charts.append(row_chart) grid_chart: alt.Chart | alt.HConcatChart | alt.VConcatChart if len(row_charts) == 1: grid_chart = row_charts[0] else: grid_chart = alt.vconcat(*row_charts) if plot_title is not None: grid_chart = grid_chart.properties(title=plot_title) return grid_chart def line_plot( lines: list[LineData], backend: Literal["plotly", "matplotlib", "bokeh", "altair"] = "plotly", *, title: str | None = None, xlabel: str | None = None, xrange: tuple[float, float] | None = None, ylabel: str | None = None, yrange: tuple[float, float] | None = None, template: str | None = None, height: int | None = None, width: int | None = None, legend_properties: dict[str, Any] | None = None, margin_properties: dict[str, Any] | None = None, horizontal_line: float | None = None, marker: MarkerData | None = None, ) -> Any: """Create a line plot corresponding to the 
specified backend.

    Args:
        lines: List of objects each containing data for a line in the plot. The
            order of lines in the list determines the order in which they are
            plotted, with later lines being rendered on top of earlier ones.
        backend: The backend to use for plotting.
        title: Title of the plot.
        xlabel: Label for the x-axis.
        xrange: View limits for the x-axis.
        ylabel: Label for the y-axis.
        yrange: View limits for the y-axis.
        template: Backend-specific template for styling the plot.
        height: Height of the plot (in pixels).
        width: Width of the plot (in pixels).
        legend_properties: Backend-specific properties for the legend.
        margin_properties: Backend-specific properties for the plot margins.
        horizontal_line: If provided, a horizontal line is drawn at the specified
            y-value.
        marker: An object containing data for a marker in the plot.

    Returns:
        A figure object corresponding to the specified backend.

    """
    # Dispatch to the backend-specific implementation.
    _line_plot_backend_function = _get_plot_function(backend, grid_plot=False)
    fig = _line_plot_backend_function(
        lines,
        title=title,
        xlabel=xlabel,
        xrange=xrange,
        ylabel=ylabel,
        yrange=yrange,
        template=template,
        height=height,
        width=width,
        legend_properties=legend_properties,
        margin_properties=margin_properties,
        horizontal_line=horizontal_line,
        marker=marker,
    )
    return fig


def grid_line_plot(
    lines_list: list[list[LineData]],
    backend: Literal["plotly", "matplotlib", "bokeh", "altair"] = "plotly",
    *,
    n_rows: int,
    n_cols: int,
    titles: list[str] | None = None,
    xlabels: list[str] | None = None,
    xrange: tuple[float, float] | None = None,
    share_x: bool = False,
    ylabels: list[str] | None = None,
    yrange: tuple[float, float] | None = None,
    share_y: bool = False,
    template: str | None = None,
    height: int | None = None,
    width: int | None = None,
    legend_properties: dict[str, Any] | None = None,
    margin_properties: dict[str, Any] | None = None,
    plot_title: str | None = None,
    marker_list: list[MarkerData] | None = None,
    make_subplot_kwargs: dict[str, Any] | None = None,
) -> Any:
    """Create a grid of line plots corresponding to the specified backend.

    Args:
        lines_list: A list where each element is a list of objects containing data
            for the lines in a subplot. The order of sublists determines the order
            of subplots in the grid (row-wise), and the order of lines within each
            sublist determines the order of lines in that subplot.
        backend: The backend to use for plotting.
        n_rows: Number of rows in the grid.
        n_cols: Number of columns in the grid.
        titles: Titles for each subplot in the grid.
        xlabels: Labels for the x-axis of each subplot.
        xrange: View limits for the x-axis of each subplot.
        share_x: If True, all subplots share the same x-axis limits and each
            subplot in a column actually shares the x-axis.
        ylabels: Labels for the y-axis of each subplot.
        yrange: View limits for the y-axis of each subplot.
        share_y: If True, all subplots share the same y-axis limits and each
            subplot in a row actually shares the y-axis.
        template: Backend-specific template for styling the plots.
        height: Height of the entire grid plot (in pixels).
        width: Width of the entire grid plot (in pixels).
        legend_properties: Backend-specific properties for the legend.
        margin_properties: Backend-specific properties for the plot margins.
        plot_title: Title for the entire grid plot.
        marker_list: A list where each element is an object containing data for a
            marker in a subplot. The order of objects in the list determines the
            subplot on which the marker is plotted.

    Returns:
        A figure object corresponding to the specified backend.

    """
    _grid_line_plot_backend_function = _get_plot_function(backend, grid_plot=True)
    fig = _grid_line_plot_backend_function(
        lines_list,
        n_rows=n_rows,
        n_cols=n_cols,
        titles=titles,
        xlabels=xlabels,
        xrange=xrange,
        share_x=share_x,
        ylabels=ylabels,
        yrange=yrange,
        share_y=share_y,
        template=template,
        height=height,
        width=width,
        legend_properties=legend_properties,
        margin_properties=margin_properties,
        plot_title=plot_title,
        marker_list=marker_list,
        make_subplot_kwargs=make_subplot_kwargs,
    )
    return fig


# Maps backend name to (availability flag, single-plot function, grid function).
BACKEND_AVAILABILITY_AND_LINE_PLOT_FUNCTION: dict[
    str, tuple[bool, LinePlotFunction, GridLinePlotFunction]
] = {
    "plotly": (True, _line_plot_plotly, _grid_line_plot_plotly),
    "matplotlib": (
        IS_MATPLOTLIB_INSTALLED,
        _line_plot_matplotlib,
        _grid_line_plot_matplotlib,
    ),
    "bokeh": (
        IS_BOKEH_INSTALLED,
        _line_plot_bokeh,
        _grid_line_plot_bokeh,
    ),
    "altair": (
        IS_ALTAIR_INSTALLED,
        _line_plot_altair,
        _grid_line_plot_altair,
    ),
}


@overload
def _get_plot_function(
    backend: Literal["plotly", "matplotlib", "bokeh", "altair"],
    grid_plot: Literal[False],
) -> LinePlotFunction: ...


@overload
def _get_plot_function(
    backend: Literal["plotly", "matplotlib", "bokeh", "altair"],
    grid_plot: Literal[True],
) -> GridLinePlotFunction: ...


def _get_plot_function(
    backend: str, grid_plot: bool
) -> LinePlotFunction | GridLinePlotFunction:
    # Resolve the plotting function for a backend, raising a helpful error if
    # the backend name is unknown or the backend package is not installed.
    if backend not in BACKEND_AVAILABILITY_AND_LINE_PLOT_FUNCTION:
        msg = (
            f"Invalid plotting backend '{backend}'. "
            f"Available backends: "
            f"{', '.join(BACKEND_AVAILABILITY_AND_LINE_PLOT_FUNCTION.keys())}"
        )
        raise InvalidPlottingBackendError(msg)
    (
        _is_backend_available,
        _line_plot_backend_function,
        _grid_line_plot_backend_function,
    ) = BACKEND_AVAILABILITY_AND_LINE_PLOT_FUNCTION[backend]
    if not _is_backend_available:
        msg = (
            f"The {backend} backend is not installed. "
            f"Install the package using either 'pip install {backend}' or "
            f"'conda install -c conda-forge {backend}'"
        )
        raise NotInstalledError(msg)
    if grid_plot:
        return _grid_line_plot_backend_function
    else:
        return _line_plot_backend_function



================================================
FILE: src/optimagic/visualization/convergence_plot.py
================================================
from typing import Any, Literal

import numpy as np
import pandas as pd

from optimagic.benchmarking.process_benchmark_results import (
    process_benchmark_results,
)
from optimagic.config import DEFAULT_PALETTE
from optimagic.utilities import propose_alternatives
from optimagic.visualization.backends import grid_line_plot, line_plot
from optimagic.visualization.plotting_utilities import LineData, get_palette_cycle

BACKEND_TO_CONVERGENCE_PLOT_LEGEND_PROPERTIES: dict[str, dict[str, Any]] = {
    "plotly": {},
    "matplotlib": {"loc": "outside right upper", "fontsize": "x-small"},
    "bokeh": {
        "location": "top_right",
        "place": "right",
        "label_text_font_size": "8pt",
    },
    "altair": {"orient": "right"},
}

BACKEND_TO_CONVERGENCE_PLOT_MARGIN_PROPERTIES: dict[str, dict[str, int]] = {
    "plotly": {"l": 10, "r": 10, "t": 30, "b": 10},
    # "matplotlib": handles margins automatically via constrained layout
}

OUTCOME_TO_CONVERGENCE_PLOT_YLABEL: dict[str, str] = {
    "criterion": "Current Function Value",
    "monotone_criterion": "Best Function Value Found So Far",
    "criterion_normalized": (
        "Share of Function Distance to Optimum{linebreak}"
        "Missing From Current Criterion Value"
    ),
    "monotone_criterion_normalized": (
        "Share of Function Distance to Optimum{linebreak}Missing From Best So Far"
    ),
    "parameter_distance": "Distance Between Current and{linebreak}Optimal Parameters",
    "parameter_distance_normalized": (
        "Share of Parameter Distance to Optimum{linebreak}"
        "Missing From Current Parameters"
    ),
    "monotone_parameter_distance_normalized": (
        "Share of Parameter Distance to Optimum{linebreak}"
        "Missing From the Best Parameters So Far"
    ),
    "monotone_parameter_distance": (
        "Distance Between the Best Parameters{linebreak}"
        "So Far and the Optimal Parameters"
    ),
}

RUNTIME_MEASURE_TO_CONVERGENCE_PLOT_XLABEL: dict[str, str] = {
    "n_evaluations": "Number of Function Evaluations",
    "walltime": "Elapsed Time",
    "n_batches": "Number of Batches",
}


def convergence_plot(
    problems: dict[str, dict[str, Any]],
    results: dict[tuple[str, str], dict[str, Any]],
    *,
    problem_subset: list[str] | None = None,
    algorithm_subset: list[str] | None = None,
    n_cols: int = 2,
    distance_measure: Literal["criterion", "parameter_distance"] = "criterion",
    monotone: bool = True,
    normalize_distance: bool = True,
    runtime_measure: Literal[
        "n_evaluations", "walltime", "n_batches"
    ] = "n_evaluations",
    stopping_criterion: Literal["x", "y", "x_and_y", "x_or_y"] = "y",
    x_precision: float = 1e-4,
    y_precision: float = 1e-4,
    combine_plots_in_grid: bool = True,
    backend: Literal["plotly", "matplotlib", "bokeh", "altair"] = "plotly",
    template: str | None = None,
    palette: list[str] | str = DEFAULT_PALETTE,
) -> Any:
    """Plot convergence of optimizers for a set of problems.

    This creates a grid of plots, showing the convergence of the different
    algorithms on each problem. The faster a line falls, the faster the algorithm
    improved on the problem. The algorithm converged where its line reaches 0 (if
    normalize_distance is True) or the horizontal line labeled "true solution".

    Each plot shows on the x axis the runtime_measure, which can be walltime,
    number of evaluations or number of batches. Each algorithm's convergence is a
    line in the plot. Convergence can be measured by the criterion value of the
    particular time/evaluation. The convergence can be made monotone (i.e. always
    taking the best value so far) or normalized such that the distance from the
    start to the true solution is one.

    Args:
        problems: optimagic benchmarking problems dictionary. Keys are the problem
            names. Values contain information on the problem, including the
            solution value.
        results: optimagic benchmarking results dictionary. Keys are tuples of the
            form (problem, algorithm), values are dictionaries of the collected
            information on the benchmark run, including 'criterion_history' and
            'time_history'.
        problem_subset: List of problem names. These must be a subset of the keys
            of the problems dictionary. If provided the convergence plot is only
            created for the problems specified in this list.
        algorithm_subset: List of algorithm names. These must be a subset of the
            keys of the optimizer_options passed to run_benchmark. If provided
            only the convergence of the given algorithms are shown.
        n_cols: number of columns in the plot of grids. The number of rows is
            determined automatically.
        distance_measure: One of "criterion", "parameter_distance".
        monotone: If True the best found criterion value so far is plotted. If
            False the particular criterion evaluation of that time is used.
        normalize_distance: If True the progress is scaled by the total distance
            between the start value and the optimal value, i.e. 1 means the
            algorithm is as far from the solution as the start value and 0 means
            the algorithm has reached the solution value.
        runtime_measure: This is the runtime until the desired convergence was
            reached by an algorithm.
        stopping_criterion: Determines how convergence is determined from the two
            precisions. To effectively disable convergence, set `x_precision`
            and/or `y_precision` to very small values (or 0).
        x_precision: how close an algorithm must have gotten to the true parameter
            values (as percent of the Euclidean distance between start and
            solution parameters) before the criterion for clipping and convergence
            is fulfilled.
        y_precision: how close an algorithm must have gotten to the true criterion
            values (as percent of the distance between start and solution
            criterion value) before the criterion for clipping and convergence is
            fulfilled.
        combine_plots_in_grid: Whether to return a single figure containing
            subplots for each factor pair or a dictionary of individual plots.
            Default is True.
        backend: The backend to use for plotting. Default is "plotly".
        template: The template for the figure. If not specified, the default
            template of the backend is used. For the 'bokeh' and 'altair'
            backends, this changes the global theme, which affects all plots from
            that backend in the session.
        palette: The coloring palette for traces. Default is the D3 qualitative
            palette.

    Returns:
        The figure object containing the convergence plot if
        `combine_plots_in_grid` is True. Otherwise, a dictionary mapping problem
        names to their respective figure objects is returned.

    """
    # ==================================================================================
    # Process inputs

    df, _ = process_benchmark_results(
        problems=problems,
        results=results,
        stopping_criterion=stopping_criterion,
        x_precision=x_precision,
        y_precision=y_precision,
    )
    if isinstance(problem_subset, str):
        problem_subset = [problem_subset]
    if isinstance(algorithm_subset, str):
        algorithm_subset = [algorithm_subset]
    _check_only_allowed_subset_provided(problem_subset, df["problem"], "problem")
    _check_only_allowed_subset_provided(algorithm_subset, df["algorithm"], "algorithm")
    if problem_subset is not None:
        df = df[df["problem"].isin(problem_subset)]
    if algorithm_subset is not None:
        df = df[df["algorithm"].isin(algorithm_subset)]

    # ==================================================================================
    # Extract backend-agnostic plotting data

    # Column name in df, e.g. "monotone_criterion_normalized".
    outcome = (
        f"{'monotone_' if monotone else ''}"
        + distance_measure
        + f"{'_normalized' if normalize_distance else ''}"
    )
    lines_list, titles = _extract_convergence_plot_lines(
        df=df,
        problems=problems,
        runtime_measure=runtime_measure,
        outcome=outcome,
        palette=palette,
        combine_plots_in_grid=combine_plots_in_grid,
        backend=backend,
    )
    n_rows = int(np.ceil(len(lines_list) / n_cols))

    #
    # ==================================================================================
    # Generate the figure

    if combine_plots_in_grid:
        fig = grid_line_plot(
            lines_list,
            backend=backend,
            n_rows=n_rows,
            n_cols=n_cols,
            titles=titles,
            xlabels=(
                [RUNTIME_MEASURE_TO_CONVERGENCE_PLOT_XLABEL[runtime_measure]]
                * len(lines_list)
            ),
            ylabels=[OUTCOME_TO_CONVERGENCE_PLOT_YLABEL[outcome]] * len(lines_list),
            template=template,
            # Scale the overall figure size with the grid dimensions.
            height=320 * n_rows,
            width=500 * n_cols,
            legend_properties=BACKEND_TO_CONVERGENCE_PLOT_LEGEND_PROPERTIES.get(
                backend, None
            ),
            margin_properties=BACKEND_TO_CONVERGENCE_PLOT_MARGIN_PROPERTIES.get(
                backend, None
            ),
        )
        return fig
    else:
        # One separate figure per problem, keyed by a normalized problem title.
        fig_dict = {}
        for i, subplot_lines in enumerate(lines_list):
            fig = line_plot(
                subplot_lines,
                backend=backend,
                title=titles[i],
                xlabel=RUNTIME_MEASURE_TO_CONVERGENCE_PLOT_XLABEL[runtime_measure],
                ylabel=OUTCOME_TO_CONVERGENCE_PLOT_YLABEL[outcome],
                template=template,
                height=320,
                width=500,
                legend_properties=BACKEND_TO_CONVERGENCE_PLOT_LEGEND_PROPERTIES.get(
                    backend, None
                ),
                margin_properties=BACKEND_TO_CONVERGENCE_PLOT_MARGIN_PROPERTIES.get(
                    backend, None
                ),
            )
            key = titles[i].replace(" ", "_").lower()
            fig_dict[key] = fig
        return fig_dict


def _extract_convergence_plot_lines(
    df: pd.DataFrame,
    problems: dict[str, dict[str, Any]],
    runtime_measure: str,
    outcome: str,
    palette: list[str] | str,
    combine_plots_in_grid: bool,
    backend: str,
) -> tuple[list[list[LineData]], list[str]]:
    """Build one list of LineData per problem plus matching subplot titles."""
    lines_list = []  # container for all subplots
    titles = []
    for i, (_prob_name, _prob_data) in enumerate(df.groupby("problem", sort=False)):
        prob_name = str(_prob_name)
        subplot_lines = []  # container for data of traces in individual subplot
        # Restart the palette for every problem so colors match across subplots.
        palette_cycle = get_palette_cycle(palette)
        if runtime_measure == "n_batches":
            # Within a batch, keep the best (minimal) value per algorithm.
            to_plot = (
                _prob_data.groupby(["algorithm", runtime_measure]).min().reset_index()
            )
        else:
            to_plot = _prob_data
        show_in_legend = True
        if combine_plots_in_grid:
            # If combining plots, only show in legend of first subplot
            # For 'bokeh' backend, show in legend for all subplots
            # as it does not support single legend on grid plots.
            # See: https://github.com/bokeh/bokeh/issues/7607
            show_in_legend = (i == 0) or (backend == "bokeh")
        for alg, group in to_plot.groupby("algorithm", sort=False):
            line_data = LineData(
                x=group[runtime_measure].to_numpy(),
                y=group[outcome].to_numpy(),
                name=str(alg),
                color=next(palette_cycle),
                # if combining plots, only show legend in first subplot
                show_in_legend=show_in_legend,
            )
            subplot_lines.append(line_data)
        if outcome in ("criterion", "monotone_criterion"):
            # Add a flat reference line at the known optimal criterion value.
            f_opt = problems[prob_name]["solution"]["value"]
            line_data = LineData(
                x=to_plot[runtime_measure].to_numpy(),
                y=np.full(to_plot[runtime_measure].shape, f_opt),
                name="true solution",
                color=next(palette_cycle),
                # if combining plots, only show legend in first subplot
                show_in_legend=show_in_legend,
            )
            subplot_lines.append(line_data)
        lines_list.append(subplot_lines)
        titles.append(prob_name.replace("_", " ").title())
    return lines_list, titles


def _check_only_allowed_subset_provided(
    subset: list[str] | None, allowed: pd.Series | list[str], name: str
) -> None:
    """Check if all entries of a proposed subset are in a Series.

    Args:
        subset: If None, no checks are performed. Else a ValueError is raised
            listing all entries that are not in the provided Series.
        allowed: allowed entries.
        name: name of the provided entries to use for the ValueError.

    Raises:
        ValueError

    """
    allowed_set = set(allowed)
    if subset is not None:
        missing = [entry for entry in subset if entry not in allowed_set]
        if missing:
            missing_msg = ""
            for entry in missing:
                proposed = propose_alternatives(entry, allowed_set)
                missing_msg += f"Invalid {name}: {entry}. Did you mean {proposed}?\n"
            raise ValueError(missing_msg)



================================================
FILE: src/optimagic/visualization/deviation_plot.py
================================================
import pandas as pd
import plotly.express as px

from optimagic.benchmarking.process_benchmark_results import (
    process_benchmark_results,
)
from optimagic.config import PLOTLY_TEMPLATE


def deviation_plot(
    problems,
    results,
    *,
    runtime_measure="n_evaluations",
    distance_measure="criterion",
    monotone=True,
    template=PLOTLY_TEMPLATE,
):
    """Plot average convergence of optimizers for a set of problems.

    Returns an aggregated version of the convergence plot, showing the
    convergence of the different algorithms, averaged over a problem set.
    The faster a line falls, the faster the algorithm improved on average. The x
    axis is the runtime_measure, which can be walltime or number of evaluations.
    The y axis is the average over the convergence measures of the problems in
    the set. Convergence can be measured by the criterion value of the particular
    time/evaluation. The convergence can be made monotone by always taking the
    best value.

    Args:
        problems (dict): optimagic benchmarking problems dictionary. Keys are the
            problem names. Values contain information on the problem, including
            the solution value.
        results (dict): optimagic benchmarking results dictionary. Keys are
            tuples of the form (problem, algorithm), values are dictionaries of
            the collected information on the benchmark run, including
            'criterion_history' and 'time_history'.
        runtime_measure (str): One of "n_evaluations", "n_batches".
        distance_measure (str): One of "criterion", "parameter_distance".
        monotone (bool): If True the best found criterion value so far is
            plotted. If False the particular criterion evaluation of that time is
            used.
        template (str): The template for the figure. Default is "plotly_white".
Returns: plotly.Figure """ df, _ = process_benchmark_results( problems=problems, results=results, stopping_criterion="y", x_precision=1e-6, y_precision=1e-6, ) outcome = f"{'monotone_' if monotone else ''}" + distance_measure + "_normalized" deviations = ( df.groupby(["problem", "algorithm", runtime_measure]) .min()[outcome] .reindex( pd.MultiIndex.from_product( [ df["problem"].unique(), df["algorithm"].unique(), range(df[runtime_measure].min(), df[runtime_measure].max() + 1), ], names=["problem", "algorithm", runtime_measure], ) ) .ffill() .reset_index() ) average_deviations = ( deviations.groupby(["algorithm", runtime_measure]) .mean(numeric_only=True)[outcome] .reset_index() ) fig = px.line(average_deviations, x=runtime_measure, y=outcome, color="algorithm") y_labels = { "criterion_normalized": "Share of Function Distance to Optimum
" "Missing From Current Criterion Value", "monotone_criterion_normalized": "Share of Function Distance to Optimum
" "Missing From Best So Far", "parameter_distance_normalized": "Share of Parameter Distance to Optimum
" "Missing From Current Parameters", "monotone_parameter_distance_normalized": "Share of the Parameter Distance " "to Optimum
Missing From the Best Parameters So Far", } x_labels = { "n_evaluations": "Numver of Function Evaluations", "n_batches": "Number of Batches", } fig.update_layout( xaxis_title=x_labels[runtime_measure], yaxis_title=y_labels[outcome], title=None, height=300, width=500, margin={"l": 10, "r": 10, "t": 30, "b": 10}, template=template, ) return fig ================================================ FILE: src/optimagic/visualization/history_plots.py ================================================ import inspect import itertools from dataclasses import dataclass from pathlib import Path from typing import Any, Callable, Literal import numpy as np from pybaum import leaf_names, tree_flatten, tree_just_flatten, tree_unflatten from optimagic.config import DEFAULT_PALETTE from optimagic.logging.logger import LogReader, SQLiteLogOptions from optimagic.optimization.algorithm import Algorithm from optimagic.optimization.history import History from optimagic.optimization.optimize_result import OptimizeResult from optimagic.parameters.tree_registry import get_registry from optimagic.typing import IterationHistory, PyTree from optimagic.visualization.backends import line_plot from optimagic.visualization.plotting_utilities import LineData, get_palette_cycle BACKEND_TO_HISTORY_PLOT_LEGEND_PROPERTIES: dict[str, dict[str, Any]] = { "plotly": { "yanchor": "top", "xanchor": "right", "y": 0.95, "x": 0.95, }, "matplotlib": { "loc": "upper right", }, "bokeh": { "location": "top_right", }, "altair": { "orient": "top-right", }, } ResultOrPath = OptimizeResult | str | Path def criterion_plot( results: ResultOrPath | list[ResultOrPath] | dict[str, ResultOrPath], names: list[str] | str | None = None, max_evaluations: int | None = None, backend: Literal["plotly", "matplotlib", "bokeh", "altair"] = "plotly", template: str | None = None, palette: list[str] | str = DEFAULT_PALETTE, stack_multistart: bool = False, monotone: bool = False, show_exploration: bool = False, ) -> Any: """Plot the criterion 
def criterion_plot(
    results: ResultOrPath | list[ResultOrPath] | dict[str, ResultOrPath],
    names: list[str] | str | None = None,
    max_evaluations: int | None = None,
    backend: Literal["plotly", "matplotlib", "bokeh", "altair"] = "plotly",
    template: str | None = None,
    palette: list[str] | str = DEFAULT_PALETTE,
    stack_multistart: bool = False,
    monotone: bool = False,
    show_exploration: bool = False,
) -> Any:
    """Plot the criterion history of one or several optimizations.

    Args:
        results: An optimization result (or list of, or dict of results) with
            collected history, or path(s) to it. If dict, then the key is used
            as the name in the legend.
        names: Legend name(s) for the results. If given, must match the number
            of results.
        max_evaluations: Clip the criterion history after that many entries.
        backend: The backend to use for plotting. Default is "plotly".
        template: The template for the figure. If not specified, the default
            template of the backend is used. For the 'bokeh' and 'altair'
            backends, this changes the global theme, which affects all plots
            from that backend in the session.
        palette: The coloring palette for traces. Default is the D3 qualitative
            palette.
        stack_multistart: Whether to combine multistart histories into a single
            history. Default is False.
        monotone: If True, the criterion plot becomes monotone in the sense
            that at each iteration the current best criterion value is
            displayed. Default is False.
        show_exploration: If True, exploration samples of a multistart
            optimization are visualized. Default is False.

    Returns:
        The figure object containing the criterion plot.

    """
    # ==================================================================================
    # Process inputs
    colors = get_palette_cycle(palette)
    results_by_name = _harmonize_inputs_to_dict(results, names)

    # ==================================================================================
    # Extract backend-agnostic plotting data from results
    histories = _retrieve_optimization_data_from_results(
        results=results_by_name,
        stack_multistart=stack_multistart,
        show_exploration=show_exploration,
        plot_name="criterion_plot",
    )

    main_lines, background_lines = _extract_criterion_plot_lines(
        data=histories,
        max_evaluations=max_evaluations,
        palette_cycle=colors,
        stack_multistart=stack_multistart,
        monotone=monotone,
    )

    # ==================================================================================
    # Generate the figure. Background (multistart) lines are drawn first so the
    # main optimization paths are plotted on top of them.
    return line_plot(
        lines=background_lines + main_lines,
        backend=backend,
        xlabel="No. of criterion evaluations",
        ylabel="Criterion value",
        template=template,
        legend_properties=BACKEND_TO_HISTORY_PLOT_LEGEND_PROPERTIES.get(backend, None),
    )


def _harmonize_inputs_to_dict(
    results: ResultOrPath | list[ResultOrPath] | dict[str, ResultOrPath],
    names: list[str] | str | None,
) -> dict[str, ResultOrPath]:
    """Convert all valid inputs for results and names to dict[str, OptimizeResult]."""
    # normalize scalar inputs to the list case
    if names is not None and not isinstance(names, list):
        names = [names]
    if isinstance(results, (OptimizeResult, str, Path)):
        results = [results]

    if names is not None and len(names) != len(results):
        raise ValueError("len(results) needs to be equal to len(names).")

    if isinstance(results, dict):
        # explicit names override the dict keys
        if names is None:
            labeled = results
        else:
            labeled = dict(zip(names, list(results.values()), strict=False))
    else:
        # unlabeled iterable of results: fall back to positional names
        if names is None:
            names = [str(i) for i in range(len(results))]
        labeled = dict(zip(names, results, strict=False))

    # convert keys to strings
    return {_convert_key_to_str(key): value for key, value in labeled.items()}
def _convert_key_to_str(key: Any) -> str:
    """Turn a legend key into a string.

    Algorithm classes and instances are mapped to their ``name`` attribute;
    everything else is converted via ``str``.
    """
    is_algorithm_class = inspect.isclass(key) and issubclass(key, Algorithm)
    if is_algorithm_class or isinstance(key, Algorithm):
        return str(key.name)
    return str(key)


def params_plot(
    result: ResultOrPath,
    selector: Callable[[PyTree], PyTree] | None = None,
    max_evaluations: int | None = None,
    backend: Literal["plotly", "matplotlib", "bokeh", "altair"] = "plotly",
    template: str | None = None,
    palette: list[str] | str = DEFAULT_PALETTE,
    show_exploration: bool = False,
) -> Any:
    """Plot the params history of an optimization.

    Args:
        result: An optimization result with collected history, or path to it.
        selector: A callable that takes params and returns a subset of params.
            If provided, only the selected subset of params is plotted.
        max_evaluations: Clip the criterion history after that many entries.
        backend: The backend to use for plotting. Default is "plotly".
        template: The template for the figure. If not specified, the default
            template of the backend is used. For the 'bokeh' and 'altair'
            backends, this changes the global theme, which affects all plots
            from that backend in the session.
        palette: The coloring palette for traces. Default is the D3 qualitative
            palette.
        show_exploration: If True, exploration samples of a multistart
            optimization are visualized. Default is False.

    Returns:
        The figure object containing the params plot.

    """
    # ==================================================================================
    # Process inputs
    colors = get_palette_cycle(palette)

    # ==================================================================================
    # Extract backend-agnostic plotting data from results
    optimize_data = _retrieve_optimization_data_from_single_result(
        result=result,
        stack_multistart=True,
        show_exploration=show_exploration,
        plot_name="params_plot",
    )

    lines = _extract_params_plot_lines(
        data=optimize_data,
        selector=selector,
        max_evaluations=max_evaluations,
        palette_cycle=colors,
    )

    # ==================================================================================
    # Generate the figure
    return line_plot(
        lines=lines,
        backend=backend,
        xlabel="No. of criterion evaluations",
        ylabel="Parameter value",
        template=template,
        legend_properties=BACKEND_TO_HISTORY_PLOT_LEGEND_PROPERTIES.get(backend, None),
    )


@dataclass(frozen=True)
class _PlottingMultistartHistory:
    """Backend-agnostic container for one optimization history plus metadata.

    For multistart optimizations it additionally carries the local histories
    and, optionally, their stacked combination. This dataclass is only used
    internally.
    """

    history: History  # main (best) optimization history
    name: str | None  # legend label of the result
    start_params: PyTree
    is_multistart: bool
    local_histories: list[History] | list[IterationHistory] | None
    stacked_local_histories: History | None


def _retrieve_optimization_data_from_results(
    results: dict[str, ResultOrPath],
    stack_multistart: bool,
    show_exploration: bool,
    plot_name: str,
) -> list[_PlottingMultistartHistory]:
    """Retrieve plotting data for every entry of the results dictionary.

    Delegates to the single-result retrieval function, using each dict key as
    the result name.
    """
    return [
        _retrieve_optimization_data_from_single_result(
            result=res,
            stack_multistart=stack_multistart,
            show_exploration=show_exploration,
            plot_name=plot_name,
            res_name=name,
        )
        for name, res in results.items()
    ]
def _retrieve_optimization_data_from_single_result(
    result: ResultOrPath,
    stack_multistart: bool,
    show_exploration: bool,
    plot_name: str,
    res_name: str | None = None,
) -> _PlottingMultistartHistory:
    """Retrieve data from a single result (OptimizeResult or database).

    Dispatches on the type of ``result``: in-memory ``OptimizeResult`` objects
    and paths to SQLite log files are handled by separate helpers.

    Args:
        result: An optimization result with collected history, or path to it.
        stack_multistart: Whether to combine multistart histories into a single
            history. Default is False.
        show_exploration: If True, exploration samples of a multistart
            optimization are visualized. Default is False.
        plot_name: Name of the plotting function that calls this function. Used
            for raising errors.
        res_name: Name of the result.

    Returns:
        A data object containing the history, metadata, and local histories of
        the optimization result.

    Raises:
        TypeError: If ``result`` is neither an OptimizeResult nor a path.

    """
    if isinstance(result, OptimizeResult):
        data = _retrieve_optimization_data_from_result_object(
            res=result,
            stack_multistart=stack_multistart,
            show_exploration=show_exploration,
            plot_name=plot_name,
            res_name=res_name,
        )
    elif isinstance(result, (str, Path)):
        # note: the database reader does not need plot_name because the
        # presence of a history is guaranteed by the log file itself
        data = _retrieve_optimization_data_from_database(
            res=result,
            stack_multistart=stack_multistart,
            show_exploration=show_exploration,
            res_name=res_name,
        )
    else:
        msg = (
            "result must be an OptimizeResult or a path to a log file, "
            f"but is type {type(result)}."
        )
        raise TypeError(msg)
    return data


def _retrieve_optimization_data_from_result_object(
    res: OptimizeResult,
    stack_multistart: bool,
    show_exploration: bool,
    plot_name: str,
    res_name: str | None = None,
) -> _PlottingMultistartHistory:
    """Retrieve optimization data from result object.

    Args:
        res: An optimization result object.
        stack_multistart: Whether to combine multistart histories into a single
            history. Default is False.
        show_exploration: If True, exploration samples of a multistart
            optimization are visualized. Default is False.
        plot_name: Name of the plotting function that calls this function. Used
            for raising errors.
        res_name: Name of the result.

    Returns:
        A data object containing the history, metadata, and local histories of
        the optimization result.

    Raises:
        ValueError: If the result was created without history collection.

    """
    if res.history is None:
        msg = (
            f"{plot_name} requires an optimize result with history. Enable history "
            "collection by setting collect_history=True when calling maximize or "
            "minimize."
        )
        raise ValueError(msg)

    if res.multistart_info:
        # drop local optima that were run without history collection
        local_histories = [
            opt.history
            for opt in res.multistart_info.local_optima
            if opt.history is not None
        ]

        if stack_multistart:
            stacked = _get_stacked_local_histories(local_histories, res.direction)
            if show_exploration:
                # exploration entries are prepended in reverse order so the
                # stacked history ends with the actual optimization path
                fun = res.multistart_info.exploration_results[::-1] + stacked.fun
                params = res.multistart_info.exploration_sample[::-1] + stacked.params

                stacked = History(
                    direction=stacked.direction,
                    fun=fun,
                    params=params,
                    # TODO: This needs to be fixed
                    start_time=len(fun) * [None],  # type: ignore
                    stop_time=len(fun) * [None],  # type: ignore
                    batches=len(fun) * [None],  # type: ignore
                    task=len(fun) * [None],  # type: ignore
                )
        else:
            stacked = None
    else:
        local_histories = None
        stacked = None

    data = _PlottingMultistartHistory(
        history=res.history,
        name=res_name,
        start_params=res.start_params,
        is_multistart=res.multistart_info is not None,
        local_histories=local_histories,
        stacked_local_histories=stacked,
    )
    return data


def _retrieve_optimization_data_from_database(
    res: str | Path,
    stack_multistart: bool,
    show_exploration: bool,
    res_name: str | None = None,
) -> _PlottingMultistartHistory:
    """Retrieve optimization data from a database.

    Args:
        res: A path to an optimization database.
        stack_multistart: Whether to combine multistart histories into a single
            history. Default is False.
        show_exploration: If True, exploration samples of a multistart
            optimization are visualized. Default is False.
        res_name: Name of the result.

    Returns:
        A data object containing the history, metadata, and local histories of
        the optimization result.

    """
    reader: LogReader = LogReader.from_options(SQLiteLogOptions(res))
    _problem_table = reader.problem_df
    # the last row of the problem table belongs to the most recent run
    direction = _problem_table["direction"].tolist()[-1]

    multistart_history = reader.read_multistart_history(direction)
    _history = multistart_history.history
    local_histories = multistart_history.local_histories
    exploration = multistart_history.exploration

    if stack_multistart and local_histories is not None:
        stacked = _get_stacked_local_histories(local_histories, direction, _history)
        if show_exploration:
            # NOTE(review): `stacked` is a History object here, but is indexed
            # like a dict; the `type: ignore` comments suggest this branch may
            # not work as written — confirm against a multistart log file.
            stacked["params"] = exploration["params"][::-1] + stacked["params"]  # type: ignore
            stacked["criterion"] = exploration["criterion"][::-1] + stacked["criterion"]  # type: ignore
    else:
        stacked = None

    history = History(
        direction=direction,
        fun=_history["fun"],
        params=_history["params"],
        start_time=_history["time"],
        # TODO (@janosg): Retrieve `stop_time` from `hist` once it is available.
        # https://github.com/optimagic-dev/optimagic/pull/553
        stop_time=len(_history["fun"]) * [None],  # type: ignore
        task=len(_history["fun"]) * [None],  # type: ignore
        batches=list(range(len(_history["fun"]))),
    )

    data = _PlottingMultistartHistory(
        history=history,
        name=res_name,
        start_params=reader.read_start_params(),
        is_multistart=local_histories is not None,
        local_histories=local_histories,
        stacked_local_histories=stacked,
    )
    return data
def _get_stacked_local_histories(
    local_histories: list[History] | list[IterationHistory],
    direction: Any,
    history: History | IterationHistory | None = None,
) -> History:
    """Stack local histories.

    Local histories is a list of dictionaries, each of the same structure. We
    transform this to a dictionary of lists. Finally, when the data is read from
    the database we append the best history at the end.

    Args:
        local_histories: Histories of the local multistart optimizations.
        direction: Direction of the optimization (passed through to History).
        history: Optional best history to append after the local ones. Only
            provided when the data comes from a database.

    Returns:
        A single History containing all entries in order.

    """
    stacked: dict[str, list[Any]] = {"criterion": [], "params": [], "runtime": []}
    for hist in local_histories:
        stacked["criterion"].extend(hist.fun)
        stacked["params"].extend(hist.params)
        stacked["runtime"].extend(hist.time)

    # append the additional (best) history if necessary
    if history is not None:
        stacked["criterion"].extend(history.fun)
        stacked["params"].extend(history.params)
        stacked["runtime"].extend(history.time)

    return History(
        direction=direction,
        fun=stacked["criterion"],
        params=stacked["params"],
        start_time=stacked["runtime"],
        # TODO (@janosg): Retrieve `stop_time` from `hist` once it is available for the
        # IterationHistory.
        # https://github.com/optimagic-dev/optimagic/pull/553
        stop_time=len(stacked["criterion"]) * [None],  # type: ignore
        task=len(stacked["criterion"]) * [None],  # type: ignore
        batches=list(range(len(stacked["criterion"]))),
    )
""" stacked: dict[str, list[Any]] = {"criterion": [], "params": [], "runtime": []} for hist in local_histories: stacked["criterion"].extend(hist.fun) stacked["params"].extend(hist.params) stacked["runtime"].extend(hist.time) # append additional history is necessary if history is not None: stacked["criterion"].extend(history.fun) stacked["params"].extend(history.params) stacked["runtime"].extend(history.time) return History( direction=direction, fun=stacked["criterion"], params=stacked["params"], start_time=stacked["runtime"], # TODO (@janosg): Retrieve `stop_time` from `hist` once it is available for the # IterationHistory. # https://github.com/optimagic-dev/optimagic/pull/553 stop_time=len(stacked["criterion"]) * [None], # type: ignore task=len(stacked["criterion"]) * [None], # type: ignore batches=list(range(len(stacked["criterion"]))), ) def _extract_criterion_plot_lines( data: list[_PlottingMultistartHistory], max_evaluations: int | None, palette_cycle: "itertools.cycle[str]", stack_multistart: bool, monotone: bool, ) -> tuple[list[LineData], list[LineData]]: """Extract lines for criterion plot from data. Args: data: Data retrieved from results or database. max_evaluations: Clip the criterion history after that many entries. palette_cycle: Cycle of colors for plotting. stack_multistart: Whether to combine multistart histories into a single history. Default is False. monotone: If True, the criterion plot becomes monotone in the sense that at each iteration the current best criterion value is displayed. Returns: Tuple containing - lines: Main optimization paths. - multistart_lines: Multistart optimization paths. 
""" fun_or_monotone_fun = "monotone_fun" if monotone else "fun" # Collect multistart optimization paths multistart_lines: list[LineData] = [] plot_multistart = len(data) == 1 and data[0].is_multistart and not stack_multistart if plot_multistart and data[0].local_histories: for i, local_history in enumerate(data[0].local_histories): history = getattr(local_history, fun_or_monotone_fun) if max_evaluations is not None and len(history) > max_evaluations: history = history[:max_evaluations] line_data = LineData( x=np.arange(len(history)), y=history, color="#bab0ac", name=str(i), show_in_legend=False, ) multistart_lines.append(line_data) # Collect main optimization paths lines: list[LineData] = [] for _data in data: if stack_multistart and _data.stacked_local_histories is not None: _history = _data.stacked_local_histories else: _history = _data.history history = getattr(_history, fun_or_monotone_fun) if max_evaluations is not None and len(history) > max_evaluations: history = history[:max_evaluations] line_data = LineData( x=np.arange(len(history)), y=history, color=next(palette_cycle), name="best result" if plot_multistart else _data.name, show_in_legend=not plot_multistart, ) lines.append(line_data) return lines, multistart_lines def _extract_params_plot_lines( data: _PlottingMultistartHistory, selector: Callable[[PyTree], PyTree] | None, max_evaluations: int | None, palette_cycle: "itertools.cycle[str]", ) -> list[LineData]: """Extract lines for params plot from data. Args: data: Data retrieved from results or database. selector: A callable that takes params and returns a subset of params. If provided, only the selected subset of params is plotted. max_evaluations: Clip the criterion history after that many entries. palette_cycle: Cycle of colors for plotting. Returns: lines: Parameter histories. 
""" if data.stacked_local_histories is not None: history = data.stacked_local_histories.params else: history = data.history.params start_params = data.start_params registry = get_registry(extended=True) hist_arr = np.array([tree_just_flatten(p, registry=registry) for p in history]).T names = leaf_names(start_params, registry=registry) if selector is not None: flat, treedef = tree_flatten(start_params, registry=registry) helper = tree_unflatten(treedef, list(range(len(flat))), registry=registry) selected = np.array(tree_just_flatten(selector(helper), registry=registry)) names = [names[i] for i in selected] hist_arr = hist_arr[selected] lines: list[LineData] = [] for name, _data in zip(names, hist_arr, strict=False): if max_evaluations is not None and len(_data) > max_evaluations: plot_data = _data[:max_evaluations] else: plot_data = _data line_data = LineData( x=np.arange(len(plot_data)), y=plot_data, color=next(palette_cycle), name=name, show_in_legend=True, ) lines.append(line_data) return lines ================================================ FILE: src/optimagic/visualization/plotting_utilities.py ================================================ import base64 import collections.abc import itertools from copy import deepcopy from dataclasses import dataclass from typing import Any import numpy as np import plotly.graph_objects as go from plotly.subplots import make_subplots from optimagic.config import PLOTLY_TEMPLATE @dataclass(frozen=True) class LineData: """Data of a single line. Attributes: x: The x-coordinates of the points. y: The y-coordinates of the points. color: The color of the line. Default is None. name: The name of the line. Default is None. show_in_legend: Whether to show the line in the legend. Default is True. """ x: np.ndarray y: np.ndarray color: str | None = None name: str | None = None show_in_legend: bool = True @dataclass(frozen=True) class MarkerData: """Data of a single marker. Attributes: x: The x-coordinate of the marker. 
def combine_plots(
    plots,
    plots_per_row=2,
    sharex=False,
    sharey=True,
    share_yrange_all=True,
    expand_yrange=0.02,
    share_xrange_all=False,
    make_subplot_kwargs=None,
    showlegend=True,
    template=PLOTLY_TEMPLATE,
    clean_legend=True,
    layout_kwargs=None,
    legend_kwargs=None,
    title_kwargs=None,
):
    """Combine individual plots into figure with subplots.

    Uses list of plotly Figures to build plotly Figure with subplots.

    Args:
        plots (list): List with individual plots.
        plots_per_row (int): Number of plots per row.
        sharex (bool): Whether to share the properties of the x-axis across
            subplots in the same column.
        sharey (bool): If True, share the properties of the y-axis across
            subplots in the same row.
        share_yrange_all (bool): If True, set the same range of y axis for
            all plots.
        expand_yrange (float): The ratio by which to expand the range of the
            (shared) y axis, such that the axis is not cropped at exactly max
            of y variable.
        share_xrange_all (bool): If True, set the same range of x axis for
            all plots.
        make_subplot_kwargs (dict or NoneType): Dictionary of keyword arguments
            used to instantiate plotly Figure with multiple subplots. Is used
            to define properties such as, for example, the spacing between
            subplots. If None, default arguments defined in the function are
            used.
        showlegend (bool): If True, show legend.
        template (str): Plotly layout template. Must be one of
            plotly.io.templates.
        clean_legend (bool): If True, then cleans the legend from duplicates.
        layout_kwargs (dict or NoneType): Dictionary of key word arguments used
            to update layout of plotly Figure object. If None, the default
            kwargs defined in the function will be used.
        legend_kwargs (dict or NoneType): Dictionary of key word arguments used
            to update position, orientation and title of figure legend. If
            None, default position and orientation will be used with no title.
        title_kwargs (dict or NoneType): Dictionary of key word arguments used
            to update properties of the figure title. Use {'text': ''} to set
            figure title.

    Returns:
        fig (plotly.Figure): Plotly figure with subplots that combines
            individual slice plots.

    """
    # copy so that axis/legend updates do not mutate the caller's figures
    plots = deepcopy(plots)

    make_subplot_kwargs, nrows = get_make_subplot_kwargs(
        sharex, sharey, make_subplot_kwargs, plots_per_row, plots
    )
    fig = make_subplots(**make_subplot_kwargs)
    layout_kwargs = get_layout_kwargs(
        layout_kwargs, legend_kwargs, title_kwargs, template, showlegend
    )

    # copy traces and axis titles of each individual plot into its grid cell
    for i, (row, col) in enumerate(
        itertools.product(np.arange(nrows), np.arange(plots_per_row))
    ):
        try:
            subfig = plots[i]
            fig.update_xaxes(
                title_text=subfig.layout.xaxis.title.text, col=col + 1, row=row + 1
            )
            if sharey:
                # with shared y axes, only the first column gets a y title
                if col == 0:
                    fig.update_yaxes(
                        title_text=subfig.layout.yaxis.title.text,
                        col=col + 1,
                        row=row + 1,
                    )
            else:
                fig.update_yaxes(
                    title_text=subfig.layout.yaxis.title.text, col=col + 1, row=row + 1
                )
        except IndexError:
            # grid has more cells than plots; fill remaining cells with an
            # empty figure
            subfig = go.Figure()
        for d in subfig.data:
            fig.add_trace(
                d,
                col=col + 1,
                row=row + 1,
            )
    fig.update_layout(**layout_kwargs, width=400 * plots_per_row, height=300 * nrows)

    if share_yrange_all:
        # compute the global y range over all traces of all plots and expand
        # it slightly so no trace touches the plot border
        lb = []
        ub = []
        for f in plots:
            for d in f.data:
                y = _ensure_array_from_plotly_data(d["y"])
                lb.append(np.min(y))
                ub.append(np.max(y))
        ub = np.max(ub)
        lb = np.min(lb)
        y_range = ub - lb
        y_lower = lb - y_range * expand_yrange
        y_upper = ub + y_range * expand_yrange
        fig.update_yaxes(range=[y_lower, y_upper])

    if share_xrange_all:
        # same for x, but without expansion
        lb = []
        ub = []
        for f in plots:
            for d in f.data:
                x = _ensure_array_from_plotly_data(d["x"])
                lb.append(np.min(x))
                ub.append(np.max(x))
        x_upper = np.max(ub)
        x_lower = np.min(lb)
        fig.update_xaxes(range=[x_lower, x_upper])

    if clean_legend:
        fig = _clean_legend_duplicates(fig)
    return fig
def create_grid_plot(
    rows,
    cols,
    ind_list,
    names,
    kws,
    x_title=None,
    y_title=None,
    clean_legend=False,
    scientific_notation=False,
    share_xax=False,
    x_min=None,
    x_max=None,
):
    """Create a grid plot from a list of traces.

    Args:
        rows (int): Number of rows in a plot.
        cols (int): Number of cols in a plot.
        ind_list (iterable): The list of traces for each individual plot.
        names (iterable): The list of titles for the each plot.
        kws (dict): The dictionary for the layout.update, unified for each
            individual plot.
        x_title (iterable or None): The list of x-axis labels for each plot. If
            None, then no labels are added.
        y_title (iterable or None): The list of y-axis labels for each plot. If
            None, then no labels are added.
        clean_legend (bool): If True, then cleans the legend from duplicates.
            Default False.
        scientific_notation (bool): If True then updates the ticks on x- and
            y-axis to be displayed in a scientific notation. Default False.
        share_xax (bool): If True, then the x-axis domain is the same for each
            individual plot.
        x_min (int or None): The lower bound for share_xax.
        x_max (int or None): The upper bound for share_xax.

    Returns:
        plotly.Figure: The plot with subplots.

    """
    n_plots = len(ind_list)
    x_title = [""] * n_plots if x_title is None else x_title
    y_title = [""] * n_plots if y_title is None else y_title

    fig = make_subplots(rows=rows, cols=cols, subplot_titles=names)

    cells = itertools.product(range(1, rows + 1), range(1, cols + 1))
    for position, (facet_row, facet_col) in enumerate(cells):
        # the grid may contain more cells than plots; leave the rest empty
        if position >= n_plots:
            break
        for trace in ind_list[position]:
            fig.add_trace(trace, row=facet_row, col=facet_col)
        # style axis labels
        fig.update_xaxes(row=facet_row, col=facet_col, title=x_title[position])
        fig.update_yaxes(row=facet_row, col=facet_col, title=y_title[position])

    # deleting duplicates in legend
    if clean_legend:
        fig = _clean_legend_duplicates(fig)
    # scientific notations for axis ticks
    if scientific_notation:
        fig.update_yaxes(tickformat=".2e")
        fig.update_xaxes(tickformat=".2e")
    if share_xax:
        fig.update_xaxes(range=[x_min, x_max])
    # setting template theme and size
    fig.update_layout(**kws)
    return fig


def create_ind_dict(
    ind_list,
    names,
    kws,
    x_title=None,
    y_title=None,
    clean_legend=False,
    scientific_notation=False,
    share_xax=False,
    x_min=None,
    x_max=None,
):
    """Create a dictionary of individual plots from a list of traces.

    Args:
        ind_list (iterable): The list of traces for each individual plot.
        names (iterable): The list of titles for the each plot.
        kws (dict): The dictionary for the layout.update, unified for each
            individual plot.
        x_title (iterable or None): The list of x-axis labels for each plot. If
            None, then no labels are added.
        y_title (iterable or None): The list of y-axis labels for each plot. If
            None, then no labels are added.
        clean_legend (bool): If True, then cleans the legend from duplicates.
            Default False.
        scientific_notation (bool): If True then updates the ticks on x- and
            y-axis to be displayed in a scientific notation. Default False.
        share_xax (bool): If True, then the x-axis domain is the same for each
            individual plot.
        x_min (int or None): The lower bound for share_xax.
        x_max (int or None): The upper bound for share_xax.

    Returns:
        Dictionary of individual plots.

    """
    n_plots = len(ind_list)
    x_title = [""] * n_plots if x_title is None else x_title
    y_title = [""] * n_plots if y_title is None else y_title

    fig_dict = {}
    for position in range(n_plots):
        fig = go.Figure()
        for trace in ind_list[position]:
            fig.add_trace(trace)
        # adding title and styling axes and theme
        fig.update_layout(
            title=names[position],
            xaxis_title=x_title[position],
            yaxis_title=y_title[position],
            **kws,
        )
        # scientific notations for axis ticks
        if scientific_notation:
            fig.update_yaxes(tickformat=".2e")
            fig.update_xaxes(tickformat=".2e")
        # deleting duplicates in legend
        if clean_legend:
            fig = _clean_legend_duplicates(fig)
        if share_xax:
            fig.update_xaxes(range=[x_min, x_max])
        # adding to dictionary
        key = names[position].replace(" ", "_").lower()
        fig_dict[key] = fig

    return fig_dict
x_max (int or None): The upped bound for share_xax. Returns: Dictionary of individual plots. """ fig_dict = {} if x_title is None: x_title = ["" for ind in range(len(ind_list))] if y_title is None: y_title = ["" for ind in range(len(ind_list))] for ind in range(len(ind_list)): fig = go.Figure() traces = ind_list[ind] for trace in range(len(traces)): fig.add_trace(traces[trace]) # adding title and styling axes and theme fig.update_layout( title=names[ind], xaxis_title=x_title[ind], yaxis_title=y_title[ind], **kws ) # scientific notations for axis ticks if scientific_notation: fig.update_yaxes(tickformat=".2e") fig.update_xaxes(tickformat=".2e") # deleting duplicates in legend if clean_legend: fig = _clean_legend_duplicates(fig) if share_xax: fig.update_xaxes(range=[x_min, x_max]) # adding to dictionary key = names[ind].replace(" ", "_").lower() fig_dict[key] = fig return fig_dict def _clean_legend_duplicates(fig): trace_names = set() def disable_legend_if_duplicate(trace): if trace.name in trace_names: # in this case the legend is a duplicate trace.update(showlegend=False) else: trace_names.add(trace.name) fig.for_each_trace(disable_legend_if_duplicate) return fig def get_make_subplot_kwargs(sharex, sharey, kwrgs, plots_per_row, plots): """Define and update keywargs for instantiating figure with subplots.""" nrows = int(np.ceil(len(plots) / plots_per_row)) default_kwargs = { "rows": nrows, "cols": plots_per_row, "start_cell": "top-left", "print_grid": False, "shared_yaxes": sharey, "shared_xaxes": sharex, "horizontal_spacing": 1 / (plots_per_row * 4), } if nrows > 1: default_kwargs["vertical_spacing"] = (1 / (nrows - 1)) / 3 if not sharey: default_kwargs["horizontal_spacing"] = 2 * default_kwargs["horizontal_spacing"] if kwrgs: default_kwargs.update(kwrgs) return default_kwargs, nrows def get_layout_kwargs(layout_kwargs, legend_kwargs, title_kwargs, template, showlegend): """Define and update default kwargs for update_layout. 
Defines some default keyword arguments to update figure layout, such as title and legend. """ default_kwargs = { "template": template, "xaxis_showgrid": False, "yaxis_showgrid": False, "showlegend": showlegend, "legend": {}, "title": {}, } if title_kwargs: default_kwargs["title"] = title_kwargs if legend_kwargs: default_kwargs["legend"].update(legend_kwargs) if layout_kwargs: default_kwargs.update(layout_kwargs) return default_kwargs def _ensure_array_from_plotly_data(data: Any) -> np.ndarray: """Ensure that data is a numpy array, including decoding Plotly v6+ base64 format. Args: data: Can be a numpy array, (nested) sequence (e.g., list of lists), or a dict with 'bdata' and 'dtype' keys (Plotly v6+ format). Returns: Data as a numpy array. Raises: ValueError: If input cannot be interpreted as an array. """ if isinstance(data, np.ndarray): return data elif isinstance(data, dict) and "bdata" in data and "dtype" in data: return _decode_base64_data(data["bdata"], dtype=data["dtype"]) elif isinstance(data, collections.abc.Sequence): try: return np.array(data, dtype=np.float64) except Exception: pass raise ValueError("Failed to convert input to numpy array.") def _decode_base64_data(b64data: str, dtype: str) -> np.ndarray: decoded = base64.b64decode(b64data) return np.frombuffer(decoded, dtype=np.dtype(dtype)) def get_palette_cycle(palette: list[str] | str) -> "itertools.cycle[str]": if not isinstance(palette, list): palette = [palette] return itertools.cycle(palette) ================================================ FILE: src/optimagic/visualization/profile_plot.py ================================================ import itertools from typing import Any, Literal import numpy as np import pandas as pd from numpy.typing import NDArray from optimagic.benchmarking.process_benchmark_results import ( process_benchmark_results, ) from optimagic.config import DEFAULT_PALETTE from optimagic.visualization.backends import line_plot from optimagic.visualization.plotting_utilities 
import LineData, get_palette_cycle BACKEND_TO_PROFILE_PLOT_LEGEND_PROPERTIES: dict[str, dict[str, Any]] = { "plotly": {"title": {"text": "algorithm"}}, "matplotlib": { "loc": "outside right upper", "fontsize": "x-small", "title": "algorithm", }, "bokeh": { "location": "top_right", "place": "right", "label_text_font_size": "8pt", "title": "algorithm", }, "altair": {"orient": "right", "title": "algorithm"}, } BACKEND_TO_PROFILE_PLOT_MARGIN_PROPERTIES: dict[str, dict[str, Any]] = { "plotly": {"l": 10, "r": 10, "t": 30, "b": 30}, # "matplotlib": handles margins automatically via constrained layout } def profile_plot( problems: dict[str, dict[str, Any]], results: dict[tuple[str, str], dict[str, Any]], *, runtime_measure: Literal[ "walltime", "n_evaluations", "n_batches" ] = "n_evaluations", normalize_runtime: bool = False, stopping_criterion: Literal["x", "y", "x_and_y", "x_or_y"] = "y", x_precision: float = 1e-4, y_precision: float = 1e-4, backend: Literal["plotly", "matplotlib", "bokeh", "altair"] = "plotly", template: str | None = None, palette: list[str] | str = DEFAULT_PALETTE, ) -> Any: """Compare optimizers over a problem set. This plot answers the question: What percentage of problems can each algorithm solve within a certain runtime budget? The runtime budget is plotted on the x axis and the share of problems each algorithm solved on the y axis. Thus, algorithms that are very specialized and perform well on some share of problems but are not able to solve more problems with a larger computational budget will have steep increases and then flat lines. Algorithms that are robust but slow, will have low shares in the beginning but reach very high. Note that failing to converge according to the given stopping_criterion and precisions is scored as needing an infinite computational budget. For details, see the description of performance and data profiles by Moré and Wild (2009). Args: problems: A dictionary where keys are the problem names. 
Values contain information on the problem, including the solution value. results: A dictionary where keys are tuples of the form (problem, algorithm), values are dictionaries of the collected information on the benchmark run, including 'criterion_history' and 'time_history'. runtime_measure: This is the runtime until the desired convergence was reached by an algorithm. This is called performance measure by Moré and Wild (2009). normalize_runtime: If True the runtime each algorithm needed for each problem is scaled by the time the fastest algorithm needed. If True, the resulting plot is what Moré and Wild (2009) called data profiles. stopping_criterion: Determines how convergence is determined from the two precisions. x_precision: How close an algorithm must have gotten to the true parameter values (as percent of the Euclidean distance between start and solution parameters) before the criterion for clipping and convergence is fulfilled. y_precision: How close an algorithm must have gotten to the true criterion values (as percent of the distance between start and solution criterion value) before the criterion for clipping and convergence is fulfilled. backend: The backend to use for plotting. Default is "plotly". template: The template for the figure. If not specified, the default template of the backend is used. For the 'bokeh' and 'altair' backends, this changes the global theme, which affects all plots from that backend in the session. palette: The coloring palette for traces. Default is the D3 qualitative palette. Returns: The figure object containing the profile plot. """ # ================================================================================== # Process inputs palette_cycle = get_palette_cycle(palette) if stopping_criterion is None: raise ValueError( "You must specify a stopping criterion for the performance plot. 
" ) if runtime_measure not in ["walltime", "n_evaluations", "n_batches"]: raise ValueError( "Only 'walltime', 'n_evaluations' or 'n_batches' are allowed as " f"runtime_measure. You specified '{runtime_measure}'." ) # ================================================================================== # Extract backend-agnostic plotting data from benchmark results df, converged_info = process_benchmark_results( problems=problems, results=results, stopping_criterion=stopping_criterion, x_precision=x_precision, y_precision=y_precision, ) solution_times = create_solution_times( df, runtime_measure=runtime_measure, converged_info=converged_info, ) lines = _extract_profile_plot_lines( solution_times=solution_times, normalize_runtime=normalize_runtime, converged_info=converged_info, palette_cycle=palette_cycle, ) # ================================================================================== # Generate the figure fig = line_plot( lines, backend=backend, xlabel=_get_profile_plot_xlabel(runtime_measure, normalize_runtime), ylabel="Share of Problems Solved", template=template, height=300, width=500, legend_properties=BACKEND_TO_PROFILE_PLOT_LEGEND_PROPERTIES.get(backend, None), margin_properties=BACKEND_TO_PROFILE_PLOT_MARGIN_PROPERTIES.get(backend, None), horizontal_line=1.0, ) return fig def _extract_profile_plot_lines( solution_times: pd.DataFrame, normalize_runtime: bool, converged_info: pd.DataFrame, palette_cycle: "itertools.cycle[str]", ) -> list[LineData]: """Extract lines for profile plot from data. Args: solution_times: A DataFrame where columns are the names of the algorithms, indexes are the problems. Values are performance measures. normalize_runtime: If True the runtime each algorithm needed for each problem is scaled by the time the fastest algorithm needed. converged_info: A DataFrame where columns are the names of the algorithms, indexes are the problems. 
The values are boolean and True when the algorithm arrived at the solution with the desired precision. palette_cycle: Cycle of colors for plotting. Returns: A list of data objects containing data for each line of the profile plot. """ if normalize_runtime: solution_times = solution_times.divide(solution_times.min(axis=1), axis=0) solution_times[~converged_info] = np.inf alphas = _determine_alpha_grid(solution_times) for_each_alpha = pd.concat( {alpha: solution_times <= alpha for alpha in alphas}, names=["alpha"], ) performance_profiles = for_each_alpha.groupby("alpha").mean().stack().reset_index() lines: list[LineData] = [] for algorithm, data in performance_profiles.groupby("algorithm"): line_data = LineData( x=data["alpha"].to_numpy(), y=data[0].to_numpy(), name=str(algorithm), color=next(palette_cycle), ) lines.append(line_data) return lines def create_solution_times( df: pd.DataFrame, runtime_measure: Literal["walltime", "n_evaluations", "n_batches"], converged_info: pd.DataFrame, return_tidy: bool = True, ) -> pd.DataFrame: """Find the solution time for each algorithm and problem. Args: df: A DataFrame which contains 'problem', 'algorithm' and 'runtime_measure' as columns. runtime_measure: This is the runtime until the desired convergence was reached by an algorithm. This is called performance measure by Moré and Wild (2009). converged_info: A DataFrame where columns are the names of the algorithms, indexes are the problems. The values are boolean and True when the algorithm arrived at the solution with the desired precision. return_tidy: If True, the resulting DataFrame will be a tidy DataFrame with problem and algorithm as indexes and runtime_measure as column. If False, the resulting DataFrame will have problem, algorithm and runtime_measure as columns. Returns: A DataFrame. If return_tidy is True, indexes are the problems, columns are the algorithms. If return_tidy is False, columns are problem, algorithm and runtime_measure. 
The values are either the number of evaluations or the walltime each algorithm
        needed to achieve the desired precision. If the desired precision was not
        achieved the value is set to np.inf.

    """
    # For each (problem, algorithm) pair, the largest recorded value of the
    # runtime measure is the total budget the algorithm used on that problem.
    solution_times = (
        df.groupby(["problem", "algorithm"])[runtime_measure].max().unstack()
    )
    # We convert the dtype to float to support the use of np.inf
    solution_times = solution_times.astype(float).where(converged_info, other=np.inf)

    if not return_tidy:
        solution_times = solution_times.stack().reset_index()
        # After reset_index the value column has an automatic name; rename it to
        # the runtime measure so callers can address it by name.
        solution_times = solution_times.rename(
            columns={solution_times.columns[2]: runtime_measure}
        )

    return solution_times


def _determine_alpha_grid(solution_times: pd.DataFrame) -> list[np.float64]:
    """Build the grid of runtime budgets (alphas) at which profiles are evaluated.

    The grid consists of all switch points of the performance profiles, one extra
    point 5% to the right of the largest switch point, and the midpoints between
    consecutive points of that extended set.

    Args:
        solution_times: A DataFrame where columns are the names of the algorithms,
            indexes are the problems. Values are performance measures.

    Returns:
        Sorted list of alpha values.

    """
    switch_points = _find_switch_points(solution_times=solution_times)
    # One extra evaluation point to the right of the last jump so the profiles
    # are drawn with a flat segment after their final switch point.
    point_to_right = switch_points[-1] * 1.05
    extended_switch_points = np.append(switch_points, point_to_right)
    # Midpoints between consecutive switch points refine the grid.
    mid_points = (extended_switch_points[:-1] + extended_switch_points[1:]) / 2
    alphas = sorted(np.append(extended_switch_points, mid_points))
    return alphas


def _find_switch_points(solution_times: pd.DataFrame) -> NDArray[np.float64]:
    """Determine the switch points of the performance profiles.

    Args:
        solution_times: A DataFrame where columns are the names of the algorithms,
            indexes are the problems. Values are performance measures. They can be
            float (when normalize_runtime was True), int (when the runtime measure
            is non-normalized function evaluations or batches), or datetime (when
            the non-normalized walltime is used).

    Returns:
        A sorted array of switching points.

    """
    # Every distinct recorded solution time is a point where some profile can jump.
    switch_points = np.unique(solution_times.values)
    # Shift float values slightly upwards so that alphas built from them satisfy
    # `solution_time <= alpha` for problems solved exactly at a switch point
    # (the comparison used in _extract_profile_plot_lines).
    if pd.api.types.is_float_dtype(switch_points):
        switch_points += 1e-10
    # Drop np.inf entries stemming from runs that did not converge.
    switch_points = switch_points[np.isfinite(switch_points)]
    return switch_points


def _get_profile_plot_xlabel(runtime_measure: str, normalize_runtime: bool) -> str:
    # The '{linebreak}' placeholder is replaced with the backend-specific line break
    # in the corresponding plotting function.
if normalize_runtime: runtime_measure_to_xlabel = { "walltime": ( "Multiple of Minimal Wall Time{linebreak}Needed to Solve the Problem" ), "n_evaluations": ( "Multiple of Minimal Number of Function Evaluations" "{linebreak}Needed to Solve the Problem" ), "n_batches": ( "Multiple of Minimal Number of Batches" "{linebreak}Needed to Solve the Problem" ), } else: runtime_measure_to_xlabel = { "walltime": "Wall Time Needed to Solve the Problem", "n_evaluations": "Number of Function Evaluations", "n_batches": "Number of Batches", } return runtime_measure_to_xlabel[runtime_measure] ================================================ FILE: src/optimagic/visualization/slice_plot.py ================================================ import warnings from functools import partial from typing import Any, Callable, Literal import numpy as np import pandas as pd from numpy.typing import NDArray from pybaum import tree_just_flatten import optimagic as om from optimagic import deprecations from optimagic.batch_evaluators import ( BatchEvaluator, BatchEvaluatorLiteral, process_batch_evaluator, ) from optimagic.config import DEFAULT_N_CORES, DEFAULT_PALETTE from optimagic.deprecations import replace_and_warn_about_deprecated_bounds from optimagic.optimization.fun_value import ( SpecificFunctionValue, convert_fun_output_to_function_value, enforce_return_type, ) from optimagic.parameters.bounds import pre_process_bounds from optimagic.parameters.conversion import get_converter from optimagic.parameters.space_conversion import InternalParams from optimagic.parameters.tree_registry import get_registry from optimagic.shared.process_user_function import infer_aggregation_level from optimagic.typing import AggregationLevel, PyTree from optimagic.visualization.backends import grid_line_plot, line_plot from optimagic.visualization.plotting_utilities import LineData, MarkerData def slice_plot( func: Callable, params: PyTree, bounds: om.Bounds | None = None, func_kwargs: dict | None = None, 
selector: Callable[[PyTree], PyTree] | None = None, n_cores: int = DEFAULT_N_CORES, n_gridpoints: int = 20, plots_per_row: int = 2, param_names: dict[str, str] | None = None, share_y: bool = True, expand_yrange: float = 0.02, share_x: bool = False, backend: Literal["plotly", "matplotlib", "bokeh", "altair"] = "plotly", template: str | None = None, color: str | None = DEFAULT_PALETTE[0], title: str | None = None, return_dict: bool = False, batch_evaluator: BatchEvaluatorLiteral | BatchEvaluator = "joblib", # deprecated make_subplot_kwargs: dict | None = None, lower_bounds: None = None, upper_bounds: None = None, ) -> Any: """Plot criterion along coordinates at given and random values. Generates plots for each parameter and optionally combines them into a figure with subplots. # TODO: Use soft bounds to create the grid (if available). # TODO: Don't do a function evaluation outside the batch evaluator. Args: func: criterion function that takes params and returns scalar, PyTree or FunctionValue object. params: A pytree with parameters. bounds: Lower and upper bounds on the parameters. The bounds are used to create a grid over which slice plots are drawn. The most general and preferred way to specify bounds is an `optimagic.Bounds` object that collects lower, upper, soft_lower and soft_upper bounds. The soft bounds are not used for slice_plots. Each bound type mirrors the structure of params. Check our how-to guide on bounds for examples. If params is a flat numpy array, you can also provide bounds via any format that is supported by scipy.optimize.minimize. func_kwargs: Additional keyword arguments passed to func. selector: Function that takes params and returns a subset of params for which we actually want to generate the plot. n_cores: Number of cores. n_gridpoints: Number of gridpoints on which the criterion function is evaluated. This is the number per plotted line. plots_per_row: Number of plots per row. 
param_names: Dictionary mapping old parameter names to new ones. share_y: If True, the individual plots share the scale on the yaxis and plots in one row actually share the y axis. expand_yrange: The ratio by which to expand the range of the (shared) y axis, such that the axis is not cropped at exactly max of Criterion Value. share_x: If True, set the same range of x axis for all plots and share the x axis for all plots in one column. backend: The backend to use for plotting. Default is "plotly". template: The template for the figure. If not specified, the default template of the backend is used. For the 'bokeh' and 'altair' backends, this changes the global theme, which affects all plots from that backend in the session. color: The line color. title: The figure title. This is not used for the `bokeh` backend, as it does not support title for grid plot. return_dict: If True, return dictionary with individual plots of each parameter, else, combine individual plots into a figure with subplots. batch_evaluator: See :ref:`batch_evaluators`. Returns: The figure object containing the slice plot if `return_dict` is False. Otherwise, a dictionary with individual slice plots for each parameter. 
""" # ================================================================================== # Process inputs bounds = replace_and_warn_about_deprecated_bounds( lower_bounds=lower_bounds, upper_bounds=upper_bounds, bounds=bounds, ) bounds = pre_process_bounds(bounds) func, func_eval = _get_processed_func_and_func_eval(func, func_kwargs, params) if make_subplot_kwargs is not None: deprecations.throw_make_subplot_kwargs_in_slice_plot_future_warning() # ================================================================================== # Extract backend-agnostic plotting data from results plot_data, internal_params = _get_plot_data( func=func, params=params, bounds=bounds, func_eval=func_eval, selector=selector, n_gridpoints=n_gridpoints, batch_evaluator=batch_evaluator, n_cores=n_cores, ) lines_list, marker_list, xlabels, ylabels = _extract_slice_plot_lines_and_labels( plot_data=plot_data, internal_params=internal_params, func_eval=func_eval, param_names=param_names, color=color, ) # ================================================================================== # Generate the figure xrange, yrange = _get_axis_limits( plot_data, share_y=share_y, share_x=share_x, expand_yrange=expand_yrange ) if return_dict: fig_dict = {} for i in range(len(lines_list)): fig = line_plot( lines=lines_list[i], marker=marker_list[i], backend=backend, xlabel=xlabels[i], ylabel=ylabels[i], template=template, ) fig_dict[xlabels[i]] = fig return fig_dict else: n_rows = int(np.ceil(len(lines_list) / plots_per_row)) if share_y: ylabels = [ ylabel if i % plots_per_row == 0 else "" for i, ylabel in enumerate(ylabels) ] fig = grid_line_plot( lines_list=lines_list, marker_list=marker_list, backend=backend, n_rows=n_rows, n_cols=plots_per_row, xlabels=xlabels, xrange=xrange, share_x=share_x, ylabels=ylabels, yrange=yrange, share_y=share_y, template=template, height=300 * n_rows, width=400 * plots_per_row, plot_title=title, make_subplot_kwargs=make_subplot_kwargs, ) return fig def 
_get_processed_func_and_func_eval( func: Callable, func_kwargs: dict | None, params: PyTree ) -> tuple[Callable, SpecificFunctionValue]: if func_kwargs is not None: func = partial(func, **func_kwargs) func_eval = func(params) # handle deprecated function output if deprecations.is_dict_output(func_eval): msg = ( "Functions that return dictionaries are deprecated in slice_plot and will " "raise an error in version 0.6.0. Please pass a function that returns a " "FunctionValue object instead and use the `mark` decorators to specify " "whether it is a scalar, least-squares or likelihood function." ) warnings.warn(msg, FutureWarning) func_eval = deprecations.convert_dict_to_function_value(func_eval) func = deprecations.replace_dict_output(func) # Infer the function type and enforce the return type if deprecations.is_dict_output(func_eval): problem_type = deprecations.infer_problem_type_from_dict_output(func_eval) else: problem_type = infer_aggregation_level(func) func_eval = convert_fun_output_to_function_value(func_eval, problem_type) func = enforce_return_type(problem_type)(func) return func, func_eval def _get_plot_data( func: Callable, params: PyTree, bounds: om.Bounds | None, func_eval: SpecificFunctionValue, selector: Callable[[PyTree], PyTree] | None, n_gridpoints: int, batch_evaluator: BatchEvaluatorLiteral | BatchEvaluator, n_cores: int, ) -> tuple[pd.DataFrame, InternalParams]: converter, internal_params = get_converter( params=params, constraints=None, bounds=bounds, func_eval=func_eval, solver_type="value", ) n_params = len(internal_params.values) selected = np.arange(n_params, dtype=int) if selector is not None: helper = converter.params_from_internal(selected) registry = get_registry(extended=True) selected = np.array( tree_just_flatten(selector(helper), registry=registry), dtype=int ).ravel() # Ensure the result is a 1D array if not np.isfinite(internal_params.lower_bounds[selected]).all(): raise ValueError("All selected parameters must have finite lower 
bounds.")
    if not np.isfinite(internal_params.upper_bounds[selected]).all():
        raise ValueError("All selected parameters must have finite upper bounds.")

    evaluation_points, metadata = [], []
    for pos in selected:
        lb = internal_params.lower_bounds[pos]
        ub = internal_params.upper_bounds[pos]
        # Evenly spaced grid between the parameter's (finite) bounds.
        grid = np.linspace(lb, ub, n_gridpoints)
        name = internal_params.names[pos]
        for param_value in grid:
            # Skip grid points that coincide with the current parameter value; one
            # entry at the current value is appended per parameter further below,
            # reusing the already computed func_eval.
            if param_value != internal_params.values[pos]:
                meta = {
                    "name": name,
                    "Parameter Value": param_value,
                }
                # Vary only the parameter at `pos`; all others keep their values.
                x = internal_params.values.copy()
                x[pos] = param_value
                point = converter.params_from_internal(x)
                evaluation_points.append(point)
                metadata.append(meta)

    func_values = _retrieve_func_values(
        func, evaluation_points, batch_evaluator, n_cores
    )
    # Append the function value at the start params once per selected parameter
    # instead of re-evaluating func at the (unchanged) current point.
    func_values += [func_eval.internal_value(AggregationLevel.SCALAR)] * len(selected)
    for pos in selected:
        meta = {
            "name": internal_params.names[pos],
            "Parameter Value": internal_params.values[pos],
        }
        metadata.append(meta)

    plot_data = pd.DataFrame(metadata)
    plot_data["Function Value"] = func_values  # type: ignore[assignment]

    return plot_data, internal_params


def _retrieve_func_values(
    func: Callable,
    evaluation_points: list[PyTree],
    batch_evaluator: BatchEvaluatorLiteral | BatchEvaluator,
    n_cores: int,
) -> list[float | NDArray[np.float64]]:
    """Retrieve function values at given evaluation points using batch evaluator.

    Args:
        func: The processed criterion function.
        evaluation_points: Parameter pytrees at which func is evaluated.
        batch_evaluator: See :ref:`batch_evaluators`.
        n_cores: Number of cores used by the batch evaluator.

    Returns:
        Scalar function values; np.nan where an evaluation failed.

    """
    batch_evaluator = process_batch_evaluator(batch_evaluator)
    # With error_handling="continue", failed evaluations are returned as strings
    # instead of raising.
    func_values = batch_evaluator(
        func=func,
        arguments=evaluation_points,
        error_handling="continue",
        n_cores=n_cores,
    )
    # add NaNs where an evaluation failed
    func_values = [
        np.nan if isinstance(val, str) else val.internal_value(AggregationLevel.SCALAR)
        for val in func_values
    ]
    return func_values


def _extract_slice_plot_lines_and_labels(
    plot_data: pd.DataFrame,
    internal_params: InternalParams,
    func_eval: SpecificFunctionValue,
    param_names: dict[str, str] | None,
    color: str | None,
) -> tuple[list[list[LineData]], list[MarkerData], list[str], list[str]]:
    """Extract lines,
markers and labels for slice plots.""" lines_list = [] marker_list = [] xlabels = [] ylabels = [] for _par_name, _data in plot_data.groupby("name", sort=False): df = _data.sort_values("Parameter Value") par_name = str(_par_name) if param_names is not None and par_name in param_names: par_name = param_names[par_name] subplot_line = LineData( x=df["Parameter Value"].to_numpy(), y=df["Function Value"].to_numpy(), color=color, name=par_name, show_in_legend=False, ) lines_list.append([subplot_line]) if internal_params.names is not None: pos = internal_params.names.index(_par_name) marker_data = MarkerData( x=float(internal_params.values[pos]), y=float(func_eval.internal_value(AggregationLevel.SCALAR)), color=color, ) marker_list.append(marker_data) xlabels.append(par_name) ylabels.append("Function Value") return lines_list, marker_list, xlabels, ylabels def _get_axis_limits( plot_data: pd.DataFrame, share_y: bool, share_x: bool, expand_yrange: float ) -> tuple[tuple[float, float] | None, tuple[float, float] | None]: if share_y: lb = plot_data["Function Value"].min() ub = plot_data["Function Value"].max() y_range = ub - lb ub += y_range * expand_yrange lb -= y_range * expand_yrange yrange = (lb, ub) else: yrange = None if share_x: lb = plot_data["Parameter Value"].min() ub = plot_data["Parameter Value"].max() xrange = (lb, ub) else: xrange = None return xrange, yrange ================================================ FILE: src/optimagic/visualization/slice_plot_3d.py ================================================ import warnings from copy import deepcopy from enum import Enum from functools import partial import numpy as np import plotly.express as px import plotly.graph_objects as go from numpy.typing import NDArray from plotly.subplots import make_subplots from pybaum import tree_just_flatten from optimagic import deprecations from optimagic.batch_evaluators import process_batch_evaluator from optimagic.config import DEFAULT_N_CORES, PLOTLY_TEMPLATE from 
optimagic.deprecations import replace_and_warn_about_deprecated_bounds from optimagic.optimization.fun_value import ( convert_fun_output_to_function_value, enforce_return_type, ) from optimagic.parameters.bounds import pre_process_bounds from optimagic.parameters.conversion import get_converter from optimagic.parameters.tree_registry import get_registry from optimagic.shared.process_user_function import infer_aggregation_level from optimagic.typing import AggregationLevel def slice_plot_3d( # type: ignore[no-untyped-def] func, params, bounds=None, func_kwargs=None, selector=None, n_gridpoints: int = 20, projection="univariate", make_subplot_kwargs=None, layout_kwargs=None, plot_kwargs=None, param_names: dict[str, str] | None = None, expand_yrange: float = 0.02, batch_evaluator="joblib", n_cores: int = DEFAULT_N_CORES, return_dict: bool = False, lower_bounds=None, upper_bounds=None, ) -> go.Figure | dict[tuple[int, int], go.Figure]: """Generate interactive slice, contour or surface plots of a function. This function produces plots of a user-supplied criterion function evaluated on a grid of its parameters. It can generate: - 2D univariate slice plots (each parameter vs. function value). - 2D contour plots (two parameters vs. function value). - 3D surface plots (two parameters vs. function value). Plots can be returned as a dictionary of individual figures or combined into a single Plotly figure with subplots. Args: func (callable): The criterion function. It takes `params` and returns a scalar, PyTree, or `FunctionValue` object. params (pytree): A pytree of parameters. bounds (optimagic.Bounds or sequence or None): An `optimagic.Bounds` object or other supported format specifying the lower and upper bounds for parameters. These bounds define the grid for the plots. func_kwargs (dict or None): Additional keyword arguments for `func`. selector (callable): A function that takes `params` and returns a subset of them to be plotted. If None, all parameters are plotted. 
n_gridpoints (int): The number of points per parameter used to create the evaluation grid. For a 2D plot, this means `n_gridpoints`**2 evaluations. projection (str or dict): The type of plot. Can be `"univariate"`, `"contour"`, `"surface"`, or a dictionary like `{"lower": "contour", "upper": "surface"}` to create a grid of mixed plot types. make_subplot_kwargs (dict or None): Keyword arguments for `plotly.subplots.make_subplots`. layout_kwargs (dict or None): Keyword arguments for the figure's `update_layout` method. plot_kwargs (dict or None): A nested dictionary of keyword arguments to customize traces, e.g., `{"line_plot": {"color": "blue"}}`. param_names (dict or NoneType): A dictionary mapping internal parameter names to display names. expand_yrange (float): The factor by which to expand the function value axis range. This only applies to the z-axis of **surface plots** to prevent the plot from feeling cramped. It does not affect line or contour plots. batch_evaluator (str or callable): The batch evaluator to parallelize function evaluations. See :ref:`batch_evaluators`. n_cores (int): The number of cores to use for parallelization. return_dict (bool): If `True`, returns a dictionary of `go.Figure` objects keyed by `(row, col)`. If `False`, returns a single combined `go.Figure`. lower_bounds (sequence or None): Deprecated. Use `bounds` instead. upper_bounds (sequence or None): Deprecated. Use `bounds` instead. Returns: plotly.Figure | dict: A single combined Plotly figure or a dictionary of individual figures. 
""" bounds = replace_and_warn_about_deprecated_bounds( lower_bounds=lower_bounds, upper_bounds=upper_bounds, bounds=bounds, ) bounds = pre_process_bounds(bounds) if func_kwargs is not None: func = partial(func, **func_kwargs) func_eval = func(params) # ================================================================================== # handle deprecated function output # ================================================================================== if deprecations.is_dict_output(func_eval): msg = ( "Functions that return dictionaries are deprecated in slice_plot and will " "raise an error in version 0.6.0. Please pass a function that returns a " "FunctionValue object instead and use the `mark` decorators to specify " "whether it is a scalar, least-squares or likelihood function." ) warnings.warn(msg, FutureWarning) func_eval = deprecations.convert_dict_to_function_value(func_eval) func = deprecations.replace_dict_output(func) # ================================================================================== # Infer the function type and enforce the return type # ================================================================================== if deprecations.is_dict_output(func_eval): problem_type = deprecations.infer_problem_type_from_dict_output(func_eval) else: problem_type = infer_aggregation_level(func) func_eval = convert_fun_output_to_function_value(func_eval, problem_type) func = enforce_return_type(problem_type)(func) # ================================================================================== converter, internal_params = get_converter( params=params, constraints=None, bounds=bounds, func_eval=func_eval, solver_type="value", ) n_params = len(internal_params.values) selected = np.arange(n_params, dtype=int) if selector is not None: helper = converter.params_from_internal(selected) registry = get_registry(extended=True) selected = np.array( tree_just_flatten(selector(helper), registry=registry), dtype=int ).reshape(-1) n_params = len(selected) 
if not np.isfinite(internal_params.lower_bounds[selected]).all(): raise ValueError("All selected parameters must have finite lower bounds.") if not np.isfinite(internal_params.upper_bounds[selected]).all(): raise ValueError("All selected parameters must have finite upper bounds.") # Projection configuration projection = Projection(projection) if not projection.is_univariate and n_params < 2: raise ValueError( f"{projection!r} requires at least two parameters. Got {n_params} params." ) params_data, display_names = {}, {} for pos in selected: name = internal_params.names[pos] params_data[name] = np.linspace( internal_params.lower_bounds[pos], internal_params.upper_bounds[pos], n_gridpoints, ) display_names[name] = param_names.get(name, name) if param_names else name # This is where evaluation_points = generate_evaluation_points( projection, selected, internal_params, params_data, converter ) evaluator = process_batch_evaluator(batch_evaluator) raw_func_values = evaluator( func=func, arguments=evaluation_points, error_handling="continue", n_cores=n_cores, ) # add NaNs where an evaluation failed func_values = np.array( [ np.nan if isinstance(val, str) else val.internal_value(AggregationLevel.SCALAR) for val in raw_func_values ] ) plot_data = plot_data_cache( projection, selected, internal_params, func_values, n_gridpoints ) # Kwargs evaluation plot_kwargs = evaluate_plot_kwargs(plot_kwargs) make_subplot_kwargs = evaluate_make_subplot_kwargs( make_subplot_kwargs, n_params, projection, display_names ) layout_kwargs = evaluate_layout_kwargs( layout_kwargs, projection, make_subplot_kwargs ) plots = {} if projection.is_univariate: cols = make_subplot_kwargs.get("cols") for idx, param_pos in enumerate(selected): row, col = divmod(idx, cols) param_name = internal_params.names[param_pos] display_name = display_names[param_name] x = params_data[param_name].tolist() y = plot_data.get( tuple( sorted( [ param_name, ] ) ), [], ) y_range = compute_yaxis_range( y[~np.isnan(y)] if 
np.any(~np.isnan(y)) else [0, 1], expand_yrange ) grid_univariate = False # Scatter plot point scatter_point = { "x": [internal_params.values[param_pos]], "y": [func_eval.internal_value(AggregationLevel.SCALAR)], } fig = plot_line( x, y, display_name, y_range, scatter_point, plot_kwargs, layout_kwargs, grid_univariate, ) plots[(row, col)] = fig else: single_plot = True if n_params == 2 else False projection_config = projection.get_config() lower_projection = projection_config.get("lower") upper_projection = projection_config.get("upper") for i, x_selected in enumerate(selected): for j, y_selected in enumerate(selected): if x_selected == y_selected and single_plot: x_pos, y_pos = selected else: x_pos = x_selected y_pos = y_selected # Diagonal plot are slice plots if i == j and not single_plot: grid_univariate = True param_name = internal_params.names[x_pos] display_name = display_names[param_name] x = params_data[param_name].tolist() y = plot_data.get( tuple( sorted( [ param_name, ] ) ), [], ) y_range = compute_yaxis_range(y, expand_yrange) # Scatter plot point scatter_point = { "x": [internal_params.values[x_pos]], "y": [func_eval.internal_value(AggregationLevel.SCALAR)], } fig = plot_line( x, y, display_name, y_range, scatter_point, plot_kwargs, layout_kwargs, grid_univariate, ) else: subplot_projection = None if i < j and upper_projection is not None: subplot_projection = upper_projection elif i > j and lower_projection is not None: subplot_projection = lower_projection elif i == j and single_plot: subplot_projection = lower_projection if subplot_projection is not None: x_name = internal_params.names[x_pos] y_name = internal_params.names[y_pos] current_param_names = [x_name, y_name] x, y = np.meshgrid(params_data[x_name], params_data[y_name]) z = plot_data.get(tuple(sorted(current_param_names)), []) z = np.reshape(z, (n_gridpoints, n_gridpoints)) # Scatter plot point scatter_point = { "x": [internal_params.values[x_pos]], "y": [internal_params.values[y_pos]], 
"z": [func_eval.internal_value(AggregationLevel.SCALAR)], } if subplot_projection.is_surface: fig = plot_surface( x, y, z, scatter_point, plot_kwargs, layout_kwargs ) else: fig = plot_contour( x, y, z, # type: ignore[arg-type] scatter_point, plot_kwargs, layout_kwargs, ) else: fig = go.Figure() plots[(i, j)] = fig if single_plot: break if single_plot: break if return_dict: return plots return combine_plots(plots, make_subplot_kwargs, layout_kwargs, expand_yrange) def generate_evaluation_points( # type: ignore[no-untyped-def] projection, selected, internal_params, params_data, converter ): """Create the list of parameter sets for function evaluation. This function generates all the points (parameter sets) that need to be evaluated by the criterion function to create the plots. It generates points for both univariate slices and, if applicable, bivariate grids. Args: projection (Projection): The processed projection configuration object. selected (NDArray[int]): Array of integer positions for the selected parameters. internal_params (InternalParams): An object holding the internal parameter representation (values, names, bounds). params_data (dict): A dictionary mapping parameter names to their grid values (np.linspace array). converter (Converter): The parameter converter object. Returns: list: A list of parameter pytrees. Each element is a full parameter set ready to be passed to the user's criterion function. 
""" evaluation_points = [] default_point = dict( zip(internal_params.names, internal_params.values, strict=False) ) for pos in selected: name = internal_params.names[pos] for value in params_data[name]: point = default_point.copy() point[name] = value values = np.array(list(point.values())) evaluation_points.append(converter.params_from_internal(values)) if projection.is_dict: for x_pos in selected: for y_pos in selected: if x_pos == y_pos: continue x_name = internal_params.names[x_pos] y_name = internal_params.names[y_pos] x_mesh, y_mesh = np.meshgrid(params_data[x_name], params_data[y_name]) for x_val, y_val in zip(x_mesh.ravel(), y_mesh.ravel(), strict=False): point = default_point.copy() point[x_name] = x_val point[y_name] = y_val values = np.array(list(point.values())) evaluation_points.append(converter.params_from_internal(values)) return evaluation_points def plot_data_cache( # type: ignore[no-untyped-def] projection, selected, internal_params, func_values, n_gridpoints ): """Caches and maps evaluated function values to their parameters. This function takes the flat array of criterion function outputs and maps them back to the parameters that generated them. The result is a dictionary where keys are tuples of parameter names and values are the corresponding function values. Args: projection (Projection): The processed projection configuration object. selected (NDArray[int]): Array of integer positions for the selected parameters. internal_params (InternalParams): An object holding the internal parameter representation. func_values (NDArray[float]): A flat numpy array containing the results from the batch evaluator. n_gridpoints (int): The number of grid points per parameter. Returns: dict: A dictionary mapping parameter name tuples to numpy arrays of function values. 
- For univariate plots: `{(param_name,): array([...])}` - For bivariate plots: `{(param_a, param_b): array([...])}` """ plot_data = {} func_values_idx = 0 for pos in selected: key = tuple( sorted( [ internal_params.names[pos], ] ) ) y = func_values[func_values_idx : func_values_idx + n_gridpoints] plot_data[key] = y func_values_idx += n_gridpoints if projection.is_dict: for x_pos in selected: for y_pos in selected: if x_pos == y_pos: continue key = tuple( sorted([internal_params.names[x_pos], internal_params.names[y_pos]]) ) plot_data[key] = func_values[ func_values_idx : func_values_idx + (n_gridpoints**2) ] func_values_idx += n_gridpoints**2 return plot_data def plot_line( # type: ignore[no-untyped-def] x: list[float], y: list[float], display_name: str, y_range: list[float], scatter_point, plot_kwargs, layout_kwargs, grid_univariate: bool, ) -> go.Figure: """Generate a 2D line plot with an overlayed scatter point. This function constructs a line plot for a univariate parameter slice and highlights the initial parameter's function value with a scatter marker. Args: x (list[float]): The parameter values for the x-axis. y (list[float]): The function values for the y-axis. display_name (str): The name of the parameter to be used as the x-axis title. y_range (list[float]): A list `[min, max]` defining the y-axis range. scatter_point (dict): A dictionary with "x" and "y" keys for the overlayed scatter marker. plot_kwargs (dict): A dictionary of trace-level customizations. layout_kwargs (dict): A dictionary of layout customizations. grid_univariate (bool): If `True`, this is a diagonal plot in a grid, and axis titles are omitted. Returns: go.Figure: A Plotly figure object containing the line plot. 
""" fig = px.line(x=x, y=y, **plot_kwargs["line_plot"]) if plot_kwargs["scatter_plot"] is not None: fig.add_trace( go.Scatter( x=scatter_point["x"], y=scatter_point["y"], **plot_kwargs["scatter_plot"], ) ) if layout_kwargs: fig.update_layout(**layout_kwargs) if not grid_univariate: fig.update_xaxes(title={"text": display_name}) fig.update_yaxes(title={"text": "Function Value"}, range=y_range) else: fig.update_xaxes(title=None) fig.update_yaxes(title=None, range=y_range) return fig def plot_surface( # type: ignore[no-untyped-def] x: NDArray[np.float64], y: NDArray[np.float64], z, scatter_point, plot_kwargs, layout_kwargs, ): """Create a 3D surface plot of the function over two parameters. This function constructs a 3D surface plot and highlights the initial parameter's function value with a 3D scatter marker. Args: x (NDArray[np.float64]): A meshgrid of x-axis parameter values. y (NDArray[np.float64]): A meshgrid of y-axis parameter values. z (NDArray[np.float64]): A 2D array of function values corresponding to the x-y grid. scatter_point (dict): A dictionary with "x", "y", and "z" keys for the overlayed 3D scatter marker. plot_kwargs (dict): A dictionary of trace-level customizations. layout_kwargs (dict): A dictionary of layout customizations. Returns: go.Figure: A Plotly figure object containing the surface plot. """ trace = go.Surface(z=z, x=x, y=y, **plot_kwargs["surface_plot"]) fig = go.Figure(data=[trace], layout=layout_kwargs) if plot_kwargs["scatter_plot"] is not None: fig.add_trace( go.Scatter3d( x=scatter_point["x"], y=scatter_point["y"], z=scatter_point["z"], **plot_kwargs["scatter_plot"], ) ) return fig def plot_contour( # type: ignore[no-untyped-def] x: NDArray[np.float64], y: NDArray[np.float64], z: list[float], scatter_point, plot_kwargs, layout_kwargs, ): """Create a 2D contour plot for function values over a parameter grid. This function constructs a 2D contour plot and highlights the initial parameter's function value with a scatter marker. 
Args: x (NDArray[np.float64]): A meshgrid of x-axis parameter values. y (NDArray[np.float64]): A meshgrid of y-axis parameter values. z (list[float]): A list of function values corresponding to the grid. scatter_point (dict): A dictionary with "x" and "y" keys for the overlayed scatter marker. plot_kwargs (dict): A dictionary of trace-level customizations. layout_kwargs (dict): A dictionary of layout customizations. Returns: go.Figure: A Plotly figure object containing the contour plot. """ trace = go.Contour( z=z, x=x[0], y=y[:, 0], coloraxis="coloraxis", **plot_kwargs["contour_plot"] ) fig = go.Figure(data=[trace], layout=layout_kwargs) if plot_kwargs["scatter_plot"] is not None: fig.add_trace( go.Scatter( x=scatter_point["x"], y=scatter_point["y"], **plot_kwargs["scatter_plot"], ) ) return fig class ProjectionConfig(str, Enum): """An Enum to validate and represent supported projection types.""" UNIVARIATE = "univariate" CONTOUR = "contour" SURFACE = "surface" @classmethod def validate(cls, value): # type: ignore[no-untyped-def] if value is None: return None if isinstance(value, str): value = value.lower() if value in cls._value2member_map_: return cls(value) raise ValueError(f"Invalid projection: '{value}'") raise TypeError(f"Expected str or None, got {type(value)}") @property def is_univariate(self) -> bool: return self == ProjectionConfig.UNIVARIATE @property def is_surface(self) -> bool: return self == ProjectionConfig.SURFACE @property def is_contour(self) -> bool: return self == ProjectionConfig.CONTOUR class Projection: """A helper class to parse the `projection` argument. This class handles parsing the `projection` argument, which can be a simple string (e.g., "univariate") or a dictionary (e.g., `{"lower": "contour", "upper": "surface"}`) for creating mixed-grid plots. 
""" def __init__(self, value): # type: ignore[no-untyped-def] self._univariate = False self.lower = None self.upper = None self._parse(value) def _parse(self, value): # type: ignore[no-untyped-def] if isinstance(value, str): value = value.lower() if value == ProjectionConfig.UNIVARIATE: self._univariate = True elif value in (ProjectionConfig.SURFACE, ProjectionConfig.CONTOUR): self.lower = ProjectionConfig.validate(value) self.upper = None else: raise ValueError(f"Invalid projection: '{value}'") elif isinstance(value, dict): self.lower = ProjectionConfig.validate(value.get("lower")) self.upper = ProjectionConfig.validate(value.get("upper")) else: raise TypeError( f"Invalid type for projection: {type(value)}. " "Must be a string or dict with 'lower' and 'upper' keys." ) @property def is_univariate(self) -> bool: return self._univariate @property def is_dict(self) -> bool: return not self._univariate def get_config(self): # type: ignore[no-untyped-def] if self._univariate: return ProjectionConfig.UNIVARIATE return {"lower": self.lower, "upper": self.upper} def compute_yaxis_range(y: list[float], expand_yrange: float) -> list[float]: # Calculate expanded y-axis limits based on data range y_min, y_max = np.min(y), np.max(y) y_range = y_max - y_min return [y_min - expand_yrange * y_range, y_max + expand_yrange * y_range] def combine_plots( # type: ignore[no-untyped-def] plots: dict[tuple[int, int], go.Figure], make_subplot_kwargs, layout_kwargs, expand_yrange: float, ) -> go.Figure: """Combine individual Plotly figures into a single subplot layout. This function merges traces from a dictionary of individual plots into a single `go.Figure` with a subplot grid. It handles axis sharing, range adjustments, and overall layout formatting. Args: plots (dict): A dictionary mapping `(row, col)` tuples to `go.Figure` objects. make_subplot_kwargs (dict): Keyword arguments for `make_subplots`. layout_kwargs (dict): Keyword arguments for the final layout update. 
expand_yrange (float): The expansion factor to apply to any shared y-axes. Returns: go.Figure: A single, combined Plotly Figure object. """ plots = deepcopy(plots) # --- NEW, SIMPLIFIED LOGIC FOR SINGLE PLOTS --- # If the plot grid is just 1x1, do not rebuild the figure. # Return the already correctly-scaled plot directly. if make_subplot_kwargs.get("rows") == 1 and make_subplot_kwargs.get("cols") == 1: # Extract the single figure from the plots dictionary. (row, col), fig = plots.popitem() # Apply final layout customizations like width and height. fig.update_layout(**layout_kwargs) # Get the correct titles for the x and y axes. # Note: A bug in title assignment is also fixed here. all_titles = make_subplot_kwargs.get("column_titles", ["", ""]) x_title = all_titles[0] y_title = all_titles[1] # Assign titles correctly depending on whether it's a 3D or 2D plot. if hasattr(fig.layout, "scene") and fig.layout.scene: scene_key = next(key for key in fig.layout if key.startswith("scene")) fig.layout[scene_key].xaxis.title = x_title fig.layout[scene_key].yaxis.title = y_title fig.layout[scene_key].zaxis.title = "Function Value" else: fig.update_xaxes(title_text=x_title) fig.update_yaxes(title_text=y_title) return fig # --- END OF NEW LOGIC --- # --- Original logic for creating a grid of subplots (for len(plots) > 1) --- fig = make_subplots(**make_subplot_kwargs) fig.update_layout(**layout_kwargs) for ann in fig.layout.annotations: if abs(ann.y - 1) < 1e-3: ann.update(y=-0.18 / make_subplot_kwargs["cols"]) elif abs(ann.x - 0.98) < 1e-3: ann.update(x=-0.18 / make_subplot_kwargs["rows"], textangle=270) shared_y = make_subplot_kwargs.get("shared_yaxes", False) shared_x = make_subplot_kwargs.get("shared_xaxes", False) all_y, all_x = [], [] for (row_idx, col_idx), subfig in plots.items(): for trace in subfig.data: fig.add_trace(trace, row=row_idx + 1, col=col_idx + 1) if shared_y and hasattr(trace, "y"): arr = np.array(trace.y) if arr.ndim > 0: all_y.append(arr) if shared_x and 
hasattr(trace, "x"): arr = np.array(trace.x) if arr.ndim > 0: all_x.append(arr) if hasattr(subfig.layout, "xaxis") and hasattr(subfig.layout.xaxis, "title"): fig.update_xaxes( title_text=subfig.layout.xaxis.title.text, row=row_idx + 1, col=col_idx + 1, ) if hasattr(subfig.layout, "yaxis") and hasattr(subfig.layout.yaxis, "title"): if shared_y: if col_idx == 0: fig.update_yaxes( title_text=subfig.layout.yaxis.title.text, row=row_idx + 1, col=col_idx + 1, ) else: fig.update_yaxes( title_text=subfig.layout.yaxis.title.text, row=row_idx + 1, col=col_idx + 1, ) if shared_y and all_y: y_range = compute_yaxis_range(np.concatenate(all_y).tolist(), expand_yrange) fig.update_yaxes(range=y_range) if shared_x and all_x: x_all = np.concatenate(all_x) fig.update_xaxes(range=[np.min(x_all), np.max(x_all)]) return fig def _get_subplot_spec( # type: ignore[no-untyped-def] i: int, j: int, projection, n_selected: int ) -> dict[str | None, str | None]: # Determine subplot spec type (xy, scene, contour) for a given subplot position. if i == j and n_selected != 2: return {"type": "xy"} projection_config = projection.get_config() if n_selected == 2: sub_projection = projection_config["lower"] else: sub_projection = ( projection_config["lower"] if i > j else projection_config["upper"] ) if sub_projection: if sub_projection.is_surface: return {"type": "scene"} elif sub_projection.is_contour: return {"type": "contour"} return {} def evaluate_plot_kwargs(plot_kwargs): # type: ignore[no-untyped-def] # Set default styling for plots if not provided by the user. 
if plot_kwargs is None: plot_kwargs = {} plot_kwargs_defaults = { "line_plot": { "color_discrete_sequence": ["#497ea7"], "markers": False, "template": PLOTLY_TEMPLATE, }, "scatter_plot": { "marker": {"color": "red", "size": 5}, }, "surface_plot": { "colorscale": "Aggrnyl", "showscale": False, "opacity": 0.8, }, "contour_plot": { "colorscale": "Aggrnyl", "showscale": True, # "line_smoothing": 0.85, }, } plot_kwargs_defaults.update(plot_kwargs) return plot_kwargs_defaults def evaluate_make_subplot_kwargs( # type: ignore[no-untyped-def] make_subplot_kwargs, n_selected: int, projection, titles: dict[str, str], ): # Set default parameters for make_subplots() if not provided by user. if make_subplot_kwargs is None: make_subplot_kwargs = {} if projection.is_dict and any(k in make_subplot_kwargs for k in ["rows", "cols"]): raise ValueError( f"`rows` and `cols` cannot be manually specified when projection is " f"{projection} is of grid type." ) if projection.is_univariate: cols = make_subplot_kwargs.get("cols", 1 if n_selected == 1 else 2) rows = (n_selected + cols - 1) // cols make_subplot_defaults = { "rows": rows, "cols": cols, "shared_xaxes": True, "shared_yaxes": True, } else: rows = cols = n_selected if n_selected > 2 else 1 specs = [] for i in range(rows): specs_row = [] for j in range(cols): specs_row.append(_get_subplot_spec(i, j, projection, n_selected)) specs.append(specs_row) make_subplot_defaults = { "rows": rows, "cols": cols, "specs": specs, "row_titles": list(titles.values()), "column_titles": list(titles.values()), } make_subplot_defaults.update( { "horizontal_spacing": 1 / (make_subplot_defaults["cols"] * 5), "vertical_spacing": (1 / max(make_subplot_defaults["rows"] - 1, 1)) / 5, } ) make_subplot_defaults.update(make_subplot_kwargs) return make_subplot_defaults # mypy: disable-error-code="dict-item" def evaluate_layout_kwargs( # type: ignore[no-untyped-def] layout_kwargs, projection, subplot_config, ): # Set default parameters for update_layout() if not 
provided by user. # Default camera view default_scene_camera_view = dict(x=2, y=2, z=0.5) if layout_kwargs is None: layout_kwargs = {} layout_defaults = {} if subplot_config.get("rows", 0) > 1 or subplot_config.get("cols", 0) > 1: width = 300 * subplot_config.get("cols", 0) height = 300 * subplot_config.get("rows", 0) else: width = 450 height = 450 if projection.is_dict: scene_layout = {} scene_counter = 0 template = "plotly" rows = subplot_config.get("rows", 0) cols = subplot_config.get("cols", 0) scene_layout["coloraxis"] = {"colorscale": "aggrnyl"} if "specs" in subplot_config: specs = subplot_config["specs"] for i in range(rows): for j in range(cols): if "type" in specs[i][j] and specs[i][j]["type"] == "scene": scene_counter += 1 scene_id = f"scene{scene_counter}" scene_layout[f"{scene_id}"] = { "camera": {"eye": default_scene_camera_view}, "xaxis": dict(title="", nticks=5), "yaxis": dict(title="", nticks=5), "zaxis": dict(title="", nticks=5), } layout_defaults.update(scene_layout) else: template = PLOTLY_TEMPLATE layout_defaults.update( { "width": width, "height": height, "template": template, "showlegend": False, } ) layout_defaults.update(layout_kwargs) return layout_defaults ================================================ FILE: tests/__init__.py ================================================ ================================================ FILE: tests/conftest.py ================================================ import os import pandas as pd import pytest import statsmodels.api as sm from optimagic.config import IS_MATPLOTLIB_INSTALLED @pytest.fixture(autouse=True) def fresh_directory(tmp_path): # noqa: PT004 """Each test is executed in a fresh directory.""" os.chdir(tmp_path) @pytest.fixture() def logit_inputs(): spector_data = sm.datasets.spector.load_pandas() spector_data.exog = sm.add_constant(spector_data.exog) x_df = sm.add_constant(spector_data.exog) out = { "y": spector_data.endog, "x": x_df.to_numpy(), "params": pd.DataFrame([-10, 2, 0.2, 2], 
index=x_df.columns, columns=["value"]), } return out @pytest.fixture() def logit_object(): spector_data = sm.datasets.spector.load_pandas() spector_data.exog = sm.add_constant(spector_data.exog) logit_mod = sm.Logit(spector_data.endog, spector_data.exog) return logit_mod @pytest.fixture() def close_mpl_figures(): """Close all matplotlib figures after test execution.""" yield if IS_MATPLOTLIB_INSTALLED: import matplotlib.pyplot as plt plt.close("all") ================================================ FILE: tests/estimagic/__init__.py ================================================ ================================================ FILE: tests/estimagic/examples/test_logit.py ================================================ """Tests for the logit example.""" from numpy.testing import assert_array_almost_equal as aaae from estimagic.examples.logit import logit_grad, logit_hess, logit_jac, logit_loglike def test_logit_loglikes(logit_inputs, logit_object): x = logit_inputs["params"]["value"].to_numpy() expected = logit_object.loglikeobs(x) got = logit_loglike(**logit_inputs) aaae(got, expected) def test_logit_jac(logit_inputs, logit_object): x = logit_inputs["params"]["value"].to_numpy() expected = logit_object.score_obs(x) got = logit_jac(**logit_inputs) aaae(got, expected) def test_logit_grad(logit_inputs, logit_object): x = logit_inputs["params"]["value"].to_numpy() expected = logit_object.score(x) calculated = logit_grad(**logit_inputs) aaae(calculated, expected) def test_logit_hessian(logit_inputs, logit_object): x = logit_inputs["params"]["value"].to_numpy() expected = logit_object.hessian(x) got = logit_hess(**logit_inputs) aaae(got, expected) ================================================ FILE: tests/estimagic/test_bootstrap.py ================================================ import numpy as np import pandas as pd import pytest import seaborn as sns import statsmodels.api as sm from estimagic import bootstrap def aaae(obj1, obj2, decimal=6): arr1 = 
def aaae(obj1, obj2, decimal=6):
    # Array-compare helper that first coerces both inputs (Series, DataFrame,
    # lists, ...) to numpy arrays.
    arr1 = np.asarray(obj1)
    arr2 = np.asarray(obj2)
    np.testing.assert_array_almost_equal(arr1, arr2, decimal=decimal)


@pytest.fixture()
def setup():
    # Small input data set plus bootstrap estimates in three container types.
    out = {}
    out["df"] = pd.DataFrame(
        np.array([[1, 10], [2, 7], [3, 6], [4, 5]]), columns=["x1", "x2"]
    )
    y = np.array([[2.0, 8.0], [2.0, 8.0], [2.5, 7.0], [3.0, 6.0], [3.25, 5.75]])
    out["estimates_arr"] = y
    out["estimates_df"] = pd.DataFrame(y, columns=["x1", "x2"])
    out["estimates_dict"] = {"x1": [2, 2, 2.5, 3, 3.25], "x2": [8, 8, 7, 6, 5.75]}
    return out


@pytest.fixture()
def expected():
    # Regression values for summary, covariance and confidence intervals.
    out = {}
    summary = np.array(
        [
            [2.5, 0.576222, 1.5, 3.5, np.nan, np.nan],
            [7.0, 0.956896, 5.5, 9.0, np.nan, np.nan],
        ]
    )
    cov = np.array([[0.332032, -0.528158], [-0.528158, 0.915651]])
    p_values = np.array([0.0, 0.0])
    ci_lower = np.array([1.5, 5.5])
    ci_upper = np.array([3.5, 9.0])
    out["summary"] = pd.DataFrame(
        summary,
        columns=["value", "standard_error", "ci_lower", "ci_upper", "p_value", "stars"],
        index=["x1", "x2"],
    )
    out["ci_lower"] = pd.Series(ci_lower, index=["x1", "x2"])
    out["ci_upper"] = pd.Series(ci_upper, index=["x1", "x2"])
    out["ci_lower_x1"] = pd.Series(ci_lower[0], index=["x1"])
    out["ci_upper_x1"] = pd.Series(ci_upper[0], index=["x1"])
    out["cov"] = pd.DataFrame(cov, columns=["x1", "x2"], index=["x1", "x2"])
    out["se"] = pd.Series(np.sqrt(np.diagonal(cov)), index=["x1", "x2"])
    out["p_values"] = pd.Series(p_values, index=["x1", "x2"])
    out["p_value_x1"] = pd.Series(p_values[0], index=["x1"])
    return out


@pytest.fixture()
def seaborn_example():
    # Real data set (seaborn "exercise") plus expected OLS bootstrap CIs.
    out = {}
    raw = sns.load_dataset("exercise", index_col=0)
    replacements = {"1 min": 1, "15 min": 15, "30 min": 30}
    df = raw.assign(time=raw.time.cat.rename_categories(replacements).astype(int))
    df["constant"] = 1
    lower_ci = pd.Series([90.709236, 0.151193], index=["constant", "time"])
    upper_ci = pd.Series([96.827145, 0.627507], index=["constant", "time"])
    expected = {"lower_ci": lower_ci, "upper_ci": upper_ci}
    out["df"] = df
    out["expected"] = expected
    return out


def _outcome_func(data, shift=0):
    """Compute column means.

    Args:
        data (pd.Series or pd.DataFrame): The data set.
        shift (float): Scalar that is added to the column means.

    Returns:
        pd.Series: Series where the k-th row corresponds to the mean of the
            k-th column of the input data.

    """
    # Return pd.Series when .mean() is applied to a Series
    # Only applying .mean() to a pd.Series would yield a float
    return pd.DataFrame(data).mean(axis=0) + shift


def _outcome_ols(data):
    # OLS of pulse on a constant and time; returns the coefficient Series.
    y = data["pulse"]
    x = data[["constant", "time"]]
    params = sm.OLS(y, x).fit().params
    return params


@pytest.mark.parametrize("shift", [0, 10, -10])
def test_bootstrap_with_outcome_kwargs(shift, setup):
    result = bootstrap(
        outcome=_outcome_func,
        data=setup["df"],
        seed=123,
        outcome_kwargs={"shift": shift},
    )
    expected = pd.Series([2.5, 7.0], index=["x1", "x2"])
    aaae(result.base_outcome, expected + shift)


def test_bootstrap_existing_outcomes(setup):
    result = bootstrap(
        data=setup["df"],
        outcome=_outcome_func,
        n_draws=3,
    )
    assert len(result.outcomes) == 3
    result = bootstrap(
        outcome=_outcome_func,
        data=setup["df"],
        existing_result=result,
        n_draws=2,
    )
    # n_draws of the second call overrides the existing result's draws.
    assert len(result.outcomes) == 2


def test_bootstrap_from_outcomes(setup, expected):
    result = bootstrap(outcome=_outcome_func, data=setup["df"], seed=1234)
    _ = result.outcomes
    summary = result.summary()
    ci_lower, ci_upper = result.ci()
    covariance = result.cov()
    standard_errors = result.se()
    # p-values are not implemented yet; accessing them must raise.
    with pytest.raises(NotImplementedError):
        assert result._p_values
    aaae(ci_lower, expected["ci_lower"])
    aaae(ci_upper, expected["ci_upper"])
    aaae(covariance, expected["cov"])
    aaae(standard_errors, expected["se"])
    aaae(summary["value"], expected["summary"]["value"])
    aaae(summary["standard_error"], expected["summary"]["standard_error"])
    aaae(summary["ci_lower"], expected["summary"]["ci_lower"])
    aaae(summary["ci_upper"], expected["summary"]["ci_upper"])


def test_bootstrap_from_outcomes_private_methods(setup, expected):
    result = bootstrap(outcome=_outcome_func, data=setup["df"], seed=1234)
    _ = result.outcomes
    ci_lower, ci_upper = result._ci
    covariance = result._cov
    standard_errors = result._se
    with pytest.raises(NotImplementedError):
        assert result._p_values
    aaae(ci_lower, expected["ci_lower"])
    aaae(ci_upper, expected["ci_upper"])
    aaae(covariance, expected["cov"])
    aaae(standard_errors, expected["se"])


def test_bootstrap_from_outcomes_single_outcome(setup, expected):
    # A single pd.Series as data still yields Series-valued CIs.
    result = bootstrap(outcome=_outcome_func, data=setup["df"]["x1"], seed=1234)
    _ = result.outcomes
    ci_lower, ci_upper = result.ci()
    aaae(ci_lower, expected["ci_lower_x1"])
    aaae(ci_upper, expected["ci_upper_x1"])


def test_outcome_not_callable(setup):
    expected_msg = "outcome must be a callable."
    with pytest.raises(TypeError) as error:
        assert bootstrap(data=setup["df"], outcome=setup["estimates_df"])
    assert str(error.value) == expected_msg


@pytest.mark.parametrize("input_type", ["arr", "df", "dict"])
def test_existing_result_wrong_input_type(input_type, setup):
    expected_msg = "existing_result must be None or a BootstrapResult."
    with pytest.raises(ValueError) as error:
        assert bootstrap(
            outcome=_outcome_func,
            data=setup["df"],
            existing_result=setup["estimates_" + input_type],
        )
    assert str(error.value) == expected_msg


@pytest.mark.parametrize("return_type", ["array", "dataframe", "pytree"])
def test_cov_correct_return_type(return_type, setup):
    # Smoke test: every documented return_type must be accepted.
    result = bootstrap(
        outcome=_outcome_func,
        data=setup["df"],
    )
    _ = result.cov(return_type=return_type)


def test_cov_wrong_return_type(setup):
    result = bootstrap(
        outcome=_outcome_func,
        data=setup["df"],
    )
    expected_msg = "return_type must be one of pytree, array, or dataframe, not dict."
    with pytest.raises(ValueError) as error:
        assert result.cov(return_type="dict")
    assert str(error.value) == expected_msg


def test_existing_result(seaborn_example):
    first_result = bootstrap(
        data=seaborn_example["df"], outcome=_outcome_ols, seed=1234
    )
    expected_msg = "existing_result must be None or a BootstrapResult."
    with pytest.raises(ValueError) as error:
        assert bootstrap(
            data=seaborn_example["df"],
            outcome=_outcome_ols,
            existing_result=first_result.outcomes,
        )
    assert str(error.value) == expected_msg
    my_result = bootstrap(
        data=seaborn_example["df"],
        outcome=_outcome_ols,
        existing_result=first_result,
        seed=2,
    )
    lower_ci, upper_ci = my_result.ci(ci_method="t")
    aaae(lower_ci, seaborn_example["expected"]["lower_ci"])
    aaae(upper_ci, seaborn_example["expected"]["upper_ci"])
import itertools

import numpy as np
import pandas as pd
import pytest
from pybaum import tree_just_flatten

from estimagic.bootstrap_ci import calculate_ci, check_inputs
from estimagic.bootstrap_samples import get_bootstrap_indices
from optimagic.parameters.tree_registry import get_registry
from optimagic.utilities import get_rng


def aaae(obj1, obj2, decimal=6):
    # Array-compare helper that first coerces both inputs to numpy arrays.
    arr1 = np.asarray(obj1)
    arr2 = np.asarray(obj2)
    np.testing.assert_array_almost_equal(arr1, arr2, decimal=decimal)


@pytest.fixture()
def setup():
    out = {}
    out["df"] = pd.DataFrame(
        np.array([[1, 10], [2, 7], [3, 6], [4, 5]]), columns=["x1", "x2"]
    )
    out["estimates"] = np.array(
        [[2.0, 8.0], [2.0, 8.0], [2.5, 7.0], [3.0, 6.0], [3.25, 5.75]]
    )
    return out


@pytest.fixture()
def expected():
    # Regression values for each supported ci_method; columns are
    # [lower, upper] per parameter.
    out = {}
    out["percentile_ci"] = np.array([[2, 3.225], [5.775, 8.0]])
    out["normal_ci"] = np.array(
        [
            [1.5006105396891194, 3.499389460310881],
            [5.130313521781885, 8.869686478218114],
        ]
    )
    out["basic_ci"] = np.array([[1.775, 3.0], [6.0, 8.225]])
    out["bc_ci"] = np.array([[2, 3.2342835077057543], [5.877526959881923, 8]])
    out["t_ci"] = np.array([[1.775, 3], [6.0, 8.225]])
    return out


def _outcome_fun_series(data):
    return data.mean(axis=0)


def _outcome_func_dict(data):
    return data.mean(axis=0).to_dict()


def _outcome_func_arr(data):
    return np.array(data.mean(axis=0))


# All outcome container types crossed with every ci_method.
TEST_CASES = itertools.product(
    [_outcome_fun_series, _outcome_func_dict, _outcome_func_arr],
    ["percentile", "normal", "basic", "bc", "t"],
)


@pytest.mark.parametrize("outcome, method", TEST_CASES)
def test_ci(outcome, method, setup, expected):
    registry = get_registry(extended=True)

    def outcome_flat(data):
        # Flatten pytree outcomes (Series/dict/array) to a plain list.
        return tree_just_flatten(outcome(data), registry=registry)

    base_outcome = outcome_flat(setup["df"])
    lower, upper = calculate_ci(base_outcome, setup["estimates"], ci_method=method)
    aaae(lower, expected[method + "_ci"][:, 0])
    aaae(upper, expected[method + "_ci"][:, 1])


def test_check_inputs_data():
    data = "this is not a data frame"
    expected_msg = "Data must be a pandas.DataFrame or pandas.Series."
    with pytest.raises(TypeError) as error:
        check_inputs(data=data)
    assert str(error.value) == expected_msg


def test_check_inputs_weight_by(setup):
    expected_error_msg = "Input 'weight_by' must be None or a column name of 'data'."
    with pytest.raises(ValueError, match=expected_error_msg):
        check_inputs(data=setup["df"], weight_by="this is not a column name of df")


def test_get_bootstrap_indices_heterogeneous_weights():
    data = pd.DataFrame(
        {"id": [0, 1], "w_homogenous": [0.5, 0.5], "w_heterogenous": [0.1, 0.9]}
    )
    res_homogenous = get_bootstrap_indices(
        data, weight_by="w_homogenous", n_draws=1_000, rng=get_rng(seed=0)
    )
    res_heterogenous = get_bootstrap_indices(
        data, weight_by="w_heterogenous", n_draws=1_000, rng=get_rng(seed=0)
    )
    # Given the weights, the first sample mean should be close to 0.5,
    # while the second one should be close to 0.9
    assert np.mean(res_homogenous) < 0.75 < np.mean(res_heterogenous)


def test_check_inputs_cluster_by(setup):
    cluster_by = "this is not a column name of df"
    expected_msg = "Input 'cluster_by' must be None or a column name of 'data'."
    with pytest.raises(ValueError) as error:
        check_inputs(data=setup["df"], cluster_by=cluster_by)
    assert str(error.value) == expected_msg


def test_check_inputs_ci_method(setup):
    ci_method = 4
    expected_msg = (
        "ci_method must be 'percentile', 'bc',"
        f" 't', 'basic' or 'normal', '{ci_method}'"
        f" was supplied"
    )
    with pytest.raises(ValueError) as error:
        check_inputs(data=setup["df"], ci_method=ci_method)
    assert str(error.value) == expected_msg


def test_check_inputs_ci_level(setup):
    ci_level = 666
    expected_msg = "Input 'ci_level' must be in [0,1]."
    with pytest.raises(ValueError) as error:
        check_inputs(data=setup["df"], ci_level=ci_level)
    assert str(error.value) == expected_msg
import functools

import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_array_almost_equal as aaae

from estimagic.bootstrap_outcomes import (
    _get_bootstrap_outcomes_from_indices,
    get_bootstrap_outcomes,
)
from optimagic.batch_evaluators import joblib_batch_evaluator
from optimagic.utilities import get_rng


@pytest.fixture()
def data():
    df = pd.DataFrame([[1, 10], [2, 7], [3, 6], [4, 5]], columns=["x1", "x2"])
    return df


def _mean_return_series(data):
    out = np.mean(data, axis=0)
    return out


def _mean_return_dict(data):
    out = np.mean(data, axis=0)
    return out.to_dict()


def _mean_return_array(data):
    out = np.mean(data, axis=0).to_numpy()
    return out


@pytest.mark.parametrize(
    "outcome",
    [
        (functools.partial(np.mean, axis=0)),
        (_mean_return_series),
        (_mean_return_dict),
        (_mean_return_array),
    ],
)
def test_get_bootstrap_estimates_runs(outcome, data):
    # Smoke test: each outcome container type must be accepted.
    rng = get_rng(seed=1234)
    get_bootstrap_outcomes(
        data=data,
        outcome=outcome,
        rng=rng,
        n_draws=5,
    )


def test_bootstrap_estimates_from_indices_without_errors(data):
    # Fixed indices make the expected column means deterministic.
    calculated = _get_bootstrap_outcomes_from_indices(
        indices=[np.array([1, 3]), np.array([0, 2])],
        data=data,
        outcome=functools.partial(np.mean, axis=0),
        n_cores=1,
        error_handling="raise",
        batch_evaluator=joblib_batch_evaluator,
    )
    expected = [[3.0, 6.0], [2, 8]]
    aaae(calculated, expected)


def test_get_bootstrap_estimates_with_error_and_raise(data):
    rng = get_rng(seed=1234)

    def _raise_assertion_error(data):  # noqa: ARG001
        raise AssertionError()

    with pytest.raises(AssertionError):
        get_bootstrap_outcomes(
            data=data,
            outcome=_raise_assertion_error,
            rng=rng,
            n_draws=2,
            error_handling="raise",
        )


def test_get_bootstrap_estimates_with_all_errors_and_continue(data):
    rng = get_rng(seed=1234)

    def _raise_assertion_error(data):  # noqa: ARG001
        raise AssertionError()

    # With error_handling="continue" a warning is emitted first; once every
    # draw has failed, a RuntimeError is raised.
    with pytest.warns(UserWarning):
        with pytest.raises(RuntimeError):
            get_bootstrap_outcomes(
                data=data,
                outcome=_raise_assertion_error,
                rng=rng,
                n_draws=2,
                error_handling="continue",
            )


def test_get_bootstrap_estimates_with_some_errors_and_continue(data):
    rng = get_rng(seed=1234)

    def _raise_assertion_error_sometimes(data):
        # Fails for roughly half of the draws.
        assert rng.uniform() > 0.5
        return data.mean()

    with pytest.warns(UserWarning):
        res_flat = get_bootstrap_outcomes(
            data=data,
            outcome=_raise_assertion_error_sometimes,
            rng=rng,
            n_draws=100,
            error_handling="continue",
        )
    assert 30 <= len(res_flat) <= 70
def test_get_bootstrap_indices_radomization_works_with_clustering(data):
    """Two clustered draws should differ from each other."""
    rng = get_rng(seed=12345)
    draws = get_bootstrap_indices(data, cluster_by="hh", n_draws=2, rng=rng)
    assert set(draws[0]) != set(draws[1])


def test_get_bootstrap_indices_randomization_works_with_weights(data):
    """Two weighted draws should differ from each other."""
    rng = get_rng(seed=12345)
    draws = get_bootstrap_indices(data, weight_by="weights", n_draws=2, rng=rng)
    assert set(draws[0]) != set(draws[1])


def test_get_bootstrap_indices_randomization_works_with_weights_and_clustering(data):
    """Two weighted and clustered draws should differ from each other."""
    rng = get_rng(seed=12345)
    draws = get_bootstrap_indices(
        data, weight_by="weights", cluster_by="hh", n_draws=2, rng=rng
    )
    assert set(draws[0]) != set(draws[1])


def test_get_bootstrap_indices_randomization_works_with_and_without_weights(data):
    """Weighting must change the draw even with identical seeds."""
    unweighted = get_bootstrap_indices(data, n_draws=1, rng=get_rng(seed=12345))
    weighted = get_bootstrap_indices(
        data, weight_by="weights", n_draws=1, rng=get_rng(seed=12345)
    )
    assert not np.array_equal(unweighted, weighted)


def test_get_boostrap_indices_randomization_works_with_extreme_case(data):
    """If all weight sits on one observation, only that observation is drawn."""
    weights = np.zeros(900)
    weights[0] = 1.0
    data["weights"] = weights
    draw = get_bootstrap_indices(
        data, weight_by="weights", n_draws=1, rng=get_rng(seed=12345)
    )
    assert len(np.unique(draw)) == 1


def test_clustering_leaves_households_intact(data):
    """Every sampled household must appear with all of its original members."""
    rng = get_rng(seed=12345)
    indices = get_bootstrap_indices(data, cluster_by="hh", n_draws=1, rng=rng)[0]
    sampled = data.iloc[indices]
    for hh_id in sampled["hh"].unique():
        expected_ids = set(data[data["hh"] == hh_id]["id"].unique())
        actual_ids = set(sampled[sampled["hh"] == hh_id]["id"].unique())
        assert expected_ids == actual_ids
def test_get_bootstrap_samples_from_indices():
    """Index lists are translated into the corresponding data subsets."""
    indices = [np.array([0, 1])]
    data = pd.DataFrame(np.arange(6).reshape(3, 2))
    expected = pd.DataFrame(np.arange(4).reshape(2, 2))
    calculated = _get_bootstrap_samples_from_indices(data, indices)[0]
    afe(calculated, expected)


def test_get_bootstrap_samples_runs(data):
    """Smoke test: drawing bootstrap samples does not raise."""
    rng = get_rng(seed=12345)
    get_bootstrap_samples(data, n_draws=2, rng=rng)


@pytest.fixture
def sample_data():
    """Four observations in two clusters with increasing weights."""
    return pd.DataFrame({"weight": [1, 2, 3, 4], "cluster": ["A", "A", "B", "B"]})


def test_no_weights_no_clusters(sample_data):
    """Without weights there is nothing to normalize, so None is returned."""
    result = _calculate_bootstrap_indices_weights(sample_data, None, None)
    assert result is None


def test_weights_no_clusters(sample_data):
    """Weights are normalized to probabilities over observations."""
    result = _calculate_bootstrap_indices_weights(sample_data, "weight", None)
    expected = pd.Series([0.1, 0.2, 0.3, 0.4], index=sample_data.index, name="weight")
    # FIX (consistency): use the module-wide `ase` alias instead of spelling
    # out pd.testing.assert_series_equal, matching every other test here.
    ase(result, expected)


def test_weights_and_clusters(sample_data):
    """With clusters, weights are summed per cluster before normalizing."""
    result = _calculate_bootstrap_indices_weights(sample_data, "weight", "cluster")
    expected = pd.Series(
        [0.3, 0.7], index=pd.Index(["A", "B"], name="cluster"), name="weight"
    )
    ase(result, expected)


def test_invalid_weight_column():
    """A missing weight column must raise a KeyError."""
    data = pd.DataFrame({"x": [1, 2, 3]})
    with pytest.raises(KeyError):
        _calculate_bootstrap_indices_weights(data, "weight", None)


def test_invalid_cluster_column(sample_data):
    """A missing cluster column must raise a KeyError."""
    with pytest.raises(KeyError):
        _calculate_bootstrap_indices_weights(sample_data, "weight", "invalid_cluster")


def test_empty_dataframe():
    """An empty frame without weights yields None."""
    empty_df = pd.DataFrame()
    result = _calculate_bootstrap_indices_weights(empty_df, None, None)
    assert result is None
def aaae(obj1, obj2, decimal=3):
    """Assert element-wise near-equality after coercing both inputs to arrays."""
    np.testing.assert_array_almost_equal(
        np.asarray(obj1), np.asarray(obj2), decimal=decimal
    )


@pytest.fixture()
def multivariate_normal_example():
    """Start params, true params and loglike kwargs for a 3d normal model."""
    # true parameters
    true_mean = np.arange(1, 4)
    true_cov = np.diag(np.arange(1, 4))

    # simulate 10.000 random samples
    data = multivariate_normal(mean=true_mean, cov=true_cov).rvs(size=10_000)

    params = {"mean": np.ones(3), "cov": np.diag(np.ones(3))}
    true_params = {"mean": true_mean, "cov": true_cov}
    return params, true_params, {"data": data}
p["cov"][np.tril_indices(3)]), ] results = estimate_ml( loglike=multivariate_normal_loglike, params=params, loglike_kwargs=loglike_kwargs, optimize_options="scipy_lbfgsb", constraints=constraints, ) aaae(results.params["mean"], true_params["mean"], decimal=1) aaae(results.params["cov"], true_params["cov"], decimal=1) # test free_mask of summary expected_msg = ( "seed is set to None and constraints are transforming. " "This leads to randomness in the result. To avoid random behavior, " "choose a non-None seed." ) with pytest.warns(UserWarning, match=expected_msg): summary = results.summary() assert np.all(summary["mean"]["free"].values == np.array([False, True, True])) assert np.all(summary["cov"]["free"].values) # ====================================================================================== # Test case using Logit model # ====================================================================================== @pytest.fixture() def logit_np_inputs(): spector_data = sm.datasets.spector.load_pandas() spector_data.exog = sm.add_constant(spector_data.exog) x_df = sm.add_constant(spector_data.exog) out = { "y": spector_data.endog, "x": x_df.to_numpy(), "params": np.array([-10, 2, 0.2, 2]), } return out @pytest.fixture() def fitted_logit_model(logit_object): """We need to use a generic model class to access all standard errors etc.""" class GenericLogit(GenericLikelihoodModel): def nloglikeobs(self, params, *args, **kwargs): return -logit_object.loglikeobs(params, *args, **kwargs) generic_logit = GenericLogit(logit_object.endog, logit_object.exog) return generic_logit.fit() test_cases = list( itertools.product( [ {"algorithm": "scipy_lbfgsb"}, "scipy_lbfgsb", { "algorithm": "scipy_lbfgsb", "fun_and_jac": scalar_logit_fun_and_jac, }, ], [None, logit_jac, False], [None, logit_hess, False], ) ) test_cases = [ case for case in test_cases if not (case[1] is False and case[2] is False) ] @pytest.mark.parametrize("optimize_options, jacobian, hessian", test_cases) def 
test_estimate_ml_with_logit_no_constraints( fitted_logit_model, logit_np_inputs, optimize_options, jacobian, hessian, ): """Test that estimate_ml computes correct params and covariances under different scenarios. """ # ================================================================================== # estimate # ================================================================================== kwargs = {"y": logit_np_inputs["y"], "x": logit_np_inputs["x"]} if "fun_and_jac" in optimize_options: optimize_options["fun_and_jac_kwargs"] = kwargs got = estimate_ml( loglike=logit_loglike, params=logit_np_inputs["params"], loglike_kwargs=kwargs, optimize_options=optimize_options, jacobian=jacobian, jacobian_kwargs=kwargs, hessian=hessian, hessian_kwargs=kwargs, ) # ================================================================================== # test # ================================================================================== exp = fitted_logit_model if jacobian is not False and hessian is not False: methods = ["jacobian", "hessian", "robust"] elif jacobian is not False: methods = ["jacobian"] elif hessian is not False: methods = ["hessian"] statsmodels_suffix_map = { "jacobian": "jac", "hessian": "", "robust": "jhj", } # compare estimated parameters aaae(got.params, exp.params, decimal=4) for method in methods: # compare estimated standard errors exp_se = getattr(exp, f"bse{statsmodels_suffix_map[method]}") got_se = got.se(method=method) aaae(got_se, exp_se, decimal=3) # compare estimated confidence interval if method == "hessian": lower, upper = got.ci(method=method) exp_lower = exp.conf_int().T[0] exp_upper = exp.conf_int().T[1] aaae(lower, exp_lower, decimal=3) aaae(upper, exp_upper, decimal=3) # compare covariance if method == "hessian": aaae(got.cov(method=method), exp.cov_params(), decimal=3) elif method == "robust": aaae(got.cov(method=method), exp.covjhj, decimal=2) elif method == "jacobian": aaae(got.cov(method=method), exp.covjac, decimal=4) summary 
= got.summary(method=method) aaae(summary["value"], exp.params, decimal=4) aaae(summary["standard_error"], got.se(method=method)) lower, upper = got.ci(method=method) aaae(summary["ci_lower"], lower) aaae(summary["ci_upper"], upper) aaae(summary["p_value"], got.p_values(method=method)) if "jacobian" in methods: aaae(got._se, got.se()) aaae(got._ci[0], got.ci()[0]) aaae(got._ci[1], got.ci()[1]) aaae(got._p_values, got.p_values()) test_cases_constr = list( itertools.product( [None, logit_jac], # jacobian [ om.FlatCovConstraint(selector=lambda x: x[[1, 2, 3]]), om.LinearConstraint( selector=lambda x: x[[0, 1]], lower_bound=-20, weights=1 ), om.IncreasingConstraint(selector=lambda x: x[[0, 1]]), ], ) ) @pytest.mark.parametrize("jacobian, constraints", test_cases_constr) def test_estimate_ml_with_logit_constraints( fitted_logit_model, logit_np_inputs, jacobian, constraints, ): """Test that estimate_ml computes correct params and standard errors under different scenarios with constraints. """ seed = 1234 # ================================================================================== # estimate # ================================================================================== kwargs = {"y": logit_np_inputs["y"], "x": logit_np_inputs["x"]} optimize_options = { "algorithm": "scipy_lbfgsb", "algo_options": {"convergence.ftol_rel": 1e-12}, } if "fun_and_jac" in optimize_options: optimize_options["fun_and_jac_kwargs"] = kwargs got = estimate_ml( loglike=logit_loglike, params=logit_np_inputs["params"], loglike_kwargs=kwargs, optimize_options=optimize_options, jacobian=jacobian, jacobian_kwargs=kwargs, constraints=constraints, ) # ================================================================================== # test # ================================================================================== exp = fitted_logit_model methods = ["jacobian", "hessian", "robust"] statsmodels_suffix_map = { "jacobian": "jac", "hessian": "", "robust": "jhj", } # compare estimated 
parameters aaae(got.params, exp.params, decimal=3) for method in methods: # compare estimated standard errors exp_se = getattr(exp, f"bse{statsmodels_suffix_map[method]}") got_se = got.se(method=method, seed=seed) corr = np.corrcoef(got_se, exp_se) aaae(corr, np.ones_like(corr), decimal=4) # compare estimated confidence interval if method == "hessian": lower, upper = got.ci(method=method, seed=seed) exp_lower = exp.conf_int().T[0] exp_upper = exp.conf_int().T[1] corr_lower = np.corrcoef(lower, exp_lower) corr_upper = np.corrcoef(upper, exp_upper) aaae(corr_lower, np.ones_like(corr), decimal=4) aaae(corr_upper, np.ones_like(corr), decimal=4) summary = got.summary(method=method, seed=seed) aaae(summary["value"], exp.params, decimal=3) aaae(summary["standard_error"], got.se(method=method, seed=seed)) lower, upper = got.ci(method=method, seed=seed) aaae(summary["ci_lower"], lower) aaae(summary["ci_upper"], upper) aaae(summary["p_value"], got.p_values(method=method, seed=seed)) def test_estimate_ml_optimize_options_false(fitted_logit_model, logit_np_inputs): """Test that estimate_ml computes correct covariances given correct params.""" kwargs = {"y": logit_np_inputs["y"], "x": logit_np_inputs["x"]} params = pd.DataFrame({"value": fitted_logit_model.params}) got = estimate_ml( loglike=logit_loglike, params=params, loglike_kwargs=kwargs, optimize_options=False, ) summary = got.summary() # compare estimated parameters aaae(summary["value"], fitted_logit_model.params, decimal=4) # compare estimated standard errors aaae(summary["standard_error"], fitted_logit_model.bsejac, decimal=3) # compare covariance (if not robust case) aaae(got.cov(method="jacobian"), fitted_logit_model.covjac, decimal=4) def test_estimate_ml_algorithm_type(logit_np_inputs): """Test that estimate_ml computes correct covariances given correct params.""" kwargs = {"y": logit_np_inputs["y"], "x": logit_np_inputs["x"]} params = pd.DataFrame({"value": logit_np_inputs["params"]}) estimate_ml( 
loglike=logit_loglike, params=params, loglike_kwargs=kwargs, optimize_options=scipy_optimizers.ScipyLBFGSB, ) def test_estimate_ml_algorithm(logit_np_inputs): """Test that estimate_ml computes correct covariances given correct params.""" kwargs = {"y": logit_np_inputs["y"], "x": logit_np_inputs["x"]} params = pd.DataFrame({"value": logit_np_inputs["params"]}) estimate_ml( loglike=logit_loglike, params=params, loglike_kwargs=kwargs, optimize_options=scipy_optimizers.ScipyLBFGSB(stopping_maxfun=10), ) # ====================================================================================== # Univariate normal case using dict params # ====================================================================================== @mark.likelihood def normal_loglike(params, y): return sp.stats.norm.logpdf(y, loc=params["mean"], scale=params["sd"]) @pytest.fixture() def normal_inputs(): true = { "mean": 1.0, "sd": 1.0, } rng = np.random.default_rng(12345) y = rng.normal(loc=true["mean"], scale=true["sd"], size=10_000) return {"true": true, "y": y} def test_estimate_ml_general_pytree(normal_inputs): # ================================================================================== # estimate # ================================================================================== kwargs = {"y": normal_inputs["y"]} start_params = {"mean": 5, "sd": 3} got = estimate_ml( loglike=normal_loglike, params=start_params, loglike_kwargs=kwargs, optimize_options="scipy_lbfgsb", bounds=Bounds(lower={"sd": 0.0001}), jacobian_kwargs=kwargs, constraints=om.FlatSDCorrConstraint(selector=lambda p: p["sd"]), ) # ================================================================================== # test # ================================================================================== true = normal_inputs["true"] assert ( np.abs(true["mean"] - got.summary(method="jacobian")["mean"]["value"][0]) < 1e-1 ) assert np.abs(true["sd"] - got.summary(method="jacobian")["sd"]["value"][0]) < 1e-1 def 
test_to_pickle(normal_inputs, tmp_path): kwargs = {"y": normal_inputs["y"]} start_params = {"mean": 5, "sd": 3} got = estimate_ml( loglike=normal_loglike, params=start_params, loglike_kwargs=kwargs, optimize_options="scipy_lbfgsb", bounds=Bounds(lower={"sd": 0.0001}), jacobian_kwargs=kwargs, constraints=om.FlatSDCorrConstraint(selector=lambda p: p["sd"]), ) got.to_pickle(tmp_path / "bla.pkl") def test_caching(normal_inputs): kwargs = {"y": normal_inputs["y"]} start_params = {"mean": 5, "sd": 3} got = estimate_ml( loglike=normal_loglike, params=start_params, loglike_kwargs=kwargs, optimize_options="scipy_lbfgsb", bounds=Bounds(lower={"sd": 0.0001}), jacobian_kwargs=kwargs, constraints=om.FlatSDCorrConstraint(selector=lambda p: p["sd"]), ) assert got._cache == {} cov = got.cov(method="robust", return_type="array") assert got._cache == {} cov = got.cov(method="robust", return_type="array", seed=0) assert_array_equal(list(got._cache.values())[0], cov) ================================================ FILE: tests/estimagic/test_estimate_msm.py ================================================ """Most test exploit the special case where simulate_moments just returns parameters.""" import itertools import numpy as np import pandas as pd import pytest from numpy.testing import assert_array_almost_equal as aaae from numpy.testing import assert_array_equal from estimagic.estimate_msm import estimate_msm from optimagic.optimization.optimize_result import OptimizeResult from optimagic.optimizers import scipy_optimizers from optimagic.shared.check_option_dicts import ( check_optimization_options, ) def _sim_pd(params): return pd.Series(params) def _sim_np(params): return params def _sim_dict_pd(params): return {"simulated_moments": pd.Series(params), "other": "bla"} def _sim_dict_np(params): return {"simulated_moments": params, "other": "bla"} cov_np = np.diag([1, 2, 3.0]) cov_pd = pd.DataFrame(cov_np) test_cases = list( itertools.product( [_sim_pd, _sim_np, _sim_dict_pd, 
@pytest.mark.parametrize("simulate_moments, moments_cov, optimize_options", test_cases)
def test_estimate_msm(simulate_moments, moments_cov, optimize_options):
    """End-to-end check of estimate_msm in the identity-moments special case.

    Because simulate_moments just returns the parameters and moments_cov is
    diagonal, the estimated params are zero and the parameter covariance equals
    moments_cov.
    """
    start = np.array([3, 2, 1])
    target = np.zeros(3)

    # abuse simulate_moments to get empirical moments in correct format
    emp_moments = simulate_moments(target)
    if isinstance(emp_moments, dict):
        emp_moments = emp_moments["simulated_moments"]

    got = estimate_msm(
        simulate_moments=simulate_moments,
        empirical_moments=emp_moments,
        moments_cov=moments_cov,
        params=start,
        optimize_options=optimize_options,
    )

    # the minimization must recover the zero vector
    aaae(got.params, target)

    # the underlying optimization result is attached and typed correctly
    assert isinstance(got.optimize_result, OptimizeResult)

    got_cov = got.cov()
    if isinstance(got_cov, pd.DataFrame):
        got_cov = got_cov.to_numpy()

    # valid only because moments_cov is diagonal and the jacobian is identity
    aaae(got_cov, np.diag([1, 2, 3]))
    aaae(got.se(), np.sqrt([1, 2, 3]))

    # p-values are one because the point estimates are exactly zero
    aaae(got.p_values(), np.ones(3))

    ci_upper = np.array([1.95996398, 2.77180765, 3.3947572])
    lower, upper = got.ci()
    aaae(lower, -ci_upper)
    aaae(upper, ci_upper)

    # cached private values agree with freshly computed ones
    aaae(got.ci(), got._ci)
    aaae(got.p_values(), got._p_values)
    aaae(got.se(), got._se)
    aaae(got.cov(), got._cov)

    summary = got.summary()
    aaae(summary["value"], np.zeros(3))
    aaae(summary["p_value"], np.ones(3))
    assert summary["stars"].tolist() == [""] * 3
def test_check_and_process_optimize_options_with_invalid_entries():
    """Unknown entries in optimize_options must raise a ValueError."""
    with pytest.raises(ValueError):
        check_optimization_options({"criterion": lambda x: x}, "estimate_msm")


ls_test_cases = list(
    itertools.product(
        [_sim_pd, _sim_np, _sim_dict_pd, _sim_dict_np],  # simulate_moments
        [cov_np, cov_pd],  # moments_cov
        [{"algorithm": "pounders"}, "pounders"],  # optimize_options
    )
)


@pytest.mark.parametrize(
    "simulate_moments, moments_cov, optimize_options", ls_test_cases
)
def test_estimate_msm_ls(simulate_moments, moments_cov, optimize_options):
    """estimate_msm recovers the true params with a least-squares optimizer."""
    start = np.array([3, 2, 1])
    target = np.zeros(3)

    # abuse simulate_moments to get empirical moments in correct format
    emp_moments = simulate_moments(target)
    if isinstance(emp_moments, dict):
        emp_moments = emp_moments["simulated_moments"]

    got = estimate_msm(
        simulate_moments=simulate_moments,
        empirical_moments=emp_moments,
        moments_cov=moments_cov,
        params=start,
        optimize_options=optimize_options,
    )
    aaae(got.params, target)


def test_estimate_msm_with_jacobian():
    """A user-supplied identity jacobian reproduces moments_cov as params cov."""
    got = estimate_msm(
        simulate_moments=_sim_np,
        empirical_moments=_sim_np(np.zeros(3)),
        moments_cov=cov_np,
        params=np.array([3, 2, 1]),
        optimize_options="scipy_lbfgsb",
        jacobian=lambda x: np.eye(len(x)),
    )
    aaae(got.params, np.zeros(3))
    aaae(got.cov(), cov_np)


def test_estimate_msm_with_algorithm_type():
    """optimize_options may be an Algorithm class."""
    estimate_msm(
        simulate_moments=_sim_np,
        empirical_moments=_sim_np(np.zeros(3)),
        moments_cov=cov_np,
        params=np.array([3, 2, 1]),
        optimize_options=scipy_optimizers.ScipyLBFGSB,
        jacobian=lambda x: np.eye(len(x)),
    )


def test_estimate_msm_with_algorithm():
    """optimize_options may be a configured Algorithm instance."""
    estimate_msm(
        simulate_moments=_sim_np,
        empirical_moments=_sim_np(np.zeros(3)),
        moments_cov=cov_np,
        params=np.array([3, 2, 1]),
        optimize_options=scipy_optimizers.ScipyLBFGSB(stopping_maxfun=10),
        jacobian=lambda x: np.eye(len(x)),
    )


def test_to_pickle(tmp_path):
    """Estimation results can be pickled to disk."""
    result = estimate_msm(
        simulate_moments=_sim_np,
        empirical_moments=_sim_np(np.zeros(3)),
        moments_cov=cov_np,
        params=np.array([3, 2, 1]),
        optimize_options="scipy_lbfgsb",
    )
    result.to_pickle(tmp_path / "bla.pkl")


def test_caching():
    """cov results are cached only when a seed is supplied."""
    got = estimate_msm(
        simulate_moments=_sim_np,
        empirical_moments=_sim_np(np.zeros(3)),
        moments_cov=cov_np,
        params=np.array([3, 2, 1]),
        optimize_options="scipy_lbfgsb",
    )
    assert got._cache == {}
    # without a seed nothing is cached
    cov = got.cov(method="robust", return_type="array")
    assert got._cache == {}
    # with a seed the computed covariance lands in the cache
    cov = got.cov(method="robust", return_type="array", seed=0)
    assert_array_equal(list(got._cache.values())[0], cov)
numpy.testing import assert_array_almost_equal as aaae from pybaum import tree_just_flatten from estimagic.estimate_msm import estimate_msm from optimagic.parameters.tree_registry import get_registry def test_estimate_msm_dict_params_and_moments(): def simulate_moments(params): return {k * 2: v for k, v in params.items()} start_params = {"a": 3, "b": 2, "c": 1} expected_params = {"a": 0, "b": 0, "c": 0} empirical_moments = {"aa": 0, "bb": 0, "cc": 0} moments_cov = { "aa": {"aa": 1, "bb": 0, "cc": 0}, "bb": {"aa": 0, "bb": 2, "cc": 0}, "cc": {"aa": 0, "bb": 0, "cc": 3}, } calculated = estimate_msm( simulate_moments=simulate_moments, empirical_moments=empirical_moments, moments_cov=moments_cov, params=start_params, optimize_options="scipy_lbfgsb", ) # check that minimization works assert_almost_equal(calculated.params, expected_params) # this works only in the very special case with diagonal moments cov and # jac = identity matrix assert_almost_equal(calculated.cov(), moments_cov) assert_almost_equal(calculated.se(), {"a": 1, "b": np.sqrt(2), "c": np.sqrt(3)}) # works only because parameter point estimates are exactly zero assert_almost_equal(calculated.p_values(), {"a": 1, "b": 1, "c": 1}) expected_ci_upper = {"a": 1.95996398, "b": 2.77180765, "c": 3.3947572} expected_ci_lower = {k: -v for k, v in expected_ci_upper.items()} lower, upper = calculated.ci() assert_almost_equal(lower, expected_ci_lower) assert_almost_equal(upper, expected_ci_upper) assert_almost_equal(calculated.ci(), calculated._ci) assert_almost_equal(calculated.p_values(), calculated._p_values) assert_almost_equal(calculated.se(), calculated._se) assert_almost_equal(calculated.cov(), calculated._cov) summary = calculated.summary() summary_df = pd.concat(list(summary.values())) aaae(summary_df["value"], np.zeros(3)) aaae(summary_df["p_value"], np.ones(3)) assert summary_df["stars"].tolist() == [""] * 3 expected_sensitivity_to_bias_dict = { "a": {"aa": -1.0, "bb": 0.0, "cc": 0.0}, "b": {"aa": 0.0, 
def assert_almost_equal(x, y, decimal=6):
    """Assert that two (possibly nested) objects are numerically almost equal.

    Arrays are compared directly; any other pytree (dicts, scalars, ...) is
    flattened to a 1d array first.

    Args:
        x: First object (ndarray or pytree).
        y: Second object, same structure as x.
        decimal: Number of decimals for the comparison.
    """
    if isinstance(x, np.ndarray):
        x_flat = x
        y_flat = y
    else:
        registry = get_registry(extended=True)
        x_flat = np.array(tree_just_flatten(x, registry=registry))
        # BUG FIX: this line previously flattened `x` again, so the pytree
        # branch compared x against itself and the assertion was vacuous.
        y_flat = np.array(tree_just_flatten(y, registry=registry))
    aaae(x_flat, y_flat, decimal=decimal)
_get_models_multiindex(): df = pd.DataFrame( data=np.ones((3, 4)), columns=["value", "ci_lower", "ci_upper", "p_value"] ) df.index = pd.MultiIndex.from_tuples( [("p_1", "v_1"), ("p_1", "v_2"), ("p_2", "v_2")] ) info = {"n_obs": 400} mod1 = {"params": df, "info": info, "name": "m1"} mod2 = {"params": df, "info": info, "name": "m2"} models = [mod1, mod2] return models def _get_models_single_index(): df = pd.DataFrame( data=np.ones((3, 4)), columns=["value", "ci_lower", "ci_upper", "p_value"] ) df.index = [f"p{i}" for i in [1, 2, 3]] info = {"n_obs": 400} mod1 = {"params": df, "info": info, "name": "m1"} mod2 = {"params": df, "info": info, "name": "m2"} models = [mod1, mod2] return models def _get_models_multiindex_multi_column(): df = pd.DataFrame( data=np.ones((3, 4)), columns=["value", "ci_lower", "ci_upper", "p_value"] ) df.index = pd.MultiIndex.from_tuples( [("p_1", "v_1"), ("p_1", "v_2"), ("p_2", "v_2")] ) info = {"n_obs": 400} mod1 = {"params": df.iloc[1:], "info": info, "name": "m1"} mod2 = {"params": df, "info": info, "name": "m2"} mod3 = {"params": df, "info": info, "name": "m2"} models = [mod1, mod2, mod3] return models def _read_csv_string(string, index_cols=None): string = textwrap.dedent(string) return pd.read_csv(io.StringIO(string), index_col=index_cols) # ====================================================================================== # Tests # ====================================================================================== # test process_model for different model types fix_path = EXAMPLE_DIR / "diabetes.csv" df_ = pd.read_csv(fix_path, index_col=0) est = sm.OLS(endog=df_["target"], exog=sm.add_constant(df_[df_.columns[0:4]])).fit() est1 = sm.OLS(endog=df_["target"], exog=sm.add_constant(df_[df_.columns[0:5]])).fit() def test_estimation_table(): models = [est] res = estimation_table(models, return_type="render_inputs", append_notes=False) exp = {} body = """ index,target const,152.00$^{*** }$ ,(2.85) Age,37.20$^{ }$ ,(64.10) 
Sex,-107.00$^{* }$ ,(62.10) BMI,787.00$^{*** }$ ,(65.40) ABP,417.00$^{*** }$ ,(69.50) """ exp["body"] = _read_csv_string(body).fillna("") exp["body"].set_index("index", inplace=True) footer_str = """ ,target R$^2$,0.40 Adj. R$^2$,0.40 Residual Std. Error,60 F Statistic,72.90$^{***}$ Observations,442 """ exp["footer"] = _read_csv_string(footer_str).fillna("") exp["footer"].set_index(" ", inplace=True) exp["footer"].index.names = [None] exp["footer"].index = pd.MultiIndex.from_arrays([exp["footer"].index]) afe(exp["footer"].sort_index(), res["footer"].sort_index()) afe(exp["body"], res["body"], check_index_type=False) MODELS = [ _get_models_multiindex(), _get_models_single_index(), _get_models_multiindex_multi_column(), ] PARAMETRIZATION = [("latex", render_latex, models) for models in MODELS] PARAMETRIZATION += [("html", render_html, models) for models in MODELS] @pytest.mark.parametrize("return_type, render_func,models", PARAMETRIZATION) def test_one_and_stage_rendering_are_equal(return_type, render_func, models): first_stage = estimation_table( models, return_type="render_inputs", confidence_intervals=True ) second_stage = render_func(siunitx_warning=False, **first_stage) one_stage = estimation_table( models, return_type=return_type, siunitx_warning=False, confidence_intervals=True, ) assert one_stage == second_stage def test_process_model_stats_model(): params = pd.DataFrame( columns=["value", "p_value", "standard_error", "ci_lower", "ci_upper"], index=["const", "Age", "Sex", "BMI", "ABP"], ) params["value"] = [152.133484, 37.241211, -106.577520, 787.179313, 416.673772] params["p_value"] = [ 2.048808e-193, 5.616557e-01, 8.695658e-02, 5.345260e-29, 4.245663e-09, ] params["standard_error"] = [2.852749, 64.117433, 62.125062, 65.424126, 69.494666] params["ci_lower"] = [146.526671, -88.775663, -228.678572, 658.594255, 280.088446] params["ci_upper"] = [157.740298, 163.258084, 15.523532, 915.764371, 553.259097] info = {} info["rsquared"] = 0.40026108237714 
info["rsquared_adj"] = 0.39477148130050055 info["fvalue"] = 72.91259907398705 info["f_pvalue"] = 2.700722880950139e-47 info["df_model"] = 4.0 info["df_resid"] = 437.0 info["resid_std_err"] = 59.97560860753488 info["n_obs"] = 442.0 res = _process_model(est) afe(res["params"], params) ase(pd.Series(res["info"]), pd.Series(info)) assert res["name"] == "target" # test convert_model_to_series for different arguments def test_convert_model_to_series_with_ci(): df = pd.DataFrame( np.array( [[0.6, 2.3, 3.3], [0.11, 0.049, 0.009], [0.6, 2.3, 3.3], [1.2, 3.3, 4.33]] ).T, columns=["value", "p_value", "ci_lower", "ci_upper"], index=["a", "b", "c"], ).astype("str") df["p_value"] = df["p_value"].astype("float") significance_levels = [0.1, 0.05, 0.01] show_stars = True res = _convert_frame_to_string_series(df, significance_levels, show_stars) exp = pd.Series( [ "0.6$^{ }$", r"(0.6;1.2)", "2.3$^{** }$", r"(2.3;3.3)", "3.3$^{*** }$", r"(3.3;4.33)", ], index=["a", "", "b", "", "c", ""], name="", ) exp.index.name = "index" ase(exp, res) def test_convert_model_to_series_with_se(): df = pd.DataFrame( np.array([[0.6, 2.3, 3.3], [0.11, 0.049, 0.009], [0.6, 2.3, 3.3]]).T, columns=["value", "p_value", "standard_error"], index=["a", "b", "c"], ).astype("str") df["p_value"] = df["p_value"].astype("float") significance_levels = [0.1, 0.05, 0.01] show_stars = True res = _convert_frame_to_string_series(df, significance_levels, show_stars) exp = pd.Series( ["0.6$^{ }$", "(0.6)", "2.3$^{** }$", "(2.3)", "3.3$^{*** }$", "(3.3)"], index=["a", "", "b", "", "c", ""], name="", ) exp.index.name = "index" ase(exp, res) def test_convert_model_to_series_without_inference(): df = pd.DataFrame( np.array([[0.6, 2.3, 3.3], [0.11, 0.049, 0.009]]).T, columns=["value", "p_value"], index=["a", "b", "c"], ).astype("str") df["p_value"] = df["p_value"].astype("float") significance_levels = [0.1, 0.05, 0.01] show_stars = True res = _convert_frame_to_string_series(df, significance_levels, show_stars) exp = pd.Series( 
["0.6$^{ }$", "2.3$^{** }$", "3.3$^{*** }$"], index=["a", "b", "c"], name="" ) ase(exp, res) # test create stat series def test_create_statistics_sr(): df = pd.DataFrame(np.empty((10, 3)), columns=["a", "b", "c"]) df.index = pd.MultiIndex.from_arrays(np.array([np.arange(10), np.arange(10)])) info = {"rsquared": 0.45, "n_obs": 400, "rsquared_adj": 0.0002} number_format = ("{0:.3g}", "{0:.5f}", "{0:.4g}") add_trailing_zeros = True sig_levels = [0.1, 0.2] show_stars = False model = {"params": df, "info": info, "name": "target"} stats_options = { "n_obs": "Observations", "rsquared": "R2", "rsquared_adj": "R2 Adj.", } res = _create_statistics_sr( model, stats_options, sig_levels, show_stars, number_format, add_trailing_zeros, max_trail=4, ) exp = pd.Series(["0.4500", "0.0002", "400"]) exp.index = pd.MultiIndex.from_arrays( np.array([np.array(["R2", "R2 Adj.", "Observations"]), np.array(["", "", ""])]) ) ase(exp.sort_index(), res.sort_index()) # test _process_frame_axes for different arguments def test_process_frame_indices_index(): df = pd.DataFrame(np.ones((3, 3)), columns=["", "", ""]) df.index = pd.MultiIndex.from_arrays( np.array([["today", "today", "today"], ["var1", "var2", "var3"]]) ) df.index.names = ["l1", "l2"] par_name_map = {"today": "tomorrow", "var1": "1stvar"} index_name_map = ["period", "variable"] column_names = list("abc") res = _process_frame_indices( df, custom_param_names=par_name_map, custom_index_names=index_name_map, column_names=column_names, show_col_names=True, show_col_groups=False, column_groups=None, ) # expected: params = """ period,variable,a,b,c tomorrow,1stvar,1,1,1 tomorrow,var2,1,1,1 tomorrow,var3,1,1,1 """ exp = _read_csv_string(params).fillna("") exp.set_index(["period", "variable"], inplace=True) afe(res, exp, check_dtype=False) def test_process_frame_indices_columns(): df = pd.DataFrame(np.ones((3, 3)), columns=["", "", ""]) col_names = list("abc") col_groups = ["first", "first", "second"] res = _process_frame_indices( df=df, 
custom_index_names=None, custom_param_names=None, show_col_groups=True, show_col_names=True, column_names=col_names, column_groups=col_groups, ) arrays = [np.array(col_groups), np.array(col_names)] exp = pd.DataFrame(data=np.ones((3, 3)), columns=arrays) afe(res, exp, check_dtype=False) def test_apply_number_format_tuple(): number_format = ("{0:.2g}", "{0:.2f}", "{0:.2g}") raw = pd.DataFrame(data=[1234.2332, 0.0001]) exp = pd.DataFrame(data=["1.2e+03", "0"]) res = _apply_number_format( df_raw=raw, number_format=number_format, format_integers=False ) afe(exp, res) def test_apply_number_format_int(): number_format = 3 raw = pd.DataFrame(data=["1234.2332", "1.2e+03"]) exp = pd.DataFrame(data=["1234.233", "1200"]) res = _apply_number_format( df_raw=raw, number_format=number_format, format_integers=False ) afe(exp, res) def test_apply_number_format_callable(): def nsf(num, n=3): """N-Significant Figures.""" numstr = ("{0:.%ie}" % (n - 1)).format(num) return numstr raw = pd.DataFrame(data=[1234.2332, 0.0001]) exp = pd.DataFrame(data=["1.23e+03", "1.00e-04"]) res = _apply_number_format(df_raw=raw, number_format=nsf, format_integers=False) afe(exp, res) def test_get_digits_after_decimal(): df = pd.DataFrame( data=[["12.456", "0.00003", "1.23e+05"], ["16", "0.03", "1.2e+05"]] ).T exp = 5 res = _get_digits_after_decimal(df) assert exp == res def test_create_group_to_col_position(): col_groups = [ "a_name", "a_name", "a_name", "second_name", "second_name", "third_name", ] exp = {"a_name": [0, 1, 2], "second_name": [3, 4], "third_name": [5]} res = _create_group_to_col_position(col_groups) assert exp == res def test_get_model_names(): m1 = {"params": None, "info": None, "name": "a_name"} m3 = {"params": None, "info": None, "name": None} m5 = {"params": None, "info": None, "name": "third_name"} models = [m1, m3, m5] res = _get_model_names(models) exp = ["a_name", "(2)", "third_name"] assert res == exp def test_get_default_column_names_and_groups(): model_names = ["a_name", 
"a_name", "(3)", "(4)", "third_name"] res_names, res_groups = _get_default_column_names_and_groups(model_names) exp_names = [f"({i + 1})" for i in range(len(model_names))] exp_groups = ["a_name", "a_name", "(3)", "(4)", "third_name"] assert res_names == exp_names assert res_groups == exp_groups def test_get_default_column_names_and_groups_undefined_groups(): model_names = ["a_name", "second_name", "(3)", "(4)", "third_name"] res_names, res_groups = _get_default_column_names_and_groups(model_names) exp_names = model_names assert res_names == exp_names assert pd.isna(res_groups) def test_customize_col_groups(): default = ["a_name", "a_name", "(3)", "(4)", "third_name"] mapping = {"a_name": "first_name", "third_name": "fifth_name"} exp = ["first_name", "first_name", "(3)", "(4)", "fifth_name"] res = _customize_col_groups(default, mapping) assert exp == res def test_customize_col_names_dict(): default = list("abcde") custom = {"a": "1", "c": "3", "e": "5"} res = _customize_col_names(default_col_names=default, custom_col_names=custom) exp = ["1", "b", "3", "d", "5"] assert exp == res def test_customize_col_names_list(): default = list("abcde") custom = list("12345") res = _customize_col_names(default_col_names=default, custom_col_names=custom) exp = ["1", "2", "3", "4", "5"] assert exp == res def test_get_params_frames_with_common_index(): m1 = { "params": pd.DataFrame(np.ones(5), index=list("abcde")), "info": None, "name": None, } m2 = { "params": pd.DataFrame(np.ones(3), index=list("abc")), "info": None, "name": None, } res = _get_params_frames_with_common_index([m1, m2]) exp = [ pd.DataFrame(np.ones(5), index=list("abcde")), pd.DataFrame( np.concatenate([np.ones(3), np.ones(2) * np.nan]), index=list("abcde") ), ] afe(res[0], exp[0]) afe(res[1], exp[1]) def test_get_params_frames_with_common_index_multiindex(): mi = pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1), ("b", 2), ("b", 3)]) m1 = {"params": pd.DataFrame(np.ones(5), index=mi), "info": None, "name": 
None} m2 = {"params": pd.DataFrame(np.ones(3), index=mi[:3]), "info": None, "name": None} res = _get_params_frames_with_common_index([m1, m2]) exp = [ pd.DataFrame(np.ones(5), index=mi), pd.DataFrame(np.concatenate([np.ones(3), np.ones(2) * np.nan]), index=mi), ] afe(res[0], exp[0]) afe(res[1], exp[1]) def test_check_order_of_model_names_raises_error(): model_names = ["a", "b", "a"] with pytest.raises(ValueError): _check_order_of_model_names(model_names) def test_manual_extra_info(): footer_str = """ ,target R$^2$,0.40 Adj. R$^2$,0.40 Residual Std. Error,60.5 F Statistic,72.90$^{***}$ Observations,442 Controls,Yes """ footer = _read_csv_string(footer_str).fillna("") footer.set_index(" ", inplace=True) footer.index.names = [None] footer.index = pd.MultiIndex.from_arrays([footer.index]) exp = footer.copy(deep=True) exp.loc["Controls"] = "\\multicolumn{1}{c}{Yes}" exp.loc["Observations"] = "\\multicolumn{1}{c}{442}" for i, r in footer.iterrows(): res = _center_align_integers_and_non_numeric_strings(r) ase(exp.loc[i], res) ================================================ FILE: tests/estimagic/test_lollipop_plot.py ================================================ import numpy as np import pandas as pd from estimagic.lollipop_plot import lollipop_plot def test_lollipop_plot_runs(): df = pd.DataFrame( np.arange(12).reshape(4, 3), index=pd.MultiIndex.from_tuples([(0, "a"), ("b", 1), ("a", "b"), (2, 3)]), columns=["a", "b", "c"], ) for grid in [True, False]: lollipop_plot(df, combine_plots_in_grid=grid) ================================================ FILE: tests/estimagic/test_ml_covs.py ================================================ from itertools import product from pathlib import Path import numpy as np import pandas as pd import pytest from numpy.testing import assert_array_almost_equal as aaae from estimagic import ml_covs from estimagic.ml_covs import ( _clustering, _sandwich_step, _stratification, cov_cluster_robust, cov_hessian, cov_jacobian, cov_robust, 
cov_strata_robust, ) @pytest.fixture() def jac(): _jac = np.array( [ [0.017986, 0.089931, 0, 0.035972], [0.0024726, 0.014836, 0.0024726, 0.0098905], [0.0009111, 0.002733, 0, 0.009111], [-0.993307, -4.966536, 0, -3.973229], [0.119203, 0.238406, 0, 0.119203], ] ) return _jac @pytest.fixture() def hess(): _hess = np.array( [ [-0.132681, -0.349071, -0.002467, -0.185879], [-0.349071, -1.124730, -0.014799, -0.606078], [-0.002467, -0.014799, -0.002467, -0.009866], [-0.185879, -0.606078, -0.009866, -0.412500], ] ) return _hess @pytest.fixture() def design_options(): df = pd.DataFrame( data=[ [164, 88, 0.116953], [562, 24, 0.174999], [459, 71, 0.374608], [113, 25, 0.369494], [311, 63, 0.203738], ], columns=["psu", "strata", "weight"], ) return df def test_clustering(jac, design_options): calculated = _clustering(jac, design_options) expected = np.array( [ [1.251498, 6.204213, 0.000008, 4.951907], [6.204213, 30.914541, 0.000046, 24.706263], [0.000008, 0.000046, 0.000008, 0.000031], [4.951907, 24.706263, 0.000031, 19.752791], ] ) np.allclose(calculated, expected) def test_stratification(jac, design_options): calculated = _stratification(jac, design_options) expected = np.array( [ [1.0012, 4.963, 0.000006, 3.9615], [4.9634, 24.732, 0.000037, 19.765], [0.000006, 0.000037, 0.000006, 0.000024], [3.961525, 19.76501, 0.000024, 15.8022], ] ) np.allclose(calculated, expected) def test_sandwich_step(hess): calculated = _sandwich_step(hess, meat=np.ones((4, 4))) expected = np.array( [ [5194.925, -1876.241, 36395.846, -279.962], [-1876.2415, 677.638707, -13145.02087, 101.11338], [36395.8461, -13145.0208, 254990.7081, -1961.4250], [-279.962055, 101.113381, -1961.425002, 15.087562], ] ) np.allclose(calculated, expected) def test_cov_robust(jac, hess): calculated = cov_robust(jac, hess) expected = np.array( [ [911.67667, -172.809772, 2264.15098415, -534.7422541], [-172.809772, 32.823296, -429.142924, 101.253230], [2264.150984, -429.142924, 5647.129400, -1333.791658], [-534.742254, 
101.253230, -1333.791658, 315.253633], ] ) np.allclose(calculated, expected) def test_cov_cluster_robust(jac, hess, design_options): calculated = cov_cluster_robust( jac, hess, design_options, ) expected = np.array( [ [911.411, -172.753, 2264.03, -534.648], [-172.753, 32.8104, -429.901, 101.228], [2263.03, -428.901, 5643, -1333.24], [-534.648, 101.228, -1333.24, 315.225], ] ) np.allclose(calculated, expected) def test_cov_strata_robust(jac, hess, design_options): calculated = cov_strata_robust( jac, hess, design_options, ) expected = np.array( [ [729.153, -138.203, 1810.42, -427.719], [-138.203, 26.2483, -343.121, 80.9828], [1810.42, -343.121, 4514.4, -1066.59], [-427.719, 80.9828, -1066.59, 252.18], ] ) np.allclose(calculated, expected) def test_cov_hessian(hess): calculated = cov_hessian(hess) expected = np.array( [ [44.7392, -14.563, 41.659, 0.2407], [-14.56307, 9.01046, -14.14055, -6.3383], [41.65906, -14.14055, 487.09343, -9.645899], [0.240678, -6.338334, -9.645898, 11.859284], ] ) np.allclose(calculated, expected) def test_cov_jacobian(jac): calculated = cov_jacobian(jac) expected = np.array( [ [937.03508, -780.893, 781.1802, 741.8099], [-780.893, 749.9739, -749.918, -742.28097], [781.1802, -749.918045, 164316.58829, 741.88592], [741.8099, -742.280970, 741.8859, 742.520006], ] ) np.allclose(calculated, expected) FIX_PATH = Path(__file__).resolve().parent / "pickled_statsmodels_ml_covs" def get_expected_covariance(model, cov_method): """Load expected covariance matrix. Args: model (str): one of ['logit', 'probit'] cov_method (str): one of ['jacobian', 'hessian', 'robust'] Returns: expected_covariance """ _name = cov_method if cov_method != "robust" else "sandwich" fix_name = f"{model}_{_name}.pickle" expected_cov = pd.read_pickle(FIX_PATH / fix_name) return expected_cov def get_input(model, input_types): """Load the inputs. 
Args: model (str): one of ['logit', 'probit'] input_types (list): can contain the elements 'jacobian' and 'hessian' Returns: inputs (dict): The inputs for the covariance function """ inputs = {} for typ in input_types: fix_name = f"{model}_{typ}_matrix.pickle" input_matrix = pd.read_pickle(FIX_PATH / fix_name) inputs[typ] = input_matrix short_names = {"jacobian": "jac", "hessian": "hess"} inputs = {short_names[key]: val for key, val in inputs.items()} return inputs models = ["probit", "logit"] methods = ["jacobian", "hessian", "robust"] test_cases = list(product(models, methods)) @pytest.mark.parametrize("model, method", test_cases) def test_cov_function_against_statsmodels(model, method): expected = get_expected_covariance(model, method) if method in ["jacobian", "hessian"]: input_types = [method] elif method == "robust": input_types = ["jacobian", "hessian"] inputs = get_input(model, input_types) calculated = getattr(ml_covs, f"cov_{method}")(**inputs) aaae(calculated, expected) ================================================ FILE: tests/estimagic/test_msm_covs.py ================================================ import itertools import numpy as np import pandas as pd import pytest from numpy.testing import assert_array_almost_equal as aaae from pandas.testing import assert_frame_equal from estimagic.msm_covs import cov_optimal, cov_robust from optimagic.utilities import get_rng rng = get_rng(seed=1234) jac_np = rng.uniform(size=(10, 5)) jac_pd = pd.DataFrame(jac_np) moments_cov_np = rng.uniform(size=(10, 10)) + np.eye(10) * 2.5 moments_cov_pd = pd.DataFrame(moments_cov_np) test_cases = itertools.product([jac_np, jac_pd], [moments_cov_np, moments_cov_pd]) @pytest.mark.parametrize("jac, moments_cov", test_cases) def test_cov_robust_and_cov_optimal_are_equivalent_in_special_case(jac, moments_cov): weights = np.linalg.inv(moments_cov) if isinstance(moments_cov, pd.DataFrame): weights = pd.DataFrame( weights, index=moments_cov.index, columns=moments_cov.columns ) 
sandwich = cov_robust(jac, weights, moments_cov) optimal = cov_optimal(jac, weights) if isinstance(sandwich, pd.DataFrame): assert_frame_equal(sandwich, optimal) else: aaae(sandwich, optimal) ================================================ FILE: tests/estimagic/test_msm_sensitivity.py ================================================ import numpy as np import pandas as pd import pytest from numpy.testing import assert_array_almost_equal as aaae from scipy import stats from estimagic.config import EXAMPLE_DIR from estimagic.msm_covs import cov_optimal from estimagic.msm_sensitivity import ( calculate_actual_sensitivity_to_noise, calculate_actual_sensitivity_to_removal, calculate_fundamental_sensitivity_to_noise, calculate_fundamental_sensitivity_to_removal, calculate_sensitivity_to_bias, calculate_sensitivity_to_weighting, ) from optimagic.differentiation.derivatives import first_derivative def simulate_aggregated_moments(params, x, y): """Calculate aggregated moments for example from Honore, DePaula, Jorgensen.""" mom_value = simulate_moment_contributions(params, x, y) moments = mom_value.mean(axis=1) return moments def simulate_moment_contributions(params, x, y): """Calculate moment contributions for example from Honore, DePaula, Jorgensen.""" y_estimated = x.to_numpy() @ (params["value"].to_numpy()) x_np = x.T.to_numpy() residual = y.T.to_numpy() - stats.norm.cdf(y_estimated) mom_value = [] length = len(x_np) for i in range(length): for j in range(i, length): moment = residual * x_np[i] * x_np[j] mom_value.append(moment) mom_value = np.stack(mom_value, axis=1)[0] mom_value = pd.DataFrame(data=mom_value) return mom_value @pytest.fixture() def moments_cov(params, func_kwargs): mom_value = simulate_moment_contributions(params, **func_kwargs) mom_value = mom_value.to_numpy() s = np.cov(mom_value, ddof=0) return s @pytest.fixture() def params(): params_index = [["beta"], ["intersection", "x1", "x2"]] params_index = pd.MultiIndex.from_product(params_index, 
names=["type", "name"]) params = pd.DataFrame( data=[[0.57735], [0.57735], [0.57735]], index=params_index, columns=["value"] ) return params @pytest.fixture() def func_kwargs(): data = pd.read_csv(EXAMPLE_DIR / "sensitivity_probit_example_data.csv") y_data = data[["y"]] x_data = data[["intercept", "x1", "x2"]] func_kwargs = {"x": x_data, "y": y_data} return func_kwargs @pytest.fixture() def jac(params, func_kwargs): derivative_dict = first_derivative( func=simulate_aggregated_moments, params=params, func_kwargs=func_kwargs, ) g = derivative_dict.derivative return g.to_numpy() @pytest.fixture() def weights(moments_cov): return np.linalg.inv(moments_cov) @pytest.fixture() def params_cov_opt(jac, weights): return cov_optimal(jac, weights) def test_sensitivity_to_bias(jac, weights, params): calculated = calculate_sensitivity_to_bias(jac, weights) expected = pd.DataFrame( data=[ [4.010481, 2.068143, 2.753155, 0.495683, 1.854492, 0.641020], [0.605718, 6.468960, -2.235886, 1.324065, -1.916986, -0.116590], [2.218011, -1.517303, 7.547212, -0.972578, 1.956985, 0.255691], ], index=params.index, ) aaae(calculated, expected) def test_fundamental_sensitivity_to_noise( jac, weights, moments_cov, params_cov_opt, params ): calculated = calculate_fundamental_sensitivity_to_noise( jac, weights, moments_cov, params_cov_opt, ) expected = pd.DataFrame( data=[ [1.108992, 0.191341, 0.323757, 0.020377, 0.085376, 0.029528], [0.017262, 1.277374, 0.145700, 0.099208, 0.062248, 0.000667], [0.211444, 0.064198, 1.516571, 0.048900, 0.059264, 0.002929], ], index=params.index, ) aaae(calculated, expected) def test_actual_sensitivity_to_noise(jac, weights, moments_cov, params_cov_opt, params): sensitivity_to_bias = calculate_sensitivity_to_bias(jac, weights) calculated = calculate_actual_sensitivity_to_noise( sensitivity_to_bias, weights, moments_cov, params_cov_opt, ) expected = pd.DataFrame( data=[ [1.108992, 0.191341, 0.323757, 0.020377, 0.085376, 0.029528], [0.017262, 1.277374, 0.145700, 
0.099208, 0.062248, 0.000667], [0.211444, 0.064198, 1.516571, 0.048900, 0.059264, 0.002929], ], index=params.index, ) aaae(calculated, expected) def test_actual_sensitivity_to_removal( jac, weights, moments_cov, params_cov_opt, params ): calculated = calculate_actual_sensitivity_to_removal( jac, weights, moments_cov, params_cov_opt ) expected = pd.DataFrame( data=[ [1.020791, 0.343558, 0.634299, 0.014418, 0.058827, 0.017187], [0.016262, 2.313441, 0.285552, 0.052574, 0.043585, 0.000306], [0.189769, 0.114946, 2.984443, 0.022729, 0.042140, 0.005072], ], index=params.index, ) aaae(calculated, expected) def test_fundamental_sensitivity_to_removal(jac, moments_cov, params_cov_opt, params): calculated = calculate_fundamental_sensitivity_to_removal( jac, moments_cov, params_cov_opt ) expected = pd.DataFrame( data=[ [0.992910, 0.340663, 0.634157, 0.009277, 0.058815, 0.013542], [0.015455, 2.274235, 0.285389, 0.045166, 0.042882, 0.000306], [0.189311, 0.114299, 2.970578, 0.022262, 0.040827, 0.001343], ], index=params.index, ) aaae(calculated, expected) def test_sensitivity_to_weighting(jac, weights, moments_cov, params_cov_opt, params): calculated = calculate_sensitivity_to_weighting( jac, weights, moments_cov, params_cov_opt ) expected = pd.DataFrame( data=np.zeros((3, 6)), index=params.index, ) aaae(calculated, expected) ================================================ FILE: tests/estimagic/test_msm_sensitivity_via_estimate_msm.py ================================================ import numpy as np import pandas as pd import pytest from numpy.testing import assert_array_almost_equal as aaae from scipy import stats from estimagic.config import EXAMPLE_DIR from estimagic.estimate_msm import estimate_msm def simulate_aggregated_moments(params, x, y): """Calculate aggregated moments for example from Honore, DePaula, Jorgensen.""" mom_value = simulate_moment_contributions(params, x, y) moments = mom_value.mean(axis=1) return moments def simulate_moment_contributions(params, x, y): 
"""Calculate moment contributions for example from Honore, DePaula, Jorgensen.""" y_estimated = x.to_numpy() @ (params["value"].to_numpy()) x_np = x.T.to_numpy() residual = y.T.to_numpy() - stats.norm.cdf(y_estimated) mom_value = [] length = len(x_np) for i in range(length): for j in range(i, length): moment = residual * x_np[i] * x_np[j] mom_value.append(moment) mom_value = np.stack(mom_value, axis=1)[0] mom_value = pd.DataFrame(data=mom_value) return mom_value @pytest.fixture() def moments_cov(params, func_kwargs): mom_value = simulate_moment_contributions(params, **func_kwargs) mom_value = mom_value.to_numpy() s = np.cov(mom_value, ddof=0) return s @pytest.fixture() def params(): params_index = [["beta"], ["intersection", "x1", "x2"]] params_index = pd.MultiIndex.from_product(params_index, names=["type", "name"]) params = pd.DataFrame( data=[[0.57735], [0.57735], [0.57735]], index=params_index, columns=["value"] ) return params @pytest.fixture() def func_kwargs(): data = pd.read_csv(EXAMPLE_DIR / "sensitivity_probit_example_data.csv") y_data = data[["y"]] x_data = data[["intercept", "x1", "x2"]] func_kwargs = {"x": x_data, "y": y_data} return func_kwargs @pytest.fixture() def msm_res(params, moments_cov, func_kwargs): res = estimate_msm( simulate_moments=simulate_aggregated_moments, # only needed for shape since optimization is skipped empirical_moments=np.zeros(6), params=params, optimize_options=False, moments_cov=moments_cov, simulate_moments_kwargs=func_kwargs, weights="optimal", ) return res def test_sensitivity_to_bias(msm_res): calculated = msm_res.sensitivity(kind="bias") expected = np.array( [ [4.010481, 2.068143, 2.753155, 0.495683, 1.854492, 0.641020], [0.605718, 6.468960, -2.235886, 1.324065, -1.916986, -0.116590], [2.218011, -1.517303, 7.547212, -0.972578, 1.956985, 0.255691], ] ) aaae(calculated, expected) def test_fundamental_sensitivity_to_noise(msm_res): calculated = msm_res.sensitivity(kind="noise_fundamental") expected = np.array( [ [1.108992, 
0.191341, 0.323757, 0.020377, 0.085376, 0.029528], [0.017262, 1.277374, 0.145700, 0.099208, 0.062248, 0.000667], [0.211444, 0.064198, 1.516571, 0.048900, 0.059264, 0.002929], ] ) aaae(calculated, expected) def test_actual_sensitivity_to_noise(msm_res): calculated = msm_res.sensitivity(kind="noise") expected = np.array( [ [1.108992, 0.191341, 0.323757, 0.020377, 0.085376, 0.029528], [0.017262, 1.277374, 0.145700, 0.099208, 0.062248, 0.000667], [0.211444, 0.064198, 1.516571, 0.048900, 0.059264, 0.002929], ] ) aaae(calculated, expected) def test_actual_sensitivity_to_removal(msm_res): calculated = msm_res.sensitivity(kind="removal") expected = np.array( [ [1.020791, 0.343558, 0.634299, 0.014418, 0.058827, 0.017187], [0.016262, 2.313441, 0.285552, 0.052574, 0.043585, 0.000306], [0.189769, 0.114946, 2.984443, 0.022729, 0.042140, 0.005072], ] ) aaae(calculated, expected) def test_fundamental_sensitivity_to_removal(msm_res): calculated = msm_res.sensitivity(kind="removal_fundamental") expected = np.array( [ [0.992910, 0.340663, 0.634157, 0.009277, 0.058815, 0.013542], [0.015455, 2.274235, 0.285389, 0.045166, 0.042882, 0.000306], [0.189311, 0.114299, 2.970578, 0.022262, 0.040827, 0.001343], ] ) aaae(calculated, expected) def test_sensitivity_to_weighting(msm_res): calculated = msm_res.sensitivity(kind="weighting") expected = np.zeros((3, 6)) aaae(calculated, expected) ================================================ FILE: tests/estimagic/test_msm_weighting.py ================================================ import itertools import numpy as np import pandas as pd import pytest from numpy.testing import assert_array_almost_equal as aaae from estimagic.msm_weighting import ( _assemble_block_diagonal_matrix, get_moments_cov, get_weighting_matrix, ) from optimagic.parameters.block_trees import block_tree_to_matrix from optimagic.utilities import get_rng @pytest.fixture() def expected_values(): values = np.array([[1, 2, 0, 0], [3, 4, 0, 0], [0, 0, 5, 6], [0, 0, 7, 8]]) return 
values cov_np = np.diag([1, 2, 3]) cov_pd = pd.DataFrame(cov_np) test_cases = itertools.product([cov_np, cov_pd], ["diagonal", "optimal", "identity"]) @pytest.mark.parametrize("moments_cov, method", test_cases) def test_get_weighting_matrix(moments_cov, method): if isinstance(moments_cov, np.ndarray): fake_emp_moms = np.ones(len(moments_cov)) else: fake_emp_moms = pd.Series(np.ones(len(moments_cov)), index=moments_cov.index) calculated = get_weighting_matrix(moments_cov, method, fake_emp_moms) if isinstance(moments_cov, pd.DataFrame): assert calculated.index.equals(moments_cov.index) assert calculated.columns.equals(moments_cov.columns) calculated = calculated.to_numpy() if method == "identity": expected = np.identity(cov_np.shape[0]) else: expected = np.diag(1 / np.array([1, 2, 3])) aaae(calculated, expected) def test_assemble_block_diagonal_matrix_pd(expected_values): matrices = [ pd.DataFrame([[1, 2], [3, 4]]), pd.DataFrame([[5, 6], [7, 8]], columns=[2, 3], index=[2, 3]), ] calculated = _assemble_block_diagonal_matrix(matrices) assert isinstance(calculated, pd.DataFrame) assert calculated.index.equals(calculated.columns) assert calculated.index.tolist() == [0, 1, 2, 3] aaae(calculated, expected_values) def test_assemble_block_diagonal_matrix_mixed(expected_values): matrices = [pd.DataFrame([[1, 2], [3, 4]]), np.array([[5, 6], [7, 8]])] calculated = _assemble_block_diagonal_matrix(matrices) assert isinstance(calculated, np.ndarray) aaae(calculated, expected_values) def test_get_moments_cov_runs_with_pytrees(): rng = get_rng(1234) data = rng.normal(scale=[10, 5, 1], size=(100, 3)) data = pd.DataFrame(data=data) def calc_moments(data, keys): means = data.mean() means.index = keys return means.to_dict() moment_kwargs = {"keys": ["a", "b", "c"]} calculated = get_moments_cov( data=data, calculate_moments=calc_moments, moment_kwargs=moment_kwargs, bootstrap_kwargs={"n_draws": 100}, ) fake_tree = {"a": 1, "b": 2, "c": 3} cov = block_tree_to_matrix(calculated, fake_tree, 
fake_tree) assert cov.shape == (3, 3) assert cov[0, 0] > cov[1, 1] > cov[2, 2] def test_get_moments_cov_passes_bootstrap_kwargs_to_bootstrap(): rng = get_rng(1234) data = rng.normal(scale=[10, 5, 1], size=(100, 3)) data = pd.DataFrame(data=data) data["cluster"] = np.random.choice([1, 2, 3], size=100) def calc_moments(data, keys): means = data.mean() means.index = keys return means.to_dict() moment_kwargs = {"keys": ["a", "b", "c", "cluster"]} with pytest.raises(ValueError, match="a must be a positive integer unless no"): get_moments_cov( data=data, calculate_moments=calc_moments, moment_kwargs=moment_kwargs, bootstrap_kwargs={"n_draws": -1}, ) with pytest.raises(ValueError, match="Invalid bootstrap_kwargs: {'cluster'}"): get_moments_cov( data=data, calculate_moments=calc_moments, moment_kwargs=moment_kwargs, bootstrap_kwargs={"cluster": "cluster"}, ) ================================================ FILE: tests/estimagic/test_shared.py ================================================ from typing import NamedTuple import numpy as np import pandas as pd import pytest from numpy.testing import assert_array_almost_equal as aaae from pybaum import leaf_names, tree_equal from estimagic.shared_covs import ( _to_numpy, calculate_estimation_summary, get_derivative_case, process_pandas_arguments, transform_covariance, transform_free_cov_to_cov, transform_free_values_to_params_tree, ) from optimagic.parameters.tree_registry import get_registry from optimagic.utilities import get_rng @pytest.fixture() def inputs(): jac = pd.DataFrame(np.ones((5, 3)), columns=["a", "b", "c"]) hess = pd.DataFrame(np.eye(3) / 2, columns=list("abc"), index=list("abc")) weights = pd.DataFrame(np.eye(5)) moments_cov = 1 / weights out = {"jac": jac, "hess": hess, "weights": weights, "moments_cov": moments_cov} return out def test_process_pandas_arguments_all_pd(inputs): *arrays, names = process_pandas_arguments(**inputs) for arr in arrays: assert isinstance(arr, np.ndarray) expected_names = 
{"moments": list(range(5)), "params": ["a", "b", "c"]} for key, value in expected_names.items(): assert names[key].tolist() == value def test_process_pandas_arguments_incompatible_names(inputs): inputs["jac"].columns = ["c", "d", "e"] with pytest.raises(ValueError): process_pandas_arguments(**inputs) def _from_internal(x, return_type="flat"): # noqa: ARG001 return x class FakeConverter(NamedTuple): has_transforming_constraints: bool = True params_from_internal: callable = _from_internal class FakeInternalParams(NamedTuple): values: np.ndarray = np.arange(2) lower_bounds: np.ndarray = np.full(2, -np.inf) upper_bounds: np.ndarray = np.full(2, np.inf) free_mask: np.ndarray = np.array([True, True]) def test_transform_covariance_no_bounds(): internal_cov = np.eye(2) converter = FakeConverter() internal_params = FakeInternalParams() got = transform_covariance( internal_params=internal_params, internal_cov=internal_cov, converter=converter, rng=get_rng(seed=5687), n_samples=100, bounds_handling="ignore", ) expected_sample = get_rng(seed=5687).multivariate_normal( np.arange(2), np.eye(2), 100 ) expected = np.cov(expected_sample, rowvar=False) aaae(got, expected) def test_transform_covariance_with_clipping(): rng = get_rng(seed=1234) internal_cov = np.eye(2) converter = FakeConverter() internal_params = FakeInternalParams( lower_bounds=np.ones(2), upper_bounds=np.ones(2) ) got = transform_covariance( internal_params=internal_params, internal_cov=internal_cov, converter=converter, rng=rng, n_samples=100, bounds_handling="clip", ) expected = np.zeros((2, 2)) aaae(got, expected) def test_transform_covariance_invalid_bounds(): rng = get_rng(seed=1234) internal_cov = np.eye(2) converter = FakeConverter() internal_params = FakeInternalParams( lower_bounds=np.ones(2), upper_bounds=np.ones(2) ) with pytest.raises(ValueError): transform_covariance( internal_params=internal_params, internal_cov=internal_cov, converter=converter, rng=rng, n_samples=10, bounds_handling="raise", ) class 
FakeFreeParams(NamedTuple): free_mask: np.ndarray = np.array([True, False, True]) all_names: list = ["a", "b", "c"] free_names: list = ["a", "c"] def test_transform_free_cov_to_cov_pytree(): got = transform_free_cov_to_cov( free_cov=np.eye(2), free_params=FakeFreeParams(), params={"a": 1, "b": 2, "c": 3}, return_type="pytree", ) assert got["a"]["a"] == 1 assert got["c"]["c"] == 1 assert got["a"]["c"] == 0 assert got["c"]["a"] == 0 assert np.isnan(got["a"]["b"]) def test_transform_free_cov_to_cov_array(): got = transform_free_cov_to_cov( free_cov=np.eye(2), free_params=FakeFreeParams(), params={"a": 1, "b": 2, "c": 3}, return_type="array", ) expected = np.array([[1, np.nan, 0], [np.nan, np.nan, np.nan], [0, np.nan, 1]]) assert np.array_equal(got, expected, equal_nan=True) def test_transform_free_cov_to_cov_dataframe(): got = transform_free_cov_to_cov( free_cov=np.eye(2), free_params=FakeFreeParams(), params={"a": 1, "b": 2, "c": 3}, return_type="dataframe", ) expected = np.array([[1, np.nan, 0], [np.nan, np.nan, np.nan], [0, np.nan, 1]]) assert np.array_equal(got.to_numpy(), expected, equal_nan=True) assert isinstance(got, pd.DataFrame) assert list(got.columns) == list("abc") assert list(got.index) == list("abc") def test_transform_free_cov_to_cov_invalid(): with pytest.raises(ValueError): transform_free_cov_to_cov( free_cov=np.eye(2), free_params=FakeFreeParams(), params={"a": 1, "b": 2, "c": 3}, return_type="bla", ) def test_transform_free_values_to_params_tree(): got = transform_free_values_to_params_tree( values=np.array([10, 11]), free_params=FakeFreeParams(), params={"a": 1, "b": 2, "c": 3}, ) assert got["a"] == 10 assert got["c"] == 11 assert np.isnan(got["b"]) def test_get_derivative_case(): assert get_derivative_case(lambda x: True) == "closed-form" # noqa: ARG005 assert get_derivative_case(False) == "skip" assert get_derivative_case(None) == "numerical" def test_to_numpy_invalid(): with pytest.raises(TypeError): _to_numpy(15) def 
test_calculate_estimation_summary(): # input data summary_data = { "value": { "a": pd.Series([0], index=["i"]), "b": pd.DataFrame({"c1": [1], "c2": [2]}), }, "standard_error": { "a": pd.Series([0.1], index=["i"]), "b": pd.DataFrame({"c1": [0.2], "c2": [0.3]}), }, "ci_lower": { "a": pd.Series([-0.2], index=["i"]), "b": pd.DataFrame({"c1": [-0.4], "c2": [-0.6]}), }, "ci_upper": { "a": pd.Series([0.2], index=["i"]), "b": pd.DataFrame({"c1": [0.4], "c2": [0.6]}), }, "p_value": { "a": pd.Series([0.001], index=["i"]), "b": pd.DataFrame({"c1": [0.2], "c2": [0.07]}), }, "free": np.array([True, True, True]), } registry = get_registry(extended=True) names = leaf_names(summary_data["value"], registry=registry) free_names = names # function call summary = calculate_estimation_summary(summary_data, names, free_names) # expectations expectation = { "a": pd.DataFrame( { "value": 0, "standard_error": 0.1, "ci_lower": -0.2, "ci_upper": 0.2, "p_value": 0.001, "free": True, "stars": "***", }, index=["i"], ), "b": pd.DataFrame( { "value": [1, 2], "standard_error": [0.2, 0.3], "ci_lower": [-0.4, -0.6], "ci_upper": [0.4, 0.6], "p_value": [0.2, 0.7], "free": [True, True], "stars": ["", "*"], }, index=pd.MultiIndex.from_tuples([(0, "c1"), (0, "c2")]), ), } tree_equal(summary, expectation) ================================================ FILE: tests/optimagic/__init__.py ================================================ ================================================ FILE: tests/optimagic/benchmarking/__init__.py ================================================ ================================================ FILE: tests/optimagic/benchmarking/test_benchmark_reports.py ================================================ from itertools import product import numpy as np import pytest from optimagic import ( OptimizeResult, convergence_report, get_benchmark_problems, rank_report, traceback_report, ) @pytest.fixture def benchmark_example(): all_problems = get_benchmark_problems("example") 
    problems = {
        k: v
        for k, v in all_problems.items()
        if k in ["bard_good_start", "box_3d", "rosenbrock_good_start"]
    }
    _stop_after_10 = {
        "stopping_max_criterion_evaluations": 10,
        "stopping_max_iterations": 10,
    }
    optimizers = {
        "lbfgsb": {"algorithm": "scipy_lbfgsb", "algo_options": _stop_after_10},
        "nm": {"algorithm": "scipy_neldermead", "algo_options": _stop_after_10},
    }
    # Fake result histories keyed by (problem, optimizer). "solution" is either
    # the OptimizeResult class (a run that finished) or a traceback string
    # (a run that errored) -- the reports only check the type/content.
    results = {
        ("bard_good_start", "lbfgsb"): {
            "params_history": [
                [1.0, 1.0, 1.0],
                [0.48286315298120086, 1.6129119244711858, 1.5974181569859445],
                [0.09754340799557773, 1.7558262514618663, 1.7403560082627973],
            ],
            "criterion_history": np.array(
                [
                    4.16816959e01,
                    3.20813118e00,
                    9.97263708e-03,
                ]
            ),
            "time_history": [
                0.0,
                0.0003762839987757616,
                0.0007037959985609632,
            ],
            "batches_history": [0, 1, 2],
            "solution": OptimizeResult,  # success
        },
        ("box_3d", "lbfgsb"): {
            "params_history": [
                [0.0, 10.0, 20.0],
                [-0.6579976970071755, 10.014197643614924, 19.247113914560085],
                [-3.2899884850358774, 10.070988218074623, 16.235569572800433],
            ],
            "criterion_history": np.array(
                [
                    1.03115381e03,
                    8.73640769e02,
                    9.35093416e02,
                ]
            ),
            "time_history": [
                0.0,
                0.000555748996703187,
                0.0009771709992492106,
            ],
            "batches_history": [0, 1, 2],
            "solution": OptimizeResult,  # failed
        },
        ("rosenbrock_good_start", "lbfgsb"): {
            "params_history": [
                [-1.2, 1.0],
                [0.0, 0.0],
            ],
            "criterion_history": np.array([1.795769e6, 1e3]),
            "time_history": [
                0.0,
                5.73799989069812e-04,
            ],
            "batches_history": [0, 1],
            "solution": "lbfgsb traceback",  # error
        },
        ("bard_good_start", "nm"): {
            "params_history": [
                [1.0, 1.0, 1.0],
                [1.05, 1.0, 1.0],
                [0.7999999999999998, 1.1999999999999993, 1.0499999999999994],
                [0.08241056, 1.13303608, 2.34369519],
            ],
            "criterion_history": np.array(
                [
                    41.68169586,
                    43.90748158,
                    23.92563745,
                    0.00821487730657897,
                ]
            ),
            "time_history": [
                0.0,
                3.603900040616281e-05,
                0.0004506860022956971,
                0.00015319500016630627,
            ],
            "batches_history": [0, 1, 2, 4],
            "solution": OptimizeResult,  # success
        },
        ("box_3d", "nm"): {
            "params_history": [
                [0.0, 10.0, 20.0],
                [0.025, 10.0, 20.0],
                [0.0, 10.5, 20.0],
            ],
            "criterion_history": np.array(
                [1031.15381061, 1031.17836473, 1030.15033678]
            ),
            "time_history": [
                0.0,
                5.73799989069812e-05,
                0.00010679600018193014,
            ],
            "batches_history": [0, 1, 2],
            "solution": "some traceback",  # error
        },
        ("rosenbrock_good_start", "nm"): {
            "params_history": [
                [-1.2, 1.0],
                [0.0, 0.0],
            ],
            "criterion_history": np.array([1.795769e6, 1e3]),
            "time_history": [
                0.0,
                5.73799989069812e-04,
            ],
            "batches_history": [0, 1],
            "solution": "another traceback",  # error
        },
    }

    return problems, optimizers, results


# ====================================================================================
# Convergence report
# ====================================================================================

keys = ["stopping_criterion"]
stopping_criterion = ["x_and_y", "x_or_y", "x", "y"]
x_precision = [1e-4, 1e-6]
y_precision = [1e-4, 1e-6]
# NOTE(review): zip(..., strict=False) truncates to the length of `keys`, so
# only the first element of each product tuple ends up in the options dict.
CONVERGENCE_REPORT_OPTIONS = [
    dict(zip(keys, value, strict=False))
    for value in product(stopping_criterion, x_precision, y_precision)
]


@pytest.mark.parametrize("options", CONVERGENCE_REPORT_OPTIONS)
def test_convergence_report(options, benchmark_example):
    # The report has one row per problem and one column per optimizer plus
    # a "dimensionality" column; failed vs. errored runs are labeled.
    problems, optimizers, results = benchmark_example

    df = convergence_report(problems=problems, results=results, **options)

    expected_columns = list(optimizers.keys()) + ["dimensionality"]
    assert df.shape == (len(problems), len(expected_columns))
    assert set(df.columns) == set(expected_columns)
    assert df["lbfgsb"].loc["box_3d"] == "failed"
    assert df["nm"].loc["box_3d"] == "error"


# ====================================================================================
# Rank report
# ====================================================================================

keys = ["runtime_measure", "stopping_criterion"]
runtime_measure = ["n_evaluations", "walltime", "n_batches"]
RANK_REPORT_OPTIONS = [
    dict(zip(keys, value, strict=False))
    for value in product(runtime_measure, stopping_criterion)
]


@pytest.mark.parametrize("options",
RANK_REPORT_OPTIONS) def test_rank_report(options, benchmark_example): problems, optimizers, results = benchmark_example df = rank_report(problems=problems, results=results, **options) assert df.shape == (len(problems), len(optimizers) + 1) # +1 for dimensionality assert set(df.columns) == set(optimizers.keys()) | {"dimensionality"} assert df["lbfgsb"].loc["box_3d"] == "failed" assert df["nm"].loc["box_3d"] == "error" # ==================================================================================== # Traceback report # ==================================================================================== @pytest.mark.parametrize("return_type", ["text", "markdown", "dict", "dataframe"]) def test_traceback_report(return_type, benchmark_example): problems, optimizers, results = benchmark_example n_failed_problems = 3 report = traceback_report( problems=problems, results=results, return_type=return_type ) if return_type in ["text", "dict"]: assert len(report) == n_failed_problems elif return_type == "markdown": for algorithm_name in optimizers: assert algorithm_name in report elif return_type == "dataframe": assert report.shape == (n_failed_problems, 2) assert list(report.index.names) == ["algorithm", "problem"] ================================================ FILE: tests/optimagic/benchmarking/test_cartis_roberts.py ================================================ import numpy as np import pytest from numpy.testing import assert_array_almost_equal from optimagic.benchmarking.cartis_roberts import ( CARTIS_ROBERTS_PROBLEMS, get_start_points_bdvalues, get_start_points_msqrta, ) @pytest.mark.parametrize("name, specification", list(CARTIS_ROBERTS_PROBLEMS.items())) def test_cartis_roberts_function_at_start_x(name, specification): # noqa: ARG001 _criterion = specification["fun"] _x = np.array(specification["start_x"]) assert isinstance(specification["start_x"], list) _contributions = _criterion(_x) calculated = _contributions @ _contributions expected = 
specification["start_criterion"] assert np.allclose(calculated, expected) assert isinstance(specification["start_x"], list) @pytest.mark.parametrize("name, specification", list(CARTIS_ROBERTS_PROBLEMS.items())) def test_cartis_roberts_function_at_solution_x(name, specification): # noqa: ARG001 _criterion = specification["fun"] _x = specification["solution_x"] if _x is not None: assert isinstance(_x, list) _x = np.array(_x) _contributions = _criterion(_x) calculated = _contributions @ _contributions expected = specification["solution_criterion"] assert np.allclose(calculated, expected, atol=1e-7) def test_get_start_points_bdvalues(): expected = np.array([-0.1389, -0.2222, -0.2500, -0.2222, -0.1389]) result = get_start_points_bdvalues(5) assert_array_almost_equal(expected, result, decimal=4) def test_get_start_points_msqrta(): matlab_mat = np.array( [ [0.8415, -0.7568, 0.4121, -0.2879, -0.1324], [-0.9918, -0.9538, 0.9200, -0.6299, -0.5064], [0.9988, -0.4910, -0.6020, 0.9395, -0.9301], [-0.9992, -0.0265, -0.4041, 0.2794, -0.8509], [0.9235, 0.1935, 0.9365, -0.8860, 0.1760], ] ) expected = 0.2 * matlab_mat.flatten() result = get_start_points_msqrta(5) assert_array_almost_equal(result, expected, decimal=4) ================================================ FILE: tests/optimagic/benchmarking/test_get_benchmark_problems.py ================================================ from itertools import product import numpy as np import pytest from optimagic.benchmarking.get_benchmark_problems import ( _step_func, get_benchmark_problems, ) PARMETRIZATION = [] for name in ["more_wild", "cartis_roberts", "example", "estimagic"]: for additive, multiplicative, scaling in product([False, True], repeat=3): PARMETRIZATION.append((name, additive, multiplicative, scaling)) @pytest.mark.parametrize( "name, additive_noise, multiplicative_noise, scaling", PARMETRIZATION ) def test_get_problems(name, additive_noise, multiplicative_noise, scaling): is_noisy = any((additive_noise, 
multiplicative_noise)) problems = get_benchmark_problems( name=name, additive_noise=additive_noise, multiplicative_noise=multiplicative_noise, scaling=scaling, ) first_name = list(problems)[0] first = problems[first_name] func = first["inputs"]["fun"] params = first["inputs"]["params"] first_eval = func(params) second_eval = func(params) if is_noisy: assert not np.allclose(first_eval, second_eval) else: assert np.allclose(first_eval, second_eval) for problem in problems.values(): assert isinstance(problem["inputs"]["params"], np.ndarray) assert isinstance(problem["solution"]["params"], np.ndarray) def test_step_func(): p = np.array([0.0001, 0.0002]) got = _step_func(p, lambda x: x @ x) assert np.allclose(got, 0) assert not np.allclose(p @ p, 0) ================================================ FILE: tests/optimagic/benchmarking/test_more_wild.py ================================================ import numpy as np import pytest from optimagic.benchmarking.more_wild import ( MORE_WILD_PROBLEMS, get_start_points_mancino, ) @pytest.mark.parametrize("name, specification", list(MORE_WILD_PROBLEMS.items())) def test_more_wild_function_at_start_x(name, specification): # noqa: ARG001 _criterion = specification["fun"] assert isinstance(specification["start_x"], list) _x = np.array(specification["start_x"]) _contributions = _criterion(_x) calculated = _contributions @ _contributions expected = specification["start_criterion"] assert np.allclose(calculated, expected) if specification.get("solution_x") is not None: assert isinstance(specification["solution_x"], list) _x = np.array(specification["solution_x"]) _contributions = _criterion(_x) calculated = _contributions @ _contributions expected = specification["solution_criterion"] assert np.allclose(calculated, expected, rtol=1e-8, atol=1e-8) def test_get_start_points_mancino(): expected = (np.array([102.4824, 96.3335, 90.4363, 84.7852, 79.3747]),) result = get_start_points_mancino(5) assert np.allclose(expected, result) 
================================================
FILE: tests/optimagic/benchmarking/test_noise_distributions.py
================================================
import numpy as np
import pandas as pd
import pytest

from optimagic.benchmarking.get_benchmark_problems import _sample_from_distribution
from optimagic.benchmarking.noise_distributions import NOISE_DISTRIBUTIONS
from optimagic.utilities import get_rng


@pytest.mark.parametrize("distribution", NOISE_DISTRIBUTIONS)
def test_sample_from_distribution(distribution):
    # A large correlated sample must reproduce the requested mean, std and
    # average pairwise correlation up to a tight tolerance.
    mean = 0.33
    std = 0.55
    correlation = 0.44
    sample = _sample_from_distribution(
        distribution=distribution,
        mean=mean,
        std=std,
        size=(100_000, 5),
        correlation=correlation,
        rng=get_rng(seed=0),
    )
    calculated_mean = sample.mean()
    calculated_std = sample.std()
    corrmat = pd.DataFrame(sample).corr().to_numpy().round(2)
    # Average of the off-diagonal entries of the correlation matrix.
    calculated_avgcorr = corrmat[~np.eye(len(corrmat)).astype(bool)].mean()
    assert np.allclose(calculated_mean, mean, atol=0.001)
    assert np.allclose(calculated_std, std, atol=0.001)
    assert np.allclose(calculated_avgcorr, correlation, atol=0.001)



================================================
FILE: tests/optimagic/benchmarking/test_run_benchmark.py
================================================
import pytest

from optimagic import get_benchmark_problems
from optimagic.benchmarking.run_benchmark import run_benchmark


def test_run_benchmark_dict_options():
    # Dict-style optimize_options: results are keyed by (problem, option name).
    all_problems = get_benchmark_problems("more_wild")
    first_two_names = list(all_problems)[:2]
    first_two = {name: all_problems[name] for name in first_two_names}

    optimize_options = {
        "default_lbfgsb": "scipy_lbfgsb",
        "tuned_lbfgsb": {
            "algorithm": "scipy_lbfgsb",
            "algo_options": {"convergence.relative_criterion_tolerance": 1e-10},
        },
    }

    result = run_benchmark(
        problems=first_two,
        optimize_options=optimize_options,
        error_handling="raise",
    )

    expected_keys = {
        ("linear_full_rank_good_start", "default_lbfgsb"),
        ("linear_full_rank_bad_start", "default_lbfgsb"),
        ("linear_full_rank_good_start", "tuned_lbfgsb"),
        ("linear_full_rank_bad_start", "tuned_lbfgsb"),
    }
    assert set(result) == expected_keys


def test_run_benchmark_list_options():
    # List-style optimize_options: the algorithm name doubles as the key.
    all_problems = get_benchmark_problems("example")
    first_two_names = list(all_problems)[:2]
    first_two = {name: all_problems[name] for name in first_two_names}

    optimize_options = ["scipy_lbfgsb", "scipy_neldermead"]

    result = run_benchmark(
        problems=first_two,
        optimize_options=optimize_options,
    )

    expected_keys = {
        ("helical_valley_good_start", "scipy_lbfgsb"),
        ("rosenbrock_good_start", "scipy_lbfgsb"),
        ("helical_valley_good_start", "scipy_neldermead"),
        ("rosenbrock_good_start", "scipy_neldermead"),
    }
    assert set(result) == expected_keys


def test_run_benchmark_failing():
    # With the default error handling, a failing problem warns and stores the
    # traceback string as the solution instead of raising.
    all_problems = get_benchmark_problems("more_wild")
    failing_name = "jennrich_sampson"
    failing = {failing_name: all_problems[failing_name]}

    optimize_options = ["scipy_lbfgsb"]

    with pytest.warns():
        result = run_benchmark(problems=failing, optimize_options=optimize_options)

    key = (failing_name, "scipy_lbfgsb")
    assert isinstance(result[key]["solution"], str)



================================================
FILE: tests/optimagic/differentiation/test_compare_derivatives_with_jax.py
================================================
"""Compare first and second derivative behavior to that of jax.

This test module only runs if jax is installed.

""" import numpy as np import pytest from numpy.testing import assert_array_almost_equal as aaae from pybaum import tree_equal from optimagic.config import IS_JAX_INSTALLED from optimagic.differentiation.derivatives import first_derivative, second_derivative if not IS_JAX_INSTALLED: pytestmark = pytest.mark.skip(reason="jax is not installed.") else: import jax import jax.numpy as jnp jax.config.update("jax_enable_x64", True) # arrays have to be equal up to 5 decimals DECIMALS = 5 def _tree_equal_numpy_leaves(tree1, tree2): equality_checkers = {np.ndarray: lambda x, y: aaae(x, y, decimal=DECIMALS)} tree_equal(tree1, tree2, equality_checkers=equality_checkers) def _compute_testable_optimagic_and_jax_derivatives(func, params, func_jax=None): """Computes first and second derivative using optimagic and jax. Then converts leaves of jax output to numpy so that we can use numpy.testing. For higher dimensional output we need to define two function, one with numpy array output and one with jax.numpy array output. 
""" func_jax = func if func_jax is None else func_jax optimagic_jac = first_derivative(func, params).derivative jax_jac = jax.jacobian(func_jax)(params) optimagic_hess = second_derivative(func, params).derivative jax_hess = jax.hessian(func_jax)(params) out = { "jac": {"optimagic": optimagic_jac, "jax": jax_jac}, "hess": {"optimagic": optimagic_hess, "jax": jax_hess}, } return out @pytest.mark.jax() def test_scalar_input_scalar_output(): def func(params): return params**2 params = 1.0 result = _compute_testable_optimagic_and_jax_derivatives(func, params) _tree_equal_numpy_leaves(result["jac"]["optimagic"], result["jac"]["jax"]) _tree_equal_numpy_leaves(result["hess"]["optimagic"], result["hess"]["jax"]) @pytest.mark.jax() def test_array_input_scalar_output(): def func(params): return params @ params params = np.array([1.0, 2, 3]) result = _compute_testable_optimagic_and_jax_derivatives(func, params) _tree_equal_numpy_leaves(result["jac"]["optimagic"], result["jac"]["jax"]) _tree_equal_numpy_leaves(result["hess"]["optimagic"], result["hess"]["jax"]) @pytest.mark.jax() def test_dict_input_scalar_output(): def func(params): return params["a"] * params["b"] params = {"a": 1.0, "b": 2.0} result = _compute_testable_optimagic_and_jax_derivatives(func, params) _tree_equal_numpy_leaves(result["jac"]["optimagic"], result["jac"]["jax"]) _tree_equal_numpy_leaves(result["hess"]["optimagic"], result["hess"]["jax"]) @pytest.mark.jax() def test_array_dict_input_scalar_output(): def func(params): return params["a"].sum() * params["b"].prod() params = { "a": np.array([1.0, 2, 3]), "b": np.arange(9, dtype=np.float64).reshape(3, 3), } result = _compute_testable_optimagic_and_jax_derivatives(func, params) _tree_equal_numpy_leaves(result["jac"]["optimagic"], result["jac"]["jax"]) _tree_equal_numpy_leaves(result["hess"]["optimagic"], result["hess"]["jax"]) @pytest.mark.jax() def test_array_input_array_output(): def func(params): return np.array([params.sum(), params.prod()]) def 
func_jax(params): return jnp.array([params.sum(), params.prod()]) params = np.array([1.0, 2, 3]) result = _compute_testable_optimagic_and_jax_derivatives(func, params, func_jax) _tree_equal_numpy_leaves(result["jac"]["optimagic"], result["jac"]["jax"]) _tree_equal_numpy_leaves(result["hess"]["optimagic"], result["hess"]["jax"]) @pytest.mark.jax() def test_array_dict_input_array_output(): def func(params): return params["b"] * np.array([params["a"].sum(), params["a"].prod()]) def func_jax(params): return params["b"] * jnp.array([params["a"].sum(), params["a"].prod()]) params = {"a": np.array([1.0, 2, 3]), "b": 2.0} result = _compute_testable_optimagic_and_jax_derivatives(func, params, func_jax) _tree_equal_numpy_leaves(result["jac"]["optimagic"], result["jac"]["jax"]) _tree_equal_numpy_leaves(result["hess"]["optimagic"], result["hess"]["jax"]) @pytest.mark.jax() def test_array_dict_input_dict_output(): def func(params): value = params["b"] * np.array([params["a"].sum(), params["a"].prod()]) return [value[0], {"c": 0.0, "d": value[1]}] def func_jax(params): value = params["b"] * jnp.array([params["a"].sum(), params["a"].prod()]) return [value[0], {"c": 0.0, "d": value[1]}] params = {"a": np.array([1.0, 2, 3]), "b": 2.0} result = _compute_testable_optimagic_and_jax_derivatives(func, params, func_jax) _tree_equal_numpy_leaves(result["jac"]["optimagic"], result["jac"]["jax"]) _tree_equal_numpy_leaves(result["hess"]["optimagic"], result["hess"]["jax"]) ================================================ FILE: tests/optimagic/differentiation/test_derivatives.py ================================================ from dataclasses import dataclass from functools import partial from pathlib import Path from typing import get_type_hints import numpy as np import pandas as pd import pytest from numpy.testing import assert_array_almost_equal as aaae from pandas.testing import assert_frame_equal from scipy.optimize._numdiff import approx_derivative from 
optimagic.differentiation.derivatives import (
    Evals,
    NumdiffResult,
    _consolidate_one_step_derivatives,
    _convert_evaluation_data_to_frame,
    _convert_richardson_candidates_to_frame,
    _is_scalar_nan,
    _nan_skipping_batch_evaluator,
    _reshape_cross_step_evals,
    _reshape_one_step_evals,
    _reshape_two_step_evals,
    _select_minimizer_along_axis,
    first_derivative,
    second_derivative,
)
from optimagic.differentiation.generate_steps import Steps
from optimagic.examples.numdiff_functions import (
    logit_loglike,
    logit_loglike_gradient,
    logit_loglike_hessian,
    logit_loglikeobs,
    logit_loglikeobs_jacobian,
)
from optimagic.parameters.bounds import Bounds


@pytest.fixture()
def binary_choice_inputs():
    # Pickled logit data (y, x, params_np) stored next to this test module.
    fix_path = Path(__file__).resolve().parent / "binary_choice_inputs.pickle"
    inputs = pd.read_pickle(fix_path)
    return inputs


methods = ["forward", "backward", "central"]
methods_second_derivative = ["forward", "backward", "central_average", "central_cross"]


@pytest.mark.parametrize("method", methods)
def test_first_derivative_jacobian(binary_choice_inputs, method):
    # Numerical jacobian of the logit log-likelihood contributions must match
    # the analytical jacobian up to 6 decimals.
    fix = binary_choice_inputs
    func = partial(logit_loglikeobs, y=fix["y"], x=fix["x"])

    bounds = Bounds(
        lower=np.full(fix["params_np"].shape, -np.inf),
        upper=np.full(fix["params_np"].shape, np.inf),
    )

    calculated = first_derivative(
        func=func,
        method=method,
        params=fix["params_np"],
        step_size=None,
        bounds=bounds,
        min_steps=1e-8,
        f0=func(fix["params_np"]),
        n_cores=1,
    )

    expected = logit_loglikeobs_jacobian(fix["params_np"], fix["y"], fix["x"])

    aaae(calculated.derivative, expected, decimal=6)


def test_first_derivative_jacobian_works_at_defaults(binary_choice_inputs):
    fix = binary_choice_inputs
    func = partial(logit_loglikeobs, y=fix["y"], x=fix["x"])
    calculated = first_derivative(func=func, params=fix["params_np"], n_cores=1)
    expected = logit_loglikeobs_jacobian(fix["params_np"], fix["y"], fix["x"])
    aaae(calculated.derivative, expected, decimal=6)


@pytest.mark.parametrize("method", methods)
def test_first_derivative_gradient(binary_choice_inputs, method):
    fix = binary_choice_inputs
    func = partial(logit_loglike, y=fix["y"], x=fix["x"])

    calculated = first_derivative(
        func=func,
        method=method,
        params=fix["params_np"],
        f0=func(fix["params_np"]),
        n_cores=1,
    )

    expected = logit_loglike_gradient(fix["params_np"], fix["y"], fix["x"])

    aaae(calculated.derivative, expected, decimal=4)


@pytest.mark.parametrize("method", methods_second_derivative)
def test_second_derivative_hessian(binary_choice_inputs, method):
    # Hessians are noisier, so check max and mean absolute error bounds
    # instead of decimal-wise equality.
    fix = binary_choice_inputs
    func = partial(logit_loglike, y=fix["y"], x=fix["x"])

    calculated = second_derivative(
        func=func,
        method=method,
        params=fix["params_np"],
        f0=func(fix["params_np"]),
        n_cores=1,
    )

    expected = logit_loglike_hessian(fix["params_np"], fix["y"], fix["x"])

    assert np.max(np.abs(calculated.derivative - expected)) < 1.5 * 10 ** (-2)
    assert np.mean(np.abs(calculated.derivative - expected)) < 1.5 * 10 ** (-3)


@pytest.mark.parametrize("method", methods)
def test_first_derivative_scalar(method):  # noqa: ARG001
    def f(x):
        return x**2

    calculated = first_derivative(f, 3.0, n_cores=1)
    expected = 6.0
    assert calculated.derivative == expected


@pytest.mark.parametrize("method", methods_second_derivative)
def test_second_derivative_scalar(method):  # noqa: ARG001
    def f(x):
        return x**2

    calculated = second_derivative(f, 3.0, n_cores=1)
    expected = 2.0
    assert np.abs(calculated.derivative - expected) < 1.5 * 10 ** (-6)


def test_nan_skipping_batch_evaluator():
    # NaN arguments are skipped and propagate as all-NaN outputs; the rest
    # are evaluated normally.
    arglist = [np.nan, np.ones(2), np.array([3, 4]), np.nan, np.array([1, 2])]
    expected = [
        np.full(2, np.nan),
        np.ones(2),
        np.array([9, 16]),
        np.full(2, np.nan),
        np.array([1, 4]),
    ]
    calculated = _nan_skipping_batch_evaluator(
        func=lambda x: x**2,
        arguments=arglist,
        n_cores=1,
        error_handling="continue",
        batch_evaluator="joblib",
    )
    for arr_calc, arr_exp in zip(calculated, expected, strict=False):
        if np.isnan(arr_exp).all():
            assert np.isnan(arr_calc).all()
        else:
            aaae(arr_calc, arr_exp)


def test_consolidate_one_step_derivatives():
    # Where the preferred method ("forward") is NaN, fall back to "backward".
    forward = np.ones((1, 4, 3))
    forward[:, :, 0] = np.nan
    backward = np.zeros_like(forward)
    calculated = _consolidate_one_step_derivatives(
        {"forward": forward, "backward": backward}, ["forward", "backward"]
    )
    expected = np.array([[0, 1, 1]] * 4)
    aaae(calculated, expected)


@pytest.fixture()
def example_function_gradient_fixtures():
    def f(x):
        """F:R^3 -> R."""
        x1, x2, x3 = x[0], x[1], x[2]
        y1 = np.sin(x1) + np.cos(x2) + x3 - x3
        return y1

    def fprime(x):
        """Gradient(f)(x):R^3 -> R^3."""
        x1, x2, x3 = x[0], x[1], x[2]
        grad = np.array([np.cos(x1), -np.sin(x2), x3 - x3])
        return grad

    return {"func": f, "func_prime": fprime}


@pytest.fixture()
def example_function_jacobian_fixtures():
    def f(x):
        """F:R^3 -> R^2."""
        x1, x2, x3 = x[0], x[1], x[2]
        y1, y2 = np.sin(x1) + np.cos(x2), np.exp(x3)
        return np.array([y1, y2])

    def fprime(x):
        """Jacobian(f)(x):R^3 -> R^(2x3)"""
        x1, x2, x3 = x[0], x[1], x[2]
        jac = np.array([[np.cos(x1), -np.sin(x2), 0], [0, 0, np.exp(x3)]])
        return jac

    return {"func": f, "func_prime": fprime}


@pytest.mark.filterwarnings("ignore:The `n_steps` argument")
def test_first_derivative_gradient_richardson(example_function_gradient_fixtures):
    # Richardson extrapolation (n_steps=3) compared against both the analytic
    # gradient and scipy's approx_derivative.
    f = example_function_gradient_fixtures["func"]
    fprime = example_function_gradient_fixtures["func_prime"]

    true_fprime = fprime(np.ones(3))
    scipy_fprime = approx_derivative(f, np.ones(3))
    our_fprime = first_derivative(f, np.ones(3), n_steps=3, method="central", n_cores=1)

    aaae(scipy_fprime, our_fprime.derivative)
    aaae(true_fprime, our_fprime.derivative)


@pytest.mark.filterwarnings("ignore:The `n_steps` argument")
def test_first_derivative_jacobian_richardson(example_function_jacobian_fixtures):
    f = example_function_jacobian_fixtures["func"]
    fprime = example_function_jacobian_fixtures["func_prime"]

    true_fprime = fprime(np.ones(3))
    scipy_fprime = approx_derivative(f, np.ones(3))
    our_fprime = first_derivative(f, np.ones(3), n_steps=3, method="central", n_cores=1)

    aaae(scipy_fprime,
our_fprime.derivative)
    aaae(true_fprime, our_fprime.derivative)


def test_convert_evaluation_data_to_frame():
    # Each row: (sign, step_number, dim_x, dim_f, step, eval).
    arr = np.arange(4).reshape(2, 2)
    arr2 = arr.reshape(2, 1, 2)
    steps = Steps(pos=arr, neg=-arr)
    evals = Evals(pos=arr2, neg=-arr2)
    expected = [
        [1, 0, 0, 0, 0, 0],
        [1, 0, 1, 0, 1, 1],
        [1, 1, 0, 0, 2, 2],
        [1, 1, 1, 0, 3, 3],
        [-1, 0, 0, 0, 0, 0],
        [-1, 0, 1, 0, 1, -1],
        [-1, 1, 0, 0, 2, -2],
        [-1, 1, 1, 0, 3, -3],
    ]
    expected = pd.DataFrame(
        expected, columns=["sign", "step_number", "dim_x", "dim_f", "step", "eval"]
    )
    got = _convert_evaluation_data_to_frame(steps, evals)
    assert_frame_equal(expected, got.reset_index(), check_dtype=False)


def test__convert_richardson_candidates_to_frame():
    # Candidates and errors keyed by "<method><num_term>" are stacked into a
    # frame indexed by (method, num_term, dim_x, dim_f).
    jac = {
        "forward1": np.array([[0, 1], [2, 3]]),
        "forward2": np.array([[0.5, 1], [2, 3]]),
    }
    err = {
        "forward1": np.array([[0, 0], [0, 1]]),
        "forward2": np.array([[1, 0], [0, 0]]),
    }
    expected = [
        ["forward", 1, 0, 0, 0, 0],
        ["forward", 1, 1, 0, 1, 0],
        ["forward", 1, 0, 1, 2, 0],
        ["forward", 1, 1, 1, 3, 1],
        ["forward", 2, 0, 0, 0.5, 1],
        ["forward", 2, 1, 0, 1, 0],
        ["forward", 2, 0, 1, 2, 0],
        ["forward", 2, 1, 1, 3, 0],
    ]
    expected = pd.DataFrame(
        expected, columns=["method", "num_term", "dim_x", "dim_f", "der", "err"]
    )
    expected = expected.set_index(["method", "num_term", "dim_x", "dim_f"])
    got = _convert_richardson_candidates_to_frame(jac, err)
    assert_frame_equal(got, expected, check_dtype=False, check_index_type=False)


def test__select_minimizer_along_axis():
    # Pick, per entry, the derivative candidate with the smallest error.
    der = np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]])
    err = np.array([[[0, 1], [1, 0]], [[1, 0], [0, 1]]])
    expected = (np.array([[0, 5], [6, 3]]), np.array([[0, 0], [0, 0]]))
    got = _select_minimizer_along_axis(der, err)
    aaae(expected, got)


def test_reshape_one_step_evals():
    # Flat evaluations are reshaped to (n_steps, dim_f, dim_x), positive
    # perturbations first, then negative.
    n_steps, dim_f, dim_x = 2, 3, 4
    raw_evals_one_step = np.arange(2 * n_steps * dim_f * dim_x)
    pos_expected = np.array(
        [
            [[0, 3, 6, 9], [1, 4, 7, 10], [2, 5, 8, 11]],
            [[12, 15, 18, 21], [13, 16, 19, 22], [14, 17, 20, 23]],
        ]
    )
    neg_expected = np.array(
        [
            [[24, 27, 30, 33], [25, 28, 31, 34], [26, 29, 32, 35]],
            [[36, 39, 42, 45], [37, 40, 43, 46], [38, 41, 44, 47]],
        ]
    )
    got = _reshape_one_step_evals(raw_evals_one_step, n_steps, dim_x)
    assert np.all(got.pos == pos_expected)
    assert np.all(got.neg == neg_expected)


def test_reshape_two_step_evals():
    n_steps, dim_x, dim_f = 1, 2, 2
    raw_evals_two_step = np.arange(2 * n_steps * dim_f * dim_x * dim_x)
    pos_expected = np.array([[[[0, 2], [2, 6]], [[1, 3], [3, 7]]]])
    neg_expected = np.array([[[[8, 10], [10, 14]], [[9, 11], [11, 15]]]])
    got = _reshape_two_step_evals(raw_evals_two_step, n_steps, dim_x)
    assert np.all(got.pos == pos_expected)
    assert np.all(got.neg == neg_expected)


def test_reshape_cross_step_evals():
    # Diagonal entries are replaced by f0; neg is the transpose of pos.
    n_steps = 1
    dim_x = 2
    dim_f = 2
    f0 = np.array([-1000, 1000])
    raw_evals_cross_step = np.arange(2 * n_steps * dim_f * dim_x * dim_x)
    expected_pos = np.array([[[[-1000, 2], [10, -1000]], [[1000, 3], [11, 1000]]]])
    expected_neg = expected_pos.swapaxes(2, 3)
    got = _reshape_cross_step_evals(raw_evals_cross_step, n_steps, dim_x, f0)
    assert np.all(got.pos == expected_pos)
    assert np.all(got.neg == expected_neg)


def test_is_scalar_nan():
    # Only scalar NaN counts; arrays containing NaN do not.
    assert _is_scalar_nan(np.nan)
    assert not _is_scalar_nan(1.0)
    assert not _is_scalar_nan(np.array([np.nan]))


@dataclass
class MyOutput:
    value: float
    message: str


def test_first_derivative_with_unpacking():
    # With an unpacker, the derivative is taken of the unpacked value while
    # func_value keeps the original (wrapped) output.
    def f(x):
        return MyOutput(x @ x, "success")

    got = first_derivative(
        func=f,
        params=np.ones(3),
        unpacker=lambda out: out.value,
    )
    assert isinstance(got.func_value, MyOutput)
    aaae(got.derivative, np.ones(3) * 2)


def test_second_derivative_with_unpacking():
    def f(x):
        return MyOutput(x @ x, "success")

    got = second_derivative(
        func=f,
        params=np.ones(3),
        unpacker=lambda out: out.value,
    )
    assert isinstance(got.func_value, MyOutput)
    aaae(got.derivative, np.eye(3) * 2, decimal=4)


@pytest.mark.filterwarnings("ignore:The dictionary access for")
def test_numdiff_result_getitem():
    # Deprecated dict-style access must mirror attribute access.
    res = NumdiffResult(
        derivative=1,
        func_value=2,
        _func_evals=pd.DataFrame([0, 1]),
        _derivative_candidates=pd.DataFrame([2, 3]),
    )
    assert res["derivative"] == res.derivative
    assert res["func_value"] == res.func_value
    assert_frame_equal(res["_func_evals"], res._func_evals)
    assert_frame_equal(res["_derivative_candidates"], res._derivative_candidates)


def test_first_and_second_derivative_have_same_type_hints():
    # exclude method from comparison, as the argument options differ here
    exclude = ["method"]
    first_hints = {
        k: v for k, v in get_type_hints(first_derivative).items() if k not in exclude
    }
    second_hints = {
        k: v for k, v in get_type_hints(second_derivative).items() if k not in exclude
    }
    assert first_hints == second_hints


def test_first_derivative_pytree_step_size():
    # step_size may be given as a pytree mirroring params.
    params = {"a": np.array([1, 2, 3]), "b": 4}
    got = first_derivative(
        lambda params: params["a"] @ params["a"] + 2 * params["b"],
        params=params,
        step_size=params,
    )
    assert np.allclose(got.derivative["a"], np.array([2, 4, 6]))
    assert np.allclose(got.derivative["b"], 2)


def test_second_derivative_pytree_step_size():
    params = {"a": np.array([1, 2, 3]), "b": 4}
    got = second_derivative(
        lambda params: params["a"] @ params["a"] + 2 * params["b"],
        params=params,
        step_size=params,
    )
    assert np.allclose(got.derivative["a"]["a"], np.eye(3) * 2)
    assert np.allclose(got.derivative["a"]["b"], np.zeros(3))
    assert np.allclose(got.derivative["b"]["b"], 0)


def test_first_derivative_pytree_min_steps():
    # bounds and min_steps may also be given as pytrees mirroring params.
    params = {"a": np.array([1, 2, 3]), "b": 4}
    bounds = Bounds(
        lower={"a": np.array([0, 1, 2]), "b": 3},
        upper={"a": np.array([2, 3, 4]), "b": 5},
    )
    min_steps = {"a": np.array([0.2, 0.5, 0.7]), "b": 0.2}
    got = first_derivative(
        lambda params: params["a"] @ params["a"] + 2 * params["b"],
        params=params,
        bounds=bounds,
        min_steps=min_steps,
    )
    assert np.allclose(got.derivative["a"], np.array([2, 4, 6]))
    assert np.allclose(got.derivative["b"], 2)


def test_second_derivative_pytree_min_steps():
    params = {"a": np.array([1, 2, 3]), "b": 4}
    bounds = Bounds(
        lower={"a": np.array([0, 1, 2]), "b": 3},
        upper={"a":
np.array([2, 3, 4]), "b": 5}, ) min_steps = {"a": np.array([0.2, 0.5, 0.7]), "b": 0.2} got = second_derivative( lambda params: params["a"] @ params["a"] + 2 * params["b"], params=params, bounds=bounds, min_steps=min_steps, ) assert np.allclose(got.derivative["a"]["a"], np.eye(3) * 2) assert np.allclose(got.derivative["a"]["b"], np.zeros(3)) assert np.allclose(got.derivative["b"]["b"], 0) ================================================ FILE: tests/optimagic/differentiation/test_finite_differences.py ================================================ import numpy as np import pytest from numpy.testing import assert_array_almost_equal as aaae from optimagic.differentiation.derivatives import Evals from optimagic.differentiation.finite_differences import jacobian from optimagic.differentiation.generate_steps import Steps @pytest.fixture() def jacobian_inputs(): """Very contrived test case for finite difference formulae with linear function.""" steps_pos = np.array([[0.1, 0.1, 0.1, 0.1], [0.2, 0.2, 0.2, 0.2]]) steps = Steps(pos=steps_pos, neg=-steps_pos) jac1 = (np.arange(1, 13)).reshape(3, 4) jac2 = jac1 * 1.1 evals_pos1 = jac1 @ (np.zeros((4, 4)) + np.eye(4) * 0.1) evals_pos2 = jac2 @ (np.zeros((4, 4)) + np.eye(4) * 0.2) evals_neg1 = jac1 @ (np.zeros((4, 4)) - np.eye(4) * 0.1) evals_neg2 = jac2 @ (np.zeros((4, 4)) - np.eye(4) * 0.2) evals = Evals( pos=np.array([evals_pos1, evals_pos2]), neg=np.array([evals_neg1, evals_neg2]) ) expected_jac = np.array([jac1, jac2]) f0 = np.zeros(3) out = {"evals": evals, "steps": steps, "f0": f0, "expected_jac": expected_jac} return out methods = ["forward", "backward", "central"] @pytest.mark.parametrize("method", methods) def test_jacobian_finite_differences(jacobian_inputs, method): expected_jac = jacobian_inputs.pop("expected_jac") calculated_jac = jacobian(**jacobian_inputs, method=method) aaae(calculated_jac, expected_jac) ================================================ FILE: tests/optimagic/differentiation/test_generate_steps.py 
================================================ import numpy as np import pytest from numpy.testing import assert_array_almost_equal as aaae from optimagic.differentiation.generate_steps import ( _calculate_or_validate_base_steps, _fillna, _rescale_to_accomodate_bounds, _set_unused_side_to_nan, generate_steps, ) from optimagic.parameters.bounds import Bounds def test_scalars_as_base_steps(): steps_scalar = _calculate_or_validate_base_steps( 0.1, np.ones(3), "first_derivative", None, scaling_factor=1 ) steps_array = _calculate_or_validate_base_steps( np.full(3, 0.1), np.ones(3), "first_derivative", None, scaling_factor=1 ) aaae(steps_scalar, steps_array) def test_scalars_as_min_steps(): steps_scalar = _calculate_or_validate_base_steps( 0.1, np.ones(3), "first_derivative", 0.12, scaling_factor=1.5 ) steps_array = _calculate_or_validate_base_steps( np.full(3, 0.1), np.ones(3), "first_derivative", np.full(3, 0.12), scaling_factor=1.5, ) aaae(steps_scalar, steps_array) def test_calculate_or_validate_base_steps_invalid_too_small(): base_steps = np.array([1e-10, 0.01, 0.01]) min_steps = np.full(3, 1e-8) with pytest.raises(ValueError): _calculate_or_validate_base_steps( base_steps, np.ones(3), "first_derivative", min_steps, scaling_factor=1 ) def test_calculate_or_validate_base_steps_wrong_shape(): base_steps = np.array([0.01, 0.01, 0.01]) min_steps = np.full(3, 1e-8) with pytest.raises(ValueError): _calculate_or_validate_base_steps( base_steps, np.ones(2), "first_derivative", min_steps, scaling_factor=1 ) def test_calculate_or_validate_base_steps_jacobian(): x = np.array([0.05, 1, -5]) expected = np.array([0.1, 1, 5]) * np.sqrt(np.finfo(float).eps) calculated = _calculate_or_validate_base_steps( None, x, "first_derivative", 0, scaling_factor=1.0 ) aaae(calculated, expected, decimal=12) def test_calculate_or_validate_base_steps_jacobian_with_scaling_factor(): x = np.array([0.05, 1, -5]) expected = np.array([0.1, 1, 5]) * np.sqrt(np.finfo(float).eps) * 2 calculated = 
_calculate_or_validate_base_steps( None, x, "first_derivative", 0, scaling_factor=2.0 ) aaae(calculated, expected, decimal=12) def test_calculate_or_validate_base_steps_binding_min_step(): x = np.array([0.05, 1, -5]) expected = np.array([0.1, 1, 5]) * np.sqrt(np.finfo(float).eps) expected[0] = 1e-8 calculated = _calculate_or_validate_base_steps( None, x, "first_derivative", 1e-8, scaling_factor=1.0 ) aaae(calculated, expected, decimal=12) def test_calculate_or_validate_base_steps_hessian(): x = np.array([0.05, 1, -5]) expected = np.array([0.1, 1, 5]) * np.finfo(float).eps ** (1 / 3) calculated = _calculate_or_validate_base_steps( None, x, "second_derivative", 0, scaling_factor=1.0 ) aaae(calculated, expected, decimal=12) def test_set_unused_side_to_nan_forward(): pos = np.ones((3, 2)) neg = -np.ones((3, 2)) method = "forward" x = np.zeros(3) upper_bounds = np.array([0.5, 2, 3]) lower_bounds = np.array([-2, -0.1, -0.1]) expected_pos = np.array([[np.nan, np.nan], [1, 1], [1, 1]]) expected_neg = np.array([[-1, -1], [np.nan, np.nan], [np.nan, np.nan]]) calculated_pos, calculated_neg = _set_unused_side_to_nan( x, pos, neg, method, lower_bounds, upper_bounds ) assert np.allclose(calculated_pos, expected_pos, equal_nan=True) assert np.allclose(calculated_neg, expected_neg, equal_nan=True) def test_set_unused_side_to_nan_backward(): pos = np.ones((3, 2)) neg = -np.ones((3, 2)) method = "backward" x = np.zeros(3) upper_bounds = np.array([0.5, 2, 3]) lower_bounds = np.array([-2, -0.1, -2]) expected_pos = np.array([[np.nan, np.nan], [1, 1], [np.nan, np.nan]]) expected_neg = np.array([[-1, -1], [np.nan, np.nan], [-1, -1]]) calculated_pos, calculated_neg = _set_unused_side_to_nan( x, pos, neg, method, lower_bounds, upper_bounds ) assert np.allclose(calculated_pos, expected_pos, equal_nan=True) assert np.allclose(calculated_neg, expected_neg, equal_nan=True) def test_fillna(): a = np.array([np.nan, 3, 4]) assert np.allclose(_fillna(a, 0), np.array([0, 3, 4.0])) def 
test_rescale_to_accomodate_bounds(): pos = np.array([[1, 2], [1.5, 3], [1, 2], [3, np.nan]]) neg = -pos base_steps = np.array([1, 1.5, 2, 3]) min_step = 0.1 lower_bounds = -4 * np.ones(4) upper_bounds = np.ones(4) * 2.5 expected_pos = np.array([[1, 2], [1.25, 2.5], [1, 2], [2.5, np.nan]]) expected_neg = -expected_pos calculated_pos, calculated_neg = _rescale_to_accomodate_bounds( base_steps, pos, neg, lower_bounds, upper_bounds, min_step ) np.allclose(calculated_pos, expected_pos, equal_nan=True) np.allclose(calculated_neg, expected_neg, equal_nan=True) def test_rescale_to_accomodate_bounds_binding_min_step(): pos = np.array([[1, 2], [1.5, 3], [1, 2]]) neg = -pos base_steps = np.array([1, 1.5, 2]) min_step = np.array([0, 1.4, 0]) lower_bounds = -4 * np.ones(3) upper_bounds = np.ones(3) * 2.5 expected_pos = np.array([[1, 2], [1.4, 2.8], [1, 2]]) expected_neg = -expected_pos calculated_pos, calculated_neg = _rescale_to_accomodate_bounds( base_steps, pos, neg, lower_bounds, upper_bounds, min_step ) aaae(calculated_pos, expected_pos) aaae(calculated_neg, expected_neg) def test_generate_steps_binding_min_step(): calculated_steps = generate_steps( x=np.arange(3), method="central", n_steps=2, target="first_derivative", base_steps=np.array([0.1, 0.2, 0.3]), bounds=Bounds(lower=np.full(3, -np.inf), upper=np.full(3, 2.5)), step_ratio=2, min_steps=np.full(3, 1e-8), scaling_factor=1.0, ) expected_pos = np.array([[0.1, 0.2], [0.2, 0.4], [0.25, 0.5]]).T expected_neg = -expected_pos aaae(calculated_steps.pos, expected_pos) aaae(calculated_steps.neg, expected_neg) def test_generate_steps_min_step_equals_base_step(): calculated_steps = generate_steps( x=np.arange(3), method="central", n_steps=2, target="first_derivative", base_steps=np.array([0.1, 0.2, 0.3]), bounds=Bounds(lower=np.full(3, -np.inf), upper=np.full(3, 2.5)), step_ratio=2, min_steps=None, scaling_factor=1.0, ) expected_pos = np.array([[0.1, 0.2], [0.2, 0.4], [0.3, np.nan]]).T expected_neg = np.array([[-0.1, -0.2], 
[-0.2, -0.4], [-0.3, -0.6]]).T aaae(calculated_steps.pos, expected_pos) aaae(calculated_steps.neg, expected_neg) ================================================ FILE: tests/optimagic/differentiation/test_numdiff_options.py ================================================ import pytest from optimagic.differentiation.numdiff_options import ( NumdiffOptions, pre_process_numdiff_options, ) from optimagic.exceptions import InvalidNumdiffOptionsError def test_pre_process_numdiff_options_trivial_case(): numdiff_options = NumdiffOptions( method="central", step_size=0.1, scaling_factor=0.5, min_steps=None, batch_evaluator="joblib", ) got = pre_process_numdiff_options(numdiff_options) assert got == numdiff_options def test_pre_process_numdiff_options_none_case(): assert pre_process_numdiff_options(None) is None def test_pre_process_numdiff_options_dict_case(): got = pre_process_numdiff_options( {"method": "central", "step_size": 0.1, "batch_evaluator": "pathos"} ) assert got == NumdiffOptions( method="central", step_size=0.1, batch_evaluator="pathos" ) def test_pre_process_numdiff_options_invalid_type(): with pytest.raises(InvalidNumdiffOptionsError): pre_process_numdiff_options(numdiff_options="invalid") def test_pre_process_numdiff_options_invalid_dict_key(): with pytest.raises(InvalidNumdiffOptionsError, match="Invalid numdiff options"): pre_process_numdiff_options(numdiff_options={"wrong_key": "central"}) def test_pre_process_numdiff_options_invalid_dict_value(): with pytest.raises(InvalidNumdiffOptionsError, match="Invalid numdiff `method`:"): pre_process_numdiff_options(numdiff_options={"method": "invalid"}) def test_numdiff_options_invalid_method(): with pytest.raises(InvalidNumdiffOptionsError, match="Invalid numdiff `method`:"): NumdiffOptions(method="invalid") def test_numdiff_options_invalid_step_size(): with pytest.raises( InvalidNumdiffOptionsError, match="Invalid numdiff `step_size`:" ): NumdiffOptions(step_size=0) def 
test_numdiff_options_invalid_scaling_factor(): with pytest.raises( InvalidNumdiffOptionsError, match="Invalid numdiff `scaling_factor`:" ): NumdiffOptions(scaling_factor=-1) def test_numdiff_options_invalid_min_steps(): with pytest.raises( InvalidNumdiffOptionsError, match="Invalid numdiff `min_steps`:" ): NumdiffOptions(min_steps=-1) def test_numdiff_options_invalid_n_cores(): with pytest.raises(InvalidNumdiffOptionsError, match="Invalid numdiff `n_cores`:"): NumdiffOptions(n_cores=-1) def test_numdiff_options_invalid_batch_evaluator(): with pytest.raises( InvalidNumdiffOptionsError, match="Invalid batch evaluator: invalid" ): NumdiffOptions(batch_evaluator="invalid") ================================================ FILE: tests/optimagic/examples/test_criterion_functions.py ================================================ import numpy as np import pandas as pd import pytest from numpy.testing import assert_array_almost_equal as aaae from numpy.testing import assert_array_equal from pandas.testing import assert_frame_equal from optimagic.examples.criterion_functions import ( rhe_fun_and_gradient, rhe_function_value, rhe_gradient, rhe_scalar, rosenbrock_fun_and_gradient, rosenbrock_function_value, rosenbrock_gradient, rosenbrock_scalar, sos_fun_and_gradient, sos_gradient, sos_likelihood_fun_and_jac, sos_likelihood_jacobian, sos_ls, sos_ls_fun_and_jac, sos_ls_jacobian, sos_ls_with_pd_objects, sos_scalar, trid_fun_and_gradient, trid_gradient, trid_scalar, ) from optimagic.optimization.fun_value import FunctionValue TRID_GRAD = pd.DataFrame({"value": [7, 1, -6, 11, -19.0]}) RHE_GRAD = pd.DataFrame({"value": [90, 72, 36, 28, -10.0]}) ROSENBROCK_GRAD = pd.DataFrame({"value": [259216, 255616, 54610, 145412, -10800.0]}) @pytest.fixture() def input_params(): params = pd.DataFrame({"value": [9, 9, 6, 7, -5]}) return params def test_trid_scalar(input_params): got = trid_scalar(input_params) assert got == 83 def test_trid_gradient(input_params): got = 
trid_gradient(input_params) assert_frame_equal(got, TRID_GRAD) def test_trid_fun_and_gradient(input_params): got = trid_fun_and_gradient(input_params) assert_frame_equal(got[1], TRID_GRAD) def test_rhe_scalar(input_params): got = rhe_scalar(input_params) assert got == 960 def test_rhe_gradient(input_params): got = rhe_gradient(input_params) assert_frame_equal(got, RHE_GRAD) def test_rhe_fun_and_gradient(input_params): got = rhe_fun_and_gradient(input_params) assert_frame_equal(got[1], RHE_GRAD) def test_rosenbrock_scalar(input_params): got = rosenbrock_scalar(input_params) assert got == 1456789 def test_rosenbrock_gradient(input_params): got = rosenbrock_gradient(input_params) assert_frame_equal(got, ROSENBROCK_GRAD) def test_rosenbrock_fun_and_gradient(input_params): got = rosenbrock_fun_and_gradient(input_params) assert_frame_equal(got[1], ROSENBROCK_GRAD) def test_rhe_function_value(input_params): got = rhe_function_value(input_params) assert isinstance(got, FunctionValue) expected = np.array([9, 12.72792206, 14.07124728, 15.71623365, 16.4924225]) aaae(got.value, expected) def test_rosenbrock_function_value(input_params): got = rosenbrock_function_value(input_params) assert isinstance(got, FunctionValue) expected = np.array([720.04444307, 750.04266545, 290.04310025, 540.0333323, 0]) aaae(got.value, expected) SOS_GRAD = {"a": 2, "b": 4.0} SOS_LL_JAC = {"a": np.array([2, 0]), "b": np.array([0, 4])} SOS_LS_JAC = {"a": np.array([1, 0]), "b": np.array([0, 1])} def test_sos_ls(): got = sos_ls({"a": 1, "b": 2}) aaae(got, np.array([1, 2.0])) def test_sos_ls_with_pd_objects(): got = sos_ls_with_pd_objects({"a": 1, "b": 2}) assert isinstance(got, pd.Series) aaae(got.to_numpy(), np.array([1, 2.0])) def test_sos_scalar(): got = sos_scalar({"a": 1, "b": 2}) assert got == 5 def test_sos_gradient(): got = sos_gradient({"a": 1, "b": 2}) assert got == SOS_GRAD def test_sos_likelihood_jacobian(): got = sos_likelihood_jacobian({"a": 1, "b": 2}) for key, val in SOS_LL_JAC.items(): 
assert_array_equal(got[key], val) def test_sos_ls_jacobian(): got = sos_ls_jacobian({"a": 1, "b": 2}) for key, val in SOS_LS_JAC.items(): assert_array_equal(got[key], val) def test_sos_fun_and_gradient(): got_val, got_grad = sos_fun_and_gradient({"a": 1, "b": 2}) assert got_val == 5 assert_array_equal(got_grad, SOS_GRAD) def test_sos_likelihood_fun_and_jac(): got_val, got_jac = sos_likelihood_fun_and_jac({"a": 1, "b": 2}) aaae(got_val, np.array([1, 4])) for key, val in SOS_LL_JAC.items(): assert_array_equal(got_jac[key], val) def test_sos_ls_fun_and_jac(): got_val, got_jac = sos_ls_fun_and_jac({"a": 1, "b": 2}) aaae(got_val, np.array([1, 2])) for key, val in SOS_LS_JAC.items(): assert_array_equal(got_jac[key], val) ================================================ FILE: tests/optimagic/logging/test_base.py ================================================ from dataclasses import dataclass import pytest from optimagic.logging.base import InputType, NonUpdatableKeyValueStore, OutputType from optimagic.typing import DictLikeAccess def test_key_value_store_raise_errors(): class NoDataClass(NonUpdatableKeyValueStore): def __init__(self): super().__init__({1}, [], "key") def insert(self, value: InputType) -> None: pass def _select_by_key(self, key: int) -> list[OutputType]: pass def _select_all(self) -> list[OutputType]: pass def select_last_rows(self, n_rows: int) -> list[OutputType]: pass class WrongPrimaryKey(NonUpdatableKeyValueStore): @dataclass(frozen=True) class InputDummy(DictLikeAccess): value: str @dataclass(frozen=True) class OutputDummy(DictLikeAccess): id: int value: str def __init__(self): super().__init__( WrongPrimaryKey.InputDummy, WrongPrimaryKey.OutputDummy, "ID" ) def insert(self, value: InputType) -> None: pass def _select_by_key(self, key: int) -> list[OutputType]: pass def _select_all(self) -> list[OutputType]: pass def select_last_rows(self, n_rows: int) -> list[OutputType]: pass with pytest.raises(ValueError): NoDataClass() with 
pytest.raises(ValueError): WrongPrimaryKey() ================================================ FILE: tests/optimagic/logging/test_logger.py ================================================ from dataclasses import asdict import numpy as np import pandas as pd import pytest from pybaum import tree_equal, tree_just_flatten from optimagic.logging.logger import ( LogOptions, LogReader, LogStore, SQLiteLogOptions, SQLiteLogReader, ) from optimagic.optimization.optimize import minimize from optimagic.parameters.tree_registry import get_registry from optimagic.typing import Direction @pytest.fixture() def example_db(tmp_path): path = tmp_path / "test.db" def _crit(params): x = np.array(list(params.values())) return x @ x minimize( fun=_crit, params={"a": 1, "b": 2, "c": 3}, algorithm="scipy_lbfgsb", logging=path, ) return path def test_read_start_params(example_db): res = LogReader.from_options(SQLiteLogOptions(example_db)).read_start_params() assert res == {"a": 1, "b": 2, "c": 3} def test_log_reader_read_start_params(example_db): reader = LogReader.from_options(SQLiteLogOptions(example_db)) res = reader.read_start_params() assert res == {"a": 1, "b": 2, "c": 3} def test_log_reader_read_iteration(example_db): reader = SQLiteLogReader(example_db) first_row = reader.read_iteration(0) assert first_row["params"] == {"a": 1, "b": 2, "c": 3} assert first_row["rowid"] == 1 assert first_row["scalar_fun"] == 14 last_row = reader.read_iteration(-1) assert list(last_row["params"]) == ["a", "b", "c"] assert np.allclose(last_row["scalar_fun"], 0) def test_log_reader_index_exception(example_db): with pytest.raises(IndexError): SQLiteLogReader(example_db).read_iteration(10) with pytest.raises(IndexError): SQLiteLogReader(example_db).read_iteration(-4) def test_log_reader_read_history(example_db): reader = SQLiteLogReader(example_db) res = reader.read_history() assert res["time"][0] == 0 assert res["fun"][0] == 14 assert res["params"][0] == {"a": 1, "b": 2, "c": 3} def 
test_log_reader_read_multistart_history(example_db): reader = SQLiteLogReader(example_db) history, local_history, exploration = reader.read_multistart_history( direction=Direction.MINIMIZE ) assert local_history is None assert exploration is None registry = get_registry(extended=True) assert tree_equal( tree_just_flatten(asdict(history), registry=registry), tree_just_flatten(asdict(reader.read_history()), registry=registry), ) def test_read_steps_table(example_db): res = SQLiteLogReader(example_db)._step_store.to_df() assert isinstance(res, pd.DataFrame) assert res.loc[0, "rowid"] == 1 assert res.loc[0, "type"] == "optimization" assert res.loc[0, "status"] == "complete" def test_read_optimization_problem_table(example_db): res = SQLiteLogReader(example_db).problem_df assert isinstance(res, pd.DataFrame) def test_non_existing_database_raises_error(tmp_path): with pytest.raises(FileNotFoundError): SQLiteLogReader(tmp_path / "i_do_not_exist.db").read_start_params() def test_available_log_options(): available_types = LogOptions.available_option_types() assert len(available_types) == 1 assert available_types[0] is SQLiteLogOptions def test_no_registered(): class DummyOptions(LogOptions): pass with pytest.raises(ValueError, match="DummyOptions"): LogReader.from_options(DummyOptions()) with pytest.raises(ValueError, match="DummyOptions"): LogStore.from_options(DummyOptions()) ================================================ FILE: tests/optimagic/logging/test_sqlalchemy.py ================================================ import pickle import sys from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor import numpy as np import pytest from sqlalchemy import inspect from optimagic.logging import ExistenceStrategy from optimagic.logging.logger import LogStore, SQLiteLogOptions from optimagic.logging.sqlalchemy import IterationStore, StepStore from optimagic.logging.types import ( IterationState, StepResult, StepStatus, StepType, ) class TestIterationStore: 
@pytest.fixture def store(self, tmp_path): """Fixture to set up the IterationStore.""" return IterationStore(SQLiteLogOptions(tmp_path / "test.db")) @staticmethod def create_test_point(i: int): return IterationState( params=np.array([i, i + 1]), timestamp=123456.0 + i, exceptions=None, valid=True, scalar_fun=0.5 + i, step=i, raw_fun=None, ) def test_table_creation(self, store): """Test that the IterationStore table is created properly.""" assert store.table_name in inspect(store.engine).get_table_names() def test_insert_and_query(self, store): """Test inserting and querying data in the IterationStore.""" result = self.create_test_point(2456) store.insert(result) queried_result = store.select(1)[0] assert queried_result is not None assert queried_result.scalar_fun == result.scalar_fun def test_update_raise(self, store): """Test updating an entry in the IterationStore.""" # Insert initial data result = self.create_test_point(568) store.insert(result) queried_result = store.select(1)[0] # Update the value updated_result = IterationState( params=queried_result.params, timestamp=queried_result.timestamp, exceptions=queried_result.exceptions, valid=queried_result.valid, scalar_fun=1.0, # New value step=queried_result.step, raw_fun=queried_result.raw_fun, ) msg = f"'{IterationStore.__name__}' object does not allow to update items in" "the store" with pytest.raises(AttributeError, match=msg): store.update(key=1, value=updated_result) with pytest.raises(AttributeError): store.sellect_typo # type:ignore # noqa: B018 def test_serialization(self, store): """Test the serialization and deserialization of the IterationStore.""" pickled_store = pickle.dumps(store) unpickled_store = pickle.loads(pickled_store) assert store.table_name == unpickled_store.table_name assert store.table_name in inspect(unpickled_store.engine).get_table_names() @pytest.mark.parametrize( "executor_factory", [ lambda: ThreadPoolExecutor(max_workers=10), lambda: ProcessPoolExecutor(max_workers=10), ], 
ids=["threads", "processes"], ) def test_parallel_insert(self, store, executor_factory): """Test multithreaded writing and reading in the IterationStore.""" with executor_factory() as executor: # Insert data concurrently to_insert = list(map(self.create_test_point, range(10))) futures = [executor.submit(store.insert, item) for item in to_insert] for future in futures: future.result() result = store.select() assert [row.rowid for row in result] == list(range(1, 11)) assert set([row.step for row in result]) == set(range(10)) result_last = store.select_last_rows(5) assert len(result_last) == 5 @pytest.mark.skipif( not sys.platform.startswith("win"), reason="On linux and macOS, this will result in a warning", ) def test_db_replacement_error(self, store): store.insert(self.create_test_point(245)) with pytest.raises(RuntimeError, match="PermissionError"): LogStore.from_options( SQLiteLogOptions( store._db_config.url.split("sqlite:///")[-1], if_database_exists=ExistenceStrategy.REPLACE, ) ) def test_db_existence_raise(self, store): store.insert(self.create_test_point(245)) with pytest.raises(FileExistsError): LogStore.from_options( SQLiteLogOptions( store._db_config.url.split("sqlite:///")[-1], ) ) class TestStepStore: @pytest.fixture def store(self, tmp_path): """Fixture to set up the IterationStore.""" return StepStore(SQLiteLogOptions(tmp_path / "test.db")) @staticmethod def create_test_point(i: int): return StepResult( f"random_{i}", StepType.OPTIMIZATION, StepStatus.RUNNING, n_iterations=i ) def test_table_creation(self, store): """Test that the IterationStore table is created properly.""" assert store.table_name in inspect(store.engine).get_table_names() def test_insert_and_query(self, store): """Test inserting and querying data in the IterationStore.""" result = self.create_test_point(2456) store.insert(result) queried_result = store.select(1)[0] assert queried_result is not None assert queried_result.n_iterations == result.n_iterations def test_insert_string(self, 
store): result = StepResult("strings", "optimization", "running", n_iterations=1) store.insert(result) queried_result = store.select(1)[0] assert queried_result is not None assert queried_result.status is StepStatus.RUNNING assert queried_result.type is StepType.OPTIMIZATION def test_update(self, store): """Test updating an entry in the IterationStore.""" # Insert initial data result = self.create_test_point(568) store.insert(result) queried_result = store.select(1)[0] # Update the value updated_result = StepResult( queried_result.name, queried_result.type, queried_result.status, n_iterations=50, ) store.update(key=1, value=updated_result) # Verify the update updated_entry = store.select(1)[0] assert updated_entry is not None assert updated_entry.n_iterations == 50 store.update(key=1, value={"n_iterations": 34}) updated_entry = store.select(1)[0] assert updated_entry is not None assert updated_entry.n_iterations == 34 with pytest.raises(ValueError): store.update(key=1, value={"N_iterations_typo": 34}) def test_serialization(self, store): """Test the serialization and deserialization of the IterationStore.""" pickled_store = pickle.dumps(store) unpickled_store = pickle.loads(pickled_store) assert store.table_name == unpickled_store.table_name assert store.table_name in inspect(unpickled_store.engine).get_table_names() @pytest.mark.parametrize( "executor_factory", [ lambda: ThreadPoolExecutor(max_workers=10), lambda: ProcessPoolExecutor(max_workers=10), ], ids=["threads", "processes"], ) def test_parallel_insert(self, store, executor_factory): """Test multithreaded writing and reading in the IterationStore.""" with executor_factory() as executor: # Insert data concurrently to_insert = list(map(self.create_test_point, range(10))) futures = [executor.submit(store.insert, item) for item in to_insert] for future in futures: future.result() result = store.select() assert [row.rowid for row in result] == list(range(1, 11)) assert set([row.n_iterations for row in result]) 
== set(range(10)) result_last = store.select_last_rows(5) assert len(result_last) == 5 @pytest.mark.parametrize( "executor_factory", [ lambda: ThreadPoolExecutor(max_workers=10), lambda: ProcessPoolExecutor(max_workers=10), ], ids=["threads", "processes"], ) def test_parallel_update(self, store, executor_factory): """Test multithreaded writing and reading in the IterationStore.""" with executor_factory() as executor: # Insert data concurrently to_insert = list(map(self.create_test_point, range(10))) futures = [executor.submit(store.insert, item) for item in to_insert] for future in futures: future.result() with executor_factory() as executor: # Update data concurrently to_update = [ (2, {"status": StepStatus.COMPLETE}), (2, {"n_iterations": 200}), ] futures = [executor.submit(store.update, *item) for item in to_update] for future in futures: future.result() result = store.select(2)[0] assert result.status == StepStatus.COMPLETE assert result.n_iterations == 200 ================================================ FILE: tests/optimagic/logging/test_types.py ================================================ import pytest from optimagic.logging.types import ( IterationStateWithId, ProblemInitializationWithId, StepResultWithId, ) def test_raise_on_missing_id(): with pytest.raises(ValueError, match="rowid"): IterationStateWithId(1, 2, 3, True, None, None, None) with pytest.raises(ValueError, match="rowid"): StepResultWithId("n", "optimization", "skipped") with pytest.raises(ValueError, match="rowid"): ProblemInitializationWithId("minimize", 2) ================================================ FILE: tests/optimagic/optimization/test_algorithm.py ================================================ from dataclasses import dataclass import numpy as np import pytest from optimagic.exceptions import InvalidAlgoInfoError, InvalidAlgoOptionError from optimagic.optimization.algorithm import AlgoInfo, Algorithm, InternalOptimizeResult from optimagic.optimization.history import 
HistoryEntry from optimagic.typing import ( AggregationLevel, EvalTask, NonNegativeFloat, PositiveFloat, PositiveInt, ) # ====================================================================================== # Test AlgoInfo does proper validation of arguments # ====================================================================================== INVALID_ALGO_INFO_KWARGS = [ {"name": 3}, {"solver_type": "scalar"}, {"is_available": "yes"}, {"is_global": "no"}, {"needs_jac": "yes"}, {"needs_hess": "no"}, {"needs_bounds": "no"}, {"supports_parallelism": "yes"}, {"supports_bounds": "no"}, {"supports_infinite_bounds": "no"}, {"supports_linear_constraints": "yes"}, {"supports_nonlinear_constraints": "no"}, {"disable_history": "no"}, ] @pytest.mark.parametrize("kwargs", INVALID_ALGO_INFO_KWARGS) def test_algo_info_validation(kwargs): valid_kwargs = { "name": "test", "solver_type": AggregationLevel.LEAST_SQUARES, "is_available": True, "is_global": True, "needs_jac": True, "needs_hess": True, "needs_bounds": True, "supports_parallelism": True, "supports_bounds": True, "supports_infinite_bounds": True, "supports_linear_constraints": True, "supports_nonlinear_constraints": True, "disable_history": True, } combined_kwargs = {**valid_kwargs, **kwargs} msg = "The following arguments to AlgoInfo or `mark.minimizer` are invalid" with pytest.raises(InvalidAlgoInfoError, match=msg): AlgoInfo(**combined_kwargs) # ====================================================================================== # Test InternalOptimizeResult does proper validation of arguments # ====================================================================================== INVALID_INTERNAL_OPTIMIZE_RESULT_KWARGS = [ {"x": 3}, {"fun": [1, 2, 3]}, {"success": "successful"}, {"message": 3}, {"n_fun_evals": "3"}, {"n_jac_evals": "3"}, {"n_hess_evals": "3"}, {"n_iterations": "3"}, {"status": "3"}, {"jac": "3"}, {"hess": "3"}, {"hess_inv": "3"}, {"max_constraint_violation": "3"}, ] 
@pytest.mark.parametrize("kwargs", INVALID_INTERNAL_OPTIMIZE_RESULT_KWARGS)
def test_internal_optimize_result_validation(kwargs):
    valid_kwargs = {
        "x": np.array([1, 2, 3]),
        "fun": 3.0,
        "success": True,
        "message": "success",
        "n_fun_evals": 3,
        "n_jac_evals": 3,
        "n_hess_evals": 3,
        "n_iterations": 3,
        "status": 3,
        "jac": np.array([1, 2, 3]),
        "hess": np.array([1, 2, 3]),
        "hess_inv": np.array([1, 2, 3]),
        "max_constraint_violation": 3.0,
    }
    combined_kwargs = {**valid_kwargs, **kwargs}
    msg = "The following arguments to InternalOptimizeResult are invalid"
    with pytest.raises(TypeError, match=msg):
        InternalOptimizeResult(**combined_kwargs)


# ======================================================================================
# Test the copy constructors of Algorithm
# ======================================================================================


@dataclass(frozen=True)
class DummyAlgorithm(Algorithm):
    """Minimal Algorithm subclass used to exercise the copy constructors."""

    initial_radius: PositiveFloat = 1.0
    max_radius: PositiveFloat = 10.0
    convergence_ftol_rel: NonNegativeFloat = 1e-6
    stopping_maxiter: PositiveInt = 1000

    def _solve_internal_problem(self, problem, x0):
        # Record a single dummy history entry and report x0 as the solution.
        hist_entry = HistoryEntry(
            params=x0,
            fun=0.0,
            start_time=0.0,
            task=EvalTask.FUN,
        )
        problem.history.add_entry(hist_entry)
        return InternalOptimizeResult(x=x0, fun=0.0, success=True)


def test_with_option():
    algo = DummyAlgorithm()
    new_algo = algo.with_option(initial_radius=2.0, max_radius=20.0)
    assert new_algo is not algo
    assert new_algo.initial_radius == 2.0
    assert new_algo.max_radius == 20.0


def test_with_option_invalid_key():
    algo = DummyAlgorithm()
    with pytest.raises(InvalidAlgoOptionError):
        algo.with_option(invalid_key=2.0)


def test_with_stopping():
    algo = DummyAlgorithm()
    new_algo = algo.with_stopping(maxiter=2000)
    assert new_algo is not algo
    assert new_algo.stopping_maxiter == 2000


def test_with_stopping_with_full_option_name():
    algo = DummyAlgorithm()
    new_algo = algo.with_stopping(stopping_maxiter=2000)
    assert new_algo is not algo
    assert new_algo.stopping_maxiter == 2000


def test_with_stopping_invalid_key():
    algo = DummyAlgorithm()
    with pytest.raises(InvalidAlgoOptionError):
        algo.with_stopping(invalid_key=2000)


def test_with_convergence():
    algo = DummyAlgorithm()
    new_algo = algo.with_convergence(ftol_rel=1e-5)
    assert new_algo is not algo
    assert new_algo.convergence_ftol_rel == 1e-5


def test_with_convergence_with_full_option_name():
    algo = DummyAlgorithm()
    new_algo = algo.with_convergence(convergence_ftol_rel=1e-5)
    assert new_algo is not algo
    assert new_algo.convergence_ftol_rel == 1e-5


def test_with_convergence_invalid_key():
    algo = DummyAlgorithm()
    with pytest.raises(InvalidAlgoOptionError):
        algo.with_convergence(invalid_key=1e-5)


def test_with_option_if_applicable():
    algo = DummyAlgorithm()
    # Unknown options trigger a warning but valid ones are still applied.
    with pytest.warns(UserWarning):
        new_algo = algo.with_option_if_applicable(
            invalid=15,
            initial_radius=42,
        )
    assert new_algo is not algo
    assert new_algo.initial_radius == 42


# ======================================================================================
# Test the type conversions of algo options
# ======================================================================================


def test_algorithm_does_type_conversion():
    algo = DummyAlgorithm(
        initial_radius="1.0",
        max_radius="10.0",
        convergence_ftol_rel="1e-6",
        stopping_maxiter="1000",
    )
    assert isinstance(algo.initial_radius, float)
    assert algo.initial_radius == 1.0
    assert isinstance(algo.max_radius, float)
    assert algo.max_radius == 10.0
    assert isinstance(algo.convergence_ftol_rel, float)
    assert algo.convergence_ftol_rel == 1e-6
    assert isinstance(algo.stopping_maxiter, int)
    assert algo.stopping_maxiter == 1000


def test_algorithm_does_type_conversion_in_with_option():
    algo = DummyAlgorithm()
    new_algo = algo.with_option(
        initial_radius="2.0",
        max_radius="20.0",
    )
    assert isinstance(new_algo.initial_radius, float)
    assert new_algo.initial_radius == 2.0
    assert isinstance(new_algo.max_radius, float)
    assert new_algo.max_radius == 20.0


def test_error_with_negative_radius():
    with pytest.raises(Exception):  # noqa: B017
        DummyAlgorithm(initial_radius=-1.0)
# ================================================
# FILE: tests/optimagic/optimization/test_convergence_report.py
# ================================================

import numpy as np
import pandas as pd
from numpy.testing import assert_array_almost_equal as aaae

from optimagic.optimization.convergence_report import get_convergence_report
from optimagic.optimization.history import History
from optimagic.typing import Direction, EvalTask


def test_get_convergence_report_minimize():
    hist = History(
        direction=Direction.MINIMIZE,
        params=[{"a": 0}, {"a": 2.1}, {"a": 2.5}, {"a": 2.0}],
        fun=[5, 4.1, 4.4, 4.0],
        start_time=[0, 1, 2, 3],
        stop_time=[1, 2, 3, 4],
        task=4 * [EvalTask.FUN],
        batches=[0, 1, 2, 3],
    )
    calculated = pd.DataFrame.from_dict(get_convergence_report(hist))
    expected = np.array([[0.025, 0.25], [0.05, 1.0], [0.1, 1], [0.1, 2.0]])
    aaae(calculated.to_numpy(), expected)


def test_get_convergence_report_maximize():
    hist = History(
        direction=Direction.MAXIMIZE,
        params=[{"a": 0}, {"a": 2.1}, {"a": 2.5}, {"a": 2.0}],
        fun=[-5, -4.1, -4.4, -4.0],
        start_time=[0, 1, 2, 3],
        stop_time=[1, 2, 3, 4],
        task=4 * [EvalTask.FUN],
        batches=[0, 1, 2, 3],
    )
    calculated = pd.DataFrame.from_dict(get_convergence_report(hist))
    expected = np.array([[0.025, 0.25], [0.05, 1.0], [0.1, 1], [0.1, 2.0]])
    aaae(calculated.to_numpy(), expected)


def test_history_is_too_short():
    # first value is best, so history of accepted parameters has only one entry
    hist = History(
        direction=Direction.MAXIMIZE,
        params=[{"a": 0}, {"a": 2.1}, {"a": 2.5}, {"a": 2.0}],
        fun=[5, 4.1, 4.4, 4.0],
        start_time=[0, 1, 2, 3],
        stop_time=[1, 2, 3, 4],
        task=4 * [EvalTask.FUN],
        batches=[0, 1, 2, 3],
    )
    calculated = get_convergence_report(hist)
    assert calculated is None


# ================================================
# FILE: tests/optimagic/optimization/test_create_optimization_problem.py
# ================================================

import pytest

from optimagic.optimization.create_optimization_problem import (
    pre_process_user_algorithm,
)
from optimagic.optimizers.scipy_optimizers import ScipyLBFGSB


def test_pre_process_user_algorithm_valid_string():
    got = pre_process_user_algorithm("scipy_lbfgsb")
    assert isinstance(got, ScipyLBFGSB)


def test_pre_process_user_algorithm_invalid_string():
    with pytest.raises(ValueError):
        pre_process_user_algorithm("not_an_algorithm")


def test_pre_process_user_algorithm_valid_instance():
    got = pre_process_user_algorithm(ScipyLBFGSB())
    assert isinstance(got, ScipyLBFGSB)


def test_pre_process_user_algorithm_valid_class():
    got = pre_process_user_algorithm(ScipyLBFGSB)
    assert isinstance(got, ScipyLBFGSB)


# ================================================
# FILE: tests/optimagic/optimization/test_error_penalty.py
# ================================================

import functools

import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal as aaae

from optimagic.differentiation.derivatives import first_derivative
from optimagic.optimization.error_penalty import (
    _likelihood_penalty,
    _penalty_residuals,
    _scalar_penalty,
    get_error_penalty_function,
)
from optimagic.optimization.fun_value import (
    LeastSquaresFunctionValue,
    LikelihoodFunctionValue,
    ScalarFunctionValue,
)
from optimagic.typing import AggregationLevel, Direction
from optimagic.utilities import get_rng


@pytest.mark.parametrize("seed", range(10))
def test_penalty_aggregations(seed):
    rng = get_rng(seed)
    x = rng.uniform(size=5)
    x0 = rng.uniform(size=5)
    slope = 0.3
    constant = 3
    dim_out = 10

    scalar, _ = _scalar_penalty(x, constant, slope, x0)
    contribs, _ = _likelihood_penalty(x, constant, slope, x0, dim_out)
    root_contribs, _ = _penalty_residuals(x, constant, slope, x0, dim_out)

    # All aggregation levels must describe the same scalar penalty.
    assert np.isclose(scalar.value, contribs.value.sum())
    assert np.isclose(scalar.value, (root_contribs.value**2).sum())


pairs = [
    (_scalar_penalty, AggregationLevel.SCALAR),
    (_likelihood_penalty, AggregationLevel.LIKELIHOOD),
    (_penalty_residuals, AggregationLevel.LEAST_SQUARES),
]
@pytest.mark.parametrize("func, solver_type", pairs)
def test_penalty_derivatives(func, solver_type):
    rng = get_rng(seed=5)
    x = rng.uniform(size=5)
    x0 = rng.uniform(size=5)
    slope = 0.3
    constant = 3
    dim_out = 8

    _, calculated = func(x, constant, slope, x0, dim_out)

    partialed = functools.partial(
        func, constant=constant, slope=slope, x0=x0, dim_out=dim_out
    )
    expected = first_derivative(
        partialed, x, unpacker=lambda x: x[0].internal_value(solver_type)
    )

    aaae(calculated, expected.derivative)


@pytest.mark.parametrize("seed", range(10))
def test_penalty_aggregations_via_get_error_penalty(seed):
    rng = get_rng(seed)
    x = rng.uniform(size=5)
    x0 = rng.uniform(size=5)
    slope = 0.3
    constant = 3

    scalar_func = get_error_penalty_function(
        start_x=x0,
        start_criterion=ScalarFunctionValue(3),
        error_penalty={"slope": slope, "constant": constant},
        solver_type=AggregationLevel.SCALAR,
        direction=Direction.MINIMIZE,
    )
    contribs_func = get_error_penalty_function(
        start_x=x0,
        start_criterion=LikelihoodFunctionValue(np.ones(10)),
        error_penalty={"slope": slope, "constant": constant},
        solver_type=AggregationLevel.LIKELIHOOD,
        direction=Direction.MINIMIZE,
    )
    root_contribs_func = get_error_penalty_function(
        start_x=x0,
        start_criterion=LeastSquaresFunctionValue(np.ones(10)),
        error_penalty={"slope": slope, "constant": constant},
        solver_type=AggregationLevel.LEAST_SQUARES,
        direction=Direction.MINIMIZE,
    )

    scalar, _ = scalar_func(x)
    contribs, _ = contribs_func(x)
    root_contribs, _ = root_contribs_func(x)

    assert np.isclose(scalar.value, contribs.value.sum())
    assert np.isclose(scalar.value, (root_contribs.value**2).sum())


# ================================================
# FILE: tests/optimagic/optimization/test_fun_value.py
# ================================================

import numpy as np
import pytest
from numpy.testing import assert_almost_equal as aae

from optimagic.exceptions import InvalidFunctionError
from optimagic.optimization.fun_value import (
    FunctionValue,
    LeastSquaresFunctionValue,
    LikelihoodFunctionValue,
    ScalarFunctionValue,
    enforce_return_type,
    enforce_return_type_with_jac,
)
from optimagic.typing import AggregationLevel

SCALAR_VALUES = [
    ScalarFunctionValue(5),
]

LS_VALUES = [
    LeastSquaresFunctionValue(np.array([1, 2])),
    LeastSquaresFunctionValue({"a": 1, "b": 2}),
]

LIKELIHOOD_VALUES = [
    LikelihoodFunctionValue(np.array([1, 4])),
    LikelihoodFunctionValue({"a": 1, "b": 4}),
]


@pytest.mark.parametrize("value", SCALAR_VALUES + LS_VALUES + LIKELIHOOD_VALUES)
def test_values_for_scalar_optimizers(value):
    got = value.internal_value(AggregationLevel.SCALAR)
    assert isinstance(got, float)
    assert got == 5.0


@pytest.mark.parametrize("value", LS_VALUES)
def test_values_for_least_squares_optimizers(value):
    got = value.internal_value(AggregationLevel.LEAST_SQUARES)
    assert isinstance(got, np.ndarray)
    assert got.dtype == np.float64
    aae(got, np.array([1.0, 2]))


@pytest.mark.parametrize("value", LS_VALUES + LIKELIHOOD_VALUES)
def test_values_for_likelihood_optimizers(value):
    got = value.internal_value(AggregationLevel.LIKELIHOOD)
    assert isinstance(got, np.ndarray)
    assert got.dtype == np.float64
    aae(got, np.array([1.0, 4]))


@pytest.mark.parametrize("value", SCALAR_VALUES + LIKELIHOOD_VALUES)
def test_invalid_values_for_least_squares_optimizers(value):
    # Fix: the body previously ignored the parametrized ``value`` and always
    # called SCALAR_VALUES[0].internal_value, so the likelihood cases were
    # never actually exercised.
    with pytest.raises(InvalidFunctionError):
        value.internal_value(AggregationLevel.LEAST_SQUARES)


@pytest.mark.parametrize("value", SCALAR_VALUES)
def test_invalid_values_for_likelihood_optimizers(value):
    # Fix: use the parametrized ``value`` instead of hard-coded
    # SCALAR_VALUES[0] (same defect as above).
    with pytest.raises(InvalidFunctionError):
        value.internal_value(AggregationLevel.LIKELIHOOD)


def test_enforce_scalar_with_scalar_return():
    @enforce_return_type(AggregationLevel.SCALAR)
    def f(x):
        return 3

    got = f(np.ones(3))
    assert isinstance(got, ScalarFunctionValue)
    assert got.value == 3


def test_enforce_scalar_with_function_value_return():
    @enforce_return_type(AggregationLevel.SCALAR)
    def f(x):
        return FunctionValue(3)

    got = f(np.ones(3))
    assert isinstance(got, ScalarFunctionValue)
    assert got.value == 3
def test_enforce_scalar_trivial_case():
    @enforce_return_type(AggregationLevel.SCALAR)
    def f(x):
        return ScalarFunctionValue(3)

    got = f(3)
    assert isinstance(got, ScalarFunctionValue)
    assert got.value == 3


def test_enforce_scalar_invalid_return():
    @enforce_return_type(AggregationLevel.SCALAR)
    def f(x):
        return x

    with pytest.raises(InvalidFunctionError):
        f(np.ones(3))


def test_enforce_least_squares_with_vector_return():
    @enforce_return_type(AggregationLevel.LEAST_SQUARES)
    def f(x):
        return np.ones(3)

    got = f(np.ones(3))
    assert isinstance(got, LeastSquaresFunctionValue)
    aae(got.value, np.ones(3))


def test_enforce_least_squares_with_function_value_return():
    @enforce_return_type(AggregationLevel.LEAST_SQUARES)
    def f(x):
        return FunctionValue(np.ones(3))

    got = f(np.ones(3))
    assert isinstance(got, LeastSquaresFunctionValue)
    aae(got.value, np.ones(3))


def test_enforce_least_squares_trivial_case():
    @enforce_return_type(AggregationLevel.LEAST_SQUARES)
    def f(x):
        return LeastSquaresFunctionValue(np.ones(3))

    got = f(np.ones(3))
    assert isinstance(got, LeastSquaresFunctionValue)
    aae(got.value, np.ones(3))


def test_enforce_least_squares_invalid_return():
    @enforce_return_type(AggregationLevel.LEAST_SQUARES)
    def f(x):
        return 3

    with pytest.raises(InvalidFunctionError):
        f(np.ones(3))


def test_enforce_likelihood_with_vector_return():
    @enforce_return_type(AggregationLevel.LIKELIHOOD)
    def f(x):
        return np.ones(3)

    got = f(np.ones(3))
    assert isinstance(got, LikelihoodFunctionValue)
    aae(got.value, np.ones(3))


def test_enforce_likelihood_with_function_value_return():
    @enforce_return_type(AggregationLevel.LIKELIHOOD)
    def f(x):
        return FunctionValue(np.ones(3))

    got = f(np.ones(3))
    assert isinstance(got, LikelihoodFunctionValue)
    aae(got.value, np.ones(3))


def test_enforce_likelihood_trivial_case():
    @enforce_return_type(AggregationLevel.LIKELIHOOD)
    def f(x):
        return LikelihoodFunctionValue(np.ones(3))

    got = f(np.ones(3))
    assert isinstance(got, LikelihoodFunctionValue)
    aae(got.value, np.ones(3))


def test_enforce_likelihood_invalid_return():
    @enforce_return_type(AggregationLevel.LIKELIHOOD)
    def f(x):
        return 3

    with pytest.raises(InvalidFunctionError):
        f(np.ones(3))


def test_enforce_scalar_with_jac_with_scalar_return():
    @enforce_return_type_with_jac(AggregationLevel.SCALAR)
    def f(x):
        return 3, np.zeros(3)

    got_value, got_jac = f(np.ones(3))
    assert isinstance(got_value, ScalarFunctionValue)
    assert got_value.value == 3
    aae(got_jac, np.zeros(3))


def test_enforce_scalar_with_jac_with_function_value_return():
    @enforce_return_type_with_jac(AggregationLevel.SCALAR)
    def f(x):
        return FunctionValue(3), np.zeros(3)

    got_value, got_jac = f(np.ones(3))
    assert isinstance(got_value, ScalarFunctionValue)
    assert got_value.value == 3
    aae(got_jac, np.zeros(3))


def test_enforce_scalar_with_jac_trivial_case():
    @enforce_return_type_with_jac(AggregationLevel.SCALAR)
    def f(x):
        return ScalarFunctionValue(3), np.zeros(3)

    got_value, got_jac = f(3)
    assert isinstance(got_value, ScalarFunctionValue)
    assert got_value.value == 3
    aae(got_jac, np.zeros(3))


def test_enforce_scalar_with_jac_invalid_return():
    @enforce_return_type_with_jac(AggregationLevel.SCALAR)
    def f(x):
        return x, np.zeros(3)

    with pytest.raises(InvalidFunctionError):
        f(np.ones(3))


def test_enforce_least_squares_with_jac_with_vector_return():
    @enforce_return_type_with_jac(AggregationLevel.LEAST_SQUARES)
    def f(x):
        return np.ones(3), np.zeros((3, 3))

    got_value, got_jac = f(np.ones(3))
    assert isinstance(got_value, LeastSquaresFunctionValue)
    aae(got_value.value, np.ones(3))
    aae(got_jac, np.zeros((3, 3)))


def test_enforce_least_squares_with_jac_with_function_value_return():
    @enforce_return_type_with_jac(AggregationLevel.LEAST_SQUARES)
    def f(x):
        return FunctionValue(np.ones(3)), np.zeros((3, 3))

    got_value, got_jac = f(np.ones(3))
    assert isinstance(got_value, LeastSquaresFunctionValue)
    aae(got_value.value, np.ones(3))
    aae(got_jac, np.zeros((3, 3)))


def test_enforce_least_squares_with_jac_trivial_case():
    @enforce_return_type_with_jac(AggregationLevel.LEAST_SQUARES)
    def f(x):
        return LeastSquaresFunctionValue(np.ones(3)), np.zeros((3, 3))

    got_value, got_jac = f(np.ones(3))
    assert isinstance(got_value, LeastSquaresFunctionValue)
    aae(got_value.value, np.ones(3))
    aae(got_jac, np.zeros((3, 3)))


def test_enforce_least_squares_with_jac_invalid_return():
    @enforce_return_type_with_jac(AggregationLevel.LEAST_SQUARES)
    def f(x):
        return 3, np.zeros((3, 3))

    with pytest.raises(InvalidFunctionError):
        f(np.ones(3))


def test_enforce_likelihood_with_jac_with_vector_return():
    @enforce_return_type_with_jac(AggregationLevel.LIKELIHOOD)
    def f(x):
        return np.ones(3), np.zeros((3, 3))

    got_value, got_jac = f(np.ones(3))
    assert isinstance(got_value, LikelihoodFunctionValue)
    aae(got_value.value, np.ones(3))
    aae(got_jac, np.zeros((3, 3)))


def test_enforce_likelihood_with_jac_with_function_value_return():
    @enforce_return_type_with_jac(AggregationLevel.LIKELIHOOD)
    def f(x):
        return FunctionValue(np.ones(3)), np.zeros((3, 3))

    got_value, got_jac = f(np.ones(3))
    assert isinstance(got_value, LikelihoodFunctionValue)
    aae(got_value.value, np.ones(3))
    aae(got_jac, np.zeros((3, 3)))


def test_enforce_likelihood_with_jac_trivial_case():
    @enforce_return_type_with_jac(AggregationLevel.LIKELIHOOD)
    def f(x):
        return LikelihoodFunctionValue(np.ones(3)), np.zeros((3, 3))

    got_value, got_jac = f(np.ones(3))
    assert isinstance(got_value, LikelihoodFunctionValue)
    aae(got_value.value, np.ones(3))
    aae(got_jac, np.zeros((3, 3)))


def test_enforce_likelihood_with_jac_invalid_return():
    @enforce_return_type_with_jac(AggregationLevel.LIKELIHOOD)
    def f(x):
        return 3, np.zeros((3, 3))

    with pytest.raises(InvalidFunctionError):
        f(np.ones(3))


# ================================================
# FILE: tests/optimagic/optimization/test_function_formats_ls.py
# ================================================

"""Test different ways of specifying objective functions and their derivatives.

We also test that least-squares problems can be optimized with scalar optimizers.
"""
import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal as aaae

from optimagic import mark, maximize, minimize
from optimagic.exceptions import InvalidFunctionError
from optimagic.optimization.fun_value import (
    FunctionValue,
    LeastSquaresFunctionValue,
)

# ======================================================================================
# minimize cases with numpy params
# ======================================================================================


@mark.least_squares
def sos_ls(x):
    return x


def typed_sos_ls(x: np.ndarray) -> LeastSquaresFunctionValue:
    return LeastSquaresFunctionValue(x)


@mark.least_squares
def sos_ls_with_info(x):
    return FunctionValue(x, info={"x": x})


MIN_FUNS = [
    sos_ls,
    typed_sos_ls,
    sos_ls_with_info,
]


def jac(x):
    return 2 * x


@mark.least_squares
def jac_ls(x):
    return np.diag(2 * x)


MIN_JACS = [None, [jac, jac_ls]]

ALGORITHMS = ["scipy_lbfgsb", "scipy_ls_lm"]


@pytest.mark.parametrize("fun", MIN_FUNS)
@pytest.mark.parametrize("jac", MIN_JACS)
@pytest.mark.parametrize("use_fun_and_jac", [False, True])
@pytest.mark.parametrize("algorithm", ALGORITHMS)
def test_least_squares_minimize(fun, jac, use_fun_and_jac, algorithm):
    start_params = np.array([1, 2, 3])

    if use_fun_and_jac and jac is not None:

        def fun_and_jac_scalar(x):
            return x @ x, 2 * x

        @mark.least_squares
        def fun_and_jac_ls(x):
            return x, np.diag(2 * x)

        fun_and_jac = [fun_and_jac_scalar, fun_and_jac_ls]
    else:
        fun_and_jac = None

    res = minimize(
        fun=fun,
        params=start_params,
        algorithm=algorithm,
        jac=jac,
        fun_and_jac=fun_and_jac,
    )
    aaae(res.params, np.zeros(3))


# ======================================================================================
# minimize cases with dict params
# ======================================================================================


def dict_jac(params):
    return {k: 2 * v for k, v in params.items()}


@mark.least_squares
def dict_jac_ls(params):
    # Build a diagonal "matrix" as a dict-of-dicts.
    out = {}
    for outer_key in params:
        row = {}
        for inner_key in params:
            if outer_key == inner_key:
                row[inner_key] = 2 * params[inner_key]
            else:
                row[inner_key] = 0
        out[outer_key] = row
    return out


MIN_JACS_DICT = [None, [dict_jac, dict_jac_ls]]


@pytest.mark.parametrize("fun", MIN_FUNS)
@pytest.mark.parametrize("jac", MIN_JACS_DICT)
@pytest.mark.parametrize("use_fun_and_jac", [False, True])
@pytest.mark.parametrize("algorithm", ALGORITHMS)
def test_least_squares_minimize_dict(fun, jac, use_fun_and_jac, algorithm):
    start_params = {"a": 1, "b": 2, "c": 3}

    if use_fun_and_jac and jac is not None:

        def fun_and_jac_dict_scalar(params):
            x = np.array(list(params.values()))
            return x @ x, dict_jac(params)

        @mark.least_squares
        def fun_and_jac_dict_ls(params):
            return params, dict_jac_ls(params)

        fun_and_jac = [fun_and_jac_dict_scalar, fun_and_jac_dict_ls]
    else:
        fun_and_jac = None

    res = minimize(
        fun=fun,
        params=start_params,
        algorithm=algorithm,
        jac=jac,
        fun_and_jac=fun_and_jac,
    )
    for key in start_params:
        assert np.allclose(res.params[key], 0, atol=1e-5)


# ======================================================================================
# invalid cases
# ======================================================================================


@pytest.mark.parametrize("algorithm", ALGORITHMS)
def test_maximize_with_ls_problems_raises_error(algorithm):
    with pytest.raises(InvalidFunctionError):
        maximize(
            fun=sos_ls,
            params=np.array([1, 2, 3]),
            algorithm=algorithm,
        )


@mark.least_squares
def invalid_sos_ls(x):
    return x @ x


@mark.least_squares
def invalid_sos_ls_with_info(x):
    return FunctionValue(x @ x, info={"x": x})


INVALID_FUNS = [
    invalid_sos_ls,
    invalid_sos_ls_with_info,
]


@pytest.mark.parametrize("fun", INVALID_FUNS)
@pytest.mark.parametrize("algorithm", ALGORITHMS)
def test_invalid_least_squares_minimize(fun, algorithm):
    start_params = np.array([1, 2, 3])
    with pytest.raises(InvalidFunctionError):
        minimize(
            fun=fun,
            params=start_params,
            algorithm=algorithm,
        )


@mark.least_squares
def invalid_jac_ls(x):
    return 2 * x


@mark.least_squares
def invalid_jac_ls_2(x):
    return FunctionValue(2 * x)


INVALID_JACS = [invalid_jac_ls, invalid_jac_ls_2]


@pytest.mark.parametrize("jac", INVALID_JACS)
def test_least_squares_minimize_with_invalid_jac(jac):
    with pytest.raises(Exception):  # noqa: B017
        minimize(
            fun=sos_ls,
            params=np.array([1, 2, 3]),
            algorithm="scipy_ls_lm",
            jac=jac,
        )


@mark.least_squares
def invalid_fun_and_jac_value(x):
    return x @ x, np.diag(2 * x)


@mark.least_squares
def invalid_fun_and_jac_derivative(x):
    return x, 2 * x


INVALID_FUN_AND_JACS = [invalid_fun_and_jac_value, invalid_fun_and_jac_derivative]


@pytest.mark.parametrize("fun_and_jac", INVALID_FUN_AND_JACS)
def test_least_squares_minimize_with_invalid_fun_and_jac(fun_and_jac):
    with pytest.raises(InvalidFunctionError):
        minimize(
            fun=sos_ls,
            params=np.array([1, 2, 3]),
            algorithm="scipy_ls_lm",
            fun_and_jac=fun_and_jac,
        )


# ================================================
# FILE: tests/optimagic/optimization/test_function_formats_scalar.py
# ================================================

"""Test different ways of specifying objective functions and their derivatives."""

import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal as aaae
from numpy.typing import NDArray

from optimagic import mark, maximize, minimize
from optimagic.exceptions import InvalidFunctionError
from optimagic.optimization.fun_value import FunctionValue, ScalarFunctionValue

# ======================================================================================
# minimize cases with numpy params
# ======================================================================================


def sos(x):
    return x @ x


@mark.scalar
def marked_sos(x):
    return x @ x


def typed_sos_float(x: np.ndarray) -> float:
    return x @ x


def typed_sos_value(x: np.ndarray) -> ScalarFunctionValue:
    return ScalarFunctionValue(x @ x)


def sos_with_info(x):
    return FunctionValue(x @ x, info={"x": x})


MIN_FUNS = [
    sos,
    marked_sos,
    typed_sos_float,
    typed_sos_value,
    sos_with_info,
]
def jac(x):
    return 2 * x


@mark.scalar
def marked_jac(x):
    return 2 * x


MIN_JACS = [None, jac, marked_jac]

FUN_AND_JAC_CASES = [None, "marked", "unmarked"]


@pytest.mark.parametrize("fun", MIN_FUNS)
@pytest.mark.parametrize("jac", MIN_JACS)
@pytest.mark.parametrize("fun_and_jac_case", FUN_AND_JAC_CASES)
def test_minimize_with_numpy_inputs(fun, jac, fun_and_jac_case):
    if fun_and_jac_case is None:
        fun_and_jac = None
    elif fun_and_jac_case == "marked":

        @mark.scalar
        def fun_and_jac(x):
            return fun(x), 2 * x

    else:

        def fun_and_jac(x):
            return fun(x), 2 * x

    res = minimize(
        fun=fun,
        params=np.array([1, 2, 3]),
        algorithm="scipy_lbfgsb",
        jac=jac,
        fun_and_jac=fun_and_jac,
    )
    aaae(res.params, np.zeros(3))


# ======================================================================================
# maximize cases with numpy params
# ======================================================================================


def neg_sos(x):
    return -x @ x


@mark.scalar
def marked_neg_sos(x):
    return -x @ x


def typed_neg_sos_float(x: np.ndarray) -> float:
    return -x @ x


def typed_neg_sos_value(x: np.ndarray) -> ScalarFunctionValue:
    return ScalarFunctionValue(-x @ x)


def neg_sos_with_info(x):
    return FunctionValue(-x @ x, info={"x": x})


MAX_FUNS = [
    neg_sos,
    marked_neg_sos,
    typed_neg_sos_float,
    typed_neg_sos_value,
    neg_sos_with_info,
]


def neg_jac(x):
    return -2 * x


@mark.scalar
def marked_neg_jac(x):
    return -2 * x


MAX_JACS = [None, neg_jac, marked_neg_jac]


@pytest.mark.parametrize("fun", MAX_FUNS)
@pytest.mark.parametrize("jac", MAX_JACS)
@pytest.mark.parametrize("fun_and_jac_case", FUN_AND_JAC_CASES)
def test_maximize_with_numpy_inputs(fun, jac, fun_and_jac_case):
    if fun_and_jac_case is None:
        fun_and_jac = None
    elif fun_and_jac_case == "marked":

        @mark.scalar
        def fun_and_jac(x):
            return fun(x), -2 * x

    else:

        def fun_and_jac(x):
            return fun(x), -2 * x

    res = maximize(
        fun=fun,
        params=np.array([1, 2, 3]),
        algorithm="scipy_lbfgsb",
        jac=jac,
        fun_and_jac=fun_and_jac,
    )
    aaae(res.params, np.zeros(3))


# ======================================================================================
# minimize cases with dict params
# ======================================================================================


def sos_dict(params):
    x = np.array(list(params.values()))
    return x @ x


@mark.scalar
def marked_sos_dict(params):
    x = np.array(list(params.values()))
    return x @ x


def typed_sos_dict_float(params: dict) -> float:
    x = np.array(list(params.values()))
    return x @ x


def typed_sos_dict_value(params: dict) -> ScalarFunctionValue:
    x = np.array(list(params.values()))
    return ScalarFunctionValue(x @ x)


def sos_dict_with_info(params):
    x = np.array(list(params.values()))
    return FunctionValue(x @ x, info={"x": x})


MIN_FUNS_DICT = [
    sos_dict,
    marked_sos_dict,
    typed_sos_dict_float,
    typed_sos_dict_value,
    sos_dict_with_info,
]


def jac_dict(params):
    return {k: 2 * v for k, v in params.items()}


@mark.scalar
def marked_jac_dict(params):
    return {k: 2 * v for k, v in params.items()}


MIN_JACS_DICT = [None, jac_dict, marked_jac_dict]


@pytest.mark.parametrize("fun", MIN_FUNS_DICT)
@pytest.mark.parametrize("jac", MIN_JACS_DICT)
@pytest.mark.parametrize("fun_and_jac_case", FUN_AND_JAC_CASES)
def test_minimize_with_dict_inputs(fun, jac, fun_and_jac_case):
    if fun_and_jac_case is None:
        fun_and_jac = None
    elif fun_and_jac_case == "marked":

        @mark.scalar
        def fun_and_jac(params):
            return fun(params), {k: 2 * v for k, v in params.items()}

    else:

        def fun_and_jac(params):
            return fun(params), {k: 2 * v for k, v in params.items()}

    res = minimize(
        fun=fun,
        params={"x": 1, "y": 2, "z": 3},
        algorithm="scipy_lbfgsb",
        jac=jac,
        fun_and_jac=fun_and_jac,
    )
    for number in res.params.values():
        assert np.allclose(number, 0, atol=1e-5)


# ======================================================================================
# maximize cases with dict params
# ======================================================================================


def neg_sos_dict(params):
    x = np.array(list(params.values()))
    return -x @ x


@mark.scalar
def marked_neg_sos_dict(params):
    x = np.array(list(params.values()))
    return -x @ x


def typed_neg_sos_dict_float(params: dict) -> float:
    x = np.array(list(params.values()))
    return -x @ x


def typed_neg_sos_dict_value(params: dict) -> ScalarFunctionValue:
    x = np.array(list(params.values()))
    return ScalarFunctionValue(-x @ x)


def neg_sos_dict_with_info(params):
    x = np.array(list(params.values()))
    return FunctionValue(-x @ x, info={"x": x})


MAX_FUNS_DICT = [
    neg_sos_dict,
    marked_neg_sos_dict,
    typed_neg_sos_dict_float,
    typed_neg_sos_dict_value,
    neg_sos_dict_with_info,
]


def neg_jac_dict(params):
    return {k: -2 * v for k, v in params.items()}


@mark.scalar
def marked_neg_jac_dict(params):
    return {k: -2 * v for k, v in params.items()}


MAX_JACS_DICT = [None, neg_jac_dict, marked_neg_jac_dict]


@pytest.mark.parametrize("fun", MAX_FUNS_DICT)
@pytest.mark.parametrize("jac", MAX_JACS_DICT)
@pytest.mark.parametrize("fun_and_jac_case", FUN_AND_JAC_CASES)
def test_maximize_with_dict_inputs(fun, jac, fun_and_jac_case):
    if fun_and_jac_case is None:
        fun_and_jac = None
    elif fun_and_jac_case == "marked":

        @mark.scalar
        def fun_and_jac(params):
            return fun(params), {k: -2 * v for k, v in params.items()}

    else:

        def fun_and_jac(params):
            return fun(params), {k: -2 * v for k, v in params.items()}

    res = maximize(
        fun=fun,
        params={"x": 1, "y": 2, "z": 3},
        algorithm="scipy_lbfgsb",
        jac=jac,
        fun_and_jac=fun_and_jac,
    )
    for number in res.params.values():
        assert np.allclose(number, 0, atol=1e-5)


# ======================================================================================
# invalid cases; Only test minimize for things that cannot plausibly depend on the
# direction of the optimization
# ======================================================================================


def test_invalid_marker_for_jac_in_minimize():
    @mark.least_squares
    def jac(x):
        return 2 * x

    with pytest.warns(UserWarning):
        minimize(
            fun=sos,
            params=np.array([1, 2, 3]),
            algorithm="scipy_lbfgsb",
            jac=jac,
        )


def test_invalid_marker_for_fun_and_jac_in_minimize():
    @mark.least_squares
    def fun_and_jac(x):
        return x @ x, 2 * x

    with pytest.warns(UserWarning):
        minimize(
            fun=sos,
            params=np.array([1, 2, 3]),
            algorithm="scipy_lbfgsb",
            fun_and_jac=fun_and_jac,
        )


def invalid_sos(x):
    return x


@mark.scalar
def invalid_marked_sos(x):
    return x


def invalid_typed_sos_array(x: np.ndarray) -> NDArray[np.float64]:
    return x


def invalid_typed_sos_value(x: np.ndarray) -> ScalarFunctionValue:
    return ScalarFunctionValue(x)


def invalid_sos_with_info(x):
    return FunctionValue(x, info={"x": x})


INVALID_FUNS = [
    invalid_sos,
    invalid_marked_sos,
    invalid_typed_sos_array,
    invalid_typed_sos_value,
    invalid_sos_with_info,
]


@pytest.mark.parametrize("fun", INVALID_FUNS)
def test_minimize_with_invalid_fun(fun):
    with pytest.raises(InvalidFunctionError):
        minimize(
            fun=fun,
            params=np.array([1, 2, 3]),
            algorithm="scipy_lbfgsb",
        )


def invalid_jac(x):
    return np.eye(len(x))


@mark.scalar
def invalid_marked_jac(x):
    return np.eye(len(x))


INVALID_JACS = [invalid_jac, invalid_marked_jac]


@pytest.mark.parametrize("jac", INVALID_JACS)
def test_minimize_with_invalid_jac(jac):
    with pytest.raises(Exception):  # noqa: B017
        minimize(
            fun=sos,
            params=np.array([1, 2, 3]),
            algorithm="scipy_lbfgsb",
            jac=jac,
        )


def invalid_fun_and_jac(x):
    return x, np.eye(len(x))


@mark.scalar
def invalid_marked_fun_and_jac(x):
    return x, np.eye(len(x))


INVALID_FUN_AND_JACS = [invalid_fun_and_jac, invalid_marked_fun_and_jac]


@pytest.mark.parametrize("fun_and_jac", INVALID_FUN_AND_JACS)
def test_minimize_with_invalid_fun_and_jac(fun_and_jac):
    with pytest.raises(Exception):  # noqa: B017
        minimize(
            fun=sos,
            params=np.array([1, 2, 3]),
            algorithm="scipy_lbfgsb",
            fun_and_jac=fun_and_jac,
        )


# ================================================
# FILE: tests/optimagic/optimization/test_history.py
# ================================================

import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_array_almost_equal as aaae
from numpy.testing import assert_array_equal
from pandas.testing import assert_frame_equal
from pybaum import tree_map

import optimagic as om
from optimagic.optimization.history import (
    History,
    HistoryEntry,
    _apply_reduction_to_batches,
    _calculate_monotone_sequence,
    _get_batch_starts_and_stops,
    _get_flat_param_names,
    _get_flat_params,
    _is_1d_array,
    _task_to_categorical,
    _validate_args_are_all_none_or_lists_of_same_length,
)
from optimagic.typing import Direction, EvalTask
pytest from numpy.testing import assert_array_almost_equal as aaae from numpy.testing import assert_array_equal from pandas.testing import assert_frame_equal from pybaum import tree_map import optimagic as om from optimagic.optimization.history import ( History, HistoryEntry, _apply_reduction_to_batches, _calculate_monotone_sequence, _get_batch_starts_and_stops, _get_flat_param_names, _get_flat_params, _is_1d_array, _task_to_categorical, _validate_args_are_all_none_or_lists_of_same_length, ) from optimagic.typing import Direction, EvalTask # ====================================================================================== # Test methods to add data to History (add_entry, add_batch, init) # ====================================================================================== @pytest.fixture def history_entries(): return [ HistoryEntry( params={"a": 1, "b": [2, 3]}, fun=1, start_time=0.1, stop_time=0.2, task=EvalTask.FUN, ), HistoryEntry( params={"a": 4, "b": [5, 6]}, fun=3, start_time=0.2, stop_time=0.3, task=EvalTask.FUN, ), HistoryEntry( params={"a": 7, "b": [8, 9]}, fun=2, start_time=0.3, stop_time=0.4, task=EvalTask.FUN, ), ] def test_history_add_entry(history_entries): history = History(Direction.MINIMIZE) for entry in history_entries: history.add_entry(entry) assert history.direction == Direction.MINIMIZE assert history.params == [ {"a": 1, "b": [2, 3]}, {"a": 4, "b": [5, 6]}, {"a": 7, "b": [8, 9]}, ] assert history.task == [EvalTask.FUN, EvalTask.FUN, EvalTask.FUN] assert history.batches == [0, 1, 2] aaae(history.fun, [1, 3, 2]) aaae(history.start_time, [0.1, 0.2, 0.3]) aaae(history.stop_time, [0.2, 0.3, 0.4]) assert_array_equal(history.monotone_fun, np.array([1, 1, 1], dtype=np.float64)) assert_array_equal( history.flat_params, np.arange(1, 10, dtype=np.float64).reshape(3, 3) ) def test_history_add_batch(history_entries): history = History(Direction.MAXIMIZE) history.add_batch(history_entries) assert history.direction == Direction.MAXIMIZE assert 
history.params == [ {"a": 1, "b": [2, 3]}, {"a": 4, "b": [5, 6]}, {"a": 7, "b": [8, 9]}, ] assert history.task == [EvalTask.FUN, EvalTask.FUN, EvalTask.FUN] assert history.batches == [0, 0, 0] aaae(history.fun, [1, 3, 2]) aaae(history.start_time, [0.1, 0.2, 0.3]) aaae(history.stop_time, [0.2, 0.3, 0.4]) assert_array_equal(history.monotone_fun, np.array([1, 3, 3], dtype=np.float64)) assert_array_equal( history.flat_params, np.arange(1, 10, dtype=np.float64).reshape(3, 3) ) def test_history_from_data(): data = { "params": [{"a": 1, "b": [2, 3]}, {"a": 4, "b": [5, 6]}, {"a": 7, "b": [8, 9]}], "fun": [1, 3, 2], "task": [EvalTask.FUN, EvalTask.FUN, EvalTask.FUN], "batches": [0, 0, 0], "start_time": [0.0, 0.15, 0.3], "stop_time": [0.1, 0.25, 0.4], } history = History( direction=Direction.MAXIMIZE, **data, ) assert history.direction == Direction.MAXIMIZE assert history.params == data["params"] assert history.task == data["task"] assert history.batches == data["batches"] aaae(history.fun, data["fun"]) aaae(history.start_time, data["start_time"]) aaae(history.stop_time, data["stop_time"]) assert_array_equal(history.monotone_fun, np.array([1, 3, 3], dtype=np.float64)) assert_array_equal( history.flat_params, np.arange(1, 10, dtype=np.float64).reshape(3, 3) ) # ====================================================================================== # Test functionality of History # ====================================================================================== @pytest.fixture def params(): params_tree = {"a": None, "b": {"c": None, "d": (None, None)}} return [ tree_map(lambda _: k, params_tree, is_leaf=lambda leaf: leaf is None) # noqa: B023 for k in range(6) ] @pytest.fixture def history_data(params): return { "fun": [10, 9, None, None, 2, 5], "task": [ EvalTask.FUN, EvalTask.FUN, EvalTask.JAC, EvalTask.JAC, EvalTask.FUN, EvalTask.FUN_AND_JAC, ], "start_time": [0, 2, 5, 7, 10, 12], "stop_time": [1, 4, 6, 9, 11, 14], "params": params, "batches": [0, 1, 2, 3, 4, 5], } 
@pytest.fixture def history(history_data): return History(direction=Direction.MINIMIZE, **history_data) @pytest.fixture def history_parallel(history_data): data = history_data.copy() data["batches"] = [0, 0, 1, 1, 2, 3] return History(direction=Direction.MINIMIZE, **data) # Function data, function value, and monotone function value # -------------------------------------------------------------------------------------- def test_history_fun_data_with_fun_evaluations_cost_model(history: History): got = history.fun_data( cost_model=om.timing.fun_evaluations, monotone=False, ) exp = pd.DataFrame( { "fun": [10, 9, np.nan, np.nan, 2, 5], "time": [1, 2, 2, 2, 3, 4], "task": [ "fun", "fun", "jac", "jac", "fun", "fun_and_jac", ], } ) assert_frame_equal(got, exp, check_dtype=False, check_categorical=False) def test_history_fun_data_with_fun_evaluations_cost_model_and_monotone( history: History, ): got = history.fun_data( cost_model=om.timing.fun_evaluations, monotone=True, ) exp = pd.DataFrame( { "fun": [10, 9, np.nan, np.nan, 2, 2], "time": [1, 2, 2, 2, 3, 4], "task": [ "fun", "fun", "jac", "jac", "fun", "fun_and_jac", ], } ) assert_frame_equal(got, exp, check_dtype=False, check_categorical=False) def test_history_fun_data_with_fun_batches_cost_model(history_parallel: History): got = history_parallel.fun_data( cost_model=om.timing.fun_batches, monotone=False, ) exp = pd.DataFrame( { "fun": [9, np.nan, 2, 5], "time": [1.0, 1.0, 2.0, 3.0], "task": [ "fun", "jac", "fun", "fun_and_jac", ], } ) assert_frame_equal(got, exp, check_dtype=False, check_categorical=False) def test_history_fun_data_with_evaluation_time_cost_model(history: History): got = history.fun_data( cost_model=om.timing.evaluation_time, monotone=False, ) exp = pd.DataFrame( { "fun": [10, 9, np.nan, np.nan, 2, 5], "time": [1, 3, 4, 6, 7, 9], "task": [ "fun", "fun", "jac", "jac", "fun", "fun_and_jac", ], } ) assert_frame_equal(got, exp, check_dtype=False, check_categorical=False) def test_fun_property(history: 
History): assert_array_equal(history.fun, [10, 9, None, None, 2, 5]) def test_monotone_fun_property(history: History): assert_array_equal(history.monotone_fun, np.array([10, 9, np.nan, np.nan, 2, 2])) # Acceptance # -------------------------------------------------------------------------------------- def test_is_accepted_property(history: History): got = history.is_accepted exp = np.array([True, True, False, False, True, False]) assert_array_equal(got, exp) # Parameter data, params, flat params, and flat params names # -------------------------------------------------------------------------------------- def test_params_data_fun_evaluations_cost_model(history: History): got = history.params_data() exp = ( pd.DataFrame( { "counter": np.tile(np.arange(6), reps=4), "name": np.repeat( [ "a", "b_c", "b_d_0", "b_d_1", ], 6, ), "value": np.tile(list(range(6)), 4), "task": np.tile( [ "fun", "fun", "jac", "jac", "fun", "fun_and_jac", ], 4, ), "fun": np.tile( [ 10, 9, np.nan, np.nan, 2, 5, ], 4, ), } ) .set_index(["counter", "name"]) .sort_index() ) assert_frame_equal(got, exp, check_categorical=False, check_dtype=False) def test_params_data_fun_evaluations_cost_model_parallel(history_parallel: History): got = history_parallel.params_data() exp = ( pd.DataFrame( { "counter": np.tile(np.arange(6), reps=4), "name": np.repeat( [ "a", "b_c", "b_d_0", "b_d_1", ], 6, ), "value": np.tile(list(range(6)), 4), "task": np.tile( [ "fun", "fun", "jac", "jac", "fun", "fun_and_jac", ], 4, ), "fun": np.tile( [ 10, 9, np.nan, np.nan, 2, 5, ], 4, ), } ) .set_index(["counter", "name"]) .sort_index() ) assert_frame_equal(got, exp, check_categorical=False, check_dtype=False) def test_params_data_fun_evaluations_cost_model_parallel_collapse_batches( history_parallel: History, ): got = history_parallel.params_data(collapse_batches=True) exp = ( pd.DataFrame( { "counter": np.tile([0, 1, 2, 3], reps=4), "name": np.repeat( [ "a", "b_c", "b_d_0", "b_d_1", ], 4, ), "value": np.tile([1, 2, 4, 5], 4), 
"task": np.tile(["fun", "jac", "fun", "fun_and_jac"], 4), "fun": np.tile([9, np.nan, 2, 5], 4), } ) .set_index(["counter", "name"]) .sort_index() ) assert_frame_equal(got, exp, check_categorical=False, check_dtype=False) def test_params_property(history, params): assert history.params == params def test_flat_params_property(history: History): got = history.flat_params assert_array_equal(got, [[k for _ in range(4)] for k in range(6)]) def test_flat_param_names(history: History): assert history.flat_param_names == ["a", "b_c", "b_d_0", "b_d_1"] # Time # -------------------------------------------------------------------------------------- def test_get_total_timings_per_task_fun(history: History): got = history._get_timings_per_task(EvalTask.FUN, cost_factor=1) exp = np.array([1, 1, 0, 0, 1, 0]) assert_array_equal(got, exp) def test_get_total_timings_per_task_jac_cost_factor_none(history: History): got = history._get_timings_per_task(EvalTask.JAC, cost_factor=None) exp = np.array([0, 0, 1, 2, 0, 0]) assert_array_equal(got, exp) def test_get_total_timings_per_task_fun_and_jac(history: History): got = history._get_timings_per_task(EvalTask.FUN_AND_JAC, cost_factor=-0.5) exp = np.array([0, 0, 0, 0, 0, -0.5]) assert_array_equal(got, exp) def test_get_total_timings_custom_cost_model(history: History): cost_model = om.timing.CostModel( fun=0.5, jac=1, fun_and_jac=2, label="test", aggregate_batch_time=sum ) got = history._get_total_timings(cost_model) exp = np.array( [ 0.5, 0.5, 1, 1, 0.5, 2, ] ) assert_array_equal(got, exp) def test_get_total_timings_fun_evaluations(history: History): got = history._get_total_timings(cost_model=om.timing.fun_evaluations) exp = np.array([1, 1, 0, 0, 1, 1]) assert_array_equal(got, exp) def test_get_total_timings_fun_batches(history: History): got = history._get_total_timings(cost_model=om.timing.fun_batches) exp = np.array([1, 1, 0, 0, 1, 1]) assert_array_equal(got, exp) def test_get_total_timings_fun_batches_parallel(history_parallel: 
History): got = history_parallel._get_total_timings(cost_model=om.timing.fun_batches) exp = np.array([1, 1, 0, 0, 1, 1]) assert_array_equal(got, exp) def test_get_total_timings_evaluation_time(history: History): got = history._get_total_timings(cost_model=om.timing.evaluation_time) exp = np.array([1, 2, 1, 2, 1, 2]) assert_array_equal(got, exp) def test_get_total_timings_wall_time(history: History): got = history._get_total_timings(cost_model="wall_time") exp = np.array([1, 4, 6, 9, 11, 14]) assert_array_equal(got, exp) def test_get_total_timings_invalid_cost_model(history: History): with pytest.raises( TypeError, match="cost_model must be a CostModel or 'wall_time'." ): history._get_total_timings(cost_model="invalid") def test_start_time_property(history: History): assert history.start_time == [0, 2, 5, 7, 10, 12] def test_stop_time_property(history: History): assert history.stop_time == [1, 4, 6, 9, 11, 14] # Batches # -------------------------------------------------------------------------------------- def test_batches_property(history: History): assert history.batches == [0, 1, 2, 3, 4, 5] # Tasks # -------------------------------------------------------------------------------------- def test_task_property(history: History): assert history.task == [ EvalTask.FUN, EvalTask.FUN, EvalTask.JAC, EvalTask.JAC, EvalTask.FUN, EvalTask.FUN_AND_JAC, ] # ====================================================================================== # Unit tests # ====================================================================================== def test_is_1d_array(): assert _is_1d_array(np.arange(2)) is True assert _is_1d_array(np.eye(2)) is False assert _is_1d_array([0, 1]) is False def test_get_flat_params_pytree(): params = [ {"a": 1, "b": [0, 1], "c": np.arange(2)}, {"a": 2, "b": [1, 2], "c": np.arange(2)}, ] got = _get_flat_params(params) exp = [ [1, 0, 1, 0, 1], [2, 1, 2, 0, 1], ] assert_array_equal(got, exp) def test_get_flat_params_fast_path(): params = 
[np.arange(2)] got = _get_flat_params(params) exp = [[0, 1]] assert_array_equal(got, exp) def test_get_flat_param_names_pytree(): got = _get_flat_param_names(param={"a": 0, "b": [0, 1], "c": np.arange(2)}) exp = ["a", "b_0", "b_1", "c_0", "c_1"] assert got == exp def test_get_flat_param_names_fast_path(): got = _get_flat_param_names(param=np.arange(2)) exp = ["0", "1"] assert got == exp def test_calculate_monotone_sequence_maximize(): sequence = [0, 1, 0, 0, 2, 10, 0] exp = [0, 1, 1, 1, 2, 10, 10] got = _calculate_monotone_sequence(sequence, direction=Direction.MAXIMIZE) assert_array_equal(exp, got) def test_calculate_monotone_sequence_minimize(): sequence = [10, 11, 8, 12, 0, 5] exp = [10, 10, 8, 8, 0, 0] got = _calculate_monotone_sequence(sequence, direction=Direction.MINIMIZE) assert_array_equal(exp, got) def test_validate_args_are_all_none_or_lists_of_same_length(): _validate_args_are_all_none_or_lists_of_same_length(None, None) _validate_args_are_all_none_or_lists_of_same_length([1], [1]) with pytest.raises(ValueError, match="All list arguments must have the same"): _validate_args_are_all_none_or_lists_of_same_length([1], [1, 2]) with pytest.raises(ValueError, match="All arguments must be lists of the same"): _validate_args_are_all_none_or_lists_of_same_length(None, [1]) def test_task_as_categorical(): task = [EvalTask.FUN, EvalTask.JAC, EvalTask.FUN_AND_JAC] got = _task_to_categorical(task) assert got.tolist() == ["fun", "jac", "fun_and_jac"] assert isinstance(got.dtype, pd.CategoricalDtype) def test_get_batch_starts_and_stops(): batches = [0, 0, 1, 1, 1, 2, 2, 3] got_starts, got_stops = _get_batch_starts_and_stops(batches) assert got_starts == [0, 2, 5, 7] assert got_stops == [2, 5, 7, 8] def test_apply_to_batch_sum(): data = np.array([0, 1, 2, 3, 4]) batch_ids = [0, 0, 1, 1, 2] exp = np.array([1, 5, 4]) got = _apply_reduction_to_batches(data, batch_ids, sum) assert_array_equal(exp, got) def test_apply_to_batch_max(): data = np.array([0, 1, 2, 3, 4]) 
batch_ids = [0, 0, 1, 1, 2] exp = np.array([1, 3, 4]) got = _apply_reduction_to_batches(data, batch_ids, max) assert_array_equal(exp, got) def test_apply_to_batch_broken_func(): data = np.array([0, 1, 2, 3, 4]) batch_ids = [0, 0, 1, 1, 2] with pytest.raises(ValueError, match="Calling function on batch [0, 0]"): _apply_reduction_to_batches(data, batch_ids, reduction_function=lambda _: 1 / 0) def test_apply_to_batch_func_with_non_scalar_return(): data = np.array([0, 1, 2, 3, 4]) batch_ids = [0, 0, 1, 1, 2] with pytest.raises(ValueError, match="Function did not return a scalar"): _apply_reduction_to_batches( data, batch_ids, reduction_function=lambda _list: _list ) ================================================ FILE: tests/optimagic/optimization/test_history_collection.py ================================================ import sys from dataclasses import dataclass import numpy as np import pytest from numpy.testing import assert_array_almost_equal as aaae from numpy.testing import assert_array_equal as aae from optimagic import SQLiteLogReader, mark from optimagic.algorithms import AVAILABLE_ALGORITHMS from optimagic.logging import SQLiteLogOptions from optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult from optimagic.optimization.optimize import minimize from optimagic.parameters.bounds import Bounds from optimagic.typing import AggregationLevel OPTIMIZERS = [] BOUNDED = [] for name, algo in AVAILABLE_ALGORITHMS.items(): info = algo.algo_info if not info.disable_history: if info.supports_parallelism: OPTIMIZERS.append(name) if info.supports_bounds: BOUNDED.append(name) @pytest.mark.skipif(sys.platform == "win32", reason="Slow on windows.") @pytest.mark.parametrize("algorithm", OPTIMIZERS) def test_history_collection_with_parallelization(algorithm, tmp_path): lb = np.zeros(5) if algorithm in BOUNDED else None ub = np.full(5, 10) if algorithm in BOUNDED else None path = tmp_path / "log.db" algo_options = {"n_cores": 2} if algorithm == 
"nevergrad_pso": algo_options["stopping_maxfun"] = 15 else: algo_options["stopping_maxiter"] = 3 collected_hist = minimize( fun=mark.least_squares(lambda x: x), params=np.arange(5), algorithm=algorithm, bounds=Bounds(lower=lb, upper=ub), algo_options=algo_options, logging=SQLiteLogOptions(path=path, if_database_exists="replace"), ).history reader = SQLiteLogReader(path) log_hist = reader.read_history() # We cannot expect the order to be the same aaae(sorted(collected_hist.fun), sorted(log_hist.fun)) @mark.minimizer( name="dummy", solver_type=AggregationLevel.SCALAR, is_available=True, is_global=False, needs_jac=False, needs_hess=False, needs_bounds=False, supports_parallelism=True, supports_bounds=False, supports_infinite_bounds=False, supports_linear_constraints=False, supports_nonlinear_constraints=False, disable_history=False, ) @dataclass(frozen=True) class DummyOptimizer(Algorithm): n_cores: int = 1 batch_size: int = 1 def _solve_internal_problem(self, problem, x0): assert self.batch_size in [1, 2, 4] xs = np.arange(15).repeat(len(x0)).reshape(15, len(x0)) start_index = 0 for iteration in range(3): start_index = iteration * 5 # do four evaluations in a batch evaluator problem.batch_fun( list(xs[start_index : start_index + 4]), n_cores=self.n_cores, batch_size=self.batch_size, ) # do one evaluation without the batch evaluator problem.fun(xs[start_index + 4]) out = InternalOptimizeResult( x=xs[-1], fun=5, success=True, n_fun_evals=15, n_iterations=3, ) return out def _get_fake_history(batch_size): if batch_size == 1: batches = list(range(15)) elif batch_size == 2: batches = [0, 0, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7, 7, 8] elif batch_size == 4: batches = [0, 0, 0, 0, 1, 2, 2, 2, 2, 3, 4, 4, 4, 4, 5] else: raise ValueError("batch_size must be 1, 2 or 4.") out = { "params": list(np.arange(15).repeat(5).reshape(15, 5)), "criterion": [5] * 15, "batches": batches, } return out def _fake_criterion(x): return 5 CASES = [(1, 1), (1, 2), (2, 2), (1, 4), (2, 4)] 
@pytest.mark.skipif(sys.platform == "win32", reason="Slow on windows.") @pytest.mark.parametrize("n_cores, batch_size", CASES) def test_history_collection_with_dummy_optimizer(n_cores, batch_size): options = { "batch_size": batch_size, "n_cores": n_cores, } res = minimize( fun=_fake_criterion, params=np.arange(5), algorithm=DummyOptimizer, algo_options=options, ) got_history = res.history expected_history = _get_fake_history(batch_size) aae(got_history.batches, expected_history["batches"]) assert got_history.fun == expected_history["criterion"][: len(got_history.fun)] aaae(got_history.params, expected_history["params"][: len(got_history.params)]) ================================================ FILE: tests/optimagic/optimization/test_infinite_and_incomplete_bounds.py ================================================ import numpy as np import pytest from numpy.testing import assert_array_almost_equal as aaae from optimagic import mark from optimagic.config import IS_NEVERGRAD_INSTALLED from optimagic.optimization.optimize import minimize @mark.least_squares def sos(x): return x @pytest.mark.skipif( not IS_NEVERGRAD_INSTALLED, reason="nevergrad not installed", ) def test_no_bounds_with_nevergrad(): res = minimize( fun=sos, params=np.arange(3), algorithm="nevergrad_cmaes", collect_history=True, skip_checks=True, algo_options={"seed": 12345, "stopping_maxfun": 10000}, ) aaae(res.x, np.zeros(3), 4) ================================================ FILE: tests/optimagic/optimization/test_internal_optimization_problem.py ================================================ from copy import copy import numpy as np import pytest from numpy.testing import assert_array_almost_equal as aaae from optimagic import NumdiffOptions from optimagic.batch_evaluators import process_batch_evaluator from optimagic.config import CRITERION_PENALTY_CONSTANT, CRITERION_PENALTY_SLOPE from optimagic.exceptions import UserFunctionRuntimeError from optimagic.optimization.error_penalty import 
get_error_penalty_function from optimagic.optimization.fun_value import ( LeastSquaresFunctionValue, ScalarFunctionValue, ) from optimagic.optimization.internal_optimization_problem import ( InternalBounds, InternalOptimizationProblem, SphereExampleInternalOptimizationProblem, SphereExampleInternalOptimizationProblemWithConverter, ) from optimagic.parameters.conversion import Converter from optimagic.typing import AggregationLevel, Direction, ErrorHandling, EvalTask @pytest.fixture def base_problem(): """Set up a basic InternalOptimizationProblem that can be modified for tests.""" def fun(params): return LeastSquaresFunctionValue(value=params, info={"mean": params.mean()}) def jac(params): return 2 * params def fun_and_jac(params): return fun(params), jac(params) converter = Converter( params_to_internal=lambda x: x, params_from_internal=lambda x: x, derivative_to_internal=lambda d, x: d, has_transforming_constraints=False, ) solver_type = AggregationLevel.SCALAR direction = Direction.MINIMIZE bounds = InternalBounds(lower=None, upper=None) numdiff_options = NumdiffOptions() error_handling = ErrorHandling.RAISE batch_evaluator = process_batch_evaluator(batch_evaluator="joblib") linear_constraints = None nonlinear_constraints = None problem = InternalOptimizationProblem( fun=fun, jac=jac, fun_and_jac=fun_and_jac, converter=converter, solver_type=solver_type, direction=direction, bounds=bounds, numdiff_options=numdiff_options, error_handling=error_handling, error_penalty_func=None, batch_evaluator=batch_evaluator, linear_constraints=linear_constraints, nonlinear_constraints=nonlinear_constraints, logger=None, ) return problem # ====================================================================================== # Test fun, jac, fun_and_jac # ====================================================================================== def test_base_problem_fun(base_problem): got = base_problem.fun(np.array([1, 2, 3])) expected = 14 assert got == expected def 
test_base_problem_jac(base_problem): got = base_problem.jac(np.array([1, 2, 3])) expected = 2 * np.array([1, 2, 3]) aaae(got, expected) def test_base_problem_fun_and_jac(base_problem): got_fun, got_jac = base_problem.fun_and_jac(np.array([1, 2, 3])) expected_fun, expected_jac = (14, 2 * np.array([1, 2, 3])) assert got_fun == expected_fun aaae(got_jac, expected_jac) def test_fun_and_jac_is_called_for_jac_if_jac_is_not_given(base_problem): """This makes sure we don't use numdiff if we don't have to.""" call_log = [] def fun_and_jac(params): call_log.append("fun_and_jac") return LeastSquaresFunctionValue(value=params), 2 * np.array([1, 2, 3]) base_problem._jac = None base_problem._fun_and_jac = fun_and_jac base_problem.jac(np.array([1, 2, 3])) assert call_log == ["fun_and_jac"] def test_jac_is_called_for_fun_and_jac_if_fun_is_not_given(base_problem): """This makes sure we don't use numdiff if we don't have to.""" call_log = [] def jac(params): call_log.append("jac") return 2 * np.array([1, 2, 3]) base_problem._fun_and_jac = None base_problem._jac = jac base_problem.fun_and_jac(np.array([1, 2, 3])) assert call_log == ["jac"] def test_base_problem_jac_via_numdiff(base_problem): base_problem._jac = None base_problem._fun_and_jac = None got = base_problem.jac(np.array([1, 2, 3])) expected = 2 * np.array([1, 2, 3]) aaae(got, expected) def test_base_problem_fun_and_jac_via_numdiff(base_problem): base_problem._jac = None base_problem._fun_and_jac = None got_fun, got_jac = base_problem.fun_and_jac(np.array([1, 2, 3])) expected_fun, expected_jac = (14, 2 * np.array([1, 2, 3])) assert got_fun == expected_fun aaae(got_jac, expected_jac) def test_error_in_fun_with_error_handling_raise(base_problem): def fun(params): raise ValueError("Test error") base_problem._fun = fun with pytest.raises(UserFunctionRuntimeError): base_problem.fun(np.array([1, 2, 3])) def test_error_in_fun_during_numdiff_with_error_handling_raise(base_problem): def fun(params): raise ValueError("Test error") 
base_problem._fun = fun base_problem._jac = None base_problem._fun_and_jac = None with pytest.raises(UserFunctionRuntimeError): base_problem.jac(np.array([1, 2, 3])) def test_base_problem_different_jac_versions(base_problem): got_jac_1 = base_problem.jac(np.array([1, 2, 3])) _, got_jac_2 = base_problem.fun_and_jac(np.array([1, 2, 3])) base_problem._jac = None base_problem._fun_and_jac = None got_jac_3 = base_problem.jac(np.array([1, 2, 3])) aaae(got_jac_1, got_jac_2) aaae(got_jac_1, got_jac_3) def test_base_problem_fun_for_ls_optimizer(base_problem): base_problem._solver_type = AggregationLevel.LEAST_SQUARES got = base_problem.fun(np.array([1, 2, 3])) expected = np.array([1, 2, 3]) aaae(got, expected) def test_base_problem_exploration_fun(base_problem): got = base_problem.exploration_fun( [np.array([1, 2, 3]), np.array([4, 5, 6])], n_cores=1 ) expected = [14, 77] assert got == expected # ====================================================================================== # test history # ====================================================================================== def test_history_with_fun(base_problem): base_problem.fun(np.array([1, 2, 3])) assert len(base_problem.history.params) == 1 aaae(base_problem.history.params[0], [1, 2, 3]) assert base_problem.history.fun == [14] assert base_problem.history.task == [EvalTask.FUN] assert base_problem.history.batches == [0] def test_history_with_batch_fun(base_problem): base_problem.batch_fun( [np.array([1, 2, 3]), np.array([4, 5, 6])], n_cores=1, batch_size=2 ) assert len(base_problem.history.params) == 2 aaae(base_problem.history.params[0], [1, 2, 3]) aaae(base_problem.history.params[1], [4, 5, 6]) assert base_problem.history.fun == [14, 77] assert base_problem.history.task == [EvalTask.FUN, EvalTask.FUN] assert base_problem.history.batches == [0, 0] def test_history_with_jac(base_problem): base_problem.jac(np.array([1, 2, 3])) assert len(base_problem.history.params) == 1 aaae(base_problem.history.params[0], [1, 
2, 3]) assert base_problem.history.fun == [None] assert base_problem.history.task == [EvalTask.JAC] assert base_problem.history.batches == [0] def test_history_with_batch_jac(base_problem): base_problem.batch_jac( [np.array([1, 2, 3]), np.array([4, 5, 6])], n_cores=1, batch_size=2 ) assert len(base_problem.history.params) == 2 aaae(base_problem.history.params[0], [1, 2, 3]) aaae(base_problem.history.params[1], [4, 5, 6]) assert base_problem.history.fun == [None, None] assert base_problem.history.task == [EvalTask.JAC, EvalTask.JAC] assert base_problem.history.batches == [0, 0] def test_history_with_fun_and_jac(base_problem): base_problem.fun_and_jac(np.array([1, 2, 3])) assert len(base_problem.history.params) == 1 aaae(base_problem.history.params[0], [1, 2, 3]) assert base_problem.history.fun == [14] assert base_problem.history.task == [EvalTask.FUN_AND_JAC] assert base_problem.history.batches == [0] def test_history_with_batch_fun_and_jac(base_problem): base_problem.batch_fun_and_jac( [np.array([1, 2, 3]), np.array([4, 5, 6])], n_cores=1, batch_size=2 ) assert len(base_problem.history.params) == 2 aaae(base_problem.history.params[0], [1, 2, 3]) aaae(base_problem.history.params[1], [4, 5, 6]) assert base_problem.history.fun == [14, 77] assert base_problem.history.task == [EvalTask.FUN_AND_JAC, EvalTask.FUN_AND_JAC] assert base_problem.history.batches == [0, 0] def test_history_with_exploration_fun(base_problem): base_problem.exploration_fun( [np.array([1, 2, 3]), np.array([4, 5, 6])], n_cores=1, batch_size=2 ) assert len(base_problem.history.params) == 2 aaae(base_problem.history.params[0], [1, 2, 3]) aaae(base_problem.history.params[1], [4, 5, 6]) assert base_problem.history.fun == [14, 77] assert base_problem.history.task == [EvalTask.EXPLORATION, EvalTask.EXPLORATION] assert base_problem.history.batches == [0, 0] def test_with_history_copy_constructor(base_problem): new = base_problem.with_new_history() new.fun(np.array([1, 2, 3])) assert len(new.history.params) 
== 1 assert len(base_problem.history.params) == 0 # ====================================================================================== # test batch versions # ====================================================================================== @pytest.mark.parametrize("n_cores", [1, 2]) def test_batch_fun(base_problem, n_cores): got = base_problem.batch_fun( [np.array([1, 2, 3]), np.array([4, 5, 6])], n_cores=n_cores ) expected = [14, 77] assert got == expected @pytest.mark.parametrize("n_cores", [1, 2]) def test_batch_jac(base_problem, n_cores): got = base_problem.batch_jac( [np.array([1, 2, 3]), np.array([4, 5, 6])], n_cores=n_cores ) expected = [2 * np.array([1, 2, 3]), 2 * np.array([4, 5, 6])] aaae(got[0], expected[0]) aaae(got[1], expected[1]) @pytest.mark.parametrize("n_cores", [1, 2]) def test_batch_fun_and_jac(base_problem, n_cores): res = base_problem.batch_fun_and_jac( [np.array([1, 2, 3]), np.array([4, 5, 6])], n_cores=n_cores ) got_fun = [r[0] for r in res] got_jac = [r[1] for r in res] expected_fun = [14, 77] expected_jac = [2 * np.array([1, 2, 3]), 2 * np.array([4, 5, 6])] assert got_fun == expected_fun aaae(got_jac, expected_jac) # ====================================================================================== # test sign flipping # ====================================================================================== @pytest.fixture def max_problem(base_problem): """Flip the sign of the functions. The sign should be flipped back by InternalOptimizationProblem such that in the end the same values for fun, jac, and fun_and_jac are returned as for the base_problem. 
""" def fun(params): return ScalarFunctionValue(value=-params @ params) def jac(params): return -2 * params def fun_and_jac(params): return fun(params), jac(params) max_problem = copy(base_problem) max_problem._direction = Direction.MAXIMIZE max_problem._fun = fun max_problem._jac = jac max_problem._fun_and_jac = fun_and_jac return max_problem def test_max_problem_fun(max_problem): got = max_problem.fun(np.array([1, 2, 3])) expected = 14 assert got == expected def test_max_problem_jac(max_problem): got = max_problem.jac(np.array([1, 2, 3])) expected = 2 * np.array([1, 2, 3]) aaae(got, expected) def test_max_problem_fun_and_jac(max_problem): got_fun, got_jac = max_problem.fun_and_jac(np.array([1, 2, 3])) expected_fun, expected_jac = (14, 2 * np.array([1, 2, 3])) assert got_fun == expected_fun aaae(got_jac, expected_jac) def test_jac_via_numdiff(max_problem): max_problem._jac = None max_problem._fun_and_jac = None got = max_problem.jac(np.array([1, 2, 3])) expected = 2 * np.array([1, 2, 3]) aaae(got, expected) def test_fun_and_jac_via_numdiff(max_problem): max_problem._jac = None max_problem._fun_and_jac = None got_fun, got_jac = max_problem.fun_and_jac(np.array([1, 2, 3])) expected_fun, expected_jac = (14, 2 * np.array([1, 2, 3])) assert got_fun == expected_fun aaae(got_jac, expected_jac) def test_max_problem_exploration_fun(max_problem): got = max_problem.exploration_fun( [np.array([1, 2, 3]), np.array([4, 5, 6])], n_cores=1 ) expected = [14, 77] assert got == expected # ====================================================================================== # test pytree ls output and params # ====================================================================================== @pytest.fixture def pytree_problem(base_problem): def fun(params): assert isinstance(params, dict) return LeastSquaresFunctionValue(value=params) def jac(params): assert isinstance(params, dict) out = {} for outer_key in params: row = {} for inner_key in params: if inner_key == outer_key: 
row[inner_key] = 1 else: row[inner_key] = 0 out[outer_key] = row return out def fun_and_jac(params): assert isinstance(params, dict) return fun(params), jac(params) def derivative_flatten(tree, x): out = [list(row.values()) for row in tree.values()] return np.array(out) converter = Converter( params_to_internal=lambda x: np.array(list(x.values())), params_from_internal=lambda x: { k: v for k, v in zip(["a", "b", "c"], x, strict=False) }, derivative_to_internal=derivative_flatten, has_transforming_constraints=False, ) solver_type = AggregationLevel.LEAST_SQUARES direction = Direction.MINIMIZE bounds = InternalBounds(lower=None, upper=None) numdiff_options = NumdiffOptions() error_handling = ErrorHandling.RAISE batch_evaluator = process_batch_evaluator(batch_evaluator="joblib") linear_constraints = None nonlinear_constraints = None problem = InternalOptimizationProblem( fun=fun, jac=jac, fun_and_jac=fun_and_jac, converter=converter, solver_type=solver_type, direction=direction, bounds=bounds, numdiff_options=numdiff_options, error_handling=error_handling, error_penalty_func=None, batch_evaluator=batch_evaluator, linear_constraints=linear_constraints, nonlinear_constraints=nonlinear_constraints, logger=None, ) return problem def test_pytree_problem_fun(pytree_problem): got = pytree_problem.fun(np.array([1, 2, 3])) expected = np.array([1, 2, 3]) aaae(got, expected) def test_pytree_problem_fun_scalar_output(pytree_problem): pytree_problem._solver_type = AggregationLevel.SCALAR got = pytree_problem.fun(np.array([1, 2, 3])) expected = 14 assert got == expected def test_pytree_problem_jac(pytree_problem): got = pytree_problem.jac(np.array([1, 2, 3])) expected = np.eye(3) aaae(got, expected) def test_pytree_problem_fun_and_jac(pytree_problem): got_fun, got_jac = pytree_problem.fun_and_jac(np.array([1, 2, 3])) expected_fun, expected_jac = np.array([1, 2, 3]), np.eye(3) aaae(got_jac, expected_jac) aaae(got_fun, expected_fun) def 
test_pytree_problem_exploration_fun(pytree_problem):
    got = pytree_problem.exploration_fun(
        [np.array([1, 2, 3]), np.array([4, 5, 6])], n_cores=1
    )
    expected = [14, 77]
    assert got == expected


def test_numerical_jac_for_pytree_problem(pytree_problem):
    # Removing the user-provided derivatives forces .jac to fall back to a
    # numerical derivative.
    pytree_problem._jac = None
    pytree_problem._fun_and_jac = None
    got = pytree_problem.jac(np.array([1, 2, 3]))
    expected = np.eye(3)
    aaae(got, expected)


def test_numerical_fun_and_jac_for_pytree_problem(pytree_problem):
    # Same fallback as above, but via the combined fun_and_jac entry point.
    pytree_problem._jac = None
    pytree_problem._fun_and_jac = None
    got_fun, got_jac = pytree_problem.fun_and_jac(np.array([1, 2, 3]))
    expected_fun, expected_jac = np.array([1, 2, 3]), np.eye(3)
    aaae(got_fun, expected_fun)
    aaae(got_jac, expected_jac)


# ======================================================================================
# test error penalty with minimize
# ======================================================================================


@pytest.fixture
def error_min_problem():
    """Set up a basic InternalOptimizationProblem that can be modified for tests."""

    # All user functions raise, so every evaluation goes through the error
    # penalty because error_handling is CONTINUE.
    def fun(params):
        raise ValueError("Test error")

    def jac(params):
        raise ValueError("Test error")

    def fun_and_jac(params):
        raise ValueError("Test error")

    # Identity converter: internal and external params coincide.
    converter = Converter(
        params_to_internal=lambda x: x,
        params_from_internal=lambda x: x,
        derivative_to_internal=lambda d, x: d,
        has_transforming_constraints=False,
    )
    solver_type = AggregationLevel.SCALAR
    direction = Direction.MINIMIZE
    bounds = InternalBounds(lower=None, upper=None)
    numdiff_options = NumdiffOptions()
    error_handling = ErrorHandling.CONTINUE
    batch_evaluator = process_batch_evaluator(batch_evaluator="joblib")
    linear_constraints = None
    nonlinear_constraints = None
    start_params = np.array([1, 2, 3])
    error_penalty_function = get_error_penalty_function(
        start_x=start_params,
        error_penalty=None,
        start_criterion=ScalarFunctionValue(14),
        direction=direction,
        solver_type=solver_type,
    )
    problem = InternalOptimizationProblem(
        fun=fun,
        jac=jac,
        fun_and_jac=fun_and_jac,
        converter=converter,
        solver_type=solver_type,
        direction=direction,
        bounds=bounds,
        numdiff_options=numdiff_options,
        error_handling=error_handling,
        error_penalty_func=error_penalty_function,
        batch_evaluator=batch_evaluator,
        linear_constraints=linear_constraints,
        nonlinear_constraints=nonlinear_constraints,
        logger=None,
    )
    return problem


def test_error_in_fun_minimize(error_min_problem):
    got = error_min_problem.fun(np.array([2, 3, 4]))
    # [2, 3, 4] is at distance sqrt(3) from the start params [1, 2, 3].
    # NOTE(review): 28 appears to be 2 * the start criterion of 14 used to
    # build the penalty — confirm against get_error_penalty_function.
    expected = 28 + CRITERION_PENALTY_CONSTANT + np.sqrt(3) * CRITERION_PENALTY_SLOPE
    assert np.allclose(got, expected)


def test_error_in_jac_minimize(error_min_problem):
    got = error_min_problem.jac(np.array([2, 3, 4]))
    expected = np.full(3, CRITERION_PENALTY_SLOPE) / np.sqrt(3)
    aaae(got, expected)


def test_error_in_fun_and_jac_minimize(error_min_problem):
    got_fun, got_jac = error_min_problem.fun_and_jac(np.array([2, 3, 4]))
    expected_fun = (
        28 + CRITERION_PENALTY_CONSTANT + np.sqrt(3) * CRITERION_PENALTY_SLOPE
    )
    expected_jac = np.full(3, CRITERION_PENALTY_SLOPE) / np.sqrt(3)
    assert np.allclose(got_fun, expected_fun)
    aaae(got_jac, expected_jac)


def test_error_in_numerical_jac_minimize(error_min_problem):
    # Even the numerical-derivative fallback must route through the penalty.
    error_min_problem._jac = None
    error_min_problem._fun_and_jac = None
    got = error_min_problem.jac(np.array([2, 3, 4]))
    expected = np.full(3, CRITERION_PENALTY_SLOPE) / np.sqrt(3)
    aaae(got, expected)


def test_error_in_exploration_fun_minimize(error_min_problem):
    got = error_min_problem.exploration_fun(
        [np.array([2, 3, 4]), np.array([5, 6, 7])], n_cores=1
    )
    # Failed exploration evaluations are reported as -inf.
    expected = [-np.inf, -np.inf]
    assert np.allclose(got, expected)


# ======================================================================================
# test error penalty with maximize
# ======================================================================================


@pytest.fixture
def error_max_problem(error_min_problem):
    # Reuse the minimize fixture but flip the direction and rebuild the
    # penalty with a sign-flipped start criterion.
    problem = copy(error_min_problem)
    problem._direction = Direction.MAXIMIZE
    error_penalty_function = get_error_penalty_function(
        start_x=np.array([1, 2, 3]),
        error_penalty=None,
        start_criterion=ScalarFunctionValue(-14),
        direction=problem._direction,
        solver_type=problem._solver_type,
    )
    problem._error_penalty_func = error_penalty_function
    return problem


def test_error_in_fun_maximize(error_max_problem):
    got = error_max_problem.fun(np.array([2, 3, 4]))
    # Internally everything is a minimization, so the expected penalty value
    # matches the minimize case.
    expected = 28 + CRITERION_PENALTY_CONSTANT + np.sqrt(3) * CRITERION_PENALTY_SLOPE
    assert np.allclose(got, expected)


def test_error_in_jac_maximize(error_max_problem):
    got = error_max_problem.jac(np.array([2, 3, 4]))
    expected = np.full(3, CRITERION_PENALTY_SLOPE) / np.sqrt(3)
    aaae(got, expected)


def test_error_in_fun_and_jac_maximize(error_max_problem):
    got_fun, got_jac = error_max_problem.fun_and_jac(np.array([2, 3, 4]))
    expected_fun = (
        28 + CRITERION_PENALTY_CONSTANT + np.sqrt(3) * CRITERION_PENALTY_SLOPE
    )
    expected_jac = np.full(3, CRITERION_PENALTY_SLOPE) / np.sqrt(3)
    assert np.allclose(got_fun, expected_fun)
    aaae(got_jac, expected_jac)


def test_error_in_numerical_jac_maximize(error_max_problem):
    error_max_problem._jac = None
    error_max_problem._fun_and_jac = None
    got = error_max_problem.jac(np.array([2, 3, 4]))
    expected = np.full(3, CRITERION_PENALTY_SLOPE) / np.sqrt(3)
    aaae(got, expected)


def test_error_in_exploration_fun_maximize(error_max_problem):
    got = error_max_problem.exploration_fun(
        [np.array([2, 3, 4]), np.array([5, 6, 7])], n_cores=1
    )
    expected = [-np.inf, -np.inf]
    assert np.allclose(got, expected)


# ======================================================================================
# test SphereExampleInternalOptimizationProblem
# ======================================================================================


def test_sphere_example_internal_optimization_problem():
    problem = SphereExampleInternalOptimizationProblem()
    assert problem.fun(np.array([1, 2, 3])) == 14
    aaae(problem.jac(np.array([1, 2, 3])), np.array([2, 4, 6]))
    f, j = problem.fun_and_jac(np.array([1, 2, 3]))
    assert f == 14
    aaae(j, np.array([2, 4, 6]))


def test_sphere_example_internal_optimization_problem_with_converter():
    problem = SphereExampleInternalOptimizationProblemWithConverter()
    assert problem.fun(np.array([1, 2, 3])) == 14
    aaae(problem.jac(np.array([1, 2, 3])), np.array([2, 4, 6]))
    f, j = problem.fun_and_jac(np.array([1, 2, 3]))
    assert f == 14
    aaae(j, np.array([2, 4, 6]))


================================================
FILE: tests/optimagic/optimization/test_invalid_jacobian_value.py
================================================
import numpy as np
import pytest

from optimagic.exceptions import UserFunctionRuntimeError
from optimagic.optimization.optimize import minimize

# ======================================================================================
# Test setup:
# --------------------------------------------------------------------------------------
# We test that minimize raises an error if the user function returns a jacobian
# containing invalid values (np.inf, np.nan). To test that this works not only at
# the start parameters, we create jac functions that return invalid values if the
# parameter norm becomes smaller than one.
# ====================================================================================== @pytest.fixture def params(): return {"a": 1, "b": np.array([3, 4])} def sphere(params): return params["a"] ** 2 + (params["b"] ** 2).sum() def sphere_gradient(params): return { "a": 2 * params["a"], "b": 2 * params["b"], } def sphere_and_gradient(params): return sphere(params), sphere_gradient(params) def params_norm(params): squared_norm = params["a"] ** 2 + np.linalg.norm(params["b"]) ** 2 return np.sqrt(squared_norm) def get_invalid_jac(invalid_jac_value): """Get function that returns invalid jac if the parameter norm < 1.""" def jac(params): if params_norm(params) < 1: return invalid_jac_value else: return sphere_gradient(params) return jac def get_invalid_fun_and_jac(invalid_jac_value): """Get function that returns invalid fun and jac if the parameter norm < 1.""" def fun_and_jac(params): if params_norm(params) < 1: return sphere(params), invalid_jac_value else: return sphere_and_gradient(params) return fun_and_jac INVALID_JACOBIAN_VALUES = [ {"a": np.inf, "b": 2 * np.array([1, 2])}, {"a": 1, "b": 2 * np.array([np.inf, 2])}, {"a": np.nan, "b": 2 * np.array([1, 2])}, {"a": 1, "b": 2 * np.array([np.nan, 2])}, ] # ====================================================================================== # Test Invalid Jacobian raises proper error with jac argument # ====================================================================================== @pytest.mark.parametrize("invalid_jac_value", INVALID_JACOBIAN_VALUES) def test_minimize_with_invalid_jac(invalid_jac_value, params): with pytest.raises( UserFunctionRuntimeError, match=( "The optimization failed because the derivative provided via jac " "contains infinite or NaN values." 
), ): minimize( fun=sphere, params=params, algorithm="scipy_lbfgsb", jac=get_invalid_jac(invalid_jac_value), ) # ====================================================================================== # Test Invalid Jacobian raises proper error with fun_and_jac argument # ====================================================================================== @pytest.mark.parametrize("invalid_jac_value", INVALID_JACOBIAN_VALUES) def test_minimize_with_invalid_fun_and_jac(invalid_jac_value, params): with pytest.raises( UserFunctionRuntimeError, match=( "The optimization failed because the derivative provided via fun_and_jac " "contains infinite or NaN values." ), ): minimize( params=params, algorithm="scipy_lbfgsb", fun_and_jac=get_invalid_fun_and_jac(invalid_jac_value), ) ================================================ FILE: tests/optimagic/optimization/test_jax_derivatives.py ================================================ import numpy as np import pytest from numpy.testing import assert_array_almost_equal as aaae from optimagic.config import IS_JAX_INSTALLED from optimagic.optimization.optimize import minimize if IS_JAX_INSTALLED: import jax import jax.numpy as jnp @pytest.mark.skipif(not IS_JAX_INSTALLED, reason="Needs jax.") def test_scipy_conference_example(): def criterion(x): first = (x["a"] - jnp.pi) ** 2 second = jnp.linalg.norm(x["b"] - jnp.arange(3)) third = jnp.linalg.norm(x["c"] - jnp.eye(2)) return first + second + third start_params = { "a": 1.0, "b": jnp.ones(3).astype(float), "c": jnp.ones((2, 2)).astype(float), } gradient = jax.grad(criterion) res = minimize( fun=criterion, jac=gradient, params=start_params, algorithm="scipy_lbfgsb", ) assert isinstance(res.params["b"], jnp.ndarray) aaae(res.params["b"], jnp.arange(3)) aaae(res.params["c"], jnp.eye(2)) assert np.allclose(res.params["a"], np.pi, atol=1e-4) @pytest.mark.skipif(not IS_JAX_INSTALLED, reason="Needs jax.") def test_params_is_jax_scalar(): def criterion(x): return x**2 res = minimize( 
fun=criterion, params=jnp.array(1.0), algorithm="scipy_lbfgsb", jac=jax.grad(criterion), ) assert isinstance(res.params, jnp.ndarray) assert np.allclose(res.params, 0.0) @pytest.mark.skipif(not IS_JAX_INSTALLED, reason="Needs jax.") def params_is_1d_array(): def criterion(x): return x @ x res = minimize( fun=criterion, params=jnp.arange(3), algorithm="scipy_lbfgsb", jac=jax.grad(criterion), ) assert isinstance(res.params, jnp.ndarray) assert aaae(res.params, jnp.arange(3)) @pytest.mark.skipif(not IS_JAX_INSTALLED, reason="Needs jax.") @pytest.mark.parametrize("algorithm", ["scipy_lbfgsb", "scipy_ls_lm"]) def test_dict_output_works(algorithm): def criterion(x): return {"root_contributions": x, "value": x @ x} def scalar_wrapper(x): return criterion(x)["value"] def ls_wrapper(x): return criterion(x)["root_contributions"] deriv_dict = { "value": jax.grad(scalar_wrapper), "root_contributions": jax.jacobian(ls_wrapper), } res = minimize( fun=criterion, params=jnp.array([1.0, 2.0, 3.0]), algorithm=algorithm, jac=deriv_dict, ) assert isinstance(res.params, jnp.ndarray) aaae(res.params, np.zeros(3)) @pytest.mark.skipif(not IS_JAX_INSTALLED, reason="Needs jax.") def test_least_squares_optimizer_pytree(): def criterion(x): return {"root_contributions": x} def ls_wrapper(x): return criterion(x)["root_contributions"] params = {"a": 1.0, "b": 2.0, "c": jnp.array([1.0, 2.0])} jac = jax.jacobian(ls_wrapper) res = minimize( fun=criterion, params=params, algorithm="scipy_ls_lm", jac=jac, ) assert isinstance(res.params, dict) assert np.allclose(res.params["a"], 0) assert np.allclose(res.params["b"], 0) aaae(res.params["c"], np.zeros(2)) ================================================ FILE: tests/optimagic/optimization/test_many_algorithms.py ================================================ """Test all available algorithms on a simple sum of squares function. 
- only minimize - only numerical derivative """ import sys import numpy as np import pytest from numpy.testing import assert_array_almost_equal as aaae from optimagic import mark from optimagic.algorithms import AVAILABLE_ALGORITHMS, GLOBAL_ALGORITHMS from optimagic.optimization.optimize import minimize from optimagic.parameters.bounds import Bounds AVAILABLE_LOCAL_ALGORITHMS = [ name for name, algo in AVAILABLE_ALGORITHMS.items() if name not in GLOBAL_ALGORITHMS and name != "bhhh" ] AVAILABLE_BOUNDED_ALGORITHMS = [ name for name, algo in AVAILABLE_ALGORITHMS.items() if algo.algo_info.supports_bounds ] PRECISION_LOOKUP = { "scipy_trust_constr": 3, "iminuit_migrad": 2, } @pytest.fixture def algo(algorithm): return AVAILABLE_ALGORITHMS[algorithm] def _get_options(algo): options = {} "Max time before termination" if hasattr(algo, "stopping_maxtime"): options.update({"stopping_maxtime": 1}) "Fix seed if algorithm is stochastic" if hasattr(algo, "seed"): options.update({"seed": 12345}) return options def _get_required_decimals(algorithm, algo): # if algo is experimental, do not expect solution if algo.algo_info.experimental: return 0 if algorithm in PRECISION_LOOKUP: return PRECISION_LOOKUP[algorithm] else: return 1 if algo.algo_info.is_global else 4 @mark.least_squares def sos(x): return x def _get_params_and_binding_bounds(algo): if algo.algo_info.is_global: params = np.array([0.5, -0.5]) bounds = Bounds(lower=np.array([0.25, -1]), upper=np.array([1, -0.25])) expected = np.array([0.25, -0.25]) else: params = np.array([3, 2, -3]) if algo.algo_info.supports_infinite_bounds: bounds = Bounds( lower=np.array([1, -np.inf, -np.inf]), upper=np.array([np.inf, np.inf, -1]), ) else: bounds = Bounds(lower=np.array([1, -10, -10]), upper=np.array([10, 10, -1])) expected = np.array([1, 0, -1]) return params, bounds, expected # Tests all bounded algorithms with binding bounds @pytest.mark.parametrize("algorithm", AVAILABLE_BOUNDED_ALGORITHMS) def 
test_sum_of_squares_with_binding_bounds(algorithm, algo): params, bounds, expected = _get_params_and_binding_bounds(algo) algo_options = _get_options(algo) decimal = _get_required_decimals(algorithm, algo) res = minimize( fun=sos, params=params, bounds=bounds, algorithm=algorithm, collect_history=True, algo_options=algo_options, skip_checks=True, ) assert res.success in [True, None] aaae(res.params, expected, decimal) def _get_params_and_bounds_on_local(algo): params = np.arange(3) bounds = None expected = np.zeros(3) if algo.algo_info.needs_bounds: bounds = Bounds(lower=np.full(3, -10), upper=np.full(3, 10)) return params, bounds, expected # Test all local algorithms without bounds unless needed @pytest.mark.parametrize("algorithm", AVAILABLE_LOCAL_ALGORITHMS) def test_sum_of_squares_on_local_algorithms(algorithm, algo): params, bounds, expected = _get_params_and_bounds_on_local(algo) algo_options = _get_options(algo) decimal = _get_required_decimals(algorithm, algo) res = minimize( fun=sos, params=params, bounds=bounds, algorithm=algorithm, collect_history=True, algo_options=algo_options, skip_checks=True, ) assert res.success in [True, None] aaae(res.params, expected, decimal) def _get_params_and_bounds_on_global_and_bounded(algo): if algo.algo_info.is_global: params = np.array([0.35, 0.35]) bounds = Bounds(lower=np.array([-0.2, -0.5]), upper=np.array([1, 0.5])) expected = np.array([0, 0]) else: params = np.arange(3) bounds = Bounds(lower=np.full(3, -10), upper=np.full(3, 10)) expected = np.zeros(3) return params, bounds, expected skip_msg = ( "The very slow tests of global algorithms are only run on linux which always " "runs much faster in continuous integration." 
) # Test all global algorithms and local algorithms with bounds @pytest.mark.skipif(sys.platform == "win32", reason=skip_msg) @pytest.mark.parametrize("algorithm", AVAILABLE_BOUNDED_ALGORITHMS) def test_sum_of_squares_on_global_and_bounded_algorithms(algorithm, algo): params, bounds, expected = _get_params_and_bounds_on_global_and_bounded(algo) algo_options = _get_options(algo) decimal = _get_required_decimals(algorithm, algo) res = minimize( fun=sos, params=params, bounds=bounds, algorithm=algorithm, collect_history=True, algo_options=algo_options, skip_checks=True, ) assert res.success in [True, None] aaae(res.params, expected, decimal) ================================================ FILE: tests/optimagic/optimization/test_multistart.py ================================================ from dataclasses import dataclass from itertools import product import numpy as np import pandas as pd import pytest from numpy.testing import assert_array_almost_equal as aaae from optimagic.optimization.algorithm import InternalOptimizeResult from optimagic.optimization.multistart import ( _draw_exploration_sample, get_batched_optimization_sample, run_explorations, update_convergence_state, ) @pytest.fixture() def params(): df = pd.DataFrame(index=["a", "b", "c"]) df["value"] = [0, 1, 2.0] df["soft_lower_bound"] = [-1, 0, np.nan] df["upper_bound"] = [2, 2, np.nan] return df @pytest.fixture() def constraints(): return [{"type": "fixed", "loc": "c", "value": 2}] dim = 2 distributions = ["uniform", "triangular"] rules = ["sobol", "halton", "latin_hypercube", "random"] lower = [np.zeros(dim), np.ones(dim) * 0.5, -np.ones(dim)] upper = [np.ones(dim), np.ones(dim) * 0.75, np.ones(dim) * 2] test_cases = list(product(distributions, rules, lower, upper)) @pytest.mark.parametrize("dist, rule, lower, upper", test_cases) def test_draw_exploration_sample(dist, rule, lower, upper): results = [] for _ in range(2): results.append( _draw_exploration_sample( x=np.ones_like(lower) * 0.5, 
lower=lower, upper=upper, n_samples=3, distribution=dist, method=rule, seed=1234, ) ) aaae(results[0], results[1]) calculated = results[0] assert calculated.shape == (3, 2) def test_run_explorations(): @dataclass class Dummy: def exploration_fun(self, x, n_cores): out = [] for vec in x: if vec.sum() == 5: out.append(np.inf) else: out.append(-vec.sum()) return out def with_step_id(self, step_id): return self calculated = run_explorations( internal_problem=Dummy(), sample=np.arange(6).reshape(3, 2), n_cores=1, step_id=0, ) exp_values = np.array([-9, -1]) exp_sample = np.array([[4, 5], [0, 1]]) aaae(calculated.sorted_sample, exp_sample) aaae(calculated.sorted_values, exp_values) def test_get_batched_optimization_sample(): calculated = get_batched_optimization_sample( sorted_sample=np.arange(12).reshape(6, 2), stopping_maxopt=5, batch_size=4, ) expected = [[[0, 1], [2, 3], [4, 5], [6, 7]], [[8, 9]]] assert len(calculated[0]) == 4 assert len(calculated[1]) == 1 assert len(calculated) == 2 for calc_batch, exp_batch in zip(calculated, expected, strict=False): assert isinstance(calc_batch, list) for calc_entry, exp_entry in zip(calc_batch, exp_batch, strict=False): assert isinstance(calc_entry, np.ndarray) assert calc_entry.tolist() == exp_entry @pytest.fixture() def current_state(): state = { "best_x": np.ones(3), "best_y": 5, "best_res": None, "x_history": [np.arange(3) - 1e-20, np.ones(3)], "y_history": [6, 5], "result_history": [], "start_history": [], } return state @pytest.fixture() def starts(): return [np.zeros(3)] @pytest.fixture() def results(): res = InternalOptimizeResult( x=np.arange(3) + 1e-10, fun=4, ) return [res] def test_update_state_converged(current_state, starts, results): criteria = { "xtol": 1e-3, "max_discoveries": 2, } new_state, is_converged = update_convergence_state( current_state=current_state, starts=starts, results=results, convergence_criteria=criteria, solver_type="value", ) aaae(new_state["best_x"], np.arange(3)) assert new_state["best_y"] 
== 4 assert new_state["y_history"] == [6, 5, 4] assert new_state["result_history"][0].fun == 4 aaae(new_state["start_history"][0], np.zeros(3)) assert is_converged def test_update_state_not_converged(current_state, starts, results): criteria = { "xtol": 1e-3, "max_discoveries": 5, } _, is_converged = update_convergence_state( current_state=current_state, starts=starts, results=results, convergence_criteria=criteria, solver_type="value", ) assert not is_converged ================================================ FILE: tests/optimagic/optimization/test_multistart_options.py ================================================ import numpy as np import pytest from optimagic.exceptions import InvalidMultistartError from optimagic.optimization.multistart_options import ( MultistartOptions, _linear_weights, _tiktak_weights, get_internal_multistart_options_from_public, pre_process_multistart, ) def test_pre_process_multistart_trivial_case(): multistart = MultistartOptions(n_samples=10, convergence_max_discoveries=55) got = pre_process_multistart(multistart) assert got == multistart def test_pre_process_multistart_none_case(): assert pre_process_multistart(None) is None def test_pre_process_multistart_false_case(): assert pre_process_multistart(False) is None def test_pre_process_multistart_dict_case(): got = pre_process_multistart( multistart={ "n_samples": 10, "convergence_max_discoveries": 55, } ) assert got == MultistartOptions( n_samples=10, convergence_max_discoveries=55, ) def test_pre_process_multistart_invalid_type(): with pytest.raises(InvalidMultistartError, match="Invalid multistart options"): pre_process_multistart(multistart="invalid") def test_pre_process_multistart_invalid_dict_key(): with pytest.raises(InvalidMultistartError, match="Invalid multistart options"): pre_process_multistart(multistart={"invalid": "invalid"}) def test_pre_process_multistart_invalid_dict_value(): with pytest.raises(InvalidMultistartError, match="Invalid number of samples"): 
pre_process_multistart(multistart={"n_samples": "invalid"}) @pytest.mark.parametrize("value", ["invalid", -1]) def test_multistart_options_invalid_n_samples_value(value): with pytest.raises(InvalidMultistartError, match="Invalid number of samples"): MultistartOptions(n_samples=value) @pytest.mark.parametrize("value", ["invalid", -1]) def test_multistart_options_invalid_stopping_maxopt(value): with pytest.raises(InvalidMultistartError, match="Invalid number of optimizations"): MultistartOptions(stopping_maxopt=value) def test_multistart_options_stopping_maxopt_less_than_n_samples(): with pytest.raises(InvalidMultistartError, match="Invalid number of samples"): MultistartOptions(n_samples=1, stopping_maxopt=2) def test_multistart_options_invalid_sampling_distribution(): with pytest.raises(InvalidMultistartError, match="Invalid sampling distribution"): MultistartOptions(sampling_distribution="invalid") def test_multistart_options_invalid_sampling_method(): with pytest.raises(InvalidMultistartError, match="Invalid sampling method"): MultistartOptions(sampling_method="invalid") def test_multistart_options_invalid_mixing_weight_method(): with pytest.raises(InvalidMultistartError, match="Invalid mixing weight method"): MultistartOptions(mixing_weight_method="invalid") @pytest.mark.parametrize("value", [("a", "b"), (1, 2, 3), {"a": 1.0, "b": 3.0}]) def test_multistart_options_invalid_mixing_weight_bounds(value): with pytest.raises(InvalidMultistartError, match="Invalid mixing weight bounds"): MultistartOptions(mixing_weight_bounds=value) def test_multistart_options_invalid_convergence_xtol_rel(): with pytest.raises(InvalidMultistartError, match="Invalid relative params"): MultistartOptions(convergence_xtol_rel="invalid") @pytest.mark.parametrize("value", ["invalid", -1]) def test_multistart_options_invalid_convergence_max_discoveries(value): with pytest.raises(InvalidMultistartError, match="Invalid max discoveries"): MultistartOptions(convergence_max_discoveries=value) 
@pytest.mark.parametrize("value", ["invalid", -1]) def test_multistart_options_invalid_n_cores(value): with pytest.raises(InvalidMultistartError, match="Invalid number of cores"): MultistartOptions(n_cores=value) @pytest.mark.parametrize("value", ["invalid", -1]) def test_multistart_options_invalid_batch_size(value): with pytest.raises(InvalidMultistartError, match="Invalid batch size"): MultistartOptions(batch_size=value) def test_multistart_options_batch_size_smaller_than_n_cores(): with pytest.raises(InvalidMultistartError, match="Invalid batch size"): MultistartOptions(batch_size=1, n_cores=2) def test_multistart_options_invalid_batch_evaluator(): with pytest.raises(InvalidMultistartError, match="Invalid batch evaluator"): MultistartOptions(batch_evaluator="invalid") def test_multistart_options_invalid_seed(): with pytest.raises(InvalidMultistartError, match="Invalid seed"): MultistartOptions(seed="invalid") def test_multistart_options_invalid_error_handling(): with pytest.raises(InvalidMultistartError, match="Invalid error handling"): MultistartOptions(error_handling="invalid") def test_linear_weights(): calculated = _linear_weights(5, 10, 0.4, 0.8) expected = 0.6 assert np.allclose(calculated, expected) def test_tiktak_weights(): assert np.allclose(0.3, _tiktak_weights(0, 10, 0.3, 0.8)) assert np.allclose(0.8, _tiktak_weights(10, 10, 0.3, 0.8)) def test_get_internal_multistart_options_from_public_defaults(): options = MultistartOptions() got = get_internal_multistart_options_from_public( options, params=np.arange(5), params_to_internal=lambda x: x, ) assert got.convergence_xtol_rel == 0.01 assert got.convergence_max_discoveries == options.convergence_max_discoveries assert got.n_cores == options.n_cores assert got.error_handling == "continue" assert got.n_samples == 500 assert got.stopping_maxopt == 50 assert got.batch_size == 1 ================================================ FILE: tests/optimagic/optimization/test_optimize.py 
================================================ """Tests for (almost) algorithm independent properties of maximize and minimize.""" import numpy as np import pandas as pd import pytest from numpy.testing import assert_array_almost_equal as aaae from optimagic.examples.criterion_functions import sos_scalar from optimagic.exceptions import InvalidFunctionError, InvalidNumdiffOptionsError from optimagic.optimization.optimize import maximize, minimize def test_sign_is_switched_back_after_maximization(): params = pd.DataFrame() params["value"] = [1, 2, 3] res = maximize( lambda params: 1 - params["value"] @ params["value"], params=params, algorithm="scipy_lbfgsb", ) assert np.allclose(res.fun, 1) def test_scipy_lbfgsb_actually_calls_criterion_and_derivative(): params = pd.DataFrame(data=np.ones((10, 1)), columns=["value"]) def raising_crit_and_deriv(params): # noqa: ARG001 raise NotImplementedError("This should not be called.") with pytest.raises(InvalidFunctionError, match="Error while evaluating"): minimize( fun=sos_scalar, params=params, algorithm="scipy_lbfgsb", fun_and_jac=raising_crit_and_deriv, ) def test_with_invalid_numdiff_options(): with pytest.raises(InvalidNumdiffOptionsError): minimize( fun=lambda x: x @ x, params=np.arange(5), algorithm="scipy_lbfgsb", numdiff_options={"bla": 15}, ) # provided fun or fun_and_jac is provided def test_with_optional_fun_argument(): expected = np.zeros(5) res = minimize( fun_and_jac=lambda x: (x @ x, 2 * x), params=np.arange(5), algorithm="scipy_lbfgsb", ) aaae(res.x, expected) def test_fun_and_jac_list(): with pytest.raises(NotImplementedError): minimize( fun_and_jac=[lambda x: (x @ x, 2 * x)], params=np.arange(5), algorithm="scipy_lbfgsb", ) ================================================ FILE: tests/optimagic/optimization/test_optimize_result.py ================================================ import numpy as np import pandas as pd import pytest from optimagic.optimization.optimize_result import OptimizeResult, 
_create_stars from optimagic.utilities import get_rng @pytest.fixture() def convergence_report(): conv_report = pd.DataFrame( index=[ "relative_criterion_change", "relative_params_change", "absolute_criterion_change", "absolute_params_change", ], columns=["one_step", "five_steps"], ) u = get_rng(seed=0).uniform conv_report["one_step"] = [ u(1e-12, 1e-10), u(1e-9, 1e-8), u(1e-7, 1e-6), u(1e-6, 1e-5), ] conv_report["five_steps"] = [1e-8, 1e-4, 1e-3, 100] return conv_report @pytest.fixture() def base_inputs(): out = { "params": np.ones(3), "fun": 500, "start_fun": 1000, "start_params": np.full(3, 10), "direction": "minimize", "message": "OPTIMIZATION TERMINATED SUCCESSFULLY", "success": True, "n_fun_evals": 100, "n_jac_evals": 0, "n_iterations": 80, "history": {"criterion": list(range(10))}, "algorithm": "scipy_lbfgsb", "n_free": 2, } return out def test_optimize_result_runs(base_inputs, convergence_report): res = OptimizeResult( convergence_report=convergence_report, **base_inputs, ) res.__repr__() def test_create_stars(): sr = pd.Series([1e-12, 1e-9, 1e-7, 1e-4, 1e-2]) calculated = _create_stars(sr).tolist() expected = ["***", "** ", "* ", " ", " "] assert calculated == expected def test_to_pickle(base_inputs, convergence_report, tmp_path): res = OptimizeResult( convergence_report=convergence_report, **base_inputs, ) res.to_pickle(tmp_path / "bla.pkl") def test_dict_access(base_inputs): res = OptimizeResult(**base_inputs) assert res["fun"] == 500 assert res["nfev"] == 100 ================================================ FILE: tests/optimagic/optimization/test_params_versions.py ================================================ import numpy as np import pandas as pd import pytest from numpy.testing import assert_array_almost_equal as aaae from pybaum import tree_just_flatten from optimagic.examples.criterion_functions import ( sos_gradient, sos_ls, sos_ls_jacobian, sos_scalar, ) from optimagic.optimization.optimize import minimize from 
optimagic.parameters.tree_registry import get_registry REGISTRY = get_registry(extended=True) PARAMS = [ {"a": 1.0, "b": 2, "c": 3, "d": 4, "e": 5}, np.arange(5), list(range(5)), tuple(range(5)), pd.Series(np.arange(5)), {"a": 1, "b": np.array([2, 3]), "c": [pd.Series([4, 5])]}, ] SCALAR_PARAMS = [6, 6.2, np.array([4]), np.array([4.5])] @pytest.mark.parametrize("params", PARAMS + SCALAR_PARAMS) def test_tree_params_numerical_derivative_scalar_criterion(params): flat = np.array(tree_just_flatten(params, registry=REGISTRY)) expected = np.zeros_like(flat) res = minimize( fun=sos_scalar, params=params, algorithm="scipy_lbfgsb", ) calculated = np.array(tree_just_flatten(res.params, registry=REGISTRY)) aaae(calculated, expected) @pytest.mark.parametrize("params", PARAMS + SCALAR_PARAMS) def test_tree_params_scalar_criterion(params): flat = np.array(tree_just_flatten(params, registry=REGISTRY)) expected = np.zeros_like(flat) res = minimize( fun=sos_scalar, jac=sos_gradient, params=params, algorithm="scipy_lbfgsb", ) calculated = np.array(tree_just_flatten(res.params, registry=REGISTRY)) aaae(calculated, expected) TEST_CASES_SOS_LS = [] for p in PARAMS: for algo in ["scipy_lbfgsb", "scipy_ls_lm"]: TEST_CASES_SOS_LS.append((p, algo)) @pytest.mark.parametrize("params, algorithm", TEST_CASES_SOS_LS) def test_tree_params_numerical_derivative_sos_ls(params, algorithm): flat = np.array(tree_just_flatten(params, registry=REGISTRY)) expected = np.zeros_like(flat) res = minimize( fun=sos_ls, params=params, algorithm=algorithm, ) calculated = np.array(tree_just_flatten(res.params, registry=REGISTRY)) aaae(calculated, expected) @pytest.mark.parametrize("params, algorithm", TEST_CASES_SOS_LS) def test_tree_params_sos_ls(params, algorithm): flat = np.array(tree_just_flatten(params, registry=REGISTRY)) expected = np.zeros_like(flat) derivatives = [sos_gradient, sos_ls_jacobian] res = minimize( fun=sos_ls, jac=derivatives, params=params, algorithm=algorithm, ) calculated = 
np.array(tree_just_flatten(res.params, registry=REGISTRY)) aaae(calculated, expected) ================================================ FILE: tests/optimagic/optimization/test_process_result.py ================================================ from optimagic.optimization.process_results import _sum_or_none def test_sum_or_none(): assert _sum_or_none([1, 2, 3]) == 6 assert _sum_or_none([1, 2, None]) is None ================================================ FILE: tests/optimagic/optimization/test_scipy_aliases.py ================================================ import numpy as np import pytest from numpy.testing import assert_array_almost_equal as aaae import optimagic as om from optimagic.exceptions import AliasError def test_x0_works_in_minimize(): res = om.minimize( fun=lambda x: x @ x, x0=np.arange(3), algorithm="scipy_lbfgsb", ) aaae(res.params, np.zeros(3)) def test_x0_works_in_maximize(): res = om.maximize( fun=lambda x: -x @ x, x0=np.arange(3), algorithm="scipy_lbfgsb", ) aaae(res.params, np.zeros(3)) def test_x0_and_params_do_not_work_together_in_minimize(): with pytest.raises(AliasError, match="x0 is an alias"): om.minimize( fun=lambda x: x @ x, x0=np.arange(3), params=np.arange(3), algorithm="scipy_lbfgsb", ) def test_x0_and_params_do_not_work_together_in_maximize(): with pytest.raises(AliasError, match="x0 is an alias"): om.maximize( fun=lambda x: -x @ x, x0=np.arange(3), params=np.arange(3), algorithm="scipy_lbfgsb", ) METHODS = [ "Nelder-Mead", "Powell", "CG", "BFGS", "Newton-CG", "L-BFGS-B", "TNC", "COBYLA", "SLSQP", "trust-constr", ] @pytest.mark.parametrize("method", METHODS) def test_method_works_in_minimize(method): res = om.minimize( fun=lambda x: x @ x, x0=np.arange(3), method="L-BFGS-B", ) aaae(res.params, np.zeros(3)) @pytest.mark.parametrize("method", METHODS) def test_method_works_in_maximize(method): res = om.maximize( fun=lambda x: -x @ x, x0=np.arange(3), method="L-BFGS-B", ) aaae(res.params, np.zeros(3)) def 
test_method_and_algorithm_do_not_work_together_in_minimize(): with pytest.raises(AliasError, match="method is an alias"): om.minimize( fun=lambda x: x @ x, x0=np.arange(3), algorithm="scipy_lbfgsb", method="L-BFGS-B", ) def test_method_and_algorithm_do_not_work_together_in_maximize(): with pytest.raises(AliasError, match="method is an alias"): om.maximize( fun=lambda x: -x @ x, x0=np.arange(3), algorithm="scipy_lbfgsb", method="L-BFGS-B", ) def test_exception_for_hess(): msg = "The hess argument is not yet supported" with pytest.raises(NotImplementedError, match=msg): om.minimize( fun=lambda x: x @ x, x0=np.arange(3), algorithm="scipy_lbfgsb", hess=lambda x: np.eye(len(x)), ) def test_exception_for_hessp(): msg = "The hessp argument is not yet supported" with pytest.raises(NotImplementedError, match=msg): om.minimize( fun=lambda x: x @ x, x0=np.arange(3), algorithm="scipy_lbfgsb", hessp=lambda x, p: np.eye(len(x)) @ p, ) def test_exception_for_callback(): msg = "The callback argument is not yet supported" with pytest.raises(NotImplementedError, match=msg): om.minimize( fun=lambda x: x @ x, x0=np.arange(3), algorithm="scipy_lbfgsb", callback=print, ) def test_exception_for_options(): msg = "The options argument is not supported" with pytest.raises(NotImplementedError, match=msg): om.minimize( fun=lambda x: x @ x, x0=np.arange(3), algorithm="scipy_lbfgsb", options={"maxiter": 100}, ) def test_exception_for_tol(): msg = "The tol argument is not supported" with pytest.raises(NotImplementedError, match=msg): om.minimize( fun=lambda x: x @ x, x0=np.arange(3), algorithm="scipy_lbfgsb", tol=1e-6, ) def test_args_works_in_minimize(): res = om.minimize( fun=lambda x, a: ((x - a) ** 2).sum(), x0=np.arange(3), args=(1,), algorithm="scipy_lbfgsb", ) aaae(res.params, np.ones(3)) def test_args_works_in_maximize(): res = om.maximize( fun=lambda x, a: -((x - a) ** 2).sum(), x0=np.arange(3), args=(1,), algorithm="scipy_lbfgsb", ) aaae(res.params, np.ones(3)) def 
def test_jac_equal_true_works_in_minimize():
    """jac=True signals that `fun` jointly returns (value, gradient)."""

    def fun_and_grad(x):
        return x @ x, 2 * x

    result = om.minimize(
        fun=fun_and_grad,
        params=np.arange(3),
        algorithm="scipy_lbfgsb",
        jac=True,
    )
    aaae(result.params, np.zeros(3))


def test_jac_equal_true_works_in_maximize():
    """Maximize counterpart: fun returns (value, gradient) when jac=True."""

    def fun_and_grad(x):
        return -x @ x, -2 * x

    result = om.maximize(
        fun=fun_and_grad,
        params=np.arange(3),
        algorithm="scipy_lbfgsb",
        jac=True,
    )
    aaae(result.params, np.zeros(3))
fun_and_jac_kwargs={"bla": 3}, ) def test_typo_in_criterion_kwarg(): def f(params, bla, foo): # noqa: ARG001 return (params["value"].to_numpy() ** 2).sum() params = pd.DataFrame(np.ones((3, 1)), columns=["value"]) snippet = "Did you mean" with pytest.raises(InvalidKwargsError, match=snippet): minimize(f, params, "scipy_lbfgsb", fun_kwargs={"bla": 3, "foa": 4}) def test_criterion_with_runtime_error_derivative_free(): def f(params): x = params["value"].to_numpy() if x.sum() < 1: raise RuntimeError("Great error message") return x @ x params = pd.DataFrame(np.full((3, 1), 10), columns=["value"]) snippet = "when evaluating fun during optimization" with pytest.raises(UserFunctionRuntimeError, match=snippet): minimize(f, params, "scipy_neldermead") def test_criterion_with_runtime_error_during_numerical_derivative(): def f(params): x = params["value"].to_numpy() if (x != 1).any(): raise RuntimeError("Great error message") return x @ x params = pd.DataFrame(np.ones((3, 1)), columns=["value"]) snippet = "evaluating a numerical derivative" with pytest.raises(UserFunctionRuntimeError, match=snippet): minimize(f, params, "scipy_lbfgsb") def test_criterion_fails_at_start_values(): def just_fail(params): # noqa: ARG001 raise RuntimeError() params = pd.DataFrame(np.ones((3, 1)), columns=["value"]) snippet = "Error while evaluating fun at start params." with pytest.raises(InvalidFunctionError, match=snippet): minimize(just_fail, params, "scipy_lbfgsb") ================================================ FILE: tests/optimagic/optimization/test_with_advanced_constraints.py ================================================ """Tests using constraints with optional entries or combination of constraints. 
- Only sum of squares - Only scipy_lbfgsb - Only minimize """ import itertools import numpy as np import pandas as pd import pytest from numpy.testing import assert_array_almost_equal as aaae import optimagic as om from optimagic.examples.criterion_functions import sos_gradient, sos_scalar from optimagic.optimization.optimize import minimize CONSTR_INFO = { "cov_bounds_distance": om.FlatCovConstraint(regularization=0.1), "sdcorr_bounds_distance": om.FlatSDCorrConstraint(regularization=0.1), "fixed_and_decreasing": [ om.DecreasingConstraint(lambda x: x.loc[[1, 2, 3, 4]]), om.FixedConstraint(lambda x: x.loc[2]), ], "fixed_and_increasing": [ om.IncreasingConstraint(lambda x: x.loc[[0, 1, 2, 3]]), om.FixedConstraint(lambda x: x.loc[2]), ], } START_INFO = { "cov_bounds_distance": [1, 0.1, 2, 0.2, 0.3, 3], "sdcorr_bounds_distance": [1, 2, 3, 0.1, 0.2, 0.3], "fixed_and_decreasing": [1, 4, 4, 2, 1], "fixed_and_increasing": [1, 2, 3, 4, 1], } RES_INFO = { "cov_bounds_distance": [0.1, 0, 0.1, 0, 0, 0.1], "sdcorr_bounds_distance": [0.1, 0.1, 0.1, 0, 0, 0.0], "fixed_and_decreasing": [0, 4, 4, 0, 0], "fixed_and_increasing": [0, 0, 3, 3, 0], } derivatives = [sos_gradient, None] constr_names = list(CONSTR_INFO.keys()) test_cases = list(itertools.product(derivatives, constr_names)) @pytest.mark.parametrize("derivative, constr_name", test_cases) def test_with_covariance_constraint_bounds_distance(derivative, constr_name): params = pd.Series(START_INFO[constr_name], name="value").to_frame() res = minimize( fun=sos_scalar, params=params, algorithm="scipy_lbfgsb", jac=derivative, constraints=CONSTR_INFO[constr_name], ) assert res.success, "scipy_lbfgsb did not converge." 
expected = np.array(RES_INFO[constr_name]) aaae(res.params["value"].to_numpy(), expected, decimal=4) ================================================ FILE: tests/optimagic/optimization/test_with_bounds.py ================================================ import numpy as np from scipy.optimize import Bounds as ScipyBounds from optimagic.optimization.optimize import maximize, minimize def test_minimize_with_scipy_bounds(): minimize( lambda x: x @ x, np.arange(3), bounds=ScipyBounds(np.full(3, -1), np.full(3, 5)), algorithm="scipy_lbfgsb", ) def test_minimize_with_sequence_bounds(): minimize( lambda x: x @ x, np.arange(3), bounds=[(-1, 5)] * 3, algorithm="scipy_lbfgsb", ) def test_maximize_with_scipy_bounds(): maximize( lambda x: -x @ x, np.arange(3), bounds=ScipyBounds(np.full(3, -1), np.full(3, 5)), algorithm="scipy_lbfgsb", ) def test_maximize_with_sequence_bounds(): maximize( lambda x: -x @ x, np.arange(3), bounds=[(-1, 5)] * 3, algorithm="scipy_lbfgsb", ) ================================================ FILE: tests/optimagic/optimization/test_with_constraints.py ================================================ """Test many different criterion functions and many sets of constraints. 
@mark.likelihood
def logit_loglike(params, y, x):
    """Per-observation log-likelihood contributions of a logit model.

    Args:
        params: Parameter vector; either a pd.DataFrame with a "value"
            column holding the coefficients, or a 1d array of coefficients.
        y (np.ndarray): 1d array with the binary dependent variable.
        x (np.ndarray): 2d array with the independent variables.

    Returns:
        np.ndarray: 1d array with one log-likelihood contribution per
            individual.

    """
    if isinstance(params, pd.DataFrame):
        coefs = params["value"].to_numpy()
    else:
        coefs = params
    # Map y in {0, 1} to a sign in {-1, 1} so a single logistic-cdf
    # expression covers both outcomes.
    sign = 2 * y - 1
    return np.log(1 / (1 + np.exp(-(sign * np.dot(x, coefs)))))
}, "rosenbrock": { "criterion": rosenbrock_function_value, "gradient": rosenbrock_gradient, "entries": ["value", "contributions"], "default_result": np.ones(3), "linear_result": "unknown", "probability_result": "unknown", }, "trid": { "criterion": trid_scalar, "gradient": trid_gradient, "entries": ["value"], "default_result": [3, 4, 3], "fixed_result": [1, 2.666666667, 2.333333333], "equality_result": [3, 3, 3], "pairwise_equality_result": [3.333333333, 3.333333333, 2.666666667], "increasing_result": [2.666666667, 3.3333333, 3.3333333], "decreasing_result": "unknown", "linear_result": [1.185185185, 1.4074074069999998, 1.703703704], "probability_result": [0.272727273, 0.727272727, 1.363636364], "covariance_result": "unknown", "sdcorr_result": "unknown", }, } CONSTR_INFO = { "numpy": { "fixed": om.FixedConstraint(selector=lambda x: x[0]), "equality": om.EqualityConstraint(selector=lambda x: x[[0, 1, 2]]), "pairwise_equality": om.PairwiseEqualityConstraint( selectors=[lambda x: x[0], lambda x: x[1]] ), "increasing": om.IncreasingConstraint(selector=lambda x: x[[1, 2]]), "decreasing": om.DecreasingConstraint(selector=lambda x: x[[0, 1]]), "linear": om.LinearConstraint( selector=lambda x: x[[0, 1]], value=4, weights=[1, 2] ), "probability": om.ProbabilityConstraint(selector=lambda x: x[[0, 1]]), "covariance": om.FlatCovConstraint(selector=lambda x: x[[0, 1, 2]]), "sdcorr": om.FlatSDCorrConstraint(selector=lambda x: x[[0, 1, 2]]), }, "pandas": { "fixed": om.FixedConstraint(selector=lambda p: p.loc[0]), "equality": om.EqualityConstraint(selector=lambda p: p.loc[[0, 1, 2]]), "pairwise_equality": om.PairwiseEqualityConstraint( selectors=[lambda p: p.loc[0], lambda p: p.loc[1]] ), "increasing": om.IncreasingConstraint(selector=lambda p: p.loc[[1, 2]]), "decreasing": om.DecreasingConstraint(selector=lambda p: p.loc[[0, 1]]), "linear": om.LinearConstraint( selector=lambda p: p.loc[[0, 1]], value=4, weights=[1, 2] ), "probability": om.ProbabilityConstraint(selector=lambda p: 
p.loc[[0, 1]]), "covariance": om.FlatCovConstraint(selector=lambda p: p.loc[[0, 1, 2]]), "sdcorr": om.FlatSDCorrConstraint(selector=lambda p: p.loc[[0, 1, 2]]), }, } START_INFO = { "fixed": [1, 1.5, 4.5], "equality": [1, 1, 1], "pairwise_equality": [2, 2, 3], "increasing": [1, 2, 3], "decreasing": [3, 2, 1], "linear": [2, 1, 3], "probability": [0.8, 0.2, 3], "covariance": [2, 1, 2], "sdcorr": [2, 2, 0.5], } KNOWN_FAILURES = { ("rosenbrock", "equality"), ("rosenbrock", "decreasing"), # imprecise } PARAMS_TYPES = ["numpy", "pandas"] test_cases = [] for crit_name in FUNC_INFO: for ptype in PARAMS_TYPES: for constr_name in CONSTR_INFO[ptype]: unknown_res = FUNC_INFO[crit_name].get(f"{constr_name}_result") == "unknown" known_failure = (crit_name, constr_name) in KNOWN_FAILURES if not any([unknown_res, known_failure]): for deriv in None, FUNC_INFO[crit_name]["gradient"]: test_cases.append( (crit_name, "scipy_lbfgsb", deriv, constr_name, ptype) ) if "root_contributions" in FUNC_INFO[crit_name]["entries"]: for deriv in [FUNC_INFO[crit_name].get("ls_jacobian"), None]: test_cases.append( (crit_name, "scipy_ls_dogbox", deriv, constr_name, ptype) ) @pytest.mark.parametrize( "criterion_name, algorithm, derivative, constraint_name, params_type", test_cases, ) def test_constrained_minimization( criterion_name, algorithm, derivative, constraint_name, params_type ): constraints = CONSTR_INFO[params_type][constraint_name] criterion = FUNC_INFO[criterion_name]["criterion"] if params_type == "pandas": params = pd.Series(START_INFO[constraint_name], name="value").to_frame() else: params = np.array(START_INFO[constraint_name]) res = minimize( fun=criterion, params=params, algorithm=algorithm, jac=derivative, constraints=constraints, algo_options={"convergence.ftol_rel": 1e-12}, ) if params_type == "pandas": calculated = res.params["value"].to_numpy() else: calculated = res.params expected = FUNC_INFO[criterion_name].get( f"{constraint_name}_result", 
FUNC_INFO[criterion_name]["default_result"] ) aaae(calculated, expected, decimal=4) @pytest.mark.filterwarnings("ignore:Specifying constraints as a dictionary is") def test_fix_that_differs_from_start_value_raises_an_error(): # We use the old constraint interface here, as the new interface prohibits the # usage of the 'value' attribute, rendering the test useless. # TODO: Remove this test when the old constraint interface is deprecated. with pytest.raises(InvalidParamsError): minimize( fun=lambda x: x @ x, params=np.arange(3), algorithm="scipy_lbfgsb", constraints=[{"selector": lambda x: x[1], "value": 10, "type": "fixed"}], ) def test_three_independent_constraints(): params = np.arange(10) params[0] = 2 constraints = [ om.FlatCovConstraint(lambda x: x[[0, 1, 2]]), om.FixedConstraint(lambda x: x[[4, 5]]), om.LinearConstraint(lambda x: x[[7, 8]], value=15, weights=1), ] res = minimize( fun=lambda x: x @ x, params=params, algorithm="scipy_lbfgsb", constraints=constraints, algo_options={"convergence.ftol_rel": 1e-12}, ) expected = np.array([0] * 4 + [4, 5] + [0] + [7.5] * 2 + [0]) # TODO: Increase precision back to decimal=4. The reduced precision is likely due # to the re-written L-BFGS-B algorithm in SciPy 1.15. # See https://github.com/optimagic-dev/optimagic/issues/556. 
aaae(res.params, expected, decimal=3) INVALID_CONSTRAINT_COMBIS = [ [ om.FlatCovConstraint(lambda x: x[[1, 0, 2]]), om.ProbabilityConstraint(lambda x: x[[0, 1]]), ], [ om.FlatCovConstraint(lambda x: x[[6, 3, 5, 2, 1, 4]]), om.IncreasingConstraint(lambda x: x[[0, 1, 2]]), ], ] @pytest.mark.parametrize("constraints", INVALID_CONSTRAINT_COMBIS) def test_incompatible_constraints_raise_errors(constraints): params = np.arange(10) with pytest.raises(InvalidConstraintError): minimize( fun=lambda x: x @ x, params=params, algorithm="scipy_lbfgsb", constraints=constraints, ) def test_bug_from_copenhagen_presentation(): # Make sure maximum of work hours is optimal def u(params): return params["work"]["hours"] ** 2 start_params = { "work": {"hourly_wage": 25.5, "hours": 2_000}, "time_budget": 24 * 7 * 365, } def return_all_but_working_hours(params): out = deepcopy(params) del out["work"]["hours"] return out res = maximize( fun=u, params=start_params, algorithm="scipy_lbfgsb", constraints=[ om.FixedConstraint(selector=return_all_but_working_hours), om.IncreasingConstraint(lambda p: [p["work"]["hours"], p["time_budget"]]), ], bounds=Bounds(lower={"work": {"hours": 0}}), ) assert np.allclose(res.params["work"]["hours"], start_params["time_budget"]) def test_constraint_inheritance(): """Test that probability constraint applies both sets of parameters in a pairwise equality constraint, no matter to which set they were applied originally. 
""" for loc in [[0, 1], [2, 3]]: def selector(x, loc=loc): # bind loc to the function return x[loc] constraints = [ om.PairwiseEqualityConstraint( selectors=[lambda x: x[[0, 1]], lambda x: x[[3, 2]]] ), om.ProbabilityConstraint(selector), ] res = minimize( fun=lambda x: x @ x, params=np.array([0.1, 0.9, 0.9, 0.1]), algorithm="scipy_lbfgsb", constraints=constraints, ) aaae(res.params, [0.5] * 4) def test_invalid_start_params(): def criterion(x): return np.dot(x, x) x = np.arange(3) with pytest.raises(InvalidParamsError): minimize( criterion, params=x, algorithm="scipy_lbfgsb", constraints=om.ProbabilityConstraint(selector=lambda x: x[[1, 2]]), ) def test_covariance_constraint_in_2_by_2_case(): spector_data = sm.datasets.spector.load_pandas() spector_data.exog = sm.add_constant(spector_data.exog) x_df = sm.add_constant(spector_data.exog) start_params = np.array([-10, 2, 0.2, 2]) kwargs = {"y": spector_data.endog, "x": x_df.to_numpy()} result = maximize( fun=logit_loglike, fun_kwargs=kwargs, params=start_params, algorithm="scipy_lbfgsb", constraints=om.FlatCovConstraint(selector=lambda x: x[[1, 2, 3]]), ) expected = np.array([-13.0213351, 2.82611417, 0.09515704, 2.37867869]) aaae(result.params, expected, decimal=4) ================================================ FILE: tests/optimagic/optimization/test_with_logging.py ================================================ """Test optimizations with logging in a temporary database. 
- Only minimize - Only dict criterion - scipy_lbfgsb and scipy_ls_dogbox - with and without derivatives """ import numpy as np import pandas as pd import pytest from numpy.testing import assert_array_almost_equal as aaae from pybaum import tree_just_flatten from optimagic import mark from optimagic.examples.criterion_functions import ( sos_derivatives, sos_ls, ) from optimagic.logging.logger import SQLiteLogOptions from optimagic.logging.types import ExistenceStrategy from optimagic.optimization.optimize import minimize from optimagic.parameters.tree_registry import get_registry @mark.least_squares def flexible_sos_ls(params): return params algorithms = ["scipy_lbfgsb", "scipy_ls_dogbox"] derivatives = [None, sos_derivatives] params = [pd.DataFrame({"value": np.arange(3)}), np.arange(3), {"a": 1, "b": 2, "c": 3}] test_cases = [] for algo in algorithms: for p in params: test_cases.append((algo, p)) @pytest.mark.parametrize("algorithm, params", test_cases) def test_optimization_with_valid_logging(algorithm, params): res = minimize( flexible_sos_ls, params=params, algorithm=algorithm, logging="logging.db", ) registry = get_registry(extended=True) flat = np.array(tree_just_flatten(res.params, registry=registry)) aaae(flat, np.zeros(3)) def test_optimization_with_existing_exsting_database(): minimize( sos_ls, pd.Series([1, 2, 3], name="value").to_frame(), algorithm="scipy_lbfgsb", logging=SQLiteLogOptions( "logging.db", if_database_exists=ExistenceStrategy.REPLACE ), ) with pytest.raises(FileExistsError): minimize( sos_ls, pd.Series([1, 2, 3], name="value").to_frame(), algorithm="scipy_lbfgsb", logging=SQLiteLogOptions( "logging.db", if_database_exists=ExistenceStrategy.RAISE ), ) ================================================ FILE: tests/optimagic/optimization/test_with_multistart.py ================================================ import functools import numpy as np import pandas as pd import pytest from numpy.testing import assert_array_almost_equal as aaae import 
optimagic as om from optimagic.examples.criterion_functions import ( sos_ls, sos_scalar, ) from optimagic.logging import SQLiteLogReader from optimagic.optimization.optimize import maximize, minimize from optimagic.optimization.optimize_result import OptimizeResult from optimagic.parameters.bounds import Bounds criteria = [sos_scalar, sos_ls] @pytest.fixture() def params(): params = pd.DataFrame() params["value"] = np.arange(4) params["soft_lower_bound"] = [-5] * 4 params["soft_upper_bound"] = [10] * 4 return params test_cases = [ (sos_scalar, "minimize"), (sos_ls, "minimize"), (sos_scalar, "maximize"), ] def _switch_sign(func): """Switch sign of all outputs of a function.""" @functools.wraps(func) def wrapper(*args, **kwargs): unswitched = func(*args, **kwargs) if isinstance(unswitched, dict): switched = {key: -val for key, val in unswitched.items()} elif isinstance(unswitched, (tuple, list)): switched = [] for entry in unswitched: if isinstance(entry, dict): switched.append({key: -val for key, val in entry.items()}) else: switched.append(-entry) if isinstance(unswitched, tuple): switched = tuple(switched) else: switched = -unswitched return switched return wrapper @pytest.mark.parametrize("criterion, direction", test_cases) def test_multistart_optimization_with_sum_of_squares_at_defaults( criterion, direction, params ): if direction == "minimize": res = minimize( fun=criterion, params=params, algorithm="scipy_lbfgsb", multistart=True, ) else: res = maximize( fun=_switch_sign(criterion), params=params, algorithm="scipy_lbfgsb", multistart=True, ) assert hasattr(res, "multistart_info") ms_info = res.multistart_info assert len(ms_info.exploration_sample) == 400 assert isinstance(ms_info.exploration_results, list) assert len(ms_info.exploration_results) == 400 assert all(isinstance(entry, float) for entry in ms_info.exploration_results) assert all(isinstance(entry, OptimizeResult) for entry in ms_info.local_optima) assert all(isinstance(entry, pd.DataFrame) for entry 
in ms_info.start_parameters) assert np.allclose(res.fun, 0) aaae(res.params["value"], np.zeros(4)) def test_multistart_with_existing_sample(params): sample = [params.assign(value=x) for x in np.arange(20).reshape(5, 4) / 10] options = om.MultistartOptions(sample=sample) res = minimize( fun=sos_ls, params=params, algorithm="scipy_lbfgsb", multistart=options, ) assert all( got.equals(expected) for expected, got in zip( sample, res.multistart_info.exploration_sample, strict=False ) ) def test_convergence_via_max_discoveries_works(params): options = om.MultistartOptions( convergence_xtol_rel=np.inf, convergence_max_discoveries=2, ) res = maximize( fun=_switch_sign(sos_scalar), params=params, algorithm="scipy_lbfgsb", multistart=options, ) assert len(res.multistart_info.local_optima) == 2 def test_steps_are_logged_as_skipped_if_convergence(tmp_path, params): options = om.MultistartOptions( n_samples=10 * len(params), convergence_xtol_rel=np.inf, convergence_max_discoveries=2, ) path = tmp_path / "logging.db" minimize( fun=sos_ls, params=params, algorithm="scipy_lbfgsb", multistart=options, logging=path, ) steps_table = SQLiteLogReader(path)._step_store.to_df() expected_status = ["complete", "complete", "complete", "skipped", "skipped"] assert steps_table["status"].tolist() == expected_status def test_all_steps_occur_in_optimization_iterations_if_no_convergence(params): options = om.MultistartOptions( convergence_max_discoveries=np.inf, n_samples=10 * len(params), ) minimize( fun=sos_ls, params=params, algorithm="scipy_lbfgsb", multistart=options, logging="logging.db", ) logging = SQLiteLogReader("logging.db") iterations = logging._iteration_store.to_df() present_steps = set(iterations["step"]) assert present_steps == {1, 2, 3, 4, 5} def test_with_non_transforming_constraints(params): res = minimize( fun=sos_ls, params=params, constraints=om.FixedConstraint(selector=lambda p: p.loc[[0, 1]]), algorithm="scipy_lbfgsb", multistart=om.MultistartOptions(seed=12345), ) 
def test_with_ackley():
    """Multistart drives scipy_lbfgsb toward the Ackley global optimum."""

    def ackley(x):
        # Standard Ackley benchmark; global minimum of 0 at the origin.
        sqrt_term = -20 * np.exp(-0.2 * np.sqrt(np.mean(x**2)))
        cos_term = -np.exp(np.mean(np.cos(2 * np.pi * x)))
        return sqrt_term + cos_term + 20 + np.exp(1)

    dim = 5
    minimize(
        fun=ackley,
        params=np.full(dim, -10),
        bounds=Bounds(lower=np.full(dim, -32), upper=np.full(dim, 32)),
        algo_options={"stopping.maxfun": 1000},
        algorithm="scipy_lbfgsb",
        multistart=om.MultistartOptions(
            n_samples=200,
            stopping_maxopt=20,
            convergence_max_discoveries=10,
        ),
    )
1.0]), bounds=Bounds(soft_lower=np.full(2, -10), soft_upper=np.full(2, 10)), algorithm="scipy_ls_trf", multistart=om.MultistartOptions(n_samples=3, stopping_maxopt=3), ) aaae(est.params, np.zeros(2)) def test_with_ackley_using_dict_options(): def ackley(x): out = ( -20 * np.exp(-0.2 * np.sqrt(np.mean(x**2))) - np.exp(np.mean(np.cos(2 * np.pi * x))) + 20 + np.exp(1) ) return out dim = 5 kwargs = { "fun": ackley, "params": np.full(dim, -10), "bounds": Bounds(lower=np.full(dim, -32), upper=np.full(dim, 32)), "algo_options": {"stopping.maxfun": 1000}, } minimize( **kwargs, algorithm="scipy_lbfgsb", multistart={ "n_samples": 200, "stopping_maxopt": 20, "convergence_max_discoveries": 10, }, ) @pytest.mark.slow def test_with_batch_evaluator(params): options = om.MultistartOptions(batch_evaluator="threading") minimize( fun=sos_scalar, params=params, algorithm="scipy_lbfgsb", multistart=options, ) ================================================ FILE: tests/optimagic/optimization/test_with_nonlinear_constraints.py ================================================ import itertools import warnings import numpy as np import pytest from numpy.testing import assert_array_almost_equal as aaae import optimagic as om from optimagic import maximize, minimize from optimagic.algorithms import NonlinearConstrainedAlgorithms from optimagic.config import IS_CYIPOPT_INSTALLED from optimagic.parameters.bounds import Bounds NLC_ALGORITHMS = NonlinearConstrainedAlgorithms()._available_algorithms_dict # ====================================================================================== # Two-dimension example with equality and inequality constraints # ====================================================================================== @pytest.fixture() def nlc_2d_example(): """Non-linear constraints: 2-dimensional example. See the example section in https://en.wikipedia.org/wiki/Nonlinear_programming. 
""" def criterion(x): return np.sum(x) def derivative(x): return np.ones_like(x) def constraint_func(x): value = np.dot(x, x) return np.array([value - 1, 2 - value]) def constraint_jac(x): return 2 * np.vstack((x.reshape(1, -1), -x.reshape(1, -1))) constraints_long = om.NonlinearConstraint( func=constraint_func, derivative=constraint_jac, lower_bound=np.zeros(2), tol=1e-8, ) constraints_flat = om.NonlinearConstraint( func=lambda x: np.dot(x, x), derivative=lambda x: 2 * x, lower_bound=1, upper_bound=2, tol=1e-8, ) constraints_equality = om.NonlinearConstraint( func=lambda x: np.dot(x, x), derivative=lambda x: 2 * x, value=2, ) constraints_equality_and_inequality = [ om.NonlinearConstraint( func=lambda x: np.dot(x, x), derivative=lambda x: 2 * x, lower_bound=1, ), om.NonlinearConstraint( func=lambda x: np.dot(x, x), derivative=lambda x: 2 * x, value=2, ), ] _kwargs = { "criterion": criterion, "params": np.array([0, np.sqrt(2)]), "derivative": derivative, "bounds": Bounds(lower=np.zeros(2), upper=2 * np.ones(2)), } kwargs = { "flat": {**_kwargs, "constraints": constraints_flat}, "long": {**_kwargs, "constraints": constraints_long}, "equality": {**_kwargs, "constraints": constraints_equality}, "equality_and_inequality": { **_kwargs, "constraints": constraints_equality_and_inequality, }, } solution_x = np.ones(2) return solution_x, kwargs TEST_CASES = list( itertools.product( NLC_ALGORITHMS, ["flat", "long", "equality", "equality_and_inequality"] ) ) @pytest.mark.parametrize("algorithm, constr_type", TEST_CASES) def test_nonlinear_optimization(nlc_2d_example, algorithm, constr_type): """Test that available nonlinear optimizers solve a nonlinear constraints problem. We test for the cases of "equality", "inequality" and "equality_and_inequality" constraints. 
""" if "equality" in constr_type and algorithm == "nlopt_mma": pytest.skip(reason="Very slow and low accuracy.") solution_x, kwargs = nlc_2d_example if algorithm == "scipy_cobyla": del kwargs[constr_type]["bounds"] with warnings.catch_warnings(): warnings.simplefilter("ignore") result = maximize(algorithm=algorithm, **kwargs[constr_type]) if NLC_ALGORITHMS[algorithm].algo_info.is_global: decimal = 0 else: decimal = 4 aaae(result.params, solution_x, decimal=decimal) # ====================================================================================== # Documentation example # ====================================================================================== def criterion(params): offset = np.linspace(1, 0, len(params)) x = params - offset return x @ x @pytest.mark.parametrize("algorithm", NLC_ALGORITHMS) def test_documentation_example(algorithm): if algorithm in ("nlopt_mma", "ipopt"): pytest.skip(reason="Slow.") kwargs = { "bounds": Bounds(lower=np.zeros(6), upper=2 * np.ones(6)), } if algorithm == "scipy_cobyla": del kwargs["bounds"] minimize( fun=criterion, params=np.ones(6), algorithm=algorithm, constraints=om.NonlinearConstraint( func=np.prod, selector=lambda x: x[:-1], value=1.0, ), **kwargs, ) # ====================================================================================== # Test: selection + reparametrization constraint + nonlinear constraint # ====================================================================================== @pytest.fixture() def general_example(): params = {"a": np.array([0.1, 0.3, 0.4, 0.2]), "b": np.array([1.5, 2])} def criterion(params): weights = np.array([0, 1, 2, 3]) return params["a"] @ weights + params["b"].sum() def selector_probability_constraint(params): return params["a"] def selector_nonlinear_constraint(params): return {"probs": params["a"][:3][::-1], "unnecessary": params["b"]} def constraint(selected): return selected["probs"] @ selected["probs"] constraints = [ om.ProbabilityConstraint( 
selector=selector_probability_constraint, ), om.NonlinearConstraint( selector=selector_nonlinear_constraint, upper_bound=0.8, func=constraint, tol=0.01, ), om.NonlinearConstraint( selector=selector_nonlinear_constraint, func=constraint, upper_bound=0.8, tol=0.01, ), ] lower_bound = {"b": np.array([0, 0])} upper_bound = {"b": np.array([2, 2])} kwargs = { "fun": criterion, "params": params, "constraints": constraints, "lower_bounds": lower_bound, "upper_bounds": upper_bound, } return kwargs TEST_CASES = list(itertools.product(["ipopt"], [True, False])) @pytest.mark.skipif(not IS_CYIPOPT_INSTALLED, reason="Needs ipopt") @pytest.mark.parametrize("algorithm, skip_checks", TEST_CASES) def test_general_example(general_example, algorithm, skip_checks): kwargs = general_example with warnings.catch_warnings(): warnings.simplefilter("ignore") res = minimize(algorithm=algorithm, skip_checks=skip_checks, **kwargs) optimal_p1 = 0.5 + np.sqrt(3 / 20) # can be derived analytically optimal_p2 = 1 - optimal_p1 aaae(res.params["a"], np.array([optimal_p1, optimal_p2, 0, 0]), decimal=4) aaae(res.params["b"], np.array([0.0, 0]), decimal=5) ================================================ FILE: tests/optimagic/optimization/test_with_scaling.py ================================================ import numpy as np from numpy.testing import assert_array_almost_equal as aaae import optimagic as om from optimagic.optimization.optimize import maximize, minimize from optimagic.parameters.scaling import ScalingOptions def test_minimize_with_scaling_options(): got = minimize( fun=lambda x: x @ x, x0=np.arange(3), jac=lambda x: 2 * x, algorithm="scipy_lbfgsb", constraints=om.FixedConstraint(lambda x: x[2]), scaling=ScalingOptions(method="start_values", magnitude=1.2), ) aaae(got.x, np.array([0, 0, 2])) def test_minimize_with_scaling_options_dict(): got = minimize( fun=lambda x: x @ x, x0=np.arange(3), jac=lambda x: 2 * x, algorithm="scipy_lbfgsb", constraints=om.FixedConstraint(lambda x: x[2]), 
scaling={"method": "start_values", "magnitude": 1.2}, ) aaae(got.x, np.array([0, 0, 2])) def test_minimize_with_scaling_true(): got = minimize( fun=lambda x: x @ x, x0=np.arange(3), jac=lambda x: 2 * x, algorithm="scipy_lbfgsb", constraints=om.FixedConstraint(lambda x: x[2]), scaling=True, ) aaae(got.x, np.array([0, 0, 2])) def test_maximize_with_scaling_options(): got = maximize( fun=lambda x: -x @ x, x0=np.arange(3), jac=lambda x: -2 * x, algorithm="scipy_lbfgsb", constraints=om.FixedConstraint(lambda x: x[2]), scaling=ScalingOptions(method="start_values", magnitude=1.2), ) aaae(got.x, np.array([0, 0, 2])) def test_maximize_with_scaling_options_dict(): got = maximize( fun=lambda x: -x @ x, x0=np.arange(3), jac=lambda x: -2 * x, algorithm="scipy_lbfgsb", constraints=om.FixedConstraint(lambda x: x[2]), scaling={"method": "start_values", "magnitude": 1.2}, ) aaae(got.x, np.array([0, 0, 2])) def test_maximize_with_scaling_true(): got = maximize( fun=lambda x: -x @ x, x0=np.arange(3), jac=lambda x: -2 * x, algorithm="scipy_lbfgsb", constraints=om.FixedConstraint(lambda x: x[2]), scaling=True, ) aaae(got.x, np.array([0, 0, 2])) def test_minimize_with_scaling_options_with_bounds(): got = minimize( fun=lambda x: x @ x, x0=np.arange(3), bounds=om.Bounds(lower=np.array([-1, 0, 0]), upper=np.full(3, 5)), jac=lambda x: 2 * x, algorithm="scipy_lbfgsb", constraints=om.FixedConstraint(lambda x: x[2]), scaling=ScalingOptions(method="bounds", magnitude=1), ) aaae(got.x, np.array([0, 0, 2])) def test_minimize_with_scaling_options_dict_with_bounds(): got = minimize( fun=lambda x: x @ x, x0=np.arange(3), bounds=om.Bounds(lower=np.array([-1, 0, 0]), upper=np.full(3, 5)), jac=lambda x: 2 * x, algorithm="scipy_lbfgsb", constraints=om.FixedConstraint(lambda x: x[2]), scaling={"method": "bounds", "magnitude": 1}, ) aaae(got.x, np.array([0, 0, 2])) def test_minimize_with_scaling_true_with_bounds(): got = minimize( fun=lambda x: x @ x, x0=np.arange(3), bounds=om.Bounds(lower=np.array([-1, 
0, 0]), upper=np.full(3, 5)), jac=lambda x: 2 * x, algorithm="scipy_lbfgsb", constraints=om.FixedConstraint(lambda x: x[2]), scaling=True, ) aaae(got.x, np.array([0, 0, 2])) ================================================ FILE: tests/optimagic/optimizers/__init__.py ================================================ ================================================ FILE: tests/optimagic/optimizers/_pounders/__init__.py ================================================ ================================================ FILE: tests/optimagic/optimizers/_pounders/fixtures/add_points_until_main_model_fully_linear_i.yaml ================================================ --- delta: 0.05 history_criterion: - - 21.53511643627 - 14.80453604351 - 6.548558251064 - 12.54188075473 - 9.282890198608 - 2.859555210712 - 0.9381817894678 - 0.2048532883114 - 0.8881817894678 - 0.3798532883114 - -0.9101956814319 - -1.36444138824 - -0.9351994446357 - -1.055070381505 - -1.111335532899 - -0.1703442432756 - 1.580641245921 - 19.23511643627 - 13.00453604351 - 13.94855825106 - 11.24188075473 - 6.182890198608 - -1.240444789288 - -0.8618182105322 - -1.995146711689 - -0.9868182105322 - -1.270146711689 - -1.135195681432 - -0.9144413882404 - -3.072699444636 - -1.317570381505 - -0.9238355328992 - 0.9546557567244 - -0.3318587540789 - 8.635116436265 - 15.10453604351 - 6.148558251063 - 4.841880754733 - 5.382890198608 - 2.059555210712 - -3.361818210532 - -2.995146711689 - -3.311818210532 - -2.395146711689 - -2.185195681432 - -2.63944138824 - -1.985199444636 - -1.880070381505 - -1.711335532899 - -1.407844243276 - -0.4818587540789 - 2.735116436265 - 3.404536043506 - 3.148558251063 - 3.141880754733 - 2.482890198608 - 0.5595552107122 - -0.7618182105322 - -2.995146711689 - -0.7993182105322 - -2.245146711689 - -1.885195681432 - -1.96444138824 - -1.647699444636 - -2.292570381505 - -1.486335532899 - -1.557844243276 - -0.8193587540789 - 10.13511643627 - 4.748558251063 - -2.218096467799 - -4.369688200573 - 
-3.659688200573 - -1.219688200573 - -0.3489655844206 - 6.635116436265 - 2.248558251063 - -1.518096467799 - -2.939688200573 - -4.029688200573 - -2.159688200573 - -2.038965584421 - 5.435116436265 - 3.348558251064 - -1.818096467799 - -2.909688200573 - -4.969688200573 - -3.469688200573 - -0.5389655844206 - 6.635116436265 - 5.848558251064 - -0.918096467799 - -4.219688200573 - -0.3489655844206 - -4.029688200573 - -3.659688200573 - -0.5389655844206 - 5.435116436265 - 2.348558251064 - -0.0171098013921 - -2.718096467799 - -4.257793595776 - -3.887793595776 - -2.006947842151 - -2.829688200573 - -0.1835757519589 - 0.8557490906722 - 0.6910344155794 - 4.435116436265 - 4.348558251064 - 0.9828901986079 - 0.481903532201 - -6.457793595776 - -6.137793595776 - -1.516947842151 - -4.029688200573 - -1.013575751959 - -0.8342509093278 - 1.441034415579 - -0.8648835637348 - 1.848558251064 - 0.6828901986079 - 1.081903532201 - -5.457793595776 - -4.787793595776 - 0.1730521578493 - -1.139688200573 - -3.263575751959 - 0.4057490906722 - 4.141034415579 - 6.635116436265 - 2.104536043506 - 4.348558251064 - 5.641880754733 - -0.1171098013921 - -2.640444789288 - -3.195146711689 - -2.325146711689 - -4.777695681432 - -5.49444138824 - -4.762699444636 - -5.027570381505 - -3.966335532899 - -3.510344243276 - -2.694358754079 - 7.410311799427 - 8.980311799427 - 1.290311799427 - -4.969688200573 - -4.709688200573 - -0.659688200573 - -0.5648835637348 - -2.951441748936 - 0.1828901986079 - 5.081903532201 - 3.342206404224 - 2.873052157849 - 3.162206404224 - 2.983052157849 - 0.920311799427 - 0.1164242480411 - 3.925749090672 - 2.761034415579 - 9.335116436265 - 3.648558251063 - -0.3171098013921 - -5.718096467799 - -2.457793595776 - -3.126947842151 - -1.897793595776 - -2.156947842151 - -0.539688200573 - -2.693575751959 - -0.2742509093278 - 2.531034415579 - -4.664883563735 - 1.548558251064 - -7.017109801392 - -4.018096467799 - -0.7577935957756 - -4.526947842151 - -0.4677935957756 - -3.876947842151 - -3.429688200573 - 
-2.813575751959 - -0.9442509093278 - 0.6610344155794 - -3.089688200573 - 9.635116436265 - 6.848558251064 - 0.781903532201 - -3.469688200573 - -1.108965584421 - -3.839688200573 - -0.9189655844206 - -1.589688200573 - -2.159688200573 - -1.139688200573 - -4.661818210532 - -4.211818210532 - 10.33511643627 - 3.948558251064 - -0.3618182105322 - -0.3518182105322 - -1.477695681432 - -2.132699444636 - -1.761818210532 - -1.471818210532 - 9.935116436265 - 3.248558251063 - -1.997695681432 - -2.472699444636 - -1.261818210532 - -1.211818210532 - - 25.01562287811 - 18.67576650474 - 10.71425043997 - 16.92850306334 - 13.83328982937 - 7.61143273431 - 5.780449944004 - 4.918595910462 - 5.730449944004 - 5.093595910462 - 3.573230198002 - 2.843276294294 - 2.98078293018 - 2.569118760852 - 2.229814978179 - 2.901300021005 - 4.398727952741 - 22.71562287811 - 16.87576650474 - 18.11425043997 - 15.62850306334 - 10.73328982937 - 3.51143273431 - 3.980449944004 - 2.718595910462 - 3.855449944004 - 3.443595910462 - 3.348230198002 - 3.293276294294 - 0.8432829301802 - 2.306618760852 - 2.417314978179 - 4.026300021005 - 2.486227952741 - 12.11562287811 - 18.97576650474 - 10.31425043997 - 9.22850306334 - 9.933289829366 - 6.81143273431 - 1.480449944004 - 1.718595910462 - 1.530449944004 - 2.318595910462 - 2.298230198002 - 1.568276294294 - 1.93078293018 - 1.744118760852 - 1.629814978179 - 1.663800021005 - 2.336227952741 - 6.215622878108 - 7.275766504742 - 7.314250439974 - 7.52850306334 - 7.033289829366 - 5.31143273431 - 4.080449944004 - 1.718595910462 - 4.042949944004 - 2.468595910462 - 2.598230198002 - 2.243276294294 - 2.26828293018 - 1.331618760852 - 1.854814978179 - 1.513800021005 - 1.998727952741 - 13.61562287811 - 8.914250439974 - 2.617857443871 - -0.02069875634249 - 0.6893012436575 - 3.129301243658 - 2.348674115464 - 10.11562287811 - 6.414250439974 - 3.317857443871 - 1.409301243658 - 0.3193012436575 - 2.189301243658 - 0.6586741154643 - 8.915622878108 - 7.514250439974 - 3.017857443871 - 1.439301243658 - 
-0.6206987563425 - 0.8793012436575 - 2.158674115464 - 10.11562287811 - 10.01425043997 - 3.917857443871 - 0.1293012436575 - 2.348674115464 - 0.3193012436575 - 0.6893012436575 - 2.158674115464 - 8.915622878108 - 6.514250439974 - 4.533289829366 - 2.117857443871 - 0.5381907245488 - 0.9081907245488 - 2.599956711546 - 1.519301243658 - 3.585844975595 - 4.0602621231 - 3.388674115464 - 7.915622878108 - 8.514250439974 - 5.533289829366 - 5.317857443871 - -1.661809275451 - -1.341809275451 - 3.089956711546 - 0.3193012436575 - 2.755844975595 - 2.3702621231 - 4.138674115464 - 2.615622878108 - 6.014250439974 - 5.233289829366 - 5.917857443871 - -0.6618092754512 - 0.008190724548808 - 4.779956711546 - 3.209301243658 - 0.505844975595 - 3.6102621231 - 6.838674115464 - 10.11562287811 - 5.975766504742 - 8.514250439974 - 10.02850306334 - 4.433289829366 - 2.11143273431 - 1.518595910462 - 2.388595910462 - -0.2942698019983 - -1.286723705706 - -0.8467170698198 - -1.403381239148 - -0.6251850218209 - -0.4386999789948 - 0.1237279527411 - 11.75930124366 - 13.32930124366 - 5.639301243658 - -0.6206987563425 - -0.3606987563425 - 3.689301243658 - 2.915622878108 - 1.214250439974 - 4.733289829366 - 9.917857443871 - 8.138190724549 - 7.479956711546 - 7.958190724549 - 7.589956711546 - 5.269301243658 - 3.885844975595 - 7.1302621231 - 5.458674115464 - 12.81562287811 - 7.814250439974 - 4.233289829366 - -0.8821425561292 - 2.338190724549 - 1.479956711546 - 2.898190724549 - 2.449956711546 - 3.809301243658 - 1.075844975595 - 2.9302621231 - 5.228674115464 - -1.184377121892 - 5.714250439974 - -2.466710170634 - 0.8178574438708 - 4.038190724549 - 0.07995671154575 - 4.328190724549 - 0.7299567115457 - 0.9193012436575 - 0.955844975595 - 2.2602621231 - 3.358674115464 - 1.259301243658 - 13.11562287811 - 11.01425043997 - 5.617857443871 - 0.8793012436575 - 1.588674115464 - 0.5093012436575 - 1.778674115464 - 2.759301243658 - 2.189301243658 - 3.209301243658 - 0.1804499440042 - 0.6304499440042 - 13.81562287811 - 
8.114250439974 - 4.480449944004 - 4.490449944004 - 3.005730198002 - 1.78328293018 - 3.080449944004 - 3.370449944004 - 13.41562287811 - 7.414250439974 - 2.485730198002 - 1.44328293018 - 3.580449944004 - 3.630449944004 - - 84.68988065196 - 70.73054388289 - 56.46322643196 - 57.38823556867 - 49.80586460657 - 36.42009030556 - 24.97150307276 - 18.1321687762 - 24.92150307276 - 18.3071687762 - 12.85194135284 - 9.428755329368 - 7.671466840337 - 5.900125885276 - 4.571195356425 - 4.515228493968 - 5.474513604881 - 82.38988065196 - 68.93054388289 - 63.86322643196 - 56.08823556867 - 46.70586460657 - 32.32009030556 - 23.17150307276 - 15.9321687762 - 23.04650307276 - 16.6571687762 - 12.62694135284 - 9.878755329368 - 5.533966840337 - 5.637625885276 - 4.758695356425 - 5.640228493968 - 3.562013604881 - 71.78988065196 - 71.03054388289 - 56.06322643196 - 49.68823556867 - 45.90586460657 - 35.62009030556 - 20.67150307276 - 14.9321687762 - 20.72150307276 - 15.5321687762 - 11.57694135284 - 8.153755329368 - 6.621466840337 - 5.075125885276 - 3.971195356425 - 3.277728493968 - 3.412013604881 - 65.88988065196 - 59.33054388289 - 53.06322643196 - 47.98823556867 - 43.00586460657 - 34.12009030556 - 23.27150307276 - 14.9321687762 - 23.23400307276 - 15.6821687762 - 11.87694135284 - 8.828755329368 - 6.958966840337 - 4.662625885276 - 4.196195356425 - 3.127728493968 - 3.074513604881 - 73.28988065196 - 54.66322643196 - 26.00799822147 - 7.789506147668 - 8.499506147668 - 10.93950614767 - 3.209942501544 - 69.78988065196 - 52.16322643196 - 26.70799822147 - 9.219506147668 - 8.129506147668 - 9.999506147668 - 1.519942501544 - 68.58988065196 - 53.26322643196 - 26.40799822147 - 9.249506147668 - 7.189506147668 - 8.689506147668 - 3.019942501544 - 69.78988065196 - 55.76322643196 - 27.30799822147 - 7.939506147668 - 3.209942501544 - 8.129506147668 - 8.499506147668 - 3.019942501544 - 68.58988065196 - 52.26322643196 - 40.50586460657 - 25.50799822147 - 16.41235765092 - 16.78235765092 - 13.65241143766 - 9.329506147668 - 
7.541813269635 - 6.010338273791 - 4.249942501544 - 67.58988065196 - 54.26322643196 - 41.50586460657 - 28.70799822147 - 14.21235765092 - 14.53235765092 - 14.14241143766 - 8.129506147668 - 6.711813269635 - 4.320338273791 - 4.999942501544 - 62.28988065196 - 51.76322643196 - 41.20586460657 - 29.30799822147 - 15.21235765092 - 15.88235765092 - 15.83241143766 - 11.01950614767 - 4.461813269635 - 5.560338273791 - 7.699942501544 - 69.78988065196 - 58.03054388289 - 54.26322643196 - 50.48823556867 - 40.40586460657 - 30.92009030556 - 14.7321687762 - 15.6021687762 - 8.984441352835 - 5.298755329368 - 3.843966840337 - 1.927625885276 - 1.716195356425 - 1.175228493968 - 1.199513604881 - 19.56950614767 - 21.13950614767 - 13.44950614767 - 7.189506147668 - 7.449506147668 - 11.49950614767 - 62.58988065196 - 46.96322643196 - 40.70586460657 - 33.30799822147 - 24.01235765092 - 18.53241143766 - 23.83235765092 - 18.64241143766 - 13.07950614767 - 7.841813269635 - 9.080338273791 - 6.319942501544 - 72.48988065196 - 53.56322643196 - 40.20586460657 - 22.50799822147 - 18.21235765092 - 12.53241143766 - 18.77235765092 - 13.50241143766 - 11.61950614767 - 5.031813269635 - 4.880338273791 - 6.089942501544 - 58.48988065196 - 51.46322643196 - 33.50586460657 - 24.20799822147 - 19.91235765092 - 11.13241143766 - 20.20235765092 - 11.78241143766 - 8.729506147668 - 4.911813269635 - 4.210338273791 - 4.219942501544 - 9.069506147668 - 72.78988065196 - 56.76322643196 - 29.00799822147 - 8.689506147668 - 2.449942501544 - 8.319506147668 - 2.639942501544 - 10.56950614767 - 9.999506147668 - 11.01950614767 - 19.37150307276 - 19.82150307276 - 73.48988065196 - 53.86322643196 - 23.67150307276 - 23.68150307276 - 12.28444135284 - 6.473966840337 - 22.27150307276 - 22.56150307276 - 73.08988065196 - 53.16322643196 - 11.76444135284 - 6.133966840337 - 22.77150307276 - 22.82150307276 - - 78.17391291542 - 66.8366728159 - 54.32599616455 - 56.48754438985 - 49.80586460657 - 37.60220537333 - 27.26395828245 - 20.80723297571 - 
27.21395828245 - 20.98223297571 - 15.60547682225 - 12.11967202128 - 10.23248733714 - 8.30080219923 - 6.80055773407 - 6.572919168714 - 7.365975022026 - 75.87391291542 - 65.0366728159 - 61.72599616455 - 55.18754438985 - 46.70586460657 - 33.50220537333 - 25.46395828245 - 18.60723297571 - 25.33895828245 - 19.33223297571 - 15.38047682225 - 12.56967202128 - 8.094987337144 - 8.03830219923 - 6.98805773407 - 7.697919168714 - 5.453475022026 - 65.27391291542 - 67.1366728159 - 53.92599616455 - 48.78754438985 - 45.90586460657 - 36.80220537333 - 22.96395828245 - 17.60723297571 - 23.01395828245 - 18.20723297571 - 14.33047682225 - 10.84467202128 - 9.182487337144 - 7.47580219923 - 6.20055773407 - 5.335419168714 - 5.303475022026 - 59.37391291542 - 55.4366728159 - 50.92599616455 - 47.08754438985 - 43.00586460657 - 35.30220537333 - 25.56395828245 - 17.60723297571 - 25.52645828245 - 18.35723297571 - 14.63047682225 - 11.51967202128 - 9.519987337144 - 7.06330219923 - 6.42555773407 - 5.185419168714 - 4.965975022026 - 66.77391291542 - 52.52599616455 - 27.88429931353 - 10.52352617863 - 11.23352617863 - 13.67352617863 - 5.021362784819 - 63.27391291542 - 50.02599616455 - 28.58429931353 - 11.95352617863 - 10.86352617863 - 12.73352617863 - 3.331362784819 - 62.07391291542 - 51.12599616455 - 28.28429931353 - 11.98352617863 - 9.923526178634 - 11.42352617863 - 4.831362784819 - 63.27391291542 - 53.62599616455 - 29.18429931353 - 10.67352617863 - 5.021362784819 - 10.86352617863 - 11.23352617863 - 4.831362784819 - 62.07391291542 - 50.12599616455 - 40.50586460657 - 27.38429931353 - 18.95079727771 - 19.32079727771 - 16.391415976 - 12.06352617863 - 10.02497402658 - 8.153464959245 - 6.061362784819 - 61.07391291542 - 52.12599616455 - 41.50586460657 - 30.58429931353 - 16.75079727771 - 17.07079727771 - 16.881415976 - 10.86352617863 - 9.194974026576 - 6.463464959245 - 6.811362784819 - 55.77391291542 - 49.62599616455 - 41.20586460657 - 31.18429931353 - 17.75079727771 - 18.42079727771 - 18.571415976 - 
13.75352617863 - 6.944974026576 - 7.703464959245 - 9.511362784819 - 63.27391291542 - 54.1366728159 - 52.12599616455 - 49.58754438985 - 40.40586460657 - 32.10220537333 - 17.40723297571 - 18.27723297571 - 11.73797682225 - 7.989672021276 - 6.404987337144 - 4.32830219923 - 3.94555773407 - 3.232919168714 - 3.090975022026 - 22.30352617863 - 23.87352617863 - 16.18352617863 - 9.923526178634 - 10.18352617863 - 14.23352617863 - 56.07391291542 - 44.82599616455 - 40.70586460657 - 35.18429931353 - 26.55079727771 - 21.271415976 - 26.37079727771 - 21.381415976 - 15.81352617863 - 10.32497402658 - 11.22346495925 - 8.131362784819 - 65.97391291542 - 51.42599616455 - 40.20586460657 - 24.38429931353 - 20.75079727771 - 15.271415976 - 21.31079727771 - 16.241415976 - 14.35352617863 - 7.514974026576 - 7.023464959245 - 7.901362784819 - 51.97391291542 - 49.32599616455 - 33.50586460657 - 26.08429931353 - 22.45079727771 - 13.871415976 - 22.74079727771 - 14.521415976 - 11.46352617863 - 7.394974026576 - 6.353464959245 - 6.031362784819 - 11.80352617863 - 66.27391291542 - 54.62599616455 - 30.88429931353 - 11.42352617863 - 4.261362784819 - 11.05352617863 - 4.451362784819 - 13.30352617863 - 12.73352617863 - 13.75352617863 - 21.66395828245 - 22.11395828245 - 66.97391291542 - 51.72599616455 - 25.96395828245 - 25.97395828245 - 15.03797682225 - 9.034987337144 - 24.56395828245 - 24.85395828245 - 66.57391291542 - 51.02599616455 - 14.51797682225 - 8.694987337144 - 25.06395828245 - 25.11395828245 - - 122.1511527598 - 122.6211240639 - 156.1508499375 - -599.1450584808 - -12.38948471101 - 19.33755088278 - 21.40590723345 - 17.86925432536 - 21.35590723345 - 18.04425432536 - 13.84616571438 - 10.95881863539 - 9.418115456519 - 7.704952567843 - 6.351080428381 - 6.22595148445 - 7.093292608367 - 119.8511527598 - 120.8211240639 - 163.5508499375 - -600.4450584808 - -15.48948471101 - 15.23755088278 - 19.60590723345 - 15.66925432536 - 19.48090723345 - 16.39425432536 - 13.62116571438 - 11.40881863539 - 7.280615456519 - 
7.442452567843 - 6.538580428381 - 7.35095148445 - 5.180792608367 - 109.2511527598 - 122.9211240639 - 155.7508499375 - -606.8450584808 - -16.28948471101 - 18.53755088278 - 17.10590723345 - 14.66925432536 - 17.15590723345 - 15.26925432536 - 12.57116571438 - 9.683818635389 - 8.368115456519 - 6.879952567843 - 5.751080428381 - 4.98845148445 - 5.030792608367 - 103.3511527598 - 111.2211240639 - 152.7508499375 - -608.5450584808 - -19.18948471101 - 17.03755088278 - 19.70590723345 - 14.66925432536 - 19.66840723345 - 15.41925432536 - 12.87116571438 - 10.35881863539 - 8.705615456519 - 6.467452567843 - 5.976080428381 - 4.83845148445 - 4.693292608367 - 110.7511527598 - 154.3508499375 - 18.46995434894 - 9.107714197815 - 9.817714197815 - 12.25771419782 - 4.778284133588 - 107.2511527598 - 151.8508499375 - 19.16995434894 - 10.53771419782 - 9.447714197815 - 11.31771419782 - 3.088284133588 - 106.0511527598 - 152.9508499375 - 18.86995434894 - 10.56771419782 - 8.507714197815 - 10.00771419782 - 4.588284133588 - 107.2511527598 - 155.4508499375 - 19.76995434894 - 9.257714197815 - 4.778284133588 - 9.447714197815 - 9.817714197815 - 4.588284133588 - 106.0511527598 - 151.9508499375 - -21.68948471101 - 17.96995434894 - 14.92797213765 - 15.29797213765 - 14.15288063203 - 10.64771419782 - 9.331447324355 - 7.759548240156 - 5.818284133588 - 105.0511527598 - 153.9508499375 - -20.68948471101 - 21.16995434894 - 12.72797213765 - 13.04797213765 - 14.64288063203 - 9.447714197815 - 8.501447324355 - 6.069548240156 - 6.568284133588 - 99.75115275983 - 151.4508499375 - -20.98948471101 - 21.76995434894 - 13.72797213765 - 14.39797213765 - 16.33288063203 - 12.33771419782 - 6.251447324355 - 7.309548240156 - 9.268284133588 - 107.2511527598 - 109.9211240639 - 153.9508499375 - -606.0450584808 - -21.78948471101 - 13.83755088278 - 14.46925432536 - 15.33925432536 - 9.978665714379 - 6.828818635389 - 5.590615456519 - 3.732452567843 - 3.496080428381 - 2.88595148445 - 2.818292608367 - 20.88771419782 - 22.45771419782 - 
14.76771419782 - 8.507714197815 - 8.767714197815 - 12.81771419782 - 100.0511527598 - 146.6508499375 - -21.48948471101 - 25.76995434894 - 22.52797213765 - 19.03288063203 - 22.34797213765 - 19.14288063203 - 14.39771419782 - 9.631447324355 - 10.82954824016 - 7.888284133588 - 109.9511527598 - 153.2508499375 - -21.98948471101 - 14.96995434894 - 16.72797213765 - 13.03288063203 - 17.28797213765 - 14.00288063203 - 12.93771419782 - 6.821447324355 - 6.629548240156 - 7.658284133588 - 95.95115275983 - 151.1508499375 - -28.68948471101 - 16.66995434894 - 18.42797213765 - 11.63288063203 - 18.71797213765 - 12.28288063203 - 10.04771419782 - 6.701447324355 - 5.959548240156 - 5.788284133588 - 10.38771419782 - 110.2511527598 - 156.4508499375 - 21.46995434894 - 10.00771419782 - 4.018284133588 - 9.637714197815 - 4.208284133588 - 11.88771419782 - 11.31771419782 - 12.33771419782 - 15.80590723345 - 16.25590723345 - 110.9511527598 - 153.5508499375 - 20.10590723345 - 20.11590723345 - 13.27866571438 - 8.220615456519 - 18.70590723345 - 18.99590723345 - 110.5511527598 - 152.8508499375 - 12.75866571438 - 7.880615456519 - 19.20590723345 - 19.25590723345 - - -35.35804307658 - -53.29699942572 - -72.06021768605 - -76.24300976651 - -89.66597211166 - -117.5990460751 - -178.3813984168 - -314.1942358597 - -178.4313984168 - -314.0192358597 - -1385.074532566 - 503.9556946656 - 197.4559049595 - 115.7942913474 - 78.11722466414 - 57.55318477565 - 45.51113019711 - -37.65804307658 - -55.09699942572 - -64.66021768605 - -77.54300976651 - -92.76597211166 - -121.6990460751 - -180.1813984168 - -316.3942358597 - -180.3063984168 - -315.6692358597 - -1385.299532566 - 504.4056946656 - 195.3184049595 - 115.5317913474 - 78.30472466414 - 58.67818477565 - 43.59863019711 - -48.25804307658 - -52.99699942572 - -72.46021768605 - -83.94300976651 - -93.56597211166 - -118.3990460751 - -182.6813984168 - -317.3942358597 - -182.6313984168 - -316.7942358597 - -1386.349532566 - 502.6806946656 - 196.4059049595 - 114.9692913474 - 
77.51722466414 - 56.31568477565 - 43.44863019711 - -54.15804307658 - -64.69699942572 - -75.46021768605 - -85.64300976651 - -96.46597211166 - -119.8990460751 - -180.0813984168 - -317.3942358597 - -180.1188984168 - -316.6442358597 - -1386.049532566 - 503.3556946656 - 196.7434049595 - 114.5567913474 - 77.74222466414 - 56.16568477565 - 43.11113019711 - -46.75804307658 - -73.86021768605 - -148.0932859294 - 1691.297253326 - 1692.007253326 - 1694.447253326 - 38.42217232425 - -50.25804307658 - -76.36021768605 - -147.3932859294 - 1692.727253326 - 1691.637253326 - 1693.507253326 - 36.73217232425 - -51.45804307658 - -75.26021768605 - -147.6932859294 - 1692.757253326 - 1690.697253326 - 1692.197253326 - 38.23217232425 - -50.25804307658 - -72.76021768605 - -146.7932859294 - 1691.447253326 - 38.42217232425 - 1691.637253326 - 1692.007253326 - 38.23217232425 - -51.45804307658 - -76.26021768605 - -98.96597211166 - -148.5932859294 - -233.0828036794 - -232.7128036794 - -508.8494431202 - 1692.837253326 - 148.258324502 - 68.02827286157 - 39.46217232425 - -52.45804307658 - -74.26021768605 - -97.96597211166 - -145.3932859294 - -235.2828036794 - -234.9628036794 - -508.3594431202 - 1691.637253326 - 147.428324502 - 66.33827286157 - 40.21217232425 - -57.75804307658 - -76.76021768605 - -98.26597211166 - -144.7932859294 - -234.2828036794 - -233.6128036794 - -506.6694431202 - 1694.527253326 - 145.178324502 - 67.57827286157 - 42.91217232425 - -50.25804307658 - -65.99699942572 - -74.26021768605 - -83.14300976651 - -99.06597211166 - -123.0990460751 - -317.5942358597 - -316.7242358597 - -1388.942032566 - 499.8256946656 - 193.6284049595 - 111.8217913474 - 75.26222466414 - 54.21318477565 - 41.23613019711 - 1703.077253326 - 1704.647253326 - 1696.957253326 - 1690.697253326 - 1690.957253326 - 1695.007253326 - -57.45804307658 - -81.56021768605 - -98.76597211166 - -140.7932859294 - -225.4828036794 - -503.9694431202 - -225.6628036794 - -503.8594431202 - 1696.587253326 - 148.558324502 - 71.09827286157 - 
41.53217232425 - -47.55804307658 - -74.96021768605 - -99.26597211166 - -151.5932859294 - -231.2828036794 - -509.9694431202 - -230.7228036794 - -508.9994431202 - 1695.127253326 - 145.748324502 - 66.89827286157 - 41.30217232425 - -61.55804307658 - -77.06021768605 - -105.9659721117 - -149.8932859294 - -229.5828036794 - -511.3694431202 - -229.2928036794 - -510.7194431202 - 1692.237253326 - 145.628324502 - 66.22827286157 - 39.43217232425 - 1692.577253326 - -47.25804307658 - -71.76021768605 - -145.0932859294 - 1692.197253326 - 37.66217232425 - 1691.827253326 - 37.85217232425 - 1694.077253326 - 1693.507253326 - 1694.527253326 - -183.9813984168 - -183.5313984168 - -46.55804307658 - -74.66021768605 - -179.6813984168 - -179.6713984168 - -1385.642032566 - 196.2584049595 - -181.0813984168 - -180.7913984168 - -46.95804307658 - -75.36021768605 - -1386.162032566 - 195.9184049595 - -180.5813984168 - -180.5313984168 - - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 
0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 history_criterion_expected: - - 21.53511643627 - 14.80453604351 - 6.548558251064 - 12.54188075473 - 9.282890198608 - 2.859555210712 - 0.9381817894678 - 0.2048532883114 - 0.8881817894678 - 0.3798532883114 - -0.9101956814319 - -1.36444138824 - -0.9351994446357 - -1.055070381505 - -1.111335532899 - -0.1703442432756 - 1.580641245921 - 19.23511643627 - 13.00453604351 - 13.94855825106 - 11.24188075473 - 6.182890198608 - -1.240444789288 - -0.8618182105322 - -1.995146711689 - -0.9868182105322 - -1.270146711689 - 
-1.135195681432 - -0.9144413882404 - -3.072699444636 - -1.317570381505 - -0.9238355328992 - 0.9546557567244 - -0.3318587540789 - 8.635116436265 - 15.10453604351 - 6.148558251063 - 4.841880754733 - 5.382890198608 - 2.059555210712 - -3.361818210532 - -2.995146711689 - -3.311818210532 - -2.395146711689 - -2.185195681432 - -2.63944138824 - -1.985199444636 - -1.880070381505 - -1.711335532899 - -1.407844243276 - -0.4818587540789 - 2.735116436265 - 3.404536043506 - 3.148558251063 - 3.141880754733 - 2.482890198608 - 0.5595552107122 - -0.7618182105322 - -2.995146711689 - -0.7993182105322 - -2.245146711689 - -1.885195681432 - -1.96444138824 - -1.647699444636 - -2.292570381505 - -1.486335532899 - -1.557844243276 - -0.8193587540789 - 10.13511643627 - 4.748558251063 - -2.218096467799 - -4.369688200573 - -3.659688200573 - -1.219688200573 - -0.3489655844206 - 6.635116436265 - 2.248558251063 - -1.518096467799 - -2.939688200573 - -4.029688200573 - -2.159688200573 - -2.038965584421 - 5.435116436265 - 3.348558251064 - -1.818096467799 - -2.909688200573 - -4.969688200573 - -3.469688200573 - -0.5389655844206 - 6.635116436265 - 5.848558251064 - -0.918096467799 - -4.219688200573 - -0.3489655844206 - -4.029688200573 - -3.659688200573 - -0.5389655844206 - 5.435116436265 - 2.348558251064 - -0.0171098013921 - -2.718096467799 - -4.257793595776 - -3.887793595776 - -2.006947842151 - -2.829688200573 - -0.1835757519589 - 0.8557490906722 - 0.6910344155794 - 4.435116436265 - 4.348558251064 - 0.9828901986079 - 0.481903532201 - -6.457793595776 - -6.137793595776 - -1.516947842151 - -4.029688200573 - -1.013575751959 - -0.8342509093278 - 1.441034415579 - -0.8648835637348 - 1.848558251064 - 0.6828901986079 - 1.081903532201 - -5.457793595776 - -4.787793595776 - 0.1730521578493 - -1.139688200573 - -3.263575751959 - 0.4057490906722 - 4.141034415579 - 6.635116436265 - 2.104536043506 - 4.348558251064 - 5.641880754733 - -0.1171098013921 - -2.640444789288 - -3.195146711689 - -2.325146711689 - -4.777695681432 - 
-5.49444138824 - -4.762699444636 - -5.027570381505 - -3.966335532899 - -3.510344243276 - -2.694358754079 - 7.410311799427 - 8.980311799427 - 1.290311799427 - -4.969688200573 - -4.709688200573 - -0.659688200573 - -0.5648835637348 - -2.951441748936 - 0.1828901986079 - 5.081903532201 - 3.342206404224 - 2.873052157849 - 3.162206404224 - 2.983052157849 - 0.920311799427 - 0.1164242480411 - 3.925749090672 - 2.761034415579 - 9.335116436265 - 3.648558251063 - -0.3171098013921 - -5.718096467799 - -2.457793595776 - -3.126947842151 - -1.897793595776 - -2.156947842151 - -0.539688200573 - -2.693575751959 - -0.2742509093278 - 2.531034415579 - -4.664883563735 - 1.548558251064 - -7.017109801392 - -4.018096467799 - -0.7577935957756 - -4.526947842151 - -0.4677935957756 - -3.876947842151 - -3.429688200573 - -2.813575751959 - -0.9442509093278 - 0.6610344155794 - -3.089688200573 - 9.635116436265 - 6.848558251064 - 0.781903532201 - -3.469688200573 - -1.108965584421 - -3.839688200573 - -0.9189655844206 - -1.589688200573 - -2.159688200573 - -1.139688200573 - -4.661818210532 - -4.211818210532 - 10.33511643627 - 3.948558251064 - -0.3618182105322 - -0.3518182105322 - -1.477695681432 - -2.132699444636 - -1.761818210532 - -1.471818210532 - 9.935116436265 - 3.248558251063 - -1.997695681432 - -2.472699444636 - -1.261818210532 - -1.211818210532 - - 25.01562287811 - 18.67576650474 - 10.71425043997 - 16.92850306334 - 13.83328982937 - 7.61143273431 - 5.780449944004 - 4.918595910462 - 5.730449944004 - 5.093595910462 - 3.573230198002 - 2.843276294294 - 2.98078293018 - 2.569118760852 - 2.229814978179 - 2.901300021005 - 4.398727952741 - 22.71562287811 - 16.87576650474 - 18.11425043997 - 15.62850306334 - 10.73328982937 - 3.51143273431 - 3.980449944004 - 2.718595910462 - 3.855449944004 - 3.443595910462 - 3.348230198002 - 3.293276294294 - 0.8432829301802 - 2.306618760852 - 2.417314978179 - 4.026300021005 - 2.486227952741 - 12.11562287811 - 18.97576650474 - 10.31425043997 - 9.22850306334 - 9.933289829366 - 
6.81143273431 - 1.480449944004 - 1.718595910462 - 1.530449944004 - 2.318595910462 - 2.298230198002 - 1.568276294294 - 1.93078293018 - 1.744118760852 - 1.629814978179 - 1.663800021005 - 2.336227952741 - 6.215622878108 - 7.275766504742 - 7.314250439974 - 7.52850306334 - 7.033289829366 - 5.31143273431 - 4.080449944004 - 1.718595910462 - 4.042949944004 - 2.468595910462 - 2.598230198002 - 2.243276294294 - 2.26828293018 - 1.331618760852 - 1.854814978179 - 1.513800021005 - 1.998727952741 - 13.61562287811 - 8.914250439974 - 2.617857443871 - -0.02069875634249 - 0.6893012436575 - 3.129301243658 - 2.348674115464 - 10.11562287811 - 6.414250439974 - 3.317857443871 - 1.409301243658 - 0.3193012436575 - 2.189301243658 - 0.6586741154643 - 8.915622878108 - 7.514250439974 - 3.017857443871 - 1.439301243658 - -0.6206987563425 - 0.8793012436575 - 2.158674115464 - 10.11562287811 - 10.01425043997 - 3.917857443871 - 0.1293012436575 - 2.348674115464 - 0.3193012436575 - 0.6893012436575 - 2.158674115464 - 8.915622878108 - 6.514250439974 - 4.533289829366 - 2.117857443871 - 0.5381907245488 - 0.9081907245488 - 2.599956711546 - 1.519301243658 - 3.585844975595 - 4.0602621231 - 3.388674115464 - 7.915622878108 - 8.514250439974 - 5.533289829366 - 5.317857443871 - -1.661809275451 - -1.341809275451 - 3.089956711546 - 0.3193012436575 - 2.755844975595 - 2.3702621231 - 4.138674115464 - 2.615622878108 - 6.014250439974 - 5.233289829366 - 5.917857443871 - -0.6618092754512 - 0.008190724548808 - 4.779956711546 - 3.209301243658 - 0.505844975595 - 3.6102621231 - 6.838674115464 - 10.11562287811 - 5.975766504742 - 8.514250439974 - 10.02850306334 - 4.433289829366 - 2.11143273431 - 1.518595910462 - 2.388595910462 - -0.2942698019983 - -1.286723705706 - -0.8467170698198 - -1.403381239148 - -0.6251850218209 - -0.4386999789948 - 0.1237279527411 - 11.75930124366 - 13.32930124366 - 5.639301243658 - -0.6206987563425 - -0.3606987563425 - 3.689301243658 - 2.915622878108 - 1.214250439974 - 4.733289829366 - 9.917857443871 - 
8.138190724549 - 7.479956711546 - 7.958190724549 - 7.589956711546 - 5.269301243658 - 3.885844975595 - 7.1302621231 - 5.458674115464 - 12.81562287811 - 7.814250439974 - 4.233289829366 - -0.8821425561292 - 2.338190724549 - 1.479956711546 - 2.898190724549 - 2.449956711546 - 3.809301243658 - 1.075844975595 - 2.9302621231 - 5.228674115464 - -1.184377121892 - 5.714250439974 - -2.466710170634 - 0.8178574438708 - 4.038190724549 - 0.07995671154575 - 4.328190724549 - 0.7299567115457 - 0.9193012436575 - 0.955844975595 - 2.2602621231 - 3.358674115464 - 1.259301243658 - 13.11562287811 - 11.01425043997 - 5.617857443871 - 0.8793012436575 - 1.588674115464 - 0.5093012436575 - 1.778674115464 - 2.759301243658 - 2.189301243658 - 3.209301243658 - 0.1804499440042 - 0.6304499440042 - 13.81562287811 - 8.114250439974 - 4.480449944004 - 4.490449944004 - 3.005730198002 - 1.78328293018 - 3.080449944004 - 3.370449944004 - 13.41562287811 - 7.414250439974 - 2.485730198002 - 1.44328293018 - 3.580449944004 - 3.630449944004 - - 84.68988065196 - 70.73054388289 - 56.46322643196 - 57.38823556867 - 49.80586460657 - 36.42009030556 - 24.97150307276 - 18.1321687762 - 24.92150307276 - 18.3071687762 - 12.85194135284 - 9.428755329368 - 7.671466840337 - 5.900125885276 - 4.571195356425 - 4.515228493968 - 5.474513604881 - 82.38988065196 - 68.93054388289 - 63.86322643196 - 56.08823556867 - 46.70586460657 - 32.32009030556 - 23.17150307276 - 15.9321687762 - 23.04650307276 - 16.6571687762 - 12.62694135284 - 9.878755329368 - 5.533966840337 - 5.637625885276 - 4.758695356425 - 5.640228493968 - 3.562013604881 - 71.78988065196 - 71.03054388289 - 56.06322643196 - 49.68823556867 - 45.90586460657 - 35.62009030556 - 20.67150307276 - 14.9321687762 - 20.72150307276 - 15.5321687762 - 11.57694135284 - 8.153755329368 - 6.621466840337 - 5.075125885276 - 3.971195356425 - 3.277728493968 - 3.412013604881 - 65.88988065196 - 59.33054388289 - 53.06322643196 - 47.98823556867 - 43.00586460657 - 34.12009030556 - 23.27150307276 - 
14.9321687762 - 23.23400307276 - 15.6821687762 - 11.87694135284 - 8.828755329368 - 6.958966840337 - 4.662625885276 - 4.196195356425 - 3.127728493968 - 3.074513604881 - 73.28988065196 - 54.66322643196 - 26.00799822147 - 7.789506147668 - 8.499506147668 - 10.93950614767 - 3.209942501544 - 69.78988065196 - 52.16322643196 - 26.70799822147 - 9.219506147668 - 8.129506147668 - 9.999506147668 - 1.519942501544 - 68.58988065196 - 53.26322643196 - 26.40799822147 - 9.249506147668 - 7.189506147668 - 8.689506147668 - 3.019942501544 - 69.78988065196 - 55.76322643196 - 27.30799822147 - 7.939506147668 - 3.209942501544 - 8.129506147668 - 8.499506147668 - 3.019942501544 - 68.58988065196 - 52.26322643196 - 40.50586460657 - 25.50799822147 - 16.41235765092 - 16.78235765092 - 13.65241143766 - 9.329506147668 - 7.541813269635 - 6.010338273791 - 4.249942501544 - 67.58988065196 - 54.26322643196 - 41.50586460657 - 28.70799822147 - 14.21235765092 - 14.53235765092 - 14.14241143766 - 8.129506147668 - 6.711813269635 - 4.320338273791 - 4.999942501544 - 62.28988065196 - 51.76322643196 - 41.20586460657 - 29.30799822147 - 15.21235765092 - 15.88235765092 - 15.83241143766 - 11.01950614767 - 4.461813269635 - 5.560338273791 - 7.699942501544 - 69.78988065196 - 58.03054388289 - 54.26322643196 - 50.48823556867 - 40.40586460657 - 30.92009030556 - 14.7321687762 - 15.6021687762 - 8.984441352835 - 5.298755329368 - 3.843966840337 - 1.927625885276 - 1.716195356425 - 1.175228493968 - 1.199513604881 - 19.56950614767 - 21.13950614767 - 13.44950614767 - 7.189506147668 - 7.449506147668 - 11.49950614767 - 62.58988065196 - 46.96322643196 - 40.70586460657 - 33.30799822147 - 24.01235765092 - 18.53241143766 - 23.83235765092 - 18.64241143766 - 13.07950614767 - 7.841813269635 - 9.080338273791 - 6.319942501544 - 72.48988065196 - 53.56322643196 - 40.20586460657 - 22.50799822147 - 18.21235765092 - 12.53241143766 - 18.77235765092 - 13.50241143766 - 11.61950614767 - 5.031813269635 - 4.880338273791 - 6.089942501544 - 58.48988065196 
- 51.46322643196 - 33.50586460657 - 24.20799822147 - 19.91235765092 - 11.13241143766 - 20.20235765092 - 11.78241143766 - 8.729506147668 - 4.911813269635 - 4.210338273791 - 4.219942501544 - 9.069506147668 - 72.78988065196 - 56.76322643196 - 29.00799822147 - 8.689506147668 - 2.449942501544 - 8.319506147668 - 2.639942501544 - 10.56950614767 - 9.999506147668 - 11.01950614767 - 19.37150307276 - 19.82150307276 - 73.48988065196 - 53.86322643196 - 23.67150307276 - 23.68150307276 - 12.28444135284 - 6.473966840337 - 22.27150307276 - 22.56150307276 - 73.08988065196 - 53.16322643196 - 11.76444135284 - 6.133966840337 - 22.77150307276 - 22.82150307276 - - 78.17391291542 - 66.8366728159 - 54.32599616455 - 56.48754438985 - 49.80586460657 - 37.60220537333 - 27.26395828245 - 20.80723297571 - 27.21395828245 - 20.98223297571 - 15.60547682225 - 12.11967202128 - 10.23248733714 - 8.30080219923 - 6.80055773407 - 6.572919168714 - 7.365975022026 - 75.87391291542 - 65.0366728159 - 61.72599616455 - 55.18754438985 - 46.70586460657 - 33.50220537333 - 25.46395828245 - 18.60723297571 - 25.33895828245 - 19.33223297571 - 15.38047682225 - 12.56967202128 - 8.094987337144 - 8.03830219923 - 6.98805773407 - 7.697919168714 - 5.453475022026 - 65.27391291542 - 67.1366728159 - 53.92599616455 - 48.78754438985 - 45.90586460657 - 36.80220537333 - 22.96395828245 - 17.60723297571 - 23.01395828245 - 18.20723297571 - 14.33047682225 - 10.84467202128 - 9.182487337144 - 7.47580219923 - 6.20055773407 - 5.335419168714 - 5.303475022026 - 59.37391291542 - 55.4366728159 - 50.92599616455 - 47.08754438985 - 43.00586460657 - 35.30220537333 - 25.56395828245 - 17.60723297571 - 25.52645828245 - 18.35723297571 - 14.63047682225 - 11.51967202128 - 9.519987337144 - 7.06330219923 - 6.42555773407 - 5.185419168714 - 4.965975022026 - 66.77391291542 - 52.52599616455 - 27.88429931353 - 10.52352617863 - 11.23352617863 - 13.67352617863 - 5.021362784819 - 63.27391291542 - 50.02599616455 - 28.58429931353 - 11.95352617863 - 10.86352617863 - 
12.73352617863 - 3.331362784819 - 62.07391291542 - 51.12599616455 - 28.28429931353 - 11.98352617863 - 9.923526178634 - 11.42352617863 - 4.831362784819 - 63.27391291542 - 53.62599616455 - 29.18429931353 - 10.67352617863 - 5.021362784819 - 10.86352617863 - 11.23352617863 - 4.831362784819 - 62.07391291542 - 50.12599616455 - 40.50586460657 - 27.38429931353 - 18.95079727771 - 19.32079727771 - 16.391415976 - 12.06352617863 - 10.02497402658 - 8.153464959245 - 6.061362784819 - 61.07391291542 - 52.12599616455 - 41.50586460657 - 30.58429931353 - 16.75079727771 - 17.07079727771 - 16.881415976 - 10.86352617863 - 9.194974026576 - 6.463464959245 - 6.811362784819 - 55.77391291542 - 49.62599616455 - 41.20586460657 - 31.18429931353 - 17.75079727771 - 18.42079727771 - 18.571415976 - 13.75352617863 - 6.944974026576 - 7.703464959245 - 9.511362784819 - 63.27391291542 - 54.1366728159 - 52.12599616455 - 49.58754438985 - 40.40586460657 - 32.10220537333 - 17.40723297571 - 18.27723297571 - 11.73797682225 - 7.989672021276 - 6.404987337144 - 4.32830219923 - 3.94555773407 - 3.232919168714 - 3.090975022026 - 22.30352617863 - 23.87352617863 - 16.18352617863 - 9.923526178634 - 10.18352617863 - 14.23352617863 - 56.07391291542 - 44.82599616455 - 40.70586460657 - 35.18429931353 - 26.55079727771 - 21.271415976 - 26.37079727771 - 21.381415976 - 15.81352617863 - 10.32497402658 - 11.22346495925 - 8.131362784819 - 65.97391291542 - 51.42599616455 - 40.20586460657 - 24.38429931353 - 20.75079727771 - 15.271415976 - 21.31079727771 - 16.241415976 - 14.35352617863 - 7.514974026576 - 7.023464959245 - 7.901362784819 - 51.97391291542 - 49.32599616455 - 33.50586460657 - 26.08429931353 - 22.45079727771 - 13.871415976 - 22.74079727771 - 14.521415976 - 11.46352617863 - 7.394974026576 - 6.353464959245 - 6.031362784819 - 11.80352617863 - 66.27391291542 - 54.62599616455 - 30.88429931353 - 11.42352617863 - 4.261362784819 - 11.05352617863 - 4.451362784819 - 13.30352617863 - 12.73352617863 - 13.75352617863 - 21.66395828245 
- 22.11395828245 - 66.97391291542 - 51.72599616455 - 25.96395828245 - 25.97395828245 - 15.03797682225 - 9.034987337144 - 24.56395828245 - 24.85395828245 - 66.57391291542 - 51.02599616455 - 14.51797682225 - 8.694987337144 - 25.06395828245 - 25.11395828245 - - 122.1511527598 - 122.6211240639 - 156.1508499375 - -599.1450584808 - -12.38948471101 - 19.33755088278 - 21.40590723345 - 17.86925432536 - 21.35590723345 - 18.04425432536 - 13.84616571438 - 10.95881863539 - 9.418115456519 - 7.704952567843 - 6.351080428381 - 6.22595148445 - 7.093292608367 - 119.8511527598 - 120.8211240639 - 163.5508499375 - -600.4450584808 - -15.48948471101 - 15.23755088278 - 19.60590723345 - 15.66925432536 - 19.48090723345 - 16.39425432536 - 13.62116571438 - 11.40881863539 - 7.280615456519 - 7.442452567843 - 6.538580428381 - 7.35095148445 - 5.180792608367 - 109.2511527598 - 122.9211240639 - 155.7508499375 - -606.8450584808 - -16.28948471101 - 18.53755088278 - 17.10590723345 - 14.66925432536 - 17.15590723345 - 15.26925432536 - 12.57116571438 - 9.683818635389 - 8.368115456519 - 6.879952567843 - 5.751080428381 - 4.98845148445 - 5.030792608367 - 103.3511527598 - 111.2211240639 - 152.7508499375 - -608.5450584808 - -19.18948471101 - 17.03755088278 - 19.70590723345 - 14.66925432536 - 19.66840723345 - 15.41925432536 - 12.87116571438 - 10.35881863539 - 8.705615456519 - 6.467452567843 - 5.976080428381 - 4.83845148445 - 4.693292608367 - 110.7511527598 - 154.3508499375 - 18.46995434894 - 9.107714197815 - 9.817714197815 - 12.25771419782 - 4.778284133588 - 107.2511527598 - 151.8508499375 - 19.16995434894 - 10.53771419782 - 9.447714197815 - 11.31771419782 - 3.088284133588 - 106.0511527598 - 152.9508499375 - 18.86995434894 - 10.56771419782 - 8.507714197815 - 10.00771419782 - 4.588284133588 - 107.2511527598 - 155.4508499375 - 19.76995434894 - 9.257714197815 - 4.778284133588 - 9.447714197815 - 9.817714197815 - 4.588284133588 - 106.0511527598 - 151.9508499375 - -21.68948471101 - 17.96995434894 - 14.92797213765 - 
15.29797213765 - 14.15288063203 - 10.64771419782 - 9.331447324355 - 7.759548240156 - 5.818284133588 - 105.0511527598 - 153.9508499375 - -20.68948471101 - 21.16995434894 - 12.72797213765 - 13.04797213765 - 14.64288063203 - 9.447714197815 - 8.501447324355 - 6.069548240156 - 6.568284133588 - 99.75115275983 - 151.4508499375 - -20.98948471101 - 21.76995434894 - 13.72797213765 - 14.39797213765 - 16.33288063203 - 12.33771419782 - 6.251447324355 - 7.309548240156 - 9.268284133588 - 107.2511527598 - 109.9211240639 - 153.9508499375 - -606.0450584808 - -21.78948471101 - 13.83755088278 - 14.46925432536 - 15.33925432536 - 9.978665714379 - 6.828818635389 - 5.590615456519 - 3.732452567843 - 3.496080428381 - 2.88595148445 - 2.818292608367 - 20.88771419782 - 22.45771419782 - 14.76771419782 - 8.507714197815 - 8.767714197815 - 12.81771419782 - 100.0511527598 - 146.6508499375 - -21.48948471101 - 25.76995434894 - 22.52797213765 - 19.03288063203 - 22.34797213765 - 19.14288063203 - 14.39771419782 - 9.631447324355 - 10.82954824016 - 7.888284133588 - 109.9511527598 - 153.2508499375 - -21.98948471101 - 14.96995434894 - 16.72797213765 - 13.03288063203 - 17.28797213765 - 14.00288063203 - 12.93771419782 - 6.821447324355 - 6.629548240156 - 7.658284133588 - 95.95115275983 - 151.1508499375 - -28.68948471101 - 16.66995434894 - 18.42797213765 - 11.63288063203 - 18.71797213765 - 12.28288063203 - 10.04771419782 - 6.701447324355 - 5.959548240156 - 5.788284133588 - 10.38771419782 - 110.2511527598 - 156.4508499375 - 21.46995434894 - 10.00771419782 - 4.018284133588 - 9.637714197815 - 4.208284133588 - 11.88771419782 - 11.31771419782 - 12.33771419782 - 15.80590723345 - 16.25590723345 - 110.9511527598 - 153.5508499375 - 20.10590723345 - 20.11590723345 - 13.27866571438 - 8.220615456519 - 18.70590723345 - 18.99590723345 - 110.5511527598 - 152.8508499375 - 12.75866571438 - 7.880615456519 - 19.20590723345 - 19.25590723345 - - -35.35804307658 - -53.29699942572 - -72.06021768605 - -76.24300976651 - -89.66597211166 
- -117.5990460751 - -178.3813984168 - -314.1942358597 - -178.4313984168 - -314.0192358597 - -1385.074532566 - 503.9556946656 - 197.4559049595 - 115.7942913474 - 78.11722466414 - 57.55318477565 - 45.51113019711 - -37.65804307658 - -55.09699942572 - -64.66021768605 - -77.54300976651 - -92.76597211166 - -121.6990460751 - -180.1813984168 - -316.3942358597 - -180.3063984168 - -315.6692358597 - -1385.299532566 - 504.4056946656 - 195.3184049595 - 115.5317913474 - 78.30472466414 - 58.67818477565 - 43.59863019711 - -48.25804307658 - -52.99699942572 - -72.46021768605 - -83.94300976651 - -93.56597211166 - -118.3990460751 - -182.6813984168 - -317.3942358597 - -182.6313984168 - -316.7942358597 - -1386.349532566 - 502.6806946656 - 196.4059049595 - 114.9692913474 - 77.51722466414 - 56.31568477565 - 43.44863019711 - -54.15804307658 - -64.69699942572 - -75.46021768605 - -85.64300976651 - -96.46597211166 - -119.8990460751 - -180.0813984168 - -317.3942358597 - -180.1188984168 - -316.6442358597 - -1386.049532566 - 503.3556946656 - 196.7434049595 - 114.5567913474 - 77.74222466414 - 56.16568477565 - 43.11113019711 - -46.75804307658 - -73.86021768605 - -148.0932859294 - 1691.297253326 - 1692.007253326 - 1694.447253326 - 38.42217232425 - -50.25804307658 - -76.36021768605 - -147.3932859294 - 1692.727253326 - 1691.637253326 - 1693.507253326 - 36.73217232425 - -51.45804307658 - -75.26021768605 - -147.6932859294 - 1692.757253326 - 1690.697253326 - 1692.197253326 - 38.23217232425 - -50.25804307658 - -72.76021768605 - -146.7932859294 - 1691.447253326 - 38.42217232425 - 1691.637253326 - 1692.007253326 - 38.23217232425 - -51.45804307658 - -76.26021768605 - -98.96597211166 - -148.5932859294 - -233.0828036794 - -232.7128036794 - -508.8494431202 - 1692.837253326 - 148.258324502 - 68.02827286157 - 39.46217232425 - -52.45804307658 - -74.26021768605 - -97.96597211166 - -145.3932859294 - -235.2828036794 - -234.9628036794 - -508.3594431202 - 1691.637253326 - 147.428324502 - 66.33827286157 - 
40.21217232425 - -57.75804307658 - -76.76021768605 - -98.26597211166 - -144.7932859294 - -234.2828036794 - -233.6128036794 - -506.6694431202 - 1694.527253326 - 145.178324502 - 67.57827286157 - 42.91217232425 - -50.25804307658 - -65.99699942572 - -74.26021768605 - -83.14300976651 - -99.06597211166 - -123.0990460751 - -317.5942358597 - -316.7242358597 - -1388.942032566 - 499.8256946656 - 193.6284049595 - 111.8217913474 - 75.26222466414 - 54.21318477565 - 41.23613019711 - 1703.077253326 - 1704.647253326 - 1696.957253326 - 1690.697253326 - 1690.957253326 - 1695.007253326 - -57.45804307658 - -81.56021768605 - -98.76597211166 - -140.7932859294 - -225.4828036794 - -503.9694431202 - -225.6628036794 - -503.8594431202 - 1696.587253326 - 148.558324502 - 71.09827286157 - 41.53217232425 - -47.55804307658 - -74.96021768605 - -99.26597211166 - -151.5932859294 - -231.2828036794 - -509.9694431202 - -230.7228036794 - -508.9994431202 - 1695.127253326 - 145.748324502 - 66.89827286157 - 41.30217232425 - -61.55804307658 - -77.06021768605 - -105.9659721117 - -149.8932859294 - -229.5828036794 - -511.3694431202 - -229.2928036794 - -510.7194431202 - 1692.237253326 - 145.628324502 - 66.22827286157 - 39.43217232425 - 1692.577253326 - -47.25804307658 - -71.76021768605 - -145.0932859294 - 1692.197253326 - 37.66217232425 - 1691.827253326 - 37.85217232425 - 1694.077253326 - 1693.507253326 - 1694.527253326 - -183.9813984168 - -183.5313984168 - -46.55804307658 - -74.66021768605 - -179.6813984168 - -179.6713984168 - -1385.642032566 - 196.2584049595 - -181.0813984168 - -180.7913984168 - -46.95804307658 - -75.36021768605 - -1386.162032566 - 195.9184049595 - -180.5813984168 - -180.5313984168 - - 117.9473464966 - 104.1334190546 - 90.05657396185 - 91.2207789054 - 83.93061974583 - 71.30942158128 - 62.32199544308 - 59.88083883476 - 62.27199544308 - 60.05583883476 - 62.77642327569 - 76.9431054476 - 131.4875801385 - -2342.694563021 - -86.51002021515 - -37.31642203542 - -19.79861800279 - 115.6473464966 - 
102.3334190546 - 97.45657396185 - 89.9207789054 - 80.83061974583 - 67.20942158128 - 60.52199544308 - 57.68083883476 - 60.39699544308 - 58.40583883476 - 62.55142327569 - 77.3931054476 - 129.3500801385 - -2342.957063021 - -86.32252021515 - -36.19142203542 - -21.71111800279 - 105.0473464966 - 104.4334190546 - 89.65657396185 - 83.5207789054 - 80.03061974583 - 70.50942158128 - 58.02199544308 - 56.68083883476 - 58.07199544308 - 57.28083883476 - 61.50142327569 - 75.6681054476 - 130.4375801385 - -2343.519563021 - -87.11002021515 - -38.55392203542 - -21.86111800279 - 99.14734649662 - 92.73341905458 - 86.65657396185 - 81.8207789054 - 77.13061974583 - 69.00942158128 - 60.62199544308 - 56.68083883476 - 60.58449544308 - 57.43083883476 - 61.80142327569 - 76.3431054476 - 130.7750801385 - -2343.932063021 - -86.88502021515 - -38.70392203542 - -22.19861800279 - 106.5473464966 - 88.25657396185 - 61.9476238727 - 64.62033506263 - 65.33033506263 - 67.77033506263 - -17.38684121661 - 103.0473464966 - 85.75657396185 - 62.6476238727 - 66.05033506263 - 64.96033506263 - 66.83033506263 - -19.07684121661 - 101.8473464966 - 86.85657396185 - 62.3476238727 - 66.08033506263 - 64.02033506263 - 65.52033506263 - -17.57684121661 - 103.0473464966 - 89.35657396185 - 63.2476238727 - 64.77033506263 - -17.38684121661 - 64.96033506263 - 65.33033506263 - -17.57684121661 - 101.8473464966 - 85.85657396185 - 74.63061974583 - 61.4476238727 - 55.64384266576 - 56.01384266576 - 58.81780905206 - 66.16033506263 - 253.6554490216 - -52.38720394238 - -16.34684121661 - 100.8473464966 - 87.85657396185 - 75.63061974583 - 64.6476238727 - 53.44384266576 - 53.76384266576 - 59.30780905206 - 64.96033506263 - 252.8254490216 - -54.07720394238 - -15.59684121661 - 95.54734649662 - 85.35657396185 - 75.33061974583 - 65.2476238727 - 54.44384266576 - 55.11384266576 - 60.99780905206 - 67.85033506263 - 250.5754490216 - -52.83720394238 - -12.89684121661 - 103.0473464966 - 91.43341905458 - 87.85657396185 - 84.3207789054 - 74.53061974583 - 
65.80942158128 - 56.48083883476 - 57.35083883476 - 58.90892327569 - 72.8131054476 - 127.6600801385 - -2346.667063021 - -89.36502021515 - -40.65642203542 - -24.07361800279 - 76.40033506263 - 77.97033506263 - 70.28033506263 - 64.02033506263 - 64.28033506263 - 68.33033506263 - 95.84734649662 - 80.55657396185 - 74.83061974583 - 69.2476238727 - 63.24384266576 - 63.69780905206 - 63.06384266576 - 63.80780905206 - 69.91033506263 - 253.9554490216 - -49.31720394238 - -14.27684121661 - 105.7473464966 - 87.15657396185 - 74.33061974583 - 58.4476238727 - 57.44384266576 - 57.69780905206 - 58.00384266576 - 58.66780905206 - 68.45033506263 - 251.1454490216 - -53.51720394238 - -14.50684121661 - 91.74734649662 - 85.05657396185 - 67.63061974583 - 60.1476238727 - 59.14384266576 - 56.29780905206 - 59.43384266576 - 56.94780905206 - 65.56033506263 - 251.0254490216 - -54.18720394238 - -16.37684121661 - 65.90033506263 - 106.0473464966 - 90.35657396185 - 64.9476238727 - 65.52033506263 - -18.14684121661 - 65.15033506263 - -17.95684121661 - 67.40033506263 - 66.83033506263 - 67.85033506263 - 56.72199544308 - 57.17199544308 - 106.7473464966 - 87.45657396185 - 61.02199544308 - 61.03199544308 - 62.20892327569 - 130.2900801385 - 59.62199544308 - 59.91199544308 - 106.3473464966 - 86.75657396185 - 61.68892327569 - 129.9500801385 - 60.12199544308 - 60.17199544308 - - 175.7616267494 - 135.815392655 - 107.427429421 - 99.44443456745 - 85.70608965926 - 64.3171217786 - 44.36460041182 - 32.95338522348 - 44.31460041182 - 33.12838522348 - 24.75298136325 - 19.273476213 - 15.97636612239 - 13.00317519799 - 10.70826764037 - 9.858997178816 - 10.15607036729 - 173.4616267494 - 134.015392655 - 114.827429421 - 98.14443456745 - 82.60608965926 - 60.2171217786 - 42.56460041182 - 30.75338522348 - 42.43960041182 - 31.47838522348 - 24.52798136325 - 19.723476213 - 13.83886612239 - 12.74067519799 - 10.89576764037 - 10.98399717882 - 8.243570367288 - 162.8616267494 - 136.115392655 - 107.027429421 - 91.74443456745 - 
81.80608965926 - 63.5171217786 - 40.06460041182 - 29.75338522348 - 40.11460041182 - 30.35338522348 - 23.47798136325 - 17.998476213 - 14.92636612239 - 12.17817519799 - 10.10826764037 - 8.621497178816 - 8.093570367288 - 156.9616267494 - 124.415392655 - 104.027429421 - 90.04443456745 - 78.90608965926 - 62.0171217786 - 42.66460041182 - 29.75338522348 - 42.62710041182 - 30.50338522348 - 23.77798136325 - 18.673476213 - 15.26386612239 - 11.76567519799 - 10.33326764037 - 8.471497178816 - 7.756070367288 - 164.3616267494 - 105.627429421 - 48.8819398286 - 18.5818314891 - 19.2918314891 - 21.7318314891 - 7.60022447721 - 160.8616267494 - 103.127429421 - 49.5819398286 - 20.0118314891 - 18.9218314891 - 20.7918314891 - 5.91022447721 - 159.6616267494 - 104.227429421 - 49.2819398286 - 20.0418314891 - 17.9818314891 - 19.4818314891 - 7.41022447721 - 160.8616267494 - 106.727429421 - 50.1819398286 - 18.7318314891 - 7.60022447721 - 18.9218314891 - 19.2918314891 - 7.41022447721 - 159.6616267494 - 103.227429421 - 76.40608965926 - 48.3819398286 - 33.22986519451 - 33.59986519451 - 26.87225158213 - 20.1218314891 - 15.2111293494 - 11.73215109931 - 8.64022447721 - 158.6616267494 - 105.227429421 - 77.40608965926 - 51.5819398286 - 31.02986519451 - 31.34986519451 - 27.36225158213 - 18.9218314891 - 14.3811293494 - 10.04215109931 - 9.39022447721 - 153.3616267494 - 102.727429421 - 77.10608965926 - 52.1819398286 - 32.02986519451 - 32.69986519451 - 29.05225158213 - 21.8118314891 - 12.1311293494 - 11.28215109931 - 12.09022447721 - 160.8616267494 - 123.115392655 - 105.227429421 - 92.54443456745 - 76.30608965926 - 58.8171217786 - 29.55338522348 - 30.42338522348 - 20.88548136325 - 15.143476213 - 12.14886612239 - 9.030675197988 - 7.853267640371 - 6.518997178816 - 5.881070367288 - 30.3618314891 - 31.9318314891 - 24.2418314891 - 17.9818314891 - 18.2418314891 - 22.2918314891 - 153.6616267494 - 97.927429421 - 76.60608965926 - 56.1819398286 - 40.82986519451 - 31.75225158213 - 40.64986519451 - 31.86225158213 - 
23.8718314891 - 15.5111293494 - 14.80215109931 - 10.71022447721 - 163.5616267494 - 104.527429421 - 76.10608965926 - 45.3819398286 - 35.02986519451 - 25.75225158213 - 35.58986519451 - 26.72225158213 - 22.4118314891 - 12.7011293494 - 10.60215109931 - 10.48022447721 - 149.5616267494 - 102.427429421 - 69.40608965926 - 47.0819398286 - 36.72986519451 - 24.35225158213 - 37.01986519451 - 25.00225158213 - 19.5218314891 - 12.5811293494 - 9.93215109931 - 8.61022447721 - 19.8618314891 - 163.8616267494 - 107.727429421 - 51.8819398286 - 19.4818314891 - 6.84022447721 - 19.1118314891 - 7.03022447721 - 21.3618314891 - 20.7918314891 - 21.8118314891 - 38.76460041182 - 39.21460041182 - 164.5616267494 - 104.827429421 - 43.06460041182 - 43.07460041182 - 24.18548136325 - 14.77886612239 - 41.66460041182 - 41.95460041182 - 164.1616267494 - 104.127429421 - 23.66548136325 - 14.43886612239 - 42.16460041182 - 42.21460041182 history_x: - - 0.15 - 0.008 - 0.01 - - 0.25 - 0.008 - 0.01 - - 0.15 - 0.108 - 0.01 - - 0.15 - 0.008 - 0.11 - - 0.1596177824551 - -0.07539624732067 - 0.08766385239892 - - 0.2 - 0.008531162120637 - -0.002952684076318 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 history_x_expected: - - 0.15 - 0.008 - 0.01 - - 0.25 - 0.008 - 0.01 - - 0.15 - 0.108 - 0.01 - - 0.15 - 0.008 - 0.11 - - 0.1596177824551 - -0.07539624732067 - 0.08766385239892 - - 0.2 - 0.008531162120637 - -0.002952684076318 - - 0.1505141617677 - -0.04199731338289 - 0.009934485345754 - - 0.1374618789969 - 0.007934485345754 - -0.03840238867598 index_best_x: 0 linear_terms: - 168.1373336387 - 5647.516828713 - 6083.95846304 lower_bounds: null model_improving_points: - - 1.0000000000000002 - 0.0 - 0.0 - - 0.010623242412742123 - 0.0 - 0.0 - - -0.2590536815263693 - 0.0 - 0.0 model_indices: - 5 - 4 - 3 - 2 - 1 - 0 - 0 model_indices_expected: - 5 - 6 - 7 - 2 - 1 - 0 - 0 n: 3 n_modelpoints: 1 n_modelpoints_expected: 3 square_terms: - - 1593.211846704 - -3249.345655429 - 7950.494350682 - - -3249.345655429 - 151896.1417043 - 
15486.3682482 - - 7950.494350682 - 15486.3682482 - 54810.91090485 upper_bound: null ================================================ FILE: tests/optimagic/optimizers/_pounders/fixtures/add_points_until_main_model_fully_linear_ii.yaml ================================================ --- delta: 0.025 history_criterion: - - 21.53511643627 - 14.80453604351 - 6.548558251064 - 12.54188075473 - 9.282890198608 - 2.859555210712 - 0.9381817894678 - 0.2048532883114 - 0.8881817894678 - 0.3798532883114 - -0.9101956814319 - -1.36444138824 - -0.9351994446357 - -1.055070381505 - -1.111335532899 - -0.1703442432756 - 1.580641245921 - 19.23511643627 - 13.00453604351 - 13.94855825106 - 11.24188075473 - 6.182890198608 - -1.240444789288 - -0.8618182105322 - -1.995146711689 - -0.9868182105322 - -1.270146711689 - -1.135195681432 - -0.9144413882404 - -3.072699444636 - -1.317570381505 - -0.9238355328992 - 0.9546557567244 - -0.3318587540789 - 8.635116436265 - 15.10453604351 - 6.148558251063 - 4.841880754733 - 5.382890198608 - 2.059555210712 - -3.361818210532 - -2.995146711689 - -3.311818210532 - -2.395146711689 - -2.185195681432 - -2.63944138824 - -1.985199444636 - -1.880070381505 - -1.711335532899 - -1.407844243276 - -0.4818587540789 - 2.735116436265 - 3.404536043506 - 3.148558251063 - 3.141880754733 - 2.482890198608 - 0.5595552107122 - -0.7618182105322 - -2.995146711689 - -0.7993182105322 - -2.245146711689 - -1.885195681432 - -1.96444138824 - -1.647699444636 - -2.292570381505 - -1.486335532899 - -1.557844243276 - -0.8193587540789 - 10.13511643627 - 4.748558251063 - -2.218096467799 - -4.369688200573 - -3.659688200573 - -1.219688200573 - -0.3489655844206 - 6.635116436265 - 2.248558251063 - -1.518096467799 - -2.939688200573 - -4.029688200573 - -2.159688200573 - -2.038965584421 - 5.435116436265 - 3.348558251064 - -1.818096467799 - -2.909688200573 - -4.969688200573 - -3.469688200573 - -0.5389655844206 - 6.635116436265 - 5.848558251064 - -0.918096467799 - -4.219688200573 - -0.3489655844206 - 
-4.029688200573 - -3.659688200573 - -0.5389655844206 - 5.435116436265 - 2.348558251064 - -0.0171098013921 - -2.718096467799 - -4.257793595776 - -3.887793595776 - -2.006947842151 - -2.829688200573 - -0.1835757519589 - 0.8557490906722 - 0.6910344155794 - 4.435116436265 - 4.348558251064 - 0.9828901986079 - 0.481903532201 - -6.457793595776 - -6.137793595776 - -1.516947842151 - -4.029688200573 - -1.013575751959 - -0.8342509093278 - 1.441034415579 - -0.8648835637348 - 1.848558251064 - 0.6828901986079 - 1.081903532201 - -5.457793595776 - -4.787793595776 - 0.1730521578493 - -1.139688200573 - -3.263575751959 - 0.4057490906722 - 4.141034415579 - 6.635116436265 - 2.104536043506 - 4.348558251064 - 5.641880754733 - -0.1171098013921 - -2.640444789288 - -3.195146711689 - -2.325146711689 - -4.777695681432 - -5.49444138824 - -4.762699444636 - -5.027570381505 - -3.966335532899 - -3.510344243276 - -2.694358754079 - 7.410311799427 - 8.980311799427 - 1.290311799427 - -4.969688200573 - -4.709688200573 - -0.659688200573 - -0.5648835637348 - -2.951441748936 - 0.1828901986079 - 5.081903532201 - 3.342206404224 - 2.873052157849 - 3.162206404224 - 2.983052157849 - 0.920311799427 - 0.1164242480411 - 3.925749090672 - 2.761034415579 - 9.335116436265 - 3.648558251063 - -0.3171098013921 - -5.718096467799 - -2.457793595776 - -3.126947842151 - -1.897793595776 - -2.156947842151 - -0.539688200573 - -2.693575751959 - -0.2742509093278 - 2.531034415579 - -4.664883563735 - 1.548558251064 - -7.017109801392 - -4.018096467799 - -0.7577935957756 - -4.526947842151 - -0.4677935957756 - -3.876947842151 - -3.429688200573 - -2.813575751959 - -0.9442509093278 - 0.6610344155794 - -3.089688200573 - 9.635116436265 - 6.848558251064 - 0.781903532201 - -3.469688200573 - -1.108965584421 - -3.839688200573 - -0.9189655844206 - -1.589688200573 - -2.159688200573 - -1.139688200573 - -4.661818210532 - -4.211818210532 - 10.33511643627 - 3.948558251064 - -0.3618182105322 - -0.3518182105322 - -1.477695681432 - -2.132699444636 - 
-1.761818210532 - -1.471818210532 - 9.935116436265 - 3.248558251063 - -1.997695681432 - -2.472699444636 - -1.261818210532 - -1.211818210532 - - 25.01562287811 - 18.67576650474 - 10.71425043997 - 16.92850306334 - 13.83328982937 - 7.61143273431 - 5.780449944004 - 4.918595910462 - 5.730449944004 - 5.093595910462 - 3.573230198002 - 2.843276294294 - 2.98078293018 - 2.569118760852 - 2.229814978179 - 2.901300021005 - 4.398727952741 - 22.71562287811 - 16.87576650474 - 18.11425043997 - 15.62850306334 - 10.73328982937 - 3.51143273431 - 3.980449944004 - 2.718595910462 - 3.855449944004 - 3.443595910462 - 3.348230198002 - 3.293276294294 - 0.8432829301802 - 2.306618760852 - 2.417314978179 - 4.026300021005 - 2.486227952741 - 12.11562287811 - 18.97576650474 - 10.31425043997 - 9.22850306334 - 9.933289829366 - 6.81143273431 - 1.480449944004 - 1.718595910462 - 1.530449944004 - 2.318595910462 - 2.298230198002 - 1.568276294294 - 1.93078293018 - 1.744118760852 - 1.629814978179 - 1.663800021005 - 2.336227952741 - 6.215622878108 - 7.275766504742 - 7.314250439974 - 7.52850306334 - 7.033289829366 - 5.31143273431 - 4.080449944004 - 1.718595910462 - 4.042949944004 - 2.468595910462 - 2.598230198002 - 2.243276294294 - 2.26828293018 - 1.331618760852 - 1.854814978179 - 1.513800021005 - 1.998727952741 - 13.61562287811 - 8.914250439974 - 2.617857443871 - -0.02069875634249 - 0.6893012436575 - 3.129301243658 - 2.348674115464 - 10.11562287811 - 6.414250439974 - 3.317857443871 - 1.409301243658 - 0.3193012436575 - 2.189301243658 - 0.6586741154643 - 8.915622878108 - 7.514250439974 - 3.017857443871 - 1.439301243658 - -0.6206987563425 - 0.8793012436575 - 2.158674115464 - 10.11562287811 - 10.01425043997 - 3.917857443871 - 0.1293012436575 - 2.348674115464 - 0.3193012436575 - 0.6893012436575 - 2.158674115464 - 8.915622878108 - 6.514250439974 - 4.533289829366 - 2.117857443871 - 0.5381907245488 - 0.9081907245488 - 2.599956711546 - 1.519301243658 - 3.585844975595 - 4.0602621231 - 3.388674115464 - 7.915622878108 
- 8.514250439974 - 5.533289829366 - 5.317857443871 - -1.661809275451 - -1.341809275451 - 3.089956711546 - 0.3193012436575 - 2.755844975595 - 2.3702621231 - 4.138674115464 - 2.615622878108 - 6.014250439974 - 5.233289829366 - 5.917857443871 - -0.6618092754512 - 0.008190724548808 - 4.779956711546 - 3.209301243658 - 0.505844975595 - 3.6102621231 - 6.838674115464 - 10.11562287811 - 5.975766504742 - 8.514250439974 - 10.02850306334 - 4.433289829366 - 2.11143273431 - 1.518595910462 - 2.388595910462 - -0.2942698019983 - -1.286723705706 - -0.8467170698198 - -1.403381239148 - -0.6251850218209 - -0.4386999789948 - 0.1237279527411 - 11.75930124366 - 13.32930124366 - 5.639301243658 - -0.6206987563425 - -0.3606987563425 - 3.689301243658 - 2.915622878108 - 1.214250439974 - 4.733289829366 - 9.917857443871 - 8.138190724549 - 7.479956711546 - 7.958190724549 - 7.589956711546 - 5.269301243658 - 3.885844975595 - 7.1302621231 - 5.458674115464 - 12.81562287811 - 7.814250439974 - 4.233289829366 - -0.8821425561292 - 2.338190724549 - 1.479956711546 - 2.898190724549 - 2.449956711546 - 3.809301243658 - 1.075844975595 - 2.9302621231 - 5.228674115464 - -1.184377121892 - 5.714250439974 - -2.466710170634 - 0.8178574438708 - 4.038190724549 - 0.07995671154575 - 4.328190724549 - 0.7299567115457 - 0.9193012436575 - 0.955844975595 - 2.2602621231 - 3.358674115464 - 1.259301243658 - 13.11562287811 - 11.01425043997 - 5.617857443871 - 0.8793012436575 - 1.588674115464 - 0.5093012436575 - 1.778674115464 - 2.759301243658 - 2.189301243658 - 3.209301243658 - 0.1804499440042 - 0.6304499440042 - 13.81562287811 - 8.114250439974 - 4.480449944004 - 4.490449944004 - 3.005730198002 - 1.78328293018 - 3.080449944004 - 3.370449944004 - 13.41562287811 - 7.414250439974 - 2.485730198002 - 1.44328293018 - 3.580449944004 - 3.630449944004 - - 84.68988065196 - 70.73054388289 - 56.46322643196 - 57.38823556867 - 49.80586460657 - 36.42009030556 - 24.97150307276 - 18.1321687762 - 24.92150307276 - 18.3071687762 - 12.85194135284 - 
9.428755329368 - 7.671466840337 - 5.900125885276 - 4.571195356425 - 4.515228493968 - 5.474513604881 - 82.38988065196 - 68.93054388289 - 63.86322643196 - 56.08823556867 - 46.70586460657 - 32.32009030556 - 23.17150307276 - 15.9321687762 - 23.04650307276 - 16.6571687762 - 12.62694135284 - 9.878755329368 - 5.533966840337 - 5.637625885276 - 4.758695356425 - 5.640228493968 - 3.562013604881 - 71.78988065196 - 71.03054388289 - 56.06322643196 - 49.68823556867 - 45.90586460657 - 35.62009030556 - 20.67150307276 - 14.9321687762 - 20.72150307276 - 15.5321687762 - 11.57694135284 - 8.153755329368 - 6.621466840337 - 5.075125885276 - 3.971195356425 - 3.277728493968 - 3.412013604881 - 65.88988065196 - 59.33054388289 - 53.06322643196 - 47.98823556867 - 43.00586460657 - 34.12009030556 - 23.27150307276 - 14.9321687762 - 23.23400307276 - 15.6821687762 - 11.87694135284 - 8.828755329368 - 6.958966840337 - 4.662625885276 - 4.196195356425 - 3.127728493968 - 3.074513604881 - 73.28988065196 - 54.66322643196 - 26.00799822147 - 7.789506147668 - 8.499506147668 - 10.93950614767 - 3.209942501544 - 69.78988065196 - 52.16322643196 - 26.70799822147 - 9.219506147668 - 8.129506147668 - 9.999506147668 - 1.519942501544 - 68.58988065196 - 53.26322643196 - 26.40799822147 - 9.249506147668 - 7.189506147668 - 8.689506147668 - 3.019942501544 - 69.78988065196 - 55.76322643196 - 27.30799822147 - 7.939506147668 - 3.209942501544 - 8.129506147668 - 8.499506147668 - 3.019942501544 - 68.58988065196 - 52.26322643196 - 40.50586460657 - 25.50799822147 - 16.41235765092 - 16.78235765092 - 13.65241143766 - 9.329506147668 - 7.541813269635 - 6.010338273791 - 4.249942501544 - 67.58988065196 - 54.26322643196 - 41.50586460657 - 28.70799822147 - 14.21235765092 - 14.53235765092 - 14.14241143766 - 8.129506147668 - 6.711813269635 - 4.320338273791 - 4.999942501544 - 62.28988065196 - 51.76322643196 - 41.20586460657 - 29.30799822147 - 15.21235765092 - 15.88235765092 - 15.83241143766 - 11.01950614767 - 4.461813269635 - 5.560338273791 - 
7.699942501544 - 69.78988065196 - 58.03054388289 - 54.26322643196 - 50.48823556867 - 40.40586460657 - 30.92009030556 - 14.7321687762 - 15.6021687762 - 8.984441352835 - 5.298755329368 - 3.843966840337 - 1.927625885276 - 1.716195356425 - 1.175228493968 - 1.199513604881 - 19.56950614767 - 21.13950614767 - 13.44950614767 - 7.189506147668 - 7.449506147668 - 11.49950614767 - 62.58988065196 - 46.96322643196 - 40.70586460657 - 33.30799822147 - 24.01235765092 - 18.53241143766 - 23.83235765092 - 18.64241143766 - 13.07950614767 - 7.841813269635 - 9.080338273791 - 6.319942501544 - 72.48988065196 - 53.56322643196 - 40.20586460657 - 22.50799822147 - 18.21235765092 - 12.53241143766 - 18.77235765092 - 13.50241143766 - 11.61950614767 - 5.031813269635 - 4.880338273791 - 6.089942501544 - 58.48988065196 - 51.46322643196 - 33.50586460657 - 24.20799822147 - 19.91235765092 - 11.13241143766 - 20.20235765092 - 11.78241143766 - 8.729506147668 - 4.911813269635 - 4.210338273791 - 4.219942501544 - 9.069506147668 - 72.78988065196 - 56.76322643196 - 29.00799822147 - 8.689506147668 - 2.449942501544 - 8.319506147668 - 2.639942501544 - 10.56950614767 - 9.999506147668 - 11.01950614767 - 19.37150307276 - 19.82150307276 - 73.48988065196 - 53.86322643196 - 23.67150307276 - 23.68150307276 - 12.28444135284 - 6.473966840337 - 22.27150307276 - 22.56150307276 - 73.08988065196 - 53.16322643196 - 11.76444135284 - 6.133966840337 - 22.77150307276 - 22.82150307276 - - 78.17391291542 - 66.8366728159 - 54.32599616455 - 56.48754438985 - 49.80586460657 - 37.60220537333 - 27.26395828245 - 20.80723297571 - 27.21395828245 - 20.98223297571 - 15.60547682225 - 12.11967202128 - 10.23248733714 - 8.30080219923 - 6.80055773407 - 6.572919168714 - 7.365975022026 - 75.87391291542 - 65.0366728159 - 61.72599616455 - 55.18754438985 - 46.70586460657 - 33.50220537333 - 25.46395828245 - 18.60723297571 - 25.33895828245 - 19.33223297571 - 15.38047682225 - 12.56967202128 - 8.094987337144 - 8.03830219923 - 6.98805773407 - 7.697919168714 - 
5.453475022026 - 65.27391291542 - 67.1366728159 - 53.92599616455 - 48.78754438985 - 45.90586460657 - 36.80220537333 - 22.96395828245 - 17.60723297571 - 23.01395828245 - 18.20723297571 - 14.33047682225 - 10.84467202128 - 9.182487337144 - 7.47580219923 - 6.20055773407 - 5.335419168714 - 5.303475022026 - 59.37391291542 - 55.4366728159 - 50.92599616455 - 47.08754438985 - 43.00586460657 - 35.30220537333 - 25.56395828245 - 17.60723297571 - 25.52645828245 - 18.35723297571 - 14.63047682225 - 11.51967202128 - 9.519987337144 - 7.06330219923 - 6.42555773407 - 5.185419168714 - 4.965975022026 - 66.77391291542 - 52.52599616455 - 27.88429931353 - 10.52352617863 - 11.23352617863 - 13.67352617863 - 5.021362784819 - 63.27391291542 - 50.02599616455 - 28.58429931353 - 11.95352617863 - 10.86352617863 - 12.73352617863 - 3.331362784819 - 62.07391291542 - 51.12599616455 - 28.28429931353 - 11.98352617863 - 9.923526178634 - 11.42352617863 - 4.831362784819 - 63.27391291542 - 53.62599616455 - 29.18429931353 - 10.67352617863 - 5.021362784819 - 10.86352617863 - 11.23352617863 - 4.831362784819 - 62.07391291542 - 50.12599616455 - 40.50586460657 - 27.38429931353 - 18.95079727771 - 19.32079727771 - 16.391415976 - 12.06352617863 - 10.02497402658 - 8.153464959245 - 6.061362784819 - 61.07391291542 - 52.12599616455 - 41.50586460657 - 30.58429931353 - 16.75079727771 - 17.07079727771 - 16.881415976 - 10.86352617863 - 9.194974026576 - 6.463464959245 - 6.811362784819 - 55.77391291542 - 49.62599616455 - 41.20586460657 - 31.18429931353 - 17.75079727771 - 18.42079727771 - 18.571415976 - 13.75352617863 - 6.944974026576 - 7.703464959245 - 9.511362784819 - 63.27391291542 - 54.1366728159 - 52.12599616455 - 49.58754438985 - 40.40586460657 - 32.10220537333 - 17.40723297571 - 18.27723297571 - 11.73797682225 - 7.989672021276 - 6.404987337144 - 4.32830219923 - 3.94555773407 - 3.232919168714 - 3.090975022026 - 22.30352617863 - 23.87352617863 - 16.18352617863 - 9.923526178634 - 10.18352617863 - 14.23352617863 - 
56.07391291542 - 44.82599616455 - 40.70586460657 - 35.18429931353 - 26.55079727771 - 21.271415976 - 26.37079727771 - 21.381415976 - 15.81352617863 - 10.32497402658 - 11.22346495925 - 8.131362784819 - 65.97391291542 - 51.42599616455 - 40.20586460657 - 24.38429931353 - 20.75079727771 - 15.271415976 - 21.31079727771 - 16.241415976 - 14.35352617863 - 7.514974026576 - 7.023464959245 - 7.901362784819 - 51.97391291542 - 49.32599616455 - 33.50586460657 - 26.08429931353 - 22.45079727771 - 13.871415976 - 22.74079727771 - 14.521415976 - 11.46352617863 - 7.394974026576 - 6.353464959245 - 6.031362784819 - 11.80352617863 - 66.27391291542 - 54.62599616455 - 30.88429931353 - 11.42352617863 - 4.261362784819 - 11.05352617863 - 4.451362784819 - 13.30352617863 - 12.73352617863 - 13.75352617863 - 21.66395828245 - 22.11395828245 - 66.97391291542 - 51.72599616455 - 25.96395828245 - 25.97395828245 - 15.03797682225 - 9.034987337144 - 24.56395828245 - 24.85395828245 - 66.57391291542 - 51.02599616455 - 14.51797682225 - 8.694987337144 - 25.06395828245 - 25.11395828245 - - 122.1511527598 - 122.6211240639 - 156.1508499375 - -599.1450584808 - -12.38948471101 - 19.33755088278 - 21.40590723345 - 17.86925432536 - 21.35590723345 - 18.04425432536 - 13.84616571438 - 10.95881863539 - 9.418115456519 - 7.704952567843 - 6.351080428381 - 6.22595148445 - 7.093292608367 - 119.8511527598 - 120.8211240639 - 163.5508499375 - -600.4450584808 - -15.48948471101 - 15.23755088278 - 19.60590723345 - 15.66925432536 - 19.48090723345 - 16.39425432536 - 13.62116571438 - 11.40881863539 - 7.280615456519 - 7.442452567843 - 6.538580428381 - 7.35095148445 - 5.180792608367 - 109.2511527598 - 122.9211240639 - 155.7508499375 - -606.8450584808 - -16.28948471101 - 18.53755088278 - 17.10590723345 - 14.66925432536 - 17.15590723345 - 15.26925432536 - 12.57116571438 - 9.683818635389 - 8.368115456519 - 6.879952567843 - 5.751080428381 - 4.98845148445 - 5.030792608367 - 103.3511527598 - 111.2211240639 - 152.7508499375 - -608.5450584808 - 
-19.18948471101 - 17.03755088278 - 19.70590723345 - 14.66925432536 - 19.66840723345 - 15.41925432536 - 12.87116571438 - 10.35881863539 - 8.705615456519 - 6.467452567843 - 5.976080428381 - 4.83845148445 - 4.693292608367 - 110.7511527598 - 154.3508499375 - 18.46995434894 - 9.107714197815 - 9.817714197815 - 12.25771419782 - 4.778284133588 - 107.2511527598 - 151.8508499375 - 19.16995434894 - 10.53771419782 - 9.447714197815 - 11.31771419782 - 3.088284133588 - 106.0511527598 - 152.9508499375 - 18.86995434894 - 10.56771419782 - 8.507714197815 - 10.00771419782 - 4.588284133588 - 107.2511527598 - 155.4508499375 - 19.76995434894 - 9.257714197815 - 4.778284133588 - 9.447714197815 - 9.817714197815 - 4.588284133588 - 106.0511527598 - 151.9508499375 - -21.68948471101 - 17.96995434894 - 14.92797213765 - 15.29797213765 - 14.15288063203 - 10.64771419782 - 9.331447324355 - 7.759548240156 - 5.818284133588 - 105.0511527598 - 153.9508499375 - -20.68948471101 - 21.16995434894 - 12.72797213765 - 13.04797213765 - 14.64288063203 - 9.447714197815 - 8.501447324355 - 6.069548240156 - 6.568284133588 - 99.75115275983 - 151.4508499375 - -20.98948471101 - 21.76995434894 - 13.72797213765 - 14.39797213765 - 16.33288063203 - 12.33771419782 - 6.251447324355 - 7.309548240156 - 9.268284133588 - 107.2511527598 - 109.9211240639 - 153.9508499375 - -606.0450584808 - -21.78948471101 - 13.83755088278 - 14.46925432536 - 15.33925432536 - 9.978665714379 - 6.828818635389 - 5.590615456519 - 3.732452567843 - 3.496080428381 - 2.88595148445 - 2.818292608367 - 20.88771419782 - 22.45771419782 - 14.76771419782 - 8.507714197815 - 8.767714197815 - 12.81771419782 - 100.0511527598 - 146.6508499375 - -21.48948471101 - 25.76995434894 - 22.52797213765 - 19.03288063203 - 22.34797213765 - 19.14288063203 - 14.39771419782 - 9.631447324355 - 10.82954824016 - 7.888284133588 - 109.9511527598 - 153.2508499375 - -21.98948471101 - 14.96995434894 - 16.72797213765 - 13.03288063203 - 17.28797213765 - 14.00288063203 - 12.93771419782 - 
6.821447324355 - 6.629548240156 - 7.658284133588 - 95.95115275983 - 151.1508499375 - -28.68948471101 - 16.66995434894 - 18.42797213765 - 11.63288063203 - 18.71797213765 - 12.28288063203 - 10.04771419782 - 6.701447324355 - 5.959548240156 - 5.788284133588 - 10.38771419782 - 110.2511527598 - 156.4508499375 - 21.46995434894 - 10.00771419782 - 4.018284133588 - 9.637714197815 - 4.208284133588 - 11.88771419782 - 11.31771419782 - 12.33771419782 - 15.80590723345 - 16.25590723345 - 110.9511527598 - 153.5508499375 - 20.10590723345 - 20.11590723345 - 13.27866571438 - 8.220615456519 - 18.70590723345 - 18.99590723345 - 110.5511527598 - 152.8508499375 - 12.75866571438 - 7.880615456519 - 19.20590723345 - 19.25590723345 - - -35.35804307658 - -53.29699942572 - -72.06021768605 - -76.24300976651 - -89.66597211166 - -117.5990460751 - -178.3813984168 - -314.1942358597 - -178.4313984168 - -314.0192358597 - -1385.074532566 - 503.9556946656 - 197.4559049595 - 115.7942913474 - 78.11722466414 - 57.55318477565 - 45.51113019711 - -37.65804307658 - -55.09699942572 - -64.66021768605 - -77.54300976651 - -92.76597211166 - -121.6990460751 - -180.1813984168 - -316.3942358597 - -180.3063984168 - -315.6692358597 - -1385.299532566 - 504.4056946656 - 195.3184049595 - 115.5317913474 - 78.30472466414 - 58.67818477565 - 43.59863019711 - -48.25804307658 - -52.99699942572 - -72.46021768605 - -83.94300976651 - -93.56597211166 - -118.3990460751 - -182.6813984168 - -317.3942358597 - -182.6313984168 - -316.7942358597 - -1386.349532566 - 502.6806946656 - 196.4059049595 - 114.9692913474 - 77.51722466414 - 56.31568477565 - 43.44863019711 - -54.15804307658 - -64.69699942572 - -75.46021768605 - -85.64300976651 - -96.46597211166 - -119.8990460751 - -180.0813984168 - -317.3942358597 - -180.1188984168 - -316.6442358597 - -1386.049532566 - 503.3556946656 - 196.7434049595 - 114.5567913474 - 77.74222466414 - 56.16568477565 - 43.11113019711 - -46.75804307658 - -73.86021768605 - -148.0932859294 - 1691.297253326 - 
1692.007253326 - 1694.447253326 - 38.42217232425 - -50.25804307658 - -76.36021768605 - -147.3932859294 - 1692.727253326 - 1691.637253326 - 1693.507253326 - 36.73217232425 - -51.45804307658 - -75.26021768605 - -147.6932859294 - 1692.757253326 - 1690.697253326 - 1692.197253326 - 38.23217232425 - -50.25804307658 - -72.76021768605 - -146.7932859294 - 1691.447253326 - 38.42217232425 - 1691.637253326 - 1692.007253326 - 38.23217232425 - -51.45804307658 - -76.26021768605 - -98.96597211166 - -148.5932859294 - -233.0828036794 - -232.7128036794 - -508.8494431202 - 1692.837253326 - 148.258324502 - 68.02827286157 - 39.46217232425 - -52.45804307658 - -74.26021768605 - -97.96597211166 - -145.3932859294 - -235.2828036794 - -234.9628036794 - -508.3594431202 - 1691.637253326 - 147.428324502 - 66.33827286157 - 40.21217232425 - -57.75804307658 - -76.76021768605 - -98.26597211166 - -144.7932859294 - -234.2828036794 - -233.6128036794 - -506.6694431202 - 1694.527253326 - 145.178324502 - 67.57827286157 - 42.91217232425 - -50.25804307658 - -65.99699942572 - -74.26021768605 - -83.14300976651 - -99.06597211166 - -123.0990460751 - -317.5942358597 - -316.7242358597 - -1388.942032566 - 499.8256946656 - 193.6284049595 - 111.8217913474 - 75.26222466414 - 54.21318477565 - 41.23613019711 - 1703.077253326 - 1704.647253326 - 1696.957253326 - 1690.697253326 - 1690.957253326 - 1695.007253326 - -57.45804307658 - -81.56021768605 - -98.76597211166 - -140.7932859294 - -225.4828036794 - -503.9694431202 - -225.6628036794 - -503.8594431202 - 1696.587253326 - 148.558324502 - 71.09827286157 - 41.53217232425 - -47.55804307658 - -74.96021768605 - -99.26597211166 - -151.5932859294 - -231.2828036794 - -509.9694431202 - -230.7228036794 - -508.9994431202 - 1695.127253326 - 145.748324502 - 66.89827286157 - 41.30217232425 - -61.55804307658 - -77.06021768605 - -105.9659721117 - -149.8932859294 - -229.5828036794 - -511.3694431202 - -229.2928036794 - -510.7194431202 - 1692.237253326 - 145.628324502 - 66.22827286157 - 
39.43217232425 - 1692.577253326 - -47.25804307658 - -71.76021768605 - -145.0932859294 - 1692.197253326 - 37.66217232425 - 1691.827253326 - 37.85217232425 - 1694.077253326 - 1693.507253326 - 1694.527253326 - -183.9813984168 - -183.5313984168 - -46.55804307658 - -74.66021768605 - -179.6813984168 - -179.6713984168 - -1385.642032566 - 196.2584049595 - -181.0813984168 - -180.7913984168 - -46.95804307658 - -75.36021768605 - -1386.162032566 - 195.9184049595 - -180.5813984168 - -180.5313984168 - - 117.9473464966 - 104.1334190546 - 90.05657396185 - 91.2207789054 - 83.93061974583 - 71.30942158128 - 62.32199544308 - 59.88083883476 - 62.27199544308 - 60.05583883476 - 62.77642327569 - 76.9431054476 - 131.4875801385 - -2342.694563021 - -86.51002021515 - -37.31642203542 - -19.79861800279 - 115.6473464966 - 102.3334190546 - 97.45657396185 - 89.9207789054 - 80.83061974583 - 67.20942158128 - 60.52199544308 - 57.68083883476 - 60.39699544308 - 58.40583883476 - 62.55142327569 - 77.3931054476 - 129.3500801385 - -2342.957063021 - -86.32252021515 - -36.19142203542 - -21.71111800279 - 105.0473464966 - 104.4334190546 - 89.65657396185 - 83.5207789054 - 80.03061974583 - 70.50942158128 - 58.02199544308 - 56.68083883476 - 58.07199544308 - 57.28083883476 - 61.50142327569 - 75.6681054476 - 130.4375801385 - -2343.519563021 - -87.11002021515 - -38.55392203542 - -21.86111800279 - 99.14734649662 - 92.73341905458 - 86.65657396185 - 81.8207789054 - 77.13061974583 - 69.00942158128 - 60.62199544308 - 56.68083883476 - 60.58449544308 - 57.43083883476 - 61.80142327569 - 76.3431054476 - 130.7750801385 - -2343.932063021 - -86.88502021515 - -38.70392203542 - -22.19861800279 - 106.5473464966 - 88.25657396185 - 61.9476238727 - 64.62033506263 - 65.33033506263 - 67.77033506263 - -17.38684121661 - 103.0473464966 - 85.75657396185 - 62.6476238727 - 66.05033506263 - 64.96033506263 - 66.83033506263 - -19.07684121661 - 101.8473464966 - 86.85657396185 - 62.3476238727 - 66.08033506263 - 64.02033506263 - 65.52033506263 - 
-17.57684121661 - 103.0473464966 - 89.35657396185 - 63.2476238727 - 64.77033506263 - -17.38684121661 - 64.96033506263 - 65.33033506263 - -17.57684121661 - 101.8473464966 - 85.85657396185 - 74.63061974583 - 61.4476238727 - 55.64384266576 - 56.01384266576 - 58.81780905206 - 66.16033506263 - 253.6554490216 - -52.38720394238 - -16.34684121661 - 100.8473464966 - 87.85657396185 - 75.63061974583 - 64.6476238727 - 53.44384266576 - 53.76384266576 - 59.30780905206 - 64.96033506263 - 252.8254490216 - -54.07720394238 - -15.59684121661 - 95.54734649662 - 85.35657396185 - 75.33061974583 - 65.2476238727 - 54.44384266576 - 55.11384266576 - 60.99780905206 - 67.85033506263 - 250.5754490216 - -52.83720394238 - -12.89684121661 - 103.0473464966 - 91.43341905458 - 87.85657396185 - 84.3207789054 - 74.53061974583 - 65.80942158128 - 56.48083883476 - 57.35083883476 - 58.90892327569 - 72.8131054476 - 127.6600801385 - -2346.667063021 - -89.36502021515 - -40.65642203542 - -24.07361800279 - 76.40033506263 - 77.97033506263 - 70.28033506263 - 64.02033506263 - 64.28033506263 - 68.33033506263 - 95.84734649662 - 80.55657396185 - 74.83061974583 - 69.2476238727 - 63.24384266576 - 63.69780905206 - 63.06384266576 - 63.80780905206 - 69.91033506263 - 253.9554490216 - -49.31720394238 - -14.27684121661 - 105.7473464966 - 87.15657396185 - 74.33061974583 - 58.4476238727 - 57.44384266576 - 57.69780905206 - 58.00384266576 - 58.66780905206 - 68.45033506263 - 251.1454490216 - -53.51720394238 - -14.50684121661 - 91.74734649662 - 85.05657396185 - 67.63061974583 - 60.1476238727 - 59.14384266576 - 56.29780905206 - 59.43384266576 - 56.94780905206 - 65.56033506263 - 251.0254490216 - -54.18720394238 - -16.37684121661 - 65.90033506263 - 106.0473464966 - 90.35657396185 - 64.9476238727 - 65.52033506263 - -18.14684121661 - 65.15033506263 - -17.95684121661 - 67.40033506263 - 66.83033506263 - 67.85033506263 - 56.72199544308 - 57.17199544308 - 106.7473464966 - 87.45657396185 - 61.02199544308 - 61.03199544308 - 62.20892327569 - 
130.2900801385 - 59.62199544308 - 59.91199544308 - 106.3473464966 - 86.75657396185 - 61.68892327569 - 129.9500801385 - 60.12199544308 - 60.17199544308 - - 175.7616267494 - 135.815392655 - 107.427429421 - 99.44443456745 - 85.70608965926 - 64.3171217786 - 44.36460041182 - 32.95338522348 - 44.31460041182 - 33.12838522348 - 24.75298136325 - 19.273476213 - 15.97636612239 - 13.00317519799 - 10.70826764037 - 9.858997178816 - 10.15607036729 - 173.4616267494 - 134.015392655 - 114.827429421 - 98.14443456745 - 82.60608965926 - 60.2171217786 - 42.56460041182 - 30.75338522348 - 42.43960041182 - 31.47838522348 - 24.52798136325 - 19.723476213 - 13.83886612239 - 12.74067519799 - 10.89576764037 - 10.98399717882 - 8.243570367288 - 162.8616267494 - 136.115392655 - 107.027429421 - 91.74443456745 - 81.80608965926 - 63.5171217786 - 40.06460041182 - 29.75338522348 - 40.11460041182 - 30.35338522348 - 23.47798136325 - 17.998476213 - 14.92636612239 - 12.17817519799 - 10.10826764037 - 8.621497178816 - 8.093570367288 - 156.9616267494 - 124.415392655 - 104.027429421 - 90.04443456745 - 78.90608965926 - 62.0171217786 - 42.66460041182 - 29.75338522348 - 42.62710041182 - 30.50338522348 - 23.77798136325 - 18.673476213 - 15.26386612239 - 11.76567519799 - 10.33326764037 - 8.471497178816 - 7.756070367288 - 164.3616267494 - 105.627429421 - 48.8819398286 - 18.5818314891 - 19.2918314891 - 21.7318314891 - 7.60022447721 - 160.8616267494 - 103.127429421 - 49.5819398286 - 20.0118314891 - 18.9218314891 - 20.7918314891 - 5.91022447721 - 159.6616267494 - 104.227429421 - 49.2819398286 - 20.0418314891 - 17.9818314891 - 19.4818314891 - 7.41022447721 - 160.8616267494 - 106.727429421 - 50.1819398286 - 18.7318314891 - 7.60022447721 - 18.9218314891 - 19.2918314891 - 7.41022447721 - 159.6616267494 - 103.227429421 - 76.40608965926 - 48.3819398286 - 33.22986519451 - 33.59986519451 - 26.87225158213 - 20.1218314891 - 15.2111293494 - 11.73215109931 - 8.64022447721 - 158.6616267494 - 105.227429421 - 77.40608965926 - 
51.5819398286 - 31.02986519451 - 31.34986519451 - 27.36225158213 - 18.9218314891 - 14.3811293494 - 10.04215109931 - 9.39022447721 - 153.3616267494 - 102.727429421 - 77.10608965926 - 52.1819398286 - 32.02986519451 - 32.69986519451 - 29.05225158213 - 21.8118314891 - 12.1311293494 - 11.28215109931 - 12.09022447721 - 160.8616267494 - 123.115392655 - 105.227429421 - 92.54443456745 - 76.30608965926 - 58.8171217786 - 29.55338522348 - 30.42338522348 - 20.88548136325 - 15.143476213 - 12.14886612239 - 9.030675197988 - 7.853267640371 - 6.518997178816 - 5.881070367288 - 30.3618314891 - 31.9318314891 - 24.2418314891 - 17.9818314891 - 18.2418314891 - 22.2918314891 - 153.6616267494 - 97.927429421 - 76.60608965926 - 56.1819398286 - 40.82986519451 - 31.75225158213 - 40.64986519451 - 31.86225158213 - 23.8718314891 - 15.5111293494 - 14.80215109931 - 10.71022447721 - 163.5616267494 - 104.527429421 - 76.10608965926 - 45.3819398286 - 35.02986519451 - 25.75225158213 - 35.58986519451 - 26.72225158213 - 22.4118314891 - 12.7011293494 - 10.60215109931 - 10.48022447721 - 149.5616267494 - 102.427429421 - 69.40608965926 - 47.0819398286 - 36.72986519451 - 24.35225158213 - 37.01986519451 - 25.00225158213 - 19.5218314891 - 12.5811293494 - 9.93215109931 - 8.61022447721 - 19.8618314891 - 163.8616267494 - 107.727429421 - 51.8819398286 - 19.4818314891 - 6.84022447721 - 19.1118314891 - 7.03022447721 - 21.3618314891 - 20.7918314891 - 21.8118314891 - 38.76460041182 - 39.21460041182 - 164.5616267494 - 104.827429421 - 43.06460041182 - 43.07460041182 - 24.18548136325 - 14.77886612239 - 41.66460041182 - 41.95460041182 - 164.1616267494 - 104.127429421 - 23.66548136325 - 14.43886612239 - 42.16460041182 - 42.21460041182 - - 28.24095690087 - 21.59536620662 - 13.25712029593 - 19.07487076201 - 15.58993695617 - 8.655383440756 - 5.734068533914 - 4.165799626156 - 5.684068533914 - 4.340799626156 - 2.383275058766 - 1.39686578611 - 1.39855054723 - 0.9316549828049 - 0.5908378210265 - 1.296279986221 - 2.850605037275 - 
25.94095690087 - 19.79536620662 - 20.65712029593 - 17.77487076201 - 12.48993695617 - 4.555383440756 - 3.934068533914 - 1.965799626156 - 3.809068533914 - 2.690799626156 - 2.158275058766 - 1.84686578611 - -0.7389494527701 - 0.6691549828049 - 0.7783378210265 - 2.421279986221 - 0.9381050372751 - 15.34095690087 - 21.89536620662 - 12.85712029593 - 11.37487076201 - 11.68993695617 - 7.855383440756 - 1.434068533914 - 0.9657996261561 - 1.484068533914 - 1.565799626156 - 1.108275058766 - 0.12186578611 - 0.3485505472299 - 0.1066549828049 - -0.009162178973513 - 0.05877998622137 - 0.7881050372751 - 9.440956900867 - 10.19536620662 - 9.857120295928 - 9.674870762006 - 8.789936956172 - 6.355383440756 - 4.034068533914 - 0.9657996261561 - 3.996568533914 - 1.715799626156 - 1.408275058766 - 0.79686578611 - 0.6860505472299 - -0.3058450171951 - 0.2158378210265 - -0.09122001377863 - 0.4506050372751 - 16.84095690087 - 11.45712029593 - 3.061569786218 - -1.357147755765 - -0.6471477557654 - 1.792852244235 - 0.8347758927937 - 13.34095690087 - 8.957120295928 - 3.761569786218 - 0.07285224423464 - -1.017147755765 - 0.8528522442346 - -0.8552241072063 - 12.14095690087 - 10.05712029593 - 3.461569786218 - 0.1028522442346 - -1.957147755765 - -0.4571477557654 - 0.6447758927937 - 13.34095690087 - 12.55712029593 - 4.361569786218 - -1.207147755765 - 0.8347758927937 - -1.017147755765 - -0.6471477557654 - 0.6447758927937 - 12.14095690087 - 9.057120295928 - 6.289936956172 - 2.561569786218 - 0.09812922077451 - 0.4681292207745 - 1.601218748851 - 0.1828522442346 - 1.967848278564 - 2.434721057271 - 1.874775892794 - 11.14095690087 - 11.05712029593 - 7.289936956172 - 5.761569786218 - -2.101870779225 - -1.781870779225 - 2.091218748851 - -1.017147755765 - 1.137848278564 - 0.744721057271 - 2.624775892794 - 5.840956900867 - 8.557120295928 - 6.989936956172 - 6.361569786218 - -1.101870779225 - -0.4318707792255 - 3.781218748851 - 1.872852244235 - -1.112151721436 - 1.984721057271 - 5.324775892794 - 13.34095690087 - 
8.895366206617 - 11.05712029593 - 12.17487076201 - 6.189936956172 - 3.155383440756 - 0.7657996261561 - 1.635799626156 - -1.484224941234 - -2.73313421389 - -2.42894945277 - -3.040845017195 - -2.264162178974 - -2.043720013779 - -1.424394962725 - 10.42285224423 - 11.99285224423 - 4.302852244235 - -1.957147755765 - -1.697147755765 - 2.352852244235 - 6.140956900867 - 3.757120295928 - 6.489936956172 - 10.36156978622 - 7.698129220775 - 6.481218748851 - 7.518129220775 - 6.591218748851 - 3.932852244235 - 2.267848278564 - 5.504721057271 - 3.944775892794 - 16.04095690087 - 10.35712029593 - 5.989936956172 - -0.4384302137822 - 1.898129220775 - 0.4812187488511 - 2.458129220775 - 1.451218748851 - 2.472852244235 - -0.5421517214358 - 1.304721057271 - 3.714775892794 - 2.040956900867 - 8.257120295928 - -0.7100630438283 - 1.261569786218 - 3.598129220775 - -0.9187812511489 - 3.888129220775 - -0.2687812511489 - -0.4171477557654 - -0.6621517214358 - 0.634721057271 - 1.844775892794 - -0.07714775576537 - 16.34095690087 - 13.55712029593 - 6.061569786218 - -0.4571477557654 - 0.07477589279366 - -0.8271477557654 - 0.2647758927937 - 1.422852244235 - 0.8528522442346 - 1.872852244235 - 0.1340685339144 - 0.5840685339144 - 17.04095690087 - 10.65712029593 - 4.434068533914 - 4.444068533914 - 1.815775058766 - 0.2010505472299 - 3.034068533914 - 3.324068533914 - 16.64095690087 - 9.957120295928 - 1.295775058766 - -0.1389494527701 - 3.534068533914 - 3.584068533914 - - 19.67905061421 - 12.78536491634 - 4.453409401868 - 10.42602658124 - 7.181651769754 - 0.8467383120783 - -0.8151544815029 - -1.28878727387 - -0.8651544815029 - -1.11378727387 - -2.178296849214 - -2.4437228135 - -1.857756175876 - -1.847417965917 - -1.795022214911 - -0.7628423028115 - 1.065115779582 - 17.37905061421 - 10.98536491634 - 11.85340940187 - 9.126026581237 - 4.081651769754 - -3.253261687922 - -2.615154481503 - -3.48878727387 - -2.740154481503 - -2.76378727387 - -2.403296849214 - -1.9937228135 - -3.995256175876 - -2.109917965917 - 
-1.607522214911 - 0.3621576971885 - -0.8473842204181 - 6.779050614207 - 13.08536491634 - 4.053409401868 - 2.726026581237 - 3.281651769754 - 0.04673831207827 - -5.115154481503 - -4.48878727387 - -5.065154481503 - -3.88878727387 - -3.453296849214 - -3.7187228135 - -2.907756175876 - -2.672417965917 - -2.395022214911 - -2.000342302812 - -0.9973842204181 - 0.8790506142075 - 1.385364916339 - 1.053409401868 - 1.026026581237 - 0.3816517697539 - -1.453261687922 - -2.515154481503 - -4.48878727387 - -2.552654481503 - -3.73878727387 - -3.153296849214 - -3.0437228135 - -2.570256175876 - -3.084917965917 - -2.170022214911 - -2.150342302812 - -1.334884220418 - 8.279050614207 - 2.653409401868 - -4.10628705125 - -5.539020595211 - -4.829020595211 - -2.389020595211 - -0.8305062664824 - 4.779050614207 - 0.1534094018679 - -3.40628705125 - -4.109020595211 - -5.199020595211 - -3.329020595211 - -2.520506266482 - 3.579050614207 - 1.253409401868 - -3.70628705125 - -4.079020595211 - -6.139020595211 - -4.639020595211 - -1.020506266482 - 4.779050614207 - 3.753409401868 - -2.80628705125 - -5.389020595211 - -0.8305062664824 - -5.199020595211 - -4.829020595211 - -1.020506266482 - 3.579050614207 - 0.2534094018679 - -2.118348230246 - -4.60628705125 - -5.8778638078 - -5.5078638078 - -3.383083895331 - -3.999020595211 - -1.038044411845 - 0.2196196759168 - 0.2094937335176 - 2.579050614207 - 2.253409401868 - -1.118348230246 - -1.40628705125 - -8.0778638078 - -7.7578638078 - -2.893083895331 - -5.199020595211 - -1.868044411845 - -1.470380324083 - 0.9594937335176 - -2.720949385793 - -0.2465905981321 - -1.418348230246 - -0.8062870512504 - -7.0778638078 - -6.4078638078 - -1.203083895331 - -2.309020595211 - -4.118044411845 - -0.2303803240832 - 3.659493733518 - 4.779050614207 - 0.0853649163389 - 2.253409401868 - 3.526026581237 - -2.218348230246 - -4.653261687922 - -4.68878727387 - -3.81878727387 - -6.045796849214 - -6.5737228135 - -5.685256175876 - -5.819917965917 - -4.650022214911 - -4.102842302812 - 
-3.209884220418 - 6.240979404789 - 7.810979404789 - 0.1209794047887 - -6.139020595211 - -5.879020595211 - -1.829020595211 - -2.420949385793 - -5.046590598132 - -1.918348230246 - 3.19371294875 - 1.7221361922 - 1.496916104669 - 1.5421361922 - 1.606916104669 - -0.2490205952113 - -0.738044411845 - 3.289619675917 - 2.279493733518 - 7.479050614207 - 1.553409401868 - -2.418348230246 - -7.60628705125 - -4.0778638078 - -4.503083895331 - -3.5178638078 - -3.533083895331 - -1.709020595211 - -3.548044411845 - -0.9103803240832 - 2.049493733518 - -6.520949385793 - -0.5465905981321 - -9.118348230246 - -5.90628705125 - -2.3778638078 - -5.903083895331 - -2.0878638078 - -5.253083895331 - -4.599020595211 - -3.668044411845 - -1.580380324083 - 0.1794937335176 - -4.259020595211 - 7.779050614207 - 4.753409401868 - -1.10628705125 - -4.639020595211 - -1.590506266482 - -5.009020595211 - -1.400506266482 - -2.759020595211 - -3.329020595211 - -2.309020595211 - -6.415154481503 - -5.965154481503 - 8.479050614207 - 1.853409401868 - -2.115154481503 - -2.105154481503 - -2.745796849214 - -3.055256175876 - -3.515154481503 - -3.225154481503 - 8.079050614207 - 1.153409401868 - -3.265796849214 - -3.395256175876 - -3.015154481503 - -2.965154481503 - - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 
0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 history_criterion_expected: - - 21.53511643627 - 14.80453604351 - 6.548558251064 - 12.54188075473 - 9.282890198608 - 2.859555210712 - 0.9381817894678 - 0.2048532883114 - 0.8881817894678 - 0.3798532883114 - -0.9101956814319 - -1.36444138824 - -0.9351994446357 - -1.055070381505 - -1.111335532899 - -0.1703442432756 - 1.580641245921 - 19.23511643627 - 13.00453604351 - 13.94855825106 - 11.24188075473 - 6.182890198608 - -1.240444789288 - -0.8618182105322 - -1.995146711689 - -0.9868182105322 - -1.270146711689 - -1.135195681432 - -0.9144413882404 - -3.072699444636 - -1.317570381505 - -0.9238355328992 - 0.9546557567244 - -0.3318587540789 - 8.635116436265 - 15.10453604351 - 6.148558251063 - 4.841880754733 - 5.382890198608 - 2.059555210712 - -3.361818210532 - -2.995146711689 - -3.311818210532 - -2.395146711689 - -2.185195681432 - -2.63944138824 - -1.985199444636 - -1.880070381505 - -1.711335532899 - -1.407844243276 - -0.4818587540789 - 2.735116436265 - 3.404536043506 - 3.148558251063 - 3.141880754733 - 2.482890198608 - 0.5595552107122 - -0.7618182105322 - -2.995146711689 - -0.7993182105322 - -2.245146711689 - -1.885195681432 - -1.96444138824 - -1.647699444636 - -2.292570381505 - -1.486335532899 - -1.557844243276 - -0.8193587540789 - 10.13511643627 - 4.748558251063 - -2.218096467799 - -4.369688200573 - -3.659688200573 - -1.219688200573 - -0.3489655844206 - 6.635116436265 - 2.248558251063 - -1.518096467799 - -2.939688200573 - -4.029688200573 - -2.159688200573 - 
-2.038965584421 - 5.435116436265 - 3.348558251064 - -1.818096467799 - -2.909688200573 - -4.969688200573 - -3.469688200573 - -0.5389655844206 - 6.635116436265 - 5.848558251064 - -0.918096467799 - -4.219688200573 - -0.3489655844206 - -4.029688200573 - -3.659688200573 - -0.5389655844206 - 5.435116436265 - 2.348558251064 - -0.0171098013921 - -2.718096467799 - -4.257793595776 - -3.887793595776 - -2.006947842151 - -2.829688200573 - -0.1835757519589 - 0.8557490906722 - 0.6910344155794 - 4.435116436265 - 4.348558251064 - 0.9828901986079 - 0.481903532201 - -6.457793595776 - -6.137793595776 - -1.516947842151 - -4.029688200573 - -1.013575751959 - -0.8342509093278 - 1.441034415579 - -0.8648835637348 - 1.848558251064 - 0.6828901986079 - 1.081903532201 - -5.457793595776 - -4.787793595776 - 0.1730521578493 - -1.139688200573 - -3.263575751959 - 0.4057490906722 - 4.141034415579 - 6.635116436265 - 2.104536043506 - 4.348558251064 - 5.641880754733 - -0.1171098013921 - -2.640444789288 - -3.195146711689 - -2.325146711689 - -4.777695681432 - -5.49444138824 - -4.762699444636 - -5.027570381505 - -3.966335532899 - -3.510344243276 - -2.694358754079 - 7.410311799427 - 8.980311799427 - 1.290311799427 - -4.969688200573 - -4.709688200573 - -0.659688200573 - -0.5648835637348 - -2.951441748936 - 0.1828901986079 - 5.081903532201 - 3.342206404224 - 2.873052157849 - 3.162206404224 - 2.983052157849 - 0.920311799427 - 0.1164242480411 - 3.925749090672 - 2.761034415579 - 9.335116436265 - 3.648558251063 - -0.3171098013921 - -5.718096467799 - -2.457793595776 - -3.126947842151 - -1.897793595776 - -2.156947842151 - -0.539688200573 - -2.693575751959 - -0.2742509093278 - 2.531034415579 - -4.664883563735 - 1.548558251064 - -7.017109801392 - -4.018096467799 - -0.7577935957756 - -4.526947842151 - -0.4677935957756 - -3.876947842151 - -3.429688200573 - -2.813575751959 - -0.9442509093278 - 0.6610344155794 - -3.089688200573 - 9.635116436265 - 6.848558251064 - 0.781903532201 - -3.469688200573 - -1.108965584421 - 
-3.839688200573 - -0.9189655844206 - -1.589688200573 - -2.159688200573 - -1.139688200573 - -4.661818210532 - -4.211818210532 - 10.33511643627 - 3.948558251064 - -0.3618182105322 - -0.3518182105322 - -1.477695681432 - -2.132699444636 - -1.761818210532 - -1.471818210532 - 9.935116436265 - 3.248558251063 - -1.997695681432 - -2.472699444636 - -1.261818210532 - -1.211818210532 - - 25.01562287811 - 18.67576650474 - 10.71425043997 - 16.92850306334 - 13.83328982937 - 7.61143273431 - 5.780449944004 - 4.918595910462 - 5.730449944004 - 5.093595910462 - 3.573230198002 - 2.843276294294 - 2.98078293018 - 2.569118760852 - 2.229814978179 - 2.901300021005 - 4.398727952741 - 22.71562287811 - 16.87576650474 - 18.11425043997 - 15.62850306334 - 10.73328982937 - 3.51143273431 - 3.980449944004 - 2.718595910462 - 3.855449944004 - 3.443595910462 - 3.348230198002 - 3.293276294294 - 0.8432829301802 - 2.306618760852 - 2.417314978179 - 4.026300021005 - 2.486227952741 - 12.11562287811 - 18.97576650474 - 10.31425043997 - 9.22850306334 - 9.933289829366 - 6.81143273431 - 1.480449944004 - 1.718595910462 - 1.530449944004 - 2.318595910462 - 2.298230198002 - 1.568276294294 - 1.93078293018 - 1.744118760852 - 1.629814978179 - 1.663800021005 - 2.336227952741 - 6.215622878108 - 7.275766504742 - 7.314250439974 - 7.52850306334 - 7.033289829366 - 5.31143273431 - 4.080449944004 - 1.718595910462 - 4.042949944004 - 2.468595910462 - 2.598230198002 - 2.243276294294 - 2.26828293018 - 1.331618760852 - 1.854814978179 - 1.513800021005 - 1.998727952741 - 13.61562287811 - 8.914250439974 - 2.617857443871 - -0.02069875634249 - 0.6893012436575 - 3.129301243658 - 2.348674115464 - 10.11562287811 - 6.414250439974 - 3.317857443871 - 1.409301243658 - 0.3193012436575 - 2.189301243658 - 0.6586741154643 - 8.915622878108 - 7.514250439974 - 3.017857443871 - 1.439301243658 - -0.6206987563425 - 0.8793012436575 - 2.158674115464 - 10.11562287811 - 10.01425043997 - 3.917857443871 - 0.1293012436575 - 2.348674115464 - 0.3193012436575 - 
0.6893012436575 - 2.158674115464 - 8.915622878108 - 6.514250439974 - 4.533289829366 - 2.117857443871 - 0.5381907245488 - 0.9081907245488 - 2.599956711546 - 1.519301243658 - 3.585844975595 - 4.0602621231 - 3.388674115464 - 7.915622878108 - 8.514250439974 - 5.533289829366 - 5.317857443871 - -1.661809275451 - -1.341809275451 - 3.089956711546 - 0.3193012436575 - 2.755844975595 - 2.3702621231 - 4.138674115464 - 2.615622878108 - 6.014250439974 - 5.233289829366 - 5.917857443871 - -0.6618092754512 - 0.008190724548808 - 4.779956711546 - 3.209301243658 - 0.505844975595 - 3.6102621231 - 6.838674115464 - 10.11562287811 - 5.975766504742 - 8.514250439974 - 10.02850306334 - 4.433289829366 - 2.11143273431 - 1.518595910462 - 2.388595910462 - -0.2942698019983 - -1.286723705706 - -0.8467170698198 - -1.403381239148 - -0.6251850218209 - -0.4386999789948 - 0.1237279527411 - 11.75930124366 - 13.32930124366 - 5.639301243658 - -0.6206987563425 - -0.3606987563425 - 3.689301243658 - 2.915622878108 - 1.214250439974 - 4.733289829366 - 9.917857443871 - 8.138190724549 - 7.479956711546 - 7.958190724549 - 7.589956711546 - 5.269301243658 - 3.885844975595 - 7.1302621231 - 5.458674115464 - 12.81562287811 - 7.814250439974 - 4.233289829366 - -0.8821425561292 - 2.338190724549 - 1.479956711546 - 2.898190724549 - 2.449956711546 - 3.809301243658 - 1.075844975595 - 2.9302621231 - 5.228674115464 - -1.184377121892 - 5.714250439974 - -2.466710170634 - 0.8178574438708 - 4.038190724549 - 0.07995671154575 - 4.328190724549 - 0.7299567115457 - 0.9193012436575 - 0.955844975595 - 2.2602621231 - 3.358674115464 - 1.259301243658 - 13.11562287811 - 11.01425043997 - 5.617857443871 - 0.8793012436575 - 1.588674115464 - 0.5093012436575 - 1.778674115464 - 2.759301243658 - 2.189301243658 - 3.209301243658 - 0.1804499440042 - 0.6304499440042 - 13.81562287811 - 8.114250439974 - 4.480449944004 - 4.490449944004 - 3.005730198002 - 1.78328293018 - 3.080449944004 - 3.370449944004 - 13.41562287811 - 7.414250439974 - 2.485730198002 - 
1.44328293018 - 3.580449944004 - 3.630449944004 - - 84.68988065196 - 70.73054388289 - 56.46322643196 - 57.38823556867 - 49.80586460657 - 36.42009030556 - 24.97150307276 - 18.1321687762 - 24.92150307276 - 18.3071687762 - 12.85194135284 - 9.428755329368 - 7.671466840337 - 5.900125885276 - 4.571195356425 - 4.515228493968 - 5.474513604881 - 82.38988065196 - 68.93054388289 - 63.86322643196 - 56.08823556867 - 46.70586460657 - 32.32009030556 - 23.17150307276 - 15.9321687762 - 23.04650307276 - 16.6571687762 - 12.62694135284 - 9.878755329368 - 5.533966840337 - 5.637625885276 - 4.758695356425 - 5.640228493968 - 3.562013604881 - 71.78988065196 - 71.03054388289 - 56.06322643196 - 49.68823556867 - 45.90586460657 - 35.62009030556 - 20.67150307276 - 14.9321687762 - 20.72150307276 - 15.5321687762 - 11.57694135284 - 8.153755329368 - 6.621466840337 - 5.075125885276 - 3.971195356425 - 3.277728493968 - 3.412013604881 - 65.88988065196 - 59.33054388289 - 53.06322643196 - 47.98823556867 - 43.00586460657 - 34.12009030556 - 23.27150307276 - 14.9321687762 - 23.23400307276 - 15.6821687762 - 11.87694135284 - 8.828755329368 - 6.958966840337 - 4.662625885276 - 4.196195356425 - 3.127728493968 - 3.074513604881 - 73.28988065196 - 54.66322643196 - 26.00799822147 - 7.789506147668 - 8.499506147668 - 10.93950614767 - 3.209942501544 - 69.78988065196 - 52.16322643196 - 26.70799822147 - 9.219506147668 - 8.129506147668 - 9.999506147668 - 1.519942501544 - 68.58988065196 - 53.26322643196 - 26.40799822147 - 9.249506147668 - 7.189506147668 - 8.689506147668 - 3.019942501544 - 69.78988065196 - 55.76322643196 - 27.30799822147 - 7.939506147668 - 3.209942501544 - 8.129506147668 - 8.499506147668 - 3.019942501544 - 68.58988065196 - 52.26322643196 - 40.50586460657 - 25.50799822147 - 16.41235765092 - 16.78235765092 - 13.65241143766 - 9.329506147668 - 7.541813269635 - 6.010338273791 - 4.249942501544 - 67.58988065196 - 54.26322643196 - 41.50586460657 - 28.70799822147 - 14.21235765092 - 14.53235765092 - 14.14241143766 - 
8.129506147668 - 6.711813269635 - 4.320338273791 - 4.999942501544 - 62.28988065196 - 51.76322643196 - 41.20586460657 - 29.30799822147 - 15.21235765092 - 15.88235765092 - 15.83241143766 - 11.01950614767 - 4.461813269635 - 5.560338273791 - 7.699942501544 - 69.78988065196 - 58.03054388289 - 54.26322643196 - 50.48823556867 - 40.40586460657 - 30.92009030556 - 14.7321687762 - 15.6021687762 - 8.984441352835 - 5.298755329368 - 3.843966840337 - 1.927625885276 - 1.716195356425 - 1.175228493968 - 1.199513604881 - 19.56950614767 - 21.13950614767 - 13.44950614767 - 7.189506147668 - 7.449506147668 - 11.49950614767 - 62.58988065196 - 46.96322643196 - 40.70586460657 - 33.30799822147 - 24.01235765092 - 18.53241143766 - 23.83235765092 - 18.64241143766 - 13.07950614767 - 7.841813269635 - 9.080338273791 - 6.319942501544 - 72.48988065196 - 53.56322643196 - 40.20586460657 - 22.50799822147 - 18.21235765092 - 12.53241143766 - 18.77235765092 - 13.50241143766 - 11.61950614767 - 5.031813269635 - 4.880338273791 - 6.089942501544 - 58.48988065196 - 51.46322643196 - 33.50586460657 - 24.20799822147 - 19.91235765092 - 11.13241143766 - 20.20235765092 - 11.78241143766 - 8.729506147668 - 4.911813269635 - 4.210338273791 - 4.219942501544 - 9.069506147668 - 72.78988065196 - 56.76322643196 - 29.00799822147 - 8.689506147668 - 2.449942501544 - 8.319506147668 - 2.639942501544 - 10.56950614767 - 9.999506147668 - 11.01950614767 - 19.37150307276 - 19.82150307276 - 73.48988065196 - 53.86322643196 - 23.67150307276 - 23.68150307276 - 12.28444135284 - 6.473966840337 - 22.27150307276 - 22.56150307276 - 73.08988065196 - 53.16322643196 - 11.76444135284 - 6.133966840337 - 22.77150307276 - 22.82150307276 - - 78.17391291542 - 66.8366728159 - 54.32599616455 - 56.48754438985 - 49.80586460657 - 37.60220537333 - 27.26395828245 - 20.80723297571 - 27.21395828245 - 20.98223297571 - 15.60547682225 - 12.11967202128 - 10.23248733714 - 8.30080219923 - 6.80055773407 - 6.572919168714 - 7.365975022026 - 75.87391291542 - 65.0366728159 
- 61.72599616455 - 55.18754438985 - 46.70586460657 - 33.50220537333 - 25.46395828245 - 18.60723297571 - 25.33895828245 - 19.33223297571 - 15.38047682225 - 12.56967202128 - 8.094987337144 - 8.03830219923 - 6.98805773407 - 7.697919168714 - 5.453475022026 - 65.27391291542 - 67.1366728159 - 53.92599616455 - 48.78754438985 - 45.90586460657 - 36.80220537333 - 22.96395828245 - 17.60723297571 - 23.01395828245 - 18.20723297571 - 14.33047682225 - 10.84467202128 - 9.182487337144 - 7.47580219923 - 6.20055773407 - 5.335419168714 - 5.303475022026 - 59.37391291542 - 55.4366728159 - 50.92599616455 - 47.08754438985 - 43.00586460657 - 35.30220537333 - 25.56395828245 - 17.60723297571 - 25.52645828245 - 18.35723297571 - 14.63047682225 - 11.51967202128 - 9.519987337144 - 7.06330219923 - 6.42555773407 - 5.185419168714 - 4.965975022026 - 66.77391291542 - 52.52599616455 - 27.88429931353 - 10.52352617863 - 11.23352617863 - 13.67352617863 - 5.021362784819 - 63.27391291542 - 50.02599616455 - 28.58429931353 - 11.95352617863 - 10.86352617863 - 12.73352617863 - 3.331362784819 - 62.07391291542 - 51.12599616455 - 28.28429931353 - 11.98352617863 - 9.923526178634 - 11.42352617863 - 4.831362784819 - 63.27391291542 - 53.62599616455 - 29.18429931353 - 10.67352617863 - 5.021362784819 - 10.86352617863 - 11.23352617863 - 4.831362784819 - 62.07391291542 - 50.12599616455 - 40.50586460657 - 27.38429931353 - 18.95079727771 - 19.32079727771 - 16.391415976 - 12.06352617863 - 10.02497402658 - 8.153464959245 - 6.061362784819 - 61.07391291542 - 52.12599616455 - 41.50586460657 - 30.58429931353 - 16.75079727771 - 17.07079727771 - 16.881415976 - 10.86352617863 - 9.194974026576 - 6.463464959245 - 6.811362784819 - 55.77391291542 - 49.62599616455 - 41.20586460657 - 31.18429931353 - 17.75079727771 - 18.42079727771 - 18.571415976 - 13.75352617863 - 6.944974026576 - 7.703464959245 - 9.511362784819 - 63.27391291542 - 54.1366728159 - 52.12599616455 - 49.58754438985 - 40.40586460657 - 32.10220537333 - 17.40723297571 - 
18.27723297571 - 11.73797682225 - 7.989672021276 - 6.404987337144 - 4.32830219923 - 3.94555773407 - 3.232919168714 - 3.090975022026 - 22.30352617863 - 23.87352617863 - 16.18352617863 - 9.923526178634 - 10.18352617863 - 14.23352617863 - 56.07391291542 - 44.82599616455 - 40.70586460657 - 35.18429931353 - 26.55079727771 - 21.271415976 - 26.37079727771 - 21.381415976 - 15.81352617863 - 10.32497402658 - 11.22346495925 - 8.131362784819 - 65.97391291542 - 51.42599616455 - 40.20586460657 - 24.38429931353 - 20.75079727771 - 15.271415976 - 21.31079727771 - 16.241415976 - 14.35352617863 - 7.514974026576 - 7.023464959245 - 7.901362784819 - 51.97391291542 - 49.32599616455 - 33.50586460657 - 26.08429931353 - 22.45079727771 - 13.871415976 - 22.74079727771 - 14.521415976 - 11.46352617863 - 7.394974026576 - 6.353464959245 - 6.031362784819 - 11.80352617863 - 66.27391291542 - 54.62599616455 - 30.88429931353 - 11.42352617863 - 4.261362784819 - 11.05352617863 - 4.451362784819 - 13.30352617863 - 12.73352617863 - 13.75352617863 - 21.66395828245 - 22.11395828245 - 66.97391291542 - 51.72599616455 - 25.96395828245 - 25.97395828245 - 15.03797682225 - 9.034987337144 - 24.56395828245 - 24.85395828245 - 66.57391291542 - 51.02599616455 - 14.51797682225 - 8.694987337144 - 25.06395828245 - 25.11395828245 - - 122.1511527598 - 122.6211240639 - 156.1508499375 - -599.1450584808 - -12.38948471101 - 19.33755088278 - 21.40590723345 - 17.86925432536 - 21.35590723345 - 18.04425432536 - 13.84616571438 - 10.95881863539 - 9.418115456519 - 7.704952567843 - 6.351080428381 - 6.22595148445 - 7.093292608367 - 119.8511527598 - 120.8211240639 - 163.5508499375 - -600.4450584808 - -15.48948471101 - 15.23755088278 - 19.60590723345 - 15.66925432536 - 19.48090723345 - 16.39425432536 - 13.62116571438 - 11.40881863539 - 7.280615456519 - 7.442452567843 - 6.538580428381 - 7.35095148445 - 5.180792608367 - 109.2511527598 - 122.9211240639 - 155.7508499375 - -606.8450584808 - -16.28948471101 - 18.53755088278 - 17.10590723345 - 
14.66925432536 - 17.15590723345 - 15.26925432536 - 12.57116571438 - 9.683818635389 - 8.368115456519 - 6.879952567843 - 5.751080428381 - 4.98845148445 - 5.030792608367 - 103.3511527598 - 111.2211240639 - 152.7508499375 - -608.5450584808 - -19.18948471101 - 17.03755088278 - 19.70590723345 - 14.66925432536 - 19.66840723345 - 15.41925432536 - 12.87116571438 - 10.35881863539 - 8.705615456519 - 6.467452567843 - 5.976080428381 - 4.83845148445 - 4.693292608367 - 110.7511527598 - 154.3508499375 - 18.46995434894 - 9.107714197815 - 9.817714197815 - 12.25771419782 - 4.778284133588 - 107.2511527598 - 151.8508499375 - 19.16995434894 - 10.53771419782 - 9.447714197815 - 11.31771419782 - 3.088284133588 - 106.0511527598 - 152.9508499375 - 18.86995434894 - 10.56771419782 - 8.507714197815 - 10.00771419782 - 4.588284133588 - 107.2511527598 - 155.4508499375 - 19.76995434894 - 9.257714197815 - 4.778284133588 - 9.447714197815 - 9.817714197815 - 4.588284133588 - 106.0511527598 - 151.9508499375 - -21.68948471101 - 17.96995434894 - 14.92797213765 - 15.29797213765 - 14.15288063203 - 10.64771419782 - 9.331447324355 - 7.759548240156 - 5.818284133588 - 105.0511527598 - 153.9508499375 - -20.68948471101 - 21.16995434894 - 12.72797213765 - 13.04797213765 - 14.64288063203 - 9.447714197815 - 8.501447324355 - 6.069548240156 - 6.568284133588 - 99.75115275983 - 151.4508499375 - -20.98948471101 - 21.76995434894 - 13.72797213765 - 14.39797213765 - 16.33288063203 - 12.33771419782 - 6.251447324355 - 7.309548240156 - 9.268284133588 - 107.2511527598 - 109.9211240639 - 153.9508499375 - -606.0450584808 - -21.78948471101 - 13.83755088278 - 14.46925432536 - 15.33925432536 - 9.978665714379 - 6.828818635389 - 5.590615456519 - 3.732452567843 - 3.496080428381 - 2.88595148445 - 2.818292608367 - 20.88771419782 - 22.45771419782 - 14.76771419782 - 8.507714197815 - 8.767714197815 - 12.81771419782 - 100.0511527598 - 146.6508499375 - -21.48948471101 - 25.76995434894 - 22.52797213765 - 19.03288063203 - 22.34797213765 - 
19.14288063203 - 14.39771419782 - 9.631447324355 - 10.82954824016 - 7.888284133588 - 109.9511527598 - 153.2508499375 - -21.98948471101 - 14.96995434894 - 16.72797213765 - 13.03288063203 - 17.28797213765 - 14.00288063203 - 12.93771419782 - 6.821447324355 - 6.629548240156 - 7.658284133588 - 95.95115275983 - 151.1508499375 - -28.68948471101 - 16.66995434894 - 18.42797213765 - 11.63288063203 - 18.71797213765 - 12.28288063203 - 10.04771419782 - 6.701447324355 - 5.959548240156 - 5.788284133588 - 10.38771419782 - 110.2511527598 - 156.4508499375 - 21.46995434894 - 10.00771419782 - 4.018284133588 - 9.637714197815 - 4.208284133588 - 11.88771419782 - 11.31771419782 - 12.33771419782 - 15.80590723345 - 16.25590723345 - 110.9511527598 - 153.5508499375 - 20.10590723345 - 20.11590723345 - 13.27866571438 - 8.220615456519 - 18.70590723345 - 18.99590723345 - 110.5511527598 - 152.8508499375 - 12.75866571438 - 7.880615456519 - 19.20590723345 - 19.25590723345 - - -35.35804307658 - -53.29699942572 - -72.06021768605 - -76.24300976651 - -89.66597211166 - -117.5990460751 - -178.3813984168 - -314.1942358597 - -178.4313984168 - -314.0192358597 - -1385.074532566 - 503.9556946656 - 197.4559049595 - 115.7942913474 - 78.11722466414 - 57.55318477565 - 45.51113019711 - -37.65804307658 - -55.09699942572 - -64.66021768605 - -77.54300976651 - -92.76597211166 - -121.6990460751 - -180.1813984168 - -316.3942358597 - -180.3063984168 - -315.6692358597 - -1385.299532566 - 504.4056946656 - 195.3184049595 - 115.5317913474 - 78.30472466414 - 58.67818477565 - 43.59863019711 - -48.25804307658 - -52.99699942572 - -72.46021768605 - -83.94300976651 - -93.56597211166 - -118.3990460751 - -182.6813984168 - -317.3942358597 - -182.6313984168 - -316.7942358597 - -1386.349532566 - 502.6806946656 - 196.4059049595 - 114.9692913474 - 77.51722466414 - 56.31568477565 - 43.44863019711 - -54.15804307658 - -64.69699942572 - -75.46021768605 - -85.64300976651 - -96.46597211166 - -119.8990460751 - -180.0813984168 - -317.3942358597 - 
-180.1188984168 - -316.6442358597 - -1386.049532566 - 503.3556946656 - 196.7434049595 - 114.5567913474 - 77.74222466414 - 56.16568477565 - 43.11113019711 - -46.75804307658 - -73.86021768605 - -148.0932859294 - 1691.297253326 - 1692.007253326 - 1694.447253326 - 38.42217232425 - -50.25804307658 - -76.36021768605 - -147.3932859294 - 1692.727253326 - 1691.637253326 - 1693.507253326 - 36.73217232425 - -51.45804307658 - -75.26021768605 - -147.6932859294 - 1692.757253326 - 1690.697253326 - 1692.197253326 - 38.23217232425 - -50.25804307658 - -72.76021768605 - -146.7932859294 - 1691.447253326 - 38.42217232425 - 1691.637253326 - 1692.007253326 - 38.23217232425 - -51.45804307658 - -76.26021768605 - -98.96597211166 - -148.5932859294 - -233.0828036794 - -232.7128036794 - -508.8494431202 - 1692.837253326 - 148.258324502 - 68.02827286157 - 39.46217232425 - -52.45804307658 - -74.26021768605 - -97.96597211166 - -145.3932859294 - -235.2828036794 - -234.9628036794 - -508.3594431202 - 1691.637253326 - 147.428324502 - 66.33827286157 - 40.21217232425 - -57.75804307658 - -76.76021768605 - -98.26597211166 - -144.7932859294 - -234.2828036794 - -233.6128036794 - -506.6694431202 - 1694.527253326 - 145.178324502 - 67.57827286157 - 42.91217232425 - -50.25804307658 - -65.99699942572 - -74.26021768605 - -83.14300976651 - -99.06597211166 - -123.0990460751 - -317.5942358597 - -316.7242358597 - -1388.942032566 - 499.8256946656 - 193.6284049595 - 111.8217913474 - 75.26222466414 - 54.21318477565 - 41.23613019711 - 1703.077253326 - 1704.647253326 - 1696.957253326 - 1690.697253326 - 1690.957253326 - 1695.007253326 - -57.45804307658 - -81.56021768605 - -98.76597211166 - -140.7932859294 - -225.4828036794 - -503.9694431202 - -225.6628036794 - -503.8594431202 - 1696.587253326 - 148.558324502 - 71.09827286157 - 41.53217232425 - -47.55804307658 - -74.96021768605 - -99.26597211166 - -151.5932859294 - -231.2828036794 - -509.9694431202 - -230.7228036794 - -508.9994431202 - 1695.127253326 - 145.748324502 - 
66.89827286157 - 41.30217232425 - -61.55804307658 - -77.06021768605 - -105.9659721117 - -149.8932859294 - -229.5828036794 - -511.3694431202 - -229.2928036794 - -510.7194431202 - 1692.237253326 - 145.628324502 - 66.22827286157 - 39.43217232425 - 1692.577253326 - -47.25804307658 - -71.76021768605 - -145.0932859294 - 1692.197253326 - 37.66217232425 - 1691.827253326 - 37.85217232425 - 1694.077253326 - 1693.507253326 - 1694.527253326 - -183.9813984168 - -183.5313984168 - -46.55804307658 - -74.66021768605 - -179.6813984168 - -179.6713984168 - -1385.642032566 - 196.2584049595 - -181.0813984168 - -180.7913984168 - -46.95804307658 - -75.36021768605 - -1386.162032566 - 195.9184049595 - -180.5813984168 - -180.5313984168 - - 117.9473464966 - 104.1334190546 - 90.05657396185 - 91.2207789054 - 83.93061974583 - 71.30942158128 - 62.32199544308 - 59.88083883476 - 62.27199544308 - 60.05583883476 - 62.77642327569 - 76.9431054476 - 131.4875801385 - -2342.694563021 - -86.51002021515 - -37.31642203542 - -19.79861800279 - 115.6473464966 - 102.3334190546 - 97.45657396185 - 89.9207789054 - 80.83061974583 - 67.20942158128 - 60.52199544308 - 57.68083883476 - 60.39699544308 - 58.40583883476 - 62.55142327569 - 77.3931054476 - 129.3500801385 - -2342.957063021 - -86.32252021515 - -36.19142203542 - -21.71111800279 - 105.0473464966 - 104.4334190546 - 89.65657396185 - 83.5207789054 - 80.03061974583 - 70.50942158128 - 58.02199544308 - 56.68083883476 - 58.07199544308 - 57.28083883476 - 61.50142327569 - 75.6681054476 - 130.4375801385 - -2343.519563021 - -87.11002021515 - -38.55392203542 - -21.86111800279 - 99.14734649662 - 92.73341905458 - 86.65657396185 - 81.8207789054 - 77.13061974583 - 69.00942158128 - 60.62199544308 - 56.68083883476 - 60.58449544308 - 57.43083883476 - 61.80142327569 - 76.3431054476 - 130.7750801385 - -2343.932063021 - -86.88502021515 - -38.70392203542 - -22.19861800279 - 106.5473464966 - 88.25657396185 - 61.9476238727 - 64.62033506263 - 65.33033506263 - 67.77033506263 - 
-17.38684121661 - 103.0473464966 - 85.75657396185 - 62.6476238727 - 66.05033506263 - 64.96033506263 - 66.83033506263 - -19.07684121661 - 101.8473464966 - 86.85657396185 - 62.3476238727 - 66.08033506263 - 64.02033506263 - 65.52033506263 - -17.57684121661 - 103.0473464966 - 89.35657396185 - 63.2476238727 - 64.77033506263 - -17.38684121661 - 64.96033506263 - 65.33033506263 - -17.57684121661 - 101.8473464966 - 85.85657396185 - 74.63061974583 - 61.4476238727 - 55.64384266576 - 56.01384266576 - 58.81780905206 - 66.16033506263 - 253.6554490216 - -52.38720394238 - -16.34684121661 - 100.8473464966 - 87.85657396185 - 75.63061974583 - 64.6476238727 - 53.44384266576 - 53.76384266576 - 59.30780905206 - 64.96033506263 - 252.8254490216 - -54.07720394238 - -15.59684121661 - 95.54734649662 - 85.35657396185 - 75.33061974583 - 65.2476238727 - 54.44384266576 - 55.11384266576 - 60.99780905206 - 67.85033506263 - 250.5754490216 - -52.83720394238 - -12.89684121661 - 103.0473464966 - 91.43341905458 - 87.85657396185 - 84.3207789054 - 74.53061974583 - 65.80942158128 - 56.48083883476 - 57.35083883476 - 58.90892327569 - 72.8131054476 - 127.6600801385 - -2346.667063021 - -89.36502021515 - -40.65642203542 - -24.07361800279 - 76.40033506263 - 77.97033506263 - 70.28033506263 - 64.02033506263 - 64.28033506263 - 68.33033506263 - 95.84734649662 - 80.55657396185 - 74.83061974583 - 69.2476238727 - 63.24384266576 - 63.69780905206 - 63.06384266576 - 63.80780905206 - 69.91033506263 - 253.9554490216 - -49.31720394238 - -14.27684121661 - 105.7473464966 - 87.15657396185 - 74.33061974583 - 58.4476238727 - 57.44384266576 - 57.69780905206 - 58.00384266576 - 58.66780905206 - 68.45033506263 - 251.1454490216 - -53.51720394238 - -14.50684121661 - 91.74734649662 - 85.05657396185 - 67.63061974583 - 60.1476238727 - 59.14384266576 - 56.29780905206 - 59.43384266576 - 56.94780905206 - 65.56033506263 - 251.0254490216 - -54.18720394238 - -16.37684121661 - 65.90033506263 - 106.0473464966 - 90.35657396185 - 64.9476238727 - 
65.52033506263 - -18.14684121661 - 65.15033506263 - -17.95684121661 - 67.40033506263 - 66.83033506263 - 67.85033506263 - 56.72199544308 - 57.17199544308 - 106.7473464966 - 87.45657396185 - 61.02199544308 - 61.03199544308 - 62.20892327569 - 130.2900801385 - 59.62199544308 - 59.91199544308 - 106.3473464966 - 86.75657396185 - 61.68892327569 - 129.9500801385 - 60.12199544308 - 60.17199544308 - - 175.7616267494 - 135.815392655 - 107.427429421 - 99.44443456745 - 85.70608965926 - 64.3171217786 - 44.36460041182 - 32.95338522348 - 44.31460041182 - 33.12838522348 - 24.75298136325 - 19.273476213 - 15.97636612239 - 13.00317519799 - 10.70826764037 - 9.858997178816 - 10.15607036729 - 173.4616267494 - 134.015392655 - 114.827429421 - 98.14443456745 - 82.60608965926 - 60.2171217786 - 42.56460041182 - 30.75338522348 - 42.43960041182 - 31.47838522348 - 24.52798136325 - 19.723476213 - 13.83886612239 - 12.74067519799 - 10.89576764037 - 10.98399717882 - 8.243570367288 - 162.8616267494 - 136.115392655 - 107.027429421 - 91.74443456745 - 81.80608965926 - 63.5171217786 - 40.06460041182 - 29.75338522348 - 40.11460041182 - 30.35338522348 - 23.47798136325 - 17.998476213 - 14.92636612239 - 12.17817519799 - 10.10826764037 - 8.621497178816 - 8.093570367288 - 156.9616267494 - 124.415392655 - 104.027429421 - 90.04443456745 - 78.90608965926 - 62.0171217786 - 42.66460041182 - 29.75338522348 - 42.62710041182 - 30.50338522348 - 23.77798136325 - 18.673476213 - 15.26386612239 - 11.76567519799 - 10.33326764037 - 8.471497178816 - 7.756070367288 - 164.3616267494 - 105.627429421 - 48.8819398286 - 18.5818314891 - 19.2918314891 - 21.7318314891 - 7.60022447721 - 160.8616267494 - 103.127429421 - 49.5819398286 - 20.0118314891 - 18.9218314891 - 20.7918314891 - 5.91022447721 - 159.6616267494 - 104.227429421 - 49.2819398286 - 20.0418314891 - 17.9818314891 - 19.4818314891 - 7.41022447721 - 160.8616267494 - 106.727429421 - 50.1819398286 - 18.7318314891 - 7.60022447721 - 18.9218314891 - 19.2918314891 - 7.41022447721 - 
159.6616267494 - 103.227429421 - 76.40608965926 - 48.3819398286 - 33.22986519451 - 33.59986519451 - 26.87225158213 - 20.1218314891 - 15.2111293494 - 11.73215109931 - 8.64022447721 - 158.6616267494 - 105.227429421 - 77.40608965926 - 51.5819398286 - 31.02986519451 - 31.34986519451 - 27.36225158213 - 18.9218314891 - 14.3811293494 - 10.04215109931 - 9.39022447721 - 153.3616267494 - 102.727429421 - 77.10608965926 - 52.1819398286 - 32.02986519451 - 32.69986519451 - 29.05225158213 - 21.8118314891 - 12.1311293494 - 11.28215109931 - 12.09022447721 - 160.8616267494 - 123.115392655 - 105.227429421 - 92.54443456745 - 76.30608965926 - 58.8171217786 - 29.55338522348 - 30.42338522348 - 20.88548136325 - 15.143476213 - 12.14886612239 - 9.030675197988 - 7.853267640371 - 6.518997178816 - 5.881070367288 - 30.3618314891 - 31.9318314891 - 24.2418314891 - 17.9818314891 - 18.2418314891 - 22.2918314891 - 153.6616267494 - 97.927429421 - 76.60608965926 - 56.1819398286 - 40.82986519451 - 31.75225158213 - 40.64986519451 - 31.86225158213 - 23.8718314891 - 15.5111293494 - 14.80215109931 - 10.71022447721 - 163.5616267494 - 104.527429421 - 76.10608965926 - 45.3819398286 - 35.02986519451 - 25.75225158213 - 35.58986519451 - 26.72225158213 - 22.4118314891 - 12.7011293494 - 10.60215109931 - 10.48022447721 - 149.5616267494 - 102.427429421 - 69.40608965926 - 47.0819398286 - 36.72986519451 - 24.35225158213 - 37.01986519451 - 25.00225158213 - 19.5218314891 - 12.5811293494 - 9.93215109931 - 8.61022447721 - 19.8618314891 - 163.8616267494 - 107.727429421 - 51.8819398286 - 19.4818314891 - 6.84022447721 - 19.1118314891 - 7.03022447721 - 21.3618314891 - 20.7918314891 - 21.8118314891 - 38.76460041182 - 39.21460041182 - 164.5616267494 - 104.827429421 - 43.06460041182 - 43.07460041182 - 24.18548136325 - 14.77886612239 - 41.66460041182 - 41.95460041182 - 164.1616267494 - 104.127429421 - 23.66548136325 - 14.43886612239 - 42.16460041182 - 42.21460041182 - - 28.24095690087 - 21.59536620662 - 13.25712029593 - 
19.07487076201 - 15.58993695617 - 8.655383440756 - 5.734068533914 - 4.165799626156 - 5.684068533914 - 4.340799626156 - 2.383275058766 - 1.39686578611 - 1.39855054723 - 0.9316549828049 - 0.5908378210265 - 1.296279986221 - 2.850605037275 - 25.94095690087 - 19.79536620662 - 20.65712029593 - 17.77487076201 - 12.48993695617 - 4.555383440756 - 3.934068533914 - 1.965799626156 - 3.809068533914 - 2.690799626156 - 2.158275058766 - 1.84686578611 - -0.7389494527701 - 0.6691549828049 - 0.7783378210265 - 2.421279986221 - 0.9381050372751 - 15.34095690087 - 21.89536620662 - 12.85712029593 - 11.37487076201 - 11.68993695617 - 7.855383440756 - 1.434068533914 - 0.9657996261561 - 1.484068533914 - 1.565799626156 - 1.108275058766 - 0.12186578611 - 0.3485505472299 - 0.1066549828049 - -0.009162178973513 - 0.05877998622137 - 0.7881050372751 - 9.440956900867 - 10.19536620662 - 9.857120295928 - 9.674870762006 - 8.789936956172 - 6.355383440756 - 4.034068533914 - 0.9657996261561 - 3.996568533914 - 1.715799626156 - 1.408275058766 - 0.79686578611 - 0.6860505472299 - -0.3058450171951 - 0.2158378210265 - -0.09122001377863 - 0.4506050372751 - 16.84095690087 - 11.45712029593 - 3.061569786218 - -1.357147755765 - -0.6471477557654 - 1.792852244235 - 0.8347758927937 - 13.34095690087 - 8.957120295928 - 3.761569786218 - 0.07285224423464 - -1.017147755765 - 0.8528522442346 - -0.8552241072063 - 12.14095690087 - 10.05712029593 - 3.461569786218 - 0.1028522442346 - -1.957147755765 - -0.4571477557654 - 0.6447758927937 - 13.34095690087 - 12.55712029593 - 4.361569786218 - -1.207147755765 - 0.8347758927937 - -1.017147755765 - -0.6471477557654 - 0.6447758927937 - 12.14095690087 - 9.057120295928 - 6.289936956172 - 2.561569786218 - 0.09812922077451 - 0.4681292207745 - 1.601218748851 - 0.1828522442346 - 1.967848278564 - 2.434721057271 - 1.874775892794 - 11.14095690087 - 11.05712029593 - 7.289936956172 - 5.761569786218 - -2.101870779225 - -1.781870779225 - 2.091218748851 - -1.017147755765 - 1.137848278564 - 
0.744721057271 - 2.624775892794 - 5.840956900867 - 8.557120295928 - 6.989936956172 - 6.361569786218 - -1.101870779225 - -0.4318707792255 - 3.781218748851 - 1.872852244235 - -1.112151721436 - 1.984721057271 - 5.324775892794 - 13.34095690087 - 8.895366206617 - 11.05712029593 - 12.17487076201 - 6.189936956172 - 3.155383440756 - 0.7657996261561 - 1.635799626156 - -1.484224941234 - -2.73313421389 - -2.42894945277 - -3.040845017195 - -2.264162178974 - -2.043720013779 - -1.424394962725 - 10.42285224423 - 11.99285224423 - 4.302852244235 - -1.957147755765 - -1.697147755765 - 2.352852244235 - 6.140956900867 - 3.757120295928 - 6.489936956172 - 10.36156978622 - 7.698129220775 - 6.481218748851 - 7.518129220775 - 6.591218748851 - 3.932852244235 - 2.267848278564 - 5.504721057271 - 3.944775892794 - 16.04095690087 - 10.35712029593 - 5.989936956172 - -0.4384302137822 - 1.898129220775 - 0.4812187488511 - 2.458129220775 - 1.451218748851 - 2.472852244235 - -0.5421517214358 - 1.304721057271 - 3.714775892794 - 2.040956900867 - 8.257120295928 - -0.7100630438283 - 1.261569786218 - 3.598129220775 - -0.9187812511489 - 3.888129220775 - -0.2687812511489 - -0.4171477557654 - -0.6621517214358 - 0.634721057271 - 1.844775892794 - -0.07714775576537 - 16.34095690087 - 13.55712029593 - 6.061569786218 - -0.4571477557654 - 0.07477589279366 - -0.8271477557654 - 0.2647758927937 - 1.422852244235 - 0.8528522442346 - 1.872852244235 - 0.1340685339144 - 0.5840685339144 - 17.04095690087 - 10.65712029593 - 4.434068533914 - 4.444068533914 - 1.815775058766 - 0.2010505472299 - 3.034068533914 - 3.324068533914 - 16.64095690087 - 9.957120295928 - 1.295775058766 - -0.1389494527701 - 3.534068533914 - 3.584068533914 - - 19.67905061421 - 12.78536491634 - 4.453409401868 - 10.42602658124 - 7.181651769754 - 0.8467383120783 - -0.8151544815029 - -1.28878727387 - -0.8651544815029 - -1.11378727387 - -2.178296849214 - -2.4437228135 - -1.857756175876 - -1.847417965917 - -1.795022214911 - -0.7628423028115 - 1.065115779582 - 
17.37905061421 - 10.98536491634 - 11.85340940187 - 9.126026581237 - 4.081651769754 - -3.253261687922 - -2.615154481503 - -3.48878727387 - -2.740154481503 - -2.76378727387 - -2.403296849214 - -1.9937228135 - -3.995256175876 - -2.109917965917 - -1.607522214911 - 0.3621576971885 - -0.8473842204181 - 6.779050614207 - 13.08536491634 - 4.053409401868 - 2.726026581237 - 3.281651769754 - 0.04673831207827 - -5.115154481503 - -4.48878727387 - -5.065154481503 - -3.88878727387 - -3.453296849214 - -3.7187228135 - -2.907756175876 - -2.672417965917 - -2.395022214911 - -2.000342302812 - -0.9973842204181 - 0.8790506142075 - 1.385364916339 - 1.053409401868 - 1.026026581237 - 0.3816517697539 - -1.453261687922 - -2.515154481503 - -4.48878727387 - -2.552654481503 - -3.73878727387 - -3.153296849214 - -3.0437228135 - -2.570256175876 - -3.084917965917 - -2.170022214911 - -2.150342302812 - -1.334884220418 - 8.279050614207 - 2.653409401868 - -4.10628705125 - -5.539020595211 - -4.829020595211 - -2.389020595211 - -0.8305062664824 - 4.779050614207 - 0.1534094018679 - -3.40628705125 - -4.109020595211 - -5.199020595211 - -3.329020595211 - -2.520506266482 - 3.579050614207 - 1.253409401868 - -3.70628705125 - -4.079020595211 - -6.139020595211 - -4.639020595211 - -1.020506266482 - 4.779050614207 - 3.753409401868 - -2.80628705125 - -5.389020595211 - -0.8305062664824 - -5.199020595211 - -4.829020595211 - -1.020506266482 - 3.579050614207 - 0.2534094018679 - -2.118348230246 - -4.60628705125 - -5.8778638078 - -5.5078638078 - -3.383083895331 - -3.999020595211 - -1.038044411845 - 0.2196196759168 - 0.2094937335176 - 2.579050614207 - 2.253409401868 - -1.118348230246 - -1.40628705125 - -8.0778638078 - -7.7578638078 - -2.893083895331 - -5.199020595211 - -1.868044411845 - -1.470380324083 - 0.9594937335176 - -2.720949385793 - -0.2465905981321 - -1.418348230246 - -0.8062870512504 - -7.0778638078 - -6.4078638078 - -1.203083895331 - -2.309020595211 - -4.118044411845 - -0.2303803240832 - 3.659493733518 - 
4.779050614207 - 0.0853649163389 - 2.253409401868 - 3.526026581237 - -2.218348230246 - -4.653261687922 - -4.68878727387 - -3.81878727387 - -6.045796849214 - -6.5737228135 - -5.685256175876 - -5.819917965917 - -4.650022214911 - -4.102842302812 - -3.209884220418 - 6.240979404789 - 7.810979404789 - 0.1209794047887 - -6.139020595211 - -5.879020595211 - -1.829020595211 - -2.420949385793 - -5.046590598132 - -1.918348230246 - 3.19371294875 - 1.7221361922 - 1.496916104669 - 1.5421361922 - 1.606916104669 - -0.2490205952113 - -0.738044411845 - 3.289619675917 - 2.279493733518 - 7.479050614207 - 1.553409401868 - -2.418348230246 - -7.60628705125 - -4.0778638078 - -4.503083895331 - -3.5178638078 - -3.533083895331 - -1.709020595211 - -3.548044411845 - -0.9103803240832 - 2.049493733518 - -6.520949385793 - -0.5465905981321 - -9.118348230246 - -5.90628705125 - -2.3778638078 - -5.903083895331 - -2.0878638078 - -5.253083895331 - -4.599020595211 - -3.668044411845 - -1.580380324083 - 0.1794937335176 - -4.259020595211 - 7.779050614207 - 4.753409401868 - -1.10628705125 - -4.639020595211 - -1.590506266482 - -5.009020595211 - -1.400506266482 - -2.759020595211 - -3.329020595211 - -2.309020595211 - -6.415154481503 - -5.965154481503 - 8.479050614207 - 1.853409401868 - -2.115154481503 - -2.105154481503 - -2.745796849214 - -3.055256175876 - -3.515154481503 - -3.225154481503 - 8.079050614207 - 1.153409401868 - -3.265796849214 - -3.395256175876 - -3.015154481503 - -2.965154481503 - - 1050.519509418 - 2633.521525076 - -3381.282955438 - -933.4604542894 - -516.3685328612 - -257.5208155114 - -113.8324082065 - -66.57519632661 - -113.8824082065 - -66.40019632661 - -44.86075536786 - -32.39601556788 - -23.86432104696 - -18.5440484742 - -14.76646254613 - -11.02441930035 - -7.169273972705 - 1048.219509418 - 2631.721525076 - -3373.882955438 - -934.7604542894 - -519.4685328612 - -261.6208155114 - -115.6324082065 - -68.77519632661 - -115.7574082065 - -68.05019632661 - -45.08575536786 - -31.94601556788 - 
-26.00182104696 - -18.8065484742 - -14.57896254613 - -9.899419300347 - -9.081773972705 - 1037.619509418 - 2633.821525076 - -3381.682955438 - -941.1604542894 - -520.2685328612 - -258.3208155114 - -118.1324082065 - -69.77519632661 - -118.0824082065 - -69.17519632661 - -46.13575536786 - -33.67101556788 - -24.91432104696 - -19.3690484742 - -15.36646254613 - -12.26191930035 - -9.231773972705 - 1031.719509418 - 2622.121525076 - -3384.682955438 - -942.8604542894 - -523.1685328612 - -259.8208155114 - -115.5324082065 - -69.77519632661 - -115.5699082065 - -69.02519632661 - -45.83575536786 - -32.99601556788 - -24.57682104696 - -19.7815484742 - -15.14146254613 - -12.41191930035 - -9.569273972705 - 1039.119509418 - -3383.082955438 - -165.9142242952 - -41.04746332279 - -40.33746332279 - -37.89746332279 - -8.239694706637 - 1035.619509418 - -3385.582955438 - -165.2142242952 - -39.61746332279 - -40.70746332279 - -38.83746332279 - -9.929694706637 - 1034.419509418 - -3384.482955438 - -165.5142242952 - -39.58746332279 - -41.64746332279 - -40.14746332279 - -8.429694706637 - 1035.619509418 - -3381.982955438 - -164.6142242952 - -40.89746332279 - -8.239694706637 - -40.70746332279 - -40.33746332279 - -8.429694706637 - 1034.419509418 - -3385.482955438 - -525.6685328612 - -166.4142242952 - -89.99572115575 - -89.62572115575 - -55.58585272365 - -39.50746332279 - -20.14157855679 - -11.29463725292 - -7.199694706637 - 1033.419509418 - -3383.482955438 - -524.6685328612 - -163.2142242952 - -92.19572115575 - -91.87572115575 - -55.09585272365 - -40.70746332279 - -20.97157855679 - -12.98463725292 - -6.449694706637 - 1028.119509418 - -3385.982955438 - -524.9685328612 - -162.6142242952 - -91.19572115575 - -90.52572115575 - -53.40585272365 - -37.81746332279 - -23.22157855679 - -11.74463725292 - -3.749694706637 - 1035.619509418 - 2620.821525076 - -3383.482955438 - -940.3604542894 - -525.7685328612 - -263.0208155114 - -69.97519632661 - -69.10519632661 - -48.72825536786 - -36.52601556788 - -27.69182104696 - 
-22.5165484742 - -17.62146254613 - -14.36441930035 - -11.4442739727 - -29.26746332279 - -27.69746332279 - -35.38746332279 - -41.64746332279 - -41.38746332279 - -37.33746332279 - 1028.419509418 - -3390.782955438 - -525.4685328612 - -158.6142242952 - -82.39572115575 - -50.70585272365 - -82.57572115575 - -50.59585272365 - -35.75746332279 - -19.84157855679 - -8.224637252918 - -5.129694706637 - 1038.319509418 - -3384.182955438 - -525.9685328612 - -169.4142242952 - -88.19572115575 - -56.70585272365 - -87.63572115575 - -55.73585272365 - -37.21746332279 - -22.65157855679 - -12.42463725292 - -5.359694706637 - 1024.319509418 - -3386.282955438 - -532.6685328612 - -167.7142242952 - -86.49572115575 - -58.10585272365 - -86.20572115575 - -57.45585272365 - -40.10746332279 - -22.77157855679 - -13.09463725292 - -7.229694706637 - -39.76746332279 - 1038.619509418 - -3380.982955438 - -162.9142242952 - -40.14746332279 - -8.999694706637 - -40.51746332279 - -8.809694706637 - -38.26746332279 - -38.83746332279 - -37.81746332279 - -119.4324082065 - -118.9824082065 - 1039.319509418 - -3383.882955438 - -115.1324082065 - -115.1224082065 - -45.42825536786 - -25.06182104696 - -116.5324082065 - -116.2424082065 - 1038.919509418 - -3384.582955438 - -45.94825536786 - -25.40182104696 - -116.0324082065 - -115.9824082065 history_x: - - 0.15 - 0.008 - 0.01 - - 0.25 - 0.008 - 0.01 - - 0.15 - 0.108 - 0.01 - - 0.15 - 0.008 - 0.11 - - 0.1596177824551 - -0.07539624732067 - 0.08766385239892 - - 0.2 - 0.008531162120637 - -0.002952684076318 - - 0.1505141617677 - -0.04199731338289 - 0.009934485345754 - - 0.1374618789969 - 0.007934485345754 - -0.03840238867598 - - 0.1505250437069 - 0.007964908595663 - 0.01275913089388 - - 0.149883507892 - 0.008098080768719 - 0.009146244784311 - - 0.0 - 0.0 - 0.0 history_x_expected: - - 0.15 - 0.008 - 0.01 - - 0.25 - 0.008 - 0.01 - - 0.15 - 0.108 - 0.01 - - 0.15 - 0.008 - 0.11 - - 0.1596177824551 - -0.07539624732067 - 0.08766385239892 - - 0.2 - 0.008531162120637 - 
-0.002952684076318 - - 0.1505141617677 - -0.04199731338289 - 0.009934485345754 - - 0.1374618789969 - 0.007934485345754 - -0.03840238867598 - - 0.1505250437069 - 0.007964908595663 - 0.01275913089388 - - 0.149883507892 - 0.008098080768719 - 0.009146244784311 - - 0.1716712756093 - -0.003385426549061 - 0.004854131368058 index_best_x: 9 linear_terms: - -1.782609615475e-10 - -3.274180926383e-11 - 2.546585164964e-11 lower_bounds: null model_improving_points: - - 0.025661432597987588 - 0.004659684320230673 - 0.0 - - -0.005326886922235527 - -0.003923230748772294 - 0.0 - - 0.14451544438283606 - 0.03415020862754581 - 0.0 model_indices: - 8 - 0 - 7 - 6 - 5 - 4 - 3 model_indices_expected: - 8 - 0 - 10 - 6 - 5 - 4 - 3 n: 3 n_modelpoints: 2 n_modelpoints_expected: 3 square_terms: - - 23918483.46505 - -221133.0482641 - -3862092.694171 - - -221133.0482641 - 3420438.11792 - -157370.8759191 - - -3862092.694171 - -157370.8759191 - 925172.8526537 upper_bound: null ================================================ FILE: tests/optimagic/optimizers/_pounders/fixtures/find_affine_points_nonzero_i.yaml ================================================ --- c: 10 delta: 0.05 history_x: - - 0.15 - 0.008 - 0.01 - - 0.25 - 0.008 - 0.01 - - 0.15 - 0.108 - 0.01 - - 0.15 - 0.008 - 0.11 - - 0.1596177824551 - -0.07539624732067 - 0.08766385239892 model_improving_points: - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 model_improving_points_expected: - - 0.19235564910118408 - 0.0 - 0.0 - - -1.6679249464133494 - 0.0 - 2.0 - - 1.5532770479784463 - 2.0 - 0.0 model_indices: - 1 - 2 - 3 - 0 - 2 - 0 - 2 model_indices_expected: - 4 - 3 - 2 - 0 - 2 - 0 - 2 n: 3 n_modelpoints: 0 n_modelpoints_expected: 3 project_x_onto_null: true theta1: 1.0e-05 x_accepted: - 0.15 - 0.008 - 0.01 ================================================ FILE: tests/optimagic/optimizers/_pounders/fixtures/find_affine_points_nonzero_ii.yaml ================================================ --- c: 10 delta: 0.025 history_x: - - 
0.15 - 0.008 - 0.01 - - 0.25 - 0.008 - 0.01 - - 0.15 - 0.108 - 0.01 - - 0.15 - 0.008 - 0.11 - - 0.1596177824551 - -0.07539624732067 - 0.08766385239892 - - 0.2 - 0.008531162120637 - -0.002952684076318 - - 0.1505141617677 - -0.04199731338289 - 0.009934485345754 - - 0.1374618789969 - 0.007934485345754 - -0.03840238867598 - - 0.1505250437069 - 0.007964908595663 - 0.01275913089388 model_improving_points: - - 0.021001748277756915 - 0.0 - 0.0 - - -0.001403656173463233 - 0.0 - 0.0 - - 0.11036523575529027 - 0.0 - 0.0 model_improving_points_expected: - - 0.021001748277756915 - -0.5015248401252026 - 0.02056647070703521 - - -0.001403656173463233 - -0.0026205861698429256 - -1.9998925353155312 - - 0.11036523575529027 - -1.9360955470393286 - -0.0026205861698429256 model_indices: - 8 - 7 - 6 - 5 - 4 - 3 - 2 model_indices_expected: - 8 - 7 - 6 - 5 - 4 - 3 - 2 n: 3 n_modelpoints: 1 n_modelpoints_expected: 3 project_x_onto_null: true theta1: 1.0e-05 x_accepted: - 0.15 - 0.008 - 0.01 ================================================ FILE: tests/optimagic/optimizers/_pounders/fixtures/find_affine_points_nonzero_iii.yaml ================================================ --- c: 10 delta: 0.00625 history_x: - - 0.15 - 0.008 - 0.01 - - 0.25 - 0.008 - 0.01 - - 0.15 - 0.108 - 0.01 - - 0.15 - 0.008 - 0.11 - - 0.1596177824551 - -0.07539624732067 - 0.08766385239892 - - 0.2 - 0.008531162120637 - -0.002952684076318 - - 0.1505141617677 - -0.04199731338289 - 0.009934485345754 - - 0.1374618789969 - 0.007934485345754 - -0.03840238867598 - - 0.1505250437069 - 0.007964908595663 - 0.01275913089388 - - 0.149883507892 - 0.008098080768719 - 0.009146244784311 - - 0.1716712756093 - -0.003385426549061 - 0.004854131368058 - - 0.1499498551576 - 0.008185153997901 - 0.009255435636305 - - 0.1486949409413 - 0.001680047032405 - 0.01940631659429 - - 0.1494212312914 - 0.005607806220598 - 0.01308958287811 - - 0.149295008289 - 0.006607320278458 - 0.009649557844843 - - 0.149373572031 - 0.006510591080504 - 0.01023212020758 
- - 0.1492202503973 - 0.005397396245708 - 0.01181780391516 - - 0.1493007023164 - 0.005582573542213 - 0.01196522368907 - - 0.1493027858782 - 0.005554596994372 - 0.01202415479218 - - 0.1485523407947 - 0.005613111126492 - 0.01186195785149 - - 0.1490051236084 - 0.005576645446634 - 0.01199907703224 - - 0.1524257023164 - 0.005515392037394 - 0.01196491883293 - - 0.1554482518753 - 0.005387691742364 - 0.01208573313392 - - 0.1555507023164 - 0.00569480453193 - 0.01169255682528 - - 0.1618007023164 - 0.0057804309848 - 0.01147013266041 - - 0.1743007023164 - 0.005924712736456 - 0.01107680666362 - - 0.1895416078574 - 0.006133481950173 - 0.01054081360503 - - 0.1903649025976 - 0.006140000867368 - 0.01052352873034 - - 0.1653649025976 - 0.006034565329377 - 0.01081209329213 - - 0.1902653863983 - 0.006141938821062 - 0.01051661018242 model_improving_points: - - -0.015922591887171933 - -0.13172715842924898 - 0.0 - - 0.00031007259112286745 - -0.0010430267511385427 - 0.0 - - -0.0011069676676378482 - 0.002765579949600694 - 0.0 model_improving_points_expected: - - -0.015922591887171933 - -0.13172715842924898 - -3.999999999999999 - - 0.00031007259112286745 - -0.0010430267511385427 - -0.01686968607843814 - - -0.0011069676676378482 - 0.002765579949600694 - 0.04617032988619707 model_indices: - 29 - 26 - 25 - 5 - 28 - 24 - 23 model_indices_expected: - 29 - 26 - 28 - 5 - 28 - 24 - 23 n: 3 n_modelpoints: 2 n_modelpoints_expected: 3 project_x_onto_null: true theta1: 1.0e-05 x_accepted: - 0.1903649025976 - 0.006140000867368 - 0.01052352873034 ================================================ FILE: tests/optimagic/optimizers/_pounders/fixtures/find_affine_points_zero_i.yaml ================================================ --- c: 1.7320508075688772 delta: 0.05 history_x: - - 0.15 - 0.008 - 0.01 - - 0.25 - 0.008 - 0.01 - - 0.15 - 0.108 - 0.01 - - 0.15 - 0.008 - 0.11 - - 0.1596177824551 - -0.07539624732067 - 0.08766385239892 - - 0.2 - 0.008531162120637 - -0.002952684076318 - - 0.1505141617677 - 
-0.04199731338289 - 0.009934485345754 - - 0.1374618789969 - 0.007934485345754 - -0.03840238867598 model_improving_points: - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 model_improving_points_expected: - - -0.2507624200626013 - 0.010283235353517606 - 1.0000000000000002 - - -0.0013102930849214628 - -0.9999462676577656 - 0.010623242412742123 - - -0.9680477735196643 - -0.0013102930849214628 - -0.2590536815263693 model_indices: - 5 - 6 - 7 - 2 - 1 - 0 - 0 model_indices_expected: - 7 - 6 - 5 - 2 - 1 - 0 - 0 n: 3 n_modelpoints: 0 n_modelpoints_expected: 3 project_x_onto_null: false theta1: 1.0e-05 x_accepted: - 0.15 - 0.008 - 0.01 ================================================ FILE: tests/optimagic/optimizers/_pounders/fixtures/find_affine_points_zero_ii.yaml ================================================ --- c: 1.7320508075688772 delta: 0.05 history_x: - - 0.15 - 0.008 - 0.01 - - 0.25 - 0.008 - 0.01 - - 0.15 - 0.108 - 0.01 - - 0.15 - 0.008 - 0.11 - - 0.1596177824551 - -0.07539624732067 - 0.08766385239892 - - 0.2 - 0.008531162120637 - -0.002952684076318 - - 0.1505141617677 - -0.04199731338289 - 0.009934485345754 - - 0.1374618789969 - 0.007934485345754 - -0.03840238867598 - - 0.1505250437069 - 0.007964908595663 - 0.01275913089388 - - 0.149883507892 - 0.008098080768719 - 0.009146244784311 - - 0.1716712756093 - -0.003385426549061 - 0.004854131368058 model_improving_points: - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 model_improving_points_expected: - - 0.4357553543466791 - 0.012830716298993794 - -0.24843257790248596 - - -0.22967014635560642 - -0.0026634434611177635 - -0.0032719084593076098 - - -0.0858422683250766 - 0.07225772219141803 - -0.9509726692058914 model_indices: - 8 - 0 - 10 - 6 - 5 - 4 - 3 model_indices_expected: - 10 - 8 - 7 - 6 - 5 - 4 - 3 n: 3 n_modelpoints: 0 n_modelpoints_expected: 3 project_x_onto_null: false theta1: 1.0e-05 x_accepted: - 0.149883507892 - 0.008098080768719 - 0.009146244784311 
================================================ FILE: tests/optimagic/optimizers/_pounders/fixtures/find_affine_points_zero_iii.yaml ================================================ --- c: 1.7320508075688772 delta: 0.05 history_x: - - 0.15 - 0.008 - 0.01 - - 0.25 - 0.008 - 0.01 - - 0.15 - 0.108 - 0.01 - - 0.15 - 0.008 - 0.11 - - 0.1596177824551 - -0.07539624732067 - 0.08766385239892 - - 0.2 - 0.008531162120637 - -0.002952684076318 model_improving_points: - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 model_improving_points_expected: - - 1.0 - 0.0 - 0.0 - - 0.010623242412742123 - 0.0 - 0.0 - - -0.2590536815263693 - 0.0 - 0.0 model_indices: - 0 - 4 - 3 - 2 - 1 - 0 - 0 model_indices_expected: - 5 - 4 - 3 - 2 - 1 - 0 - 0 n: 3 n_modelpoints: 0 n_modelpoints_expected: 1 project_x_onto_null: false theta1: 1.0e-05 x_accepted: - 0.15 - 0.008 - 0.01 ================================================ FILE: tests/optimagic/optimizers/_pounders/fixtures/find_affine_points_zero_iv.yaml ================================================ --- c: 1.7320508075688772 delta: 0.00625 history_x: - - 0.15 - 0.008 - 0.01 - - 0.25 - 0.008 - 0.01 - - 0.15 - 0.108 - 0.01 - - 0.15 - 0.008 - 0.11 - - 0.1596177824551 - -0.07539624732067 - 0.08766385239892 - - 0.2 - 0.008531162120637 - -0.002952684076318 - - 0.1505141617677 - -0.04199731338289 - 0.009934485345754 - - 0.1374618789969 - 0.007934485345754 - -0.03840238867598 - - 0.1505250437069 - 0.007964908595663 - 0.01275913089388 - - 0.149883507892 - 0.008098080768719 - 0.009146244784311 - - 0.1716712756093 - -0.003385426549061 - 0.004854131368058 - - 0.1499498551576 - 0.008185153997901 - 0.009255435636305 - - 0.1486949409413 - 0.001680047032405 - 0.01940631659429 - - 0.1494212312914 - 0.005607806220598 - 0.01308958287811 - - 0.149295008289 - 0.006607320278458 - 0.009649557844843 - - 0.149373572031 - 0.006510591080504 - 0.01023212020758 - - 0.1492202503973 - 0.005397396245708 - 0.01181780391516 - - 0.1493007023164 - 0.005582573542213 - 
0.01196522368907 - - 0.1493027858782 - 0.005554596994372 - 0.01202415479218 - - 0.1485523407947 - 0.005613111126492 - 0.01186195785149 - - 0.1490051236084 - 0.005576645446634 - 0.01199907703224 - - 0.1524257023164 - 0.005515392037394 - 0.01196491883293 - - 0.1554482518753 - 0.005387691742364 - 0.01208573313392 - - 0.1555507023164 - 0.00569480453193 - 0.01169255682528 - - 0.1618007023164 - 0.0057804309848 - 0.01147013266041 - - 0.1743007023164 - 0.005924712736456 - 0.01107680666362 - - 0.1895416078574 - 0.006133481950173 - 0.01054081360503 - - 0.1903649025976 - 0.006140000867368 - 0.01052352873034 - - 0.1653649025976 - 0.006034565329377 - 0.01081209329213 - - 0.1902653863983 - 0.006141938821062 - 0.01051661018242 - - 0.1936923871033 - 0.006154783888531 - 0.01043074413202 model_improving_points: - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 model_improving_points_expected: - - 0.5323975209038645 - -0.015922591887171933 - -0.13172715842924898 - - 0.0023652833861750877 - 0.00031007259112286745 - -0.0010430267511385427 - - -0.014845535731521364 - -0.0011069676676378482 - 0.002765579949600694 model_indices: - 27 - 29 - 26 - 28 - 25 - 24 - 23 model_indices_expected: - 30 - 29 - 26 - 28 - 25 - 24 - 23 n: 3 n_modelpoints: 0 n_modelpoints_expected: 3 project_x_onto_null: false theta1: 1.0e-05 x_accepted: - 0.1903649025976 - 0.006140000867368 - 0.01052352873034 ================================================ FILE: tests/optimagic/optimizers/_pounders/fixtures/get_coefficients_residual_model.yaml ================================================ --- basis_null_space: - - -0.4583543462791 - -0.319506030216 - -0.6977037060623 - - 0.2311109444943 - -0.207102182158 - 0.2709008772413 - - -0.391511898797 - -0.2526774248775 - 0.6371234121103 - - 0.008236522535421 - -0.04467864269942 - -0.001160198289494 - - 0.7520131885729 - -0.1064594438631 - -0.1818415477397 - - -0.1324539613206 - 0.8805459169394 - -0.02874512409319 - - -0.009040449205893 - 0.04987780687459 - 
0.001426286833001 f_interpolated: - - 2.396839767562 - 2.640310345903 - 2.498147595924 - 2.691030200433 - 2.692161347328 - 2.617651338669 - 2.303282937582 - 1.972205368828 - 2.303282937582 - 1.972205368828 - 1.763553129541 - 1.342529987111 - 1.146903490695 - 1.640628791315 - 0.9003896743169 - 0.7700051055971 - 0.6665780303914 - 2.396839767562 - 2.640310345903 - 2.498147595924 - 2.691030200433 - 2.692161347328 - 2.617651338669 - 2.303282937582 - 1.972205368828 - 2.303282937582 - 1.972205368828 - 1.763553129541 - 1.342529987111 - 1.146903490695 - 1.640628791315 - 0.9003896743169 - 0.7700051055971 - 0.6665780303914 - 2.396839767562 - 2.640310345904 - 2.498147595924 - 2.691030200433 - 2.692161347328 - 2.617651338669 - 2.303282937582 - 1.972205368828 - 2.303282937582 - 1.972205368828 - 1.763553129541 - 1.342529987111 - 1.146903490695 - 1.640628791315 - 0.9003896743169 - 0.7700051055971 - 0.6665780303914 - 2.396839767562 - 2.640310345904 - 2.498147595924 - 2.691030200433 - 2.692161347328 - 2.617651338669 - 2.303282937582 - 1.972205368828 - 2.303282937582 - 1.972205368828 - 1.763553129541 - 1.342529987111 - 1.146903490695 - 1.640628791315 - 0.9003896743169 - 0.7700051055971 - 0.6665780303914 - 2.396839767562 - 2.498147595924 - 2.472320188045 - 1.360815770929 - 1.360815770929 - 1.360815770929 - 0.6216379283744 - 2.396839767562 - 2.498147595924 - 2.472320188045 - 1.360815770929 - 1.360815770929 - 1.360815770929 - 0.6216379283744 - 2.396839767562 - 2.498147595925 - 2.472320188045 - 1.360815770929 - 1.360815770929 - 1.360815770929 - 0.6216379283744 - 2.396839767562 - 2.498147595925 - 2.472320188046 - 1.360815770929 - 0.6216379283744 - 1.360815770929 - 1.360815770929 - 0.6216379283744 - 2.396839767562 - 2.498147595925 - 2.692161347328 - 2.472320188046 - 2.132751177783 - 2.132751177783 - 1.831874172693 - 1.360815770929 - 1.028891127921 - 0.8305451863645 - 0.6216379283744 - 2.396839767562 - 2.498147595925 - 2.692161347328 - 2.472320188045 - 2.132751177783 - 2.132751177783 - 
1.831874172693 - 1.360815770929 - 1.028891127921 - 0.8305451863645 - 0.6216379283744 - 2.396839767562 - 2.498147595925 - 2.692161347328 - 2.472320188046 - 2.132751177783 - 2.132751177783 - 1.831874172693 - 1.360815770929 - 1.028891127921 - 0.8305451863645 - 0.6216379283744 - 2.396839767562 - 2.640310345903 - 2.498147595925 - 2.691030200433 - 2.692161347328 - 2.617651338669 - 1.972205368828 - 1.972205368828 - 1.763553129541 - 1.342529987111 - 1.146903490695 - 1.640628791315 - 0.9003896743169 - 0.7700051055971 - 0.6665780303914 - 1.360815770929 - 1.360815770929 - 1.360815770929 - 1.360815770929 - 1.360815770929 - 1.360815770929 - 2.396839767562 - 2.498147595924 - 2.692161347328 - 2.472320188046 - 2.132751177783 - 1.831874172693 - 2.132751177783 - 1.831874172693 - 1.360815770929 - 1.028891127921 - 0.8305451863645 - 0.6216379283744 - 2.396839767562 - 2.498147595924 - 2.692161347328 - 2.472320188045 - 2.132751177783 - 1.831874172693 - 2.132751177783 - 1.831874172693 - 1.360815770929 - 1.028891127921 - 0.8305451863645 - 0.6216379283744 - 2.396839767562 - 2.498147595925 - 2.692161347328 - 2.472320188045 - 2.132751177783 - 1.831874172693 - 2.132751177783 - 1.831874172693 - 1.360815770929 - 1.028891127921 - 0.8305451863645 - 0.6216379283744 - 1.360815770929 - 2.396839767562 - 2.498147595925 - 2.472320188046 - 1.360815770929 - 0.6216379283744 - 1.360815770929 - 0.6216379283744 - 1.360815770929 - 1.360815770929 - 1.360815770929 - 2.303282937582 - 2.303282937582 - 2.396839767562 - 2.498147595924 - 2.303282937582 - 2.303282937582 - 1.763553129541 - 1.146903490695 - 2.303282937582 - 2.303282937582 - 2.396839767562 - 2.498147595924 - 1.763553129541 - 1.146903490695 - 2.303282937582 - 2.303282937582 - - 1.24344978758e-14 - -5.329070518201e-14 - 9.947598300641e-14 - 2.13162820728e-14 - 3.552713678801e-15 - -3.552713678801e-15 - 0.0 - 3.552713678801e-15 - 3.552713678801e-15 - -2.6645352591e-15 - -4.440892098501e-15 - 8.881784197001e-16 - -8.881784197001e-16 - 6.661338147751e-15 - 
4.440892098501e-16 - 4.440892098501e-16 - -8.881784197001e-16 - -1.59872115546e-14 - -1.7763568394e-14 - -2.48689957516e-14 - -3.552713678801e-15 - -1.42108547152e-14 - -5.329070518201e-15 - 1.7763568394e-15 - 3.552713678801e-15 - 1.7763568394e-15 - 1.7763568394e-15 - 0.0 - 1.7763568394e-15 - -8.881784197001e-16 - 8.881784197001e-16 - 4.440892098501e-16 - 0.0 - 8.881784197001e-16 - -1.24344978758e-14 - -5.151434834261e-14 - -3.19744231092e-14 - -1.42108547152e-14 - -1.7763568394e-14 - 0.0 - 3.552713678801e-15 - 0.0 - -1.7763568394e-15 - -4.440892098501e-15 - 0.0 - 8.881784197001e-16 - 0.0 - 6.217248937901e-15 - 1.998401444325e-15 - 4.440892098501e-16 - -4.440892098501e-16 - 0.0 - -1.136868377216e-13 - -4.618527782441e-14 - -7.105427357601e-15 - 5.329070518201e-15 - 0.0 - -1.7763568394e-15 - 0.0 - 1.7763568394e-15 - 3.552713678801e-15 - -7.993605777300e-15 - 2.6645352591e-15 - -2.22044604925e-15 - 3.10862446895e-15 - 1.33226762955e-15 - 2.22044604925e-16 - -1.110223024625e-15 - -3.552713678801e-15 - 9.947598300641e-14 - -3.552713678801e-15 - 5.551115123126e-15 - 1.7763568394e-15 - 1.7763568394e-15 - -8.881784197001e-16 - -1.24344978758e-14 - 0.0 - -8.881784197001e-16 - 3.552713678801e-15 - 1.33226762955e-14 - 6.217248937901e-15 - -5.551115123126e-16 - -1.86517468137e-14 - 1.101341240428e-13 - -7.105427357601e-15 - 2.6645352591e-15 - 2.22044604925e-15 - 2.6645352591e-15 - -8.881784197001e-16 - -1.24344978758e-14 - -1.06581410364e-14 - 3.552713678801e-15 - 6.661338147751e-15 - -8.881784197001e-16 - 1.33226762955e-14 - 1.7763568394e-15 - -8.881784197001e-16 - -1.86517468137e-14 - 3.19744231092e-14 - 1.68753899743e-14 - -2.6645352591e-15 - 6.217248937901e-15 - -1.7763568394e-15 - -8.881784197001e-16 - 6.217248937901e-15 - -8.881784197001e-16 - -8.881784197001e-16 - 4.440892098501e-16 - -1.50990331349e-14 - 9.592326932761e-14 - 0.0 - 0.0 - 6.661338147751e-16 - -1.110223024625e-15 - -1.7763568394e-15 - 1.33226762955e-14 - 0.0 - -8.881784197001e-16 - 4.440892098501e-16 - 
1.06581410364e-14 - 1.7763568394e-14 - -2.6645352591e-15 - -3.552713678801e-15 - -8.881784197001e-16 - 2.6645352591e-15 - 0.0 - 3.552713678801e-15 - 8.881784197001e-16 - 8.881784197001e-16 - 0.0 - -1.24344978758e-14 - -2.13162820728e-14 - 9.592326932761e-14 - 0.0 - -2.57571741713e-14 - 1.7763568394e-15 - -8.881784197001e-16 - 1.7763568394e-15 - -3.552713678801e-15 - 7.771561172376e-16 - -1.998401444325e-15 - 4.440892098501e-15 - 1.443289932013e-15 - 2.775557561563e-16 - -1.665334536938e-16 - 7.105427357601e-15 - 8.881784197001e-15 - -1.7763568394e-15 - 2.22044604925e-15 - 1.24344978758e-14 - 3.552713678801e-15 - 1.24344978758e-14 - 1.42108547152e-14 - -2.6645352591e-15 - -5.329070518201e-15 - 0.0 - 0.0 - 0.0 - 1.7763568394e-15 - 7.105427357601e-15 - -8.881784197001e-16 - 0.0 - 0.0 - -2.6645352591e-15 - -4.618527782441e-14 - -7.105427357601e-15 - 5.329070518201e-15 - 2.6645352591e-15 - -8.881784197001e-16 - 1.7763568394e-15 - -8.881784197001e-16 - 5.329070518201e-15 - -2.22044604925e-16 - 4.440892098501e-16 - 8.881784197001e-16 - 7.105427357601e-15 - 7.105427357601e-15 - 3.153033389935e-14 - 0.0 - -4.440892098501e-15 - 0.0 - 2.6645352591e-15 - -1.7763568394e-15 - -8.881784197001e-16 - 8.881784197001e-16 - -8.881784197001e-16 - 0.0 - 4.884981308351e-15 - 8.881784197001e-16 - -1.06581410364e-14 - -1.7763568394e-15 - 2.6645352591e-15 - 2.22044604925e-16 - -7.549516567451e-15 - -4.440892098501e-16 - 6.217248937901e-15 - 6.217248937901e-15 - 3.552713678801e-15 - 4.440892098501e-16 - -1.7763568394e-15 - -1.7763568394e-15 - 4.618527782441e-14 - 1.7763568394e-15 - -1.7763568394e-15 - -1.7763568394e-15 - -8.881784197001e-16 - -8.881784197001e-16 - -8.881784197001e-16 - 8.881784197001e-15 - 5.684341886081e-14 - 0.0 - -2.22044604925e-15 - -8.881784197001e-16 - -1.7763568394e-15 - - -1.06581410364e-14 - -1.95399252334e-14 - 1.24344978758e-14 - 7.105427357601e-15 - 4.440892098501e-15 - 2.442490654175e-15 - -1.998401444325e-15 - 3.330669073875e-16 - -1.33226762955e-15 - 
-1.110223024625e-16 - 1.33226762955e-15 - 1.7763568394e-15 - -6.661338147751e-16 - 4.440892098501e-16 - 0.0 - 2.22044604925e-16 - 0.0 - 0.0 - 3.19744231092e-14 - -1.7763568394e-14 - 1.7763568394e-15 - 1.7763568394e-15 - 2.6645352591e-15 - 0.0 - 0.0 - 0.0 - 0.0 - -1.33226762955e-15 - 4.440892098501e-16 - 0.0 - -8.881784197001e-16 - 0.0 - 1.110223024625e-16 - -2.22044604925e-16 - 1.7763568394e-15 - -3.552713678801e-15 - 1.59872115546e-14 - 8.881784197001e-16 - -3.10862446895e-15 - 4.440892098501e-16 - 1.7763568394e-15 - 8.881784197001e-16 - 8.881784197001e-16 - 1.33226762955e-15 - -4.440892098501e-16 - 4.440892098501e-16 - -8.881784197001e-16 - 1.7763568394e-15 - -8.881784197001e-16 - 4.440892098501e-16 - -1.110223024625e-16 - 8.881784197001e-16 - -2.30926389122e-14 - 1.7763568394e-15 - 2.6645352591e-15 - 2.22044604925e-16 - 4.440892098501e-16 - 0.0 - 8.881784197001e-16 - 0.0 - 4.440892098501e-16 - -2.22044604925e-15 - 1.33226762955e-15 - -8.881784197001e-16 - -1.33226762955e-15 - 0.0 - 4.440892098501e-16 - 4.440892098501e-16 - -1.06581410364e-14 - -1.24344978758e-14 - 0.0 - 8.881784197001e-16 - -2.6645352591e-15 - -4.440892098501e-16 - -3.330669073875e-16 - 1.7763568394e-15 - 3.552713678801e-14 - -8.881784197001e-16 - 1.7763568394e-15 - 3.552713678801e-15 - -4.440892098501e-16 - 0.0 - -4.440892098501e-15 - 6.217248937901e-15 - -3.552713678801e-15 - -4.440892098501e-16 - -8.881784197001e-16 - 0.0 - 0.0 - 1.7763568394e-15 - 2.84217094304e-14 - -4.440892098501e-15 - 2.6645352591e-15 - -3.330669073875e-16 - 3.552713678801e-15 - -2.6645352591e-15 - 0.0 - -4.440892098501e-15 - -1.7763568394e-15 - 9.103828801926e-15 - -3.552713678801e-15 - 0.0 - 0.0 - -4.440892098501e-16 - 2.6645352591e-15 - -4.440892098501e-16 - 1.110223024625e-16 - 1.110223024625e-16 - -8.881784197001e-16 - -7.993605777300e-15 - 1.33226762955e-15 - 6.661338147751e-16 - 0.0 - -8.881784197001e-16 - -8.881784197001e-16 - 3.552713678801e-15 - 4.440892098501e-16 - 2.22044604925e-16 - -2.22044604925e-16 - 
1.33226762955e-15 - -1.59872115546e-14 - 5.551115123126e-15 - 2.22044604925e-16 - -8.881784197001e-16 - 8.881784197001e-16 - 1.110223024625e-16 - -3.552713678801e-15 - -8.881784197001e-16 - -4.718447854657e-16 - 0.0 - 1.7763568394e-15 - -3.552713678801e-15 - -7.993605777300e-15 - 7.993605777301e-15 - -1.199040866595e-14 - -1.7763568394e-15 - 1.7763568394e-15 - 8.881784197001e-16 - 8.881784197001e-16 - 8.881784197001e-16 - -8.881784197001e-16 - -8.881784197001e-16 - 0.0 - 0.0 - 4.440892098501e-16 - -5.329070518201e-15 - 0.0 - 4.773959005888e-15 - -8.881784197001e-16 - 0.0 - 4.440892098501e-16 - -1.398881011028e-14 - -1.7763568394e-15 - 5.551115123126e-15 - -3.552713678801e-15 - -4.440892098501e-16 - 0.0 - -2.22044604925e-16 - 0.0 - 4.440892098501e-16 - 2.22044604925e-16 - 8.881784197001e-16 - 0.0 - 1.7763568394e-15 - -2.6645352591e-14 - 8.659739592076e-15 - 3.552713678801e-15 - 8.881784197001e-16 - -8.881784197001e-16 - -8.881784197001e-16 - 4.440892098501e-16 - -1.33226762955e-15 - 0.0 - 0.0 - 0.0 - -1.7763568394e-15 - 1.59872115546e-14 - 1.7763568394e-15 - 0.0 - 4.440892098501e-16 - 0.0 - -4.440892098501e-16 - 1.7763568394e-15 - 2.6645352591e-15 - -4.440892098501e-16 - 2.22044604925e-16 - 1.665334536938e-16 - 5.329070518201e-15 - 3.552713678801e-15 - 2.84217094304e-14 - -4.440892098501e-16 - 0.0 - -6.661338147751e-16 - -3.552713678801e-15 - 4.440892098501e-16 - 8.881784197001e-16 - -4.440892098501e-16 - -3.552713678801e-15 - 2.6645352591e-15 - 1.7763568394e-15 - 3.552713678801e-15 - 6.217248937901e-15 - 4.440892098501e-16 - -4.440892098501e-16 - -8.881784197001e-16 - 0.0 - -3.10862446895e-15 - -1.7763568394e-15 - 1.24344978758e-14 - 1.59872115546e-14 - -4.440892098501e-16 - 8.881784197001e-16 - -3.10862446895e-15 - 4.440892098501e-16 - - -2.735578163993e-08 - -5.45305738342e-08 - 4.426374289324e-08 - -4.691855792771e-10 - -2.819547262334e-09 - -5.672461611539e-09 - -6.022133902661e-09 - -5.938645131209e-09 - -6.026155574546e-09 - -5.914529310758e-09 - 
-1.108718095111e-08 - -5.205436082178e-11 - -1.303774865846e-09 - 3.85213638765e-10 - -1.12522080542e-09 - -1.038296559841e-09 - -9.272147494244e-10 - -2.739852789091e-08 - -5.461970431497e-08 - 4.424782673595e-08 - -4.685034582508e-10 - -2.810566002154e-09 - -5.653987500409e-09 - -6.050512979527e-09 - -5.93539084548e-09 - -6.050512979527e-09 - -5.92861226778e-09 - -1.110882408284e-08 - -5.200462283028e-11 - -1.299493845863e-09 - 3.87061049878e-10 - -1.12522080542e-09 - -1.042401720497e-09 - -9.273968260004e-10 - -2.746151039901e-08 - -5.433957994683e-08 - 4.423691279953e-08 - -4.692992661148e-10 - -2.817955646606e-09 - -5.670756308973e-09 - -6.043791245247e-09 - -5.925457458034e-09 - -6.050569822946e-09 - -5.927120128035e-09 - -1.106290881125e-08 - -5.205436082178e-11 - -1.303028795974e-09 - 3.85909970646e-10 - -1.127881787966e-09 - -1.040435293476e-09 - -9.260201494499e-10 - -2.734941517701e-08 - -5.424772098195e-08 - 4.424055077834e-08 - -4.674802767113e-10 - -2.812839738908e-09 - -5.670756308973e-09 - -6.039726940799e-09 - -5.925457458034e-09 - -6.050512979527e-09 - -5.93539084548e-09 - -1.104555735765e-08 - -5.22533127878e-11 - -1.303213537085e-09 - 3.862474784455e-10 - -1.128016791085e-09 - -1.036601915416e-09 - -9.278338097829e-10 - -2.736760507105e-08 - 4.430148692336e-08 - -6.124395213192e-09 - 6.223643822523e-09 - 6.227637072698e-09 - 6.216559711447e-09 - -8.725535849408e-10 - -2.746151039901e-08 - 4.422872734722e-08 - -6.108592742748e-09 - 6.207599767549e-09 - 6.222357740171e-09 - 6.224738058336e-09 - -8.727347733384e-10 - -2.734918780334e-08 - 4.453613655642e-08 - -6.112173878137e-09 - 6.22451068466e-09 - 6.227359961031e-09 - 6.222300896752e-09 - -8.725198341608e-10 - -2.746151039901e-08 - 4.457479008124e-08 - -6.087390147513e-09 - 6.217128145636e-09 - -8.725535849408e-10 - 6.222357740171e-09 - 6.227637072698e-09 - -8.725198341608e-10 - -2.734918780334e-08 - 4.454204827198e-08 - -2.825686351571e-09 - -6.100492555561e-09 - -5.898698418605e-09 - 
-5.89793103245e-09 - -6.570232358172e-09 - 6.235552518774e-09 - -1.462325371904e-09 - -1.088713119657e-09 - -8.705525189612e-10 - -2.73569185083e-08 - 4.454068402993e-08 - -2.808064891724e-09 - -6.131557483968e-09 - -5.876401587557e-09 - -5.897845767322e-09 - -6.565869625774e-09 - 6.222357740171e-09 - -1.463405396862e-09 - -1.089073720095e-09 - -8.69984972951e-10 - -2.731030690484e-08 - 4.454204827198e-08 - -2.806132215483e-09 - -6.112287564974e-09 - -5.890328225178e-09 - -5.881020115339e-09 - -6.602697055769e-09 - 6.208729530499e-09 - -1.467945764944e-09 - -1.086476686396e-09 - -8.746043889118e-10 - -2.746151039901e-08 - -5.454558049678e-08 - 4.454068402993e-08 - -4.674802767113e-10 - -2.816477717715e-09 - -5.670813152392e-09 - -5.936954039498e-09 - -5.93111337821e-09 - -1.104545788166e-08 - -5.2018833685e-11 - -1.29870869614e-09 - 3.867874909247e-10 - -1.12415321496e-09 - -1.038033659029e-09 - -9.241301057727e-10 - 6.206445135604e-09 - 6.206281710774e-09 - 6.214690984052e-09 - 6.227359961031e-09 - 6.216112069524e-09 - 6.232355076463e-09 - -2.740466698015e-08 - 4.426556188264e-08 - -2.806132215483e-09 - -6.098474614191e-09 - -5.856662710357e-09 - -6.574133237791e-09 - -5.885155474061e-09 - -6.587036693873e-09 - 6.232085070224e-09 - -1.464133703166e-09 - -1.089770051976e-09 - -8.732499168218e-10 - -2.739443516475e-08 - 4.424418875715e-08 - -2.815454536176e-09 - -6.108251682235e-09 - -5.863171281817e-09 - -6.585437972717e-09 - -5.886150233891e-09 - -6.576577504802e-09 - 6.217746317816e-09 - -1.467178378789e-09 - -1.090377566015e-09 - -8.717009336578e-10 - -2.741853677435e-08 - 4.456569513422e-08 - -2.813635546772e-09 - -6.107967465141e-09 - -5.881588549528e-09 - -6.590155976482e-09 - -5.854928986082e-09 - -6.584819800537e-09 - 6.221597459444e-09 - -1.46228273934e-09 - -1.089073720095e-09 - -8.693215036715e-10 - 6.218208170594e-09 - -2.752040018095e-08 - 4.457479008124e-08 - -6.096939841882e-09 - 6.222300896752e-09 - -8.720668631668e-10 - 6.209738501184e-09 - 
-8.718927801965e-10 - 6.226734683423e-09 - 6.224738058336e-09 - 6.208729530499e-09 - -6.025345555827e-09 - -6.015824283168e-09 - -2.741762727965e-08 - 4.41937118012e-08 - -6.025857146597e-09 - -6.039897471055e-09 - -1.109327030235e-08 - -1.30298616341e-09 - -6.022730758559e-09 - -6.028955112924e-09 - -2.738147486525e-08 - 4.430512490217e-08 - -1.106290881125e-08 - -1.301476260096e-09 - -6.022730758559e-09 - -6.022105480952e-09 - - -3.552713678801e-15 - -3.552713678801e-15 - 9.769962616701e-15 - 5.329070518201e-15 - 6.217248937901e-15 - 3.774758283726e-15 - -2.553512956638e-15 - 4.440892098501e-16 - -1.110223024625e-15 - 0.0 - 8.881784197001e-16 - 1.7763568394e-15 - 2.22044604925e-16 - 0.0 - 2.22044604925e-16 - 2.22044604925e-16 - -2.22044604925e-16 - 0.0 - 1.42108547152e-14 - -1.42108547152e-14 - -1.7763568394e-15 - 1.7763568394e-15 - 2.6645352591e-15 - -8.881784197001e-16 - -8.881784197001e-16 - -8.881784197001e-16 - -4.440892098501e-16 - -8.881784197001e-16 - 4.440892098501e-16 - 0.0 - 4.440892098501e-16 - 2.22044604925e-16 - -5.551115123126e-16 - -1.110223024625e-16 - -8.881784197001e-16 - 0.0 - 1.95399252334e-14 - 5.329070518201e-15 - -5.329070518201e-15 - 2.442490654175e-15 - 8.881784197001e-16 - 0.0 - 0.0 - 4.440892098501e-16 - -4.440892098501e-16 - -8.881784197001e-16 - -1.33226762955e-15 - 4.440892098501e-16 - -4.440892098501e-16 - 0.0 - 0.0 - 4.440892098501e-16 - -2.22044604925e-14 - 1.95399252334e-14 - 6.439293542826e-15 - 6.883382752676e-15 - 2.442490654175e-15 - -8.881784197001e-16 - 0.0 - -8.881784197001e-16 - -8.881784197001e-16 - -1.7763568394e-15 - 8.881784197001e-16 - -4.440892098501e-16 - -1.33226762955e-15 - 0.0 - 0.0 - 2.22044604925e-16 - -7.105427357601e-15 - 0.0 - 1.7763568394e-15 - 2.6645352591e-15 - -8.881784197001e-16 - -8.881784197001e-16 - -2.22044604925e-16 - -8.881784197001e-16 - 3.28626015289e-14 - 1.7763568394e-15 - 1.7763568394e-15 - 3.552713678801e-15 - -1.33226762955e-15 - 0.0 - 3.552713678801e-15 - 1.42108547152e-14 - 
-2.6645352591e-15 - 8.881784197001e-16 - -2.6645352591e-15 - 1.7763568394e-15 - 0.0 - -8.881784197001e-16 - 1.95399252334e-14 - -3.10862446895e-15 - 1.7763568394e-15 - -2.22044604925e-16 - 3.552713678801e-15 - -8.881784197001e-16 - 0.0 - 3.552713678801e-15 - -1.95399252334e-14 - 7.105427357601e-15 - -3.552713678801e-15 - 0.0 - 0.0 - -8.881784197001e-16 - -8.881784197001e-16 - 0.0 - -1.110223024625e-16 - -8.326672684688e-17 - -3.552713678801e-15 - 0.0 - -2.22044604925e-15 - 4.440892098501e-16 - 0.0 - 0.0 - -8.881784197001e-16 - 3.552713678801e-15 - 6.661338147751e-16 - 0.0 - 0.0 - 5.329070518201e-15 - -3.37507799486e-14 - -6.661338147751e-16 - 2.553512956638e-15 - -8.881784197001e-16 - 8.881784197001e-16 - 4.440892098501e-16 - 0.0 - 0.0 - -3.053113317719e-16 - 0.0 - -8.881784197001e-16 - -3.37507799486e-14 - 0.0 - 1.24344978758e-14 - -1.199040866595e-14 - 0.0 - 8.881784197001e-16 - 4.440892098501e-16 - 0.0 - -8.881784197001e-16 - -8.881784197001e-16 - -8.881784197001e-16 - 0.0 - 0.0 - 0.0 - -7.105427357601e-15 - -1.7763568394e-15 - 2.997602166488e-15 - -2.6645352591e-15 - -2.6645352591e-15 - -1.554312234475e-15 - -1.06581410364e-14 - -3.28626015289e-14 - -6.661338147751e-16 - -2.22044604925e-15 - -1.110223024625e-15 - 4.440892098501e-16 - 1.110223024625e-15 - -4.440892098501e-16 - 1.998401444325e-15 - 4.440892098501e-16 - 4.440892098501e-16 - 0.0 - 0.0 - -8.881784197001e-15 - 5.773159728051e-15 - 2.6645352591e-15 - 1.7763568394e-15 - 0.0 - -4.440892098501e-16 - -8.881784197001e-16 - -2.22044604925e-15 - 8.881784197001e-16 - 1.110223024625e-16 - 4.440892098501e-16 - 2.6645352591e-15 - 2.13162820728e-14 - 0.0 - 0.0 - 8.881784197001e-16 - 0.0 - -8.881784197001e-16 - 8.881784197001e-16 - 2.6645352591e-15 - 0.0 - 0.0 - -2.775557561563e-17 - 3.552713678801e-15 - 3.552713678801e-15 - 1.95399252334e-14 - -1.33226762955e-15 - 1.7763568394e-15 - -6.661338147751e-16 - -1.7763568394e-15 - 0.0 - 0.0 - -1.33226762955e-15 - 0.0 - 2.6645352591e-15 - 0.0 - 7.105427357601e-15 - 
1.68753899743e-14 - -1.33226762955e-15 - 8.881784197001e-16 - -8.881784197001e-16 - 0.0 - -1.7763568394e-15 - -1.33226762955e-15 - 1.24344978758e-14 - 0.0 - -4.440892098501e-16 - 4.440892098501e-16 - -1.7763568394e-15 - 1.7763568394e-15 - - -1.06581410364e-14 - 3.552713678801e-15 - 3.552713678801e-15 - 7.105427357601e-15 - 8.881784197001e-15 - 0.0 - -2.6645352591e-15 - 8.881784197001e-16 - -1.7763568394e-15 - -8.881784197001e-16 - 1.7763568394e-15 - 8.881784197001e-16 - 0.0 - 1.7763568394e-15 - -1.110223024625e-16 - -2.22044604925e-16 - 0.0 - -3.552713678801e-15 - 4.263256414561e-14 - -7.105427357601e-15 - -3.552713678801e-15 - -3.552713678801e-15 - -3.552713678801e-15 - -8.881784197001e-16 - 2.22044604925e-16 - -8.881784197001e-16 - 0.0 - -2.6645352591e-15 - -4.440892098501e-16 - -1.554312234475e-15 - 0.0 - -1.110223024625e-16 - 0.0 - -2.22044604925e-16 - 3.552713678801e-15 - 1.7763568394e-14 - 8.881784197001e-15 - -3.552713678801e-15 - -3.552713678801e-15 - -8.881784197001e-16 - 4.440892098501e-16 - -7.771561172376e-16 - -1.33226762955e-15 - -1.33226762955e-15 - -4.440892098501e-16 - -8.881784197001e-16 - -1.443289932013e-15 - -8.881784197001e-16 - -2.081668171172e-16 - -2.775557561563e-17 - -3.330669073875e-16 - 3.552713678801e-15 - -5.329070518201e-14 - -2.48689957516e-14 - -7.105427357601e-15 - -5.329070518201e-15 - -8.881784197001e-16 - -8.881784197001e-16 - -7.771561172376e-16 - -8.881784197001e-16 - 2.22044604925e-16 - -1.7763568394e-15 - 2.22044604925e-16 - -8.881784197001e-16 - -8.881784197001e-16 - -1.110223024625e-16 - -1.665334536938e-16 - 5.551115123126e-17 - 7.105427357601e-15 - -3.19744231092e-14 - -1.7763568394e-15 - 4.440892098501e-16 - -5.773159728051e-15 - -3.552713678801e-15 - -4.440892098501e-16 - 3.552713678801e-15 - 1.59872115546e-14 - -8.881784197001e-16 - 1.33226762955e-15 - 2.6645352591e-15 - 0.0 - -3.330669073875e-16 - 5.329070518201e-15 - 1.7763568394e-15 - -3.552713678801e-15 - -8.881784197001e-16 - -8.881784197001e-16 - 
-1.7763568394e-15 - -4.440892098501e-16 - 3.552713678801e-15 - 8.881784197001e-15 - -2.6645352591e-15 - 5.773159728051e-15 - -4.440892098501e-16 - 2.6645352591e-15 - -5.773159728051e-15 - -4.440892098501e-16 - 5.329070518201e-15 - -3.730349362741e-14 - 8.881784197001e-15 - -2.6645352591e-15 - -1.110223024625e-16 - 9.436895709314e-16 - -1.7763568394e-15 - -1.33226762955e-15 - -8.881784197001e-16 - -4.440892098501e-16 - 2.22044604925e-16 - 5.329070518201e-15 - -3.01980662698e-14 - 8.881784197001e-16 - -1.7763568394e-15 - -1.33226762955e-15 - 0.0 - -1.7763568394e-15 - 2.6645352591e-15 - 0.0 - -3.330669073875e-16 - -4.440892098501e-16 - 0.0 - -5.151434834261e-14 - 5.329070518201e-15 - -2.6645352591e-15 - -1.998401444325e-15 - 4.996003610813e-16 - -4.440892098501e-16 - -3.996802888651e-15 - 0.0 - -8.881784197001e-16 - -8.881784197001e-16 - 3.552713678801e-15 - -8.881784197001e-15 - -3.01980662698e-14 - 5.329070518201e-15 - -1.42108547152e-14 - -8.881784197001e-16 - 1.110223024625e-16 - -4.440892098501e-16 - 6.661338147751e-16 - -8.881784197001e-16 - -8.881784197001e-16 - -2.6645352591e-15 - 0.0 - 0.0 - -2.22044604925e-16 - -7.105427357601e-15 - -3.552713678801e-15 - 8.881784197001e-16 - -8.881784197001e-16 - -3.10862446895e-15 - -8.881784197001e-16 - -6.217248937901e-15 - -2.13162820728e-14 - 5.329070518201e-15 - -5.329070518201e-15 - -1.7763568394e-15 - 0.0 - -1.7763568394e-15 - -8.881784197001e-16 - 2.22044604925e-15 - 4.440892098501e-16 - 8.881784197001e-16 - 0.0 - 7.105427357601e-15 - -1.59872115546e-14 - 7.993605777301e-15 - 2.22044604925e-15 - 4.440892098501e-16 - -9.992007221626e-16 - 0.0 - 2.22044604925e-16 - -6.661338147751e-15 - 0.0 - 0.0 - 8.881784197001e-16 - 7.105427357601e-15 - 1.06581410364e-14 - 4.440892098501e-16 - -2.442490654175e-15 - 8.881784197001e-16 - -8.881784197001e-16 - -1.33226762955e-15 - 0.0 - 2.22044604925e-15 - -6.661338147751e-16 - -3.330669073875e-16 - 0.0 - 8.881784197001e-16 - 7.105427357601e-15 - 8.881784197001e-15 - 0.0 - 
-1.7763568394e-15 - -7.910339050454e-16 - -5.329070518201e-15 - -2.22044604925e-16 - 2.22044604925e-15 - 0.0 - -3.996802888651e-15 - 1.998401444325e-15 - -2.22044604925e-15 - 1.06581410364e-14 - -3.01980662698e-14 - 0.0 - -8.881784197001e-16 - -4.440892098501e-16 - -8.881784197001e-16 - -2.22044604925e-15 - -3.552713678801e-15 - 0.0 - -3.552713678801e-14 - -4.440892098501e-16 - 3.330669073875e-16 - -2.22044604925e-15 - 0.0 - - -2.453347747178e-08 - -4.890711124972e-08 - 3.971540252223e-08 - -4.215081617076e-10 - -2.531166387598e-09 - -5.090015520182e-09 - -5.402824854173e-09 - -5.326945995421e-09 - -5.406533887253e-09 - -5.303881778218e-09 - -9.95139259885e-09 - -4.664002517529e-11 - -1.169468077933e-09 - 3.465174813755e-10 - -1.009041739053e-09 - -9.316067917097e-10 - -8.316902722072e-10 - -2.457070991113e-08 - -4.9003332947e-08 - 3.969984163632e-08 - -4.210960469209e-10 - -2.520792463656e-09 - -5.072479325463e-09 - -5.43165867839e-09 - -5.325112795163e-09 - -5.43165867839e-09 - -5.320373475115e-09 - -9.971742542803e-09 - -4.661160346586e-11 - -1.165034291262e-09 - 3.48336470779e-10 - -1.009041739053e-09 - -9.349250262858e-10 - -8.319460675921e-10 - -2.462979864504e-08 - -4.874540593391e-08 - 3.969002193571e-08 - -4.214655291435e-10 - -2.529361609049e-09 - -5.086377541375e-09 - -5.424837468126e-09 - -5.313083306646e-09 - -5.429683369584e-09 - -5.316017848145e-09 - -9.926125699167e-09 - -4.666844688472e-11 - -1.168785956907e-09 - 3.474269760773e-10 - -1.012267603073e-09 - -9.336105222246e-10 - -8.307345922276e-10 - -2.453501224409e-08 - -4.864099878432e-08 - 3.969411466187e-08 - -4.200586545267e-10 - -2.522682507333e-09 - -5.086377541375e-09 - -5.417248871709e-09 - -5.313083306646e-09 - -5.43165867839e-09 - -5.325112795163e-09 - -9.905932074616e-09 - -4.685318799602e-11 - -1.16909859571e-09 - 3.474269760773e-10 - -1.011144945551e-09 - -9.30121757392e-10 - -8.32123703276e-10 - -2.455203684804e-08 - 3.975333129347e-08 - -5.494243282556e-09 - 5.58331691991e-09 - 
5.584197992903e-09 - 5.577660999734e-09 - -7.832063886326e-10 - -2.462979864504e-08 - 3.968375494878e-08 - -5.482178266902e-09 - 5.567116545535e-09 - 5.582947437688e-09 - 5.582720064012e-09 - -7.826947978629e-10 - -2.453300851357e-08 - 3.997169528702e-08 - -5.48229195374e-09 - 5.582549533756e-09 - 5.58533486128e-09 - 5.58033264042e-09 - -7.824034753412e-10 - -2.462979864504e-08 - 4.000852982244e-08 - -5.459440899358e-09 - 5.578741024692e-09 - -7.832063886326e-10 - 5.582947437688e-09 - 5.584197992903e-09 - -7.824034753412e-10 - -2.453300851357e-08 - 3.997578801318e-08 - -2.534619625294e-09 - -5.472671205098e-09 - -5.292946525515e-09 - -5.289514604101e-09 - -5.893511456634e-09 - 5.593420837613e-09 - -1.311946107307e-09 - -9.763994057721e-10 - -7.807834379037e-10 - -2.454028447119e-08 - 3.997533326583e-08 - -2.518589781175e-09 - -5.499970257006e-09 - -5.27222709934e-09 - -5.289443549827e-09 - -5.889233989365e-09 - 5.582947437688e-09 - -1.31279875859e-09 - -9.767973097040e-10 - -7.801634893667e-10 - -2.450508418406e-08 - 3.997578801318e-08 - -2.517722919038e-09 - -5.481609832714e-09 - -5.282728920974e-09 - -5.276390879771e-09 - -5.921897638927e-09 - 5.568864480665e-09 - -1.316720954492e-09 - -9.741683015817e-10 - -7.849578764763e-10 - -2.462979864504e-08 - -4.892184790606e-08 - 3.997533326583e-08 - -4.198312808512e-10 - -2.525382569729e-09 - -5.087684940008e-09 - -5.325020424607e-09 - -5.321375340372e-09 - -9.905946285471e-09 - -4.662581432058e-11 - -1.165076923826e-09 - 3.474269760773e-10 - -1.008160666061e-09 - -9.312302040598e-10 - -8.287912578453e-10 - 5.564075422626e-09 - 5.566761274167e-09 - 5.577334150075e-09 - 5.58533486128e-09 - 5.575742534347e-09 - 5.589768647951e-09 - -2.458268966166e-08 - 3.971631201694e-08 - -2.517722919038e-09 - -5.472060138345e-09 - -5.253625090518e-09 - -5.896666266381e-09 - -5.281549420033e-09 - -5.911225287036e-09 - 5.58955548513e-09 - -1.31430510919e-09 - -9.776002229954e-10 - -7.833289572545e-10 - -2.456549452745e-08 - 
3.969729789333e-08 - -2.525453624003e-09 - -5.481140874508e-09 - -5.259096269583e-09 - -5.908617595196e-09 - -5.280242021399e-09 - -5.902698774207e-09 - 5.579806838796e-09 - -1.316578845945e-09 - -9.776002229954e-10 - -7.816733926802e-10 - -2.458754977397e-08 - 3.999780062713e-08 - -2.523478315197e-09 - -5.481275877628e-09 - -5.276277192934e-09 - -5.912788481055e-09 - -5.252047685644e-09 - -5.904489341901e-09 - 5.580417905549e-09 - -1.312287167821e-09 - -9.767973097040e-10 - -7.796998602316e-10 - 5.577490469477e-09 - -2.470176241332e-08 - 4.000852982244e-08 - -5.468805852615e-09 - 5.58033264042e-09 - -7.824567660464e-10 - 5.57135138024e-09 - -7.82140574529e-10 - 5.586642259914e-09 - 5.582720064012e-09 - 5.568864480665e-09 - -5.405695446825e-09 - -5.398412383784e-09 - -2.459630366047e-08 - 3.965256212268e-08 - -5.406420200416e-09 - -5.417355453119e-09 - -9.949395973763e-09 - -1.168757535197e-09 - -5.401048497333e-09 - -5.409134473666e-09 - -2.456927461481e-08 - 3.975651452492e-08 - -9.926125699167e-09 - -1.167023810922e-09 - -5.401048497333e-09 - -5.400458746863e-09 linear_terms_expected: - - -9.787418445994 - -22.54377112295 - -9.534168263258 - - -10.78162277843 - -24.83376357139 - -10.50264745927 - - -10.20110565515 - -23.49663439001 - -9.937151359945 - - -10.98873563222 - -25.31081542293 - -10.70440130878 - - -10.99335463641 - -25.3214545631 - -10.70890079348 - - -10.68909540507 - -24.62064151412 - -10.41251428865 - - -9.405382108517 - -21.66381086468 - -9.162017165394 - - -8.053437460602 - -18.54982008449 - -7.845054182336 - - -9.405382108518 - -21.66381086468 - -9.162017165391 - - -8.053437460579 - -18.5498200845 - -7.845054182338 - - -7.201412728984 - -16.587315791 - -7.015075646482 - - -5.482178202194 - -12.62733086319 - -5.340326447796 - - -4.683343670952 - -10.78734179755 - -4.56216181608 - - -6.699455120896 - -15.43113581877 - -6.526106239334 - - -3.676712397266 - -8.46872578782 - -3.581577199013 - - -3.14429118694 - -7.242377695456 - -3.062932425796 - - 
-2.721950037932 - -6.269581622886 - -2.651519384438 - - -9.787418446035 - -22.54377112294 - -9.534168263254 - - -10.78162277851 - -24.83376357135 - -10.50264745926 - - -10.20110565517 - -23.49663439 - -9.937151359944 - - -10.98873563222 - -25.31081542293 - -10.70440130878 - - -10.9933546364 - -25.3214545631 - -10.70890079348 - - -10.68909540505 - -24.62064151412 - -10.41251428865 - - -9.405382108542 - -21.66381086467 - -9.162017165389 - - -8.053437460596 - -18.54982008448 - -7.845054182333 - - -9.405382108542 - -21.66381086467 - -9.162017165389 - - -8.053437460588 - -18.54982008448 - -7.845054182332 - - -7.201412729004 - -16.58731579099 - -7.015075646479 - - -5.482178202195 - -12.6273308632 - -5.340326447797 - - -4.683343670949 - -10.78734179755 - -4.562161816081 - - -6.699455120895 - -15.43113581877 - -6.526106239334 - - -3.676712397266 - -8.46872578782 - -3.581577199013 - - -3.144291186945 - -7.242377695458 - -3.062932425798 - - -2.721950037932 - -6.269581622886 - -2.651519384437 - - -9.787418446088 - -22.54377112291 - -9.534168263241 - - -10.78162277827 - -24.83376357148 - -10.50264745931 - - -10.20110565518 - -23.49663439 - -9.937151359945 - - -10.98873563222 - -25.31081542292 - -10.70440130878 - - -10.99335463641 - -25.32145456311 - -10.70890079349 - - -10.68909540507 - -24.62064151412 - -10.41251428865 - - -9.405382108538 - -21.66381086467 - -9.162017165392 - - -8.053437460588 - -18.54982008449 - -7.845054182336 - - -9.405382108543 - -21.66381086467 - -9.16201716539 - - -8.053437460585 - -18.54982008448 - -7.845054182331 - - -7.201412728965 - -16.58731579101 - -7.015075646488 - - -5.482178202194 - -12.6273308632 - -5.340326447796 - - -4.683343670951 - -10.78734179755 - -4.56216181608 - - -6.699455120897 - -15.43113581877 - -6.526106239335 - - -3.676712397267 - -8.468725787817 - -3.581577199011 - - -3.144291186944 - -7.242377695458 - -3.062932425798 - - -2.721950037931 - -6.269581622886 - -2.651519384438 - - -9.787418445987 - -22.54377112295 - -9.534168263257 
- - -10.78162277819 - -24.83376357152 - -10.50264745933 - - -10.20110565518 - -23.49663439 - -9.937151359945 - - -10.98873563222 - -25.31081542293 - -10.70440130878 - - -10.9933546364 - -25.3214545631 - -10.70890079348 - - -10.68909540507 - -24.62064151412 - -10.41251428865 - - -9.405382108534 - -21.66381086467 - -9.162017165392 - - -8.053437460588 - -18.54982008449 - -7.845054182336 - - -9.405382108542 - -21.66381086467 - -9.162017165389 - - -8.053437460599 - -18.54982008449 - -7.845054182336 - - -7.201412728951 - -16.58731579102 - -7.015075646493 - - -5.482178202195 - -12.6273308632 - -5.340326447797 - - -4.683343670951 - -10.78734179755 - -4.56216181608 - - -6.699455120896 - -15.43113581877 - -6.526106239335 - - -3.676712397269 - -8.468725787819 - -3.581577199013 - - -3.144291186939 - -7.242377695458 - -3.062932425797 - - -2.721950037932 - -6.269581622885 - -2.651519384437 - - -9.787418446009 - -22.54377112295 - -9.534168263261 - - -10.20110565512 - -23.49663439003 - -9.937151359956 - - -10.09564030674 - -23.25371150768 - -9.834414883103 - - -5.556847609202 - -12.79932005394 - -5.413063785002 - - -5.5568476092 - -12.79932005394 - -5.413063785004 - - -5.55684760921 - -12.79932005394 - -5.413063785003 - - -2.538438570694 - -5.846891967821 - -2.472756289638 - - -9.787418446088 - -22.54377112291 - -9.534168263241 - - -10.20110565518 - -23.49663438999 - -9.937151359941 - - -10.09564030673 - -23.25371150769 - -9.834414883109 - - -5.556847609218 - -12.79932005394 - -5.413063785002 - - -5.556847609206 - -12.79932005394 - -5.413063785005 - - -5.556847609203 - -12.79932005394 - -5.413063785005 - - -2.538438570694 - -5.846891967822 - -2.472756289638 - - -9.787418445993 - -22.54377112296 - -9.534168263264 - - -10.20110565492 - -23.49663439014 - -9.93715136 - - -10.09564030673 - -23.25371150769 - -9.834414883108 - - -5.556847609201 - -12.79932005394 - -5.413063785003 - - -5.556847609201 - -12.79932005394 - -5.413063785005 - - -5.556847609204 - -12.79932005394 - 
-5.413063785003 - - -2.538438570694 - -5.846891967822 - -2.472756289638 - - -9.787418446088 - -22.54377112291 - -9.534168263241 - - -10.20110565488 - -23.49663439015 - -9.937151360001 - - -10.09564030671 - -23.2537115077 - -9.834414883111 - - -5.556847609208 - -12.79932005394 - -5.413063785002 - - -2.538438570694 - -5.846891967821 - -2.472756289638 - - -5.556847609206 - -12.79932005394 - -5.413063785005 - - -5.5568476092 - -12.79932005394 - -5.413063785004 - - -2.538438570694 - -5.846891967822 - -2.472756289638 - - -9.787418445993 - -22.54377112296 - -9.534168263264 - - -10.20110565491 - -23.49663439014 - -9.937151360001 - - -10.99335463642 - -25.3214545631 - -10.70890079349 - - -10.09564030672 - -23.25371150769 - -9.834414883108 - - -8.709021129272 - -20.05985343038 - -8.483674576579 - - -8.709021129268 - -20.05985343037 - -8.483674576577 - - -7.480399516444 - -17.22991776468 - -7.28684363545 - - -5.556847609193 - -12.79932005395 - -5.413063785006 - - -4.20144396755 - -9.677362009312 - -4.092731301918 - - -3.391504667746 - -7.81179486874 - -3.303749239997 - - -2.538438570692 - -5.846891967823 - -2.472756289639 - - -9.787418445998 - -22.54377112296 - -9.534168263261 - - -10.20110565491 - -23.49663439014 - -9.937151359998 - - -10.9933546364 - -25.32145456311 - -10.70890079349 - - -10.09564030674 - -23.25371150767 - -9.834414883101 - - -8.709021129253 - -20.05985343039 - -8.483674576584 - - -8.709021129268 - -20.05985343037 - -8.483674576576 - - -7.48039951644 - -17.22991776469 - -7.28684363545 - - -5.556847609206 - -12.79932005394 - -5.413063785005 - - -4.20144396755 - -9.67736200931 - -4.092731301916 - - -3.391504667747 - -7.81179486874 - -3.303749239997 - - -2.538438570692 - -5.846891967823 - -2.472756289639 - - -9.787418445957 - -22.54377112297 - -9.534168263268 - - -10.20110565491 - -23.49663439014 - -9.937151360001 - - -10.9933546364 - -25.32145456311 - -10.70890079349 - - -10.09564030673 - -23.25371150769 - -9.834414883109 - - -8.709021129265 - -20.05985343038 
- -8.483674576582 - - -8.709021129256 - -20.05985343039 - -8.483674576583 - - -7.480399516473 - -17.22991776467 - -7.286843635444 - - -5.556847609215 - -12.79932005393 - -5.413063784999 - - -4.201443967553 - -9.677362009307 - -4.092731301915 - - -3.391504667745 - -7.811794868741 - -3.303749239997 - - -2.538438570696 - -5.846891967821 - -2.472756289638 - - -9.787418446088 - -22.54377112291 - -9.534168263241 - - -10.78162277845 - -24.83376357139 - -10.50264745927 - - -10.20110565491 - -23.49663439014 - -9.937151359998 - - -10.98873563222 - -25.31081542293 - -10.70440130878 - - -10.99335463641 - -25.3214545631 - -10.70890079348 - - -10.68909540506 - -24.62064151411 - -10.41251428864 - - -8.053437460599 - -18.54982008449 - -7.845054182335 - - -8.053437460597 - -18.54982008449 - -7.845054182339 - - -7.201412728949 - -16.58731579102 - -7.015075646491 - - -5.482178202193 - -12.62733086319 - -5.340326447795 - - -4.683343670948 - -10.78734179755 - -4.562161816081 - - -6.699455120895 - -15.43113581877 - -6.526106239335 - - -3.676712397264 - -8.468725787819 - -3.581577199012 - - -3.144291186941 - -7.242377695458 - -3.062932425797 - - -2.721950037928 - -6.269581622887 - -2.651519384438 - - -5.556847609219 - -12.79932005393 - -5.413063785001 - - -5.556847609216 - -12.79932005393 - -5.413063784998 - - -5.556847609211 - -12.79932005394 - -5.413063785002 - - -5.556847609201 - -12.79932005394 - -5.413063785005 - - -5.556847609211 - -12.79932005394 - -5.413063785004 - - -5.556847609195 - -12.79932005395 - -5.413063785005 - - -9.787418446041 - -22.54377112293 - -9.534168263253 - - -10.20110565515 - -23.49663439001 - -9.93715135995 - - -10.9933546364 - -25.32145456311 - -10.70890079349 - - -10.09564030672 - -23.25371150769 - -9.834414883109 - - -8.709021129238 - -20.0598534304 - -8.48367457659 - - -7.480399516449 - -17.22991776469 - -7.286843635451 - - -8.709021129259 - -20.05985343038 - -8.483674576582 - - -7.480399516461 - -17.22991776468 - -7.286843635449 - - -5.556847609196 - 
-12.79932005395 - -5.413063785006 - - -4.201443967551 - -9.67736200931 - -4.092731301917 - - -3.391504667748 - -7.81179486874 - -3.303749239997 - - -2.538438570695 - -5.846891967823 - -2.472756289639 - - -9.78741844603 - -22.54377112294 - -9.534168263253 - - -10.20110565517 - -23.49663439 - -9.937151359944 - - -10.9933546364 - -25.3214545631 - -10.70890079348 - - -10.09564030673 - -23.25371150769 - -9.834414883112 - - -8.70902112924 - -20.05985343039 - -8.483674576586 - - -7.480399516459 - -17.22991776468 - -7.286843635448 - - -8.70902112926 - -20.05985343038 - -8.483674576581 - - -7.48039951645 - -17.22991776468 - -7.286843635449 - - -5.556847609209 - -12.79932005394 - -5.413063785003 - - -4.201443967552 - -9.677362009307 - -4.092731301915 - - -3.391504667747 - -7.811794868738 - -3.303749239996 - - -2.538438570693 - -5.846891967823 - -2.472756289639 - - -9.787418446052 - -22.54377112293 - -9.53416826325 - - -10.20110565489 - -23.49663439015 - -9.937151360001 - - -10.9933546364 - -25.3214545631 - -10.70890079348 - - -10.09564030673 - -23.25371150769 - -9.834414883108 - - -8.709021129257 - -20.05985343039 - -8.483674576583 - - -7.480399516466 - -17.22991776468 - -7.28684363545 - - -8.709021129236 - -20.0598534304 - -8.48367457659 - - -7.480399516456 - -17.22991776468 - -7.286843635447 - - -5.556847609203 - -12.79932005394 - -5.413063785001 - - -4.201443967549 - -9.677362009311 - -4.092731301917 - - -3.391504667747 - -7.81179486874 - -3.303749239997 - - -2.538438570692 - -5.846891967825 - -2.47275628964 - - -5.556847609209 - -12.79932005394 - -5.413063785004 - - -9.787418446139 - -22.54377112288 - -9.534168263229 - - -10.20110565488 - -23.49663439015 - -9.937151360001 - - -10.09564030672 - -23.25371150769 - -9.834414883111 - - -5.556847609204 - -12.79932005394 - -5.413063785003 - - -2.538438570693 - -5.846891967821 - -2.472756289637 - - -5.556847609215 - -12.79932005393 - -5.413063785001 - - -2.538438570694 - -5.846891967823 - -2.472756289639 - - -5.5568476092 - 
-12.79932005394 - -5.413063785004 - - -5.556847609203 - -12.79932005394 - -5.413063785005 - - -5.556847609215 - -12.79932005393 - -5.413063784999 - - -9.405382108518 - -21.66381086468 - -9.162017165391 - - -9.40538210851 - -21.66381086468 - -9.162017165394 - - -9.787418446052 - -22.54377112293 - -9.53416826325 - - -10.20110565522 - -23.49663438999 - -9.937151359941 - - -9.405382108525 - -21.66381086468 - -9.162017165398 - - -9.405382108532 - -21.66381086467 - -9.16201716539 - - -7.201412728992 - -16.587315791 - -7.015075646484 - - -4.683343670952 - -10.78734179755 - -4.56216181608 - - -9.405382108516 - -21.66381086468 - -9.162017165392 - - -9.405382108524 - -21.66381086468 - -9.162017165394 - - -9.787418446016 - -22.54377112294 - -9.534168263253 - - -10.20110565512 - -23.49663439003 - -9.937151359954 - - -7.201412728965 - -16.58731579101 - -7.015075646488 - - -4.68334367095 - -10.78734179755 - -4.56216181608 - - -9.405382108516 - -21.66381086468 - -9.162017165392 - - -9.405382108518 - -21.66381086468 - -9.162017165395 lower_triangular: - - -0.1507316305838 - 0.3652114026571 - -0.009929602309966 - -0.01905060831947 - 0.003262911080217 - -0.01695118100081 - -0.0002604899716981 - - 0.1044075404358 - -0.2033554793648 - -0.04901244210073 - 0.002419549056951 - -0.000600520783894 - 0.008574072391572 - 0.001713650401476 - - 0.08359492814255 - -0.1891075948275 - 0.003335257628927 - 0.0118310729907 - -0.003470346928518 - 0.01057835837333 - -0.002245237316082 - - -0.7199325351227 - -0.1628252921563 - 1.64250205349 - 0.2023897915246 - -0.01194347807638 - 0.08702438194994 - 0.008210075299701 - - -0.07653108379319 - 0.06625813621784 - 0.1592803660839 - 0.02907110965794 - -0.01119440066196 - 0.01377712744979 - -0.01253655518419 - - -0.04476255198824 - 0.03879496991897 - -0.001710465543054 - -0.01718375258754 - 0.01249119968731 - -0.01285690490792 - 0.0138241209585 monomial_basis: - - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - - 0.000421998138113 - 0.003227454378072 - -0.005190483660171 
- 0.01234183379235 - -0.02806999966988 - 0.0319208998729 - - 0.0002235545535664 - 0.001541433438831 - -0.002293087033098 - 0.005314177252134 - -0.01118012852874 - 0.01176054805746 - - 0.39605157772 - -0.2263871121141 - -0.2073114454022 - 0.06470258851941 - 0.08379309531591 - 0.05425812925951 - - 0.0001709597243704 - 0.001302429203959 - -0.002062390549796 - 0.004961173860017 - -0.01111005551172 - 0.01243993225759 - - 0.0009747214789788 - 0.002943599394654 - -0.0004126754404125 - 0.004444745285232 - -0.000881235114793 - 8.735881110268e-05 - - 0.0009555976207896 - -0.05886419129446 - -0.003901308645878 - 1.81299792997 - 0.1699305387922 - 0.007963712350931 n_modelpoints: 7 square_terms_expected: - - - -5.470646952694 - 10.97753789279 - -59.18627305619 - - 10.97753789279 - -18.22671503454 - -45.74708648663 - - -59.18627305619 - -45.74708648663 - -184.4361625451 - - - -6.026354345269 - 12.09263433612 - -65.19840466722 - - 12.09263433612 - -20.07818166598 - -50.39406779834 - - -65.19840466722 - -50.39406779834 - -203.1711567547 - - - -5.701876161034 - 11.44152824299 - -61.68791411975 - - 11.44152824299 - -18.99710817628 - -47.68069007599 - - -61.68791411975 - -47.68069007599 - -192.2317721381 - - - -6.142119454382 - 12.32493151818 - -66.45085349382 - - 12.32493151818 - -20.46387967883 - -51.36212815726 - - -66.45085349382 - -51.36212815726 - -207.0740356415 - - - -6.144701231192 - 12.33011217653 - -66.4787854274 - - 12.33011217653 - -20.47248146039 - -51.3837177002 - - -66.4787854274 - -51.3837177002 - -207.1610770849 - - - -5.974636482892 - 11.98885597141 - -64.63887531859 - - 11.98885597141 - -19.90587174644 - -49.96158850494 - - -64.63887531859 - -49.96158850494 - -201.4275523606 - - - -5.257108945726 - 10.54904715237 - -56.8760309773 - - 10.54904715237 - -17.51526418892 - -43.96142169055 - - -56.8760309773 - -43.96142169055 - -177.2369901437 - - - -4.501443708068 - 9.032710263457 - -48.70057942984 - - 9.032710263457 - -14.9975921352 - -37.642336712 - - -48.70057942984 
- -37.642336712 - -151.7606620589 - - - -5.257108945726 - 10.54904715237 - -56.8760309773 - - 10.54904715237 - -17.51526418892 - -43.96142169054 - - -56.8760309773 - -43.96142169054 - -177.2369901437 - - - -4.501443708068 - 9.032710263457 - -48.70057942984 - - 9.032710263457 - -14.9975921352 - -37.642336712 - - -48.70057942984 - -37.642336712 - -151.7606620589 - - - -4.025207143397 - 8.077081984078 - -43.54823317181 - - 8.077081984078 - -13.41090079341 - -33.65991278671 - - -43.54823317181 - -33.65991278671 - -135.7049294898 - - - -3.06424638066 - 6.148793926422 - -33.1517139682 - - 6.148793926422 - -10.20923961279 - -25.62408896111 - - -33.1517139682 - -25.62408896111 - -103.3073141862 - - - -2.617740314238 - 5.252823613251 - -28.32101840387 - - 5.252823613251 - -8.721602244758 - -21.89027981319 - - -28.32101840387 - -21.89027981319 - -88.25390895707 - - - -3.744639512008 - 7.514087911923 - -40.51280562804 - - 7.514087911923 - -12.47612538042 - -31.31372744325 - - -40.51280562804 - -31.31372744325 - -126.2459353865 - - - -2.05508690845 - 4.123789125024 - -22.23373870938 - - 4.123789125024 - -6.846993376853 - -17.18521398846 - - -22.23373870938 - -17.18521398846 - -69.28473841762 - - - -1.757491736179 - 3.526627160717 - -19.0140922437 - - 3.526627160717 - -5.855486805939 - -14.69663956546 - - -19.0140922437 - -14.69663956546 - -59.25168162552 - - - -1.52142547032 - 3.052930648937 - -16.46011963474 - - 3.052930648937 - -5.068977898606 - -12.72258714093 - - -16.46011963474 - -12.72258714093 - -51.29299656357 - - - -5.470646952694 - 10.97753789279 - -59.18627305619 - - 10.97753789279 - -18.22671503454 - -45.74708648663 - - -59.18627305619 - -45.74708648663 - -184.4361625451 - - - -6.026354345269 - 12.09263433613 - -65.19840466724 - - 12.09263433613 - -20.07818166599 - -50.39406779837 - - -65.19840466724 - -50.39406779837 - -203.1711567547 - - - -5.701876161034 - 11.44152824299 - -61.68791411974 - - 11.44152824299 - -18.99710817628 - -47.68069007598 - - -61.68791411974 
- -47.68069007598 - -192.231772138 - - - -6.142119454382 - 12.32493151818 - -66.45085349382 - - 12.32493151818 - -20.46387967883 - -51.36212815727 - - -66.45085349382 - -51.36212815727 - -207.0740356415 - - - -6.144701231193 - 12.33011217653 - -66.47878542741 - - 12.33011217653 - -20.47248146039 - -51.3837177002 - - -66.47878542741 - -51.3837177002 - -207.1610770849 - - - -5.974636482893 - 11.98885597141 - -64.63887531859 - - 11.98885597141 - -19.90587174644 - -49.96158850494 - - -64.63887531859 - -49.96158850494 - -201.4275523606 - - - -5.257108945726 - 10.54904715237 - -56.8760309773 - - 10.54904715237 - -17.51526418892 - -43.96142169055 - - -56.8760309773 - -43.96142169055 - -177.2369901437 - - - -4.501443708068 - 9.032710263457 - -48.70057942984 - - 9.032710263457 - -14.9975921352 - -37.642336712 - - -48.70057942984 - -37.642336712 - -151.7606620589 - - - -5.257108945726 - 10.54904715237 - -56.8760309773 - - 10.54904715237 - -17.51526418892 - -43.96142169055 - - -56.8760309773 - -43.96142169055 - -177.2369901437 - - - -4.501443708068 - 9.032710263457 - -48.70057942984 - - 9.032710263457 - -14.9975921352 - -37.642336712 - - -48.70057942984 - -37.642336712 - -151.7606620589 - - - -4.025207143397 - 8.077081984077 - -43.54823317181 - - 8.077081984077 - -13.41090079341 - -33.65991278671 - - -43.54823317181 - -33.65991278671 - -135.7049294897 - - - -3.06424638066 - 6.148793926422 - -33.1517139682 - - 6.148793926422 - -10.20923961279 - -25.62408896111 - - -33.1517139682 - -25.62408896111 - -103.3073141862 - - - -2.617740314238 - 5.252823613251 - -28.32101840387 - - 5.252823613251 - -8.721602244759 - -21.89027981319 - - -28.32101840387 - -21.89027981319 - -88.25390895707 - - - -3.744639512008 - 7.514087911923 - -40.51280562804 - - 7.514087911923 - -12.47612538042 - -31.31372744325 - - -40.51280562804 - -31.31372744325 - -126.2459353865 - - - -2.05508690845 - 4.123789125024 - -22.23373870938 - - 4.123789125024 - -6.846993376853 - -17.18521398846 - - -22.23373870938 - 
-17.18521398846 - -69.28473841762 - - - -1.757491736179 - 3.526627160717 - -19.0140922437 - - 3.526627160717 - -5.85548680594 - -14.69663956546 - - -19.0140922437 - -14.69663956546 - -59.25168162552 - - - -1.521425470319 - 3.052930648937 - -16.46011963474 - - 3.052930648937 - -5.068977898607 - -12.72258714093 - - -16.46011963474 - -12.72258714093 - -51.29299656357 - - - -5.470646952694 - 10.97753789279 - -59.1862730562 - - 10.97753789279 - -18.22671503454 - -45.74708648664 - - -59.1862730562 - -45.74708648664 - -184.4361625451 - - - -6.026354345269 - 12.09263433612 - -65.19840466723 - - 12.09263433612 - -20.07818166599 - -50.39406779835 - - -65.19840466723 - -50.39406779835 - -203.1711567547 - - - -5.701876161033 - 11.44152824299 - -61.68791411974 - - 11.44152824299 - -18.99710817628 - -47.68069007598 - - -61.68791411974 - -47.68069007598 - -192.231772138 - - - -6.142119454382 - 12.32493151818 - -66.45085349382 - - 12.32493151818 - -20.46387967883 - -51.36212815726 - - -66.45085349382 - -51.36212815726 - -207.0740356415 - - - -6.144701231192 - 12.33011217653 - -66.47878542741 - - 12.33011217653 - -20.47248146039 - -51.38371770021 - - -66.47878542741 - -51.38371770021 - -207.1610770849 - - - -5.974636482892 - 11.98885597141 - -64.63887531859 - - 11.98885597141 - -19.90587174644 - -49.96158850494 - - -64.63887531859 - -49.96158850494 - -201.4275523606 - - - -5.257108945726 - 10.54904715237 - -56.8760309773 - - 10.54904715237 - -17.51526418892 - -43.96142169055 - - -56.8760309773 - -43.96142169055 - -177.2369901437 - - - -4.501443708068 - 9.032710263457 - -48.70057942984 - - 9.032710263457 - -14.9975921352 - -37.642336712 - - -48.70057942984 - -37.642336712 - -151.7606620589 - - - -5.257108945726 - 10.54904715237 - -56.8760309773 - - 10.54904715237 - -17.51526418892 - -43.96142169055 - - -56.8760309773 - -43.96142169055 - -177.2369901437 - - - -4.501443708068 - 9.032710263457 - -48.70057942984 - - 9.032710263457 - -14.9975921352 - -37.642336712 - - -48.70057942984 - 
-37.642336712 - -151.7606620589 - - - -4.025207143397 - 8.077081984077 - -43.54823317181 - - 8.077081984077 - -13.41090079341 - -33.65991278671 - - -43.54823317181 - -33.65991278671 - -135.7049294898 - - - -3.06424638066 - 6.148793926422 - -33.1517139682 - - 6.148793926422 - -10.20923961279 - -25.62408896111 - - -33.1517139682 - -25.62408896111 - -103.3073141862 - - - -2.617740314238 - 5.252823613251 - -28.32101840387 - - 5.252823613251 - -8.721602244758 - -21.89027981319 - - -28.32101840387 - -21.89027981319 - -88.25390895707 - - - -3.744639512008 - 7.514087911923 - -40.51280562804 - - 7.514087911923 - -12.47612538042 - -31.31372744325 - - -40.51280562804 - -31.31372744325 - -126.2459353865 - - - -2.05508690845 - 4.123789125024 - -22.23373870938 - - 4.123789125024 - -6.846993376854 - -17.18521398846 - - -22.23373870938 - -17.18521398846 - -69.28473841762 - - - -1.757491736179 - 3.526627160717 - -19.0140922437 - - 3.526627160717 - -5.855486805939 - -14.69663956546 - - -19.0140922437 - -14.69663956546 - -59.25168162552 - - - -1.52142547032 - 3.052930648937 - -16.46011963474 - - 3.052930648937 - -5.068977898607 - -12.72258714093 - - -16.46011963474 - -12.72258714093 - -51.29299656357 - - - -5.470646952694 - 10.97753789279 - -59.18627305619 - - 10.97753789279 - -18.22671503454 - -45.74708648663 - - -59.18627305619 - -45.74708648663 - -184.4361625451 - - - -6.026354345272 - 12.09263433612 - -65.19840466723 - - 12.09263433612 - -20.07818166598 - -50.39406779835 - - -65.19840466723 - -50.39406779835 - -203.1711567547 - - - -5.701876161033 - 11.44152824299 - -61.68791411973 - - 11.44152824299 - -18.99710817628 - -47.68069007597 - - -61.68791411973 - -47.68069007597 - -192.231772138 - - - -6.142119454382 - 12.32493151818 - -66.45085349382 - - 12.32493151818 - -20.46387967883 - -51.36212815726 - - -66.45085349382 - -51.36212815726 - -207.0740356415 - - - -6.144701231192 - 12.33011217653 - -66.4787854274 - - 12.33011217653 - -20.47248146039 - -51.3837177002 - - 
-66.4787854274 - -51.3837177002 - -207.1610770849 - - - -5.974636482892 - 11.98885597141 - -64.63887531859 - - 11.98885597141 - -19.90587174644 - -49.96158850494 - - -64.63887531859 - -49.96158850494 - -201.4275523606 - - - -5.257108945726 - 10.54904715237 - -56.8760309773 - - 10.54904715237 - -17.51526418892 - -43.96142169055 - - -56.8760309773 - -43.96142169055 - -177.2369901437 - - - -4.501443708068 - 9.032710263457 - -48.70057942984 - - 9.032710263457 - -14.9975921352 - -37.642336712 - - -48.70057942984 - -37.642336712 - -151.7606620589 - - - -5.257108945726 - 10.54904715237 - -56.8760309773 - - 10.54904715237 - -17.51526418892 - -43.96142169055 - - -56.8760309773 - -43.96142169055 - -177.2369901437 - - - -4.501443708068 - 9.032710263457 - -48.70057942984 - - 9.032710263457 - -14.9975921352 - -37.642336712 - - -48.70057942984 - -37.642336712 - -151.7606620589 - - - -4.025207143398 - 8.077081984078 - -43.54823317181 - - 8.077081984078 - -13.41090079341 - -33.65991278671 - - -43.54823317181 - -33.65991278671 - -135.7049294898 - - - -3.06424638066 - 6.148793926422 - -33.1517139682 - - 6.148793926422 - -10.20923961279 - -25.62408896111 - - -33.1517139682 - -25.62408896111 - -103.3073141862 - - - -2.617740314238 - 5.252823613251 - -28.32101840387 - - 5.252823613251 - -8.721602244758 - -21.89027981319 - - -28.32101840387 - -21.89027981319 - -88.25390895707 - - - -3.744639512008 - 7.514087911923 - -40.51280562804 - - 7.514087911923 - -12.47612538042 - -31.31372744325 - - -40.51280562804 - -31.31372744325 - -126.2459353865 - - - -2.05508690845 - 4.123789125024 - -22.23373870938 - - 4.123789125024 - -6.846993376853 - -17.18521398846 - - -22.23373870938 - -17.18521398846 - -69.28473841762 - - - -1.757491736179 - 3.526627160717 - -19.0140922437 - - 3.526627160717 - -5.855486805939 - -14.69663956546 - - -19.0140922437 - -14.69663956546 - -59.25168162552 - - - -1.52142547032 - 3.052930648937 - -16.46011963474 - - 3.052930648937 - -5.068977898606 - -12.72258714093 - - 
-16.46011963474 - -12.72258714093 - -51.29299656357 - - - -5.470646952693 - 10.97753789279 - -59.18627305619 - - 10.97753789279 - -18.22671503454 - -45.74708648663 - - -59.18627305619 - -45.74708648663 - -184.4361625451 - - - -5.701876161034 - 11.44152824299 - -61.68791411974 - - 11.44152824299 - -18.99710817628 - -47.68069007597 - - -61.68791411974 - -47.68069007597 - -192.231772138 - - - -5.642926609319 - 11.32323858867 - -61.05014599234 - - 11.32323858867 - -18.80070422381 - -47.18773736707 - - -61.05014599234 - -47.18773736707 - -190.2443601916 - - - -3.105982615526 - 6.23254290595 - -33.60325328599 - - 6.23254290595 - -10.34829345167 - -25.97309908035 - - -33.60325328599 - -25.97309908035 - -104.7144002338 - - - -3.105982615525 - 6.23254290595 - -33.60325328599 - - 6.23254290595 - -10.34829345168 - -25.97309908035 - - -33.60325328599 - -25.97309908035 - -104.7144002338 - - - -3.105982615526 - 6.23254290595 - -33.60325328599 - - 6.23254290595 - -10.34829345167 - -25.97309908035 - - -33.60325328599 - -25.97309908035 - -104.7144002338 - - - -1.418852308994 - 2.847104761256 - -15.35039290828 - - 2.847104761256 - -4.727231886138 - -11.86484155382 - - -15.35039290828 - -11.86484155382 - -47.83486804263 - - - -5.470646952694 - 10.97753789279 - -59.1862730562 - - 10.97753789279 - -18.22671503454 - -45.74708648664 - - -59.1862730562 - -45.74708648664 - -184.4361625451 - - - -5.701876161034 - 11.44152824299 - -61.68791411975 - - 11.44152824299 - -18.99710817628 - -47.68069007599 - - -61.68791411975 - -47.68069007599 - -192.2317721381 - - - -5.642926609318 - 11.32323858867 - -61.05014599234 - - 11.32323858867 - -18.80070422381 - -47.18773736707 - - -61.05014599234 - -47.18773736707 - -190.2443601916 - - - -3.105982615525 - 6.23254290595 - -33.60325328599 - - 6.23254290595 - -10.34829345167 - -25.97309908035 - - -33.60325328599 - -25.97309908035 - -104.7144002338 - - - -3.105982615526 - 6.23254290595 - -33.60325328599 - - 6.23254290595 - -10.34829345167 - -25.97309908035 
- - -33.60325328599 - -25.97309908035 - -104.7144002338 - - - -3.105982615525 - 6.23254290595 - -33.60325328599 - - 6.23254290595 - -10.34829345167 - -25.97309908035 - - -33.60325328599 - -25.97309908035 - -104.7144002338 - - - -1.418852308994 - 2.847104761256 - -15.35039290828 - - 2.847104761256 - -4.727231886138 - -11.86484155382 - - -15.35039290828 - -11.86484155382 - -47.83486804263 - - - -5.470646952693 - 10.97753789279 - -59.18627305619 - - 10.97753789279 - -18.22671503454 - -45.74708648663 - - -59.18627305619 - -45.74708648663 - -184.4361625451 - - - -5.701876161035 - 11.44152824299 - -61.68791411974 - - 11.44152824299 - -18.99710817628 - -47.68069007598 - - -61.68791411974 - -47.68069007598 - -192.2317721381 - - - -5.642926609319 - 11.32323858867 - -61.05014599234 - - 11.32323858867 - -18.80070422381 - -47.18773736707 - - -61.05014599234 - -47.18773736707 - -190.2443601916 - - - -3.105982615525 - 6.23254290595 - -33.60325328599 - - 6.23254290595 - -10.34829345167 - -25.97309908035 - - -33.60325328599 - -25.97309908035 - -104.7144002338 - - - -3.105982615525 - 6.23254290595 - -33.60325328599 - - 6.23254290595 - -10.34829345167 - -25.97309908035 - - -33.60325328599 - -25.97309908035 - -104.7144002338 - - - -3.105982615525 - 6.23254290595 - -33.60325328599 - - 6.23254290595 - -10.34829345167 - -25.97309908035 - - -33.60325328599 - -25.97309908035 - -104.7144002338 - - - -1.418852308994 - 2.847104761256 - -15.35039290828 - - 2.847104761256 - -4.727231886138 - -11.86484155382 - - -15.35039290828 - -11.86484155382 - -47.83486804263 - - - -5.470646952694 - 10.97753789279 - -59.1862730562 - - 10.97753789279 - -18.22671503454 - -45.74708648664 - - -59.1862730562 - -45.74708648664 - -184.4361625451 - - - -5.701876161036 - 11.44152824299 - -61.68791411975 - - 11.44152824299 - -18.99710817628 - -47.68069007599 - - -61.68791411975 - -47.68069007599 - -192.2317721381 - - - -5.642926609319 - 11.32323858867 - -61.05014599234 - - 11.32323858867 - -18.80070422381 - 
-47.18773736707 - - -61.05014599234 - -47.18773736707 - -190.2443601916 - - - -3.105982615526 - 6.23254290595 - -33.60325328599 - - 6.23254290595 - -10.34829345167 - -25.97309908035 - - -33.60325328599 - -25.97309908035 - -104.7144002338 - - - -1.418852308994 - 2.847104761256 - -15.35039290828 - - 2.847104761256 - -4.727231886138 - -11.86484155382 - - -15.35039290828 - -11.86484155382 - -47.83486804263 - - - -3.105982615526 - 6.23254290595 - -33.60325328599 - - 6.23254290595 - -10.34829345167 - -25.97309908035 - - -33.60325328599 - -25.97309908035 - -104.7144002338 - - - -3.105982615525 - 6.23254290595 - -33.60325328599 - - 6.23254290595 - -10.34829345168 - -25.97309908035 - - -33.60325328599 - -25.97309908035 - -104.7144002338 - - - -1.418852308994 - 2.847104761256 - -15.35039290828 - - 2.847104761256 - -4.727231886138 - -11.86484155382 - - -15.35039290828 - -11.86484155382 - -47.83486804263 - - - -5.470646952693 - 10.97753789279 - -59.18627305619 - - 10.97753789279 - -18.22671503454 - -45.74708648663 - - -59.18627305619 - -45.74708648663 - -184.4361625451 - - - -5.701876161036 - 11.441528243 - -61.68791411976 - - 11.441528243 - -18.99710817628 - -47.680690076 - - -61.68791411976 - -47.680690076 - -192.2317721381 - - - -6.144701231192 - 12.33011217653 - -66.47878542741 - - 12.33011217653 - -20.47248146039 - -51.3837177002 - - -66.47878542741 - -51.3837177002 - -207.1610770849 - - - -5.642926609319 - 11.32323858867 - -61.05014599234 - - 11.32323858867 - -18.80070422381 - -47.18773736707 - - -61.05014599234 - -47.18773736707 - -190.2443601916 - - - -4.867880151755 - 9.768010855989 - -52.66501135193 - - 9.768010855989 - -16.21845919082 - -40.70658118363 - - -52.66501135193 - -40.70658118363 - -164.1146180123 - - - -4.867880151755 - 9.768010855989 - -52.66501135193 - - 9.768010855989 - -16.21845919082 - -40.70658118363 - - -52.66501135193 - -40.70658118363 - -164.1146180123 - - - -4.18114593895 - 8.389992696808 - -45.23531629256 - - 8.389992696808 - -13.93044665596 - 
-34.96391679718 - - -45.23531629256 - -34.96391679718 - -140.9622150161 - - - -3.105982615526 - 6.23254290595 - -33.60325328599 - - 6.23254290595 - -10.34829345167 - -25.97309908035 - - -33.60325328599 - -25.97309908035 - -104.7144002338 - - - -2.348383980319 - 4.712326412864 - -25.40688454246 - - 4.712326412864 - -7.824179840489 - -19.6378464886 - - -25.40688454246 - -19.6378464886 - -79.17289001818 - - - -1.895670938996 - 3.803901027596 - -20.50903646132 - - 3.803901027596 - -6.315862512005 - -15.85213287302 - - -20.50903646132 - -15.85213287302 - -63.91022423146 - - - -1.418852308994 - 2.847104761256 - -15.35039290828 - - 2.847104761256 - -4.727231886138 - -11.86484155382 - - -15.35039290828 - -11.86484155382 - -47.83486804263 - - - -5.470646952694 - 10.97753789279 - -59.18627305619 - - 10.97753789279 - -18.22671503454 - -45.74708648663 - - -59.18627305619 - -45.74708648663 - -184.4361625451 - - - -5.701876161035 - 11.44152824299 - -61.68791411974 - - 11.44152824299 - -18.99710817628 - -47.68069007598 - - -61.68791411974 - -47.68069007598 - -192.2317721381 - - - -6.144701231193 - 12.33011217653 - -66.47878542741 - - 12.33011217653 - -20.47248146039 - -51.38371770021 - - -66.47878542741 - -51.38371770021 - -207.1610770849 - - - -5.642926609319 - 11.32323858867 - -61.05014599234 - - 11.32323858867 - -18.80070422381 - -47.18773736707 - - -61.05014599234 - -47.18773736707 - -190.2443601916 - - - -4.867880151755 - 9.768010855989 - -52.66501135193 - - 9.768010855989 - -16.21845919082 - -40.70658118363 - - -52.66501135193 - -40.70658118363 - -164.1146180123 - - - -4.867880151755 - 9.768010855988 - -52.66501135193 - - 9.768010855988 - -16.21845919082 - -40.70658118362 - - -52.66501135193 - -40.70658118362 - -164.1146180123 - - - -4.18114593895 - 8.389992696808 - -45.23531629256 - - 8.389992696808 - -13.93044665596 - -34.96391679718 - - -45.23531629256 - -34.96391679718 - -140.9622150161 - - - -3.105982615526 - 6.23254290595 - -33.60325328599 - - 6.23254290595 - 
-10.34829345167 - -25.97309908035 - - -33.60325328599 - -25.97309908035 - -104.7144002338 - - - -2.34838398032 - 4.712326412864 - -25.40688454246 - - 4.712326412864 - -7.824179840489 - -19.6378464886 - - -25.40688454246 - -19.6378464886 - -79.17289001818 - - - -1.895670938996 - 3.803901027596 - -20.50903646132 - - 3.803901027596 - -6.315862512005 - -15.85213287302 - - -20.50903646132 - -15.85213287302 - -63.91022423146 - - - -1.418852308994 - 2.847104761256 - -15.35039290828 - - 2.847104761256 - -4.727231886138 - -11.86484155382 - - -15.35039290828 - -11.86484155382 - -47.83486804263 - - - -5.470646952693 - 10.97753789279 - -59.18627305619 - - 10.97753789279 - -18.22671503454 - -45.74708648663 - - -59.18627305619 - -45.74708648663 - -184.4361625451 - - - -5.701876161036 - 11.441528243 - -61.68791411976 - - 11.441528243 - -18.99710817628 - -47.680690076 - - -61.68791411976 - -47.680690076 - -192.2317721381 - - - -6.144701231193 - 12.33011217653 - -66.47878542741 - - 12.33011217653 - -20.47248146039 - -51.38371770021 - - -66.47878542741 - -51.38371770021 - -207.1610770849 - - - -5.642926609319 - 11.32323858867 - -61.05014599234 - - 11.32323858867 - -18.80070422381 - -47.18773736707 - - -61.05014599234 - -47.18773736707 - -190.2443601916 - - - -4.867880151755 - 9.768010855988 - -52.66501135193 - - 9.768010855988 - -16.21845919082 - -40.70658118363 - - -52.66501135193 - -40.70658118363 - -164.1146180123 - - - -4.867880151755 - 9.768010855989 - -52.66501135193 - - 9.768010855989 - -16.21845919082 - -40.70658118363 - - -52.66501135193 - -40.70658118363 - -164.1146180123 - - - -4.18114593895 - 8.389992696808 - -45.23531629256 - - 8.389992696808 - -13.93044665596 - -34.96391679718 - - -45.23531629256 - -34.96391679718 - -140.9622150161 - - - -3.105982615525 - 6.232542905949 - -33.60325328599 - - 6.232542905949 - -10.34829345167 - -25.97309908035 - - -33.60325328599 - -25.97309908035 - -104.7144002338 - - - -2.348383980319 - 4.712326412864 - -25.40688454246 - - 
4.712326412864 - -7.82417984049 - -19.6378464886 - - -25.40688454246 - -19.6378464886 - -79.17289001818 - - - -1.895670938996 - 3.803901027596 - -20.50903646132 - - 3.803901027596 - -6.315862512006 - -15.85213287302 - - -20.50903646132 - -15.85213287302 - -63.91022423146 - - - -1.418852308994 - 2.847104761256 - -15.35039290828 - - 2.847104761256 - -4.727231886138 - -11.86484155382 - - -15.35039290828 - -11.86484155382 - -47.83486804263 - - - -5.470646952694 - 10.97753789279 - -59.1862730562 - - 10.97753789279 - -18.22671503454 - -45.74708648664 - - -59.1862730562 - -45.74708648664 - -184.4361625451 - - - -6.026354345272 - 12.09263433613 - -65.19840466725 - - 12.09263433613 - -20.07818166598 - -50.39406779838 - - -65.19840466725 - -50.39406779838 - -203.1711567548 - - - -5.701876161035 - 11.44152824299 - -61.68791411974 - - 11.44152824299 - -18.99710817628 - -47.68069007598 - - -61.68791411974 - -47.68069007598 - -192.2317721381 - - - -6.142119454382 - 12.32493151818 - -66.45085349382 - - 12.32493151818 - -20.46387967883 - -51.36212815726 - - -66.45085349382 - -51.36212815726 - -207.0740356415 - - - -6.144701231193 - 12.33011217653 - -66.47878542741 - - 12.33011217653 - -20.47248146038 - -51.3837177002 - - -66.47878542741 - -51.3837177002 - -207.1610770849 - - - -5.974636482892 - 11.98885597141 - -64.63887531859 - - 11.98885597141 - -19.90587174644 - -49.96158850494 - - -64.63887531859 - -49.96158850494 - -201.4275523606 - - - -4.501443708068 - 9.032710263457 - -48.70057942984 - - 9.032710263457 - -14.9975921352 - -37.642336712 - - -48.70057942984 - -37.642336712 - -151.7606620589 - - - -4.501443708068 - 9.032710263457 - -48.70057942984 - - 9.032710263457 - -14.9975921352 - -37.642336712 - - -48.70057942984 - -37.642336712 - -151.7606620589 - - - -4.025207143398 - 8.077081984078 - -43.54823317181 - - 8.077081984078 - -13.41090079341 - -33.65991278671 - - -43.54823317181 - -33.65991278671 - -135.7049294898 - - - -3.06424638066 - 6.148793926422 - -33.1517139682 - - 
6.148793926422 - -10.20923961279 - -25.62408896111 - - -33.1517139682 - -25.62408896111 - -103.3073141862 - - - -2.617740314238 - 5.252823613251 - -28.32101840387 - - 5.252823613251 - -8.721602244758 - -21.89027981319 - - -28.32101840387 - -21.89027981319 - -88.25390895707 - - - -3.744639512008 - 7.514087911923 - -40.51280562804 - - 7.514087911923 - -12.47612538042 - -31.31372744325 - - -40.51280562804 - -31.31372744325 - -126.2459353865 - - - -2.05508690845 - 4.123789125024 - -22.23373870938 - - 4.123789125024 - -6.846993376853 - -17.18521398846 - - -22.23373870938 - -17.18521398846 - -69.28473841762 - - - -1.757491736179 - 3.526627160717 - -19.0140922437 - - 3.526627160717 - -5.85548680594 - -14.69663956546 - - -19.0140922437 - -14.69663956546 - -59.25168162552 - - - -1.52142547032 - 3.052930648937 - -16.46011963474 - - 3.052930648937 - -5.068977898606 - -12.72258714093 - - -16.46011963474 - -12.72258714093 - -51.29299656357 - - - -3.105982615525 - 6.23254290595 - -33.60325328599 - - 6.23254290595 - -10.34829345168 - -25.97309908035 - - -33.60325328599 - -25.97309908035 - -104.7144002338 - - - -3.105982615525 - 6.23254290595 - -33.60325328599 - - 6.23254290595 - -10.34829345167 - -25.97309908035 - - -33.60325328599 - -25.97309908035 - -104.7144002338 - - - -3.105982615526 - 6.23254290595 - -33.60325328599 - - 6.23254290595 - -10.34829345167 - -25.97309908035 - - -33.60325328599 - -25.97309908035 - -104.7144002338 - - - -3.105982615525 - 6.23254290595 - -33.60325328599 - - 6.23254290595 - -10.34829345167 - -25.97309908035 - - -33.60325328599 - -25.97309908035 - -104.7144002338 - - - -3.105982615526 - 6.23254290595 - -33.60325328599 - - 6.23254290595 - -10.34829345167 - -25.97309908035 - - -33.60325328599 - -25.97309908035 - -104.7144002338 - - - -3.105982615526 - 6.23254290595 - -33.60325328599 - - 6.23254290595 - -10.34829345167 - -25.97309908035 - - -33.60325328599 - -25.97309908035 - -104.7144002338 - - - -5.470646952693 - 10.97753789279 - -59.18627305619 - - 
10.97753789279 - -18.22671503454 - -45.74708648663 - - -59.18627305619 - -45.74708648663 - -184.4361625451 - - - -5.701876161035 - 11.441528243 - -61.68791411976 - - 11.441528243 - -18.99710817628 - -47.68069007601 - - -61.68791411976 - -47.68069007601 - -192.2317721381 - - - -6.144701231193 - 12.33011217653 - -66.47878542741 - - 12.33011217653 - -20.47248146039 - -51.38371770021 - - -66.47878542741 - -51.38371770021 - -207.1610770849 - - - -5.642926609318 - 11.32323858867 - -61.05014599234 - - 11.32323858867 - -18.80070422381 - -47.18773736707 - - -61.05014599234 - -47.18773736707 - -190.2443601916 - - - -4.867880151755 - 9.768010855989 - -52.66501135193 - - 9.768010855989 - -16.21845919082 - -40.70658118363 - - -52.66501135193 - -40.70658118363 - -164.1146180123 - - - -4.18114593895 - 8.389992696808 - -45.23531629256 - - 8.389992696808 - -13.93044665596 - -34.96391679718 - - -45.23531629256 - -34.96391679718 - -140.9622150161 - - - -4.867880151755 - 9.768010855988 - -52.66501135193 - - 9.768010855988 - -16.21845919082 - -40.70658118362 - - -52.66501135193 - -40.70658118362 - -164.1146180123 - - - -4.18114593895 - 8.389992696807 - -45.23531629256 - - 8.389992696807 - -13.93044665596 - -34.96391679718 - - -45.23531629256 - -34.96391679718 - -140.9622150161 - - - -3.105982615525 - 6.23254290595 - -33.60325328599 - - 6.23254290595 - -10.34829345167 - -25.97309908035 - - -33.60325328599 - -25.97309908035 - -104.7144002338 - - - -2.348383980319 - 4.712326412864 - -25.40688454246 - - 4.712326412864 - -7.824179840489 - -19.6378464886 - - -25.40688454246 - -19.6378464886 - -79.17289001818 - - - -1.895670938996 - 3.803901027596 - -20.50903646132 - - 3.803901027596 - -6.315862512006 - -15.85213287302 - - -20.50903646132 - -15.85213287302 - -63.91022423146 - - - -1.418852308994 - 2.847104761256 - -15.35039290828 - - 2.847104761256 - -4.727231886138 - -11.86484155382 - - -15.35039290828 - -11.86484155382 - -47.83486804263 - - - -5.470646952694 - 10.97753789279 - 
-59.18627305619 - - 10.97753789279 - -18.22671503454 - -45.74708648663 - - -59.18627305619 - -45.74708648663 - -184.4361625451 - - - -5.701876161033 - 11.44152824299 - -61.68791411973 - - 11.44152824299 - -18.99710817628 - -47.68069007597 - - -61.68791411973 - -47.68069007597 - -192.231772138 - - - -6.144701231193 - 12.33011217653 - -66.47878542741 - - 12.33011217653 - -20.47248146039 - -51.3837177002 - - -66.47878542741 - -51.3837177002 - -207.1610770849 - - - -5.642926609319 - 11.32323858867 - -61.05014599234 - - 11.32323858867 - -18.80070422381 - -47.18773736708 - - -61.05014599234 - -47.18773736708 - -190.2443601916 - - - -4.867880151755 - 9.768010855988 - -52.66501135193 - - 9.768010855988 - -16.21845919082 - -40.70658118362 - - -52.66501135193 - -40.70658118362 - -164.1146180123 - - - -4.18114593895 - 8.389992696807 - -45.23531629256 - - 8.389992696807 - -13.93044665596 - -34.96391679718 - - -45.23531629256 - -34.96391679718 - -140.9622150161 - - - -4.867880151755 - 9.768010855989 - -52.66501135193 - - 9.768010855989 - -16.21845919082 - -40.70658118363 - - -52.66501135193 - -40.70658118363 - -164.1146180123 - - - -4.18114593895 - 8.389992696808 - -45.23531629257 - - 8.389992696808 - -13.93044665596 - -34.96391679718 - - -45.23531629257 - -34.96391679718 - -140.9622150161 - - - -3.105982615526 - 6.23254290595 - -33.60325328599 - - 6.23254290595 - -10.34829345167 - -25.97309908035 - - -33.60325328599 - -25.97309908035 - -104.7144002338 - - - -2.348383980319 - 4.712326412864 - -25.40688454246 - - 4.712326412864 - -7.82417984049 - -19.6378464886 - - -25.40688454246 - -19.6378464886 - -79.17289001818 - - - -1.895670938996 - 3.803901027596 - -20.50903646132 - - 3.803901027596 - -6.315862512006 - -15.85213287302 - - -20.50903646132 - -15.85213287302 - -63.91022423146 - - - -1.418852308994 - 2.847104761256 - -15.35039290828 - - 2.847104761256 - -4.727231886138 - -11.86484155382 - - -15.35039290828 - -11.86484155382 - -47.83486804263 - - - -5.470646952694 - 
10.97753789279 - -59.18627305619 - - 10.97753789279 - -18.22671503454 - -45.74708648663 - - -59.18627305619 - -45.74708648663 - -184.4361625451 - - - -5.701876161035 - 11.44152824299 - -61.68791411974 - - 11.44152824299 - -18.99710817628 - -47.68069007598 - - -61.68791411974 - -47.68069007598 - -192.2317721381 - - - -6.144701231193 - 12.33011217653 - -66.47878542741 - - 12.33011217653 - -20.47248146039 - -51.3837177002 - - -66.47878542741 - -51.3837177002 - -207.1610770849 - - - -5.642926609319 - 11.32323858867 - -61.05014599234 - - 11.32323858867 - -18.80070422381 - -47.18773736707 - - -61.05014599234 - -47.18773736707 - -190.2443601916 - - - -4.867880151755 - 9.768010855988 - -52.66501135193 - - 9.768010855988 - -16.21845919082 - -40.70658118363 - - -52.66501135193 - -40.70658118363 - -164.1146180123 - - - -4.18114593895 - 8.389992696808 - -45.23531629256 - - 8.389992696808 - -13.93044665596 - -34.96391679718 - - -45.23531629256 - -34.96391679718 - -140.9622150161 - - - -4.867880151755 - 9.768010855989 - -52.66501135193 - - 9.768010855989 - -16.21845919082 - -40.70658118363 - - -52.66501135193 - -40.70658118363 - -164.1146180123 - - - -4.181145938951 - 8.389992696808 - -45.23531629256 - - 8.389992696808 - -13.93044665596 - -34.96391679718 - - -45.23531629256 - -34.96391679718 - -140.9622150161 - - - -3.105982615526 - 6.23254290595 - -33.60325328599 - - 6.23254290595 - -10.34829345167 - -25.97309908035 - - -33.60325328599 - -25.97309908035 - -104.7144002338 - - - -2.348383980319 - 4.712326412864 - -25.40688454246 - - 4.712326412864 - -7.82417984049 - -19.6378464886 - - -25.40688454246 - -19.6378464886 - -79.17289001818 - - - -1.895670938996 - 3.803901027596 - -20.50903646132 - - 3.803901027596 - -6.315862512005 - -15.85213287302 - - -20.50903646132 - -15.85213287302 - -63.91022423146 - - - -1.418852308994 - 2.847104761256 - -15.35039290828 - - 2.847104761256 - -4.727231886138 - -11.86484155382 - - -15.35039290828 - -11.86484155382 - -47.83486804263 - - - 
-3.105982615525 - 6.23254290595 - -33.60325328599 - - 6.23254290595 - -10.34829345167 - -25.97309908035 - - -33.60325328599 - -25.97309908035 - -104.7144002338 - - - -5.470646952691 - 10.97753789279 - -59.18627305619 - - 10.97753789279 - -18.22671503455 - -45.74708648663 - - -59.18627305619 - -45.74708648663 - -184.4361625451 - - - -5.701876161036 - 11.44152824299 - -61.68791411975 - - 11.44152824299 - -18.99710817628 - -47.68069007599 - - -61.68791411975 - -47.68069007599 - -192.2317721381 - - - -5.642926609319 - 11.32323858867 - -61.05014599234 - - 11.32323858867 - -18.80070422381 - -47.18773736707 - - -61.05014599234 - -47.18773736707 - -190.2443601916 - - - -3.105982615525 - 6.23254290595 - -33.60325328599 - - 6.23254290595 - -10.34829345167 - -25.97309908035 - - -33.60325328599 - -25.97309908035 - -104.7144002338 - - - -1.418852308994 - 2.847104761256 - -15.35039290828 - - 2.847104761256 - -4.727231886138 - -11.86484155382 - - -15.35039290828 - -11.86484155382 - -47.83486804263 - - - -3.105982615526 - 6.23254290595 - -33.60325328599 - - 6.23254290595 - -10.34829345167 - -25.97309908035 - - -33.60325328599 - -25.97309908035 - -104.7144002338 - - - -1.418852308994 - 2.847104761256 - -15.35039290828 - - 2.847104761256 - -4.727231886138 - -11.86484155382 - - -15.35039290828 - -11.86484155382 - -47.83486804263 - - - -3.105982615526 - 6.23254290595 - -33.60325328599 - - 6.23254290595 - -10.34829345167 - -25.97309908035 - - -33.60325328599 - -25.97309908035 - -104.7144002338 - - - -3.105982615525 - 6.23254290595 - -33.60325328599 - - 6.23254290595 - -10.34829345167 - -25.97309908035 - - -33.60325328599 - -25.97309908035 - -104.7144002338 - - - -3.105982615525 - 6.232542905949 - -33.60325328599 - - 6.232542905949 - -10.34829345167 - -25.97309908035 - - -33.60325328599 - -25.97309908035 - -104.7144002338 - - - -5.257108945726 - 10.54904715237 - -56.8760309773 - - 10.54904715237 - -17.51526418892 - -43.96142169054 - - -56.8760309773 - -43.96142169054 - -177.2369901437 - 
- - -5.257108945726 - 10.54904715237 - -56.8760309773 - - 10.54904715237 - -17.51526418892 - -43.96142169055 - - -56.8760309773 - -43.96142169055 - -177.2369901437 - - - -5.470646952693 - 10.97753789279 - -59.18627305619 - - 10.97753789279 - -18.22671503454 - -45.74708648663 - - -59.18627305619 - -45.74708648663 - -184.4361625451 - - - -5.701876161033 - 11.44152824299 - -61.68791411974 - - 11.44152824299 - -18.99710817628 - -47.68069007598 - - -61.68791411974 - -47.68069007598 - -192.231772138 - - - -5.257108945726 - 10.54904715237 - -56.8760309773 - - 10.54904715237 - -17.51526418892 - -43.96142169055 - - -56.8760309773 - -43.96142169055 - -177.2369901437 - - - -5.257108945726 - 10.54904715237 - -56.8760309773 - - 10.54904715237 - -17.51526418892 - -43.96142169054 - - -56.8760309773 - -43.96142169054 - -177.2369901437 - - - -4.025207143398 - 8.077081984078 - -43.54823317181 - - 8.077081984078 - -13.41090079341 - -33.65991278671 - - -43.54823317181 - -33.65991278671 - -135.7049294898 - - - -2.617740314238 - 5.252823613251 - -28.32101840387 - - 5.252823613251 - -8.721602244758 - -21.89027981319 - - -28.32101840387 - -21.89027981319 - -88.25390895707 - - - -5.257108945726 - 10.54904715237 - -56.8760309773 - - 10.54904715237 - -17.51526418892 - -43.96142169054 - - -56.8760309773 - -43.96142169054 - -177.2369901437 - - - -5.257108945726 - 10.54904715237 - -56.8760309773 - - 10.54904715237 - -17.51526418892 - -43.96142169055 - - -56.8760309773 - -43.96142169055 - -177.2369901437 - - - -5.470646952693 - 10.97753789279 - -59.18627305619 - - 10.97753789279 - -18.22671503454 - -45.74708648663 - - -59.18627305619 - -45.74708648663 - -184.4361625451 - - - -5.701876161035 - 11.44152824299 - -61.68791411975 - - 11.44152824299 - -18.99710817628 - -47.680690076 - - -61.68791411975 - -47.680690076 - -192.2317721381 - - - -4.025207143397 - 8.077081984077 - -43.54823317181 - - 8.077081984077 - -13.41090079341 - -33.65991278671 - - -43.54823317181 - -33.65991278671 - -135.7049294898 
- - - -2.617740314238 - 5.252823613251 - -28.32101840387 - - 5.252823613251 - -8.721602244758 - -21.89027981319 - - -28.32101840387 - -21.89027981319 - -88.25390895707 - - - -5.257108945726 - 10.54904715237 - -56.8760309773 - - 10.54904715237 - -17.51526418892 - -43.96142169054 - - -56.8760309773 - -43.96142169054 - -177.2369901437 - - - -5.257108945726 - 10.54904715237 - -56.8760309773 - - 10.54904715237 - -17.51526418892 - -43.96142169054 - - -56.8760309773 - -43.96142169054 - -177.2369901437 x_sample_monomial_basis: - - 1.0 - 0.0 - 0.0 - 0.0 - - 1.0 - -0.0290516140038 - -0.1571103675277 - 0.2526693486472 - - 1.0 - 0.02114495464958 - 0.1030939110921 - -0.1533658896721 - - 1.0 - 0.8900017727174 - -0.3597293107864 - -0.329418060402 - - 1.0 - 0.01849106402403 - 0.09961098192486 - -0.1577335237518 - - 1.0 - 0.04415249662202 - 0.09428409500262 - -0.01321807936901 - - 1.0 - 0.0437172190513 - -1.904204784139 - -0.1262039012941 ================================================ FILE: tests/optimagic/optimizers/_pounders/fixtures/get_interpolation_matrices_residual_model.yaml ================================================ --- basis_null_space_expected: - - -0.4583543462791 - -0.319506030216 - -0.6977037060623 - - 0.2311109444943 - -0.207102182158 - 0.2709008772413 - - -0.391511898797 - -0.2526774248775 - 0.6371234121103 - - 0.008236522535421 - -0.04467864269942 - -0.001160198289494 - - 0.7520131885729 - -0.1064594438631 - -0.1818415477397 - - -0.1324539613206 - 0.8805459169394 - -0.02874512409319 - - -0.009040449205893 - 0.04987780687459 - 0.001426286833001 delta: 0.025 history_x: - - 0.15 - 0.008 - 0.01 - - 0.25 - 0.008 - 0.01 - - 0.15 - 0.108 - 0.01 - - 0.15 - 0.008 - 0.11 - - 0.1596177824551 - -0.07539624732067 - 0.08766385239892 - - 0.2 - 0.008531162120637 - -0.002952684076318 - - 0.1505141617677 - -0.04199731338289 - 0.009934485345754 - - 0.1374618789969 - 0.007934485345754 - -0.03840238867598 - - 0.1505250437069 - 0.007964908595663 - 0.01275913089388 - - 
0.149883507892 - 0.008098080768719 - 0.009146244784311 - - 0.1716712756093 - -0.003385426549061 - 0.004854131368058 - - 0.1499498551576 - 0.008185153997901 - 0.009255435636305 - - 0.1486949409413 - 0.001680047032405 - 0.01940631659429 - - 0.1494212312914 - 0.005607806220598 - 0.01308958287811 lower_triangular_expected: - - -0.1507316305838 - 0.3652114026571 - -0.009929602309966 - -0.01905060831947 - 0.003262911080217 - -0.01695118100081 - -0.0002604899716981 - - 0.1044075404358 - -0.2033554793648 - -0.04901244210073 - 0.002419549056951 - -0.000600520783894 - 0.008574072391572 - 0.001713650401476 - - 0.08359492814255 - -0.1891075948275 - 0.003335257628927 - 0.0118310729907 - -0.003470346928518 - 0.01057835837333 - -0.002245237316082 - - -0.7199325351227 - -0.1628252921563 - 1.64250205349 - 0.2023897915246 - -0.01194347807638 - 0.08702438194994 - 0.008210075299701 - - -0.07653108379319 - 0.06625813621784 - 0.1592803660839 - 0.02907110965794 - -0.01119440066196 - 0.01377712744979 - -0.01253655518419 - - -0.04476255198824 - 0.03879496991897 - -0.001710465543054 - -0.01718375258754 - 0.01249119968731 - -0.01285690490792 - 0.0138241209585 model_indices: - 13 - 12 - 11 - 10 - 10 - 7 - 6 monomial_basis_expected: - - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - 0.0 - - 0.000421998138113 - 0.003227454378072 - -0.005190483660171 - 0.01234183379235 - -0.02806999966988 - 0.0319208998729 - - 0.0002235545535664 - 0.001541433438831 - -0.002293087033098 - 0.005314177252134 - -0.01118012852874 - 0.01176054805746 - - 0.39605157772 - -0.2263871121141 - -0.2073114454022 - 0.06470258851941 - 0.08379309531591 - 0.05425812925951 - - 0.0001709597243704 - 0.001302429203959 - -0.002062390549796 - 0.004961173860017 - -0.01111005551172 - 0.01243993225759 - - 0.0009747214789788 - 0.002943599394654 - -0.0004126754404125 - 0.004444745285232 - -0.000881235114793 - 8.735881110268e-05 - - 0.0009555976207896 - -0.05886419129446 - -0.003901308645878 - 1.81299792997 - 0.1699305387922 - 0.007963712350931 
n_modelpoints: 4 n_modelpoints_expected: 7 x_accepted: - 0.1494212312914 - 0.005607806220598 - 0.01308958287811 x_sample_monomial_basis_expected: - - 1.0 - 0.0 - 0.0 - 0.0 - - 1.0 - -0.0290516140038 - -0.1571103675277 - 0.2526693486472 - - 1.0 - 0.02114495464958 - 0.1030939110921 - -0.1533658896721 - - 1.0 - 0.8900017727174 - -0.3597293107864 - -0.329418060402 - - 1.0 - 0.01849106402403 - 0.09961098192486 - -0.1577335237518 - - 1.0 - 0.04415249662202 - 0.09428409500262 - -0.01321807936901 - - 1.0 - 0.0437172190513 - -1.904204784139 - -0.1262039012941 ================================================ FILE: tests/optimagic/optimizers/_pounders/fixtures/interpolate_f_iter_4.yaml ================================================ --- delta_old: 0.0125 f_interpolated_expected: - - 2.396839767562 - 2.640310345903 - 2.498147595924 - 2.691030200433 - 2.692161347328 - 2.617651338669 - 2.303282937582 - 1.972205368828 - 2.303282937582 - 1.972205368828 - 1.763553129541 - 1.342529987111 - 1.146903490695 - 1.640628791315 - 0.9003896743169 - 0.7700051055971 - 0.6665780303914 - 2.396839767562 - 2.640310345903 - 2.498147595924 - 2.691030200433 - 2.692161347328 - 2.617651338669 - 2.303282937582 - 1.972205368828 - 2.303282937582 - 1.972205368828 - 1.763553129541 - 1.342529987111 - 1.146903490695 - 1.640628791315 - 0.9003896743169 - 0.7700051055971 - 0.6665780303914 - 2.396839767562 - 2.640310345904 - 2.498147595924 - 2.691030200433 - 2.692161347328 - 2.617651338669 - 2.303282937582 - 1.972205368828 - 2.303282937582 - 1.972205368828 - 1.763553129541 - 1.342529987111 - 1.146903490695 - 1.640628791315 - 0.9003896743169 - 0.7700051055971 - 0.6665780303914 - 2.396839767562 - 2.640310345904 - 2.498147595924 - 2.691030200433 - 2.692161347328 - 2.617651338669 - 2.303282937582 - 1.972205368828 - 2.303282937582 - 1.972205368828 - 1.763553129541 - 1.342529987111 - 1.146903490695 - 1.640628791315 - 0.9003896743169 - 0.7700051055971 - 0.6665780303914 - 2.396839767562 - 2.498147595924 - 
2.472320188045 - 1.360815770929 - 1.360815770929 - 1.360815770929 - 0.6216379283744 - 2.396839767562 - 2.498147595924 - 2.472320188045 - 1.360815770929 - 1.360815770929 - 1.360815770929 - 0.6216379283744 - 2.396839767562 - 2.498147595925 - 2.472320188045 - 1.360815770929 - 1.360815770929 - 1.360815770929 - 0.6216379283744 - 2.396839767562 - 2.498147595925 - 2.472320188046 - 1.360815770929 - 0.6216379283744 - 1.360815770929 - 1.360815770929 - 0.6216379283744 - 2.396839767562 - 2.498147595925 - 2.692161347328 - 2.472320188046 - 2.132751177783 - 2.132751177783 - 1.831874172693 - 1.360815770929 - 1.028891127921 - 0.8305451863645 - 0.6216379283744 - 2.396839767562 - 2.498147595925 - 2.692161347328 - 2.472320188045 - 2.132751177783 - 2.132751177783 - 1.831874172693 - 1.360815770929 - 1.028891127921 - 0.8305451863645 - 0.6216379283744 - 2.396839767562 - 2.498147595925 - 2.692161347328 - 2.472320188046 - 2.132751177783 - 2.132751177783 - 1.831874172693 - 1.360815770929 - 1.028891127921 - 0.8305451863645 - 0.6216379283744 - 2.396839767562 - 2.640310345903 - 2.498147595925 - 2.691030200433 - 2.692161347328 - 2.617651338669 - 1.972205368828 - 1.972205368828 - 1.763553129541 - 1.342529987111 - 1.146903490695 - 1.640628791315 - 0.9003896743169 - 0.7700051055971 - 0.6665780303914 - 1.360815770929 - 1.360815770929 - 1.360815770929 - 1.360815770929 - 1.360815770929 - 1.360815770929 - 2.396839767562 - 2.498147595924 - 2.692161347328 - 2.472320188046 - 2.132751177783 - 1.831874172693 - 2.132751177783 - 1.831874172693 - 1.360815770929 - 1.028891127921 - 0.8305451863645 - 0.6216379283744 - 2.396839767562 - 2.498147595924 - 2.692161347328 - 2.472320188045 - 2.132751177783 - 1.831874172693 - 2.132751177783 - 1.831874172693 - 1.360815770929 - 1.028891127921 - 0.8305451863645 - 0.6216379283744 - 2.396839767562 - 2.498147595925 - 2.692161347328 - 2.472320188045 - 2.132751177783 - 1.831874172693 - 2.132751177783 - 1.831874172693 - 1.360815770929 - 1.028891127921 - 0.8305451863645 - 
0.6216379283744 - 1.360815770929 - 2.396839767562 - 2.498147595925 - 2.472320188046 - 1.360815770929 - 0.6216379283744 - 1.360815770929 - 0.6216379283744 - 1.360815770929 - 1.360815770929 - 1.360815770929 - 2.303282937582 - 2.303282937582 - 2.396839767562 - 2.498147595924 - 2.303282937582 - 2.303282937582 - 1.763553129541 - 1.146903490695 - 2.303282937582 - 2.303282937582 - 2.396839767562 - 2.498147595924 - 1.763553129541 - 1.146903490695 - 2.303282937582 - 2.303282937582 - - 1.24344978758e-14 - -5.329070518201e-14 - 9.947598300641e-14 - 2.13162820728e-14 - 3.552713678801e-15 - -3.552713678801e-15 - 0.0 - 3.552713678801e-15 - 3.552713678801e-15 - -2.6645352591e-15 - -4.440892098501e-15 - 8.881784197001e-16 - -8.881784197001e-16 - 6.661338147751e-15 - 4.440892098501e-16 - 4.440892098501e-16 - -8.881784197001e-16 - -1.59872115546e-14 - -1.7763568394e-14 - -2.48689957516e-14 - -3.552713678801e-15 - -1.42108547152e-14 - -5.329070518201e-15 - 1.7763568394e-15 - 3.552713678801e-15 - 1.7763568394e-15 - 1.7763568394e-15 - 0.0 - 1.7763568394e-15 - -8.881784197001e-16 - 8.881784197001e-16 - 4.440892098501e-16 - 0.0 - 8.881784197001e-16 - -1.24344978758e-14 - -5.151434834261e-14 - -3.19744231092e-14 - -1.42108547152e-14 - -1.7763568394e-14 - 0.0 - 3.552713678801e-15 - 0.0 - -1.7763568394e-15 - -4.440892098501e-15 - 0.0 - 8.881784197001e-16 - 0.0 - 6.217248937901e-15 - 1.998401444325e-15 - 4.440892098501e-16 - -4.440892098501e-16 - 0.0 - -1.136868377216e-13 - -4.618527782441e-14 - -7.105427357601e-15 - 5.329070518201e-15 - 0.0 - -1.7763568394e-15 - 0.0 - 1.7763568394e-15 - 3.552713678801e-15 - -7.993605777300e-15 - 2.6645352591e-15 - -2.22044604925e-15 - 3.10862446895e-15 - 1.33226762955e-15 - 2.22044604925e-16 - -1.110223024625e-15 - -3.552713678801e-15 - 9.947598300641e-14 - -3.552713678801e-15 - 5.551115123126e-15 - 1.7763568394e-15 - 1.7763568394e-15 - -8.881784197001e-16 - -1.24344978758e-14 - 0.0 - -8.881784197001e-16 - 3.552713678801e-15 - 1.33226762955e-14 - 
6.217248937901e-15 - -5.551115123126e-16 - -1.86517468137e-14 - 1.101341240428e-13 - -7.105427357601e-15 - 2.6645352591e-15 - 2.22044604925e-15 - 2.6645352591e-15 - -8.881784197001e-16 - -1.24344978758e-14 - -1.06581410364e-14 - 3.552713678801e-15 - 6.661338147751e-15 - -8.881784197001e-16 - 1.33226762955e-14 - 1.7763568394e-15 - -8.881784197001e-16 - -1.86517468137e-14 - 3.19744231092e-14 - 1.68753899743e-14 - -2.6645352591e-15 - 6.217248937901e-15 - -1.7763568394e-15 - -8.881784197001e-16 - 6.217248937901e-15 - -8.881784197001e-16 - -8.881784197001e-16 - 4.440892098501e-16 - -1.50990331349e-14 - 9.592326932761e-14 - 0.0 - 0.0 - 6.661338147751e-16 - -1.110223024625e-15 - -1.7763568394e-15 - 1.33226762955e-14 - 0.0 - -8.881784197001e-16 - 4.440892098501e-16 - 1.06581410364e-14 - 1.7763568394e-14 - -2.6645352591e-15 - -3.552713678801e-15 - -8.881784197001e-16 - 2.6645352591e-15 - 0.0 - 3.552713678801e-15 - 8.881784197001e-16 - 8.881784197001e-16 - 0.0 - -1.24344978758e-14 - -2.13162820728e-14 - 9.592326932761e-14 - 0.0 - -2.57571741713e-14 - 1.7763568394e-15 - -8.881784197001e-16 - 1.7763568394e-15 - -3.552713678801e-15 - 7.771561172376e-16 - -1.998401444325e-15 - 4.440892098501e-15 - 1.443289932013e-15 - 2.775557561563e-16 - -1.665334536938e-16 - 7.105427357601e-15 - 8.881784197001e-15 - -1.7763568394e-15 - 2.22044604925e-15 - 1.24344978758e-14 - 3.552713678801e-15 - 1.24344978758e-14 - 1.42108547152e-14 - -2.6645352591e-15 - -5.329070518201e-15 - 0.0 - 0.0 - 0.0 - 1.7763568394e-15 - 7.105427357601e-15 - -8.881784197001e-16 - 0.0 - 0.0 - -2.6645352591e-15 - -4.618527782441e-14 - -7.105427357601e-15 - 5.329070518201e-15 - 2.6645352591e-15 - -8.881784197001e-16 - 1.7763568394e-15 - -8.881784197001e-16 - 5.329070518201e-15 - -2.22044604925e-16 - 4.440892098501e-16 - 8.881784197001e-16 - 7.105427357601e-15 - 7.105427357601e-15 - 3.153033389935e-14 - 0.0 - -4.440892098501e-15 - 0.0 - 2.6645352591e-15 - -1.7763568394e-15 - -8.881784197001e-16 - 8.881784197001e-16 - 
-8.881784197001e-16 - 0.0 - 4.884981308351e-15 - 8.881784197001e-16 - -1.06581410364e-14 - -1.7763568394e-15 - 2.6645352591e-15 - 2.22044604925e-16 - -7.549516567451e-15 - -4.440892098501e-16 - 6.217248937901e-15 - 6.217248937901e-15 - 3.552713678801e-15 - 4.440892098501e-16 - -1.7763568394e-15 - -1.7763568394e-15 - 4.618527782441e-14 - 1.7763568394e-15 - -1.7763568394e-15 - -1.7763568394e-15 - -8.881784197001e-16 - -8.881784197001e-16 - -8.881784197001e-16 - 8.881784197001e-15 - 5.684341886081e-14 - 0.0 - -2.22044604925e-15 - -8.881784197001e-16 - -1.7763568394e-15 - - -1.06581410364e-14 - -1.95399252334e-14 - 1.24344978758e-14 - 7.105427357601e-15 - 4.440892098501e-15 - 2.442490654175e-15 - -1.998401444325e-15 - 3.330669073875e-16 - -1.33226762955e-15 - -1.110223024625e-16 - 1.33226762955e-15 - 1.7763568394e-15 - -6.661338147751e-16 - 4.440892098501e-16 - 0.0 - 2.22044604925e-16 - 0.0 - 0.0 - 3.19744231092e-14 - -1.7763568394e-14 - 1.7763568394e-15 - 1.7763568394e-15 - 2.6645352591e-15 - 0.0 - 0.0 - 0.0 - 0.0 - -1.33226762955e-15 - 4.440892098501e-16 - 0.0 - -8.881784197001e-16 - 0.0 - 1.110223024625e-16 - -2.22044604925e-16 - 1.7763568394e-15 - -3.552713678801e-15 - 1.59872115546e-14 - 8.881784197001e-16 - -3.10862446895e-15 - 4.440892098501e-16 - 1.7763568394e-15 - 8.881784197001e-16 - 8.881784197001e-16 - 1.33226762955e-15 - -4.440892098501e-16 - 4.440892098501e-16 - -8.881784197001e-16 - 1.7763568394e-15 - -8.881784197001e-16 - 4.440892098501e-16 - -1.110223024625e-16 - 8.881784197001e-16 - -2.30926389122e-14 - 1.7763568394e-15 - 2.6645352591e-15 - 2.22044604925e-16 - 4.440892098501e-16 - 0.0 - 8.881784197001e-16 - 0.0 - 4.440892098501e-16 - -2.22044604925e-15 - 1.33226762955e-15 - -8.881784197001e-16 - -1.33226762955e-15 - 0.0 - 4.440892098501e-16 - 4.440892098501e-16 - -1.06581410364e-14 - -1.24344978758e-14 - 0.0 - 8.881784197001e-16 - -2.6645352591e-15 - -4.440892098501e-16 - -3.330669073875e-16 - 1.7763568394e-15 - 3.552713678801e-14 - 
-8.881784197001e-16 - 1.7763568394e-15 - 3.552713678801e-15 - -4.440892098501e-16 - 0.0 - -4.440892098501e-15 - 6.217248937901e-15 - -3.552713678801e-15 - -4.440892098501e-16 - -8.881784197001e-16 - 0.0 - 0.0 - 1.7763568394e-15 - 2.84217094304e-14 - -4.440892098501e-15 - 2.6645352591e-15 - -3.330669073875e-16 - 3.552713678801e-15 - -2.6645352591e-15 - 0.0 - -4.440892098501e-15 - -1.7763568394e-15 - 9.103828801926e-15 - -3.552713678801e-15 - 0.0 - 0.0 - -4.440892098501e-16 - 2.6645352591e-15 - -4.440892098501e-16 - 1.110223024625e-16 - 1.110223024625e-16 - -8.881784197001e-16 - -7.993605777300e-15 - 1.33226762955e-15 - 6.661338147751e-16 - 0.0 - -8.881784197001e-16 - -8.881784197001e-16 - 3.552713678801e-15 - 4.440892098501e-16 - 2.22044604925e-16 - -2.22044604925e-16 - 1.33226762955e-15 - -1.59872115546e-14 - 5.551115123126e-15 - 2.22044604925e-16 - -8.881784197001e-16 - 8.881784197001e-16 - 1.110223024625e-16 - -3.552713678801e-15 - -8.881784197001e-16 - -4.718447854657e-16 - 0.0 - 1.7763568394e-15 - -3.552713678801e-15 - -7.993605777300e-15 - 7.993605777301e-15 - -1.199040866595e-14 - -1.7763568394e-15 - 1.7763568394e-15 - 8.881784197001e-16 - 8.881784197001e-16 - 8.881784197001e-16 - -8.881784197001e-16 - -8.881784197001e-16 - 0.0 - 0.0 - 4.440892098501e-16 - -5.329070518201e-15 - 0.0 - 4.773959005888e-15 - -8.881784197001e-16 - 0.0 - 4.440892098501e-16 - -1.398881011028e-14 - -1.7763568394e-15 - 5.551115123126e-15 - -3.552713678801e-15 - -4.440892098501e-16 - 0.0 - -2.22044604925e-16 - 0.0 - 4.440892098501e-16 - 2.22044604925e-16 - 8.881784197001e-16 - 0.0 - 1.7763568394e-15 - -2.6645352591e-14 - 8.659739592076e-15 - 3.552713678801e-15 - 8.881784197001e-16 - -8.881784197001e-16 - -8.881784197001e-16 - 4.440892098501e-16 - -1.33226762955e-15 - 0.0 - 0.0 - 0.0 - -1.7763568394e-15 - 1.59872115546e-14 - 1.7763568394e-15 - 0.0 - 4.440892098501e-16 - 0.0 - -4.440892098501e-16 - 1.7763568394e-15 - 2.6645352591e-15 - -4.440892098501e-16 - 2.22044604925e-16 - 
1.665334536938e-16 - 5.329070518201e-15 - 3.552713678801e-15 - 2.84217094304e-14 - -4.440892098501e-16 - 0.0 - -6.661338147751e-16 - -3.552713678801e-15 - 4.440892098501e-16 - 8.881784197001e-16 - -4.440892098501e-16 - -3.552713678801e-15 - 2.6645352591e-15 - 1.7763568394e-15 - 3.552713678801e-15 - 6.217248937901e-15 - 4.440892098501e-16 - -4.440892098501e-16 - -8.881784197001e-16 - 0.0 - -3.10862446895e-15 - -1.7763568394e-15 - 1.24344978758e-14 - 1.59872115546e-14 - -4.440892098501e-16 - 8.881784197001e-16 - -3.10862446895e-15 - 4.440892098501e-16 - - -2.735578163993e-08 - -5.45305738342e-08 - 4.426374289324e-08 - -4.691855792771e-10 - -2.819547262334e-09 - -5.672461611539e-09 - -6.022133902661e-09 - -5.938645131209e-09 - -6.026155574546e-09 - -5.914529310758e-09 - -1.108718095111e-08 - -5.205436082178e-11 - -1.303774865846e-09 - 3.85213638765e-10 - -1.12522080542e-09 - -1.038296559841e-09 - -9.272147494244e-10 - -2.739852789091e-08 - -5.461970431497e-08 - 4.424782673595e-08 - -4.685034582508e-10 - -2.810566002154e-09 - -5.653987500409e-09 - -6.050512979527e-09 - -5.93539084548e-09 - -6.050512979527e-09 - -5.92861226778e-09 - -1.110882408284e-08 - -5.200462283028e-11 - -1.299493845863e-09 - 3.87061049878e-10 - -1.12522080542e-09 - -1.042401720497e-09 - -9.273968260004e-10 - -2.746151039901e-08 - -5.433957994683e-08 - 4.423691279953e-08 - -4.692992661148e-10 - -2.817955646606e-09 - -5.670756308973e-09 - -6.043791245247e-09 - -5.925457458034e-09 - -6.050569822946e-09 - -5.927120128035e-09 - -1.106290881125e-08 - -5.205436082178e-11 - -1.303028795974e-09 - 3.85909970646e-10 - -1.127881787966e-09 - -1.040435293476e-09 - -9.260201494499e-10 - -2.734941517701e-08 - -5.424772098195e-08 - 4.424055077834e-08 - -4.674802767113e-10 - -2.812839738908e-09 - -5.670756308973e-09 - -6.039726940799e-09 - -5.925457458034e-09 - -6.050512979527e-09 - -5.93539084548e-09 - -1.104555735765e-08 - -5.22533127878e-11 - -1.303213537085e-09 - 3.862474784455e-10 - -1.128016791085e-09 - 
-1.036601915416e-09 - -9.278338097829e-10 - -2.736760507105e-08 - 4.430148692336e-08 - -6.124395213192e-09 - 6.223643822523e-09 - 6.227637072698e-09 - 6.216559711447e-09 - -8.725535849408e-10 - -2.746151039901e-08 - 4.422872734722e-08 - -6.108592742748e-09 - 6.207599767549e-09 - 6.222357740171e-09 - 6.224738058336e-09 - -8.727347733384e-10 - -2.734918780334e-08 - 4.453613655642e-08 - -6.112173878137e-09 - 6.22451068466e-09 - 6.227359961031e-09 - 6.222300896752e-09 - -8.725198341608e-10 - -2.746151039901e-08 - 4.457479008124e-08 - -6.087390147513e-09 - 6.217128145636e-09 - -8.725535849408e-10 - 6.222357740171e-09 - 6.227637072698e-09 - -8.725198341608e-10 - -2.734918780334e-08 - 4.454204827198e-08 - -2.825686351571e-09 - -6.100492555561e-09 - -5.898698418605e-09 - -5.89793103245e-09 - -6.570232358172e-09 - 6.235552518774e-09 - -1.462325371904e-09 - -1.088713119657e-09 - -8.705525189612e-10 - -2.73569185083e-08 - 4.454068402993e-08 - -2.808064891724e-09 - -6.131557483968e-09 - -5.876401587557e-09 - -5.897845767322e-09 - -6.565869625774e-09 - 6.222357740171e-09 - -1.463405396862e-09 - -1.089073720095e-09 - -8.69984972951e-10 - -2.731030690484e-08 - 4.454204827198e-08 - -2.806132215483e-09 - -6.112287564974e-09 - -5.890328225178e-09 - -5.881020115339e-09 - -6.602697055769e-09 - 6.208729530499e-09 - -1.467945764944e-09 - -1.086476686396e-09 - -8.746043889118e-10 - -2.746151039901e-08 - -5.454558049678e-08 - 4.454068402993e-08 - -4.674802767113e-10 - -2.816477717715e-09 - -5.670813152392e-09 - -5.936954039498e-09 - -5.93111337821e-09 - -1.104545788166e-08 - -5.2018833685e-11 - -1.29870869614e-09 - 3.867874909247e-10 - -1.12415321496e-09 - -1.038033659029e-09 - -9.241301057727e-10 - 6.206445135604e-09 - 6.206281710774e-09 - 6.214690984052e-09 - 6.227359961031e-09 - 6.216112069524e-09 - 6.232355076463e-09 - -2.740466698015e-08 - 4.426556188264e-08 - -2.806132215483e-09 - -6.098474614191e-09 - -5.856662710357e-09 - -6.574133237791e-09 - -5.885155474061e-09 - 
-6.587036693873e-09 - 6.232085070224e-09 - -1.464133703166e-09 - -1.089770051976e-09 - -8.732499168218e-10 - -2.739443516475e-08 - 4.424418875715e-08 - -2.815454536176e-09 - -6.108251682235e-09 - -5.863171281817e-09 - -6.585437972717e-09 - -5.886150233891e-09 - -6.576577504802e-09 - 6.217746317816e-09 - -1.467178378789e-09 - -1.090377566015e-09 - -8.717009336578e-10 - -2.741853677435e-08 - 4.456569513422e-08 - -2.813635546772e-09 - -6.107967465141e-09 - -5.881588549528e-09 - -6.590155976482e-09 - -5.854928986082e-09 - -6.584819800537e-09 - 6.221597459444e-09 - -1.46228273934e-09 - -1.089073720095e-09 - -8.693215036715e-10 - 6.218208170594e-09 - -2.752040018095e-08 - 4.457479008124e-08 - -6.096939841882e-09 - 6.222300896752e-09 - -8.720668631668e-10 - 6.209738501184e-09 - -8.718927801965e-10 - 6.226734683423e-09 - 6.224738058336e-09 - 6.208729530499e-09 - -6.025345555827e-09 - -6.015824283168e-09 - -2.741762727965e-08 - 4.41937118012e-08 - -6.025857146597e-09 - -6.039897471055e-09 - -1.109327030235e-08 - -1.30298616341e-09 - -6.022730758559e-09 - -6.028955112924e-09 - -2.738147486525e-08 - 4.430512490217e-08 - -1.106290881125e-08 - -1.301476260096e-09 - -6.022730758559e-09 - -6.022105480952e-09 - - -3.552713678801e-15 - -3.552713678801e-15 - 9.769962616701e-15 - 5.329070518201e-15 - 6.217248937901e-15 - 3.774758283726e-15 - -2.553512956638e-15 - 4.440892098501e-16 - -1.110223024625e-15 - 0.0 - 8.881784197001e-16 - 1.7763568394e-15 - 2.22044604925e-16 - 0.0 - 2.22044604925e-16 - 2.22044604925e-16 - -2.22044604925e-16 - 0.0 - 1.42108547152e-14 - -1.42108547152e-14 - -1.7763568394e-15 - 1.7763568394e-15 - 2.6645352591e-15 - -8.881784197001e-16 - -8.881784197001e-16 - -8.881784197001e-16 - -4.440892098501e-16 - -8.881784197001e-16 - 4.440892098501e-16 - 0.0 - 4.440892098501e-16 - 2.22044604925e-16 - -5.551115123126e-16 - -1.110223024625e-16 - -8.881784197001e-16 - 0.0 - 1.95399252334e-14 - 5.329070518201e-15 - -5.329070518201e-15 - 2.442490654175e-15 - 
8.881784197001e-16 - 0.0 - 0.0 - 4.440892098501e-16 - -4.440892098501e-16 - -8.881784197001e-16 - -1.33226762955e-15 - 4.440892098501e-16 - -4.440892098501e-16 - 0.0 - 0.0 - 4.440892098501e-16 - -2.22044604925e-14 - 1.95399252334e-14 - 6.439293542826e-15 - 6.883382752676e-15 - 2.442490654175e-15 - -8.881784197001e-16 - 0.0 - -8.881784197001e-16 - -8.881784197001e-16 - -1.7763568394e-15 - 8.881784197001e-16 - -4.440892098501e-16 - -1.33226762955e-15 - 0.0 - 0.0 - 2.22044604925e-16 - -7.105427357601e-15 - 0.0 - 1.7763568394e-15 - 2.6645352591e-15 - -8.881784197001e-16 - -8.881784197001e-16 - -2.22044604925e-16 - -8.881784197001e-16 - 3.28626015289e-14 - 1.7763568394e-15 - 1.7763568394e-15 - 3.552713678801e-15 - -1.33226762955e-15 - 0.0 - 3.552713678801e-15 - 1.42108547152e-14 - -2.6645352591e-15 - 8.881784197001e-16 - -2.6645352591e-15 - 1.7763568394e-15 - 0.0 - -8.881784197001e-16 - 1.95399252334e-14 - -3.10862446895e-15 - 1.7763568394e-15 - -2.22044604925e-16 - 3.552713678801e-15 - -8.881784197001e-16 - 0.0 - 3.552713678801e-15 - -1.95399252334e-14 - 7.105427357601e-15 - -3.552713678801e-15 - 0.0 - 0.0 - -8.881784197001e-16 - -8.881784197001e-16 - 0.0 - -1.110223024625e-16 - -8.326672684688e-17 - -3.552713678801e-15 - 0.0 - -2.22044604925e-15 - 4.440892098501e-16 - 0.0 - 0.0 - -8.881784197001e-16 - 3.552713678801e-15 - 6.661338147751e-16 - 0.0 - 0.0 - 5.329070518201e-15 - -3.37507799486e-14 - -6.661338147751e-16 - 2.553512956638e-15 - -8.881784197001e-16 - 8.881784197001e-16 - 4.440892098501e-16 - 0.0 - 0.0 - -3.053113317719e-16 - 0.0 - -8.881784197001e-16 - -3.37507799486e-14 - 0.0 - 1.24344978758e-14 - -1.199040866595e-14 - 0.0 - 8.881784197001e-16 - 4.440892098501e-16 - 0.0 - -8.881784197001e-16 - -8.881784197001e-16 - -8.881784197001e-16 - 0.0 - 0.0 - 0.0 - -7.105427357601e-15 - -1.7763568394e-15 - 2.997602166488e-15 - -2.6645352591e-15 - -2.6645352591e-15 - -1.554312234475e-15 - -1.06581410364e-14 - -3.28626015289e-14 - -6.661338147751e-16 - -2.22044604925e-15 
- -1.110223024625e-15 - 4.440892098501e-16 - 1.110223024625e-15 - -4.440892098501e-16 - 1.998401444325e-15 - 4.440892098501e-16 - 4.440892098501e-16 - 0.0 - 0.0 - -8.881784197001e-15 - 5.773159728051e-15 - 2.6645352591e-15 - 1.7763568394e-15 - 0.0 - -4.440892098501e-16 - -8.881784197001e-16 - -2.22044604925e-15 - 8.881784197001e-16 - 1.110223024625e-16 - 4.440892098501e-16 - 2.6645352591e-15 - 2.13162820728e-14 - 0.0 - 0.0 - 8.881784197001e-16 - 0.0 - -8.881784197001e-16 - 8.881784197001e-16 - 2.6645352591e-15 - 0.0 - 0.0 - -2.775557561563e-17 - 3.552713678801e-15 - 3.552713678801e-15 - 1.95399252334e-14 - -1.33226762955e-15 - 1.7763568394e-15 - -6.661338147751e-16 - -1.7763568394e-15 - 0.0 - 0.0 - -1.33226762955e-15 - 0.0 - 2.6645352591e-15 - 0.0 - 7.105427357601e-15 - 1.68753899743e-14 - -1.33226762955e-15 - 8.881784197001e-16 - -8.881784197001e-16 - 0.0 - -1.7763568394e-15 - -1.33226762955e-15 - 1.24344978758e-14 - 0.0 - -4.440892098501e-16 - 4.440892098501e-16 - -1.7763568394e-15 - 1.7763568394e-15 - - -1.06581410364e-14 - 3.552713678801e-15 - 3.552713678801e-15 - 7.105427357601e-15 - 8.881784197001e-15 - 0.0 - -2.6645352591e-15 - 8.881784197001e-16 - -1.7763568394e-15 - -8.881784197001e-16 - 1.7763568394e-15 - 8.881784197001e-16 - 0.0 - 1.7763568394e-15 - -1.110223024625e-16 - -2.22044604925e-16 - 0.0 - -3.552713678801e-15 - 4.263256414561e-14 - -7.105427357601e-15 - -3.552713678801e-15 - -3.552713678801e-15 - -3.552713678801e-15 - -8.881784197001e-16 - 2.22044604925e-16 - -8.881784197001e-16 - 0.0 - -2.6645352591e-15 - -4.440892098501e-16 - -1.554312234475e-15 - 0.0 - -1.110223024625e-16 - 0.0 - -2.22044604925e-16 - 3.552713678801e-15 - 1.7763568394e-14 - 8.881784197001e-15 - -3.552713678801e-15 - -3.552713678801e-15 - -8.881784197001e-16 - 4.440892098501e-16 - -7.771561172376e-16 - -1.33226762955e-15 - -1.33226762955e-15 - -4.440892098501e-16 - -8.881784197001e-16 - -1.443289932013e-15 - -8.881784197001e-16 - -2.081668171172e-16 - -2.775557561563e-17 - 
-3.330669073875e-16 - 3.552713678801e-15 - -5.329070518201e-14 - -2.48689957516e-14 - -7.105427357601e-15 - -5.329070518201e-15 - -8.881784197001e-16 - -8.881784197001e-16 - -7.771561172376e-16 - -8.881784197001e-16 - 2.22044604925e-16 - -1.7763568394e-15 - 2.22044604925e-16 - -8.881784197001e-16 - -8.881784197001e-16 - -1.110223024625e-16 - -1.665334536938e-16 - 5.551115123126e-17 - 7.105427357601e-15 - -3.19744231092e-14 - -1.7763568394e-15 - 4.440892098501e-16 - -5.773159728051e-15 - -3.552713678801e-15 - -4.440892098501e-16 - 3.552713678801e-15 - 1.59872115546e-14 - -8.881784197001e-16 - 1.33226762955e-15 - 2.6645352591e-15 - 0.0 - -3.330669073875e-16 - 5.329070518201e-15 - 1.7763568394e-15 - -3.552713678801e-15 - -8.881784197001e-16 - -8.881784197001e-16 - -1.7763568394e-15 - -4.440892098501e-16 - 3.552713678801e-15 - 8.881784197001e-15 - -2.6645352591e-15 - 5.773159728051e-15 - -4.440892098501e-16 - 2.6645352591e-15 - -5.773159728051e-15 - -4.440892098501e-16 - 5.329070518201e-15 - -3.730349362741e-14 - 8.881784197001e-15 - -2.6645352591e-15 - -1.110223024625e-16 - 9.436895709314e-16 - -1.7763568394e-15 - -1.33226762955e-15 - -8.881784197001e-16 - -4.440892098501e-16 - 2.22044604925e-16 - 5.329070518201e-15 - -3.01980662698e-14 - 8.881784197001e-16 - -1.7763568394e-15 - -1.33226762955e-15 - 0.0 - -1.7763568394e-15 - 2.6645352591e-15 - 0.0 - -3.330669073875e-16 - -4.440892098501e-16 - 0.0 - -5.151434834261e-14 - 5.329070518201e-15 - -2.6645352591e-15 - -1.998401444325e-15 - 4.996003610813e-16 - -4.440892098501e-16 - -3.996802888651e-15 - 0.0 - -8.881784197001e-16 - -8.881784197001e-16 - 3.552713678801e-15 - -8.881784197001e-15 - -3.01980662698e-14 - 5.329070518201e-15 - -1.42108547152e-14 - -8.881784197001e-16 - 1.110223024625e-16 - -4.440892098501e-16 - 6.661338147751e-16 - -8.881784197001e-16 - -8.881784197001e-16 - -2.6645352591e-15 - 0.0 - 0.0 - -2.22044604925e-16 - -7.105427357601e-15 - -3.552713678801e-15 - 8.881784197001e-16 - -8.881784197001e-16 - 
-3.10862446895e-15 - -8.881784197001e-16 - -6.217248937901e-15 - -2.13162820728e-14 - 5.329070518201e-15 - -5.329070518201e-15 - -1.7763568394e-15 - 0.0 - -1.7763568394e-15 - -8.881784197001e-16 - 2.22044604925e-15 - 4.440892098501e-16 - 8.881784197001e-16 - 0.0 - 7.105427357601e-15 - -1.59872115546e-14 - 7.993605777301e-15 - 2.22044604925e-15 - 4.440892098501e-16 - -9.992007221626e-16 - 0.0 - 2.22044604925e-16 - -6.661338147751e-15 - 0.0 - 0.0 - 8.881784197001e-16 - 7.105427357601e-15 - 1.06581410364e-14 - 4.440892098501e-16 - -2.442490654175e-15 - 8.881784197001e-16 - -8.881784197001e-16 - -1.33226762955e-15 - 0.0 - 2.22044604925e-15 - -6.661338147751e-16 - -3.330669073875e-16 - 0.0 - 8.881784197001e-16 - 7.105427357601e-15 - 8.881784197001e-15 - 0.0 - -1.7763568394e-15 - -7.910339050454e-16 - -5.329070518201e-15 - -2.22044604925e-16 - 2.22044604925e-15 - 0.0 - -3.996802888651e-15 - 1.998401444325e-15 - -2.22044604925e-15 - 1.06581410364e-14 - -3.01980662698e-14 - 0.0 - -8.881784197001e-16 - -4.440892098501e-16 - -8.881784197001e-16 - -2.22044604925e-15 - -3.552713678801e-15 - 0.0 - -3.552713678801e-14 - -4.440892098501e-16 - 3.330669073875e-16 - -2.22044604925e-15 - 0.0 - - -2.453347747178e-08 - -4.890711124972e-08 - 3.971540252223e-08 - -4.215081617076e-10 - -2.531166387598e-09 - -5.090015520182e-09 - -5.402824854173e-09 - -5.326945995421e-09 - -5.406533887253e-09 - -5.303881778218e-09 - -9.95139259885e-09 - -4.664002517529e-11 - -1.169468077933e-09 - 3.465174813755e-10 - -1.009041739053e-09 - -9.316067917097e-10 - -8.316902722072e-10 - -2.457070991113e-08 - -4.9003332947e-08 - 3.969984163632e-08 - -4.210960469209e-10 - -2.520792463656e-09 - -5.072479325463e-09 - -5.43165867839e-09 - -5.325112795163e-09 - -5.43165867839e-09 - -5.320373475115e-09 - -9.971742542803e-09 - -4.661160346586e-11 - -1.165034291262e-09 - 3.48336470779e-10 - -1.009041739053e-09 - -9.349250262858e-10 - -8.319460675921e-10 - -2.462979864504e-08 - -4.874540593391e-08 - 3.969002193571e-08 - 
-4.214655291435e-10 - -2.529361609049e-09 - -5.086377541375e-09 - -5.424837468126e-09 - -5.313083306646e-09 - -5.429683369584e-09 - -5.316017848145e-09 - -9.926125699167e-09 - -4.666844688472e-11 - -1.168785956907e-09 - 3.474269760773e-10 - -1.012267603073e-09 - -9.336105222246e-10 - -8.307345922276e-10 - -2.453501224409e-08 - -4.864099878432e-08 - 3.969411466187e-08 - -4.200586545267e-10 - -2.522682507333e-09 - -5.086377541375e-09 - -5.417248871709e-09 - -5.313083306646e-09 - -5.43165867839e-09 - -5.325112795163e-09 - -9.905932074616e-09 - -4.685318799602e-11 - -1.16909859571e-09 - 3.474269760773e-10 - -1.011144945551e-09 - -9.30121757392e-10 - -8.32123703276e-10 - -2.455203684804e-08 - 3.975333129347e-08 - -5.494243282556e-09 - 5.58331691991e-09 - 5.584197992903e-09 - 5.577660999734e-09 - -7.832063886326e-10 - -2.462979864504e-08 - 3.968375494878e-08 - -5.482178266902e-09 - 5.567116545535e-09 - 5.582947437688e-09 - 5.582720064012e-09 - -7.826947978629e-10 - -2.453300851357e-08 - 3.997169528702e-08 - -5.48229195374e-09 - 5.582549533756e-09 - 5.58533486128e-09 - 5.58033264042e-09 - -7.824034753412e-10 - -2.462979864504e-08 - 4.000852982244e-08 - -5.459440899358e-09 - 5.578741024692e-09 - -7.832063886326e-10 - 5.582947437688e-09 - 5.584197992903e-09 - -7.824034753412e-10 - -2.453300851357e-08 - 3.997578801318e-08 - -2.534619625294e-09 - -5.472671205098e-09 - -5.292946525515e-09 - -5.289514604101e-09 - -5.893511456634e-09 - 5.593420837613e-09 - -1.311946107307e-09 - -9.763994057721e-10 - -7.807834379037e-10 - -2.454028447119e-08 - 3.997533326583e-08 - -2.518589781175e-09 - -5.499970257006e-09 - -5.27222709934e-09 - -5.289443549827e-09 - -5.889233989365e-09 - 5.582947437688e-09 - -1.31279875859e-09 - -9.767973097040e-10 - -7.801634893667e-10 - -2.450508418406e-08 - 3.997578801318e-08 - -2.517722919038e-09 - -5.481609832714e-09 - -5.282728920974e-09 - -5.276390879771e-09 - -5.921897638927e-09 - 5.568864480665e-09 - -1.316720954492e-09 - -9.741683015817e-10 - 
-7.849578764763e-10 - -2.462979864504e-08 - -4.892184790606e-08 - 3.997533326583e-08 - -4.198312808512e-10 - -2.525382569729e-09 - -5.087684940008e-09 - -5.325020424607e-09 - -5.321375340372e-09 - -9.905946285471e-09 - -4.662581432058e-11 - -1.165076923826e-09 - 3.474269760773e-10 - -1.008160666061e-09 - -9.312302040598e-10 - -8.287912578453e-10 - 5.564075422626e-09 - 5.566761274167e-09 - 5.577334150075e-09 - 5.58533486128e-09 - 5.575742534347e-09 - 5.589768647951e-09 - -2.458268966166e-08 - 3.971631201694e-08 - -2.517722919038e-09 - -5.472060138345e-09 - -5.253625090518e-09 - -5.896666266381e-09 - -5.281549420033e-09 - -5.911225287036e-09 - 5.58955548513e-09 - -1.31430510919e-09 - -9.776002229954e-10 - -7.833289572545e-10 - -2.456549452745e-08 - 3.969729789333e-08 - -2.525453624003e-09 - -5.481140874508e-09 - -5.259096269583e-09 - -5.908617595196e-09 - -5.280242021399e-09 - -5.902698774207e-09 - 5.579806838796e-09 - -1.316578845945e-09 - -9.776002229954e-10 - -7.816733926802e-10 - -2.458754977397e-08 - 3.999780062713e-08 - -2.523478315197e-09 - -5.481275877628e-09 - -5.276277192934e-09 - -5.912788481055e-09 - -5.252047685644e-09 - -5.904489341901e-09 - 5.580417905549e-09 - -1.312287167821e-09 - -9.767973097040e-10 - -7.796998602316e-10 - 5.577490469477e-09 - -2.470176241332e-08 - 4.000852982244e-08 - -5.468805852615e-09 - 5.58033264042e-09 - -7.824567660464e-10 - 5.57135138024e-09 - -7.82140574529e-10 - 5.586642259914e-09 - 5.582720064012e-09 - 5.568864480665e-09 - -5.405695446825e-09 - -5.398412383784e-09 - -2.459630366047e-08 - 3.965256212268e-08 - -5.406420200416e-09 - -5.417355453119e-09 - -9.949395973763e-09 - -1.168757535197e-09 - -5.401048497333e-09 - -5.409134473666e-09 - -2.456927461481e-08 - 3.975651452492e-08 - -9.926125699167e-09 - -1.167023810922e-09 - -5.401048497333e-09 - -5.400458746863e-09 history_criterion: - - 21.53511643627 - 14.80453604351 - 6.548558251064 - 12.54188075473 - 9.282890198608 - 2.859555210712 - 0.9381817894678 - 0.2048532883114 - 
0.8881817894678 - 0.3798532883114 - -0.9101956814319 - -1.36444138824 - -0.9351994446357 - -1.055070381505 - -1.111335532899 - -0.1703442432756 - 1.580641245921 - 19.23511643627 - 13.00453604351 - 13.94855825106 - 11.24188075473 - 6.182890198608 - -1.240444789288 - -0.8618182105322 - -1.995146711689 - -0.9868182105322 - -1.270146711689 - -1.135195681432 - -0.9144413882404 - -3.072699444636 - -1.317570381505 - -0.9238355328992 - 0.9546557567244 - -0.3318587540789 - 8.635116436265 - 15.10453604351 - 6.148558251063 - 4.841880754733 - 5.382890198608 - 2.059555210712 - -3.361818210532 - -2.995146711689 - -3.311818210532 - -2.395146711689 - -2.185195681432 - -2.63944138824 - -1.985199444636 - -1.880070381505 - -1.711335532899 - -1.407844243276 - -0.4818587540789 - 2.735116436265 - 3.404536043506 - 3.148558251063 - 3.141880754733 - 2.482890198608 - 0.5595552107122 - -0.7618182105322 - -2.995146711689 - -0.7993182105322 - -2.245146711689 - -1.885195681432 - -1.96444138824 - -1.647699444636 - -2.292570381505 - -1.486335532899 - -1.557844243276 - -0.8193587540789 - 10.13511643627 - 4.748558251063 - -2.218096467799 - -4.369688200573 - -3.659688200573 - -1.219688200573 - -0.3489655844206 - 6.635116436265 - 2.248558251063 - -1.518096467799 - -2.939688200573 - -4.029688200573 - -2.159688200573 - -2.038965584421 - 5.435116436265 - 3.348558251064 - -1.818096467799 - -2.909688200573 - -4.969688200573 - -3.469688200573 - -0.5389655844206 - 6.635116436265 - 5.848558251064 - -0.918096467799 - -4.219688200573 - -0.3489655844206 - -4.029688200573 - -3.659688200573 - -0.5389655844206 - 5.435116436265 - 2.348558251064 - -0.0171098013921 - -2.718096467799 - -4.257793595776 - -3.887793595776 - -2.006947842151 - -2.829688200573 - -0.1835757519589 - 0.8557490906722 - 0.6910344155794 - 4.435116436265 - 4.348558251064 - 0.9828901986079 - 0.481903532201 - -6.457793595776 - -6.137793595776 - -1.516947842151 - -4.029688200573 - -1.013575751959 - -0.8342509093278 - 1.441034415579 - -0.8648835637348 
- 1.848558251064 - 0.6828901986079 - 1.081903532201 - -5.457793595776 - -4.787793595776 - 0.1730521578493 - -1.139688200573 - -3.263575751959 - 0.4057490906722 - 4.141034415579 - 6.635116436265 - 2.104536043506 - 4.348558251064 - 5.641880754733 - -0.1171098013921 - -2.640444789288 - -3.195146711689 - -2.325146711689 - -4.777695681432 - -5.49444138824 - -4.762699444636 - -5.027570381505 - -3.966335532899 - -3.510344243276 - -2.694358754079 - 7.410311799427 - 8.980311799427 - 1.290311799427 - -4.969688200573 - -4.709688200573 - -0.659688200573 - -0.5648835637348 - -2.951441748936 - 0.1828901986079 - 5.081903532201 - 3.342206404224 - 2.873052157849 - 3.162206404224 - 2.983052157849 - 0.920311799427 - 0.1164242480411 - 3.925749090672 - 2.761034415579 - 9.335116436265 - 3.648558251063 - -0.3171098013921 - -5.718096467799 - -2.457793595776 - -3.126947842151 - -1.897793595776 - -2.156947842151 - -0.539688200573 - -2.693575751959 - -0.2742509093278 - 2.531034415579 - -4.664883563735 - 1.548558251064 - -7.017109801392 - -4.018096467799 - -0.7577935957756 - -4.526947842151 - -0.4677935957756 - -3.876947842151 - -3.429688200573 - -2.813575751959 - -0.9442509093278 - 0.6610344155794 - -3.089688200573 - 9.635116436265 - 6.848558251064 - 0.781903532201 - -3.469688200573 - -1.108965584421 - -3.839688200573 - -0.9189655844206 - -1.589688200573 - -2.159688200573 - -1.139688200573 - -4.661818210532 - -4.211818210532 - 10.33511643627 - 3.948558251064 - -0.3618182105322 - -0.3518182105322 - -1.477695681432 - -2.132699444636 - -1.761818210532 - -1.471818210532 - 9.935116436265 - 3.248558251063 - -1.997695681432 - -2.472699444636 - -1.261818210532 - -1.211818210532 - - 25.01562287811 - 18.67576650474 - 10.71425043997 - 16.92850306334 - 13.83328982937 - 7.61143273431 - 5.780449944004 - 4.918595910462 - 5.730449944004 - 5.093595910462 - 3.573230198002 - 2.843276294294 - 2.98078293018 - 2.569118760852 - 2.229814978179 - 2.901300021005 - 4.398727952741 - 22.71562287811 - 16.87576650474 - 
18.11425043997 - 15.62850306334 - 10.73328982937 - 3.51143273431 - 3.980449944004 - 2.718595910462 - 3.855449944004 - 3.443595910462 - 3.348230198002 - 3.293276294294 - 0.8432829301802 - 2.306618760852 - 2.417314978179 - 4.026300021005 - 2.486227952741 - 12.11562287811 - 18.97576650474 - 10.31425043997 - 9.22850306334 - 9.933289829366 - 6.81143273431 - 1.480449944004 - 1.718595910462 - 1.530449944004 - 2.318595910462 - 2.298230198002 - 1.568276294294 - 1.93078293018 - 1.744118760852 - 1.629814978179 - 1.663800021005 - 2.336227952741 - 6.215622878108 - 7.275766504742 - 7.314250439974 - 7.52850306334 - 7.033289829366 - 5.31143273431 - 4.080449944004 - 1.718595910462 - 4.042949944004 - 2.468595910462 - 2.598230198002 - 2.243276294294 - 2.26828293018 - 1.331618760852 - 1.854814978179 - 1.513800021005 - 1.998727952741 - 13.61562287811 - 8.914250439974 - 2.617857443871 - -0.02069875634249 - 0.6893012436575 - 3.129301243658 - 2.348674115464 - 10.11562287811 - 6.414250439974 - 3.317857443871 - 1.409301243658 - 0.3193012436575 - 2.189301243658 - 0.6586741154643 - 8.915622878108 - 7.514250439974 - 3.017857443871 - 1.439301243658 - -0.6206987563425 - 0.8793012436575 - 2.158674115464 - 10.11562287811 - 10.01425043997 - 3.917857443871 - 0.1293012436575 - 2.348674115464 - 0.3193012436575 - 0.6893012436575 - 2.158674115464 - 8.915622878108 - 6.514250439974 - 4.533289829366 - 2.117857443871 - 0.5381907245488 - 0.9081907245488 - 2.599956711546 - 1.519301243658 - 3.585844975595 - 4.0602621231 - 3.388674115464 - 7.915622878108 - 8.514250439974 - 5.533289829366 - 5.317857443871 - -1.661809275451 - -1.341809275451 - 3.089956711546 - 0.3193012436575 - 2.755844975595 - 2.3702621231 - 4.138674115464 - 2.615622878108 - 6.014250439974 - 5.233289829366 - 5.917857443871 - -0.6618092754512 - 0.008190724548808 - 4.779956711546 - 3.209301243658 - 0.505844975595 - 3.6102621231 - 6.838674115464 - 10.11562287811 - 5.975766504742 - 8.514250439974 - 10.02850306334 - 4.433289829366 - 2.11143273431 - 
1.518595910462 - 2.388595910462 - -0.2942698019983 - -1.286723705706 - -0.8467170698198 - -1.403381239148 - -0.6251850218209 - -0.4386999789948 - 0.1237279527411 - 11.75930124366 - 13.32930124366 - 5.639301243658 - -0.6206987563425 - -0.3606987563425 - 3.689301243658 - 2.915622878108 - 1.214250439974 - 4.733289829366 - 9.917857443871 - 8.138190724549 - 7.479956711546 - 7.958190724549 - 7.589956711546 - 5.269301243658 - 3.885844975595 - 7.1302621231 - 5.458674115464 - 12.81562287811 - 7.814250439974 - 4.233289829366 - -0.8821425561292 - 2.338190724549 - 1.479956711546 - 2.898190724549 - 2.449956711546 - 3.809301243658 - 1.075844975595 - 2.9302621231 - 5.228674115464 - -1.184377121892 - 5.714250439974 - -2.466710170634 - 0.8178574438708 - 4.038190724549 - 0.07995671154575 - 4.328190724549 - 0.7299567115457 - 0.9193012436575 - 0.955844975595 - 2.2602621231 - 3.358674115464 - 1.259301243658 - 13.11562287811 - 11.01425043997 - 5.617857443871 - 0.8793012436575 - 1.588674115464 - 0.5093012436575 - 1.778674115464 - 2.759301243658 - 2.189301243658 - 3.209301243658 - 0.1804499440042 - 0.6304499440042 - 13.81562287811 - 8.114250439974 - 4.480449944004 - 4.490449944004 - 3.005730198002 - 1.78328293018 - 3.080449944004 - 3.370449944004 - 13.41562287811 - 7.414250439974 - 2.485730198002 - 1.44328293018 - 3.580449944004 - 3.630449944004 - - 84.68988065196 - 70.73054388289 - 56.46322643196 - 57.38823556867 - 49.80586460657 - 36.42009030556 - 24.97150307276 - 18.1321687762 - 24.92150307276 - 18.3071687762 - 12.85194135284 - 9.428755329368 - 7.671466840337 - 5.900125885276 - 4.571195356425 - 4.515228493968 - 5.474513604881 - 82.38988065196 - 68.93054388289 - 63.86322643196 - 56.08823556867 - 46.70586460657 - 32.32009030556 - 23.17150307276 - 15.9321687762 - 23.04650307276 - 16.6571687762 - 12.62694135284 - 9.878755329368 - 5.533966840337 - 5.637625885276 - 4.758695356425 - 5.640228493968 - 3.562013604881 - 71.78988065196 - 71.03054388289 - 56.06322643196 - 49.68823556867 - 
45.90586460657 - 35.62009030556 - 20.67150307276 - 14.9321687762 - 20.72150307276 - 15.5321687762 - 11.57694135284 - 8.153755329368 - 6.621466840337 - 5.075125885276 - 3.971195356425 - 3.277728493968 - 3.412013604881 - 65.88988065196 - 59.33054388289 - 53.06322643196 - 47.98823556867 - 43.00586460657 - 34.12009030556 - 23.27150307276 - 14.9321687762 - 23.23400307276 - 15.6821687762 - 11.87694135284 - 8.828755329368 - 6.958966840337 - 4.662625885276 - 4.196195356425 - 3.127728493968 - 3.074513604881 - 73.28988065196 - 54.66322643196 - 26.00799822147 - 7.789506147668 - 8.499506147668 - 10.93950614767 - 3.209942501544 - 69.78988065196 - 52.16322643196 - 26.70799822147 - 9.219506147668 - 8.129506147668 - 9.999506147668 - 1.519942501544 - 68.58988065196 - 53.26322643196 - 26.40799822147 - 9.249506147668 - 7.189506147668 - 8.689506147668 - 3.019942501544 - 69.78988065196 - 55.76322643196 - 27.30799822147 - 7.939506147668 - 3.209942501544 - 8.129506147668 - 8.499506147668 - 3.019942501544 - 68.58988065196 - 52.26322643196 - 40.50586460657 - 25.50799822147 - 16.41235765092 - 16.78235765092 - 13.65241143766 - 9.329506147668 - 7.541813269635 - 6.010338273791 - 4.249942501544 - 67.58988065196 - 54.26322643196 - 41.50586460657 - 28.70799822147 - 14.21235765092 - 14.53235765092 - 14.14241143766 - 8.129506147668 - 6.711813269635 - 4.320338273791 - 4.999942501544 - 62.28988065196 - 51.76322643196 - 41.20586460657 - 29.30799822147 - 15.21235765092 - 15.88235765092 - 15.83241143766 - 11.01950614767 - 4.461813269635 - 5.560338273791 - 7.699942501544 - 69.78988065196 - 58.03054388289 - 54.26322643196 - 50.48823556867 - 40.40586460657 - 30.92009030556 - 14.7321687762 - 15.6021687762 - 8.984441352835 - 5.298755329368 - 3.843966840337 - 1.927625885276 - 1.716195356425 - 1.175228493968 - 1.199513604881 - 19.56950614767 - 21.13950614767 - 13.44950614767 - 7.189506147668 - 7.449506147668 - 11.49950614767 - 62.58988065196 - 46.96322643196 - 40.70586460657 - 33.30799822147 - 24.01235765092 - 
18.53241143766 - 23.83235765092 - 18.64241143766 - 13.07950614767 - 7.841813269635 - 9.080338273791 - 6.319942501544 - 72.48988065196 - 53.56322643196 - 40.20586460657 - 22.50799822147 - 18.21235765092 - 12.53241143766 - 18.77235765092 - 13.50241143766 - 11.61950614767 - 5.031813269635 - 4.880338273791 - 6.089942501544 - 58.48988065196 - 51.46322643196 - 33.50586460657 - 24.20799822147 - 19.91235765092 - 11.13241143766 - 20.20235765092 - 11.78241143766 - 8.729506147668 - 4.911813269635 - 4.210338273791 - 4.219942501544 - 9.069506147668 - 72.78988065196 - 56.76322643196 - 29.00799822147 - 8.689506147668 - 2.449942501544 - 8.319506147668 - 2.639942501544 - 10.56950614767 - 9.999506147668 - 11.01950614767 - 19.37150307276 - 19.82150307276 - 73.48988065196 - 53.86322643196 - 23.67150307276 - 23.68150307276 - 12.28444135284 - 6.473966840337 - 22.27150307276 - 22.56150307276 - 73.08988065196 - 53.16322643196 - 11.76444135284 - 6.133966840337 - 22.77150307276 - 22.82150307276 - - 78.17391291542 - 66.8366728159 - 54.32599616455 - 56.48754438985 - 49.80586460657 - 37.60220537333 - 27.26395828245 - 20.80723297571 - 27.21395828245 - 20.98223297571 - 15.60547682225 - 12.11967202128 - 10.23248733714 - 8.30080219923 - 6.80055773407 - 6.572919168714 - 7.365975022026 - 75.87391291542 - 65.0366728159 - 61.72599616455 - 55.18754438985 - 46.70586460657 - 33.50220537333 - 25.46395828245 - 18.60723297571 - 25.33895828245 - 19.33223297571 - 15.38047682225 - 12.56967202128 - 8.094987337144 - 8.03830219923 - 6.98805773407 - 7.697919168714 - 5.453475022026 - 65.27391291542 - 67.1366728159 - 53.92599616455 - 48.78754438985 - 45.90586460657 - 36.80220537333 - 22.96395828245 - 17.60723297571 - 23.01395828245 - 18.20723297571 - 14.33047682225 - 10.84467202128 - 9.182487337144 - 7.47580219923 - 6.20055773407 - 5.335419168714 - 5.303475022026 - 59.37391291542 - 55.4366728159 - 50.92599616455 - 47.08754438985 - 43.00586460657 - 35.30220537333 - 25.56395828245 - 17.60723297571 - 25.52645828245 - 
18.35723297571 - 14.63047682225 - 11.51967202128 - 9.519987337144 - 7.06330219923 - 6.42555773407 - 5.185419168714 - 4.965975022026 - 66.77391291542 - 52.52599616455 - 27.88429931353 - 10.52352617863 - 11.23352617863 - 13.67352617863 - 5.021362784819 - 63.27391291542 - 50.02599616455 - 28.58429931353 - 11.95352617863 - 10.86352617863 - 12.73352617863 - 3.331362784819 - 62.07391291542 - 51.12599616455 - 28.28429931353 - 11.98352617863 - 9.923526178634 - 11.42352617863 - 4.831362784819 - 63.27391291542 - 53.62599616455 - 29.18429931353 - 10.67352617863 - 5.021362784819 - 10.86352617863 - 11.23352617863 - 4.831362784819 - 62.07391291542 - 50.12599616455 - 40.50586460657 - 27.38429931353 - 18.95079727771 - 19.32079727771 - 16.391415976 - 12.06352617863 - 10.02497402658 - 8.153464959245 - 6.061362784819 - 61.07391291542 - 52.12599616455 - 41.50586460657 - 30.58429931353 - 16.75079727771 - 17.07079727771 - 16.881415976 - 10.86352617863 - 9.194974026576 - 6.463464959245 - 6.811362784819 - 55.77391291542 - 49.62599616455 - 41.20586460657 - 31.18429931353 - 17.75079727771 - 18.42079727771 - 18.571415976 - 13.75352617863 - 6.944974026576 - 7.703464959245 - 9.511362784819 - 63.27391291542 - 54.1366728159 - 52.12599616455 - 49.58754438985 - 40.40586460657 - 32.10220537333 - 17.40723297571 - 18.27723297571 - 11.73797682225 - 7.989672021276 - 6.404987337144 - 4.32830219923 - 3.94555773407 - 3.232919168714 - 3.090975022026 - 22.30352617863 - 23.87352617863 - 16.18352617863 - 9.923526178634 - 10.18352617863 - 14.23352617863 - 56.07391291542 - 44.82599616455 - 40.70586460657 - 35.18429931353 - 26.55079727771 - 21.271415976 - 26.37079727771 - 21.381415976 - 15.81352617863 - 10.32497402658 - 11.22346495925 - 8.131362784819 - 65.97391291542 - 51.42599616455 - 40.20586460657 - 24.38429931353 - 20.75079727771 - 15.271415976 - 21.31079727771 - 16.241415976 - 14.35352617863 - 7.514974026576 - 7.023464959245 - 7.901362784819 - 51.97391291542 - 49.32599616455 - 33.50586460657 - 
26.08429931353 - 22.45079727771 - 13.871415976 - 22.74079727771 - 14.521415976 - 11.46352617863 - 7.394974026576 - 6.353464959245 - 6.031362784819 - 11.80352617863 - 66.27391291542 - 54.62599616455 - 30.88429931353 - 11.42352617863 - 4.261362784819 - 11.05352617863 - 4.451362784819 - 13.30352617863 - 12.73352617863 - 13.75352617863 - 21.66395828245 - 22.11395828245 - 66.97391291542 - 51.72599616455 - 25.96395828245 - 25.97395828245 - 15.03797682225 - 9.034987337144 - 24.56395828245 - 24.85395828245 - 66.57391291542 - 51.02599616455 - 14.51797682225 - 8.694987337144 - 25.06395828245 - 25.11395828245 - - 122.1511527598 - 122.6211240639 - 156.1508499375 - -599.1450584808 - -12.38948471101 - 19.33755088278 - 21.40590723345 - 17.86925432536 - 21.35590723345 - 18.04425432536 - 13.84616571438 - 10.95881863539 - 9.418115456519 - 7.704952567843 - 6.351080428381 - 6.22595148445 - 7.093292608367 - 119.8511527598 - 120.8211240639 - 163.5508499375 - -600.4450584808 - -15.48948471101 - 15.23755088278 - 19.60590723345 - 15.66925432536 - 19.48090723345 - 16.39425432536 - 13.62116571438 - 11.40881863539 - 7.280615456519 - 7.442452567843 - 6.538580428381 - 7.35095148445 - 5.180792608367 - 109.2511527598 - 122.9211240639 - 155.7508499375 - -606.8450584808 - -16.28948471101 - 18.53755088278 - 17.10590723345 - 14.66925432536 - 17.15590723345 - 15.26925432536 - 12.57116571438 - 9.683818635389 - 8.368115456519 - 6.879952567843 - 5.751080428381 - 4.98845148445 - 5.030792608367 - 103.3511527598 - 111.2211240639 - 152.7508499375 - -608.5450584808 - -19.18948471101 - 17.03755088278 - 19.70590723345 - 14.66925432536 - 19.66840723345 - 15.41925432536 - 12.87116571438 - 10.35881863539 - 8.705615456519 - 6.467452567843 - 5.976080428381 - 4.83845148445 - 4.693292608367 - 110.7511527598 - 154.3508499375 - 18.46995434894 - 9.107714197815 - 9.817714197815 - 12.25771419782 - 4.778284133588 - 107.2511527598 - 151.8508499375 - 19.16995434894 - 10.53771419782 - 9.447714197815 - 11.31771419782 - 
3.088284133588 - 106.0511527598 - 152.9508499375 - 18.86995434894 - 10.56771419782 - 8.507714197815 - 10.00771419782 - 4.588284133588 - 107.2511527598 - 155.4508499375 - 19.76995434894 - 9.257714197815 - 4.778284133588 - 9.447714197815 - 9.817714197815 - 4.588284133588 - 106.0511527598 - 151.9508499375 - -21.68948471101 - 17.96995434894 - 14.92797213765 - 15.29797213765 - 14.15288063203 - 10.64771419782 - 9.331447324355 - 7.759548240156 - 5.818284133588 - 105.0511527598 - 153.9508499375 - -20.68948471101 - 21.16995434894 - 12.72797213765 - 13.04797213765 - 14.64288063203 - 9.447714197815 - 8.501447324355 - 6.069548240156 - 6.568284133588 - 99.75115275983 - 151.4508499375 - -20.98948471101 - 21.76995434894 - 13.72797213765 - 14.39797213765 - 16.33288063203 - 12.33771419782 - 6.251447324355 - 7.309548240156 - 9.268284133588 - 107.2511527598 - 109.9211240639 - 153.9508499375 - -606.0450584808 - -21.78948471101 - 13.83755088278 - 14.46925432536 - 15.33925432536 - 9.978665714379 - 6.828818635389 - 5.590615456519 - 3.732452567843 - 3.496080428381 - 2.88595148445 - 2.818292608367 - 20.88771419782 - 22.45771419782 - 14.76771419782 - 8.507714197815 - 8.767714197815 - 12.81771419782 - 100.0511527598 - 146.6508499375 - -21.48948471101 - 25.76995434894 - 22.52797213765 - 19.03288063203 - 22.34797213765 - 19.14288063203 - 14.39771419782 - 9.631447324355 - 10.82954824016 - 7.888284133588 - 109.9511527598 - 153.2508499375 - -21.98948471101 - 14.96995434894 - 16.72797213765 - 13.03288063203 - 17.28797213765 - 14.00288063203 - 12.93771419782 - 6.821447324355 - 6.629548240156 - 7.658284133588 - 95.95115275983 - 151.1508499375 - -28.68948471101 - 16.66995434894 - 18.42797213765 - 11.63288063203 - 18.71797213765 - 12.28288063203 - 10.04771419782 - 6.701447324355 - 5.959548240156 - 5.788284133588 - 10.38771419782 - 110.2511527598 - 156.4508499375 - 21.46995434894 - 10.00771419782 - 4.018284133588 - 9.637714197815 - 4.208284133588 - 11.88771419782 - 11.31771419782 - 12.33771419782 - 
15.80590723345 - 16.25590723345 - 110.9511527598 - 153.5508499375 - 20.10590723345 - 20.11590723345 - 13.27866571438 - 8.220615456519 - 18.70590723345 - 18.99590723345 - 110.5511527598 - 152.8508499375 - 12.75866571438 - 7.880615456519 - 19.20590723345 - 19.25590723345 - - -35.35804307658 - -53.29699942572 - -72.06021768605 - -76.24300976651 - -89.66597211166 - -117.5990460751 - -178.3813984168 - -314.1942358597 - -178.4313984168 - -314.0192358597 - -1385.074532566 - 503.9556946656 - 197.4559049595 - 115.7942913474 - 78.11722466414 - 57.55318477565 - 45.51113019711 - -37.65804307658 - -55.09699942572 - -64.66021768605 - -77.54300976651 - -92.76597211166 - -121.6990460751 - -180.1813984168 - -316.3942358597 - -180.3063984168 - -315.6692358597 - -1385.299532566 - 504.4056946656 - 195.3184049595 - 115.5317913474 - 78.30472466414 - 58.67818477565 - 43.59863019711 - -48.25804307658 - -52.99699942572 - -72.46021768605 - -83.94300976651 - -93.56597211166 - -118.3990460751 - -182.6813984168 - -317.3942358597 - -182.6313984168 - -316.7942358597 - -1386.349532566 - 502.6806946656 - 196.4059049595 - 114.9692913474 - 77.51722466414 - 56.31568477565 - 43.44863019711 - -54.15804307658 - -64.69699942572 - -75.46021768605 - -85.64300976651 - -96.46597211166 - -119.8990460751 - -180.0813984168 - -317.3942358597 - -180.1188984168 - -316.6442358597 - -1386.049532566 - 503.3556946656 - 196.7434049595 - 114.5567913474 - 77.74222466414 - 56.16568477565 - 43.11113019711 - -46.75804307658 - -73.86021768605 - -148.0932859294 - 1691.297253326 - 1692.007253326 - 1694.447253326 - 38.42217232425 - -50.25804307658 - -76.36021768605 - -147.3932859294 - 1692.727253326 - 1691.637253326 - 1693.507253326 - 36.73217232425 - -51.45804307658 - -75.26021768605 - -147.6932859294 - 1692.757253326 - 1690.697253326 - 1692.197253326 - 38.23217232425 - -50.25804307658 - -72.76021768605 - -146.7932859294 - 1691.447253326 - 38.42217232425 - 1691.637253326 - 1692.007253326 - 38.23217232425 - -51.45804307658 - 
-76.26021768605 - -98.96597211166 - -148.5932859294 - -233.0828036794 - -232.7128036794 - -508.8494431202 - 1692.837253326 - 148.258324502 - 68.02827286157 - 39.46217232425 - -52.45804307658 - -74.26021768605 - -97.96597211166 - -145.3932859294 - -235.2828036794 - -234.9628036794 - -508.3594431202 - 1691.637253326 - 147.428324502 - 66.33827286157 - 40.21217232425 - -57.75804307658 - -76.76021768605 - -98.26597211166 - -144.7932859294 - -234.2828036794 - -233.6128036794 - -506.6694431202 - 1694.527253326 - 145.178324502 - 67.57827286157 - 42.91217232425 - -50.25804307658 - -65.99699942572 - -74.26021768605 - -83.14300976651 - -99.06597211166 - -123.0990460751 - -317.5942358597 - -316.7242358597 - -1388.942032566 - 499.8256946656 - 193.6284049595 - 111.8217913474 - 75.26222466414 - 54.21318477565 - 41.23613019711 - 1703.077253326 - 1704.647253326 - 1696.957253326 - 1690.697253326 - 1690.957253326 - 1695.007253326 - -57.45804307658 - -81.56021768605 - -98.76597211166 - -140.7932859294 - -225.4828036794 - -503.9694431202 - -225.6628036794 - -503.8594431202 - 1696.587253326 - 148.558324502 - 71.09827286157 - 41.53217232425 - -47.55804307658 - -74.96021768605 - -99.26597211166 - -151.5932859294 - -231.2828036794 - -509.9694431202 - -230.7228036794 - -508.9994431202 - 1695.127253326 - 145.748324502 - 66.89827286157 - 41.30217232425 - -61.55804307658 - -77.06021768605 - -105.9659721117 - -149.8932859294 - -229.5828036794 - -511.3694431202 - -229.2928036794 - -510.7194431202 - 1692.237253326 - 145.628324502 - 66.22827286157 - 39.43217232425 - 1692.577253326 - -47.25804307658 - -71.76021768605 - -145.0932859294 - 1692.197253326 - 37.66217232425 - 1691.827253326 - 37.85217232425 - 1694.077253326 - 1693.507253326 - 1694.527253326 - -183.9813984168 - -183.5313984168 - -46.55804307658 - -74.66021768605 - -179.6813984168 - -179.6713984168 - -1385.642032566 - 196.2584049595 - -181.0813984168 - -180.7913984168 - -46.95804307658 - -75.36021768605 - -1386.162032566 - 195.9184049595 - 
-180.5813984168 - -180.5313984168 - - 117.9473464966 - 104.1334190546 - 90.05657396185 - 91.2207789054 - 83.93061974583 - 71.30942158128 - 62.32199544308 - 59.88083883476 - 62.27199544308 - 60.05583883476 - 62.77642327569 - 76.9431054476 - 131.4875801385 - -2342.694563021 - -86.51002021515 - -37.31642203542 - -19.79861800279 - 115.6473464966 - 102.3334190546 - 97.45657396185 - 89.9207789054 - 80.83061974583 - 67.20942158128 - 60.52199544308 - 57.68083883476 - 60.39699544308 - 58.40583883476 - 62.55142327569 - 77.3931054476 - 129.3500801385 - -2342.957063021 - -86.32252021515 - -36.19142203542 - -21.71111800279 - 105.0473464966 - 104.4334190546 - 89.65657396185 - 83.5207789054 - 80.03061974583 - 70.50942158128 - 58.02199544308 - 56.68083883476 - 58.07199544308 - 57.28083883476 - 61.50142327569 - 75.6681054476 - 130.4375801385 - -2343.519563021 - -87.11002021515 - -38.55392203542 - -21.86111800279 - 99.14734649662 - 92.73341905458 - 86.65657396185 - 81.8207789054 - 77.13061974583 - 69.00942158128 - 60.62199544308 - 56.68083883476 - 60.58449544308 - 57.43083883476 - 61.80142327569 - 76.3431054476 - 130.7750801385 - -2343.932063021 - -86.88502021515 - -38.70392203542 - -22.19861800279 - 106.5473464966 - 88.25657396185 - 61.9476238727 - 64.62033506263 - 65.33033506263 - 67.77033506263 - -17.38684121661 - 103.0473464966 - 85.75657396185 - 62.6476238727 - 66.05033506263 - 64.96033506263 - 66.83033506263 - -19.07684121661 - 101.8473464966 - 86.85657396185 - 62.3476238727 - 66.08033506263 - 64.02033506263 - 65.52033506263 - -17.57684121661 - 103.0473464966 - 89.35657396185 - 63.2476238727 - 64.77033506263 - -17.38684121661 - 64.96033506263 - 65.33033506263 - -17.57684121661 - 101.8473464966 - 85.85657396185 - 74.63061974583 - 61.4476238727 - 55.64384266576 - 56.01384266576 - 58.81780905206 - 66.16033506263 - 253.6554490216 - -52.38720394238 - -16.34684121661 - 100.8473464966 - 87.85657396185 - 75.63061974583 - 64.6476238727 - 53.44384266576 - 53.76384266576 - 59.30780905206 
- 64.96033506263 - 252.8254490216 - -54.07720394238 - -15.59684121661 - 95.54734649662 - 85.35657396185 - 75.33061974583 - 65.2476238727 - 54.44384266576 - 55.11384266576 - 60.99780905206 - 67.85033506263 - 250.5754490216 - -52.83720394238 - -12.89684121661 - 103.0473464966 - 91.43341905458 - 87.85657396185 - 84.3207789054 - 74.53061974583 - 65.80942158128 - 56.48083883476 - 57.35083883476 - 58.90892327569 - 72.8131054476 - 127.6600801385 - -2346.667063021 - -89.36502021515 - -40.65642203542 - -24.07361800279 - 76.40033506263 - 77.97033506263 - 70.28033506263 - 64.02033506263 - 64.28033506263 - 68.33033506263 - 95.84734649662 - 80.55657396185 - 74.83061974583 - 69.2476238727 - 63.24384266576 - 63.69780905206 - 63.06384266576 - 63.80780905206 - 69.91033506263 - 253.9554490216 - -49.31720394238 - -14.27684121661 - 105.7473464966 - 87.15657396185 - 74.33061974583 - 58.4476238727 - 57.44384266576 - 57.69780905206 - 58.00384266576 - 58.66780905206 - 68.45033506263 - 251.1454490216 - -53.51720394238 - -14.50684121661 - 91.74734649662 - 85.05657396185 - 67.63061974583 - 60.1476238727 - 59.14384266576 - 56.29780905206 - 59.43384266576 - 56.94780905206 - 65.56033506263 - 251.0254490216 - -54.18720394238 - -16.37684121661 - 65.90033506263 - 106.0473464966 - 90.35657396185 - 64.9476238727 - 65.52033506263 - -18.14684121661 - 65.15033506263 - -17.95684121661 - 67.40033506263 - 66.83033506263 - 67.85033506263 - 56.72199544308 - 57.17199544308 - 106.7473464966 - 87.45657396185 - 61.02199544308 - 61.03199544308 - 62.20892327569 - 130.2900801385 - 59.62199544308 - 59.91199544308 - 106.3473464966 - 86.75657396185 - 61.68892327569 - 129.9500801385 - 60.12199544308 - 60.17199544308 - - 175.7616267494 - 135.815392655 - 107.427429421 - 99.44443456745 - 85.70608965926 - 64.3171217786 - 44.36460041182 - 32.95338522348 - 44.31460041182 - 33.12838522348 - 24.75298136325 - 19.273476213 - 15.97636612239 - 13.00317519799 - 10.70826764037 - 9.858997178816 - 10.15607036729 - 173.4616267494 - 
134.015392655 - 114.827429421 - 98.14443456745 - 82.60608965926 - 60.2171217786 - 42.56460041182 - 30.75338522348 - 42.43960041182 - 31.47838522348 - 24.52798136325 - 19.723476213 - 13.83886612239 - 12.74067519799 - 10.89576764037 - 10.98399717882 - 8.243570367288 - 162.8616267494 - 136.115392655 - 107.027429421 - 91.74443456745 - 81.80608965926 - 63.5171217786 - 40.06460041182 - 29.75338522348 - 40.11460041182 - 30.35338522348 - 23.47798136325 - 17.998476213 - 14.92636612239 - 12.17817519799 - 10.10826764037 - 8.621497178816 - 8.093570367288 - 156.9616267494 - 124.415392655 - 104.027429421 - 90.04443456745 - 78.90608965926 - 62.0171217786 - 42.66460041182 - 29.75338522348 - 42.62710041182 - 30.50338522348 - 23.77798136325 - 18.673476213 - 15.26386612239 - 11.76567519799 - 10.33326764037 - 8.471497178816 - 7.756070367288 - 164.3616267494 - 105.627429421 - 48.8819398286 - 18.5818314891 - 19.2918314891 - 21.7318314891 - 7.60022447721 - 160.8616267494 - 103.127429421 - 49.5819398286 - 20.0118314891 - 18.9218314891 - 20.7918314891 - 5.91022447721 - 159.6616267494 - 104.227429421 - 49.2819398286 - 20.0418314891 - 17.9818314891 - 19.4818314891 - 7.41022447721 - 160.8616267494 - 106.727429421 - 50.1819398286 - 18.7318314891 - 7.60022447721 - 18.9218314891 - 19.2918314891 - 7.41022447721 - 159.6616267494 - 103.227429421 - 76.40608965926 - 48.3819398286 - 33.22986519451 - 33.59986519451 - 26.87225158213 - 20.1218314891 - 15.2111293494 - 11.73215109931 - 8.64022447721 - 158.6616267494 - 105.227429421 - 77.40608965926 - 51.5819398286 - 31.02986519451 - 31.34986519451 - 27.36225158213 - 18.9218314891 - 14.3811293494 - 10.04215109931 - 9.39022447721 - 153.3616267494 - 102.727429421 - 77.10608965926 - 52.1819398286 - 32.02986519451 - 32.69986519451 - 29.05225158213 - 21.8118314891 - 12.1311293494 - 11.28215109931 - 12.09022447721 - 160.8616267494 - 123.115392655 - 105.227429421 - 92.54443456745 - 76.30608965926 - 58.8171217786 - 29.55338522348 - 30.42338522348 - 20.88548136325 - 
15.143476213 - 12.14886612239 - 9.030675197988 - 7.853267640371 - 6.518997178816 - 5.881070367288 - 30.3618314891 - 31.9318314891 - 24.2418314891 - 17.9818314891 - 18.2418314891 - 22.2918314891 - 153.6616267494 - 97.927429421 - 76.60608965926 - 56.1819398286 - 40.82986519451 - 31.75225158213 - 40.64986519451 - 31.86225158213 - 23.8718314891 - 15.5111293494 - 14.80215109931 - 10.71022447721 - 163.5616267494 - 104.527429421 - 76.10608965926 - 45.3819398286 - 35.02986519451 - 25.75225158213 - 35.58986519451 - 26.72225158213 - 22.4118314891 - 12.7011293494 - 10.60215109931 - 10.48022447721 - 149.5616267494 - 102.427429421 - 69.40608965926 - 47.0819398286 - 36.72986519451 - 24.35225158213 - 37.01986519451 - 25.00225158213 - 19.5218314891 - 12.5811293494 - 9.93215109931 - 8.61022447721 - 19.8618314891 - 163.8616267494 - 107.727429421 - 51.8819398286 - 19.4818314891 - 6.84022447721 - 19.1118314891 - 7.03022447721 - 21.3618314891 - 20.7918314891 - 21.8118314891 - 38.76460041182 - 39.21460041182 - 164.5616267494 - 104.827429421 - 43.06460041182 - 43.07460041182 - 24.18548136325 - 14.77886612239 - 41.66460041182 - 41.95460041182 - 164.1616267494 - 104.127429421 - 23.66548136325 - 14.43886612239 - 42.16460041182 - 42.21460041182 - - 28.24095690087 - 21.59536620662 - 13.25712029593 - 19.07487076201 - 15.58993695617 - 8.655383440756 - 5.734068533914 - 4.165799626156 - 5.684068533914 - 4.340799626156 - 2.383275058766 - 1.39686578611 - 1.39855054723 - 0.9316549828049 - 0.5908378210265 - 1.296279986221 - 2.850605037275 - 25.94095690087 - 19.79536620662 - 20.65712029593 - 17.77487076201 - 12.48993695617 - 4.555383440756 - 3.934068533914 - 1.965799626156 - 3.809068533914 - 2.690799626156 - 2.158275058766 - 1.84686578611 - -0.7389494527701 - 0.6691549828049 - 0.7783378210265 - 2.421279986221 - 0.9381050372751 - 15.34095690087 - 21.89536620662 - 12.85712029593 - 11.37487076201 - 11.68993695617 - 7.855383440756 - 1.434068533914 - 0.9657996261561 - 1.484068533914 - 1.565799626156 - 
1.108275058766 - 0.12186578611 - 0.3485505472299 - 0.1066549828049 - -0.009162178973513 - 0.05877998622137 - 0.7881050372751 - 9.440956900867 - 10.19536620662 - 9.857120295928 - 9.674870762006 - 8.789936956172 - 6.355383440756 - 4.034068533914 - 0.9657996261561 - 3.996568533914 - 1.715799626156 - 1.408275058766 - 0.79686578611 - 0.6860505472299 - -0.3058450171951 - 0.2158378210265 - -0.09122001377863 - 0.4506050372751 - 16.84095690087 - 11.45712029593 - 3.061569786218 - -1.357147755765 - -0.6471477557654 - 1.792852244235 - 0.8347758927937 - 13.34095690087 - 8.957120295928 - 3.761569786218 - 0.07285224423464 - -1.017147755765 - 0.8528522442346 - -0.8552241072063 - 12.14095690087 - 10.05712029593 - 3.461569786218 - 0.1028522442346 - -1.957147755765 - -0.4571477557654 - 0.6447758927937 - 13.34095690087 - 12.55712029593 - 4.361569786218 - -1.207147755765 - 0.8347758927937 - -1.017147755765 - -0.6471477557654 - 0.6447758927937 - 12.14095690087 - 9.057120295928 - 6.289936956172 - 2.561569786218 - 0.09812922077451 - 0.4681292207745 - 1.601218748851 - 0.1828522442346 - 1.967848278564 - 2.434721057271 - 1.874775892794 - 11.14095690087 - 11.05712029593 - 7.289936956172 - 5.761569786218 - -2.101870779225 - -1.781870779225 - 2.091218748851 - -1.017147755765 - 1.137848278564 - 0.744721057271 - 2.624775892794 - 5.840956900867 - 8.557120295928 - 6.989936956172 - 6.361569786218 - -1.101870779225 - -0.4318707792255 - 3.781218748851 - 1.872852244235 - -1.112151721436 - 1.984721057271 - 5.324775892794 - 13.34095690087 - 8.895366206617 - 11.05712029593 - 12.17487076201 - 6.189936956172 - 3.155383440756 - 0.7657996261561 - 1.635799626156 - -1.484224941234 - -2.73313421389 - -2.42894945277 - -3.040845017195 - -2.264162178974 - -2.043720013779 - -1.424394962725 - 10.42285224423 - 11.99285224423 - 4.302852244235 - -1.957147755765 - -1.697147755765 - 2.352852244235 - 6.140956900867 - 3.757120295928 - 6.489936956172 - 10.36156978622 - 7.698129220775 - 6.481218748851 - 7.518129220775 - 
6.591218748851 - 3.932852244235 - 2.267848278564 - 5.504721057271 - 3.944775892794 - 16.04095690087 - 10.35712029593 - 5.989936956172 - -0.4384302137822 - 1.898129220775 - 0.4812187488511 - 2.458129220775 - 1.451218748851 - 2.472852244235 - -0.5421517214358 - 1.304721057271 - 3.714775892794 - 2.040956900867 - 8.257120295928 - -0.7100630438283 - 1.261569786218 - 3.598129220775 - -0.9187812511489 - 3.888129220775 - -0.2687812511489 - -0.4171477557654 - -0.6621517214358 - 0.634721057271 - 1.844775892794 - -0.07714775576537 - 16.34095690087 - 13.55712029593 - 6.061569786218 - -0.4571477557654 - 0.07477589279366 - -0.8271477557654 - 0.2647758927937 - 1.422852244235 - 0.8528522442346 - 1.872852244235 - 0.1340685339144 - 0.5840685339144 - 17.04095690087 - 10.65712029593 - 4.434068533914 - 4.444068533914 - 1.815775058766 - 0.2010505472299 - 3.034068533914 - 3.324068533914 - 16.64095690087 - 9.957120295928 - 1.295775058766 - -0.1389494527701 - 3.534068533914 - 3.584068533914 - - 19.67905061421 - 12.78536491634 - 4.453409401868 - 10.42602658124 - 7.181651769754 - 0.8467383120783 - -0.8151544815029 - -1.28878727387 - -0.8651544815029 - -1.11378727387 - -2.178296849214 - -2.4437228135 - -1.857756175876 - -1.847417965917 - -1.795022214911 - -0.7628423028115 - 1.065115779582 - 17.37905061421 - 10.98536491634 - 11.85340940187 - 9.126026581237 - 4.081651769754 - -3.253261687922 - -2.615154481503 - -3.48878727387 - -2.740154481503 - -2.76378727387 - -2.403296849214 - -1.9937228135 - -3.995256175876 - -2.109917965917 - -1.607522214911 - 0.3621576971885 - -0.8473842204181 - 6.779050614207 - 13.08536491634 - 4.053409401868 - 2.726026581237 - 3.281651769754 - 0.04673831207827 - -5.115154481503 - -4.48878727387 - -5.065154481503 - -3.88878727387 - -3.453296849214 - -3.7187228135 - -2.907756175876 - -2.672417965917 - -2.395022214911 - -2.000342302812 - -0.9973842204181 - 0.8790506142075 - 1.385364916339 - 1.053409401868 - 1.026026581237 - 0.3816517697539 - -1.453261687922 - 
-2.515154481503 - -4.48878727387 - -2.552654481503 - -3.73878727387 - -3.153296849214 - -3.0437228135 - -2.570256175876 - -3.084917965917 - -2.170022214911 - -2.150342302812 - -1.334884220418 - 8.279050614207 - 2.653409401868 - -4.10628705125 - -5.539020595211 - -4.829020595211 - -2.389020595211 - -0.8305062664824 - 4.779050614207 - 0.1534094018679 - -3.40628705125 - -4.109020595211 - -5.199020595211 - -3.329020595211 - -2.520506266482 - 3.579050614207 - 1.253409401868 - -3.70628705125 - -4.079020595211 - -6.139020595211 - -4.639020595211 - -1.020506266482 - 4.779050614207 - 3.753409401868 - -2.80628705125 - -5.389020595211 - -0.8305062664824 - -5.199020595211 - -4.829020595211 - -1.020506266482 - 3.579050614207 - 0.2534094018679 - -2.118348230246 - -4.60628705125 - -5.8778638078 - -5.5078638078 - -3.383083895331 - -3.999020595211 - -1.038044411845 - 0.2196196759168 - 0.2094937335176 - 2.579050614207 - 2.253409401868 - -1.118348230246 - -1.40628705125 - -8.0778638078 - -7.7578638078 - -2.893083895331 - -5.199020595211 - -1.868044411845 - -1.470380324083 - 0.9594937335176 - -2.720949385793 - -0.2465905981321 - -1.418348230246 - -0.8062870512504 - -7.0778638078 - -6.4078638078 - -1.203083895331 - -2.309020595211 - -4.118044411845 - -0.2303803240832 - 3.659493733518 - 4.779050614207 - 0.0853649163389 - 2.253409401868 - 3.526026581237 - -2.218348230246 - -4.653261687922 - -4.68878727387 - -3.81878727387 - -6.045796849214 - -6.5737228135 - -5.685256175876 - -5.819917965917 - -4.650022214911 - -4.102842302812 - -3.209884220418 - 6.240979404789 - 7.810979404789 - 0.1209794047887 - -6.139020595211 - -5.879020595211 - -1.829020595211 - -2.420949385793 - -5.046590598132 - -1.918348230246 - 3.19371294875 - 1.7221361922 - 1.496916104669 - 1.5421361922 - 1.606916104669 - -0.2490205952113 - -0.738044411845 - 3.289619675917 - 2.279493733518 - 7.479050614207 - 1.553409401868 - -2.418348230246 - -7.60628705125 - -4.0778638078 - -4.503083895331 - -3.5178638078 - -3.533083895331 - 
-1.709020595211 - -3.548044411845 - -0.9103803240832 - 2.049493733518 - -6.520949385793 - -0.5465905981321 - -9.118348230246 - -5.90628705125 - -2.3778638078 - -5.903083895331 - -2.0878638078 - -5.253083895331 - -4.599020595211 - -3.668044411845 - -1.580380324083 - 0.1794937335176 - -4.259020595211 - 7.779050614207 - 4.753409401868 - -1.10628705125 - -4.639020595211 - -1.590506266482 - -5.009020595211 - -1.400506266482 - -2.759020595211 - -3.329020595211 - -2.309020595211 - -6.415154481503 - -5.965154481503 - 8.479050614207 - 1.853409401868 - -2.115154481503 - -2.105154481503 - -2.745796849214 - -3.055256175876 - -3.515154481503 - -3.225154481503 - 8.079050614207 - 1.153409401868 - -3.265796849214 - -3.395256175876 - -3.015154481503 - -2.965154481503 - - 1050.519509418 - 2633.521525076 - -3381.282955438 - -933.4604542894 - -516.3685328612 - -257.5208155114 - -113.8324082065 - -66.57519632661 - -113.8824082065 - -66.40019632661 - -44.86075536786 - -32.39601556788 - -23.86432104696 - -18.5440484742 - -14.76646254613 - -11.02441930035 - -7.169273972705 - 1048.219509418 - 2631.721525076 - -3373.882955438 - -934.7604542894 - -519.4685328612 - -261.6208155114 - -115.6324082065 - -68.77519632661 - -115.7574082065 - -68.05019632661 - -45.08575536786 - -31.94601556788 - -26.00182104696 - -18.8065484742 - -14.57896254613 - -9.899419300347 - -9.081773972705 - 1037.619509418 - 2633.821525076 - -3381.682955438 - -941.1604542894 - -520.2685328612 - -258.3208155114 - -118.1324082065 - -69.77519632661 - -118.0824082065 - -69.17519632661 - -46.13575536786 - -33.67101556788 - -24.91432104696 - -19.3690484742 - -15.36646254613 - -12.26191930035 - -9.231773972705 - 1031.719509418 - 2622.121525076 - -3384.682955438 - -942.8604542894 - -523.1685328612 - -259.8208155114 - -115.5324082065 - -69.77519632661 - -115.5699082065 - -69.02519632661 - -45.83575536786 - -32.99601556788 - -24.57682104696 - -19.7815484742 - -15.14146254613 - -12.41191930035 - -9.569273972705 - 1039.119509418 - 
-3383.082955438 - -165.9142242952 - -41.04746332279 - -40.33746332279 - -37.89746332279 - -8.239694706637 - 1035.619509418 - -3385.582955438 - -165.2142242952 - -39.61746332279 - -40.70746332279 - -38.83746332279 - -9.929694706637 - 1034.419509418 - -3384.482955438 - -165.5142242952 - -39.58746332279 - -41.64746332279 - -40.14746332279 - -8.429694706637 - 1035.619509418 - -3381.982955438 - -164.6142242952 - -40.89746332279 - -8.239694706637 - -40.70746332279 - -40.33746332279 - -8.429694706637 - 1034.419509418 - -3385.482955438 - -525.6685328612 - -166.4142242952 - -89.99572115575 - -89.62572115575 - -55.58585272365 - -39.50746332279 - -20.14157855679 - -11.29463725292 - -7.199694706637 - 1033.419509418 - -3383.482955438 - -524.6685328612 - -163.2142242952 - -92.19572115575 - -91.87572115575 - -55.09585272365 - -40.70746332279 - -20.97157855679 - -12.98463725292 - -6.449694706637 - 1028.119509418 - -3385.982955438 - -524.9685328612 - -162.6142242952 - -91.19572115575 - -90.52572115575 - -53.40585272365 - -37.81746332279 - -23.22157855679 - -11.74463725292 - -3.749694706637 - 1035.619509418 - 2620.821525076 - -3383.482955438 - -940.3604542894 - -525.7685328612 - -263.0208155114 - -69.97519632661 - -69.10519632661 - -48.72825536786 - -36.52601556788 - -27.69182104696 - -22.5165484742 - -17.62146254613 - -14.36441930035 - -11.4442739727 - -29.26746332279 - -27.69746332279 - -35.38746332279 - -41.64746332279 - -41.38746332279 - -37.33746332279 - 1028.419509418 - -3390.782955438 - -525.4685328612 - -158.6142242952 - -82.39572115575 - -50.70585272365 - -82.57572115575 - -50.59585272365 - -35.75746332279 - -19.84157855679 - -8.224637252918 - -5.129694706637 - 1038.319509418 - -3384.182955438 - -525.9685328612 - -169.4142242952 - -88.19572115575 - -56.70585272365 - -87.63572115575 - -55.73585272365 - -37.21746332279 - -22.65157855679 - -12.42463725292 - -5.359694706637 - 1024.319509418 - -3386.282955438 - -532.6685328612 - -167.7142242952 - -86.49572115575 - 
-58.10585272365 - -86.20572115575 - -57.45585272365 - -40.10746332279 - -22.77157855679 - -13.09463725292 - -7.229694706637 - -39.76746332279 - 1038.619509418 - -3380.982955438 - -162.9142242952 - -40.14746332279 - -8.999694706637 - -40.51746332279 - -8.809694706637 - -38.26746332279 - -38.83746332279 - -37.81746332279 - -119.4324082065 - -118.9824082065 - 1039.319509418 - -3383.882955438 - -115.1324082065 - -115.1224082065 - -45.42825536786 - -25.06182104696 - -116.5324082065 - -116.2424082065 - 1038.919509418 - -3384.582955438 - -45.94825536786 - -25.40182104696 - -116.0324082065 - -115.9824082065 - - 20.49103808147 - 13.5209126197 - 5.123721658539 - 11.04005890825 - 7.746672181034 - 1.330664211629 - -0.447396135285 - -0.9996323863606 - -0.497396135285 - -0.8246323863606 - -1.945368320316 - -2.252654921573 - -1.698804957691 - -1.71369037214 - -1.681477631675 - -0.6656948221704 - 1.148772894321 - 18.19103808147 - 11.7209126197 - 12.52372165854 - 9.740058908254 - 4.646672181034 - -2.769335788371 - -2.247396135285 - -3.199632386361 - -2.372396135285 - -2.474632386361 - -2.170368320316 - -1.802654921573 - -3.836304957691 - -1.97619037214 - -1.493977631675 - 0.4593051778296 - -0.7637271056786 - 7.591038081469 - 13.8209126197 - 4.723721658539 - 3.340058908254 - 3.846672181034 - 0.5306642116288 - -4.747396135285 - -4.199632386361 - -4.697396135285 - -3.599632386361 - -3.220368320316 - -3.527654921573 - -2.748804957691 - -2.53869037214 - -2.281477631675 - -1.90319482217 - -0.9137271056786 - 1.691038081469 - 2.120912619699 - 1.723721658539 - 1.640058908254 - 0.9466721810337 - -0.9693357883712 - -2.147396135285 - -4.199632386361 - -2.184896135285 - -3.449632386361 - -2.920368320316 - -2.852654921573 - -2.411304957691 - -2.95119037214 - -2.056477631675 - -2.05319482217 - -1.251227105679 - 9.091038081469 - 3.323721658539 - -3.686566865047 - -5.328474711208 - -4.618474711208 - -2.178474711208 - -0.7527095065441 - 5.591038081469 - 0.8237216585391 - -2.986566865047 - 
-3.898474711208 - -4.988474711208 - -3.118474711208 - -2.442709506544 - 4.391038081469 - 1.923721658539 - -3.286566865047 - -3.868474711208 - -5.928474711208 - -4.428474711208 - -0.9427095065441 - 5.591038081469 - 4.423721658539 - -2.386566865047 - -5.178474711208 - -0.7527095065441 - -4.988474711208 - -4.618474711208 - -0.9427095065441 - 4.391038081469 - 0.9237216585391 - -1.553327818966 - -4.186566865047 - -5.552915024831 - -5.182915024831 - -3.124230683974 - -3.788474711208 - -0.892433332114 - 0.3245543770619 - 0.2872904934559 - 3.391038081469 - 2.923721658539 - -0.5533278189663 - -0.9865668650472 - -7.752915024831 - -7.432915024831 - -2.634230683974 - -4.988474711208 - -1.722433332114 - -1.365445622938 - 1.037290493456 - -1.908961918531 - 0.4237216585391 - -0.8533278189662 - -0.3865668650472 - -6.752915024831 - -6.082915024831 - -0.9442306839744 - -2.098474711208 - -3.972433332114 - -0.1254456229381 - 3.737290493456 - 5.591038081469 - 0.8209126196993 - 2.923721658539 - 4.140058908254 - -1.653327818966 - -4.169335788371 - -4.399632386361 - -3.529632386361 - -5.812868320316 - -6.382654921573 - -5.526304957691 - -5.68619037214 - -4.536477631675 - -4.00569482217 - -3.126227105679 - 6.451525288792 - 8.021525288792 - 0.3315252887919 - -5.928474711208 - -5.668474711208 - -1.618474711208 - -1.608961918531 - -4.376278341461 - -1.353327818966 - 3.613433134953 - 2.047084975169 - 1.755769316026 - 1.867084975169 - 1.865769316026 - -0.03847471120814 - -0.592433332114 - 3.394554377062 - 2.357290493456 - 8.291038081469 - 2.223721658539 - -1.853327818966 - -7.186566865047 - -3.752915024831 - -4.244230683974 - -3.192915024831 - -3.274230683974 - -1.498474711208 - -3.402433332114 - -0.8054456229381 - 2.127290493456 - -5.708961918531 - 0.1237216585391 - -8.553327818966 - -5.486566865047 - -2.052915024831 - -5.644230683974 - -1.762915024831 - -4.994230683974 - -4.388474711208 - -3.522433332114 - -1.475445622938 - 0.2572904934559 - -4.048474711208 - 8.591038081469 - 5.423721658539 - 
-0.6865668650472 - -4.428474711208 - -1.512709506544 - -4.798474711208 - -1.322709506544 - -2.548474711208 - -3.118474711208 - -2.098474711208 - -6.047396135285 - -5.597396135285 - 9.291038081469 - 2.523721658539 - -1.747396135285 - -1.737396135285 - -2.512868320316 - -2.896304957691 - -3.147396135285 - -2.857396135285 - 8.891038081469 - 1.823721658539 - -3.032868320316 - -3.236304957691 - -2.647396135285 - -2.597396135285 - - 11.34572446599 - 12.71016226644 - 9.103954561983 - 17.84889991643 - 16.22846806054 - 11.28579424593 - 9.470899876731 - 7.817399827028 - 9.420899876731 - 7.992399827028 - 5.668390695887 - 4.274674199655 - 3.897111252284 - 3.096292463526 - 2.467532445354 - 2.926528936865 - 4.270267329615 - 9.045724465995 - 10.91016226644 - 16.50395456198 - 16.54889991643 - 13.12846806054 - 7.185794245929 - 7.670899876731 - 5.617399827028 - 7.545899876731 - 6.342399827028 - 5.443390695887 - 4.724674199655 - 1.759611252284 - 2.833792463526 - 2.655032445354 - 4.051528936865 - 2.357767329615 - -1.554275534005 - 13.01016226644 - 8.703954561983 - 10.14889991643 - 12.32846806054 - 10.48579424593 - 5.170899876731 - 4.617399827028 - 5.220899876731 - 5.217399827028 - 4.393390695887 - 2.999674199655 - 2.847111252284 - 2.271292463526 - 1.867532445354 - 1.689028936865 - 2.207767329615 - -7.454275534005 - 1.310162266438 - 5.703954561983 - 8.448899916427 - 9.428468060537 - 8.985794245929 - 7.770899876731 - 4.617399827028 - 7.733399876731 - 5.367399827028 - 4.693390695887 - 3.674674199655 - 3.184611252284 - 1.858792463526 - 2.092532445354 - 1.539028936865 - 1.870267329615 - -0.05427553400547 - 7.303954561983 - 6.51450668858 - 1.723185425401 - 2.433185425401 - 4.873185425401 - 2.160876526885 - -3.554275534005 - 4.803954561983 - 7.21450668858 - 3.153185425401 - 2.063185425401 - 3.933185425401 - 0.4708765268852 - -4.754275534005 - 5.903954561983 - 6.91450668858 - 3.183185425401 - 1.123185425401 - 2.623185425401 - 1.970876526885 - -3.554275534005 - 8.403954561983 - 7.81450668858 - 
1.873185425401 - 2.160876526885 - 2.063185425401 - 2.433185425401 - 1.970876526885 - -4.754275534005 - 4.903954561983 - 6.928468060537 - 6.01450668858 - 3.857070725448 - 4.227070725448 - 5.083097819871 - 3.263185425401 - 4.293542360661 - 4.18335415557 - 3.200876526885 - -5.754275534005 - 6.903954561983 - 7.928468060537 - 9.21450668858 - 1.657070725448 - 1.977070725448 - 5.573097819871 - 2.063185425401 - 3.463542360661 - 2.49335415557 - 3.950876526885 - -11.05427553401 - 4.403954561983 - 7.628468060537 - 9.81450668858 - 2.657070725448 - 3.327070725448 - 7.263097819871 - 4.953185425401 - 1.213542360661 - 3.73335415557 - 6.650876526885 - -3.554275534005 - 0.01016226643847 - 6.903954561983 - 10.94889991643 - 6.828468060537 - 5.785794245929 - 4.417399827028 - 5.287399827028 - 1.800890695887 - 0.1446741996551 - 0.06961125228437 - -0.8762075364739 - -0.3874675546463 - -0.4134710631349 - -0.004732670384874 - 13.5031854254 - 15.0731854254 - 7.383185425401 - 1.123185425401 - 1.383185425401 - 5.433185425401 - -10.75427553401 - -0.3960454380168 - 7.128468060537 - 13.81450668858 - 11.45707072545 - 9.963097819871 - 11.27707072545 - 10.07309781987 - 7.013185425401 - 4.593542360661 - 7.25335415557 - 5.270876526885 - -0.8542755340055 - 6.203954561983 - 6.628468060537 - 3.01450668858 - 5.657070725448 - 3.963097819871 - 6.217070725448 - 4.933097819871 - 5.553185425401 - 1.783542360661 - 3.05335415557 - 5.040876526885 - -14.85427553401 - 4.103954561983 - -0.07153193946291 - 4.71450668858 - 7.357070725448 - 2.563097819871 - 7.647070725448 - 3.213097819871 - 2.663185425401 - 1.663542360661 - 2.38335415557 - 3.170876526885 - 3.003185425401 - -0.5542755340055 - 9.403954561983 - 9.51450668858 - 2.623185425401 - 1.400876526885 - 2.253185425401 - 1.590876526885 - 4.503185425401 - 3.933185425401 - 4.953185425401 - 3.870899876731 - 4.320899876731 - 0.1457244659945 - 6.503954561983 - 8.170899876731 - 8.180899876731 - 5.100890695887 - 2.699611252284 - 6.770899876731 - 7.060899876731 - 
-0.2542755340055 - 5.803954561983 - 4.580890695887 - 2.359611252284 - 7.270899876731 - 7.320899876731 - - 16.53673795362 - 12.64348483951 - 6.243066148003 - 13.47083954897 - 11.03975774331 - 5.537722754593 - 4.09957193688 - 3.220802215329 - 4.04957193688 - 3.395802215329 - 1.800447382832 - 1.020568885813 - 1.147168191638 - 0.7592201452716 - 0.470019408067 - 1.210124173449 - 2.788233035035 - 14.23673795362 - 10.84348483951 - 13.643066148 - 12.17083954897 - 7.939757743311 - 1.437722754593 - 2.29957193688 - 1.020802215329 - 2.17457193688 - 1.745802215329 - 1.575447382832 - 1.470568885813 - -0.9903318083618 - 0.4967201452716 - 0.657519408067 - 2.335124173449 - 0.8757330350355 - 3.636737953619 - 12.94348483951 - 5.843066148002 - 5.770839548972 - 7.139757743311 - 4.737722754593 - -0.2004280631201 - 0.02080221532934 - -0.1504280631201 - 0.6208022153293 - 0.525447382832 - -0.2544311141866 - 0.09716819163822 - -0.06577985472837 - -0.129980591933 - -0.02737582655083 - 0.7257330350355 - -2.263262046381 - 1.24348483951 - 2.843066148002 - 4.070839548972 - 4.239757743311 - 3.237722754593 - 2.39957193688 - 0.02080221532934 - 2.36207193688 - 0.7708022153293 - 0.825447382832 - 0.4205688858134 - 0.4346681916382 - -0.4782798547284 - 0.09501940806695 - -0.1773758265508 - 0.3882330350355 - 5.136737953619 - 4.443066148003 - 0.8383235109659 - -1.82318329708 - -1.11318329708 - 1.32681670292 - 0.7814350074993 - 1.636737953619 - 1.943066148003 - 1.538323510966 - -0.39318329708 - -1.48318329708 - 0.38681670292 - -0.9085649925007 - 0.4367379536194 - 3.043066148003 - 1.238323510966 - -0.36318329708 - -2.42318329708 - -0.92318329708 - 0.5914350074993 - 1.636737953619 - 5.543066148003 - 2.138323510966 - -1.67318329708 - 0.7814350074993 - -1.48318329708 - -1.11318329708 - 0.5914350074993 - 0.4367379536194 - 2.043066148003 - 1.739757743311 - 0.3383235109659 - -1.132711355225 - -0.7627113552248 - 0.8640180395549 - -0.28318329708 - 1.760258757134 - 2.332901512346 - 1.821435007499 - -0.5632620463806 
- 4.043066148003 - 2.739757743311 - 3.538323510966 - -3.332711355225 - -3.012711355225 - 1.354018039555 - -1.48318329708 - 0.9302587571339 - 0.6429015123457 - 2.571435007499 - -5.863262046381 - 1.543066148003 - 2.439757743311 - 4.138323510966 - -2.332711355225 - -1.662711355225 - 3.044018039555 - 1.40681670292 - -1.319741242866 - 1.882901512346 - 5.271435007499 - 1.636737953619 - -0.05651516048991 - 4.043066148003 - 6.570839548972 - 1.639757743311 - 0.03772275459342 - -0.1791977846707 - 0.6908022153293 - -2.067052617168 - -3.109431114187 - -2.680331808362 - -3.213279854728 - -2.384980591933 - -2.129875826551 - -1.486766964965 - 9.95681670292 - 11.52681670292 - 3.83681670292 - -2.42318329708 - -2.16318329708 - 1.88681670292 - -5.563262046381 - -3.256933851997 - 1.939757743311 - 8.138323510966 - 6.467288644775 - 5.744018039555 - 6.287288644775 - 5.854018039555 - 3.46681670292 - 2.060258757134 - 5.402901512346 - 3.891435007499 - 4.336737953619 - 3.343066148002 - 1.439757743311 - -2.661676489034 - 0.6672886447752 - -0.2559819604451 - 1.227288644775 - 0.7140180395549 - 2.00681670292 - -0.7497412428661 - 1.202901512346 - 3.661435007499 - -9.663262046381 - 1.243066148003 - -5.260242256689 - -0.9616764890341 - 2.367288644775 - -1.655981960445 - 2.657288644775 - -1.005981960445 - -0.88318329708 - -0.8697412428661 - 0.5329015123457 - 1.791435007499 - -0.54318329708 - 4.636737953619 - 6.543066148003 - 3.838323510966 - -0.92318329708 - 0.02143500749933 - -1.29318329708 - 0.2114350074993 - 0.95681670292 - 0.38681670292 - 1.40681670292 - -1.50042806312 - -1.05042806312 - 5.336737953619 - 3.643066148003 - 2.79957193688 - 2.80957193688 - 1.232947382832 - -0.05033180836178 - 1.39957193688 - 1.68957193688 - 4.936737953619 - 2.943066148003 - 0.712947382832 - -0.3903318083618 - 1.89957193688 - 1.94957193688 history_x: - - 0.15 - 0.008 - 0.01 - - 0.25 - 0.008 - 0.01 - - 0.15 - 0.108 - 0.01 - - 0.15 - 0.008 - 0.11 - - 0.1596177824551 - -0.07539624732067 - 0.08766385239892 - - 0.2 - 
0.008531162120637 - -0.002952684076318 - - 0.1505141617677 - -0.04199731338289 - 0.009934485345754 - - 0.1374618789969 - 0.007934485345754 - -0.03840238867598 - - 0.1505250437069 - 0.007964908595663 - 0.01275913089388 - - 0.149883507892 - 0.008098080768719 - 0.009146244784311 - - 0.1716712756093 - -0.003385426549061 - 0.004854131368058 - - 0.1499498551576 - 0.008185153997901 - 0.009255435636305 - - 0.1486949409413 - 0.001680047032405 - 0.01940631659429 - - 0.1494212312914 - 0.005607806220598 - 0.01308958287811 interpolation_set_expected: - - 0.0 - 0.0 - 0.0 - - -0.0581032280076 - -0.3142207350554 - 0.5053386972944 - - 0.04228990929916 - 0.2061878221842 - -0.3067317793442 - - 1.780003545435 - -0.7194586215727 - -0.658836120804 - - 0.03698212804807 - 0.1992219638497 - -0.3154670475037 - - 0.08830499324404 - 0.1885681900052 - -0.02643615873801 - - 0.0874344381026 - -3.808409568279 - -0.2524078025883 linear_terms_residual_model: - - 723.7257702007 - 1733.358000128 - -2111.94123313 - -558.4561073307 - -289.1782671082 - -123.1292919174 - -32.05431142241 - 7.210065582227 - -32.05431142241 - 7.210065582217 - 128.5030418743 - -61.94075414453 - -28.86164804598 - 59.16327636 - -6.773101689526 - -5.359113834066 - -3.97310556824 - 723.7257702007 - 1733.358000128 - -2111.94123313 - -558.4561073307 - -289.1782671082 - -123.1292919174 - -32.0543114224 - 7.210065582226 - -32.0543114224 - 7.210065582223 - 128.5030418743 - -61.94075414453 - -28.86164804599 - 59.16327636 - -6.773101689526 - -5.359113834064 - -3.97310556824 - 723.7257702008 - 1733.358000128 - -2111.94123313 - -558.4561073307 - -289.1782671082 - -123.1292919174 - -32.0543114224 - 7.210065582221 - -32.0543114224 - 7.210065582222 - 128.5030418743 - -61.94075414453 - -28.86164804599 - 59.16327636 - -6.773101689525 - -5.359113834065 - -3.97310556824 - 723.7257702007 - 1733.358000128 - -2111.94123313 - -558.4561073307 - -289.1782671082 - -123.1292919174 - -32.05431142241 - 7.210065582221 - -32.0543114224 - 7.210065582226 - 
128.5030418743 - -61.94075414453 - -28.86164804599 - 59.16327636 - -6.773101689524 - -5.359113834066 - -3.97310556824 - 723.7257702007 - -2111.94123313 - -63.20258122701 - -187.067690334 - -187.067690334 - -187.067690334 - -3.412910016525 - 723.7257702008 - -2111.94123313 - -63.20258122701 - -187.067690334 - -187.067690334 - -187.067690334 - -3.412910016525 - 723.7257702007 - -2111.94123313 - -63.20258122701 - -187.067690334 - -187.067690334 - -187.067690334 - -3.412910016525 - 723.7257702008 - -2111.94123313 - -63.20258122702 - -187.067690334 - -3.412910016525 - -187.067690334 - -187.067690334 - -3.412910016525 - 723.7257702007 - -2111.94123313 - -289.1782671082 - -63.20258122702 - -11.3061006031 - -11.3061006031 - 33.45212714558 - -187.067690334 - -26.50116734208 - -6.14459279585 - -3.412910016526 - 723.7257702007 - -2111.94123313 - -289.1782671082 - -63.202581227 - -11.30610060311 - -11.3061006031 - 33.45212714558 - -187.067690334 - -26.50116734208 - -6.14459279585 - -3.412910016526 - 723.7257702007 - -2111.94123313 - -289.1782671082 - -63.20258122701 - -11.30610060311 - -11.30610060311 - 33.45212714559 - -187.067690334 - -26.50116734207 - -6.144592795851 - -3.412910016524 - 723.7257702008 - 1733.358000128 - -2111.94123313 - -558.4561073307 - -289.1782671082 - -123.1292919174 - 7.210065582226 - 7.210065582224 - 128.5030418743 - -61.94075414453 - -28.86164804599 - 59.16327636 - -6.773101689526 - -5.359113834066 - -3.973105568241 - -187.067690334 - -187.067690334 - -187.067690334 - -187.067690334 - -187.067690334 - -187.067690334 - 723.7257702007 - -2111.94123313 - -289.1782671082 - -63.20258122702 - -11.30610060312 - 33.45212714558 - -11.30610060311 - 33.45212714559 - -187.067690334 - -26.50116734208 - -6.14459279585 - -3.412910016525 - 723.7257702007 - -2111.94123313 - -289.1782671082 - -63.20258122701 - -11.30610060312 - 33.45212714558 - -11.30610060311 - 33.45212714558 - -187.067690334 - -26.50116734208 - -6.14459279585 - -3.412910016526 - 723.7257702007 - 
-2111.94123313 - -289.1782671082 - -63.20258122701 - -11.30610060311 - 33.45212714559 - -11.30610060312 - 33.45212714558 - -187.067690334 - -26.50116734208 - -6.14459279585 - -3.412910016527 - -187.067690334 - 723.7257702008 - -2111.94123313 - -63.20258122702 - -187.067690334 - -3.412910016525 - -187.067690334 - -3.412910016526 - -187.067690334 - -187.067690334 - -187.067690334 - -32.05431142241 - -32.05431142242 - 723.7257702007 - -2111.94123313 - -32.05431142241 - -32.05431142241 - 128.5030418743 - -28.86164804599 - -32.05431142241 - -32.05431142241 - 723.7257702007 - -2111.94123313 - 128.5030418743 - -28.86164804599 - -32.05431142241 - -32.05431142241 - - -250.9049856979 - -713.548298078 - 991.3297761498 - 292.2346826038 - 167.0701872133 - 85.53108774506 - 35.98074162601 - 13.74284457971 - 35.98074162601 - 13.74284457972 - -38.33093307754 - 33.90567353701 - 18.14259771035 - 14.18583175432 - 9.62399002556 - 7.529398915525 - 6.047825533545 - -250.9049856979 - -713.548298078 - 991.3297761498 - 292.2346826038 - 167.0701872133 - 85.53108774507 - 35.980741626 - 13.74284457971 - 35.980741626 - 13.74284457971 - -38.33093307755 - 33.90567353701 - 18.14259771035 - 14.18583175432 - 9.62399002556 - 7.529398915524 - 6.047825533545 - -250.904985698 - -713.5482980779 - 991.3297761498 - 292.2346826038 - 167.0701872133 - 85.53108774506 - 35.980741626 - 13.74284457971 - 35.980741626 - 13.74284457971 - -38.33093307754 - 33.90567353701 - 18.14259771035 - 14.18583175432 - 9.62399002556 - 7.529398915524 - 6.047825533545 - -250.9049856979 - -713.5482980779 - 991.3297761498 - 292.2346826038 - 167.0701872133 - 85.53108774506 - 35.98074162601 - 13.74284457971 - 35.980741626 - 13.74284457971 - -38.33093307754 - 33.90567353701 - 18.14259771035 - 14.18583175432 - 9.62399002556 - 7.529398915525 - 6.047825533545 - -250.9049856979 - 991.3297761498 - 53.62515459183 - 84.70726176104 - 84.70726176104 - 84.70726176104 - 5.458199688096 - -250.904985698 - 991.3297761498 - 53.62515459184 - 
84.70726176104 - 84.70726176104 - 84.70726176104 - 5.458199688096 - -250.9049856979 - 991.3297761499 - 53.62515459184 - 84.70726176104 - 84.70726176104 - 84.70726176104 - 5.458199688096 - -250.904985698 - 991.3297761499 - 53.62515459184 - 84.70726176104 - 5.458199688096 - 84.70726176104 - 84.70726176104 - 5.458199688096 - -250.9049856979 - 991.3297761499 - 167.0701872133 - 53.62515459184 - 23.96018695733 - 23.96018695733 - 1.082529471011 - 84.70726176104 - 14.82949233042 - 8.477216658368 - 5.458199688097 - -250.9049856979 - 991.3297761499 - 167.0701872133 - 53.62515459183 - 23.96018695733 - 23.96018695733 - 1.082529471012 - 84.70726176104 - 14.82949233042 - 8.477216658368 - 5.458199688097 - -250.9049856979 - 991.3297761499 - 167.0701872133 - 53.62515459184 - 23.96018695733 - 23.96018695733 - 1.082529471004 - 84.70726176104 - 14.82949233042 - 8.477216658369 - 5.458199688096 - -250.904985698 - -713.548298078 - 991.3297761499 - 292.2346826038 - 167.0701872133 - 85.53108774506 - 13.74284457971 - 13.74284457971 - -38.33093307754 - 33.90567353701 - 18.14259771035 - 14.18583175432 - 9.623990025561 - 7.529398915525 - 6.047825533545 - 84.70726176104 - 84.70726176104 - 84.70726176104 - 84.70726176104 - 84.70726176104 - 84.70726176104 - -250.904985698 - 991.3297761498 - 167.0701872133 - 53.62515459184 - 23.96018695734 - 1.08252947101 - 23.96018695733 - 1.082529471007 - 84.70726176104 - 14.82949233042 - 8.477216658368 - 5.458199688096 - -250.9049856979 - 991.3297761498 - 167.0701872133 - 53.62515459184 - 23.96018695734 - 1.082529471008 - 23.96018695733 - 1.08252947101 - 84.70726176104 - 14.82949233042 - 8.477216658368 - 5.458199688096 - -250.904985698 - 991.3297761499 - 167.0701872133 - 53.62515459184 - 23.96018695733 - 1.082529471007 - 23.96018695734 - 1.082529471008 - 84.70726176104 - 14.82949233042 - 8.477216658368 - 5.458199688097 - 84.70726176104 - -250.904985698 - 991.3297761499 - 53.62515459184 - 84.70726176104 - 5.458199688096 - 84.70726176104 - 5.458199688096 - 
84.70726176104 - 84.70726176104 - 84.70726176104 - 35.98074162601 - 35.98074162601 - -250.904985698 - 991.3297761498 - 35.98074162601 - 35.98074162601 - -38.33093307755 - 18.14259771035 - 35.98074162601 - 35.98074162601 - -250.9049856979 - 991.3297761498 - -38.33093307754 - 18.14259771035 - 35.98074162601 - 35.98074162601 - - -86.32920671897 - -249.9457650428 - 379.8255147156 - 124.7291668319 - 79.62656596282 - 49.85277513245 - 29.83406116421 - 19.43979000176 - 29.83406116421 - 19.43979000176 - -0.5081983391073 - 21.74505371124 - 14.64616559016 - 20.52866927178 - 10.12254826871 - 8.345670744827 - 7.034149386141 - -86.32920671897 - -249.9457650428 - 379.8255147156 - 124.7291668319 - 79.62656596282 - 49.85277513245 - 29.83406116421 - 19.43979000176 - 29.83406116421 - 19.43979000176 - -0.5081983391093 - 21.74505371124 - 14.64616559016 - 20.52866927178 - 10.12254826871 - 8.345670744827 - 7.034149386141 - -86.32920671898 - -249.9457650428 - 379.8255147156 - 124.7291668319 - 79.62656596282 - 49.85277513245 - 29.83406116421 - 19.43979000176 - 29.83406116421 - 19.43979000176 - -0.5081983391052 - 21.74505371124 - 14.64616559016 - 20.52866927178 - 10.12254826871 - 8.345670744827 - 7.034149386141 - -86.32920671897 - -249.9457650428 - 379.8255147156 - 124.7291668319 - 79.62656596282 - 49.85277513245 - 29.83406116421 - 19.43979000176 - 29.83406116421 - 19.43979000176 - -0.5081983391036 - 21.74505371124 - 14.64616559016 - 20.52866927178 - 10.12254826871 - 8.345670744828 - 7.034149386141 - -86.32920671897 - 379.8255147156 - 37.38830735093 - 39.79428584982 - 39.79428584982 - 39.79428584981 - 6.487537441591 - -86.32920671898 - 379.8255147156 - 37.38830735093 - 39.79428584981 - 39.79428584982 - 39.79428584982 - 6.487537441591 - -86.32920671897 - 379.8255147156 - 37.38830735093 - 39.79428584982 - 39.79428584982 - 39.79428584982 - 6.487537441591 - -86.32920671898 - 379.8255147156 - 37.38830735094 - 39.79428584982 - 6.487537441591 - 39.79428584982 - 39.79428584982 - 6.487537441591 - 
-86.32920671897 - 379.8255147156 - 79.62656596282 - 37.38830735093 - 24.27471941816 - 24.27471941816 - 13.90491560132 - 39.79428584982 - 12.42064972558 - 9.15250698783 - 6.487537441591 - -86.32920671897 - 379.8255147156 - 79.62656596282 - 37.38830735093 - 24.27471941816 - 24.27471941816 - 13.90491560132 - 39.79428584982 - 12.42064972558 - 9.15250698783 - 6.487537441591 - -86.32920671896 - 379.8255147156 - 79.62656596282 - 37.38830735093 - 24.27471941816 - 24.27471941816 - 13.90491560132 - 39.79428584981 - 12.42064972558 - 9.152506987831 - 6.487537441591 - -86.32920671898 - -249.9457650428 - 379.8255147156 - 124.7291668319 - 79.62656596282 - 49.85277513245 - 19.43979000176 - 19.43979000176 - -0.5081983391036 - 21.74505371124 - 14.64616559016 - 20.52866927178 - 10.12254826871 - 8.345670744827 - 7.034149386141 - 39.79428584981 - 39.79428584981 - 39.79428584981 - 39.79428584982 - 39.79428584981 - 39.79428584982 - -86.32920671897 - 379.8255147156 - 79.62656596282 - 37.38830735094 - 24.27471941817 - 13.90491560132 - 24.27471941816 - 13.90491560132 - 39.79428584982 - 12.42064972558 - 9.15250698783 - 6.487537441591 - -86.32920671897 - 379.8255147156 - 79.62656596282 - 37.38830735093 - 24.27471941817 - 13.90491560132 - 24.27471941816 - 13.90491560132 - 39.79428584981 - 12.42064972558 - 9.15250698783 - 6.487537441591 - -86.32920671897 - 379.8255147156 - 79.62656596282 - 37.38830735093 - 24.27471941816 - 13.90491560132 - 24.27471941817 - 13.90491560132 - 39.79428584982 - 12.42064972558 - 9.15250698783 - 6.487537441591 - 39.79428584982 - -86.32920671898 - 379.8255147156 - 37.38830735094 - 39.79428584982 - 6.487537441591 - 39.79428584981 - 6.487537441591 - 39.79428584982 - 39.79428584982 - 39.79428584981 - 29.83406116421 - 29.83406116421 - -86.32920671897 - 379.8255147156 - 29.83406116421 - 29.83406116421 - -0.5081983391078 - 14.64616559016 - 29.83406116421 - 29.83406116421 - -86.32920671897 - 379.8255147156 - -0.5081983391052 - 14.64616559016 - 29.83406116421 - 29.83406116421 
model_indices: - 13 - 12 - 11 - 10 - 10 - 7 - 6 n_modelpoints: 7 residuals: - 14.13989818606 - 10.00317449361 - 3.744918552079 - 10.77980934854 - 8.347596395984 - 2.920071415924 - 1.796288999298 - 1.248596846502 - 1.746288999298 - 1.423596846502 - 0.03689425329096 - -0.3219611012974 - 0.0002647009432151 - -0.881408646043 - -0.4303702662499 - 0.4401190678521 - 2.121655004644 - 11.83989818606 - 8.203174493607 - 11.14491855208 - 9.479809348539 - 5.247596395983 - -1.179928584076 - -0.003711000701976 - -0.9514031534983 - -0.128711000702 - -0.2264031534983 - -0.188105746709 - 0.1280388987026 - -2.137235299057 - -1.143908646043 - -0.2428702662499 - 1.565119067852 - 0.2091550046441 - 1.239898186057 - 10.30317449361 - 3.344918552079 - 3.079809348539 - 4.447596395984 - 2.120071415924 - -2.503711000702 - -1.951403153498 - -2.453711000702 - -1.351403153498 - -1.238105746709 - -1.596961101297 - -1.049735299057 - -1.706408646043 - -1.03037026625 - -0.7973809321479 - 0.05915500464407 - -4.660101813943 - -1.396825506394 - 0.3449185520786 - 1.379809348539 - 1.547596395983 - 0.6200714159243 - 0.09628899929797 - -1.951403153498 - 0.05878899929802 - -1.201403153498 - -0.9381057467091 - -0.9219611012974 - -0.7122352990568 - -2.118908646043 - -0.8053702662499 - -0.9473809321479 - -0.2783449953559 - 2.739898186057 - 1.944918552078 - -1.63399667708 - -3.183999068009 - -2.473999068009 - -0.03399906800905 - 0.159797079125 - -0.7601018139425 - -0.5550814479215 - -0.9339966770796 - -1.753999068009 - -2.843999068009 - -0.9739990680091 - -1.530202920875 - -1.960101813943 - 0.5449185520779 - -1.23399667708 - -1.723999068009 - -3.783999068009 - -2.283999068009 - -0.03020292087504 - -0.7601018139425 - 3.044918552078 - -0.3339966770796 - -3.033999068009 - 0.159797079125 - -2.843999068009 - -2.473999068009 - -0.03020292087504 - -1.960101813943 - -0.4550814479221 - -0.9524036040165 - -2.13399667708 - -3.265462533007 - -2.895462533007 - -0.9678561331385 - -1.643999068009 - 0.7313676292134 - 
1.502356325981 - 1.199797079125 - -2.960101813943 - 1.544918552078 - 0.04759639598348 - 1.06600332292 - -5.465462533007 - -5.145462533007 - -0.4778561331385 - -2.843999068009 - -0.09863237078665 - -0.1876436740187 - 1.949797079125 - -8.260101813943 - -0.9550814479221 - -0.2524036040165 - 1.66600332292 - -4.465462533007 - -3.795462533007 - 1.212143866862 - 0.04600093199099 - -2.348632370787 - 1.052356325981 - 4.649797079125 - -0.7601018139425 - -2.696825506393 - 1.544918552078 - 3.879809348539 - -1.052403604016 - -2.579928584076 - -2.151403153498 - -1.281403153498 - -3.830605746709 - -4.451961101297 - -3.827235299057 - -4.853908646043 - -3.28537026625 - -2.899880932148 - -2.153344995356 - 8.596000931991 - 10.16600093199 - 2.476000931991 - -3.783999068009 - -3.523999068009 - 0.5260009319909 - -7.960101813943 - -5.755081447921 - -0.7524036040165 - 5.66600332292 - 4.334537466993 - 3.912143866861 - 4.154537466993 - 4.022143866862 - 2.106000931991 - 1.031367629213 - 4.572356325981 - 3.269797079125 - 1.939898186057 - 0.8449185520786 - -1.252403604017 - -5.13399667708 - -1.465462533007 - -2.087856133139 - -0.9054625330074 - -1.117856133138 - 0.6460009319909 - -1.778632370787 - 0.3723563259813 - 3.039797079125 - -12.06010181394 - -1.255081447922 - -7.952403604016 - -3.43399667708 - 0.2345374669926 - -3.487856133138 - 0.5245374669926 - -2.837856133139 - -2.243999068009 - -1.898632370787 - -0.2976436740187 - 1.169797079125 - -1.903999068009 - 2.239898186058 - 4.044918552078 - 1.36600332292 - -2.283999068009 - -0.600202920875 - -2.653999068009 - -0.410202920875 - -0.4039990680091 - -0.9739990680091 - 0.04600093199099 - -3.803711000702 - -3.353711000702 - 2.939898186057 - 1.144918552079 - 0.496288999298 - 0.506288999298 - -0.5306057467091 - -1.197235299057 - -0.9037110007021 - -0.6137110007021 - 2.539898186057 - 0.4449185520785 - -1.050605746709 - -1.537235299057 - -0.4037110007021 - -0.3537110007021 square_terms_residual_model: - - - -266.0047978153 - -104.4240748307 - 
113.152993159 - - -104.4240748307 - -122.1523164991 - -86.07363307296 - - 113.152993159 - -86.07363307296 - 16.31641633252 - - - -693.1880101263 - -282.0272651787 - 185.6089587299 - - -282.0272651787 - -377.0062670135 - -199.8947029577 - - 185.6089587299 - -199.8947029577 - 32.55049992815 - - - 912.6451934849 - 383.7119079265 - -122.603887661 - - 383.7119079265 - 559.8400701698 - 213.0361667162 - - -122.603887661 - 213.0361667162 - -29.3843898806 - - - 259.2154297329 - 115.0514237096 - -9.284737880698 - - 115.0514237096 - 174.2494476566 - 49.11184063816 - - -9.284737880698 - 49.11184063816 - -1.549507008423 - - - 143.5729232274 - 64.4900364164 - 14.94615333046 - - 64.4900364164 - 105.0390768054 - 17.44042411675 - - 14.94615333046 - 17.44042411675 - -0.4689104638637 - - - 68.45844162431 - 32.65296277576 - 25.04360055654 - - 32.65296277576 - 59.00150809543 - 0.7592426398398 - - 25.04360055654 - 0.7592426398398 - 0.9871650554024 - - - 18.93707066102 - 11.68664155631 - 28.03242287255 - - 11.68664155631 - 30.15119945711 - -6.606690969349 - - 28.03242287255 - -6.606690969349 - 0.6417870082805 - - - -14.88013773366 - -1.672249752224 - 30.57297543811 - - -1.672249752224 - 17.31712592596 - -10.5469407781 - - 30.57297543811 - -10.5469407781 - -0.7358906114451 - - - 18.93707066102 - 11.68664155631 - 28.03242287255 - - 11.68664155631 - 30.15119945711 - -6.606690969349 - - 28.03242287255 - -6.606690969349 - 0.6417870082805 - - - -14.88013773366 - -1.672249752224 - 30.57297543811 - - -1.672249752224 - 17.31712592596 - -10.5469407781 - - 30.57297543811 - -10.5469407781 - -0.7358906114448 - - - -172.4440433656 - -58.30234348421 - 67.88122528662 - - -58.30234348421 - -10.11688714194 - -39.74497829255 - - 67.88122528662 - -39.74497829255 - -9.026701693999 - - - 87.41811326922 - 33.86514838909 - -5.240102033061 - - 33.86514838909 - 29.49048109152 - 14.67250320165 - - -5.240102033061 - 14.67250320165 - 4.833599928218 - - - 41.68746835262 - 18.32521144382 - 3.890270269493 - - 
18.32521144382 - 28.10847211295 - 9.269126205224 - - 3.890270269493 - 9.269126205224 - 2.450369115355 - - - 2.547257721925 - -41.98520512744 - 20.72868343281 - - -41.98520512744 - -302.2224872041 - -113.9339256901 - - 20.72868343281 - -113.9339256901 - -2.362980518942 - - - 18.58084217516 - 5.678085135653 - 6.602937980917 - - 5.678085135653 - -5.592544704067 - -3.940958740204 - - 6.602937980917 - -3.940958740204 - 0.9575365227362 - - - 14.66994772782 - 5.070231742209 - 5.910736412971 - - 5.070231742209 - -0.4082883762073 - -1.880932217244 - - 5.910736412971 - -1.880932217244 - 0.79286999734 - - - 11.79669162928 - 4.251219274511 - 5.316966607371 - - 4.251219274511 - 0.8144741803491 - -1.252738735688 - - 5.316966607371 - -1.252738735688 - 0.6496190714454 - - - -266.0047978153 - -104.4240748307 - 113.152993159 - - -104.4240748307 - -122.1523164991 - -86.07363307296 - - 113.152993159 - -86.07363307296 - 16.31641633252 - - - -693.1880101263 - -282.0272651787 - 185.6089587299 - - -282.0272651787 - -377.0062670135 - -199.8947029577 - - 185.6089587299 - -199.8947029577 - 32.55049992815 - - - 912.6451934849 - 383.7119079265 - -122.603887661 - - 383.7119079265 - 559.8400701698 - 213.0361667162 - - -122.603887661 - 213.0361667162 - -29.3843898806 - - - 259.2154297329 - 115.0514237096 - -9.284737880698 - - 115.0514237096 - 174.2494476566 - 49.11184063816 - - -9.284737880698 - 49.11184063816 - -1.549507008423 - - - 143.5729232274 - 64.4900364164 - 14.94615333046 - - 64.4900364164 - 105.0390768054 - 17.44042411675 - - 14.94615333046 - 17.44042411675 - -0.4689104638632 - - - 68.45844162431 - 32.65296277576 - 25.04360055654 - - 32.65296277576 - 59.00150809543 - 0.7592426398398 - - 25.04360055654 - 0.7592426398398 - 0.9871650554029 - - - 18.93707066102 - 11.68664155631 - 28.03242287255 - - 11.68664155631 - 30.15119945711 - -6.606690969349 - - 28.03242287255 - -6.606690969349 - 0.6417870082796 - - - -14.88013773366 - -1.672249752224 - 30.57297543811 - - -1.672249752224 - 
17.31712592596 - -10.5469407781 - - 30.57297543811 - -10.5469407781 - -0.7358906114452 - - - 18.93707066102 - 11.68664155631 - 28.03242287255 - - 11.68664155631 - 30.15119945711 - -6.606690969349 - - 28.03242287255 - -6.606690969349 - 0.6417870082796 - - - -14.88013773366 - -1.672249752224 - 30.57297543811 - - -1.672249752224 - 17.31712592596 - -10.5469407781 - - 30.57297543811 - -10.5469407781 - -0.7358906114453 - - - -172.4440433656 - -58.30234348421 - 67.88122528662 - - -58.30234348421 - -10.11688714194 - -39.74497829255 - - 67.88122528662 - -39.74497829255 - -9.026701694 - - - 87.41811326922 - 33.86514838909 - -5.240102033061 - - 33.86514838909 - 29.49048109152 - 14.67250320165 - - -5.240102033061 - 14.67250320165 - 4.833599928218 - - - 41.68746835262 - 18.32521144382 - 3.890270269493 - - 18.32521144382 - 28.10847211295 - 9.269126205224 - - 3.890270269493 - 9.269126205224 - 2.450369115355 - - - 2.547257721924 - -41.98520512744 - 20.72868343281 - - -41.98520512744 - -302.2224872041 - -113.9339256901 - - 20.72868343281 - -113.9339256901 - -2.362980518941 - - - 18.58084217516 - 5.678085135653 - 6.602937980917 - - 5.678085135653 - -5.592544704067 - -3.940958740204 - - 6.602937980917 - -3.940958740204 - 0.9575365227362 - - - 14.66994772782 - 5.070231742209 - 5.910736412971 - - 5.070231742209 - -0.4082883762073 - -1.880932217244 - - 5.910736412971 - -1.880932217244 - 0.79286999734 - - - 11.79669162928 - 4.251219274511 - 5.316966607371 - - 4.251219274511 - 0.8144741803491 - -1.252738735688 - - 5.316966607371 - -1.252738735688 - 0.6496190714454 - - - -266.0047978153 - -104.4240748307 - 113.152993159 - - -104.4240748307 - -122.1523164991 - -86.07363307296 - - 113.152993159 - -86.07363307296 - 16.31641633252 - - - -693.1880101263 - -282.0272651787 - 185.6089587299 - - -282.0272651787 - -377.0062670135 - -199.8947029577 - - 185.6089587299 - -199.8947029577 - 32.55049992815 - - - 912.6451934849 - 383.7119079265 - -122.603887661 - - 383.7119079265 - 559.8400701698 - 
213.0361667162 - - -122.603887661 - 213.0361667162 - -29.3843898806 - - - 259.2154297329 - 115.0514237096 - -9.284737880698 - - 115.0514237096 - 174.2494476566 - 49.11184063816 - - -9.284737880698 - 49.11184063816 - -1.549507008423 - - - 143.5729232274 - 64.4900364164 - 14.94615333046 - - 64.4900364164 - 105.0390768054 - 17.44042411675 - - 14.94615333046 - 17.44042411675 - -0.4689104638635 - - - 68.45844162431 - 32.65296277576 - 25.04360055654 - - 32.65296277576 - 59.00150809543 - 0.7592426398398 - - 25.04360055654 - 0.7592426398398 - 0.9871650554026 - - - 18.93707066102 - 11.68664155631 - 28.03242287255 - - 11.68664155631 - 30.15119945711 - -6.606690969349 - - 28.03242287255 - -6.606690969349 - 0.6417870082801 - - - -14.88013773366 - -1.672249752224 - 30.57297543811 - - -1.672249752224 - 17.31712592596 - -10.5469407781 - - 30.57297543811 - -10.5469407781 - -0.7358906114447 - - - 18.93707066102 - 11.68664155631 - 28.03242287255 - - 11.68664155631 - 30.15119945711 - -6.606690969349 - - 28.03242287255 - -6.606690969349 - 0.6417870082801 - - - -14.88013773366 - -1.672249752224 - 30.57297543811 - - -1.672249752224 - 17.31712592596 - -10.5469407781 - - 30.57297543811 - -10.5469407781 - -0.7358906114448 - - - -172.4440433656 - -58.30234348421 - 67.88122528662 - - -58.30234348421 - -10.11688714194 - -39.74497829255 - - 67.88122528662 - -39.74497829255 - -9.026701693999 - - - 87.41811326922 - 33.86514838909 - -5.240102033061 - - 33.86514838909 - 29.49048109152 - 14.67250320165 - - -5.240102033061 - 14.67250320165 - 4.833599928218 - - - 41.68746835262 - 18.32521144382 - 3.890270269493 - - 18.32521144382 - 28.10847211295 - 9.269126205224 - - 3.890270269493 - 9.269126205224 - 2.450369115355 - - - 2.547257721925 - -41.98520512744 - 20.72868343281 - - -41.98520512744 - -302.2224872041 - -113.9339256901 - - 20.72868343281 - -113.9339256901 - -2.362980518942 - - - 18.58084217516 - 5.678085135653 - 6.602937980917 - - 5.678085135653 - -5.592544704067 - -3.940958740204 - - 
6.602937980917 - -3.940958740204 - 0.9575365227361 - - - 14.66994772782 - 5.070231742209 - 5.910736412971 - - 5.070231742209 - -0.4082883762073 - -1.880932217244 - - 5.910736412971 - -1.880932217244 - 0.79286999734 - - - 11.79669162928 - 4.251219274511 - 5.316966607371 - - 4.251219274511 - 0.8144741803491 - -1.252738735688 - - 5.316966607371 - -1.252738735688 - 0.6496190714454 - - - -266.0047978153 - -104.4240748307 - 113.152993159 - - -104.4240748307 - -122.1523164991 - -86.07363307296 - - 113.152993159 - -86.07363307296 - 16.31641633252 - - - -693.1880101263 - -282.0272651787 - 185.6089587299 - - -282.0272651787 - -377.0062670135 - -199.8947029577 - - 185.6089587299 - -199.8947029577 - 32.55049992815 - - - 912.6451934849 - 383.7119079265 - -122.603887661 - - 383.7119079265 - 559.8400701698 - 213.0361667162 - - -122.603887661 - 213.0361667162 - -29.3843898806 - - - 259.2154297329 - 115.0514237096 - -9.284737880698 - - 115.0514237096 - 174.2494476566 - 49.11184063816 - - -9.284737880698 - 49.11184063816 - -1.549507008423 - - - 143.5729232274 - 64.4900364164 - 14.94615333046 - - 64.4900364164 - 105.0390768054 - 17.44042411675 - - 14.94615333046 - 17.44042411675 - -0.4689104638631 - - - 68.45844162431 - 32.65296277576 - 25.04360055654 - - 32.65296277576 - 59.00150809543 - 0.7592426398398 - - 25.04360055654 - 0.7592426398398 - 0.9871650554026 - - - 18.93707066102 - 11.68664155631 - 28.03242287255 - - 11.68664155631 - 30.15119945711 - -6.606690969349 - - 28.03242287255 - -6.606690969349 - 0.6417870082804 - - - -14.88013773366 - -1.672249752224 - 30.57297543811 - - -1.672249752224 - 17.31712592596 - -10.5469407781 - - 30.57297543811 - -10.5469407781 - -0.7358906114447 - - - 18.93707066102 - 11.68664155631 - 28.03242287255 - - 11.68664155631 - 30.15119945711 - -6.606690969349 - - 28.03242287255 - -6.606690969349 - 0.6417870082796 - - - -14.88013773366 - -1.672249752224 - 30.57297543811 - - -1.672249752224 - 17.31712592596 - -10.5469407781 - - 30.57297543811 - 
-10.5469407781 - -0.7358906114452 - - - -172.4440433656 - -58.30234348421 - 67.88122528662 - - -58.30234348421 - -10.11688714194 - -39.74497829255 - - 67.88122528662 - -39.74497829255 - -9.026701693998 - - - 87.41811326922 - 33.86514838909 - -5.240102033061 - - 33.86514838909 - 29.49048109152 - 14.67250320165 - - -5.240102033061 - 14.67250320165 - 4.833599928218 - - - 41.68746835262 - 18.32521144382 - 3.890270269493 - - 18.32521144382 - 28.10847211295 - 9.269126205224 - - 3.890270269493 - 9.269126205224 - 2.450369115355 - - - 2.547257721925 - -41.98520512744 - 20.72868343281 - - -41.98520512744 - -302.2224872041 - -113.9339256901 - - 20.72868343281 - -113.9339256901 - -2.362980518942 - - - 18.58084217516 - 5.678085135653 - 6.602937980917 - - 5.678085135653 - -5.592544704067 - -3.940958740204 - - 6.602937980917 - -3.940958740204 - 0.9575365227362 - - - 14.66994772782 - 5.070231742209 - 5.910736412971 - - 5.070231742209 - -0.4082883762072 - -1.880932217244 - - 5.910736412971 - -1.880932217244 - 0.79286999734 - - - 11.79669162928 - 4.251219274511 - 5.316966607371 - - 4.251219274511 - 0.8144741803491 - -1.252738735688 - - 5.316966607371 - -1.252738735688 - 0.6496190714454 - - - -266.0047978153 - -104.4240748307 - 113.152993159 - - -104.4240748307 - -122.1523164991 - -86.07363307296 - - 113.152993159 - -86.07363307296 - 16.31641633252 - - - 912.6451934849 - 383.7119079265 - -122.603887661 - - 383.7119079265 - 559.8400701698 - 213.0361667162 - - -122.603887661 - 213.0361667162 - -29.3843898806 - - - 37.86067159491 - 19.63578725902 - 27.33833014146 - - 19.63578725902 - 40.52405881727 - -4.310564613888 - - 27.33833014146 - -4.310564613888 - 1.001499860058 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.8045838275 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.80458382749 - 
- - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.8045838275 - - - 10.65126838649 - 3.886653870481 - 5.035741025472 - - 3.886653870481 - 1.042562599207 - -1.080339920451 - - 5.035741025472 - -1.080339920451 - 0.5902117049039 - - - -266.0047978153 - -104.4240748307 - 113.152993159 - - -104.4240748307 - -122.1523164991 - -86.07363307296 - - 113.152993159 - -86.07363307296 - 16.31641633252 - - - 912.6451934849 - 383.7119079265 - -122.603887661 - - 383.7119079265 - 559.8400701698 - 213.0361667162 - - -122.603887661 - 213.0361667162 - -29.3843898806 - - - 37.86067159491 - 19.63578725902 - 27.33833014146 - - 19.63578725902 - 40.52405881727 - -4.310564613888 - - 27.33833014146 - -4.310564613888 - 1.001499860058 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.80458382749 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.8045838275 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.80458382749 - - - 10.65126838649 - 3.886653870481 - 5.035741025472 - - 3.886653870481 - 1.042562599207 - -1.080339920451 - - 5.035741025472 - -1.080339920451 - 0.590211704904 - - - -266.0047978153 - -104.4240748307 - 113.152993159 - - -104.4240748307 - -122.1523164991 - -86.07363307296 - - 113.152993159 - -86.07363307296 - 16.31641633252 - - - 912.6451934849 - 383.7119079265 - -122.603887661 - - 383.7119079265 - 559.8400701698 - 213.0361667162 - - -122.603887661 - 213.0361667162 - -29.38438988059 - - - 37.86067159491 - 19.63578725902 - 27.33833014146 - - 19.63578725902 - 40.52405881727 - -4.310564613888 - - 27.33833014146 - -4.310564613888 - 1.001499860059 - - - 256.1687039175 - 93.85492401693 - 
-48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.80458382749 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.80458382749 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.80458382749 - - - 10.65126838649 - 3.886653870481 - 5.035741025472 - - 3.886653870481 - 1.042562599207 - -1.080339920451 - - 5.035741025472 - -1.080339920451 - 0.590211704904 - - - -266.0047978153 - -104.4240748307 - 113.152993159 - - -104.4240748307 - -122.1523164991 - -86.07363307296 - - 113.152993159 - -86.07363307296 - 16.31641633252 - - - 912.6451934849 - 383.7119079265 - -122.603887661 - - 383.7119079265 - 559.8400701698 - 213.0361667162 - - -122.603887661 - 213.0361667162 - -29.38438988059 - - - 37.86067159491 - 19.63578725902 - 27.33833014146 - - 19.63578725902 - 40.52405881727 - -4.310564613888 - - 27.33833014146 - -4.310564613888 - 1.001499860059 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.8045838275 - - - 10.65126838649 - 3.886653870481 - 5.035741025472 - - 3.886653870481 - 1.042562599207 - -1.080339920451 - - 5.035741025472 - -1.080339920451 - 0.5902117049039 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.8045838275 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.80458382749 - - - 10.65126838649 - 3.886653870481 - 5.035741025472 - - 3.886653870481 - 1.042562599207 - -1.080339920451 - - 5.035741025472 - -1.080339920451 - 0.590211704904 - - - -266.0047978153 - -104.4240748307 - 113.152993159 - - -104.4240748307 - -122.1523164991 - 
-86.07363307296 - - 113.152993159 - -86.07363307296 - 16.31641633252 - - - 912.6451934849 - 383.7119079265 - -122.603887661 - - 383.7119079265 - 559.8400701698 - 213.0361667162 - - -122.603887661 - 213.0361667162 - -29.38438988059 - - - 143.5729232274 - 64.4900364164 - 14.94615333046 - - 64.4900364164 - 105.0390768054 - 17.44042411675 - - 14.94615333046 - 17.44042411675 - -0.4689104638635 - - - 37.86067159491 - 19.63578725902 - 27.33833014146 - - 19.63578725902 - 40.52405881727 - -4.310564613888 - - 27.33833014146 - -4.310564613888 - 1.001499860059 - - - 3.158995638313 - 5.285050749783 - 28.72100787546 - - 5.285050749783 - 23.11411940654 - -8.290517826993 - - 28.72100787546 - -8.290517826993 - 0.0893332002871 - - - 3.158995638313 - 5.285050749783 - 28.72100787546 - - 5.285050749783 - 23.11411940654 - -8.290517826993 - - 28.72100787546 - -8.290517826993 - 0.0893332002872 - - - -46.24243692467 - -13.19929340666 - 36.398937508 - - -13.19929340666 - 10.49974863698 - -15.59542019426 - - 36.398937508 - -15.59542019426 - -2.340461710656 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.8045838275 - - - 34.65099735562 - 17.97663481639 - 4.117471070847 - - 17.97663481639 - 42.36208423833 - 14.06237417874 - - 4.117471070847 - 14.06237417874 - 2.203133844605 - - - 16.48276796572 - 5.469469601922 - 6.230099813495 - - 5.469469601922 - -1.991487592538 - -2.545944896211 - - 6.230099813495 - -2.545944896211 - 0.8761849272405 - - - 10.65126838649 - 3.886653870481 - 5.035741025472 - - 3.886653870481 - 1.042562599207 - -1.080339920451 - - 5.035741025472 - -1.080339920451 - 0.5902117049041 - - - -266.0047978153 - -104.4240748307 - 113.152993159 - - -104.4240748307 - -122.1523164991 - -86.07363307296 - - 113.152993159 - -86.07363307296 - 16.31641633252 - - - 912.6451934849 - 383.7119079265 - -122.603887661 - - 383.7119079265 - 559.8400701698 - 213.0361667162 - - -122.603887661 - 
213.0361667162 - -29.38438988059 - - - 143.5729232274 - 64.4900364164 - 14.94615333046 - - 64.4900364164 - 105.0390768054 - 17.44042411675 - - 14.94615333046 - 17.44042411675 - -0.4689104638631 - - - 37.86067159491 - 19.63578725902 - 27.33833014146 - - 19.63578725902 - 40.52405881727 - -4.310564613888 - - 27.33833014146 - -4.310564613888 - 1.001499860058 - - - 3.158995638313 - 5.285050749783 - 28.72100787546 - - 5.285050749783 - 23.11411940654 - -8.290517826993 - - 28.72100787546 - -8.290517826993 - 0.08933320028759 - - - 3.158995638313 - 5.285050749783 - 28.72100787546 - - 5.285050749783 - 23.11411940654 - -8.290517826993 - - 28.72100787546 - -8.290517826993 - 0.0893332002872 - - - -46.24243692467 - -13.19929340666 - 36.398937508 - - -13.19929340666 - 10.49974863698 - -15.59542019426 - - 36.398937508 - -15.59542019426 - -2.340461710656 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.8045838275 - - - 34.65099735562 - 17.97663481639 - 4.117471070847 - - 17.97663481639 - 42.36208423833 - 14.06237417874 - - 4.117471070847 - 14.06237417874 - 2.203133844605 - - - 16.48276796572 - 5.469469601922 - 6.230099813495 - - 5.469469601922 - -1.991487592538 - -2.545944896211 - - 6.230099813495 - -2.545944896211 - 0.8761849272405 - - - 10.65126838649 - 3.886653870481 - 5.035741025472 - - 3.886653870481 - 1.042562599207 - -1.080339920451 - - 5.035741025472 - -1.080339920451 - 0.590211704904 - - - -266.0047978153 - -104.4240748307 - 113.152993159 - - -104.4240748307 - -122.1523164991 - -86.07363307296 - - 113.152993159 - -86.07363307296 - 16.31641633252 - - - 912.6451934849 - 383.7119079265 - -122.603887661 - - 383.7119079265 - 559.8400701698 - 213.0361667162 - - -122.603887661 - 213.0361667162 - -29.38438988059 - - - 143.5729232274 - 64.4900364164 - 14.94615333046 - - 64.4900364164 - 105.0390768054 - 17.44042411675 - - 14.94615333046 - 17.44042411675 - -0.4689104638633 - - - 
37.86067159491 - 19.63578725902 - 27.33833014146 - - 19.63578725902 - 40.52405881727 - -4.310564613888 - - 27.33833014146 - -4.310564613888 - 1.001499860059 - - - 3.158995638313 - 5.285050749783 - 28.72100787546 - - 5.285050749783 - 23.11411940654 - -8.290517826993 - - 28.72100787546 - -8.290517826993 - 0.08933320028717 - - - 3.158995638313 - 5.285050749783 - 28.72100787546 - - 5.285050749783 - 23.11411940654 - -8.290517826993 - - 28.72100787546 - -8.290517826993 - 0.08933320028754 - - - -46.24243692467 - -13.19929340666 - 36.398937508 - - -13.19929340666 - 10.49974863698 - -15.59542019426 - - 36.398937508 - -15.59542019426 - -2.340461710656 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.80458382749 - - - 34.65099735562 - 17.97663481639 - 4.117471070847 - - 17.97663481639 - 42.36208423833 - 14.06237417874 - - 4.117471070847 - 14.06237417874 - 2.203133844605 - - - 16.48276796572 - 5.469469601922 - 6.230099813495 - - 5.469469601922 - -1.991487592538 - -2.545944896211 - - 6.230099813495 - -2.545944896211 - 0.8761849272406 - - - 10.65126838649 - 3.886653870481 - 5.035741025472 - - 3.886653870481 - 1.042562599207 - -1.080339920451 - - 5.035741025472 - -1.080339920451 - 0.5902117049039 - - - -266.0047978153 - -104.4240748307 - 113.152993159 - - -104.4240748307 - -122.1523164991 - -86.07363307296 - - 113.152993159 - -86.07363307296 - 16.31641633252 - - - -693.1880101263 - -282.0272651787 - 185.6089587299 - - -282.0272651787 - -377.0062670135 - -199.8947029577 - - 185.6089587299 - -199.8947029577 - 32.55049992815 - - - 912.6451934849 - 383.7119079265 - -122.603887661 - - 383.7119079265 - 559.8400701698 - 213.0361667162 - - -122.603887661 - 213.0361667162 - -29.38438988059 - - - 259.2154297329 - 115.0514237096 - -9.284737880698 - - 115.0514237096 - 174.2494476566 - 49.11184063816 - - -9.284737880698 - 49.11184063816 - -1.549507008423 - - - 143.5729232274 - 64.4900364164 - 
14.94615333046 - - 64.4900364164 - 105.0390768054 - 17.44042411675 - - 14.94615333046 - 17.44042411675 - -0.4689104638631 - - - 68.45844162431 - 32.65296277576 - 25.04360055654 - - 32.65296277576 - 59.00150809543 - 0.7592426398398 - - 25.04360055654 - 0.7592426398398 - 0.9871650554025 - - - -14.88013773366 - -1.672249752224 - 30.57297543811 - - -1.672249752224 - 17.31712592596 - -10.5469407781 - - 30.57297543811 - -10.5469407781 - -0.7358906114452 - - - -14.88013773366 - -1.672249752224 - 30.57297543811 - - -1.672249752224 - 17.31712592596 - -10.5469407781 - - 30.57297543811 - -10.5469407781 - -0.7358906114452 - - - -172.4440433656 - -58.30234348421 - 67.88122528662 - - -58.30234348421 - -10.11688714194 - -39.74497829255 - - 67.88122528662 - -39.74497829255 - -9.026701693998 - - - 87.41811326922 - 33.86514838909 - -5.240102033061 - - 33.86514838909 - 29.49048109152 - 14.67250320165 - - -5.240102033061 - 14.67250320165 - 4.833599928218 - - - 41.68746835262 - 18.32521144382 - 3.890270269493 - - 18.32521144382 - 28.10847211295 - 9.269126205224 - - 3.890270269493 - 9.269126205224 - 2.450369115355 - - - 2.547257721925 - -41.98520512744 - 20.72868343281 - - -41.98520512744 - -302.2224872041 - -113.9339256901 - - 20.72868343281 - -113.9339256901 - -2.362980518942 - - - 18.58084217516 - 5.678085135653 - 6.602937980917 - - 5.678085135653 - -5.592544704067 - -3.940958740204 - - 6.602937980917 - -3.940958740204 - 0.9575365227362 - - - 14.66994772782 - 5.070231742209 - 5.910736412971 - - 5.070231742209 - -0.4082883762073 - -1.880932217244 - - 5.910736412971 - -1.880932217244 - 0.79286999734 - - - 11.79669162928 - 4.251219274511 - 5.316966607371 - - 4.251219274511 - 0.8144741803491 - -1.252738735688 - - 5.316966607371 - -1.252738735688 - 0.6496190714455 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.80458382749 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 
93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.80458382749 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.8045838275 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.80458382749 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.80458382749 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.8045838275 - - - -266.0047978153 - -104.4240748307 - 113.152993159 - - -104.4240748307 - -122.1523164991 - -86.07363307296 - - 113.152993159 - -86.07363307296 - 16.31641633252 - - - 912.6451934849 - 383.7119079265 - -122.603887661 - - 383.7119079265 - 559.8400701698 - 213.0361667162 - - -122.603887661 - 213.0361667162 - -29.3843898806 - - - 143.5729232274 - 64.4900364164 - 14.94615333046 - - 64.4900364164 - 105.0390768054 - 17.44042411675 - - 14.94615333046 - 17.44042411675 - -0.4689104638633 - - - 37.86067159491 - 19.63578725902 - 27.33833014146 - - 19.63578725902 - 40.52405881727 - -4.310564613888 - - 27.33833014146 - -4.310564613888 - 1.001499860059 - - - 3.158995638313 - 5.285050749783 - 28.72100787546 - - 5.285050749783 - 23.11411940654 - -8.290517826993 - - 28.72100787546 - -8.290517826993 - 0.08933320028804 - - - -46.24243692467 - -13.19929340666 - 36.398937508 - - -13.19929340666 - 10.49974863698 - -15.59542019426 - - 36.398937508 - -15.59542019426 - -2.340461710656 - - - 3.158995638313 - 5.285050749783 - 28.72100787546 - - 5.285050749783 - 23.11411940654 - -8.290517826993 - - 28.72100787546 - -8.290517826993 - 0.0893332002871 - - - -46.24243692467 - -13.19929340666 - 36.398937508 - - -13.19929340666 - 10.49974863698 - -15.59542019426 - - 
36.398937508 - -15.59542019426 - -2.340461710657 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.8045838275 - - - 34.65099735562 - 17.97663481639 - 4.117471070847 - - 17.97663481639 - 42.36208423833 - 14.06237417874 - - 4.117471070847 - 14.06237417874 - 2.203133844605 - - - 16.48276796572 - 5.469469601922 - 6.230099813495 - - 5.469469601922 - -1.991487592538 - -2.545944896211 - - 6.230099813495 - -2.545944896211 - 0.8761849272405 - - - 10.65126838649 - 3.886653870481 - 5.035741025472 - - 3.886653870481 - 1.042562599207 - -1.080339920451 - - 5.035741025472 - -1.080339920451 - 0.590211704904 - - - -266.0047978153 - -104.4240748307 - 113.152993159 - - -104.4240748307 - -122.1523164991 - -86.07363307296 - - 113.152993159 - -86.07363307296 - 16.31641633252 - - - 912.6451934849 - 383.7119079265 - -122.603887661 - - 383.7119079265 - 559.8400701698 - 213.0361667162 - - -122.603887661 - 213.0361667162 - -29.3843898806 - - - 143.5729232274 - 64.4900364164 - 14.94615333046 - - 64.4900364164 - 105.0390768054 - 17.44042411675 - - 14.94615333046 - 17.44042411675 - -0.4689104638633 - - - 37.86067159491 - 19.63578725902 - 27.33833014146 - - 19.63578725902 - 40.52405881727 - -4.310564613888 - - 27.33833014146 - -4.310564613888 - 1.001499860058 - - - 3.158995638313 - 5.285050749783 - 28.72100787546 - - 5.285050749783 - 23.11411940654 - -8.290517826993 - - 28.72100787546 - -8.290517826993 - 0.08933320028763 - - - -46.24243692467 - -13.19929340666 - 36.398937508 - - -13.19929340666 - 10.49974863698 - -15.59542019426 - - 36.398937508 - -15.59542019426 - -2.340461710657 - - - 3.158995638313 - 5.285050749783 - 28.72100787546 - - 5.285050749783 - 23.11411940654 - -8.290517826993 - - 28.72100787546 - -8.290517826993 - 0.08933320028752 - - - -46.24243692467 - -13.19929340666 - 36.398937508 - - -13.19929340666 - 10.49974863698 - -15.59542019426 - - 36.398937508 - -15.59542019426 - 
-2.340461710657 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.8045838275 - - - 34.65099735562 - 17.97663481639 - 4.117471070847 - - 17.97663481639 - 42.36208423833 - 14.06237417874 - - 4.117471070847 - 14.06237417874 - 2.203133844605 - - - 16.48276796572 - 5.469469601922 - 6.230099813495 - - 5.469469601922 - -1.991487592538 - -2.545944896211 - - 6.230099813495 - -2.545944896211 - 0.8761849272405 - - - 10.65126838649 - 3.886653870481 - 5.035741025472 - - 3.886653870481 - 1.042562599207 - -1.080339920451 - - 5.035741025472 - -1.080339920451 - 0.590211704904 - - - -266.0047978153 - -104.4240748307 - 113.152993159 - - -104.4240748307 - -122.1523164991 - -86.07363307296 - - 113.152993159 - -86.07363307296 - 16.31641633252 - - - 912.6451934849 - 383.7119079265 - -122.603887661 - - 383.7119079265 - 559.8400701698 - 213.0361667162 - - -122.603887661 - 213.0361667162 - -29.38438988059 - - - 143.5729232274 - 64.4900364164 - 14.94615333046 - - 64.4900364164 - 105.0390768054 - 17.44042411675 - - 14.94615333046 - 17.44042411675 - -0.4689104638634 - - - 37.86067159491 - 19.63578725902 - 27.33833014146 - - 19.63578725902 - 40.52405881727 - -4.310564613888 - - 27.33833014146 - -4.310564613888 - 1.001499860058 - - - 3.158995638313 - 5.285050749783 - 28.72100787546 - - 5.285050749783 - 23.11411940654 - -8.290517826993 - - 28.72100787546 - -8.290517826993 - 0.08933320028753 - - - -46.24243692467 - -13.19929340666 - 36.398937508 - - -13.19929340666 - 10.49974863698 - -15.59542019426 - - 36.398937508 - -15.59542019426 - -2.340461710657 - - - 3.158995638313 - 5.285050749783 - 28.72100787546 - - 5.285050749783 - 23.11411940654 - -8.290517826993 - - 28.72100787546 - -8.290517826993 - 0.08933320028805 - - - -46.24243692467 - -13.19929340666 - 36.398937508 - - -13.19929340666 - 10.49974863698 - -15.59542019426 - - 36.398937508 - -15.59542019426 - -2.340461710656 - - - 256.1687039175 - 
93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.80458382749 - - - 34.65099735562 - 17.97663481639 - 4.117471070847 - - 17.97663481639 - 42.36208423833 - 14.06237417874 - - 4.117471070847 - 14.06237417874 - 2.203133844605 - - - 16.48276796572 - 5.469469601922 - 6.230099813495 - - 5.469469601922 - -1.991487592538 - -2.545944896211 - - 6.230099813495 - -2.545944896211 - 0.8761849272405 - - - 10.65126838649 - 3.886653870481 - 5.035741025472 - - 3.886653870481 - 1.042562599207 - -1.080339920451 - - 5.035741025472 - -1.080339920451 - 0.5902117049041 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.8045838275 - - - -266.0047978153 - -104.4240748307 - 113.152993159 - - -104.4240748307 - -122.1523164991 - -86.07363307296 - - 113.152993159 - -86.07363307296 - 16.31641633252 - - - 912.6451934849 - 383.7119079265 - -122.603887661 - - 383.7119079265 - 559.8400701698 - 213.0361667162 - - -122.603887661 - 213.0361667162 - -29.38438988059 - - - 37.86067159491 - 19.63578725902 - 27.33833014146 - - 19.63578725902 - 40.52405881727 - -4.310564613888 - - 27.33833014146 - -4.310564613888 - 1.001499860059 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.80458382749 - - - 10.65126838649 - 3.886653870481 - 5.035741025472 - - 3.886653870481 - 1.042562599207 - -1.080339920451 - - 5.035741025472 - -1.080339920451 - 0.590211704904 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.80458382749 - - - 10.65126838649 - 3.886653870481 - 5.035741025472 - - 3.886653870481 - 1.042562599207 - -1.080339920451 - - 5.035741025472 - -1.080339920451 - 0.5902117049039 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 
93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.8045838275 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.80458382749 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.80458382749 - - - 18.93707066102 - 11.68664155631 - 28.03242287255 - - 11.68664155631 - 30.15119945711 - -6.606690969349 - - 28.03242287255 - -6.606690969349 - 0.6417870082805 - - - 18.93707066102 - 11.68664155631 - 28.03242287255 - - 11.68664155631 - 30.15119945711 - -6.606690969349 - - 28.03242287255 - -6.606690969349 - 0.6417870082805 - - - -266.0047978153 - -104.4240748307 - 113.152993159 - - -104.4240748307 - -122.1523164991 - -86.07363307296 - - 113.152993159 - -86.07363307296 - 16.31641633252 - - - 912.6451934849 - 383.7119079265 - -122.603887661 - - 383.7119079265 - 559.8400701698 - 213.0361667162 - - -122.603887661 - 213.0361667162 - -29.3843898806 - - - 18.93707066102 - 11.68664155631 - 28.03242287255 - - 11.68664155631 - 30.15119945711 - -6.606690969349 - - 28.03242287255 - -6.606690969349 - 0.6417870082805 - - - 18.93707066102 - 11.68664155631 - 28.03242287255 - - 11.68664155631 - 30.15119945711 - -6.606690969349 - - 28.03242287255 - -6.606690969349 - 0.6417870082804 - - - -172.4440433656 - -58.30234348421 - 67.88122528662 - - -58.30234348421 - -10.11688714194 - -39.74497829255 - - 67.88122528662 - -39.74497829255 - -9.026701693999 - - - 41.68746835262 - 18.32521144382 - 3.890270269493 - - 18.32521144382 - 28.10847211295 - 9.269126205224 - - 3.890270269493 - 9.269126205224 - 2.450369115355 - - - 18.93707066102 - 11.68664155631 - 28.03242287255 - - 11.68664155631 - 30.15119945711 - -6.606690969349 - - 28.03242287255 - -6.606690969349 - 0.6417870082809 - - - 18.93707066102 - 11.68664155631 - 28.03242287255 - - 11.68664155631 - 30.15119945711 - 
-6.606690969349 - - 28.03242287255 - -6.606690969349 - 0.6417870082805 - - - -266.0047978153 - -104.4240748307 - 113.152993159 - - -104.4240748307 - -122.1523164991 - -86.07363307296 - - 113.152993159 - -86.07363307296 - 16.31641633252 - - - 912.6451934849 - 383.7119079265 - -122.603887661 - - 383.7119079265 - 559.8400701698 - 213.0361667162 - - -122.603887661 - 213.0361667162 - -29.3843898806 - - - -172.4440433656 - -58.30234348421 - 67.88122528662 - - -58.30234348421 - -10.11688714194 - -39.74497829255 - - 67.88122528662 - -39.74497829255 - -9.026701693999 - - - 41.68746835262 - 18.32521144382 - 3.890270269493 - - 18.32521144382 - 28.10847211295 - 9.269126205224 - - 3.890270269493 - 9.269126205224 - 2.450369115355 - - - 18.93707066102 - 11.68664155631 - 28.03242287255 - - 11.68664155631 - 30.15119945711 - -6.606690969349 - - 28.03242287255 - -6.606690969349 - 0.6417870082809 - - - 18.93707066102 - 11.68664155631 - 28.03242287255 - - 11.68664155631 - 30.15119945711 - -6.606690969349 - - 28.03242287255 - -6.606690969349 - 0.6417870082809 x_accepted: - 0.1494212312914 - 0.005607806220598 - 0.01308958287811 ================================================ FILE: tests/optimagic/optimizers/_pounders/fixtures/interpolate_f_iter_7.yaml ================================================ --- delta_old: 0.0125 f_interpolated_expected: - - 2.396839767562 - 2.640310345903 - 2.498147595924 - 2.691030200433 - 2.692161347328 - 2.617651338669 - 2.303282937582 - 1.972205368828 - 2.303282937582 - 1.972205368828 - 1.763553129541 - 1.342529987111 - 1.146903490695 - 1.640628791315 - 0.9003896743169 - 0.7700051055971 - 0.6665780303914 - 2.396839767562 - 2.640310345903 - 2.498147595924 - 2.691030200433 - 2.692161347328 - 2.617651338669 - 2.303282937582 - 1.972205368828 - 2.303282937582 - 1.972205368828 - 1.763553129541 - 1.342529987111 - 1.146903490695 - 1.640628791315 - 0.9003896743169 - 0.7700051055971 - 0.6665780303914 - 2.396839767562 - 2.640310345904 - 2.498147595924 - 2.691030200433 
- 2.692161347328 - 2.617651338669 - 2.303282937582 - 1.972205368828 - 2.303282937582 - 1.972205368828 - 1.763553129541 - 1.342529987111 - 1.146903490695 - 1.640628791315 - 0.9003896743169 - 0.7700051055971 - 0.6665780303914 - 2.396839767562 - 2.640310345904 - 2.498147595924 - 2.691030200433 - 2.692161347328 - 2.617651338669 - 2.303282937582 - 1.972205368828 - 2.303282937582 - 1.972205368828 - 1.763553129541 - 1.342529987111 - 1.146903490695 - 1.640628791315 - 0.9003896743169 - 0.7700051055971 - 0.6665780303914 - 2.396839767562 - 2.498147595924 - 2.472320188045 - 1.360815770929 - 1.360815770929 - 1.360815770929 - 0.6216379283744 - 2.396839767562 - 2.498147595924 - 2.472320188045 - 1.360815770929 - 1.360815770929 - 1.360815770929 - 0.6216379283744 - 2.396839767562 - 2.498147595925 - 2.472320188045 - 1.360815770929 - 1.360815770929 - 1.360815770929 - 0.6216379283744 - 2.396839767562 - 2.498147595925 - 2.472320188046 - 1.360815770929 - 0.6216379283744 - 1.360815770929 - 1.360815770929 - 0.6216379283744 - 2.396839767562 - 2.498147595925 - 2.692161347328 - 2.472320188046 - 2.132751177783 - 2.132751177783 - 1.831874172693 - 1.360815770929 - 1.028891127921 - 0.8305451863645 - 0.6216379283744 - 2.396839767562 - 2.498147595925 - 2.692161347328 - 2.472320188045 - 2.132751177783 - 2.132751177783 - 1.831874172693 - 1.360815770929 - 1.028891127921 - 0.8305451863645 - 0.6216379283744 - 2.396839767562 - 2.498147595925 - 2.692161347328 - 2.472320188046 - 2.132751177783 - 2.132751177783 - 1.831874172693 - 1.360815770929 - 1.028891127921 - 0.8305451863645 - 0.6216379283744 - 2.396839767562 - 2.640310345903 - 2.498147595925 - 2.691030200433 - 2.692161347328 - 2.617651338669 - 1.972205368828 - 1.972205368828 - 1.763553129541 - 1.342529987111 - 1.146903490695 - 1.640628791315 - 0.9003896743169 - 0.7700051055971 - 0.6665780303914 - 1.360815770929 - 1.360815770929 - 1.360815770929 - 1.360815770929 - 1.360815770929 - 1.360815770929 - 2.396839767562 - 2.498147595924 - 2.692161347328 - 
2.472320188046 - 2.132751177783 - 1.831874172693 - 2.132751177783 - 1.831874172693 - 1.360815770929 - 1.028891127921 - 0.8305451863645 - 0.6216379283744 - 2.396839767562 - 2.498147595924 - 2.692161347328 - 2.472320188045 - 2.132751177783 - 1.831874172693 - 2.132751177783 - 1.831874172693 - 1.360815770929 - 1.028891127921 - 0.8305451863645 - 0.6216379283744 - 2.396839767562 - 2.498147595925 - 2.692161347328 - 2.472320188045 - 2.132751177783 - 1.831874172693 - 2.132751177783 - 1.831874172693 - 1.360815770929 - 1.028891127921 - 0.8305451863645 - 0.6216379283744 - 1.360815770929 - 2.396839767562 - 2.498147595925 - 2.472320188046 - 1.360815770929 - 0.6216379283744 - 1.360815770929 - 0.6216379283744 - 1.360815770929 - 1.360815770929 - 1.360815770929 - 2.303282937582 - 2.303282937582 - 2.396839767562 - 2.498147595924 - 2.303282937582 - 2.303282937582 - 1.763553129541 - 1.146903490695 - 2.303282937582 - 2.303282937582 - 2.396839767562 - 2.498147595924 - 1.763553129541 - 1.146903490695 - 2.303282937582 - 2.303282937582 - - 1.24344978758e-14 - -5.329070518201e-14 - 9.947598300641e-14 - 2.13162820728e-14 - 3.552713678801e-15 - -3.552713678801e-15 - 0.0 - 3.552713678801e-15 - 3.552713678801e-15 - -2.6645352591e-15 - -4.440892098501e-15 - 8.881784197001e-16 - -8.881784197001e-16 - 6.661338147751e-15 - 4.440892098501e-16 - 4.440892098501e-16 - -8.881784197001e-16 - -1.59872115546e-14 - -1.7763568394e-14 - -2.48689957516e-14 - -3.552713678801e-15 - -1.42108547152e-14 - -5.329070518201e-15 - 1.7763568394e-15 - 3.552713678801e-15 - 1.7763568394e-15 - 1.7763568394e-15 - 0.0 - 1.7763568394e-15 - -8.881784197001e-16 - 8.881784197001e-16 - 4.440892098501e-16 - 0.0 - 8.881784197001e-16 - -1.24344978758e-14 - -5.151434834261e-14 - -3.19744231092e-14 - -1.42108547152e-14 - -1.7763568394e-14 - 0.0 - 3.552713678801e-15 - 0.0 - -1.7763568394e-15 - -4.440892098501e-15 - 0.0 - 8.881784197001e-16 - 0.0 - 6.217248937901e-15 - 1.998401444325e-15 - 4.440892098501e-16 - -4.440892098501e-16 - 0.0 - 
-1.136868377216e-13 - -4.618527782441e-14 - -7.105427357601e-15 - 5.329070518201e-15 - 0.0 - -1.7763568394e-15 - 0.0 - 1.7763568394e-15 - 3.552713678801e-15 - -7.993605777300e-15 - 2.6645352591e-15 - -2.22044604925e-15 - 3.10862446895e-15 - 1.33226762955e-15 - 2.22044604925e-16 - -1.110223024625e-15 - -3.552713678801e-15 - 9.947598300641e-14 - -3.552713678801e-15 - 5.551115123126e-15 - 1.7763568394e-15 - 1.7763568394e-15 - -8.881784197001e-16 - -1.24344978758e-14 - 0.0 - -8.881784197001e-16 - 3.552713678801e-15 - 1.33226762955e-14 - 6.217248937901e-15 - -5.551115123126e-16 - -1.86517468137e-14 - 1.101341240428e-13 - -7.105427357601e-15 - 2.6645352591e-15 - 2.22044604925e-15 - 2.6645352591e-15 - -8.881784197001e-16 - -1.24344978758e-14 - -1.06581410364e-14 - 3.552713678801e-15 - 6.661338147751e-15 - -8.881784197001e-16 - 1.33226762955e-14 - 1.7763568394e-15 - -8.881784197001e-16 - -1.86517468137e-14 - 3.19744231092e-14 - 1.68753899743e-14 - -2.6645352591e-15 - 6.217248937901e-15 - -1.7763568394e-15 - -8.881784197001e-16 - 6.217248937901e-15 - -8.881784197001e-16 - -8.881784197001e-16 - 4.440892098501e-16 - -1.50990331349e-14 - 9.592326932761e-14 - 0.0 - 0.0 - 6.661338147751e-16 - -1.110223024625e-15 - -1.7763568394e-15 - 1.33226762955e-14 - 0.0 - -8.881784197001e-16 - 4.440892098501e-16 - 1.06581410364e-14 - 1.7763568394e-14 - -2.6645352591e-15 - -3.552713678801e-15 - -8.881784197001e-16 - 2.6645352591e-15 - 0.0 - 3.552713678801e-15 - 8.881784197001e-16 - 8.881784197001e-16 - 0.0 - -1.24344978758e-14 - -2.13162820728e-14 - 9.592326932761e-14 - 0.0 - -2.57571741713e-14 - 1.7763568394e-15 - -8.881784197001e-16 - 1.7763568394e-15 - -3.552713678801e-15 - 7.771561172376e-16 - -1.998401444325e-15 - 4.440892098501e-15 - 1.443289932013e-15 - 2.775557561563e-16 - -1.665334536938e-16 - 7.105427357601e-15 - 8.881784197001e-15 - -1.7763568394e-15 - 2.22044604925e-15 - 1.24344978758e-14 - 3.552713678801e-15 - 1.24344978758e-14 - 1.42108547152e-14 - -2.6645352591e-15 - 
-5.329070518201e-15 - 0.0 - 0.0 - 0.0 - 1.7763568394e-15 - 7.105427357601e-15 - -8.881784197001e-16 - 0.0 - 0.0 - -2.6645352591e-15 - -4.618527782441e-14 - -7.105427357601e-15 - 5.329070518201e-15 - 2.6645352591e-15 - -8.881784197001e-16 - 1.7763568394e-15 - -8.881784197001e-16 - 5.329070518201e-15 - -2.22044604925e-16 - 4.440892098501e-16 - 8.881784197001e-16 - 7.105427357601e-15 - 7.105427357601e-15 - 3.153033389935e-14 - 0.0 - -4.440892098501e-15 - 0.0 - 2.6645352591e-15 - -1.7763568394e-15 - -8.881784197001e-16 - 8.881784197001e-16 - -8.881784197001e-16 - 0.0 - 4.884981308351e-15 - 8.881784197001e-16 - -1.06581410364e-14 - -1.7763568394e-15 - 2.6645352591e-15 - 2.22044604925e-16 - -7.549516567451e-15 - -4.440892098501e-16 - 6.217248937901e-15 - 6.217248937901e-15 - 3.552713678801e-15 - 4.440892098501e-16 - -1.7763568394e-15 - -1.7763568394e-15 - 4.618527782441e-14 - 1.7763568394e-15 - -1.7763568394e-15 - -1.7763568394e-15 - -8.881784197001e-16 - -8.881784197001e-16 - -8.881784197001e-16 - 8.881784197001e-15 - 5.684341886081e-14 - 0.0 - -2.22044604925e-15 - -8.881784197001e-16 - -1.7763568394e-15 - - -1.06581410364e-14 - -1.95399252334e-14 - 1.24344978758e-14 - 7.105427357601e-15 - 4.440892098501e-15 - 2.442490654175e-15 - -1.998401444325e-15 - 3.330669073875e-16 - -1.33226762955e-15 - -1.110223024625e-16 - 1.33226762955e-15 - 1.7763568394e-15 - -6.661338147751e-16 - 4.440892098501e-16 - 0.0 - 2.22044604925e-16 - 0.0 - 0.0 - 3.19744231092e-14 - -1.7763568394e-14 - 1.7763568394e-15 - 1.7763568394e-15 - 2.6645352591e-15 - 0.0 - 0.0 - 0.0 - 0.0 - -1.33226762955e-15 - 4.440892098501e-16 - 0.0 - -8.881784197001e-16 - 0.0 - 1.110223024625e-16 - -2.22044604925e-16 - 1.7763568394e-15 - -3.552713678801e-15 - 1.59872115546e-14 - 8.881784197001e-16 - -3.10862446895e-15 - 4.440892098501e-16 - 1.7763568394e-15 - 8.881784197001e-16 - 8.881784197001e-16 - 1.33226762955e-15 - -4.440892098501e-16 - 4.440892098501e-16 - -8.881784197001e-16 - 1.7763568394e-15 - -8.881784197001e-16 
- 4.440892098501e-16 - -1.110223024625e-16 - 8.881784197001e-16 - -2.30926389122e-14 - 1.7763568394e-15 - 2.6645352591e-15 - 2.22044604925e-16 - 4.440892098501e-16 - 0.0 - 8.881784197001e-16 - 0.0 - 4.440892098501e-16 - -2.22044604925e-15 - 1.33226762955e-15 - -8.881784197001e-16 - -1.33226762955e-15 - 0.0 - 4.440892098501e-16 - 4.440892098501e-16 - -1.06581410364e-14 - -1.24344978758e-14 - 0.0 - 8.881784197001e-16 - -2.6645352591e-15 - -4.440892098501e-16 - -3.330669073875e-16 - 1.7763568394e-15 - 3.552713678801e-14 - -8.881784197001e-16 - 1.7763568394e-15 - 3.552713678801e-15 - -4.440892098501e-16 - 0.0 - -4.440892098501e-15 - 6.217248937901e-15 - -3.552713678801e-15 - -4.440892098501e-16 - -8.881784197001e-16 - 0.0 - 0.0 - 1.7763568394e-15 - 2.84217094304e-14 - -4.440892098501e-15 - 2.6645352591e-15 - -3.330669073875e-16 - 3.552713678801e-15 - -2.6645352591e-15 - 0.0 - -4.440892098501e-15 - -1.7763568394e-15 - 9.103828801926e-15 - -3.552713678801e-15 - 0.0 - 0.0 - -4.440892098501e-16 - 2.6645352591e-15 - -4.440892098501e-16 - 1.110223024625e-16 - 1.110223024625e-16 - -8.881784197001e-16 - -7.993605777300e-15 - 1.33226762955e-15 - 6.661338147751e-16 - 0.0 - -8.881784197001e-16 - -8.881784197001e-16 - 3.552713678801e-15 - 4.440892098501e-16 - 2.22044604925e-16 - -2.22044604925e-16 - 1.33226762955e-15 - -1.59872115546e-14 - 5.551115123126e-15 - 2.22044604925e-16 - -8.881784197001e-16 - 8.881784197001e-16 - 1.110223024625e-16 - -3.552713678801e-15 - -8.881784197001e-16 - -4.718447854657e-16 - 0.0 - 1.7763568394e-15 - -3.552713678801e-15 - -7.993605777300e-15 - 7.993605777301e-15 - -1.199040866595e-14 - -1.7763568394e-15 - 1.7763568394e-15 - 8.881784197001e-16 - 8.881784197001e-16 - 8.881784197001e-16 - -8.881784197001e-16 - -8.881784197001e-16 - 0.0 - 0.0 - 4.440892098501e-16 - -5.329070518201e-15 - 0.0 - 4.773959005888e-15 - -8.881784197001e-16 - 0.0 - 4.440892098501e-16 - -1.398881011028e-14 - -1.7763568394e-15 - 5.551115123126e-15 - -3.552713678801e-15 - 
-4.440892098501e-16 - 0.0 - -2.22044604925e-16 - 0.0 - 4.440892098501e-16 - 2.22044604925e-16 - 8.881784197001e-16 - 0.0 - 1.7763568394e-15 - -2.6645352591e-14 - 8.659739592076e-15 - 3.552713678801e-15 - 8.881784197001e-16 - -8.881784197001e-16 - -8.881784197001e-16 - 4.440892098501e-16 - -1.33226762955e-15 - 0.0 - 0.0 - 0.0 - -1.7763568394e-15 - 1.59872115546e-14 - 1.7763568394e-15 - 0.0 - 4.440892098501e-16 - 0.0 - -4.440892098501e-16 - 1.7763568394e-15 - 2.6645352591e-15 - -4.440892098501e-16 - 2.22044604925e-16 - 1.665334536938e-16 - 5.329070518201e-15 - 3.552713678801e-15 - 2.84217094304e-14 - -4.440892098501e-16 - 0.0 - -6.661338147751e-16 - -3.552713678801e-15 - 4.440892098501e-16 - 8.881784197001e-16 - -4.440892098501e-16 - -3.552713678801e-15 - 2.6645352591e-15 - 1.7763568394e-15 - 3.552713678801e-15 - 6.217248937901e-15 - 4.440892098501e-16 - -4.440892098501e-16 - -8.881784197001e-16 - 0.0 - -3.10862446895e-15 - -1.7763568394e-15 - 1.24344978758e-14 - 1.59872115546e-14 - -4.440892098501e-16 - 8.881784197001e-16 - -3.10862446895e-15 - 4.440892098501e-16 - - -2.735578163993e-08 - -5.45305738342e-08 - 4.426374289324e-08 - -4.691855792771e-10 - -2.819547262334e-09 - -5.672461611539e-09 - -6.022133902661e-09 - -5.938645131209e-09 - -6.026155574546e-09 - -5.914529310758e-09 - -1.108718095111e-08 - -5.205436082178e-11 - -1.303774865846e-09 - 3.85213638765e-10 - -1.12522080542e-09 - -1.038296559841e-09 - -9.272147494244e-10 - -2.739852789091e-08 - -5.461970431497e-08 - 4.424782673595e-08 - -4.685034582508e-10 - -2.810566002154e-09 - -5.653987500409e-09 - -6.050512979527e-09 - -5.93539084548e-09 - -6.050512979527e-09 - -5.92861226778e-09 - -1.110882408284e-08 - -5.200462283028e-11 - -1.299493845863e-09 - 3.87061049878e-10 - -1.12522080542e-09 - -1.042401720497e-09 - -9.273968260004e-10 - -2.746151039901e-08 - -5.433957994683e-08 - 4.423691279953e-08 - -4.692992661148e-10 - -2.817955646606e-09 - -5.670756308973e-09 - -6.043791245247e-09 - -5.925457458034e-09 - 
-6.050569822946e-09 - -5.927120128035e-09 - -1.106290881125e-08 - -5.205436082178e-11 - -1.303028795974e-09 - 3.85909970646e-10 - -1.127881787966e-09 - -1.040435293476e-09 - -9.260201494499e-10 - -2.734941517701e-08 - -5.424772098195e-08 - 4.424055077834e-08 - -4.674802767113e-10 - -2.812839738908e-09 - -5.670756308973e-09 - -6.039726940799e-09 - -5.925457458034e-09 - -6.050512979527e-09 - -5.93539084548e-09 - -1.104555735765e-08 - -5.22533127878e-11 - -1.303213537085e-09 - 3.862474784455e-10 - -1.128016791085e-09 - -1.036601915416e-09 - -9.278338097829e-10 - -2.736760507105e-08 - 4.430148692336e-08 - -6.124395213192e-09 - 6.223643822523e-09 - 6.227637072698e-09 - 6.216559711447e-09 - -8.725535849408e-10 - -2.746151039901e-08 - 4.422872734722e-08 - -6.108592742748e-09 - 6.207599767549e-09 - 6.222357740171e-09 - 6.224738058336e-09 - -8.727347733384e-10 - -2.734918780334e-08 - 4.453613655642e-08 - -6.112173878137e-09 - 6.22451068466e-09 - 6.227359961031e-09 - 6.222300896752e-09 - -8.725198341608e-10 - -2.746151039901e-08 - 4.457479008124e-08 - -6.087390147513e-09 - 6.217128145636e-09 - -8.725535849408e-10 - 6.222357740171e-09 - 6.227637072698e-09 - -8.725198341608e-10 - -2.734918780334e-08 - 4.454204827198e-08 - -2.825686351571e-09 - -6.100492555561e-09 - -5.898698418605e-09 - -5.89793103245e-09 - -6.570232358172e-09 - 6.235552518774e-09 - -1.462325371904e-09 - -1.088713119657e-09 - -8.705525189612e-10 - -2.73569185083e-08 - 4.454068402993e-08 - -2.808064891724e-09 - -6.131557483968e-09 - -5.876401587557e-09 - -5.897845767322e-09 - -6.565869625774e-09 - 6.222357740171e-09 - -1.463405396862e-09 - -1.089073720095e-09 - -8.69984972951e-10 - -2.731030690484e-08 - 4.454204827198e-08 - -2.806132215483e-09 - -6.112287564974e-09 - -5.890328225178e-09 - -5.881020115339e-09 - -6.602697055769e-09 - 6.208729530499e-09 - -1.467945764944e-09 - -1.086476686396e-09 - -8.746043889118e-10 - -2.746151039901e-08 - -5.454558049678e-08 - 4.454068402993e-08 - -4.674802767113e-10 - 
-2.816477717715e-09 - -5.670813152392e-09 - -5.936954039498e-09 - -5.93111337821e-09 - -1.104545788166e-08 - -5.2018833685e-11 - -1.29870869614e-09 - 3.867874909247e-10 - -1.12415321496e-09 - -1.038033659029e-09 - -9.241301057727e-10 - 6.206445135604e-09 - 6.206281710774e-09 - 6.214690984052e-09 - 6.227359961031e-09 - 6.216112069524e-09 - 6.232355076463e-09 - -2.740466698015e-08 - 4.426556188264e-08 - -2.806132215483e-09 - -6.098474614191e-09 - -5.856662710357e-09 - -6.574133237791e-09 - -5.885155474061e-09 - -6.587036693873e-09 - 6.232085070224e-09 - -1.464133703166e-09 - -1.089770051976e-09 - -8.732499168218e-10 - -2.739443516475e-08 - 4.424418875715e-08 - -2.815454536176e-09 - -6.108251682235e-09 - -5.863171281817e-09 - -6.585437972717e-09 - -5.886150233891e-09 - -6.576577504802e-09 - 6.217746317816e-09 - -1.467178378789e-09 - -1.090377566015e-09 - -8.717009336578e-10 - -2.741853677435e-08 - 4.456569513422e-08 - -2.813635546772e-09 - -6.107967465141e-09 - -5.881588549528e-09 - -6.590155976482e-09 - -5.854928986082e-09 - -6.584819800537e-09 - 6.221597459444e-09 - -1.46228273934e-09 - -1.089073720095e-09 - -8.693215036715e-10 - 6.218208170594e-09 - -2.752040018095e-08 - 4.457479008124e-08 - -6.096939841882e-09 - 6.222300896752e-09 - -8.720668631668e-10 - 6.209738501184e-09 - -8.718927801965e-10 - 6.226734683423e-09 - 6.224738058336e-09 - 6.208729530499e-09 - -6.025345555827e-09 - -6.015824283168e-09 - -2.741762727965e-08 - 4.41937118012e-08 - -6.025857146597e-09 - -6.039897471055e-09 - -1.109327030235e-08 - -1.30298616341e-09 - -6.022730758559e-09 - -6.028955112924e-09 - -2.738147486525e-08 - 4.430512490217e-08 - -1.106290881125e-08 - -1.301476260096e-09 - -6.022730758559e-09 - -6.022105480952e-09 - - -3.552713678801e-15 - -3.552713678801e-15 - 9.769962616701e-15 - 5.329070518201e-15 - 6.217248937901e-15 - 3.774758283726e-15 - -2.553512956638e-15 - 4.440892098501e-16 - -1.110223024625e-15 - 0.0 - 8.881784197001e-16 - 1.7763568394e-15 - 2.22044604925e-16 - 0.0 - 
2.22044604925e-16 - 2.22044604925e-16 - -2.22044604925e-16 - 0.0 - 1.42108547152e-14 - -1.42108547152e-14 - -1.7763568394e-15 - 1.7763568394e-15 - 2.6645352591e-15 - -8.881784197001e-16 - -8.881784197001e-16 - -8.881784197001e-16 - -4.440892098501e-16 - -8.881784197001e-16 - 4.440892098501e-16 - 0.0 - 4.440892098501e-16 - 2.22044604925e-16 - -5.551115123126e-16 - -1.110223024625e-16 - -8.881784197001e-16 - 0.0 - 1.95399252334e-14 - 5.329070518201e-15 - -5.329070518201e-15 - 2.442490654175e-15 - 8.881784197001e-16 - 0.0 - 0.0 - 4.440892098501e-16 - -4.440892098501e-16 - -8.881784197001e-16 - -1.33226762955e-15 - 4.440892098501e-16 - -4.440892098501e-16 - 0.0 - 0.0 - 4.440892098501e-16 - -2.22044604925e-14 - 1.95399252334e-14 - 6.439293542826e-15 - 6.883382752676e-15 - 2.442490654175e-15 - -8.881784197001e-16 - 0.0 - -8.881784197001e-16 - -8.881784197001e-16 - -1.7763568394e-15 - 8.881784197001e-16 - -4.440892098501e-16 - -1.33226762955e-15 - 0.0 - 0.0 - 2.22044604925e-16 - -7.105427357601e-15 - 0.0 - 1.7763568394e-15 - 2.6645352591e-15 - -8.881784197001e-16 - -8.881784197001e-16 - -2.22044604925e-16 - -8.881784197001e-16 - 3.28626015289e-14 - 1.7763568394e-15 - 1.7763568394e-15 - 3.552713678801e-15 - -1.33226762955e-15 - 0.0 - 3.552713678801e-15 - 1.42108547152e-14 - -2.6645352591e-15 - 8.881784197001e-16 - -2.6645352591e-15 - 1.7763568394e-15 - 0.0 - -8.881784197001e-16 - 1.95399252334e-14 - -3.10862446895e-15 - 1.7763568394e-15 - -2.22044604925e-16 - 3.552713678801e-15 - -8.881784197001e-16 - 0.0 - 3.552713678801e-15 - -1.95399252334e-14 - 7.105427357601e-15 - -3.552713678801e-15 - 0.0 - 0.0 - -8.881784197001e-16 - -8.881784197001e-16 - 0.0 - -1.110223024625e-16 - -8.326672684688e-17 - -3.552713678801e-15 - 0.0 - -2.22044604925e-15 - 4.440892098501e-16 - 0.0 - 0.0 - -8.881784197001e-16 - 3.552713678801e-15 - 6.661338147751e-16 - 0.0 - 0.0 - 5.329070518201e-15 - -3.37507799486e-14 - -6.661338147751e-16 - 2.553512956638e-15 - -8.881784197001e-16 - 8.881784197001e-16 
- 4.440892098501e-16 - 0.0 - 0.0 - -3.053113317719e-16 - 0.0 - -8.881784197001e-16 - -3.37507799486e-14 - 0.0 - 1.24344978758e-14 - -1.199040866595e-14 - 0.0 - 8.881784197001e-16 - 4.440892098501e-16 - 0.0 - -8.881784197001e-16 - -8.881784197001e-16 - -8.881784197001e-16 - 0.0 - 0.0 - 0.0 - -7.105427357601e-15 - -1.7763568394e-15 - 2.997602166488e-15 - -2.6645352591e-15 - -2.6645352591e-15 - -1.554312234475e-15 - -1.06581410364e-14 - -3.28626015289e-14 - -6.661338147751e-16 - -2.22044604925e-15 - -1.110223024625e-15 - 4.440892098501e-16 - 1.110223024625e-15 - -4.440892098501e-16 - 1.998401444325e-15 - 4.440892098501e-16 - 4.440892098501e-16 - 0.0 - 0.0 - -8.881784197001e-15 - 5.773159728051e-15 - 2.6645352591e-15 - 1.7763568394e-15 - 0.0 - -4.440892098501e-16 - -8.881784197001e-16 - -2.22044604925e-15 - 8.881784197001e-16 - 1.110223024625e-16 - 4.440892098501e-16 - 2.6645352591e-15 - 2.13162820728e-14 - 0.0 - 0.0 - 8.881784197001e-16 - 0.0 - -8.881784197001e-16 - 8.881784197001e-16 - 2.6645352591e-15 - 0.0 - 0.0 - -2.775557561563e-17 - 3.552713678801e-15 - 3.552713678801e-15 - 1.95399252334e-14 - -1.33226762955e-15 - 1.7763568394e-15 - -6.661338147751e-16 - -1.7763568394e-15 - 0.0 - 0.0 - -1.33226762955e-15 - 0.0 - 2.6645352591e-15 - 0.0 - 7.105427357601e-15 - 1.68753899743e-14 - -1.33226762955e-15 - 8.881784197001e-16 - -8.881784197001e-16 - 0.0 - -1.7763568394e-15 - -1.33226762955e-15 - 1.24344978758e-14 - 0.0 - -4.440892098501e-16 - 4.440892098501e-16 - -1.7763568394e-15 - 1.7763568394e-15 - - -1.06581410364e-14 - 3.552713678801e-15 - 3.552713678801e-15 - 7.105427357601e-15 - 8.881784197001e-15 - 0.0 - -2.6645352591e-15 - 8.881784197001e-16 - -1.7763568394e-15 - -8.881784197001e-16 - 1.7763568394e-15 - 8.881784197001e-16 - 0.0 - 1.7763568394e-15 - -1.110223024625e-16 - -2.22044604925e-16 - 0.0 - -3.552713678801e-15 - 4.263256414561e-14 - -7.105427357601e-15 - -3.552713678801e-15 - -3.552713678801e-15 - -3.552713678801e-15 - -8.881784197001e-16 - 
2.22044604925e-16 - -8.881784197001e-16 - 0.0 - -2.6645352591e-15 - -4.440892098501e-16 - -1.554312234475e-15 - 0.0 - -1.110223024625e-16 - 0.0 - -2.22044604925e-16 - 3.552713678801e-15 - 1.7763568394e-14 - 8.881784197001e-15 - -3.552713678801e-15 - -3.552713678801e-15 - -8.881784197001e-16 - 4.440892098501e-16 - -7.771561172376e-16 - -1.33226762955e-15 - -1.33226762955e-15 - -4.440892098501e-16 - -8.881784197001e-16 - -1.443289932013e-15 - -8.881784197001e-16 - -2.081668171172e-16 - -2.775557561563e-17 - -3.330669073875e-16 - 3.552713678801e-15 - -5.329070518201e-14 - -2.48689957516e-14 - -7.105427357601e-15 - -5.329070518201e-15 - -8.881784197001e-16 - -8.881784197001e-16 - -7.771561172376e-16 - -8.881784197001e-16 - 2.22044604925e-16 - -1.7763568394e-15 - 2.22044604925e-16 - -8.881784197001e-16 - -8.881784197001e-16 - -1.110223024625e-16 - -1.665334536938e-16 - 5.551115123126e-17 - 7.105427357601e-15 - -3.19744231092e-14 - -1.7763568394e-15 - 4.440892098501e-16 - -5.773159728051e-15 - -3.552713678801e-15 - -4.440892098501e-16 - 3.552713678801e-15 - 1.59872115546e-14 - -8.881784197001e-16 - 1.33226762955e-15 - 2.6645352591e-15 - 0.0 - -3.330669073875e-16 - 5.329070518201e-15 - 1.7763568394e-15 - -3.552713678801e-15 - -8.881784197001e-16 - -8.881784197001e-16 - -1.7763568394e-15 - -4.440892098501e-16 - 3.552713678801e-15 - 8.881784197001e-15 - -2.6645352591e-15 - 5.773159728051e-15 - -4.440892098501e-16 - 2.6645352591e-15 - -5.773159728051e-15 - -4.440892098501e-16 - 5.329070518201e-15 - -3.730349362741e-14 - 8.881784197001e-15 - -2.6645352591e-15 - -1.110223024625e-16 - 9.436895709314e-16 - -1.7763568394e-15 - -1.33226762955e-15 - -8.881784197001e-16 - -4.440892098501e-16 - 2.22044604925e-16 - 5.329070518201e-15 - -3.01980662698e-14 - 8.881784197001e-16 - -1.7763568394e-15 - -1.33226762955e-15 - 0.0 - -1.7763568394e-15 - 2.6645352591e-15 - 0.0 - -3.330669073875e-16 - -4.440892098501e-16 - 0.0 - -5.151434834261e-14 - 5.329070518201e-15 - -2.6645352591e-15 - 
-1.998401444325e-15 - 4.996003610813e-16 - -4.440892098501e-16 - -3.996802888651e-15 - 0.0 - -8.881784197001e-16 - -8.881784197001e-16 - 3.552713678801e-15 - -8.881784197001e-15 - -3.01980662698e-14 - 5.329070518201e-15 - -1.42108547152e-14 - -8.881784197001e-16 - 1.110223024625e-16 - -4.440892098501e-16 - 6.661338147751e-16 - -8.881784197001e-16 - -8.881784197001e-16 - -2.6645352591e-15 - 0.0 - 0.0 - -2.22044604925e-16 - -7.105427357601e-15 - -3.552713678801e-15 - 8.881784197001e-16 - -8.881784197001e-16 - -3.10862446895e-15 - -8.881784197001e-16 - -6.217248937901e-15 - -2.13162820728e-14 - 5.329070518201e-15 - -5.329070518201e-15 - -1.7763568394e-15 - 0.0 - -1.7763568394e-15 - -8.881784197001e-16 - 2.22044604925e-15 - 4.440892098501e-16 - 8.881784197001e-16 - 0.0 - 7.105427357601e-15 - -1.59872115546e-14 - 7.993605777301e-15 - 2.22044604925e-15 - 4.440892098501e-16 - -9.992007221626e-16 - 0.0 - 2.22044604925e-16 - -6.661338147751e-15 - 0.0 - 0.0 - 8.881784197001e-16 - 7.105427357601e-15 - 1.06581410364e-14 - 4.440892098501e-16 - -2.442490654175e-15 - 8.881784197001e-16 - -8.881784197001e-16 - -1.33226762955e-15 - 0.0 - 2.22044604925e-15 - -6.661338147751e-16 - -3.330669073875e-16 - 0.0 - 8.881784197001e-16 - 7.105427357601e-15 - 8.881784197001e-15 - 0.0 - -1.7763568394e-15 - -7.910339050454e-16 - -5.329070518201e-15 - -2.22044604925e-16 - 2.22044604925e-15 - 0.0 - -3.996802888651e-15 - 1.998401444325e-15 - -2.22044604925e-15 - 1.06581410364e-14 - -3.01980662698e-14 - 0.0 - -8.881784197001e-16 - -4.440892098501e-16 - -8.881784197001e-16 - -2.22044604925e-15 - -3.552713678801e-15 - 0.0 - -3.552713678801e-14 - -4.440892098501e-16 - 3.330669073875e-16 - -2.22044604925e-15 - 0.0 - - -2.453347747178e-08 - -4.890711124972e-08 - 3.971540252223e-08 - -4.215081617076e-10 - -2.531166387598e-09 - -5.090015520182e-09 - -5.402824854173e-09 - -5.326945995421e-09 - -5.406533887253e-09 - -5.303881778218e-09 - -9.95139259885e-09 - -4.664002517529e-11 - -1.169468077933e-09 - 
3.465174813755e-10 - -1.009041739053e-09 - -9.316067917097e-10 - -8.316902722072e-10 - -2.457070991113e-08 - -4.9003332947e-08 - 3.969984163632e-08 - -4.210960469209e-10 - -2.520792463656e-09 - -5.072479325463e-09 - -5.43165867839e-09 - -5.325112795163e-09 - -5.43165867839e-09 - -5.320373475115e-09 - -9.971742542803e-09 - -4.661160346586e-11 - -1.165034291262e-09 - 3.48336470779e-10 - -1.009041739053e-09 - -9.349250262858e-10 - -8.319460675921e-10 - -2.462979864504e-08 - -4.874540593391e-08 - 3.969002193571e-08 - -4.214655291435e-10 - -2.529361609049e-09 - -5.086377541375e-09 - -5.424837468126e-09 - -5.313083306646e-09 - -5.429683369584e-09 - -5.316017848145e-09 - -9.926125699167e-09 - -4.666844688472e-11 - -1.168785956907e-09 - 3.474269760773e-10 - -1.012267603073e-09 - -9.336105222246e-10 - -8.307345922276e-10 - -2.453501224409e-08 - -4.864099878432e-08 - 3.969411466187e-08 - -4.200586545267e-10 - -2.522682507333e-09 - -5.086377541375e-09 - -5.417248871709e-09 - -5.313083306646e-09 - -5.43165867839e-09 - -5.325112795163e-09 - -9.905932074616e-09 - -4.685318799602e-11 - -1.16909859571e-09 - 3.474269760773e-10 - -1.011144945551e-09 - -9.30121757392e-10 - -8.32123703276e-10 - -2.455203684804e-08 - 3.975333129347e-08 - -5.494243282556e-09 - 5.58331691991e-09 - 5.584197992903e-09 - 5.577660999734e-09 - -7.832063886326e-10 - -2.462979864504e-08 - 3.968375494878e-08 - -5.482178266902e-09 - 5.567116545535e-09 - 5.582947437688e-09 - 5.582720064012e-09 - -7.826947978629e-10 - -2.453300851357e-08 - 3.997169528702e-08 - -5.48229195374e-09 - 5.582549533756e-09 - 5.58533486128e-09 - 5.58033264042e-09 - -7.824034753412e-10 - -2.462979864504e-08 - 4.000852982244e-08 - -5.459440899358e-09 - 5.578741024692e-09 - -7.832063886326e-10 - 5.582947437688e-09 - 5.584197992903e-09 - -7.824034753412e-10 - -2.453300851357e-08 - 3.997578801318e-08 - -2.534619625294e-09 - -5.472671205098e-09 - -5.292946525515e-09 - -5.289514604101e-09 - -5.893511456634e-09 - 5.593420837613e-09 - 
-1.311946107307e-09 - -9.763994057721e-10 - -7.807834379037e-10 - -2.454028447119e-08 - 3.997533326583e-08 - -2.518589781175e-09 - -5.499970257006e-09 - -5.27222709934e-09 - -5.289443549827e-09 - -5.889233989365e-09 - 5.582947437688e-09 - -1.31279875859e-09 - -9.767973097040e-10 - -7.801634893667e-10 - -2.450508418406e-08 - 3.997578801318e-08 - -2.517722919038e-09 - -5.481609832714e-09 - -5.282728920974e-09 - -5.276390879771e-09 - -5.921897638927e-09 - 5.568864480665e-09 - -1.316720954492e-09 - -9.741683015817e-10 - -7.849578764763e-10 - -2.462979864504e-08 - -4.892184790606e-08 - 3.997533326583e-08 - -4.198312808512e-10 - -2.525382569729e-09 - -5.087684940008e-09 - -5.325020424607e-09 - -5.321375340372e-09 - -9.905946285471e-09 - -4.662581432058e-11 - -1.165076923826e-09 - 3.474269760773e-10 - -1.008160666061e-09 - -9.312302040598e-10 - -8.287912578453e-10 - 5.564075422626e-09 - 5.566761274167e-09 - 5.577334150075e-09 - 5.58533486128e-09 - 5.575742534347e-09 - 5.589768647951e-09 - -2.458268966166e-08 - 3.971631201694e-08 - -2.517722919038e-09 - -5.472060138345e-09 - -5.253625090518e-09 - -5.896666266381e-09 - -5.281549420033e-09 - -5.911225287036e-09 - 5.58955548513e-09 - -1.31430510919e-09 - -9.776002229954e-10 - -7.833289572545e-10 - -2.456549452745e-08 - 3.969729789333e-08 - -2.525453624003e-09 - -5.481140874508e-09 - -5.259096269583e-09 - -5.908617595196e-09 - -5.280242021399e-09 - -5.902698774207e-09 - 5.579806838796e-09 - -1.316578845945e-09 - -9.776002229954e-10 - -7.816733926802e-10 - -2.458754977397e-08 - 3.999780062713e-08 - -2.523478315197e-09 - -5.481275877628e-09 - -5.276277192934e-09 - -5.912788481055e-09 - -5.252047685644e-09 - -5.904489341901e-09 - 5.580417905549e-09 - -1.312287167821e-09 - -9.767973097040e-10 - -7.796998602316e-10 - 5.577490469477e-09 - -2.470176241332e-08 - 4.000852982244e-08 - -5.468805852615e-09 - 5.58033264042e-09 - -7.824567660464e-10 - 5.57135138024e-09 - -7.82140574529e-10 - 5.586642259914e-09 - 5.582720064012e-09 - 
5.568864480665e-09 - -5.405695446825e-09 - -5.398412383784e-09 - -2.459630366047e-08 - 3.965256212268e-08 - -5.406420200416e-09 - -5.417355453119e-09 - -9.949395973763e-09 - -1.168757535197e-09 - -5.401048497333e-09 - -5.409134473666e-09 - -2.456927461481e-08 - 3.975651452492e-08 - -9.926125699167e-09 - -1.167023810922e-09 - -5.401048497333e-09 - -5.400458746863e-09 history_criterion: - - 21.53511643627 - 14.80453604351 - 6.548558251064 - 12.54188075473 - 9.282890198608 - 2.859555210712 - 0.9381817894678 - 0.2048532883114 - 0.8881817894678 - 0.3798532883114 - -0.9101956814319 - -1.36444138824 - -0.9351994446357 - -1.055070381505 - -1.111335532899 - -0.1703442432756 - 1.580641245921 - 19.23511643627 - 13.00453604351 - 13.94855825106 - 11.24188075473 - 6.182890198608 - -1.240444789288 - -0.8618182105322 - -1.995146711689 - -0.9868182105322 - -1.270146711689 - -1.135195681432 - -0.9144413882404 - -3.072699444636 - -1.317570381505 - -0.9238355328992 - 0.9546557567244 - -0.3318587540789 - 8.635116436265 - 15.10453604351 - 6.148558251063 - 4.841880754733 - 5.382890198608 - 2.059555210712 - -3.361818210532 - -2.995146711689 - -3.311818210532 - -2.395146711689 - -2.185195681432 - -2.63944138824 - -1.985199444636 - -1.880070381505 - -1.711335532899 - -1.407844243276 - -0.4818587540789 - 2.735116436265 - 3.404536043506 - 3.148558251063 - 3.141880754733 - 2.482890198608 - 0.5595552107122 - -0.7618182105322 - -2.995146711689 - -0.7993182105322 - -2.245146711689 - -1.885195681432 - -1.96444138824 - -1.647699444636 - -2.292570381505 - -1.486335532899 - -1.557844243276 - -0.8193587540789 - 10.13511643627 - 4.748558251063 - -2.218096467799 - -4.369688200573 - -3.659688200573 - -1.219688200573 - -0.3489655844206 - 6.635116436265 - 2.248558251063 - -1.518096467799 - -2.939688200573 - -4.029688200573 - -2.159688200573 - -2.038965584421 - 5.435116436265 - 3.348558251064 - -1.818096467799 - -2.909688200573 - -4.969688200573 - -3.469688200573 - -0.5389655844206 - 6.635116436265 - 
5.848558251064 - -0.918096467799 - -4.219688200573 - -0.3489655844206 - -4.029688200573 - -3.659688200573 - -0.5389655844206 - 5.435116436265 - 2.348558251064 - -0.0171098013921 - -2.718096467799 - -4.257793595776 - -3.887793595776 - -2.006947842151 - -2.829688200573 - -0.1835757519589 - 0.8557490906722 - 0.6910344155794 - 4.435116436265 - 4.348558251064 - 0.9828901986079 - 0.481903532201 - -6.457793595776 - -6.137793595776 - -1.516947842151 - -4.029688200573 - -1.013575751959 - -0.8342509093278 - 1.441034415579 - -0.8648835637348 - 1.848558251064 - 0.6828901986079 - 1.081903532201 - -5.457793595776 - -4.787793595776 - 0.1730521578493 - -1.139688200573 - -3.263575751959 - 0.4057490906722 - 4.141034415579 - 6.635116436265 - 2.104536043506 - 4.348558251064 - 5.641880754733 - -0.1171098013921 - -2.640444789288 - -3.195146711689 - -2.325146711689 - -4.777695681432 - -5.49444138824 - -4.762699444636 - -5.027570381505 - -3.966335532899 - -3.510344243276 - -2.694358754079 - 7.410311799427 - 8.980311799427 - 1.290311799427 - -4.969688200573 - -4.709688200573 - -0.659688200573 - -0.5648835637348 - -2.951441748936 - 0.1828901986079 - 5.081903532201 - 3.342206404224 - 2.873052157849 - 3.162206404224 - 2.983052157849 - 0.920311799427 - 0.1164242480411 - 3.925749090672 - 2.761034415579 - 9.335116436265 - 3.648558251063 - -0.3171098013921 - -5.718096467799 - -2.457793595776 - -3.126947842151 - -1.897793595776 - -2.156947842151 - -0.539688200573 - -2.693575751959 - -0.2742509093278 - 2.531034415579 - -4.664883563735 - 1.548558251064 - -7.017109801392 - -4.018096467799 - -0.7577935957756 - -4.526947842151 - -0.4677935957756 - -3.876947842151 - -3.429688200573 - -2.813575751959 - -0.9442509093278 - 0.6610344155794 - -3.089688200573 - 9.635116436265 - 6.848558251064 - 0.781903532201 - -3.469688200573 - -1.108965584421 - -3.839688200573 - -0.9189655844206 - -1.589688200573 - -2.159688200573 - -1.139688200573 - -4.661818210532 - -4.211818210532 - 10.33511643627 - 3.948558251064 - 
-0.3618182105322 - -0.3518182105322 - -1.477695681432 - -2.132699444636 - -1.761818210532 - -1.471818210532 - 9.935116436265 - 3.248558251063 - -1.997695681432 - -2.472699444636 - -1.261818210532 - -1.211818210532 - - 25.01562287811 - 18.67576650474 - 10.71425043997 - 16.92850306334 - 13.83328982937 - 7.61143273431 - 5.780449944004 - 4.918595910462 - 5.730449944004 - 5.093595910462 - 3.573230198002 - 2.843276294294 - 2.98078293018 - 2.569118760852 - 2.229814978179 - 2.901300021005 - 4.398727952741 - 22.71562287811 - 16.87576650474 - 18.11425043997 - 15.62850306334 - 10.73328982937 - 3.51143273431 - 3.980449944004 - 2.718595910462 - 3.855449944004 - 3.443595910462 - 3.348230198002 - 3.293276294294 - 0.8432829301802 - 2.306618760852 - 2.417314978179 - 4.026300021005 - 2.486227952741 - 12.11562287811 - 18.97576650474 - 10.31425043997 - 9.22850306334 - 9.933289829366 - 6.81143273431 - 1.480449944004 - 1.718595910462 - 1.530449944004 - 2.318595910462 - 2.298230198002 - 1.568276294294 - 1.93078293018 - 1.744118760852 - 1.629814978179 - 1.663800021005 - 2.336227952741 - 6.215622878108 - 7.275766504742 - 7.314250439974 - 7.52850306334 - 7.033289829366 - 5.31143273431 - 4.080449944004 - 1.718595910462 - 4.042949944004 - 2.468595910462 - 2.598230198002 - 2.243276294294 - 2.26828293018 - 1.331618760852 - 1.854814978179 - 1.513800021005 - 1.998727952741 - 13.61562287811 - 8.914250439974 - 2.617857443871 - -0.02069875634249 - 0.6893012436575 - 3.129301243658 - 2.348674115464 - 10.11562287811 - 6.414250439974 - 3.317857443871 - 1.409301243658 - 0.3193012436575 - 2.189301243658 - 0.6586741154643 - 8.915622878108 - 7.514250439974 - 3.017857443871 - 1.439301243658 - -0.6206987563425 - 0.8793012436575 - 2.158674115464 - 10.11562287811 - 10.01425043997 - 3.917857443871 - 0.1293012436575 - 2.348674115464 - 0.3193012436575 - 0.6893012436575 - 2.158674115464 - 8.915622878108 - 6.514250439974 - 4.533289829366 - 2.117857443871 - 0.5381907245488 - 0.9081907245488 - 2.599956711546 - 
1.519301243658 - 3.585844975595 - 4.0602621231 - 3.388674115464 - 7.915622878108 - 8.514250439974 - 5.533289829366 - 5.317857443871 - -1.661809275451 - -1.341809275451 - 3.089956711546 - 0.3193012436575 - 2.755844975595 - 2.3702621231 - 4.138674115464 - 2.615622878108 - 6.014250439974 - 5.233289829366 - 5.917857443871 - -0.6618092754512 - 0.008190724548808 - 4.779956711546 - 3.209301243658 - 0.505844975595 - 3.6102621231 - 6.838674115464 - 10.11562287811 - 5.975766504742 - 8.514250439974 - 10.02850306334 - 4.433289829366 - 2.11143273431 - 1.518595910462 - 2.388595910462 - -0.2942698019983 - -1.286723705706 - -0.8467170698198 - -1.403381239148 - -0.6251850218209 - -0.4386999789948 - 0.1237279527411 - 11.75930124366 - 13.32930124366 - 5.639301243658 - -0.6206987563425 - -0.3606987563425 - 3.689301243658 - 2.915622878108 - 1.214250439974 - 4.733289829366 - 9.917857443871 - 8.138190724549 - 7.479956711546 - 7.958190724549 - 7.589956711546 - 5.269301243658 - 3.885844975595 - 7.1302621231 - 5.458674115464 - 12.81562287811 - 7.814250439974 - 4.233289829366 - -0.8821425561292 - 2.338190724549 - 1.479956711546 - 2.898190724549 - 2.449956711546 - 3.809301243658 - 1.075844975595 - 2.9302621231 - 5.228674115464 - -1.184377121892 - 5.714250439974 - -2.466710170634 - 0.8178574438708 - 4.038190724549 - 0.07995671154575 - 4.328190724549 - 0.7299567115457 - 0.9193012436575 - 0.955844975595 - 2.2602621231 - 3.358674115464 - 1.259301243658 - 13.11562287811 - 11.01425043997 - 5.617857443871 - 0.8793012436575 - 1.588674115464 - 0.5093012436575 - 1.778674115464 - 2.759301243658 - 2.189301243658 - 3.209301243658 - 0.1804499440042 - 0.6304499440042 - 13.81562287811 - 8.114250439974 - 4.480449944004 - 4.490449944004 - 3.005730198002 - 1.78328293018 - 3.080449944004 - 3.370449944004 - 13.41562287811 - 7.414250439974 - 2.485730198002 - 1.44328293018 - 3.580449944004 - 3.630449944004 - - 84.68988065196 - 70.73054388289 - 56.46322643196 - 57.38823556867 - 49.80586460657 - 36.42009030556 - 
24.97150307276 - 18.1321687762 - 24.92150307276 - 18.3071687762 - 12.85194135284 - 9.428755329368 - 7.671466840337 - 5.900125885276 - 4.571195356425 - 4.515228493968 - 5.474513604881 - 82.38988065196 - 68.93054388289 - 63.86322643196 - 56.08823556867 - 46.70586460657 - 32.32009030556 - 23.17150307276 - 15.9321687762 - 23.04650307276 - 16.6571687762 - 12.62694135284 - 9.878755329368 - 5.533966840337 - 5.637625885276 - 4.758695356425 - 5.640228493968 - 3.562013604881 - 71.78988065196 - 71.03054388289 - 56.06322643196 - 49.68823556867 - 45.90586460657 - 35.62009030556 - 20.67150307276 - 14.9321687762 - 20.72150307276 - 15.5321687762 - 11.57694135284 - 8.153755329368 - 6.621466840337 - 5.075125885276 - 3.971195356425 - 3.277728493968 - 3.412013604881 - 65.88988065196 - 59.33054388289 - 53.06322643196 - 47.98823556867 - 43.00586460657 - 34.12009030556 - 23.27150307276 - 14.9321687762 - 23.23400307276 - 15.6821687762 - 11.87694135284 - 8.828755329368 - 6.958966840337 - 4.662625885276 - 4.196195356425 - 3.127728493968 - 3.074513604881 - 73.28988065196 - 54.66322643196 - 26.00799822147 - 7.789506147668 - 8.499506147668 - 10.93950614767 - 3.209942501544 - 69.78988065196 - 52.16322643196 - 26.70799822147 - 9.219506147668 - 8.129506147668 - 9.999506147668 - 1.519942501544 - 68.58988065196 - 53.26322643196 - 26.40799822147 - 9.249506147668 - 7.189506147668 - 8.689506147668 - 3.019942501544 - 69.78988065196 - 55.76322643196 - 27.30799822147 - 7.939506147668 - 3.209942501544 - 8.129506147668 - 8.499506147668 - 3.019942501544 - 68.58988065196 - 52.26322643196 - 40.50586460657 - 25.50799822147 - 16.41235765092 - 16.78235765092 - 13.65241143766 - 9.329506147668 - 7.541813269635 - 6.010338273791 - 4.249942501544 - 67.58988065196 - 54.26322643196 - 41.50586460657 - 28.70799822147 - 14.21235765092 - 14.53235765092 - 14.14241143766 - 8.129506147668 - 6.711813269635 - 4.320338273791 - 4.999942501544 - 62.28988065196 - 51.76322643196 - 41.20586460657 - 29.30799822147 - 15.21235765092 - 
15.88235765092 - 15.83241143766 - 11.01950614767 - 4.461813269635 - 5.560338273791 - 7.699942501544 - 69.78988065196 - 58.03054388289 - 54.26322643196 - 50.48823556867 - 40.40586460657 - 30.92009030556 - 14.7321687762 - 15.6021687762 - 8.984441352835 - 5.298755329368 - 3.843966840337 - 1.927625885276 - 1.716195356425 - 1.175228493968 - 1.199513604881 - 19.56950614767 - 21.13950614767 - 13.44950614767 - 7.189506147668 - 7.449506147668 - 11.49950614767 - 62.58988065196 - 46.96322643196 - 40.70586460657 - 33.30799822147 - 24.01235765092 - 18.53241143766 - 23.83235765092 - 18.64241143766 - 13.07950614767 - 7.841813269635 - 9.080338273791 - 6.319942501544 - 72.48988065196 - 53.56322643196 - 40.20586460657 - 22.50799822147 - 18.21235765092 - 12.53241143766 - 18.77235765092 - 13.50241143766 - 11.61950614767 - 5.031813269635 - 4.880338273791 - 6.089942501544 - 58.48988065196 - 51.46322643196 - 33.50586460657 - 24.20799822147 - 19.91235765092 - 11.13241143766 - 20.20235765092 - 11.78241143766 - 8.729506147668 - 4.911813269635 - 4.210338273791 - 4.219942501544 - 9.069506147668 - 72.78988065196 - 56.76322643196 - 29.00799822147 - 8.689506147668 - 2.449942501544 - 8.319506147668 - 2.639942501544 - 10.56950614767 - 9.999506147668 - 11.01950614767 - 19.37150307276 - 19.82150307276 - 73.48988065196 - 53.86322643196 - 23.67150307276 - 23.68150307276 - 12.28444135284 - 6.473966840337 - 22.27150307276 - 22.56150307276 - 73.08988065196 - 53.16322643196 - 11.76444135284 - 6.133966840337 - 22.77150307276 - 22.82150307276 - - 78.17391291542 - 66.8366728159 - 54.32599616455 - 56.48754438985 - 49.80586460657 - 37.60220537333 - 27.26395828245 - 20.80723297571 - 27.21395828245 - 20.98223297571 - 15.60547682225 - 12.11967202128 - 10.23248733714 - 8.30080219923 - 6.80055773407 - 6.572919168714 - 7.365975022026 - 75.87391291542 - 65.0366728159 - 61.72599616455 - 55.18754438985 - 46.70586460657 - 33.50220537333 - 25.46395828245 - 18.60723297571 - 25.33895828245 - 19.33223297571 - 15.38047682225 
- 12.56967202128 - 8.094987337144 - 8.03830219923 - 6.98805773407 - 7.697919168714 - 5.453475022026 - 65.27391291542 - 67.1366728159 - 53.92599616455 - 48.78754438985 - 45.90586460657 - 36.80220537333 - 22.96395828245 - 17.60723297571 - 23.01395828245 - 18.20723297571 - 14.33047682225 - 10.84467202128 - 9.182487337144 - 7.47580219923 - 6.20055773407 - 5.335419168714 - 5.303475022026 - 59.37391291542 - 55.4366728159 - 50.92599616455 - 47.08754438985 - 43.00586460657 - 35.30220537333 - 25.56395828245 - 17.60723297571 - 25.52645828245 - 18.35723297571 - 14.63047682225 - 11.51967202128 - 9.519987337144 - 7.06330219923 - 6.42555773407 - 5.185419168714 - 4.965975022026 - 66.77391291542 - 52.52599616455 - 27.88429931353 - 10.52352617863 - 11.23352617863 - 13.67352617863 - 5.021362784819 - 63.27391291542 - 50.02599616455 - 28.58429931353 - 11.95352617863 - 10.86352617863 - 12.73352617863 - 3.331362784819 - 62.07391291542 - 51.12599616455 - 28.28429931353 - 11.98352617863 - 9.923526178634 - 11.42352617863 - 4.831362784819 - 63.27391291542 - 53.62599616455 - 29.18429931353 - 10.67352617863 - 5.021362784819 - 10.86352617863 - 11.23352617863 - 4.831362784819 - 62.07391291542 - 50.12599616455 - 40.50586460657 - 27.38429931353 - 18.95079727771 - 19.32079727771 - 16.391415976 - 12.06352617863 - 10.02497402658 - 8.153464959245 - 6.061362784819 - 61.07391291542 - 52.12599616455 - 41.50586460657 - 30.58429931353 - 16.75079727771 - 17.07079727771 - 16.881415976 - 10.86352617863 - 9.194974026576 - 6.463464959245 - 6.811362784819 - 55.77391291542 - 49.62599616455 - 41.20586460657 - 31.18429931353 - 17.75079727771 - 18.42079727771 - 18.571415976 - 13.75352617863 - 6.944974026576 - 7.703464959245 - 9.511362784819 - 63.27391291542 - 54.1366728159 - 52.12599616455 - 49.58754438985 - 40.40586460657 - 32.10220537333 - 17.40723297571 - 18.27723297571 - 11.73797682225 - 7.989672021276 - 6.404987337144 - 4.32830219923 - 3.94555773407 - 3.232919168714 - 3.090975022026 - 22.30352617863 - 
23.87352617863 - 16.18352617863 - 9.923526178634 - 10.18352617863 - 14.23352617863 - 56.07391291542 - 44.82599616455 - 40.70586460657 - 35.18429931353 - 26.55079727771 - 21.271415976 - 26.37079727771 - 21.381415976 - 15.81352617863 - 10.32497402658 - 11.22346495925 - 8.131362784819 - 65.97391291542 - 51.42599616455 - 40.20586460657 - 24.38429931353 - 20.75079727771 - 15.271415976 - 21.31079727771 - 16.241415976 - 14.35352617863 - 7.514974026576 - 7.023464959245 - 7.901362784819 - 51.97391291542 - 49.32599616455 - 33.50586460657 - 26.08429931353 - 22.45079727771 - 13.871415976 - 22.74079727771 - 14.521415976 - 11.46352617863 - 7.394974026576 - 6.353464959245 - 6.031362784819 - 11.80352617863 - 66.27391291542 - 54.62599616455 - 30.88429931353 - 11.42352617863 - 4.261362784819 - 11.05352617863 - 4.451362784819 - 13.30352617863 - 12.73352617863 - 13.75352617863 - 21.66395828245 - 22.11395828245 - 66.97391291542 - 51.72599616455 - 25.96395828245 - 25.97395828245 - 15.03797682225 - 9.034987337144 - 24.56395828245 - 24.85395828245 - 66.57391291542 - 51.02599616455 - 14.51797682225 - 8.694987337144 - 25.06395828245 - 25.11395828245 - - 122.1511527598 - 122.6211240639 - 156.1508499375 - -599.1450584808 - -12.38948471101 - 19.33755088278 - 21.40590723345 - 17.86925432536 - 21.35590723345 - 18.04425432536 - 13.84616571438 - 10.95881863539 - 9.418115456519 - 7.704952567843 - 6.351080428381 - 6.22595148445 - 7.093292608367 - 119.8511527598 - 120.8211240639 - 163.5508499375 - -600.4450584808 - -15.48948471101 - 15.23755088278 - 19.60590723345 - 15.66925432536 - 19.48090723345 - 16.39425432536 - 13.62116571438 - 11.40881863539 - 7.280615456519 - 7.442452567843 - 6.538580428381 - 7.35095148445 - 5.180792608367 - 109.2511527598 - 122.9211240639 - 155.7508499375 - -606.8450584808 - -16.28948471101 - 18.53755088278 - 17.10590723345 - 14.66925432536 - 17.15590723345 - 15.26925432536 - 12.57116571438 - 9.683818635389 - 8.368115456519 - 6.879952567843 - 5.751080428381 - 4.98845148445 - 
5.030792608367 - 103.3511527598 - 111.2211240639 - 152.7508499375 - -608.5450584808 - -19.18948471101 - 17.03755088278 - 19.70590723345 - 14.66925432536 - 19.66840723345 - 15.41925432536 - 12.87116571438 - 10.35881863539 - 8.705615456519 - 6.467452567843 - 5.976080428381 - 4.83845148445 - 4.693292608367 - 110.7511527598 - 154.3508499375 - 18.46995434894 - 9.107714197815 - 9.817714197815 - 12.25771419782 - 4.778284133588 - 107.2511527598 - 151.8508499375 - 19.16995434894 - 10.53771419782 - 9.447714197815 - 11.31771419782 - 3.088284133588 - 106.0511527598 - 152.9508499375 - 18.86995434894 - 10.56771419782 - 8.507714197815 - 10.00771419782 - 4.588284133588 - 107.2511527598 - 155.4508499375 - 19.76995434894 - 9.257714197815 - 4.778284133588 - 9.447714197815 - 9.817714197815 - 4.588284133588 - 106.0511527598 - 151.9508499375 - -21.68948471101 - 17.96995434894 - 14.92797213765 - 15.29797213765 - 14.15288063203 - 10.64771419782 - 9.331447324355 - 7.759548240156 - 5.818284133588 - 105.0511527598 - 153.9508499375 - -20.68948471101 - 21.16995434894 - 12.72797213765 - 13.04797213765 - 14.64288063203 - 9.447714197815 - 8.501447324355 - 6.069548240156 - 6.568284133588 - 99.75115275983 - 151.4508499375 - -20.98948471101 - 21.76995434894 - 13.72797213765 - 14.39797213765 - 16.33288063203 - 12.33771419782 - 6.251447324355 - 7.309548240156 - 9.268284133588 - 107.2511527598 - 109.9211240639 - 153.9508499375 - -606.0450584808 - -21.78948471101 - 13.83755088278 - 14.46925432536 - 15.33925432536 - 9.978665714379 - 6.828818635389 - 5.590615456519 - 3.732452567843 - 3.496080428381 - 2.88595148445 - 2.818292608367 - 20.88771419782 - 22.45771419782 - 14.76771419782 - 8.507714197815 - 8.767714197815 - 12.81771419782 - 100.0511527598 - 146.6508499375 - -21.48948471101 - 25.76995434894 - 22.52797213765 - 19.03288063203 - 22.34797213765 - 19.14288063203 - 14.39771419782 - 9.631447324355 - 10.82954824016 - 7.888284133588 - 109.9511527598 - 153.2508499375 - -21.98948471101 - 14.96995434894 - 
16.72797213765 - 13.03288063203 - 17.28797213765 - 14.00288063203 - 12.93771419782 - 6.821447324355 - 6.629548240156 - 7.658284133588 - 95.95115275983 - 151.1508499375 - -28.68948471101 - 16.66995434894 - 18.42797213765 - 11.63288063203 - 18.71797213765 - 12.28288063203 - 10.04771419782 - 6.701447324355 - 5.959548240156 - 5.788284133588 - 10.38771419782 - 110.2511527598 - 156.4508499375 - 21.46995434894 - 10.00771419782 - 4.018284133588 - 9.637714197815 - 4.208284133588 - 11.88771419782 - 11.31771419782 - 12.33771419782 - 15.80590723345 - 16.25590723345 - 110.9511527598 - 153.5508499375 - 20.10590723345 - 20.11590723345 - 13.27866571438 - 8.220615456519 - 18.70590723345 - 18.99590723345 - 110.5511527598 - 152.8508499375 - 12.75866571438 - 7.880615456519 - 19.20590723345 - 19.25590723345 - - -35.35804307658 - -53.29699942572 - -72.06021768605 - -76.24300976651 - -89.66597211166 - -117.5990460751 - -178.3813984168 - -314.1942358597 - -178.4313984168 - -314.0192358597 - -1385.074532566 - 503.9556946656 - 197.4559049595 - 115.7942913474 - 78.11722466414 - 57.55318477565 - 45.51113019711 - -37.65804307658 - -55.09699942572 - -64.66021768605 - -77.54300976651 - -92.76597211166 - -121.6990460751 - -180.1813984168 - -316.3942358597 - -180.3063984168 - -315.6692358597 - -1385.299532566 - 504.4056946656 - 195.3184049595 - 115.5317913474 - 78.30472466414 - 58.67818477565 - 43.59863019711 - -48.25804307658 - -52.99699942572 - -72.46021768605 - -83.94300976651 - -93.56597211166 - -118.3990460751 - -182.6813984168 - -317.3942358597 - -182.6313984168 - -316.7942358597 - -1386.349532566 - 502.6806946656 - 196.4059049595 - 114.9692913474 - 77.51722466414 - 56.31568477565 - 43.44863019711 - -54.15804307658 - -64.69699942572 - -75.46021768605 - -85.64300976651 - -96.46597211166 - -119.8990460751 - -180.0813984168 - -317.3942358597 - -180.1188984168 - -316.6442358597 - -1386.049532566 - 503.3556946656 - 196.7434049595 - 114.5567913474 - 77.74222466414 - 56.16568477565 - 43.11113019711 
- -46.75804307658 - -73.86021768605 - -148.0932859294 - 1691.297253326 - 1692.007253326 - 1694.447253326 - 38.42217232425 - -50.25804307658 - -76.36021768605 - -147.3932859294 - 1692.727253326 - 1691.637253326 - 1693.507253326 - 36.73217232425 - -51.45804307658 - -75.26021768605 - -147.6932859294 - 1692.757253326 - 1690.697253326 - 1692.197253326 - 38.23217232425 - -50.25804307658 - -72.76021768605 - -146.7932859294 - 1691.447253326 - 38.42217232425 - 1691.637253326 - 1692.007253326 - 38.23217232425 - -51.45804307658 - -76.26021768605 - -98.96597211166 - -148.5932859294 - -233.0828036794 - -232.7128036794 - -508.8494431202 - 1692.837253326 - 148.258324502 - 68.02827286157 - 39.46217232425 - -52.45804307658 - -74.26021768605 - -97.96597211166 - -145.3932859294 - -235.2828036794 - -234.9628036794 - -508.3594431202 - 1691.637253326 - 147.428324502 - 66.33827286157 - 40.21217232425 - -57.75804307658 - -76.76021768605 - -98.26597211166 - -144.7932859294 - -234.2828036794 - -233.6128036794 - -506.6694431202 - 1694.527253326 - 145.178324502 - 67.57827286157 - 42.91217232425 - -50.25804307658 - -65.99699942572 - -74.26021768605 - -83.14300976651 - -99.06597211166 - -123.0990460751 - -317.5942358597 - -316.7242358597 - -1388.942032566 - 499.8256946656 - 193.6284049595 - 111.8217913474 - 75.26222466414 - 54.21318477565 - 41.23613019711 - 1703.077253326 - 1704.647253326 - 1696.957253326 - 1690.697253326 - 1690.957253326 - 1695.007253326 - -57.45804307658 - -81.56021768605 - -98.76597211166 - -140.7932859294 - -225.4828036794 - -503.9694431202 - -225.6628036794 - -503.8594431202 - 1696.587253326 - 148.558324502 - 71.09827286157 - 41.53217232425 - -47.55804307658 - -74.96021768605 - -99.26597211166 - -151.5932859294 - -231.2828036794 - -509.9694431202 - -230.7228036794 - -508.9994431202 - 1695.127253326 - 145.748324502 - 66.89827286157 - 41.30217232425 - -61.55804307658 - -77.06021768605 - -105.9659721117 - -149.8932859294 - -229.5828036794 - -511.3694431202 - -229.2928036794 - 
-510.7194431202 - 1692.237253326 - 145.628324502 - 66.22827286157 - 39.43217232425 - 1692.577253326 - -47.25804307658 - -71.76021768605 - -145.0932859294 - 1692.197253326 - 37.66217232425 - 1691.827253326 - 37.85217232425 - 1694.077253326 - 1693.507253326 - 1694.527253326 - -183.9813984168 - -183.5313984168 - -46.55804307658 - -74.66021768605 - -179.6813984168 - -179.6713984168 - -1385.642032566 - 196.2584049595 - -181.0813984168 - -180.7913984168 - -46.95804307658 - -75.36021768605 - -1386.162032566 - 195.9184049595 - -180.5813984168 - -180.5313984168 - - 117.9473464966 - 104.1334190546 - 90.05657396185 - 91.2207789054 - 83.93061974583 - 71.30942158128 - 62.32199544308 - 59.88083883476 - 62.27199544308 - 60.05583883476 - 62.77642327569 - 76.9431054476 - 131.4875801385 - -2342.694563021 - -86.51002021515 - -37.31642203542 - -19.79861800279 - 115.6473464966 - 102.3334190546 - 97.45657396185 - 89.9207789054 - 80.83061974583 - 67.20942158128 - 60.52199544308 - 57.68083883476 - 60.39699544308 - 58.40583883476 - 62.55142327569 - 77.3931054476 - 129.3500801385 - -2342.957063021 - -86.32252021515 - -36.19142203542 - -21.71111800279 - 105.0473464966 - 104.4334190546 - 89.65657396185 - 83.5207789054 - 80.03061974583 - 70.50942158128 - 58.02199544308 - 56.68083883476 - 58.07199544308 - 57.28083883476 - 61.50142327569 - 75.6681054476 - 130.4375801385 - -2343.519563021 - -87.11002021515 - -38.55392203542 - -21.86111800279 - 99.14734649662 - 92.73341905458 - 86.65657396185 - 81.8207789054 - 77.13061974583 - 69.00942158128 - 60.62199544308 - 56.68083883476 - 60.58449544308 - 57.43083883476 - 61.80142327569 - 76.3431054476 - 130.7750801385 - -2343.932063021 - -86.88502021515 - -38.70392203542 - -22.19861800279 - 106.5473464966 - 88.25657396185 - 61.9476238727 - 64.62033506263 - 65.33033506263 - 67.77033506263 - -17.38684121661 - 103.0473464966 - 85.75657396185 - 62.6476238727 - 66.05033506263 - 64.96033506263 - 66.83033506263 - -19.07684121661 - 101.8473464966 - 86.85657396185 - 
62.3476238727 - 66.08033506263 - 64.02033506263 - 65.52033506263 - -17.57684121661 - 103.0473464966 - 89.35657396185 - 63.2476238727 - 64.77033506263 - -17.38684121661 - 64.96033506263 - 65.33033506263 - -17.57684121661 - 101.8473464966 - 85.85657396185 - 74.63061974583 - 61.4476238727 - 55.64384266576 - 56.01384266576 - 58.81780905206 - 66.16033506263 - 253.6554490216 - -52.38720394238 - -16.34684121661 - 100.8473464966 - 87.85657396185 - 75.63061974583 - 64.6476238727 - 53.44384266576 - 53.76384266576 - 59.30780905206 - 64.96033506263 - 252.8254490216 - -54.07720394238 - -15.59684121661 - 95.54734649662 - 85.35657396185 - 75.33061974583 - 65.2476238727 - 54.44384266576 - 55.11384266576 - 60.99780905206 - 67.85033506263 - 250.5754490216 - -52.83720394238 - -12.89684121661 - 103.0473464966 - 91.43341905458 - 87.85657396185 - 84.3207789054 - 74.53061974583 - 65.80942158128 - 56.48083883476 - 57.35083883476 - 58.90892327569 - 72.8131054476 - 127.6600801385 - -2346.667063021 - -89.36502021515 - -40.65642203542 - -24.07361800279 - 76.40033506263 - 77.97033506263 - 70.28033506263 - 64.02033506263 - 64.28033506263 - 68.33033506263 - 95.84734649662 - 80.55657396185 - 74.83061974583 - 69.2476238727 - 63.24384266576 - 63.69780905206 - 63.06384266576 - 63.80780905206 - 69.91033506263 - 253.9554490216 - -49.31720394238 - -14.27684121661 - 105.7473464966 - 87.15657396185 - 74.33061974583 - 58.4476238727 - 57.44384266576 - 57.69780905206 - 58.00384266576 - 58.66780905206 - 68.45033506263 - 251.1454490216 - -53.51720394238 - -14.50684121661 - 91.74734649662 - 85.05657396185 - 67.63061974583 - 60.1476238727 - 59.14384266576 - 56.29780905206 - 59.43384266576 - 56.94780905206 - 65.56033506263 - 251.0254490216 - -54.18720394238 - -16.37684121661 - 65.90033506263 - 106.0473464966 - 90.35657396185 - 64.9476238727 - 65.52033506263 - -18.14684121661 - 65.15033506263 - -17.95684121661 - 67.40033506263 - 66.83033506263 - 67.85033506263 - 56.72199544308 - 57.17199544308 - 106.7473464966 - 
87.45657396185 - 61.02199544308 - 61.03199544308 - 62.20892327569 - 130.2900801385 - 59.62199544308 - 59.91199544308 - 106.3473464966 - 86.75657396185 - 61.68892327569 - 129.9500801385 - 60.12199544308 - 60.17199544308 - - 175.7616267494 - 135.815392655 - 107.427429421 - 99.44443456745 - 85.70608965926 - 64.3171217786 - 44.36460041182 - 32.95338522348 - 44.31460041182 - 33.12838522348 - 24.75298136325 - 19.273476213 - 15.97636612239 - 13.00317519799 - 10.70826764037 - 9.858997178816 - 10.15607036729 - 173.4616267494 - 134.015392655 - 114.827429421 - 98.14443456745 - 82.60608965926 - 60.2171217786 - 42.56460041182 - 30.75338522348 - 42.43960041182 - 31.47838522348 - 24.52798136325 - 19.723476213 - 13.83886612239 - 12.74067519799 - 10.89576764037 - 10.98399717882 - 8.243570367288 - 162.8616267494 - 136.115392655 - 107.027429421 - 91.74443456745 - 81.80608965926 - 63.5171217786 - 40.06460041182 - 29.75338522348 - 40.11460041182 - 30.35338522348 - 23.47798136325 - 17.998476213 - 14.92636612239 - 12.17817519799 - 10.10826764037 - 8.621497178816 - 8.093570367288 - 156.9616267494 - 124.415392655 - 104.027429421 - 90.04443456745 - 78.90608965926 - 62.0171217786 - 42.66460041182 - 29.75338522348 - 42.62710041182 - 30.50338522348 - 23.77798136325 - 18.673476213 - 15.26386612239 - 11.76567519799 - 10.33326764037 - 8.471497178816 - 7.756070367288 - 164.3616267494 - 105.627429421 - 48.8819398286 - 18.5818314891 - 19.2918314891 - 21.7318314891 - 7.60022447721 - 160.8616267494 - 103.127429421 - 49.5819398286 - 20.0118314891 - 18.9218314891 - 20.7918314891 - 5.91022447721 - 159.6616267494 - 104.227429421 - 49.2819398286 - 20.0418314891 - 17.9818314891 - 19.4818314891 - 7.41022447721 - 160.8616267494 - 106.727429421 - 50.1819398286 - 18.7318314891 - 7.60022447721 - 18.9218314891 - 19.2918314891 - 7.41022447721 - 159.6616267494 - 103.227429421 - 76.40608965926 - 48.3819398286 - 33.22986519451 - 33.59986519451 - 26.87225158213 - 20.1218314891 - 15.2111293494 - 11.73215109931 - 
8.64022447721 - 158.6616267494 - 105.227429421 - 77.40608965926 - 51.5819398286 - 31.02986519451 - 31.34986519451 - 27.36225158213 - 18.9218314891 - 14.3811293494 - 10.04215109931 - 9.39022447721 - 153.3616267494 - 102.727429421 - 77.10608965926 - 52.1819398286 - 32.02986519451 - 32.69986519451 - 29.05225158213 - 21.8118314891 - 12.1311293494 - 11.28215109931 - 12.09022447721 - 160.8616267494 - 123.115392655 - 105.227429421 - 92.54443456745 - 76.30608965926 - 58.8171217786 - 29.55338522348 - 30.42338522348 - 20.88548136325 - 15.143476213 - 12.14886612239 - 9.030675197988 - 7.853267640371 - 6.518997178816 - 5.881070367288 - 30.3618314891 - 31.9318314891 - 24.2418314891 - 17.9818314891 - 18.2418314891 - 22.2918314891 - 153.6616267494 - 97.927429421 - 76.60608965926 - 56.1819398286 - 40.82986519451 - 31.75225158213 - 40.64986519451 - 31.86225158213 - 23.8718314891 - 15.5111293494 - 14.80215109931 - 10.71022447721 - 163.5616267494 - 104.527429421 - 76.10608965926 - 45.3819398286 - 35.02986519451 - 25.75225158213 - 35.58986519451 - 26.72225158213 - 22.4118314891 - 12.7011293494 - 10.60215109931 - 10.48022447721 - 149.5616267494 - 102.427429421 - 69.40608965926 - 47.0819398286 - 36.72986519451 - 24.35225158213 - 37.01986519451 - 25.00225158213 - 19.5218314891 - 12.5811293494 - 9.93215109931 - 8.61022447721 - 19.8618314891 - 163.8616267494 - 107.727429421 - 51.8819398286 - 19.4818314891 - 6.84022447721 - 19.1118314891 - 7.03022447721 - 21.3618314891 - 20.7918314891 - 21.8118314891 - 38.76460041182 - 39.21460041182 - 164.5616267494 - 104.827429421 - 43.06460041182 - 43.07460041182 - 24.18548136325 - 14.77886612239 - 41.66460041182 - 41.95460041182 - 164.1616267494 - 104.127429421 - 23.66548136325 - 14.43886612239 - 42.16460041182 - 42.21460041182 - - 28.24095690087 - 21.59536620662 - 13.25712029593 - 19.07487076201 - 15.58993695617 - 8.655383440756 - 5.734068533914 - 4.165799626156 - 5.684068533914 - 4.340799626156 - 2.383275058766 - 1.39686578611 - 1.39855054723 - 
0.9316549828049 - 0.5908378210265 - 1.296279986221 - 2.850605037275 - 25.94095690087 - 19.79536620662 - 20.65712029593 - 17.77487076201 - 12.48993695617 - 4.555383440756 - 3.934068533914 - 1.965799626156 - 3.809068533914 - 2.690799626156 - 2.158275058766 - 1.84686578611 - -0.7389494527701 - 0.6691549828049 - 0.7783378210265 - 2.421279986221 - 0.9381050372751 - 15.34095690087 - 21.89536620662 - 12.85712029593 - 11.37487076201 - 11.68993695617 - 7.855383440756 - 1.434068533914 - 0.9657996261561 - 1.484068533914 - 1.565799626156 - 1.108275058766 - 0.12186578611 - 0.3485505472299 - 0.1066549828049 - -0.009162178973513 - 0.05877998622137 - 0.7881050372751 - 9.440956900867 - 10.19536620662 - 9.857120295928 - 9.674870762006 - 8.789936956172 - 6.355383440756 - 4.034068533914 - 0.9657996261561 - 3.996568533914 - 1.715799626156 - 1.408275058766 - 0.79686578611 - 0.6860505472299 - -0.3058450171951 - 0.2158378210265 - -0.09122001377863 - 0.4506050372751 - 16.84095690087 - 11.45712029593 - 3.061569786218 - -1.357147755765 - -0.6471477557654 - 1.792852244235 - 0.8347758927937 - 13.34095690087 - 8.957120295928 - 3.761569786218 - 0.07285224423464 - -1.017147755765 - 0.8528522442346 - -0.8552241072063 - 12.14095690087 - 10.05712029593 - 3.461569786218 - 0.1028522442346 - -1.957147755765 - -0.4571477557654 - 0.6447758927937 - 13.34095690087 - 12.55712029593 - 4.361569786218 - -1.207147755765 - 0.8347758927937 - -1.017147755765 - -0.6471477557654 - 0.6447758927937 - 12.14095690087 - 9.057120295928 - 6.289936956172 - 2.561569786218 - 0.09812922077451 - 0.4681292207745 - 1.601218748851 - 0.1828522442346 - 1.967848278564 - 2.434721057271 - 1.874775892794 - 11.14095690087 - 11.05712029593 - 7.289936956172 - 5.761569786218 - -2.101870779225 - -1.781870779225 - 2.091218748851 - -1.017147755765 - 1.137848278564 - 0.744721057271 - 2.624775892794 - 5.840956900867 - 8.557120295928 - 6.989936956172 - 6.361569786218 - -1.101870779225 - -0.4318707792255 - 3.781218748851 - 1.872852244235 - 
-1.112151721436 - 1.984721057271 - 5.324775892794 - 13.34095690087 - 8.895366206617 - 11.05712029593 - 12.17487076201 - 6.189936956172 - 3.155383440756 - 0.7657996261561 - 1.635799626156 - -1.484224941234 - -2.73313421389 - -2.42894945277 - -3.040845017195 - -2.264162178974 - -2.043720013779 - -1.424394962725 - 10.42285224423 - 11.99285224423 - 4.302852244235 - -1.957147755765 - -1.697147755765 - 2.352852244235 - 6.140956900867 - 3.757120295928 - 6.489936956172 - 10.36156978622 - 7.698129220775 - 6.481218748851 - 7.518129220775 - 6.591218748851 - 3.932852244235 - 2.267848278564 - 5.504721057271 - 3.944775892794 - 16.04095690087 - 10.35712029593 - 5.989936956172 - -0.4384302137822 - 1.898129220775 - 0.4812187488511 - 2.458129220775 - 1.451218748851 - 2.472852244235 - -0.5421517214358 - 1.304721057271 - 3.714775892794 - 2.040956900867 - 8.257120295928 - -0.7100630438283 - 1.261569786218 - 3.598129220775 - -0.9187812511489 - 3.888129220775 - -0.2687812511489 - -0.4171477557654 - -0.6621517214358 - 0.634721057271 - 1.844775892794 - -0.07714775576537 - 16.34095690087 - 13.55712029593 - 6.061569786218 - -0.4571477557654 - 0.07477589279366 - -0.8271477557654 - 0.2647758927937 - 1.422852244235 - 0.8528522442346 - 1.872852244235 - 0.1340685339144 - 0.5840685339144 - 17.04095690087 - 10.65712029593 - 4.434068533914 - 4.444068533914 - 1.815775058766 - 0.2010505472299 - 3.034068533914 - 3.324068533914 - 16.64095690087 - 9.957120295928 - 1.295775058766 - -0.1389494527701 - 3.534068533914 - 3.584068533914 - - 19.67905061421 - 12.78536491634 - 4.453409401868 - 10.42602658124 - 7.181651769754 - 0.8467383120783 - -0.8151544815029 - -1.28878727387 - -0.8651544815029 - -1.11378727387 - -2.178296849214 - -2.4437228135 - -1.857756175876 - -1.847417965917 - -1.795022214911 - -0.7628423028115 - 1.065115779582 - 17.37905061421 - 10.98536491634 - 11.85340940187 - 9.126026581237 - 4.081651769754 - -3.253261687922 - -2.615154481503 - -3.48878727387 - -2.740154481503 - -2.76378727387 - 
-2.403296849214 - -1.9937228135 - -3.995256175876 - -2.109917965917 - -1.607522214911 - 0.3621576971885 - -0.8473842204181 - 6.779050614207 - 13.08536491634 - 4.053409401868 - 2.726026581237 - 3.281651769754 - 0.04673831207827 - -5.115154481503 - -4.48878727387 - -5.065154481503 - -3.88878727387 - -3.453296849214 - -3.7187228135 - -2.907756175876 - -2.672417965917 - -2.395022214911 - -2.000342302812 - -0.9973842204181 - 0.8790506142075 - 1.385364916339 - 1.053409401868 - 1.026026581237 - 0.3816517697539 - -1.453261687922 - -2.515154481503 - -4.48878727387 - -2.552654481503 - -3.73878727387 - -3.153296849214 - -3.0437228135 - -2.570256175876 - -3.084917965917 - -2.170022214911 - -2.150342302812 - -1.334884220418 - 8.279050614207 - 2.653409401868 - -4.10628705125 - -5.539020595211 - -4.829020595211 - -2.389020595211 - -0.8305062664824 - 4.779050614207 - 0.1534094018679 - -3.40628705125 - -4.109020595211 - -5.199020595211 - -3.329020595211 - -2.520506266482 - 3.579050614207 - 1.253409401868 - -3.70628705125 - -4.079020595211 - -6.139020595211 - -4.639020595211 - -1.020506266482 - 4.779050614207 - 3.753409401868 - -2.80628705125 - -5.389020595211 - -0.8305062664824 - -5.199020595211 - -4.829020595211 - -1.020506266482 - 3.579050614207 - 0.2534094018679 - -2.118348230246 - -4.60628705125 - -5.8778638078 - -5.5078638078 - -3.383083895331 - -3.999020595211 - -1.038044411845 - 0.2196196759168 - 0.2094937335176 - 2.579050614207 - 2.253409401868 - -1.118348230246 - -1.40628705125 - -8.0778638078 - -7.7578638078 - -2.893083895331 - -5.199020595211 - -1.868044411845 - -1.470380324083 - 0.9594937335176 - -2.720949385793 - -0.2465905981321 - -1.418348230246 - -0.8062870512504 - -7.0778638078 - -6.4078638078 - -1.203083895331 - -2.309020595211 - -4.118044411845 - -0.2303803240832 - 3.659493733518 - 4.779050614207 - 0.0853649163389 - 2.253409401868 - 3.526026581237 - -2.218348230246 - -4.653261687922 - -4.68878727387 - -3.81878727387 - -6.045796849214 - -6.5737228135 - 
-5.685256175876 - -5.819917965917 - -4.650022214911 - -4.102842302812 - -3.209884220418 - 6.240979404789 - 7.810979404789 - 0.1209794047887 - -6.139020595211 - -5.879020595211 - -1.829020595211 - -2.420949385793 - -5.046590598132 - -1.918348230246 - 3.19371294875 - 1.7221361922 - 1.496916104669 - 1.5421361922 - 1.606916104669 - -0.2490205952113 - -0.738044411845 - 3.289619675917 - 2.279493733518 - 7.479050614207 - 1.553409401868 - -2.418348230246 - -7.60628705125 - -4.0778638078 - -4.503083895331 - -3.5178638078 - -3.533083895331 - -1.709020595211 - -3.548044411845 - -0.9103803240832 - 2.049493733518 - -6.520949385793 - -0.5465905981321 - -9.118348230246 - -5.90628705125 - -2.3778638078 - -5.903083895331 - -2.0878638078 - -5.253083895331 - -4.599020595211 - -3.668044411845 - -1.580380324083 - 0.1794937335176 - -4.259020595211 - 7.779050614207 - 4.753409401868 - -1.10628705125 - -4.639020595211 - -1.590506266482 - -5.009020595211 - -1.400506266482 - -2.759020595211 - -3.329020595211 - -2.309020595211 - -6.415154481503 - -5.965154481503 - 8.479050614207 - 1.853409401868 - -2.115154481503 - -2.105154481503 - -2.745796849214 - -3.055256175876 - -3.515154481503 - -3.225154481503 - 8.079050614207 - 1.153409401868 - -3.265796849214 - -3.395256175876 - -3.015154481503 - -2.965154481503 - - 1050.519509418 - 2633.521525076 - -3381.282955438 - -933.4604542894 - -516.3685328612 - -257.5208155114 - -113.8324082065 - -66.57519632661 - -113.8824082065 - -66.40019632661 - -44.86075536786 - -32.39601556788 - -23.86432104696 - -18.5440484742 - -14.76646254613 - -11.02441930035 - -7.169273972705 - 1048.219509418 - 2631.721525076 - -3373.882955438 - -934.7604542894 - -519.4685328612 - -261.6208155114 - -115.6324082065 - -68.77519632661 - -115.7574082065 - -68.05019632661 - -45.08575536786 - -31.94601556788 - -26.00182104696 - -18.8065484742 - -14.57896254613 - -9.899419300347 - -9.081773972705 - 1037.619509418 - 2633.821525076 - -3381.682955438 - -941.1604542894 - -520.2685328612 - 
-258.3208155114 - -118.1324082065 - -69.77519632661 - -118.0824082065 - -69.17519632661 - -46.13575536786 - -33.67101556788 - -24.91432104696 - -19.3690484742 - -15.36646254613 - -12.26191930035 - -9.231773972705 - 1031.719509418 - 2622.121525076 - -3384.682955438 - -942.8604542894 - -523.1685328612 - -259.8208155114 - -115.5324082065 - -69.77519632661 - -115.5699082065 - -69.02519632661 - -45.83575536786 - -32.99601556788 - -24.57682104696 - -19.7815484742 - -15.14146254613 - -12.41191930035 - -9.569273972705 - 1039.119509418 - -3383.082955438 - -165.9142242952 - -41.04746332279 - -40.33746332279 - -37.89746332279 - -8.239694706637 - 1035.619509418 - -3385.582955438 - -165.2142242952 - -39.61746332279 - -40.70746332279 - -38.83746332279 - -9.929694706637 - 1034.419509418 - -3384.482955438 - -165.5142242952 - -39.58746332279 - -41.64746332279 - -40.14746332279 - -8.429694706637 - 1035.619509418 - -3381.982955438 - -164.6142242952 - -40.89746332279 - -8.239694706637 - -40.70746332279 - -40.33746332279 - -8.429694706637 - 1034.419509418 - -3385.482955438 - -525.6685328612 - -166.4142242952 - -89.99572115575 - -89.62572115575 - -55.58585272365 - -39.50746332279 - -20.14157855679 - -11.29463725292 - -7.199694706637 - 1033.419509418 - -3383.482955438 - -524.6685328612 - -163.2142242952 - -92.19572115575 - -91.87572115575 - -55.09585272365 - -40.70746332279 - -20.97157855679 - -12.98463725292 - -6.449694706637 - 1028.119509418 - -3385.982955438 - -524.9685328612 - -162.6142242952 - -91.19572115575 - -90.52572115575 - -53.40585272365 - -37.81746332279 - -23.22157855679 - -11.74463725292 - -3.749694706637 - 1035.619509418 - 2620.821525076 - -3383.482955438 - -940.3604542894 - -525.7685328612 - -263.0208155114 - -69.97519632661 - -69.10519632661 - -48.72825536786 - -36.52601556788 - -27.69182104696 - -22.5165484742 - -17.62146254613 - -14.36441930035 - -11.4442739727 - -29.26746332279 - -27.69746332279 - -35.38746332279 - -41.64746332279 - -41.38746332279 - -37.33746332279 
- 1028.419509418 - -3390.782955438 - -525.4685328612 - -158.6142242952 - -82.39572115575 - -50.70585272365 - -82.57572115575 - -50.59585272365 - -35.75746332279 - -19.84157855679 - -8.224637252918 - -5.129694706637 - 1038.319509418 - -3384.182955438 - -525.9685328612 - -169.4142242952 - -88.19572115575 - -56.70585272365 - -87.63572115575 - -55.73585272365 - -37.21746332279 - -22.65157855679 - -12.42463725292 - -5.359694706637 - 1024.319509418 - -3386.282955438 - -532.6685328612 - -167.7142242952 - -86.49572115575 - -58.10585272365 - -86.20572115575 - -57.45585272365 - -40.10746332279 - -22.77157855679 - -13.09463725292 - -7.229694706637 - -39.76746332279 - 1038.619509418 - -3380.982955438 - -162.9142242952 - -40.14746332279 - -8.999694706637 - -40.51746332279 - -8.809694706637 - -38.26746332279 - -38.83746332279 - -37.81746332279 - -119.4324082065 - -118.9824082065 - 1039.319509418 - -3383.882955438 - -115.1324082065 - -115.1224082065 - -45.42825536786 - -25.06182104696 - -116.5324082065 - -116.2424082065 - 1038.919509418 - -3384.582955438 - -45.94825536786 - -25.40182104696 - -116.0324082065 - -115.9824082065 - - 20.49103808147 - 13.5209126197 - 5.123721658539 - 11.04005890825 - 7.746672181034 - 1.330664211629 - -0.447396135285 - -0.9996323863606 - -0.497396135285 - -0.8246323863606 - -1.945368320316 - -2.252654921573 - -1.698804957691 - -1.71369037214 - -1.681477631675 - -0.6656948221704 - 1.148772894321 - 18.19103808147 - 11.7209126197 - 12.52372165854 - 9.740058908254 - 4.646672181034 - -2.769335788371 - -2.247396135285 - -3.199632386361 - -2.372396135285 - -2.474632386361 - -2.170368320316 - -1.802654921573 - -3.836304957691 - -1.97619037214 - -1.493977631675 - 0.4593051778296 - -0.7637271056786 - 7.591038081469 - 13.8209126197 - 4.723721658539 - 3.340058908254 - 3.846672181034 - 0.5306642116288 - -4.747396135285 - -4.199632386361 - -4.697396135285 - -3.599632386361 - -3.220368320316 - -3.527654921573 - -2.748804957691 - -2.53869037214 - -2.281477631675 - 
-1.90319482217 - -0.9137271056786 - 1.691038081469 - 2.120912619699 - 1.723721658539 - 1.640058908254 - 0.9466721810337 - -0.9693357883712 - -2.147396135285 - -4.199632386361 - -2.184896135285 - -3.449632386361 - -2.920368320316 - -2.852654921573 - -2.411304957691 - -2.95119037214 - -2.056477631675 - -2.05319482217 - -1.251227105679 - 9.091038081469 - 3.323721658539 - -3.686566865047 - -5.328474711208 - -4.618474711208 - -2.178474711208 - -0.7527095065441 - 5.591038081469 - 0.8237216585391 - -2.986566865047 - -3.898474711208 - -4.988474711208 - -3.118474711208 - -2.442709506544 - 4.391038081469 - 1.923721658539 - -3.286566865047 - -3.868474711208 - -5.928474711208 - -4.428474711208 - -0.9427095065441 - 5.591038081469 - 4.423721658539 - -2.386566865047 - -5.178474711208 - -0.7527095065441 - -4.988474711208 - -4.618474711208 - -0.9427095065441 - 4.391038081469 - 0.9237216585391 - -1.553327818966 - -4.186566865047 - -5.552915024831 - -5.182915024831 - -3.124230683974 - -3.788474711208 - -0.892433332114 - 0.3245543770619 - 0.2872904934559 - 3.391038081469 - 2.923721658539 - -0.5533278189663 - -0.9865668650472 - -7.752915024831 - -7.432915024831 - -2.634230683974 - -4.988474711208 - -1.722433332114 - -1.365445622938 - 1.037290493456 - -1.908961918531 - 0.4237216585391 - -0.8533278189662 - -0.3865668650472 - -6.752915024831 - -6.082915024831 - -0.9442306839744 - -2.098474711208 - -3.972433332114 - -0.1254456229381 - 3.737290493456 - 5.591038081469 - 0.8209126196993 - 2.923721658539 - 4.140058908254 - -1.653327818966 - -4.169335788371 - -4.399632386361 - -3.529632386361 - -5.812868320316 - -6.382654921573 - -5.526304957691 - -5.68619037214 - -4.536477631675 - -4.00569482217 - -3.126227105679 - 6.451525288792 - 8.021525288792 - 0.3315252887919 - -5.928474711208 - -5.668474711208 - -1.618474711208 - -1.608961918531 - -4.376278341461 - -1.353327818966 - 3.613433134953 - 2.047084975169 - 1.755769316026 - 1.867084975169 - 1.865769316026 - -0.03847471120814 - -0.592433332114 - 
3.394554377062 - 2.357290493456 - 8.291038081469 - 2.223721658539 - -1.853327818966 - -7.186566865047 - -3.752915024831 - -4.244230683974 - -3.192915024831 - -3.274230683974 - -1.498474711208 - -3.402433332114 - -0.8054456229381 - 2.127290493456 - -5.708961918531 - 0.1237216585391 - -8.553327818966 - -5.486566865047 - -2.052915024831 - -5.644230683974 - -1.762915024831 - -4.994230683974 - -4.388474711208 - -3.522433332114 - -1.475445622938 - 0.2572904934559 - -4.048474711208 - 8.591038081469 - 5.423721658539 - -0.6865668650472 - -4.428474711208 - -1.512709506544 - -4.798474711208 - -1.322709506544 - -2.548474711208 - -3.118474711208 - -2.098474711208 - -6.047396135285 - -5.597396135285 - 9.291038081469 - 2.523721658539 - -1.747396135285 - -1.737396135285 - -2.512868320316 - -2.896304957691 - -3.147396135285 - -2.857396135285 - 8.891038081469 - 1.823721658539 - -3.032868320316 - -3.236304957691 - -2.647396135285 - -2.597396135285 - - 11.34572446599 - 12.71016226644 - 9.103954561983 - 17.84889991643 - 16.22846806054 - 11.28579424593 - 9.470899876731 - 7.817399827028 - 9.420899876731 - 7.992399827028 - 5.668390695887 - 4.274674199655 - 3.897111252284 - 3.096292463526 - 2.467532445354 - 2.926528936865 - 4.270267329615 - 9.045724465995 - 10.91016226644 - 16.50395456198 - 16.54889991643 - 13.12846806054 - 7.185794245929 - 7.670899876731 - 5.617399827028 - 7.545899876731 - 6.342399827028 - 5.443390695887 - 4.724674199655 - 1.759611252284 - 2.833792463526 - 2.655032445354 - 4.051528936865 - 2.357767329615 - -1.554275534005 - 13.01016226644 - 8.703954561983 - 10.14889991643 - 12.32846806054 - 10.48579424593 - 5.170899876731 - 4.617399827028 - 5.220899876731 - 5.217399827028 - 4.393390695887 - 2.999674199655 - 2.847111252284 - 2.271292463526 - 1.867532445354 - 1.689028936865 - 2.207767329615 - -7.454275534005 - 1.310162266438 - 5.703954561983 - 8.448899916427 - 9.428468060537 - 8.985794245929 - 7.770899876731 - 4.617399827028 - 7.733399876731 - 5.367399827028 - 
4.693390695887 - 3.674674199655 - 3.184611252284 - 1.858792463526 - 2.092532445354 - 1.539028936865 - 1.870267329615 - -0.05427553400547 - 7.303954561983 - 6.51450668858 - 1.723185425401 - 2.433185425401 - 4.873185425401 - 2.160876526885 - -3.554275534005 - 4.803954561983 - 7.21450668858 - 3.153185425401 - 2.063185425401 - 3.933185425401 - 0.4708765268852 - -4.754275534005 - 5.903954561983 - 6.91450668858 - 3.183185425401 - 1.123185425401 - 2.623185425401 - 1.970876526885 - -3.554275534005 - 8.403954561983 - 7.81450668858 - 1.873185425401 - 2.160876526885 - 2.063185425401 - 2.433185425401 - 1.970876526885 - -4.754275534005 - 4.903954561983 - 6.928468060537 - 6.01450668858 - 3.857070725448 - 4.227070725448 - 5.083097819871 - 3.263185425401 - 4.293542360661 - 4.18335415557 - 3.200876526885 - -5.754275534005 - 6.903954561983 - 7.928468060537 - 9.21450668858 - 1.657070725448 - 1.977070725448 - 5.573097819871 - 2.063185425401 - 3.463542360661 - 2.49335415557 - 3.950876526885 - -11.05427553401 - 4.403954561983 - 7.628468060537 - 9.81450668858 - 2.657070725448 - 3.327070725448 - 7.263097819871 - 4.953185425401 - 1.213542360661 - 3.73335415557 - 6.650876526885 - -3.554275534005 - 0.01016226643847 - 6.903954561983 - 10.94889991643 - 6.828468060537 - 5.785794245929 - 4.417399827028 - 5.287399827028 - 1.800890695887 - 0.1446741996551 - 0.06961125228437 - -0.8762075364739 - -0.3874675546463 - -0.4134710631349 - -0.004732670384874 - 13.5031854254 - 15.0731854254 - 7.383185425401 - 1.123185425401 - 1.383185425401 - 5.433185425401 - -10.75427553401 - -0.3960454380168 - 7.128468060537 - 13.81450668858 - 11.45707072545 - 9.963097819871 - 11.27707072545 - 10.07309781987 - 7.013185425401 - 4.593542360661 - 7.25335415557 - 5.270876526885 - -0.8542755340055 - 6.203954561983 - 6.628468060537 - 3.01450668858 - 5.657070725448 - 3.963097819871 - 6.217070725448 - 4.933097819871 - 5.553185425401 - 1.783542360661 - 3.05335415557 - 5.040876526885 - -14.85427553401 - 4.103954561983 - 
-0.07153193946291 - 4.71450668858 - 7.357070725448 - 2.563097819871 - 7.647070725448 - 3.213097819871 - 2.663185425401 - 1.663542360661 - 2.38335415557 - 3.170876526885 - 3.003185425401 - -0.5542755340055 - 9.403954561983 - 9.51450668858 - 2.623185425401 - 1.400876526885 - 2.253185425401 - 1.590876526885 - 4.503185425401 - 3.933185425401 - 4.953185425401 - 3.870899876731 - 4.320899876731 - 0.1457244659945 - 6.503954561983 - 8.170899876731 - 8.180899876731 - 5.100890695887 - 2.699611252284 - 6.770899876731 - 7.060899876731 - -0.2542755340055 - 5.803954561983 - 4.580890695887 - 2.359611252284 - 7.270899876731 - 7.320899876731 - - 16.53673795362 - 12.64348483951 - 6.243066148003 - 13.47083954897 - 11.03975774331 - 5.537722754593 - 4.09957193688 - 3.220802215329 - 4.04957193688 - 3.395802215329 - 1.800447382832 - 1.020568885813 - 1.147168191638 - 0.7592201452716 - 0.470019408067 - 1.210124173449 - 2.788233035035 - 14.23673795362 - 10.84348483951 - 13.643066148 - 12.17083954897 - 7.939757743311 - 1.437722754593 - 2.29957193688 - 1.020802215329 - 2.17457193688 - 1.745802215329 - 1.575447382832 - 1.470568885813 - -0.9903318083618 - 0.4967201452716 - 0.657519408067 - 2.335124173449 - 0.8757330350355 - 3.636737953619 - 12.94348483951 - 5.843066148002 - 5.770839548972 - 7.139757743311 - 4.737722754593 - -0.2004280631201 - 0.02080221532934 - -0.1504280631201 - 0.6208022153293 - 0.525447382832 - -0.2544311141866 - 0.09716819163822 - -0.06577985472837 - -0.129980591933 - -0.02737582655083 - 0.7257330350355 - -2.263262046381 - 1.24348483951 - 2.843066148002 - 4.070839548972 - 4.239757743311 - 3.237722754593 - 2.39957193688 - 0.02080221532934 - 2.36207193688 - 0.7708022153293 - 0.825447382832 - 0.4205688858134 - 0.4346681916382 - -0.4782798547284 - 0.09501940806695 - -0.1773758265508 - 0.3882330350355 - 5.136737953619 - 4.443066148003 - 0.8383235109659 - -1.82318329708 - -1.11318329708 - 1.32681670292 - 0.7814350074993 - 1.636737953619 - 1.943066148003 - 1.538323510966 - 
-0.39318329708 - -1.48318329708 - 0.38681670292 - -0.9085649925007 - 0.4367379536194 - 3.043066148003 - 1.238323510966 - -0.36318329708 - -2.42318329708 - -0.92318329708 - 0.5914350074993 - 1.636737953619 - 5.543066148003 - 2.138323510966 - -1.67318329708 - 0.7814350074993 - -1.48318329708 - -1.11318329708 - 0.5914350074993 - 0.4367379536194 - 2.043066148003 - 1.739757743311 - 0.3383235109659 - -1.132711355225 - -0.7627113552248 - 0.8640180395549 - -0.28318329708 - 1.760258757134 - 2.332901512346 - 1.821435007499 - -0.5632620463806 - 4.043066148003 - 2.739757743311 - 3.538323510966 - -3.332711355225 - -3.012711355225 - 1.354018039555 - -1.48318329708 - 0.9302587571339 - 0.6429015123457 - 2.571435007499 - -5.863262046381 - 1.543066148003 - 2.439757743311 - 4.138323510966 - -2.332711355225 - -1.662711355225 - 3.044018039555 - 1.40681670292 - -1.319741242866 - 1.882901512346 - 5.271435007499 - 1.636737953619 - -0.05651516048991 - 4.043066148003 - 6.570839548972 - 1.639757743311 - 0.03772275459342 - -0.1791977846707 - 0.6908022153293 - -2.067052617168 - -3.109431114187 - -2.680331808362 - -3.213279854728 - -2.384980591933 - -2.129875826551 - -1.486766964965 - 9.95681670292 - 11.52681670292 - 3.83681670292 - -2.42318329708 - -2.16318329708 - 1.88681670292 - -5.563262046381 - -3.256933851997 - 1.939757743311 - 8.138323510966 - 6.467288644775 - 5.744018039555 - 6.287288644775 - 5.854018039555 - 3.46681670292 - 2.060258757134 - 5.402901512346 - 3.891435007499 - 4.336737953619 - 3.343066148002 - 1.439757743311 - -2.661676489034 - 0.6672886447752 - -0.2559819604451 - 1.227288644775 - 0.7140180395549 - 2.00681670292 - -0.7497412428661 - 1.202901512346 - 3.661435007499 - -9.663262046381 - 1.243066148003 - -5.260242256689 - -0.9616764890341 - 2.367288644775 - -1.655981960445 - 2.657288644775 - -1.005981960445 - -0.88318329708 - -0.8697412428661 - 0.5329015123457 - 1.791435007499 - -0.54318329708 - 4.636737953619 - 6.543066148003 - 3.838323510966 - -0.92318329708 - 
0.02143500749933 - -1.29318329708 - 0.2114350074993 - 0.95681670292 - 0.38681670292 - 1.40681670292 - -1.50042806312 - -1.05042806312 - 5.336737953619 - 3.643066148003 - 2.79957193688 - 2.80957193688 - 1.232947382832 - -0.05033180836178 - 1.39957193688 - 1.68957193688 - 4.936737953619 - 2.943066148003 - 0.712947382832 - -0.3903318083618 - 1.89957193688 - 1.94957193688 history_x: - - 0.15 - 0.008 - 0.01 - - 0.25 - 0.008 - 0.01 - - 0.15 - 0.108 - 0.01 - - 0.15 - 0.008 - 0.11 - - 0.1596177824551 - -0.07539624732067 - 0.08766385239892 - - 0.2 - 0.008531162120637 - -0.002952684076318 - - 0.1505141617677 - -0.04199731338289 - 0.009934485345754 - - 0.1374618789969 - 0.007934485345754 - -0.03840238867598 - - 0.1505250437069 - 0.007964908595663 - 0.01275913089388 - - 0.149883507892 - 0.008098080768719 - 0.009146244784311 - - 0.1716712756093 - -0.003385426549061 - 0.004854131368058 - - 0.1499498551576 - 0.008185153997901 - 0.009255435636305 - - 0.1486949409413 - 0.001680047032405 - 0.01940631659429 - - 0.1494212312914 - 0.005607806220598 - 0.01308958287811 interpolation_set_expected: - - 0.0 - 0.0 - 0.0 - - -0.0581032280076 - -0.3142207350554 - 0.5053386972944 - - 0.04228990929916 - 0.2061878221842 - -0.3067317793442 - - 1.780003545435 - -0.7194586215727 - -0.658836120804 - - 0.03698212804807 - 0.1992219638497 - -0.3154670475037 - - 0.08830499324404 - 0.1885681900052 - -0.02643615873801 - - 0.0874344381026 - -3.808409568279 - -0.2524078025883 linear_terms_residual_model: - - 723.7257702007 - 1733.358000128 - -2111.94123313 - -558.4561073307 - -289.1782671082 - -123.1292919174 - -32.05431142241 - 7.210065582227 - -32.05431142241 - 7.210065582217 - 128.5030418743 - -61.94075414453 - -28.86164804598 - 59.16327636 - -6.773101689526 - -5.359113834066 - -3.97310556824 - 723.7257702007 - 1733.358000128 - -2111.94123313 - -558.4561073307 - -289.1782671082 - -123.1292919174 - -32.0543114224 - 7.210065582226 - -32.0543114224 - 7.210065582223 - 128.5030418743 - -61.94075414453 - 
-28.86164804599 - 59.16327636 - -6.773101689526 - -5.359113834064 - -3.97310556824 - 723.7257702008 - 1733.358000128 - -2111.94123313 - -558.4561073307 - -289.1782671082 - -123.1292919174 - -32.0543114224 - 7.210065582221 - -32.0543114224 - 7.210065582222 - 128.5030418743 - -61.94075414453 - -28.86164804599 - 59.16327636 - -6.773101689525 - -5.359113834065 - -3.97310556824 - 723.7257702007 - 1733.358000128 - -2111.94123313 - -558.4561073307 - -289.1782671082 - -123.1292919174 - -32.05431142241 - 7.210065582221 - -32.0543114224 - 7.210065582226 - 128.5030418743 - -61.94075414453 - -28.86164804599 - 59.16327636 - -6.773101689524 - -5.359113834066 - -3.97310556824 - 723.7257702007 - -2111.94123313 - -63.20258122701 - -187.067690334 - -187.067690334 - -187.067690334 - -3.412910016525 - 723.7257702008 - -2111.94123313 - -63.20258122701 - -187.067690334 - -187.067690334 - -187.067690334 - -3.412910016525 - 723.7257702007 - -2111.94123313 - -63.20258122701 - -187.067690334 - -187.067690334 - -187.067690334 - -3.412910016525 - 723.7257702008 - -2111.94123313 - -63.20258122702 - -187.067690334 - -3.412910016525 - -187.067690334 - -187.067690334 - -3.412910016525 - 723.7257702007 - -2111.94123313 - -289.1782671082 - -63.20258122702 - -11.3061006031 - -11.3061006031 - 33.45212714558 - -187.067690334 - -26.50116734208 - -6.14459279585 - -3.412910016526 - 723.7257702007 - -2111.94123313 - -289.1782671082 - -63.202581227 - -11.30610060311 - -11.3061006031 - 33.45212714558 - -187.067690334 - -26.50116734208 - -6.14459279585 - -3.412910016526 - 723.7257702007 - -2111.94123313 - -289.1782671082 - -63.20258122701 - -11.30610060311 - -11.30610060311 - 33.45212714559 - -187.067690334 - -26.50116734207 - -6.144592795851 - -3.412910016524 - 723.7257702008 - 1733.358000128 - -2111.94123313 - -558.4561073307 - -289.1782671082 - -123.1292919174 - 7.210065582226 - 7.210065582224 - 128.5030418743 - -61.94075414453 - -28.86164804599 - 59.16327636 - -6.773101689526 - -5.359113834066 - 
-3.973105568241 - -187.067690334 - -187.067690334 - -187.067690334 - -187.067690334 - -187.067690334 - -187.067690334 - 723.7257702007 - -2111.94123313 - -289.1782671082 - -63.20258122702 - -11.30610060312 - 33.45212714558 - -11.30610060311 - 33.45212714559 - -187.067690334 - -26.50116734208 - -6.14459279585 - -3.412910016525 - 723.7257702007 - -2111.94123313 - -289.1782671082 - -63.20258122701 - -11.30610060312 - 33.45212714558 - -11.30610060311 - 33.45212714558 - -187.067690334 - -26.50116734208 - -6.14459279585 - -3.412910016526 - 723.7257702007 - -2111.94123313 - -289.1782671082 - -63.20258122701 - -11.30610060311 - 33.45212714559 - -11.30610060312 - 33.45212714558 - -187.067690334 - -26.50116734208 - -6.14459279585 - -3.412910016527 - -187.067690334 - 723.7257702008 - -2111.94123313 - -63.20258122702 - -187.067690334 - -3.412910016525 - -187.067690334 - -3.412910016526 - -187.067690334 - -187.067690334 - -187.067690334 - -32.05431142241 - -32.05431142242 - 723.7257702007 - -2111.94123313 - -32.05431142241 - -32.05431142241 - 128.5030418743 - -28.86164804599 - -32.05431142241 - -32.05431142241 - 723.7257702007 - -2111.94123313 - 128.5030418743 - -28.86164804599 - -32.05431142241 - -32.05431142241 - - -250.9049856979 - -713.548298078 - 991.3297761498 - 292.2346826038 - 167.0701872133 - 85.53108774506 - 35.98074162601 - 13.74284457971 - 35.98074162601 - 13.74284457972 - -38.33093307754 - 33.90567353701 - 18.14259771035 - 14.18583175432 - 9.62399002556 - 7.529398915525 - 6.047825533545 - -250.9049856979 - -713.548298078 - 991.3297761498 - 292.2346826038 - 167.0701872133 - 85.53108774507 - 35.980741626 - 13.74284457971 - 35.980741626 - 13.74284457971 - -38.33093307755 - 33.90567353701 - 18.14259771035 - 14.18583175432 - 9.62399002556 - 7.529398915524 - 6.047825533545 - -250.904985698 - -713.5482980779 - 991.3297761498 - 292.2346826038 - 167.0701872133 - 85.53108774506 - 35.980741626 - 13.74284457971 - 35.980741626 - 13.74284457971 - -38.33093307754 - 33.90567353701 
- 18.14259771035 - 14.18583175432 - 9.62399002556 - 7.529398915524 - 6.047825533545 - -250.9049856979 - -713.5482980779 - 991.3297761498 - 292.2346826038 - 167.0701872133 - 85.53108774506 - 35.98074162601 - 13.74284457971 - 35.980741626 - 13.74284457971 - -38.33093307754 - 33.90567353701 - 18.14259771035 - 14.18583175432 - 9.62399002556 - 7.529398915525 - 6.047825533545 - -250.9049856979 - 991.3297761498 - 53.62515459183 - 84.70726176104 - 84.70726176104 - 84.70726176104 - 5.458199688096 - -250.904985698 - 991.3297761498 - 53.62515459184 - 84.70726176104 - 84.70726176104 - 84.70726176104 - 5.458199688096 - -250.9049856979 - 991.3297761499 - 53.62515459184 - 84.70726176104 - 84.70726176104 - 84.70726176104 - 5.458199688096 - -250.904985698 - 991.3297761499 - 53.62515459184 - 84.70726176104 - 5.458199688096 - 84.70726176104 - 84.70726176104 - 5.458199688096 - -250.9049856979 - 991.3297761499 - 167.0701872133 - 53.62515459184 - 23.96018695733 - 23.96018695733 - 1.082529471011 - 84.70726176104 - 14.82949233042 - 8.477216658368 - 5.458199688097 - -250.9049856979 - 991.3297761499 - 167.0701872133 - 53.62515459183 - 23.96018695733 - 23.96018695733 - 1.082529471012 - 84.70726176104 - 14.82949233042 - 8.477216658368 - 5.458199688097 - -250.9049856979 - 991.3297761499 - 167.0701872133 - 53.62515459184 - 23.96018695733 - 23.96018695733 - 1.082529471004 - 84.70726176104 - 14.82949233042 - 8.477216658369 - 5.458199688096 - -250.904985698 - -713.548298078 - 991.3297761499 - 292.2346826038 - 167.0701872133 - 85.53108774506 - 13.74284457971 - 13.74284457971 - -38.33093307754 - 33.90567353701 - 18.14259771035 - 14.18583175432 - 9.623990025561 - 7.529398915525 - 6.047825533545 - 84.70726176104 - 84.70726176104 - 84.70726176104 - 84.70726176104 - 84.70726176104 - 84.70726176104 - -250.904985698 - 991.3297761498 - 167.0701872133 - 53.62515459184 - 23.96018695734 - 1.08252947101 - 23.96018695733 - 1.082529471007 - 84.70726176104 - 14.82949233042 - 8.477216658368 - 5.458199688096 - 
-250.9049856979 - 991.3297761498 - 167.0701872133 - 53.62515459184 - 23.96018695734 - 1.082529471008 - 23.96018695733 - 1.08252947101 - 84.70726176104 - 14.82949233042 - 8.477216658368 - 5.458199688096 - -250.904985698 - 991.3297761499 - 167.0701872133 - 53.62515459184 - 23.96018695733 - 1.082529471007 - 23.96018695734 - 1.082529471008 - 84.70726176104 - 14.82949233042 - 8.477216658368 - 5.458199688097 - 84.70726176104 - -250.904985698 - 991.3297761499 - 53.62515459184 - 84.70726176104 - 5.458199688096 - 84.70726176104 - 5.458199688096 - 84.70726176104 - 84.70726176104 - 84.70726176104 - 35.98074162601 - 35.98074162601 - -250.904985698 - 991.3297761498 - 35.98074162601 - 35.98074162601 - -38.33093307755 - 18.14259771035 - 35.98074162601 - 35.98074162601 - -250.9049856979 - 991.3297761498 - -38.33093307754 - 18.14259771035 - 35.98074162601 - 35.98074162601 - - -86.32920671897 - -249.9457650428 - 379.8255147156 - 124.7291668319 - 79.62656596282 - 49.85277513245 - 29.83406116421 - 19.43979000176 - 29.83406116421 - 19.43979000176 - -0.5081983391073 - 21.74505371124 - 14.64616559016 - 20.52866927178 - 10.12254826871 - 8.345670744827 - 7.034149386141 - -86.32920671897 - -249.9457650428 - 379.8255147156 - 124.7291668319 - 79.62656596282 - 49.85277513245 - 29.83406116421 - 19.43979000176 - 29.83406116421 - 19.43979000176 - -0.5081983391093 - 21.74505371124 - 14.64616559016 - 20.52866927178 - 10.12254826871 - 8.345670744827 - 7.034149386141 - -86.32920671898 - -249.9457650428 - 379.8255147156 - 124.7291668319 - 79.62656596282 - 49.85277513245 - 29.83406116421 - 19.43979000176 - 29.83406116421 - 19.43979000176 - -0.5081983391052 - 21.74505371124 - 14.64616559016 - 20.52866927178 - 10.12254826871 - 8.345670744827 - 7.034149386141 - -86.32920671897 - -249.9457650428 - 379.8255147156 - 124.7291668319 - 79.62656596282 - 49.85277513245 - 29.83406116421 - 19.43979000176 - 29.83406116421 - 19.43979000176 - -0.5081983391036 - 21.74505371124 - 14.64616559016 - 20.52866927178 - 
10.12254826871 - 8.345670744828 - 7.034149386141 - -86.32920671897 - 379.8255147156 - 37.38830735093 - 39.79428584982 - 39.79428584982 - 39.79428584981 - 6.487537441591 - -86.32920671898 - 379.8255147156 - 37.38830735093 - 39.79428584981 - 39.79428584982 - 39.79428584982 - 6.487537441591 - -86.32920671897 - 379.8255147156 - 37.38830735093 - 39.79428584982 - 39.79428584982 - 39.79428584982 - 6.487537441591 - -86.32920671898 - 379.8255147156 - 37.38830735094 - 39.79428584982 - 6.487537441591 - 39.79428584982 - 39.79428584982 - 6.487537441591 - -86.32920671897 - 379.8255147156 - 79.62656596282 - 37.38830735093 - 24.27471941816 - 24.27471941816 - 13.90491560132 - 39.79428584982 - 12.42064972558 - 9.15250698783 - 6.487537441591 - -86.32920671897 - 379.8255147156 - 79.62656596282 - 37.38830735093 - 24.27471941816 - 24.27471941816 - 13.90491560132 - 39.79428584982 - 12.42064972558 - 9.15250698783 - 6.487537441591 - -86.32920671896 - 379.8255147156 - 79.62656596282 - 37.38830735093 - 24.27471941816 - 24.27471941816 - 13.90491560132 - 39.79428584981 - 12.42064972558 - 9.152506987831 - 6.487537441591 - -86.32920671898 - -249.9457650428 - 379.8255147156 - 124.7291668319 - 79.62656596282 - 49.85277513245 - 19.43979000176 - 19.43979000176 - -0.5081983391036 - 21.74505371124 - 14.64616559016 - 20.52866927178 - 10.12254826871 - 8.345670744827 - 7.034149386141 - 39.79428584981 - 39.79428584981 - 39.79428584981 - 39.79428584982 - 39.79428584981 - 39.79428584982 - -86.32920671897 - 379.8255147156 - 79.62656596282 - 37.38830735094 - 24.27471941817 - 13.90491560132 - 24.27471941816 - 13.90491560132 - 39.79428584982 - 12.42064972558 - 9.15250698783 - 6.487537441591 - -86.32920671897 - 379.8255147156 - 79.62656596282 - 37.38830735093 - 24.27471941817 - 13.90491560132 - 24.27471941816 - 13.90491560132 - 39.79428584981 - 12.42064972558 - 9.15250698783 - 6.487537441591 - -86.32920671897 - 379.8255147156 - 79.62656596282 - 37.38830735093 - 24.27471941816 - 13.90491560132 - 24.27471941817 - 
13.90491560132 - 39.79428584982 - 12.42064972558 - 9.15250698783 - 6.487537441591 - 39.79428584982 - -86.32920671898 - 379.8255147156 - 37.38830735094 - 39.79428584982 - 6.487537441591 - 39.79428584981 - 6.487537441591 - 39.79428584982 - 39.79428584982 - 39.79428584981 - 29.83406116421 - 29.83406116421 - -86.32920671897 - 379.8255147156 - 29.83406116421 - 29.83406116421 - -0.5081983391078 - 14.64616559016 - 29.83406116421 - 29.83406116421 - -86.32920671897 - 379.8255147156 - -0.5081983391052 - 14.64616559016 - 29.83406116421 - 29.83406116421 model_indices: - 13 - 12 - 11 - 10 - 9 - 8 - 6 n_modelpoints: 7 residuals: - 14.13989818606 - 10.00317449361 - 3.744918552079 - 10.77980934854 - 8.347596395984 - 2.920071415924 - 1.796288999298 - 1.248596846502 - 1.746288999298 - 1.423596846502 - 0.03689425329096 - -0.3219611012974 - 0.0002647009432151 - -0.881408646043 - -0.4303702662499 - 0.4401190678521 - 2.121655004644 - 11.83989818606 - 8.203174493607 - 11.14491855208 - 9.479809348539 - 5.247596395983 - -1.179928584076 - -0.003711000701976 - -0.9514031534983 - -0.128711000702 - -0.2264031534983 - -0.188105746709 - 0.1280388987026 - -2.137235299057 - -1.143908646043 - -0.2428702662499 - 1.565119067852 - 0.2091550046441 - 1.239898186057 - 10.30317449361 - 3.344918552079 - 3.079809348539 - 4.447596395984 - 2.120071415924 - -2.503711000702 - -1.951403153498 - -2.453711000702 - -1.351403153498 - -1.238105746709 - -1.596961101297 - -1.049735299057 - -1.706408646043 - -1.03037026625 - -0.7973809321479 - 0.05915500464407 - -4.660101813943 - -1.396825506394 - 0.3449185520786 - 1.379809348539 - 1.547596395983 - 0.6200714159243 - 0.09628899929797 - -1.951403153498 - 0.05878899929802 - -1.201403153498 - -0.9381057467091 - -0.9219611012974 - -0.7122352990568 - -2.118908646043 - -0.8053702662499 - -0.9473809321479 - -0.2783449953559 - 2.739898186057 - 1.944918552078 - -1.63399667708 - -3.183999068009 - -2.473999068009 - -0.03399906800905 - 0.159797079125 - -0.7601018139425 - 
-0.5550814479215 - -0.9339966770796 - -1.753999068009 - -2.843999068009 - -0.9739990680091 - -1.530202920875 - -1.960101813943 - 0.5449185520779 - -1.23399667708 - -1.723999068009 - -3.783999068009 - -2.283999068009 - -0.03020292087504 - -0.7601018139425 - 3.044918552078 - -0.3339966770796 - -3.033999068009 - 0.159797079125 - -2.843999068009 - -2.473999068009 - -0.03020292087504 - -1.960101813943 - -0.4550814479221 - -0.9524036040165 - -2.13399667708 - -3.265462533007 - -2.895462533007 - -0.9678561331385 - -1.643999068009 - 0.7313676292134 - 1.502356325981 - 1.199797079125 - -2.960101813943 - 1.544918552078 - 0.04759639598348 - 1.06600332292 - -5.465462533007 - -5.145462533007 - -0.4778561331385 - -2.843999068009 - -0.09863237078665 - -0.1876436740187 - 1.949797079125 - -8.260101813943 - -0.9550814479221 - -0.2524036040165 - 1.66600332292 - -4.465462533007 - -3.795462533007 - 1.212143866862 - 0.04600093199099 - -2.348632370787 - 1.052356325981 - 4.649797079125 - -0.7601018139425 - -2.696825506393 - 1.544918552078 - 3.879809348539 - -1.052403604016 - -2.579928584076 - -2.151403153498 - -1.281403153498 - -3.830605746709 - -4.451961101297 - -3.827235299057 - -4.853908646043 - -3.28537026625 - -2.899880932148 - -2.153344995356 - 8.596000931991 - 10.16600093199 - 2.476000931991 - -3.783999068009 - -3.523999068009 - 0.5260009319909 - -7.960101813943 - -5.755081447921 - -0.7524036040165 - 5.66600332292 - 4.334537466993 - 3.912143866861 - 4.154537466993 - 4.022143866862 - 2.106000931991 - 1.031367629213 - 4.572356325981 - 3.269797079125 - 1.939898186057 - 0.8449185520786 - -1.252403604017 - -5.13399667708 - -1.465462533007 - -2.087856133139 - -0.9054625330074 - -1.117856133138 - 0.6460009319909 - -1.778632370787 - 0.3723563259813 - 3.039797079125 - -12.06010181394 - -1.255081447922 - -7.952403604016 - -3.43399667708 - 0.2345374669926 - -3.487856133138 - 0.5245374669926 - -2.837856133139 - -2.243999068009 - -1.898632370787 - -0.2976436740187 - 1.169797079125 - 
-1.903999068009 - 2.239898186058 - 4.044918552078 - 1.36600332292 - -2.283999068009 - -0.600202920875 - -2.653999068009 - -0.410202920875 - -0.4039990680091 - -0.9739990680091 - 0.04600093199099 - -3.803711000702 - -3.353711000702 - 2.939898186057 - 1.144918552079 - 0.496288999298 - 0.506288999298 - -0.5306057467091 - -1.197235299057 - -0.9037110007021 - -0.6137110007021 - 2.539898186057 - 0.4449185520785 - -1.050605746709 - -1.537235299057 - -0.4037110007021 - -0.3537110007021 square_terms_residual_model: - - - -266.0047978153 - -104.4240748307 - 113.152993159 - - -104.4240748307 - -122.1523164991 - -86.07363307296 - - 113.152993159 - -86.07363307296 - 16.31641633252 - - - -693.1880101263 - -282.0272651787 - 185.6089587299 - - -282.0272651787 - -377.0062670135 - -199.8947029577 - - 185.6089587299 - -199.8947029577 - 32.55049992815 - - - 912.6451934849 - 383.7119079265 - -122.603887661 - - 383.7119079265 - 559.8400701698 - 213.0361667162 - - -122.603887661 - 213.0361667162 - -29.3843898806 - - - 259.2154297329 - 115.0514237096 - -9.284737880698 - - 115.0514237096 - 174.2494476566 - 49.11184063816 - - -9.284737880698 - 49.11184063816 - -1.549507008423 - - - 143.5729232274 - 64.4900364164 - 14.94615333046 - - 64.4900364164 - 105.0390768054 - 17.44042411675 - - 14.94615333046 - 17.44042411675 - -0.4689104638637 - - - 68.45844162431 - 32.65296277576 - 25.04360055654 - - 32.65296277576 - 59.00150809543 - 0.7592426398398 - - 25.04360055654 - 0.7592426398398 - 0.9871650554024 - - - 18.93707066102 - 11.68664155631 - 28.03242287255 - - 11.68664155631 - 30.15119945711 - -6.606690969349 - - 28.03242287255 - -6.606690969349 - 0.6417870082805 - - - -14.88013773366 - -1.672249752224 - 30.57297543811 - - -1.672249752224 - 17.31712592596 - -10.5469407781 - - 30.57297543811 - -10.5469407781 - -0.7358906114451 - - - 18.93707066102 - 11.68664155631 - 28.03242287255 - - 11.68664155631 - 30.15119945711 - -6.606690969349 - - 28.03242287255 - -6.606690969349 - 0.6417870082805 - - - 
-14.88013773366 - -1.672249752224 - 30.57297543811 - - -1.672249752224 - 17.31712592596 - -10.5469407781 - - 30.57297543811 - -10.5469407781 - -0.7358906114448 - - - -172.4440433656 - -58.30234348421 - 67.88122528662 - - -58.30234348421 - -10.11688714194 - -39.74497829255 - - 67.88122528662 - -39.74497829255 - -9.026701693999 - - - 87.41811326922 - 33.86514838909 - -5.240102033061 - - 33.86514838909 - 29.49048109152 - 14.67250320165 - - -5.240102033061 - 14.67250320165 - 4.833599928218 - - - 41.68746835262 - 18.32521144382 - 3.890270269493 - - 18.32521144382 - 28.10847211295 - 9.269126205224 - - 3.890270269493 - 9.269126205224 - 2.450369115355 - - - 2.547257721925 - -41.98520512744 - 20.72868343281 - - -41.98520512744 - -302.2224872041 - -113.9339256901 - - 20.72868343281 - -113.9339256901 - -2.362980518942 - - - 18.58084217516 - 5.678085135653 - 6.602937980917 - - 5.678085135653 - -5.592544704067 - -3.940958740204 - - 6.602937980917 - -3.940958740204 - 0.9575365227362 - - - 14.66994772782 - 5.070231742209 - 5.910736412971 - - 5.070231742209 - -0.4082883762073 - -1.880932217244 - - 5.910736412971 - -1.880932217244 - 0.79286999734 - - - 11.79669162928 - 4.251219274511 - 5.316966607371 - - 4.251219274511 - 0.8144741803491 - -1.252738735688 - - 5.316966607371 - -1.252738735688 - 0.6496190714454 - - - -266.0047978153 - -104.4240748307 - 113.152993159 - - -104.4240748307 - -122.1523164991 - -86.07363307296 - - 113.152993159 - -86.07363307296 - 16.31641633252 - - - -693.1880101263 - -282.0272651787 - 185.6089587299 - - -282.0272651787 - -377.0062670135 - -199.8947029577 - - 185.6089587299 - -199.8947029577 - 32.55049992815 - - - 912.6451934849 - 383.7119079265 - -122.603887661 - - 383.7119079265 - 559.8400701698 - 213.0361667162 - - -122.603887661 - 213.0361667162 - -29.3843898806 - - - 259.2154297329 - 115.0514237096 - -9.284737880698 - - 115.0514237096 - 174.2494476566 - 49.11184063816 - - -9.284737880698 - 49.11184063816 - -1.549507008423 - - - 143.5729232274 - 
64.4900364164 - 14.94615333046 - - 64.4900364164 - 105.0390768054 - 17.44042411675 - - 14.94615333046 - 17.44042411675 - -0.4689104638632 - - - 68.45844162431 - 32.65296277576 - 25.04360055654 - - 32.65296277576 - 59.00150809543 - 0.7592426398398 - - 25.04360055654 - 0.7592426398398 - 0.9871650554029 - - - 18.93707066102 - 11.68664155631 - 28.03242287255 - - 11.68664155631 - 30.15119945711 - -6.606690969349 - - 28.03242287255 - -6.606690969349 - 0.6417870082796 - - - -14.88013773366 - -1.672249752224 - 30.57297543811 - - -1.672249752224 - 17.31712592596 - -10.5469407781 - - 30.57297543811 - -10.5469407781 - -0.7358906114452 - - - 18.93707066102 - 11.68664155631 - 28.03242287255 - - 11.68664155631 - 30.15119945711 - -6.606690969349 - - 28.03242287255 - -6.606690969349 - 0.6417870082796 - - - -14.88013773366 - -1.672249752224 - 30.57297543811 - - -1.672249752224 - 17.31712592596 - -10.5469407781 - - 30.57297543811 - -10.5469407781 - -0.7358906114453 - - - -172.4440433656 - -58.30234348421 - 67.88122528662 - - -58.30234348421 - -10.11688714194 - -39.74497829255 - - 67.88122528662 - -39.74497829255 - -9.026701694 - - - 87.41811326922 - 33.86514838909 - -5.240102033061 - - 33.86514838909 - 29.49048109152 - 14.67250320165 - - -5.240102033061 - 14.67250320165 - 4.833599928218 - - - 41.68746835262 - 18.32521144382 - 3.890270269493 - - 18.32521144382 - 28.10847211295 - 9.269126205224 - - 3.890270269493 - 9.269126205224 - 2.450369115355 - - - 2.547257721924 - -41.98520512744 - 20.72868343281 - - -41.98520512744 - -302.2224872041 - -113.9339256901 - - 20.72868343281 - -113.9339256901 - -2.362980518941 - - - 18.58084217516 - 5.678085135653 - 6.602937980917 - - 5.678085135653 - -5.592544704067 - -3.940958740204 - - 6.602937980917 - -3.940958740204 - 0.9575365227362 - - - 14.66994772782 - 5.070231742209 - 5.910736412971 - - 5.070231742209 - -0.4082883762073 - -1.880932217244 - - 5.910736412971 - -1.880932217244 - 0.79286999734 - - - 11.79669162928 - 4.251219274511 - 
5.316966607371 - - 4.251219274511 - 0.8144741803491 - -1.252738735688 - - 5.316966607371 - -1.252738735688 - 0.6496190714454 - - - -266.0047978153 - -104.4240748307 - 113.152993159 - - -104.4240748307 - -122.1523164991 - -86.07363307296 - - 113.152993159 - -86.07363307296 - 16.31641633252 - - - -693.1880101263 - -282.0272651787 - 185.6089587299 - - -282.0272651787 - -377.0062670135 - -199.8947029577 - - 185.6089587299 - -199.8947029577 - 32.55049992815 - - - 912.6451934849 - 383.7119079265 - -122.603887661 - - 383.7119079265 - 559.8400701698 - 213.0361667162 - - -122.603887661 - 213.0361667162 - -29.3843898806 - - - 259.2154297329 - 115.0514237096 - -9.284737880698 - - 115.0514237096 - 174.2494476566 - 49.11184063816 - - -9.284737880698 - 49.11184063816 - -1.549507008423 - - - 143.5729232274 - 64.4900364164 - 14.94615333046 - - 64.4900364164 - 105.0390768054 - 17.44042411675 - - 14.94615333046 - 17.44042411675 - -0.4689104638635 - - - 68.45844162431 - 32.65296277576 - 25.04360055654 - - 32.65296277576 - 59.00150809543 - 0.7592426398398 - - 25.04360055654 - 0.7592426398398 - 0.9871650554026 - - - 18.93707066102 - 11.68664155631 - 28.03242287255 - - 11.68664155631 - 30.15119945711 - -6.606690969349 - - 28.03242287255 - -6.606690969349 - 0.6417870082801 - - - -14.88013773366 - -1.672249752224 - 30.57297543811 - - -1.672249752224 - 17.31712592596 - -10.5469407781 - - 30.57297543811 - -10.5469407781 - -0.7358906114447 - - - 18.93707066102 - 11.68664155631 - 28.03242287255 - - 11.68664155631 - 30.15119945711 - -6.606690969349 - - 28.03242287255 - -6.606690969349 - 0.6417870082801 - - - -14.88013773366 - -1.672249752224 - 30.57297543811 - - -1.672249752224 - 17.31712592596 - -10.5469407781 - - 30.57297543811 - -10.5469407781 - -0.7358906114448 - - - -172.4440433656 - -58.30234348421 - 67.88122528662 - - -58.30234348421 - -10.11688714194 - -39.74497829255 - - 67.88122528662 - -39.74497829255 - -9.026701693999 - - - 87.41811326922 - 33.86514838909 - -5.240102033061 - - 
33.86514838909 - 29.49048109152 - 14.67250320165 - - -5.240102033061 - 14.67250320165 - 4.833599928218 - - - 41.68746835262 - 18.32521144382 - 3.890270269493 - - 18.32521144382 - 28.10847211295 - 9.269126205224 - - 3.890270269493 - 9.269126205224 - 2.450369115355 - - - 2.547257721925 - -41.98520512744 - 20.72868343281 - - -41.98520512744 - -302.2224872041 - -113.9339256901 - - 20.72868343281 - -113.9339256901 - -2.362980518942 - - - 18.58084217516 - 5.678085135653 - 6.602937980917 - - 5.678085135653 - -5.592544704067 - -3.940958740204 - - 6.602937980917 - -3.940958740204 - 0.9575365227361 - - - 14.66994772782 - 5.070231742209 - 5.910736412971 - - 5.070231742209 - -0.4082883762073 - -1.880932217244 - - 5.910736412971 - -1.880932217244 - 0.79286999734 - - - 11.79669162928 - 4.251219274511 - 5.316966607371 - - 4.251219274511 - 0.8144741803491 - -1.252738735688 - - 5.316966607371 - -1.252738735688 - 0.6496190714454 - - - -266.0047978153 - -104.4240748307 - 113.152993159 - - -104.4240748307 - -122.1523164991 - -86.07363307296 - - 113.152993159 - -86.07363307296 - 16.31641633252 - - - -693.1880101263 - -282.0272651787 - 185.6089587299 - - -282.0272651787 - -377.0062670135 - -199.8947029577 - - 185.6089587299 - -199.8947029577 - 32.55049992815 - - - 912.6451934849 - 383.7119079265 - -122.603887661 - - 383.7119079265 - 559.8400701698 - 213.0361667162 - - -122.603887661 - 213.0361667162 - -29.3843898806 - - - 259.2154297329 - 115.0514237096 - -9.284737880698 - - 115.0514237096 - 174.2494476566 - 49.11184063816 - - -9.284737880698 - 49.11184063816 - -1.549507008423 - - - 143.5729232274 - 64.4900364164 - 14.94615333046 - - 64.4900364164 - 105.0390768054 - 17.44042411675 - - 14.94615333046 - 17.44042411675 - -0.4689104638631 - - - 68.45844162431 - 32.65296277576 - 25.04360055654 - - 32.65296277576 - 59.00150809543 - 0.7592426398398 - - 25.04360055654 - 0.7592426398398 - 0.9871650554026 - - - 18.93707066102 - 11.68664155631 - 28.03242287255 - - 11.68664155631 - 30.15119945711 - 
-6.606690969349 - - 28.03242287255 - -6.606690969349 - 0.6417870082804 - - - -14.88013773366 - -1.672249752224 - 30.57297543811 - - -1.672249752224 - 17.31712592596 - -10.5469407781 - - 30.57297543811 - -10.5469407781 - -0.7358906114447 - - - 18.93707066102 - 11.68664155631 - 28.03242287255 - - 11.68664155631 - 30.15119945711 - -6.606690969349 - - 28.03242287255 - -6.606690969349 - 0.6417870082796 - - - -14.88013773366 - -1.672249752224 - 30.57297543811 - - -1.672249752224 - 17.31712592596 - -10.5469407781 - - 30.57297543811 - -10.5469407781 - -0.7358906114452 - - - -172.4440433656 - -58.30234348421 - 67.88122528662 - - -58.30234348421 - -10.11688714194 - -39.74497829255 - - 67.88122528662 - -39.74497829255 - -9.026701693998 - - - 87.41811326922 - 33.86514838909 - -5.240102033061 - - 33.86514838909 - 29.49048109152 - 14.67250320165 - - -5.240102033061 - 14.67250320165 - 4.833599928218 - - - 41.68746835262 - 18.32521144382 - 3.890270269493 - - 18.32521144382 - 28.10847211295 - 9.269126205224 - - 3.890270269493 - 9.269126205224 - 2.450369115355 - - - 2.547257721925 - -41.98520512744 - 20.72868343281 - - -41.98520512744 - -302.2224872041 - -113.9339256901 - - 20.72868343281 - -113.9339256901 - -2.362980518942 - - - 18.58084217516 - 5.678085135653 - 6.602937980917 - - 5.678085135653 - -5.592544704067 - -3.940958740204 - - 6.602937980917 - -3.940958740204 - 0.9575365227362 - - - 14.66994772782 - 5.070231742209 - 5.910736412971 - - 5.070231742209 - -0.4082883762072 - -1.880932217244 - - 5.910736412971 - -1.880932217244 - 0.79286999734 - - - 11.79669162928 - 4.251219274511 - 5.316966607371 - - 4.251219274511 - 0.8144741803491 - -1.252738735688 - - 5.316966607371 - -1.252738735688 - 0.6496190714454 - - - -266.0047978153 - -104.4240748307 - 113.152993159 - - -104.4240748307 - -122.1523164991 - -86.07363307296 - - 113.152993159 - -86.07363307296 - 16.31641633252 - - - 912.6451934849 - 383.7119079265 - -122.603887661 - - 383.7119079265 - 559.8400701698 - 213.0361667162 - - 
-122.603887661 - 213.0361667162 - -29.3843898806 - - - 37.86067159491 - 19.63578725902 - 27.33833014146 - - 19.63578725902 - 40.52405881727 - -4.310564613888 - - 27.33833014146 - -4.310564613888 - 1.001499860058 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.8045838275 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.80458382749 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.8045838275 - - - 10.65126838649 - 3.886653870481 - 5.035741025472 - - 3.886653870481 - 1.042562599207 - -1.080339920451 - - 5.035741025472 - -1.080339920451 - 0.5902117049039 - - - -266.0047978153 - -104.4240748307 - 113.152993159 - - -104.4240748307 - -122.1523164991 - -86.07363307296 - - 113.152993159 - -86.07363307296 - 16.31641633252 - - - 912.6451934849 - 383.7119079265 - -122.603887661 - - 383.7119079265 - 559.8400701698 - 213.0361667162 - - -122.603887661 - 213.0361667162 - -29.3843898806 - - - 37.86067159491 - 19.63578725902 - 27.33833014146 - - 19.63578725902 - 40.52405881727 - -4.310564613888 - - 27.33833014146 - -4.310564613888 - 1.001499860058 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.80458382749 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.8045838275 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.80458382749 - - - 10.65126838649 - 3.886653870481 - 5.035741025472 - - 3.886653870481 - 1.042562599207 - -1.080339920451 - - 5.035741025472 - -1.080339920451 - 0.590211704904 - - - 
-266.0047978153 - -104.4240748307 - 113.152993159 - - -104.4240748307 - -122.1523164991 - -86.07363307296 - - 113.152993159 - -86.07363307296 - 16.31641633252 - - - 912.6451934849 - 383.7119079265 - -122.603887661 - - 383.7119079265 - 559.8400701698 - 213.0361667162 - - -122.603887661 - 213.0361667162 - -29.38438988059 - - - 37.86067159491 - 19.63578725902 - 27.33833014146 - - 19.63578725902 - 40.52405881727 - -4.310564613888 - - 27.33833014146 - -4.310564613888 - 1.001499860059 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.80458382749 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.80458382749 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.80458382749 - - - 10.65126838649 - 3.886653870481 - 5.035741025472 - - 3.886653870481 - 1.042562599207 - -1.080339920451 - - 5.035741025472 - -1.080339920451 - 0.590211704904 - - - -266.0047978153 - -104.4240748307 - 113.152993159 - - -104.4240748307 - -122.1523164991 - -86.07363307296 - - 113.152993159 - -86.07363307296 - 16.31641633252 - - - 912.6451934849 - 383.7119079265 - -122.603887661 - - 383.7119079265 - 559.8400701698 - 213.0361667162 - - -122.603887661 - 213.0361667162 - -29.38438988059 - - - 37.86067159491 - 19.63578725902 - 27.33833014146 - - 19.63578725902 - 40.52405881727 - -4.310564613888 - - 27.33833014146 - -4.310564613888 - 1.001499860059 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.8045838275 - - - 10.65126838649 - 3.886653870481 - 5.035741025472 - - 3.886653870481 - 1.042562599207 - -1.080339920451 - - 5.035741025472 - -1.080339920451 - 0.5902117049039 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 
- - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.8045838275 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.80458382749 - - - 10.65126838649 - 3.886653870481 - 5.035741025472 - - 3.886653870481 - 1.042562599207 - -1.080339920451 - - 5.035741025472 - -1.080339920451 - 0.590211704904 - - - -266.0047978153 - -104.4240748307 - 113.152993159 - - -104.4240748307 - -122.1523164991 - -86.07363307296 - - 113.152993159 - -86.07363307296 - 16.31641633252 - - - 912.6451934849 - 383.7119079265 - -122.603887661 - - 383.7119079265 - 559.8400701698 - 213.0361667162 - - -122.603887661 - 213.0361667162 - -29.38438988059 - - - 143.5729232274 - 64.4900364164 - 14.94615333046 - - 64.4900364164 - 105.0390768054 - 17.44042411675 - - 14.94615333046 - 17.44042411675 - -0.4689104638635 - - - 37.86067159491 - 19.63578725902 - 27.33833014146 - - 19.63578725902 - 40.52405881727 - -4.310564613888 - - 27.33833014146 - -4.310564613888 - 1.001499860059 - - - 3.158995638313 - 5.285050749783 - 28.72100787546 - - 5.285050749783 - 23.11411940654 - -8.290517826993 - - 28.72100787546 - -8.290517826993 - 0.0893332002871 - - - 3.158995638313 - 5.285050749783 - 28.72100787546 - - 5.285050749783 - 23.11411940654 - -8.290517826993 - - 28.72100787546 - -8.290517826993 - 0.0893332002872 - - - -46.24243692467 - -13.19929340666 - 36.398937508 - - -13.19929340666 - 10.49974863698 - -15.59542019426 - - 36.398937508 - -15.59542019426 - -2.340461710656 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.8045838275 - - - 34.65099735562 - 17.97663481639 - 4.117471070847 - - 17.97663481639 - 42.36208423833 - 14.06237417874 - - 4.117471070847 - 14.06237417874 - 2.203133844605 - - - 16.48276796572 - 5.469469601922 - 6.230099813495 - - 5.469469601922 - -1.991487592538 - 
-2.545944896211 - - 6.230099813495 - -2.545944896211 - 0.8761849272405 - - - 10.65126838649 - 3.886653870481 - 5.035741025472 - - 3.886653870481 - 1.042562599207 - -1.080339920451 - - 5.035741025472 - -1.080339920451 - 0.5902117049041 - - - -266.0047978153 - -104.4240748307 - 113.152993159 - - -104.4240748307 - -122.1523164991 - -86.07363307296 - - 113.152993159 - -86.07363307296 - 16.31641633252 - - - 912.6451934849 - 383.7119079265 - -122.603887661 - - 383.7119079265 - 559.8400701698 - 213.0361667162 - - -122.603887661 - 213.0361667162 - -29.38438988059 - - - 143.5729232274 - 64.4900364164 - 14.94615333046 - - 64.4900364164 - 105.0390768054 - 17.44042411675 - - 14.94615333046 - 17.44042411675 - -0.4689104638631 - - - 37.86067159491 - 19.63578725902 - 27.33833014146 - - 19.63578725902 - 40.52405881727 - -4.310564613888 - - 27.33833014146 - -4.310564613888 - 1.001499860058 - - - 3.158995638313 - 5.285050749783 - 28.72100787546 - - 5.285050749783 - 23.11411940654 - -8.290517826993 - - 28.72100787546 - -8.290517826993 - 0.08933320028759 - - - 3.158995638313 - 5.285050749783 - 28.72100787546 - - 5.285050749783 - 23.11411940654 - -8.290517826993 - - 28.72100787546 - -8.290517826993 - 0.0893332002872 - - - -46.24243692467 - -13.19929340666 - 36.398937508 - - -13.19929340666 - 10.49974863698 - -15.59542019426 - - 36.398937508 - -15.59542019426 - -2.340461710656 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.8045838275 - - - 34.65099735562 - 17.97663481639 - 4.117471070847 - - 17.97663481639 - 42.36208423833 - 14.06237417874 - - 4.117471070847 - 14.06237417874 - 2.203133844605 - - - 16.48276796572 - 5.469469601922 - 6.230099813495 - - 5.469469601922 - -1.991487592538 - -2.545944896211 - - 6.230099813495 - -2.545944896211 - 0.8761849272405 - - - 10.65126838649 - 3.886653870481 - 5.035741025472 - - 3.886653870481 - 1.042562599207 - -1.080339920451 - - 5.035741025472 - 
-1.080339920451 - 0.590211704904 - - - -266.0047978153 - -104.4240748307 - 113.152993159 - - -104.4240748307 - -122.1523164991 - -86.07363307296 - - 113.152993159 - -86.07363307296 - 16.31641633252 - - - 912.6451934849 - 383.7119079265 - -122.603887661 - - 383.7119079265 - 559.8400701698 - 213.0361667162 - - -122.603887661 - 213.0361667162 - -29.38438988059 - - - 143.5729232274 - 64.4900364164 - 14.94615333046 - - 64.4900364164 - 105.0390768054 - 17.44042411675 - - 14.94615333046 - 17.44042411675 - -0.4689104638633 - - - 37.86067159491 - 19.63578725902 - 27.33833014146 - - 19.63578725902 - 40.52405881727 - -4.310564613888 - - 27.33833014146 - -4.310564613888 - 1.001499860059 - - - 3.158995638313 - 5.285050749783 - 28.72100787546 - - 5.285050749783 - 23.11411940654 - -8.290517826993 - - 28.72100787546 - -8.290517826993 - 0.08933320028717 - - - 3.158995638313 - 5.285050749783 - 28.72100787546 - - 5.285050749783 - 23.11411940654 - -8.290517826993 - - 28.72100787546 - -8.290517826993 - 0.08933320028754 - - - -46.24243692467 - -13.19929340666 - 36.398937508 - - -13.19929340666 - 10.49974863698 - -15.59542019426 - - 36.398937508 - -15.59542019426 - -2.340461710656 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.80458382749 - - - 34.65099735562 - 17.97663481639 - 4.117471070847 - - 17.97663481639 - 42.36208423833 - 14.06237417874 - - 4.117471070847 - 14.06237417874 - 2.203133844605 - - - 16.48276796572 - 5.469469601922 - 6.230099813495 - - 5.469469601922 - -1.991487592538 - -2.545944896211 - - 6.230099813495 - -2.545944896211 - 0.8761849272406 - - - 10.65126838649 - 3.886653870481 - 5.035741025472 - - 3.886653870481 - 1.042562599207 - -1.080339920451 - - 5.035741025472 - -1.080339920451 - 0.5902117049039 - - - -266.0047978153 - -104.4240748307 - 113.152993159 - - -104.4240748307 - -122.1523164991 - -86.07363307296 - - 113.152993159 - -86.07363307296 - 16.31641633252 - - - 
-693.1880101263 - -282.0272651787 - 185.6089587299 - - -282.0272651787 - -377.0062670135 - -199.8947029577 - - 185.6089587299 - -199.8947029577 - 32.55049992815 - - - 912.6451934849 - 383.7119079265 - -122.603887661 - - 383.7119079265 - 559.8400701698 - 213.0361667162 - - -122.603887661 - 213.0361667162 - -29.38438988059 - - - 259.2154297329 - 115.0514237096 - -9.284737880698 - - 115.0514237096 - 174.2494476566 - 49.11184063816 - - -9.284737880698 - 49.11184063816 - -1.549507008423 - - - 143.5729232274 - 64.4900364164 - 14.94615333046 - - 64.4900364164 - 105.0390768054 - 17.44042411675 - - 14.94615333046 - 17.44042411675 - -0.4689104638631 - - - 68.45844162431 - 32.65296277576 - 25.04360055654 - - 32.65296277576 - 59.00150809543 - 0.7592426398398 - - 25.04360055654 - 0.7592426398398 - 0.9871650554025 - - - -14.88013773366 - -1.672249752224 - 30.57297543811 - - -1.672249752224 - 17.31712592596 - -10.5469407781 - - 30.57297543811 - -10.5469407781 - -0.7358906114452 - - - -14.88013773366 - -1.672249752224 - 30.57297543811 - - -1.672249752224 - 17.31712592596 - -10.5469407781 - - 30.57297543811 - -10.5469407781 - -0.7358906114452 - - - -172.4440433656 - -58.30234348421 - 67.88122528662 - - -58.30234348421 - -10.11688714194 - -39.74497829255 - - 67.88122528662 - -39.74497829255 - -9.026701693998 - - - 87.41811326922 - 33.86514838909 - -5.240102033061 - - 33.86514838909 - 29.49048109152 - 14.67250320165 - - -5.240102033061 - 14.67250320165 - 4.833599928218 - - - 41.68746835262 - 18.32521144382 - 3.890270269493 - - 18.32521144382 - 28.10847211295 - 9.269126205224 - - 3.890270269493 - 9.269126205224 - 2.450369115355 - - - 2.547257721925 - -41.98520512744 - 20.72868343281 - - -41.98520512744 - -302.2224872041 - -113.9339256901 - - 20.72868343281 - -113.9339256901 - -2.362980518942 - - - 18.58084217516 - 5.678085135653 - 6.602937980917 - - 5.678085135653 - -5.592544704067 - -3.940958740204 - - 6.602937980917 - -3.940958740204 - 0.9575365227362 - - - 14.66994772782 - 
5.070231742209 - 5.910736412971 - - 5.070231742209 - -0.4082883762073 - -1.880932217244 - - 5.910736412971 - -1.880932217244 - 0.79286999734 - - - 11.79669162928 - 4.251219274511 - 5.316966607371 - - 4.251219274511 - 0.8144741803491 - -1.252738735688 - - 5.316966607371 - -1.252738735688 - 0.6496190714455 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.80458382749 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.80458382749 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.8045838275 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.80458382749 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.80458382749 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.8045838275 - - - -266.0047978153 - -104.4240748307 - 113.152993159 - - -104.4240748307 - -122.1523164991 - -86.07363307296 - - 113.152993159 - -86.07363307296 - 16.31641633252 - - - 912.6451934849 - 383.7119079265 - -122.603887661 - - 383.7119079265 - 559.8400701698 - 213.0361667162 - - -122.603887661 - 213.0361667162 - -29.3843898806 - - - 143.5729232274 - 64.4900364164 - 14.94615333046 - - 64.4900364164 - 105.0390768054 - 17.44042411675 - - 14.94615333046 - 17.44042411675 - -0.4689104638633 - - - 37.86067159491 - 19.63578725902 - 27.33833014146 - - 19.63578725902 - 40.52405881727 - -4.310564613888 - - 27.33833014146 - -4.310564613888 - 1.001499860059 - - - 3.158995638313 - 5.285050749783 - 28.72100787546 - - 5.285050749783 - 
23.11411940654 - -8.290517826993 - - 28.72100787546 - -8.290517826993 - 0.08933320028804 - - - -46.24243692467 - -13.19929340666 - 36.398937508 - - -13.19929340666 - 10.49974863698 - -15.59542019426 - - 36.398937508 - -15.59542019426 - -2.340461710656 - - - 3.158995638313 - 5.285050749783 - 28.72100787546 - - 5.285050749783 - 23.11411940654 - -8.290517826993 - - 28.72100787546 - -8.290517826993 - 0.0893332002871 - - - -46.24243692467 - -13.19929340666 - 36.398937508 - - -13.19929340666 - 10.49974863698 - -15.59542019426 - - 36.398937508 - -15.59542019426 - -2.340461710657 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.8045838275 - - - 34.65099735562 - 17.97663481639 - 4.117471070847 - - 17.97663481639 - 42.36208423833 - 14.06237417874 - - 4.117471070847 - 14.06237417874 - 2.203133844605 - - - 16.48276796572 - 5.469469601922 - 6.230099813495 - - 5.469469601922 - -1.991487592538 - -2.545944896211 - - 6.230099813495 - -2.545944896211 - 0.8761849272405 - - - 10.65126838649 - 3.886653870481 - 5.035741025472 - - 3.886653870481 - 1.042562599207 - -1.080339920451 - - 5.035741025472 - -1.080339920451 - 0.590211704904 - - - -266.0047978153 - -104.4240748307 - 113.152993159 - - -104.4240748307 - -122.1523164991 - -86.07363307296 - - 113.152993159 - -86.07363307296 - 16.31641633252 - - - 912.6451934849 - 383.7119079265 - -122.603887661 - - 383.7119079265 - 559.8400701698 - 213.0361667162 - - -122.603887661 - 213.0361667162 - -29.3843898806 - - - 143.5729232274 - 64.4900364164 - 14.94615333046 - - 64.4900364164 - 105.0390768054 - 17.44042411675 - - 14.94615333046 - 17.44042411675 - -0.4689104638633 - - - 37.86067159491 - 19.63578725902 - 27.33833014146 - - 19.63578725902 - 40.52405881727 - -4.310564613888 - - 27.33833014146 - -4.310564613888 - 1.001499860058 - - - 3.158995638313 - 5.285050749783 - 28.72100787546 - - 5.285050749783 - 23.11411940654 - -8.290517826993 - - 
28.72100787546 - -8.290517826993 - 0.08933320028763 - - - -46.24243692467 - -13.19929340666 - 36.398937508 - - -13.19929340666 - 10.49974863698 - -15.59542019426 - - 36.398937508 - -15.59542019426 - -2.340461710657 - - - 3.158995638313 - 5.285050749783 - 28.72100787546 - - 5.285050749783 - 23.11411940654 - -8.290517826993 - - 28.72100787546 - -8.290517826993 - 0.08933320028752 - - - -46.24243692467 - -13.19929340666 - 36.398937508 - - -13.19929340666 - 10.49974863698 - -15.59542019426 - - 36.398937508 - -15.59542019426 - -2.340461710657 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.8045838275 - - - 34.65099735562 - 17.97663481639 - 4.117471070847 - - 17.97663481639 - 42.36208423833 - 14.06237417874 - - 4.117471070847 - 14.06237417874 - 2.203133844605 - - - 16.48276796572 - 5.469469601922 - 6.230099813495 - - 5.469469601922 - -1.991487592538 - -2.545944896211 - - 6.230099813495 - -2.545944896211 - 0.8761849272405 - - - 10.65126838649 - 3.886653870481 - 5.035741025472 - - 3.886653870481 - 1.042562599207 - -1.080339920451 - - 5.035741025472 - -1.080339920451 - 0.590211704904 - - - -266.0047978153 - -104.4240748307 - 113.152993159 - - -104.4240748307 - -122.1523164991 - -86.07363307296 - - 113.152993159 - -86.07363307296 - 16.31641633252 - - - 912.6451934849 - 383.7119079265 - -122.603887661 - - 383.7119079265 - 559.8400701698 - 213.0361667162 - - -122.603887661 - 213.0361667162 - -29.38438988059 - - - 143.5729232274 - 64.4900364164 - 14.94615333046 - - 64.4900364164 - 105.0390768054 - 17.44042411675 - - 14.94615333046 - 17.44042411675 - -0.4689104638634 - - - 37.86067159491 - 19.63578725902 - 27.33833014146 - - 19.63578725902 - 40.52405881727 - -4.310564613888 - - 27.33833014146 - -4.310564613888 - 1.001499860058 - - - 3.158995638313 - 5.285050749783 - 28.72100787546 - - 5.285050749783 - 23.11411940654 - -8.290517826993 - - 28.72100787546 - -8.290517826993 - 
0.08933320028753 - - - -46.24243692467 - -13.19929340666 - 36.398937508 - - -13.19929340666 - 10.49974863698 - -15.59542019426 - - 36.398937508 - -15.59542019426 - -2.340461710657 - - - 3.158995638313 - 5.285050749783 - 28.72100787546 - - 5.285050749783 - 23.11411940654 - -8.290517826993 - - 28.72100787546 - -8.290517826993 - 0.08933320028805 - - - -46.24243692467 - -13.19929340666 - 36.398937508 - - -13.19929340666 - 10.49974863698 - -15.59542019426 - - 36.398937508 - -15.59542019426 - -2.340461710656 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.80458382749 - - - 34.65099735562 - 17.97663481639 - 4.117471070847 - - 17.97663481639 - 42.36208423833 - 14.06237417874 - - 4.117471070847 - 14.06237417874 - 2.203133844605 - - - 16.48276796572 - 5.469469601922 - 6.230099813495 - - 5.469469601922 - -1.991487592538 - -2.545944896211 - - 6.230099813495 - -2.545944896211 - 0.8761849272405 - - - 10.65126838649 - 3.886653870481 - 5.035741025472 - - 3.886653870481 - 1.042562599207 - -1.080339920451 - - 5.035741025472 - -1.080339920451 - 0.5902117049041 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.8045838275 - - - -266.0047978153 - -104.4240748307 - 113.152993159 - - -104.4240748307 - -122.1523164991 - -86.07363307296 - - 113.152993159 - -86.07363307296 - 16.31641633252 - - - 912.6451934849 - 383.7119079265 - -122.603887661 - - 383.7119079265 - 559.8400701698 - 213.0361667162 - - -122.603887661 - 213.0361667162 - -29.38438988059 - - - 37.86067159491 - 19.63578725902 - 27.33833014146 - - 19.63578725902 - 40.52405881727 - -4.310564613888 - - 27.33833014146 - -4.310564613888 - 1.001499860059 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.80458382749 - - - 10.65126838649 - 
3.886653870481 - 5.035741025472 - - 3.886653870481 - 1.042562599207 - -1.080339920451 - - 5.035741025472 - -1.080339920451 - 0.590211704904 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.80458382749 - - - 10.65126838649 - 3.886653870481 - 5.035741025472 - - 3.886653870481 - 1.042562599207 - -1.080339920451 - - 5.035741025472 - -1.080339920451 - 0.5902117049039 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.8045838275 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.80458382749 - - - 256.1687039175 - 93.85492401693 - -48.56016854851 - - 93.85492401693 - 55.16906109347 - 47.3133808107 - - -48.56016854851 - 47.3133808107 - 13.80458382749 - - - 18.93707066102 - 11.68664155631 - 28.03242287255 - - 11.68664155631 - 30.15119945711 - -6.606690969349 - - 28.03242287255 - -6.606690969349 - 0.6417870082805 - - - 18.93707066102 - 11.68664155631 - 28.03242287255 - - 11.68664155631 - 30.15119945711 - -6.606690969349 - - 28.03242287255 - -6.606690969349 - 0.6417870082805 - - - -266.0047978153 - -104.4240748307 - 113.152993159 - - -104.4240748307 - -122.1523164991 - -86.07363307296 - - 113.152993159 - -86.07363307296 - 16.31641633252 - - - 912.6451934849 - 383.7119079265 - -122.603887661 - - 383.7119079265 - 559.8400701698 - 213.0361667162 - - -122.603887661 - 213.0361667162 - -29.3843898806 - - - 18.93707066102 - 11.68664155631 - 28.03242287255 - - 11.68664155631 - 30.15119945711 - -6.606690969349 - - 28.03242287255 - -6.606690969349 - 0.6417870082805 - - - 18.93707066102 - 11.68664155631 - 28.03242287255 - - 11.68664155631 - 30.15119945711 - -6.606690969349 - - 28.03242287255 - -6.606690969349 - 0.6417870082804 - - - -172.4440433656 - -58.30234348421 - 67.88122528662 - - 
-58.30234348421 - -10.11688714194 - -39.74497829255 - - 67.88122528662 - -39.74497829255 - -9.026701693999 - - - 41.68746835262 - 18.32521144382 - 3.890270269493 - - 18.32521144382 - 28.10847211295 - 9.269126205224 - - 3.890270269493 - 9.269126205224 - 2.450369115355 - - - 18.93707066102 - 11.68664155631 - 28.03242287255 - - 11.68664155631 - 30.15119945711 - -6.606690969349 - - 28.03242287255 - -6.606690969349 - 0.6417870082809 - - - 18.93707066102 - 11.68664155631 - 28.03242287255 - - 11.68664155631 - 30.15119945711 - -6.606690969349 - - 28.03242287255 - -6.606690969349 - 0.6417870082805 - - - -266.0047978153 - -104.4240748307 - 113.152993159 - - -104.4240748307 - -122.1523164991 - -86.07363307296 - - 113.152993159 - -86.07363307296 - 16.31641633252 - - - 912.6451934849 - 383.7119079265 - -122.603887661 - - 383.7119079265 - 559.8400701698 - 213.0361667162 - - -122.603887661 - 213.0361667162 - -29.3843898806 - - - -172.4440433656 - -58.30234348421 - 67.88122528662 - - -58.30234348421 - -10.11688714194 - -39.74497829255 - - 67.88122528662 - -39.74497829255 - -9.026701693999 - - - 41.68746835262 - 18.32521144382 - 3.890270269493 - - 18.32521144382 - 28.10847211295 - 9.269126205224 - - 3.890270269493 - 9.269126205224 - 2.450369115355 - - - 18.93707066102 - 11.68664155631 - 28.03242287255 - - 11.68664155631 - 30.15119945711 - -6.606690969349 - - 28.03242287255 - -6.606690969349 - 0.6417870082809 - - - 18.93707066102 - 11.68664155631 - 28.03242287255 - - 11.68664155631 - 30.15119945711 - -6.606690969349 - - 28.03242287255 - -6.606690969349 - 0.6417870082809 x_accepted: - 0.1494212312914 - 0.005607806220598 - 0.01308958287811 ================================================ FILE: tests/optimagic/optimizers/_pounders/fixtures/pounders_example_data.csv ================================================ y,t 92.9000,0.5000 78.7000,0.6250 64.2000,0.7500 64.9000,0.8750 57.1000,1.0000 43.3000,1.2500 31.1000,1.7500 23.6000,2.2500 31.0500,1.7500 23.7750,2.2500 17.7375,2.7500 
13.8000,3.2500 11.5875,3.7500 9.4125,4.2500 7.7250,4.7500 7.3500,5.2500 8.0250,5.7500 90.6000,0.5000 76.9000,0.6250 71.6000,0.7500 63.6000,0.8750 54.0000,1.0000 39.2000,1.2500 29.3000,1.7500 21.4000,2.2500 29.1750,1.7500 22.1250,2.2500 17.5125,2.7500 14.2500,3.2500 9.4500,3.7500 9.1500,4.2500 7.9125,4.7500 8.4750,5.2500 6.1125,5.7500 80.0000,0.5000 79.0000,0.6250 63.8000,0.7500 57.2000,0.8750 53.2000,1.0000 42.5000,1.2500 26.8000,1.7500 20.4000,2.2500 26.8500,1.7500 21.0000,2.2500 16.4625,2.7500 12.5250,3.2500 10.5375,3.7500 8.5875,4.2500 7.1250,4.7500 6.1125,5.2500 5.9625,5.7500 74.1000,0.5000 67.3000,0.6250 60.8000,0.7500 55.5000,0.8750 50.3000,1.0000 41.0000,1.2500 29.4000,1.7500 20.4000,2.2500 29.3625,1.7500 21.1500,2.2500 16.7625,2.7500 13.2000,3.2500 10.8750,3.7500 8.1750,4.2500 7.3500,4.7500 5.9625,5.2500 5.6250,5.7500 81.5000,.5000 62.4000,.7500 32.5000,1.5000 12.4100,3.0000 13.1200,3.0000 15.5600,3.0000 5.6300,6.0000 78.0000,.5000 59.9000,.7500 33.2000,1.5000 13.8400,3.0000 12.7500,3.0000 14.6200,3.0000 3.9400,6.0000 76.8000,.5000 61.0000,.7500 32.9000,1.5000 13.8700,3.0000 11.8100,3.0000 13.3100,3.0000 5.4400,6.0000 78.0000,.5000 63.5000,.7500 33.8000,1.5000 12.5600,3.0000 5.6300,6.0000 12.7500,3.0000 13.1200,3.0000 5.4400,6.0000 76.8000,.5000 60.0000,.7500 47.8000,1.0000 32.0000,1.5000 22.2000,2.0000 22.5700,2.0000 18.8200,2.5000 13.9500,3.0000 11.2500,4.0000 9.0000,5.0000 6.6700,6.0000 75.8000,.5000 62.0000,.7500 48.8000,1.0000 35.2000,1.5000 20.0000,2.0000 20.3200,2.0000 19.3100,2.5000 12.7500,3.0000 10.4200,4.0000 7.3100,5.0000 7.4200,6.0000 70.5000,.5000 59.5000,.7500 48.5000,1.0000 35.8000,1.5000 21.0000,2.0000 21.6700,2.0000 21.0000,2.5000 15.6400,3.0000 8.1700,4.0000 8.5500,5.0000 10.1200,6.0000 78.0000,.5000 66.0000,.6250 62.0000,.7500 58.0000,.8750 47.7000,1.0000 37.8000,1.2500 20.2000,2.2500 21.0700,2.2500 13.8700,2.7500 9.6700,3.2500 7.7600,3.7500 5.4400,4.2500 4.8700,4.7500 4.0100,5.2500 3.7500,5.7500 24.1900,3.0000 25.7600,3.0000 
18.0700,3.0000 11.8100,3.0000 12.0700,3.0000 16.1200,3.0000 70.8000,.5000 54.7000,.7500 48.0000,1.0000 39.8000,1.5000 29.8000,2.0000 23.7000,2.5000 29.6200,2.0000 23.8100,2.5000 17.7000,3.0000 11.5500,4.0000 12.0700,5.0000 8.7400,6.0000 80.7000,.5000 61.3000,.7500 47.5000,1.0000 29.0000,1.5000 24.0000,2.0000 17.7000,2.5000 24.5600,2.0000 18.6700,2.5000 16.2400,3.0000 8.7400,4.0000 7.8700,5.0000 8.5100,6.0000 66.7000,.5000 59.2000,.7500 40.8000,1.0000 30.7000,1.5000 25.7000,2.0000 16.3000,2.5000 25.9900,2.0000 16.9500,2.5000 13.3500,3.0000 8.6200,4.0000 7.2000,5.0000 6.6400,6.0000 13.6900,3.0000 81.0000,.5000 64.5000,.7500 35.5000,1.5000 13.3100,3.0000 4.8700,6.0000 12.9400,3.0000 5.0600,6.0000 15.1900,3.0000 14.6200,3.0000 15.6400,3.0000 25.5000,1.7500 25.9500,1.7500 81.7000,.5000 61.6000,.7500 29.8000,1.7500 29.8100,1.7500 17.1700,2.7500 10.3900,3.7500 28.4000,1.7500 28.6900,1.7500 81.3000,.5000 60.9000,.7500 16.6500,2.7500 10.0500,3.7500 28.9000,1.7500 28.9500,1.7500 ================================================ FILE: tests/optimagic/optimizers/_pounders/fixtures/update_initial_residual_model.yaml ================================================ --- initial_residual_model: intercepts: - 25.015622878108047 - 18.675766504742285 - 10.714250439974172 - 16.92850306333966 - 13.8332898293664 - 7.611432734310156 - 5.780449944004236 - 4.918595910461541 - 5.730449944004235 - 5.093595910461538 - 3.573230198001667 - 2.8432762942943253 - 2.9807829301802418 - 2.56911876085199 - 2.229814978179112 - 2.901300021005225 - 4.398727952741098 - 22.715622878108036 - 16.875766504742288 - 18.114250439974164 - 15.628503063339657 - 10.733289829366399 - 3.511432734310162 - 3.980449944004235 - 2.718595910461538 - 3.855449944004235 - 3.4435959104615392 - 3.3482301980016658 - 3.2932762942943246 - 0.8432829301802407 - 2.3066187608519906 - 2.417314978179112 - 4.026300021005225 - 2.486227952741097 - 12.115622878108042 - 18.975766504742282 - 10.314250439974167 - 9.228503063339659 - 
9.933289829366402 - 6.811432734310159 - 1.480449944004235 - 1.7185959104615378 - 1.5304499440042356 - 2.3185959104615392 - 2.298230198001665 - 1.568276294294325 - 1.930782930180241 - 1.7441187608519906 - 1.6298149781791125 - 1.6638000210052253 - 2.3362279527410976 - 6.215622878108036 - 7.275766504742279 - 7.314250439974167 - 7.528503063339656 - 7.033289829366396 - 5.311432734310159 - 4.080449944004233 - 1.7185959104615378 - 4.042949944004235 - 2.468595910461538 - 2.5982301980016658 - 2.243276294294324 - 2.2682829301802414 - 1.331618760851991 - 1.8548149781791121 - 1.5138000210052258 - 1.9987279527410973 - 13.615622878108042 - 8.914250439974168 - 2.617857443870772 - -0.020698756342492075 - 0.689301243657507 - 3.1293012436575083 - 2.3486741154642674 - 10.115622878108042 - 6.414250439974168 - 3.317857443870775 - 1.4093012436575076 - 0.3193012436575078 - 2.189301243657507 - 0.6586741154642675 - 8.915622878108039 - 7.51425043997417 - 3.0178574438707706 - 1.439301243657507 - -0.6206987563424917 - 0.8793012436575083 - 2.158674115464268 - 10.115622878108042 - 10.01425043997417 - 3.917857443870769 - 0.12930124365750828 - 2.3486741154642674 - 0.3193012436575078 - 0.689301243657507 - 2.158674115464268 - 8.915622878108039 - 6.51425043997417 - 4.533289829366396 - 2.117857443870772 - 0.5381907245488051 - 0.9081907245488061 - 2.599956711545751 - 1.519301243657507 - 3.5858449755949513 - 4.060262123100171 - 3.3886741154642674 - 7.915622878108039 - 8.51425043997417 - 5.533289829366396 - 5.317857443870775 - -1.6618092754511942 - -1.3418092754511939 - 3.0899567115457494 - 0.3193012436575078 - 2.7558449755949512 - 2.3702621231001704 - 4.1386741154642674 - 2.6156228781080415 - 6.01425043997417 - 5.233289829366399 - 5.917857443870769 - -0.6618092754511942 - 0.008190724548807538 - 4.779956711545751 - 3.2093012436575084 - 0.5058449755949512 - 3.6102621231001715 - 6.838674115464267 - 10.115622878108042 - 5.975766504742282 - 8.51425043997417 - 10.028503063339656 - 4.433289829366402 - 
2.1114327343101564 - 1.5185959104615385 - 2.3885959104615395 - -0.2942698019983343 - -1.2867237057056755 - -0.8467170698197588 - -1.4033812391480094 - -0.6251850218208874 - -0.43869997899477475 - 0.12372795274109727 - 11.759301243657509 - 13.32930124365751 - 5.639301243657508 - -0.6206987563424917 - -0.36069875634249193 - 3.6893012436575088 - 2.9156228781080387 - 1.2142504399741725 - 4.733289829366399 - 9.91785744387077 - 8.138190724548807 - 7.47995671154575 - 7.958190724548807 - 7.589956711545749 - 5.269301243657507 - 3.885844975594952 - 7.130262123100171 - 5.458674115464268 - 12.815622878108044 - 7.814250439974167 - 4.233289829366399 - -0.882142556129228 - 2.338190724548806 - 1.47995671154575 - 2.8981907245488046 - 2.4499567115457523 - 3.809301243657506 - 1.0758449755949515 - 2.930262123100171 - 5.228674115464267 - -1.1843771218919557 - 5.7142504399741725 - -2.466710170633604 - 0.8178574438707713 - 4.038190724548805 - 0.07995671154575135 - 4.328190724548804 - 0.7299567115457499 - 0.9193012436575074 - 0.9558449755949505 - 2.260262123100171 - 3.358674115464267 - 1.2593012436575073 - 13.115622878108042 - 11.01425043997417 - 5.617857443870772 - 0.8793012436575083 - 1.5886741154642676 - 0.5093012436575073 - 1.7786741154642671 - 2.7593012436575073 - 2.189301243657507 - 3.2093012436575084 - 0.1804499440042342 - 0.6304499440042335 - 13.815622878108044 - 8.114250439974171 - 4.480449944004235 - 4.490449944004233 - 3.005730198001668 - 1.783282930180242 - 3.0804499440042328 - 3.3704499440042355 - 13.415622878108039 - 7.414250439974168 - 2.485730198001665 - 1.4432829301802421 - 3.5804499440042328 - 3.6304499440042335 residual_model_expected: intercepts: - 25.015622878108047 - 18.675766504742285 - 10.714250439974172 - 16.92850306333966 - 13.8332898293664 - 7.611432734310156 - 5.780449944004236 - 4.918595910461541 - 5.730449944004235 - 5.093595910461538 - 3.573230198001667 - 2.8432762942943253 - 2.9807829301802418 - 2.56911876085199 - 2.229814978179112 - 2.901300021005225 - 
4.398727952741098 - 22.715622878108036 - 16.875766504742288 - 18.114250439974164 - 15.628503063339657 - 10.733289829366399 - 3.511432734310162 - 3.980449944004235 - 2.718595910461538 - 3.855449944004235 - 3.4435959104615392 - 3.3482301980016658 - 3.2932762942943246 - 0.8432829301802407 - 2.3066187608519906 - 2.417314978179112 - 4.026300021005225 - 2.486227952741097 - 12.115622878108042 - 18.975766504742282 - 10.314250439974167 - 9.228503063339659 - 9.933289829366402 - 6.811432734310159 - 1.480449944004235 - 1.7185959104615378 - 1.5304499440042356 - 2.3185959104615392 - 2.298230198001665 - 1.568276294294325 - 1.930782930180241 - 1.7441187608519906 - 1.6298149781791125 - 1.6638000210052253 - 2.3362279527410976 - 6.215622878108036 - 7.275766504742279 - 7.314250439974167 - 7.528503063339656 - 7.033289829366396 - 5.311432734310159 - 4.080449944004233 - 1.7185959104615378 - 4.042949944004235 - 2.468595910461538 - 2.5982301980016658 - 2.243276294294324 - 2.2682829301802414 - 1.331618760851991 - 1.8548149781791121 - 1.5138000210052258 - 1.9987279527410973 - 13.615622878108042 - 8.914250439974168 - 2.617857443870772 - -0.020698756342492075 - 0.689301243657507 - 3.1293012436575083 - 2.3486741154642674 - 10.115622878108042 - 6.414250439974168 - 3.317857443870775 - 1.4093012436575076 - 0.3193012436575078 - 2.189301243657507 - 0.6586741154642675 - 8.915622878108039 - 7.51425043997417 - 3.0178574438707706 - 1.439301243657507 - -0.6206987563424917 - 0.8793012436575083 - 2.158674115464268 - 10.115622878108042 - 10.01425043997417 - 3.917857443870769 - 0.12930124365750828 - 2.3486741154642674 - 0.3193012436575078 - 0.689301243657507 - 2.158674115464268 - 8.915622878108039 - 6.51425043997417 - 4.533289829366396 - 2.117857443870772 - 0.5381907245488051 - 0.9081907245488061 - 2.599956711545751 - 1.519301243657507 - 3.5858449755949513 - 4.060262123100171 - 3.3886741154642674 - 7.915622878108039 - 8.51425043997417 - 5.533289829366396 - 5.317857443870775 - -1.6618092754511942 - 
-1.3418092754511939 - 3.0899567115457494 - 0.3193012436575078 - 2.7558449755949512 - 2.3702621231001704 - 4.1386741154642674 - 2.6156228781080415 - 6.01425043997417 - 5.233289829366399 - 5.917857443870769 - -0.6618092754511942 - 0.008190724548807538 - 4.779956711545751 - 3.2093012436575084 - 0.5058449755949512 - 3.6102621231001715 - 6.838674115464267 - 10.115622878108042 - 5.975766504742282 - 8.51425043997417 - 10.028503063339656 - 4.433289829366402 - 2.1114327343101564 - 1.5185959104615385 - 2.3885959104615395 - -0.2942698019983343 - -1.2867237057056755 - -0.8467170698197588 - -1.4033812391480094 - -0.6251850218208874 - -0.43869997899477475 - 0.12372795274109727 - 11.759301243657509 - 13.32930124365751 - 5.639301243657508 - -0.6206987563424917 - -0.36069875634249193 - 3.6893012436575088 - 2.9156228781080387 - 1.2142504399741725 - 4.733289829366399 - 9.91785744387077 - 8.138190724548807 - 7.47995671154575 - 7.958190724548807 - 7.589956711545749 - 5.269301243657507 - 3.885844975594952 - 7.130262123100171 - 5.458674115464268 - 12.815622878108044 - 7.814250439974167 - 4.233289829366399 - -0.882142556129228 - 2.338190724548806 - 1.47995671154575 - 2.8981907245488046 - 2.4499567115457523 - 3.809301243657506 - 1.0758449755949515 - 2.930262123100171 - 5.228674115464267 - -1.1843771218919557 - 5.7142504399741725 - -2.466710170633604 - 0.8178574438707713 - 4.038190724548805 - 0.07995671154575135 - 4.328190724548804 - 0.7299567115457499 - 0.9193012436575074 - 0.9558449755949505 - 2.260262123100171 - 3.358674115464267 - 1.2593012436575073 - 13.115622878108042 - 11.01425043997417 - 5.617857443870772 - 0.8793012436575083 - 1.5886741154642676 - 0.5093012436575073 - 1.7786741154642671 - 2.7593012436575073 - 2.189301243657507 - 3.2093012436575084 - 0.1804499440042342 - 0.6304499440042335 - 13.815622878108044 - 8.114250439974171 - 4.480449944004235 - 4.490449944004233 - 3.005730198001668 - 1.783282930180242 - 3.0804499440042328 - 3.3704499440042355 - 13.415622878108039 - 
7.414250439974168 - 2.485730198001665 - 1.4432829301802421 - 3.5804499440042328 - 3.6304499440042335 linear_terms: - - 3.480506441842877 - 3.8712304612359034 - 4.165692188910668 - 4.386622308606341 - 4.550399630758491 - 4.751877523597983 - 4.842268154536409 - 4.713742622150146 - 4.842268154536409 - 4.713742622150146 - 4.483425879433607 - 4.2077176825347635 - 3.915982374815913 - 3.6241891423568875 - 3.3411505110783217 - 3.0716442642808213 - 2.81808670681996 - 3.480506441842877 - 3.8712304612359034 - 4.165692188910668 - 4.386622308606341 - 4.550399630758491 - 4.751877523597983 - 4.842268154536409 - 4.713742622150146 - 4.842268154536409 - 4.713742622150146 - 4.483425879433607 - 4.2077176825347635 - 3.915982374815913 - 3.6241891423568875 - 3.3411505110783217 - 3.0716442642808213 - 2.81808670681996 - 3.480506441842877 - 3.8712304612359034 - 4.165692188910668 - 4.386622308606341 - 4.550399630758491 - 4.751877523597983 - 4.842268154536409 - 4.713742622150146 - 4.842268154536409 - 4.713742622150146 - 4.483425879433607 - 4.2077176825347635 - 3.915982374815913 - 3.6241891423568875 - 3.3411505110783217 - 3.0716442642808213 - 2.81808670681996 - 3.480506441842877 - 3.8712304612359034 - 4.165692188910668 - 4.386622308606341 - 4.550399630758491 - 4.751877523597983 - 4.842268154536409 - 4.713742622150146 - 4.842268154536409 - 4.713742622150146 - 4.483425879433607 - 4.2077176825347635 - 3.915982374815913 - 3.6241891423568875 - 3.3411505110783217 - 3.0716442642808213 - 2.81808670681996 - 3.480506441842877 - 4.165692188910668 - 4.8359539116697725 - 4.348989444230492 - 4.348989444230492 - 4.348989444230492 - 2.697639699884843 - 3.480506441842877 - 4.165692188910668 - 4.8359539116697725 - 4.348989444230492 - 4.348989444230492 - 4.348989444230492 - 2.697639699884843 - 3.480506441842877 - 4.165692188910668 - 4.8359539116697725 - 4.348989444230492 - 4.348989444230492 - 4.348989444230492 - 2.697639699884843 - 3.480506441842877 - 4.165692188910668 - 4.8359539116697725 - 4.348989444230492 - 
2.697639699884843 - 4.348989444230492 - 4.348989444230492 - 2.697639699884843 - 3.480506441842877 - 4.165692188910668 - 4.550399630758491 - 4.8359539116697725 - 4.795984320324443 - 4.795984320324443 - 4.606904553696424 - 4.348989444230492 - 3.7694207275538334 - 3.20451303242801 - 2.697639699884843 - 3.480506441842877 - 4.165692188910668 - 4.550399630758491 - 4.8359539116697725 - 4.795984320324443 - 4.795984320324443 - 4.606904553696424 - 4.348989444230492 - 3.7694207275538334 - 3.20451303242801 - 2.697639699884843 - 3.480506441842877 - 4.165692188910668 - 4.550399630758491 - 4.8359539116697725 - 4.795984320324443 - 4.795984320324443 - 4.606904553696424 - 4.348989444230492 - 3.7694207275538334 - 3.20451303242801 - 2.697639699884843 - 3.480506441842877 - 3.8712304612359034 - 4.165692188910668 - 4.386622308606341 - 4.550399630758491 - 4.751877523597983 - 4.713742622150146 - 4.713742622150146 - 4.483425879433607 - 4.2077176825347635 - 3.915982374815913 - 3.6241891423568875 - 3.3411505110783217 - 3.0716442642808213 - 2.81808670681996 - 4.348989444230492 - 4.348989444230492 - 4.348989444230492 - 4.348989444230492 - 4.348989444230492 - 4.348989444230492 - 3.480506441842877 - 4.165692188910668 - 4.550399630758491 - 4.8359539116697725 - 4.795984320324443 - 4.606904553696424 - 4.795984320324443 - 4.606904553696424 - 4.348989444230492 - 3.7694207275538334 - 3.20451303242801 - 2.697639699884843 - 3.480506441842877 - 4.165692188910668 - 4.550399630758491 - 4.8359539116697725 - 4.795984320324443 - 4.606904553696424 - 4.795984320324443 - 4.606904553696424 - 4.348989444230492 - 3.7694207275538334 - 3.20451303242801 - 2.697639699884843 - 3.480506441842877 - 4.165692188910668 - 4.550399630758491 - 4.8359539116697725 - 4.795984320324443 - 4.606904553696424 - 4.795984320324443 - 4.606904553696424 - 4.348989444230492 - 3.7694207275538334 - 3.20451303242801 - 2.697639699884843 - 4.348989444230492 - 3.480506441842877 - 4.165692188910668 - 4.8359539116697725 - 4.348989444230492 - 
2.697639699884843 - 4.348989444230492 - 2.697639699884843 - 4.348989444230492 - 4.348989444230492 - 4.348989444230492 - 4.842268154536409 - 4.842268154536409 - 3.480506441842877 - 4.165692188910668 - 4.842268154536409 - 4.842268154536409 - 4.483425879433607 - 3.915982374815913 - 4.842268154536409 - 4.842268154536409 - 3.480506441842877 - 4.165692188910668 - 4.483425879433607 - 3.915982374815913 - 4.842268154536409 - 4.842268154536409 - - 63.15476421569454 - 55.92600783938173 - 49.9146681808974 - 44.846354813932926 - 40.5229744079594 - 33.56053509484467 - 24.03332128329257 - 17.92731548788399 - 24.03332128329257 - 17.92731548788399 - 13.762137034267115 - 10.793196717608852 - 8.60666628497297 - 6.955196266780662 - 5.6825308893242505 - 4.685572737243362 - 3.8938723589600377 - 63.15476421569454 - 55.92600783938173 - 49.9146681808974 - 44.84635481393292 - 40.5229744079594 - 33.56053509484467 - 24.03332128329257 - 17.92731548788399 - 24.03332128329257 - 17.92731548788399 - 13.762137034267115 - 10.793196717608852 - 8.60666628497297 - 6.955196266780662 - 5.6825308893242505 - 4.685572737243362 - 3.8938723589600377 - 63.15476421569454 - 55.92600783938173 - 49.9146681808974 - 44.846354813932926 - 40.5229744079594 - 33.56053509484467 - 24.03332128329257 - 17.92731548788399 - 24.03332128329257 - 17.92731548788399 - 13.762137034267115 - 10.793196717608854 - 8.60666628497297 - 6.955196266780662 - 5.6825308893242505 - 4.685572737243362 - 3.8938723589600377 - 63.15476421569454 - 55.926007839381725 - 49.9146681808974 - 44.846354813932926 - 40.5229744079594 - 33.56053509484467 - 24.03332128329257 - 17.92731548788399 - 24.03332128329257 - 17.92731548788399 - 13.762137034267115 - 10.793196717608852 - 8.60666628497297 - 6.955196266780662 - 5.6825308893242505 - 4.685572737243362 - 3.8938723589600377 - 63.15476421569454 - 49.9146681808974 - 28.22609468926748 - 12.159194348241293 - 12.159194348241295 - 12.159194348241295 - 3.5589080859646285 - 63.15476421569454 - 49.9146681808974 - 
28.22609468926748 - 12.159194348241293 - 12.159194348241293 - 12.159194348241295 - 3.5589080859646285 - 63.15476421569454 - 49.9146681808974 - 28.22609468926748 - 12.159194348241295 - 12.159194348241293 - 12.159194348241295 - 3.5589080859646285 - 63.15476421569454 - 49.9146681808974 - 28.22609468926748 - 12.159194348241293 - 3.5589080859646285 - 12.159194348241293 - 12.159194348241295 - 3.5589080859646285 - 63.15476421569454 - 49.9146681808974 - 40.5229744079594 - 28.22609468926748 - 20.670151246699717 - 20.670151246699717 - 15.659359279812536 - 12.159194348241293 - 7.7253890215938394 - 5.154589183118886 - 3.558908085964629 - 63.15476421569454 - 49.9146681808974 - 40.5229744079594 - 28.22609468926748 - 20.670151246699717 - 20.670151246699717 - 15.659359279812536 - 12.159194348241293 - 7.7253890215938394 - 5.154589183118886 - 3.558908085964629 - 63.15476421569455 - 49.9146681808974 - 40.5229744079594 - 28.22609468926748 - 20.670151246699717 - 20.670151246699717 - 15.659359279812536 - 12.159194348241293 - 7.7253890215938394 - 5.154589183118886 - 3.558908085964629 - 63.15476421569454 - 55.926007839381725 - 49.9146681808974 - 44.846354813932926 - 40.5229744079594 - 33.56053509484467 - 17.92731548788399 - 17.92731548788399 - 13.762137034267115 - 10.793196717608854 - 8.60666628497297 - 6.955196266780662 - 5.6825308893242505 - 4.685572737243362 - 3.8938723589600377 - 12.159194348241293 - 12.159194348241293 - 12.159194348241293 - 12.159194348241293 - 12.159194348241293 - 12.159194348241293 - 63.15476421569455 - 49.9146681808974 - 40.5229744079594 - 28.226094689267484 - 20.670151246699717 - 15.659359279812538 - 20.670151246699717 - 15.659359279812538 - 12.159194348241293 - 7.7253890215938394 - 5.154589183118885 - 3.558908085964629 - 63.15476421569454 - 49.9146681808974 - 40.5229744079594 - 28.22609468926748 - 20.670151246699717 - 15.659359279812536 - 20.670151246699717 - 15.659359279812536 - 12.159194348241293 - 7.7253890215938394 - 5.154589183118886 - 3.558908085964629 - 
63.15476421569455 - 49.9146681808974 - 40.5229744079594 - 28.22609468926748 - 20.670151246699717 - 15.659359279812536 - 20.670151246699717 - 15.659359279812536 - 12.159194348241295 - 7.7253890215938394 - 5.154589183118886 - 3.558908085964628 - 12.159194348241295 - 63.15476421569454 - 49.9146681808974 - 28.22609468926748 - 12.159194348241295 - 3.5589080859646285 - 12.159194348241295 - 3.5589080859646285 - 12.159194348241295 - 12.159194348241295 - 12.159194348241293 - 24.03332128329257 - 24.03332128329257 - 63.15476421569454 - 49.9146681808974 - 24.03332128329257 - 24.03332128329257 - 13.762137034267115 - 8.60666628497297 - 24.03332128329257 - 24.03332128329257 - 63.15476421569454 - 49.9146681808974 - 13.762137034267115 - 8.60666628497297 - 24.03332128329257 - 24.03332128329257 - - 56.638796479154635 - 52.032136772388945 - 47.77743791348329 - 43.94566363511593 - 40.5229744079594 - 34.7426501626184 - 26.325776492983195 - 20.60237968739701 - 26.325776492983195 - 20.60237968739701 - 16.515672503683682 - 13.484113409516121 - 11.16768678177973 - 9.355872580735188 - 7.911893266969131 - 6.743263411989219 - 5.785333776105146 - 56.638796479154635 - 52.032136772388945 - 47.77743791348329 - 43.94566363511592 - 40.522974407959396 - 34.7426501626184 - 26.325776492983195 - 20.60237968739701 - 26.325776492983195 - 20.60237968739701 - 16.515672503683682 - 13.484113409516121 - 11.16768678177973 - 9.355872580735188 - 7.911893266969131 - 6.743263411989219 - 5.785333776105146 - 56.638796479154635 - 52.032136772388945 - 47.77743791348329 - 43.94566363511593 - 40.522974407959396 - 34.7426501626184 - 26.325776492983195 - 20.60237968739701 - 26.325776492983195 - 20.60237968739701 - 16.515672503683682 - 13.484113409516123 - 11.16768678177973 - 9.355872580735188 - 7.911893266969131 - 6.743263411989219 - 5.785333776105146 - 56.638796479154635 - 52.032136772388945 - 47.77743791348329 - 43.94566363511593 - 40.522974407959396 - 34.7426501626184 - 26.325776492983195 - 20.60237968739701 - 
26.325776492983195 - 20.60237968739701 - 16.515672503683682 - 13.484113409516121 - 11.16768678177973 - 9.355872580735188 - 7.911893266969131 - 6.743263411989219 - 5.785333776105146 - 56.638796479154635 - 47.77743791348328 - 30.102395781328614 - 14.89321437920679 - 14.89321437920679 - 14.89321437920679 - 5.370328369240038 - 56.638796479154635 - 47.77743791348328 - 30.102395781328614 - 14.89321437920679 - 14.89321437920679 - 14.89321437920679 - 5.370328369240038 - 56.638796479154635 - 47.77743791348329 - 30.102395781328614 - 14.89321437920679 - 14.89321437920679 - 14.89321437920679 - 5.370328369240038 - 56.638796479154635 - 47.77743791348329 - 30.102395781328614 - 14.89321437920679 - 5.370328369240038 - 14.89321437920679 - 14.89321437920679 - 5.370328369240038 - 56.638796479154635 - 47.77743791348329 - 40.522974407959396 - 30.102395781328614 - 23.208590873487402 - 23.208590873487402 - 18.398363818154305 - 14.89321437920679 - 10.208549778534715 - 7.2977158685733325 - 5.370328369240038 - 56.638796479154635 - 47.77743791348329 - 40.522974407959396 - 30.102395781328614 - 23.208590873487402 - 23.208590873487402 - 18.398363818154305 - 14.89321437920679 - 10.208549778534715 - 7.2977158685733325 - 5.370328369240038 - 56.638796479154635 - 47.77743791348329 - 40.522974407959396 - 30.102395781328614 - 23.208590873487402 - 23.208590873487402 - 18.398363818154305 - 14.89321437920679 - 10.208549778534717 - 7.2977158685733325 - 5.370328369240037 - 56.638796479154635 - 52.032136772388945 - 47.77743791348329 - 43.94566363511593 - 40.522974407959396 - 34.7426501626184 - 20.60237968739701 - 20.60237968739701 - 16.515672503683682 - 13.484113409516123 - 11.16768678177973 - 9.355872580735188 - 7.911893266969132 - 6.743263411989219 - 5.785333776105146 - 14.893214379206793 - 14.893214379206793 - 14.893214379206793 - 14.89321437920679 - 14.89321437920679 - 14.89321437920679 - 56.638796479154635 - 47.77743791348329 - 40.522974407959396 - 30.10239578132861 - 23.208590873487402 - 
18.398363818154305 - 23.208590873487402 - 18.398363818154305 - 14.89321437920679 - 10.208549778534715 - 7.2977158685733325 - 5.370328369240037 - 56.638796479154635 - 47.77743791348329 - 40.522974407959396 - 30.102395781328614 - 23.208590873487402 - 18.398363818154305 - 23.208590873487402 - 18.398363818154305 - 14.89321437920679 - 10.208549778534717 - 7.2977158685733325 - 5.370328369240038 - 56.638796479154635 - 47.77743791348329 - 40.522974407959396 - 30.102395781328614 - 23.208590873487402 - 18.398363818154305 - 23.208590873487402 - 18.398363818154305 - 14.89321437920679 - 10.208549778534717 - 7.2977158685733325 - 5.370328369240038 - 14.89321437920679 - 56.638796479154635 - 47.77743791348329 - 30.102395781328614 - 14.89321437920679 - 5.370328369240038 - 14.89321437920679 - 5.370328369240038 - 14.89321437920679 - 14.89321437920679 - 14.89321437920679 - 26.325776492983195 - 26.325776492983195 - 56.638796479154635 - 47.77743791348328 - 26.325776492983195 - 26.325776492983195 - 16.515672503683682 - 11.16768678177973 - 26.325776492983195 - 26.325776492983195 - 56.638796479154635 - 47.77743791348328 - 16.515672503683682 - 11.16768678177973 - 26.325776492983195 - 26.325776492983195 square_terms: - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 
0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 
- - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 
- 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 
0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 
0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 
- - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 
- 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 residuals_candidate: - - -3.480506441842877 - -3.8712304612359034 - -4.165692188910668 - -4.386622308606341 - -4.550399630758491 - -4.751877523597983 - -4.842268154536409 - -4.713742622150146 - -4.842268154536409 - -4.713742622150146 - -4.483425879433607 - -4.2077176825347635 - -3.915982374815913 - -3.6241891423568875 - -3.3411505110783217 - -3.0716442642808213 - -2.81808670681996 - -3.480506441842877 - -3.8712304612359034 - -4.165692188910668 - -4.386622308606341 - -4.550399630758491 - -4.751877523597983 - -4.842268154536409 - -4.713742622150146 - -4.842268154536409 - -4.713742622150146 - -4.483425879433607 - -4.2077176825347635 - -3.915982374815913 - -3.6241891423568875 - -3.3411505110783217 - -3.0716442642808213 - -2.81808670681996 - -3.480506441842877 - -3.8712304612359034 - -4.165692188910668 - -4.386622308606341 - -4.550399630758491 - -4.751877523597983 - -4.842268154536409 - -4.713742622150146 - -4.842268154536409 - -4.713742622150146 - -4.483425879433607 - -4.2077176825347635 - -3.915982374815913 - -3.6241891423568875 - -3.3411505110783217 - -3.0716442642808213 - -2.81808670681996 - -3.480506441842877 - -3.8712304612359034 - -4.165692188910668 - -4.386622308606341 - -4.550399630758491 - -4.751877523597983 - -4.842268154536409 - -4.713742622150146 - -4.842268154536409 - -4.713742622150146 - -4.483425879433607 - -4.2077176825347635 - -3.915982374815913 - -3.6241891423568875 - -3.3411505110783217 - -3.0716442642808213 - -2.81808670681996 - -3.480506441842877 - -4.165692188910668 - 
-4.8359539116697725 - -4.348989444230492 - -4.348989444230492 - -4.348989444230492 - -2.697639699884843 - -3.480506441842877 - -4.165692188910668 - -4.8359539116697725 - -4.348989444230492 - -4.348989444230492 - -4.348989444230492 - -2.697639699884843 - -3.480506441842877 - -4.165692188910668 - -4.8359539116697725 - -4.348989444230492 - -4.348989444230492 - -4.348989444230492 - -2.697639699884843 - -3.480506441842877 - -4.165692188910668 - -4.8359539116697725 - -4.348989444230492 - -2.697639699884843 - -4.348989444230492 - -4.348989444230492 - -2.697639699884843 - -3.480506441842877 - -4.165692188910668 - -4.550399630758491 - -4.8359539116697725 - -4.795984320324443 - -4.795984320324443 - -4.606904553696424 - -4.348989444230492 - -3.7694207275538334 - -3.20451303242801 - -2.697639699884843 - -3.480506441842877 - -4.165692188910668 - -4.550399630758491 - -4.8359539116697725 - -4.795984320324443 - -4.795984320324443 - -4.606904553696424 - -4.348989444230492 - -3.7694207275538334 - -3.20451303242801 - -2.697639699884843 - -3.480506441842877 - -4.165692188910668 - -4.550399630758491 - -4.8359539116697725 - -4.795984320324443 - -4.795984320324443 - -4.606904553696424 - -4.348989444230492 - -3.7694207275538334 - -3.20451303242801 - -2.697639699884843 - -3.480506441842877 - -3.8712304612359034 - -4.165692188910668 - -4.386622308606341 - -4.550399630758491 - -4.751877523597983 - -4.713742622150146 - -4.713742622150146 - -4.483425879433607 - -4.2077176825347635 - -3.915982374815913 - -3.6241891423568875 - -3.3411505110783217 - -3.0716442642808213 - -2.81808670681996 - -4.348989444230492 - -4.348989444230492 - -4.348989444230492 - -4.348989444230492 - -4.348989444230492 - -4.348989444230492 - -3.480506441842877 - -4.165692188910668 - -4.550399630758491 - -4.8359539116697725 - -4.795984320324443 - -4.606904553696424 - -4.795984320324443 - -4.606904553696424 - -4.348989444230492 - -3.7694207275538334 - -3.20451303242801 - -2.697639699884843 - -3.480506441842877 - 
-4.165692188910668 - -4.550399630758491 - -4.8359539116697725 - -4.795984320324443 - -4.606904553696424 - -4.795984320324443 - -4.606904553696424 - -4.348989444230492 - -3.7694207275538334 - -3.20451303242801 - -2.697639699884843 - -3.480506441842877 - -4.165692188910668 - -4.550399630758491 - -4.8359539116697725 - -4.795984320324443 - -4.606904553696424 - -4.795984320324443 - -4.606904553696424 - -4.348989444230492 - -3.7694207275538334 - -3.20451303242801 - -2.697639699884843 - -4.348989444230492 - -3.480506441842877 - -4.165692188910668 - -4.8359539116697725 - -4.348989444230492 - -2.697639699884843 - -4.348989444230492 - -2.697639699884843 - -4.348989444230492 - -4.348989444230492 - -4.348989444230492 - -4.842268154536409 - -4.842268154536409 - -3.480506441842877 - -4.165692188910668 - -4.842268154536409 - -4.842268154536409 - -4.483425879433607 - -3.915982374815913 - -4.842268154536409 - -4.842268154536409 - -3.480506441842877 - -4.165692188910668 - -4.483425879433607 - -3.915982374815913 - -4.842268154536409 - -4.842268154536409 - - 59.674257773851664 - 52.05477737814583 - 45.74897599198673 - 40.459732505326585 - 35.97257477720091 - 28.808657571246684 - 19.19105312875616 - 13.213572865733845 - 19.19105312875616 - 13.213572865733845 - 9.278711154833507 - 6.585479035074089 - 4.690683910157058 - 3.331007124423775 - 2.341380378245929 - 1.6139284729625407 - 1.0757856521400777 - 59.674257773851664 - 52.05477737814583 - 45.74897599198673 - 40.45973250532658 - 35.97257477720091 - 28.808657571246684 - 19.19105312875616 - 13.213572865733845 - 19.19105312875616 - 13.213572865733845 - 9.278711154833507 - 6.585479035074089 - 4.690683910157058 - 3.331007124423775 - 2.341380378245929 - 1.6139284729625407 - 1.0757856521400777 - 59.674257773851664 - 52.05477737814583 - 45.74897599198673 - 40.459732505326585 - 35.97257477720091 - 28.808657571246684 - 19.19105312875616 - 13.213572865733845 - 19.19105312875616 - 13.213572865733845 - 9.278711154833507 - 6.585479035074091 - 
4.690683910157058 - 3.331007124423775 - 2.341380378245929 - 1.6139284729625407 - 1.0757856521400777 - 59.674257773851664 - 52.05477737814582 - 45.74897599198673 - 40.459732505326585 - 35.97257477720091 - 28.808657571246684 - 19.19105312875616 - 13.213572865733845 - 19.19105312875616 - 13.213572865733845 - 9.278711154833507 - 6.585479035074089 - 4.690683910157058 - 3.331007124423775 - 2.341380378245929 - 1.6139284729625407 - 1.0757856521400777 - 59.674257773851664 - 45.74897599198673 - 23.390140777597708 - 7.810204904010802 - 7.810204904010803 - 7.810204904010803 - 0.8612683860797854 - 59.674257773851664 - 45.74897599198673 - 23.390140777597708 - 7.810204904010801 - 7.810204904010801 - 7.810204904010803 - 0.8612683860797854 - 59.674257773851664 - 45.74897599198673 - 23.390140777597708 - 7.810204904010803 - 7.810204904010802 - 7.810204904010803 - 0.8612683860797854 - 59.674257773851664 - 45.74897599198673 - 23.390140777597708 - 7.810204904010802 - 0.8612683860797854 - 7.810204904010801 - 7.810204904010803 - 0.8612683860797854 - 59.674257773851664 - 45.74897599198673 - 35.97257477720091 - 23.390140777597708 - 15.874166926375274 - 15.874166926375274 - 11.052454726116112 - 7.810204904010801 - 3.955968294040006 - 1.950076150690876 - 0.8612683860797858 - 59.674257773851664 - 45.74897599198673 - 35.97257477720091 - 23.390140777597708 - 15.874166926375274 - 15.874166926375274 - 11.052454726116112 - 7.810204904010801 - 3.955968294040006 - 1.950076150690876 - 0.8612683860797858 - 59.67425777385167 - 45.74897599198673 - 35.97257477720091 - 23.390140777597708 - 15.874166926375274 - 15.874166926375274 - 11.052454726116112 - 7.810204904010801 - 3.955968294040006 - 1.950076150690876 - 0.8612683860797858 - 59.674257773851664 - 52.05477737814582 - 45.74897599198673 - 40.459732505326585 - 35.97257477720091 - 28.808657571246684 - 13.213572865733845 - 13.213572865733845 - 9.278711154833507 - 6.58547903507409 - 4.690683910157058 - 3.3310071244237753 - 2.341380378245929 - 
1.6139284729625407 - 1.0757856521400777 - 7.810204904010801 - 7.810204904010801 - 7.810204904010801 - 7.810204904010802 - 7.810204904010802 - 7.810204904010801 - 59.67425777385167 - 45.74897599198673 - 35.97257477720091 - 23.39014077759771 - 15.874166926375274 - 11.052454726116114 - 15.874166926375274 - 11.052454726116114 - 7.810204904010801 - 3.955968294040006 - 1.950076150690875 - 0.8612683860797858 - 59.674257773851664 - 45.74897599198673 - 35.97257477720091 - 23.390140777597708 - 15.874166926375274 - 11.052454726116112 - 15.874166926375274 - 11.052454726116112 - 7.810204904010801 - 3.955968294040006 - 1.950076150690876 - 0.8612683860797858 - 59.67425777385167 - 45.74897599198673 - 35.97257477720091 - 23.390140777597708 - 15.874166926375274 - 11.052454726116112 - 15.874166926375274 - 11.052454726116112 - 7.810204904010803 - 3.955968294040006 - 1.950076150690876 - 0.861268386079785 - 7.810204904010803 - 59.674257773851664 - 45.74897599198673 - 23.390140777597708 - 7.810204904010803 - 0.8612683860797854 - 7.810204904010803 - 0.8612683860797854 - 7.810204904010803 - 7.810204904010803 - 7.810204904010801 - 19.19105312875616 - 19.19105312875616 - 59.674257773851664 - 45.74897599198673 - 19.19105312875616 - 19.19105312875616 - 9.278711154833507 - 4.690683910157058 - 19.19105312875616 - 19.19105312875616 - 59.674257773851664 - 45.74897599198673 - 9.278711154833507 - 4.690683910157058 - 19.19105312875616 - 19.19105312875616 - - 53.15829003731176 - 48.16090631115304 - 43.61174572457262 - 39.559041326509586 - 35.97257477720091 - 29.99077263902042 - 21.483508338446786 - 15.888637065246865 - 21.483508338446786 - 15.888637065246865 - 12.032246624250076 - 9.276395726981358 - 7.2517044069638175 - 5.7316834383783 - 4.57074275589081 - 3.671619147708398 - 2.967247069285186 - 53.15829003731176 - 48.16090631115304 - 43.61174572457262 - 39.55904132650958 - 35.972574777200904 - 29.99077263902042 - 21.483508338446786 - 15.888637065246865 - 21.483508338446786 - 15.888637065246865 - 
12.032246624250076 - 9.276395726981358 - 7.2517044069638175 - 5.7316834383783 - 4.57074275589081 - 3.671619147708398 - 2.967247069285186 - 53.15829003731176 - 48.16090631115304 - 43.61174572457262 - 39.559041326509586 - 35.972574777200904 - 29.99077263902042 - 21.483508338446786 - 15.888637065246865 - 21.483508338446786 - 15.888637065246865 - 12.032246624250076 - 9.27639572698136 - 7.2517044069638175 - 5.731683438378299 - 4.57074275589081 - 3.671619147708398 - 2.967247069285186 - 53.15829003731176 - 48.16090631115304 - 43.61174572457262 - 39.559041326509586 - 35.972574777200904 - 29.99077263902042 - 21.483508338446786 - 15.888637065246865 - 21.483508338446786 - 15.888637065246865 - 12.032246624250076 - 9.276395726981358 - 7.2517044069638175 - 5.731683438378299 - 4.57074275589081 - 3.671619147708398 - 2.967247069285186 - 53.15829003731176 - 43.611745724572614 - 25.26644186965884 - 10.544224934976299 - 10.544224934976299 - 10.544224934976299 - 2.6726886693551952 - 53.15829003731176 - 43.611745724572614 - 25.26644186965884 - 10.544224934976299 - 10.544224934976299 - 10.544224934976299 - 2.672688669355195 - 53.15829003731176 - 43.61174572457262 - 25.26644186965884 - 10.544224934976299 - 10.544224934976299 - 10.544224934976299 - 2.6726886693551952 - 53.15829003731176 - 43.61174572457262 - 25.26644186965884 - 10.544224934976299 - 2.6726886693551952 - 10.544224934976299 - 10.544224934976299 - 2.6726886693551952 - 53.15829003731176 - 43.61174572457262 - 35.972574777200904 - 25.26644186965884 - 18.41260655316296 - 18.41260655316296 - 13.791459264457881 - 10.544224934976299 - 6.439129050980882 - 4.0932028361453225 - 2.6726886693551952 - 53.15829003731176 - 43.61174572457262 - 35.972574777200904 - 25.26644186965884 - 18.41260655316296 - 18.41260655316296 - 13.791459264457881 - 10.544224934976299 - 6.439129050980882 - 4.0932028361453225 - 2.6726886693551952 - 53.15829003731176 - 43.61174572457262 - 35.972574777200904 - 25.26644186965884 - 18.41260655316296 - 18.41260655316296 
- 13.791459264457881 - 10.544224934976299 - 6.439129050980883 - 4.0932028361453225 - 2.6726886693551943 - 53.15829003731176 - 48.16090631115304 - 43.61174572457262 - 39.559041326509586 - 35.972574777200904 - 29.99077263902042 - 15.888637065246865 - 15.888637065246865 - 12.032246624250076 - 9.27639572698136 - 7.2517044069638175 - 5.731683438378299 - 4.5707427558908105 - 3.671619147708398 - 2.967247069285186 - 10.5442249349763 - 10.5442249349763 - 10.5442249349763 - 10.544224934976299 - 10.544224934976299 - 10.544224934976299 - 53.15829003731176 - 43.61174572457262 - 35.972574777200904 - 25.266441869658838 - 18.41260655316296 - 13.791459264457881 - 18.41260655316296 - 13.791459264457881 - 10.544224934976299 - 6.439129050980882 - 4.0932028361453225 - 2.6726886693551943 - 53.15829003731176 - 43.61174572457262 - 35.972574777200904 - 25.26644186965884 - 18.41260655316296 - 13.791459264457881 - 18.41260655316296 - 13.791459264457881 - 10.544224934976299 - 6.439129050980883 - 4.0932028361453225 - 2.6726886693551952 - 53.15829003731176 - 43.61174572457262 - 35.972574777200904 - 25.26644186965884 - 18.41260655316296 - 13.791459264457881 - 18.41260655316296 - 13.791459264457881 - 10.544224934976299 - 6.439129050980883 - 4.0932028361453225 - 2.6726886693551952 - 10.544224934976299 - 53.15829003731176 - 43.61174572457262 - 25.26644186965884 - 10.544224934976299 - 2.6726886693551952 - 10.544224934976299 - 2.6726886693551952 - 10.544224934976299 - 10.544224934976299 - 10.544224934976299 - 21.483508338446786 - 21.483508338446786 - 53.15829003731176 - 43.611745724572614 - 21.483508338446786 - 21.483508338446786 - 12.032246624250076 - 7.2517044069638175 - 21.483508338446786 - 21.483508338446786 - 53.15829003731176 - 43.611745724572614 - 12.032246624250076 - 7.2517044069638175 - 21.483508338446786 - 21.483508338446786 x_candidate: - - -1.0 - 0.0 - 0.0 - - -1.0 - 1.0 - 0.0 - - -1.0 - 0.0 - 1.0 ================================================ FILE: 
tests/optimagic/optimizers/_pounders/fixtures/update_intial_residual_model.yaml ================================================ --- initial_residual_model: intercepts: - 25.015622878108047 - 18.675766504742285 - 10.714250439974172 - 16.92850306333966 - 13.8332898293664 - 7.611432734310156 - 5.780449944004236 - 4.918595910461541 - 5.730449944004235 - 5.093595910461538 - 3.573230198001667 - 2.8432762942943253 - 2.9807829301802418 - 2.56911876085199 - 2.229814978179112 - 2.901300021005225 - 4.398727952741098 - 22.715622878108036 - 16.875766504742288 - 18.114250439974164 - 15.628503063339657 - 10.733289829366399 - 3.511432734310162 - 3.980449944004235 - 2.718595910461538 - 3.855449944004235 - 3.4435959104615392 - 3.3482301980016658 - 3.2932762942943246 - 0.8432829301802407 - 2.3066187608519906 - 2.417314978179112 - 4.026300021005225 - 2.486227952741097 - 12.115622878108042 - 18.975766504742282 - 10.314250439974167 - 9.228503063339659 - 9.933289829366402 - 6.811432734310159 - 1.480449944004235 - 1.7185959104615378 - 1.5304499440042356 - 2.3185959104615392 - 2.298230198001665 - 1.568276294294325 - 1.930782930180241 - 1.7441187608519906 - 1.6298149781791125 - 1.6638000210052253 - 2.3362279527410976 - 6.215622878108036 - 7.275766504742279 - 7.314250439974167 - 7.528503063339656 - 7.033289829366396 - 5.311432734310159 - 4.080449944004233 - 1.7185959104615378 - 4.042949944004235 - 2.468595910461538 - 2.5982301980016658 - 2.243276294294324 - 2.2682829301802414 - 1.331618760851991 - 1.8548149781791121 - 1.5138000210052258 - 1.9987279527410973 - 13.615622878108042 - 8.914250439974168 - 2.617857443870772 - -0.020698756342492075 - 0.689301243657507 - 3.1293012436575083 - 2.3486741154642674 - 10.115622878108042 - 6.414250439974168 - 3.317857443870775 - 1.4093012436575076 - 0.3193012436575078 - 2.189301243657507 - 0.6586741154642675 - 8.915622878108039 - 7.51425043997417 - 3.0178574438707706 - 1.439301243657507 - -0.6206987563424917 - 0.8793012436575083 - 2.158674115464268 - 
10.115622878108042 - 10.01425043997417 - 3.917857443870769 - 0.12930124365750828 - 2.3486741154642674 - 0.3193012436575078 - 0.689301243657507 - 2.158674115464268 - 8.915622878108039 - 6.51425043997417 - 4.533289829366396 - 2.117857443870772 - 0.5381907245488051 - 0.9081907245488061 - 2.599956711545751 - 1.519301243657507 - 3.5858449755949513 - 4.060262123100171 - 3.3886741154642674 - 7.915622878108039 - 8.51425043997417 - 5.533289829366396 - 5.317857443870775 - -1.6618092754511942 - -1.3418092754511939 - 3.0899567115457494 - 0.3193012436575078 - 2.7558449755949512 - 2.3702621231001704 - 4.1386741154642674 - 2.6156228781080415 - 6.01425043997417 - 5.233289829366399 - 5.917857443870769 - -0.6618092754511942 - 0.008190724548807538 - 4.779956711545751 - 3.2093012436575084 - 0.5058449755949512 - 3.6102621231001715 - 6.838674115464267 - 10.115622878108042 - 5.975766504742282 - 8.51425043997417 - 10.028503063339656 - 4.433289829366402 - 2.1114327343101564 - 1.5185959104615385 - 2.3885959104615395 - -0.2942698019983343 - -1.2867237057056755 - -0.8467170698197588 - -1.4033812391480094 - -0.6251850218208874 - -0.43869997899477475 - 0.12372795274109727 - 11.759301243657509 - 13.32930124365751 - 5.639301243657508 - -0.6206987563424917 - -0.36069875634249193 - 3.6893012436575088 - 2.9156228781080387 - 1.2142504399741725 - 4.733289829366399 - 9.91785744387077 - 8.138190724548807 - 7.47995671154575 - 7.958190724548807 - 7.589956711545749 - 5.269301243657507 - 3.885844975594952 - 7.130262123100171 - 5.458674115464268 - 12.815622878108044 - 7.814250439974167 - 4.233289829366399 - -0.882142556129228 - 2.338190724548806 - 1.47995671154575 - 2.8981907245488046 - 2.4499567115457523 - 3.809301243657506 - 1.0758449755949515 - 2.930262123100171 - 5.228674115464267 - -1.1843771218919557 - 5.7142504399741725 - -2.466710170633604 - 0.8178574438707713 - 4.038190724548805 - 0.07995671154575135 - 4.328190724548804 - 0.7299567115457499 - 0.9193012436575074 - 0.9558449755949505 - 
2.260262123100171 - 3.358674115464267 - 1.2593012436575073 - 13.115622878108042 - 11.01425043997417 - 5.617857443870772 - 0.8793012436575083 - 1.5886741154642676 - 0.5093012436575073 - 1.7786741154642671 - 2.7593012436575073 - 2.189301243657507 - 3.2093012436575084 - 0.1804499440042342 - 0.6304499440042335 - 13.815622878108044 - 8.114250439974171 - 4.480449944004235 - 4.490449944004233 - 3.005730198001668 - 1.783282930180242 - 3.0804499440042328 - 3.3704499440042355 - 13.415622878108039 - 7.414250439974168 - 2.485730198001665 - 1.4432829301802421 - 3.5804499440042328 - 3.6304499440042335 residual_model_expected: intercepts: - 25.015622878108047 - 18.675766504742285 - 10.714250439974172 - 16.92850306333966 - 13.8332898293664 - 7.611432734310156 - 5.780449944004236 - 4.918595910461541 - 5.730449944004235 - 5.093595910461538 - 3.573230198001667 - 2.8432762942943253 - 2.9807829301802418 - 2.56911876085199 - 2.229814978179112 - 2.901300021005225 - 4.398727952741098 - 22.715622878108036 - 16.875766504742288 - 18.114250439974164 - 15.628503063339657 - 10.733289829366399 - 3.511432734310162 - 3.980449944004235 - 2.718595910461538 - 3.855449944004235 - 3.4435959104615392 - 3.3482301980016658 - 3.2932762942943246 - 0.8432829301802407 - 2.3066187608519906 - 2.417314978179112 - 4.026300021005225 - 2.486227952741097 - 12.115622878108042 - 18.975766504742282 - 10.314250439974167 - 9.228503063339659 - 9.933289829366402 - 6.811432734310159 - 1.480449944004235 - 1.7185959104615378 - 1.5304499440042356 - 2.3185959104615392 - 2.298230198001665 - 1.568276294294325 - 1.930782930180241 - 1.7441187608519906 - 1.6298149781791125 - 1.6638000210052253 - 2.3362279527410976 - 6.215622878108036 - 7.275766504742279 - 7.314250439974167 - 7.528503063339656 - 7.033289829366396 - 5.311432734310159 - 4.080449944004233 - 1.7185959104615378 - 4.042949944004235 - 2.468595910461538 - 2.5982301980016658 - 2.243276294294324 - 2.2682829301802414 - 1.331618760851991 - 1.8548149781791121 - 1.5138000210052258 
- 1.9987279527410973 - 13.615622878108042 - 8.914250439974168 - 2.617857443870772 - -0.020698756342492075 - 0.689301243657507 - 3.1293012436575083 - 2.3486741154642674 - 10.115622878108042 - 6.414250439974168 - 3.317857443870775 - 1.4093012436575076 - 0.3193012436575078 - 2.189301243657507 - 0.6586741154642675 - 8.915622878108039 - 7.51425043997417 - 3.0178574438707706 - 1.439301243657507 - -0.6206987563424917 - 0.8793012436575083 - 2.158674115464268 - 10.115622878108042 - 10.01425043997417 - 3.917857443870769 - 0.12930124365750828 - 2.3486741154642674 - 0.3193012436575078 - 0.689301243657507 - 2.158674115464268 - 8.915622878108039 - 6.51425043997417 - 4.533289829366396 - 2.117857443870772 - 0.5381907245488051 - 0.9081907245488061 - 2.599956711545751 - 1.519301243657507 - 3.5858449755949513 - 4.060262123100171 - 3.3886741154642674 - 7.915622878108039 - 8.51425043997417 - 5.533289829366396 - 5.317857443870775 - -1.6618092754511942 - -1.3418092754511939 - 3.0899567115457494 - 0.3193012436575078 - 2.7558449755949512 - 2.3702621231001704 - 4.1386741154642674 - 2.6156228781080415 - 6.01425043997417 - 5.233289829366399 - 5.917857443870769 - -0.6618092754511942 - 0.008190724548807538 - 4.779956711545751 - 3.2093012436575084 - 0.5058449755949512 - 3.6102621231001715 - 6.838674115464267 - 10.115622878108042 - 5.975766504742282 - 8.51425043997417 - 10.028503063339656 - 4.433289829366402 - 2.1114327343101564 - 1.5185959104615385 - 2.3885959104615395 - -0.2942698019983343 - -1.2867237057056755 - -0.8467170698197588 - -1.4033812391480094 - -0.6251850218208874 - -0.43869997899477475 - 0.12372795274109727 - 11.759301243657509 - 13.32930124365751 - 5.639301243657508 - -0.6206987563424917 - -0.36069875634249193 - 3.6893012436575088 - 2.9156228781080387 - 1.2142504399741725 - 4.733289829366399 - 9.91785744387077 - 8.138190724548807 - 7.47995671154575 - 7.958190724548807 - 7.589956711545749 - 5.269301243657507 - 3.885844975594952 - 7.130262123100171 - 5.458674115464268 - 
12.815622878108044 - 7.814250439974167 - 4.233289829366399 - -0.882142556129228 - 2.338190724548806 - 1.47995671154575 - 2.8981907245488046 - 2.4499567115457523 - 3.809301243657506 - 1.0758449755949515 - 2.930262123100171 - 5.228674115464267 - -1.1843771218919557 - 5.7142504399741725 - -2.466710170633604 - 0.8178574438707713 - 4.038190724548805 - 0.07995671154575135 - 4.328190724548804 - 0.7299567115457499 - 0.9193012436575074 - 0.9558449755949505 - 2.260262123100171 - 3.358674115464267 - 1.2593012436575073 - 13.115622878108042 - 11.01425043997417 - 5.617857443870772 - 0.8793012436575083 - 1.5886741154642676 - 0.5093012436575073 - 1.7786741154642671 - 2.7593012436575073 - 2.189301243657507 - 3.2093012436575084 - 0.1804499440042342 - 0.6304499440042335 - 13.815622878108044 - 8.114250439974171 - 4.480449944004235 - 4.490449944004233 - 3.005730198001668 - 1.783282930180242 - 3.0804499440042328 - 3.3704499440042355 - 13.415622878108039 - 7.414250439974168 - 2.485730198001665 - 1.4432829301802421 - 3.5804499440042328 - 3.6304499440042335 linear_terms: - - 3.480506441842877 - 3.8712304612359034 - 4.165692188910668 - 4.386622308606341 - 4.550399630758491 - 4.751877523597983 - 4.842268154536409 - 4.713742622150146 - 4.842268154536409 - 4.713742622150146 - 4.483425879433607 - 4.2077176825347635 - 3.915982374815913 - 3.6241891423568875 - 3.3411505110783217 - 3.0716442642808213 - 2.81808670681996 - 3.480506441842877 - 3.8712304612359034 - 4.165692188910668 - 4.386622308606341 - 4.550399630758491 - 4.751877523597983 - 4.842268154536409 - 4.713742622150146 - 4.842268154536409 - 4.713742622150146 - 4.483425879433607 - 4.2077176825347635 - 3.915982374815913 - 3.6241891423568875 - 3.3411505110783217 - 3.0716442642808213 - 2.81808670681996 - 3.480506441842877 - 3.8712304612359034 - 4.165692188910668 - 4.386622308606341 - 4.550399630758491 - 4.751877523597983 - 4.842268154536409 - 4.713742622150146 - 4.842268154536409 - 4.713742622150146 - 4.483425879433607 - 4.2077176825347635 - 
3.915982374815913 - 3.6241891423568875 - 3.3411505110783217 - 3.0716442642808213 - 2.81808670681996 - 3.480506441842877 - 3.8712304612359034 - 4.165692188910668 - 4.386622308606341 - 4.550399630758491 - 4.751877523597983 - 4.842268154536409 - 4.713742622150146 - 4.842268154536409 - 4.713742622150146 - 4.483425879433607 - 4.2077176825347635 - 3.915982374815913 - 3.6241891423568875 - 3.3411505110783217 - 3.0716442642808213 - 2.81808670681996 - 3.480506441842877 - 4.165692188910668 - 4.8359539116697725 - 4.348989444230492 - 4.348989444230492 - 4.348989444230492 - 2.697639699884843 - 3.480506441842877 - 4.165692188910668 - 4.8359539116697725 - 4.348989444230492 - 4.348989444230492 - 4.348989444230492 - 2.697639699884843 - 3.480506441842877 - 4.165692188910668 - 4.8359539116697725 - 4.348989444230492 - 4.348989444230492 - 4.348989444230492 - 2.697639699884843 - 3.480506441842877 - 4.165692188910668 - 4.8359539116697725 - 4.348989444230492 - 2.697639699884843 - 4.348989444230492 - 4.348989444230492 - 2.697639699884843 - 3.480506441842877 - 4.165692188910668 - 4.550399630758491 - 4.8359539116697725 - 4.795984320324443 - 4.795984320324443 - 4.606904553696424 - 4.348989444230492 - 3.7694207275538334 - 3.20451303242801 - 2.697639699884843 - 3.480506441842877 - 4.165692188910668 - 4.550399630758491 - 4.8359539116697725 - 4.795984320324443 - 4.795984320324443 - 4.606904553696424 - 4.348989444230492 - 3.7694207275538334 - 3.20451303242801 - 2.697639699884843 - 3.480506441842877 - 4.165692188910668 - 4.550399630758491 - 4.8359539116697725 - 4.795984320324443 - 4.795984320324443 - 4.606904553696424 - 4.348989444230492 - 3.7694207275538334 - 3.20451303242801 - 2.697639699884843 - 3.480506441842877 - 3.8712304612359034 - 4.165692188910668 - 4.386622308606341 - 4.550399630758491 - 4.751877523597983 - 4.713742622150146 - 4.713742622150146 - 4.483425879433607 - 4.2077176825347635 - 3.915982374815913 - 3.6241891423568875 - 3.3411505110783217 - 3.0716442642808213 - 2.81808670681996 - 
4.348989444230492 - 4.348989444230492 - 4.348989444230492 - 4.348989444230492 - 4.348989444230492 - 4.348989444230492 - 3.480506441842877 - 4.165692188910668 - 4.550399630758491 - 4.8359539116697725 - 4.795984320324443 - 4.606904553696424 - 4.795984320324443 - 4.606904553696424 - 4.348989444230492 - 3.7694207275538334 - 3.20451303242801 - 2.697639699884843 - 3.480506441842877 - 4.165692188910668 - 4.550399630758491 - 4.8359539116697725 - 4.795984320324443 - 4.606904553696424 - 4.795984320324443 - 4.606904553696424 - 4.348989444230492 - 3.7694207275538334 - 3.20451303242801 - 2.697639699884843 - 3.480506441842877 - 4.165692188910668 - 4.550399630758491 - 4.8359539116697725 - 4.795984320324443 - 4.606904553696424 - 4.795984320324443 - 4.606904553696424 - 4.348989444230492 - 3.7694207275538334 - 3.20451303242801 - 2.697639699884843 - 4.348989444230492 - 3.480506441842877 - 4.165692188910668 - 4.8359539116697725 - 4.348989444230492 - 2.697639699884843 - 4.348989444230492 - 2.697639699884843 - 4.348989444230492 - 4.348989444230492 - 4.348989444230492 - 4.842268154536409 - 4.842268154536409 - 3.480506441842877 - 4.165692188910668 - 4.842268154536409 - 4.842268154536409 - 4.483425879433607 - 3.915982374815913 - 4.842268154536409 - 4.842268154536409 - 3.480506441842877 - 4.165692188910668 - 4.483425879433607 - 3.915982374815913 - 4.842268154536409 - 4.842268154536409 - - 63.15476421569454 - 55.92600783938173 - 49.9146681808974 - 44.846354813932926 - 40.5229744079594 - 33.56053509484467 - 24.03332128329257 - 17.92731548788399 - 24.03332128329257 - 17.92731548788399 - 13.762137034267115 - 10.793196717608852 - 8.60666628497297 - 6.955196266780662 - 5.6825308893242505 - 4.685572737243362 - 3.8938723589600377 - 63.15476421569454 - 55.92600783938173 - 49.9146681808974 - 44.84635481393292 - 40.5229744079594 - 33.56053509484467 - 24.03332128329257 - 17.92731548788399 - 24.03332128329257 - 17.92731548788399 - 13.762137034267115 - 10.793196717608852 - 8.60666628497297 - 
6.955196266780662 - 5.6825308893242505 - 4.685572737243362 - 3.8938723589600377 - 63.15476421569454 - 55.92600783938173 - 49.9146681808974 - 44.846354813932926 - 40.5229744079594 - 33.56053509484467 - 24.03332128329257 - 17.92731548788399 - 24.03332128329257 - 17.92731548788399 - 13.762137034267115 - 10.793196717608854 - 8.60666628497297 - 6.955196266780662 - 5.6825308893242505 - 4.685572737243362 - 3.8938723589600377 - 63.15476421569454 - 55.926007839381725 - 49.9146681808974 - 44.846354813932926 - 40.5229744079594 - 33.56053509484467 - 24.03332128329257 - 17.92731548788399 - 24.03332128329257 - 17.92731548788399 - 13.762137034267115 - 10.793196717608852 - 8.60666628497297 - 6.955196266780662 - 5.6825308893242505 - 4.685572737243362 - 3.8938723589600377 - 63.15476421569454 - 49.9146681808974 - 28.22609468926748 - 12.159194348241293 - 12.159194348241295 - 12.159194348241295 - 3.5589080859646285 - 63.15476421569454 - 49.9146681808974 - 28.22609468926748 - 12.159194348241293 - 12.159194348241293 - 12.159194348241295 - 3.5589080859646285 - 63.15476421569454 - 49.9146681808974 - 28.22609468926748 - 12.159194348241295 - 12.159194348241293 - 12.159194348241295 - 3.5589080859646285 - 63.15476421569454 - 49.9146681808974 - 28.22609468926748 - 12.159194348241293 - 3.5589080859646285 - 12.159194348241293 - 12.159194348241295 - 3.5589080859646285 - 63.15476421569454 - 49.9146681808974 - 40.5229744079594 - 28.22609468926748 - 20.670151246699717 - 20.670151246699717 - 15.659359279812536 - 12.159194348241293 - 7.7253890215938394 - 5.154589183118886 - 3.558908085964629 - 63.15476421569454 - 49.9146681808974 - 40.5229744079594 - 28.22609468926748 - 20.670151246699717 - 20.670151246699717 - 15.659359279812536 - 12.159194348241293 - 7.7253890215938394 - 5.154589183118886 - 3.558908085964629 - 63.15476421569455 - 49.9146681808974 - 40.5229744079594 - 28.22609468926748 - 20.670151246699717 - 20.670151246699717 - 15.659359279812536 - 12.159194348241293 - 7.7253890215938394 - 
5.154589183118886 - 3.558908085964629 - 63.15476421569454 - 55.926007839381725 - 49.9146681808974 - 44.846354813932926 - 40.5229744079594 - 33.56053509484467 - 17.92731548788399 - 17.92731548788399 - 13.762137034267115 - 10.793196717608854 - 8.60666628497297 - 6.955196266780662 - 5.6825308893242505 - 4.685572737243362 - 3.8938723589600377 - 12.159194348241293 - 12.159194348241293 - 12.159194348241293 - 12.159194348241293 - 12.159194348241293 - 12.159194348241293 - 63.15476421569455 - 49.9146681808974 - 40.5229744079594 - 28.226094689267484 - 20.670151246699717 - 15.659359279812538 - 20.670151246699717 - 15.659359279812538 - 12.159194348241293 - 7.7253890215938394 - 5.154589183118885 - 3.558908085964629 - 63.15476421569454 - 49.9146681808974 - 40.5229744079594 - 28.22609468926748 - 20.670151246699717 - 15.659359279812536 - 20.670151246699717 - 15.659359279812536 - 12.159194348241293 - 7.7253890215938394 - 5.154589183118886 - 3.558908085964629 - 63.15476421569455 - 49.9146681808974 - 40.5229744079594 - 28.22609468926748 - 20.670151246699717 - 15.659359279812536 - 20.670151246699717 - 15.659359279812536 - 12.159194348241295 - 7.7253890215938394 - 5.154589183118886 - 3.558908085964628 - 12.159194348241295 - 63.15476421569454 - 49.9146681808974 - 28.22609468926748 - 12.159194348241295 - 3.5589080859646285 - 12.159194348241295 - 3.5589080859646285 - 12.159194348241295 - 12.159194348241295 - 12.159194348241293 - 24.03332128329257 - 24.03332128329257 - 63.15476421569454 - 49.9146681808974 - 24.03332128329257 - 24.03332128329257 - 13.762137034267115 - 8.60666628497297 - 24.03332128329257 - 24.03332128329257 - 63.15476421569454 - 49.9146681808974 - 13.762137034267115 - 8.60666628497297 - 24.03332128329257 - 24.03332128329257 - - 56.638796479154635 - 52.032136772388945 - 47.77743791348329 - 43.94566363511593 - 40.5229744079594 - 34.7426501626184 - 26.325776492983195 - 20.60237968739701 - 26.325776492983195 - 20.60237968739701 - 16.515672503683682 - 13.484113409516121 - 
11.16768678177973 - 9.355872580735188 - 7.911893266969131 - 6.743263411989219 - 5.785333776105146 - 56.638796479154635 - 52.032136772388945 - 47.77743791348329 - 43.94566363511592 - 40.522974407959396 - 34.7426501626184 - 26.325776492983195 - 20.60237968739701 - 26.325776492983195 - 20.60237968739701 - 16.515672503683682 - 13.484113409516121 - 11.16768678177973 - 9.355872580735188 - 7.911893266969131 - 6.743263411989219 - 5.785333776105146 - 56.638796479154635 - 52.032136772388945 - 47.77743791348329 - 43.94566363511593 - 40.522974407959396 - 34.7426501626184 - 26.325776492983195 - 20.60237968739701 - 26.325776492983195 - 20.60237968739701 - 16.515672503683682 - 13.484113409516123 - 11.16768678177973 - 9.355872580735188 - 7.911893266969131 - 6.743263411989219 - 5.785333776105146 - 56.638796479154635 - 52.032136772388945 - 47.77743791348329 - 43.94566363511593 - 40.522974407959396 - 34.7426501626184 - 26.325776492983195 - 20.60237968739701 - 26.325776492983195 - 20.60237968739701 - 16.515672503683682 - 13.484113409516121 - 11.16768678177973 - 9.355872580735188 - 7.911893266969131 - 6.743263411989219 - 5.785333776105146 - 56.638796479154635 - 47.77743791348328 - 30.102395781328614 - 14.89321437920679 - 14.89321437920679 - 14.89321437920679 - 5.370328369240038 - 56.638796479154635 - 47.77743791348328 - 30.102395781328614 - 14.89321437920679 - 14.89321437920679 - 14.89321437920679 - 5.370328369240038 - 56.638796479154635 - 47.77743791348329 - 30.102395781328614 - 14.89321437920679 - 14.89321437920679 - 14.89321437920679 - 5.370328369240038 - 56.638796479154635 - 47.77743791348329 - 30.102395781328614 - 14.89321437920679 - 5.370328369240038 - 14.89321437920679 - 14.89321437920679 - 5.370328369240038 - 56.638796479154635 - 47.77743791348329 - 40.522974407959396 - 30.102395781328614 - 23.208590873487402 - 23.208590873487402 - 18.398363818154305 - 14.89321437920679 - 10.208549778534715 - 7.2977158685733325 - 5.370328369240038 - 56.638796479154635 - 47.77743791348329 - 
40.522974407959396 - 30.102395781328614 - 23.208590873487402 - 23.208590873487402 - 18.398363818154305 - 14.89321437920679 - 10.208549778534715 - 7.2977158685733325 - 5.370328369240038 - 56.638796479154635 - 47.77743791348329 - 40.522974407959396 - 30.102395781328614 - 23.208590873487402 - 23.208590873487402 - 18.398363818154305 - 14.89321437920679 - 10.208549778534717 - 7.2977158685733325 - 5.370328369240037 - 56.638796479154635 - 52.032136772388945 - 47.77743791348329 - 43.94566363511593 - 40.522974407959396 - 34.7426501626184 - 20.60237968739701 - 20.60237968739701 - 16.515672503683682 - 13.484113409516123 - 11.16768678177973 - 9.355872580735188 - 7.911893266969132 - 6.743263411989219 - 5.785333776105146 - 14.893214379206793 - 14.893214379206793 - 14.893214379206793 - 14.89321437920679 - 14.89321437920679 - 14.89321437920679 - 56.638796479154635 - 47.77743791348329 - 40.522974407959396 - 30.10239578132861 - 23.208590873487402 - 18.398363818154305 - 23.208590873487402 - 18.398363818154305 - 14.89321437920679 - 10.208549778534715 - 7.2977158685733325 - 5.370328369240037 - 56.638796479154635 - 47.77743791348329 - 40.522974407959396 - 30.102395781328614 - 23.208590873487402 - 18.398363818154305 - 23.208590873487402 - 18.398363818154305 - 14.89321437920679 - 10.208549778534717 - 7.2977158685733325 - 5.370328369240038 - 56.638796479154635 - 47.77743791348329 - 40.522974407959396 - 30.102395781328614 - 23.208590873487402 - 18.398363818154305 - 23.208590873487402 - 18.398363818154305 - 14.89321437920679 - 10.208549778534717 - 7.2977158685733325 - 5.370328369240038 - 14.89321437920679 - 56.638796479154635 - 47.77743791348329 - 30.102395781328614 - 14.89321437920679 - 5.370328369240038 - 14.89321437920679 - 5.370328369240038 - 14.89321437920679 - 14.89321437920679 - 14.89321437920679 - 26.325776492983195 - 26.325776492983195 - 56.638796479154635 - 47.77743791348328 - 26.325776492983195 - 26.325776492983195 - 16.515672503683682 - 11.16768678177973 - 26.325776492983195 - 
26.325776492983195 - 56.638796479154635 - 47.77743791348328 - 16.515672503683682 - 11.16768678177973 - 26.325776492983195 - 26.325776492983195 square_terms: - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - 
- 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 
0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 
0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 
0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 
- - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 
0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 - - 0.0 - 0.0 - 0.0 residuals_candidate: - - -3.480506441842877 - -3.8712304612359034 - -4.165692188910668 - -4.386622308606341 - -4.550399630758491 - -4.751877523597983 - -4.842268154536409 - -4.713742622150146 - -4.842268154536409 - -4.713742622150146 - -4.483425879433607 - -4.2077176825347635 - -3.915982374815913 - -3.6241891423568875 - -3.3411505110783217 - -3.0716442642808213 - -2.81808670681996 - -3.480506441842877 - -3.8712304612359034 - -4.165692188910668 - -4.386622308606341 - -4.550399630758491 - -4.751877523597983 - -4.842268154536409 - -4.713742622150146 - 
-4.842268154536409 - -4.713742622150146 - -4.483425879433607 - -4.2077176825347635 - -3.915982374815913 - -3.6241891423568875 - -3.3411505110783217 - -3.0716442642808213 - -2.81808670681996 - -3.480506441842877 - -3.8712304612359034 - -4.165692188910668 - -4.386622308606341 - -4.550399630758491 - -4.751877523597983 - -4.842268154536409 - -4.713742622150146 - -4.842268154536409 - -4.713742622150146 - -4.483425879433607 - -4.2077176825347635 - -3.915982374815913 - -3.6241891423568875 - -3.3411505110783217 - -3.0716442642808213 - -2.81808670681996 - -3.480506441842877 - -3.8712304612359034 - -4.165692188910668 - -4.386622308606341 - -4.550399630758491 - -4.751877523597983 - -4.842268154536409 - -4.713742622150146 - -4.842268154536409 - -4.713742622150146 - -4.483425879433607 - -4.2077176825347635 - -3.915982374815913 - -3.6241891423568875 - -3.3411505110783217 - -3.0716442642808213 - -2.81808670681996 - -3.480506441842877 - -4.165692188910668 - -4.8359539116697725 - -4.348989444230492 - -4.348989444230492 - -4.348989444230492 - -2.697639699884843 - -3.480506441842877 - -4.165692188910668 - -4.8359539116697725 - -4.348989444230492 - -4.348989444230492 - -4.348989444230492 - -2.697639699884843 - -3.480506441842877 - -4.165692188910668 - -4.8359539116697725 - -4.348989444230492 - -4.348989444230492 - -4.348989444230492 - -2.697639699884843 - -3.480506441842877 - -4.165692188910668 - -4.8359539116697725 - -4.348989444230492 - -2.697639699884843 - -4.348989444230492 - -4.348989444230492 - -2.697639699884843 - -3.480506441842877 - -4.165692188910668 - -4.550399630758491 - -4.8359539116697725 - -4.795984320324443 - -4.795984320324443 - -4.606904553696424 - -4.348989444230492 - -3.7694207275538334 - -3.20451303242801 - -2.697639699884843 - -3.480506441842877 - -4.165692188910668 - -4.550399630758491 - -4.8359539116697725 - -4.795984320324443 - -4.795984320324443 - -4.606904553696424 - -4.348989444230492 - -3.7694207275538334 - -3.20451303242801 - -2.697639699884843 - 
-3.480506441842877 - -4.165692188910668 - -4.550399630758491 - -4.8359539116697725 - -4.795984320324443 - -4.795984320324443 - -4.606904553696424 - -4.348989444230492 - -3.7694207275538334 - -3.20451303242801 - -2.697639699884843 - -3.480506441842877 - -3.8712304612359034 - -4.165692188910668 - -4.386622308606341 - -4.550399630758491 - -4.751877523597983 - -4.713742622150146 - -4.713742622150146 - -4.483425879433607 - -4.2077176825347635 - -3.915982374815913 - -3.6241891423568875 - -3.3411505110783217 - -3.0716442642808213 - -2.81808670681996 - -4.348989444230492 - -4.348989444230492 - -4.348989444230492 - -4.348989444230492 - -4.348989444230492 - -4.348989444230492 - -3.480506441842877 - -4.165692188910668 - -4.550399630758491 - -4.8359539116697725 - -4.795984320324443 - -4.606904553696424 - -4.795984320324443 - -4.606904553696424 - -4.348989444230492 - -3.7694207275538334 - -3.20451303242801 - -2.697639699884843 - -3.480506441842877 - -4.165692188910668 - -4.550399630758491 - -4.8359539116697725 - -4.795984320324443 - -4.606904553696424 - -4.795984320324443 - -4.606904553696424 - -4.348989444230492 - -3.7694207275538334 - -3.20451303242801 - -2.697639699884843 - -3.480506441842877 - -4.165692188910668 - -4.550399630758491 - -4.8359539116697725 - -4.795984320324443 - -4.606904553696424 - -4.795984320324443 - -4.606904553696424 - -4.348989444230492 - -3.7694207275538334 - -3.20451303242801 - -2.697639699884843 - -4.348989444230492 - -3.480506441842877 - -4.165692188910668 - -4.8359539116697725 - -4.348989444230492 - -2.697639699884843 - -4.348989444230492 - -2.697639699884843 - -4.348989444230492 - -4.348989444230492 - -4.348989444230492 - -4.842268154536409 - -4.842268154536409 - -3.480506441842877 - -4.165692188910668 - -4.842268154536409 - -4.842268154536409 - -4.483425879433607 - -3.915982374815913 - -4.842268154536409 - -4.842268154536409 - -3.480506441842877 - -4.165692188910668 - -4.483425879433607 - -3.915982374815913 - -4.842268154536409 - 
-4.842268154536409 - - 59.674257773851664 - 52.05477737814583 - 45.74897599198673 - 40.459732505326585 - 35.97257477720091 - 28.808657571246684 - 19.19105312875616 - 13.213572865733845 - 19.19105312875616 - 13.213572865733845 - 9.278711154833507 - 6.585479035074089 - 4.690683910157058 - 3.331007124423775 - 2.341380378245929 - 1.6139284729625407 - 1.0757856521400777 - 59.674257773851664 - 52.05477737814583 - 45.74897599198673 - 40.45973250532658 - 35.97257477720091 - 28.808657571246684 - 19.19105312875616 - 13.213572865733845 - 19.19105312875616 - 13.213572865733845 - 9.278711154833507 - 6.585479035074089 - 4.690683910157058 - 3.331007124423775 - 2.341380378245929 - 1.6139284729625407 - 1.0757856521400777 - 59.674257773851664 - 52.05477737814583 - 45.74897599198673 - 40.459732505326585 - 35.97257477720091 - 28.808657571246684 - 19.19105312875616 - 13.213572865733845 - 19.19105312875616 - 13.213572865733845 - 9.278711154833507 - 6.585479035074091 - 4.690683910157058 - 3.331007124423775 - 2.341380378245929 - 1.6139284729625407 - 1.0757856521400777 - 59.674257773851664 - 52.05477737814582 - 45.74897599198673 - 40.459732505326585 - 35.97257477720091 - 28.808657571246684 - 19.19105312875616 - 13.213572865733845 - 19.19105312875616 - 13.213572865733845 - 9.278711154833507 - 6.585479035074089 - 4.690683910157058 - 3.331007124423775 - 2.341380378245929 - 1.6139284729625407 - 1.0757856521400777 - 59.674257773851664 - 45.74897599198673 - 23.390140777597708 - 7.810204904010802 - 7.810204904010803 - 7.810204904010803 - 0.8612683860797854 - 59.674257773851664 - 45.74897599198673 - 23.390140777597708 - 7.810204904010801 - 7.810204904010801 - 7.810204904010803 - 0.8612683860797854 - 59.674257773851664 - 45.74897599198673 - 23.390140777597708 - 7.810204904010803 - 7.810204904010802 - 7.810204904010803 - 0.8612683860797854 - 59.674257773851664 - 45.74897599198673 - 23.390140777597708 - 7.810204904010802 - 0.8612683860797854 - 7.810204904010801 - 7.810204904010803 - 
0.8612683860797854 - 59.674257773851664 - 45.74897599198673 - 35.97257477720091 - 23.390140777597708 - 15.874166926375274 - 15.874166926375274 - 11.052454726116112 - 7.810204904010801 - 3.955968294040006 - 1.950076150690876 - 0.8612683860797858 - 59.674257773851664 - 45.74897599198673 - 35.97257477720091 - 23.390140777597708 - 15.874166926375274 - 15.874166926375274 - 11.052454726116112 - 7.810204904010801 - 3.955968294040006 - 1.950076150690876 - 0.8612683860797858 - 59.67425777385167 - 45.74897599198673 - 35.97257477720091 - 23.390140777597708 - 15.874166926375274 - 15.874166926375274 - 11.052454726116112 - 7.810204904010801 - 3.955968294040006 - 1.950076150690876 - 0.8612683860797858 - 59.674257773851664 - 52.05477737814582 - 45.74897599198673 - 40.459732505326585 - 35.97257477720091 - 28.808657571246684 - 13.213572865733845 - 13.213572865733845 - 9.278711154833507 - 6.58547903507409 - 4.690683910157058 - 3.3310071244237753 - 2.341380378245929 - 1.6139284729625407 - 1.0757856521400777 - 7.810204904010801 - 7.810204904010801 - 7.810204904010801 - 7.810204904010802 - 7.810204904010802 - 7.810204904010801 - 59.67425777385167 - 45.74897599198673 - 35.97257477720091 - 23.39014077759771 - 15.874166926375274 - 11.052454726116114 - 15.874166926375274 - 11.052454726116114 - 7.810204904010801 - 3.955968294040006 - 1.950076150690875 - 0.8612683860797858 - 59.674257773851664 - 45.74897599198673 - 35.97257477720091 - 23.390140777597708 - 15.874166926375274 - 11.052454726116112 - 15.874166926375274 - 11.052454726116112 - 7.810204904010801 - 3.955968294040006 - 1.950076150690876 - 0.8612683860797858 - 59.67425777385167 - 45.74897599198673 - 35.97257477720091 - 23.390140777597708 - 15.874166926375274 - 11.052454726116112 - 15.874166926375274 - 11.052454726116112 - 7.810204904010803 - 3.955968294040006 - 1.950076150690876 - 0.861268386079785 - 7.810204904010803 - 59.674257773851664 - 45.74897599198673 - 23.390140777597708 - 7.810204904010803 - 0.8612683860797854 - 
7.810204904010803 - 0.8612683860797854 - 7.810204904010803 - 7.810204904010803 - 7.810204904010801 - 19.19105312875616 - 19.19105312875616 - 59.674257773851664 - 45.74897599198673 - 19.19105312875616 - 19.19105312875616 - 9.278711154833507 - 4.690683910157058 - 19.19105312875616 - 19.19105312875616 - 59.674257773851664 - 45.74897599198673 - 9.278711154833507 - 4.690683910157058 - 19.19105312875616 - 19.19105312875616 - - 53.15829003731176 - 48.16090631115304 - 43.61174572457262 - 39.559041326509586 - 35.97257477720091 - 29.99077263902042 - 21.483508338446786 - 15.888637065246865 - 21.483508338446786 - 15.888637065246865 - 12.032246624250076 - 9.276395726981358 - 7.2517044069638175 - 5.7316834383783 - 4.57074275589081 - 3.671619147708398 - 2.967247069285186 - 53.15829003731176 - 48.16090631115304 - 43.61174572457262 - 39.55904132650958 - 35.972574777200904 - 29.99077263902042 - 21.483508338446786 - 15.888637065246865 - 21.483508338446786 - 15.888637065246865 - 12.032246624250076 - 9.276395726981358 - 7.2517044069638175 - 5.7316834383783 - 4.57074275589081 - 3.671619147708398 - 2.967247069285186 - 53.15829003731176 - 48.16090631115304 - 43.61174572457262 - 39.559041326509586 - 35.972574777200904 - 29.99077263902042 - 21.483508338446786 - 15.888637065246865 - 21.483508338446786 - 15.888637065246865 - 12.032246624250076 - 9.27639572698136 - 7.2517044069638175 - 5.731683438378299 - 4.57074275589081 - 3.671619147708398 - 2.967247069285186 - 53.15829003731176 - 48.16090631115304 - 43.61174572457262 - 39.559041326509586 - 35.972574777200904 - 29.99077263902042 - 21.483508338446786 - 15.888637065246865 - 21.483508338446786 - 15.888637065246865 - 12.032246624250076 - 9.276395726981358 - 7.2517044069638175 - 5.731683438378299 - 4.57074275589081 - 3.671619147708398 - 2.967247069285186 - 53.15829003731176 - 43.611745724572614 - 25.26644186965884 - 10.544224934976299 - 10.544224934976299 - 10.544224934976299 - 2.6726886693551952 - 53.15829003731176 - 43.611745724572614 - 
25.26644186965884 - 10.544224934976299 - 10.544224934976299 - 10.544224934976299 - 2.672688669355195 - 53.15829003731176 - 43.61174572457262 - 25.26644186965884 - 10.544224934976299 - 10.544224934976299 - 10.544224934976299 - 2.6726886693551952 - 53.15829003731176 - 43.61174572457262 - 25.26644186965884 - 10.544224934976299 - 2.6726886693551952 - 10.544224934976299 - 10.544224934976299 - 2.6726886693551952 - 53.15829003731176 - 43.61174572457262 - 35.972574777200904 - 25.26644186965884 - 18.41260655316296 - 18.41260655316296 - 13.791459264457881 - 10.544224934976299 - 6.439129050980882 - 4.0932028361453225 - 2.6726886693551952 - 53.15829003731176 - 43.61174572457262 - 35.972574777200904 - 25.26644186965884 - 18.41260655316296 - 18.41260655316296 - 13.791459264457881 - 10.544224934976299 - 6.439129050980882 - 4.0932028361453225 - 2.6726886693551952 - 53.15829003731176 - 43.61174572457262 - 35.972574777200904 - 25.26644186965884 - 18.41260655316296 - 18.41260655316296 - 13.791459264457881 - 10.544224934976299 - 6.439129050980883 - 4.0932028361453225 - 2.6726886693551943 - 53.15829003731176 - 48.16090631115304 - 43.61174572457262 - 39.559041326509586 - 35.972574777200904 - 29.99077263902042 - 15.888637065246865 - 15.888637065246865 - 12.032246624250076 - 9.27639572698136 - 7.2517044069638175 - 5.731683438378299 - 4.5707427558908105 - 3.671619147708398 - 2.967247069285186 - 10.5442249349763 - 10.5442249349763 - 10.5442249349763 - 10.544224934976299 - 10.544224934976299 - 10.544224934976299 - 53.15829003731176 - 43.61174572457262 - 35.972574777200904 - 25.266441869658838 - 18.41260655316296 - 13.791459264457881 - 18.41260655316296 - 13.791459264457881 - 10.544224934976299 - 6.439129050980882 - 4.0932028361453225 - 2.6726886693551943 - 53.15829003731176 - 43.61174572457262 - 35.972574777200904 - 25.26644186965884 - 18.41260655316296 - 13.791459264457881 - 18.41260655316296 - 13.791459264457881 - 10.544224934976299 - 6.439129050980883 - 4.0932028361453225 - 
2.6726886693551952 - 53.15829003731176 - 43.61174572457262 - 35.972574777200904 - 25.26644186965884 - 18.41260655316296 - 13.791459264457881 - 18.41260655316296 - 13.791459264457881 - 10.544224934976299 - 6.439129050980883 - 4.0932028361453225 - 2.6726886693551952 - 10.544224934976299 - 53.15829003731176 - 43.61174572457262 - 25.26644186965884 - 10.544224934976299 - 2.6726886693551952 - 10.544224934976299 - 2.6726886693551952 - 10.544224934976299 - 10.544224934976299 - 10.544224934976299 - 21.483508338446786 - 21.483508338446786 - 53.15829003731176 - 43.611745724572614 - 21.483508338446786 - 21.483508338446786 - 12.032246624250076 - 7.2517044069638175 - 21.483508338446786 - 21.483508338446786 - 53.15829003731176 - 43.611745724572614 - 12.032246624250076 - 7.2517044069638175 - 21.483508338446786 - 21.483508338446786 x_candidate: - - -1.0 - 0.0 - 0.0 - - -1.0 - 1.0 - 0.0 - - -1.0 - 0.0 - 1.0 ================================================ FILE: tests/optimagic/optimizers/_pounders/fixtures/update_main_from_residual_model.yaml ================================================ --- linear_terms_main_model_expected: - 171928.93452597785 - -177176.19493085583 - -42775.40334058995 linear_terms_residual_model: - - 2363.426377558 - 5992.039123618 - -7709.278530499 - -2150.664440344 - -1182.145972324 - -577.1965207481 - -247.3857201056 - -138.334179211 - -247.3857201056 - -138.334179211 - -53.7124324404 - -103.0416465802 - -99.1378353945 - 954.5967200947 - 13.58623873662 - -1.174295227353 - -3.866221813446 - 2363.426377558 - 5992.039123618 - -7709.278530499 - -2150.664440344 - -1182.145972324 - -577.1965207481 - -247.3857201056 - -138.334179211 - -247.3857201056 - -138.334179211 - -53.71243244039 - -103.0416465802 - -99.1378353945 - 954.5967200947 - 13.58623873662 - -1.174295227353 - -3.866221813447 - 2363.426377558 - 5992.039123618 - -7709.278530499 - -2150.664440344 - -1182.145972324 - -577.1965207481 - -247.3857201056 - -138.334179211 - -247.3857201056 - -138.334179211 - 
-53.7124324404 - -103.0416465802 - -99.1378353945 - 954.5967200947 - 13.58623873662 - -1.174295227354 - -3.866221813447 - 2363.426377558 - 5992.039123618 - -7709.278530499 - -2150.664440344 - -1182.145972324 - -577.1965207481 - -247.3857201056 - -138.334179211 - -247.3857201056 - -138.334179211 - -53.7124324404 - -103.0416465802 - -99.1378353945 - 954.5967200947 - 13.58623873662 - -1.174295227354 - -3.866221813447 - 2363.426377558 - -7709.278530499 - -357.7987188137 - -153.6248432276 - -153.6248432276 - -153.6248432276 - -4.140626483836 - 2363.426377558 - -7709.278530499 - -357.7987188137 - -153.6248432276 - -153.6248432276 - -153.6248432276 - -4.140626483835 - 2363.426377558 - -7709.278530499 - -357.7987188137 - -153.6248432276 - -153.6248432276 - -153.6248432276 - -4.140626483836 - 2363.426377558 - -7709.278530499 - -357.7987188137 - -153.6248432276 - -4.140626483836 - -153.6248432276 - -153.6248432276 - -4.140626483836 - 2363.426377558 - -7709.278530499 - -1182.145972324 - -357.7987188137 - -181.9416676796 - -181.9416676796 - -104.2155735686 - -153.6248432276 - -142.8920752426 - 3.075193030154 - -4.140626483836 - 2363.426377558 - -7709.278530499 - -1182.145972324 - -357.7987188137 - -181.9416676796 - -181.9416676796 - -104.2155735686 - -153.6248432276 - -142.8920752426 - 3.075193030154 - -4.140626483836 - 2363.426377558 - -7709.278530499 - -1182.145972324 - -357.7987188137 - -181.9416676796 - -181.9416676796 - -104.2155735686 - -153.6248432276 - -142.8920752426 - 3.075193030153 - -4.140626483836 - 2363.426377558 - 5992.039123618 - -7709.278530499 - -2150.664440344 - -1182.145972324 - -577.1965207481 - -138.334179211 - -138.334179211 - -53.7124324404 - -103.0416465802 - -99.1378353945 - 954.5967200947 - 13.58623873662 - -1.174295227354 - -3.866221813446 - -153.6248432276 - -153.6248432276 - -153.6248432276 - -153.6248432276 - -153.6248432276 - -153.6248432276 - 2363.426377558 - -7709.278530499 - -1182.145972324 - -357.7987188137 - -181.9416676796 - 
-104.2155735686 - -181.9416676796 - -104.2155735686 - -153.6248432276 - -142.8920752426 - 3.075193030153 - -4.140626483835 - 2363.426377558 - -7709.278530499 - -1182.145972324 - -357.7987188137 - -181.9416676796 - -104.2155735686 - -181.9416676796 - -104.2155735686 - -153.6248432276 - -142.8920752426 - 3.075193030154 - -4.140626483836 - 2363.426377558 - -7709.278530499 - -1182.145972324 - -357.7987188137 - -181.9416676796 - -104.2155735686 - -181.9416676796 - -104.2155735686 - -153.6248432276 - -142.8920752426 - 3.075193030154 - -4.140626483836 - -153.6248432276 - 2363.426377558 - -7709.278530499 - -357.7987188137 - -153.6248432276 - -4.140626483836 - -153.6248432276 - -4.140626483836 - -153.6248432276 - -153.6248432276 - -153.6248432276 - -247.3857201056 - -247.3857201056 - 2363.426377558 - -7709.278530499 - -247.3857201056 - -247.3857201056 - -53.7124324404 - -99.1378353945 - -247.3857201056 - -247.3857201056 - 2363.426377558 - -7709.278530499 - -53.7124324404 - -99.1378353945 - -247.3857201056 - -247.3857201056 - - -1393.823686755 - -3484.676385903 - 4386.660918749 - 1174.087828191 - 621.0101781775 - 261.2029206975 - 36.28651044149 - -104.2839114197 - 36.28651044149 - -104.2839114197 - -735.8610938666 - 294.9448835657 - 87.55073042511 - 1134.846231379 - 104.076484543 - 66.32539487318 - 48.36159029625 - -1393.823686755 - -3484.676385903 - 4386.660918749 - 1174.087828191 - 621.0101781775 - 261.2029206975 - 36.28651044149 - -104.2839114197 - 36.28651044149 - -104.2839114197 - -735.8610938666 - 294.9448835657 - 87.55073042511 - 1134.846231379 - 104.076484543 - 66.32539487318 - 48.36159029625 - -1393.823686755 - -3484.676385903 - 4386.660918749 - 1174.087828191 - 621.0101781775 - 261.2029206975 - 36.28651044149 - -104.2839114197 - 36.28651044149 - -104.2839114197 - -735.8610938666 - 294.9448835657 - 87.55073042511 - 1134.846231379 - 104.076484543 - 66.32539487318 - 48.36159029625 - -1393.823686755 - -3484.676385903 - 4386.660918749 - 1174.087828191 - 621.0101781775 - 
261.2029206975 - 36.28651044149 - -104.2839114197 - 36.28651044149 - -104.2839114197 - -735.8610938666 - 294.9448835657 - 87.55073042511 - 1134.846231379 - 104.076484543 - 66.32539487318 - 48.36159029625 - -1393.823686755 - 4386.660918749 - 119.8053923089 - 972.3575239774 - 972.3575239774 - 972.3575239774 - 42.21231909165 - -1393.823686755 - 4386.660918749 - 119.8053923089 - 972.3575239774 - 972.3575239774 - 972.3575239774 - 42.21231909165 - -1393.823686755 - 4386.660918749 - 119.8053923089 - 972.3575239774 - 972.3575239774 - 972.3575239774 - 42.21231909165 - -1393.823686755 - 4386.660918749 - 119.8053923089 - 972.3575239774 - 42.21231909165 - 972.3575239774 - 972.3575239774 - 42.21231909165 - -1393.823686755 - 4386.660918749 - 621.0101781775 - 119.8053923089 - -30.37542941294 - -30.37542941294 - -230.7678532341 - 972.3575239774 - 1.227223692861 - 80.74754133078 - 42.21231909165 - -1393.823686755 - 4386.660918749 - 621.0101781775 - 119.8053923089 - -30.37542941294 - -30.37542941294 - -230.7678532341 - 972.3575239774 - 1.227223692858 - 80.74754133078 - 42.21231909165 - -1393.823686755 - 4386.660918749 - 621.0101781775 - 119.8053923089 - -30.37542941294 - -30.37542941294 - -230.7678532341 - 972.3575239774 - 1.227223692859 - 80.74754133078 - 42.21231909165 - -1393.823686755 - -3484.676385903 - 4386.660918749 - 1174.087828191 - 621.0101781775 - 261.2029206975 - -104.2839114197 - -104.2839114197 - -735.8610938666 - 294.9448835657 - 87.55073042511 - 1134.846231379 - 104.076484543 - 66.32539487318 - 48.36159029625 - 972.3575239774 - 972.3575239774 - 972.3575239774 - 972.3575239774 - 972.3575239774 - 972.3575239774 - -1393.823686755 - 4386.660918749 - 621.0101781775 - 119.8053923089 - -30.37542941294 - -230.7678532341 - -30.37542941294 - -230.7678532341 - 972.3575239774 - 1.227223692861 - 80.74754133078 - 42.21231909165 - -1393.823686755 - 4386.660918749 - 621.0101781775 - 119.8053923089 - -30.37542941294 - -230.7678532341 - -30.37542941294 - -230.7678532341 - 
972.3575239774 - 1.227223692861 - 80.74754133078 - 42.21231909165 - -1393.823686755 - 4386.660918749 - 621.0101781775 - 119.8053923089 - -30.37542941294 - -230.7678532341 - -30.37542941294 - -230.7678532341 - 972.3575239774 - 1.227223692859 - 80.74754133078 - 42.21231909165 - 972.3575239774 - -1393.823686755 - 4386.660918749 - 119.8053923089 - 972.3575239774 - 42.21231909165 - 972.3575239774 - 42.21231909165 - 972.3575239774 - 972.3575239774 - 972.3575239774 - 36.28651044149 - 36.28651044149 - -1393.823686755 - 4386.660918749 - 36.28651044149 - 36.28651044149 - -735.8610938667 - 87.55073042511 - 36.28651044149 - 36.28651044149 - -1393.823686755 - 4386.660918749 - -735.8610938666 - 87.55073042511 - 36.28651044149 - 36.28651044149 - - -375.8722506492 - -1108.972694686 - 1677.946001564 - 546.4389881039 - 346.3140830129 - 215.6227895742 - 131.5247089125 - 92.72176892163 - 131.5247089125 - 92.72176892163 - 44.3395268336 - 78.93180868232 - 63.72812657892 - -104.6695747867 - 31.93096833735 - 29.20072767216 - 25.55713657195 - -375.8722506492 - -1108.972694686 - 1677.946001564 - 546.4389881039 - 346.3140830129 - 215.6227895742 - 131.5247089125 - 92.72176892163 - 131.5247089125 - 92.72176892163 - 44.3395268336 - 78.93180868232 - 63.72812657892 - -104.6695747867 - 31.93096833735 - 29.20072767216 - 25.55713657195 - -375.8722506492 - -1108.972694686 - 1677.946001564 - 546.4389881039 - 346.3140830129 - 215.6227895742 - 131.5247089125 - 92.72176892163 - 131.5247089125 - 92.72176892163 - 44.3395268336 - 78.93180868232 - 63.72812657892 - -104.6695747867 - 31.93096833735 - 29.20072767216 - 25.55713657195 - -375.8722506492 - -1108.972694686 - 1677.946001564 - 546.4389881039 - 346.3140830129 - 215.6227895742 - 131.5247089125 - 92.72176892163 - 131.5247089125 - 92.72176892163 - 44.3395268336 - 78.93180868232 - 63.72812657892 - -104.6695747867 - 31.93096833735 - 29.20072767216 - 25.55713657195 - -375.8722506492 - 1677.946001564 - 162.4888980685 - 115.5097653415 - 115.5097653415 - 
115.5097653415 - 23.84379728044 - -375.8722506492 - 1677.946001564 - 162.4888980685 - 115.5097653415 - 115.5097653415 - 115.5097653415 - 23.84379728044 - -375.8722506492 - 1677.946001564 - 162.4888980685 - 115.5097653415 - 115.5097653415 - 115.5097653415 - 23.84379728044 - -375.8722506492 - 1677.946001564 - 162.4888980685 - 115.5097653415 - 23.84379728044 - 115.5097653415 - 115.5097653415 - 23.84379728044 - -375.8722506492 - 1677.946001564 - 346.3140830129 - 162.4888980685 - 109.9217527448 - 109.9217527448 - 76.12955501496 - 115.5097653415 - 65.79975067225 - 30.90788818222 - 23.84379728044 - -375.8722506492 - 1677.946001564 - 346.3140830129 - 162.4888980685 - 109.9217527448 - 109.9217527448 - 76.12955501496 - 115.5097653415 - 65.79975067225 - 30.90788818222 - 23.84379728044 - -375.8722506492 - 1677.946001564 - 346.3140830129 - 162.4888980685 - 109.9217527448 - 109.9217527448 - 76.12955501496 - 115.5097653415 - 65.79975067225 - 30.90788818222 - 23.84379728044 - -375.8722506492 - -1108.972694686 - 1677.946001564 - 546.4389881039 - 346.3140830129 - 215.6227895742 - 92.72176892163 - 92.72176892163 - 44.3395268336 - 78.93180868232 - 63.72812657892 - -104.6695747867 - 31.93096833735 - 29.20072767216 - 25.55713657195 - 115.5097653415 - 115.5097653415 - 115.5097653415 - 115.5097653415 - 115.5097653415 - 115.5097653415 - -375.8722506492 - 1677.946001564 - 346.3140830129 - 162.4888980685 - 109.9217527448 - 76.12955501496 - 109.9217527448 - 76.12955501496 - 115.5097653415 - 65.79975067225 - 30.90788818222 - 23.84379728044 - -375.8722506492 - 1677.946001564 - 346.3140830129 - 162.4888980685 - 109.9217527448 - 76.12955501496 - 109.9217527448 - 76.12955501496 - 115.5097653415 - 65.79975067225 - 30.90788818222 - 23.84379728044 - -375.8722506492 - 1677.946001564 - 346.3140830129 - 162.4888980685 - 109.9217527448 - 76.12955501496 - 109.9217527448 - 76.12955501496 - 115.5097653415 - 65.79975067225 - 30.90788818222 - 23.84379728044 - 115.5097653415 - -375.8722506492 - 1677.946001564 
- 162.4888980685 - 115.5097653415 - 23.84379728044 - 115.5097653415 - 23.84379728044 - 115.5097653415 - 115.5097653415 - 115.5097653415 - 131.5247089125 - 131.5247089125 - -375.8722506492 - 1677.946001564 - 131.5247089125 - 131.5247089125 - 44.33952683361 - 63.72812657892 - 131.5247089125 - 131.5247089125 - -375.8722506492 - 1677.946001564 - 44.3395268336 - 63.72812657892 - 131.5247089125 - 131.5247089125 residuals: - 19.67905061421 - 12.78536491634 - 4.453409401868 - 10.42602658124 - 7.181651769754 - 0.8467383120783 - -0.8151544815029 - -1.28878727387 - -0.8651544815029 - -1.11378727387 - -2.178296849214 - -2.4437228135 - -1.857756175876 - -1.847417965917 - -1.795022214911 - -0.7628423028115 - 1.065115779582 - 17.37905061421 - 10.98536491634 - 11.85340940187 - 9.126026581237 - 4.081651769754 - -3.253261687922 - -2.615154481503 - -3.48878727387 - -2.740154481503 - -2.76378727387 - -2.403296849214 - -1.9937228135 - -3.995256175876 - -2.109917965917 - -1.607522214911 - 0.3621576971885 - -0.8473842204181 - 6.779050614207 - 13.08536491634 - 4.053409401868 - 2.726026581237 - 3.281651769754 - 0.04673831207827 - -5.115154481503 - -4.48878727387 - -5.065154481503 - -3.88878727387 - -3.453296849214 - -3.7187228135 - -2.907756175876 - -2.672417965917 - -2.395022214911 - -2.000342302812 - -0.9973842204181 - 0.8790506142075 - 1.385364916339 - 1.053409401868 - 1.026026581237 - 0.3816517697539 - -1.453261687922 - -2.515154481503 - -4.48878727387 - -2.552654481503 - -3.73878727387 - -3.153296849214 - -3.0437228135 - -2.570256175876 - -3.084917965917 - -2.170022214911 - -2.150342302812 - -1.334884220418 - 8.279050614207 - 2.653409401868 - -4.10628705125 - -5.539020595211 - -4.829020595211 - -2.389020595211 - -0.8305062664824 - 4.779050614207 - 0.1534094018679 - -3.40628705125 - -4.109020595211 - -5.199020595211 - -3.329020595211 - -2.520506266482 - 3.579050614207 - 1.253409401868 - -3.70628705125 - -4.079020595211 - -6.139020595211 - -4.639020595211 - -1.020506266482 - 
4.779050614207 - 3.753409401868 - -2.80628705125 - -5.389020595211 - -0.8305062664824 - -5.199020595211 - -4.829020595211 - -1.020506266482 - 3.579050614207 - 0.2534094018679 - -2.118348230246 - -4.60628705125 - -5.8778638078 - -5.5078638078 - -3.383083895331 - -3.999020595211 - -1.038044411845 - 0.2196196759168 - 0.2094937335176 - 2.579050614207 - 2.253409401868 - -1.118348230246 - -1.40628705125 - -8.0778638078 - -7.7578638078 - -2.893083895331 - -5.199020595211 - -1.868044411845 - -1.470380324083 - 0.9594937335176 - -2.720949385793 - -0.2465905981321 - -1.418348230246 - -0.8062870512504 - -7.0778638078 - -6.4078638078 - -1.203083895331 - -2.309020595211 - -4.118044411845 - -0.2303803240832 - 3.659493733518 - 4.779050614207 - 0.0853649163389 - 2.253409401868 - 3.526026581237 - -2.218348230246 - -4.653261687922 - -4.68878727387 - -3.81878727387 - -6.045796849214 - -6.5737228135 - -5.685256175876 - -5.819917965917 - -4.650022214911 - -4.102842302812 - -3.209884220418 - 6.240979404789 - 7.810979404789 - 0.1209794047887 - -6.139020595211 - -5.879020595211 - -1.829020595211 - -2.420949385793 - -5.046590598132 - -1.918348230246 - 3.19371294875 - 1.7221361922 - 1.496916104669 - 1.5421361922 - 1.606916104669 - -0.2490205952113 - -0.738044411845 - 3.289619675917 - 2.279493733518 - 7.479050614207 - 1.553409401868 - -2.418348230246 - -7.60628705125 - -4.0778638078 - -4.503083895331 - -3.5178638078 - -3.533083895331 - -1.709020595211 - -3.548044411845 - -0.9103803240832 - 2.049493733518 - -6.520949385793 - -0.5465905981321 - -9.118348230246 - -5.90628705125 - -2.3778638078 - -5.903083895331 - -2.0878638078 - -5.253083895331 - -4.599020595211 - -3.668044411845 - -1.580380324083 - 0.1794937335176 - -4.259020595211 - 7.779050614207 - 4.753409401868 - -1.10628705125 - -4.639020595211 - -1.590506266482 - -5.009020595211 - -1.400506266482 - -2.759020595211 - -3.329020595211 - -2.309020595211 - -6.415154481503 - -5.965154481503 - 8.479050614207 - 1.853409401868 - -2.115154481503 - 
-2.105154481503 - -2.745796849214 - -3.055256175876 - -3.515154481503 - -3.225154481503 - 8.079050614207 - 1.153409401868 - -3.265796849214 - -3.395256175876 - -3.015154481503 - -2.965154481503 square_terms_main_model_expected: - - 1398130238.041321 - -793279016.9641627 - -295676778.6746732 - - -793279016.9641627 - 493231504.5229622 - 169825981.1998634 - - -295676778.6746732 - 169825981.1998634 - 63822373.72220981 square_terms_residual_model: - - - -3856.411663325 - -1744.213523774 - 2262.533558079 - - -1744.213523774 - -2684.929962548 - -210.0096145524 - - 2262.533558079 - -210.0096145524 - -71.22779112153 - - - -10569.10299644 - -4682.769847703 - 3951.415969003 - - -4682.769847703 - -7035.132202115 - -875.1223560398 - - 3951.415969003 - -875.1223560398 - -122.0013610248 - - - 14481.06389629 - 6304.851879955 - -2851.437438832 - - 6304.851879955 - 9273.533330841 - 1517.67089317 - - -2851.437438832 - 1517.67089317 - 32.41624891305 - - - 4252.951031935 - 1848.277580659 - -170.2845897301 - - 1848.277580659 - 2612.860235584 - 805.8419376052 - - -170.2845897301 - 805.8419376052 - -37.20600626186 - - - 2432.916206823 - 1032.638149205 - 257.4061444594 - - 1032.638149205 - 1449.487149189 - 399.2375243902 - - 257.4061444594 - 399.2375243902 - -48.15069583689 - - - 1240.4084327 - 512.7592742227 - 475.6238125956 - - 512.7592742227 - 690.0567114374 - 253.7250568021 - - 475.6238125956 - 253.7250568021 - -57.22030396345 - - - 459.1360054272 - 176.8440075451 - 527.6634588185 - - 176.8440075451 - 209.2614271428 - 151.24310776 - - 527.6634588185 - 151.24310776 - -67.51764137812 - - - -28.40757635067 - -31.79743224237 - 550.3578108482 - - -31.79743224237 - -84.07371396961 - 84.22388465059 - - 550.3578108482 - 84.22388465059 - -92.16014545357 - - - 459.1360054272 - 176.8440075451 - 527.6634588185 - - 176.8440075451 - 209.2614271428 - 151.24310776 - - 527.6634588185 - 151.24310776 - -67.51764137812 - - - -28.40757635067 - -31.79743224237 - 550.3578108482 - - -31.79743224237 - 
-84.07371396961 - 84.22388465059 - - 550.3578108482 - 84.22388465059 - -92.16014545357 - - - -2045.110581511 - -913.7367115119 - 1103.831669941 - - -913.7367115119 - -1367.741368628 - -163.7437421998 - - 1103.831669941 - -163.7437421998 - -316.398543429 - - - 1219.218783147 - 525.7884332597 - -29.15120474328 - - 525.7884332597 - 767.5344275996 - 236.8486988899 - - -29.15120474328 - 236.8486988899 - 88.99129419806 - - - 662.0714264794 - 287.5911592069 - 90.92844099577 - - 287.5911592069 - 454.4431485291 - 203.8681218118 - - 90.92844099577 - 203.8681218118 - 25.01577261847 - - - -1377.243922834 - -795.4386681958 - 747.122472413 - - -795.4386681958 - -2497.250719358 - -1840.429221121 - - 747.122472413 - -1840.429221121 - 63.15352494948 - - - 217.522078241 - 79.32830419073 - 153.0067332807 - - 79.32830419073 - 39.05322569843 - -15.05847653376 - - 153.0067332807 - -15.05847653376 - 8.195712590102 - - - 192.1674291662 - 73.24546567179 - 128.9418592581 - - 73.24546567179 - 60.8870259716 - 14.10438172489 - - 128.9418592581 - 14.10438172489 - 4.023771621814 - - - 160.9847149983 - 61.88723830032 - 112.7111318224 - - 61.88723830032 - 56.36412497327 - 19.44285900547 - - 112.7111318224 - 19.44285900547 - 1.958413872367 - - - -3856.411663325 - -1744.213523774 - 2262.533558079 - - -1744.213523774 - -2684.929962548 - -210.0096145524 - - 2262.533558079 - -210.0096145524 - -71.22779112153 - - - -10569.10299644 - -4682.769847703 - 3951.415969003 - - -4682.769847703 - -7035.132202115 - -875.1223560398 - - 3951.415969003 - -875.1223560398 - -122.0013610248 - - - 14481.06389629 - 6304.851879955 - -2851.437438832 - - 6304.851879955 - 9273.533330841 - 1517.67089317 - - -2851.437438832 - 1517.67089317 - 32.41624891304 - - - 4252.951031935 - 1848.277580659 - -170.2845897301 - - 1848.277580659 - 2612.860235584 - 805.8419376052 - - -170.2845897301 - 805.8419376052 - -37.20600626185 - - - 2432.916206823 - 1032.638149205 - 257.4061444594 - - 1032.638149205 - 1449.487149189 - 399.2375243902 - - 
257.4061444594 - 399.2375243902 - -48.15069583689 - - - 1240.4084327 - 512.7592742227 - 475.6238125956 - - 512.7592742227 - 690.0567114374 - 253.7250568021 - - 475.6238125956 - 253.7250568021 - -57.22030396345 - - - 459.1360054272 - 176.8440075451 - 527.6634588185 - - 176.8440075451 - 209.2614271428 - 151.24310776 - - 527.6634588185 - 151.24310776 - -67.51764137812 - - - -28.40757635067 - -31.79743224237 - 550.3578108482 - - -31.79743224237 - -84.07371396961 - 84.22388465059 - - 550.3578108482 - 84.22388465059 - -92.16014545357 - - - 459.1360054272 - 176.8440075451 - 527.6634588185 - - 176.8440075451 - 209.2614271428 - 151.24310776 - - 527.6634588185 - 151.24310776 - -67.51764137812 - - - -28.40757635067 - -31.79743224237 - 550.3578108482 - - -31.79743224237 - -84.07371396961 - 84.22388465059 - - 550.3578108482 - 84.22388465059 - -92.16014545357 - - - -2045.110581511 - -913.7367115119 - 1103.831669941 - - -913.7367115119 - -1367.741368628 - -163.7437421998 - - 1103.831669941 - -163.7437421998 - -316.398543429 - - - 1219.218783147 - 525.7884332597 - -29.15120474328 - - 525.7884332597 - 767.5344275996 - 236.8486988899 - - -29.15120474328 - 236.8486988899 - 88.99129419806 - - - 662.0714264794 - 287.5911592069 - 90.92844099577 - - 287.5911592069 - 454.4431485291 - 203.8681218118 - - 90.92844099577 - 203.8681218118 - 25.01577261847 - - - -1377.243922834 - -795.4386681958 - 747.122472413 - - -795.4386681958 - -2497.250719358 - -1840.429221121 - - 747.122472413 - -1840.429221121 - 63.15352494948 - - - 217.522078241 - 79.32830419073 - 153.0067332807 - - 79.32830419073 - 39.05322569843 - -15.05847653376 - - 153.0067332807 - -15.05847653376 - 8.195712590102 - - - 192.1674291662 - 73.24546567179 - 128.9418592581 - - 73.24546567179 - 60.8870259716 - 14.10438172489 - - 128.9418592581 - 14.10438172489 - 4.023771621814 - - - 160.9847149983 - 61.88723830032 - 112.7111318224 - - 61.88723830032 - 56.36412497327 - 19.44285900547 - - 112.7111318224 - 19.44285900547 - 1.958413872367 - 
- - -3856.411663325 - -1744.213523774 - 2262.533558079 - - -1744.213523774 - -2684.929962548 - -210.0096145524 - - 2262.533558079 - -210.0096145524 - -71.22779112153 - - - -10569.10299644 - -4682.769847703 - 3951.415969003 - - -4682.769847703 - -7035.132202115 - -875.1223560398 - - 3951.415969003 - -875.1223560398 - -122.0013610248 - - - 14481.06389629 - 6304.851879955 - -2851.437438832 - - 6304.851879955 - 9273.533330841 - 1517.67089317 - - -2851.437438832 - 1517.67089317 - 32.41624891304 - - - 4252.951031935 - 1848.277580659 - -170.2845897301 - - 1848.277580659 - 2612.860235584 - 805.8419376052 - - -170.2845897301 - 805.8419376052 - -37.20600626185 - - - 2432.916206823 - 1032.638149205 - 257.4061444594 - - 1032.638149205 - 1449.487149189 - 399.2375243902 - - 257.4061444594 - 399.2375243902 - -48.15069583689 - - - 1240.4084327 - 512.7592742227 - 475.6238125956 - - 512.7592742227 - 690.0567114374 - 253.7250568021 - - 475.6238125956 - 253.7250568021 - -57.22030396345 - - - 459.1360054272 - 176.8440075451 - 527.6634588185 - - 176.8440075451 - 209.2614271428 - 151.24310776 - - 527.6634588185 - 151.24310776 - -67.51764137812 - - - -28.40757635067 - -31.79743224237 - 550.3578108482 - - -31.79743224237 - -84.07371396961 - 84.22388465059 - - 550.3578108482 - 84.22388465059 - -92.16014545357 - - - 459.1360054272 - 176.8440075451 - 527.6634588185 - - 176.8440075451 - 209.2614271428 - 151.24310776 - - 527.6634588185 - 151.24310776 - -67.51764137812 - - - -28.40757635067 - -31.79743224237 - 550.3578108482 - - -31.79743224237 - -84.07371396961 - 84.22388465059 - - 550.3578108482 - 84.22388465059 - -92.16014545357 - - - -2045.110581511 - -913.7367115119 - 1103.831669941 - - -913.7367115119 - -1367.741368628 - -163.7437421998 - - 1103.831669941 - -163.7437421998 - -316.398543429 - - - 1219.218783147 - 525.7884332597 - -29.15120474328 - - 525.7884332597 - 767.5344275996 - 236.8486988899 - - -29.15120474328 - 236.8486988899 - 88.99129419806 - - - 662.0714264794 - 287.5911592069 - 
90.92844099577 - - 287.5911592069 - 454.4431485291 - 203.8681218118 - - 90.92844099577 - 203.8681218118 - 25.01577261847 - - - -1377.243922834 - -795.4386681958 - 747.122472413 - - -795.4386681958 - -2497.250719358 - -1840.429221121 - - 747.122472413 - -1840.429221121 - 63.15352494948 - - - 217.522078241 - 79.32830419073 - 153.0067332807 - - 79.32830419073 - 39.05322569843 - -15.05847653376 - - 153.0067332807 - -15.05847653376 - 8.195712590102 - - - 192.1674291662 - 73.24546567179 - 128.9418592581 - - 73.24546567179 - 60.8870259716 - 14.10438172489 - - 128.9418592581 - 14.10438172489 - 4.023771621814 - - - 160.9847149983 - 61.88723830032 - 112.7111318224 - - 61.88723830032 - 56.36412497327 - 19.44285900547 - - 112.7111318224 - 19.44285900547 - 1.958413872367 - - - -3856.411663325 - -1744.213523774 - 2262.533558079 - - -1744.213523774 - -2684.929962548 - -210.0096145524 - - 2262.533558079 - -210.0096145524 - -71.22779112153 - - - -10569.10299644 - -4682.769847703 - 3951.415969003 - - -4682.769847703 - -7035.132202115 - -875.1223560398 - - 3951.415969003 - -875.1223560398 - -122.0013610248 - - - 14481.06389629 - 6304.851879955 - -2851.437438832 - - 6304.851879955 - 9273.533330841 - 1517.67089317 - - -2851.437438832 - 1517.67089317 - 32.41624891304 - - - 4252.951031935 - 1848.277580659 - -170.2845897301 - - 1848.277580659 - 2612.860235584 - 805.8419376052 - - -170.2845897301 - 805.8419376052 - -37.20600626185 - - - 2432.916206823 - 1032.638149205 - 257.4061444594 - - 1032.638149205 - 1449.487149189 - 399.2375243902 - - 257.4061444594 - 399.2375243902 - -48.15069583689 - - - 1240.4084327 - 512.7592742227 - 475.6238125956 - - 512.7592742227 - 690.0567114374 - 253.7250568021 - - 475.6238125956 - 253.7250568021 - -57.22030396345 - - - 459.1360054272 - 176.8440075451 - 527.6634588185 - - 176.8440075451 - 209.2614271428 - 151.24310776 - - 527.6634588185 - 151.24310776 - -67.51764137812 - - - -28.40757635067 - -31.79743224237 - 550.3578108482 - - -31.79743224237 - 
-84.07371396961 - 84.22388465059 - - 550.3578108482 - 84.22388465059 - -92.16014545357 - - - 459.1360054272 - 176.8440075451 - 527.6634588185 - - 176.8440075451 - 209.2614271428 - 151.24310776 - - 527.6634588185 - 151.24310776 - -67.51764137812 - - - -28.40757635067 - -31.79743224237 - 550.3578108482 - - -31.79743224237 - -84.07371396961 - 84.22388465059 - - 550.3578108482 - 84.22388465059 - -92.16014545357 - - - -2045.110581511 - -913.7367115119 - 1103.831669941 - - -913.7367115119 - -1367.741368628 - -163.7437421998 - - 1103.831669941 - -163.7437421998 - -316.398543429 - - - 1219.218783147 - 525.7884332597 - -29.15120474328 - - 525.7884332597 - 767.5344275996 - 236.8486988899 - - -29.15120474328 - 236.8486988899 - 88.99129419806 - - - 662.0714264794 - 287.5911592069 - 90.92844099577 - - 287.5911592069 - 454.4431485291 - 203.8681218118 - - 90.92844099577 - 203.8681218118 - 25.01577261847 - - - -1377.243922834 - -795.4386681958 - 747.122472413 - - -795.4386681958 - -2497.250719358 - -1840.429221121 - - 747.122472413 - -1840.429221121 - 63.15352494948 - - - 217.522078241 - 79.32830419073 - 153.0067332807 - - 79.32830419073 - 39.05322569843 - -15.05847653376 - - 153.0067332807 - -15.05847653376 - 8.195712590102 - - - 192.1674291662 - 73.24546567179 - 128.9418592581 - - 73.24546567179 - 60.8870259716 - 14.10438172489 - - 128.9418592581 - 14.10438172489 - 4.023771621814 - - - 160.9847149983 - 61.88723830032 - 112.7111318224 - - 61.88723830032 - 56.36412497327 - 19.44285900547 - - 112.7111318224 - 19.44285900547 - 1.958413872367 - - - -3856.411663325 - -1744.213523774 - 2262.533558079 - - -1744.213523774 - -2684.929962548 - -210.0096145524 - - 2262.533558079 - -210.0096145524 - -71.22779112153 - - - 14481.06389629 - 6304.851879955 - -2851.437438832 - - 6304.851879955 - 9273.533330841 - 1517.67089317 - - -2851.437438832 - 1517.67089317 - 32.41624891305 - - - 754.037862043 - 303.007024288 - 520.6337570466 - - 303.007024288 - 387.992522728 - 191.4946509941 - - 
520.6337570466 - 191.4946509941 - -62.14538340589 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 147.2292962915 - 56.67228284198 - 105.718994092 - - 56.67228284198 - 52.62002215236 - 19.90293970874 - - 105.718994092 - 19.90293970874 - 1.297978879663 - - - -3856.411663325 - -1744.213523774 - 2262.533558079 - - -1744.213523774 - -2684.929962548 - -210.0096145524 - - 2262.533558079 - -210.0096145524 - -71.22779112153 - - - 14481.06389629 - 6304.851879955 - -2851.437438832 - - 6304.851879955 - 9273.533330841 - 1517.67089317 - - -2851.437438832 - 1517.67089317 - 32.41624891305 - - - 754.037862043 - 303.007024288 - 520.6337570466 - - 303.007024288 - 387.992522728 - 191.4946509941 - - 520.6337570466 - 191.4946509941 - -62.14538340589 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 147.2292962915 - 56.67228284198 - 105.718994092 - - 56.67228284198 - 52.62002215236 - 19.90293970874 - - 105.718994092 - 19.90293970874 - 1.297978879664 - - - -3856.411663325 - -1744.213523774 - 2262.533558079 - - -1744.213523774 - -2684.929962548 - -210.0096145524 - - 2262.533558079 - -210.0096145524 - -71.22779112153 - - 
- 14481.06389629 - 6304.851879955 - -2851.437438832 - - 6304.851879955 - 9273.533330841 - 1517.67089317 - - -2851.437438832 - 1517.67089317 - 32.41624891304 - - - 754.037862043 - 303.007024288 - 520.6337570466 - - 303.007024288 - 387.992522728 - 191.4946509941 - - 520.6337570466 - 191.4946509941 - -62.14538340589 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 147.2292962915 - 56.67228284198 - 105.718994092 - - 56.67228284198 - 52.62002215236 - 19.90293970874 - - 105.718994092 - 19.90293970874 - 1.297978879663 - - - -3856.411663325 - -1744.213523774 - 2262.533558079 - - -1744.213523774 - -2684.929962548 - -210.0096145524 - - 2262.533558079 - -210.0096145524 - -71.22779112153 - - - 14481.06389629 - 6304.851879955 - -2851.437438832 - - 6304.851879955 - 9273.533330841 - 1517.67089317 - - -2851.437438832 - 1517.67089317 - 32.41624891305 - - - 754.037862043 - 303.007024288 - 520.6337570466 - - 303.007024288 - 387.992522728 - 191.4946509941 - - 520.6337570466 - 191.4946509941 - -62.14538340589 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 147.2292962915 - 56.67228284198 - 105.718994092 - - 56.67228284198 - 52.62002215236 - 19.90293970874 - - 105.718994092 - 19.90293970874 - 1.297978879663 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 
1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 147.2292962915 - 56.67228284198 - 105.718994092 - - 56.67228284198 - 52.62002215236 - 19.90293970874 - - 105.718994092 - 19.90293970874 - 1.297978879663 - - - -3856.411663325 - -1744.213523774 - 2262.533558079 - - -1744.213523774 - -2684.929962548 - -210.0096145524 - - 2262.533558079 - -210.0096145524 - -71.22779112153 - - - 14481.06389629 - 6304.851879955 - -2851.437438832 - - 6304.851879955 - 9273.533330841 - 1517.67089317 - - -2851.437438832 - 1517.67089317 - 32.41624891304 - - - 2432.916206823 - 1032.638149205 - 257.4061444594 - - 1032.638149205 - 1449.487149189 - 399.2375243902 - - 257.4061444594 - 399.2375243902 - -48.15069583689 - - - 754.037862043 - 303.007024288 - 520.6337570466 - - 303.007024288 - 387.992522728 - 191.4946509941 - - 520.6337570466 - 191.4946509941 - -62.14538340589 - - - 224.0053660568 - 76.49830322054 - 530.6144385787 - - 76.49830322054 - 68.48505242171 - 118.4056191032 - - 530.6144385787 - 118.4056191032 - -75.99273950177 - - - 224.0053660568 - 76.49830322054 - 530.6144385787 - - 76.49830322054 - 68.48505242171 - 118.4056191032 - - 530.6144385787 - 118.4056191032 - -75.99273950177 - - - -441.5774716513 - -211.0258671001 - 630.8815307116 - - -211.0258671001 - -340.9653527663 - 31.64858923452 - - 630.8815307116 - 31.64858923452 - -131.2299615288 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 643.1868519507 - 289.6433117825 - 69.96405555076 - - 289.6433117825 - 527.631431758 - 287.5422429696 - - 69.96405555076 - 287.5422429696 - 12.86544852284 - - - 207.8475807462 - 78.25234656542 - 139.1055791254 - - 78.25234656542 - 57.38466331417 - 5.647653214982 - - 139.1055791254 - 5.647653214982 - 5.691781115008 - - - 147.2292962915 - 56.67228284198 - 105.718994092 - - 56.67228284198 - 52.62002215236 - 19.90293970874 - - 
105.718994092 - 19.90293970874 - 1.297978879663 - - - -3856.411663325 - -1744.213523774 - 2262.533558079 - - -1744.213523774 - -2684.929962548 - -210.0096145524 - - 2262.533558079 - -210.0096145524 - -71.22779112153 - - - 14481.06389629 - 6304.851879955 - -2851.437438832 - - 6304.851879955 - 9273.533330841 - 1517.67089317 - - -2851.437438832 - 1517.67089317 - 32.41624891304 - - - 2432.916206823 - 1032.638149205 - 257.4061444594 - - 1032.638149205 - 1449.487149189 - 399.2375243902 - - 257.4061444594 - 399.2375243902 - -48.15069583689 - - - 754.037862043 - 303.007024288 - 520.6337570466 - - 303.007024288 - 387.992522728 - 191.4946509941 - - 520.6337570466 - 191.4946509941 - -62.14538340589 - - - 224.0053660568 - 76.49830322054 - 530.6144385787 - - 76.49830322054 - 68.48505242171 - 118.4056191032 - - 530.6144385787 - 118.4056191032 - -75.99273950177 - - - 224.0053660568 - 76.49830322054 - 530.6144385787 - - 76.49830322054 - 68.48505242171 - 118.4056191032 - - 530.6144385787 - 118.4056191032 - -75.99273950177 - - - -441.5774716513 - -211.0258671001 - 630.8815307116 - - -211.0258671001 - -340.9653527663 - 31.64858923452 - - 630.8815307116 - 31.64858923452 - -131.2299615288 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 643.1868519507 - 289.6433117825 - 69.96405555076 - - 289.6433117825 - 527.631431758 - 287.5422429696 - - 69.96405555076 - 287.5422429696 - 12.86544852284 - - - 207.8475807462 - 78.25234656542 - 139.1055791254 - - 78.25234656542 - 57.38466331417 - 5.647653214982 - - 139.1055791254 - 5.647653214982 - 5.691781115008 - - - 147.2292962915 - 56.67228284198 - 105.718994092 - - 56.67228284198 - 52.62002215236 - 19.90293970874 - - 105.718994092 - 19.90293970874 - 1.297978879664 - - - -3856.411663325 - -1744.213523774 - 2262.533558079 - - -1744.213523774 - -2684.929962548 - -210.0096145524 - - 2262.533558079 - -210.0096145524 - -71.22779112153 - 
- - 14481.06389629 - 6304.851879955 - -2851.437438832 - - 6304.851879955 - 9273.533330841 - 1517.67089317 - - -2851.437438832 - 1517.67089317 - 32.41624891304 - - - 2432.916206823 - 1032.638149205 - 257.4061444594 - - 1032.638149205 - 1449.487149189 - 399.2375243902 - - 257.4061444594 - 399.2375243902 - -48.15069583689 - - - 754.037862043 - 303.007024288 - 520.6337570466 - - 303.007024288 - 387.992522728 - 191.4946509941 - - 520.6337570466 - 191.4946509941 - -62.14538340589 - - - 224.0053660568 - 76.49830322054 - 530.6144385787 - - 76.49830322054 - 68.48505242171 - 118.4056191032 - - 530.6144385787 - 118.4056191032 - -75.99273950177 - - - 224.0053660568 - 76.49830322054 - 530.6144385787 - - 76.49830322054 - 68.48505242171 - 118.4056191032 - - 530.6144385787 - 118.4056191032 - -75.99273950177 - - - -441.5774716513 - -211.0258671001 - 630.8815307116 - - -211.0258671001 - -340.9653527663 - 31.64858923452 - - 630.8815307116 - 31.64858923452 - -131.2299615288 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 643.1868519507 - 289.6433117825 - 69.96405555076 - - 289.6433117825 - 527.631431758 - 287.5422429696 - - 69.96405555076 - 287.5422429696 - 12.86544852284 - - - 207.8475807462 - 78.25234656542 - 139.1055791254 - - 78.25234656542 - 57.38466331417 - 5.647653214982 - - 139.1055791254 - 5.647653214982 - 5.691781115008 - - - 147.2292962915 - 56.67228284198 - 105.718994092 - - 56.67228284198 - 52.62002215236 - 19.90293970874 - - 105.718994092 - 19.90293970874 - 1.297978879663 - - - -3856.411663325 - -1744.213523774 - 2262.533558079 - - -1744.213523774 - -2684.929962548 - -210.0096145524 - - 2262.533558079 - -210.0096145524 - -71.22779112153 - - - -10569.10299644 - -4682.769847703 - 3951.415969003 - - -4682.769847703 - -7035.132202115 - -875.1223560398 - - 3951.415969003 - -875.1223560398 - -122.0013610248 - - - 14481.06389629 - 6304.851879955 - 
-2851.437438832 - - 6304.851879955 - 9273.533330841 - 1517.67089317 - - -2851.437438832 - 1517.67089317 - 32.41624891304 - - - 4252.951031935 - 1848.277580659 - -170.2845897301 - - 1848.277580659 - 2612.860235584 - 805.8419376052 - - -170.2845897301 - 805.8419376052 - -37.20600626185 - - - 2432.916206823 - 1032.638149205 - 257.4061444594 - - 1032.638149205 - 1449.487149189 - 399.2375243902 - - 257.4061444594 - 399.2375243902 - -48.15069583689 - - - 1240.4084327 - 512.7592742227 - 475.6238125956 - - 512.7592742227 - 690.0567114374 - 253.7250568021 - - 475.6238125956 - 253.7250568021 - -57.22030396345 - - - -28.40757635067 - -31.79743224237 - 550.3578108482 - - -31.79743224237 - -84.07371396961 - 84.22388465059 - - 550.3578108482 - 84.22388465059 - -92.16014545357 - - - -28.40757635067 - -31.79743224237 - 550.3578108482 - - -31.79743224237 - -84.07371396961 - 84.22388465059 - - 550.3578108482 - 84.22388465059 - -92.16014545357 - - - -2045.110581511 - -913.7367115119 - 1103.831669941 - - -913.7367115119 - -1367.741368628 - -163.7437421998 - - 1103.831669941 - -163.7437421998 - -316.398543429 - - - 1219.218783147 - 525.7884332597 - -29.15120474328 - - 525.7884332597 - 767.5344275996 - 236.8486988899 - - -29.15120474328 - 236.8486988899 - 88.99129419806 - - - 662.0714264794 - 287.5911592069 - 90.92844099577 - - 287.5911592069 - 454.4431485291 - 203.8681218118 - - 90.92844099577 - 203.8681218118 - 25.01577261847 - - - -1377.243922834 - -795.4386681958 - 747.122472413 - - -795.4386681958 - -2497.250719358 - -1840.429221121 - - 747.122472413 - -1840.429221121 - 63.15352494948 - - - 217.522078241 - 79.32830419073 - 153.0067332807 - - 79.32830419073 - 39.05322569843 - -15.05847653376 - - 153.0067332807 - -15.05847653376 - 8.195712590102 - - - 192.1674291662 - 73.24546567179 - 128.9418592581 - - 73.24546567179 - 60.8870259716 - 14.10438172489 - - 128.9418592581 - 14.10438172489 - 4.023771621814 - - - 160.9847149983 - 61.88723830032 - 112.7111318224 - - 61.88723830032 - 
56.36412497327 - 19.44285900547 - - 112.7111318224 - 19.44285900547 - 1.958413872367 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - -3856.411663325 - -1744.213523774 - 2262.533558079 - - -1744.213523774 - -2684.929962548 - -210.0096145524 - - 2262.533558079 - -210.0096145524 - -71.22779112153 - - - 14481.06389629 - 6304.851879955 - -2851.437438832 - - 6304.851879955 - 9273.533330841 - 1517.67089317 - - -2851.437438832 - 1517.67089317 - 32.41624891305 - - - 2432.916206823 - 1032.638149205 - 257.4061444594 - - 1032.638149205 - 1449.487149189 - 399.2375243902 - - 257.4061444594 - 399.2375243902 - -48.15069583689 - - - 754.037862043 - 303.007024288 - 520.6337570466 - - 303.007024288 - 387.992522728 - 191.4946509941 - - 520.6337570466 - 191.4946509941 - -62.14538340589 - - - 224.0053660568 - 76.49830322054 - 530.6144385787 - - 76.49830322054 - 68.48505242171 - 118.4056191032 - - 530.6144385787 - 118.4056191032 - -75.99273950177 - - - -441.5774716513 - -211.0258671001 - 630.8815307116 - - -211.0258671001 - -340.9653527663 - 31.64858923452 - - 630.8815307116 
- 31.64858923452 - -131.2299615288 - - - 224.0053660568 - 76.49830322054 - 530.6144385787 - - 76.49830322054 - 68.48505242171 - 118.4056191032 - - 530.6144385787 - 118.4056191032 - -75.99273950177 - - - -441.5774716513 - -211.0258671001 - 630.8815307116 - - -211.0258671001 - -340.9653527663 - 31.64858923452 - - 630.8815307116 - 31.64858923452 - -131.2299615288 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 643.1868519507 - 289.6433117825 - 69.96405555076 - - 289.6433117825 - 527.631431758 - 287.5422429696 - - 69.96405555076 - 287.5422429696 - 12.86544852284 - - - 207.8475807462 - 78.25234656542 - 139.1055791254 - - 78.25234656542 - 57.38466331417 - 5.647653214982 - - 139.1055791254 - 5.647653214982 - 5.691781115008 - - - 147.2292962915 - 56.67228284198 - 105.718994092 - - 56.67228284198 - 52.62002215236 - 19.90293970874 - - 105.718994092 - 19.90293970874 - 1.297978879664 - - - -3856.411663325 - -1744.213523774 - 2262.533558079 - - -1744.213523774 - -2684.929962548 - -210.0096145524 - - 2262.533558079 - -210.0096145524 - -71.22779112152 - - - 14481.06389629 - 6304.851879955 - -2851.437438832 - - 6304.851879955 - 9273.533330841 - 1517.67089317 - - -2851.437438832 - 1517.67089317 - 32.41624891304 - - - 2432.916206823 - 1032.638149205 - 257.4061444594 - - 1032.638149205 - 1449.487149189 - 399.2375243902 - - 257.4061444594 - 399.2375243902 - -48.15069583689 - - - 754.037862043 - 303.007024288 - 520.6337570466 - - 303.007024288 - 387.992522728 - 191.4946509941 - - 520.6337570466 - 191.4946509941 - -62.14538340589 - - - 224.0053660568 - 76.49830322054 - 530.6144385787 - - 76.49830322054 - 68.48505242171 - 118.4056191032 - - 530.6144385787 - 118.4056191032 - -75.99273950177 - - - -441.5774716513 - -211.0258671001 - 630.8815307116 - - -211.0258671001 - -340.9653527663 - 31.64858923452 - - 630.8815307116 - 31.64858923452 - -131.2299615288 - - - 
224.0053660568 - 76.49830322054 - 530.6144385787 - - 76.49830322054 - 68.48505242171 - 118.4056191032 - - 530.6144385787 - 118.4056191032 - -75.99273950177 - - - -441.5774716513 - -211.0258671001 - 630.8815307116 - - -211.0258671001 - -340.9653527663 - 31.64858923452 - - 630.8815307116 - 31.64858923452 - -131.2299615288 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 643.1868519507 - 289.6433117825 - 69.96405555076 - - 289.6433117825 - 527.631431758 - 287.5422429696 - - 69.96405555076 - 287.5422429696 - 12.86544852284 - - - 207.8475807462 - 78.25234656542 - 139.1055791254 - - 78.25234656542 - 57.38466331417 - 5.647653214982 - - 139.1055791254 - 5.647653214982 - 5.691781115008 - - - 147.2292962915 - 56.67228284198 - 105.718994092 - - 56.67228284198 - 52.62002215236 - 19.90293970874 - - 105.718994092 - 19.90293970874 - 1.297978879663 - - - -3856.411663325 - -1744.213523774 - 2262.533558079 - - -1744.213523774 - -2684.929962548 - -210.0096145524 - - 2262.533558079 - -210.0096145524 - -71.22779112153 - - - 14481.06389629 - 6304.851879955 - -2851.437438832 - - 6304.851879955 - 9273.533330841 - 1517.67089317 - - -2851.437438832 - 1517.67089317 - 32.41624891305 - - - 2432.916206823 - 1032.638149205 - 257.4061444594 - - 1032.638149205 - 1449.487149189 - 399.2375243902 - - 257.4061444594 - 399.2375243902 - -48.15069583689 - - - 754.037862043 - 303.007024288 - 520.6337570466 - - 303.007024288 - 387.992522728 - 191.4946509941 - - 520.6337570466 - 191.4946509941 - -62.14538340589 - - - 224.0053660568 - 76.49830322054 - 530.6144385787 - - 76.49830322054 - 68.48505242171 - 118.4056191032 - - 530.6144385787 - 118.4056191032 - -75.99273950177 - - - -441.5774716513 - -211.0258671001 - 630.8815307116 - - -211.0258671001 - -340.9653527663 - 31.64858923452 - - 630.8815307116 - 31.64858923452 - -131.2299615288 - - - 224.0053660568 - 76.49830322054 - 530.6144385787 - - 
76.49830322054 - 68.48505242171 - 118.4056191032 - - 530.6144385787 - 118.4056191032 - -75.99273950177 - - - -441.5774716513 - -211.0258671001 - 630.8815307116 - - -211.0258671001 - -340.9653527663 - 31.64858923452 - - 630.8815307116 - 31.64858923452 - -131.2299615288 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 643.1868519507 - 289.6433117825 - 69.96405555076 - - 289.6433117825 - 527.631431758 - 287.5422429696 - - 69.96405555076 - 287.5422429696 - 12.86544852284 - - - 207.8475807462 - 78.25234656542 - 139.1055791254 - - 78.25234656542 - 57.38466331417 - 5.647653214982 - - 139.1055791254 - 5.647653214982 - 5.691781115008 - - - 147.2292962915 - 56.67228284198 - 105.718994092 - - 56.67228284198 - 52.62002215236 - 19.90293970874 - - 105.718994092 - 19.90293970874 - 1.297978879664 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - -3856.411663325 - -1744.213523774 - 2262.533558079 - - -1744.213523774 - -2684.929962548 - -210.0096145524 - - 2262.533558079 - -210.0096145524 - -71.22779112153 - - - 14481.06389629 - 6304.851879955 - -2851.437438832 - - 6304.851879955 - 9273.533330841 - 1517.67089317 - - -2851.437438832 - 1517.67089317 - 32.41624891305 - - - 754.037862043 - 303.007024288 - 520.6337570466 - - 303.007024288 - 387.992522728 - 191.4946509941 - - 520.6337570466 - 191.4946509941 - -62.14538340589 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 147.2292962915 - 56.67228284198 - 105.718994092 - - 56.67228284198 - 52.62002215236 - 19.90293970874 - - 105.718994092 - 19.90293970874 - 1.297978879663 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - 
-683.3879923253 - 491.0039053136 - 341.2992596768 - - - 147.2292962915 - 56.67228284198 - 105.718994092 - - 56.67228284198 - 52.62002215236 - 19.90293970874 - - 105.718994092 - 19.90293970874 - 1.297978879664 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 459.1360054272 - 176.8440075451 - 527.6634588185 - - 176.8440075451 - 209.2614271428 - 151.24310776 - - 527.6634588185 - 151.24310776 - -67.51764137812 - - - 459.1360054272 - 176.8440075451 - 527.6634588185 - - 176.8440075451 - 209.2614271428 - 151.24310776 - - 527.6634588185 - 151.24310776 - -67.51764137812 - - - -3856.411663325 - -1744.213523774 - 2262.533558079 - - -1744.213523774 - -2684.929962548 - -210.0096145524 - - 2262.533558079 - -210.0096145524 - -71.22779112152 - - - 14481.06389629 - 6304.851879955 - -2851.437438832 - - 6304.851879955 - 9273.533330841 - 1517.67089317 - - -2851.437438832 - 1517.67089317 - 32.41624891304 - - - 459.1360054272 - 176.8440075451 - 527.6634588185 - - 176.8440075451 - 209.2614271428 - 151.24310776 - - 527.6634588185 - 151.24310776 - -67.51764137812 - - - 459.1360054272 - 176.8440075451 - 527.6634588185 - - 176.8440075451 - 209.2614271428 - 151.24310776 - - 527.6634588185 - 151.24310776 - -67.51764137812 - - - -2045.110581511 - -913.7367115119 - 1103.831669941 - - -913.7367115119 - -1367.741368628 - -163.7437421998 - - 1103.831669941 - -163.7437421998 - -316.398543429 - - - 662.0714264794 - 287.5911592069 - 90.92844099577 - - 287.5911592069 - 454.4431485291 - 203.8681218118 - - 90.92844099577 - 203.8681218118 - 25.01577261847 - - - 
459.1360054272 - 176.8440075451 - 527.6634588185 - - 176.8440075451 - 209.2614271428 - 151.24310776 - - 527.6634588185 - 151.24310776 - -67.51764137812 - - - 459.1360054272 - 176.8440075451 - 527.6634588185 - - 176.8440075451 - 209.2614271428 - 151.24310776 - - 527.6634588185 - 151.24310776 - -67.51764137812 - - - -3856.411663325 - -1744.213523774 - 2262.533558079 - - -1744.213523774 - -2684.929962548 - -210.0096145524 - - 2262.533558079 - -210.0096145524 - -71.22779112153 - - - 14481.06389629 - 6304.851879955 - -2851.437438832 - - 6304.851879955 - 9273.533330841 - 1517.67089317 - - -2851.437438832 - 1517.67089317 - 32.41624891305 - - - -2045.110581511 - -913.7367115119 - 1103.831669941 - - -913.7367115119 - -1367.741368628 - -163.7437421998 - - 1103.831669941 - -163.7437421998 - -316.398543429 - - - 662.0714264794 - 287.5911592069 - 90.92844099577 - - 287.5911592069 - 454.4431485291 - 203.8681218118 - - 90.92844099577 - 203.8681218118 - 25.01577261847 - - - 459.1360054272 - 176.8440075451 - 527.6634588185 - - 176.8440075451 - 209.2614271428 - 151.24310776 - - 527.6634588185 - 151.24310776 - -67.51764137812 - - - 459.1360054272 - 176.8440075451 - 527.6634588185 - - 176.8440075451 - 209.2614271428 - 151.24310776 - - 527.6634588185 - 151.24310776 - -67.51764137812 ================================================ FILE: tests/optimagic/optimizers/_pounders/fixtures/update_main_with_new_accepted_x.yaml ================================================ --- best_x: - 0.15 - 0.008 - 0.01 delta: 0.025 linear_terms: - -19571.13290454 - -19823.82644072 - 14216.11542373 linear_terms_expected: - -1.782609615475e-10 - -3.274180926383e-11 - 2.546585164964e-11 mains: - 21.53511643627 - 14.80453604351 - 6.548558251064 - 12.54188075473 - 9.282890198608 - 2.859555210712 - 0.9381817894678 - 0.2048532883114 - 0.8881817894678 - 0.3798532883114 - -0.9101956814319 - -1.36444138824 - -0.9351994446357 - -1.055070381505 - -1.111335532899 - -0.1703442432756 - 1.580641245921 - 19.23511643627 - 
13.00453604351 - 13.94855825106 - 11.24188075473 - 6.182890198608 - -1.240444789288 - -0.8618182105322 - -1.995146711689 - -0.9868182105322 - -1.270146711689 - -1.135195681432 - -0.9144413882404 - -3.072699444636 - -1.317570381505 - -0.9238355328992 - 0.9546557567244 - -0.3318587540789 - 8.635116436265 - 15.10453604351 - 6.148558251063 - 4.841880754733 - 5.382890198608 - 2.059555210712 - -3.361818210532 - -2.995146711689 - -3.311818210532 - -2.395146711689 - -2.185195681432 - -2.63944138824 - -1.985199444636 - -1.880070381505 - -1.711335532899 - -1.407844243276 - -0.4818587540789 - 2.735116436265 - 3.404536043506 - 3.148558251063 - 3.141880754733 - 2.482890198608 - 0.5595552107122 - -0.7618182105322 - -2.995146711689 - -0.7993182105322 - -2.245146711689 - -1.885195681432 - -1.96444138824 - -1.647699444636 - -2.292570381505 - -1.486335532899 - -1.557844243276 - -0.8193587540789 - 10.13511643627 - 4.748558251063 - -2.218096467799 - -4.369688200573 - -3.659688200573 - -1.219688200573 - -0.3489655844206 - 6.635116436265 - 2.248558251063 - -1.518096467799 - -2.939688200573 - -4.029688200573 - -2.159688200573 - -2.038965584421 - 5.435116436265 - 3.348558251064 - -1.818096467799 - -2.909688200573 - -4.969688200573 - -3.469688200573 - -0.5389655844206 - 6.635116436265 - 5.848558251064 - -0.918096467799 - -4.219688200573 - -0.3489655844206 - -4.029688200573 - -3.659688200573 - -0.5389655844206 - 5.435116436265 - 2.348558251064 - -0.0171098013921 - -2.718096467799 - -4.257793595776 - -3.887793595776 - -2.006947842151 - -2.829688200573 - -0.1835757519589 - 0.8557490906722 - 0.6910344155794 - 4.435116436265 - 4.348558251064 - 0.9828901986079 - 0.481903532201 - -6.457793595776 - -6.137793595776 - -1.516947842151 - -4.029688200573 - -1.013575751959 - -0.8342509093278 - 1.441034415579 - -0.8648835637348 - 1.848558251064 - 0.6828901986079 - 1.081903532201 - -5.457793595776 - -4.787793595776 - 0.1730521578493 - -1.139688200573 - -3.263575751959 - 0.4057490906722 - 4.141034415579 - 
6.635116436265 - 2.104536043506 - 4.348558251064 - 5.641880754733 - -0.1171098013921 - -2.640444789288 - -3.195146711689 - -2.325146711689 - -4.777695681432 - -5.49444138824 - -4.762699444636 - -5.027570381505 - -3.966335532899 - -3.510344243276 - -2.694358754079 - 7.410311799427 - 8.980311799427 - 1.290311799427 - -4.969688200573 - -4.709688200573 - -0.659688200573 - -0.5648835637348 - -2.951441748936 - 0.1828901986079 - 5.081903532201 - 3.342206404224 - 2.873052157849 - 3.162206404224 - 2.983052157849 - 0.920311799427 - 0.1164242480411 - 3.925749090672 - 2.761034415579 - 9.335116436265 - 3.648558251063 - -0.3171098013921 - -5.718096467799 - -2.457793595776 - -3.126947842151 - -1.897793595776 - -2.156947842151 - -0.539688200573 - -2.693575751959 - -0.2742509093278 - 2.531034415579 - -4.664883563735 - 1.548558251064 - -7.017109801392 - -4.018096467799 - -0.7577935957756 - -4.526947842151 - -0.4677935957756 - -3.876947842151 - -3.429688200573 - -2.813575751959 - -0.9442509093278 - 0.6610344155794 - -3.089688200573 - 9.635116436265 - 6.848558251064 - 0.781903532201 - -3.469688200573 - -1.108965584421 - -3.839688200573 - -0.9189655844206 - -1.589688200573 - -2.159688200573 - -1.139688200573 - -4.661818210532 - -4.211818210532 - 10.33511643627 - 3.948558251064 - -0.3618182105322 - -0.3518182105322 - -1.477695681432 - -2.132699444636 - -1.761818210532 - -1.471818210532 - 9.935116436265 - 3.248558251063 - -1.997695681432 - -2.472699444636 - -1.261818210532 - -1.211818210532 square_terms: - - 23918483.46505184 - -221133.04826413715 - -3862092.6941709574 - - -221133.04826413715 - 3420438.117919954 - -157370.87591914795 - - -3862092.6941709574 - -157370.87591914795 - 925172.8526537095 x_candidate_uncentered: - 0.149883507892 - 0.008098080768719 - 0.009146244784311 ================================================ FILE: tests/optimagic/optimizers/_pounders/fixtures/update_residual_model.yaml ================================================ --- coefficients_linear_terms: - - 
2196.51612513 - -1262.407320136 - -447.2228027399 - - 5851.636806189 - -3363.878493533 - -1189.242968631 - - -7828.060887584 - 4499.150822791 - 1593.500698721 - - -2241.152261457 - 1287.57973774 - 457.6976703957 - - -1259.398547285 - 723.0143826494 - 258.7302520168 - - -616.8404903244 - 353.5363964799 - 128.4190782477 - - -203.0932702765 - 115.8979416271 - 43.73088360213 - - 52.98781836565 - -30.58972763875 - -10.39697992437 - - -203.0932702765 - 115.8979416271 - 43.73088360213 - - 52.98781836567 - -30.58972763875 - -10.39697992438 - - 1139.519115522 - -650.4239742628 - -244.9573029291 - - -634.6254513294 - 361.6933575664 - 137.9876812992 - - -336.602139368 - 192.1454087218 - 72.30914784614 - - 759.1990161301 - -450.6925959322 - -113.2196112852 - - -102.1761425864 - 56.9978213237 - 25.77578197644 - - -90.52892012056 - 50.77122751287 - 22.0578212495 - - -75.43506032392 - 42.33874666634 - 18.28624410363 - - 2196.51612513 - -1262.407320136 - -447.2228027399 - - 5851.636806189 - -3363.878493533 - -1189.242968631 - - -7828.060887584 - 4499.150822791 - 1593.500698721 - - -2241.152261457 - 1287.57973774 - 457.6976703957 - - -1259.398547285 - 723.0143826494 - 258.7302520168 - - -616.8404903244 - 353.5363964799 - 128.4190782477 - - -203.0932702765 - 115.8979416271 - 43.73088360213 - - 52.98781836567 - -30.58972763875 - -10.39697992438 - - -203.0932702765 - 115.8979416271 - 43.73088360213 - - 52.98781836565 - -30.58972763875 - -10.39697992437 - - 1139.519115522 - -650.4239742628 - -244.9573029291 - - -634.6254513294 - 361.6933575664 - 137.9876812992 - - -336.602139368 - 192.1454087218 - 72.30914784614 - - 759.1990161301 - -450.6925959322 - -113.2196112852 - - -102.1761425864 - 56.9978213237 - 25.77578197644 - - -90.52892012056 - 50.77122751287 - 22.0578212495 - - -75.43506032392 - 42.33874666634 - 18.28624410363 - - 2196.51612513 - -1262.407320136 - -447.2228027399 - - 5851.636806189 - -3363.878493533 - -1189.242968631 - - -7828.060887584 - 4499.150822791 - 1593.500698721 - 
- -2241.152261457 - 1287.57973774 - 457.6976703957 - - -1259.398547285 - 723.0143826494 - 258.7302520168 - - -616.8404903244 - 353.5363964799 - 128.4190782477 - - -203.0932702765 - 115.8979416271 - 43.73088360213 - - 52.98781836567 - -30.58972763875 - -10.39697992438 - - -203.0932702765 - 115.8979416271 - 43.73088360213 - - 52.98781836565 - -30.58972763875 - -10.39697992437 - - 1139.519115522 - -650.4239742628 - -244.957302929 - - -634.6254513294 - 361.6933575664 - 137.9876812992 - - -336.602139368 - 192.1454087218 - 72.30914784614 - - 759.1990161301 - -450.6925959322 - -113.2196112852 - - -102.1761425864 - 56.9978213237 - 25.77578197644 - - -90.52892012056 - 50.77122751287 - 22.0578212495 - - -75.43506032392 - 42.33874666634 - 18.28624410363 - - 2196.51612513 - -1262.407320136 - -447.2228027399 - - 5851.636806189 - -3363.878493533 - -1189.242968631 - - -7828.060887584 - 4499.150822791 - 1593.500698721 - - -2241.152261457 - 1287.57973774 - 457.6976703958 - - -1259.398547285 - 723.0143826494 - 258.7302520168 - - -616.8404903244 - 353.5363964799 - 128.4190782477 - - -203.0932702765 - 115.8979416271 - 43.73088360213 - - 52.98781836567 - -30.58972763875 - -10.39697992438 - - -203.0932702765 - 115.8979416271 - 43.73088360213 - - 52.98781836567 - -30.58972763875 - -10.39697992438 - - 1139.519115522 - -650.4239742628 - -244.957302929 - - -634.6254513294 - 361.6933575664 - 137.9876812992 - - -336.602139368 - 192.1454087218 - 72.30914784614 - - 759.1990161301 - -450.6925959322 - -113.2196112852 - - -102.1761425864 - 56.9978213237 - 25.77578197644 - - -90.52892012056 - 50.77122751287 - 22.0578212495 - - -75.43506032392 - 42.33874666634 - 18.28624410363 - - 2196.51612513 - -1262.407320136 - -447.2228027399 - - -7828.060887584 - 4499.150822791 - 1593.500698721 - - -358.1729390416 - 204.8877730266 - 75.70741395195 - - -1787.097006562 - 1018.993930402 - 387.2171788386 - - -1787.097006562 - 1018.993930402 - 387.2171788385 - - -1787.097006562 - 1018.993930402 - 387.2171788385 - - 
-68.77747448341 - 38.60197207293 - 16.67275695173 - - 2196.51612513 - -1262.407320136 - -447.2228027399 - - -7828.060887584 - 4499.150822791 - 1593.500698721 - - -358.1729390416 - 204.8877730266 - 75.70741395195 - - -1787.097006562 - 1018.993930402 - 387.2171788386 - - -1787.097006562 - 1018.993930402 - 387.2171788386 - - -1787.097006562 - 1018.993930402 - 387.2171788385 - - -68.77747448342 - 38.60197207293 - 16.67275695173 - - 2196.51612513 - -1262.407320136 - -447.2228027399 - - -7828.060887584 - 4499.150822791 - 1593.500698721 - - -358.1729390416 - 204.8877730266 - 75.70741395195 - - -1787.097006562 - 1018.993930402 - 387.2171788385 - - -1787.097006562 - 1018.993930402 - 387.2171788385 - - -1787.097006562 - 1018.993930402 - 387.2171788385 - - -68.77747448341 - 38.60197207293 - 16.67275695173 - - 2196.51612513 - -1262.407320136 - -447.2228027399 - - -7828.060887584 - 4499.150822791 - 1593.500698721 - - -358.1729390416 - 204.8877730266 - 75.70741395195 - - -1787.097006562 - 1018.993930402 - 387.2171788385 - - -68.77747448341 - 38.60197207293 - 16.67275695173 - - -1787.097006562 - 1018.993930402 - 387.2171788386 - - -1787.097006562 - 1018.993930402 - 387.2171788385 - - -68.77747448341 - 38.60197207293 - 16.67275695173 - - 2196.51612513 - -1262.407320136 - -447.2228027399 - - -7828.060887584 - 4499.150822791 - 1593.500698721 - - -1259.398547285 - 723.0143826494 - 258.7302520168 - - -358.1729390416 - 204.8877730266 - 75.70741395195 - - -79.96891672213 - 45.39053302699 - 17.92448050339 - - -79.96891672213 - 45.39053302699 - 17.92448050339 - - 273.5011442614 - -156.4321396848 - -57.8688020304 - - -1787.097006562 - 1018.993930402 - 387.2171788385 - - -327.7907972813 - 188.0301389654 - 67.78167794519 - - -98.01155756365 - 54.88939519482 - 24.10659139958 - - -68.77747448342 - 38.60197207293 - 16.67275695173 - - 2196.51612513 - -1262.407320136 - -447.2228027399 - - -7828.060887584 - 4499.150822791 - 1593.500698721 - - -1259.398547285 - 723.0143826494 - 258.7302520168 - - 
-358.1729390416 - 204.8877730266 - 75.70741395195 - - -79.96891672213 - 45.39053302699 - 17.92448050339 - - -79.96891672213 - 45.39053302699 - 17.92448050339 - - 273.5011442614 - -156.4321396848 - -57.8688020304 - - -1787.097006562 - 1018.993930402 - 387.2171788386 - - -327.7907972813 - 188.0301389654 - 67.78167794518 - - -98.01155756366 - 54.88939519482 - 24.10659139958 - - -68.77747448342 - 38.60197207293 - 16.67275695173 - - 2196.51612513 - -1262.407320136 - -447.2228027399 - - -7828.060887584 - 4499.150822791 - 1593.500698721 - - -1259.398547285 - 723.0143826494 - 258.7302520168 - - -358.1729390416 - 204.8877730266 - 75.70741395194 - - -79.96891672213 - 45.39053302699 - 17.92448050339 - - -79.96891672215 - 45.39053302699 - 17.92448050339 - - 273.5011442614 - -156.4321396848 - -57.8688020304 - - -1787.097006562 - 1018.993930402 - 387.2171788386 - - -327.7907972813 - 188.0301389654 - 67.78167794518 - - -98.01155756366 - 54.88939519482 - 24.10659139958 - - -68.77747448341 - 38.60197207293 - 16.67275695173 - - 2196.51612513 - -1262.407320136 - -447.2228027399 - - 5851.636806189 - -3363.878493533 - -1189.242968631 - - -7828.060887584 - 4499.150822791 - 1593.500698721 - - -2241.152261456 - 1287.57973774 - 457.6976703957 - - -1259.398547285 - 723.0143826494 - 258.7302520168 - - -616.8404903244 - 353.5363964799 - 128.4190782477 - - 52.98781836565 - -30.58972763875 - -10.39697992437 - - 52.98781836567 - -30.58972763875 - -10.39697992438 - - 1139.519115522 - -650.4239742628 - -244.9573029291 - - -634.6254513294 - 361.6933575664 - 137.9876812992 - - -336.602139368 - 192.1454087218 - 72.30914784614 - - 759.1990161301 - -450.6925959322 - -113.2196112852 - - -102.1761425864 - 56.9978213237 - 25.77578197643 - - -90.52892012056 - 50.77122751287 - 22.0578212495 - - -75.43506032391 - 42.33874666634 - 18.28624410363 - - -1787.097006562 - 1018.993930402 - 387.2171788386 - - -1787.097006562 - 1018.993930402 - 387.2171788386 - - -1787.097006562 - 1018.993930402 - 387.2171788386 - - 
-1787.097006562 - 1018.993930402 - 387.2171788385 - - -1787.097006562 - 1018.993930402 - 387.2171788386 - - -1787.097006562 - 1018.993930402 - 387.2171788385 - - 2196.51612513 - -1262.407320136 - -447.2228027399 - - -7828.060887584 - 4499.150822791 - 1593.500698721 - - -1259.398547285 - 723.0143826494 - 258.7302520168 - - -358.1729390416 - 204.8877730266 - 75.70741395195 - - -79.96891672213 - 45.39053302699 - 17.92448050339 - - 273.5011442614 - -156.4321396848 - -57.8688020304 - - -79.96891672215 - 45.39053302699 - 17.92448050339 - - 273.5011442614 - -156.4321396848 - -57.8688020304 - - -1787.097006562 - 1018.993930402 - 387.2171788385 - - -327.7907972813 - 188.0301389654 - 67.78167794518 - - -98.01155756365 - 54.88939519482 - 24.10659139958 - - -68.77747448342 - 38.60197207293 - 16.67275695173 - - 2196.51612513 - -1262.407320136 - -447.2228027399 - - -7828.060887584 - 4499.150822791 - 1593.500698721 - - -1259.398547285 - 723.0143826494 - 258.7302520168 - - -358.1729390416 - 204.8877730266 - 75.70741395195 - - -79.96891672213 - 45.39053302699 - 17.92448050339 - - 273.5011442614 - -156.4321396848 - -57.8688020304 - - -79.96891672215 - 45.39053302699 - 17.92448050339 - - 273.5011442614 - -156.4321396848 - -57.8688020304 - - -1787.097006562 - 1018.993930402 - 387.2171788386 - - -327.7907972813 - 188.0301389654 - 67.78167794518 - - -98.01155756366 - 54.88939519482 - 24.10659139958 - - -68.77747448342 - 38.60197207293 - 16.67275695173 - - 2196.51612513 - -1262.407320136 - -447.2228027399 - - -7828.060887584 - 4499.150822791 - 1593.500698721 - - -1259.398547285 - 723.0143826494 - 258.7302520168 - - -358.1729390416 - 204.8877730266 - 75.70741395195 - - -79.96891672213 - 45.39053302699 - 17.92448050339 - - 273.5011442614 - -156.4321396848 - -57.8688020304 - - -79.96891672213 - 45.39053302699 - 17.92448050339 - - 273.5011442614 - -156.4321396848 - -57.8688020304 - - -1787.097006562 - 1018.993930402 - 387.2171788386 - - -327.7907972813 - 188.0301389654 - 67.78167794518 - - 
-98.01155756366 - 54.88939519482 - 24.10659139958 - - -68.77747448342 - 38.60197207293 - 16.67275695173 - - -1787.097006562 - 1018.993930402 - 387.2171788386 - - 2196.51612513 - -1262.407320136 - -447.2228027399 - - -7828.060887584 - 4499.150822791 - 1593.500698721 - - -358.1729390416 - 204.8877730266 - 75.70741395195 - - -1787.097006562 - 1018.993930402 - 387.2171788385 - - -68.77747448342 - 38.60197207293 - 16.67275695173 - - -1787.097006562 - 1018.993930402 - 387.2171788385 - - -68.77747448342 - 38.60197207293 - 16.67275695173 - - -1787.097006562 - 1018.993930402 - 387.2171788386 - - -1787.097006562 - 1018.993930402 - 387.2171788385 - - -1787.097006562 - 1018.993930402 - 387.2171788386 - - -203.0932702765 - 115.8979416271 - 43.73088360213 - - -203.0932702765 - 115.8979416271 - 43.73088360213 - - 2196.51612513 - -1262.407320136 - -447.2228027399 - - -7828.060887584 - 4499.150822791 - 1593.500698721 - - -203.0932702765 - 115.8979416271 - 43.73088360213 - - -203.0932702765 - 115.8979416271 - 43.73088360213 - - 1139.519115522 - -650.4239742628 - -244.9573029291 - - -336.602139368 - 192.1454087218 - 72.30914784614 - - -203.0932702765 - 115.8979416271 - 43.73088360213 - - -203.0932702765 - 115.8979416271 - 43.73088360213 - - 2196.51612513 - -1262.407320136 - -447.2228027399 - - -7828.060887584 - 4499.150822791 - 1593.500698721 - - 1139.519115522 - -650.4239742628 - -244.957302929 - - -336.602139368 - 192.1454087218 - 72.30914784614 - - -203.0932702765 - 115.8979416271 - 43.73088360213 - - -203.0932702765 - 115.8979416271 - 43.73088360213 coefficients_square_terms: - - - -4024.637363485 - -1764.942759693 - 1079.698769709 - - -1764.942759693 - -2617.72518908 - -389.5323589034 - - 1079.698769709 - -389.5323589034 - -26.55575319468 - - - -10720.15125926 - -4701.162126956 - 2877.493275431 - - -4701.162126956 - -6974.883293509 - -1034.563281084 - - 2877.493275431 - -1034.563281084 - -66.50544541636 - - - 14342.9552468 - 6289.882966587 - -3848.05977671 - - 6289.882966587 - 
9329.360846504 - 1387.748277719 - - -3848.05977671 - 1387.748277719 - 93.98859313953 - - - 4107.502577423 - 1801.278961009 - -1100.9294753 - - 1801.278961009 - 2670.207390632 - 399.4634027254 - - -1100.9294753 - 399.4634027254 - 29.78961963679 - - - 2309.371114485 - 1012.73405011 - -617.8751193011 - - 1012.73405011 - 1499.715375261 - 226.6996495043 - - -617.8751193011 - 226.6996495043 - 19.71367105379 - - - 1132.425944378 - 496.6017216382 - -301.7604507638 - - 496.6017216382 - 733.6727867614 - 113.499372341 - - -301.7604507638 - 113.499372341 - 12.95003027788 - - - 373.9764193214 - 163.996345322 - -98.61168950718 - - 163.996345322 - 240.8144049061 - 39.47545780394 - - -98.61168950718 - 39.47545780394 - 7.079420182092 - - - -96.78384376002 - -42.4439297132 - 26.24677072092 - - -42.4439297132 - -63.35038252298 - -8.827773128154 - - 26.24677072092 - -8.827773128154 - 0.1202800918614 - - - 373.9764193214 - 163.996345322 - -98.61168950718 - - 163.996345322 - 240.8144049061 - 39.47545780394 - - -98.61168950718 - 39.47545780394 - 7.079420182093 - - - -96.78384376002 - -42.4439297132 - 26.24677072092 - - -42.4439297132 - -63.35038252298 - -8.827773128154 - - 26.24677072092 - -8.827773128154 - 0.1202800918629 - - - -2097.995093057 - -920.0148615183 - 553.5014196588 - - -920.0148615183 - -1351.37575036 - -220.8956593773 - - 553.5014196588 - -220.8956593773 - -38.92732733725 - - - 1169.641704033 - 512.9089847695 - -307.4565994537 - - 512.9089847695 - 751.8082285228 - 125.2965573785 - - -307.4565994537 - 125.2965573785 - 24.72038264179 - - - 619.6881094322 - 271.7463432808 - -163.5235269434 - - 271.7463432808 - 399.2074510919 - 65.17951383646 - - -163.5235269434 - 65.17951383646 - 11.40410777447 - - - -1358.879936331 - -596.007217569 - 394.3663309731 - - -596.007217569 - -926.0633850662 - -74.53434762621 - - 394.3663309731 - -74.53434762621 - 71.17453818383 - - - 191.0851566267 - 83.78647255099 - -47.67820746517 - - 83.78647255099 - 119.2114851962 - 25.34580158139 - - 
-47.67820746517 - 25.34580158139 - 10.89567211601 - - - 168.6962139181 - 73.97112025538 - -42.64264193489 - - 73.97112025538 - 106.0235548818 - 21.3234391598 - - -42.64264193489 - 21.3234391598 - 8.138709169582 - - - 140.4964790262 - 61.60610305115 - -35.58093011922 - - 61.60610305115 - 88.39458253098 - 17.63175280599 - - -35.58093011922 - 17.63175280599 - 6.599339566474 - - - -4024.637363485 - -1764.942759693 - 1079.698769709 - - -1764.942759693 - -2617.72518908 - -389.5323589034 - - 1079.698769709 - -389.5323589034 - -26.55575319468 - - - -10720.15125926 - -4701.162126956 - 2877.493275431 - - -4701.162126956 - -6974.883293509 - -1034.563281084 - - 2877.493275431 - -1034.563281084 - -66.50544541639 - - - 14342.9552468 - 6289.882966587 - -3848.05977671 - - 6289.882966587 - 9329.360846504 - 1387.748277719 - - -3848.05977671 - 1387.748277719 - 93.98859313953 - - - 4107.502577423 - 1801.278961009 - -1100.9294753 - - 1801.278961009 - 2670.207390632 - 399.4634027254 - - -1100.9294753 - 399.4634027254 - 29.78961963679 - - - 2309.371114485 - 1012.73405011 - -617.8751193011 - - 1012.73405011 - 1499.715375261 - 226.6996495043 - - -617.8751193011 - 226.6996495043 - 19.71367105379 - - - 1132.425944378 - 496.6017216382 - -301.7604507638 - - 496.6017216382 - 733.6727867614 - 113.499372341 - - -301.7604507638 - 113.499372341 - 12.95003027788 - - - 373.9764193214 - 163.996345322 - -98.61168950718 - - 163.996345322 - 240.8144049061 - 39.47545780393 - - -98.61168950718 - 39.47545780393 - 7.079420182093 - - - -96.78384376002 - -42.4439297132 - 26.24677072092 - - -42.4439297132 - -63.35038252298 - -8.827773128154 - - 26.24677072092 - -8.827773128154 - 0.1202800918629 - - - 373.9764193214 - 163.996345322 - -98.61168950718 - - 163.996345322 - 240.8144049061 - 39.47545780393 - - -98.61168950718 - 39.47545780393 - 7.079420182093 - - - -96.78384376002 - -42.4439297132 - 26.24677072092 - - -42.4439297132 - -63.35038252298 - -8.827773128154 - - 26.24677072092 - -8.827773128154 - 
0.1202800918614 - - - -2097.995093057 - -920.0148615183 - 553.5014196588 - - -920.0148615183 - -1351.37575036 - -220.8956593773 - - 553.5014196588 - -220.8956593773 - -38.92732733724 - - - 1169.641704033 - 512.9089847695 - -307.4565994537 - - 512.9089847695 - 751.8082285228 - 125.2965573785 - - -307.4565994537 - 125.2965573785 - 24.72038264179 - - - 619.6881094322 - 271.7463432808 - -163.5235269434 - - 271.7463432808 - 399.2074510919 - 65.17951383646 - - -163.5235269434 - 65.17951383646 - 11.40410777447 - - - -1358.879936331 - -596.007217569 - 394.3663309731 - - -596.007217569 - -926.0633850662 - -74.53434762621 - - 394.3663309731 - -74.53434762621 - 71.17453818383 - - - 191.0851566267 - 83.78647255099 - -47.67820746517 - - 83.78647255099 - 119.2114851962 - 25.34580158139 - - -47.67820746517 - 25.34580158139 - 10.89567211601 - - - 168.6962139181 - 73.97112025538 - -42.64264193489 - - 73.97112025538 - 106.0235548818 - 21.3234391598 - - -42.64264193489 - 21.3234391598 - 8.138709169582 - - - 140.4964790262 - 61.60610305115 - -35.58093011922 - - 61.60610305115 - 88.39458253098 - 17.63175280599 - - -35.58093011922 - 17.63175280599 - 6.599339566474 - - - -4024.637363485 - -1764.942759693 - 1079.698769709 - - -1764.942759693 - -2617.72518908 - -389.5323589034 - - 1079.698769709 - -389.5323589034 - -26.55575319468 - - - -10720.15125926 - -4701.162126956 - 2877.493275431 - - -4701.162126956 - -6974.883293509 - -1034.563281084 - - 2877.493275431 - -1034.563281084 - -66.50544541638 - - - 14342.9552468 - 6289.882966587 - -3848.05977671 - - 6289.882966587 - 9329.360846504 - 1387.748277719 - - -3848.05977671 - 1387.748277719 - 93.98859313953 - - - 4107.502577423 - 1801.278961009 - -1100.9294753 - - 1801.278961009 - 2670.207390632 - 399.4634027254 - - -1100.9294753 - 399.4634027254 - 29.78961963679 - - - 2309.371114485 - 1012.73405011 - -617.8751193011 - - 1012.73405011 - 1499.715375261 - 226.6996495043 - - -617.8751193011 - 226.6996495043 - 19.71367105378 - - - 1132.425944378 - 
496.6017216382 - -301.7604507638 - - 496.6017216382 - 733.6727867614 - 113.499372341 - - -301.7604507638 - 113.499372341 - 12.95003027788 - - - 373.9764193214 - 163.996345322 - -98.61168950718 - - 163.996345322 - 240.8144049061 - 39.47545780394 - - -98.61168950718 - 39.47545780394 - 7.079420182093 - - - -96.78384376002 - -42.4439297132 - 26.24677072092 - - -42.4439297132 - -63.35038252298 - -8.827773128153 - - 26.24677072092 - -8.827773128153 - 0.120280091863 - - - 373.9764193214 - 163.996345322 - -98.61168950718 - - 163.996345322 - 240.8144049061 - 39.47545780393 - - -98.61168950718 - 39.47545780393 - 7.079420182088 - - - -96.78384376002 - -42.4439297132 - 26.24677072092 - - -42.4439297132 - -63.35038252298 - -8.827773128154 - - 26.24677072092 - -8.827773128154 - 0.1202800918614 - - - -2097.995093057 - -920.0148615183 - 553.5014196588 - - -920.0148615183 - -1351.37575036 - -220.8956593773 - - 553.5014196588 - -220.8956593773 - -38.92732733725 - - - 1169.641704033 - 512.9089847695 - -307.4565994537 - - 512.9089847695 - 751.8082285228 - 125.2965573785 - - -307.4565994537 - 125.2965573785 - 24.72038264179 - - - 619.6881094322 - 271.7463432808 - -163.5235269434 - - 271.7463432808 - 399.2074510919 - 65.17951383646 - - -163.5235269434 - 65.17951383646 - 11.40410777447 - - - -1358.879936331 - -596.007217569 - 394.3663309731 - - -596.007217569 - -926.0633850662 - -74.53434762621 - - 394.3663309731 - -74.53434762621 - 71.17453818383 - - - 191.0851566267 - 83.78647255099 - -47.67820746517 - - 83.78647255099 - 119.2114851962 - 25.34580158139 - - -47.67820746517 - 25.34580158139 - 10.89567211601 - - - 168.6962139181 - 73.97112025538 - -42.64264193489 - - 73.97112025538 - 106.0235548818 - 21.3234391598 - - -42.64264193489 - 21.3234391598 - 8.138709169581 - - - 140.4964790262 - 61.60610305115 - -35.58093011922 - - 61.60610305115 - 88.39458253098 - 17.63175280599 - - -35.58093011922 - 17.63175280599 - 6.599339566474 - - - -4024.637363485 - -1764.942759693 - 1079.698769709 - - 
-1764.942759693 - -2617.72518908 - -389.5323589034 - - 1079.698769709 - -389.5323589034 - -26.55575319468 - - - -10720.15125926 - -4701.162126956 - 2877.493275431 - - -4701.162126956 - -6974.883293509 - -1034.563281084 - - 2877.493275431 - -1034.563281084 - -66.50544541636 - - - 14342.9552468 - 6289.882966587 - -3848.05977671 - - 6289.882966587 - 9329.360846504 - 1387.748277719 - - -3848.05977671 - 1387.748277719 - 93.98859313953 - - - 4107.502577423 - 1801.278961009 - -1100.9294753 - - 1801.278961009 - 2670.207390632 - 399.4634027254 - - -1100.9294753 - 399.4634027254 - 29.78961963678 - - - 2309.371114485 - 1012.73405011 - -617.8751193011 - - 1012.73405011 - 1499.715375261 - 226.6996495043 - - -617.8751193011 - 226.6996495043 - 19.71367105378 - - - 1132.425944378 - 496.6017216382 - -301.7604507638 - - 496.6017216382 - 733.6727867614 - 113.499372341 - - -301.7604507638 - 113.499372341 - 12.95003027788 - - - 373.9764193214 - 163.996345322 - -98.61168950718 - - 163.996345322 - 240.8144049061 - 39.47545780394 - - -98.61168950718 - 39.47545780394 - 7.079420182092 - - - -96.78384376002 - -42.4439297132 - 26.24677072092 - - -42.4439297132 - -63.35038252298 - -8.827773128153 - - 26.24677072092 - -8.827773128153 - 0.120280091863 - - - 373.9764193214 - 163.996345322 - -98.61168950718 - - 163.996345322 - 240.8144049061 - 39.47545780393 - - -98.61168950718 - 39.47545780393 - 7.079420182093 - - - -96.78384376002 - -42.4439297132 - 26.24677072092 - - -42.4439297132 - -63.35038252298 - -8.827773128154 - - 26.24677072092 - -8.827773128154 - 0.1202800918629 - - - -2097.995093057 - -920.0148615183 - 553.5014196588 - - -920.0148615183 - -1351.37575036 - -220.8956593773 - - 553.5014196588 - -220.8956593773 - -38.92732733725 - - - 1169.641704033 - 512.9089847695 - -307.4565994537 - - 512.9089847695 - 751.8082285228 - 125.2965573785 - - -307.4565994537 - 125.2965573785 - 24.72038264179 - - - 619.6881094322 - 271.7463432808 - -163.5235269434 - - 271.7463432808 - 399.2074510919 - 
65.17951383646 - - -163.5235269434 - 65.17951383646 - 11.40410777447 - - - -1358.879936331 - -596.007217569 - 394.3663309731 - - -596.007217569 - -926.0633850662 - -74.53434762621 - - 394.3663309731 - -74.53434762621 - 71.17453818383 - - - 191.0851566267 - 83.78647255099 - -47.67820746517 - - 83.78647255099 - 119.2114851962 - 25.34580158139 - - -47.67820746517 - 25.34580158139 - 10.89567211601 - - - 168.6962139181 - 73.97112025538 - -42.64264193489 - - 73.97112025538 - 106.0235548818 - 21.3234391598 - - -42.64264193489 - 21.3234391598 - 8.138709169582 - - - 140.4964790262 - 61.60610305115 - -35.58093011922 - - 61.60610305115 - 88.39458253098 - 17.63175280599 - - -35.58093011922 - 17.63175280599 - 6.599339566474 - - - -4024.637363485 - -1764.942759693 - 1079.698769709 - - -1764.942759693 - -2617.72518908 - -389.5323589034 - - 1079.698769709 - -389.5323589034 - -26.55575319468 - - - 14342.9552468 - 6289.882966587 - -3848.05977671 - - 6289.882966587 - 9329.360846504 - 1387.748277719 - - -3848.05977671 - 1387.748277719 - 93.98859313953 - - - 658.4385693042 - 288.7419554965 - -174.6355818894 - - 288.7419554965 - 425.4259635842 - 67.56085381701 - - -174.6355818894 - 67.56085381701 - 9.734306985193 - - - 3292.641782076 - 1443.885786518 - -866.4874150134 - - 1443.885786518 - 2117.778991587 - 350.8652875891 - - -866.4874150134 - 350.8652875891 - 66.9809474737 - - - 3292.641782076 - 1443.885786518 - -866.4874150134 - - 1443.885786518 - 2117.778991587 - 350.8652875891 - - -866.4874150134 - 350.8652875891 - 66.9809474737 - - - 3292.641782076 - 1443.885786518 - -866.4874150134 - - 1443.885786518 - 2117.778991587 - 350.8652875891 - - -866.4874150134 - 350.8652875891 - 66.9809474737 - - - 128.0971384699 - 56.16913286035 - -32.4405095433 - - 56.16913286035 - 80.5930469462 - 16.07620102258 - - -32.4405095433 - 16.07620102258 - 6.017649404194 - - - -4024.637363485 - -1764.942759693 - 1079.698769709 - - -1764.942759693 - -2617.72518908 - -389.5323589034 - - 1079.698769709 - 
-389.5323589034 - -26.55575319468 - - - 14342.9552468 - 6289.882966587 - -3848.05977671 - - 6289.882966587 - 9329.360846504 - 1387.748277719 - - -3848.05977671 - 1387.748277719 - 93.98859313953 - - - 658.4385693042 - 288.7419554965 - -174.6355818894 - - 288.7419554965 - 425.4259635842 - 67.56085381701 - - -174.6355818894 - 67.56085381701 - 9.734306985193 - - - 3292.641782076 - 1443.885786518 - -866.4874150134 - - 1443.885786518 - 2117.778991587 - 350.8652875891 - - -866.4874150134 - 350.8652875891 - 66.9809474737 - - - 3292.641782076 - 1443.885786518 - -866.4874150134 - - 1443.885786518 - 2117.778991587 - 350.8652875891 - - -866.4874150134 - 350.8652875891 - 66.9809474737 - - - 3292.641782076 - 1443.885786518 - -866.4874150134 - - 1443.885786518 - 2117.778991587 - 350.8652875891 - - -866.4874150134 - 350.8652875891 - 66.9809474737 - - - 128.0971384699 - 56.16913286035 - -32.4405095433 - - 56.16913286035 - 80.5930469462 - 16.07620102257 - - -32.4405095433 - 16.07620102257 - 6.017649404193 - - - -4024.637363485 - -1764.942759693 - 1079.698769709 - - -1764.942759693 - -2617.72518908 - -389.5323589034 - - 1079.698769709 - -389.5323589034 - -26.55575319469 - - - 14342.9552468 - 6289.882966587 - -3848.05977671 - - 6289.882966587 - 9329.360846504 - 1387.748277719 - - -3848.05977671 - 1387.748277719 - 93.98859313953 - - - 658.4385693042 - 288.7419554965 - -174.6355818894 - - 288.7419554965 - 425.4259635842 - 67.56085381701 - - -174.6355818894 - 67.56085381701 - 9.734306985191 - - - 3292.641782076 - 1443.885786518 - -866.4874150134 - - 1443.885786518 - 2117.778991587 - 350.8652875891 - - -866.4874150134 - 350.8652875891 - 66.9809474737 - - - 3292.641782076 - 1443.885786518 - -866.4874150134 - - 1443.885786518 - 2117.778991587 - 350.8652875891 - - -866.4874150134 - 350.8652875891 - 66.9809474737 - - - 3292.641782076 - 1443.885786518 - -866.4874150134 - - 1443.885786518 - 2117.778991587 - 350.8652875891 - - -866.4874150134 - 350.8652875891 - 66.9809474737 - - - 128.0971384699 
- 56.16913286035 - -32.4405095433 - - 56.16913286035 - 80.5930469462 - 16.07620102258 - - -32.4405095433 - 16.07620102258 - 6.017649404194 - - - -4024.637363485 - -1764.942759693 - 1079.698769709 - - -1764.942759693 - -2617.72518908 - -389.5323589034 - - 1079.698769709 - -389.5323589034 - -26.55575319468 - - - 14342.9552468 - 6289.882966587 - -3848.05977671 - - 6289.882966587 - 9329.360846504 - 1387.748277719 - - -3848.05977671 - 1387.748277719 - 93.98859313953 - - - 658.4385693042 - 288.7419554965 - -174.6355818894 - - 288.7419554965 - 425.4259635842 - 67.56085381701 - - -174.6355818894 - 67.56085381701 - 9.734306985194 - - - 3292.641782076 - 1443.885786518 - -866.4874150134 - - 1443.885786518 - 2117.778991587 - 350.8652875891 - - -866.4874150134 - 350.8652875891 - 66.9809474737 - - - 128.0971384699 - 56.16913286035 - -32.4405095433 - - 56.16913286035 - 80.5930469462 - 16.07620102258 - - -32.4405095433 - 16.07620102258 - 6.017649404194 - - - 3292.641782076 - 1443.885786518 - -866.4874150134 - - 1443.885786518 - 2117.778991587 - 350.8652875891 - - -866.4874150134 - 350.8652875891 - 66.9809474737 - - - 3292.641782076 - 1443.885786518 - -866.4874150134 - - 1443.885786518 - 2117.778991587 - 350.8652875891 - - -866.4874150134 - 350.8652875891 - 66.9809474737 - - - 128.0971384699 - 56.16913286035 - -32.4405095433 - - 56.16913286035 - 80.5930469462 - 16.07620102258 - - -32.4405095433 - 16.07620102258 - 6.017649404194 - - - -4024.637363485 - -1764.942759693 - 1079.698769709 - - -1764.942759693 - -2617.72518908 - -389.5323589034 - - 1079.698769709 - -389.5323589034 - -26.55575319469 - - - 14342.9552468 - 6289.882966587 - -3848.05977671 - - 6289.882966587 - 9329.360846504 - 1387.748277719 - - -3848.05977671 - 1387.748277719 - 93.98859313953 - - - 2309.371114485 - 1012.73405011 - -617.8751193011 - - 1012.73405011 - 1499.715375261 - 226.6996495043 - - -617.8751193011 - 226.6996495043 - 19.71367105378 - - - 658.4385693042 - 288.7419554965 - -174.6355818894 - - 288.7419554965 - 
425.4259635842 - 67.56085381701 - - -174.6355818894 - 67.56085381701 - 9.734306985194 - - - 147.8038109482 - 64.81345870863 - -38.46761366883 - - 64.81345870863 - 94.45893845337 - 16.56853761046 - - -38.46761366883 - 16.56853761046 - 4.157765246739 - - - 147.8038109482 - 64.81345870863 - -38.46761366883 - - 64.81345870863 - 94.45893845337 - 16.56853761046 - - -38.46761366883 - 16.56853761046 - 4.157765246739 - - - -502.8297957863 - -220.5034461954 - 133.321878795 - - -220.5034461954 - -324.8254786078 - -51.67462438823 - - 133.321878795 - -51.67462438823 - -7.546900209044 - - - 3292.641782076 - 1443.885786518 - -866.4874150134 - - 1443.885786518 - 2117.778991587 - 350.8652875891 - - -866.4874150134 - 350.8652875891 - 66.9809474737 - - - 601.415922879 - 263.7393940918 - -160.5922585799 - - 263.7393940918 - 390.1127867798 - 59.64454290172 - - -160.5922585799 - 59.64454290172 - 5.986811985787 - - - 182.8153091813 - 80.16167205613 - -46.0517180463 - - 80.16167205613 - 114.6708300718 - 23.41375701469 - - -46.0517180463 - 23.41375701469 - 9.249705571709 - - - 128.0971384699 - 56.16913286035 - -32.4405095433 - - 56.16913286035 - 80.5930469462 - 16.07620102257 - - -32.4405095433 - 16.07620102257 - 6.017649404194 - - - -4024.637363485 - -1764.942759693 - 1079.698769709 - - -1764.942759693 - -2617.72518908 - -389.5323589034 - - 1079.698769709 - -389.5323589034 - -26.55575319469 - - - 14342.9552468 - 6289.882966587 - -3848.05977671 - - 6289.882966587 - 9329.360846504 - 1387.748277719 - - -3848.05977671 - 1387.748277719 - 93.98859313953 - - - 2309.371114485 - 1012.73405011 - -617.8751193011 - - 1012.73405011 - 1499.715375261 - 226.6996495043 - - -617.8751193011 - 226.6996495043 - 19.71367105378 - - - 658.4385693042 - 288.7419554965 - -174.6355818894 - - 288.7419554965 - 425.4259635842 - 67.56085381701 - - -174.6355818894 - 67.56085381701 - 9.734306985191 - - - 147.8038109482 - 64.81345870863 - -38.46761366883 - - 64.81345870863 - 94.45893845337 - 16.56853761046 - - 
-38.46761366883 - 16.56853761046 - 4.157765246739 - - - 147.8038109482 - 64.81345870863 - -38.46761366883 - - 64.81345870863 - 94.45893845337 - 16.56853761046 - - -38.46761366883 - 16.56853761046 - 4.157765246739 - - - -502.8297957863 - -220.5034461954 - 133.321878795 - - -220.5034461954 - -324.8254786078 - -51.67462438823 - - 133.321878795 - -51.67462438823 - -7.546900209046 - - - 3292.641782076 - 1443.885786518 - -866.4874150134 - - 1443.885786518 - 2117.778991587 - 350.8652875891 - - -866.4874150134 - 350.8652875891 - 66.9809474737 - - - 601.415922879 - 263.7393940918 - -160.5922585799 - - 263.7393940918 - 390.1127867798 - 59.64454290172 - - -160.5922585799 - 59.64454290172 - 5.98681198579 - - - 182.8153091813 - 80.16167205613 - -46.0517180463 - - 80.16167205613 - 114.6708300718 - 23.41375701469 - - -46.0517180463 - 23.41375701469 - 9.249705571708 - - - 128.0971384699 - 56.16913286035 - -32.4405095433 - - 56.16913286035 - 80.5930469462 - 16.07620102257 - - -32.4405095433 - 16.07620102257 - 6.017649404193 - - - -4024.637363485 - -1764.942759693 - 1079.698769709 - - -1764.942759693 - -2617.72518908 - -389.5323589034 - - 1079.698769709 - -389.5323589034 - -26.55575319468 - - - 14342.9552468 - 6289.882966587 - -3848.05977671 - - 6289.882966587 - 9329.360846504 - 1387.748277719 - - -3848.05977671 - 1387.748277719 - 93.98859313953 - - - 2309.371114485 - 1012.73405011 - -617.8751193011 - - 1012.73405011 - 1499.715375261 - 226.6996495043 - - -617.8751193011 - 226.6996495043 - 19.71367105378 - - - 658.4385693042 - 288.7419554965 - -174.6355818894 - - 288.7419554965 - 425.4259635842 - 67.56085381701 - - -174.6355818894 - 67.56085381701 - 9.734306985197 - - - 147.8038109482 - 64.81345870863 - -38.46761366883 - - 64.81345870863 - 94.45893845337 - 16.56853761046 - - -38.46761366883 - 16.56853761046 - 4.157765246739 - - - 147.8038109482 - 64.81345870863 - -38.46761366883 - - 64.81345870863 - 94.45893845337 - 16.56853761046 - - -38.46761366883 - 16.56853761046 - 4.157765246737 
- - - -502.8297957863 - -220.5034461954 - 133.321878795 - - -220.5034461954 - -324.8254786078 - -51.67462438823 - - 133.321878795 - -51.67462438823 - -7.546900209044 - - - 3292.641782076 - 1443.885786518 - -866.4874150134 - - 1443.885786518 - 2117.778991587 - 350.8652875891 - - -866.4874150134 - 350.8652875891 - 66.9809474737 - - - 601.415922879 - 263.7393940918 - -160.5922585799 - - 263.7393940918 - 390.1127867798 - 59.64454290173 - - -160.5922585799 - 59.64454290173 - 5.986811985789 - - - 182.8153091813 - 80.16167205613 - -46.0517180463 - - 80.16167205613 - 114.6708300718 - 23.41375701469 - - -46.0517180463 - 23.41375701469 - 9.249705571708 - - - 128.0971384699 - 56.16913286035 - -32.4405095433 - - 56.16913286035 - 80.5930469462 - 16.07620102258 - - -32.4405095433 - 16.07620102258 - 6.017649404194 - - - -4024.637363485 - -1764.942759693 - 1079.698769709 - - -1764.942759693 - -2617.72518908 - -389.5323589034 - - 1079.698769709 - -389.5323589034 - -26.55575319468 - - - -10720.15125926 - -4701.162126956 - 2877.493275431 - - -4701.162126956 - -6974.883293509 - -1034.563281084 - - 2877.493275431 - -1034.563281084 - -66.50544541639 - - - 14342.9552468 - 6289.882966587 - -3848.05977671 - - 6289.882966587 - 9329.360846504 - 1387.748277719 - - -3848.05977671 - 1387.748277719 - 93.98859313953 - - - 4107.502577423 - 1801.278961009 - -1100.9294753 - - 1801.278961009 - 2670.207390632 - 399.4634027254 - - -1100.9294753 - 399.4634027254 - 29.7896196368 - - - 2309.371114485 - 1012.73405011 - -617.8751193011 - - 1012.73405011 - 1499.715375261 - 226.6996495043 - - -617.8751193011 - 226.6996495043 - 19.71367105378 - - - 1132.425944378 - 496.6017216382 - -301.7604507638 - - 496.6017216382 - 733.6727867614 - 113.499372341 - - -301.7604507638 - 113.499372341 - 12.95003027788 - - - -96.78384376002 - -42.4439297132 - 26.24677072092 - - -42.4439297132 - -63.35038252298 - -8.827773128153 - - 26.24677072092 - -8.827773128153 - 0.1202800918613 - - - -96.78384376002 - -42.4439297132 - 
26.24677072092 - - -42.4439297132 - -63.35038252298 - -8.827773128153 - - 26.24677072092 - -8.827773128153 - 0.1202800918629 - - - -2097.995093057 - -920.0148615183 - 553.5014196588 - - -920.0148615183 - -1351.37575036 - -220.8956593773 - - 553.5014196588 - -220.8956593773 - -38.92732733724 - - - 1169.641704033 - 512.9089847695 - -307.4565994537 - - 512.9089847695 - 751.8082285228 - 125.2965573785 - - -307.4565994537 - 125.2965573785 - 24.72038264179 - - - 619.6881094322 - 271.7463432808 - -163.5235269434 - - 271.7463432808 - 399.2074510919 - 65.17951383646 - - -163.5235269434 - 65.17951383646 - 11.40410777447 - - - -1358.879936331 - -596.007217569 - 394.3663309731 - - -596.007217569 - -926.0633850662 - -74.53434762621 - - 394.3663309731 - -74.53434762621 - 71.17453818383 - - - 191.0851566267 - 83.78647255099 - -47.67820746517 - - 83.78647255099 - 119.2114851962 - 25.34580158139 - - -47.67820746517 - 25.34580158139 - 10.89567211601 - - - 168.6962139181 - 73.97112025538 - -42.64264193489 - - 73.97112025538 - 106.0235548818 - 21.3234391598 - - -42.64264193489 - 21.3234391598 - 8.138709169582 - - - 140.4964790262 - 61.60610305115 - -35.58093011922 - - 61.60610305115 - 88.39458253098 - 17.63175280599 - - -35.58093011922 - 17.63175280599 - 6.599339566474 - - - 3292.641782076 - 1443.885786518 - -866.4874150134 - - 1443.885786518 - 2117.778991587 - 350.8652875891 - - -866.4874150134 - 350.8652875891 - 66.98094747369 - - - 3292.641782076 - 1443.885786518 - -866.4874150134 - - 1443.885786518 - 2117.778991587 - 350.8652875891 - - -866.4874150134 - 350.8652875891 - 66.9809474737 - - - 3292.641782076 - 1443.885786518 - -866.4874150134 - - 1443.885786518 - 2117.778991587 - 350.8652875891 - - -866.4874150134 - 350.8652875891 - 66.9809474737 - - - 3292.641782076 - 1443.885786518 - -866.4874150134 - - 1443.885786518 - 2117.778991587 - 350.8652875891 - - -866.4874150134 - 350.8652875891 - 66.9809474737 - - - 3292.641782076 - 1443.885786518 - -866.4874150134 - - 1443.885786518 - 
2117.778991587 - 350.8652875891 - - -866.4874150134 - 350.8652875891 - 66.9809474737 - - - 3292.641782076 - 1443.885786518 - -866.4874150134 - - 1443.885786518 - 2117.778991587 - 350.8652875891 - - -866.4874150134 - 350.8652875891 - 66.9809474737 - - - -4024.637363485 - -1764.942759693 - 1079.698769709 - - -1764.942759693 - -2617.72518908 - -389.5323589034 - - 1079.698769709 - -389.5323589034 - -26.55575319468 - - - 14342.9552468 - 6289.882966587 - -3848.05977671 - - 6289.882966587 - 9329.360846504 - 1387.748277719 - - -3848.05977671 - 1387.748277719 - 93.98859313953 - - - 2309.371114485 - 1012.73405011 - -617.8751193011 - - 1012.73405011 - 1499.715375261 - 226.6996495043 - - -617.8751193011 - 226.6996495043 - 19.71367105378 - - - 658.4385693042 - 288.7419554965 - -174.6355818894 - - 288.7419554965 - 425.4259635842 - 67.56085381701 - - -174.6355818894 - 67.56085381701 - 9.734306985194 - - - 147.8038109482 - 64.81345870863 - -38.46761366883 - - 64.81345870863 - 94.45893845337 - 16.56853761046 - - -38.46761366883 - 16.56853761046 - 4.157765246739 - - - -502.8297957863 - -220.5034461954 - 133.321878795 - - -220.5034461954 - -324.8254786078 - -51.67462438823 - - 133.321878795 - -51.67462438823 - -7.546900209046 - - - 147.8038109482 - 64.81345870863 - -38.46761366883 - - 64.81345870863 - 94.45893845337 - 16.56853761046 - - -38.46761366883 - 16.56853761046 - 4.157765246737 - - - -502.8297957863 - -220.5034461954 - 133.321878795 - - -220.5034461954 - -324.8254786078 - -51.67462438823 - - 133.321878795 - -51.67462438823 - -7.546900209046 - - - 3292.641782076 - 1443.885786518 - -866.4874150134 - - 1443.885786518 - 2117.778991587 - 350.8652875891 - - -866.4874150134 - 350.8652875891 - 66.9809474737 - - - 601.415922879 - 263.7393940918 - -160.5922585799 - - 263.7393940918 - 390.1127867798 - 59.64454290173 - - -160.5922585799 - 59.64454290173 - 5.986811985787 - - - 182.8153091813 - 80.16167205613 - -46.0517180463 - - 80.16167205613 - 114.6708300718 - 23.41375701469 - - 
-46.0517180463 - 23.41375701469 - 9.249705571709 - - - 128.0971384699 - 56.16913286035 - -32.4405095433 - - 56.16913286035 - 80.5930469462 - 16.07620102257 - - -32.4405095433 - 16.07620102257 - 6.017649404193 - - - -4024.637363485 - -1764.942759693 - 1079.698769709 - - -1764.942759693 - -2617.72518908 - -389.5323589034 - - 1079.698769709 - -389.5323589034 - -26.55575319468 - - - 14342.9552468 - 6289.882966587 - -3848.05977671 - - 6289.882966587 - 9329.360846504 - 1387.748277719 - - -3848.05977671 - 1387.748277719 - 93.98859313953 - - - 2309.371114485 - 1012.73405011 - -617.8751193011 - - 1012.73405011 - 1499.715375261 - 226.6996495043 - - -617.8751193011 - 226.6996495043 - 19.71367105378 - - - 658.4385693042 - 288.7419554965 - -174.6355818894 - - 288.7419554965 - 425.4259635842 - 67.56085381701 - - -174.6355818894 - 67.56085381701 - 9.734306985194 - - - 147.8038109482 - 64.81345870863 - -38.46761366883 - - 64.81345870863 - 94.45893845337 - 16.56853761046 - - -38.46761366883 - 16.56853761046 - 4.157765246739 - - - -502.8297957863 - -220.5034461954 - 133.321878795 - - -220.5034461954 - -324.8254786078 - -51.67462438823 - - 133.321878795 - -51.67462438823 - -7.546900209046 - - - 147.8038109482 - 64.81345870863 - -38.46761366883 - - 64.81345870863 - 94.45893845337 - 16.56853761046 - - -38.46761366883 - 16.56853761046 - 4.157765246737 - - - -502.8297957863 - -220.5034461954 - 133.321878795 - - -220.5034461954 - -324.8254786078 - -51.67462438823 - - 133.321878795 - -51.67462438823 - -7.546900209046 - - - 3292.641782076 - 1443.885786518 - -866.4874150134 - - 1443.885786518 - 2117.778991587 - 350.8652875891 - - -866.4874150134 - 350.8652875891 - 66.9809474737 - - - 601.415922879 - 263.7393940918 - -160.5922585799 - - 263.7393940918 - 390.1127867798 - 59.64454290173 - - -160.5922585799 - 59.64454290173 - 5.986811985787 - - - 182.8153091813 - 80.16167205613 - -46.0517180463 - - 80.16167205613 - 114.6708300718 - 23.41375701469 - - -46.0517180463 - 23.41375701469 - 
9.249705571708 - - - 128.0971384699 - 56.16913286035 - -32.4405095433 - - 56.16913286035 - 80.5930469462 - 16.07620102257 - - -32.4405095433 - 16.07620102257 - 6.017649404194 - - - -4024.637363485 - -1764.942759693 - 1079.698769709 - - -1764.942759693 - -2617.72518908 - -389.5323589034 - - 1079.698769709 - -389.5323589034 - -26.55575319469 - - - 14342.9552468 - 6289.882966587 - -3848.05977671 - - 6289.882966587 - 9329.360846504 - 1387.748277719 - - -3848.05977671 - 1387.748277719 - 93.98859313953 - - - 2309.371114485 - 1012.73405011 - -617.8751193011 - - 1012.73405011 - 1499.715375261 - 226.6996495043 - - -617.8751193011 - 226.6996495043 - 19.71367105378 - - - 658.4385693042 - 288.7419554965 - -174.6355818894 - - 288.7419554965 - 425.4259635842 - 67.56085381701 - - -174.6355818894 - 67.56085381701 - 9.734306985191 - - - 147.8038109482 - 64.81345870863 - -38.46761366883 - - 64.81345870863 - 94.45893845337 - 16.56853761046 - - -38.46761366883 - 16.56853761046 - 4.157765246739 - - - -502.8297957863 - -220.5034461954 - 133.321878795 - - -220.5034461954 - -324.8254786078 - -51.67462438823 - - 133.321878795 - -51.67462438823 - -7.546900209046 - - - 147.8038109482 - 64.81345870863 - -38.46761366883 - - 64.81345870863 - 94.45893845337 - 16.56853761046 - - -38.46761366883 - 16.56853761046 - 4.157765246739 - - - -502.8297957863 - -220.5034461954 - 133.321878795 - - -220.5034461954 - -324.8254786078 - -51.67462438823 - - 133.321878795 - -51.67462438823 - -7.546900209046 - - - 3292.641782076 - 1443.885786518 - -866.4874150134 - - 1443.885786518 - 2117.778991587 - 350.8652875891 - - -866.4874150134 - 350.8652875891 - 66.9809474737 - - - 601.415922879 - 263.7393940918 - -160.5922585799 - - 263.7393940918 - 390.1127867798 - 59.64454290173 - - -160.5922585799 - 59.64454290173 - 5.986811985788 - - - 182.8153091813 - 80.16167205613 - -46.0517180463 - - 80.16167205613 - 114.6708300718 - 23.41375701469 - - -46.0517180463 - 23.41375701469 - 9.249705571708 - - - 128.0971384699 - 
56.16913286035 - -32.4405095433 - - 56.16913286035 - 80.5930469462 - 16.07620102257 - - -32.4405095433 - 16.07620102257 - 6.017649404193 - - - 3292.641782076 - 1443.885786518 - -866.4874150134 - - 1443.885786518 - 2117.778991587 - 350.8652875891 - - -866.4874150134 - 350.8652875891 - 66.9809474737 - - - -4024.637363485 - -1764.942759693 - 1079.698769709 - - -1764.942759693 - -2617.72518908 - -389.5323589034 - - 1079.698769709 - -389.5323589034 - -26.55575319468 - - - 14342.9552468 - 6289.882966587 - -3848.05977671 - - 6289.882966587 - 9329.360846504 - 1387.748277719 - - -3848.05977671 - 1387.748277719 - 93.98859313953 - - - 658.4385693042 - 288.7419554965 - -174.6355818894 - - 288.7419554965 - 425.4259635842 - 67.56085381701 - - -174.6355818894 - 67.56085381701 - 9.734306985194 - - - 3292.641782076 - 1443.885786518 - -866.4874150134 - - 1443.885786518 - 2117.778991587 - 350.8652875891 - - -866.4874150134 - 350.8652875891 - 66.9809474737 - - - 128.0971384699 - 56.16913286035 - -32.4405095433 - - 56.16913286035 - 80.5930469462 - 16.07620102257 - - -32.4405095433 - 16.07620102257 - 6.017649404194 - - - 3292.641782076 - 1443.885786518 - -866.4874150134 - - 1443.885786518 - 2117.778991587 - 350.8652875891 - - -866.4874150134 - 350.8652875891 - 66.9809474737 - - - 128.0971384699 - 56.16913286035 - -32.4405095433 - - 56.16913286035 - 80.5930469462 - 16.07620102257 - - -32.4405095433 - 16.07620102257 - 6.017649404194 - - - 3292.641782076 - 1443.885786518 - -866.4874150134 - - 1443.885786518 - 2117.778991587 - 350.8652875891 - - -866.4874150134 - 350.8652875891 - 66.9809474737 - - - 3292.641782076 - 1443.885786518 - -866.4874150134 - - 1443.885786518 - 2117.778991587 - 350.8652875891 - - -866.4874150134 - 350.8652875891 - 66.9809474737 - - - 3292.641782076 - 1443.885786518 - -866.4874150134 - - 1443.885786518 - 2117.778991587 - 350.8652875891 - - -866.4874150134 - 350.8652875891 - 66.9809474737 - - - 373.9764193214 - 163.996345322 - -98.61168950718 - - 163.996345322 - 
240.8144049061 - 39.47545780393 - - -98.61168950718 - 39.47545780393 - 7.079420182088 - - - 373.9764193214 - 163.996345322 - -98.61168950718 - - 163.996345322 - 240.8144049061 - 39.47545780393 - - -98.61168950718 - 39.47545780393 - 7.079420182088 - - - -4024.637363485 - -1764.942759693 - 1079.698769709 - - -1764.942759693 - -2617.72518908 - -389.5323589034 - - 1079.698769709 - -389.5323589034 - -26.55575319468 - - - 14342.9552468 - 6289.882966587 - -3848.05977671 - - 6289.882966587 - 9329.360846504 - 1387.748277719 - - -3848.05977671 - 1387.748277719 - 93.98859313953 - - - 373.9764193214 - 163.996345322 - -98.61168950718 - - 163.996345322 - 240.8144049061 - 39.47545780393 - - -98.61168950718 - 39.47545780393 - 7.079420182093 - - - 373.9764193214 - 163.996345322 - -98.61168950718 - - 163.996345322 - 240.8144049061 - 39.47545780394 - - -98.61168950718 - 39.47545780394 - 7.079420182093 - - - -2097.995093057 - -920.0148615183 - 553.5014196588 - - -920.0148615183 - -1351.37575036 - -220.8956593773 - - 553.5014196588 - -220.8956593773 - -38.92732733725 - - - 619.6881094322 - 271.7463432808 - -163.5235269434 - - 271.7463432808 - 399.2074510919 - 65.17951383646 - - -163.5235269434 - 65.17951383646 - 11.40410777447 - - - 373.9764193214 - 163.996345322 - -98.61168950718 - - 163.996345322 - 240.8144049061 - 39.47545780394 - - -98.61168950718 - 39.47545780394 - 7.079420182092 - - - 373.9764193214 - 163.996345322 - -98.61168950718 - - 163.996345322 - 240.8144049061 - 39.47545780393 - - -98.61168950718 - 39.47545780393 - 7.079420182093 - - - -4024.637363485 - -1764.942759693 - 1079.698769709 - - -1764.942759693 - -2617.72518908 - -389.5323589034 - - 1079.698769709 - -389.5323589034 - -26.55575319468 - - - 14342.9552468 - 6289.882966587 - -3848.05977671 - - 6289.882966587 - 9329.360846504 - 1387.748277719 - - -3848.05977671 - 1387.748277719 - 93.98859313953 - - - -2097.995093057 - -920.0148615183 - 553.5014196588 - - -920.0148615183 - -1351.37575036 - -220.8956593773 - - 
553.5014196588 - -220.8956593773 - -38.92732733725 - - - 619.6881094322 - 271.7463432808 - -163.5235269434 - - 271.7463432808 - 399.2074510919 - 65.17951383646 - - -163.5235269434 - 65.17951383646 - 11.40410777447 - - - 373.9764193214 - 163.996345322 - -98.61168950718 - - 163.996345322 - 240.8144049061 - 39.47545780394 - - -98.61168950718 - 39.47545780394 - 7.079420182092 - - - 373.9764193214 - 163.996345322 - -98.61168950718 - - 163.996345322 - 240.8144049061 - 39.47545780394 - - -98.61168950718 - 39.47545780394 - 7.079420182092 delta: 0.05 delta_old: 0.025 linear_terms: - - 83.4551262143 - 70.20115871443 - 59.39117854221 - 45.24391055618 - 38.62628748021 - 19.82198478816 - -22.14622491455 - -95.66099878835 - -22.14622491455 - -95.66099878836 - -596.6157739812 - 265.7919023746 - 118.7321519868 - 97.69885198231 - 57.88119066151 - 44.6773124466 - 35.78441925523 - 83.45512621431 - 70.20115871451 - 59.39117854221 - 45.24391055618 - 38.62628748021 - 19.82198478816 - -22.14622491455 - -95.66099878836 - -22.14622491455 - -95.66099878835 - -596.6157739812 - 265.7919023746 - 118.7321519868 - 97.69885198231 - 57.88119066151 - 44.6773124466 - 35.78441925523 - 83.4551262143 - 70.20115871453 - 59.39117854221 - 45.24391055618 - 38.62628748025 - 19.82198478816 - -22.14622491455 - -95.66099878836 - -22.14622491453 - -95.66099878835 - -596.6157739812 - 265.7919023746 - 118.7321519868 - 97.69885198231 - 57.88119066151 - 44.6773124466 - 35.78441925523 - 83.4551262143 - 70.20115871443 - 59.39117854221 - 45.24391055624 - 38.62628748025 - 19.82198478816 - -22.14622491455 - -95.66099878836 - -22.14622491455 - -95.66099878836 - -596.6157739812 - 265.7919023746 - 118.7321519868 - 97.69885198231 - 57.88119066151 - 44.6773124466 - 35.78441925523 - 83.4551262143 - 59.39117854221 - 0.1871101139566 - 816.736081667 - 816.736081667 - 816.736081667 - 32.31842399979 - 83.4551262143 - 59.39117854221 - 0.1871101139567 - 816.736081667 - 816.736081667 - 816.736081667 - 32.31842399979 - 83.45512621433 
- 59.39117854221 - 0.1871101139701 - 816.736081667 - 816.736081667 - 816.736081667 - 32.31842399979 - 83.4551262143 - 59.39117854221 - 0.1871101139567 - 816.736081667 - 32.31842399979 - 816.736081667 - 816.736081667 - 32.31842399979 - 83.45512621433 - 59.39117854221 - 38.62628748025 - 0.1871101139566 - -50.98637547872 - -50.98637547872 - -188.858358915 - 816.736081667 - 92.44936101936 - 50.5433752969 - 32.31842399979 - 83.45512621433 - 59.39117854221 - 38.62628748025 - 0.18711011397 - -50.98637547872 - -50.98637547872 - -188.858358915 - 816.736081667 - 92.44936101934 - 50.54337529691 - 32.31842399979 - 83.45512621431 - 59.39117854221 - 38.62628748025 - 0.1871101139419 - -50.98637547872 - -50.98637547871 - -188.858358915 - 816.736081667 - 92.44936101935 - 50.54337529691 - 32.31842399979 - 83.4551262143 - 70.20115871451 - 59.39117854221 - 45.24391055617 - 38.62628748025 - 19.82198478816 - -95.66099878835 - -95.66099878836 - -596.6157739812 - 265.7919023746 - 118.7321519868 - 97.69885198231 - 57.8811906615 - 44.6773124466 - 35.78441925523 - 816.736081667 - 816.736081667 - 816.736081667 - 816.736081667 - 816.736081667 - 816.736081667 - 83.4551262143 - 59.39117854221 - 38.62628748025 - 0.1871101139567 - -50.98637547872 - -188.858358915 - -50.98637547871 - -188.858358915 - 816.736081667 - 92.44936101936 - 50.5433752969 - 32.31842399979 - 83.45512621431 - 59.39117854221 - 38.62628748025 - 0.1871101139566 - -50.98637547872 - -188.858358915 - -50.98637547871 - -188.858358915 - 816.736081667 - 92.44936101936 - 50.54337529691 - 32.31842399979 - 83.45512621431 - 59.39117854221 - 38.62628748025 - 0.18711011397 - -50.98637547872 - -188.858358915 - -50.98637547872 - -188.858358915 - 816.736081667 - 92.44936101935 - 50.54337529691 - 32.31842399979 - 816.736081667 - 83.4551262143 - 59.39117854221 - 0.1871101139567 - 816.736081667 - 32.31842399979 - 816.736081667 - 32.31842399979 - 816.736081667 - 816.736081667 - 816.736081667 - -22.14622491453 - -22.14622491453 - 83.45512621431 - 
59.39117854221 - -22.14622491455 - -22.14622491455 - -596.6157739812 - 118.7321519868 - -22.14622491455 - -22.14622491455 - 83.4551262143 - 59.39117854221 - -596.6157739812 - 118.7321519868 - -22.14622491455 - -22.14622491455 - - -65.70818330941 - -60.39894618479 - -56.24495202103 - -56.74595477465 - -51.00210223596 - -46.1667378912 - -39.8057155928 - -36.84709189045 - -39.8057155928 - -36.84709189045 - -42.71855980194 - -33.37423700032 - -52.29733914833 - 792.7694136555 - 23.53933160967 - 7.777083680153 - 3.011421814957 - -65.70818330941 - -60.39894618479 - -56.24495202103 - -56.74595477465 - -51.00210223596 - -46.1667378912 - -39.8057155928 - -36.84709189045 - -39.8057155928 - -36.84709189045 - -42.71855980194 - -33.37423700032 - -52.29733914833 - 792.7694136555 - 23.53933160967 - 7.777083680153 - 3.011421814957 - -65.70818330941 - -60.39894618479 - -56.24495202103 - -56.74595477465 - -51.00210223596 - -46.1667378912 - -39.8057155928 - -36.84709189045 - -39.80571559279 - -36.84709189045 - -42.71855980194 - -33.37423700032 - -52.29733914833 - 792.7694136555 - 23.53933160967 - 7.777083680153 - 3.011421814957 - -65.70818330941 - -60.39894618479 - -56.24495202103 - -56.74595477465 - -51.00210223596 - -46.1667378912 - -39.8057155928 - -36.84709189045 - -39.8057155928 - -36.84709189045 - -42.71855980194 - -33.37423700032 - -52.29733914833 - 792.7694136555 - 23.53933160967 - 7.777083680153 - 3.011421814957 - -65.70818330941 - -56.24495202103 - -42.54119035883 - -23.31820321215 - -23.31820321215 - -23.31820321215 - 1.805173509361 - -65.70818330941 - -56.24495202103 - -42.54119035883 - -23.31820321215 - -23.31820321215 - -23.31820321215 - 1.805173509361 - -65.70818330941 - -56.24495202103 - -42.54119035883 - -23.31820321215 - -23.31820321215 - -23.31820321215 - 1.805173509361 - -65.70818330941 - -56.24495202103 - -42.54119035883 - -23.31820321215 - 1.805173509361 - -23.31820321215 - -23.31820321215 - 1.805173509361 - -65.70818330941 - -56.24495202103 - -51.00210223596 - 
-42.54119035883 - -37.88298121996 - -37.88298121996 - -37.16785677463 - -23.31820321215 - -93.40145763629 - 12.92907306798 - 1.805173509361 - -65.70818330941 - -56.24495202103 - -51.00210223596 - -42.54119035883 - -37.88298121996 - -37.88298121996 - -37.16785677463 - -23.31820321215 - -93.40145763629 - 12.92907306798 - 1.805173509361 - -65.70818330941 - -56.24495202103 - -51.00210223596 - -42.54119035883 - -37.88298121996 - -37.88298121996 - -37.16785677463 - -23.31820321215 - -93.40145763629 - 12.92907306798 - 1.805173509361 - -65.70818330941 - -60.39894618479 - -56.24495202103 - -56.74595477465 - -51.00210223596 - -46.1667378912 - -36.84709189045 - -36.84709189045 - -42.71855980194 - -33.37423700032 - -52.29733914833 - 792.7694136555 - 23.53933160967 - 7.777083680153 - 3.011421814957 - -23.31820321215 - -23.31820321215 - -23.31820321215 - -23.31820321215 - -23.31820321215 - -23.31820321215 - -65.70818330941 - -56.24495202103 - -51.00210223596 - -42.54119035883 - -37.88298121996 - -37.16785677463 - -37.88298121996 - -37.16785677463 - -23.31820321215 - -93.40145763629 - 12.92907306798 - 1.805173509361 - -65.70818330941 - -56.24495202103 - -51.00210223596 - -42.54119035883 - -37.88298121996 - -37.16785677463 - -37.88298121996 - -37.16785677463 - -23.31820321215 - -93.40145763629 - 12.92907306798 - 1.805173509361 - -65.70818330941 - -56.24495202103 - -51.00210223596 - -42.54119035883 - -37.88298121996 - -37.16785677463 - -37.88298121996 - -37.16785677463 - -23.31820321215 - -93.40145763629 - 12.92907306798 - 1.805173509361 - -23.31820321215 - -65.70818330941 - -56.24495202103 - -42.54119035883 - -23.31820321215 - 1.805173509361 - -23.31820321215 - 1.805173509361 - -23.31820321215 - -23.31820321215 - -23.31820321215 - -39.80571559279 - -39.80571559279 - -65.70818330941 - -56.24495202103 - -39.8057155928 - -39.8057155928 - -42.71855980194 - -52.29733914833 - -39.8057155928 - -39.8057155928 - -65.70818330941 - -56.24495202103 - -42.71855980194 - -52.29733914833 - 
-39.8057155928 - -39.8057155928 - - 35.67527604537 - 40.13513697208 - 42.22265142152 - 44.3706588541 - 43.79191549807 - 43.60185566328 - 43.89691265518 - 51.559374423 - 43.89691265518 - 51.559374423 - 144.6484148813 - -29.52793630843 - -4.29051063361 - 4.275018249209 - 3.077593180459 - 3.571453211328 - 3.635446234161 - 35.67527604537 - 40.13513697207 - 42.22265142152 - 44.3706588541 - 43.79191549807 - 43.60185566328 - 43.89691265518 - 51.559374423 - 43.89691265518 - 51.559374423 - 144.6484148813 - -29.52793630843 - -4.290510633609 - 4.275018249209 - 3.077593180459 - 3.571453211328 - 3.635446234161 - 35.67527604537 - 40.13513697206 - 42.22265142152 - 44.3706588541 - 43.79191549806 - 43.60185566328 - 43.89691265518 - 51.559374423 - 43.89691265518 - 51.559374423 - 144.6484148813 - -29.52793630843 - -4.290510633609 - 4.275018249209 - 3.077593180459 - 3.571453211327 - 3.635446234161 - 35.67527604537 - 40.13513697208 - 42.22265142152 - 44.37065885408 - 43.79191549806 - 43.60185566328 - 43.89691265518 - 51.559374423 - 43.89691265518 - 51.559374423 - 144.6484148813 - -29.52793630843 - -4.290510633611 - 4.275018249209 - 3.077593180459 - 3.571453211328 - 3.635446234161 - 35.67527604537 - 42.22265142152 - 43.39074205828 - -135.8537067485 - -135.8537067485 - -135.8537067485 - 3.585520164357 - 35.67527604537 - 42.22265142152 - 43.39074205828 - -135.8537067485 - -135.8537067485 - -135.8537067485 - 3.585520164356 - 35.67527604536 - 42.22265142152 - 43.39074205828 - -135.8537067485 - -135.8537067485 - -135.8537067485 - 3.585520164357 - 35.67527604537 - 42.22265142152 - 43.39074205828 - -135.8537067485 - 3.585520164357 - -135.8537067485 - -135.8537067485 - 3.585520164357 - 35.67527604536 - 42.22265142152 - 43.79191549806 - 43.39074205828 - 45.9986361207 - 45.9986361207 - 66.99917852268 - -135.8537067485 - -0.9909636364664 - 3.40064839132 - 3.585520164357 - 35.67527604536 - 42.22265142152 - 43.79191549806 - 43.39074205828 - 45.9986361207 - 45.9986361207 - 66.99917852268 - 
-135.8537067485 - -0.990963636463 - 3.400648391319 - 3.585520164356 - 35.67527604537 - 42.22265142152 - 43.79191549806 - 43.39074205829 - 45.9986361207 - 45.9986361207 - 66.99917852268 - -135.8537067485 - -0.9909636364644 - 3.400648391319 - 3.585520164357 - 35.67527604537 - 40.13513697207 - 42.22265142152 - 44.3706588541 - 43.79191549806 - 43.60185566328 - 51.559374423 - 51.559374423 - 144.6484148813 - -29.52793630843 - -4.290510633611 - 4.275018249209 - 3.077593180459 - 3.571453211328 - 3.635446234161 - -135.8537067485 - -135.8537067485 - -135.8537067485 - -135.8537067485 - -135.8537067485 - -135.8537067485 - 35.67527604537 - 42.22265142152 - 43.79191549806 - 43.39074205828 - 45.9986361207 - 66.99917852268 - 45.9986361207 - 66.99917852268 - -135.8537067485 - -0.9909636364657 - 3.40064839132 - 3.585520164356 - 35.67527604537 - 42.22265142152 - 43.79191549806 - 43.39074205828 - 45.9986361207 - 66.99917852268 - 45.9986361207 - 66.99917852268 - -135.8537067485 - -0.9909636364657 - 3.400648391319 - 3.585520164357 - 35.67527604537 - 42.22265142152 - 43.79191549806 - 43.39074205828 - 45.9986361207 - 66.99917852268 - 45.9986361207 - 66.99917852268 - -135.8537067485 - -0.9909636364651 - 3.400648391319 - 3.585520164356 - -135.8537067485 - 35.67527604537 - 42.22265142152 - 43.39074205828 - -135.8537067485 - 3.585520164357 - -135.8537067485 - 3.585520164357 - -135.8537067485 - -135.8537067485 - -135.8537067485 - 43.89691265518 - 43.89691265518 - 35.67527604537 - 42.22265142152 - 43.89691265518 - 43.89691265518 - 144.6484148813 - -4.29051063361 - 43.89691265518 - 43.89691265518 - 35.67527604537 - 42.22265142152 - 144.6484148813 - -4.29051063361 - 43.89691265518 - 43.89691265518 linear_terms_expected: - - 2363.426377558 - 5992.039123618 - -7709.278530499 - -2150.664440344 - -1182.145972324 - -577.1965207481 - -247.3857201056 - -138.334179211 - -247.3857201056 - -138.334179211 - -53.7124324404 - -103.0416465802 - -99.1378353945 - 954.5967200947 - 13.58623873662 - -1.174295227353 
- -3.866221813446 - 2363.426377558 - 5992.039123618 - -7709.278530499 - -2150.664440344 - -1182.145972324 - -577.1965207481 - -247.3857201056 - -138.334179211 - -247.3857201056 - -138.334179211 - -53.71243244039 - -103.0416465802 - -99.1378353945 - 954.5967200947 - 13.58623873662 - -1.174295227353 - -3.866221813447 - 2363.426377558 - 5992.039123618 - -7709.278530499 - -2150.664440344 - -1182.145972324 - -577.1965207481 - -247.3857201056 - -138.334179211 - -247.3857201056 - -138.334179211 - -53.7124324404 - -103.0416465802 - -99.1378353945 - 954.5967200947 - 13.58623873662 - -1.174295227354 - -3.866221813447 - 2363.426377558 - 5992.039123618 - -7709.278530499 - -2150.664440344 - -1182.145972324 - -577.1965207481 - -247.3857201056 - -138.334179211 - -247.3857201056 - -138.334179211 - -53.7124324404 - -103.0416465802 - -99.1378353945 - 954.5967200947 - 13.58623873662 - -1.174295227354 - -3.866221813447 - 2363.426377558 - -7709.278530499 - -357.7987188137 - -153.6248432276 - -153.6248432276 - -153.6248432276 - -4.140626483836 - 2363.426377558 - -7709.278530499 - -357.7987188137 - -153.6248432276 - -153.6248432276 - -153.6248432276 - -4.140626483835 - 2363.426377558 - -7709.278530499 - -357.7987188137 - -153.6248432276 - -153.6248432276 - -153.6248432276 - -4.140626483836 - 2363.426377558 - -7709.278530499 - -357.7987188137 - -153.6248432276 - -4.140626483836 - -153.6248432276 - -153.6248432276 - -4.140626483836 - 2363.426377558 - -7709.278530499 - -1182.145972324 - -357.7987188137 - -181.9416676796 - -181.9416676796 - -104.2155735686 - -153.6248432276 - -142.8920752426 - 3.075193030154 - -4.140626483836 - 2363.426377558 - -7709.278530499 - -1182.145972324 - -357.7987188137 - -181.9416676796 - -181.9416676796 - -104.2155735686 - -153.6248432276 - -142.8920752426 - 3.075193030154 - -4.140626483836 - 2363.426377558 - -7709.278530499 - -1182.145972324 - -357.7987188137 - -181.9416676796 - -181.9416676796 - -104.2155735686 - -153.6248432276 - -142.8920752426 - 
3.075193030153 - -4.140626483836 - 2363.426377558 - 5992.039123618 - -7709.278530499 - -2150.664440344 - -1182.145972324 - -577.1965207481 - -138.334179211 - -138.334179211 - -53.7124324404 - -103.0416465802 - -99.1378353945 - 954.5967200947 - 13.58623873662 - -1.174295227354 - -3.866221813446 - -153.6248432276 - -153.6248432276 - -153.6248432276 - -153.6248432276 - -153.6248432276 - -153.6248432276 - 2363.426377558 - -7709.278530499 - -1182.145972324 - -357.7987188137 - -181.9416676796 - -104.2155735686 - -181.9416676796 - -104.2155735686 - -153.6248432276 - -142.8920752426 - 3.075193030153 - -4.140626483835 - 2363.426377558 - -7709.278530499 - -1182.145972324 - -357.7987188137 - -181.9416676796 - -104.2155735686 - -181.9416676796 - -104.2155735686 - -153.6248432276 - -142.8920752426 - 3.075193030154 - -4.140626483836 - 2363.426377558 - -7709.278530499 - -1182.145972324 - -357.7987188137 - -181.9416676796 - -104.2155735686 - -181.9416676796 - -104.2155735686 - -153.6248432276 - -142.8920752426 - 3.075193030154 - -4.140626483836 - -153.6248432276 - 2363.426377558 - -7709.278530499 - -357.7987188137 - -153.6248432276 - -4.140626483836 - -153.6248432276 - -4.140626483836 - -153.6248432276 - -153.6248432276 - -153.6248432276 - -247.3857201056 - -247.3857201056 - 2363.426377558 - -7709.278530499 - -247.3857201056 - -247.3857201056 - -53.7124324404 - -99.1378353945 - -247.3857201056 - -247.3857201056 - 2363.426377558 - -7709.278530499 - -53.7124324404 - -99.1378353945 - -247.3857201056 - -247.3857201056 - - -1393.823686755 - -3484.676385903 - 4386.660918749 - 1174.087828191 - 621.0101781775 - 261.2029206975 - 36.28651044149 - -104.2839114197 - 36.28651044149 - -104.2839114197 - -735.8610938666 - 294.9448835657 - 87.55073042511 - 1134.846231379 - 104.076484543 - 66.32539487318 - 48.36159029625 - -1393.823686755 - -3484.676385903 - 4386.660918749 - 1174.087828191 - 621.0101781775 - 261.2029206975 - 36.28651044149 - -104.2839114197 - 36.28651044149 - -104.2839114197 - 
-735.8610938666 - 294.9448835657 - 87.55073042511 - 1134.846231379 - 104.076484543 - 66.32539487318 - 48.36159029625 - -1393.823686755 - -3484.676385903 - 4386.660918749 - 1174.087828191 - 621.0101781775 - 261.2029206975 - 36.28651044149 - -104.2839114197 - 36.28651044149 - -104.2839114197 - -735.8610938666 - 294.9448835657 - 87.55073042511 - 1134.846231379 - 104.076484543 - 66.32539487318 - 48.36159029625 - -1393.823686755 - -3484.676385903 - 4386.660918749 - 1174.087828191 - 621.0101781775 - 261.2029206975 - 36.28651044149 - -104.2839114197 - 36.28651044149 - -104.2839114197 - -735.8610938666 - 294.9448835657 - 87.55073042511 - 1134.846231379 - 104.076484543 - 66.32539487318 - 48.36159029625 - -1393.823686755 - 4386.660918749 - 119.8053923089 - 972.3575239774 - 972.3575239774 - 972.3575239774 - 42.21231909165 - -1393.823686755 - 4386.660918749 - 119.8053923089 - 972.3575239774 - 972.3575239774 - 972.3575239774 - 42.21231909165 - -1393.823686755 - 4386.660918749 - 119.8053923089 - 972.3575239774 - 972.3575239774 - 972.3575239774 - 42.21231909165 - -1393.823686755 - 4386.660918749 - 119.8053923089 - 972.3575239774 - 42.21231909165 - 972.3575239774 - 972.3575239774 - 42.21231909165 - -1393.823686755 - 4386.660918749 - 621.0101781775 - 119.8053923089 - -30.37542941294 - -30.37542941294 - -230.7678532341 - 972.3575239774 - 1.227223692861 - 80.74754133078 - 42.21231909165 - -1393.823686755 - 4386.660918749 - 621.0101781775 - 119.8053923089 - -30.37542941294 - -30.37542941294 - -230.7678532341 - 972.3575239774 - 1.227223692858 - 80.74754133078 - 42.21231909165 - -1393.823686755 - 4386.660918749 - 621.0101781775 - 119.8053923089 - -30.37542941294 - -30.37542941294 - -230.7678532341 - 972.3575239774 - 1.227223692859 - 80.74754133078 - 42.21231909165 - -1393.823686755 - -3484.676385903 - 4386.660918749 - 1174.087828191 - 621.0101781775 - 261.2029206975 - -104.2839114197 - -104.2839114197 - -735.8610938666 - 294.9448835657 - 87.55073042511 - 1134.846231379 - 104.076484543 - 
66.32539487318 - 48.36159029625 - 972.3575239774 - 972.3575239774 - 972.3575239774 - 972.3575239774 - 972.3575239774 - 972.3575239774 - -1393.823686755 - 4386.660918749 - 621.0101781775 - 119.8053923089 - -30.37542941294 - -230.7678532341 - -30.37542941294 - -230.7678532341 - 972.3575239774 - 1.227223692861 - 80.74754133078 - 42.21231909165 - -1393.823686755 - 4386.660918749 - 621.0101781775 - 119.8053923089 - -30.37542941294 - -230.7678532341 - -30.37542941294 - -230.7678532341 - 972.3575239774 - 1.227223692861 - 80.74754133078 - 42.21231909165 - -1393.823686755 - 4386.660918749 - 621.0101781775 - 119.8053923089 - -30.37542941294 - -230.7678532341 - -30.37542941294 - -230.7678532341 - 972.3575239774 - 1.227223692859 - 80.74754133078 - 42.21231909165 - 972.3575239774 - -1393.823686755 - 4386.660918749 - 119.8053923089 - 972.3575239774 - 42.21231909165 - 972.3575239774 - 42.21231909165 - 972.3575239774 - 972.3575239774 - 972.3575239774 - 36.28651044149 - 36.28651044149 - -1393.823686755 - 4386.660918749 - 36.28651044149 - 36.28651044149 - -735.8610938667 - 87.55073042511 - 36.28651044149 - 36.28651044149 - -1393.823686755 - 4386.660918749 - -735.8610938666 - 87.55073042511 - 36.28651044149 - 36.28651044149 - - -375.8722506492 - -1108.972694686 - 1677.946001564 - 546.4389881039 - 346.3140830129 - 215.6227895742 - 131.5247089125 - 92.72176892163 - 131.5247089125 - 92.72176892163 - 44.3395268336 - 78.93180868232 - 63.72812657892 - -104.6695747867 - 31.93096833735 - 29.20072767216 - 25.55713657195 - -375.8722506492 - -1108.972694686 - 1677.946001564 - 546.4389881039 - 346.3140830129 - 215.6227895742 - 131.5247089125 - 92.72176892163 - 131.5247089125 - 92.72176892163 - 44.3395268336 - 78.93180868232 - 63.72812657892 - -104.6695747867 - 31.93096833735 - 29.20072767216 - 25.55713657195 - -375.8722506492 - -1108.972694686 - 1677.946001564 - 546.4389881039 - 346.3140830129 - 215.6227895742 - 131.5247089125 - 92.72176892163 - 131.5247089125 - 92.72176892163 - 44.3395268336 - 
78.93180868232 - 63.72812657892 - -104.6695747867 - 31.93096833735 - 29.20072767216 - 25.55713657195 - -375.8722506492 - -1108.972694686 - 1677.946001564 - 546.4389881039 - 346.3140830129 - 215.6227895742 - 131.5247089125 - 92.72176892163 - 131.5247089125 - 92.72176892163 - 44.3395268336 - 78.93180868232 - 63.72812657892 - -104.6695747867 - 31.93096833735 - 29.20072767216 - 25.55713657195 - -375.8722506492 - 1677.946001564 - 162.4888980685 - 115.5097653415 - 115.5097653415 - 115.5097653415 - 23.84379728044 - -375.8722506492 - 1677.946001564 - 162.4888980685 - 115.5097653415 - 115.5097653415 - 115.5097653415 - 23.84379728044 - -375.8722506492 - 1677.946001564 - 162.4888980685 - 115.5097653415 - 115.5097653415 - 115.5097653415 - 23.84379728044 - -375.8722506492 - 1677.946001564 - 162.4888980685 - 115.5097653415 - 23.84379728044 - 115.5097653415 - 115.5097653415 - 23.84379728044 - -375.8722506492 - 1677.946001564 - 346.3140830129 - 162.4888980685 - 109.9217527448 - 109.9217527448 - 76.12955501496 - 115.5097653415 - 65.79975067225 - 30.90788818222 - 23.84379728044 - -375.8722506492 - 1677.946001564 - 346.3140830129 - 162.4888980685 - 109.9217527448 - 109.9217527448 - 76.12955501496 - 115.5097653415 - 65.79975067225 - 30.90788818222 - 23.84379728044 - -375.8722506492 - 1677.946001564 - 346.3140830129 - 162.4888980685 - 109.9217527448 - 109.9217527448 - 76.12955501496 - 115.5097653415 - 65.79975067225 - 30.90788818222 - 23.84379728044 - -375.8722506492 - -1108.972694686 - 1677.946001564 - 546.4389881039 - 346.3140830129 - 215.6227895742 - 92.72176892163 - 92.72176892163 - 44.3395268336 - 78.93180868232 - 63.72812657892 - -104.6695747867 - 31.93096833735 - 29.20072767216 - 25.55713657195 - 115.5097653415 - 115.5097653415 - 115.5097653415 - 115.5097653415 - 115.5097653415 - 115.5097653415 - -375.8722506492 - 1677.946001564 - 346.3140830129 - 162.4888980685 - 109.9217527448 - 76.12955501496 - 109.9217527448 - 76.12955501496 - 115.5097653415 - 65.79975067225 - 30.90788818222 
- 23.84379728044 - -375.8722506492 - 1677.946001564 - 346.3140830129 - 162.4888980685 - 109.9217527448 - 76.12955501496 - 109.9217527448 - 76.12955501496 - 115.5097653415 - 65.79975067225 - 30.90788818222 - 23.84379728044 - -375.8722506492 - 1677.946001564 - 346.3140830129 - 162.4888980685 - 109.9217527448 - 76.12955501496 - 109.9217527448 - 76.12955501496 - 115.5097653415 - 65.79975067225 - 30.90788818222 - 23.84379728044 - 115.5097653415 - -375.8722506492 - 1677.946001564 - 162.4888980685 - 115.5097653415 - 23.84379728044 - 115.5097653415 - 23.84379728044 - 115.5097653415 - 115.5097653415 - 115.5097653415 - 131.5247089125 - 131.5247089125 - -375.8722506492 - 1677.946001564 - 131.5247089125 - 131.5247089125 - 44.33952683361 - 63.72812657892 - 131.5247089125 - 131.5247089125 - -375.8722506492 - 1677.946001564 - 44.3395268336 - 63.72812657892 - 131.5247089125 - 131.5247089125 square_terms: - - - 42.05642503987 - 5.182308979952 - 295.7086970924 - - 5.182308979952 - -16.8011933671 - 44.88068608775 - - 295.7086970924 - 44.88068608775 - -11.16800948171 - - - 37.76206570488 - 4.598069813344 - 268.480673393 - - 4.598069813344 - -15.06222715154 - 39.86023126116 - - 268.480673393 - 39.86023126116 - -13.87397890211 - - - 34.52716237442 - 3.742228341934 - 249.1555844695 - - 3.742228341934 - -13.95687891579 - 32.4806538628 - - 249.1555844695 - 32.4806538628 - -15.39308605662 - - - 36.36211362787 - 11.74965491243 - 232.6612213924 - - 11.74965491243 - -14.33678876201 - 101.5946337199 - - 232.6612213924 - 101.5946337199 - -16.74890647466 - - - 30.88627308451 - 4.976024773829 - 218.8203159401 - - 4.976024773829 - -12.55705651792 - 43.13446872147 - - 218.8203159401 - 43.13446872147 - -16.96609172267 - - - 26.99562208048 - 4.039388146136 - 194.3460658399 - - 4.039388146136 - -10.904018831 - 35.05642111527 - - 194.3460658399 - 35.05642111527 - -17.54258356033 - - - 21.28989652643 - 3.211915555773 - 156.5687870814 - - 3.211915555773 - -7.888244440813 - 27.94191248902 - - 
156.5687870814 - 27.94191248902 - -18.64926539005 - - - 17.09406685234 - 2.66162436771 - 131.0277600318 - - 2.66162436771 - -5.180832861658 - 23.26291444469 - - 131.0277600318 - 23.26291444469 - -23.07010638636 - - - 21.28989652643 - 3.211915555773 - 156.5687870814 - - 3.211915555773 - -7.888244440813 - 27.94191248902 - - 156.5687870814 - 27.94191248902 - -18.64926539005 - - - 17.09406685234 - 2.66162436771 - 131.0277600318 - - 2.66162436771 - -5.180832861658 - 23.26291444469 - - 131.0277600318 - 23.26291444469 - -23.07010638636 - - - 13.22112788651 - 1.569537501602 - 137.5825625705 - - 1.569537501602 - -4.091404567032 - 14.28797929438 - - 137.5825625705 - 14.28797929438 - -69.36780402294 - - - 12.39426977847 - 3.219862122548 - 69.57634867759 - - 3.219862122548 - 3.931549769208 - 27.88803537784 - - 69.57634867759 - 27.88803537784 - 16.06772788907 - - - 10.59582926181 - 3.961203981524 - 63.6129919848 - - 3.961203981524 - 13.8089243593 - 34.67215199382 - - 63.6129919848 - 34.67215199382 - 3.402916211 - - - -4.590996625593 - -49.8578626567 - 88.18903535997 - - -49.8578626567 - -392.7968335729 - -441.4737183738 - - 88.18903535997 - -441.4737183738 - -2.005253308587 - - - 6.609230403572 - -1.114542090064 - 50.17123518647 - - -1.114542090064 - -20.03956487444 - -10.10106952879 - - 50.17123518647 - -10.10106952879 - -0.6749898814763 - - - 5.867803812028 - -0.1814136458974 - 42.89612529825 - - -0.1814136458974 - -11.28413222756 - -1.804764358727 - - 42.89612529825 - -1.804764358727 - -1.028734386942 - - - 5.122058993026 - 0.07028381229468 - 37.07301548541 - - 0.07028381229468 - -8.007614389428 - 0.4527765498708 - - 37.07301548541 - 0.4527765498708 - -1.160231423527 - - - 42.05642503987 - 5.182308979952 - 295.7086970924 - - 5.182308979952 - -16.8011933671 - 44.88068608775 - - 295.7086970924 - 44.88068608775 - -11.16800948171 - - - 37.76206570488 - 4.598069813344 - 268.480673393 - - 4.598069813344 - -15.06222715154 - 39.86023126116 - - 268.480673393 - 39.86023126116 - 
-13.8739789021 - - - 34.52716237442 - 3.742228341934 - 249.1555844695 - - 3.742228341934 - -13.95687891579 - 32.4806538628 - - 249.1555844695 - 32.4806538628 - -15.39308605662 - - - 36.36211362787 - 11.74965491243 - 232.6612213924 - - 11.74965491243 - -14.33678876201 - 101.5946337199 - - 232.6612213924 - 101.5946337199 - -16.74890647466 - - - 30.88627308451 - 4.976024773829 - 218.8203159401 - - 4.976024773829 - -12.55705651792 - 43.13446872147 - - 218.8203159401 - 43.13446872147 - -16.96609172267 - - - 26.99562208048 - 4.039388146136 - 194.3460658399 - - 4.039388146136 - -10.904018831 - 35.05642111527 - - 194.3460658399 - 35.05642111527 - -17.54258356033 - - - 21.28989652643 - 3.211915555773 - 156.5687870814 - - 3.211915555773 - -7.888244440814 - 27.94191248902 - - 156.5687870814 - 27.94191248902 - -18.64926539005 - - - 17.09406685234 - 2.66162436771 - 131.0277600318 - - 2.66162436771 - -5.180832861658 - 23.26291444469 - - 131.0277600318 - 23.26291444469 - -23.07010638636 - - - 21.28989652643 - 3.211915555773 - 156.5687870814 - - 3.211915555773 - -7.888244440814 - 27.94191248902 - - 156.5687870814 - 27.94191248902 - -18.64926539005 - - - 17.09406685234 - 2.66162436771 - 131.0277600318 - - 2.66162436771 - -5.180832861658 - 23.26291444469 - - 131.0277600318 - 23.26291444469 - -23.07010638636 - - - 13.22112788651 - 1.569537501602 - 137.5825625705 - - 1.569537501602 - -4.091404567032 - 14.28797929438 - - 137.5825625705 - 14.28797929438 - -69.36780402294 - - - 12.39426977847 - 3.219862122548 - 69.57634867759 - - 3.219862122548 - 3.931549769208 - 27.88803537784 - - 69.57634867759 - 27.88803537784 - 16.06772788907 - - - 10.59582926181 - 3.961203981524 - 63.6129919848 - - 3.961203981524 - 13.8089243593 - 34.67215199382 - - 63.6129919848 - 34.67215199382 - 3.402916211 - - - -4.590996625593 - -49.8578626567 - 88.18903535997 - - -49.8578626567 - -392.7968335729 - -441.4737183738 - - 88.18903535997 - -441.4737183738 - -2.005253308587 - - - 6.609230403572 - -1.114542090064 - 
50.17123518647 - - -1.114542090064 - -20.03956487444 - -10.10106952879 - - 50.17123518647 - -10.10106952879 - -0.6749898814763 - - - 5.867803812028 - -0.1814136458974 - 42.89612529825 - - -0.1814136458974 - -11.28413222756 - -1.804764358727 - - 42.89612529825 - -1.804764358727 - -1.028734386942 - - - 5.122058993026 - 0.07028381229468 - 37.07301548541 - - 0.07028381229468 - -8.007614389428 - 0.4527765498708 - - 37.07301548541 - 0.4527765498708 - -1.160231423527 - - - 42.05642503987 - 5.182308979952 - 295.7086970924 - - 5.182308979952 - -16.8011933671 - 44.88068608775 - - 295.7086970924 - 44.88068608775 - -11.16800948171 - - - 37.76206570488 - 4.598069813344 - 268.480673393 - - 4.598069813344 - -15.06222715154 - 39.86023126116 - - 268.480673393 - 39.86023126116 - -13.8739789021 - - - 34.52716237442 - 3.742228341934 - 249.1555844695 - - 3.742228341934 - -13.95687891579 - 32.4806538628 - - 249.1555844695 - 32.4806538628 - -15.39308605662 - - - 36.36211362787 - 11.74965491243 - 232.6612213924 - - 11.74965491243 - -14.33678876201 - 101.5946337199 - - 232.6612213924 - 101.5946337199 - -16.74890647466 - - - 30.88627308451 - 4.976024773829 - 218.8203159401 - - 4.976024773829 - -12.55705651792 - 43.13446872147 - - 218.8203159401 - 43.13446872147 - -16.96609172267 - - - 26.99562208048 - 4.039388146136 - 194.3460658399 - - 4.039388146136 - -10.904018831 - 35.05642111527 - - 194.3460658399 - 35.05642111527 - -17.54258356033 - - - 21.28989652643 - 3.211915555773 - 156.5687870814 - - 3.211915555773 - -7.888244440813 - 27.94191248902 - - 156.5687870814 - 27.94191248902 - -18.64926539005 - - - 17.09406685234 - 2.66162436771 - 131.0277600318 - - 2.66162436771 - -5.180832861658 - 23.26291444469 - - 131.0277600318 - 23.26291444469 - -23.07010638636 - - - 21.28989652643 - 3.211915555773 - 156.5687870814 - - 3.211915555773 - -7.888244440813 - 27.94191248902 - - 156.5687870814 - 27.94191248902 - -18.64926539005 - - - 17.09406685234 - 2.66162436771 - 131.0277600318 - - 2.66162436771 - 
-5.180832861658 - 23.26291444469 - - 131.0277600318 - 23.26291444469 - -23.07010638636 - - - 13.22112788651 - 1.569537501602 - 137.5825625705 - - 1.569537501602 - -4.091404567032 - 14.28797929438 - - 137.5825625705 - 14.28797929438 - -69.36780402294 - - - 12.39426977847 - 3.219862122548 - 69.57634867759 - - 3.219862122548 - 3.931549769208 - 27.88803537784 - - 69.57634867759 - 27.88803537784 - 16.06772788907 - - - 10.59582926181 - 3.961203981524 - 63.6129919848 - - 3.961203981524 - 13.8089243593 - 34.67215199382 - - 63.6129919848 - 34.67215199382 - 3.402916211 - - - -4.590996625593 - -49.8578626567 - 88.18903535997 - - -49.8578626567 - -392.7968335729 - -441.4737183738 - - 88.18903535997 - -441.4737183738 - -2.005253308587 - - - 6.609230403572 - -1.114542090064 - 50.17123518647 - - -1.114542090064 - -20.03956487444 - -10.10106952879 - - 50.17123518647 - -10.10106952879 - -0.6749898814763 - - - 5.867803812028 - -0.1814136458974 - 42.89612529825 - - -0.1814136458974 - -11.28413222756 - -1.804764358727 - - 42.89612529825 - -1.804764358727 - -1.028734386942 - - - 5.122058993026 - 0.07028381229468 - 37.07301548541 - - 0.07028381229468 - -8.007614389428 - 0.4527765498708 - - 37.07301548541 - 0.4527765498708 - -1.160231423527 - - - 42.05642503987 - 5.182308979952 - 295.7086970924 - - 5.182308979952 - -16.8011933671 - 44.88068608775 - - 295.7086970924 - 44.88068608775 - -11.16800948171 - - - 37.76206570488 - 4.598069813344 - 268.480673393 - - 4.598069813344 - -15.06222715154 - 39.86023126116 - - 268.480673393 - 39.86023126116 - -13.87397890211 - - - 34.52716237442 - 3.742228341934 - 249.1555844695 - - 3.742228341934 - -13.95687891579 - 32.4806538628 - - 249.1555844695 - 32.4806538628 - -15.39308605662 - - - 36.36211362787 - 11.74965491243 - 232.6612213924 - - 11.74965491243 - -14.33678876201 - 101.5946337199 - - 232.6612213924 - 101.5946337199 - -16.74890647466 - - - 30.88627308451 - 4.976024773829 - 218.8203159401 - - 4.976024773829 - -12.55705651792 - 43.13446872147 - - 
218.8203159401 - 43.13446872147 - -16.96609172267 - - - 26.99562208048 - 4.039388146136 - 194.3460658399 - - 4.039388146136 - -10.904018831 - 35.05642111527 - - 194.3460658399 - 35.05642111527 - -17.54258356033 - - - 21.28989652643 - 3.211915555773 - 156.5687870814 - - 3.211915555773 - -7.888244440813 - 27.94191248902 - - 156.5687870814 - 27.94191248902 - -18.64926539005 - - - 17.09406685234 - 2.66162436771 - 131.0277600318 - - 2.66162436771 - -5.180832861658 - 23.26291444469 - - 131.0277600318 - 23.26291444469 - -23.07010638636 - - - 21.28989652643 - 3.211915555773 - 156.5687870814 - - 3.211915555773 - -7.888244440814 - 27.94191248902 - - 156.5687870814 - 27.94191248902 - -18.64926539005 - - - 17.09406685234 - 2.66162436771 - 131.0277600318 - - 2.66162436771 - -5.180832861658 - 23.26291444469 - - 131.0277600318 - 23.26291444469 - -23.07010638636 - - - 13.22112788651 - 1.569537501602 - 137.5825625705 - - 1.569537501602 - -4.091404567032 - 14.28797929438 - - 137.5825625705 - 14.28797929438 - -69.36780402294 - - - 12.39426977847 - 3.219862122548 - 69.57634867759 - - 3.219862122548 - 3.931549769208 - 27.88803537784 - - 69.57634867759 - 27.88803537784 - 16.06772788907 - - - 10.59582926181 - 3.961203981524 - 63.6129919848 - - 3.961203981524 - 13.8089243593 - 34.67215199382 - - 63.6129919848 - 34.67215199382 - 3.402916211001 - - - -4.590996625593 - -49.8578626567 - 88.18903535997 - - -49.8578626567 - -392.7968335729 - -441.4737183738 - - 88.18903535997 - -441.4737183738 - -2.005253308587 - - - 6.609230403572 - -1.114542090064 - 50.17123518647 - - -1.114542090064 - -20.03956487444 - -10.10106952879 - - 50.17123518647 - -10.10106952879 - -0.6749898814763 - - - 5.867803812028 - -0.1814136458974 - 42.89612529825 - - -0.1814136458974 - -11.28413222756 - -1.804764358727 - - 42.89612529825 - -1.804764358727 - -1.028734386942 - - - 5.122058993026 - 0.07028381229468 - 37.07301548541 - - 0.07028381229468 - -8.007614389428 - 0.4527765498708 - - 37.07301548541 - 0.4527765498708 - 
-1.160231423527 - - - 42.05642503987 - 5.182308979952 - 295.7086970924 - - 5.182308979952 - -16.8011933671 - 44.88068608775 - - 295.7086970924 - 44.88068608775 - -11.16800948171 - - - 34.52716237442 - 3.742228341934 - 249.1555844695 - - 3.742228341934 - -13.95687891579 - 32.4806538628 - - 249.1555844695 - 32.4806538628 - -15.39308605662 - - - 23.89982318472 - 3.566267197888 - 173.817334734 - - 3.566267197888 - -9.358360214064 - 30.98344929428 - - 173.817334734 - 30.98344929428 - -17.96992259777 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 4.783039455404 - 0.1257874954057 - 34.53987590883 - - 0.1257874954057 - -6.993256198462 - 0.9566846715419 - - 34.53987590883 - 0.9566846715419 - -1.179917631133 - - - 42.05642503987 - 5.182308979952 - 295.7086970924 - - 5.182308979952 - -16.8011933671 - 44.88068608775 - - 295.7086970924 - 44.88068608775 - -11.16800948171 - - - 34.52716237442 - 3.742228341934 - 249.1555844695 - - 3.742228341934 - -13.95687891579 - 32.4806538628 - - 249.1555844695 - 32.4806538628 - -15.39308605662 - - - 23.89982318472 - 3.566267197888 - 173.817334734 - - 3.566267197888 - -9.358360214064 - 30.98344929428 - - 173.817334734 - 30.98344929428 - -17.96992259777 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 14.37037076795 - 4.106940832789 - 
45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 4.783039455404 - 0.1257874954058 - 34.53987590883 - - 0.1257874954058 - -6.993256198462 - 0.9566846715422 - - 34.53987590883 - 0.9566846715422 - -1.179917631132 - - - 42.05642503987 - 5.182308979952 - 295.7086970924 - - 5.182308979952 - -16.8011933671 - 44.88068608775 - - 295.7086970924 - 44.88068608775 - -11.16800948171 - - - 34.52716237442 - 3.742228341934 - 249.1555844695 - - 3.742228341934 - -13.95687891579 - 32.4806538628 - - 249.1555844695 - 32.4806538628 - -15.39308605662 - - - 23.89982318472 - 3.566267197888 - 173.817334734 - - 3.566267197888 - -9.358360214064 - 30.98344929428 - - 173.817334734 - 30.98344929428 - -17.96992259777 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 4.783039455404 - 0.1257874954057 - 34.53987590883 - - 0.1257874954057 - -6.993256198462 - 0.9566846715419 - - 34.53987590883 - 0.9566846715419 - -1.179917631133 - - - 42.05642503987 - 5.182308979952 - 295.7086970924 - - 5.182308979952 - -16.8011933671 - 44.88068608775 - - 295.7086970924 - 44.88068608775 - -11.16800948171 - - - 34.52716237442 - 3.742228341934 - 249.1555844695 - - 3.742228341934 - -13.95687891579 - 32.4806538628 - - 249.1555844695 - 32.4806538628 - -15.39308605662 - - - 23.89982318472 - 3.566267197888 - 173.817334734 - - 3.566267197888 - -9.358360214064 - 30.98344929428 - - 173.817334734 - 30.98344929428 - -17.96992259777 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 
35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 4.783039455404 - 0.1257874954057 - 34.53987590883 - - 0.1257874954057 - -6.993256198462 - 0.9566846715419 - - 34.53987590883 - 0.9566846715419 - -1.179917631133 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 4.783039455404 - 0.1257874954057 - 34.53987590883 - - 0.1257874954057 - -6.993256198462 - 0.9566846715419 - - 34.53987590883 - 0.9566846715419 - -1.179917631133 - - - 42.05642503987 - 5.182308979952 - 295.7086970924 - - 5.182308979952 - -16.8011933671 - 44.88068608775 - - 295.7086970924 - 44.88068608775 - -11.16800948171 - - - 34.52716237442 - 3.742228341934 - 249.1555844695 - - 3.742228341934 - -13.95687891579 - 32.4806538628 - - 249.1555844695 - 32.4806538628 - -15.39308605662 - - - 30.88627308451 - 4.976024773829 - 218.8203159401 - - 4.976024773829 - -12.55705651792 - 43.13446872147 - - 218.8203159401 - 43.13446872147 - -16.96609172267 - - - 23.89982318472 - 3.566267197888 - 173.817334734 - - 3.566267197888 - -9.358360214064 - 30.98344929428 - - 173.817334734 - 30.98344929428 - -17.96992259777 - - - 19.05038877716 - 2.921211127977 - 142.2705130619 - - 2.921211127977 - -6.493471507915 - 25.45927037319 - - 142.2705130619 - 25.45927037319 - -20.03762618713 - - - 19.05038877716 - 2.921211127977 - 142.2705130619 - - 2.921211127977 - -6.493471507915 - 25.45927037319 - - 142.2705130619 - 25.45927037319 - -20.03762618713 - - - 15.31308103373 - 2.369394773812 - 124.3899129792 - - 2.369394773812 - -4.034968539624 - 20.83080340569 - - 124.3899129792 - 20.83080340569 - -30.92076532995 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 
35.03465443111 - 68.57957805078 - - - 10.44273226791 - 6.475979422676 - 57.63907853267 - - 6.475979422676 - 34.37966124455 - 56.97442501698 - - 57.63907853267 - 56.97442501698 - 1.719659134263 - - - 6.258067891218 - -0.4773313726774 - 46.28932429293 - - -0.4773313726774 - -14.3215416894 - -4.441525949927 - - 46.28932429293 - -4.441525949927 - -0.8894811141752 - - - 4.783039455404 - 0.1257874954057 - 34.53987590883 - - 0.1257874954057 - -6.993256198462 - 0.956684671542 - - 34.53987590883 - 0.956684671542 - -1.179917631133 - - - 42.05642503987 - 5.182308979952 - 295.7086970924 - - 5.182308979952 - -16.8011933671 - 44.88068608775 - - 295.7086970924 - 44.88068608775 - -11.16800948171 - - - 34.52716237442 - 3.742228341934 - 249.1555844695 - - 3.742228341934 - -13.95687891579 - 32.4806538628 - - 249.1555844695 - 32.4806538628 - -15.39308605662 - - - 30.88627308451 - 4.976024773829 - 218.8203159401 - - 4.976024773829 - -12.55705651792 - 43.13446872147 - - 218.8203159401 - 43.13446872147 - -16.96609172267 - - - 23.89982318472 - 3.566267197888 - 173.817334734 - - 3.566267197888 - -9.358360214064 - 30.98344929428 - - 173.817334734 - 30.98344929428 - -17.96992259777 - - - 19.05038877716 - 2.921211127977 - 142.2705130619 - - 2.921211127977 - -6.493471507915 - 25.45927037319 - - 142.2705130619 - 25.45927037319 - -20.03762618713 - - - 19.05038877716 - 2.921211127977 - 142.2705130619 - - 2.921211127977 - -6.493471507915 - 25.45927037319 - - 142.2705130619 - 25.45927037319 - -20.03762618713 - - - 15.31308103373 - 2.369394773812 - 124.3899129792 - - 2.369394773812 - -4.034968539624 - 20.83080340569 - - 124.3899129792 - 20.83080340569 - -30.92076532995 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 10.44273226791 - 6.475979422676 - 57.63907853267 - - 6.475979422676 - 34.37966124455 - 56.97442501698 - - 57.63907853267 - 56.97442501698 - 1.719659134262 - - - 
6.258067891218 - -0.4773313726774 - 46.28932429293 - - -0.4773313726774 - -14.3215416894 - -4.441525949927 - - 46.28932429293 - -4.441525949927 - -0.889481114175 - - - 4.783039455404 - 0.1257874954058 - 34.53987590883 - - 0.1257874954058 - -6.993256198462 - 0.956684671542 - - 34.53987590883 - 0.956684671542 - -1.179917631132 - - - 42.05642503987 - 5.182308979952 - 295.7086970924 - - 5.182308979952 - -16.8011933671 - 44.88068608775 - - 295.7086970924 - 44.88068608775 - -11.16800948171 - - - 34.52716237442 - 3.742228341934 - 249.1555844695 - - 3.742228341934 - -13.95687891579 - 32.4806538628 - - 249.1555844695 - 32.4806538628 - -15.39308605662 - - - 30.88627308451 - 4.976024773829 - 218.8203159401 - - 4.976024773829 - -12.55705651792 - 43.13446872147 - - 218.8203159401 - 43.13446872147 - -16.96609172267 - - - 23.89982318472 - 3.566267197888 - 173.817334734 - - 3.566267197888 - -9.358360214064 - 30.98344929428 - - 173.817334734 - 30.98344929428 - -17.96992259777 - - - 19.05038877716 - 2.921211127977 - 142.2705130619 - - 2.921211127977 - -6.493471507915 - 25.45927037319 - - 142.2705130619 - 25.45927037319 - -20.03762618713 - - - 19.05038877716 - 2.921211127977 - 142.2705130619 - - 2.921211127977 - -6.493471507915 - 25.45927037319 - - 142.2705130619 - 25.45927037319 - -20.03762618713 - - - 15.31308103373 - 2.369394773812 - 124.3899129792 - - 2.369394773812 - -4.034968539624 - 20.83080340569 - - 124.3899129792 - 20.83080340569 - -30.92076532995 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 10.44273226791 - 6.475979422676 - 57.63907853267 - - 6.475979422676 - 34.37966124455 - 56.97442501698 - - 57.63907853267 - 56.97442501698 - 1.719659134262 - - - 6.258067891218 - -0.4773313726774 - 46.28932429293 - - -0.4773313726774 - -14.3215416894 - -4.441525949927 - - 46.28932429293 - -4.441525949927 - -0.889481114175 - - - 4.783039455404 - 0.1257874954057 - 
34.53987590883 - - 0.1257874954057 - -6.993256198462 - 0.9566846715419 - - 34.53987590883 - 0.9566846715419 - -1.179917631133 - - - 42.05642503987 - 5.182308979952 - 295.7086970924 - - 5.182308979952 - -16.8011933671 - 44.88068608775 - - 295.7086970924 - 44.88068608775 - -11.16800948171 - - - 37.76206570488 - 4.598069813344 - 268.480673393 - - 4.598069813344 - -15.06222715154 - 39.86023126116 - - 268.480673393 - 39.86023126116 - -13.8739789021 - - - 34.52716237442 - 3.742228341934 - 249.1555844695 - - 3.742228341934 - -13.95687891579 - 32.4806538628 - - 249.1555844695 - 32.4806538628 - -15.39308605662 - - - 36.36211362787 - 11.74965491243 - 232.6612213924 - - 11.74965491243 - -14.33678876201 - 101.5946337199 - - 232.6612213924 - 101.5946337199 - -16.74890647466 - - - 30.88627308451 - 4.976024773829 - 218.8203159401 - - 4.976024773829 - -12.55705651792 - 43.13446872147 - - 218.8203159401 - 43.13446872147 - -16.96609172267 - - - 26.99562208048 - 4.039388146136 - 194.3460658399 - - 4.039388146136 - -10.904018831 - 35.05642111527 - - 194.3460658399 - 35.05642111527 - -17.54258356033 - - - 17.09406685234 - 2.66162436771 - 131.0277600318 - - 2.66162436771 - -5.180832861657 - 23.26291444469 - - 131.0277600318 - 23.26291444469 - -23.07010638636 - - - 17.09406685234 - 2.66162436771 - 131.0277600318 - - 2.66162436771 - -5.180832861658 - 23.26291444469 - - 131.0277600318 - 23.26291444469 - -23.07010638636 - - - 13.22112788651 - 1.569537501602 - 137.5825625705 - - 1.569537501602 - -4.091404567032 - 14.28797929438 - - 137.5825625705 - 14.28797929438 - -69.36780402294 - - - 12.39426977847 - 3.219862122548 - 69.57634867759 - - 3.219862122548 - 3.931549769208 - 27.88803537784 - - 69.57634867759 - 27.88803537784 - 16.06772788907 - - - 10.59582926181 - 3.961203981524 - 63.6129919848 - - 3.961203981524 - 13.8089243593 - 34.67215199382 - - 63.6129919848 - 34.67215199382 - 3.402916211001 - - - -4.590996625593 - -49.8578626567 - 88.18903535997 - - -49.8578626567 - -392.7968335729 - 
-441.4737183738 - - 88.18903535997 - -441.4737183738 - -2.005253308587 - - - 6.609230403572 - -1.114542090064 - 50.17123518647 - - -1.114542090064 - -20.03956487444 - -10.10106952879 - - 50.17123518647 - -10.10106952879 - -0.6749898814764 - - - 5.867803812028 - -0.1814136458974 - 42.89612529825 - - -0.1814136458974 - -11.28413222756 - -1.804764358727 - - 42.89612529825 - -1.804764358727 - -1.028734386942 - - - 5.122058993026 - 0.07028381229466 - 37.07301548541 - - 0.07028381229466 - -8.007614389428 - 0.4527765498708 - - 37.07301548541 - 0.4527765498708 - -1.160231423527 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 42.05642503987 - 5.182308979952 - 295.7086970924 - - 5.182308979952 - -16.8011933671 - 44.88068608775 - - 295.7086970924 - 44.88068608775 - -11.16800948171 - - - 34.52716237442 - 3.742228341934 - 249.1555844695 - - 3.742228341934 - -13.95687891579 - 32.4806538628 - - 249.1555844695 - 32.4806538628 - -15.39308605662 - - - 30.88627308451 - 4.976024773829 - 218.8203159401 - - 4.976024773829 - -12.55705651792 - 43.13446872147 - - 218.8203159401 - 
43.13446872147 - -16.96609172267 - - - 23.89982318472 - 3.566267197888 - 173.817334734 - - 3.566267197888 - -9.358360214064 - 30.98344929428 - - 173.817334734 - 30.98344929428 - -17.96992259777 - - - 19.05038877716 - 2.921211127977 - 142.2705130619 - - 2.921211127977 - -6.493471507915 - 25.45927037319 - - 142.2705130619 - 25.45927037319 - -20.03762618713 - - - 15.31308103373 - 2.369394773812 - 124.3899129792 - - 2.369394773812 - -4.034968539623 - 20.83080340569 - - 124.3899129792 - 20.83080340569 - -30.92076532995 - - - 19.05038877716 - 2.921211127977 - 142.2705130619 - - 2.921211127977 - -6.493471507915 - 25.45927037319 - - 142.2705130619 - 25.45927037319 - -20.03762618713 - - - 15.31308103373 - 2.369394773812 - 124.3899129792 - - 2.369394773812 - -4.034968539624 - 20.83080340569 - - 124.3899129792 - 20.83080340569 - -30.92076532995 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 10.44273226791 - 6.475979422676 - 57.63907853267 - - 6.475979422676 - 34.37966124455 - 56.97442501698 - - 57.63907853267 - 56.97442501698 - 1.719659134263 - - - 6.258067891218 - -0.4773313726774 - 46.28932429293 - - -0.4773313726774 - -14.3215416894 - -4.441525949927 - - 46.28932429293 - -4.441525949927 - -0.8894811141752 - - - 4.783039455404 - 0.1257874954058 - 34.53987590883 - - 0.1257874954058 - -6.993256198462 - 0.956684671542 - - 34.53987590883 - 0.956684671542 - -1.179917631132 - - - 42.05642503987 - 5.182308979952 - 295.7086970924 - - 5.182308979952 - -16.8011933671 - 44.88068608775 - - 295.7086970924 - 44.88068608775 - -11.16800948171 - - - 34.52716237442 - 3.742228341934 - 249.1555844695 - - 3.742228341934 - -13.95687891579 - 32.4806538628 - - 249.1555844695 - 32.4806538628 - -15.39308605662 - - - 30.88627308451 - 4.976024773829 - 218.8203159401 - - 4.976024773829 - -12.55705651792 - 43.13446872147 - - 218.8203159401 - 43.13446872147 - -16.96609172267 - - - 
23.89982318472 - 3.566267197888 - 173.817334734 - - 3.566267197888 - -9.358360214064 - 30.98344929428 - - 173.817334734 - 30.98344929428 - -17.96992259777 - - - 19.05038877716 - 2.921211127977 - 142.2705130619 - - 2.921211127977 - -6.493471507915 - 25.45927037319 - - 142.2705130619 - 25.45927037319 - -20.03762618713 - - - 15.31308103373 - 2.369394773812 - 124.3899129792 - - 2.369394773812 - -4.034968539623 - 20.83080340569 - - 124.3899129792 - 20.83080340569 - -30.92076532995 - - - 19.05038877716 - 2.921211127977 - 142.2705130619 - - 2.921211127977 - -6.493471507915 - 25.45927037319 - - 142.2705130619 - 25.45927037319 - -20.03762618713 - - - 15.31308103373 - 2.369394773812 - 124.3899129792 - - 2.369394773812 - -4.034968539623 - 20.83080340569 - - 124.3899129792 - 20.83080340569 - -30.92076532995 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 10.44273226791 - 6.475979422676 - 57.63907853267 - - 6.475979422676 - 34.37966124455 - 56.97442501698 - - 57.63907853267 - 56.97442501698 - 1.719659134263 - - - 6.258067891218 - -0.4773313726774 - 46.28932429293 - - -0.4773313726774 - -14.3215416894 - -4.441525949927 - - 46.28932429293 - -4.441525949927 - -0.889481114175 - - - 4.783039455404 - 0.1257874954057 - 34.53987590883 - - 0.1257874954057 - -6.993256198462 - 0.956684671542 - - 34.53987590883 - 0.956684671542 - -1.179917631133 - - - 42.05642503987 - 5.182308979952 - 295.7086970924 - - 5.182308979952 - -16.8011933671 - 44.88068608775 - - 295.7086970924 - 44.88068608775 - -11.16800948171 - - - 34.52716237442 - 3.742228341934 - 249.1555844695 - - 3.742228341934 - -13.95687891579 - 32.4806538628 - - 249.1555844695 - 32.4806538628 - -15.39308605662 - - - 30.88627308451 - 4.976024773829 - 218.8203159401 - - 4.976024773829 - -12.55705651792 - 43.13446872147 - - 218.8203159401 - 43.13446872147 - -16.96609172267 - - - 23.89982318472 - 3.566267197888 - 
173.817334734 - - 3.566267197888 - -9.358360214064 - 30.98344929428 - - 173.817334734 - 30.98344929428 - -17.96992259777 - - - 19.05038877716 - 2.921211127977 - 142.2705130619 - - 2.921211127977 - -6.493471507915 - 25.45927037319 - - 142.2705130619 - 25.45927037319 - -20.03762618713 - - - 15.31308103373 - 2.369394773812 - 124.3899129792 - - 2.369394773812 - -4.034968539623 - 20.83080340569 - - 124.3899129792 - 20.83080340569 - -30.92076532995 - - - 19.05038877716 - 2.921211127977 - 142.2705130619 - - 2.921211127977 - -6.493471507915 - 25.45927037319 - - 142.2705130619 - 25.45927037319 - -20.03762618713 - - - 15.31308103373 - 2.369394773812 - 124.3899129792 - - 2.369394773812 - -4.034968539623 - 20.83080340569 - - 124.3899129792 - 20.83080340569 - -30.92076532995 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 10.44273226791 - 6.475979422676 - 57.63907853267 - - 6.475979422676 - 34.37966124455 - 56.97442501698 - - 57.63907853267 - 56.97442501698 - 1.719659134262 - - - 6.258067891218 - -0.4773313726774 - 46.28932429293 - - -0.4773313726774 - -14.3215416894 - -4.441525949927 - - 46.28932429293 - -4.441525949927 - -0.889481114175 - - - 4.783039455404 - 0.1257874954058 - 34.53987590883 - - 0.1257874954058 - -6.993256198462 - 0.956684671542 - - 34.53987590883 - 0.956684671542 - -1.179917631132 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 42.05642503987 - 5.182308979952 - 295.7086970924 - - 5.182308979952 - -16.8011933671 - 44.88068608775 - - 295.7086970924 - 44.88068608775 - -11.16800948171 - - - 34.52716237442 - 3.742228341934 - 249.1555844695 - - 3.742228341934 - -13.95687891579 - 32.4806538628 - - 249.1555844695 - 32.4806538628 - -15.39308605662 - - - 23.89982318472 - 3.566267197888 - 173.817334734 - - 3.566267197888 - 
-9.358360214064 - 30.98344929428 - - 173.817334734 - 30.98344929428 - -17.96992259777 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 4.783039455404 - 0.1257874954057 - 34.53987590883 - - 0.1257874954057 - -6.993256198462 - 0.956684671542 - - 34.53987590883 - 0.956684671542 - -1.179917631133 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 4.783039455404 - 0.1257874954057 - 34.53987590883 - - 0.1257874954057 - -6.993256198462 - 0.956684671542 - - 34.53987590883 - 0.956684671542 - -1.179917631133 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 21.28989652643 - 3.211915555773 - 156.5687870814 - - 3.211915555773 - -7.888244440813 - 27.94191248902 - - 156.5687870814 - 27.94191248902 - -18.64926539005 - - - 21.28989652643 - 3.211915555773 - 156.5687870814 - - 3.211915555773 - -7.888244440813 - 27.94191248902 - - 156.5687870814 - 27.94191248902 - -18.64926539005 - - - 42.05642503987 - 5.182308979952 - 295.7086970924 - - 5.182308979952 - -16.8011933671 - 44.88068608775 - - 295.7086970924 - 44.88068608775 - -11.16800948171 - - - 34.52716237442 - 3.742228341934 - 249.1555844695 - - 3.742228341934 - -13.95687891579 - 32.4806538628 - - 249.1555844695 - 32.4806538628 - -15.39308605662 - - - 21.28989652643 - 3.211915555773 - 156.5687870814 - - 3.211915555773 - -7.888244440814 - 27.94191248902 - - 156.5687870814 - 
27.94191248902 - -18.64926539005 - - - 21.28989652643 - 3.211915555773 - 156.5687870814 - - 3.211915555773 - -7.888244440813 - 27.94191248902 - - 156.5687870814 - 27.94191248902 - -18.64926539005 - - - 13.22112788651 - 1.569537501602 - 137.5825625705 - - 1.569537501602 - -4.091404567033 - 14.28797929438 - - 137.5825625705 - 14.28797929438 - -69.36780402294 - - - 10.59582926181 - 3.961203981524 - 63.6129919848 - - 3.961203981524 - 13.8089243593 - 34.67215199382 - - 63.6129919848 - 34.67215199382 - 3.402916211 - - - 21.28989652643 - 3.211915555773 - 156.5687870814 - - 3.211915555773 - -7.888244440813 - 27.94191248902 - - 156.5687870814 - 27.94191248902 - -18.64926539005 - - - 21.28989652643 - 3.211915555773 - 156.5687870814 - - 3.211915555773 - -7.888244440814 - 27.94191248902 - - 156.5687870814 - 27.94191248902 - -18.64926539005 - - - 42.05642503987 - 5.182308979952 - 295.7086970924 - - 5.182308979952 - -16.8011933671 - 44.88068608775 - - 295.7086970924 - 44.88068608775 - -11.16800948171 - - - 34.52716237442 - 3.742228341934 - 249.1555844695 - - 3.742228341934 - -13.95687891579 - 32.4806538628 - - 249.1555844695 - 32.4806538628 - -15.39308605662 - - - 13.22112788651 - 1.569537501602 - 137.5825625705 - - 1.569537501602 - -4.091404567032 - 14.28797929438 - - 137.5825625705 - 14.28797929438 - -69.36780402294 - - - 10.59582926181 - 3.961203981524 - 63.6129919848 - - 3.961203981524 - 13.8089243593 - 34.67215199382 - - 63.6129919848 - 34.67215199382 - 3.402916211 - - - 21.28989652643 - 3.211915555773 - 156.5687870814 - - 3.211915555773 - -7.888244440813 - 27.94191248902 - - 156.5687870814 - 27.94191248902 - -18.64926539005 - - - 21.28989652643 - 3.211915555773 - 156.5687870814 - - 3.211915555773 - -7.888244440813 - 27.94191248902 - - 156.5687870814 - 27.94191248902 - -18.64926539005 square_terms_expected: - - - -3856.411663325 - -1744.213523774 - 2262.533558079 - - -1744.213523774 - -2684.929962548 - -210.0096145524 - - 2262.533558079 - -210.0096145524 - -71.22779112153 - 
- - -10569.10299644 - -4682.769847703 - 3951.415969003 - - -4682.769847703 - -7035.132202115 - -875.1223560398 - - 3951.415969003 - -875.1223560398 - -122.0013610248 - - - 14481.06389629 - 6304.851879955 - -2851.437438832 - - 6304.851879955 - 9273.533330841 - 1517.67089317 - - -2851.437438832 - 1517.67089317 - 32.41624891305 - - - 4252.951031935 - 1848.277580659 - -170.2845897301 - - 1848.277580659 - 2612.860235584 - 805.8419376052 - - -170.2845897301 - 805.8419376052 - -37.20600626186 - - - 2432.916206823 - 1032.638149205 - 257.4061444594 - - 1032.638149205 - 1449.487149189 - 399.2375243902 - - 257.4061444594 - 399.2375243902 - -48.15069583689 - - - 1240.4084327 - 512.7592742227 - 475.6238125956 - - 512.7592742227 - 690.0567114374 - 253.7250568021 - - 475.6238125956 - 253.7250568021 - -57.22030396345 - - - 459.1360054272 - 176.8440075451 - 527.6634588185 - - 176.8440075451 - 209.2614271428 - 151.24310776 - - 527.6634588185 - 151.24310776 - -67.51764137812 - - - -28.40757635067 - -31.79743224237 - 550.3578108482 - - -31.79743224237 - -84.07371396961 - 84.22388465059 - - 550.3578108482 - 84.22388465059 - -92.16014545357 - - - 459.1360054272 - 176.8440075451 - 527.6634588185 - - 176.8440075451 - 209.2614271428 - 151.24310776 - - 527.6634588185 - 151.24310776 - -67.51764137812 - - - -28.40757635067 - -31.79743224237 - 550.3578108482 - - -31.79743224237 - -84.07371396961 - 84.22388465059 - - 550.3578108482 - 84.22388465059 - -92.16014545357 - - - -2045.110581511 - -913.7367115119 - 1103.831669941 - - -913.7367115119 - -1367.741368628 - -163.7437421998 - - 1103.831669941 - -163.7437421998 - -316.398543429 - - - 1219.218783147 - 525.7884332597 - -29.15120474328 - - 525.7884332597 - 767.5344275996 - 236.8486988899 - - -29.15120474328 - 236.8486988899 - 88.99129419806 - - - 662.0714264794 - 287.5911592069 - 90.92844099577 - - 287.5911592069 - 454.4431485291 - 203.8681218118 - - 90.92844099577 - 203.8681218118 - 25.01577261847 - - - -1377.243922834 - -795.4386681958 - 
747.122472413 - - -795.4386681958 - -2497.250719358 - -1840.429221121 - - 747.122472413 - -1840.429221121 - 63.15352494948 - - - 217.522078241 - 79.32830419073 - 153.0067332807 - - 79.32830419073 - 39.05322569843 - -15.05847653376 - - 153.0067332807 - -15.05847653376 - 8.195712590102 - - - 192.1674291662 - 73.24546567179 - 128.9418592581 - - 73.24546567179 - 60.8870259716 - 14.10438172489 - - 128.9418592581 - 14.10438172489 - 4.023771621814 - - - 160.9847149983 - 61.88723830032 - 112.7111318224 - - 61.88723830032 - 56.36412497327 - 19.44285900547 - - 112.7111318224 - 19.44285900547 - 1.958413872367 - - - -3856.411663325 - -1744.213523774 - 2262.533558079 - - -1744.213523774 - -2684.929962548 - -210.0096145524 - - 2262.533558079 - -210.0096145524 - -71.22779112153 - - - -10569.10299644 - -4682.769847703 - 3951.415969003 - - -4682.769847703 - -7035.132202115 - -875.1223560398 - - 3951.415969003 - -875.1223560398 - -122.0013610248 - - - 14481.06389629 - 6304.851879955 - -2851.437438832 - - 6304.851879955 - 9273.533330841 - 1517.67089317 - - -2851.437438832 - 1517.67089317 - 32.41624891304 - - - 4252.951031935 - 1848.277580659 - -170.2845897301 - - 1848.277580659 - 2612.860235584 - 805.8419376052 - - -170.2845897301 - 805.8419376052 - -37.20600626185 - - - 2432.916206823 - 1032.638149205 - 257.4061444594 - - 1032.638149205 - 1449.487149189 - 399.2375243902 - - 257.4061444594 - 399.2375243902 - -48.15069583689 - - - 1240.4084327 - 512.7592742227 - 475.6238125956 - - 512.7592742227 - 690.0567114374 - 253.7250568021 - - 475.6238125956 - 253.7250568021 - -57.22030396345 - - - 459.1360054272 - 176.8440075451 - 527.6634588185 - - 176.8440075451 - 209.2614271428 - 151.24310776 - - 527.6634588185 - 151.24310776 - -67.51764137812 - - - -28.40757635067 - -31.79743224237 - 550.3578108482 - - -31.79743224237 - -84.07371396961 - 84.22388465059 - - 550.3578108482 - 84.22388465059 - -92.16014545357 - - - 459.1360054272 - 176.8440075451 - 527.6634588185 - - 176.8440075451 - 
209.2614271428 - 151.24310776 - - 527.6634588185 - 151.24310776 - -67.51764137812 - - - -28.40757635067 - -31.79743224237 - 550.3578108482 - - -31.79743224237 - -84.07371396961 - 84.22388465059 - - 550.3578108482 - 84.22388465059 - -92.16014545357 - - - -2045.110581511 - -913.7367115119 - 1103.831669941 - - -913.7367115119 - -1367.741368628 - -163.7437421998 - - 1103.831669941 - -163.7437421998 - -316.398543429 - - - 1219.218783147 - 525.7884332597 - -29.15120474328 - - 525.7884332597 - 767.5344275996 - 236.8486988899 - - -29.15120474328 - 236.8486988899 - 88.99129419806 - - - 662.0714264794 - 287.5911592069 - 90.92844099577 - - 287.5911592069 - 454.4431485291 - 203.8681218118 - - 90.92844099577 - 203.8681218118 - 25.01577261847 - - - -1377.243922834 - -795.4386681958 - 747.122472413 - - -795.4386681958 - -2497.250719358 - -1840.429221121 - - 747.122472413 - -1840.429221121 - 63.15352494948 - - - 217.522078241 - 79.32830419073 - 153.0067332807 - - 79.32830419073 - 39.05322569843 - -15.05847653376 - - 153.0067332807 - -15.05847653376 - 8.195712590102 - - - 192.1674291662 - 73.24546567179 - 128.9418592581 - - 73.24546567179 - 60.8870259716 - 14.10438172489 - - 128.9418592581 - 14.10438172489 - 4.023771621814 - - - 160.9847149983 - 61.88723830032 - 112.7111318224 - - 61.88723830032 - 56.36412497327 - 19.44285900547 - - 112.7111318224 - 19.44285900547 - 1.958413872367 - - - -3856.411663325 - -1744.213523774 - 2262.533558079 - - -1744.213523774 - -2684.929962548 - -210.0096145524 - - 2262.533558079 - -210.0096145524 - -71.22779112153 - - - -10569.10299644 - -4682.769847703 - 3951.415969003 - - -4682.769847703 - -7035.132202115 - -875.1223560398 - - 3951.415969003 - -875.1223560398 - -122.0013610248 - - - 14481.06389629 - 6304.851879955 - -2851.437438832 - - 6304.851879955 - 9273.533330841 - 1517.67089317 - - -2851.437438832 - 1517.67089317 - 32.41624891304 - - - 4252.951031935 - 1848.277580659 - -170.2845897301 - - 1848.277580659 - 2612.860235584 - 805.8419376052 - - 
-170.2845897301 - 805.8419376052 - -37.20600626185 - - - 2432.916206823 - 1032.638149205 - 257.4061444594 - - 1032.638149205 - 1449.487149189 - 399.2375243902 - - 257.4061444594 - 399.2375243902 - -48.15069583689 - - - 1240.4084327 - 512.7592742227 - 475.6238125956 - - 512.7592742227 - 690.0567114374 - 253.7250568021 - - 475.6238125956 - 253.7250568021 - -57.22030396345 - - - 459.1360054272 - 176.8440075451 - 527.6634588185 - - 176.8440075451 - 209.2614271428 - 151.24310776 - - 527.6634588185 - 151.24310776 - -67.51764137812 - - - -28.40757635067 - -31.79743224237 - 550.3578108482 - - -31.79743224237 - -84.07371396961 - 84.22388465059 - - 550.3578108482 - 84.22388465059 - -92.16014545357 - - - 459.1360054272 - 176.8440075451 - 527.6634588185 - - 176.8440075451 - 209.2614271428 - 151.24310776 - - 527.6634588185 - 151.24310776 - -67.51764137812 - - - -28.40757635067 - -31.79743224237 - 550.3578108482 - - -31.79743224237 - -84.07371396961 - 84.22388465059 - - 550.3578108482 - 84.22388465059 - -92.16014545357 - - - -2045.110581511 - -913.7367115119 - 1103.831669941 - - -913.7367115119 - -1367.741368628 - -163.7437421998 - - 1103.831669941 - -163.7437421998 - -316.398543429 - - - 1219.218783147 - 525.7884332597 - -29.15120474328 - - 525.7884332597 - 767.5344275996 - 236.8486988899 - - -29.15120474328 - 236.8486988899 - 88.99129419806 - - - 662.0714264794 - 287.5911592069 - 90.92844099577 - - 287.5911592069 - 454.4431485291 - 203.8681218118 - - 90.92844099577 - 203.8681218118 - 25.01577261847 - - - -1377.243922834 - -795.4386681958 - 747.122472413 - - -795.4386681958 - -2497.250719358 - -1840.429221121 - - 747.122472413 - -1840.429221121 - 63.15352494948 - - - 217.522078241 - 79.32830419073 - 153.0067332807 - - 79.32830419073 - 39.05322569843 - -15.05847653376 - - 153.0067332807 - -15.05847653376 - 8.195712590102 - - - 192.1674291662 - 73.24546567179 - 128.9418592581 - - 73.24546567179 - 60.8870259716 - 14.10438172489 - - 128.9418592581 - 14.10438172489 - 4.023771621814 
- - - 160.9847149983 - 61.88723830032 - 112.7111318224 - - 61.88723830032 - 56.36412497327 - 19.44285900547 - - 112.7111318224 - 19.44285900547 - 1.958413872367 - - - -3856.411663325 - -1744.213523774 - 2262.533558079 - - -1744.213523774 - -2684.929962548 - -210.0096145524 - - 2262.533558079 - -210.0096145524 - -71.22779112153 - - - -10569.10299644 - -4682.769847703 - 3951.415969003 - - -4682.769847703 - -7035.132202115 - -875.1223560398 - - 3951.415969003 - -875.1223560398 - -122.0013610248 - - - 14481.06389629 - 6304.851879955 - -2851.437438832 - - 6304.851879955 - 9273.533330841 - 1517.67089317 - - -2851.437438832 - 1517.67089317 - 32.41624891304 - - - 4252.951031935 - 1848.277580659 - -170.2845897301 - - 1848.277580659 - 2612.860235584 - 805.8419376052 - - -170.2845897301 - 805.8419376052 - -37.20600626185 - - - 2432.916206823 - 1032.638149205 - 257.4061444594 - - 1032.638149205 - 1449.487149189 - 399.2375243902 - - 257.4061444594 - 399.2375243902 - -48.15069583689 - - - 1240.4084327 - 512.7592742227 - 475.6238125956 - - 512.7592742227 - 690.0567114374 - 253.7250568021 - - 475.6238125956 - 253.7250568021 - -57.22030396345 - - - 459.1360054272 - 176.8440075451 - 527.6634588185 - - 176.8440075451 - 209.2614271428 - 151.24310776 - - 527.6634588185 - 151.24310776 - -67.51764137812 - - - -28.40757635067 - -31.79743224237 - 550.3578108482 - - -31.79743224237 - -84.07371396961 - 84.22388465059 - - 550.3578108482 - 84.22388465059 - -92.16014545357 - - - 459.1360054272 - 176.8440075451 - 527.6634588185 - - 176.8440075451 - 209.2614271428 - 151.24310776 - - 527.6634588185 - 151.24310776 - -67.51764137812 - - - -28.40757635067 - -31.79743224237 - 550.3578108482 - - -31.79743224237 - -84.07371396961 - 84.22388465059 - - 550.3578108482 - 84.22388465059 - -92.16014545357 - - - -2045.110581511 - -913.7367115119 - 1103.831669941 - - -913.7367115119 - -1367.741368628 - -163.7437421998 - - 1103.831669941 - -163.7437421998 - -316.398543429 - - - 1219.218783147 - 525.7884332597 - 
-29.15120474328 - - 525.7884332597 - 767.5344275996 - 236.8486988899 - - -29.15120474328 - 236.8486988899 - 88.99129419806 - - - 662.0714264794 - 287.5911592069 - 90.92844099577 - - 287.5911592069 - 454.4431485291 - 203.8681218118 - - 90.92844099577 - 203.8681218118 - 25.01577261847 - - - -1377.243922834 - -795.4386681958 - 747.122472413 - - -795.4386681958 - -2497.250719358 - -1840.429221121 - - 747.122472413 - -1840.429221121 - 63.15352494948 - - - 217.522078241 - 79.32830419073 - 153.0067332807 - - 79.32830419073 - 39.05322569843 - -15.05847653376 - - 153.0067332807 - -15.05847653376 - 8.195712590102 - - - 192.1674291662 - 73.24546567179 - 128.9418592581 - - 73.24546567179 - 60.8870259716 - 14.10438172489 - - 128.9418592581 - 14.10438172489 - 4.023771621814 - - - 160.9847149983 - 61.88723830032 - 112.7111318224 - - 61.88723830032 - 56.36412497327 - 19.44285900547 - - 112.7111318224 - 19.44285900547 - 1.958413872367 - - - -3856.411663325 - -1744.213523774 - 2262.533558079 - - -1744.213523774 - -2684.929962548 - -210.0096145524 - - 2262.533558079 - -210.0096145524 - -71.22779112153 - - - 14481.06389629 - 6304.851879955 - -2851.437438832 - - 6304.851879955 - 9273.533330841 - 1517.67089317 - - -2851.437438832 - 1517.67089317 - 32.41624891305 - - - 754.037862043 - 303.007024288 - 520.6337570466 - - 303.007024288 - 387.992522728 - 191.4946509941 - - 520.6337570466 - 191.4946509941 - -62.14538340589 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 147.2292962915 - 56.67228284198 - 105.718994092 - - 56.67228284198 - 52.62002215236 - 
19.90293970874 - - 105.718994092 - 19.90293970874 - 1.297978879663 - - - -3856.411663325 - -1744.213523774 - 2262.533558079 - - -1744.213523774 - -2684.929962548 - -210.0096145524 - - 2262.533558079 - -210.0096145524 - -71.22779112153 - - - 14481.06389629 - 6304.851879955 - -2851.437438832 - - 6304.851879955 - 9273.533330841 - 1517.67089317 - - -2851.437438832 - 1517.67089317 - 32.41624891305 - - - 754.037862043 - 303.007024288 - 520.6337570466 - - 303.007024288 - 387.992522728 - 191.4946509941 - - 520.6337570466 - 191.4946509941 - -62.14538340589 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 147.2292962915 - 56.67228284198 - 105.718994092 - - 56.67228284198 - 52.62002215236 - 19.90293970874 - - 105.718994092 - 19.90293970874 - 1.297978879664 - - - -3856.411663325 - -1744.213523774 - 2262.533558079 - - -1744.213523774 - -2684.929962548 - -210.0096145524 - - 2262.533558079 - -210.0096145524 - -71.22779112153 - - - 14481.06389629 - 6304.851879955 - -2851.437438832 - - 6304.851879955 - 9273.533330841 - 1517.67089317 - - -2851.437438832 - 1517.67089317 - 32.41624891304 - - - 754.037862043 - 303.007024288 - 520.6337570466 - - 303.007024288 - 387.992522728 - 191.4946509941 - - 520.6337570466 - 191.4946509941 - -62.14538340589 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 
341.2992596768 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 147.2292962915 - 56.67228284198 - 105.718994092 - - 56.67228284198 - 52.62002215236 - 19.90293970874 - - 105.718994092 - 19.90293970874 - 1.297978879663 - - - -3856.411663325 - -1744.213523774 - 2262.533558079 - - -1744.213523774 - -2684.929962548 - -210.0096145524 - - 2262.533558079 - -210.0096145524 - -71.22779112153 - - - 14481.06389629 - 6304.851879955 - -2851.437438832 - - 6304.851879955 - 9273.533330841 - 1517.67089317 - - -2851.437438832 - 1517.67089317 - 32.41624891305 - - - 754.037862043 - 303.007024288 - 520.6337570466 - - 303.007024288 - 387.992522728 - 191.4946509941 - - 520.6337570466 - 191.4946509941 - -62.14538340589 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 147.2292962915 - 56.67228284198 - 105.718994092 - - 56.67228284198 - 52.62002215236 - 19.90293970874 - - 105.718994092 - 19.90293970874 - 1.297978879663 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 147.2292962915 - 56.67228284198 - 105.718994092 - - 56.67228284198 - 52.62002215236 - 19.90293970874 - - 105.718994092 - 19.90293970874 - 1.297978879663 - - - -3856.411663325 - -1744.213523774 - 2262.533558079 - - -1744.213523774 - -2684.929962548 - -210.0096145524 - - 2262.533558079 - -210.0096145524 - -71.22779112153 - - - 14481.06389629 - 6304.851879955 - -2851.437438832 - - 6304.851879955 - 9273.533330841 - 1517.67089317 - - -2851.437438832 - 1517.67089317 - 32.41624891304 - - - 2432.916206823 - 1032.638149205 - 
257.4061444594 - - 1032.638149205 - 1449.487149189 - 399.2375243902 - - 257.4061444594 - 399.2375243902 - -48.15069583689 - - - 754.037862043 - 303.007024288 - 520.6337570466 - - 303.007024288 - 387.992522728 - 191.4946509941 - - 520.6337570466 - 191.4946509941 - -62.14538340589 - - - 224.0053660568 - 76.49830322054 - 530.6144385787 - - 76.49830322054 - 68.48505242171 - 118.4056191032 - - 530.6144385787 - 118.4056191032 - -75.99273950177 - - - 224.0053660568 - 76.49830322054 - 530.6144385787 - - 76.49830322054 - 68.48505242171 - 118.4056191032 - - 530.6144385787 - 118.4056191032 - -75.99273950177 - - - -441.5774716513 - -211.0258671001 - 630.8815307116 - - -211.0258671001 - -340.9653527663 - 31.64858923452 - - 630.8815307116 - 31.64858923452 - -131.2299615288 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 643.1868519507 - 289.6433117825 - 69.96405555076 - - 289.6433117825 - 527.631431758 - 287.5422429696 - - 69.96405555076 - 287.5422429696 - 12.86544852284 - - - 207.8475807462 - 78.25234656542 - 139.1055791254 - - 78.25234656542 - 57.38466331417 - 5.647653214982 - - 139.1055791254 - 5.647653214982 - 5.691781115008 - - - 147.2292962915 - 56.67228284198 - 105.718994092 - - 56.67228284198 - 52.62002215236 - 19.90293970874 - - 105.718994092 - 19.90293970874 - 1.297978879663 - - - -3856.411663325 - -1744.213523774 - 2262.533558079 - - -1744.213523774 - -2684.929962548 - -210.0096145524 - - 2262.533558079 - -210.0096145524 - -71.22779112153 - - - 14481.06389629 - 6304.851879955 - -2851.437438832 - - 6304.851879955 - 9273.533330841 - 1517.67089317 - - -2851.437438832 - 1517.67089317 - 32.41624891304 - - - 2432.916206823 - 1032.638149205 - 257.4061444594 - - 1032.638149205 - 1449.487149189 - 399.2375243902 - - 257.4061444594 - 399.2375243902 - -48.15069583689 - - - 754.037862043 - 303.007024288 - 520.6337570466 - - 303.007024288 - 387.992522728 - 
191.4946509941 - - 520.6337570466 - 191.4946509941 - -62.14538340589 - - - 224.0053660568 - 76.49830322054 - 530.6144385787 - - 76.49830322054 - 68.48505242171 - 118.4056191032 - - 530.6144385787 - 118.4056191032 - -75.99273950177 - - - 224.0053660568 - 76.49830322054 - 530.6144385787 - - 76.49830322054 - 68.48505242171 - 118.4056191032 - - 530.6144385787 - 118.4056191032 - -75.99273950177 - - - -441.5774716513 - -211.0258671001 - 630.8815307116 - - -211.0258671001 - -340.9653527663 - 31.64858923452 - - 630.8815307116 - 31.64858923452 - -131.2299615288 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 643.1868519507 - 289.6433117825 - 69.96405555076 - - 289.6433117825 - 527.631431758 - 287.5422429696 - - 69.96405555076 - 287.5422429696 - 12.86544852284 - - - 207.8475807462 - 78.25234656542 - 139.1055791254 - - 78.25234656542 - 57.38466331417 - 5.647653214982 - - 139.1055791254 - 5.647653214982 - 5.691781115008 - - - 147.2292962915 - 56.67228284198 - 105.718994092 - - 56.67228284198 - 52.62002215236 - 19.90293970874 - - 105.718994092 - 19.90293970874 - 1.297978879664 - - - -3856.411663325 - -1744.213523774 - 2262.533558079 - - -1744.213523774 - -2684.929962548 - -210.0096145524 - - 2262.533558079 - -210.0096145524 - -71.22779112153 - - - 14481.06389629 - 6304.851879955 - -2851.437438832 - - 6304.851879955 - 9273.533330841 - 1517.67089317 - - -2851.437438832 - 1517.67089317 - 32.41624891304 - - - 2432.916206823 - 1032.638149205 - 257.4061444594 - - 1032.638149205 - 1449.487149189 - 399.2375243902 - - 257.4061444594 - 399.2375243902 - -48.15069583689 - - - 754.037862043 - 303.007024288 - 520.6337570466 - - 303.007024288 - 387.992522728 - 191.4946509941 - - 520.6337570466 - 191.4946509941 - -62.14538340589 - - - 224.0053660568 - 76.49830322054 - 530.6144385787 - - 76.49830322054 - 68.48505242171 - 118.4056191032 - - 530.6144385787 - 118.4056191032 - 
-75.99273950177 - - - 224.0053660568 - 76.49830322054 - 530.6144385787 - - 76.49830322054 - 68.48505242171 - 118.4056191032 - - 530.6144385787 - 118.4056191032 - -75.99273950177 - - - -441.5774716513 - -211.0258671001 - 630.8815307116 - - -211.0258671001 - -340.9653527663 - 31.64858923452 - - 630.8815307116 - 31.64858923452 - -131.2299615288 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 643.1868519507 - 289.6433117825 - 69.96405555076 - - 289.6433117825 - 527.631431758 - 287.5422429696 - - 69.96405555076 - 287.5422429696 - 12.86544852284 - - - 207.8475807462 - 78.25234656542 - 139.1055791254 - - 78.25234656542 - 57.38466331417 - 5.647653214982 - - 139.1055791254 - 5.647653214982 - 5.691781115008 - - - 147.2292962915 - 56.67228284198 - 105.718994092 - - 56.67228284198 - 52.62002215236 - 19.90293970874 - - 105.718994092 - 19.90293970874 - 1.297978879663 - - - -3856.411663325 - -1744.213523774 - 2262.533558079 - - -1744.213523774 - -2684.929962548 - -210.0096145524 - - 2262.533558079 - -210.0096145524 - -71.22779112153 - - - -10569.10299644 - -4682.769847703 - 3951.415969003 - - -4682.769847703 - -7035.132202115 - -875.1223560398 - - 3951.415969003 - -875.1223560398 - -122.0013610248 - - - 14481.06389629 - 6304.851879955 - -2851.437438832 - - 6304.851879955 - 9273.533330841 - 1517.67089317 - - -2851.437438832 - 1517.67089317 - 32.41624891304 - - - 4252.951031935 - 1848.277580659 - -170.2845897301 - - 1848.277580659 - 2612.860235584 - 805.8419376052 - - -170.2845897301 - 805.8419376052 - -37.20600626185 - - - 2432.916206823 - 1032.638149205 - 257.4061444594 - - 1032.638149205 - 1449.487149189 - 399.2375243902 - - 257.4061444594 - 399.2375243902 - -48.15069583689 - - - 1240.4084327 - 512.7592742227 - 475.6238125956 - - 512.7592742227 - 690.0567114374 - 253.7250568021 - - 475.6238125956 - 253.7250568021 - -57.22030396345 - - - -28.40757635067 - 
-31.79743224237 - 550.3578108482 - - -31.79743224237 - -84.07371396961 - 84.22388465059 - - 550.3578108482 - 84.22388465059 - -92.16014545357 - - - -28.40757635067 - -31.79743224237 - 550.3578108482 - - -31.79743224237 - -84.07371396961 - 84.22388465059 - - 550.3578108482 - 84.22388465059 - -92.16014545357 - - - -2045.110581511 - -913.7367115119 - 1103.831669941 - - -913.7367115119 - -1367.741368628 - -163.7437421998 - - 1103.831669941 - -163.7437421998 - -316.398543429 - - - 1219.218783147 - 525.7884332597 - -29.15120474328 - - 525.7884332597 - 767.5344275996 - 236.8486988899 - - -29.15120474328 - 236.8486988899 - 88.99129419806 - - - 662.0714264794 - 287.5911592069 - 90.92844099577 - - 287.5911592069 - 454.4431485291 - 203.8681218118 - - 90.92844099577 - 203.8681218118 - 25.01577261847 - - - -1377.243922834 - -795.4386681958 - 747.122472413 - - -795.4386681958 - -2497.250719358 - -1840.429221121 - - 747.122472413 - -1840.429221121 - 63.15352494948 - - - 217.522078241 - 79.32830419073 - 153.0067332807 - - 79.32830419073 - 39.05322569843 - -15.05847653376 - - 153.0067332807 - -15.05847653376 - 8.195712590102 - - - 192.1674291662 - 73.24546567179 - 128.9418592581 - - 73.24546567179 - 60.8870259716 - 14.10438172489 - - 128.9418592581 - 14.10438172489 - 4.023771621814 - - - 160.9847149983 - 61.88723830032 - 112.7111318224 - - 61.88723830032 - 56.36412497327 - 19.44285900547 - - 112.7111318224 - 19.44285900547 - 1.958413872367 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 
1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - -3856.411663325 - -1744.213523774 - 2262.533558079 - - -1744.213523774 - -2684.929962548 - -210.0096145524 - - 2262.533558079 - -210.0096145524 - -71.22779112153 - - - 14481.06389629 - 6304.851879955 - -2851.437438832 - - 6304.851879955 - 9273.533330841 - 1517.67089317 - - -2851.437438832 - 1517.67089317 - 32.41624891305 - - - 2432.916206823 - 1032.638149205 - 257.4061444594 - - 1032.638149205 - 1449.487149189 - 399.2375243902 - - 257.4061444594 - 399.2375243902 - -48.15069583689 - - - 754.037862043 - 303.007024288 - 520.6337570466 - - 303.007024288 - 387.992522728 - 191.4946509941 - - 520.6337570466 - 191.4946509941 - -62.14538340589 - - - 224.0053660568 - 76.49830322054 - 530.6144385787 - - 76.49830322054 - 68.48505242171 - 118.4056191032 - - 530.6144385787 - 118.4056191032 - -75.99273950177 - - - -441.5774716513 - -211.0258671001 - 630.8815307116 - - -211.0258671001 - -340.9653527663 - 31.64858923452 - - 630.8815307116 - 31.64858923452 - -131.2299615288 - - - 224.0053660568 - 76.49830322054 - 530.6144385787 - - 76.49830322054 - 68.48505242171 - 118.4056191032 - - 530.6144385787 - 118.4056191032 - -75.99273950177 - - - -441.5774716513 - -211.0258671001 - 630.8815307116 - - -211.0258671001 - -340.9653527663 - 31.64858923452 - - 630.8815307116 - 31.64858923452 - -131.2299615288 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 643.1868519507 - 289.6433117825 - 69.96405555076 - - 289.6433117825 - 527.631431758 - 287.5422429696 
- - 69.96405555076 - 287.5422429696 - 12.86544852284 - - - 207.8475807462 - 78.25234656542 - 139.1055791254 - - 78.25234656542 - 57.38466331417 - 5.647653214982 - - 139.1055791254 - 5.647653214982 - 5.691781115008 - - - 147.2292962915 - 56.67228284198 - 105.718994092 - - 56.67228284198 - 52.62002215236 - 19.90293970874 - - 105.718994092 - 19.90293970874 - 1.297978879664 - - - -3856.411663325 - -1744.213523774 - 2262.533558079 - - -1744.213523774 - -2684.929962548 - -210.0096145524 - - 2262.533558079 - -210.0096145524 - -71.22779112152 - - - 14481.06389629 - 6304.851879955 - -2851.437438832 - - 6304.851879955 - 9273.533330841 - 1517.67089317 - - -2851.437438832 - 1517.67089317 - 32.41624891304 - - - 2432.916206823 - 1032.638149205 - 257.4061444594 - - 1032.638149205 - 1449.487149189 - 399.2375243902 - - 257.4061444594 - 399.2375243902 - -48.15069583689 - - - 754.037862043 - 303.007024288 - 520.6337570466 - - 303.007024288 - 387.992522728 - 191.4946509941 - - 520.6337570466 - 191.4946509941 - -62.14538340589 - - - 224.0053660568 - 76.49830322054 - 530.6144385787 - - 76.49830322054 - 68.48505242171 - 118.4056191032 - - 530.6144385787 - 118.4056191032 - -75.99273950177 - - - -441.5774716513 - -211.0258671001 - 630.8815307116 - - -211.0258671001 - -340.9653527663 - 31.64858923452 - - 630.8815307116 - 31.64858923452 - -131.2299615288 - - - 224.0053660568 - 76.49830322054 - 530.6144385787 - - 76.49830322054 - 68.48505242171 - 118.4056191032 - - 530.6144385787 - 118.4056191032 - -75.99273950177 - - - -441.5774716513 - -211.0258671001 - 630.8815307116 - - -211.0258671001 - -340.9653527663 - 31.64858923452 - - 630.8815307116 - 31.64858923452 - -131.2299615288 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 643.1868519507 - 289.6433117825 - 69.96405555076 - - 289.6433117825 - 527.631431758 - 287.5422429696 - - 69.96405555076 - 287.5422429696 - 12.86544852284 
- - - 207.8475807462 - 78.25234656542 - 139.1055791254 - - 78.25234656542 - 57.38466331417 - 5.647653214982 - - 139.1055791254 - 5.647653214982 - 5.691781115008 - - - 147.2292962915 - 56.67228284198 - 105.718994092 - - 56.67228284198 - 52.62002215236 - 19.90293970874 - - 105.718994092 - 19.90293970874 - 1.297978879663 - - - -3856.411663325 - -1744.213523774 - 2262.533558079 - - -1744.213523774 - -2684.929962548 - -210.0096145524 - - 2262.533558079 - -210.0096145524 - -71.22779112153 - - - 14481.06389629 - 6304.851879955 - -2851.437438832 - - 6304.851879955 - 9273.533330841 - 1517.67089317 - - -2851.437438832 - 1517.67089317 - 32.41624891305 - - - 2432.916206823 - 1032.638149205 - 257.4061444594 - - 1032.638149205 - 1449.487149189 - 399.2375243902 - - 257.4061444594 - 399.2375243902 - -48.15069583689 - - - 754.037862043 - 303.007024288 - 520.6337570466 - - 303.007024288 - 387.992522728 - 191.4946509941 - - 520.6337570466 - 191.4946509941 - -62.14538340589 - - - 224.0053660568 - 76.49830322054 - 530.6144385787 - - 76.49830322054 - 68.48505242171 - 118.4056191032 - - 530.6144385787 - 118.4056191032 - -75.99273950177 - - - -441.5774716513 - -211.0258671001 - 630.8815307116 - - -211.0258671001 - -340.9653527663 - 31.64858923452 - - 630.8815307116 - 31.64858923452 - -131.2299615288 - - - 224.0053660568 - 76.49830322054 - 530.6144385787 - - 76.49830322054 - 68.48505242171 - 118.4056191032 - - 530.6144385787 - 118.4056191032 - -75.99273950177 - - - -441.5774716513 - -211.0258671001 - 630.8815307116 - - -211.0258671001 - -340.9653527663 - 31.64858923452 - - 630.8815307116 - 31.64858923452 - -131.2299615288 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 643.1868519507 - 289.6433117825 - 69.96405555076 - - 289.6433117825 - 527.631431758 - 287.5422429696 - - 69.96405555076 - 287.5422429696 - 12.86544852284 - - - 207.8475807462 - 78.25234656542 - 
139.1055791254 - - 78.25234656542 - 57.38466331417 - 5.647653214982 - - 139.1055791254 - 5.647653214982 - 5.691781115008 - - - 147.2292962915 - 56.67228284198 - 105.718994092 - - 56.67228284198 - 52.62002215236 - 19.90293970874 - - 105.718994092 - 19.90293970874 - 1.297978879664 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - -3856.411663325 - -1744.213523774 - 2262.533558079 - - -1744.213523774 - -2684.929962548 - -210.0096145524 - - 2262.533558079 - -210.0096145524 - -71.22779112153 - - - 14481.06389629 - 6304.851879955 - -2851.437438832 - - 6304.851879955 - 9273.533330841 - 1517.67089317 - - -2851.437438832 - 1517.67089317 - 32.41624891305 - - - 754.037862043 - 303.007024288 - 520.6337570466 - - 303.007024288 - 387.992522728 - 191.4946509941 - - 520.6337570466 - 191.4946509941 - -62.14538340589 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 147.2292962915 - 56.67228284198 - 105.718994092 - - 56.67228284198 - 52.62002215236 - 19.90293970874 - - 105.718994092 - 19.90293970874 - 1.297978879663 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 147.2292962915 - 56.67228284198 - 105.718994092 - - 56.67228284198 - 52.62002215236 - 19.90293970874 - - 105.718994092 - 19.90293970874 - 1.297978879664 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 3350.123265148 - 1460.313549849 - -683.3879923253 - - 1460.313549849 - 2132.90406945 - 
491.0039053136 - - -683.3879923253 - 491.0039053136 - 341.2992596768 - - - 459.1360054272 - 176.8440075451 - 527.6634588185 - - 176.8440075451 - 209.2614271428 - 151.24310776 - - 527.6634588185 - 151.24310776 - -67.51764137812 - - - 459.1360054272 - 176.8440075451 - 527.6634588185 - - 176.8440075451 - 209.2614271428 - 151.24310776 - - 527.6634588185 - 151.24310776 - -67.51764137812 - - - -3856.411663325 - -1744.213523774 - 2262.533558079 - - -1744.213523774 - -2684.929962548 - -210.0096145524 - - 2262.533558079 - -210.0096145524 - -71.22779112152 - - - 14481.06389629 - 6304.851879955 - -2851.437438832 - - 6304.851879955 - 9273.533330841 - 1517.67089317 - - -2851.437438832 - 1517.67089317 - 32.41624891304 - - - 459.1360054272 - 176.8440075451 - 527.6634588185 - - 176.8440075451 - 209.2614271428 - 151.24310776 - - 527.6634588185 - 151.24310776 - -67.51764137812 - - - 459.1360054272 - 176.8440075451 - 527.6634588185 - - 176.8440075451 - 209.2614271428 - 151.24310776 - - 527.6634588185 - 151.24310776 - -67.51764137812 - - - -2045.110581511 - -913.7367115119 - 1103.831669941 - - -913.7367115119 - -1367.741368628 - -163.7437421998 - - 1103.831669941 - -163.7437421998 - -316.398543429 - - - 662.0714264794 - 287.5911592069 - 90.92844099577 - - 287.5911592069 - 454.4431485291 - 203.8681218118 - - 90.92844099577 - 203.8681218118 - 25.01577261847 - - - 459.1360054272 - 176.8440075451 - 527.6634588185 - - 176.8440075451 - 209.2614271428 - 151.24310776 - - 527.6634588185 - 151.24310776 - -67.51764137812 - - - 459.1360054272 - 176.8440075451 - 527.6634588185 - - 176.8440075451 - 209.2614271428 - 151.24310776 - - 527.6634588185 - 151.24310776 - -67.51764137812 - - - -3856.411663325 - -1744.213523774 - 2262.533558079 - - -1744.213523774 - -2684.929962548 - -210.0096145524 - - 2262.533558079 - -210.0096145524 - -71.22779112153 - - - 14481.06389629 - 6304.851879955 - -2851.437438832 - - 6304.851879955 - 9273.533330841 - 1517.67089317 - - -2851.437438832 - 1517.67089317 - 
32.41624891305 - - - -2045.110581511 - -913.7367115119 - 1103.831669941 - - -913.7367115119 - -1367.741368628 - -163.7437421998 - - 1103.831669941 - -163.7437421998 - -316.398543429 - - - 662.0714264794 - 287.5911592069 - 90.92844099577 - - 287.5911592069 - 454.4431485291 - 203.8681218118 - - 90.92844099577 - 203.8681218118 - 25.01577261847 - - - 459.1360054272 - 176.8440075451 - 527.6634588185 - - 176.8440075451 - 209.2614271428 - 151.24310776 - - 527.6634588185 - 151.24310776 - -67.51764137812 - - - 459.1360054272 - 176.8440075451 - 527.6634588185 - - 176.8440075451 - 209.2614271428 - 151.24310776 - - 527.6634588185 - 151.24310776 - -67.51764137812 ================================================ FILE: tests/optimagic/optimizers/_pounders/fixtures/update_residual_model_with_new_accepted_x.yaml ================================================ --- best_x: - 0.15 - 0.008 - 0.01 delta: 0.025 linear_terms: - - 93.72927818337 - 79.52774973986 - 68.0460977844 - 53.31267916955 - 46.22344511051 - 26.56888710767 - -16.71276506001 - -91.11716265854 - -16.71276506001 - -91.11716265855 - -591.8618521414 - 268.2130703199 - 120.9383814367 - 100.8847372434 - 59.6297183433 - 46.17027791586 - 37.07406190688 - 93.72927818338 - 79.52774973994 - 68.0460977844 - 53.31267916955 - 46.22344511051 - 26.56888710767 - -16.71276506001 - -91.11716265855 - -16.71276506001 - -91.11716265854 - -591.8618521414 - 268.2130703199 - 120.9383814367 - 100.8847372434 - 59.6297183433 - 46.17027791586 - 37.07406190688 - 93.72927818337 - 79.52774973995 - 68.0460977844 - 53.31267916955 - 46.22344511055 - 26.56888710767 - -16.71276506001 - -91.11716265855 - -16.71276505999 - -91.11716265854 - -591.8618521414 - 268.2130703199 - 120.9383814367 - 100.8847372434 - 59.6297183433 - 46.17027791586 - 37.07406190688 - 93.72927818337 - 79.52774973986 - 68.0460977844 - 53.31267916961 - 46.22344511055 - 26.56888710767 - -16.71276506001 - -91.11716265855 - -16.71276506001 - -91.11716265855 - -591.8618521414 - 
268.2130703199 - 120.9383814367 - 100.8847372434 - 59.6297183433 - 46.17027791586 - 37.07406190688 - 93.72927818337 - 68.0460977844 - 6.220382700427 - 818.3501514529 - 818.3501514529 - 818.3501514529 - 33.51976192863 - 93.72927818337 - 68.0460977844 - 6.220382700427 - 818.3501514529 - 818.3501514529 - 818.3501514529 - 33.51976192863 - 93.7292781834 - 68.0460977844 - 6.22038270044 - 818.3501514529 - 818.3501514529 - 818.3501514529 - 33.51976192863 - 93.72927818337 - 68.0460977844 - 6.220382700427 - 818.3501514529 - 33.51976192863 - 818.3501514529 - 818.3501514529 - 33.51976192863 - 93.7292781834 - 68.0460977844 - 46.22344511055 - 6.220382700427 - -46.05049956355 - -46.05049956355 - -184.5483589944 - 818.3501514529 - 94.44100065056 - 52.15519868068 - 33.51976192863 - 93.7292781834 - 68.0460977844 - 46.22344511055 - 6.22038270044 - -46.05049956355 - -46.05049956355 - -184.5483589944 - 818.3501514529 - 94.44100065054 - 52.15519868068 - 33.51976192863 - 93.72927818338 - 68.0460977844 - 46.22344511055 - 6.220382700412 - -46.05049956355 - -46.05049956354 - -184.5483589944 - 818.3501514529 - 94.44100065055 - 52.15519868068 - 33.51976192863 - 93.72927818337 - 79.52774973994 - 68.0460977844 - 53.31267916954 - 46.22344511055 - 26.56888710767 - -91.11716265854 - -91.11716265855 - -591.8618521414 - 268.2130703199 - 120.9383814367 - 100.8847372434 - 59.6297183433 - 46.17027791586 - 37.07406190688 - 818.3501514529 - 818.3501514529 - 818.3501514529 - 818.3501514529 - 818.3501514529 - 818.3501514529 - 93.72927818337 - 68.0460977844 - 46.22344511055 - 6.220382700427 - -46.05049956355 - -184.5483589944 - -46.05049956354 - -184.5483589944 - 818.3501514529 - 94.44100065056 - 52.15519868068 - 33.51976192863 - 93.72927818338 - 68.0460977844 - 46.22344511055 - 6.220382700427 - -46.05049956355 - -184.5483589944 - -46.05049956354 - -184.5483589944 - 818.3501514529 - 94.44100065056 - 52.15519868068 - 33.51976192863 - 93.72927818338 - 68.0460977844 - 46.22344511055 - 6.22038270044 - 
-46.05049956355 - -184.5483589944 - -46.05049956355 - -184.5483589944 - 818.3501514529 - 94.44100065055 - 52.15519868068 - 33.51976192863 - 818.3501514529 - 93.72927818337 - 68.0460977844 - 6.220382700427 - 818.3501514529 - 33.51976192863 - 818.3501514529 - 33.51976192863 - 818.3501514529 - 818.3501514529 - 818.3501514529 - -16.71276505999 - -16.71276505999 - 93.72927818338 - 68.0460977844 - -16.71276506001 - -16.71276506001 - -591.8618521414 - 120.9383814367 - -16.71276506001 - -16.71276506001 - 93.72927818337 - 68.0460977844 - -591.8618521414 - 120.9383814367 - -16.71276506001 - -16.71276506001 - - -64.08543563383 - -58.95719282476 - -55.06353725601 - -53.16548062441 - -49.45660019523 - -44.9079525408 - -38.80557953615 - -36.01993057676 - -38.80557953615 - -36.01993057676 - -42.20725725468 - -32.42227560985 - -51.14899556075 - 779.0017047841 - 23.26780440071 - 7.758875524986 - 3.058627447971 - -64.08543563383 - -58.95719282476 - -55.06353725601 - -53.16548062441 - -49.45660019523 - -44.9079525408 - -38.80557953615 - -36.01993057676 - -38.80557953615 - -36.01993057676 - -42.20725725468 - -32.42227560985 - -51.14899556075 - 779.0017047841 - 23.26780440071 - 7.758875524986 - 3.058627447971 - -64.08543563383 - -58.95719282476 - -55.06353725601 - -53.16548062441 - -49.45660019523 - -44.9079525408 - -38.80557953615 - -36.01993057676 - -38.80557953615 - -36.01993057676 - -42.20725725468 - -32.42227560985 - -51.14899556075 - 779.0017047841 - 23.26780440071 - 7.758875524986 - 3.058627447971 - -64.08543563383 - -58.95719282476 - -55.06353725601 - -53.1654806244 - -49.45660019523 - -44.9079525408 - -38.80557953615 - -36.01993057676 - -38.80557953615 - -36.01993057676 - -42.20725725468 - -32.42227560985 - -51.14899556075 - 779.0017047841 - 23.26780440071 - 7.758875524986 - 3.058627447972 - -64.08543563383 - -55.06353725601 - -41.42976641553 - -22.11746019896 - -22.11746019896 - -22.11746019896 - 1.865866778257 - -64.08543563383 - -55.06353725601 - -41.42976641553 - 
-22.11746019896 - -22.11746019896 - -22.11746019896 - 1.865866778257 - -64.08543563383 - -55.06353725601 - -41.42976641553 - -22.11746019896 - -22.11746019896 - -22.11746019896 - 1.865866778257 - -64.08543563383 - -55.06353725601 - -41.42976641553 - -22.11746019896 - 1.865866778257 - -22.11746019896 - -22.11746019896 - 1.865866778257 - -64.08543563383 - -55.06353725601 - -49.45660019523 - -41.42976641553 - -36.97445451644 - -36.97445451644 - -36.42960974813 - -22.11746019896 - -91.56047245988 - 12.83135652938 - 1.865866778257 - -64.08543563383 - -55.06353725601 - -49.45660019523 - -41.42976641553 - -36.97445451644 - -36.97445451644 - -36.42960974813 - -22.11746019896 - -91.56047245988 - 12.83135652938 - 1.865866778257 - -64.08543563383 - -55.06353725601 - -49.45660019523 - -41.42976641554 - -36.97445451644 - -36.97445451644 - -36.42960974813 - -22.11746019896 - -91.56047245988 - 12.83135652938 - 1.865866778257 - -64.08543563383 - -58.95719282476 - -55.06353725601 - -53.16548062441 - -49.45660019523 - -44.9079525408 - -36.01993057676 - -36.01993057676 - -42.20725725468 - -32.42227560985 - -51.14899556075 - 779.0017047841 - 23.26780440071 - 7.758875524986 - 3.058627447972 - -22.11746019896 - -22.11746019896 - -22.11746019896 - -22.11746019896 - -22.11746019896 - -22.11746019896 - -64.08543563383 - -55.06353725601 - -49.45660019523 - -41.42976641553 - -36.97445451644 - -36.42960974813 - -36.97445451644 - -36.42960974813 - -22.11746019896 - -91.56047245988 - 12.83135652938 - 1.865866778257 - -64.08543563383 - -55.06353725601 - -49.45660019523 - -41.42976641553 - -36.97445451644 - -36.42960974813 - -36.97445451644 - -36.42960974813 - -22.11746019896 - -91.56047245988 - 12.83135652938 - 1.865866778257 - -64.08543563383 - -55.06353725601 - -49.45660019523 - -41.42976641553 - -36.97445451644 - -36.42960974813 - -36.97445451644 - -36.42960974813 - -22.11746019896 - -91.56047245988 - 12.83135652938 - 1.865866778257 - -22.11746019896 - -64.08543563383 - -55.06353725601 - 
-41.42976641553 - -22.11746019896 - 1.865866778257 - -22.11746019896 - 1.865866778257 - -22.11746019896 - -22.11746019896 - -22.11746019896 - -38.80557953615 - -38.80557953615 - -64.08543563383 - -55.06353725601 - -38.80557953615 - -38.80557953615 - -42.20725725468 - -51.14899556075 - -38.80557953615 - -38.80557953615 - -64.08543563383 - -55.06353725601 - -42.20725725468 - -51.14899556075 - -38.80557953615 - -38.80557953615 - - 36.49571808313 - 40.75599199724 - 42.73053159154 - 44.48422885803 - 44.06292704726 - 43.77082966126 - 43.87997490331 - 51.29080769453 - 43.87997490331 - 51.29080769453 - 142.8645261718 - -28.76442742577 - -4.013910726576 - 6.349474762352 - 3.32795307968 - 3.743284626869 - 3.766796291069 - 36.49571808312 - 40.75599199722 - 42.73053159154 - 44.48422885803 - 44.06292704726 - 43.77082966126 - 43.87997490331 - 51.29080769453 - 43.87997490331 - 51.29080769453 - 142.8645261718 - -28.76442742577 - -4.013910726575 - 6.349474762352 - 3.32795307968 - 3.743284626869 - 3.766796291069 - 36.49571808313 - 40.75599199722 - 42.73053159154 - 44.48422885803 - 44.06292704726 - 43.77082966126 - 43.87997490331 - 51.29080769453 - 43.8799749033 - 51.29080769453 - 142.8645261718 - -28.76442742578 - -4.013910726575 - 6.349474762352 - 3.32795307968 - 3.743284626868 - 3.766796291069 - 36.49571808313 - 40.75599199724 - 42.73053159154 - 44.48422885802 - 44.06292704726 - 43.77082966126 - 43.87997490331 - 51.29080769453 - 43.87997490331 - 51.29080769453 - 142.8645261718 - -28.76442742578 - -4.013910726577 - 6.349474762352 - 3.32795307968 - 3.743284626869 - 3.766796291069 - 36.49571808313 - 42.73053159154 - 43.46544414082 - -133.4358525068 - -133.4358525068 - -133.4358525068 - 3.702417354566 - 36.49571808313 - 42.73053159154 - 43.46544414082 - -133.4358525068 - -133.4358525068 - -133.4358525068 - 3.702417354565 - 36.49571808312 - 42.73053159154 - 43.46544414082 - -133.4358525068 - -133.4358525068 - -133.4358525068 - 3.702417354566 - 36.49571808313 - 42.73053159154 - 
43.46544414082 - -133.4358525068 - 3.702417354566 - -133.4358525068 - -133.4358525068 - 3.702417354566 - 36.49571808312 - 42.73053159154 - 44.06292704726 - 43.46544414082 - 45.87740009259 - 45.87740009259 - 66.4411216144 - -133.4358525068 - -0.8871808239121 - 3.603391195481 - 3.702417354565 - 36.49571808312 - 42.73053159154 - 44.06292704726 - 43.46544414082 - 45.87740009259 - 45.87740009259 - 66.4411216144 - -133.4358525068 - -0.8871808239087 - 3.60339119548 - 3.702417354565 - 36.49571808312 - 42.73053159154 - 44.06292704726 - 43.46544414082 - 45.87740009259 - 45.87740009258 - 66.4411216144 - -133.4358525068 - -0.8871808239101 - 3.60339119548 - 3.702417354566 - 36.49571808313 - 40.75599199722 - 42.73053159154 - 44.48422885803 - 44.06292704726 - 43.77082966126 - 51.29080769453 - 51.29080769453 - 142.8645261718 - -28.76442742578 - -4.013910726577 - 6.349474762352 - 3.327953079681 - 3.743284626869 - 3.766796291069 - -133.4358525068 - -133.4358525068 - -133.4358525068 - -133.4358525068 - -133.4358525068 - -133.4358525068 - 36.49571808313 - 42.73053159154 - 44.06292704726 - 43.46544414082 - 45.87740009259 - 66.4411216144 - 45.87740009258 - 66.4411216144 - -133.4358525068 - -0.8871808239114 - 3.603391195481 - 3.702417354565 - 36.49571808312 - 42.73053159154 - 44.06292704726 - 43.46544414082 - 45.87740009259 - 66.4411216144 - 45.87740009258 - 66.4411216144 - -133.4358525068 - -0.8871808239114 - 3.60339119548 - 3.702417354565 - 36.49571808312 - 42.73053159154 - 44.06292704726 - 43.46544414082 - 45.87740009259 - 66.4411216144 - 45.87740009259 - 66.4411216144 - -133.4358525068 - -0.8871808239108 - 3.60339119548 - 3.702417354565 - -133.4358525068 - 36.49571808313 - 42.73053159154 - 43.46544414082 - -133.4358525068 - 3.702417354565 - -133.4358525068 - 3.702417354565 - -133.4358525068 - -133.4358525068 - -133.4358525068 - 43.8799749033 - 43.8799749033 - 36.49571808312 - 42.73053159154 - 43.87997490331 - 43.87997490331 - 142.8645261718 - -4.013910726576 - 43.87997490331 - 
43.87997490331 - 36.49571808313 - 42.73053159154 - 142.8645261718 - -4.013910726576 - 43.87997490331 - 43.87997490331 linear_terms_expected: - - 83.4551262143 - 70.20115871443 - 59.39117854221 - 45.24391055618 - 38.62628748021 - 19.82198478816 - -22.14622491455 - -95.66099878835 - -22.14622491455 - -95.66099878836 - -596.6157739812 - 265.7919023746 - 118.7321519868 - 97.69885198231 - 57.88119066151 - 44.6773124466 - 35.78441925523 - 83.45512621431 - 70.20115871451 - 59.39117854221 - 45.24391055618 - 38.62628748021 - 19.82198478816 - -22.14622491455 - -95.66099878836 - -22.14622491455 - -95.66099878835 - -596.6157739812 - 265.7919023746 - 118.7321519868 - 97.69885198231 - 57.88119066151 - 44.6773124466 - 35.78441925523 - 83.4551262143 - 70.20115871453 - 59.39117854221 - 45.24391055618 - 38.62628748025 - 19.82198478816 - -22.14622491455 - -95.66099878836 - -22.14622491453 - -95.66099878835 - -596.6157739812 - 265.7919023746 - 118.7321519868 - 97.69885198231 - 57.88119066151 - 44.6773124466 - 35.78441925523 - 83.4551262143 - 70.20115871443 - 59.39117854221 - 45.24391055624 - 38.62628748025 - 19.82198478816 - -22.14622491455 - -95.66099878836 - -22.14622491455 - -95.66099878836 - -596.6157739812 - 265.7919023746 - 118.7321519868 - 97.69885198231 - 57.88119066151 - 44.6773124466 - 35.78441925523 - 83.4551262143 - 59.39117854221 - 0.1871101139566 - 816.736081667 - 816.736081667 - 816.736081667 - 32.31842399979 - 83.4551262143 - 59.39117854221 - 0.1871101139567 - 816.736081667 - 816.736081667 - 816.736081667 - 32.31842399979 - 83.45512621433 - 59.39117854221 - 0.1871101139701 - 816.736081667 - 816.736081667 - 816.736081667 - 32.31842399979 - 83.4551262143 - 59.39117854221 - 0.1871101139567 - 816.736081667 - 32.31842399979 - 816.736081667 - 816.736081667 - 32.31842399979 - 83.45512621433 - 59.39117854221 - 38.62628748025 - 0.1871101139566 - -50.98637547872 - -50.98637547872 - -188.858358915 - 816.736081667 - 92.44936101936 - 50.5433752969 - 32.31842399979 - 83.45512621433 
- 59.39117854221 - 38.62628748025 - 0.18711011397 - -50.98637547872 - -50.98637547872 - -188.858358915 - 816.736081667 - 92.44936101934 - 50.54337529691 - 32.31842399979 - 83.45512621431 - 59.39117854221 - 38.62628748025 - 0.1871101139419 - -50.98637547872 - -50.98637547871 - -188.858358915 - 816.736081667 - 92.44936101935 - 50.54337529691 - 32.31842399979 - 83.4551262143 - 70.20115871451 - 59.39117854221 - 45.24391055617 - 38.62628748025 - 19.82198478816 - -95.66099878835 - -95.66099878836 - -596.6157739812 - 265.7919023746 - 118.7321519868 - 97.69885198231 - 57.8811906615 - 44.6773124466 - 35.78441925523 - 816.736081667 - 816.736081667 - 816.736081667 - 816.736081667 - 816.736081667 - 816.736081667 - 83.4551262143 - 59.39117854221 - 38.62628748025 - 0.1871101139567 - -50.98637547872 - -188.858358915 - -50.98637547871 - -188.858358915 - 816.736081667 - 92.44936101936 - 50.5433752969 - 32.31842399979 - 83.45512621431 - 59.39117854221 - 38.62628748025 - 0.1871101139566 - -50.98637547872 - -188.858358915 - -50.98637547871 - -188.858358915 - 816.736081667 - 92.44936101936 - 50.54337529691 - 32.31842399979 - 83.45512621431 - 59.39117854221 - 38.62628748025 - 0.18711011397 - -50.98637547872 - -188.858358915 - -50.98637547872 - -188.858358915 - 816.736081667 - 92.44936101935 - 50.54337529691 - 32.31842399979 - 816.736081667 - 83.4551262143 - 59.39117854221 - 0.1871101139567 - 816.736081667 - 32.31842399979 - 816.736081667 - 32.31842399979 - 816.736081667 - 816.736081667 - 816.736081667 - -22.14622491453 - -22.14622491453 - 83.45512621431 - 59.39117854221 - -22.14622491455 - -22.14622491455 - -596.6157739812 - 118.7321519868 - -22.14622491455 - -22.14622491455 - 83.4551262143 - 59.39117854221 - -596.6157739812 - 118.7321519868 - -22.14622491455 - -22.14622491455 - - -65.70818330941 - -60.39894618479 - -56.24495202103 - -56.74595477465 - -51.00210223596 - -46.1667378912 - -39.8057155928 - -36.84709189045 - -39.8057155928 - -36.84709189045 - -42.71855980194 - 
-33.37423700032 - -52.29733914833 - 792.7694136555 - 23.53933160967 - 7.777083680153 - 3.011421814957 - -65.70818330941 - -60.39894618479 - -56.24495202103 - -56.74595477465 - -51.00210223596 - -46.1667378912 - -39.8057155928 - -36.84709189045 - -39.8057155928 - -36.84709189045 - -42.71855980194 - -33.37423700032 - -52.29733914833 - 792.7694136555 - 23.53933160967 - 7.777083680153 - 3.011421814957 - -65.70818330941 - -60.39894618479 - -56.24495202103 - -56.74595477465 - -51.00210223596 - -46.1667378912 - -39.8057155928 - -36.84709189045 - -39.80571559279 - -36.84709189045 - -42.71855980194 - -33.37423700032 - -52.29733914833 - 792.7694136555 - 23.53933160967 - 7.777083680153 - 3.011421814957 - -65.70818330941 - -60.39894618479 - -56.24495202103 - -56.74595477465 - -51.00210223596 - -46.1667378912 - -39.8057155928 - -36.84709189045 - -39.8057155928 - -36.84709189045 - -42.71855980194 - -33.37423700032 - -52.29733914833 - 792.7694136555 - 23.53933160967 - 7.777083680153 - 3.011421814957 - -65.70818330941 - -56.24495202103 - -42.54119035883 - -23.31820321215 - -23.31820321215 - -23.31820321215 - 1.805173509361 - -65.70818330941 - -56.24495202103 - -42.54119035883 - -23.31820321215 - -23.31820321215 - -23.31820321215 - 1.805173509361 - -65.70818330941 - -56.24495202103 - -42.54119035883 - -23.31820321215 - -23.31820321215 - -23.31820321215 - 1.805173509361 - -65.70818330941 - -56.24495202103 - -42.54119035883 - -23.31820321215 - 1.805173509361 - -23.31820321215 - -23.31820321215 - 1.805173509361 - -65.70818330941 - -56.24495202103 - -51.00210223596 - -42.54119035883 - -37.88298121996 - -37.88298121996 - -37.16785677463 - -23.31820321215 - -93.40145763629 - 12.92907306798 - 1.805173509361 - -65.70818330941 - -56.24495202103 - -51.00210223596 - -42.54119035883 - -37.88298121996 - -37.88298121996 - -37.16785677463 - -23.31820321215 - -93.40145763629 - 12.92907306798 - 1.805173509361 - -65.70818330941 - -56.24495202103 - -51.00210223596 - -42.54119035883 - -37.88298121996 
- -37.88298121996 - -37.16785677463 - -23.31820321215 - -93.40145763629 - 12.92907306798 - 1.805173509361 - -65.70818330941 - -60.39894618479 - -56.24495202103 - -56.74595477465 - -51.00210223596 - -46.1667378912 - -36.84709189045 - -36.84709189045 - -42.71855980194 - -33.37423700032 - -52.29733914833 - 792.7694136555 - 23.53933160967 - 7.777083680153 - 3.011421814957 - -23.31820321215 - -23.31820321215 - -23.31820321215 - -23.31820321215 - -23.31820321215 - -23.31820321215 - -65.70818330941 - -56.24495202103 - -51.00210223596 - -42.54119035883 - -37.88298121996 - -37.16785677463 - -37.88298121996 - -37.16785677463 - -23.31820321215 - -93.40145763629 - 12.92907306798 - 1.805173509361 - -65.70818330941 - -56.24495202103 - -51.00210223596 - -42.54119035883 - -37.88298121996 - -37.16785677463 - -37.88298121996 - -37.16785677463 - -23.31820321215 - -93.40145763629 - 12.92907306798 - 1.805173509361 - -65.70818330941 - -56.24495202103 - -51.00210223596 - -42.54119035883 - -37.88298121996 - -37.16785677463 - -37.88298121996 - -37.16785677463 - -23.31820321215 - -93.40145763629 - 12.92907306798 - 1.805173509361 - -23.31820321215 - -65.70818330941 - -56.24495202103 - -42.54119035883 - -23.31820321215 - 1.805173509361 - -23.31820321215 - 1.805173509361 - -23.31820321215 - -23.31820321215 - -23.31820321215 - -39.80571559279 - -39.80571559279 - -65.70818330941 - -56.24495202103 - -39.8057155928 - -39.8057155928 - -42.71855980194 - -52.29733914833 - -39.8057155928 - -39.8057155928 - -65.70818330941 - -56.24495202103 - -42.71855980194 - -52.29733914833 - -39.8057155928 - -39.8057155928 - - 35.67527604537 - 40.13513697208 - 42.22265142152 - 44.3706588541 - 43.79191549807 - 43.60185566328 - 43.89691265518 - 51.559374423 - 43.89691265518 - 51.559374423 - 144.6484148813 - -29.52793630843 - -4.29051063361 - 4.275018249209 - 3.077593180459 - 3.571453211328 - 3.635446234161 - 35.67527604537 - 40.13513697207 - 42.22265142152 - 44.3706588541 - 43.79191549807 - 43.60185566328 - 
43.89691265518 - 51.559374423 - 43.89691265518 - 51.559374423 - 144.6484148813 - -29.52793630843 - -4.290510633609 - 4.275018249209 - 3.077593180459 - 3.571453211328 - 3.635446234161 - 35.67527604537 - 40.13513697206 - 42.22265142152 - 44.3706588541 - 43.79191549806 - 43.60185566328 - 43.89691265518 - 51.559374423 - 43.89691265518 - 51.559374423 - 144.6484148813 - -29.52793630843 - -4.290510633609 - 4.275018249209 - 3.077593180459 - 3.571453211327 - 3.635446234161 - 35.67527604537 - 40.13513697208 - 42.22265142152 - 44.37065885408 - 43.79191549806 - 43.60185566328 - 43.89691265518 - 51.559374423 - 43.89691265518 - 51.559374423 - 144.6484148813 - -29.52793630843 - -4.290510633611 - 4.275018249209 - 3.077593180459 - 3.571453211328 - 3.635446234161 - 35.67527604537 - 42.22265142152 - 43.39074205828 - -135.8537067485 - -135.8537067485 - -135.8537067485 - 3.585520164357 - 35.67527604537 - 42.22265142152 - 43.39074205828 - -135.8537067485 - -135.8537067485 - -135.8537067485 - 3.585520164356 - 35.67527604536 - 42.22265142152 - 43.39074205828 - -135.8537067485 - -135.8537067485 - -135.8537067485 - 3.585520164357 - 35.67527604537 - 42.22265142152 - 43.39074205828 - -135.8537067485 - 3.585520164357 - -135.8537067485 - -135.8537067485 - 3.585520164357 - 35.67527604536 - 42.22265142152 - 43.79191549806 - 43.39074205828 - 45.9986361207 - 45.9986361207 - 66.99917852268 - -135.8537067485 - -0.9909636364664 - 3.40064839132 - 3.585520164357 - 35.67527604536 - 42.22265142152 - 43.79191549806 - 43.39074205828 - 45.9986361207 - 45.9986361207 - 66.99917852268 - -135.8537067485 - -0.990963636463 - 3.400648391319 - 3.585520164356 - 35.67527604537 - 42.22265142152 - 43.79191549806 - 43.39074205829 - 45.9986361207 - 45.9986361207 - 66.99917852268 - -135.8537067485 - -0.9909636364644 - 3.400648391319 - 3.585520164357 - 35.67527604537 - 40.13513697207 - 42.22265142152 - 44.3706588541 - 43.79191549806 - 43.60185566328 - 51.559374423 - 51.559374423 - 144.6484148813 - -29.52793630843 - 
-4.290510633611 - 4.275018249209 - 3.077593180459 - 3.571453211328 - 3.635446234161 - -135.8537067485 - -135.8537067485 - -135.8537067485 - -135.8537067485 - -135.8537067485 - -135.8537067485 - 35.67527604537 - 42.22265142152 - 43.79191549806 - 43.39074205828 - 45.9986361207 - 66.99917852268 - 45.9986361207 - 66.99917852268 - -135.8537067485 - -0.9909636364657 - 3.40064839132 - 3.585520164356 - 35.67527604537 - 42.22265142152 - 43.79191549806 - 43.39074205828 - 45.9986361207 - 66.99917852268 - 45.9986361207 - 66.99917852268 - -135.8537067485 - -0.9909636364657 - 3.400648391319 - 3.585520164357 - 35.67527604537 - 42.22265142152 - 43.79191549806 - 43.39074205828 - 45.9986361207 - 66.99917852268 - 45.9986361207 - 66.99917852268 - -135.8537067485 - -0.9909636364651 - 3.400648391319 - 3.585520164356 - -135.8537067485 - 35.67527604537 - 42.22265142152 - 43.39074205828 - -135.8537067485 - 3.585520164357 - -135.8537067485 - 3.585520164357 - -135.8537067485 - -135.8537067485 - -135.8537067485 - 43.89691265518 - 43.89691265518 - 35.67527604537 - 42.22265142152 - 43.89691265518 - 43.89691265518 - 144.6484148813 - -4.29051063361 - 43.89691265518 - 43.89691265518 - 35.67527604537 - 42.22265142152 - 144.6484148813 - -4.29051063361 - 43.89691265518 - 43.89691265518 residuals: - 21.53511643627 - 14.80453604351 - 6.548558251064 - 12.54188075473 - 9.282890198608 - 2.859555210712 - 0.9381817894678 - 0.2048532883114 - 0.8881817894678 - 0.3798532883114 - -0.9101956814319 - -1.36444138824 - -0.9351994446357 - -1.055070381505 - -1.111335532899 - -0.1703442432756 - 1.580641245921 - 19.23511643627 - 13.00453604351 - 13.94855825106 - 11.24188075473 - 6.182890198608 - -1.240444789288 - -0.8618182105322 - -1.995146711689 - -0.9868182105322 - -1.270146711689 - -1.135195681432 - -0.9144413882404 - -3.072699444636 - -1.317570381505 - -0.9238355328992 - 0.9546557567244 - -0.3318587540789 - 8.635116436265 - 15.10453604351 - 6.148558251063 - 4.841880754733 - 5.382890198608 - 2.059555210712 - 
-3.361818210532 - -2.995146711689 - -3.311818210532 - -2.395146711689 - -2.185195681432 - -2.63944138824 - -1.985199444636 - -1.880070381505 - -1.711335532899 - -1.407844243276 - -0.4818587540789 - 2.735116436265 - 3.404536043506 - 3.148558251063 - 3.141880754733 - 2.482890198608 - 0.5595552107122 - -0.7618182105322 - -2.995146711689 - -0.7993182105322 - -2.245146711689 - -1.885195681432 - -1.96444138824 - -1.647699444636 - -2.292570381505 - -1.486335532899 - -1.557844243276 - -0.8193587540789 - 10.13511643627 - 4.748558251063 - -2.218096467799 - -4.369688200573 - -3.659688200573 - -1.219688200573 - -0.3489655844206 - 6.635116436265 - 2.248558251063 - -1.518096467799 - -2.939688200573 - -4.029688200573 - -2.159688200573 - -2.038965584421 - 5.435116436265 - 3.348558251064 - -1.818096467799 - -2.909688200573 - -4.969688200573 - -3.469688200573 - -0.5389655844206 - 6.635116436265 - 5.848558251064 - -0.918096467799 - -4.219688200573 - -0.3489655844206 - -4.029688200573 - -3.659688200573 - -0.5389655844206 - 5.435116436265 - 2.348558251064 - -0.0171098013921 - -2.718096467799 - -4.257793595776 - -3.887793595776 - -2.006947842151 - -2.829688200573 - -0.1835757519589 - 0.8557490906722 - 0.6910344155794 - 4.435116436265 - 4.348558251064 - 0.9828901986079 - 0.481903532201 - -6.457793595776 - -6.137793595776 - -1.516947842151 - -4.029688200573 - -1.013575751959 - -0.8342509093278 - 1.441034415579 - -0.8648835637348 - 1.848558251064 - 0.6828901986079 - 1.081903532201 - -5.457793595776 - -4.787793595776 - 0.1730521578493 - -1.139688200573 - -3.263575751959 - 0.4057490906722 - 4.141034415579 - 6.635116436265 - 2.104536043506 - 4.348558251064 - 5.641880754733 - -0.1171098013921 - -2.640444789288 - -3.195146711689 - -2.325146711689 - -4.777695681432 - -5.49444138824 - -4.762699444636 - -5.027570381505 - -3.966335532899 - -3.510344243276 - -2.694358754079 - 7.410311799427 - 8.980311799427 - 1.290311799427 - -4.969688200573 - -4.709688200573 - -0.659688200573 - -0.5648835637348 - 
-2.951441748936 - 0.1828901986079 - 5.081903532201 - 3.342206404224 - 2.873052157849 - 3.162206404224 - 2.983052157849 - 0.920311799427 - 0.1164242480411 - 3.925749090672 - 2.761034415579 - 9.335116436265 - 3.648558251063 - -0.3171098013921 - -5.718096467799 - -2.457793595776 - -3.126947842151 - -1.897793595776 - -2.156947842151 - -0.539688200573 - -2.693575751959 - -0.2742509093278 - 2.531034415579 - -4.664883563735 - 1.548558251064 - -7.017109801392 - -4.018096467799 - -0.7577935957756 - -4.526947842151 - -0.4677935957756 - -3.876947842151 - -3.429688200573 - -2.813575751959 - -0.9442509093278 - 0.6610344155794 - -3.089688200573 - 9.635116436265 - 6.848558251064 - 0.781903532201 - -3.469688200573 - -1.108965584421 - -3.839688200573 - -0.9189655844206 - -1.589688200573 - -2.159688200573 - -1.139688200573 - -4.661818210532 - -4.211818210532 - 10.33511643627 - 3.948558251064 - -0.3618182105322 - -0.3518182105322 - -1.477695681432 - -2.132699444636 - -1.761818210532 - -1.471818210532 - 9.935116436265 - 3.248558251063 - -1.997695681432 - -2.472699444636 - -1.261818210532 - -1.211818210532 residuals_expected: - 19.6353723292 - 12.84033601748 - 4.582720606307 - 10.57944901886 - 7.388011778232 - 1.080920572465 - -0.6242875339148 - -1.259097736466 - -0.6742875339148 - -1.084097736466 - -3.217135650199 - -1.742307946594 - -1.554717017541 - 1.384057264504 - -1.402675175922 - -0.4764291246294 - 1.296405523471 - 17.3353723292 - 11.04033601748 - 11.98272060631 - 9.27944901886 - 4.288011778232 - -3.019079427535 - -2.424287533915 - -3.459097736466 - -2.549287533915 - -2.734097736466 - -3.442135650199 - -1.292307946594 - -3.692217017541 - 1.121557264504 - -1.215175175922 - 0.6485708753706 - -0.616094476529 - 6.735372329195 - 13.14033601748 - 4.182720606307 - 2.87944901886 - 3.488011778232 - 0.2809205724649 - -4.924287533915 - -4.459097736466 - -4.874287533915 - -3.859097736466 - -4.492135650199 - -3.017307946594 - -2.604717017541 - 0.5590572645039 - -2.002675175922 - 
-1.713929124629 - -0.766094476529 - 0.8353723291955 - 1.440336017483 - 1.182720606307 - 1.17944901886 - 0.588011778232 - -1.219079427535 - -2.324287533915 - -4.459097736466 - -2.361787533915 - -3.709097736466 - -4.192135650199 - -2.342307946594 - -2.267217017541 - 0.1465572645039 - -1.777675175922 - -1.863929124629 - -1.103594476529 - 8.235372329195 - 2.782720606307 - -3.880822074155 - -3.670161022919 - -2.960161022919 - -0.5201610229187 - -0.6195992900418 - 4.735372329195 - 0.2827206063069 - -3.180822074155 - -2.240161022919 - -3.330161022919 - -1.460161022919 - -2.309599290042 - 3.535372329196 - 1.382720606307 - -3.480822074155 - -2.210161022919 - -4.270161022919 - -2.770161022919 - -0.8095992900418 - 4.735372329195 - 3.882720606307 - -2.580822074155 - -3.520161022919 - -0.6195992900418 - -3.330161022919 - -2.960161022919 - -0.8095992900418 - 3.535372329196 - 0.3827206063069 - -1.911988221768 - -4.380822074155 - -5.747347392322 - -5.377347392322 - -3.559846094244 - -2.130161022919 - -0.9497554490721 - 0.547414971422 - 0.4204007099582 - 2.535372329196 - 2.382720606307 - -0.911988221768 - -1.180822074155 - -7.947347392322 - -7.627347392322 - -3.069846094244 - -3.330161022919 - -1.779755449072 - -1.142585028578 - 1.170400709958 - -2.764627670804 - -0.1172793936931 - -1.211988221768 - -0.5808220741553 - -6.947347392322 - -6.277347392322 - -1.379846094244 - -0.4401610229187 - -4.029755449072 - 0.09741497142202 - 3.870400709958 - 4.735372329195 - 0.1403360174832 - 2.382720606307 - 3.67944901886 - -2.011988221768 - -4.419079427535 - -4.659097736466 - -3.789097736466 - -7.084635650199 - -5.872307946594 - -5.382217017541 - -2.588442735496 - -4.257675175922 - -3.816429124629 - -2.978594476529 - 8.109838977081 - 9.679838977081 - 1.989838977081 - -4.270161022919 - -4.010161022919 - 0.0398389770813 - -2.464627670805 - -4.917279393693 - -1.711988221768 - 3.419177925845 - 1.852652607678 - 1.320153905756 - 1.672652607678 - 1.430153905756 - 1.619838977081 - -0.6497554490721 - 
3.617414971422 - 2.490400709958 - 7.435372329196 - 1.682720606307 - -2.211988221768 - -7.380822074155 - -3.947347392322 - -4.679846094244 - -3.387347392322 - -3.709846094244 - 0.1598389770813 - -3.459755449072 - -0.582585028578 - 2.260400709958 - -6.564627670804 - -0.4172793936931 - -8.911988221768 - -5.680822074155 - -2.247347392322 - -6.079846094244 - -1.957347392322 - -5.429846094244 - -2.730161022919 - -3.579755449072 - -1.252585028578 - 0.3904007099582 - -2.390161022919 - 7.735372329195 - 4.882720606307 - -0.8808220741553 - -2.770161022919 - -1.379599290042 - -3.140161022919 - -1.189599290042 - -0.8901610229187 - -1.460161022919 - -0.4401610229187 - -6.224287533915 - -5.774287533915 - 8.435372329196 - 1.982720606307 - -1.924287533915 - -1.914287533915 - -3.784635650199 - -2.752217017541 - -3.324287533915 - -3.034287533915 - 8.035372329195 - 1.282720606307 - -4.304635650199 - -3.092217017541 - -2.824287533915 - -2.774287533915 square_terms: - - - 42.05642503987 - 5.182308979952 - 295.7086970924 - - 5.182308979952 - -16.8011933671 - 44.88068608775 - - 295.7086970924 - 44.88068608775 - -11.16800948171 - - - 37.76206570488 - 4.598069813344 - 268.480673393 - - 4.598069813344 - -15.06222715154 - 39.86023126116 - - 268.480673393 - 39.86023126116 - -13.87397890211 - - - 34.52716237442 - 3.742228341934 - 249.1555844695 - - 3.742228341934 - -13.95687891579 - 32.4806538628 - - 249.1555844695 - 32.4806538628 - -15.39308605662 - - - 36.36211362787 - 11.74965491243 - 232.6612213924 - - 11.74965491243 - -14.33678876201 - 101.5946337199 - - 232.6612213924 - 101.5946337199 - -16.74890647466 - - - 30.88627308451 - 4.976024773829 - 218.8203159401 - - 4.976024773829 - -12.55705651792 - 43.13446872147 - - 218.8203159401 - 43.13446872147 - -16.96609172267 - - - 26.99562208048 - 4.039388146136 - 194.3460658399 - - 4.039388146136 - -10.904018831 - 35.05642111527 - - 194.3460658399 - 35.05642111527 - -17.54258356033 - - - 21.28989652643 - 3.211915555773 - 156.5687870814 - - 
3.211915555773 - -7.888244440813 - 27.94191248902 - - 156.5687870814 - 27.94191248902 - -18.64926539005 - - - 17.09406685234 - 2.66162436771 - 131.0277600318 - - 2.66162436771 - -5.180832861658 - 23.26291444469 - - 131.0277600318 - 23.26291444469 - -23.07010638636 - - - 21.28989652643 - 3.211915555773 - 156.5687870814 - - 3.211915555773 - -7.888244440813 - 27.94191248902 - - 156.5687870814 - 27.94191248902 - -18.64926539005 - - - 17.09406685234 - 2.66162436771 - 131.0277600318 - - 2.66162436771 - -5.180832861658 - 23.26291444469 - - 131.0277600318 - 23.26291444469 - -23.07010638636 - - - 13.22112788651 - 1.569537501602 - 137.5825625705 - - 1.569537501602 - -4.091404567032 - 14.28797929438 - - 137.5825625705 - 14.28797929438 - -69.36780402294 - - - 12.39426977847 - 3.219862122548 - 69.57634867759 - - 3.219862122548 - 3.931549769208 - 27.88803537784 - - 69.57634867759 - 27.88803537784 - 16.06772788907 - - - 10.59582926181 - 3.961203981524 - 63.6129919848 - - 3.961203981524 - 13.8089243593 - 34.67215199382 - - 63.6129919848 - 34.67215199382 - 3.402916211 - - - -4.590996625593 - -49.8578626567 - 88.18903535997 - - -49.8578626567 - -392.7968335729 - -441.4737183738 - - 88.18903535997 - -441.4737183738 - -2.005253308587 - - - 6.609230403572 - -1.114542090064 - 50.17123518647 - - -1.114542090064 - -20.03956487444 - -10.10106952879 - - 50.17123518647 - -10.10106952879 - -0.6749898814763 - - - 5.867803812028 - -0.1814136458974 - 42.89612529825 - - -0.1814136458974 - -11.28413222756 - -1.804764358727 - - 42.89612529825 - -1.804764358727 - -1.028734386942 - - - 5.122058993026 - 0.07028381229468 - 37.07301548541 - - 0.07028381229468 - -8.007614389428 - 0.4527765498708 - - 37.07301548541 - 0.4527765498708 - -1.160231423527 - - - 42.05642503987 - 5.182308979952 - 295.7086970924 - - 5.182308979952 - -16.8011933671 - 44.88068608775 - - 295.7086970924 - 44.88068608775 - -11.16800948171 - - - 37.76206570488 - 4.598069813344 - 268.480673393 - - 4.598069813344 - -15.06222715154 - 
39.86023126116 - - 268.480673393 - 39.86023126116 - -13.8739789021 - - - 34.52716237442 - 3.742228341934 - 249.1555844695 - - 3.742228341934 - -13.95687891579 - 32.4806538628 - - 249.1555844695 - 32.4806538628 - -15.39308605662 - - - 36.36211362787 - 11.74965491243 - 232.6612213924 - - 11.74965491243 - -14.33678876201 - 101.5946337199 - - 232.6612213924 - 101.5946337199 - -16.74890647466 - - - 30.88627308451 - 4.976024773829 - 218.8203159401 - - 4.976024773829 - -12.55705651792 - 43.13446872147 - - 218.8203159401 - 43.13446872147 - -16.96609172267 - - - 26.99562208048 - 4.039388146136 - 194.3460658399 - - 4.039388146136 - -10.904018831 - 35.05642111527 - - 194.3460658399 - 35.05642111527 - -17.54258356033 - - - 21.28989652643 - 3.211915555773 - 156.5687870814 - - 3.211915555773 - -7.888244440814 - 27.94191248902 - - 156.5687870814 - 27.94191248902 - -18.64926539005 - - - 17.09406685234 - 2.66162436771 - 131.0277600318 - - 2.66162436771 - -5.180832861658 - 23.26291444469 - - 131.0277600318 - 23.26291444469 - -23.07010638636 - - - 21.28989652643 - 3.211915555773 - 156.5687870814 - - 3.211915555773 - -7.888244440814 - 27.94191248902 - - 156.5687870814 - 27.94191248902 - -18.64926539005 - - - 17.09406685234 - 2.66162436771 - 131.0277600318 - - 2.66162436771 - -5.180832861658 - 23.26291444469 - - 131.0277600318 - 23.26291444469 - -23.07010638636 - - - 13.22112788651 - 1.569537501602 - 137.5825625705 - - 1.569537501602 - -4.091404567032 - 14.28797929438 - - 137.5825625705 - 14.28797929438 - -69.36780402294 - - - 12.39426977847 - 3.219862122548 - 69.57634867759 - - 3.219862122548 - 3.931549769208 - 27.88803537784 - - 69.57634867759 - 27.88803537784 - 16.06772788907 - - - 10.59582926181 - 3.961203981524 - 63.6129919848 - - 3.961203981524 - 13.8089243593 - 34.67215199382 - - 63.6129919848 - 34.67215199382 - 3.402916211 - - - -4.590996625593 - -49.8578626567 - 88.18903535997 - - -49.8578626567 - -392.7968335729 - -441.4737183738 - - 88.18903535997 - -441.4737183738 - 
-2.005253308587 - - - 6.609230403572 - -1.114542090064 - 50.17123518647 - - -1.114542090064 - -20.03956487444 - -10.10106952879 - - 50.17123518647 - -10.10106952879 - -0.6749898814763 - - - 5.867803812028 - -0.1814136458974 - 42.89612529825 - - -0.1814136458974 - -11.28413222756 - -1.804764358727 - - 42.89612529825 - -1.804764358727 - -1.028734386942 - - - 5.122058993026 - 0.07028381229468 - 37.07301548541 - - 0.07028381229468 - -8.007614389428 - 0.4527765498708 - - 37.07301548541 - 0.4527765498708 - -1.160231423527 - - - 42.05642503987 - 5.182308979952 - 295.7086970924 - - 5.182308979952 - -16.8011933671 - 44.88068608775 - - 295.7086970924 - 44.88068608775 - -11.16800948171 - - - 37.76206570488 - 4.598069813344 - 268.480673393 - - 4.598069813344 - -15.06222715154 - 39.86023126116 - - 268.480673393 - 39.86023126116 - -13.8739789021 - - - 34.52716237442 - 3.742228341934 - 249.1555844695 - - 3.742228341934 - -13.95687891579 - 32.4806538628 - - 249.1555844695 - 32.4806538628 - -15.39308605662 - - - 36.36211362787 - 11.74965491243 - 232.6612213924 - - 11.74965491243 - -14.33678876201 - 101.5946337199 - - 232.6612213924 - 101.5946337199 - -16.74890647466 - - - 30.88627308451 - 4.976024773829 - 218.8203159401 - - 4.976024773829 - -12.55705651792 - 43.13446872147 - - 218.8203159401 - 43.13446872147 - -16.96609172267 - - - 26.99562208048 - 4.039388146136 - 194.3460658399 - - 4.039388146136 - -10.904018831 - 35.05642111527 - - 194.3460658399 - 35.05642111527 - -17.54258356033 - - - 21.28989652643 - 3.211915555773 - 156.5687870814 - - 3.211915555773 - -7.888244440813 - 27.94191248902 - - 156.5687870814 - 27.94191248902 - -18.64926539005 - - - 17.09406685234 - 2.66162436771 - 131.0277600318 - - 2.66162436771 - -5.180832861658 - 23.26291444469 - - 131.0277600318 - 23.26291444469 - -23.07010638636 - - - 21.28989652643 - 3.211915555773 - 156.5687870814 - - 3.211915555773 - -7.888244440813 - 27.94191248902 - - 156.5687870814 - 27.94191248902 - -18.64926539005 - - - 17.09406685234 
- 2.66162436771 - 131.0277600318 - - 2.66162436771 - -5.180832861658 - 23.26291444469 - - 131.0277600318 - 23.26291444469 - -23.07010638636 - - - 13.22112788651 - 1.569537501602 - 137.5825625705 - - 1.569537501602 - -4.091404567032 - 14.28797929438 - - 137.5825625705 - 14.28797929438 - -69.36780402294 - - - 12.39426977847 - 3.219862122548 - 69.57634867759 - - 3.219862122548 - 3.931549769208 - 27.88803537784 - - 69.57634867759 - 27.88803537784 - 16.06772788907 - - - 10.59582926181 - 3.961203981524 - 63.6129919848 - - 3.961203981524 - 13.8089243593 - 34.67215199382 - - 63.6129919848 - 34.67215199382 - 3.402916211 - - - -4.590996625593 - -49.8578626567 - 88.18903535997 - - -49.8578626567 - -392.7968335729 - -441.4737183738 - - 88.18903535997 - -441.4737183738 - -2.005253308587 - - - 6.609230403572 - -1.114542090064 - 50.17123518647 - - -1.114542090064 - -20.03956487444 - -10.10106952879 - - 50.17123518647 - -10.10106952879 - -0.6749898814763 - - - 5.867803812028 - -0.1814136458974 - 42.89612529825 - - -0.1814136458974 - -11.28413222756 - -1.804764358727 - - 42.89612529825 - -1.804764358727 - -1.028734386942 - - - 5.122058993026 - 0.07028381229468 - 37.07301548541 - - 0.07028381229468 - -8.007614389428 - 0.4527765498708 - - 37.07301548541 - 0.4527765498708 - -1.160231423527 - - - 42.05642503987 - 5.182308979952 - 295.7086970924 - - 5.182308979952 - -16.8011933671 - 44.88068608775 - - 295.7086970924 - 44.88068608775 - -11.16800948171 - - - 37.76206570488 - 4.598069813344 - 268.480673393 - - 4.598069813344 - -15.06222715154 - 39.86023126116 - - 268.480673393 - 39.86023126116 - -13.87397890211 - - - 34.52716237442 - 3.742228341934 - 249.1555844695 - - 3.742228341934 - -13.95687891579 - 32.4806538628 - - 249.1555844695 - 32.4806538628 - -15.39308605662 - - - 36.36211362787 - 11.74965491243 - 232.6612213924 - - 11.74965491243 - -14.33678876201 - 101.5946337199 - - 232.6612213924 - 101.5946337199 - -16.74890647466 - - - 30.88627308451 - 4.976024773829 - 218.8203159401 - - 
4.976024773829 - -12.55705651792 - 43.13446872147 - - 218.8203159401 - 43.13446872147 - -16.96609172267 - - - 26.99562208048 - 4.039388146136 - 194.3460658399 - - 4.039388146136 - -10.904018831 - 35.05642111527 - - 194.3460658399 - 35.05642111527 - -17.54258356033 - - - 21.28989652643 - 3.211915555773 - 156.5687870814 - - 3.211915555773 - -7.888244440813 - 27.94191248902 - - 156.5687870814 - 27.94191248902 - -18.64926539005 - - - 17.09406685234 - 2.66162436771 - 131.0277600318 - - 2.66162436771 - -5.180832861658 - 23.26291444469 - - 131.0277600318 - 23.26291444469 - -23.07010638636 - - - 21.28989652643 - 3.211915555773 - 156.5687870814 - - 3.211915555773 - -7.888244440814 - 27.94191248902 - - 156.5687870814 - 27.94191248902 - -18.64926539005 - - - 17.09406685234 - 2.66162436771 - 131.0277600318 - - 2.66162436771 - -5.180832861658 - 23.26291444469 - - 131.0277600318 - 23.26291444469 - -23.07010638636 - - - 13.22112788651 - 1.569537501602 - 137.5825625705 - - 1.569537501602 - -4.091404567032 - 14.28797929438 - - 137.5825625705 - 14.28797929438 - -69.36780402294 - - - 12.39426977847 - 3.219862122548 - 69.57634867759 - - 3.219862122548 - 3.931549769208 - 27.88803537784 - - 69.57634867759 - 27.88803537784 - 16.06772788907 - - - 10.59582926181 - 3.961203981524 - 63.6129919848 - - 3.961203981524 - 13.8089243593 - 34.67215199382 - - 63.6129919848 - 34.67215199382 - 3.402916211001 - - - -4.590996625593 - -49.8578626567 - 88.18903535997 - - -49.8578626567 - -392.7968335729 - -441.4737183738 - - 88.18903535997 - -441.4737183738 - -2.005253308587 - - - 6.609230403572 - -1.114542090064 - 50.17123518647 - - -1.114542090064 - -20.03956487444 - -10.10106952879 - - 50.17123518647 - -10.10106952879 - -0.6749898814763 - - - 5.867803812028 - -0.1814136458974 - 42.89612529825 - - -0.1814136458974 - -11.28413222756 - -1.804764358727 - - 42.89612529825 - -1.804764358727 - -1.028734386942 - - - 5.122058993026 - 0.07028381229468 - 37.07301548541 - - 0.07028381229468 - -8.007614389428 - 
0.4527765498708 - - 37.07301548541 - 0.4527765498708 - -1.160231423527 - - - 42.05642503987 - 5.182308979952 - 295.7086970924 - - 5.182308979952 - -16.8011933671 - 44.88068608775 - - 295.7086970924 - 44.88068608775 - -11.16800948171 - - - 34.52716237442 - 3.742228341934 - 249.1555844695 - - 3.742228341934 - -13.95687891579 - 32.4806538628 - - 249.1555844695 - 32.4806538628 - -15.39308605662 - - - 23.89982318472 - 3.566267197888 - 173.817334734 - - 3.566267197888 - -9.358360214064 - 30.98344929428 - - 173.817334734 - 30.98344929428 - -17.96992259777 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 4.783039455404 - 0.1257874954057 - 34.53987590883 - - 0.1257874954057 - -6.993256198462 - 0.9566846715419 - - 34.53987590883 - 0.9566846715419 - -1.179917631133 - - - 42.05642503987 - 5.182308979952 - 295.7086970924 - - 5.182308979952 - -16.8011933671 - 44.88068608775 - - 295.7086970924 - 44.88068608775 - -11.16800948171 - - - 34.52716237442 - 3.742228341934 - 249.1555844695 - - 3.742228341934 - -13.95687891579 - 32.4806538628 - - 249.1555844695 - 32.4806538628 - -15.39308605662 - - - 23.89982318472 - 3.566267197888 - 173.817334734 - - 3.566267197888 - -9.358360214064 - 30.98344929428 - - 173.817334734 - 30.98344929428 - -17.96992259777 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 
68.57957805078 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 4.783039455404 - 0.1257874954058 - 34.53987590883 - - 0.1257874954058 - -6.993256198462 - 0.9566846715422 - - 34.53987590883 - 0.9566846715422 - -1.179917631132 - - - 42.05642503987 - 5.182308979952 - 295.7086970924 - - 5.182308979952 - -16.8011933671 - 44.88068608775 - - 295.7086970924 - 44.88068608775 - -11.16800948171 - - - 34.52716237442 - 3.742228341934 - 249.1555844695 - - 3.742228341934 - -13.95687891579 - 32.4806538628 - - 249.1555844695 - 32.4806538628 - -15.39308605662 - - - 23.89982318472 - 3.566267197888 - 173.817334734 - - 3.566267197888 - -9.358360214064 - 30.98344929428 - - 173.817334734 - 30.98344929428 - -17.96992259777 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 4.783039455404 - 0.1257874954057 - 34.53987590883 - - 0.1257874954057 - -6.993256198462 - 0.9566846715419 - - 34.53987590883 - 0.9566846715419 - -1.179917631133 - - - 42.05642503987 - 5.182308979952 - 295.7086970924 - - 5.182308979952 - -16.8011933671 - 44.88068608775 - - 295.7086970924 - 44.88068608775 - -11.16800948171 - - - 34.52716237442 - 3.742228341934 - 249.1555844695 - - 3.742228341934 - -13.95687891579 - 32.4806538628 - - 249.1555844695 - 32.4806538628 - -15.39308605662 - - - 23.89982318472 - 3.566267197888 - 173.817334734 - - 3.566267197888 - -9.358360214064 - 30.98344929428 - - 173.817334734 - 30.98344929428 - -17.96992259777 - - - 14.37037076795 - 4.106940832789 
- 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 4.783039455404 - 0.1257874954057 - 34.53987590883 - - 0.1257874954057 - -6.993256198462 - 0.9566846715419 - - 34.53987590883 - 0.9566846715419 - -1.179917631133 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 4.783039455404 - 0.1257874954057 - 34.53987590883 - - 0.1257874954057 - -6.993256198462 - 0.9566846715419 - - 34.53987590883 - 0.9566846715419 - -1.179917631133 - - - 42.05642503987 - 5.182308979952 - 295.7086970924 - - 5.182308979952 - -16.8011933671 - 44.88068608775 - - 295.7086970924 - 44.88068608775 - -11.16800948171 - - - 34.52716237442 - 3.742228341934 - 249.1555844695 - - 3.742228341934 - -13.95687891579 - 32.4806538628 - - 249.1555844695 - 32.4806538628 - -15.39308605662 - - - 30.88627308451 - 4.976024773829 - 218.8203159401 - - 4.976024773829 - -12.55705651792 - 43.13446872147 - - 218.8203159401 - 43.13446872147 - -16.96609172267 - - - 23.89982318472 - 3.566267197888 - 173.817334734 - - 3.566267197888 - -9.358360214064 - 30.98344929428 - - 173.817334734 - 30.98344929428 - -17.96992259777 - - - 19.05038877716 - 2.921211127977 - 142.2705130619 - - 2.921211127977 - -6.493471507915 - 25.45927037319 - - 142.2705130619 - 25.45927037319 - -20.03762618713 - - - 19.05038877716 - 2.921211127977 - 142.2705130619 - - 2.921211127977 - -6.493471507915 - 25.45927037319 - - 142.2705130619 - 25.45927037319 - -20.03762618713 - - - 15.31308103373 - 2.369394773812 - 124.3899129792 - - 2.369394773812 - -4.034968539624 - 20.83080340569 - - 124.3899129792 - 20.83080340569 - -30.92076532995 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 
3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 10.44273226791 - 6.475979422676 - 57.63907853267 - - 6.475979422676 - 34.37966124455 - 56.97442501698 - - 57.63907853267 - 56.97442501698 - 1.719659134263 - - - 6.258067891218 - -0.4773313726774 - 46.28932429293 - - -0.4773313726774 - -14.3215416894 - -4.441525949927 - - 46.28932429293 - -4.441525949927 - -0.8894811141752 - - - 4.783039455404 - 0.1257874954057 - 34.53987590883 - - 0.1257874954057 - -6.993256198462 - 0.956684671542 - - 34.53987590883 - 0.956684671542 - -1.179917631133 - - - 42.05642503987 - 5.182308979952 - 295.7086970924 - - 5.182308979952 - -16.8011933671 - 44.88068608775 - - 295.7086970924 - 44.88068608775 - -11.16800948171 - - - 34.52716237442 - 3.742228341934 - 249.1555844695 - - 3.742228341934 - -13.95687891579 - 32.4806538628 - - 249.1555844695 - 32.4806538628 - -15.39308605662 - - - 30.88627308451 - 4.976024773829 - 218.8203159401 - - 4.976024773829 - -12.55705651792 - 43.13446872147 - - 218.8203159401 - 43.13446872147 - -16.96609172267 - - - 23.89982318472 - 3.566267197888 - 173.817334734 - - 3.566267197888 - -9.358360214064 - 30.98344929428 - - 173.817334734 - 30.98344929428 - -17.96992259777 - - - 19.05038877716 - 2.921211127977 - 142.2705130619 - - 2.921211127977 - -6.493471507915 - 25.45927037319 - - 142.2705130619 - 25.45927037319 - -20.03762618713 - - - 19.05038877716 - 2.921211127977 - 142.2705130619 - - 2.921211127977 - -6.493471507915 - 25.45927037319 - - 142.2705130619 - 25.45927037319 - -20.03762618713 - - - 15.31308103373 - 2.369394773812 - 124.3899129792 - - 2.369394773812 - -4.034968539624 - 20.83080340569 - - 124.3899129792 - 20.83080340569 - -30.92076532995 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 10.44273226791 - 6.475979422676 - 57.63907853267 - - 6.475979422676 - 34.37966124455 - 56.97442501698 - - 
57.63907853267 - 56.97442501698 - 1.719659134262 - - - 6.258067891218 - -0.4773313726774 - 46.28932429293 - - -0.4773313726774 - -14.3215416894 - -4.441525949927 - - 46.28932429293 - -4.441525949927 - -0.889481114175 - - - 4.783039455404 - 0.1257874954058 - 34.53987590883 - - 0.1257874954058 - -6.993256198462 - 0.956684671542 - - 34.53987590883 - 0.956684671542 - -1.179917631132 - - - 42.05642503987 - 5.182308979952 - 295.7086970924 - - 5.182308979952 - -16.8011933671 - 44.88068608775 - - 295.7086970924 - 44.88068608775 - -11.16800948171 - - - 34.52716237442 - 3.742228341934 - 249.1555844695 - - 3.742228341934 - -13.95687891579 - 32.4806538628 - - 249.1555844695 - 32.4806538628 - -15.39308605662 - - - 30.88627308451 - 4.976024773829 - 218.8203159401 - - 4.976024773829 - -12.55705651792 - 43.13446872147 - - 218.8203159401 - 43.13446872147 - -16.96609172267 - - - 23.89982318472 - 3.566267197888 - 173.817334734 - - 3.566267197888 - -9.358360214064 - 30.98344929428 - - 173.817334734 - 30.98344929428 - -17.96992259777 - - - 19.05038877716 - 2.921211127977 - 142.2705130619 - - 2.921211127977 - -6.493471507915 - 25.45927037319 - - 142.2705130619 - 25.45927037319 - -20.03762618713 - - - 19.05038877716 - 2.921211127977 - 142.2705130619 - - 2.921211127977 - -6.493471507915 - 25.45927037319 - - 142.2705130619 - 25.45927037319 - -20.03762618713 - - - 15.31308103373 - 2.369394773812 - 124.3899129792 - - 2.369394773812 - -4.034968539624 - 20.83080340569 - - 124.3899129792 - 20.83080340569 - -30.92076532995 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 10.44273226791 - 6.475979422676 - 57.63907853267 - - 6.475979422676 - 34.37966124455 - 56.97442501698 - - 57.63907853267 - 56.97442501698 - 1.719659134262 - - - 6.258067891218 - -0.4773313726774 - 46.28932429293 - - -0.4773313726774 - -14.3215416894 - -4.441525949927 - - 46.28932429293 - -4.441525949927 - 
-0.889481114175 - - - 4.783039455404 - 0.1257874954057 - 34.53987590883 - - 0.1257874954057 - -6.993256198462 - 0.9566846715419 - - 34.53987590883 - 0.9566846715419 - -1.179917631133 - - - 42.05642503987 - 5.182308979952 - 295.7086970924 - - 5.182308979952 - -16.8011933671 - 44.88068608775 - - 295.7086970924 - 44.88068608775 - -11.16800948171 - - - 37.76206570488 - 4.598069813344 - 268.480673393 - - 4.598069813344 - -15.06222715154 - 39.86023126116 - - 268.480673393 - 39.86023126116 - -13.8739789021 - - - 34.52716237442 - 3.742228341934 - 249.1555844695 - - 3.742228341934 - -13.95687891579 - 32.4806538628 - - 249.1555844695 - 32.4806538628 - -15.39308605662 - - - 36.36211362787 - 11.74965491243 - 232.6612213924 - - 11.74965491243 - -14.33678876201 - 101.5946337199 - - 232.6612213924 - 101.5946337199 - -16.74890647466 - - - 30.88627308451 - 4.976024773829 - 218.8203159401 - - 4.976024773829 - -12.55705651792 - 43.13446872147 - - 218.8203159401 - 43.13446872147 - -16.96609172267 - - - 26.99562208048 - 4.039388146136 - 194.3460658399 - - 4.039388146136 - -10.904018831 - 35.05642111527 - - 194.3460658399 - 35.05642111527 - -17.54258356033 - - - 17.09406685234 - 2.66162436771 - 131.0277600318 - - 2.66162436771 - -5.180832861657 - 23.26291444469 - - 131.0277600318 - 23.26291444469 - -23.07010638636 - - - 17.09406685234 - 2.66162436771 - 131.0277600318 - - 2.66162436771 - -5.180832861658 - 23.26291444469 - - 131.0277600318 - 23.26291444469 - -23.07010638636 - - - 13.22112788651 - 1.569537501602 - 137.5825625705 - - 1.569537501602 - -4.091404567032 - 14.28797929438 - - 137.5825625705 - 14.28797929438 - -69.36780402294 - - - 12.39426977847 - 3.219862122548 - 69.57634867759 - - 3.219862122548 - 3.931549769208 - 27.88803537784 - - 69.57634867759 - 27.88803537784 - 16.06772788907 - - - 10.59582926181 - 3.961203981524 - 63.6129919848 - - 3.961203981524 - 13.8089243593 - 34.67215199382 - - 63.6129919848 - 34.67215199382 - 3.402916211001 - - - -4.590996625593 - -49.8578626567 - 
88.18903535997 - - -49.8578626567 - -392.7968335729 - -441.4737183738 - - 88.18903535997 - -441.4737183738 - -2.005253308587 - - - 6.609230403572 - -1.114542090064 - 50.17123518647 - - -1.114542090064 - -20.03956487444 - -10.10106952879 - - 50.17123518647 - -10.10106952879 - -0.6749898814764 - - - 5.867803812028 - -0.1814136458974 - 42.89612529825 - - -0.1814136458974 - -11.28413222756 - -1.804764358727 - - 42.89612529825 - -1.804764358727 - -1.028734386942 - - - 5.122058993026 - 0.07028381229466 - 37.07301548541 - - 0.07028381229466 - -8.007614389428 - 0.4527765498708 - - 37.07301548541 - 0.4527765498708 - -1.160231423527 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 42.05642503987 - 5.182308979952 - 295.7086970924 - - 5.182308979952 - -16.8011933671 - 44.88068608775 - - 295.7086970924 - 44.88068608775 - -11.16800948171 - - - 34.52716237442 - 3.742228341934 - 249.1555844695 - - 3.742228341934 - -13.95687891579 - 32.4806538628 - - 249.1555844695 - 32.4806538628 - -15.39308605662 - - - 30.88627308451 - 4.976024773829 - 218.8203159401 - - 4.976024773829 - 
-12.55705651792 - 43.13446872147 - - 218.8203159401 - 43.13446872147 - -16.96609172267 - - - 23.89982318472 - 3.566267197888 - 173.817334734 - - 3.566267197888 - -9.358360214064 - 30.98344929428 - - 173.817334734 - 30.98344929428 - -17.96992259777 - - - 19.05038877716 - 2.921211127977 - 142.2705130619 - - 2.921211127977 - -6.493471507915 - 25.45927037319 - - 142.2705130619 - 25.45927037319 - -20.03762618713 - - - 15.31308103373 - 2.369394773812 - 124.3899129792 - - 2.369394773812 - -4.034968539623 - 20.83080340569 - - 124.3899129792 - 20.83080340569 - -30.92076532995 - - - 19.05038877716 - 2.921211127977 - 142.2705130619 - - 2.921211127977 - -6.493471507915 - 25.45927037319 - - 142.2705130619 - 25.45927037319 - -20.03762618713 - - - 15.31308103373 - 2.369394773812 - 124.3899129792 - - 2.369394773812 - -4.034968539624 - 20.83080340569 - - 124.3899129792 - 20.83080340569 - -30.92076532995 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 10.44273226791 - 6.475979422676 - 57.63907853267 - - 6.475979422676 - 34.37966124455 - 56.97442501698 - - 57.63907853267 - 56.97442501698 - 1.719659134263 - - - 6.258067891218 - -0.4773313726774 - 46.28932429293 - - -0.4773313726774 - -14.3215416894 - -4.441525949927 - - 46.28932429293 - -4.441525949927 - -0.8894811141752 - - - 4.783039455404 - 0.1257874954058 - 34.53987590883 - - 0.1257874954058 - -6.993256198462 - 0.956684671542 - - 34.53987590883 - 0.956684671542 - -1.179917631132 - - - 42.05642503987 - 5.182308979952 - 295.7086970924 - - 5.182308979952 - -16.8011933671 - 44.88068608775 - - 295.7086970924 - 44.88068608775 - -11.16800948171 - - - 34.52716237442 - 3.742228341934 - 249.1555844695 - - 3.742228341934 - -13.95687891579 - 32.4806538628 - - 249.1555844695 - 32.4806538628 - -15.39308605662 - - - 30.88627308451 - 4.976024773829 - 218.8203159401 - - 4.976024773829 - -12.55705651792 - 43.13446872147 - - 
218.8203159401 - 43.13446872147 - -16.96609172267 - - - 23.89982318472 - 3.566267197888 - 173.817334734 - - 3.566267197888 - -9.358360214064 - 30.98344929428 - - 173.817334734 - 30.98344929428 - -17.96992259777 - - - 19.05038877716 - 2.921211127977 - 142.2705130619 - - 2.921211127977 - -6.493471507915 - 25.45927037319 - - 142.2705130619 - 25.45927037319 - -20.03762618713 - - - 15.31308103373 - 2.369394773812 - 124.3899129792 - - 2.369394773812 - -4.034968539623 - 20.83080340569 - - 124.3899129792 - 20.83080340569 - -30.92076532995 - - - 19.05038877716 - 2.921211127977 - 142.2705130619 - - 2.921211127977 - -6.493471507915 - 25.45927037319 - - 142.2705130619 - 25.45927037319 - -20.03762618713 - - - 15.31308103373 - 2.369394773812 - 124.3899129792 - - 2.369394773812 - -4.034968539623 - 20.83080340569 - - 124.3899129792 - 20.83080340569 - -30.92076532995 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 10.44273226791 - 6.475979422676 - 57.63907853267 - - 6.475979422676 - 34.37966124455 - 56.97442501698 - - 57.63907853267 - 56.97442501698 - 1.719659134263 - - - 6.258067891218 - -0.4773313726774 - 46.28932429293 - - -0.4773313726774 - -14.3215416894 - -4.441525949927 - - 46.28932429293 - -4.441525949927 - -0.889481114175 - - - 4.783039455404 - 0.1257874954057 - 34.53987590883 - - 0.1257874954057 - -6.993256198462 - 0.956684671542 - - 34.53987590883 - 0.956684671542 - -1.179917631133 - - - 42.05642503987 - 5.182308979952 - 295.7086970924 - - 5.182308979952 - -16.8011933671 - 44.88068608775 - - 295.7086970924 - 44.88068608775 - -11.16800948171 - - - 34.52716237442 - 3.742228341934 - 249.1555844695 - - 3.742228341934 - -13.95687891579 - 32.4806538628 - - 249.1555844695 - 32.4806538628 - -15.39308605662 - - - 30.88627308451 - 4.976024773829 - 218.8203159401 - - 4.976024773829 - -12.55705651792 - 43.13446872147 - - 218.8203159401 - 43.13446872147 - 
-16.96609172267 - - - 23.89982318472 - 3.566267197888 - 173.817334734 - - 3.566267197888 - -9.358360214064 - 30.98344929428 - - 173.817334734 - 30.98344929428 - -17.96992259777 - - - 19.05038877716 - 2.921211127977 - 142.2705130619 - - 2.921211127977 - -6.493471507915 - 25.45927037319 - - 142.2705130619 - 25.45927037319 - -20.03762618713 - - - 15.31308103373 - 2.369394773812 - 124.3899129792 - - 2.369394773812 - -4.034968539623 - 20.83080340569 - - 124.3899129792 - 20.83080340569 - -30.92076532995 - - - 19.05038877716 - 2.921211127977 - 142.2705130619 - - 2.921211127977 - -6.493471507915 - 25.45927037319 - - 142.2705130619 - 25.45927037319 - -20.03762618713 - - - 15.31308103373 - 2.369394773812 - 124.3899129792 - - 2.369394773812 - -4.034968539623 - 20.83080340569 - - 124.3899129792 - 20.83080340569 - -30.92076532995 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 10.44273226791 - 6.475979422676 - 57.63907853267 - - 6.475979422676 - 34.37966124455 - 56.97442501698 - - 57.63907853267 - 56.97442501698 - 1.719659134262 - - - 6.258067891218 - -0.4773313726774 - 46.28932429293 - - -0.4773313726774 - -14.3215416894 - -4.441525949927 - - 46.28932429293 - -4.441525949927 - -0.889481114175 - - - 4.783039455404 - 0.1257874954058 - 34.53987590883 - - 0.1257874954058 - -6.993256198462 - 0.956684671542 - - 34.53987590883 - 0.956684671542 - -1.179917631132 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 42.05642503987 - 5.182308979952 - 295.7086970924 - - 5.182308979952 - -16.8011933671 - 44.88068608775 - - 295.7086970924 - 44.88068608775 - -11.16800948171 - - - 34.52716237442 - 3.742228341934 - 249.1555844695 - - 3.742228341934 - -13.95687891579 - 32.4806538628 - - 249.1555844695 - 32.4806538628 - -15.39308605662 - - - 23.89982318472 - 
3.566267197888 - 173.817334734 - - 3.566267197888 - -9.358360214064 - 30.98344929428 - - 173.817334734 - 30.98344929428 - -17.96992259777 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 4.783039455404 - 0.1257874954057 - 34.53987590883 - - 0.1257874954057 - -6.993256198462 - 0.956684671542 - - 34.53987590883 - 0.956684671542 - -1.179917631133 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 4.783039455404 - 0.1257874954057 - 34.53987590883 - - 0.1257874954057 - -6.993256198462 - 0.956684671542 - - 34.53987590883 - 0.956684671542 - -1.179917631133 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 14.37037076795 - 4.106940832789 - 45.77485567203 - - 4.106940832789 - 3.781269465646 - 35.03465443111 - - 45.77485567203 - 35.03465443111 - 68.57957805078 - - - 21.28989652643 - 3.211915555773 - 156.5687870814 - - 3.211915555773 - -7.888244440813 - 27.94191248902 - - 156.5687870814 - 27.94191248902 - -18.64926539005 - - - 21.28989652643 - 3.211915555773 - 156.5687870814 - - 3.211915555773 - -7.888244440813 - 27.94191248902 - - 156.5687870814 - 27.94191248902 - -18.64926539005 - - - 42.05642503987 - 5.182308979952 - 295.7086970924 - - 5.182308979952 - -16.8011933671 - 44.88068608775 - - 295.7086970924 - 44.88068608775 - -11.16800948171 - - - 34.52716237442 - 3.742228341934 - 249.1555844695 - - 3.742228341934 - -13.95687891579 - 32.4806538628 - - 249.1555844695 - 32.4806538628 - -15.39308605662 - - - 21.28989652643 - 3.211915555773 - 156.5687870814 - - 3.211915555773 - 
-7.888244440814 - 27.94191248902 - - 156.5687870814 - 27.94191248902 - -18.64926539005 - - - 21.28989652643 - 3.211915555773 - 156.5687870814 - - 3.211915555773 - -7.888244440813 - 27.94191248902 - - 156.5687870814 - 27.94191248902 - -18.64926539005 - - - 13.22112788651 - 1.569537501602 - 137.5825625705 - - 1.569537501602 - -4.091404567033 - 14.28797929438 - - 137.5825625705 - 14.28797929438 - -69.36780402294 - - - 10.59582926181 - 3.961203981524 - 63.6129919848 - - 3.961203981524 - 13.8089243593 - 34.67215199382 - - 63.6129919848 - 34.67215199382 - 3.402916211 - - - 21.28989652643 - 3.211915555773 - 156.5687870814 - - 3.211915555773 - -7.888244440813 - 27.94191248902 - - 156.5687870814 - 27.94191248902 - -18.64926539005 - - - 21.28989652643 - 3.211915555773 - 156.5687870814 - - 3.211915555773 - -7.888244440814 - 27.94191248902 - - 156.5687870814 - 27.94191248902 - -18.64926539005 - - - 42.05642503987 - 5.182308979952 - 295.7086970924 - - 5.182308979952 - -16.8011933671 - 44.88068608775 - - 295.7086970924 - 44.88068608775 - -11.16800948171 - - - 34.52716237442 - 3.742228341934 - 249.1555844695 - - 3.742228341934 - -13.95687891579 - 32.4806538628 - - 249.1555844695 - 32.4806538628 - -15.39308605662 - - - 13.22112788651 - 1.569537501602 - 137.5825625705 - - 1.569537501602 - -4.091404567032 - 14.28797929438 - - 137.5825625705 - 14.28797929438 - -69.36780402294 - - - 10.59582926181 - 3.961203981524 - 63.6129919848 - - 3.961203981524 - 13.8089243593 - 34.67215199382 - - 63.6129919848 - 34.67215199382 - 3.402916211 - - - 21.28989652643 - 3.211915555773 - 156.5687870814 - - 3.211915555773 - -7.888244440813 - 27.94191248902 - - 156.5687870814 - 27.94191248902 - -18.64926539005 - - - 21.28989652643 - 3.211915555773 - 156.5687870814 - - 3.211915555773 - -7.888244440813 - 27.94191248902 - - 156.5687870814 - 27.94191248902 - -18.64926539005 x_candidate_uncentered: - 0.149883507892 - 0.008098080768719 - 0.009146244784311 ================================================ FILE: 
tests/optimagic/optimizers/_pounders/test_linear_subsolvers.py ================================================ """Test suite for linear trust-region subsolvers.""" import math import numpy as np import pytest from numpy.testing import assert_array_almost_equal as aaae from optimagic.optimizers._pounders.linear_subsolvers import ( LinearModel, improve_geomtery_trsbox_linear, minimize_trsbox_linear, ) @pytest.mark.parametrize( "model_gradient, lower_bounds, upper_bounds, delta, expected", [ ( np.array([1.0, 0.0, 1.0]), -np.ones(3), np.ones(3), 2.0, np.array([-1.0, 0.0, -1.0]), ), ( np.array([0.00028774, 0.00763968, 0.01217268]), -np.ones(3), np.ones(3), 9.5367431640625e-05, np.array([-1.90902854e-06, -5.06859218e-05, -8.07603861e-05]), ), ( np.array([0.00028774, 0.00763968, 0.01217268]), np.array([0, -1, -1]), np.ones(3), 0.1, np.array([0.0, -5.31586927e-02, -8.47003742e-02]), ), ( np.arange(5) * 0.1, -np.ones(5), np.ones(5), 0.1, np.array([0.0, -0.01825742, -0.03651484, -0.05477226, -0.07302967]), ), ( np.arange(4, -1, -1) * 0.1, -np.ones(5), np.ones(5), 0.1, np.array([-0.07302967, -0.05477226, -0.03651484, -0.01825742, 0]), ), ( np.arange(5) * 0.1, np.array([-1, -1, 0, -1, -1]), np.array([1, 1, 0.2, 0.2, 1]), 0.1, np.array([0.0, -1.96116135e-02, 0.0, -5.88348405e-02, -7.84464541e-02]), ), ( np.arange(4, -1, -1) * 0.1, np.array([-1, -1, -1, -1, 0]), np.array([0.3, 0.3, 1, 1, 1]), 0.1, np.array([-0.07302967, -0.05477226, -0.03651484, -0.01825742, 0.0]), ), ], ) def test_trsbox_linear(model_gradient, lower_bounds, upper_bounds, delta, expected): linear_model = LinearModel(linear_terms=model_gradient) x_out = minimize_trsbox_linear(linear_model, lower_bounds, upper_bounds, delta) aaae(x_out, expected) @pytest.mark.parametrize( "x_center, c_term, model_gradient, lower_bounds, upper_bounds, delta, expected", [ ( np.array([0.0, 0.0]), -1.0, np.array([1.0, -1.0]), np.array([-2.0, -2.0]), np.array([1.0, 2.0]), 2.0, np.array([-math.sqrt(2.0), math.sqrt(2.0)]), ), ( 
np.array([0.0, 0.0]), -1.0, np.array([1.0, -1.0]), np.array([-2.0, -2.0]), np.array([1.0, 2.0]), 5.0, np.array([-2.0, 2.0]), ), ( np.array([0.0, 0.0]) + 1, 3.0, np.array([1.0, -1.0]), np.array([-2.0, -2.0]) + 1, np.array([1.0, 2.0]) + 1, 5.0, np.array([1.0, -2.0]) + 1, ), ( np.array([0.0, 0.0]), -1.0, np.array([-1.0, -1.0]), np.array([-2.0, -2.0]), np.array([0.1, 0.9]), math.sqrt(2.0), np.array([0.1, 0.9]), ), ( np.array([0.0, 0.0, 0.0]), -1.0, np.array([-1.0, -1.0, -1.0]), np.array([-2.0, -2.0, -2.0]), np.array([0.9, 0.1, 5.0]), math.sqrt(3.0), np.array([0.9, 0.1, math.sqrt(3.0 - 0.81 - 0.01)]), ), ( np.array([0.0, 0.0]), 0.0, np.array([1e-15, -1.0]), np.array([-2.0, -2.0]), np.array([1.0, 2.0]), 5.0, np.array([0.0, 2.0]), ), ( np.array([0.0, 0.0]), 0.0, np.array([1e-15, 0.0]), np.array([-2.0, -2.0]), np.array([1.0, 2.0]), 5.0, np.array([0.0, 0.0]), ), ], ) def test_trsbox_geometry( x_center, c_term, model_gradient, lower_bounds, upper_bounds, delta, expected, ): linear_model = LinearModel(intercept=c_term, linear_terms=model_gradient) x_out = improve_geomtery_trsbox_linear( x_center, linear_model, lower_bounds, upper_bounds, delta, ) aaae(x_out, expected) ================================================ FILE: tests/optimagic/optimizers/_pounders/test_pounders_history.py ================================================ """Test the history class for least-squares optimizers.""" import numpy as np import pytest from numpy.testing import assert_array_almost_equal as aaae from optimagic.optimizers._pounders.pounders_history import LeastSquaresHistory ENTRIES = [ (np.arange(3), [np.arange(5)]), ([np.arange(3)], list(range(5))), (np.arange(3).reshape(1, 3), np.arange(5).reshape(1, 5)), ] TEST_CASES = [] for entries in ENTRIES: for is_center in True, False: TEST_CASES.append((entries, is_center)) @pytest.mark.parametrize("entries, is_center", TEST_CASES) def test_add_entries_not_initialized(entries, is_center): history = LeastSquaresHistory() if is_center: c_info = {"x": 
np.zeros(3), "residuals": np.zeros(5), "radius": 1} history.add_centered_entries(*entries, c_info) else: history.add_entries(*entries) xs, residuals, critvals = history.get_entries() xs_sinlge = history.get_xs() residuals_sinlge = history.get_residuals() critvals_sinlge = history.get_critvals() for entry in xs, residuals, critvals: assert isinstance(entry, np.ndarray) aaae(xs, np.arange(3).reshape(1, 3)) aaae(xs_sinlge, np.arange(3).reshape(1, 3)) aaae(residuals, np.arange(5).reshape(1, 5)) aaae(residuals_sinlge, np.arange(5).reshape(1, 5)) aaae(critvals, np.array([30.0])) aaae(critvals_sinlge, np.array([30.0])) @pytest.mark.parametrize("entries, is_center", TEST_CASES) def test_add_entries_initialized_with_space(entries, is_center): history = LeastSquaresHistory() history.add_entries(np.ones((4, 3)), np.zeros((4, 5))) if is_center: c_info = {"x": np.zeros(3), "residuals": np.zeros(5), "radius": 1} history.add_centered_entries(*entries, c_info) else: history.add_entries(*entries) xs, residuals, critvals = history.get_entries(index=-1) xs_sinlge = history.get_xs(index=-1) residuals_sinlge = history.get_residuals(index=-1) critvals_sinlge = history.get_critvals(index=-1) for entry in xs, residuals: assert isinstance(entry, np.ndarray) aaae(xs, np.arange(3)) aaae(xs_sinlge, np.arange(3)) aaae(residuals, np.arange(5)) aaae(residuals_sinlge, np.arange(5)) assert critvals == 30 assert critvals_sinlge == 30 def test_add_entries_initialized_extension_needed(): history = LeastSquaresHistory() history.add_entries(np.ones((4, 3)), np.zeros((4, 5))) history.xs = history.xs[:5] history.residuals = history.residuals[:5] history.critvals = history.critvals[:5] history.add_entries(np.arange(12).reshape(4, 3), np.arange(20).reshape(4, 5)) assert len(history.xs) == 10 assert len(history.residuals) == 10 assert len(history.critvals) == 10 xs, residuals, _ = history.get_entries(index=-1) xs_sinlge = history.get_xs(index=-1) residuals_sinlge = history.get_residuals(index=-1) for entry 
in xs, xs_sinlge, residuals, residuals_sinlge: assert isinstance(entry, np.ndarray) assert history.get_n_fun() == 8 def test_add_centered_entries(): history = LeastSquaresHistory() history.add_entries(np.ones((2, 2)), np.ones((2, 4))) center_info = { "x": history.get_xs(index=-1), "residuals": history.get_residuals(index=-1), "radius": 0.5, } history.add_centered_entries( xs=np.ones(2), residuals=np.ones(4) * 2, center_info=center_info ) xs, residuals, critvals = history.get_entries(index=-1) aaae(xs, np.array([1.5, 1.5])) aaae(residuals, np.array([3, 3, 3, 3])) assert critvals == 36 assert history.get_n_fun() == 3 def test_get_centered_entries(): history = LeastSquaresHistory() history.add_entries(np.ones((4, 3)), np.ones((4, 5))) center_info = { "x": np.arange(3), "residuals": np.arange(5), "radius": 0.25, } xs, residuals, critvals = history.get_centered_entries( center_info=center_info, index=-1 ) aaae(xs, np.array([4, 0, -4])) aaae(residuals, np.arange(1, -4, -1)) assert critvals == 15 assert history.get_n_fun() == 4 ================================================ FILE: tests/optimagic/optimizers/_pounders/test_pounders_unit.py ================================================ """Test the auxiliary functions of the pounders algorithm.""" from collections import namedtuple from functools import partial from pathlib import Path import numpy as np import pandas as pd import pytest import yaml from numpy.testing import assert_array_almost_equal as aaae from optimagic.optimizers._pounders.pounders_auxiliary import ( add_geomtery_points_to_make_main_model_fully_linear, create_initial_residual_model, create_main_from_residual_model, evaluate_residual_model, find_affine_points, fit_residual_model, get_feature_matrices_residual_model, update_main_model_with_new_accepted_x, update_residual_model, update_residual_model_with_new_accepted_x, ) from optimagic.optimizers._pounders.pounders_history import LeastSquaresHistory FIXTURES_DIR = Path(__file__).parent / "fixtures" 
def read_yaml(path): with open(rf"{path}") as file: data = yaml.full_load(file) return data # ====================================================================================== # Fixtures # ====================================================================================== @pytest.fixture() def criterion(): data = pd.read_csv(FIXTURES_DIR / "pounders_example_data.csv") endog = np.asarray(data["y"]) exog = np.asarray(data["t"]) def func(x: np.ndarray, exog: np.ndarray, endog: np.ndarray) -> np.ndarray: """User provided residual function.""" return endog - np.exp(-x[0] * exog) / (x[1] + x[2] * exog) return partial(func, exog=exog, endog=endog) @pytest.fixture() def data_create_initial_residual_model(): test_data = read_yaml(FIXTURES_DIR / "update_initial_residual_model.yaml") history = LeastSquaresHistory() ResidualModel = namedtuple( "ResidualModel", ["intercepts", "linear_terms", "square_terms"] ) history.add_entries( np.array(test_data["x_candidate"]), np.array(test_data["residuals_candidate"]), ) accepted_index = 0 delta = 0.1 inputs_dict = {"history": history, "accepted_index": accepted_index, "delta": delta} residual_model_expected = ResidualModel( intercepts=test_data["residual_model_expected"]["intercepts"], linear_terms=test_data["residual_model_expected"]["linear_terms"], square_terms=test_data["residual_model_expected"]["square_terms"], ) return inputs_dict, residual_model_expected @pytest.fixture() def data_update_residual_model(): test_data = read_yaml(FIXTURES_DIR / "update_residual_model.yaml") ResidualModel = namedtuple( "ResidualModel", ["intercepts", "linear_terms", "square_terms"] ) residual_model = ResidualModel( intercepts=None, linear_terms=np.array(test_data["linear_terms"]), square_terms=np.array(test_data["square_terms"]), ) coefficients_to_add = { "linear_terms": np.array(test_data["coefficients_linear_terms"]).T, "square_terms": np.array(test_data["coefficients_square_terms"]), } inputs_dict = { "residual_model": residual_model, 
"coefficients_to_add": coefficients_to_add, "delta": test_data["delta"], "delta_old": test_data["delta_old"], } expected_dict = { "linear_terms": test_data["linear_terms_expected"], "square_terms": test_data["square_terms_expected"], } return inputs_dict, expected_dict @pytest.fixture() def data_update_main_from_residual_model(): test_data = read_yaml(FIXTURES_DIR / "update_main_from_residual_model.yaml") ResidualModel = namedtuple( "ResidualModel", ["intercepts", "linear_terms", "square_terms"] ) MainModel = namedtuple("MainModel", ["linear_terms", "square_terms"]) residual_model = ResidualModel( intercepts=np.array(test_data["residuals"]), linear_terms=np.array(test_data["linear_terms_residual_model"]), square_terms=np.array(test_data["square_terms_residual_model"]), ) main_model_expected = MainModel( linear_terms=test_data["linear_terms_main_model_expected"], square_terms=test_data["square_terms_main_model_expected"], ) return residual_model, main_model_expected @pytest.fixture() def data_update_residual_model_with_new_accepted_x(): test_data = read_yaml( FIXTURES_DIR / "update_residual_model_with_new_accepted_x.yaml" ) ResidualModel = namedtuple( "ResidualModel", ["intercepts", "linear_terms", "square_terms"] ) inputs_dict = {} residual_model_expected = {} residual_model = ResidualModel( intercepts=np.array(test_data["residuals"]), linear_terms=np.array(test_data["linear_terms"]), square_terms=np.array(test_data["square_terms"]), ) inputs_dict["residual_model"] = residual_model inputs_dict["x_candidate"] = ( np.array(test_data["x_candidate_uncentered"]) - np.array(test_data["best_x"]) ) / test_data["delta"] residual_model_expected = ResidualModel( intercepts=test_data["residuals_expected"], linear_terms=test_data["linear_terms_expected"], square_terms=np.array(test_data["square_terms"]), ) return inputs_dict, residual_model_expected @pytest.fixture() def data_update_main_model_with_new_accepted_x(): test_data = read_yaml(FIXTURES_DIR / 
"update_main_model_with_new_accepted_x.yaml") MainModel = namedtuple("MainModel", ["linear_terms", "square_terms"]) inputs_dict = {} expected_dict = {} main_model = MainModel( linear_terms=np.array(test_data["linear_terms"]), square_terms=np.array(test_data["square_terms"]), ) inputs_dict["main_model"] = main_model inputs_dict["x_candidate"] = ( np.array(test_data["x_candidate_uncentered"]) - np.array(test_data["best_x"]) ) / test_data["delta"] expected_dict["linear_terms"] = test_data["linear_terms_expected"] return inputs_dict, expected_dict @pytest.fixture( params=[ "zero_i", "zero_ii", "zero_iii", "zero_iv", "nonzero_i", "nonzero_ii", "nonzero_iii", ] ) def data_find_affine_points(request): test_data = read_yaml(FIXTURES_DIR / f"find_affine_points_{request.param}.yaml") history = LeastSquaresHistory() history_x = np.array(test_data["history_x"]) history.add_entries(history_x, np.zeros(history_x.shape)) inputs_dict = { "history": history, "x_accepted": np.array(test_data["x_accepted"]), "model_improving_points": np.array(test_data["model_improving_points"]), "project_x_onto_null": test_data["project_x_onto_null"], "delta": test_data["delta"], "theta1": test_data["theta1"], "c": test_data["c"], "model_indices": np.array(test_data["model_indices"]), "n_modelpoints": test_data["n_modelpoints"], } expected_dict = { "model_improving_points": test_data["model_improving_points_expected"], "model_indices": test_data["model_indices_expected"], "n_modelpoints": test_data["n_modelpoints_expected"], } return inputs_dict, expected_dict @pytest.fixture(params=["i", "ii"]) def data_add_points_until_main_model_fully_linear(request, criterion): test_data = read_yaml( FIXTURES_DIR / f"add_points_until_main_model_fully_linear_{request.param}.yaml" ) history = LeastSquaresHistory() n = 3 n_modelpoints = test_data["n_modelpoints"] history.add_entries( np.array(test_data["history_x"])[: -(n - n_modelpoints)], np.array(test_data["history_criterion"])[: -(n - n_modelpoints)], ) 
MainModel = namedtuple("MainModel", ["linear_terms", "square_terms"]) main_model = MainModel( linear_terms=np.array(test_data["linear_terms"]), square_terms=np.array(test_data["square_terms"]), ) index_best_x = test_data["index_best_x"] x_accepted = test_data["history_x"][index_best_x] def batch_fun(x_list, n_cores): return [criterion(x) for x in x_list] inputs_dict = { "history": history, "main_model": main_model, "model_improving_points": np.array(test_data["model_improving_points"]), "model_indices": np.array(test_data["model_indices"]), "x_accepted": np.array(x_accepted), "n_modelpoints": n_modelpoints, "delta": test_data["delta"], "criterion": criterion, "lower_bounds": None, "upper_bounds": None, "batch_fun": batch_fun, } expected_dict = { "model_indices": test_data["model_indices_expected"], "history_x": test_data["history_x_expected"], } return inputs_dict, expected_dict @pytest.fixture() def data_get_interpolation_matrices_residual_model(): test_data = read_yaml( FIXTURES_DIR / "get_interpolation_matrices_residual_model.yaml" ) history = LeastSquaresHistory() history_x = np.array(test_data["history_x"]) history.add_entries(history_x, np.zeros(history_x.shape)) n_params = 3 n_maxinterp = 2 * n_params + 1 n_modelpoints = 7 inputs_dict = { "history": history, "x_accepted": np.array(test_data["x_accepted"]), "model_indices": np.array(test_data["model_indices"]), "delta": test_data["delta"], "c2": 10, "theta2": 1e-4, "n_maxinterp": n_maxinterp, } expected_dict = { "x_sample_monomial_basis": np.array( test_data["x_sample_monomial_basis_expected"] )[: n_params + 1, : n_params + 1], "monomial_basis": np.array(test_data["monomial_basis_expected"])[ :n_modelpoints ], "basis_null_space": test_data["basis_null_space_expected"], "lower_triangular": np.array(test_data["lower_triangular_expected"])[ :, n_params + 1 : n_maxinterp ], "n_modelpoints": test_data["n_modelpoints_expected"], } return inputs_dict, expected_dict @pytest.fixture(params=["4", "7"]) def 
data_evaluate_residual_model(request): test_data = read_yaml(FIXTURES_DIR / f"interpolate_f_iter_{request.param}.yaml") history = LeastSquaresHistory() history.add_entries( np.array(test_data["history_x"]), np.array(test_data["history_criterion"]), ) ResidualModel = namedtuple( "ResidualModel", ["intercepts", "linear_terms", "square_terms"] ) residual_model = ResidualModel( intercepts=np.array(test_data["residuals"]), linear_terms=np.array(test_data["linear_terms_residual_model"]), square_terms=np.array(test_data["square_terms_residual_model"]), ) x_accepted = np.array(test_data["x_accepted"]) model_indices = np.array(test_data["model_indices"]) n_modelpoints = test_data["n_modelpoints"] delta_old = test_data["delta_old"] center_info = {"x": x_accepted, "radius": delta_old} centered_xs = history.get_centered_xs( center_info, index=model_indices[:n_modelpoints] ) center_info = {"residuals": residual_model.intercepts} centered_residuals = history.get_centered_residuals( center_info, index=model_indices ) inputs_dict = { "centered_xs": centered_xs, "centered_residuals": centered_residuals, "residual_model": residual_model, } expected_dict = { "y_residuals": test_data["f_interpolated_expected"], } return inputs_dict, expected_dict @pytest.fixture() def data_fit_residual_model(): test_data = read_yaml(FIXTURES_DIR / "get_coefficients_residual_model.yaml") n_params = 3 n_maxinterp = 2 * n_params + 1 n_modelpoints = 7 inputs_dict = { "m_mat": np.array(test_data["x_sample_monomial_basis"])[ : n_params + 1, : n_params + 1 ], "n_mat": np.array(test_data["monomial_basis"])[:n_modelpoints], "z_mat": np.array(test_data["basis_null_space"]), "n_z_mat": np.array(test_data["lower_triangular"])[ :, n_params + 1 : n_maxinterp ], "y_residuals": np.array(test_data["f_interpolated"]), "n_modelpoints": test_data["n_modelpoints"], } expected_coefficients_dict = { "linear_terms": np.array(test_data["linear_terms_expected"]).T, "square_terms": np.array(test_data["square_terms_expected"]), 
} return inputs_dict, expected_coefficients_dict # ====================================================================================== # Test cases # ====================================================================================== @pytest.mark.skip(reason="refactoring") def test_update_initial_residual_model(data_update_initial_residual_model): inputs, residual_model_expected = data_update_initial_residual_model residual_model_out = create_initial_residual_model(**inputs) aaae(residual_model_out["intercepts"], residual_model_expected["intercepts"]) aaae(residual_model_out["linear_terms"], residual_model_expected["linear_terms"]) def test_update_residual_model(data_update_residual_model): inputs, expected = data_update_residual_model residual_model_out = update_residual_model(**inputs) aaae( residual_model_out.linear_terms, expected["linear_terms"], ) aaae( residual_model_out.square_terms, expected["square_terms"], ) def test_update_main_from_residual_model(data_update_main_from_residual_model): residual_model, main_model_expected = data_update_main_from_residual_model main_model_out = create_main_from_residual_model( residual_model, multiply_square_terms_with_intercepts=True ) aaae( main_model_out.linear_terms, main_model_expected.linear_terms, ) aaae( main_model_out.square_terms, main_model_expected.square_terms, decimal=3, ) def test_update_residual_model_with_new_accepted_x( data_update_residual_model_with_new_accepted_x, ): ( inputs, residual_model_expected, ) = data_update_residual_model_with_new_accepted_x residual_model_out = update_residual_model_with_new_accepted_x(**inputs) aaae(residual_model_out.intercepts, residual_model_expected.intercepts) aaae(residual_model_out.linear_terms, residual_model_expected.linear_terms) @pytest.mark.xfail(reason="Known rounding differences between C and Python.") def test_update_main_model_with_new_accepted_x( data_update_main_model_with_new_accepted_x, ): ( inputs, main_model_expected, ) = 
data_update_main_model_with_new_accepted_x main_model_out = update_main_model_with_new_accepted_x(**inputs) aaae(main_model_out.linear_terms, main_model_expected.linear_terms) def test_find_affine_points(data_find_affine_points): inputs, expected = data_find_affine_points ( model_improving_points_out, model_indices_out, n_modelpoints_out, project_x_onto_null_out, ) = find_affine_points(**inputs) aaae( model_improving_points_out, expected["model_improving_points"], ) aaae(model_indices_out, expected["model_indices"]) assert np.allclose(n_modelpoints_out, expected["n_modelpoints"]) assert np.allclose(project_x_onto_null_out, True) def test_add_points_until_main_model_fully_linear( data_add_points_until_main_model_fully_linear, ): inputs, expected = data_add_points_until_main_model_fully_linear n = 3 ( history_out, model_indices_out, ) = add_geomtery_points_to_make_main_model_fully_linear(**inputs, n_cores=1) aaae(model_indices_out, expected["model_indices"]) for index_added in range(n - inputs["n_modelpoints"], 0, -1): aaae( history_out.get_xs(index=-index_added), expected["history_x"][-index_added], ) def test_get_interpolation_matrices_residual_model( data_get_interpolation_matrices_residual_model, ): inputs, expected = data_get_interpolation_matrices_residual_model ( x_sample_monomial_basis, monomial_basis, basis_null_space, lower_triangular, n_modelpoints, ) = get_feature_matrices_residual_model(**inputs) aaae(x_sample_monomial_basis, expected["x_sample_monomial_basis"]) aaae(monomial_basis, expected["monomial_basis"]) aaae(basis_null_space, expected["basis_null_space"]) aaae(lower_triangular, expected["lower_triangular"]) assert np.allclose(n_modelpoints, expected["n_modelpoints"]) def test_evaluate_residual_model(data_evaluate_residual_model): inputs, expected = data_evaluate_residual_model y_residuals = evaluate_residual_model(**inputs) aaae(y_residuals, expected["y_residuals"]) def test_fit_residual_model(data_fit_residual_model): inputs, 
expected_coefficients = data_fit_residual_model coefficients_to_add = fit_residual_model(**inputs) aaae( coefficients_to_add["linear_terms"], expected_coefficients["linear_terms"], ) aaae( coefficients_to_add["square_terms"], expected_coefficients["square_terms"], ) ================================================ FILE: tests/optimagic/optimizers/_pounders/test_quadratic_subsolvers.py ================================================ """Test various solvers for quadratic trust-region subproblems.""" import numpy as np import pytest from numpy.testing import assert_array_almost_equal as aaae from optimagic.optimizers._pounders._conjugate_gradient import ( minimize_trust_cg, ) from optimagic.optimizers._pounders._steihaug_toint import ( minimize_trust_stcg, ) from optimagic.optimizers._pounders._trsbox import minimize_trust_trsbox from optimagic.optimizers._pounders.bntr import ( bntr, ) from optimagic.optimizers._pounders.gqtpar import ( gqtpar, ) from optimagic.optimizers._pounders.pounders_auxiliary import MainModel # ====================================================================================== # Subsolver BNTR # ====================================================================================== TEST_CASES_BNTR = [ ( np.array([0.0002877431832243, 0.00763968126032, 0.01217268029151]), np.array( [ [ 4.0080360351800763e00, 1.6579091056425378e02, 1.7322297746691254e02, ], [ 1.6579091056425378e02, 1.6088016292793940e04, 1.1041403355728811e04, ], [ 1.7322297746691254e02, 1.1041403355728811e04, 9.2992625728417297e03, ], ] ), -np.ones(3), np.ones(3), np.array([0.000122403, 3.92712e-06, -8.2519e-06]), ), ( np.array([7.898833044695e-06, 254.9676549378, 0.0002864050095122]), np.array( [ [3.97435226e00, 1.29126446e02, 1.90424789e02], [1.29126446e02, 1.08362658e04, 9.05024598e03], [1.90424789e02, 9.05024598e03, 1.06395102e04], ] ), np.array([-1.0, 0, -1.0]), np.ones(3), np.array([-4.89762e-06, 0.0, 6.0738e-08]), ), ( np.array([0.000208896, 0.040137, 0.0237668]), 
np.array( [ [ 8.6267971128257614e-01, 3.3589357331133463e01, 3.8550834275262481e01, ], [ 3.3589357331133463e01, 4.0625660472990171e03, 2.7006581320776222e03, ], [ 3.8550834275262481e01, 2.7006581320776222e03, 2.3157072223295277e03, ], ] ), np.array([-1.0, -1.0, -1.0]), np.ones(3), np.array([0.000404701, -8.56315e-06, -7.01394e-06]), ), ( np.array([1053.998577258, -1768.195151975, 1091.754813306]), np.array( [ [ 5.1009001863913858e02, -2.9142602235646069e02, 2.4000221805201900e02, ], [ -2.9142602235646069e02, 1.3922341317778117e04, 5.7863734667132694e03, ], [ 2.4000221805201900e02, 5.7863734667132694e03, 1.5911148658889811e03, ], ], ), np.array([-1.0, -1.0, -1.0]), np.ones(3), np.array([-1, 0.52169, -1]), ), ( np.array([-191889.2320478, -1002015.908232, -573072.9226335]), np.array( [ [ 1.1012704153339069e07, 4.9533363163771488e07, 2.9628266883962810e07, ], [ 4.9533363163771488e07, 2.2267942225630835e08, 1.3303758212303287e08, ], [ 2.9628266883962810e07, 1.3303758212303287e08, 7.9554367206848219e07, ], ], ), np.array([-1.0, -1.0, -1.0]), np.ones(3), np.array([-1, 0.148669, 0.131015]), ), ( np.array([1076.73, -4802.74, 828.249]), np.array( [ [ 4.8212187042743824e02, -9.8489480047918653e02, 1.1822837156689332e03, ], [ -9.8489480047918653e02, 7.7891876734093257e03, 2.1566788126264223e03, ], [ 1.1822837156689332e03, 2.1566788126264223e03, 1.9148005132287210e03, ], ], ), np.array([-1.0, -1.0, -1.0]), np.ones(3), np.array([1.0, 1, -1]), ), ( np.array([39307.4, 43176.2, 19136.1]), np.array( [ [ 2.1888915578112096e05, 1.9734665605071097e05, 1.0865582588513123e05, ], [ 1.9734665605071097e05, 1.5802957082548781e05, 9.3932751210457645e04, ], [ 1.0865582588513123e05, 9.3932751210457645e04, 6.9919507495186845e04, ], ], ), np.array([-1.0, -1.0, -1.0]), np.ones(3), np.array([0.835475, -1, -0.228586]), ), ( np.array([15924.6, -7936.89, 4559.77]), np.array( [ [ 1.4823363165787258e05, -9.3991198881618606e04, -6.7423849020288171e03, ], [ -9.3991198881618606e04, 1.0299013233992350e05, 
2.7454282523562739e04, ], [ -6.7423849020288171e03, 2.7454282523562739e04, -8.7825122820168282e04, ], ], ), np.array([-1.0, -1.0, -1.0]), np.ones(3), np.array([0.15422, 0.484382, -1.0]), ), ( np.array([-223.491, -2375.1, -3508.53]), np.array( [ [ 1.8762040451468388e03, 4.5209129063298806e03, 3.7587689627124179e04, ], [ 4.5209129063298806e03, 2.6540113149319626e06, 1.3806874591227937e06, ], [ 3.7587689627124179e04, 1.3806874591227937e06, 1.4430203128871324e06, ], ], ), np.array([-1.0, -1.0, -1.0]), np.ones(3), np.array([0.700966, 0.0157984, -0.0309433]), ), ( np.array([-0.00566046, -0.26497, -0.24923]), np.array( [ [ 9.0152048402068141e-01, 3.9069240493708740e01, 4.0976585309530130e01, ], [ 3.9069240493708740e01, 4.0339538281863297e03, 2.7447144903267226e03, ], [ 4.0976585309530130e01, 2.7447144903267226e03, 2.3178455554478642e03, ], ], ), np.array([-1.0, -1.0, -1.0]), np.ones(3), np.array([0.0141205, 0.000131845, -0.000298234]), ), ( np.array([16459.6, 42312.7, 33953.9]), np.array( [ [ 3.4897766687256113e07, 1.7536007046689782e08, 1.0424382825704373e08, ], [ 1.7536007046689782e08, 8.8481756045390594e08, 5.2619306030723321e08, ], [ 1.0424382825704373e08, 5.2619306030723321e08, 3.1297679051347983e08, ], ] ), np.array([-1.0, -1.0, -1.0]), np.ones(3), np.array([-0.131066, 0.180817, -0.260453]), ), ( np.array([17660.3, 18827.2, 28759.5]), np.array( [ [ 9.7041306729050993e04, 1.0613110916937439e05, 1.5558443292460032e05, ], [ 1.0613110916937439e05, 1.0840421118778562e05, 1.5388850550829183e05, ], [ 1.5558443292460032e05, 1.5388850550829183e05, 2.1840298326937514e05, ], ] ), np.array([-1.0, -1.0, -1.0]), np.ones(3), np.array([1, 0.266874, -1]), ), ( np.array([16678, 65723.7, -153755]), np.array( [ [ 2.8786103367161286e04, 1.0278873046014908e05, -2.4232333719251846e05, ], [ 1.0278873046014908e05, 7.9423330424583505e05, -4.3975347261092327e04, ], [ -2.4232333719251846e05, -4.3975347261092327e04, 3.5707186446013493e06, ], ] ), np.array([-1.0, -1.0, -1.0]), np.ones(3), 
np.array([1, -0.206169, 0.108385]), ), ( np.array([26602.2, -118867, 7457.08]), np.array( [ [ 1.3510413991352668e05, -4.4190620422288636e05, 1.6183211956800147e04, ], [ -4.4190620422288636e05, 6.7224673907168563e06, 1.5956835170839101e05, ], [ 1.6183211956800147e04, 1.5956835170839101e05, 6.7613560286023448e03, ], ] ), np.array([-1.0, -1.0, -1.0]), np.ones(3), np.array([0.0743402, 0.0463054, -1.0]), ), ( np.array([-1726.71, -394.745, -340.876]), np.array( [ [ 3.2235026082366367e03, 3.5903801754879023e03, 1.4504956347170955e03, ], [ 3.5903801754879023e03, 1.0326690788609463e04, 4.9152962632434155e03, ], [ 1.4504956347170955e03, 4.9152962632434155e03, 2.7645273367617360e03, ], ] ), np.array([-1.0, -1.0, -1.0]), np.ones(3), np.array([0.925468, -0.722815, 0.922884]), ), ( np.array([-1460.95, -48078.5, -61349.4]), np.array( [ [ -2.1558862194927831e04, 2.9346854336376925e05, 3.6945385626803833e05, ], [ 2.9346854336376925e05, 7.6788393809145853e07, 5.7299202312126122e07, ], [ 3.6945385626803833e05, 5.7299202312126122e07, 5.0198599698606022e07, ], ] ), np.array([-1.0, -1.0, -1.0]), np.ones(3), np.array([1, 0.00933713, -0.0167956]), ), ( np.array([-7292.55, -299376, -269052]), np.array( [ [ 3.6778621108518197e05, 1.5160538979173467e07, 1.3518289246498797e07, ], [ 1.5160538979173467e07, 6.1341858259608674e08, 5.4813989289859617e08, ], [ 1.3518289246498797e07, 5.4813989289859617e08, 4.9252782230468601e08, ], ] ), np.array([-1.0, -1.0, -1.0]), np.ones(3), np.array([-1, 0.0341927, -0.0100605]), ), ] @pytest.mark.slow() @pytest.mark.parametrize( "linear_terms, square_terms, lower_bounds, upper_bounds, x_expected", TEST_CASES_BNTR, ) def test_bounded_newton_trustregion( linear_terms, square_terms, lower_bounds, upper_bounds, x_expected, ): main_model = MainModel(linear_terms=linear_terms, square_terms=square_terms) options = { "conjugate_gradient_method": "cg", "maxiter": 50, "maxiter_gradient_descent": 5, "gtol_abs": 1e-8, "gtol_rel": 1e-8, "gtol_scaled": 0, 
"gtol_abs_conjugate_gradient": 1e-8, "gtol_rel_conjugate_gradient": 1e-6, } result = bntr( main_model, lower_bounds, upper_bounds, x_candidate=np.zeros_like(x_expected), **options, ) aaae(result["x"], x_expected, decimal=5) # ====================================================================================== # Subsolver GQTPAR # ====================================================================================== TEST_CASES_GQTPAR = [ ( np.array([-0.0005429824695352, -0.1032556117176, -0.06816855282091]), np.array( [ [2.05714077e-02, 7.58182390e-01, 9.00050279e-01], [7.58182390e-01, 6.25867992e01, 4.20096648e01], [9.00050279e-01, 4.20096648e01, 4.03810858e01], ] ), np.array( [ -0.9994584757179, -0.007713730538474, 0.03198833730482, ] ), -0.001340933981148, ) ] @pytest.mark.slow() @pytest.mark.parametrize( "linear_terms, square_terms, x_expected, criterion_expected", TEST_CASES_GQTPAR ) def test_gqtpar_quadratic(linear_terms, square_terms, x_expected, criterion_expected): main_model = MainModel(linear_terms=linear_terms, square_terms=square_terms) result = gqtpar(main_model, x_candidate=np.zeros_like(x_expected)) aaae(result["x"], x_expected) aaae(result["criterion"], criterion_expected) # ====================================================================================== # Conjugate Gradient Algorithms # ====================================================================================== TEST_CASES_CG = [ ( np.array([79579.8, 35973.7]), np.array( [ [2.2267942225630835e08, 1.3303758212303287e08], [1.3303758212303287e08, 7.9554367206848219e07], ] ), 0.2393319731158, -np.array([0.0958339, -0.159809]), ), ( np.array([0.00028774, 0.00763968, 0.01217268]), np.array( [ [4.00803604e00, 1.65790911e02, 1.73222977e02], [1.65790911e02, 1.60880163e04, 1.10414034e04], [1.73222977e02, 1.10414034e04, 9.29926257e03], ] ), 9.5367431640625e-05, np.array([9.50204689e-05, 3.56030822e-06, -7.30627902e-06]), ), ( np.array([0.00028774, 0.00763968, 0.01217268]), np.array( [ 
[4.00803604e00, 1.65790911e02, 1.73222977e02], [1.65790911e02, 1.60880163e04, 1.10414034e04], [1.73222977e02, 1.10414034e04, 9.29926257e03], ] ), 9.5367431640625e-05, np.array([9.50204689e-05, 3.56030822e-06, -7.30627902e-06]), ), ( -np.array([-6.76002e-06, -6.56323e-08, 2.00988e-07]), np.array( [ [ 4.0080360351800763e00, 1.6579091056425378e02, 1.7322297746691254e02, ], [ 1.6579091056425378e02, 1.6088016292793940e04, 1.1041403355728811e04, ], [ 1.7322297746691254e02, 1.1041403355728811e04, 9.2992625728417297e03, ], ] ), 0.0003814697265625, np.array([-2.7382e-05, -3.66814e-07, 9.45617e-07]), ), ( -np.array([-4.69447, -0.619271, 0.837666]), np.array( [ [ 6.9147751896043360e01, 2.6192110911280561e03, 2.8094172839794960e03, ], [ 2.6192110911280561e03, 2.4907533417816096e05, 1.6917615514201863e05, ], [ 2.8094172839794960e03, 1.6917615514201863e05, 1.4352314212505225e05, ], ] ), 0.0657627701334, np.array([-0.0656472, -0.00168561, 0.00351321]), ), ( -np.array([-2.45646e-05, -4.1711e-07, 9.2032e-07]), np.array( [ [ 8.6267971128257614e-01, 3.3589357331133463e01, 3.8550834275262481e01, ], [ 3.3589357331133463e01, 4.0625660472990171e03, 2.7006581320776222e03, ], [ 3.8550834275262481e01, 2.7006581320776222e03, 2.3157072223295277e03, ], ] ), 0.0003814697265625, np.array([-0.000310185, -3.86464e-06, 9.67128e-06]), ), ( -np.array([-4.29172e-08, -1.8127e-06, -1.38313e-06]), np.array( [ [ 1.7207808265135328e06, 7.2130304472968280e07, 5.5202182930777229e07, ], [ 7.2130304472968280e07, 3.0516230749633555e09, 2.3274035648401971e09, ], [ 5.5202182930777229e07, 2.3274035648401971e09, 1.7782503817136776e09, ], ] ), 0.390625, np.array([-7.44084e-15, -6.07092e-16, 2.47754e-16]), ), ( -np.array([79525.7, 3.04463e06, 2.42641e06]), np.array( [ [ 6.3624954351893254e06, 2.5406887701711509e08, 1.9610463258207005e08, ], [ 2.5406887701711509e08, 1.0261536342839724e10, 7.8819891642426796e09, ], [ 1.9610463258207005e08, 7.8819891642426796e09, 6.0688426371444454e09, ], ] ), 0.0001192842356654, 
np.array([2.43607e-06, 9.32646e-05, 7.4327e-05]), ), ] TEST_CASES_TRSBOX = [ ( np.array([1.0, 0.0, 1.0]), np.array([[1.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 2.0]]), 2.0, np.array([-1.0, 0.0, -0.5]), ), ( np.array([1.0, 0.0, 1.0]), np.array([[1.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 2.0]]), 5.0 / 12.0, np.array([-1.0 / 3.0, 0.0, -0.25]), ), ( np.array([1.0, 0.0, 1.0]), np.array([[-2.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, -1.0]]), 5.0 / 12.0, np.array([-1.0 / 3.0, 0.0, -0.25]), ), ( np.array([0.0, 0.0, 1.0]), np.array([[-2.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, -1.0]]), 0.5, np.array([0.0, 0.0, -0.5]), ), ] @pytest.mark.slow() @pytest.mark.parametrize( "gradient, hessian, trustregion_radius, x_expected", TEST_CASES_CG ) def test_trustregion_conjugate_gradient( gradient, hessian, trustregion_radius, x_expected ): x_out = minimize_trust_cg( gradient, hessian, trustregion_radius, gtol_abs=1e-8, gtol_rel=1e-6 ) aaae(x_out, x_expected) @pytest.mark.slow() @pytest.mark.parametrize( "gradient, hessian, trustregion_radius, x_expected", TEST_CASES_CG ) def test_trustregion_steihaug_toint(gradient, hessian, trustregion_radius, x_expected): x_out = minimize_trust_stcg(gradient, hessian, trustregion_radius) aaae(x_out, x_expected) @pytest.mark.slow() @pytest.mark.parametrize( "linear_terms, square_terms, trustregion_radius, x_expected", TEST_CASES_CG + TEST_CASES_TRSBOX, ) def test_trustregion_trsbox(linear_terms, square_terms, trustregion_radius, x_expected): lower_bounds = -1e20 * np.ones_like(linear_terms) upper_bounds = 1e20 * np.ones_like(linear_terms) x_out = minimize_trust_trsbox( linear_terms, square_terms, trustregion_radius, lower_bounds=lower_bounds, upper_bounds=upper_bounds, ) aaae(x_out, x_expected, decimal=4) ================================================ FILE: tests/optimagic/optimizers/test_bayesian_optimizer.py ================================================ """Unit tests for Bayesian optimizer helper functions.""" import numpy as np import 
pytest from optimagic.config import IS_BAYESOPT_INSTALLED_AND_VERSION_NEWER_THAN_2 from optimagic.optimization.internal_optimization_problem import InternalBounds if IS_BAYESOPT_INSTALLED_AND_VERSION_NEWER_THAN_2: from bayes_opt import acquisition from optimagic.optimizers.bayesian_optimizer import ( _extract_params_from_kwargs, _process_acquisition_function, _process_bounds, ) def test_extract_params_from_kwargs(): """Test basic parameter extraction from kwargs dictionary.""" params_dict = {"param0": 1.0, "param1": 2.0, "param2": 3.0} result = _extract_params_from_kwargs(params_dict) np.testing.assert_array_equal(result, np.array([1.0, 2.0, 3.0])) def test_process_bounds_valid(): """Test processing valid bounds for Bayesian optimization.""" bounds = InternalBounds(lower=np.array([-1.0, 0.0]), upper=np.array([1.0, 2.0])) result = _process_bounds(bounds) expected = {"param0": (-1.0, 1.0), "param1": (0.0, 2.0)} assert result == expected def test_process_bounds_none(): """Test processing bounds with None values.""" bounds = InternalBounds(lower=None, upper=np.array([1.0, 2.0])) with pytest.raises( ValueError, match="Bayesian optimization requires finite bounds" ): _process_bounds(bounds) def test_process_bounds_infinite(): """Test processing bounds with infinite values.""" bounds = InternalBounds(lower=np.array([-1.0, 0.0]), upper=np.array([1.0, np.inf])) with pytest.raises( ValueError, match="Bayesian optimization requires finite bounds" ): _process_bounds(bounds) @pytest.mark.skipif( not IS_BAYESOPT_INSTALLED_AND_VERSION_NEWER_THAN_2, reason="bayes_opt is not installed in a recent enough version >= 2.0.0.", ) def test_process_acquisition_function_none(): """Test processing None acquisition function.""" result = _process_acquisition_function( acquisition_function=None, kappa=2.576, xi=0.01, exploration_decay=None, exploration_decay_delay=None, random_seed=None, ) assert result is None @pytest.mark.skipif( not IS_BAYESOPT_INSTALLED_AND_VERSION_NEWER_THAN_2, 
reason="bayes_opt not installed" ) @pytest.mark.parametrize( "acq_name, expected_class", [ ("ucb", acquisition.UpperConfidenceBound), ("upper_confidence_bound", acquisition.UpperConfidenceBound), ("ei", acquisition.ExpectedImprovement), ("expected_improvement", acquisition.ExpectedImprovement), ("poi", acquisition.ProbabilityOfImprovement), ("probability_of_improvement", acquisition.ProbabilityOfImprovement), ], ) def test_process_acquisition_function_string(acq_name, expected_class): """Test processing string acquisition function.""" result = _process_acquisition_function( acquisition_function=acq_name, kappa=2.576, xi=0.01, exploration_decay=None, exploration_decay_delay=None, random_seed=None, ) assert isinstance(result, expected_class) @pytest.mark.skipif( not IS_BAYESOPT_INSTALLED_AND_VERSION_NEWER_THAN_2, reason="bayes_opt not installed" ) def test_process_acquisition_function_invalid_string(): """Test processing invalid string acquisition function.""" with pytest.raises(ValueError, match="Invalid acquisition_function string"): _process_acquisition_function( acquisition_function="acq", kappa=2.576, xi=0.01, exploration_decay=None, exploration_decay_delay=None, random_seed=None, ) @pytest.mark.skipif( not IS_BAYESOPT_INSTALLED_AND_VERSION_NEWER_THAN_2, reason="bayes_opt not installed" ) def test_process_acquisition_function_instance(): """Test processing acquisition function instance.""" acq_instance = acquisition.UpperConfidenceBound() result = _process_acquisition_function( acquisition_function=acq_instance, kappa=2.576, xi=0.01, exploration_decay=None, exploration_decay_delay=None, random_seed=None, ) assert result is acq_instance @pytest.mark.skipif( not IS_BAYESOPT_INSTALLED_AND_VERSION_NEWER_THAN_2, reason="bayes_opt not installed" ) def test_process_acquisition_function_class(): """Test processing acquisition function class.""" result = _process_acquisition_function( acquisition_function=acquisition.UpperConfidenceBound, kappa=2.576, xi=0.01, 
exploration_decay=None, exploration_decay_delay=None, random_seed=None, ) assert isinstance(result, acquisition.UpperConfidenceBound) @pytest.mark.skipif( not IS_BAYESOPT_INSTALLED_AND_VERSION_NEWER_THAN_2, reason="bayes_opt not installed" ) def test_process_acquisition_function_invalid_type(): """Test processing invalid acquisition function type.""" with pytest.raises(TypeError, match="acquisition_function must be None, a string"): _process_acquisition_function( acquisition_function=123, kappa=2.576, xi=0.01, exploration_decay=None, exploration_decay_delay=None, random_seed=None, ) ================================================ FILE: tests/optimagic/optimizers/test_bhhh.py ================================================ """Test the internal BHHH algorithm.""" from functools import partial import numpy as np import pytest import statsmodels.api as sm from numpy.testing import assert_array_almost_equal as aaae from scipy.stats import norm from optimagic import mark, minimize from optimagic.optimizers.bhhh import bhhh_internal from optimagic.utilities import get_rng def generate_test_data(): rng = get_rng(seed=12) num_observations = 5000 x1 = rng.multivariate_normal([0, 0], [[1, 0.75], [0.75, 1]], num_observations) x2 = rng.multivariate_normal([1, 4], [[1, 0.75], [0.75, 1]], num_observations) endog = np.hstack((np.zeros(num_observations), np.ones(num_observations))) simulated_exog = np.vstack((x1, x2)).astype(np.float32) exog = simulated_exog intercept = np.ones((exog.shape[0], 1)) exog = np.hstack((intercept, exog)) return endog, exog def _cdf_logit(x): return 1 / (1 + np.exp(-x)) def get_loglikelihood_logit(endog, exog, x): q = 2 * endog - 1 linear_prediction = np.dot(exog, x) return np.log(_cdf_logit(q * linear_prediction)) def get_score_logit(endog, exog, x): linear_prediction = np.dot(exog, x) return (endog - _cdf_logit(linear_prediction))[:, None] * exog def get_loglikelihood_probit(endog, exog, x): q = 2 * endog - 1 linear_prediction = np.dot(exog, x[: 
exog.shape[1]]) return np.log(norm.cdf(q * linear_prediction)) def get_score_probit(endog, exog, x): q = 2 * endog - 1 linear_prediction = np.dot(exog, x[: exog.shape[1]]) derivative_loglikelihood = ( q * norm.pdf(q * linear_prediction) / norm.cdf(q * linear_prediction) ) return derivative_loglikelihood[:, None] * exog def criterion_and_derivative_logit(x): """Return Logit criterion and derivative. Args: x (np.ndarray): Parameter vector of shape (n_obs,). Returns: tuple: first entry is the criterion, second entry is the score """ endog, exog = generate_test_data() score = partial(get_score_logit, endog, exog) loglike = partial(get_loglikelihood_logit, endog, exog) return -loglike(x), score(x) def criterion_and_derivative_probit(x): """Return Probit criterion and derivative. Args: x (np.ndarray): Parameter vector of shape (n_obs,). Returns: tuple: first entry is the criterion, second entry is the score """ endog, exog = generate_test_data() score = partial(get_score_probit, endog, exog) loglike = partial(get_loglikelihood_probit, endog, exog) return -loglike(x), score(x) @pytest.fixture() def result_statsmodels_logit(): endog, exog = generate_test_data() result = sm.Logit(endog, exog).fit() return result @pytest.fixture() def result_statsmodels_probit(): endog, exog = generate_test_data() result = sm.Probit(endog, exog).fit() return result @pytest.mark.parametrize( "criterion_and_derivative, result_statsmodels", [ (criterion_and_derivative_logit, "result_statsmodels_logit"), (criterion_and_derivative_probit, "result_statsmodels_probit"), ], ) def test_maximum_likelihood(criterion_and_derivative, result_statsmodels, request): result_expected = request.getfixturevalue(result_statsmodels) x = np.zeros(3) result_bhhh = bhhh_internal( criterion_and_derivative, x=x, gtol_abs=1e-8, maxiter=200, ) aaae(result_bhhh.x, result_expected.params, decimal=4) @pytest.mark.parametrize( "criterion_and_derivative, result_statsmodels", [ (criterion_and_derivative_logit, 
"result_statsmodels_logit"), (criterion_and_derivative_probit, "result_statsmodels_probit"), ], ) def test_maximum_likelihood_external_interfaace( criterion_and_derivative, result_statsmodels, request ): result_expected = request.getfixturevalue(result_statsmodels) x = np.zeros(3) result_bhhh = minimize( fun=mark.likelihood(criterion_and_derivative), jac=True, params=x, algorithm="bhhh", ) aaae(result_bhhh.params, result_expected.params, decimal=4) ================================================ FILE: tests/optimagic/optimizers/test_fides_options.py ================================================ """Test the different options of fides.""" import numpy as np import pytest from numpy.testing import assert_array_almost_equal as aaae from optimagic.config import IS_FIDES_INSTALLED from optimagic.optimization.optimize import minimize from optimagic.parameters.bounds import Bounds if IS_FIDES_INSTALLED: from fides.hessian_approximation import FX, SR1, Broyden from optimagic.optimizers.fides import Fides else: FX = lambda: None SR1 = lambda: None Broyden = lambda phi: None # noqa: ARG005 test_cases_no_contribs_needed = [ {}, {"hessian_update_strategy": "bfgs"}, {"hessian_update_strategy": "BFGS"}, {"hessian_update_strategy": SR1()}, {"hessian_update_strategy": Broyden(phi=0.5)}, {"hessian_update_strategy": "sr1"}, {"hessian_update_strategy": "DFP"}, {"hessian_update_strategy": "bb"}, {"convergence_ftol_rel": 1e-6}, {"convergence_xtol_abs": 1e-6}, {"convergence_gtol_abs": 1e-6}, {"convergence_gtol_rel": 1e-6}, {"stopping_maxiter": 100}, {"stopping_max_seconds": 200}, {"trustregion_initial_radius": 20, "trustregion_stepback_strategy": "truncate"}, {"trustregion_subspace_dimension": "full"}, {"trustregion_max_stepback_fraction": 0.8}, {"trustregion_decrease_threshold": 0.4, "trustregion_decrease_factor": 0.2}, {"trustregion_increase_threshold": 0.9, "trustregion_increase_factor": 4}, ] def criterion_and_derivative(x): return (x**2).sum(), 2 * x def criterion(x): return 
(x**2).sum() @pytest.mark.skipif(not IS_FIDES_INSTALLED, reason="fides not installed.") @pytest.mark.parametrize("algo_options", test_cases_no_contribs_needed) def test_fides_correct_algo_options(algo_options): res = minimize( fun_and_jac=criterion_and_derivative, fun=criterion, x0=np.array([1, -5, 3]), bounds=Bounds( lower=np.array([-10, -10, -10]), upper=np.array([10, 10, 10]), ), algorithm=Fides(**algo_options), ) aaae(res.params, np.zeros(3), decimal=4) test_cases_needing_contribs = [ {"hessian_update_strategy": FX()}, {"hessian_update_strategy": "ssm"}, {"hessian_update_strategy": "TSSM"}, {"hessian_update_strategy": "gnsbfgs"}, ] @pytest.mark.skipif(not IS_FIDES_INSTALLED, reason="fides not installed.") @pytest.mark.parametrize("algo_options", test_cases_needing_contribs) def test_fides_unimplemented_algo_options(algo_options): with pytest.raises(NotImplementedError): minimize( fun_and_jac=criterion_and_derivative, fun=criterion, x0=np.array([1, -5, 3]), bounds=Bounds( lower=np.array([-10, -10, -10]), upper=np.array([10, 10, 10]), ), algorithm=Fides(**algo_options), ) @pytest.mark.skipif(not IS_FIDES_INSTALLED, reason="fides not installed.") def test_fides_stop_after_one_iteration(): res = minimize( fun_and_jac=criterion_and_derivative, fun=criterion, x0=np.array([1, -5, 3]), bounds=Bounds( lower=np.array([-10, -10, -10]), upper=np.array([10, 10, 10]), ), algorithm=Fides(stopping_maxiter=1), ) assert not res.success assert res.n_iterations == 1 ================================================ FILE: tests/optimagic/optimizers/test_gfo_optimizers.py ================================================ import numpy as np import pytest from optimagic.config import IS_GRADIENT_FREE_OPTIMIZERS_INSTALLED from optimagic.optimization.internal_optimization_problem import ( SphereExampleInternalOptimizationProblemWithConverter, ) from optimagic.optimizers.gfo_optimizers import ( GFOCommonOptions, _get_gfo_constraints, _get_initialize_gfo, _get_search_space_gfo, 
_gfo_internal, _value2para, ) from optimagic.parameters.bounds import Bounds problem = SphereExampleInternalOptimizationProblemWithConverter() def test_get_gfo_constraints(): got = _get_gfo_constraints() expected = [] assert got == expected def test_get_initialize_gfo(): x0 = np.array([1, 0, 1]) x1 = [ {"x0": 1, "x1": 2, "x2": 3}, ] n_init = 20 got = _get_initialize_gfo(x0, n_init, x1, problem.converter) expected = { "warm_start": [ {"x0": 1, "x1": 0, "x2": 1}, # x0 {"x0": 1, "x1": 2, "x2": 3}, ], # x1 "vertices": n_init // 2, "grid": n_init // 2, } assert got == expected def test_get_search_space_gfo(): bounds = Bounds(lower=np.array([-10, -10]), upper=np.array([10, 10])) n_grid_points = { "x0": 5, "x1": 5, } got = _get_search_space_gfo(bounds, n_grid_points, problem.converter) expected = { "x0": np.array([-10.0, -5.0, 0.0, 5.0, 10.0]), "x1": np.array([-10.0, -5.0, 0.0, 5.0, 10.0]), } assert len(got.keys()) == 2 assert np.all(got["x0"] == expected["x0"]) assert np.all(got["x1"] == expected["x1"]) def test_value2para(): assert _value2para(np.array([0, 1, 2])) == {"x0": 0, "x1": 1, "x2": 2} @pytest.mark.skipif( not IS_GRADIENT_FREE_OPTIMIZERS_INSTALLED, reason="gfo not installed" ) def test_gfo_internal(): from gradient_free_optimizers import DownhillSimplexOptimizer res = _gfo_internal( common_options=GFOCommonOptions(), problem=problem, x0=np.full(10, 2), optimizer=DownhillSimplexOptimizer, ) assert np.all(res.x == np.full(10, 0)) ================================================ FILE: tests/optimagic/optimizers/test_iminuit_migrad.py ================================================ """Test suite for the iminuit migrad optimizer.""" import numpy as np import pytest from numpy.testing import assert_array_almost_equal as aaae from optimagic.config import IS_IMINUIT_INSTALLED from optimagic.optimization.optimize import minimize from optimagic.optimizers.iminuit_migrad import ( IminuitMigrad, _convert_bounds_to_minuit_limits, ) def sphere(x): return (x**2).sum() def 
sphere_grad(x): return 2 * x def test_convert_bounds_unbounded(): """Test converting unbounded bounds.""" lower = np.array([-np.inf, -np.inf]) upper = np.array([np.inf, np.inf]) limits = _convert_bounds_to_minuit_limits(lower, upper) assert len(limits) == 2 assert limits[0] == (None, None) assert limits[1] == (None, None) def test_convert_bounds_lower_only(): """Test converting lower bounds only.""" lower = np.array([1.0, 2.0]) upper = np.array([np.inf, np.inf]) limits = _convert_bounds_to_minuit_limits(lower, upper) assert len(limits) == 2 assert limits[0] == (1.0, None) assert limits[1] == (2.0, None) def test_convert_bounds_upper_only(): """Test converting upper bounds only.""" lower = np.array([-np.inf, -np.inf]) upper = np.array([1.0, 2.0]) limits = _convert_bounds_to_minuit_limits(lower, upper) assert len(limits) == 2 assert limits[0] == (None, 1.0) assert limits[1] == (None, 2.0) def test_convert_bounds_two_sided(): """Test converting two-sided bounds.""" lower = np.array([1.0, -2.0]) upper = np.array([2.0, -1.0]) limits = _convert_bounds_to_minuit_limits(lower, upper) assert len(limits) == 2 assert limits[0] == (1.0, 2.0) assert limits[1] == (-2.0, -1.0) def test_convert_bounds_mixed(): """Test converting mixed bounds (some infinite, some finite).""" lower = np.array([-np.inf, 0.0, 1.0]) upper = np.array([1.0, np.inf, 2.0]) limits = _convert_bounds_to_minuit_limits(lower, upper) assert len(limits) == 3 assert limits[0] == (None, 1.0) assert limits[1] == (0.0, None) assert limits[2] == (1.0, 2.0) @pytest.mark.skipif(not IS_IMINUIT_INSTALLED, reason="iminuit not installed.") def test_iminuit_migrad(): """Test basic optimization with sphere function.""" x0 = np.array([1.0, 2.0, 3.0]) algorithm = IminuitMigrad() res = minimize( fun=sphere, jac=sphere_grad, algorithm=algorithm, x0=x0, ) assert res.success aaae(res.x, np.zeros(3), decimal=6) assert res.n_fun_evals > 0 assert res.n_jac_evals > 0 ================================================ FILE: 
tests/optimagic/optimizers/test_ipopt_options.py ================================================
"""Test the different options of ipopt."""

import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal as aaae

from optimagic.config import IS_CYIPOPT_INSTALLED
from optimagic.optimization.optimize import minimize
from optimagic.optimizers.ipopt import Ipopt
from optimagic.parameters.bounds import Bounds

# Each entry is one keyword-argument set passed to Ipopt(**options); the
# parametrized test below checks that optimization still converges with it.
test_cases = [
    {},
    # termination and tolerances
    {"convergence_ftol_rel": 1e-7},
    {"stopping_maxiter": 1_100_000},
    {"mu_target": 1e-8},
    {"s_max": 200},
    {"stopping_max_wall_time_seconds": 200},
    {"stopping_max_cpu_time": 1e10},
    {"dual_inf_tol": 2.5},
    {"constr_viol_tol": 1e-7},
    {"compl_inf_tol": 1e-7},
    {"acceptable_iter": 15},
    {"acceptable_tol": 1e-5},
    {"acceptable_dual_inf_tol": 1e-5},
    {"acceptable_constr_viol_tol": 1e-5},
    {"acceptable_compl_inf_tol": 1e-5},
    {"acceptable_obj_change_tol": 1e5},
    {"diverging_iterates_tol": 1e5},
    {"nlp_lower_bound_inf": -1e5},
    {"nlp_upper_bound_inf": 1e10},
    # NLP preprocessing
    {"fixed_variable_treatment": "relax_bounds"},
    {"dependency_detector": "mumps"},
    # both string and bool spellings are exercised for the same option
    {"dependency_detection_with_rhs": "no"},
    {"dependency_detection_with_rhs": False},
    {"kappa_d": 1e-7},
    {"bound_relax_factor": 1e-12},
    {"honor_original_bounds": "yes"},
    {"check_derivatives_for_naninf": True},
    {"jac_c_constant": True},
    {"jac_d_constant": True},
    {"hessian_constant": True},
    # scaling
    {"nlp_scaling_method": None},
    {"obj_scaling_factor": 1.1},
    {"nlp_scaling_max_gradient": 200},
    {"nlp_scaling_obj_target_gradient": 0.2},
    {"nlp_scaling_constr_target_gradient": 0},
    {"nlp_scaling_constr_target_gradient": 2e-9},
    {"nlp_scaling_min_value": 1e-9},
    # initialization
    {"bound_push": 0.02},
    {"bound_frac": 0.02},
    {"slack_bound_push": 0.001},
    {"slack_bound_frac": 0.001},
    {"constr_mult_init_max": 5000},
    {"bound_mult_init_val": 1.2},
    {"bound_mult_init_method": "mu-based"},
    {"least_square_init_primal": "yes"},
    {"least_square_init_duals": "yes"},
    # warm start
    {"warm_start_init_point": "yes"},
    {"warm_start_same_structure": False},
    {"warm_start_bound_push": 0.002},
    {"warm_start_bound_frac": 0.002},
    {"warm_start_slack_bound_push": 0.0001},
    {"warm_start_slack_bound_frac": 0.002},
    {"warm_start_mult_bound_push": 0.002},
    {"warm_start_mult_init_max": 1e8},
    {"warm_start_entire_iterate": "yes"},
    {"replace_bounds": "yes"},
    {"skip_finalize_solution_call": "no"},
    {"timing_statistics": "yes"},
    # barrier parameter update
    {"mu_max_fact": 1500},
    {"mu_max": 100_500},
    {"mu_min": 1e-09},
    {"adaptive_mu_globalization": "kkt-error"},
    {"adaptive_mu_kkterror_red_iters": 5},
    {"adaptive_mu_kkterror_red_fact": 0.9},
    {"filter_margin_fact": 1e-4},
    {"filter_max_margin": 0.5},
    {"adaptive_mu_restore_previous_iterate": False},
    {"adaptive_mu_monotone_init_factor": 0.9},
    {"adaptive_mu_kkt_norm_type": "max-norm"},
    {"mu_strategy": "adaptive"},
    {"mu_oracle": "probing"},
    {"mu_oracle": "loqo"},
    {"fixed_mu_oracle": "loqo"},
    {"mu_init": 0.2},
    {"barrier_tol_factor": 10.5},
    {"mu_linear_decrease_factor": 0.01},
    {"mu_superlinear_decrease_power": 1.2},
    {"mu_allow_fast_monotone_decrease": False},
    {"tau_min": 0.75},
    {"sigma_max": 200},
    {"sigma_min": 1e-8},
    {"quality_function_norm_type": "2-norm"},
    {"quality_function_centrality": "log"},
    {"quality_function_balancing_term": "cubic"},
    {"quality_function_max_section_steps": 10},
    {"quality_function_max_section_steps": 5.5},
    {"quality_function_section_sigma_tol": 0.02},
    {"quality_function_section_qf_tol": 0.5},
    # line search
    {"line_search_method": "penalty"},
    {"alpha_red_factor": 0.8},
    {"accept_every_trial_step": True},
    {"accept_after_max_steps": 3},
    {"alpha_for_y": "max"},
    {"alpha_for_y_tol": 5},
    {"tiny_step_tol": 1e-15},
    {"tiny_step_y_tol": 0.02},
    {"watchdog_shortened_iter_trigger": 20},
    {"watchdog_trial_iter_max": 5},
    {"theta_max_fact": 2e5},
    {"theta_min_fact": 0.002},
    {"eta_phi": 0.3},
    {"delta": 0.9},
    {"s_phi": 2.2},
    {"s_theta": 1.5},
    {"gamma_phi": 1e-6},
    {"gamma_theta": 1e-5},
    {"alpha_min_frac": 0.08},
    {"max_soc": 5},
    {"kappa_soc": 0.9},
    {"obj_max_inc": 5.3},
    {"max_filter_resets": 10},
    {"filter_reset_trigger": 3},
    {"corrector_type": "affine"},
    {"skip_corr_if_neg_curv": True},
    {"skip_corr_in_monotone_mode": False},
    {"corrector_compl_avrg_red_fact": 3},
    {"corrector_compl_avrg_red_fact": 3.5},
    {"soc_method": 1},
    {"nu_init": 1e-5},
    {"nu_inc": 1e-5},
    {"rho": 0.2},
    {"kappa_sigma": 1e8},
    {"recalc_y": True},
    {"recalc_y_feas_tol": 1e-4},
    {"slack_move": 1e-11},
    {"constraint_violation_norm_type": "2-norm"},
    # step calculation
    {"mehrotra_algorithm": False},
    {"fast_step_computation": True},
    {"min_refinement_steps": 3},
    {"max_refinement_steps": 12},
    {"residual_ratio_max": 1e-9},
    {"residual_ratio_singular": 1e-4},
    {"residual_improvement_factor": 1.3},
    {"neg_curv_test_tol": 1e-11},
    {"neg_curv_test_reg": False},
    {"max_hessian_perturbation": 1e19},
    {"min_hessian_perturbation": 1e-19},
    {"perturb_inc_fact_first": 50.3},
    {"perturb_inc_fact": 4.4},
    {"perturb_dec_fact": 0.25},
    {"first_hessian_perturbation": 0.002},
    {"jacobian_regularization_value": 1e-7},
    {"jacobian_regularization_exponent": 0.2},
    {"perturb_always_cd": False},
    # restoration phase
    {"expect_infeasible_problem": False},
    {"expect_infeasible_problem_ctol": 0.005},
    {"expect_infeasible_problem_ytol": 1e7},
    {"start_with_resto": False},
    {"soft_resto_pderror_reduction_factor": 0.99},
    {"max_soft_resto_iters": 5},
    {"required_infeasibility_reduction": 0.8},
    {"max_resto_iter": 4_000_000},
    {"evaluate_orig_obj_at_resto_trial": False},
    {"resto_penalty_parameter": 830.4},
    {"resto_proximity_weight": 2.4},
    {"bound_mult_reset_threshold": 804.4},
    {"constr_mult_reset_threshold": 1.4},
    {"resto_failure_feasibility_threshold": 0.4},
    # hessian approximation
    {"limited_memory_aug_solver": "extended"},
    {"limited_memory_max_history": 5},
    {"limited_memory_update_type": "sr1"},
    {"limited_memory_initialization": "scalar2"},
    {"limited_memory_init_val": 0.5},
    {"limited_memory_init_val_max": 2e9},
    {"limited_memory_init_val_min": 2e-9},
    {"limited_memory_max_skipping": 4},
    {"limited_memory_special_for_resto": False},
    {"hessian_approximation_space": "all-variables"},
    # linear solver
    # using ma27,
ma57, ma77, ma86 leads to remaining at the start values # using ma97 leads to segmentation fault {"linear_solver_options": {"mumps_pivtol": 1e-5}}, {"linear_solver_options": {"linear_system_scaling": None}}, {"linear_solver_options": {"ma86_scaling": None}}, {"linear_solver_options": {"mumps_pivtol": 1e-7}}, {"linear_solver_options": {"mumps_pivtolmax": 0.2}}, {"linear_solver_options": {"mumps_mem_percent": 2000}}, {"linear_solver_options": {"mumps_permuting_scaling": 5}}, {"linear_solver_options": {"mumps_pivot_order": 5}}, {"linear_solver_options": {"mumps_scaling": 74}}, {"linear_solver_options": {"mumps_dep_tol": 0.1}}, ] def criterion(x): return (x**2).sum() def derivative(x): return 2 * x @pytest.mark.skipif(not IS_CYIPOPT_INSTALLED, reason="cyipopt not installed.") @pytest.mark.parametrize("algo_options", test_cases) def test_ipopt_algo_options(algo_options): algorithm = Ipopt(**algo_options) res = minimize( fun=criterion, jac=derivative, algorithm=algorithm, x0=np.array([1, 2, 3]), bounds=Bounds( lower=np.array([-np.inf, -np.inf, -np.inf]), upper=np.array([np.inf, np.inf, np.inf]), ), ) aaae(res.params, np.zeros(3), decimal=7) ================================================ FILE: tests/optimagic/optimizers/test_nag_optimizers.py ================================================ import numpy as np import pytest from optimagic import mark from optimagic.optimization.optimize import minimize from optimagic.optimizers.nag_optimizers import ( IS_DFOLS_INSTALLED, _build_options_dict, _change_evals_per_point_interface, _get_fast_start_method, ) from optimagic.parameters.bounds import Bounds from tests.estimagic.test_bootstrap import aaae def test_change_evals_per_point_interface_none(): res = _change_evals_per_point_interface(None) assert res is None def test_change_evals_per_point_interface_func(): def return_args( upper_trustregion_radius, lower_trustregion_radius, n_iterations, n_resets ): return ( upper_trustregion_radius, lower_trustregion_radius, 
n_iterations, n_resets, ) func = _change_evals_per_point_interface(return_args) res = func(delta=0, rho=1, iter=2, nrestarts=3) expected = (0, 1, 2, 3) assert res == expected def test_get_fast_start_method_auto(): res = _get_fast_start_method("auto") assert res == (None, None) def test_get_fast_start_method_jacobian(): res = _get_fast_start_method("jacobian") assert res == (True, False) def test_get_fast_start_method_trust(): res = _get_fast_start_method("trustregion") assert res == (False, True) def test_get_fast_start_method_error(): with pytest.raises(ValueError): _get_fast_start_method("wrong_input") def test_build_options_dict_none(): default = {"a": 1, "b": 2} assert default == _build_options_dict(None, default) def test_build_options_dict_override(): default = {"a": 1, "b": 2} user_input = {"a": 0} res = _build_options_dict(user_input, default) expected = {"a": 0, "b": 2} assert res == expected def test_build_options_dict_invalid_key(): default = {"a": 1, "b": 2} user_input = {"other_key": 0} with pytest.raises(ValueError): _build_options_dict(user_input, default) @mark.least_squares def sos(x): return x @pytest.mark.skipif( not IS_DFOLS_INSTALLED, reason="DFO-LS is not installed.", ) def test_nag_dfols_starting_at_optimum(): # From issue: https://github.com/optimagic-dev/optimagic/issues/538 params = np.zeros(2, dtype=float) res = minimize( fun=sos, params=params, algorithm="nag_dfols", bounds=Bounds(-1 * np.ones_like(params), np.ones_like(params)), ) aaae(res.params, params) ================================================ FILE: tests/optimagic/optimizers/test_neldermead.py ================================================ import numpy as np import pytest from optimagic.optimizers.neldermead import ( _gao_han, _init_algo_params, _init_simplex, _nash, _pfeffer, _varadhan_borchers, neldermead_parallel, ) # function to test def sphere(x, *args, **kwargs): # noqa: ARG001 return (x**2).sum() # unit tests def test_init_algo_params(): # test setting j = 2 adaptive 
= True # outcome result = _init_algo_params(adaptive, j) # expected outcome expected = (1, 2, 0.5, 0.5) assert result == expected def test_init_simplex(): # test setting x = np.array([1, 2, 3]) # outcome result = _init_simplex(x) # expected outcome expected = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3]]) assert (result == expected).all() def test_pfeffer(): # test setting x = np.array([1, 0, 1]) # outcome result = _pfeffer(x) # expected outcome expected = np.array([[1, 0, 1], [1.05, 0, 1], [1, 0.00025, 1], [1, 0, 1.05]]) assert (result == expected).all() def test_nash(): # test setting x = np.array([1, 0, 1]) # outcome result = _nash(x) # expected outcome expected = np.array([[1, 0, 1], [1.1, 0, 1], [1, 0.1, 1], [1, 0, 1.1]]) assert (result == expected).all() def test_gao_han(): # test setting x = np.array([1, 0, 1]) # outcome result = _gao_han(x) # expected outcome expected = np.array([[0.66667, -0.33333, 0.66667], [2, 0, 1], [1, 1, 1], [1, 0, 2]]) np.testing.assert_allclose(result, expected, atol=1e-3) def test_varadhan_borchers(): # test setting x = np.array([1, 0, 1]) # outcome result = _varadhan_borchers(x) # expected outcome expected = np.array( [ [1, 0, 1], [2.3333, 0.3333, 1.3333], [1.3333, 1.3333, 1.3333], [1.3333, 0.3333, 2.3333], ] ) np.testing.assert_allclose(result, expected, atol=1e-3) # general parameter test test_cases = [ {}, {"adaptive": False}, {"init_simplex_method": "nash"}, {"init_simplex_method": "pfeffer"}, {"init_simplex_method": "varadhan_borchers"}, ] @pytest.mark.parametrize("algo_options", test_cases) def test_neldermead_correct_algo_options(algo_options): res = neldermead_parallel( criterion=sphere, x=np.array([1, -5, 3]), **algo_options, ) np.testing.assert_allclose(res["solution_x"], np.zeros(3), atol=5e-4) # test if maximum number of iterations works def test_fides_stop_after_one_iteration(): res = neldermead_parallel( criterion=sphere, x=np.array([1, -5, 3]), stopping_maxiter=1, ) assert not res["success"] assert 
res["n_iterations"] == 1 ================================================ FILE: tests/optimagic/optimizers/test_nevergrad.py ================================================ """Test helper functions for nevergrad optimizers.""" from typing import get_args import numpy as np import pytest from numpy.testing import assert_array_almost_equal as aaae from optimagic import algorithms, mark from optimagic.config import IS_NEVERGRAD_INSTALLED from optimagic.optimization.optimize import minimize from optimagic.parameters.bounds import Bounds if IS_NEVERGRAD_INSTALLED: import nevergrad as ng @mark.least_squares def sos(x): return x ### Nonlinear constraints on hold until improved handling. # def dummy_func(): # return lambda x: x # vec_constr = [ # { # "type": "ineq", # "fun": lambda x: [np.prod(x) + 1.0, 2.0 - np.prod(x)], # "jac": dummy_func, # "n_constr": 2, # } # ] # constrs = [ # { # "type": "ineq", # "fun": lambda x: np.prod(x) + 1.0, # "jac": dummy_func, # "n_constr": 1, # }, # { # "type": "ineq", # "fun": lambda x: 2.0 - np.prod(x), # "jac": dummy_func, # "n_constr": 1, # }, # ] # def test_process_nonlinear_constraints(): # got = _process_nonlinear_constraints(vec_constr) # assert len(got) == 2 # def test_get_constraint_evaluations(): # x = np.array([1, 1]) # got = _get_constraint_evaluations(constrs, x) # expected = [np.array([-2.0]), np.array([-1.0])] # assert got == expected # def test_batch_constraint_evaluations(): # x = np.array([1, 1]) # x_list = [x] * 2 # got = _batch_constraint_evaluations(constrs, x_list, 2) # expected = [[np.array([-2.0]), np.array([-1.0])]] * 2 # assert got == expected ### # test if all optimizers listed in Literal type hint are valid attributes @pytest.mark.skipif(not IS_NEVERGRAD_INSTALLED, reason="nevergrad not installed") def test_meta_optimizers_are_valid(): opt = algorithms.NevergradMeta optimizers = get_args(opt.__annotations__["optimizer"]) for optimizer in optimizers: try: getattr(ng.optimizers, optimizer) except AttributeError: 
pytest.fail(f"Optimizer '{optimizer}' not found in Nevergrad") @pytest.mark.skipif(not IS_NEVERGRAD_INSTALLED, reason="nevergrad not installed") def test_ngopt_optimizers_are_valid(): opt = algorithms.NevergradNGOpt optimizers = get_args(opt.__annotations__["optimizer"]) for optimizer in optimizers: try: getattr(ng.optimizers, optimizer) except AttributeError: pytest.fail(f"Optimizer '{optimizer}' not found in Nevergrad") # list of available optimizers in nevergrad_meta NEVERGRAD_META = get_args(algorithms.NevergradMeta.__annotations__["optimizer"]) # list of available optimizers in nevergrad_ngopt NEVERGRAD_NGOPT = get_args(algorithms.NevergradNGOpt.__annotations__["optimizer"]) # test stochastic_global_algorithm_on_sum_of_squares @pytest.mark.slow @pytest.mark.parametrize("algorithm", NEVERGRAD_META) @pytest.mark.skipif(not IS_NEVERGRAD_INSTALLED, reason="nevergrad not installed") def test_meta_optimizers_with_stochastic_global_algorithm_on_sum_of_squares(algorithm): res = minimize( fun=sos, params=np.array([0.35, 0.35]), bounds=Bounds(lower=np.array([0.2, -0.5]), upper=np.array([1, 0.5])), algorithm=algorithms.NevergradMeta(algorithm), collect_history=False, skip_checks=True, algo_options={"seed": 12345}, ) assert res.success in [True, None] aaae(res.params, np.array([0.2, 0]), decimal=1) @pytest.mark.slow @pytest.mark.parametrize("algorithm", NEVERGRAD_NGOPT) @pytest.mark.skipif(not IS_NEVERGRAD_INSTALLED, reason="nevergrad not installed") def test_ngopt_optimizers_with_stochastic_global_algorithm_on_sum_of_squares(algorithm): res = minimize( fun=sos, params=np.array([0.35, 0.35]), bounds=Bounds(lower=np.array([0.2, -0.5]), upper=np.array([1, 0.5])), algorithm=algorithms.NevergradNGOpt(algorithm), collect_history=False, skip_checks=True, algo_options={"seed": 12345}, ) assert res.success in [True, None] aaae(res.params, np.array([0.2, 0]), decimal=1) ================================================ FILE: tests/optimagic/optimizers/test_pounders_integration.py 
================================================
"""Test suite for the internal pounders interface."""

import sys
from functools import partial
from itertools import product

import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_array_almost_equal as aaae

from optimagic.optimizers.pounders import internal_solve_pounders
from tests.optimagic.optimizers._pounders.test_pounders_unit import FIXTURES_DIR


def load_history(start_vec, solver_sub):
    """Load pre-computed x- and criterion-histories from CSV fixtures.

    Fixture file names encode the start vector (3-decimal repr) and the
    trust-region subsolver name.
    """
    start_vec_str = np.array2string(
        start_vec, precision=3, separator=",", suppress_small=False
    )
    history_x = np.genfromtxt(
        FIXTURES_DIR / f"history_x_{start_vec_str}_{solver_sub}_3_8.csv",
        delimiter=",",
    )
    history_criterion = np.genfromtxt(
        FIXTURES_DIR / f"history_criterion_{start_vec_str}_{solver_sub}_3_8.csv",
        delimiter=",",
    )
    return history_x, history_criterion


@pytest.fixture()
def criterion():
    # Residual function of an exponential model fitted to the example data.
    data = pd.read_csv(FIXTURES_DIR / "pounders_example_data.csv")
    endog = np.asarray(data["y"])
    exog = np.asarray(data["t"])

    def func(x: np.ndarray, exog: np.ndarray, endog: np.ndarray) -> np.ndarray:
        """User provided residual function."""
        return endog - np.exp(-x[0] * exog) / (x[1] + x[2] * exog)

    return partial(func, exog=exog, endog=endog)


@pytest.fixture()
def pounders_options():
    # Options of the outer pounders trust-region loop.
    out = {
        "delta": 0.1,
        "delta_min": 1e-6,
        "delta_max": 1e6,
        "gamma0": 0.5,
        "gamma1": 2.0,
        "theta1": 1e-5,
        "theta2": 1e-4,
        "eta0": 0.0,
        "eta1": 0.1,
        "c1": np.sqrt(3),
        "c2": 10,
        "lower_bounds": None,
        "upper_bounds": None,
        "maxiter": 200,
    }
    return out


@pytest.fixture()
def trustregion_subproblem_options():
    # Options forwarded to the trust-region subproblem solvers.
    out = {
        "maxiter": 50,
        "maxiter_gradient_descent": 5,
        "gtol_abs": 1e-8,
        "gtol_rel": 1e-8,
        "gtol_scaled": 0,
        "gtol_abs_cg": 1e-8,
        "gtol_rel_cg": 1e-6,
        "k_easy": 0.1,
        "k_hard": 0.2,
    }
    return out


# Every start vector is combined with every conjugate-gradient routine;
# two extra start vectors are only run with plain "cg".
start_vec = [np.array([0.15, 0.008, 0.01], dtype=np.float64)]
cg_routine = ["cg", "steihaug_toint", "trsbox"]
universal_tests = list(product(start_vec, cg_routine))
specific_tests = [
    (np.array([1e-6, 1e-6, 1e-6]), "cg"),
    (np.array([1e-3, 1e-3, 1e-3]), "cg"),
]
TEST_CASES = universal_tests + specific_tests


@pytest.mark.skipif(sys.platform == "win32", reason="Not accurate on Windows.")
@pytest.mark.skipif(
    sys.platform == "linux" and sys.version_info[:2] >= (3, 10),
    reason="Not accurate on Linux with Python 3.10 or higher.",
)
@pytest.mark.parametrize("start_vec, conjugate_gradient_method_sub", TEST_CASES)
def test_bntr(
    start_vec,
    conjugate_gradient_method_sub,
    criterion,
    pounders_options,
    trustregion_subproblem_options,
):
    # Solve the example problem with the bounded Newton trust-region ("bntr")
    # subsolver and compare the solution against the known optimum.
    solver_sub = "bntr"
    gtol_abs = 1e-8
    gtol_rel = 1e-8
    gtol_scaled = 0

    def batch_fun(x_list, n_cores):
        # Serial batch evaluator; n_cores is accepted but ignored.
        return [criterion(x) for x in x_list]

    result = internal_solve_pounders(
        x0=start_vec,
        criterion=criterion,
        gtol_abs=gtol_abs,
        gtol_rel=gtol_rel,
        gtol_scaled=gtol_scaled,
        maxinterp=2 * len(start_vec) + 1,
        solver_sub=solver_sub,
        conjugate_gradient_method_sub=conjugate_gradient_method_sub,
        maxiter_sub=trustregion_subproblem_options["maxiter"],
        maxiter_gradient_descent_sub=trustregion_subproblem_options[
            "maxiter_gradient_descent"
        ],
        gtol_abs_sub=trustregion_subproblem_options["gtol_abs"],
        gtol_rel_sub=trustregion_subproblem_options["gtol_rel"],
        gtol_scaled_sub=trustregion_subproblem_options["gtol_scaled"],
        gtol_abs_conjugate_gradient_sub=trustregion_subproblem_options["gtol_abs_cg"],
        gtol_rel_conjugate_gradient_sub=trustregion_subproblem_options["gtol_rel_cg"],
        k_easy_sub=trustregion_subproblem_options["k_easy"],
        k_hard_sub=trustregion_subproblem_options["k_hard"],
        n_cores=1,
        batch_fun=batch_fun,
        **pounders_options,
    )

    x_expected = np.array([0.1902789114691, 0.006131410288292, 0.01053088353832])
    aaae(result.x, x_expected, decimal=3)


@pytest.mark.parametrize("start_vec", [(np.array([0.15, 0.008, 0.01]))])
def test_gqtpar(start_vec, criterion, pounders_options, trustregion_subproblem_options):
    # Solve the same example problem with the "gqtpar" subsolver.
    solver_sub = "gqtpar"
    gtol_abs = 1e-8
    gtol_rel = 1e-8
    gtol_scaled = 0

    def batch_fun(x_list, n_cores):
        # Serial batch evaluator; n_cores is accepted but ignored.
        return [criterion(x) for x in x_list]

    result = internal_solve_pounders(
        x0=start_vec,
        criterion=criterion,
        gtol_abs=gtol_abs,
        gtol_rel=gtol_rel,
        gtol_scaled=gtol_scaled,
        maxinterp=7,
        solver_sub=solver_sub,
        conjugate_gradient_method_sub="trsbox",
        maxiter_sub=trustregion_subproblem_options["maxiter"],
        maxiter_gradient_descent_sub=trustregion_subproblem_options[
            "maxiter_gradient_descent"
        ],
        gtol_abs_sub=trustregion_subproblem_options["gtol_abs"],
        gtol_rel_sub=trustregion_subproblem_options["gtol_rel"],
        gtol_scaled_sub=trustregion_subproblem_options["gtol_scaled"],
        gtol_abs_conjugate_gradient_sub=trustregion_subproblem_options["gtol_abs_cg"],
        gtol_rel_conjugate_gradient_sub=trustregion_subproblem_options["gtol_rel_cg"],
        k_easy_sub=trustregion_subproblem_options["k_easy"],
        k_hard_sub=trustregion_subproblem_options["k_hard"],
        n_cores=1,
        batch_fun=batch_fun,
        **pounders_options,
    )

    x_expected = np.array([0.1902789114691, 0.006131410288292, 0.01053088353832])
    # Checked to one more decimal than the bntr variant above.
    aaae(result.x, x_expected, decimal=4)


================================================
FILE: tests/optimagic/optimizers/test_pygad_optimizer.py
================================================
"""Test helper functions for PyGAD optimizer."""

import warnings

import pytest

from optimagic.optimizers.pygad_optimizer import (
    AdaptiveMutation,
    InversionMutation,
    RandomMutation,
    ScrambleMutation,
    SwapMutation,
    _convert_mutation_to_pygad_params,
    _create_mutation_from_string,
    _determine_effective_batch_size,
    _get_default_mutation_params,
)


@pytest.mark.parametrize(
    "batch_size, n_cores, expected",
    [
        (None, 1, None),
        (None, 4, 4),
        (10, 4, 10),
        (4, 4, 4),
        (2, 4, 2),
        (5, 1, 5),
        (0, 4, 0),
        (None, 100, 100),
        (1, 1, 1),
    ],
)
def test_determine_effective_batch_size_return_values(batch_size, n_cores, expected):
    # Expected return values per the parametrized table above: explicit batch
    # sizes pass through unchanged; the None cases are resolved as listed.
    result = _determine_effective_batch_size(batch_size, n_cores)
    assert result == expected


@pytest.mark.parametrize(
    "batch_size, n_cores, should_warn",
    [
        (2, 4, True),
        (1, 8, True),
        (0, 4, True),
        (4, 4, False),
        (8, 4, False),
        (None, 4, False),
        (5, 1, False),
        (None, 1, False),
    ],
)
def test_determine_effective_batch_size_warnings(batch_size, n_cores, should_warn):
    # A UserWarning is expected exactly when an explicit batch_size is smaller
    # than n_cores; in the no-warning cases any warning raised is escalated to
    # an error via simplefilter("error").
    if should_warn:
        warning_pattern = (
            f"batch_size \\({batch_size}\\) is smaller than "
            f"n_cores \\({n_cores}\\)\\. This may reduce parallel efficiency\\. "
            f"Consider setting batch_size >= n_cores\\."
        )
        with pytest.warns(UserWarning, match=warning_pattern):
            result = _determine_effective_batch_size(batch_size, n_cores)
        assert result == batch_size
    else:
        with warnings.catch_warnings():
            warnings.simplefilter("error")
            # NOTE(review): result is assigned but never asserted in this
            # branch; the check here is only "no warning is raised".
            result = _determine_effective_batch_size(batch_size, n_cores)


# Tests for _get_default_mutation_params
@pytest.mark.parametrize(
    "mutation_type, expected",
    [
        (
            "random",
            {
                "mutation_type": "random",
                "mutation_probability": None,
                "mutation_percent_genes": "default",
                "mutation_num_genes": None,
                "mutation_by_replacement": False,
            },
        ),
        (
            None,
            {
                "mutation_type": None,
                "mutation_probability": None,
                "mutation_percent_genes": None,
                "mutation_num_genes": None,
"mutation_by_replacement": None, } assert result == expected @pytest.mark.parametrize( "mutation_string", ["random", "swap", "inversion", "scramble", "adaptive"], ) def test_convert_mutation_string(mutation_string): result = _convert_mutation_to_pygad_params(mutation_string) assert result["mutation_type"] == mutation_string assert "mutation_probability" in result assert "mutation_percent_genes" in result assert "mutation_num_genes" in result assert "mutation_by_replacement" in result @pytest.mark.parametrize( "mutation_class", [ RandomMutation, SwapMutation, InversionMutation, ScrambleMutation, AdaptiveMutation, ], ) def test_convert_mutation_class(mutation_class): result = _convert_mutation_to_pygad_params(mutation_class) assert result["mutation_type"] == mutation_class.mutation_type assert "mutation_probability" in result assert "mutation_percent_genes" in result assert "mutation_num_genes" in result assert "mutation_by_replacement" in result def test_convert_mutation_instance(): # Test RandomMutation instance mutation = RandomMutation(probability=0.2, by_replacement=True) result = _convert_mutation_to_pygad_params(mutation) assert result["mutation_type"] == "random" assert result["mutation_probability"] == 0.2 assert result["mutation_by_replacement"] is True # Test SwapMutation instance mutation = SwapMutation() result = _convert_mutation_to_pygad_params(mutation) assert result["mutation_type"] == "swap" # Test AdaptiveMutation instance mutation = AdaptiveMutation(probability_bad=0.3, probability_good=0.1) result = _convert_mutation_to_pygad_params(mutation) assert result["mutation_type"] == "adaptive" assert result["mutation_probability"] == [0.3, 0.1] def test_convert_mutation_custom_function(): def custom_mutation(offspring, ga_instance): return offspring result = _convert_mutation_to_pygad_params(custom_mutation) assert result["mutation_type"] == custom_mutation def test_convert_mutation_invalid_type(): with pytest.raises(ValueError, match="Unsupported 
mutation type"): _convert_mutation_to_pygad_params(123) # Tests for mutation dataclasses def test_random_mutation_default(): mutation = RandomMutation() result = mutation.to_pygad_params() assert result["mutation_type"] == "random" assert result["mutation_probability"] is None assert result["mutation_percent_genes"] == "default" assert result["mutation_num_genes"] is None assert result["mutation_by_replacement"] is False def test_random_mutation_with_parameters(): mutation = RandomMutation( probability=0.15, num_genes=5, percent_genes=20.0, by_replacement=True ) result = mutation.to_pygad_params() assert result["mutation_type"] == "random" assert result["mutation_probability"] == 0.15 assert result["mutation_percent_genes"] == 20.0 assert result["mutation_num_genes"] == 5 assert result["mutation_by_replacement"] is True @pytest.mark.parametrize( "mutation_class, expected_type", [ (SwapMutation, "swap"), (InversionMutation, "inversion"), (ScrambleMutation, "scramble"), ], ) def test_simple_mutations(mutation_class, expected_type): mutation = mutation_class() result = mutation.to_pygad_params() assert result["mutation_type"] == expected_type assert result["mutation_probability"] is None assert result["mutation_percent_genes"] == "default" assert result["mutation_num_genes"] is None assert result["mutation_by_replacement"] is False def test_adaptive_mutation_default(): mutation = AdaptiveMutation() result = mutation.to_pygad_params() assert result["mutation_type"] == "adaptive" assert result["mutation_probability"] == [0.1, 0.05] # Default values assert result["mutation_percent_genes"] is None assert result["mutation_num_genes"] is None assert result["mutation_by_replacement"] is False def test_adaptive_mutation_with_probabilities(): mutation = AdaptiveMutation(probability_bad=0.2, probability_good=0.08) result = mutation.to_pygad_params() assert result["mutation_type"] == "adaptive" assert result["mutation_probability"] == [0.2, 0.08] assert 
result["mutation_percent_genes"] is None assert result["mutation_num_genes"] is None assert result["mutation_by_replacement"] is False def test_adaptive_mutation_with_num_genes(): mutation = AdaptiveMutation(num_genes_bad=10, num_genes_good=5) result = mutation.to_pygad_params() assert result["mutation_type"] == "adaptive" assert result["mutation_probability"] is None assert result["mutation_num_genes"] == [10, 5] assert result["mutation_percent_genes"] is None assert result["mutation_by_replacement"] is False def test_adaptive_mutation_with_percent_genes(): mutation = AdaptiveMutation(percent_genes_bad=25.0, percent_genes_good=10.0) result = mutation.to_pygad_params() assert result["mutation_type"] == "adaptive" assert result["mutation_probability"] is None assert result["mutation_num_genes"] is None assert result["mutation_percent_genes"] == [25.0, 10.0] assert result["mutation_by_replacement"] is False def test_mutation_type_class_variables(): assert RandomMutation.mutation_type == "random" assert SwapMutation.mutation_type == "swap" assert InversionMutation.mutation_type == "inversion" assert ScrambleMutation.mutation_type == "scramble" assert AdaptiveMutation.mutation_type == "adaptive" ================================================ FILE: tests/optimagic/optimizers/test_pygmo_optimizers.py ================================================ """Test optimization helper functions.""" import numpy as np import pytest from optimagic.optimizers.pygmo_optimizers import ( _convert_str_to_int, get_population_size, ) test_cases = [ # popsize, x, lower_bound, expected (55.3, None, None, 55), (None, np.ones(5), 500, 500), (None, np.ones(5), 4, 60), ] @pytest.mark.parametrize("popsize, x, lower_bound, expected", test_cases) def test_determine_population_size(popsize, x, lower_bound, expected): res = get_population_size(population_size=popsize, x=x, lower_bound=lower_bound) assert res == expected def test_convert_str_to_int(): d = {"a": 1, "b": 3} assert 
_convert_str_to_int(d, "a") == 1 assert _convert_str_to_int(d, 1) == 1 with pytest.raises(ValueError): _convert_str_to_int(d, 5) with pytest.raises(ValueError): _convert_str_to_int(d, "hello") ================================================ FILE: tests/optimagic/optimizers/test_pyswarms_optimizers.py ================================================ """Test helper functions in PySwarms optimizers.""" import numpy as np import pytest from numpy.testing import assert_array_equal from optimagic.config import IS_PYSWARMS_INSTALLED from optimagic.optimization.internal_optimization_problem import InternalBounds from optimagic.optimizers.pyswarms_optimizers import ( PyramidTopology, RandomTopology, RingTopology, StarTopology, VonNeumannTopology, _build_velocity_clamp, _create_initial_positions, _get_pyswarms_bounds, _resolve_topology_config, ) RNG = np.random.default_rng(12345) # Test _build_velocity_clamp def test_build_velocity_clamp_both_values(): """Test velocity clamp with both min and max values.""" result = _build_velocity_clamp(-1.0, 1.0) assert result == (-1.0, 1.0) def test_build_velocity_clamp_partial_values(): """Test velocity clamp with only one value provided.""" result = _build_velocity_clamp(-1.0, None) assert result is None result = _build_velocity_clamp(None, 1.0) assert result is None def test_build_velocity_clamp_none_values(): """Test velocity clamp with None values.""" result = _build_velocity_clamp(None, None) assert result is None # Test _get_pyswarms_bounds def test_get_pyswarms_bounds_with_both(): """Test bounds conversion when both lower and upper bounds are provided.""" bounds = InternalBounds(lower=np.array([-2.0, -3.0]), upper=np.array([5.0, 4.0])) result = _get_pyswarms_bounds(bounds) assert result is not None lower, upper = result assert_array_equal(lower, np.array([-2.0, -3.0])) assert_array_equal(upper, np.array([5.0, 4.0])) def test_get_pyswarms_bounds_with_none(): """Test bounds conversion when no bounds are provided.""" bounds = 
InternalBounds(lower=None, upper=None) result = _get_pyswarms_bounds(bounds) assert result is None def test_get_pyswarms_bounds_partial_bounds(): """Test bounds conversion with only one bound provided.""" # Only lower bounds bounds = InternalBounds(lower=np.array([1.0, 2.0]), upper=None) result = _get_pyswarms_bounds(bounds) assert result is None # Only upper bounds bounds = InternalBounds(lower=None, upper=np.array([3.0, 4.0])) result = _get_pyswarms_bounds(bounds) assert result is None def test_get_pyswarms_bounds_with_infinite(): """Test that infinite bounds raise ValueError.""" bounds = InternalBounds( lower=np.array([-np.inf, -1.0]), upper=np.array([1.0, np.inf]) ) with pytest.raises(ValueError, match="PySwarms does not support infinite bounds"): _get_pyswarms_bounds(bounds) # Test _create_initial_positions @pytest.mark.parametrize("center", [0.5, 1.0, 2.0]) def test_create_initial_positions_basic(center): """Test basic initial positions creation.""" x0 = np.array([1.0, 2.0]) n_particles = 5 bounds = (np.array([-5.0, -5.0]), np.array([5.0, 5.0])) init_pos = _create_initial_positions( x0=x0, n_particles=n_particles, bounds=bounds, center=center, rng=RNG ) assert init_pos.shape == (5, 2) assert_array_equal(init_pos[0], x0) # Check all particles are within bounds assert np.all(init_pos >= bounds[0]) assert np.all(init_pos <= bounds[1]) def test_create_initial_positions_no_bounds(): """Test initial positions creation with no bounds.""" x0 = np.array([0.5, 1.5]) n_particles = 3 bounds = None init_pos = _create_initial_positions( x0=x0, n_particles=n_particles, bounds=bounds, center=1.0, rng=RNG ) assert init_pos.shape == (3, 2) expected_x0 = np.array([0.5, 1.0]) assert_array_equal(init_pos[0], expected_x0) assert np.all(init_pos >= 0.0) assert np.all(init_pos <= 1.0) @pytest.mark.skipif(not IS_PYSWARMS_INSTALLED, reason="PySwarms not installed") @pytest.mark.parametrize( ("topology_string", "expected_class_name", "expected_options"), [ ("star", "Star", {}), 
        ("ring", "Ring", {"k": 3, "p": 2}),
        ("vonneumann", "VonNeumann", {"p": 2, "r": 1}),
        ("random", "Random", {"k": 3}),
        ("pyramid", "Pyramid", {}),
    ],
)
def test_resolve_topology_config_by_string(
    topology_string, expected_class_name, expected_options
):
    """Test topology resolution with string names."""
    # A plain string must resolve to the matching pyswarms topology class
    # together with its default iteration options.
    topology, options = _resolve_topology_config(topology_string)
    assert topology.__class__.__name__ == expected_class_name
    assert options == expected_options


@pytest.mark.skipif(not IS_PYSWARMS_INSTALLED, reason="PySwarms not installed")
@pytest.mark.parametrize(
    ("config_instance", "expected_class_name", "expected_options"),
    [
        (StarTopology(), "Star", {}),
        (RingTopology(k_neighbors=5, p_norm=1, static=True), "Ring", {"k": 5, "p": 1}),
        (
            VonNeumannTopology(p_norm=1, range_param=2),
            "VonNeumann",
            {"p": 1, "r": 2},
        ),
        (RandomTopology(k_neighbors=4, static=False), "Random", {"k": 4}),
        (PyramidTopology(static=True), "Pyramid", {}),
    ],
)
def test_resolve_topology_config_by_instance(
    config_instance, expected_class_name, expected_options
):
    """Test topology resolution with instances."""
    # Dataclass-style config objects must map to the same class/option pairs
    # as their string counterparts, with renamed fields (e.g. k_neighbors -> k).
    topology, options = _resolve_topology_config(config_instance)
    # Check the class name and options
    assert topology.__class__.__name__ == expected_class_name
    assert options == expected_options
    # A static flag on the config must be forwarded to the topology verbatim.
    if hasattr(config_instance, "static"):
        assert topology.static == config_instance.static


@pytest.mark.skipif(not IS_PYSWARMS_INSTALLED, reason="PySwarms not installed")
def test_resolve_topology_config_invalid_string():
    """Test topology resolution with invalid string."""
    with pytest.raises(ValueError, match="Unknown topology string: 'invalid'"):
        _resolve_topology_config("invalid")


@pytest.mark.skipif(not IS_PYSWARMS_INSTALLED, reason="PySwarms not installed")
def test_resolve_topology_config_invalid_type():
    """Test topology resolution with invalid type."""
    # Anything that is neither a known string nor a config instance is a
    # TypeError, not a ValueError.
    with pytest.raises(TypeError, match="Unsupported topology configuration type"):
        _resolve_topology_config(123)
================================================ FILE: tests/optimagic/optimizers/test_tao_optimizers.py ================================================ """Test the wrapper around pounders.""" import functools import numpy as np import pandas as pd import pytest from optimagic.config import IS_PETSC4PY_INSTALLED from optimagic.optimization.optimize import minimize from optimagic.utilities import get_rng if not IS_PETSC4PY_INSTALLED: pytestmark = pytest.mark.skip(reason="petsc4py is not installed.") NUM_AGENTS = 2_000 from optimagic import mark def get_random_params( length, rng, # noqa: ARG001 low=0, high=1, lower_bound=-np.inf, upper_bound=np.inf, ): params = pd.DataFrame( { "value": np.random.uniform(low, high, size=length), "lower_bound": lower_bound, "upper_bound": upper_bound, } ) return params def test_robustness(): rng = get_rng(5471) true_params = get_random_params(2, rng) start_params = true_params.copy() start_params["value"] = get_random_params(2, rng)["value"] exog, endog = _simulate_ols_sample(NUM_AGENTS, true_params) criterion_func = mark.least_squares( functools.partial(_ols_criterion, endog=endog, exog=exog) ) result = minimize(criterion_func, start_params, "tao_pounders") x = np.column_stack([np.ones_like(exog), exog]) y = endog.reshape(len(endog), 1) expected = np.linalg.lstsq(x, y, rcond=None)[0].flatten() np.testing.assert_almost_equal( result.params["value"].to_numpy(), expected, decimal=6 ) def test_box_constr(): rng = get_rng(5472) true_params = get_random_params(2, rng, 0.3, 0.4, 0, 0.3) start_params = true_params.copy() start_params["value"] = get_random_params(2, rng, 0.1, 0.2)["value"] exog, endog = _simulate_ols_sample(NUM_AGENTS, true_params) criterion_func = mark.least_squares( functools.partial(_ols_criterion, endog=endog, exog=exog) ) result = minimize(criterion_func, start_params, "tao_pounders") assert 0 <= result.params["value"].to_numpy()[0] <= 0.3 assert 0 <= result.params["value"].to_numpy()[1] <= 0.3 def test_max_iters(): rng 
= get_rng(5473) true_params = get_random_params(2, rng, 0.3, 0.4, 0, 0.3) start_params = true_params.copy() start_params["value"] = get_random_params(2, rng, 0.1, 0.2)["value"] exog, endog = _simulate_ols_sample(NUM_AGENTS, true_params) criterion_func = mark.least_squares( functools.partial(_ols_criterion, endog=endog, exog=exog) ) result = minimize( criterion_func, start_params, "tao_pounders", algo_options={"stopping.maxiter": 25}, ) assert result.message in ("user defined", "step size small") def test_grtol(): rng = get_rng(5474) true_params = get_random_params(2, rng, 0.3, 0.4, 0, 0.3) start_params = true_params.copy() start_params["value"] = get_random_params(2, rng, 0.1, 0.2)["value"] exog, endog = _simulate_ols_sample(NUM_AGENTS, true_params) criterion_func = mark.least_squares( functools.partial(_ols_criterion, endog=endog, exog=exog) ) result = minimize( criterion_func, start_params, "tao_pounders", algo_options={ "convergence.gtol_abs": False, "convergence.gtol_scaled": False, }, ) assert result.message in ( "relative_gradient_tolerance below critical value", "step size small", ) def test_gatol(): rng = get_rng(5475) true_params = get_random_params(2, rng, 0.3, 0.4, 0, 0.3) start_params = true_params.copy() start_params["value"] = get_random_params(2, rng, 0.1, 0.2)["value"] exog, endog = _simulate_ols_sample(NUM_AGENTS, true_params) criterion_func = mark.least_squares( functools.partial(_ols_criterion, endog=endog, exog=exog) ) result = minimize( criterion_func, start_params, "tao_pounders", algo_options={ "convergence.gtol_rel": False, "convergence.gtol_scaled": False, }, ) assert result.message in ( "absolute_gradient_tolerance below critical value", "step size small", ) def test_gttol(): rng = get_rng(5476) true_params = get_random_params(2, rng, 0.3, 0.4, 0, 0.3) start_params = true_params.copy() start_params["value"] = get_random_params(2, rng, 0.1, 0.2)["value"] exog, endog = _simulate_ols_sample(NUM_AGENTS, true_params) criterion_func = 
mark.least_squares( functools.partial(_ols_criterion, endog=endog, exog=exog) ) result = minimize( criterion_func, start_params, "tao_pounders", algo_options={ "convergence.gtol_rel": False, "convergence.gtol_abs": False, }, ) assert result.message in ( "gradient_total_tolerance below critical value", "step size small", ) def test_tol(): rng = get_rng(5477) true_params = get_random_params(2, rng, 0.3, 0.4, 0, 0.3) start_params = true_params.copy() start_params["value"] = get_random_params(2, rng, 0.1, 0.2)["value"] exog, endog = _simulate_ols_sample(NUM_AGENTS, true_params) criterion_func = mark.least_squares( functools.partial(_ols_criterion, endog=endog, exog=exog) ) minimize( criterion_func, start_params, "tao_pounders", algo_options={ "convergence.gtol_abs": 1e-7, "convergence.gtol_rel": 1e-7, "convergence.gtol_scaled": 1e-9, }, ) def _ols_criterion(x, endog, exog): return endog - x.loc[0, "value"] - x.loc[1, "value"] * exog def _simulate_ols_sample(num_agents, paras): rng = get_rng(seed=1234) exog = rng.uniform(-5, 5, num_agents) error_term = rng.normal(0, 1, num_agents) endog = paras.at[0, "value"] + paras.at[1, "value"] * exog + error_term return exog, endog ================================================ FILE: tests/optimagic/optimizers/test_tranquilo.py ================================================ import numpy as np import pytest from optimagic.exceptions import NotInstalledError from optimagic.optimizers.tranquilo import Tranquilo, TranquiloLS @pytest.fixture() def mock_problem(): """Create a minimal mock of InternalOptimizationProblem.""" class MockBounds: lower = np.array([-1.0, -1.0]) upper = np.array([1.0, 1.0]) class MockProblem: bounds = MockBounds() def batch_fun(self, xs): return [np.sum(x**2) for x in xs] return MockProblem() def test_tranquilo_raises_if_version_too_old(monkeypatch, mock_problem): import optimagic.optimizers.tranquilo as tranquilo_mod monkeypatch.setattr( tranquilo_mod, "IS_TRANQUILO_VERSION_NEWER_OR_EQUAL_TO_0_1_0", False ) 
algo = Tranquilo() x0 = np.array([0.5, 0.5]) with pytest.raises(NotInstalledError, match="tranquilo"): algo._solve_internal_problem(mock_problem, x0) def test_tranquilo_ls_raises_if_version_too_old(monkeypatch, mock_problem): import optimagic.optimizers.tranquilo as tranquilo_mod monkeypatch.setattr( tranquilo_mod, "IS_TRANQUILO_VERSION_NEWER_OR_EQUAL_TO_0_1_0", False ) algo = TranquiloLS() x0 = np.array([0.5, 0.5]) with pytest.raises(NotInstalledError, match="tranquilo"): algo._solve_internal_problem(mock_problem, x0) ================================================ FILE: tests/optimagic/parameters/test_block_trees.py ================================================ import numpy as np import pandas as pd import pytest from numpy.testing import assert_array_equal from pybaum import tree_equal from pybaum import tree_just_flatten as tree_leaves from optimagic import second_derivative from optimagic.parameters.block_trees import ( block_tree_to_hessian, block_tree_to_matrix, hessian_to_block_tree, matrix_to_block_tree, ) from optimagic.parameters.tree_registry import get_registry def test_matrix_to_block_tree_array_and_scalar(): t = {"a": 1.0, "b": np.arange(2)} calculated = matrix_to_block_tree(np.arange(9).reshape(3, 3), t, t) expected = { "a": {"a": np.array(0), "b": np.array([1, 2])}, "b": {"a": np.array([3, 6]), "b": np.array([[4, 5], [7, 8]])}, } assert _tree_equal_up_to_dtype(calculated, expected) def test_matrix_to_block_tree_only_params_dfs(): tree = { "a": pd.DataFrame(index=["a", "b"]).assign(value=[1, 2]), "b": pd.DataFrame(index=["j", "k", "l"]).assign(value=[3, 4, 5]), } calculated = matrix_to_block_tree(np.arange(25).reshape(5, 5), tree, tree) expected = { "a": { "a": pd.DataFrame([[0, 1], [5, 6]], columns=["a", "b"], index=["a", "b"]), "b": pd.DataFrame( [[2, 3, 4], [7, 8, 9]], columns=["j", "k", "l"], index=["a", "b"] ), }, "b": { "a": pd.DataFrame( [[10, 11], [15, 16], [20, 21]], index=["j", "k", "l"], columns=["a", "b"], ), "b": pd.DataFrame( [[12, 
13, 14], [17, 18, 19], [22, 23, 24]], index=["j", "k", "l"], columns=["j", "k", "l"], ), }, } assert _tree_equal_up_to_dtype(calculated, expected) def test_matrix_to_block_tree_single_element(): tree1 = {"a": 0} tree2 = {"b": 1, "c": 2} block_tree = {"a": {"b": 0, "c": 1}} matrix = np.array([[0, 1]]) calculated = matrix_to_block_tree(matrix, tree1, tree2) assert tree_equal(block_tree, calculated) # one params df (make sure we don't get a list back) # dataframe and scalar # tests against jax def test_block_tree_to_matrix_array_and_scalar(): t1 = {"c": np.arange(3), "d": (2.0, 1)} t2 = {"a": 1.0, "b": np.arange(2)} expected = np.arange(15).reshape(5, 3) block_tree = { "c": {"a": np.array([0, 3, 6]), "b": np.array([[1, 2], [4, 5], [7, 8]])}, "d": ( {"a": np.array(9), "b": np.array([10, 11])}, {"a": np.array(12), "b": np.array([13, 14])}, ), } calculated = block_tree_to_matrix(block_tree, t1, t2) assert_array_equal(expected, calculated) def test_block_tree_to_matrix_only_params_dfs(): expected = np.arange(25).reshape(5, 5) tree = { "a": pd.DataFrame(index=["a", "b"]).assign(value=[1, 2]), "b": pd.DataFrame(index=["j", "k", "l"]).assign(value=[3, 4, 5]), } block_tree = { "a": { "a": pd.DataFrame([[0, 1], [5, 6]], columns=["a", "b"], index=["a", "b"]), "b": pd.DataFrame( [[2, 3, 4], [7, 8, 9]], columns=["j", "k", "l"], index=["a", "b"] ), }, "b": { "a": pd.DataFrame( [[10, 11], [15, 16], [20, 21]], index=["j", "k", "l"], columns=["a", "b"], ), "b": pd.DataFrame( [[12, 13, 14], [17, 18, 19], [22, 23, 24]], index=["j", "k", "l"], columns=["j", "k", "l"], ), }, } calculated = block_tree_to_matrix(block_tree, tree, tree) assert_array_equal(expected, calculated) def test_block_tree_to_hessian_bijection(): params = {"a": np.arange(4), "b": [{"c": (1, 2), "d": np.array([5, 6])}]} f_tree = {"e": np.arange(3), "f": (5, 6, [7, 8, {"g": 1.0}])} registry = get_registry(extended=True) n_p = len(tree_leaves(params, registry=registry)) n_f = len(tree_leaves(f_tree, registry=registry)) 
expected = np.arange(n_f * n_p**2).reshape(n_f, n_p, n_p) block_hessian = hessian_to_block_tree(expected, f_tree, params) got = block_tree_to_hessian(block_hessian, f_tree, params) assert_array_equal(expected, got) def test_hessian_to_block_tree_bijection(): params = {"a": np.arange(4), "b": [{"c": (1, 2), "d": np.array([5, 6])}]} def func(params): return {"e": params["a"] ** 3, "f": (params["b"][0]["c"][1] / 0.5)} expected = second_derivative(func, params).derivative hessian = block_tree_to_hessian(expected, func(params), params) got = hessian_to_block_tree(hessian, func(params), params) _tree_equal_up_to_dtype(expected, got) def test_block_tree_to_matrix_valueerror(): # test that value error is raised when dimensions don't match inner = {"a": 1, "b": 1} outer = 1 block_tree = {"a": 1} # should have same structure as inner with pytest.raises(ValueError): block_tree_to_matrix(block_tree, inner, outer) def _tree_equal_up_to_dtype(left, right): # does not compare dtypes for pandas.DataFrame return tree_equal(left, right, equality_checkers={pd.DataFrame: _frame_equal}) def _frame_equal(left, right): try: pd.testing.assert_frame_equal(left, right, check_dtype=False) return True except AssertionError: return False ================================================ FILE: tests/optimagic/parameters/test_bounds.py ================================================ import numpy as np import pandas as pd import pytest from numpy.testing import assert_array_equal from optimagic.exceptions import InvalidBoundsError from optimagic.parameters.bounds import ( Bounds, _get_fast_path_bounds, get_internal_bounds, pre_process_bounds, ) @pytest.fixture() def pytree_params(): pytree_params = { "delta": 0.95, "utility": pd.DataFrame( [[0.5, 0]] * 3, index=["a", "b", "c"], columns=["value", "lower_bound"] ), "probs": np.array([[0.8, 0.2], [0.3, 0.7]]), } return pytree_params @pytest.fixture() def array_params(): return np.arange(2) def test_pre_process_bounds_trivial_case(): got = 
pre_process_bounds(Bounds(lower=[0], upper=[1])) expected = Bounds(lower=[0], upper=[1]) assert got == expected def test_pre_process_bounds_none_case(): assert pre_process_bounds(None) is None def test_pre_process_bounds_sequence(): got = pre_process_bounds([(0, 1), (None, 1)]) expected = Bounds(lower=[0, -np.inf], upper=[1, 1]) assert_array_equal(got.lower, expected.lower) assert_array_equal(got.upper, expected.upper) def test_pre_process_bounds_invalid_type(): with pytest.raises(InvalidBoundsError): pre_process_bounds(1) def test_get_bounds_subdataframe(pytree_params): upper_bounds = { "utility": pd.DataFrame([[2]] * 2, index=["b", "c"], columns=["value"]), } lower_bounds = { "delta": 0, "utility": pd.DataFrame([[1]] * 2, index=["a", "b"], columns=["value"]), } bounds = Bounds(lower=lower_bounds, upper=upper_bounds) lb, ub = get_internal_bounds(pytree_params, bounds=bounds) assert np.all(lb[1:3] == np.ones(2)) assert np.all(ub[2:4] == 2 * np.ones(2)) TEST_CASES = [ Bounds(lower={"delta": [0, -1]}, upper=None), Bounds(lower={"probs": 1}, upper=None), Bounds(lower={"probs": np.array([0, 1])}, upper=None), # wrong size lower bounds Bounds(lower=None, upper={"probs": np.array([0, 1])}), # wrong size upper bounds ] @pytest.mark.parametrize("bounds", TEST_CASES) def test_get_bounds_error(pytree_params, bounds): with pytest.raises(InvalidBoundsError): get_internal_bounds(pytree_params, bounds=bounds) def test_get_bounds_no_arguments(pytree_params): got_lower, got_upper = get_internal_bounds(pytree_params) expected_lower = np.array([-np.inf] + 3 * [0] + 4 * [-np.inf]) assert_array_equal(got_lower, expected_lower) assert got_upper is None def test_get_bounds_with_lower_bounds(pytree_params): lower_bounds = {"delta": 0.1} bounds = Bounds(lower=lower_bounds) got_lower, got_upper = get_internal_bounds(pytree_params, bounds=bounds) expected_lower = np.array([0.1] + 3 * [0] + 4 * [-np.inf]) assert_array_equal(got_lower, expected_lower) assert got_upper is None def 
test_get_bounds_with_upper_bounds(pytree_params): upper_bounds = { "utility": pd.DataFrame([[1]] * 3, index=["a", "b", "c"], columns=["value"]), } bounds = Bounds(upper=upper_bounds) got_lower, got_upper = get_internal_bounds(pytree_params, bounds=bounds) expected_lower = np.array([-np.inf] + 3 * [0] + 4 * [-np.inf]) expected_upper = np.array([np.inf] + 3 * [1] + 4 * [np.inf]) assert_array_equal(got_lower, expected_lower) assert_array_equal(got_upper, expected_upper) def test_get_bounds_numpy(array_params): got_lower, got_upper = get_internal_bounds(array_params) assert got_lower is None assert got_upper is None def test_get_bounds_numpy_error(array_params): # lower bounds larger than upper bounds bounds = Bounds(lower=np.ones_like(array_params), upper=np.zeros_like(array_params)) with pytest.raises(InvalidBoundsError): get_internal_bounds( array_params, bounds=bounds, ) def test_get_fast_path_bounds_both_none(): got_lower, got_upper = _get_fast_path_bounds(Bounds(lower=None, upper=None)) assert got_lower is None assert got_upper is None def test_get_fast_path_bounds_lower_none(): got_lower, got_upper = _get_fast_path_bounds( bounds=Bounds(lower=None, upper=np.array([1, 2, 3])), ) assert_array_equal(got_lower, None) assert_array_equal(got_upper, np.array([1, 2, 3])) ================================================ FILE: tests/optimagic/parameters/test_check_constraints.py ================================================ import numpy as np import pytest import optimagic as om from optimagic.exceptions import InvalidParamsError from optimagic.parameters.check_constraints import _iloc from optimagic.parameters.constraint_tools import check_constraints def test_iloc(): dictionary = { "index": np.array(["a", "b", "c"]), "lower_bounds": np.array([0, 0, 0]), "upper_bounds": np.array([1, 1, 1]), "is_fixed_to_value": np.array([False, False, True]), } position = [0, 2] expected_result = { "index": np.array(["a", "c"]), "lower_bounds": np.array([0, 0]), "upper_bounds": 
np.array([1, 1]), "is_fixed_to_value": np.array([False, True]), } result = _iloc(dictionary, position) assert len(result) == len(expected_result) for k, v in expected_result.items(): assert k in result assert np.array_equal(result[k], v) def test_check_constraints_are_satisfied_type_equality(): with pytest.raises(InvalidParamsError): check_constraints( params=np.array([1, 2, 3]), constraints=om.EqualityConstraint(lambda x: x[:2]), ) def test_check_constraints_are_satisfied_type_increasing(): with pytest.raises(InvalidParamsError): check_constraints( params=np.array([1, 2, 3, 2, 4]), constraints=om.IncreasingConstraint(lambda x: x[[1, 2, 3]]), ) def test_check_constraints_are_satisfied_type_decreasing(): with pytest.raises(InvalidParamsError): check_constraints( params=np.array([1, 2, 3, 2, 4]), constraints=om.DecreasingConstraint(lambda x: x[[0, 1, 3]]), ) def test_check_constraints_are_satisfied_type_pairwise_equality(): with pytest.raises(InvalidParamsError): check_constraints( params=np.array([1, 2, 3, 3, 4]), constraints=om.PairwiseEqualityConstraint( selectors=[lambda x: x[[0, 4]], lambda x: x[[3, 2]]] ), ) def test_check_constraints_are_satisfied_type_probability(): with pytest.raises(InvalidParamsError): check_constraints( params=np.array([0.10, 0.25, 0.50, 1, 0.7]), constraints=om.ProbabilityConstraint(lambda x: x[[0, 1, 2, 4]]), ) def test_check_constraints_are_satisfied_type_linear_lower_bound(): with pytest.raises(InvalidParamsError): check_constraints( params=np.ones(5), constraints=om.LinearConstraint( selector=lambda x: x[[0, 2, 3, 4]], lower_bound=1.1, weights=0.25 ), ) def test_check_constraints_are_satisfied_type_linear_upper_bound(): with pytest.raises(InvalidParamsError): check_constraints( params=np.ones(5), constraints=om.LinearConstraint( selector=lambda x: x[[0, 2, 3, 4]], upper_bound=0.9, weights=0.25 ), ) def test_check_constraints_are_satisfied_type_linear_value(): with pytest.raises(InvalidParamsError): check_constraints( 
params=np.ones(5), constraints=om.LinearConstraint( selector=lambda x: x[[0, 2, 3, 4]], value=2, weights=0.25 ), ) def test_check_constraints_are_satisfied_type_covariance(): with pytest.raises(InvalidParamsError): check_constraints( params=[1, 1, 1, -1, 1, -1], constraints=om.FlatCovConstraint(selector=lambda params: params), ) def test_check_constraints_are_satisfied_type_sdcorr(): with pytest.raises(InvalidParamsError): check_constraints( params=[1, 1, 1, -1, 1, 1], constraints=om.FlatSDCorrConstraint(selector=lambda params: params), ) ================================================ FILE: tests/optimagic/parameters/test_constraint_tools.py ================================================ import pytest import optimagic as om from optimagic.exceptions import InvalidParamsError from optimagic.parameters.constraint_tools import check_constraints, count_free_params def test_count_free_params_no_constraints(): params = {"a": 1, "b": 2, "c": [3, 3]} assert count_free_params(params) == 4 def test_count_free_params_with_constraints(): params = {"a": 1, "b": 2, "c": [3, 3]} constraints = om.EqualityConstraint(lambda x: x["c"]) assert count_free_params(params, constraints=constraints) == 3 def test_check_constraints(): params = {"a": 1, "b": 2, "c": [3, 4]} constraints = om.EqualityConstraint(lambda x: x["c"]) with pytest.raises(InvalidParamsError): check_constraints(params, constraints=constraints) ================================================ FILE: tests/optimagic/parameters/test_conversion.py ================================================ import numpy as np import pytest from numpy.testing import assert_array_almost_equal as aaae from optimagic.parameters.bounds import Bounds from optimagic.parameters.conversion import ( _is_fast_deriv_eval, _is_fast_path, get_converter, ) from optimagic.parameters.scaling import ScalingOptions from optimagic.typing import AggregationLevel def test_get_converter_fast_case(): converter, internal = get_converter( 
params=np.arange(3), constraints=None, bounds=None, func_eval=3, derivative_eval=2 * np.arange(3), solver_type=AggregationLevel.SCALAR, ) aaae(internal.values, np.arange(3)) assert internal.lower_bounds is None assert internal.upper_bounds is None aaae(converter.params_to_internal(np.arange(3)), np.arange(3)) aaae(converter.params_from_internal(np.arange(3)), np.arange(3)) aaae( converter.derivative_to_internal(2 * np.arange(3), np.arange(3)), 2 * np.arange(3), ) def test_get_converter_with_constraints_and_bounds(): bounds = Bounds( lower=np.array([-1, -np.inf, -np.inf]), upper=np.array([np.inf, 10, np.inf]), ) converter, internal = get_converter( params=np.arange(3), constraints=[{"loc": 2, "type": "fixed"}], bounds=bounds, func_eval=3, derivative_eval=2 * np.arange(3), solver_type=AggregationLevel.SCALAR, ) aaae(internal.values, np.arange(2)) aaae(internal.lower_bounds, np.array([-1, -np.inf])) aaae(internal.upper_bounds, np.array([np.inf, 10])) aaae(converter.params_to_internal(np.arange(3)), np.arange(2)) aaae(converter.params_from_internal(np.arange(2)), np.arange(3)) aaae( converter.derivative_to_internal(2 * np.arange(3), np.arange(2)), 2 * np.arange(2), ) def test_get_converter_with_scaling(): bounds = Bounds( lower=np.arange(3) - 1, upper=np.arange(3) + 1, ) converter, internal = get_converter( params=np.arange(3), constraints=None, bounds=bounds, func_eval=3, derivative_eval=2 * np.arange(3), solver_type=AggregationLevel.SCALAR, scaling=ScalingOptions(method="start_values", clipping_value=0.5), ) aaae(internal.values, np.array([0, 1, 1])) aaae(internal.lower_bounds, np.array([-2, 0, 0.5])) aaae(internal.upper_bounds, np.array([2, 2, 1.5])) aaae(converter.params_to_internal(np.arange(3)), np.array([0, 1, 1])) aaae(converter.params_from_internal(np.array([0, 1, 1])), np.arange(3)) aaae( converter.derivative_to_internal(2 * np.arange(3), np.arange(3)), np.array([0, 2, 8]), ) def test_get_converter_with_trees(): params = {"a": 0, "b": 1, "c": 2} converter, 
internal = get_converter( params=params, constraints=None, bounds=None, func_eval={"d": 1, "e": 2}, derivative_eval={"a": 0, "b": 2, "c": 4}, solver_type=AggregationLevel.SCALAR, ) aaae(internal.values, np.arange(3)) aaae(internal.lower_bounds, np.full(3, -np.inf)) aaae(internal.upper_bounds, np.full(3, np.inf)) aaae(converter.params_to_internal(params), np.arange(3)) assert converter.params_from_internal(np.arange(3)) == params aaae( converter.derivative_to_internal(params, np.arange(3)), np.arange(3), ) @pytest.fixture() def fast_kwargs(): kwargs = { "params": np.arange(3), "constraints": None, "solver_type": AggregationLevel.SCALAR, "scaling": None, "derivative_eval": np.arange(3), "add_soft_bounds": False, } return kwargs STILL_FAST = [ ("params", np.arange(3)), ("constraints", []), ] @pytest.mark.parametrize("name, value", STILL_FAST) def test_is_fast_path_when_true(fast_kwargs, name, value): kwargs = fast_kwargs.copy() kwargs[name] = value assert _is_fast_path(**kwargs) SLOW = [ ("params", {"a": 1}), ("params", np.arange(4).reshape(2, 2)), ("constraints", [{}]), ("scaling", ScalingOptions()), ("derivative_eval", {"bla": 3}), ("derivative_eval", np.arange(3).reshape(1, 3)), ("add_soft_bounds", True), ] @pytest.mark.parametrize("name, value", SLOW) def test_is_fast_path_when_false(fast_kwargs, name, value): kwargs = fast_kwargs.copy() kwargs[name] = value assert not _is_fast_path(**kwargs) helper = np.arange(6).reshape(3, 2) FAST_DERIV_CASES = [ (AggregationLevel.LIKELIHOOD, helper), (AggregationLevel.LEAST_SQUARES, helper), (AggregationLevel.SCALAR, None), (AggregationLevel.LIKELIHOOD, None), (AggregationLevel.LEAST_SQUARES, None), ] @pytest.mark.parametrize("key, f", FAST_DERIV_CASES) def test_is_fast_deriv_eval_true(key, f): assert _is_fast_deriv_eval(f, key) SLOW_DERIV_CASES = [ (AggregationLevel.LIKELIHOOD, np.arange(8).reshape(2, 2, 2)), (AggregationLevel.LIKELIHOOD, {"contributions": np.arange(8).reshape(2, 2, 2)}), (AggregationLevel.LEAST_SQUARES, 
np.arange(8).reshape(2, 2, 2)), ( AggregationLevel.LEAST_SQUARES, {"root_contributions": np.arange(8).reshape(2, 2, 2)}, ), ] @pytest.mark.parametrize("key, f", SLOW_DERIV_CASES) def test_is_fast_deriv_eval_false(key, f): assert not _is_fast_deriv_eval(f, key) ================================================ FILE: tests/optimagic/parameters/test_kernel_transformations.py ================================================ from functools import partial from itertools import product import numpy as np import pytest from numpy.testing import assert_array_almost_equal as aaae import optimagic.parameters.kernel_transformations as kt from optimagic.differentiation.derivatives import first_derivative from optimagic.parameters.kernel_transformations import cov_matrix_to_sdcorr_params from optimagic.utilities import get_rng to_test = list(product(range(10, 30, 5), [1234, 5471])) def get_internal_cholesky(dim, seed=0): """Return random internal cholesky values given dimension.""" rng = get_rng(seed) chol = np.tril(rng.normal(size=(dim, dim))) internal = chol[np.tril_indices(len(chol))] return internal def get_external_covariance(dim, seed=0): """Return random external covariance values given dimension.""" rng = get_rng(seed) data = rng.normal(size=(dim, 1000)) cov = np.cov(data) external = cov[np.tril_indices(dim)] return external def get_internal_probability(dim, seed=0): """Return random internal positive values given dimension.""" rng = get_rng(seed) internal = rng.uniform(size=dim) return internal def get_external_probability(dim, seed=0): """Return random internal positive values that sum to one.""" internal = get_internal_probability(dim, seed) external = internal / internal.sum() return external def get_external_sdcorr(dim, seed=0): """Return random external sdcorr values given dimension.""" rng = get_rng(seed) data = rng.normal(size=(dim, 1000)) cov = np.cov(data) external = cov_matrix_to_sdcorr_params(cov) return external @pytest.mark.parametrize("dim, seed", to_test) 
def test_covariance_from_internal_jacobian(dim, seed): # noqa: ARG001 internal = get_internal_cholesky(dim) func = partial(kt.covariance_from_internal, constr=None) numerical_deriv = first_derivative(func, internal) deriv = kt.covariance_from_internal_jacobian(internal, None) aaae(deriv, numerical_deriv.derivative, decimal=3) @pytest.mark.parametrize("dim, seed", to_test) def test_covariance_to_internal_jacobian(dim, seed): # noqa: ARG001 external = get_external_covariance(dim) func = partial(kt.covariance_to_internal, constr=None) numerical_deriv = first_derivative(func, external) deriv = kt.covariance_to_internal_jacobian(external, None) aaae(deriv, numerical_deriv.derivative, decimal=3) @pytest.mark.parametrize("dim, seed", to_test) def test_probability_from_internal_jacobian(dim, seed): # noqa: ARG001 internal = get_internal_probability(dim) func = partial(kt.probability_from_internal, constr=None) numerical_deriv = first_derivative(func, internal) deriv = kt.probability_from_internal_jacobian(internal, None) aaae(deriv, numerical_deriv.derivative, decimal=3) @pytest.mark.parametrize("dim, seed", to_test) def test_probability_to_internal_jacobian(dim, seed): # noqa: ARG001 external = get_external_probability(dim) func = partial(kt.probability_to_internal, constr=None) numerical_deriv = first_derivative(func, external) deriv = kt.probability_to_internal_jacobian(external, None) aaae(deriv, numerical_deriv.derivative, decimal=3) @pytest.mark.parametrize("dim, seed", to_test) def test_sdcorr_from_internal_jacobian(dim, seed): # noqa: ARG001 internal = get_internal_cholesky(dim) func = partial(kt.sdcorr_from_internal, constr=None) numerical_deriv = first_derivative(func, internal) deriv = kt.sdcorr_from_internal_jacobian(internal, None) aaae(deriv, numerical_deriv.derivative, decimal=3) @pytest.mark.parametrize("dim, seed", to_test) def test_sdcorr_to_internal_jacobian(dim, seed): # noqa: ARG001 external = get_external_sdcorr(dim) func = 
partial(kt.sdcorr_to_internal, constr=None) numerical_deriv = first_derivative(func, external) deriv = kt.sdcorr_to_internal_jacobian(external, None) aaae(deriv, numerical_deriv.derivative, decimal=3) ================================================ FILE: tests/optimagic/parameters/test_nonlinear_constraints.py ================================================ import itertools from dataclasses import dataclass import numpy as np import pandas as pd import pytest from numpy.testing import assert_array_equal from pandas.testing import assert_frame_equal from pybaum import tree_just_flatten from optimagic.differentiation.numdiff_options import NumdiffOptions from optimagic.exceptions import InvalidConstraintError from optimagic.parameters.nonlinear_constraints import ( _check_validity_and_return_evaluation, _get_components, _get_selection_indices, _get_transformation, _get_transformation_type, _process_selector, _vector_to_list_of_scalar, equality_as_inequality_constraints, process_nonlinear_constraints, vector_as_list_of_scalar_constraints, ) from optimagic.parameters.tree_registry import get_registry @dataclass class Converter: def params_from_internal(self, x): return x def params_to_internal(self, params): registry = get_registry(extended=True) return np.array(tree_just_flatten(params, registry=registry)) # ====================================================================================== # _get_transformation_type # ====================================================================================== TEST_CASES = [ (0, np.inf, "identity"), # (lower_bounds, upper_bounds, expected) (-1, 2, "stack"), (np.zeros(3), np.ones(3), "stack"), (np.zeros(3), np.tile(np.inf, 3), "identity"), (np.array([1, 2]), np.tile(np.inf, 2), "subtract_lb"), ] @pytest.mark.parametrize("lower_bounds, upper_bounds, expected", TEST_CASES) def test_get_transformation_type(lower_bounds, upper_bounds, expected): got = _get_transformation_type(lower_bounds, upper_bounds) assert got == 
expected # ====================================================================================== # _get_transformation # ====================================================================================== TEST_CASES = [ # (lower_bounds, upper_bounds, case, expected) # noqa: ERA001 (0, 0, "func", {"name": "stack", "out": np.array([1, -1])}), (1, 1, "func", {"name": "stack", "out": np.array([0, 0])}), (0, 0, "derivative", {"name": "stack", "out": np.array([1, -1])}), (1, 1, "derivative", {"name": "stack", "out": np.array([1, -1])}), (1, np.inf, "func", {"name": "subtract_lb", "out": np.array([0])}), (0, np.inf, "derivative", {"name": "identity", "out": np.array([1])}), ] @pytest.mark.parametrize("lower_bounds, upper_bounds, case, expected", TEST_CASES) def test_get_positivity_transform(lower_bounds, upper_bounds, case, expected): transform = _get_transformation(lower_bounds, upper_bounds) got = transform[case](np.array([1])) assert np.all(got == expected["out"]) assert transform["name"] == expected["name"] # ====================================================================================== # _get_selection_indices # ====================================================================================== def test_get_selection_indices(): params = {"a": [0, 1, 2], "b": [3, 4, 5]} selector = lambda p: p["a"] expected = np.array([0, 1, 2], dtype=int) got_index, got_n_params = _get_selection_indices(params, selector) assert got_n_params == 6 assert_array_equal(got_index, expected) # ====================================================================================== # _process_selector # ====================================================================================== TEST_CASES = [ ({"selector": lambda x: x**2}, 10, 100), # (constraint, params, expected) ({"loc": "a"}, pd.Series([0, 1], index=["a", "b"]), 0), ( {"query": "a == 1"}, pd.DataFrame([[1], [0]], columns=["a"]), pd.DataFrame([[1]], columns=["a"]), ), ] @pytest.mark.parametrize("constraint, params, 
expected", TEST_CASES) def test_process_selector(constraint, params, expected): _selector = _process_selector(constraint) got = _selector(params) if isinstance(got, pd.DataFrame): assert_frame_equal(got, expected) else: assert got == expected # ====================================================================================== # _check_validity_nonlinear_constraint # ====================================================================================== TEST_CASES = [ {}, # no fun {"func": 10}, # non-callable fun {"func": lambda x: x, "derivative": 10}, # non-callable jac {"func": lambda x: x}, # no bounds at all { "func": lambda x: x, "value": 1, "lower_bounds": 1, }, # cannot have value and bounds { "func": lambda x: x, "value": 1, "upper_bounds": 1, }, # cannot have value and bounds {"func": lambda x: x}, # needs to have at least one bound {"func": lambda x: x, "lower_bounds": 1, "upper_bounds": 0}, {"func": lambda x: x, "selector": 10}, {"func": lambda x: x, "loc": 10}, {"func": lambda x: x, "query": 10}, ] TEST_CASES = list( itertools.product(TEST_CASES, [np.arange(3), pd.DataFrame({"a": [0, 1, 2]})]) ) @pytest.mark.parametrize("constraint, params", TEST_CASES) def test_check_validity_nonlinear_constraint(constraint, params): with pytest.raises(InvalidConstraintError): _check_validity_and_return_evaluation(constraint, params, skip_checks=False) def test_check_validity_nonlinear_constraint_correct_example(): constr = { "func": lambda x: x, "derivative": np.ones_like, "lower_bounds": np.arange(4), "selector": lambda x: x[:1], } _check_validity_and_return_evaluation( constr, params=np.arange(4), skip_checks=False ) # ====================================================================================== # equality_as_inequality_constraints # ====================================================================================== TEST_CASES = [ ( [ { "type": "ineq", "fun": lambda x: np.array([x]), "jac": lambda x: np.array([[1]]), # noqa: ARG005 "n_constr": 1, } ], 
# constraints "same", # expected ), ( [ { "type": "ineq", "fun": lambda x: np.array([x]), "jac": lambda x: np.array([[1]]), # noqa: ARG005 "n_constr": 1, } ], # constraints [ { "type": "eq", "fun": lambda x: np.array([x, -x]).reshape(-1, 1), "jac": lambda x: np.array([[1], [-1]]), # noqa: ARG005 "n_constr": 1, } ], # expected ), ] @pytest.mark.parametrize("constraints, expected", TEST_CASES) def test_equality_as_inequality_constraints(constraints, expected): got = equality_as_inequality_constraints(constraints) if expected == "same": assert got == constraints for g, c in zip(got, constraints, strict=False): if c["type"] == "eq": assert g["n_constr"] == 2 * c["n_constr"] assert g["type"] == "ineq" # ====================================================================================== # process_nonlinear_constraints # ====================================================================================== def test_process_nonlinear_constraints(): nonlinear_constraints = [ {"type": "nonlinear", "func": lambda x: np.dot(x, x), "value": 1}, { "type": "nonlinear", "func": lambda x: x, "lower_bounds": -1, "upper_bounds": 2, }, ] params = np.array([1.0]) converter = Converter() numdiff_options = NumdiffOptions() got = process_nonlinear_constraints( nonlinear_constraints, params=params, bounds=None, converter=converter, numdiff_options=numdiff_options, skip_checks=False, ) expected = [ {"type": "eq", "fun": lambda x: np.dot(x, x) - 1.0, "n_constr": 1}, { "type": "ineq", "fun": lambda x: np.concatenate((x + 1.0, 2.0 - x), axis=0), "n_constr": 2, }, ] for g, e in zip(got, expected, strict=False): assert g["type"] == e["type"] assert g["n_constr"] == e["n_constr"] for value in [0.1, 0.2, 1.2, -2.0]: x = np.array([value]) assert_array_equal(g["fun"](x), e["fun"](x)) assert "jac" in g assert "tol" in g # ====================================================================================== # vector_as_list_of_scalar_constraints # 
====================================================================================== def test_get_components(): fun = lambda x: np.array([x[0], 2 * x[1]]) jac = lambda x: np.array([[1, 0], [0, 2]]) # noqa: ARG005 fun_component, jac_component = _get_components(fun, jac, idx=1) x = np.array([0, 3]) assert fun_component(x) == 6 assert_array_equal(jac_component(x), np.array([0, 2])) def test_vector_to_list_of_scalar(): constr = { "fun": lambda x: x, "jac": lambda x: np.eye(len(x)), "n_constr": 2, } got = _vector_to_list_of_scalar(constr) for got_constr in got: assert got_constr["n_constr"] == 1 for i in range(2): assert got[i]["fun"](np.arange(2)) == i assert_array_equal(got[i]["jac"](np.arange(2)), np.eye(2)[i]) def test_vector_as_list_of_scalar_constraints(): constr = { "fun": lambda x: x, "jac": lambda x: np.eye(len(x)), "n_constr": 2, } constraints = [constr, constr] got = vector_as_list_of_scalar_constraints(constraints) assert len(got) == 4 ================================================ FILE: tests/optimagic/parameters/test_process_constraints.py ================================================ """Test the pc processing.""" import numpy as np import pandas as pd import pytest import optimagic as om from optimagic.exceptions import InvalidConstraintError from optimagic.parameters.bounds import Bounds from optimagic.parameters.constraint_tools import check_constraints from optimagic.parameters.process_constraints import ( _replace_pairwise_equality_by_equality, ) def test_replace_pairwise_equality_by_equality(): constr = {"indices": [[0, 1], [2, 3]], "type": "pairwise_equality"} expected = [ {"index": [0, 2], "type": "equality"}, {"index": [1, 3], "type": "equality"}, ] calculated = _replace_pairwise_equality_by_equality([constr]) assert calculated == expected @pytest.mark.filterwarnings("ignore:Specifying constraints as a dictionary is") def test_empty_constraints_work(): params = pd.DataFrame() params["value"] = np.arange(5) params["bla"] = list("abcde") 
constraints = [{"query": "bla == 'blubb'", "type": "equality"}] check_constraints(params, constraints) def test_to_many_bounds_in_increasing_constraint_raise_good_error(): with pytest.raises(InvalidConstraintError): check_constraints( params=np.arange(3), bounds=Bounds(lower=np.arange(3) - 1), constraints=om.IncreasingConstraint(selector=lambda x: x[:3]), ) ================================================ FILE: tests/optimagic/parameters/test_process_selectors.py ================================================ import numpy as np import pandas as pd import pytest from numpy.testing import assert_array_equal as aae from pybaum import tree_flatten, tree_just_flatten, tree_unflatten from optimagic.exceptions import InvalidConstraintError from optimagic.parameters.process_selectors import process_selectors from optimagic.parameters.tree_conversion import TreeConverter from optimagic.parameters.tree_registry import get_registry @pytest.mark.parametrize("constraints", [None, []]) def test_process_selectors_no_constraint(constraints): calculated = process_selectors( constraints=constraints, params=np.arange(5), tree_converter=None, param_names=list("abcde"), ) assert calculated == [] @pytest.fixture() def tree_params(): df = pd.DataFrame({"value": [3, 4], "lower_bound": [0, 0]}, index=["c", "d"]) params = ([0, np.array([1, 2]), {"a": df, "b": 5}], 6) return params @pytest.fixture() def tree_params_converter(tree_params): registry = get_registry(extended=True) _, treedef = tree_flatten(tree_params, registry=registry) converter = TreeConverter( params_flatten=lambda params: np.array( tree_just_flatten(params, registry=registry) ), params_unflatten=lambda x: tree_unflatten( treedef, x.tolist(), registry=registry ), derivative_flatten=None, ) return converter @pytest.fixture() def np_params_converter(): converter = TreeConverter( lambda x: x, lambda x: x, lambda x: x, ) return converter @pytest.fixture() def df_params(): df = pd.DataFrame({"value": np.arange(6) + 10}, 
index=list("abcdef")) df.index.name = "name" return df @pytest.fixture() def df_params_converter(df_params): converter = TreeConverter( lambda x: x["value"].to_numpy(), lambda x: df_params.assign(value=x), None, ) return converter def test_process_selectors_tree_selector(tree_params, tree_params_converter): calculated = process_selectors( constraints=[{"type": "equality", "selector": lambda x: x[1]}], params=tree_params, tree_converter=tree_params_converter, param_names=list("abcdefg"), ) aae(calculated[0]["index"], np.array([6])) def test_process_selectors_tree_selectors(tree_params, tree_params_converter): constraints = [ { "type": "pairwise_equality", "selectors": [lambda x: x[1], lambda x: x[0][1][0]], } ] calculated = process_selectors( constraints=constraints, params=tree_params, tree_converter=tree_params_converter, param_names=list("abcdefg"), ) aae(calculated[0]["indices"][0], np.array([6])) aae(calculated[0]["indices"][1], np.array([1])) def test_process_selectors_numpy_array_loc(np_params_converter): calculated = process_selectors( constraints=[{"type": "equality", "loc": [1, 4]}], params=np.arange(6) + 10, tree_converter=np_params_converter, param_names=list("abcdefg"), ) aae(calculated[0]["index"], np.array([1, 4])) def test_process_selectors_numpy_array_locs(np_params_converter): constraints = [ { "type": "pairwise_equality", "locs": [[1, 4], [0, 3]], } ] calculated = process_selectors( constraints=constraints, params=np.arange(6) + 10, tree_converter=np_params_converter, param_names=list("abcdefg"), ) aae(calculated[0]["indices"][0], np.array([1, 4])) aae(calculated[0]["indices"][1], np.array([0, 3])) def test_process_selectors_dataframe_loc(df_params, df_params_converter): constraints = [{"type": "equality", "loc": ["b", "e"]}] calculated = process_selectors( constraints=constraints, params=df_params, tree_converter=df_params_converter, param_names=list("abcdefg"), ) aae(calculated[0]["index"], np.array([1, 4])) def 
test_process_selectors_dataframe_query(df_params, df_params_converter): q = "name == 'b' | name == 'e'" constraints = [{"type": "equality", "query": q}] calculated = process_selectors( constraints=constraints, params=df_params, tree_converter=df_params_converter, param_names=list("abcdefg"), ) aae(calculated[0]["index"], np.array([1, 4])) def test_process_selectors_dataframe_locs(df_params, df_params_converter): constraints = [{"type": "pairwise_equality", "locs": [["b", "e"], ["a", "d"]]}] calculated = process_selectors( constraints=constraints, params=df_params, tree_converter=df_params_converter, param_names=list("abcdefg"), ) aae(calculated[0]["indices"][0], np.array([1, 4])) aae(calculated[0]["indices"][1], np.array([0, 3])) def test_process_selectors_dataframe_queries(df_params, df_params_converter): queries = ["name == 'b' | name == 'e'", "name == 'a' | name == 'd'"] constraints = [{"type": "pairwise_equality", "queries": queries}] calculated = process_selectors( constraints=constraints, params=df_params, tree_converter=df_params_converter, param_names=list("abcdefg"), ) aae(calculated[0]["indices"][0], np.array([1, 4])) aae(calculated[0]["indices"][1], np.array([0, 3])) @pytest.mark.parametrize("field", ["selectors", "queries", "query", "locs"]) def test_process_selectors_numpy_array_invalid_fields(field, np_params_converter): with pytest.raises(InvalidConstraintError): process_selectors( constraints=[{"type": "equality", field: None}], params=np.arange(6), tree_converter=np_params_converter, param_names=list("abcdefg"), ) @pytest.mark.parametrize("field", ["selectors", "queries", "locs"]) def test_process_selectors_dataframe_invalid_fields( field, df_params, df_params_converter ): with pytest.raises(InvalidConstraintError): process_selectors( constraints=[{"type": "equality", field: None}], params=df_params, tree_converter=df_params_converter, param_names=list("abcdefg"), ) @pytest.mark.parametrize("field", ["selectors", "queries", "query", "locs", "loc"]) 
def test_process_selectors_tree_invalid_fields( field, tree_params, tree_params_converter ): with pytest.raises(InvalidConstraintError): process_selectors( constraints=[{"type": "equality", field: None}], params=tree_params, tree_converter=tree_params_converter, param_names=list("abcdefg"), ) def test_process_selectors_duplicates(np_params_converter): constraints = [ { "type": "pairwise_equality", "locs": [[1, 4], [0, 0]], } ] with pytest.raises(InvalidConstraintError): process_selectors( constraints=constraints, params=np.arange(6) + 10, tree_converter=np_params_converter, param_names=list("abcdefg"), ) def test_process_selectors_differen_length_in_multiple_selectors(np_params_converter): constraints = [ { "type": "pairwise_equality", "locs": [[1, 4], [0, 3, 5]], } ] with pytest.raises(InvalidConstraintError): process_selectors( constraints=constraints, params=np.arange(6) + 10, tree_converter=np_params_converter, param_names=list("abcdefg"), ) ================================================ FILE: tests/optimagic/parameters/test_scale_conversion.py ================================================ import numpy as np import pytest from numpy.testing import assert_array_almost_equal as aaae from numpy.testing import assert_array_equal as aae from optimagic import first_derivative from optimagic.parameters.conversion import InternalParams from optimagic.parameters.scale_conversion import get_scale_converter from optimagic.parameters.scaling import ScalingOptions TEST_CASES = { "start_values": InternalParams( values=np.array([0, 1, 1, 1, 1, 1]), lower_bounds=np.array([-2, 0, 0.5, 2 / 3, 3 / 4, 4 / 5]), upper_bounds=np.array([2, 2, 1.5, 4 / 3, 5 / 4, 6 / 5]), names=None, ), "bounds": InternalParams( values=np.full(6, 0.5), lower_bounds=np.zeros(6), upper_bounds=np.ones(6), names=None, ), } IDS = list(TEST_CASES) PARAMETRIZATION = list(TEST_CASES.items()) @pytest.mark.parametrize("method, expected", PARAMETRIZATION, ids=IDS) def test_get_scale_converter_active(method, 
expected): params = InternalParams( values=np.arange(6), lower_bounds=np.arange(6) - 1, upper_bounds=np.arange(6) + 1, names=list("abcdef"), ) scaling = ScalingOptions( method=method, clipping_value=0.5, ) converter, scaled = get_scale_converter( internal_params=params, scaling=scaling, ) aaae(scaled.values, expected.values) aaae(scaled.lower_bounds, expected.lower_bounds) aaae(scaled.upper_bounds, expected.upper_bounds) aaae(converter.params_to_internal(params.values), expected.values) aaae(converter.params_from_internal(expected.values), params.values) calculated_jacobian = converter.derivative_to_internal(np.eye(len(params.values))) numerical_jacobian = first_derivative( converter.params_from_internal, expected.values ).derivative aaae(calculated_jacobian, numerical_jacobian) def test_scale_conversion_fast_path(): params = InternalParams( values=np.arange(6), lower_bounds=np.arange(6) - 1, upper_bounds=np.arange(6) + 1, names=list("abcdef"), ) converter, scaled = get_scale_converter( internal_params=params, scaling=None, ) aae(params.values, scaled.values) aae(params.lower_bounds, scaled.lower_bounds) aae(params.upper_bounds, scaled.upper_bounds) aae(converter.params_to_internal(params.values), params.values) aae(converter.params_from_internal(params.values), params.values) aae(converter.derivative_to_internal(np.ones(3)), np.ones(3)) ================================================ FILE: tests/optimagic/parameters/test_scaling.py ================================================ import pytest from optimagic.exceptions import InvalidScalingError from optimagic.parameters.scaling import ( ScalingOptions, pre_process_scaling, ) def test_pre_process_scaling_trivial_case(): scaling = ScalingOptions( method="start_values", clipping_value=1, magnitude=2, ) got = pre_process_scaling(scaling=scaling) assert got == scaling def test_pre_process_scaling_none_case(): assert pre_process_scaling(scaling=None) is None def test_pre_process_scaling_false_case(): assert 
pre_process_scaling(scaling=False) is None def test_pre_process_scaling_true_case(): got = pre_process_scaling(scaling=True) assert got == ScalingOptions() def test_pre_process_scaling_dict_case(): got = pre_process_scaling( scaling={"method": "start_values", "clipping_value": 1, "magnitude": 2} ) assert got == ScalingOptions(method="start_values", clipping_value=1, magnitude=2) def test_pre_process_scaling_invalid_type(): with pytest.raises(InvalidScalingError, match="Invalid scaling options"): pre_process_scaling(scaling="invalid") def test_pre_process_scaling_invalid_dict_key(): with pytest.raises(InvalidScalingError, match="Invalid scaling options of type:"): pre_process_scaling(scaling={"wrong_key": "start_values"}) def test_pre_process_scaling_invalid_dict_value(): with pytest.raises(InvalidScalingError, match="Invalid clipping value:"): pre_process_scaling(scaling={"clipping_value": "invalid"}) def test_scaling_options_invalid_method_value(): with pytest.raises(InvalidScalingError, match="Invalid scaling method:"): ScalingOptions(method="invalid") def test_scaling_options_invalid_clipping_value_type(): with pytest.raises(InvalidScalingError, match="Invalid clipping value:"): ScalingOptions(clipping_value="invalid") def test_scaling_options_invalid_magnitude_value_type(): with pytest.raises(InvalidScalingError, match="Invalid scaling magnitude:"): ScalingOptions(magnitude="invalid") def test_scaling_options_invalid_magnitude_value_range(): with pytest.raises(InvalidScalingError, match="Invalid scaling magnitude:"): ScalingOptions(magnitude=-1) ================================================ FILE: tests/optimagic/parameters/test_space_conversion.py ================================================ import numpy as np import pytest from numpy.testing import assert_array_almost_equal as aaae from optimagic import first_derivative from optimagic.parameters.space_conversion import ( InternalParams, _multiply_from_left, _multiply_from_right, get_space_converter, ) 
from optimagic.utilities import get_rng def _get_test_case_no_constraint(): n_params = 10 fp = InternalParams( values=np.arange(n_params), lower_bounds=np.full(n_params, -1), upper_bounds=np.full(n_params, 11), names=list("abcdefghij"), ) constraints = [] return constraints, fp, fp def _get_test_case_fixed(with_value): fp = InternalParams( values=np.arange(5), lower_bounds=np.full(5, -np.inf), upper_bounds=np.full(5, np.inf), names=list("abcde"), ) if with_value: constraints = [{"index": [0, 2, 4], "type": "fixed", "value": [0, 2, 4]}] else: constraints = [{"index": [0, 2, 4], "type": "fixed"}] internal = InternalParams( values=np.array([1, 3]), lower_bounds=np.full(2, -np.inf), upper_bounds=np.full(2, np.inf), names=None, ) return constraints, fp, internal def _get_test_case_increasing(as_one): fp = InternalParams( values=np.array([0.1, 2.2, 2.3, 10.1, -1]), lower_bounds=np.full(5, -np.inf), upper_bounds=np.full(5, np.inf), names=list("abcde"), ) internal = InternalParams( values=np.array([0.1, -2.1, -0.1, -7.8, -1]), lower_bounds=np.full(5, -np.inf), upper_bounds=np.array([np.inf, 0, 0, 0, np.inf]), names=None, ) if as_one: constraints = [{"type": "increasing", "index": [0, 1, 2, 3]}] else: constraints = [ {"type": "increasing", "index": [0, 1, 2]}, {"type": "increasing", "index": [2, 3]}, ] return constraints, fp, internal def _get_test_case_decreasing(as_one): fp = InternalParams( values=np.array([0.1, 2.2, 2.3, 10.1, -1]), lower_bounds=np.full(5, -np.inf), upper_bounds=np.full(5, np.inf), names=list("abcde"), ) internal = InternalParams( values=np.array([0.1, -2.1, -0.1, -7.8, -1]), lower_bounds=np.full(5, -np.inf), upper_bounds=np.array([np.inf, 0, 0, 0, np.inf]), names=None, ) if as_one: constraints = [{"type": "decreasing", "index": [3, 2, 1, 0]}] else: constraints = [ {"type": "decreasing", "index": [2, 1, 0]}, {"type": "decreasing", "index": [3, 2]}, ] return constraints, fp, internal def _get_test_case_equality(as_one): fp = InternalParams( 
values=np.array([0, 1.5, 1.5, 0, 1.5, 1]), lower_bounds=np.array([-10, 1, 0.9, -np.inf, -np.inf, -10]), upper_bounds=np.full(6, np.inf), names=list("abcdef"), ) internal = InternalParams( values=np.array([0, 1.5, 0, 1]), lower_bounds=np.array([-10, 1, -np.inf, -10]), upper_bounds=np.full(4, np.inf), names=None, ) if as_one: constraints = [{"type": "equality", "index": [1, 2, 4]}] else: constraints = [ {"type": "equality", "index": [1, 2]}, {"type": "equality", "index": [1, 4]}, ] return constraints, fp, internal def _get_test_case_probability(): fp = InternalParams( values=np.array([0.1, 0.2, 0.2, 0.5, 10]), lower_bounds=np.full(5, -np.inf), upper_bounds=np.full(5, np.inf), names=list("abcde"), ) internal = InternalParams( values=np.array([0.2, 0.4, 0.4, 10]), lower_bounds=np.array([0, 0, 0, -np.inf]), upper_bounds=np.full(4, np.inf), names=None, ) constraints = [{"type": "probability", "index": [0, 1, 2, 3]}] return constraints, fp, internal def _get_test_case_uncorrelated_covariance(): fp = InternalParams( values=np.array([1, 0, 4, 0, 0, 9, 10]), lower_bounds=np.full(7, -np.inf), upper_bounds=np.full(7, np.inf), names=list("abcdefg"), ) internal = InternalParams( values=np.array([1, 4, 9, 10]), lower_bounds=np.array([0, 0, 0, -np.inf]), upper_bounds=np.full(4, np.inf), names=None, ) constraints = [ {"type": "covariance", "index": [0, 1, 2, 3, 4, 5]}, {"type": "fixed", "index": [1, 3, 4], "value": 0}, ] return constraints, fp, internal def _get_test_case_covariance(): fp = InternalParams( values=np.array([1, -0.2, 1.2, -0.2, 0.1, 1.3, 0.1, -0.05, 0.2, 1, 10]), lower_bounds=np.full(11, -np.inf), upper_bounds=np.full(11, np.inf), names=list("abcdefghijk"), ) internal = InternalParams( values=np.array( [ 1, -0.2, 1.07703296, -0.2, 0.0557086, 1.12111398, 0.1, -0.0278543, 0.19761748, 0.97476739, 10, ] ), lower_bounds=np.array( [0, -np.inf, 0, -np.inf, -np.inf, 0, -np.inf, -np.inf, -np.inf, 0, -np.inf] ), upper_bounds=np.full(11, np.inf), names=None, ) constraints = 
[{"type": "covariance", "index": np.arange(10)}] return constraints, fp, internal def _get_test_case_normalized_covariance(): fp = InternalParams( values=np.array([4, 0.1, 2, 0.2, 0.3, 3, 10]), lower_bounds=np.full(7, -np.inf), upper_bounds=np.full(7, np.inf), names=list("abcdefg"), ) internal = InternalParams( values=np.array([0.05, 1.4133294025, 0.1, 0.2087269956, 1.7165177078, 10]), lower_bounds=[-np.inf, 0, -np.inf, -np.inf, 0, -np.inf], upper_bounds=np.full(6, np.inf), names=None, ) constraints = [ {"type": "covariance", "index": np.arange(6)}, {"type": "fixed", "index": [0], "value": 4}, ] return constraints, fp, internal def _get_test_case_sdcorr(): fp = InternalParams( values=np.array([2, 1.5, 3, 0.2, 0.15, 0.33, 10]), lower_bounds=np.full(7, -np.inf), upper_bounds=np.full(7, np.inf), names=list("abcdefg"), ) internal = InternalParams( values=np.array([2, 0.3, 1.46969385, 0.45, 0.91855865, 2.82023935, 10]), lower_bounds=np.array([0, -np.inf, 0, -np.inf, -np.inf, 0, -np.inf]), upper_bounds=np.full(7, np.inf), names=None, ) constraints = [{"type": "sdcorr", "index": np.arange(6)}] return constraints, fp, internal TEST_CASES = { "no_constraints": _get_test_case_no_constraint(), "fixed_at_start": _get_test_case_fixed(with_value=False), "fixed_at_value": _get_test_case_fixed(with_value=True), "one_increasing": _get_test_case_increasing(as_one=True), "overlapping_increasing": _get_test_case_increasing(as_one=False), "one_decreasing": _get_test_case_decreasing(as_one=True), "overlapping_decreasing": _get_test_case_decreasing(as_one=False), "one_equality": _get_test_case_equality(as_one=True), "everlapping_equality": _get_test_case_equality(as_one=False), "probability": _get_test_case_probability(), "uncorrelated_covariance": _get_test_case_uncorrelated_covariance(), "covariance": _get_test_case_covariance(), "normalized_covariance": _get_test_case_normalized_covariance(), "sdcorr": _get_test_case_sdcorr(), } PARAMETRIZATION = list(TEST_CASES.values()) IDS = 
list(TEST_CASES) @pytest.mark.parametrize( "constraints, params, expected_internal", PARAMETRIZATION, ids=IDS ) def test_space_converter_with_params(constraints, params, expected_internal): converter, internal = get_space_converter( internal_params=params, internal_constraints=constraints, ) aaae(internal.values, expected_internal.values) aaae(internal.lower_bounds, expected_internal.lower_bounds) aaae(internal.upper_bounds, expected_internal.upper_bounds) aaae(converter.params_to_internal(params.values), expected_internal.values) aaae(converter.params_from_internal(expected_internal.values), params.values) numerical_jacobian = first_derivative( converter.params_from_internal, expected_internal.values ).derivative calculated_jacobian = converter.derivative_to_internal( external_derivative=np.eye(len(params.values)), internal_values=expected_internal.values, ) aaae(calculated_jacobian, numerical_jacobian) @pytest.mark.parametrize("seed", range(5)) def test_multiply_from_left_and_right(seed): rng = get_rng(seed) mat_list = [rng.uniform(size=(10, 10)) for i in range(5)] a, b, c, d, e = mat_list expected = a @ b @ c @ d @ e calc_from_left = _multiply_from_left(mat_list) calc_from_right = _multiply_from_right(mat_list) aaae(calc_from_left, expected) aaae(calc_from_right, expected) ================================================ FILE: tests/optimagic/parameters/test_tree_conversion.py ================================================ import numpy as np import pandas as pd import pytest from numpy.testing import assert_array_equal as aae from optimagic.parameters.bounds import Bounds from optimagic.parameters.tree_conversion import get_tree_converter from optimagic.typing import AggregationLevel @pytest.fixture() def params(): df = pd.DataFrame({"value": [3, 4], "lower_bound": [0, 0]}, index=["c", "d"]) params = ([0, np.array([1, 2]), {"a": df, "b": 5}], 6) return params @pytest.fixture() def upper_bounds(): upper = ([None, np.array([11, np.inf]), None], 100) return upper 
FUNC_EVALS = [ 5.0, np.float32(5), np.ones(5), {"a": 1, "b": 2, "c": [np.full(4, 0.5)]}, pd.Series(1, index=list("abcde")), np.ones(5), {"a": 1, "b": 2}, ] @pytest.mark.parametrize("func_eval", FUNC_EVALS) def test_tree_converter_scalar_solver(params, upper_bounds, func_eval): bounds = Bounds( upper=upper_bounds, ) converter, flat_params = get_tree_converter( params=params, bounds=bounds, func_eval=func_eval, derivative_eval=params, solver_type=AggregationLevel.SCALAR, ) expected_values = np.arange(7) expected_lb = np.array([-np.inf, -np.inf, -np.inf, 0, 0, -np.inf, -np.inf]) expected_ub = np.array([np.inf, 11, np.inf, np.inf, np.inf, np.inf, 100]) expected_names = ["0_0", "0_1_0", "0_1_1", "0_2_a_c", "0_2_a_d", "0_2_b", "1"] aae(flat_params.values, expected_values) aae(flat_params.lower_bounds, expected_lb) aae(flat_params.upper_bounds, expected_ub) assert flat_params.names == expected_names aae(converter.params_flatten(params), np.arange(7)) unflat = converter.params_unflatten(np.arange(7)) assert unflat[0][0] == params[0][0] aae(unflat[0][1], params[0][1]) SOLVER_TYPES = [ AggregationLevel.SCALAR, AggregationLevel.LIKELIHOOD, AggregationLevel.LEAST_SQUARES, ] @pytest.mark.parametrize("solver_type", SOLVER_TYPES) def test_tree_conversion_fast_path(solver_type): if solver_type == AggregationLevel.SCALAR: derivative_eval = np.arange(3) * 2 func_eval = 3 else: derivative_eval = np.arange(6).reshape(2, 3) func_eval = np.ones(2) converter, flat_params = get_tree_converter( params=np.arange(3), bounds=Bounds(lower=None, upper=np.arange(3) + 1), func_eval=func_eval, derivative_eval=derivative_eval, solver_type=solver_type, ) aae(flat_params.values, np.arange(3)) assert flat_params.lower_bounds is None aae(flat_params.upper_bounds, np.arange(3) + 1) assert flat_params.names == list(map(str, range(3))) aae(converter.params_flatten(np.arange(3)), np.arange(3)) aae(converter.params_unflatten(np.arange(3)), np.arange(3)) aae(converter.derivative_flatten(derivative_eval), 
derivative_eval) ================================================ FILE: tests/optimagic/parameters/test_tree_registry.py ================================================ import numpy as np import pandas as pd import pytest from pandas.testing import assert_frame_equal from pybaum import leaf_names, tree_flatten, tree_unflatten from optimagic.parameters.tree_registry import get_registry @pytest.fixture() def value_df(): df = pd.DataFrame( np.arange(6).reshape(3, 2), columns=["a", "value"], index=["alpha", "beta", "gamma"], ) return df @pytest.fixture() def other_df(): df = pd.DataFrame(index=["alpha", "beta", "gamma"]) df["b"] = np.arange(3).astype(np.int16) df["c"] = 3.14 return df def test_flatten_df_with_value_column(value_df): registry = get_registry(extended=True) flat, _ = tree_flatten(value_df, registry=registry) assert flat == [1, 3, 5] def test_unflatten_df_with_value_column(value_df): registry = get_registry(extended=True) _, treedef = tree_flatten(value_df, registry=registry) unflat = tree_unflatten(treedef, [10, 11, 12], registry=registry) assert unflat.equals(value_df.assign(value=[10, 11, 12])) def test_leaf_names_df_with_value_column(value_df): registry = get_registry(extended=True) names = leaf_names(value_df, registry=registry) assert names == ["alpha", "beta", "gamma"] def test_flatten_partially_numeric_df(other_df): registry = get_registry(extended=True) flat, _ = tree_flatten(other_df, registry=registry) assert flat == [0, 3.14, 1, 3.14, 2, 3.14] def test_unflatten_partially_numeric_df(other_df): registry = get_registry(extended=True) _, treedef = tree_flatten(other_df, registry=registry) unflat = tree_unflatten(treedef, [1, 2, 3, 4, 5, 6], registry=registry) other_df = other_df.assign(b=[1, 3, 5], c=[2, 4, 6]) assert_frame_equal(unflat, other_df, check_dtype=False) def test_leaf_names_partially_numeric_df(other_df): registry = get_registry(extended=True) names = leaf_names(other_df, registry=registry) assert names == ["alpha_b", "alpha_c", 
"beta_b", "beta_c", "gamma_b", "gamma_c"] ================================================ FILE: tests/optimagic/shared/__init__.py ================================================ ================================================ FILE: tests/optimagic/shared/test_process_user_functions.py ================================================ import numpy as np import pytest from numpy.typing import NDArray from optimagic import mark from optimagic.exceptions import InvalidKwargsError from optimagic.optimization.fun_value import ( LeastSquaresFunctionValue, LikelihoodFunctionValue, ScalarFunctionValue, ) from optimagic.shared.process_user_function import ( get_kwargs_from_args, infer_aggregation_level, partial_func_of_params, ) from optimagic.typing import AggregationLevel def test_partial_func_of_params(): def f(params, b, c): return params + b + c func = partial_func_of_params(f, {"b": 2, "c": 3}) assert func(1) == 6 def test_partial_func_of_params_too_many_kwargs(): def f(params, b, c): return params + b + c with pytest.raises(InvalidKwargsError): partial_func_of_params(f, {"params": 1, "b": 2, "c": 3}) def test_partial_func_of_params_too_few_kwargs(): def f(params, b, c): return params + b + c with pytest.raises(InvalidKwargsError): partial_func_of_params(f, {"c": 3}) def test_get_kwargs_from_args(): def f(a, b, c=3, d=4): return a + b + c got = get_kwargs_from_args([1, 2], f, offset=1) expected = {"b": 1, "c": 2} assert got == expected def test_infer_aggregation_level_no_decorator(): def f(params): return 1 assert infer_aggregation_level(f) == AggregationLevel.SCALAR def test_infer_aggregation_level_scalar_decorator(): @mark.scalar def f(params): return 1 assert infer_aggregation_level(f) == AggregationLevel.SCALAR def test_infer_aggregation_level_scalar_anotation(): def f(params: NDArray[np.float64]) -> ScalarFunctionValue: return ScalarFunctionValue(1) assert infer_aggregation_level(f) == AggregationLevel.SCALAR def 
test_infer_aggregation_level_least_squares_decorator(): @mark.least_squares def f(params): return np.ones(3) assert infer_aggregation_level(f) == AggregationLevel.LEAST_SQUARES def test_infer_aggregation_level_least_squares_anotation(): def f(params: NDArray[np.float64]) -> LeastSquaresFunctionValue: return LeastSquaresFunctionValue(np.ones(3)) assert infer_aggregation_level(f) == AggregationLevel.LEAST_SQUARES def test_infer_aggregation_level_likelihood_decorator(): @mark.likelihood def f(params): return np.ones(3) assert infer_aggregation_level(f) == AggregationLevel.LIKELIHOOD def test_infer_aggregation_level_likelihood_anotation(): def f(params: NDArray[np.float64]) -> LikelihoodFunctionValue: return LikelihoodFunctionValue(np.ones(3)) assert infer_aggregation_level(f) == AggregationLevel.LIKELIHOOD ================================================ FILE: tests/optimagic/test_algo_selection.py ================================================ from optimagic import algos def test_dfols_is_present(): assert hasattr(algos, "nag_dfols") assert hasattr(algos.Bounded, "nag_dfols") assert hasattr(algos.LeastSquares, "nag_dfols") assert hasattr(algos.Local, "nag_dfols") assert hasattr(algos.Bounded.Local.LeastSquares, "nag_dfols") assert hasattr(algos.Local.Bounded.LeastSquares, "nag_dfols") assert hasattr(algos.LeastSquares.Bounded.Local, "nag_dfols") def test_scipy_cobyla_is_present(): assert hasattr(algos, "scipy_cobyla") assert hasattr(algos.Local, "scipy_cobyla") assert hasattr(algos.NonlinearConstrained, "scipy_cobyla") assert hasattr(algos.GradientFree, "scipy_cobyla") assert hasattr(algos.Local.NonlinearConstrained, "scipy_cobyla") assert hasattr(algos.NonlinearConstrained.Local, "scipy_cobyla") assert hasattr(algos.GradientFree.NonlinearConstrained, "scipy_cobyla") assert hasattr(algos.GradientFree.NonlinearConstrained.Local, "scipy_cobyla") assert hasattr(algos.Local.GradientFree.NonlinearConstrained, "scipy_cobyla") assert 
hasattr(algos.NonlinearConstrained.GradientFree.Local, "scipy_cobyla") assert hasattr(algos.NonlinearConstrained.Local.GradientFree, "scipy_cobyla") assert hasattr(algos.Local.NonlinearConstrained.GradientFree, "scipy_cobyla") def test_algorithm_lists(): assert len(algos.All) >= len(algos.Available) assert len(algos.AllNames) == len(algos.All) assert len(algos.AvailableNames) == len(algos.Available) ================================================ FILE: tests/optimagic/test_batch_evaluators.py ================================================ import itertools import warnings import pytest from optimagic.batch_evaluators import process_batch_evaluator batch_evaluators = ["joblib", "threading"] n_core_list = [1, 2] test_cases = list(itertools.product(batch_evaluators, n_core_list)) def double(x): return 2 * x def buggy_func(x): # noqa: ARG001 raise AssertionError() def add_x_and_y(x, y): return x + y @pytest.mark.slow() @pytest.mark.parametrize("batch_evaluator, n_cores", test_cases) def test_batch_evaluator_without_exceptions(batch_evaluator, n_cores): batch_evaluator = process_batch_evaluator(batch_evaluator) calculated = batch_evaluator( func=double, arguments=list(range(10)), n_cores=n_cores, ) expected = list(range(0, 20, 2)) assert calculated == expected @pytest.mark.slow() @pytest.mark.parametrize("batch_evaluator, n_cores", test_cases) def test_batch_evaluator_with_unhandled_exceptions(batch_evaluator, n_cores): batch_evaluator = process_batch_evaluator(batch_evaluator) with pytest.raises(AssertionError): batch_evaluator( func=buggy_func, arguments=list(range(10)), n_cores=n_cores, error_handling="raise", ) @pytest.mark.slow() @pytest.mark.parametrize("batch_evaluator, n_cores", test_cases) def test_batch_evaluator_with_handled_exceptions(batch_evaluator, n_cores): batch_evaluator = process_batch_evaluator(batch_evaluator) with warnings.catch_warnings(): warnings.simplefilter("ignore") calculated = batch_evaluator( func=buggy_func, arguments=list(range(10)), 
n_cores=n_cores, error_handling="continue", ) for calc in calculated: assert isinstance(calc, str) @pytest.mark.slow() @pytest.mark.parametrize("batch_evaluator, n_cores", test_cases) def test_batch_evaluator_with_list_unpacking(batch_evaluator, n_cores): batch_evaluator = process_batch_evaluator(batch_evaluator) calculated = batch_evaluator( func=add_x_and_y, arguments=[(1, 2), (3, 4)], n_cores=n_cores, unpack_symbol="*", ) expected = [3, 7] assert calculated == expected @pytest.mark.slow() @pytest.mark.parametrize("batch_evaluator, n_cores", test_cases) def test_batch_evaluator_with_dict_unpacking(batch_evaluator, n_cores): batch_evaluator = process_batch_evaluator(batch_evaluator) calculated = batch_evaluator( func=add_x_and_y, arguments=[{"x": 1, "y": 2}, {"x": 3, "y": 4}], n_cores=n_cores, unpack_symbol="**", ) expected = [3, 7] assert calculated == expected def test_get_batch_evaluator_invalid_value(): with pytest.raises(ValueError): process_batch_evaluator("bla") def test_get_batch_evaluator_invalid_type(): with pytest.raises(TypeError): process_batch_evaluator(3) def test_get_batch_evaluator_with_callable(): assert callable(process_batch_evaluator(lambda x: x)) ================================================ FILE: tests/optimagic/test_constraints.py ================================================ import pytest from optimagic.constraints import ( Constraint, DecreasingConstraint, EqualityConstraint, FixedConstraint, FlatCovConstraint, FlatSDCorrConstraint, IncreasingConstraint, LinearConstraint, NonlinearConstraint, PairwiseEqualityConstraint, ProbabilityConstraint, _all_none, _select_non_none, ) from optimagic.exceptions import InvalidConstraintError @pytest.fixture def dummy_func(): return lambda x: x def test_fixed_constraint(dummy_func): constr = FixedConstraint(selector=dummy_func) dict_repr = {"type": "fixed", "selector": dummy_func} assert constr._to_dict() == dict_repr assert isinstance(constr, Constraint) def 
test_increasing_constraint(dummy_func): constr = IncreasingConstraint(selector=dummy_func) dict_repr = {"type": "increasing", "selector": dummy_func} assert constr._to_dict() == dict_repr assert isinstance(constr, Constraint) def test_decreasing_constraint(dummy_func): constr = DecreasingConstraint(selector=dummy_func) dict_repr = {"type": "decreasing", "selector": dummy_func} assert constr._to_dict() == dict_repr assert isinstance(constr, Constraint) def test_equality_constraint(dummy_func): constr = EqualityConstraint(selector=dummy_func) dict_repr = {"type": "equality", "selector": dummy_func} assert constr._to_dict() == dict_repr assert isinstance(constr, Constraint) def test_pairwise_equality_constraint(dummy_func): constr = PairwiseEqualityConstraint(selectors=[dummy_func, dummy_func]) dict_repr = {"type": "pairwise_equality", "selectors": [dummy_func, dummy_func]} assert constr._to_dict() == dict_repr assert isinstance(constr, Constraint) def test_probability_constraint(dummy_func): constr = ProbabilityConstraint(selector=dummy_func) dict_repr = {"type": "probability", "selector": dummy_func} assert constr._to_dict() == dict_repr assert isinstance(constr, Constraint) def test_covariance_constraint(dummy_func): constr = FlatCovConstraint(selector=dummy_func) dict_repr = {"type": "covariance", "selector": dummy_func, "regularization": 0.0} assert constr._to_dict() == dict_repr assert isinstance(constr, Constraint) def test_sdcorr_constraint(dummy_func): constr = FlatSDCorrConstraint(selector=dummy_func) dict_repr = {"type": "sdcorr", "selector": dummy_func, "regularization": 0.0} assert constr._to_dict() == dict_repr assert isinstance(constr, Constraint) def test_linear_constraint_with_value(dummy_func): constr = LinearConstraint(selector=dummy_func, value=2.1, weights=[1, 2]) dict_repr = { "type": "linear", "selector": dummy_func, "value": 2.1, "weights": [1, 2], } assert constr._to_dict() == dict_repr assert isinstance(constr, Constraint) def 
test_linear_constraint_with_bounds(dummy_func): constr = LinearConstraint( selector=dummy_func, lower_bound=1.0, upper_bound=2.0, weights=[1, 2] ) dict_repr = { "type": "linear", "selector": dummy_func, "lower_bound": 1.0, "upper_bound": 2.0, "weights": [1, 2], } assert constr._to_dict() == dict_repr def test_linear_constraint_with_bounds_and_value(dummy_func): msg = "'value' cannot be used with 'lower_bound' or 'upper_bound'." with pytest.raises(InvalidConstraintError, match=msg): LinearConstraint( selector=dummy_func, lower_bound=1.0, upper_bound=2.0, value=2.1, weights=[1, 2], ) def test_linear_constraint_with_nothing(dummy_func): msg = "At least one of 'lower_bound', 'upper_bound', or 'value' must be non-None." with pytest.raises(InvalidConstraintError, match=msg): LinearConstraint(selector=dummy_func, weights=[1, 2]) def test_nonlinear_constraint_with_value(dummy_func): constr = NonlinearConstraint(selector=dummy_func, value=2.1, func=dummy_func) dict_repr = { "type": "nonlinear", "selector": dummy_func, "value": 2.1, "func": dummy_func, "tol": 1e-5, } assert constr._to_dict() == dict_repr assert isinstance(constr, Constraint) def test_nonlinear_constraint_with_bounds(dummy_func): constr = NonlinearConstraint( selector=dummy_func, lower_bound=1.0, upper_bound=2.0, func=dummy_func ) dict_repr = { "type": "nonlinear", "selector": dummy_func, "func": dummy_func, "lower_bounds": 1.0, "upper_bounds": 2.0, "tol": 1e-5, } assert constr._to_dict() == dict_repr def test_nonlinear_constraint_with_bounds_and_value(dummy_func): msg = "'value' cannot be used with 'lower_bound' or 'upper_bound'." with pytest.raises(InvalidConstraintError, match=msg): NonlinearConstraint( selector=dummy_func, lower_bound=1.0, upper_bound=2.0, value=2.1, func=dummy_func, ) def test_nonlinear_constraint_with_nothing(dummy_func): msg = "At least one of 'lower_bound', 'upper_bound', or 'value' must be non-None." 
with pytest.raises(InvalidConstraintError, match=msg):
        NonlinearConstraint(selector=dummy_func, func=dummy_func)


def test_all_none():
    # _all_none is truthy only if every positional argument is None.
    assert _all_none(None, None, None)
    assert not _all_none(None, 1, None)


def test_select_non_none():
    # _select_non_none keeps only the keyword arguments that are not None.
    assert _select_non_none(a=None, b=None, c=None) == {}
    assert _select_non_none(a=None, b=1, c=None) == {"b": 1}
    assert _select_non_none(a=None, b=None, c=2) == {"c": 2}
    assert _select_non_none(a=1, b=2, c=3) == {"a": 1, "b": 2, "c": 3}


================================================ FILE: tests/optimagic/test_decorators.py ================================================
import pytest

from optimagic.decorators import (
    catch,
    unpack,
)


def test_catch_at_defaults():
    # By default, `catch` turns exceptions into a UserWarning and returns None ...
    @catch
    def f():
        raise ValueError

    with pytest.warns(UserWarning):
        assert f() is None

    # ... but KeyboardInterrupt must never be swallowed.
    @catch
    def g():
        raise KeyboardInterrupt()

    with pytest.raises(KeyboardInterrupt):
        g()


def test_catch_with_reraise():
    # With reraise=True the original exception propagates unchanged.
    @catch(reraise=True)
    def f():
        raise ValueError

    with pytest.raises(ValueError):
        f()


def test_unpack_decorator_none():
    # symbol=None leaves the call signature untouched.
    @unpack(symbol=None)
    def f(x):
        return x

    assert f(3) == 3


def test_unpack_decorator_one_star():
    # "*" unpacks a tuple of positional arguments.
    @unpack(symbol="*")
    def f(x, y):
        return x + y

    assert f((3, 4)) == 7


def test_unpack_decorator_two_stars():
    # "**" unpacks a dict of keyword arguments.
    @unpack(symbol="**")
    def f(x, y):
        return x + y

    assert f({"x": 3, "y": 4}) == 7


================================================ FILE: tests/optimagic/test_deprecations.py ================================================
"""Test that our deprecations work. This also serves as an internal overview of deprecated functions.
""" import warnings import numpy as np import pytest from numpy.testing import assert_almost_equal as aaae import estimagic as em import optimagic as om from estimagic import ( OptimizeLogReader, OptimizeResult, batch_evaluators, check_constraints, convergence_plot, convergence_report, count_free_params, criterion_plot, first_derivative, get_benchmark_problems, maximize, minimize, params_plot, profile_plot, rank_report, run_benchmark, second_derivative, slice_plot, traceback_report, utilities, ) from optimagic.deprecations import ( convert_dict_to_function_value, handle_log_options_throw_deprecated_warning, infer_problem_type_from_dict_output, is_dict_output, pre_process_constraints, ) from optimagic.differentiation.derivatives import NumdiffResult from optimagic.exceptions import InvalidConstraintError from optimagic.logging.logger import SQLiteLogOptions from optimagic.optimization.fun_value import ( LeastSquaresFunctionValue, LikelihoodFunctionValue, ScalarFunctionValue, ) from optimagic.parameters.bounds import Bounds from optimagic.typing import AggregationLevel # ====================================================================================== # Deprecated in 0.5.0, remove in 0.6.0 # ====================================================================================== def test_estimagic_minimize_is_deprecated(): with pytest.warns(FutureWarning, match="estimagic.minimize has been deprecated"): minimize(lambda x: x @ x, np.arange(3), algorithm="scipy_lbfgsb") def test_estimagic_maximize_is_deprecated(): with pytest.warns(FutureWarning, match="estimagic.maximize has been deprecated"): maximize(lambda x: -x @ x, np.arange(3), algorithm="scipy_lbfgsb") def test_estimagic_first_derivative_is_deprecated(): msg = "estimagic.first_derivative has been deprecated" with pytest.warns(FutureWarning, match=msg): first_derivative(lambda x: x @ x, np.arange(3)) def test_estimagic_second_derivative_is_deprecated(): msg = "estimagic.second_derivative has been deprecated" 
with pytest.warns(FutureWarning, match=msg): second_derivative(lambda x: x @ x, np.arange(3)) def test_estimagic_benchmarking_functions_are_deprecated(): msg = "estimagic.get_benchmark_problems has been deprecated" with pytest.warns(FutureWarning, match=msg): problems = get_benchmark_problems("example") msg = "estimagic.run_benchmark has been deprecated" with pytest.warns(FutureWarning, match=msg): results = run_benchmark( problems, optimize_options={"test": {"algorithm": "scipy_lbfgsb"}} ) msg = "estimagic.convergence_report has been deprecated" with pytest.warns(FutureWarning, match=msg): convergence_report(problems, results) msg = "estimagic.rank_report has been deprecated" with pytest.warns(FutureWarning, match=msg): rank_report(problems, results) msg = "estimagic.traceback_report has been deprecated" with pytest.warns(FutureWarning, match=msg): traceback_report(problems, results) msg = "estimagic.profile_plot has been deprecated" with pytest.warns(FutureWarning, match=msg): profile_plot(problems, results) msg = "estimagic.convergence_plot has been deprecated" with pytest.warns(FutureWarning, match=msg): convergence_plot(problems, results) def test_estimagic_slice_plot_is_deprecated(): msg = "estimagic.slice_plot has been deprecated" with pytest.warns(FutureWarning, match=msg): slice_plot( func=lambda x: x @ x, params=np.arange(3), bounds=Bounds(lower=np.zeros(3), upper=np.ones(3) * 5), ) def test_estimagic_check_constraints_is_deprecated(): msg = "estimagic.check_constraints has been deprecated" with pytest.warns(FutureWarning, match=msg): check_constraints( params=np.arange(3), constraints=om.FixedConstraint(lambda x: x[0]), ) def test_estimagic_count_free_params_is_deprecated(): msg = "estimagic.count_free_params has been deprecated" with pytest.warns(FutureWarning, match=msg): count_free_params( params=np.arange(3), constraints=om.FixedConstraint(lambda x: x[0]), ) @pytest.fixture() def example_db(tmp_path): path = tmp_path / "test.db" def _crit(params): x 
= np.array(list(params.values())) return x @ x om.minimize( fun=_crit, params={"a": 1, "b": 2, "c": 3}, algorithm="scipy_lbfgsb", logging=path, ) return path def test_estimagic_log_reader_is_deprecated(example_db): msg = "OptimizeLogReader is deprecated and will be removed in a future " "version. Please use optimagic.logging.SQLiteLogger instead." with pytest.warns(FutureWarning, match=msg): OptimizeLogReader(example_db) def test_estimagic_optimize_result_is_deprecated(): res = om.minimize(lambda x: x @ x, np.arange(3), algorithm="scipy_lbfgsb") msg = "estimagic.OptimizeResult has been deprecated" with pytest.warns(FutureWarning, match=msg): OptimizeResult( params=res.params, fun=res.fun, start_fun=res.start_fun, start_params=res.start_params, algorithm=res.algorithm, direction=res.direction, n_free=res.n_free, ) def test_estimagic_chol_params_to_lower_triangular_matrix_is_deprecated(): msg = "estimagic.utilities.chol_params_to_lower_triangular_matrix has been deprecat" with pytest.warns(FutureWarning, match=msg): utilities.chol_params_to_lower_triangular_matrix(np.arange(6)) def test_estimagic_cov_params_to_matrix_is_deprecated(): msg = "estimagic.utilities.cov_params_to_matrix has been deprecated" with pytest.warns(FutureWarning, match=msg): utilities.cov_params_to_matrix(np.arange(6)) def test_estimagic_cov_matrix_to_params_is_deprecated(): msg = "estimagic.utilities.cov_matrix_to_params has been deprecated" with pytest.warns(FutureWarning, match=msg): utilities.cov_matrix_to_params(np.eye(3)) def test_estimagic_sdcorr_params_to_sds_and_corr_is_deprecated(): msg = "estimagic.utilities.sdcorr_params_to_sds_and_corr has been deprecated" with pytest.warns(FutureWarning, match=msg): utilities.sdcorr_params_to_sds_and_corr(np.arange(6)) def test_estimagic_sds_and_corr_to_cov_is_deprecated(): msg = "estimagic.utilities.sds_and_corr_to_cov has been deprecated" with pytest.warns(FutureWarning, match=msg): utilities.sds_and_corr_to_cov(np.arange(3), np.eye(3)) def 
test_estimagic_cov_to_sds_and_corr_is_deprecated(): msg = "estimagic.utilities.cov_to_sds_and_corr has been deprecated" with pytest.warns(FutureWarning, match=msg): utilities.cov_to_sds_and_corr(np.eye(3)) def test_estimagic_sdcorr_params_to_matrix_is_deprecated(): msg = "estimagic.utilities.sdcorr_params_to_matrix has been deprecated" with pytest.warns(FutureWarning, match=msg): utilities.sdcorr_params_to_matrix(np.arange(6)) def test_estimagic_cov_matrix_to_sdcorr_params_is_deprecated(): msg = "estimagic.utilities.cov_matrix_to_sdcorr_params has been deprecated" with pytest.warns(FutureWarning, match=msg): utilities.cov_matrix_to_sdcorr_params(np.eye(3)) def test_estimagic_number_of_triangular_elements_to_dimension_is_deprecated(): msg = "estimagic.utilities.number_of_triangular_elements_to_dimension has been" with pytest.warns(FutureWarning, match=msg): utilities.number_of_triangular_elements_to_dimension(6) def test_estimagic_dimension_to_number_of_triangular_elements_is_deprecated(): msg = "estimagic.utilities.dimension_to_number_of_triangular_elements has been" with pytest.warns(FutureWarning, match=msg): utilities.dimension_to_number_of_triangular_elements(3) def test_estimagic_propose_alternatives_is_deprecated(): msg = "estimagic.utilities.propose_alternatives has been deprecated" with pytest.warns(FutureWarning, match=msg): utilities.propose_alternatives("estimagic", list("abcdefg")) def test_estimagic_robust_cholesky_is_deprecated(): msg = "estimagic.utilities.robust_cholesky has been deprecated" with pytest.warns(FutureWarning, match=msg): utilities.robust_cholesky(np.eye(3)) def test_estimagic_robust_inverse_is_deprecated(): msg = "estimagic.utilities.robust_inverse has been deprecated" with pytest.warns(FutureWarning, match=msg): utilities.robust_inverse(np.eye(3)) def test_estimagic_hash_array_is_deprecated(): msg = "estimagic.utilities.hash_array has been deprecated" with pytest.warns(FutureWarning, match=msg): utilities.hash_array(np.arange(3)) def 
test_estimagic_calculate_trustregion_initial_radius_is_deprecated(): msg = "estimagic.utilities.calculate_trustregion_initial_radius has been deprecated" with pytest.warns(FutureWarning, match=msg): utilities.calculate_trustregion_initial_radius(np.arange(3)) def test_estimagic_pickle_functions_are_deprecated(tmp_path): msg = "estimagic.utilities.to_pickle has been deprecated" with pytest.warns(FutureWarning, match=msg): utilities.to_pickle(np.arange(3), tmp_path / "test.pkl") msg = "estimagic.utilities.read_pickle has been deprecated" with pytest.warns(FutureWarning, match=msg): utilities.read_pickle(tmp_path / "test.pkl") def test_estimagic_isscalar_is_deprecated(): msg = "estimagic.utilities.isscalar has been deprecated" with pytest.warns(FutureWarning, match=msg): utilities.isscalar(1) def test_estimagic_get_rng_is_deprecated(): msg = "estimagic.utilities.get_rng has been deprecated" with pytest.warns(FutureWarning, match=msg): utilities.get_rng(42) def test_estimagic_criterion_plot_is_deprecated(): msg = "estimagic.criterion_plot has been deprecated" res = om.minimize(lambda x: x @ x, np.arange(3), algorithm="scipy_lbfgsb") with pytest.warns(FutureWarning, match=msg): criterion_plot(res) def test_estimagic_params_plot_is_deprecated(): msg = "estimagic.params_plot has been deprecated" res = om.minimize(lambda x: x @ x, np.arange(3), algorithm="scipy_lbfgsb") with pytest.warns(FutureWarning, match=msg): params_plot(res) def test_criterion_is_depracated(): msg = "the `criterion` argument has been renamed" with pytest.warns(FutureWarning, match=msg): om.minimize( criterion=lambda x: x @ x, params=np.arange(3), algorithm="scipy_lbfgsb", ) def test_criterion_kwargs_is_deprecated(): msg = "the `criterion_kwargs` argument has been renamed" with pytest.warns(FutureWarning, match=msg): om.minimize( lambda x, a: x @ x, params=np.arange(3), algorithm="scipy_lbfgsb", criterion_kwargs={"a": 1}, ) def test_derivative_is_deprecated(): msg = "the `derivative` argument has been 
renamed" with pytest.warns(FutureWarning, match=msg): om.minimize( lambda x: x @ x, params=np.arange(3), algorithm="scipy_lbfgsb", derivative=lambda x: 2 * x, ) def test_derivative_kwargs_is_deprecated(): msg = "the `derivative_kwargs` argument has been renamed" with pytest.warns(FutureWarning, match=msg): om.minimize( lambda x: x @ x, params=np.arange(3), algorithm="scipy_lbfgsb", jac=lambda x, a: 2 * x, derivative_kwargs={"a": 1}, ) def test_criterion_and_derivative_is_deprecated(): msg = "the `criterion_and_derivative` argument has been renamed" with pytest.warns(FutureWarning, match=msg): om.minimize( lambda x: x @ x, params=np.arange(3), algorithm="scipy_lbfgsb", criterion_and_derivative=lambda x: (x @ x, 2 * x), ) def test_criterion_and_derivative_kwargs_is_deprecated(): msg = "the `criterion_and_derivative_kwargs` argument has been renamed" with pytest.warns(FutureWarning, match=msg): om.minimize( lambda x: x @ x, params=np.arange(3), algorithm="scipy_lbfgsb", fun_and_jac=lambda x, a: (x @ x, 2 * x), criterion_and_derivative_kwargs={"a": 1}, ) ALGO_OPTIONS = [ {"convergence_absolute_criterion_tolerance": 1e-8}, {"convergence_relative_criterion_tolerance": 1e-8}, {"convergence_absolute_params_tolerance": 1e-8}, {"convergence_relative_params_tolerance": 1e-8}, {"convergence_absolute_gradient_tolerance": 1e-8}, {"convergence_relative_gradient_tolerance": 1e-8}, {"convergence_scaled_gradient_tolerance": 1e-8}, {"stopping_max_iterations": 1_000}, {"stopping_max_criterion_evaluations": 1_000}, ] @pytest.mark.parametrize("algo_option", ALGO_OPTIONS) def test_old_convergence_criteria_are_deprecated(algo_option): msg = "The following keys in `algo_options` are deprecated" with warnings.catch_warnings(): warnings.simplefilter("ignore", category=UserWarning) with pytest.warns(FutureWarning, match=msg): om.minimize( lambda x: x @ x, params=np.arange(3), algorithm="scipy_lbfgsb", algo_options=algo_option, ) def test_deprecated_attributes_of_optimize_result(): res = 
om.minimize(lambda x: x @ x, np.arange(3), algorithm="scipy_lbfgsb") msg = "attribute is deprecated" with pytest.warns(FutureWarning, match=msg): _ = res.n_criterion_evaluations with pytest.warns(FutureWarning, match=msg): _ = res.n_derivative_evaluations with pytest.warns(FutureWarning, match=msg): _ = res.criterion with pytest.warns(FutureWarning, match=msg): _ = res.start_criterion BOUNDS_KWARGS = [ {"lower_bounds": np.full(3, -1)}, {"upper_bounds": np.full(3, 2)}, ] SOFT_BOUNDS_KWARGS = [ {"soft_lower_bounds": np.full(3, -1)}, {"soft_upper_bounds": np.full(3, 1)}, ] @pytest.mark.parametrize("bounds_kwargs", BOUNDS_KWARGS + SOFT_BOUNDS_KWARGS) def test_old_bounds_are_deprecated_in_minimize(bounds_kwargs): msg = "Specifying bounds via the arguments" with pytest.warns(FutureWarning, match=msg): om.minimize( lambda x: x @ x, np.arange(3), algorithm="scipy_lbfgsb", **bounds_kwargs, ) @pytest.mark.parametrize("bounds_kwargs", BOUNDS_KWARGS + SOFT_BOUNDS_KWARGS) def test_old_bounds_are_deprecated_in_maximize(bounds_kwargs): msg = "Specifying bounds via the arguments" with pytest.warns(FutureWarning, match=msg): om.maximize( lambda x: -x @ x, np.arange(3), algorithm="scipy_lbfgsb", **bounds_kwargs, ) @pytest.mark.parametrize("bounds_kwargs", BOUNDS_KWARGS) def test_old_bounds_are_deprecated_in_first_derivative(bounds_kwargs): msg = "Specifying bounds via the arguments" with pytest.warns(FutureWarning, match=msg): om.first_derivative( lambda x: x @ x, np.arange(3), **bounds_kwargs, ) @pytest.mark.parametrize("bounds_kwargs", BOUNDS_KWARGS) def test_old_bounds_are_deprecated_in_second_derivative(bounds_kwargs): msg = "Specifying bounds via the arguments" with pytest.warns(FutureWarning, match=msg): om.second_derivative( lambda x: x @ x, np.arange(3), **bounds_kwargs, ) @pytest.mark.parametrize("bounds_kwargs", BOUNDS_KWARGS) def test_old_bounds_are_deprecated_in_estimate_ml(bounds_kwargs): msg = "Specifying bounds via the arguments" with pytest.warns(FutureWarning, 
match=msg): @om.mark.likelihood def loglike(x): return -(x**2) em.estimate_ml( loglike=loglike, params=np.arange(3), optimize_options={"algorithm": "scipy_lbfgsb"}, **bounds_kwargs, ) def test_numdiff_options_is_deprecated_in_estimate_ml(): msg = "The argument `numdiff_options` is deprecated" with pytest.warns(FutureWarning, match=msg): @om.mark.likelihood def loglike(x): return -(x**2) em.estimate_ml( loglike=loglike, params=np.arange(3), optimize_options={"algorithm": "scipy_lbfgsb"}, numdiff_options={"method": "forward"}, ) @pytest.mark.parametrize("bounds_kwargs", BOUNDS_KWARGS) def test_old_bounds_are_deprecated_in_estimate_msm(bounds_kwargs): msg = "Specifying bounds via the arguments" with pytest.warns(FutureWarning, match=msg): em.estimate_msm( simulate_moments=lambda x: x, empirical_moments=np.zeros(3), moments_cov=np.eye(3), params=np.arange(3), optimize_options={"algorithm": "scipy_lbfgsb"}, **bounds_kwargs, ) def test_numdiff_options_is_deprecated_in_estimate_msm(): msg = "The argument `numdiff_options` is deprecated" with pytest.warns(FutureWarning, match=msg): em.estimate_msm( simulate_moments=lambda x: x, empirical_moments=np.zeros(3), moments_cov=np.eye(3), params=np.arange(3), optimize_options={"algorithm": "scipy_lbfgsb"}, numdiff_options={"method": "forward"}, ) @pytest.mark.parametrize("bounds_kwargs", BOUNDS_KWARGS) def test_old_bounds_are_deprecated_in_count_free_params(bounds_kwargs): msg = "Specifying bounds via the arguments" with pytest.warns(FutureWarning, match=msg): om.count_free_params( np.arange(3), constraints=om.FixedConstraint(lambda x: x[0]), **bounds_kwargs, ) @pytest.mark.parametrize("bounds_kwargs", BOUNDS_KWARGS) def test_old_bounds_are_deprecated_in_check_constraints(bounds_kwargs): msg = "Specifying bounds via the arguments" with pytest.warns(FutureWarning, match=msg): om.check_constraints( np.arange(3), constraints=om.FixedConstraint(lambda x: x[0]), **bounds_kwargs, ) def test_old_bounds_are_deprecated_in_slice_plot(): msg 
= "Specifying bounds via the arguments" with pytest.warns(FutureWarning, match=msg): om.slice_plot( lambda x: x @ x, np.arange(3), lower_bounds=np.full(3, -1), upper_bounds=np.full(3, 2), ) def test_is_dict_output(): assert is_dict_output({"value": 1}) assert not is_dict_output(1) def test_infer_problem_type_from_dict_output(): assert infer_problem_type_from_dict_output({"value": 1}) == AggregationLevel.SCALAR assert ( infer_problem_type_from_dict_output({"value": 1, "root_contributions": 2}) == AggregationLevel.LEAST_SQUARES ) assert ( infer_problem_type_from_dict_output({"value": 1, "contributions": 2}) == AggregationLevel.LIKELIHOOD ) def test_convert_value_dict_to_function_value(): got = convert_dict_to_function_value({"value": 1}) assert isinstance(got, ScalarFunctionValue) assert got.value == 1 def test_convert_root_contributions_dict_to_function_value(): got = convert_dict_to_function_value({"value": 5, "root_contributions": [1, 2]}) assert isinstance(got, LeastSquaresFunctionValue) assert got.value == [1, 2] def test_convert_contributions_dict_to_function_value(): got = convert_dict_to_function_value({"value": 5, "contributions": [1, 4]}) assert isinstance(got, LikelihoodFunctionValue) assert got.value == [1, 4] def test_old_scaling_options_are_deprecated_in_minimize(): msg = "Specifying scaling options via the argument `scaling_options` is deprecated" with pytest.warns(FutureWarning, match=msg): om.minimize( lambda x: x @ x, np.arange(3), algorithm="scipy_lbfgsb", scaling_options={"method": "start_values", "magnitude": 1}, ) def test_old_scaling_options_are_deprecated_in_maximize(): msg = "Specifying scaling options via the argument `scaling_options` is deprecated" with pytest.warns(FutureWarning, match=msg): om.maximize( lambda x: -x @ x, np.arange(3), algorithm="scipy_lbfgsb", scaling_options={"method": "start_values", "magnitude": 1}, ) def test_old_multistart_options_are_deprecated_in_minimize(): msg = "Specifying multistart options via the argument 
`multistart_options` is" with pytest.warns(FutureWarning, match=msg): om.minimize( lambda x: x @ x, np.arange(3), algorithm="scipy_lbfgsb", multistart_options={"n_samples": 10}, ) def test_old_multistart_options_are_deprecated_in_maximize(): msg = "Specifying multistart options via the argument `multistart_options` is" with pytest.warns(FutureWarning, match=msg): om.maximize( lambda x: -x @ x, np.arange(3), algorithm="scipy_lbfgsb", multistart_options={"n_samples": 10}, ) def test_multistart_option_share_optimization_option_is_deprecated(): msg = "The `share_optimization` option is deprecated and will be removed in" with pytest.warns(FutureWarning, match=msg): om.minimize( lambda x: x @ x, np.arange(3), algorithm="scipy_lbfgsb", bounds=om.Bounds(lower=np.full(3, -1), upper=np.full(3, 2)), multistart={"share_optimization": 0.1}, ) def test_multistart_option_convergence_relative_params_tolerance_option_is_deprecated(): msg = "The `convergence_relative_params_tolerance` option is deprecated and will" with pytest.warns(FutureWarning, match=msg): om.minimize( lambda x: x @ x, np.arange(3), algorithm="scipy_lbfgsb", bounds=om.Bounds(lower=np.full(3, -1), upper=np.full(3, 2)), multistart={"convergence_relative_params_tolerance": 0.01}, ) def test_multistart_option_optimization_error_handling_option_is_deprecated(): msg = "The `optimization_error_handling` option is deprecated and will be removed" with pytest.warns(FutureWarning, match=msg): om.minimize( lambda x: x @ x, np.arange(3), algorithm="scipy_lbfgsb", bounds=om.Bounds(lower=np.full(3, -1), upper=np.full(3, 2)), multistart={"optimization_error_handling": "continue"}, ) def test_multistart_option_exploration_error_handling_option_is_deprecated(): msg = "The `exploration_error_handling` option is deprecated and will be removed" with pytest.warns(FutureWarning, match=msg): om.minimize( lambda x: x @ x, np.arange(3), algorithm="scipy_lbfgsb", bounds=om.Bounds(lower=np.full(3, -1), upper=np.full(3, 2)), 
multistart={"exploration_error_handling": "continue"}, ) def test_deprecated_dict_access_of_multistart_info(): res = om.minimize( lambda x: x @ x, np.arange(3), algorithm="scipy_lbfgsb", multistart=True, bounds=om.Bounds(lower=np.full(3, -1), upper=np.full(3, 2)), ) msg = "The dictionary access for 'local_optima' is deprecated and will be removed" with pytest.warns(FutureWarning, match=msg): _ = res.multistart_info["local_optima"] def test_base_steps_in_first_derivatives_is_deprecated(): msg = "The `base_steps` argument is deprecated and will be removed alongside" with pytest.warns(FutureWarning, match=msg): om.first_derivative(lambda x: x @ x, np.arange(3), base_steps=1e-3) def test_step_ratio_in_first_derivatives_is_deprecated(): msg = "The `step_ratio` argument is deprecated and will be removed alongside" with pytest.warns(FutureWarning, match=msg): om.first_derivative(lambda x: x @ x, np.arange(3), step_ratio=2) def test_n_steps_in_first_derivatives_is_deprecated(): msg = "The `n_steps` argument is deprecated and will be removed alongside" with pytest.warns(FutureWarning, match=msg): om.first_derivative(lambda x: x @ x, np.arange(3), n_steps=2) def test_return_info_in_first_derivatives_is_deprecated(): msg = "The `return_info` argument is deprecated and will be removed alongside" with pytest.warns(FutureWarning, match=msg): om.first_derivative(lambda x: x @ x, np.arange(3), return_info=True) def test_return_func_value_in_first_derivatives_is_deprecated(): msg = "The `return_func_value` argument is deprecated and will be removed in" with pytest.warns(FutureWarning, match=msg): om.first_derivative(lambda x: x @ x, np.arange(3), return_func_value=True) def test_base_steps_in_second_derivatives_is_deprecated(): msg = "The `base_steps` argument is deprecated and will be removed alongside" with pytest.warns(FutureWarning, match=msg): om.second_derivative(lambda x: x @ x, np.arange(3), base_steps=1e-3) def test_step_ratio_in_second_derivatives_is_deprecated(): msg = 
"The `step_ratio` argument is deprecated and will be removed alongside" with pytest.warns(FutureWarning, match=msg): om.second_derivative(lambda x: x @ x, np.arange(3), step_ratio=2) def test_n_steps_in_second_derivatives_is_deprecated(): msg = "The `n_steps` argument is deprecated and will be removed alongside" with pytest.warns(FutureWarning, match=msg): om.second_derivative(lambda x: x @ x, np.arange(3), n_steps=1) def test_return_func_value_in_second_derivatives_is_deprecated(): msg = "The `return_func_value` argument is deprecated and will be removed in" with pytest.warns(FutureWarning, match=msg): om.second_derivative(lambda x: x @ x, np.arange(3), return_func_value=True) def test_return_info_in_second_derivatives_is_deprecated(): msg = "The `return_info` argument is deprecated and will be removed alongside" with pytest.warns(FutureWarning, match=msg): om.second_derivative(lambda x: x @ x, np.arange(3), return_info=True) def test_numdiff_result_func_evals_is_deprecated(): msg = "The `func_evals` attribute is deprecated and will be removed in optimagic" res = NumdiffResult(derivative=1) with pytest.warns(FutureWarning, match=msg): _ = res.func_evals def test_numdiff_result_derivative_candidates_is_deprecated(): msg = "The `derivative_candidates` attribute is deprecated and will be removed" res = NumdiffResult(derivative=1) with pytest.warns(FutureWarning, match=msg): _ = res.derivative_candidates def test_numdiff_result_dict_access_is_deprecated(): msg = "The dictionary access for 'derivative' is deprecated and will be removed" res = NumdiffResult(derivative=1) with pytest.warns(FutureWarning, match=msg): _ = res["derivative"] def test_key_argument_is_deprecated_in_first_derivative(): with pytest.warns(FutureWarning, match="The `key` argument in"): om.first_derivative(lambda x: {"value": x @ x}, np.arange(3), key="value") def test_key_argument_is_deprecated_in_second_derivative(): with pytest.warns(FutureWarning, match="The `key` argument in"): 
om.second_derivative(lambda x: {"value": x @ x}, np.arange(3), key="value") def test_jac_dicts_are_deprecated_in_minimize(): msg = "Specifying a dictionary of jac functions is deprecated" with pytest.warns(FutureWarning, match=msg): res = om.minimize( lambda x: x @ x, np.arange(3), algorithm="scipy_lbfgsb", jac={"value": lambda x: 2 * x}, ) aaae(res.params, np.zeros(3)) def test_jac_dicts_are_deprecated_in_maximize(): msg = "Specifying a dictionary of jac functions is deprecated" with pytest.warns(FutureWarning, match=msg): res = om.maximize( lambda x: -x @ x, np.arange(3), algorithm="scipy_lbfgsb", jac={"value": lambda x: -2 * x}, ) aaae(res.params, np.zeros(3)) def test_fun_and_jac_dicts_are_deprecated_in_minimize(): msg = "Specifying a dictionary of fun_and_jac functions is deprecated" with pytest.warns(FutureWarning, match=msg): res = om.minimize( lambda x: x @ x, np.arange(3), algorithm="scipy_lbfgsb", fun_and_jac={"value": lambda x: (x @ x, 2 * x)}, ) aaae(res.params, np.zeros(3)) def test_fun_and_jac_dicts_are_deprecated_in_maximize(): msg = "Specifying a dictionary of fun_and_jac functions is deprecated" with pytest.warns(FutureWarning, match=msg): res = om.maximize( lambda x: -x @ x, np.arange(3), algorithm="scipy_lbfgsb", fun_and_jac={"value": lambda x: (-x @ x, -2 * x)}, ) aaae(res.params, np.zeros(3)) def test_fun_with_dict_return_is_deprecated_in_minimize(): msg = "Returning a dictionary with the special keys" with pytest.warns(FutureWarning, match=msg): res = om.minimize( lambda x: {"value": x @ x}, np.arange(3), algorithm="scipy_lbfgsb", ) aaae(res.params, np.zeros(3)) def test_fun_with_dict_return_is_deprecated_in_slice_plot(): msg = "Functions that return dictionaries" with pytest.warns(FutureWarning, match=msg): om.slice_plot( lambda x: {"value": x @ x}, np.arange(3), bounds=om.Bounds(lower=np.zeros(3), upper=np.ones(3) * 5), ) def test_handle_log_options(): msg = ( "Usage of the parameter log_options is deprecated " "and will be removed in a 
future version. " "Provide a LogOptions instance for the parameter `logging`, if you need to " "configure the logging." ) log_options = {"fast_logging": True} with pytest.warns(FutureWarning, match=msg): logger = None handled_logger = handle_log_options_throw_deprecated_warning( log_options, logger ) assert handled_logger is None creation_warning = ( f"\nUsing {log_options=} to create an instance of SQLiteLogOptions. " f"This mechanism will be removed in the future." ) with pytest.warns(match=creation_warning): handled_logger = handle_log_options_throw_deprecated_warning( log_options, ":memory:" ) assert isinstance(handled_logger, SQLiteLogOptions) incompatibility_msg = "Found string or path for logger argument, but parameter" f" {log_options=} is not compatible " log_options_typo = {"fast_lugging": False} with pytest.raises(ValueError, match=incompatibility_msg): handled_logger = handle_log_options_throw_deprecated_warning( log_options_typo, ":memory:" ) assert handled_logger == ":memory:" def test_log_options_are_deprecated_in_estimate_ml(tmp_path): with pytest.warns(FutureWarning, match="LogOptions"): @om.mark.likelihood def loglike(x): return -(x**2) em.estimate_ml( loglike=loglike, params=np.arange(3), optimize_options={"algorithm": "scipy_lbfgsb"}, logging=tmp_path / "log.db", log_options={"fast_logging": True, "if_database_exists": "replace"}, ) with pytest.warns(FutureWarning, match="if_table_exists"): @om.mark.likelihood def loglike(x): return -(x**2) em.estimate_ml( loglike=loglike, params=np.arange(3), optimize_options={"algorithm": "scipy_lbfgsb"}, logging=tmp_path / "log_1.db", log_options={"fast_logging": True, "if_table_exists": "replace"}, ) def test_log_options_are_deprecated_in_estimate_msm(tmp_path): with pytest.warns(FutureWarning, match="LogOptions"): @om.mark.likelihood def loglike(x): return -(x**2) em.estimate_msm( simulate_moments=lambda x: x, empirical_moments=np.zeros(3), moments_cov=np.eye(3), params=np.arange(3), 
optimize_options={"algorithm": "scipy_lbfgsb"}, logging=tmp_path / "log.db", log_options={"fast_logging": True, "if_database_exists": "replace"}, ) with pytest.warns(FutureWarning, match="if_table_exists"): @om.mark.likelihood def loglike(x): return -(x**2) em.estimate_msm( simulate_moments=lambda x: x, empirical_moments=np.zeros(3), moments_cov=np.eye(3), params=np.arange(3), optimize_options={"algorithm": "scipy_lbfgsb"}, logging=tmp_path / "log_1.db", log_options={"fast_logging": True, "if_table_exists": "replace"}, ) def test_log_options_are_deprecated_in_minimize(tmp_path): with pytest.warns(FutureWarning, match="LogOptions"): om.minimize( lambda x: x @ x, np.arange(3), algorithm="scipy_lbfgsb", logging=tmp_path / "log.db", log_options={"fast_logging": True, "if_database_exists": "replace"}, ) with pytest.warns(FutureWarning, match="if_table_exists"): om.minimize( lambda x: x @ x, np.arange(3), algorithm="scipy_lbfgsb", logging=tmp_path / "log_1.db", log_options={"fast_logging": True, "if_table_exists": "replace"}, ) def test_log_options_are_deprecated_in_maximize(tmp_path): with pytest.warns(FutureWarning, match="LogOptions"): om.maximize( lambda x: -x @ x, np.arange(3), algorithm="scipy_lbfgsb", logging=tmp_path / "log.db", log_options={"fast_logging": True, "if_database_exists": "replace"}, ) with pytest.warns(FutureWarning, match="if_table_exists"): om.maximize( lambda x: -x @ x, np.arange(3), algorithm="scipy_lbfgsb", logging=tmp_path / "log_1.db", log_options={"fast_logging": True, "if_table_exists": "replace"}, ) def test_dict_constraints_are_deprecated_in_minimize(): msg = "Specifying constraints as a dictionary is deprecated and" with pytest.warns(FutureWarning, match=msg): om.minimize( lambda x: x @ x, np.arange(3), algorithm="scipy_lbfgsb", constraints={"type": "fixed", "loc": [0, 1]}, ) def test_dict_constraints_are_deprecated_in_maximize(): msg = "Specifying constraints as a dictionary is deprecated and" with pytest.warns(FutureWarning, 
match=msg): om.maximize( lambda x: -x @ x, np.arange(3), algorithm="scipy_lbfgsb", constraints={"type": "fixed", "loc": [0, 1]}, ) def test_dict_constraints_are_deprecated_in_estimate_ml(): msg = "Specifying constraints as a dictionary is deprecated and" with pytest.warns(FutureWarning, match=msg): @om.mark.likelihood def loglike(x): return -(x**2) em.estimate_ml( loglike=loglike, params=np.arange(3), optimize_options={"algorithm": "scipy_lbfgsb"}, constraints={"type": "fixed", "loc": [0, 1]}, ) def test_dict_constraints_are_deprecated_in_estimate_msm(): msg = "Specifying constraints as a dictionary is deprecated and" with pytest.warns(FutureWarning, match=msg): em.estimate_msm( simulate_moments=lambda x: x, empirical_moments=np.zeros(3), moments_cov=np.eye(3), params=np.arange(3), optimize_options={"algorithm": "scipy_lbfgsb"}, constraints={"type": "fixed", "loc": [0, 1]}, ) @pytest.fixture def dummy_func(): return lambda x: x def test_pre_process_constraints_trivial_case(dummy_func): constraints = om.FixedConstraint(selector=dummy_func) expected = [{"type": "fixed", "selector": dummy_func}] assert pre_process_constraints(constraints) == expected def test_pre_process_constraints_list_of_constraints(dummy_func): constraints = [ om.FixedConstraint(selector=dummy_func), om.IncreasingConstraint(selector=dummy_func), ] expected = [ {"type": "fixed", "selector": dummy_func}, {"type": "increasing", "selector": dummy_func}, ] assert pre_process_constraints(constraints) == expected def test_pre_process_constraints_none_case(): assert pre_process_constraints(None) == [] def test_pre_process_constraints_mixed_case(dummy_func): constraints = [ om.FixedConstraint(selector=dummy_func), {"type": "increasing", "selector": dummy_func}, ] expected = [ {"type": "fixed", "selector": dummy_func}, {"type": "increasing", "selector": dummy_func}, ] assert pre_process_constraints(constraints) == expected def test_pre_process_constraints_dict_case(dummy_func): constraints = {"type": 
"fixed", "selector": dummy_func} expected = [{"type": "fixed", "selector": dummy_func}] assert pre_process_constraints(constraints) == expected def test_pre_process_constraints_invalid_case(): constraints = "invalid" msg = "Invalid constraint type: " with pytest.raises(InvalidConstraintError, match=msg): pre_process_constraints(constraints) def test_pre_process_constraints_invalid_mixed_case(): constraints = [ {"type": "fixed", "loc": [0, 1]}, om.FixedConstraint(), "invalid", ] msg = "Invalid constraint types: {}" with pytest.raises(InvalidConstraintError, match=msg): pre_process_constraints(constraints) def test_deprecated_log_reader(example_db): with pytest.warns(FutureWarning, match="SQLiteLogReader"): reader = OptimizeLogReader(example_db) res = reader.read_start_params() assert res == {"a": 1, "b": 2, "c": 3} def test_estimagic_joblib_batch_evaluator_is_deprecated(): msg = "estimagic.batch_evaluators.joblib_batch_evaluator has been deprecated" with pytest.warns(FutureWarning, match=msg): batch_evaluators.joblib_batch_evaluator(lambda x: x, [1, 2], n_cores=1) def test_estimagic_process_batch_evaluator_is_deprecated(): msg = "estimagic.batch_evaluators.process_batch_evaluator has been deprecated" with pytest.warns(FutureWarning, match=msg): batch_evaluators.process_batch_evaluator("joblib") ================================================ FILE: tests/optimagic/test_mark.py ================================================ import functools from dataclasses import dataclass import pytest import optimagic as om from optimagic.optimization.algorithm import AlgoInfo, Algorithm from optimagic.typing import AggregationLevel def f(x): pass @dataclass(frozen=True) class ImmutableF: def __call__(self, x): pass def _g(x, y): pass g = functools.partial(_g, y=1) CALLABLES = [f, ImmutableF(), g] @pytest.mark.parametrize("func", CALLABLES) def test_scalar(func): got = om.mark.scalar(func) assert got._problem_type == AggregationLevel.SCALAR @pytest.mark.parametrize("func", 
CALLABLES) def test_least_squares(func): got = om.mark.least_squares(func) assert got._problem_type == AggregationLevel.LEAST_SQUARES @pytest.mark.parametrize("func", CALLABLES) def test_likelihood(func): got = om.mark.likelihood(func) assert got._problem_type == AggregationLevel.LIKELIHOOD def test_mark_minimizer(): @om.mark.minimizer( name="test", solver_type=AggregationLevel.LEAST_SQUARES, is_available=True, is_global=True, needs_jac=True, needs_hess=True, needs_bounds=True, supports_parallelism=True, supports_bounds=True, supports_infinite_bounds=True, supports_linear_constraints=True, supports_nonlinear_constraints=True, disable_history=False, ) @dataclass(frozen=True) class DummyAlgorithm(Algorithm): initial_radius: float = 1.0 max_radius: float = 10.0 convergence_ftol_rel: float = 1e-6 stopping_maxiter: int = 1000 def _solve_internal_problem(self, problem, x0): pass assert hasattr(DummyAlgorithm, "__algo_info__") assert isinstance(DummyAlgorithm.__algo_info__, AlgoInfo) assert DummyAlgorithm.__algo_info__.name == "test" ================================================ FILE: tests/optimagic/test_timing.py ================================================ import pytest from optimagic import timing def test_invalid_aggregate_batch_time(): with pytest.raises(ValueError, match="aggregate_batch_time must be a callable"): timing.CostModel( fun=None, jac=None, fun_and_jac=None, label="label", aggregate_batch_time="Not callable", ) ================================================ FILE: tests/optimagic/test_type_conversion.py ================================================ import numpy as np import pytest from optimagic.type_conversion import TYPE_CONVERTERS from optimagic.typing import ( GtOneFloat, NonNegativeFloat, NonNegativeInt, PositiveFloat, PositiveInt, ) @pytest.mark.parametrize("candidate", [1, "1", 1.0, "1.0", np.int32(1), np.array(1.0)]) def test_int_conversion(candidate): got = TYPE_CONVERTERS[int](candidate) assert isinstance(got, int) assert got == 1 
@pytest.mark.parametrize("candidate", [1, "1", 1.0, "1.0", np.int32(1), np.array(1.0)]) def test_positive_int_conversion(candidate): got = TYPE_CONVERTERS[PositiveInt](candidate) assert isinstance(got, int) assert got == 1 @pytest.mark.parametrize("candidate", [1, "1", 1.0, "1.0", np.int32(1), np.array(1.0)]) def test_non_negative_int_conversion(candidate): got = TYPE_CONVERTERS[NonNegativeInt](candidate) assert isinstance(got, int) assert got == 1 @pytest.mark.parametrize("candidate", [-1, "-1", -1.0, 0]) def test_positive_int_conversion_fail(candidate): with pytest.raises(Exception): # noqa: B017 TYPE_CONVERTERS[PositiveInt](candidate) @pytest.mark.parametrize("candidate", [-1, "-1", -1.0]) def test_non_negative_int_conversion_fail(candidate): with pytest.raises(Exception): # noqa: B017 TYPE_CONVERTERS[NonNegativeInt](candidate) @pytest.mark.parametrize("candidate", [1, "1", 1.0, "1.0", np.int32(1), np.array(1.0)]) def test_float_conversion(candidate): got = TYPE_CONVERTERS[float](candidate) assert isinstance(got, float) assert got == 1.0 @pytest.mark.parametrize("candidate", [1, "1", 1.0, "1.0", np.int32(1), np.array(1.0)]) def test_positive_float_conversion(candidate): got = TYPE_CONVERTERS[PositiveFloat](candidate) assert isinstance(got, float) assert got == 1.0 @pytest.mark.parametrize("candidate", [1, "1", 1.0, "1.0", np.int32(1), np.array(1.0)]) def test_non_negative_float_conversion(candidate): got = TYPE_CONVERTERS[NonNegativeFloat](candidate) assert isinstance(got, float) assert got == 1.0 @pytest.mark.parametrize("candidate", [-1, "-1", -1.0, 0]) def test_positive_float_conversion_fail(candidate): with pytest.raises(Exception): # noqa: B017 TYPE_CONVERTERS[PositiveFloat](candidate) @pytest.mark.parametrize("candidate", [-1, "-1", -1.0]) def test_non_negative_float_conversion_fail(candidate): with pytest.raises(Exception): # noqa: B017 TYPE_CONVERTERS[NonNegativeFloat](candidate) @pytest.mark.parametrize("candidate", [np.bool_(True), "yes", "1", "true", 
True]) def test_bool_conversion_true(candidate): got = TYPE_CONVERTERS[bool](candidate) assert got is True @pytest.mark.parametrize("candidate", [np.bool_(False), "no", "0", "false", False]) def test_bool_conversion_false(candidate): got = TYPE_CONVERTERS[bool](candidate) assert got is False @pytest.mark.parametrize("candidate", [1.3, "1.3", np.float32(1.3), np.array(1.3)]) def test_gt_one_float(candidate): got = TYPE_CONVERTERS[PositiveFloat](candidate) assert isinstance(got, float) assert np.allclose(got, 1.3) @pytest.mark.parametrize("candidate", [0.5, "0.5", np.float32(0.5), np.array(0.5)]) def test_gt_one_float_fail(candidate): with pytest.raises(Exception): # noqa: B017 TYPE_CONVERTERS[GtOneFloat](candidate) ================================================ FILE: tests/optimagic/test_typed_dicts_consistency.py ================================================ from typing import get_args, get_type_hints from optimagic.differentiation.numdiff_options import NumdiffOptions, NumdiffOptionsDict from optimagic.optimization.multistart_options import ( MultistartOptions, MultistartOptionsDict, ) from optimagic.parameters.scaling import ScalingOptions, ScalingOptionsDict def assert_attributes_and_type_hints_are_equal(dataclass, typed_dict): """Test that dataclass and typed_dict have same attributes and types. This assertion purposefully ignores that all type hints in the typed dict are wrapped by typing.NotRequired. As there is no easy way to *not* read the NotRequired types in 3.10, we need to activate include_extras=True to get the NotRequired types in Python 3.11 and above. Once we drop support for Python 3.10, we can remove the include_extras=True argument and the removal of the NotRequired types. 
Args: dataclass: An instance of a dataclass typed_dict: An instance of a typed dict """ types_from_dataclass = get_type_hints(dataclass) types_from_typed_dict = get_type_hints(typed_dict, include_extras=True) types_from_typed_dict = { # Remove typing.NotRequired from the types k: get_args(v)[0] for k, v in types_from_typed_dict.items() } assert types_from_dataclass == types_from_typed_dict def test_scaling_options_and_dict_have_same_attributes(): assert_attributes_and_type_hints_are_equal(ScalingOptions, ScalingOptionsDict) def test_multistart_options_and_dict_have_same_attributes(): assert_attributes_and_type_hints_are_equal(MultistartOptions, MultistartOptionsDict) def test_numdiff_options_and_dict_have_same_attributes(): assert_attributes_and_type_hints_are_equal(NumdiffOptions, NumdiffOptionsDict) ================================================ FILE: tests/optimagic/test_utilities.py ================================================ import numpy as np import pandas as pd import pytest from numpy.testing import assert_array_almost_equal as aaae from optimagic.config import IS_JAX_INSTALLED from optimagic.utilities import ( calculate_trustregion_initial_radius, chol_params_to_lower_triangular_matrix, cov_matrix_to_params, cov_matrix_to_sdcorr_params, cov_params_to_matrix, cov_to_sds_and_corr, dimension_to_number_of_triangular_elements, get_rng, hash_array, isscalar, number_of_triangular_elements_to_dimension, propose_alternatives, read_pickle, robust_cholesky, robust_inverse, sdcorr_params_to_matrix, sdcorr_params_to_sds_and_corr, sds_and_corr_to_cov, to_pickle, ) if IS_JAX_INSTALLED: import jax.numpy as jnp def test_chol_params_to_lower_triangular_matrix(): calculated = chol_params_to_lower_triangular_matrix(pd.Series([1, 2, 3])) expected = np.array([[1, 0], [2, 3]]) aaae(calculated, expected) def test_cov_params_to_matrix(): params = np.array([1, 0.1, 2, 0.2, 0.22, 3]) expected = np.array([[1, 0.1, 0.2], [0.1, 2, 0.22], [0.2, 0.22, 3]]) calculated = 
cov_params_to_matrix(params) aaae(calculated, expected) def test_cov_matrix_to_params(): expected = np.array([1, 0.1, 2, 0.2, 0.22, 3]) cov = np.array([[1, 0.1, 0.2], [0.1, 2, 0.22], [0.2, 0.22, 3]]) calculated = cov_matrix_to_params(cov) aaae(calculated, expected) def test_sdcorr_params_to_sds_and_corr(): sdcorr_params = pd.Series([1, 2, 3, 0.1, 0.2, 0.3]) exp_corr = np.array([[1, 0.1, 0.2], [0.1, 1, 0.3], [0.2, 0.3, 1]]) exp_sds = np.array([1, 2, 3]) calc_sds, calc_corr = sdcorr_params_to_sds_and_corr(sdcorr_params) aaae(calc_sds, exp_sds) aaae(calc_corr, exp_corr) def test_sdcorr_params_to_matrix(): sds = np.sqrt([1, 2, 3]) corrs = [0.07071068, 0.11547005, 0.08981462] params = np.hstack([sds, corrs]) expected = np.array([[1, 0.1, 0.2], [0.1, 2, 0.22], [0.2, 0.22, 3]]) calculated = sdcorr_params_to_matrix(params) aaae(calculated, expected) def test_cov_matrix_to_sdcorr_params(): sds = np.sqrt([1, 2, 3]) corrs = [0.07071068, 0.11547005, 0.08981462] expected = np.hstack([sds, corrs]) cov = np.array([[1, 0.1, 0.2], [0.1, 2, 0.22], [0.2, 0.22, 3]]) calculated = cov_matrix_to_sdcorr_params(cov) aaae(calculated, expected) def test_sds_and_corr_to_cov(): sds = [1, 2, 3] corr = np.ones((3, 3)) * 0.2 corr[np.diag_indices(3)] = 1 calculated = sds_and_corr_to_cov(sds, corr) expected = np.array([[1.0, 0.4, 0.6], [0.4, 4.0, 1.2], [0.6, 1.2, 9.0]]) aaae(calculated, expected) def test_cov_to_sds_and_corr(): cov = np.array([[1.0, 0.4, 0.6], [0.4, 4.0, 1.2], [0.6, 1.2, 9.0]]) calc_sds, calc_corr = cov_to_sds_and_corr(cov) exp_sds = [1, 2, 3] exp_corr = np.ones((3, 3)) * 0.2 exp_corr[np.diag_indices(3)] = 1 aaae(calc_sds, exp_sds) aaae(calc_corr, exp_corr) def test_number_of_triangular_elements_to_dimension(): inputs = [6, 10, 15, 21] expected = [3, 4, 5, 6] for inp, exp in zip(inputs, expected, strict=False): assert number_of_triangular_elements_to_dimension(inp) == exp def test_dimension_to_number_of_triangular_elements(): inputs = [3, 4, 5, 6] expected = [6, 10, 15, 21] for 
inp, exp in zip(inputs, expected, strict=False): assert dimension_to_number_of_triangular_elements(inp) == exp def random_cov(dim, seed): rng = np.random.default_rng(seed) num_elements = int(dim * (dim + 1) / 2) chol = np.zeros((dim, dim)) chol[np.tril_indices(dim)] = rng.uniform(size=num_elements) cov = chol @ chol.T zero_positions = rng.choice(range(dim), size=int(dim / 5), replace=False) for pos in zero_positions: cov[:, pos] = 0 cov[pos] = 0 return cov seeds = [58822, 3181, 98855, 44002, 47631, 97741, 10655, 4600, 1151, 58189] dims = [8] * 6 + [10, 12, 15, 20] @pytest.mark.parametrize("dim, seed", zip(dims, seeds, strict=False)) def test_robust_cholesky_with_zero_variance(dim, seed): cov = random_cov(dim, seed) chol = robust_cholesky(cov) aaae(chol.dot(chol.T), cov) assert (chol[np.triu_indices(len(cov), k=1)] == 0).all() def test_robust_cholesky_with_extreme_cases(): for cov in [np.ones((5, 5)), np.zeros((5, 5))]: chol = robust_cholesky(cov) aaae(chol.dot(chol.T), cov) def test_robust_inverse_nonsingular(): mat = np.eye(3) + 0.2 expected = np.linalg.inv(mat) calculated = robust_inverse(mat) aaae(calculated, expected) def test_robust_inverse_singular(): mat = np.zeros((5, 5)) expected = np.zeros((5, 5)) with pytest.warns(UserWarning, match="LinAlgError"): calculated = robust_inverse(mat) aaae(calculated, expected) def test_hash_array(): arr1 = np.arange(4)[::2] arr2 = np.array([0, 2]) arr3 = np.array([0, 3]) assert hash_array(arr1) == hash_array(arr2) assert hash_array(arr1) != hash_array(arr3) def test_initial_trust_radius_small_x(): x = np.array([0.01, 0.01]) expected = 0.1 res = calculate_trustregion_initial_radius(x) assert expected == pytest.approx(res, abs=1e-8) def test_initial_trust_radius_large_x(): x = np.array([20.5, 10]) expected = 2.05 res = calculate_trustregion_initial_radius(x) assert expected == pytest.approx(res, abs=1e-8) def test_pickling(tmp_path): a = [1, 2, 3] path = tmp_path / "bla.pkl" to_pickle(a, path) b = read_pickle(path) assert a 
== b SCALARS = [1, 2.0, np.pi, np.array(1), np.array(2.0), np.array(np.pi), np.nan] @pytest.mark.parametrize("element", SCALARS) def test_isscalar_true(element): assert isscalar(element) is True NON_SCALARS = [np.arange(3), {"a": 1}, [1, 2, 3]] @pytest.mark.parametrize("element", NON_SCALARS) def test_isscalar_false(element): assert isscalar(element) is False @pytest.mark.skipif(not IS_JAX_INSTALLED, reason="Needs jax.") def tets_isscalar_jax_true(): x = jnp.arange(3) element = x @ x assert isscalar(element) is True @pytest.mark.skipif(not IS_JAX_INSTALLED, reason="Needs jax.") def test_isscalar_jax_false(): element = jnp.arange(3) assert isscalar(element) is False TEST_CASES = [ 0, 1, 10, 1000000, None, np.random.default_rng(), np.random.Generator(np.random.MT19937()), ] @pytest.mark.parametrize("seed", TEST_CASES) def test_get_rng_correct_input(seed): rng = get_rng(seed) assert isinstance(rng, np.random.Generator) TEST_CASES = [0.1, "a", object(), lambda x: x**2] @pytest.mark.parametrize("seed", TEST_CASES) def test_get_rng_wrong_input(seed): with pytest.raises(TypeError): get_rng(seed) def test_propose_alternatives(): possibilities = ["scipy_lbfgsb", "scipy_slsqp", "nlopt_lbfgsb"] inputs = [["scipy_L-BFGS-B", 1], ["L-BFGS-B", 2]] expected = [["scipy_slsqp"], ["scipy_slsqp", "scipy_lbfgsb"]] for inp, exp in zip(inputs, expected, strict=False): assert propose_alternatives(inp[0], possibilities, number=inp[1]) == exp ================================================ FILE: tests/optimagic/visualization/test_backends.py ================================================ import numpy as np import pytest from optimagic.exceptions import InvalidPlottingBackendError, NotInstalledError from optimagic.visualization.backends import ( BACKEND_AVAILABILITY_AND_LINE_PLOT_FUNCTION, line_plot, ) from optimagic.visualization.plotting_utilities import LineData @pytest.fixture() def sample_lines(): lines = [ LineData(x=np.array([0, 1, 2]), y=np.array([0, 1, 2])), 
LineData(x=np.array([0, 1, 2]), y=np.array([2, 1, 0])), ] return lines @pytest.mark.parametrize("backend", BACKEND_AVAILABILITY_AND_LINE_PLOT_FUNCTION.keys()) def test_line_plot_all_backends(sample_lines, backend, close_mpl_figures): line_plot(sample_lines, backend=backend) def test_line_plot_invalid_backend(sample_lines): with pytest.raises(InvalidPlottingBackendError): line_plot(sample_lines, backend="bla") def test_line_plot_unavailable_backend(sample_lines, monkeypatch): # Use monkeypatch to simulate that 'matplotlib' backend is not installed. monkeypatch.setitem( BACKEND_AVAILABILITY_AND_LINE_PLOT_FUNCTION, "matplotlib", (False, None, None) ) with pytest.raises(NotInstalledError): line_plot(sample_lines, backend="matplotlib") ================================================ FILE: tests/optimagic/visualization/test_convergence_plot.py ================================================ import pytest from optimagic import get_benchmark_problems from optimagic.benchmarking.process_benchmark_results import process_benchmark_results from optimagic.benchmarking.run_benchmark import run_benchmark from optimagic.visualization.convergence_plot import ( _check_only_allowed_subset_provided, _extract_convergence_plot_lines, convergence_plot, ) @pytest.fixture() def benchmark_results(): problems = get_benchmark_problems("example") stop_after_10 = { "stopping_max_criterion_evaluations": 10, "stopping_max_iterations": 10, } optimizers = { "lbfgsb": {"algorithm": "scipy_lbfgsb", "algo_options": stop_after_10}, "nm": {"algorithm": "scipy_neldermead", "algo_options": stop_after_10}, } results = run_benchmark( problems, optimizers, n_cores=1, # must be 1 for the test to work ) return problems, results def test_convergence_plot_default_options(benchmark_results): problems, results = benchmark_results convergence_plot( problems=problems, results=results, problem_subset=["bard_good_start"], ) # integration test to make sure non default argument do not throw Errors profile_options = [ 
{"n_cols": 3}, {"distance_measure": "parameter_distance"}, {"monotone": False}, {"normalize_distance": False}, {"runtime_measure": "walltime"}, {"runtime_measure": "n_batches"}, {"stopping_criterion": "x"}, {"stopping_criterion": "x_and_y"}, {"stopping_criterion": "x_or_y"}, {"x_precision": 1e-5}, {"y_precision": 1e-5}, {"backend": "matplotlib"}, {"backend": "bokeh"}, {"backend": "altair"}, ] @pytest.mark.parametrize("options", profile_options) @pytest.mark.parametrize("grid", [True, False]) def test_convergence_plot_options(options, grid, benchmark_results, close_mpl_figures): problems, results = benchmark_results convergence_plot( problems=problems, results=results, problem_subset=["bard_good_start"], combine_plots_in_grid=grid, **options, ) def test_convergence_plot_stopping_criterion_none(benchmark_results): problems, results = benchmark_results with pytest.raises(UnboundLocalError): convergence_plot( problems=problems, results=results, problem_subset=["bard_good_start"], stopping_criterion=None, ) def test_check_only_allowed_subset_provided_none(): allowed = ["a", "b", "c"] _check_only_allowed_subset_provided(None, allowed, "name") def test_check_only_allowed_subset_provided_all_included(): allowed = ["a", "b", "c"] _check_only_allowed_subset_provided(["a", "b"], allowed, "name") def test_check_only_allowed_subset_provided_missing(): allowed = ["a", "b", "c"] with pytest.raises(ValueError): _check_only_allowed_subset_provided(["d"], allowed, "name") def test_extract_convergence_plot_lines(benchmark_results): problems, results = benchmark_results df, _ = process_benchmark_results( problems=problems, results=results, stopping_criterion="y" ) lines_list, titles = _extract_convergence_plot_lines( df=df, problems=problems, runtime_measure="n_evaluations", outcome="criterion_normalized", palette=["red", "green", "blue"], combine_plots_in_grid=True, backend="bla", ) assert isinstance(lines_list, list) and isinstance(titles, list) assert len(lines_list) == len(titles) 
== len(problems) for subplot_lines in lines_list: assert isinstance(subplot_lines, list) and len(subplot_lines) == 2 assert subplot_lines[0].name == "lbfgsb" assert subplot_lines[1].name == "nm" assert subplot_lines[0].color == "red" assert subplot_lines[1].color == "green" ================================================ FILE: tests/optimagic/visualization/test_deviation_plot.py ================================================ import pytest from optimagic import get_benchmark_problems from optimagic.benchmarking.run_benchmark import run_benchmark from optimagic.visualization.deviation_plot import ( deviation_plot, ) # integration test to make sure non default argument do not throw Errors profile_options = [ {"distance_measure": "parameter_distance"}, {"distance_measure": "criterion"}, {"monotone": True}, {"monotone": False}, {"runtime_measure": "n_evaluations"}, {"runtime_measure": "n_batches"}, ] @pytest.mark.parametrize("options", profile_options) def test_convergence_plot_options(options): problems = get_benchmark_problems("example") stop_after_10 = { "stopping_max_criterion_evaluations": 10, "stopping_max_iterations": 10, } optimizers = { "lbfgsb": {"algorithm": "scipy_lbfgsb", "algo_options": stop_after_10}, "nm": {"algorithm": "scipy_neldermead", "algo_options": stop_after_10}, } results = run_benchmark( problems, optimizers, n_cores=1, # must be 1 for the test to work ) deviation_plot(problems=problems, results=results, **options) ================================================ FILE: tests/optimagic/visualization/test_history_plots.py ================================================ import itertools from pathlib import Path import numpy as np import pytest from numpy.testing import assert_array_equal import optimagic as om from optimagic.logging import SQLiteLogOptions from optimagic.optimization.optimize import minimize from optimagic.parameters.bounds import Bounds from optimagic.visualization.backends import ( 
BACKEND_AVAILABILITY_AND_LINE_PLOT_FUNCTION, ) from optimagic.visualization.history_plots import ( LineData, _extract_criterion_plot_lines, _extract_params_plot_lines, _harmonize_inputs_to_dict, _PlottingMultistartHistory, _retrieve_optimization_data_from_results, _retrieve_optimization_data_from_single_result, criterion_plot, params_plot, ) @pytest.fixture() def minimize_result(): bounds = Bounds(soft_lower=np.full(5, -1), soft_upper=np.full(5, 6)) out = {} for multistart in [True, False]: res = [] for algorithm in ["scipy_neldermead", "scipy_lbfgsb"]: _res = minimize( fun=lambda x: x @ x, params=np.arange(5), algorithm=algorithm, bounds=bounds, multistart=( om.MultistartOptions(n_samples=1000, convergence_max_discoveries=5) if multistart else None ), ) res.append(_res) out[multistart] = res return out # ====================================================================================== # Params plot # ====================================================================================== TEST_CASES = list( itertools.product( [True, False], # multistart [None, lambda x: x[:2]], # selector [None, 50], # max_evaluations [True, False], # show_exploration ) ) @pytest.mark.parametrize( "multistart, selector, max_evaluations, show_exploration", TEST_CASES ) def test_params_plot_multistart( minimize_result, multistart, selector, max_evaluations, show_exploration ): for _res in minimize_result[multistart]: params_plot( _res, selector=selector, max_evaluations=max_evaluations, show_exploration=show_exploration, ) # ====================================================================================== # Test criterion plot # ====================================================================================== TEST_CASES = list(itertools.product([True, False], repeat=4)) @pytest.mark.parametrize( "multistart, monotone, stack_multistart, exploration", TEST_CASES ) def test_criterion_plot_list_input( minimize_result, multistart, monotone, stack_multistart, exploration ): 
res = minimize_result[multistart] criterion_plot( res, monotone=monotone, stack_multistart=stack_multistart, show_exploration=exploration, ) def test_criterion_plot_name_input(minimize_result): result = minimize_result[False] criterion_plot(result[0], names="neldermead", palette="blue") def test_criterion_plot_wrong_results(): with pytest.raises(TypeError): criterion_plot([10, np.array([1, 2, 3])]) def test_criterion_plot_different_input_types(): bounds = Bounds(soft_lower=np.full(5, -1), soft_upper=np.full(5, 6)) # logged result minimize( fun=lambda x: x @ x, params=np.arange(5), algorithm="scipy_lbfgsb", bounds=bounds, multistart=om.MultistartOptions(n_samples=1000, convergence_max_discoveries=5), logging=SQLiteLogOptions("test.db", fast_logging=True), ) res = minimize( fun=lambda x: x @ x, params=np.arange(5), algorithm="scipy_lbfgsb", bounds=bounds, multistart=om.MultistartOptions(n_samples=1000, convergence_max_discoveries=5), ) results = ["test.db", res] criterion_plot(results) criterion_plot(results, monotone=True) criterion_plot(results, stack_multistart=True) criterion_plot(results, monotone=True, stack_multistart=True) criterion_plot(results, show_exploration=True) criterion_plot("test.db") def test_criterion_plot_wrong_inputs(): with pytest.raises(ValueError): criterion_plot("bla", names=[1, 2]) with pytest.raises(ValueError): criterion_plot(["bla", "bla"], names="blub") @pytest.mark.parametrize("backend", BACKEND_AVAILABILITY_AND_LINE_PLOT_FUNCTION.keys()) def test_criterion_plot_different_backends(minimize_result, backend, close_mpl_figures): res = minimize_result[False][0] criterion_plot(res, backend=backend) @pytest.mark.parametrize("backend", BACKEND_AVAILABILITY_AND_LINE_PLOT_FUNCTION.keys()) def test_params_plot_different_backends(minimize_result, backend, close_mpl_figures): res = minimize_result[False][0] params_plot(res, backend=backend) def test_harmonize_inputs_to_dict_single_result(): res = minimize(fun=lambda x: x @ x, params=np.arange(5), 
algorithm="scipy_lbfgsb") assert _harmonize_inputs_to_dict(results=res, names=None) == {"0": res} def test_harmonize_inputs_to_dict_single_result_with_name(): res = minimize(fun=lambda x: x @ x, params=np.arange(5), algorithm="scipy_lbfgsb") assert _harmonize_inputs_to_dict(results=res, names="bla") == {"bla": res} def test_harmonize_inputs_to_dict_list_results(): res = minimize(fun=lambda x: x @ x, params=np.arange(5), algorithm="scipy_lbfgsb") results = [res, res] assert _harmonize_inputs_to_dict(results=results, names=None) == { "0": res, "1": res, } def test_harmonize_inputs_to_dict_dict_input(): res = minimize(fun=lambda x: x @ x, params=np.arange(5), algorithm="scipy_lbfgsb") results = {"bla": res, om.algos.scipy_lbfgsb(): res, om.algos.scipy_neldermead: res} got = _harmonize_inputs_to_dict(results=results, names=None) expected = {"bla": res, "scipy_lbfgsb": res, "scipy_neldermead": res} assert got == expected def test_harmonize_inputs_to_dict_dict_input_with_names(): res = minimize(fun=lambda x: x @ x, params=np.arange(5), algorithm="scipy_lbfgsb") results = {"bla": res, "blub": res} got = _harmonize_inputs_to_dict(results=results, names=["a", "b"]) expected = {"a": res, "b": res} assert got == expected def test_harmonize_inputs_to_dict_invalid_names(): results = [None] names = ["a", "b"] with pytest.raises(ValueError): _harmonize_inputs_to_dict(results=results, names=names) def test_harmonize_inputs_to_dict_str_input(): assert _harmonize_inputs_to_dict(results="test.db", names=None) == {"0": "test.db"} def test_harmonize_inputs_to_dict_path_input(): path = Path("test.db") assert _harmonize_inputs_to_dict(results=path, names=None) == {"0": path} def _compare_plotting_multistart_history_with_result( data: _PlottingMultistartHistory, res: om.OptimizeResult, res_name: str ): assert_array_equal(data.history.fun, res.history.fun) assert data.name == res_name assert_array_equal(data.start_params, res.start_params) assert data.is_multistart == (res.multistart_info 
is not None) def test_retrieve_data_from_result(minimize_result): res = minimize_result[False][0] results = {"bla": res} data = _retrieve_optimization_data_from_results( results=results, stack_multistart=False, show_exploration=False, plot_name="bla" ) assert isinstance(data, list) and len(data) == 1 assert isinstance(data[0], _PlottingMultistartHistory) _compare_plotting_multistart_history_with_result( data=data[0], res=res, res_name="bla" ) def test_retrieve_data_from_logged_result(tmp_path): res = minimize( fun=lambda x: x @ x, params=np.arange(2), algorithm="scipy_lbfgsb", logging=SQLiteLogOptions(tmp_path / "test.db", fast_logging=True), ) results = {"logged": tmp_path / "test.db"} data = _retrieve_optimization_data_from_results( results=results, stack_multistart=False, show_exploration=False, plot_name="bla" ) assert isinstance(data, list) and len(data) == 1 assert isinstance(data[0], _PlottingMultistartHistory) _compare_plotting_multistart_history_with_result( data=data[0], res=res, res_name="logged" ) @pytest.mark.parametrize("stack_multistart", [True, False]) def test_retrieve_data_from_multistart_result(minimize_result, stack_multistart): res = minimize_result[True][0] results = {"multistart": res} data = _retrieve_optimization_data_from_results( results=results, stack_multistart=stack_multistart, show_exploration=False, plot_name="bla", ) assert isinstance(data, list) and len(data) == 1 assert data[0].is_multistart assert len(data[0].local_histories) == 5 if stack_multistart: assert_array_equal( data[0].stacked_local_histories.fun, np.concatenate([hist.fun for hist in data[0].local_histories]), ) else: assert data[0].stacked_local_histories is None def test_extract_criterion_plot_lines(minimize_result): res = minimize_result[True][0] results = {"multistart": res} data = _retrieve_optimization_data_from_results( results=results, stack_multistart=False, show_exploration=False, plot_name="bla" ) palette_cycle = itertools.cycle(["red", "green", "blue"]) 
lines, multistart_lines = _extract_criterion_plot_lines( data=data, max_evaluations=None, palette_cycle=palette_cycle, stack_multistart=False, monotone=False, ) history = res.history.fun assert isinstance(lines, list) and len(lines) == 1 assert isinstance(lines[0], LineData) assert_array_equal(lines[0].x, np.arange(len(history))) assert_array_equal(lines[0].y, history) assert isinstance(multistart_lines, list) and all( isinstance(line, LineData) for line in multistart_lines ) assert len(multistart_lines) == 5 def test_extract_params_plot_lines(minimize_result): res = minimize_result[False][0] data = _retrieve_optimization_data_from_single_result( result=res, stack_multistart=False, show_exploration=False, plot_name="params_plot", ) palette_cycle = itertools.cycle(["red", "green", "blue"]) lines = _extract_params_plot_lines( data=data, selector=None, max_evaluations=None, palette_cycle=palette_cycle, ) params = np.array(res.history.params) num_params = params.shape[1] assert isinstance(lines, list) and len(lines) == num_params assert all(isinstance(line, LineData) for line in lines) for i, line in enumerate(lines): assert_array_equal(line.x, np.arange(len(params))) assert_array_equal(line.y, params[:, i]) ================================================ FILE: tests/optimagic/visualization/test_plotting_utilities.py ================================================ import base64 import numpy as np import pytest from numpy.testing import assert_array_equal from optimagic.visualization.plotting_utilities import ( _decode_base64_data, _ensure_array_from_plotly_data, ) def test_decode_base64_data(): expected = np.arange(10, dtype=float) encoded = base64.b64encode(expected.tobytes()).decode("ascii") got = _decode_base64_data(encoded, dtype="float") assert_array_equal(expected, got) def test_ensure_array_from_plotly_data_case_array(): expected = np.arange(10, dtype=float) got = _ensure_array_from_plotly_data(expected) assert_array_equal(expected, got) def 
test_ensure_array_from_plotly_data_case_list(): expected = np.arange(10, dtype=float) got = _ensure_array_from_plotly_data(expected.tolist()) assert_array_equal(expected, got) def test_ensure_array_from_plotly_data_case_base64(): expected = np.arange(10, dtype=float) encoded = base64.b64encode(expected.tobytes()).decode("ascii") got = _ensure_array_from_plotly_data({"bdata": encoded, "dtype": "float"}) assert_array_equal(expected, got) @pytest.mark.parametrize( "invalid_input", [ None, "not a valid input", 1234, [{"a": 1}, {"b": 2}], ], ) def test_ensure_array_from_plotly_data_case_invalid(invalid_input): with pytest.raises(ValueError, match="Failed to convert input to numpy array."): _ensure_array_from_plotly_data(invalid_input) ================================================ FILE: tests/optimagic/visualization/test_profile_plot.py ================================================ import itertools import numpy as np import pandas as pd import pytest from numpy.testing import assert_allclose from optimagic import get_benchmark_problems from optimagic.benchmarking.run_benchmark import run_benchmark from optimagic.visualization.profile_plot import ( _determine_alpha_grid, _extract_profile_plot_lines, _find_switch_points, create_solution_times, profile_plot, ) @pytest.fixture() def performance_ratios(): df = pd.DataFrame( data={"algo1": [1.0, 1.0, 4.0], "algo2": [1.5, np.inf, 1.0]}, index=["prob1", "prob2", "prob3"], ) return df def test_find_switch_points(performance_ratios): res = _find_switch_points(performance_ratios) expected = np.array([1.0, 1.5, 4.0]) np.testing.assert_array_almost_equal(res, expected) def test_determine_alpha_grid(performance_ratios): res = _determine_alpha_grid(performance_ratios) expected = np.array([1.0 + 1e-10, 1.25, 1.5, 2.75, 4.0, 4.0 * 1.025, 4.0 * 1.05]) np.testing.assert_array_almost_equal(res, expected) def test_create_solution_times_n_evaluations(): df = pd.DataFrame( columns=["problem", "algorithm", "n_evaluations"], data=[ 
["prob1", "algo1", 0], ["prob1", "algo1", 1], ["prob1", "algo2", 2], ["prob1", "algo2", 3], ["prob2", "algo1", 5], ["prob2", "algo2", 0], ["prob2", "algo2", 1], ], ) info = pd.DataFrame( { "algo1": [True, True], "algo2": [True, False], }, index=["prob1", "prob2"], ) expected = pd.DataFrame( { "algo1": [1.0, 5], "algo2": [3.0, np.inf], }, index=pd.Index(["prob1", "prob2"], name="problem"), ) expected.columns.name = "algorithm" res = create_solution_times( df=df, runtime_measure="n_evaluations", converged_info=info ) pd.testing.assert_frame_equal(res, expected) def test_create_solution_times_n_batches(): df = pd.DataFrame( columns=["problem", "algorithm", "n_batches"], data=[ ["prob1", "algo1", 0], ["prob1", "algo1", 1], ["prob1", "algo2", 2], ["prob1", "algo2", 2], ["prob2", "algo1", 1], ["prob2", "algo2", 0], ["prob2", "algo2", 0], ], ) info = pd.DataFrame( { "algo1": [True, True], "algo2": [True, False], }, index=["prob1", "prob2"], ) expected = pd.DataFrame( { "algo1": [1.0, 1], "algo2": [2.0, np.inf], }, index=pd.Index(["prob1", "prob2"], name="problem"), ) expected.columns.name = "algorithm" res = create_solution_times(df=df, runtime_measure="n_batches", converged_info=info) pd.testing.assert_frame_equal(res, expected) def test_create_solution_times_walltime(): df = pd.DataFrame( columns=["problem", "algorithm", "n_evaluations", "walltime"], data=[ ["prob1", "algo1", 0, 0], ["prob1", "algo1", 1, 1], ["prob1", "algo2", 2, 2], ["prob1", "algo2", 3, 3], ["prob2", "algo1", 5, 5], ["prob2", "algo2", 0, 0], ["prob2", "algo2", 1, 1], ], ) info = pd.DataFrame( { "algo1": [True, True], "algo2": [True, False], }, index=["prob1", "prob2"], ) expected = pd.DataFrame( { "algo1": [1.0, 5], "algo2": [3.0, np.inf], }, index=pd.Index(["prob1", "prob2"], name="problem"), ) expected.columns.name = "algorithm" res = create_solution_times(df=df, runtime_measure="walltime", converged_info=info) pd.testing.assert_frame_equal(res, expected) def test_extract_profile_plot_lines(): 
solution_times = pd.DataFrame( { "algo1": [1.0, 5], "algo2": [3.0, np.inf], }, index=["prob1", "prob2"], ) solution_times.columns.name = "algorithm" info = pd.DataFrame( { "algo1": [True, True], "algo2": [True, False], }, index=["prob1", "prob2"], ) palette_cycle = itertools.cycle(["red", "green", "blue"]) lines = _extract_profile_plot_lines( solution_times=solution_times, normalize_runtime=False, converged_info=info, palette_cycle=palette_cycle, ) assert isinstance(lines, list) and len(lines) == 2 assert_allclose(lines[0].x, np.array([1.0, 2.0, 3.0, 4.0, 5.0, 5.125, 5.25])) assert_allclose(lines[0].y, np.array([0.5, 0.5, 0.5, 0.5, 1.0, 1.0, 1.0])) assert lines[0].name == "algo1" assert_allclose(lines[1].x, np.array([1.0, 2.0, 3.0, 4.0, 5.0, 5.125, 5.25])) assert_allclose(lines[1].y, np.array([0.0, 0.0, 0.5, 0.5, 0.5, 0.5, 0.5])) assert lines[1].name == "algo2" # integration test to make sure non default argument do not throw Errors profile_options = [ {"runtime_measure": "walltime"}, {"runtime_measure": "n_batches"}, {"stopping_criterion": "x_or_y"}, {"backend": "matplotlib"}, {"backend": "bokeh"}, {"backend": "altair"}, ] @pytest.mark.parametrize("options", profile_options) def test_profile_plot_options(options, close_mpl_figures): problems = get_benchmark_problems("example") stop_after_10 = { "stopping_max_criterion_evaluations": 10, "stopping_max_iterations": 10, } optimizers = { "lbfgsb": {"algorithm": "scipy_lbfgsb", "algo_options": stop_after_10}, "neldermead": { "algorithm": "scipy_neldermead", "algo_options": stop_after_10, }, } results = run_benchmark( problems, optimizers, n_cores=1, # must be 1 for the test to work ) profile_plot(problems=problems, results=results, **options) ================================================ FILE: tests/optimagic/visualization/test_slice_plot.py ================================================ import numpy as np import pytest from optimagic import mark from optimagic.parameters.bounds import Bounds from 
optimagic.visualization.backends import BACKEND_AVAILABILITY_AND_LINE_PLOT_FUNCTION from optimagic.visualization.plotting_utilities import LineData, MarkerData from optimagic.visualization.slice_plot import ( _extract_slice_plot_lines_and_labels, _get_plot_data, _get_processed_func_and_func_eval, slice_plot, ) @pytest.fixture() def fixed_inputs(): params = {"alpha": 0, "beta": 0, "gamma": 0, "delta": 0} bounds = Bounds( lower={name: -5 for name in params}, upper={name: i + 2 for i, name in enumerate(params)}, ) out = { "params": params, "bounds": bounds, } return out @mark.likelihood def sphere_loglike(params): x = np.array(list(params.values())) return x**2 def sphere(params): x = np.array(list(params.values())) return x @ x KWARGS = [ {}, {"plots_per_row": 4}, {"selector": lambda x: [x["alpha"], x["beta"]]}, {"param_names": {"alpha": "Alpha", "beta": "Beta"}}, {"share_x": True}, {"share_y": False}, {"return_dict": True}, {"title": "Slice Plot"}, ] parametrization = [ (func, kwargs) for func in [sphere_loglike, sphere] for kwargs in KWARGS ] @pytest.mark.parametrize("backend", BACKEND_AVAILABILITY_AND_LINE_PLOT_FUNCTION.keys()) @pytest.mark.parametrize("func, kwargs", parametrization) def test_slice_plot(fixed_inputs, func, backend, kwargs, close_mpl_figures): slice_plot( func=func, backend=backend, **fixed_inputs, **kwargs, ) def test_extract_slice_plot_lines(fixed_inputs): params, bounds = fixed_inputs["params"], fixed_inputs["bounds"] func, func_eval = _get_processed_func_and_func_eval( sphere, func_kwargs=None, params=params ) plot_data, internal_params = _get_plot_data( func=func, params=params, bounds=bounds, func_eval=func_eval, selector=None, n_gridpoints=10, batch_evaluator="joblib", n_cores=1, ) lines_list, marker_list, xlabels, ylabels = _extract_slice_plot_lines_and_labels( plot_data=plot_data, internal_params=internal_params, func_eval=func_eval, param_names={"alpha": "Alpha"}, color=None, ) assert isinstance(lines_list, list) and len(lines_list) == 
len(params) assert all( isinstance(subplot_lines, list) and len(subplot_lines) == 1 and isinstance(subplot_lines[0], LineData) for subplot_lines in lines_list ) assert isinstance(marker_list, list) and len(marker_list) == len(params) assert all(isinstance(marker, MarkerData) for marker in marker_list) for i, k in enumerate(params): assert marker_list[i].x == params[k] assert isinstance(xlabels, list) assert xlabels == ["Alpha", "beta", "gamma", "delta"] assert isinstance(ylabels, list) assert all(ylabel == "Function Value" for ylabel in ylabels) ================================================ FILE: tests/optimagic/visualization/test_slice_plot_3d.py ================================================ import numpy as np import pytest from optimagic import mark from optimagic.parameters.bounds import Bounds from optimagic.parameters.conversion import get_converter from optimagic.visualization.slice_plot_3d import ( Projection, generate_evaluation_points, plot_data_cache, slice_plot_3d, ) @pytest.fixture() def fixed_inputs(): params = {"alpha": 0, "beta": 0, "gamma": 0, "delta": 0} bounds = Bounds( lower={name: -5 for name in params}, upper={name: i for i, name in enumerate(params)}, ) return {"params": params, "bounds": bounds} @mark.likelihood def sphere_loglike(params): x = np.array(list(params.values())) return x**2 def sphere(params): x = np.array(list(params.values())) return x @ x kwargs_slice_plot_3d = [ {}, {"projection": "contour"}, {"projection": "surface"}, {"projection": "surface", "n_gridpoints": 100}, {"projection": {"lower": "contour", "upper": "contour"}}, {"projection": {"lower": "surface", "upper": "contour"}}, { "projection": {"lower": "contour", "upper": "surface"}, "selector": lambda x: [x["alpha"], x["beta"], x["delta"]], }, {"selector": lambda x: [x["alpha"], x["beta"]]}, {"param_names": {"alpha": "Alpha", "beta": "Beta"}}, {"layout_kwargs": {"width": 800, "height": 600, "title": "Custom Layout"}}, { "projection": "surface", "selector": lambda x: 
[x["alpha"], x["gamma"]], }, { "projection": "contour", "selector": lambda x: [x["alpha"], x["delta"]], }, { "projection": "surface", "plot_kwargs": {"surface_plot": {"colorscale": "Viridis", "opacity": 0.9}}, }, { "projection": "contour", "plot_kwargs": {"contour_plot": {"colorscale": "Viridis", "showscale": True}}, }, { "selector": lambda x: [x["alpha"], x["beta"], x["gamma"]], "make_subplot_kwargs": {"rows": 1, "cols": 3, "horizontal_spacing": 0.01}, }, { "param_names": {"alpha": "α", "beta": "β", "gamma": "γ", "delta": "δ"}, "n_gridpoints": 10, "expand_yrange": 2, }, { "layout_kwargs": { "template": "plotly_dark", "xaxis_showgrid": True, "yaxis_showgrid": True, } }, { "plot_kwargs": { "scatter_plot": None, "line_plot": {"color_discrete_sequence": ["red"], "markers": True}, } }, {"return_dict": True}, { "return_dict": True, "layout_kwargs": { "template": "plotly_dark", "xaxis_showgrid": True, "yaxis_showgrid": True, }, "plot_kwargs": { "scatter_plot": None, "line_plot": {"color_discrete_sequence": ["red"], "markers": True}, }, }, ] parametrized_slice_plot_3d = [ (func, kwarg) for func in [sphere, sphere_loglike] for kwarg in kwargs_slice_plot_3d ] @pytest.mark.parametrize("func, kwargs", parametrized_slice_plot_3d) def test_slice_plot_3d(fixed_inputs, func, kwargs): slice_plot_3d(func=func, **fixed_inputs, **kwargs) kwargs_generate_evaluation_points = [ ( sphere, 5, ["alpha"], "univariate", False, [ [-5.0, 0.0, 0.0, 0.0], [-3.75, 0.0, 0.0, 0.0], [-2.5, 0.0, 0.0, 0.0], [-1.25, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], ], ), ( sphere, 3, ["alpha", "gamma"], "contour", False, [ [-5.0, 0.0, 0.0, 0.0], [-2.5, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, -5.0, 0.0], [0.0, 0.0, -1.5, 0.0], [0.0, 0.0, 2.0, 0.0], [-5.0, 0.0, -5.0, 0.0], [-2.5, 0.0, -5.0, 0.0], [0.0, 0.0, -5.0, 0.0], [-5.0, 0.0, -1.5, 0.0], [-2.5, 0.0, -1.5, 0.0], [0.0, 0.0, -1.5, 0.0], [-5.0, 0.0, 2.0, 0.0], [-2.5, 0.0, 2.0, 0.0], [0.0, 0.0, 2.0, 0.0], [-5.0, 0.0, -5.0, 0.0], [-5.0, 0.0, -1.5, 0.0], 
[-5.0, 0.0, 2.0, 0.0], [-2.5, 0.0, -5.0, 0.0], [-2.5, 0.0, -1.5, 0.0], [-2.5, 0.0, 2.0, 0.0], [0.0, 0.0, -5.0, 0.0], [0.0, 0.0, -1.5, 0.0], [0.0, 0.0, 2.0, 0.0], ], ), ( sphere, 5, ["beta", "delta"], "surface", True, [ [0.0, -5.0, 0.0, 0.0], [0.0, -3.5, 0.0, 0.0], [0.0, -2.0, 0.0, 0.0], [0.0, -0.5, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, -5.0], [0.0, 0.0, 0.0, -3.0], [0.0, 0.0, 0.0, -1.0], [0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, 3.0], [0.0, -5.0, 0.0, -5.0], [0.0, -3.5, 0.0, -5.0], [0.0, -2.0, 0.0, -5.0], [0.0, -0.5, 0.0, -5.0], [0.0, 1.0, 0.0, -5.0], [0.0, -5.0, 0.0, -3.0], [0.0, -3.5, 0.0, -3.0], [0.0, -2.0, 0.0, -3.0], [0.0, -0.5, 0.0, -3.0], [0.0, 1.0, 0.0, -3.0], [0.0, -5.0, 0.0, -1.0], [0.0, -3.5, 0.0, -1.0], [0.0, -2.0, 0.0, -1.0], [0.0, -0.5, 0.0, -1.0], [0.0, 1.0, 0.0, -1.0], [0.0, -5.0, 0.0, 1.0], [0.0, -3.5, 0.0, 1.0], [0.0, -2.0, 0.0, 1.0], [0.0, -0.5, 0.0, 1.0], [0.0, 1.0, 0.0, 1.0], [0.0, -5.0, 0.0, 3.0], [0.0, -3.5, 0.0, 3.0], [0.0, -2.0, 0.0, 3.0], [0.0, -0.5, 0.0, 3.0], [0.0, 1.0, 0.0, 3.0], [0.0, -5.0, 0.0, -5.0], [0.0, -5.0, 0.0, -3.0], [0.0, -5.0, 0.0, -1.0], [0.0, -5.0, 0.0, 1.0], [0.0, -5.0, 0.0, 3.0], [0.0, -3.5, 0.0, -5.0], [0.0, -3.5, 0.0, -3.0], [0.0, -3.5, 0.0, -1.0], [0.0, -3.5, 0.0, 1.0], [0.0, -3.5, 0.0, 3.0], [0.0, -2.0, 0.0, -5.0], [0.0, -2.0, 0.0, -3.0], [0.0, -2.0, 0.0, -1.0], [0.0, -2.0, 0.0, 1.0], [0.0, -2.0, 0.0, 3.0], [0.0, -0.5, 0.0, -5.0], [0.0, -0.5, 0.0, -3.0], [0.0, -0.5, 0.0, -1.0], [0.0, -0.5, 0.0, 1.0], [0.0, -0.5, 0.0, 3.0], [0.0, 1.0, 0.0, -5.0], [0.0, 1.0, 0.0, -3.0], [0.0, 1.0, 0.0, -1.0], [0.0, 1.0, 0.0, 1.0], [0.0, 1.0, 0.0, 3.0], ], ), ] @pytest.mark.parametrize( "func, n_points, selected_params, projection, grid_univariate, expected_points", kwargs_generate_evaluation_points, ) def test_generate_evaluation_points( fixed_inputs, func, n_points, selected_params, projection, grid_univariate, expected_points, ): projection = Projection(projection) params = fixed_inputs["params"] func_eval = func(params) 
converter, internal_params = get_converter( params=params, constraints=None, bounds=fixed_inputs["bounds"], func_eval=func_eval, solver_type="value", ) params_data = { name: np.linspace( internal_params.lower_bounds[internal_params.names.index(name)], internal_params.upper_bounds[internal_params.names.index(name)], n_points, ) for name in selected_params } selected_indices = [list(params.keys()).index(param) for param in selected_params] points = generate_evaluation_points( projection, selected_indices, internal_params, params_data, converter, ) points = [[point[key] for key in internal_params.names] for point in points] np.testing.assert_allclose(points, expected_points, rtol=0.2) kwargs_plot_data_cache = [ ( sphere, 5, [0], "univariate", [25, 14.0, 6.25, 1.5, 0], {("alpha",): [25, 14.0, 6.25, 1.5, 0]}, ), ( sphere, 3, [0, 2], "contour", [ 25, 6.25, 0, 25, 2.25, 4, 50, 31.25, 25, 27.25, 8.5, 2.25, 29, 10.25, 4, 50, 27.25, 29, 31.25, 8.5, 10.25, 25, 2.25, 4, ], { ("alpha",): [25, 6.25, 0], ("gamma",): [25, 2.25, 4], ("alpha", "gamma"): [50, 27.25, 29, 31.25, 8.5, 10.25, 25, 2.25, 4], }, ), ] @pytest.mark.parametrize( "func, n_points, selected_indices, projection, func_values, expected_values", kwargs_plot_data_cache, ) def test_evaluate_function_values( fixed_inputs, func, n_points, projection, selected_indices, func_values, expected_values, ): projection = Projection(projection) params = fixed_inputs["params"] func_eval = func(params) converter, internal_params = get_converter( params=params, constraints=None, bounds=fixed_inputs["bounds"], func_eval=func_eval, solver_type="value", ) plot_data = plot_data_cache( projection, selected_indices, internal_params, func_values, n_points ) assert plot_data == expected_values