[
  {
    "path": ".github/CODE_OF_CONDUCT.md",
    "content": "# Code of Conduct\n\n- [NumFOCUS Code of Conduct](https://numfocus.org/code-of-conduct)\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/bug-report.md",
    "content": "---\nname: Bug Report\nabout: Create a report to help us improve\ntitle: ''\nlabels: bug\nassignees: ''\n\n---\n\n### Bug description\n\nA clear and concise description of what the bug is.\n\n### To Reproduce\n\nIdeally, provide a minimal code example. If that's not possible, describe steps to reproduce the bug.\n\n### Expected behavior\n\nA clear and concise description of what you expected to happen.\n\n### Screenshots/Error messages\n\nIf applicable, add screenshots to help explain your problem.\n\n### System\n\n - OS: [e.g. Ubuntu 18.04]\n - Version [e.g. 0.0.1]\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/enhancement.md",
    "content": "---\nname: Enhancement\nabout: Enhance an existing component.\ntitle: ''\nlabels: enhancement\nassignees: ''\n\n---\n\n* optimagic version used, if any:\n* Python version, if any:\n* Operating System:\n\n### What would you like to enhance and why? Is it related to an issue/problem?\n\nA clear and concise description of the current implementation and its limitations.\n\n### Describe the solution you'd like\n\nA clear and concise description of what you want to happen.\n\n### Describe alternatives you've considered\n\nA clear and concise description of any alternative solutions or features you've\nconsidered and why you have discarded them.\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/feature_request.md",
    "content": "---\nname: Feature request\nabout: Suggest an idea for this project\ntitle: ''\nlabels: feature-request\nassignees: ''\n\n---\n\n### Current situation\n\nA clear and concise description of what the problem is. Ex. I'm always frustrated when [...]; Currently there is no way of [...]\n\n### Desired Situation\n\nWhat functionality should become possible or easier?\n\n### Proposed implementation\n\nHow would you implement the new feature? Did you consider alternative implementations?\nYou can start by describing interface changes like a new argument or a new function. There is no need to get too detailed here.\n"
  },
  {
    "path": ".github/PULL_REQUEST_TEMPLATE/pull_request_template.md",
    "content": "### What problem do you want to solve?\n\nReference the issue or discussion, if there is any. Provide a description of your\nproposed solution.\n\n### Todo\n\n- [ ] Target the right branch and pick an appropriate title.\n- [ ] Put `Closes #XXXX` in the first PR comment to auto-close the relevant issue once\n  the PR is accepted. This is not applicable if there is no corresponding issue.\n- [ ] Any steps that still need to be done.\n"
  },
  {
    "path": ".github/workflows/main.yml",
    "content": "---\nname: main\nconcurrency:\n  group: ${{ github.head_ref || github.run_id }}\n  cancel-in-progress: true\non:\n  push:\n    branches:\n      - main\n  pull_request:\n    branches:\n      - '*'\njobs:\n  run-tests-linux:\n    name: Run tests on ubuntu-latest py${{ matrix.python-version }}\n    runs-on: ubuntu-latest\n    strategy:\n      fail-fast: false\n      matrix:\n        python-version:\n          - '312'\n          - '313'\n          - '314'\n    steps:\n      - uses: actions/checkout@v4\n      - uses: prefix-dev/setup-pixi@v0.9.4\n        with:\n          pixi-version: v0.65.0\n          cache: true\n          cache-write: ${{ github.event_name == 'push' && github.ref_name == 'main' }}\n          frozen: true\n          environments: tests-linux-py${{ matrix.python-version }}\n      - name: Run pytest\n        shell: bash -el {0}\n        run: pixi run -e tests-linux-py${{ matrix.python-version }} tests-with-cov\n      - name: Upload coverage report.\n        if: matrix.python-version == '312'\n        uses: codecov/codecov-action@v4\n        with:\n          token: ${{ secrets.CODECOV_TOKEN }}\n  run-tests-win-and-mac:\n    name: Run tests on ${{ matrix.os }} py${{ matrix.python-version }}\n    runs-on: ${{ matrix.os }}\n    strategy:\n      fail-fast: false\n      matrix:\n        os:\n          - macos-latest\n          - windows-latest\n        python-version:\n          - '312'\n          - '313'\n          - '314'\n    steps:\n      - uses: actions/checkout@v4\n      - uses: prefix-dev/setup-pixi@v0.9.4\n        with:\n          pixi-version: v0.65.0\n          cache: true\n          cache-write: ${{ github.event_name == 'push' && github.ref_name == 'main' }}\n          frozen: true\n          environments: tests-py${{ matrix.python-version }}\n      - name: Run pytest\n        shell: bash -el {0}\n        run: pixi run -e tests-py${{ matrix.python-version }} tests-fast\n  run-tests-with-old-plotly:\n    name: Run tests on 
ubuntu-latest with plotly < 6\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@v4\n      - uses: prefix-dev/setup-pixi@v0.9.4\n        with:\n          pixi-version: v0.65.0\n          cache: true\n          cache-write: ${{ github.event_name == 'push' && github.ref_name == 'main' }}\n          frozen: true\n          environments: tests-old-plotly\n      - name: Run pytest\n        shell: bash -el {0}\n        run: pixi run -e tests-old-plotly tests-fast\n  run-tests-nevergrad:\n    name: Run nevergrad tests py${{ matrix.python-version }}\n    runs-on: ubuntu-latest\n    strategy:\n      fail-fast: false\n      matrix:\n        python-version:\n          - '312'\n          - '313'\n          - '314'\n    steps:\n      - uses: actions/checkout@v4\n      - uses: prefix-dev/setup-pixi@v0.9.4\n        with:\n          pixi-version: v0.65.0\n          cache: true\n          cache-write: ${{ github.event_name == 'push' && github.ref_name == 'main' }}\n          frozen: true\n          environments: tests-nevergrad-py${{ matrix.python-version }}\n      - name: Run pytest\n        shell: bash -el {0}\n        run: >-\n          pixi run -e tests-nevergrad-py${{ matrix.python-version }}\n          pytest tests/optimagic/optimizers/test_nevergrad.py\n  code-in-docs:\n    name: Run code snippets in documentation\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@v4\n      - uses: prefix-dev/setup-pixi@v0.9.4\n        with:\n          pixi-version: v0.65.0\n          cache: true\n          cache-write: ${{ github.event_name == 'push' && github.ref_name == 'main' }}\n          frozen: true\n          environments: tests-linux-py314\n      - name: Run doctest\n        shell: bash -el {0}\n        run: >-\n          pixi run -e tests-linux-py314\n          python -m doctest -v docs/source/how_to/how_to_constraints.md\n  run-mypy:\n    name: Run mypy\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@v4\n      - 
uses: prefix-dev/setup-pixi@v0.9.4\n        with:\n          pixi-version: v0.65.0\n          cache: true\n          cache-write: ${{ github.event_name == 'push' && github.ref_name == 'main' }}\n          frozen: true\n          environments: type-checking\n      - name: Run mypy\n        shell: bash -el {0}\n        run: pixi run -e type-checking mypy\n"
  },
  {
    "path": ".github/workflows/publish-to-pypi.yml",
    "content": "---\nname: PyPI\non: push\njobs:\n  build-n-publish:\n    name: Build and publish optimagic Python 🐍 distributions 📦 to PyPI\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@v4\n      - name: Set up Python 3.12\n        uses: actions/setup-python@v5\n        with:\n          python-version: '3.12'\n      - name: Install pypa/build\n        run: >-\n          python -m\n          pip install\n          build\n          --user\n      - name: Build a binary wheel and a source tarball\n        run: >-\n          python -m\n          build\n          --sdist\n          --wheel\n          --outdir dist/\n      - name: Publish distribution 📦 to PyPI\n        if: startsWith(github.ref, 'refs/tags')\n        uses: pypa/gh-action-pypi-publish@release/v1\n        with:\n          password: ${{ secrets.PYPI_API_TOKEN_OPTIMAGIC }}\n"
  },
  {
    "path": ".gitignore",
    "content": "# AI\nCLAUDE.md\n\n# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# MacOS specific service store\n.DS_Store\n\n# C extensions\n*.so\n\n# Distribution / packaging\n.Python\nbuild/\ndevelop-eggs/\ndist/\ndownloads/\neggs/\n.eggs/\nlib/\nlib64/\nparts/\nsdist/\nvar/\nwheels/\n*.egg-info/\n.installed.cfg\n*.egg\nMANIFEST\n*build/\n\n# PyInstaller\n#  Usually these files are written by a python script from a template\n#  before PyInstaller builds the exe, so as to inject date/other infos into it.\n*.manifest\n*.spec\n*.sublime-workspace\n*.sublime-project\n\n# Installer logs\npip-log.txt\npip-delete-this-directory.txt\n\n# Unit test / coverage reports\nhtmlcov/\n.tox/\n.coverage\n.coverage.*\n.cache\nnosetests.xml\ncoverage.xml\n*.cover\n.hypothesis/\n.pytest_cache/\n\n# Translations\n*.mo\n*.pot\n\n# Django stuff:\n*.log\nlocal_settings.py\ndb.sqlite3\n\n# Flask stuff:\ninstance/\n.webassets-cache\n\n# Scrapy stuff:\n.scrapy\n\n# Sphinx documentation\ndocs/_build/\ndocs/build/\ndocs/source/_build/\ndocs/source/**/*.db\ndocs/source/**/*.db-shm\ndocs/source/**/*.db-wal\ndocs/source/refs.bib.bak\n\n# PyBuilder\ntarget/\n\n# Jupyter Notebook\n.ipynb_checkpoints\n\n# pyenv\n.python-version\n\n# celery beat schedule file\ncelerybeat-schedule\n\n# SageMath parsed files\n*.sage.py\n\n# Environments\n.env\n.venv\nenv/\nvenv/\nENV/\nenv.bak/\nvenv.bak/\n.pixi/\n\n# Spyder project settings\n.spyderproject\n.spyproject\n\n# VSCode project settings\n.vscode\n\n# Rope project settings\n.ropeproject\n\n# mkdocs documentation\n/site\n\n# mypy\n.mypy_cache/\n\n*notes/\n\n.idea/\n\n*.bak\n\n\n*.db\n\n\n.pytask.sqlite3\n\n\nsrc/estimagic/_version.py\nsrc/optimagic/_version.py\n\n*.~lock.*\n"
  },
  {
    "path": ".pre-commit-config.yaml",
    "content": "---\nrepos:\n  - repo: meta\n    hooks:\n      - id: check-hooks-apply\n      - id: check-useless-excludes\n        # - id: identity  # Prints all files passed to pre-commits. Debugging.\n  - repo: https://github.com/lyz-code/yamlfix\n    rev: 1.19.1\n    hooks:\n      - id: yamlfix\n        exclude: tests/optimagic/optimizers/_pounders/fixtures\n  - repo: local\n    hooks:\n      - id: update-algo-selection-code\n        name: update algo selection code\n        entry: python .tools/update_algo_selection_hook.py\n        language: python\n        files: ^(src/optimagic/optimizers/|src/optimagic/algorithms\\.py|\\.tools/)\n        require_serial: true\n        additional_dependencies:\n          - hatchling\n          - ruff\n  - repo: https://github.com/pre-commit/pre-commit-hooks\n    rev: v6.0.0\n    hooks:\n      - id: check-added-large-files\n        args:\n          - --maxkb=2500\n        exclude: tests/optimagic/optimizers/_pounders/fixtures/\n      - id: check-case-conflict\n      - id: check-merge-conflict\n      - id: check-vcs-permalinks\n      - id: check-yaml\n      - id: check-toml\n      - id: debug-statements\n      - id: end-of-file-fixer\n      - id: fix-byte-order-marker\n        types:\n          - text\n      - id: forbid-submodules\n      - id: mixed-line-ending\n        args:\n          - --fix=lf\n        description: Forces to replace line ending by the UNIX 'lf' character.\n      - id: name-tests-test\n        args:\n          - --pytest-test-first\n      - id: no-commit-to-branch\n        args:\n          - --branch\n          - main\n      - id: trailing-whitespace\n        exclude: docs/\n      - id: check-ast\n  - repo: https://github.com/adrienverge/yamllint.git\n    rev: v1.38.0\n    hooks:\n      - id: yamllint\n        exclude: tests/optimagic/optimizers/_pounders/fixtures\n  - repo: https://github.com/astral-sh/ruff-pre-commit\n    rev: v0.15.5\n    hooks:\n      # Run the linter.\n      - id: ruff\n        
types_or:\n          - python\n          - pyi\n          - jupyter\n        args:\n          - --fix\n      # Run the formatter.\n      - id: ruff-format\n        types_or:\n          - python\n          - pyi\n          - jupyter\n  - repo: https://github.com/executablebooks/mdformat\n    rev: 1.0.0\n    hooks:\n      - id: mdformat\n        additional_dependencies:\n          - mdformat-gfm\n          - mdformat-gfm-alerts\n          - mdformat-ruff\n        args:\n          - --wrap\n          - '88'\n        files: (README\\.md)\n  - repo: https://github.com/executablebooks/mdformat\n    rev: 1.0.0\n    hooks:\n      - id: mdformat\n        additional_dependencies:\n          - mdformat-myst\n          - mdformat-ruff\n        args:\n          - --wrap\n          - '88'\n        files: (docs/.)\n        exclude: docs/source/how_to/how_to_specify_algorithm_and_algo_options.md\n  - repo: https://github.com/kynan/nbstripout\n    rev: 0.9.1\n    hooks:\n      - id: nbstripout\n        exclude: |\n          (?x)^(\n            docs/source/estimagic/tutorials/estimation_tables_overview.ipynb|\n            docs/source/estimagic/explanation/bootstrap_montecarlo_comparison.ipynb|\n          )$\n        args:\n          - --drop-empty-cells\nci:\n  autoupdate_schedule: monthly\n  skip:\n    - update-algo-selection-code\n"
  },
  {
    "path": ".readthedocs.yml",
    "content": "---\nversion: 2\nbuild:\n  os: ubuntu-24.04\n  tools:\n    python: '3.14'\n  jobs:\n    create_environment:\n      - asdf plugin add pixi\n      - asdf install pixi latest\n      - asdf global pixi latest\n    post_build:\n      - pixi run -e docs build-docs\n      - mkdir --parents $READTHEDOCS_OUTPUT/html/\n      - cp -a docs/build/html/. \"$READTHEDOCS_OUTPUT/html\" && rm -r docs/build\n"
  },
  {
    "path": ".tools/create_algo_selection_code.py",
    "content": "import importlib\nimport inspect\nimport pkgutil\nimport textwrap\nfrom itertools import combinations\nfrom types import ModuleType\nfrom typing import Callable, Type\n\nfrom optimagic.config import OPTIMAGIC_ROOT\nfrom optimagic.optimization.algorithm import Algorithm\nfrom optimagic.typing import AggregationLevel\n\n\ndef main() -> None:\n    \"\"\"Create the source code for algorithms.py.\n\n    The main part of the generated code are nested dataclasses that enable filtered\n    autocomplete for algorithm selection. Creating them entails the following steps:\n\n    - Discover all modules that contain optimizer classes\n    - Collect all optimizer classes\n    - Create a mapping from a tuple of categories (e.g. Global, Bounded, ...) to the\n      optimizer classes that belong to them. To find out which optimizers need to be\n      included we use the attributes stored in optimizer_class.__algo_info__.\n    - Create the dataclasses that enable autocomplete for algorithm selection\n\n    In addition we need to create the code for import statements, a AlgoSelection base\n    class and some code to instantiate the dataclasses.\n\n    \"\"\"\n    # create some basic inputs\n    docstring = _get_docstring_code()\n    modules = _import_optimizer_modules(\"optimagic.optimizers\")\n    all_algos = _get_all_algorithms(modules)\n    filters = _get_filters()\n    all_categories = list(filters)\n    selection_info = _create_selection_info(all_algos, all_categories)\n\n    # create the code for imports\n    imports = _get_imports(modules)\n\n    # create the code for the ABC AlgoSelection class\n    parent_class_snippet = _get_base_class_code()\n\n    # create the code for the dataclasses\n    dataclass_snippets = []\n    for active_categories in selection_info:\n        new_snippet = create_dataclass_code(\n            active_categories=active_categories,\n            all_categories=all_categories,\n            selection_info=selection_info,\n        )\n       
 dataclass_snippets.append(new_snippet)\n\n    # create the code for the instantiation\n    instantiation_snippet = _get_instantiation_code()\n\n    # Combine all the content into a single string\n    content = (\n        docstring\n        + imports\n        + \"\\n\\n\"\n        + parent_class_snippet\n        + \"\\n\"\n        + \"\\n\\n\".join(dataclass_snippets)\n        + \"\\n\\n\"\n        + instantiation_snippet\n    )\n\n    # Write the combined content to the file\n    with (OPTIMAGIC_ROOT / \"algorithms.py\").open(\"w\") as f:\n        f.write(content)\n\n\n# ======================================================================================\n# Functions to collect algorithms\n# ======================================================================================\n\n\ndef _import_optimizer_modules(package_name: str) -> list[ModuleType]:\n    \"\"\"Collect all public modules in a given package in a list.\"\"\"\n    package = importlib.import_module(package_name)\n    modules = []\n\n    for _, module_name, is_pkg in pkgutil.walk_packages(\n        package.__path__, package.__name__ + \".\"\n    ):\n        module_parts = module_name.split(\".\")\n        if all(not part.startswith(\"_\") for part in module_parts) and not is_pkg:\n            module = importlib.import_module(module_name)\n            modules.append(module)\n\n    return modules\n\n\ndef _get_all_algorithms(modules: list[ModuleType]) -> dict[str, Type[Algorithm]]:\n    \"\"\"Collect all algorithms in modules.\"\"\"\n    out = {}\n    for module in modules:\n        out.update(_get_algorithms_in_module(module))\n    return out\n\n\ndef _get_algorithms_in_module(module: ModuleType) -> dict[str, Type[Algorithm]]:\n    \"\"\"Collect all algorithms in a single module.\"\"\"\n    candidate_dict = dict(inspect.getmembers(module, inspect.isclass))\n    candidate_dict = {\n        k: v for k, v in candidate_dict.items() if hasattr(v, \"__algo_info__\")\n    }\n    algos = {}\n    for candidate 
in candidate_dict.values():\n        name = candidate.algo_info.name\n        if issubclass(candidate, Algorithm) and candidate is not Algorithm:\n            algos[name] = candidate\n    return algos\n\n\n# ======================================================================================\n# Functions to filter algorithms by selectors\n# ======================================================================================\ndef _is_gradient_based(algo: Type[Algorithm]) -> bool:\n    return algo.algo_info.needs_jac  # type: ignore\n\n\ndef _is_gradient_free(algo: Type[Algorithm]) -> bool:\n    return not _is_gradient_based(algo)\n\n\ndef _is_global(algo: Type[Algorithm]) -> bool:\n    return algo.algo_info.is_global  # type: ignore\n\n\ndef _is_local(algo: Type[Algorithm]) -> bool:\n    return not _is_global(algo)\n\n\ndef _is_bounded(algo: Type[Algorithm]) -> bool:\n    return algo.algo_info.supports_bounds  # type: ignore\n\n\ndef _is_linear_constrained(algo: Type[Algorithm]) -> bool:\n    return algo.algo_info.supports_linear_constraints  # type: ignore\n\n\ndef _is_nonlinear_constrained(algo: Type[Algorithm]) -> bool:\n    return algo.algo_info.supports_nonlinear_constraints  # type: ignore\n\n\ndef _is_scalar(algo: Type[Algorithm]) -> bool:\n    return algo.algo_info.solver_type == AggregationLevel.SCALAR  # type: ignore\n\n\ndef _is_least_squares(algo: Type[Algorithm]) -> bool:\n    return algo.algo_info.solver_type == AggregationLevel.LEAST_SQUARES  # type: ignore\n\n\ndef _is_likelihood(algo: Type[Algorithm]) -> bool:\n    return algo.algo_info.solver_type == AggregationLevel.LIKELIHOOD  # type: ignore\n\n\ndef _is_parallel(algo: Type[Algorithm]) -> bool:\n    return algo.algo_info.supports_parallelism  # type: ignore\n\n\ndef _get_filters() -> dict[str, Callable[[Type[Algorithm]], bool]]:\n    \"\"\"Create a dict mapping from category names to filter functions.\"\"\"\n    filters: dict[str, Callable[[Type[Algorithm]], bool]] = {\n        
\"GradientBased\": _is_gradient_based,\n        \"GradientFree\": _is_gradient_free,\n        \"Global\": _is_global,\n        \"Local\": _is_local,\n        \"Bounded\": _is_bounded,\n        \"LinearConstrained\": _is_linear_constrained,\n        \"NonlinearConstrained\": _is_nonlinear_constrained,\n        \"Scalar\": _is_scalar,\n        \"LeastSquares\": _is_least_squares,\n        \"Likelihood\": _is_likelihood,\n        \"Parallel\": _is_parallel,\n    }\n    return filters\n\n\n# ======================================================================================\n# Functions to create a mapping from a tuple of selectors to subsets of the dict\n# mapping algorithm names to algorithm classes\n# ======================================================================================\n\n\ndef _create_selection_info(\n    all_algos: dict[str, Type[Algorithm]],\n    categories: list[str],\n) -> dict[tuple[str, ...], dict[str, Type[Algorithm]]]:\n    \"\"\"Create a dict mapping from a tuple of selectors to subsets of the all_algos dict.\n\n    Args:\n        all_algos: Dictionary mapping algorithm names to algorithm classes.\n        categories: List of categories to filter by.\n\n    Returns:\n        A dictionary mapping tuples of selectors to dictionaries of algorithm names\n            and their corresponding classes.\n\n    \"\"\"\n    category_combinations = _generate_category_combinations(categories)\n    out = {}\n    for comb in category_combinations:\n        filtered_algos = _apply_filters(all_algos, comb)\n        if filtered_algos:\n            out[comb] = filtered_algos\n    return out\n\n\ndef _generate_category_combinations(categories: list[str]) -> list[tuple[str, ...]]:\n    \"\"\"Generate all combinations of categories, sorted by length in descending order.\n\n    Args:\n        categories: A list of category names.\n\n    Returns:\n        A list of tuples, where each tuple represents a combination of categories.\n\n    \"\"\"\n    result: 
list[tuple[str, ...]] = []\n    for r in range(len(categories) + 1):\n        result.extend(map(tuple, map(sorted, combinations(categories, r))))\n    return sorted(result, key=len, reverse=True)\n\n\ndef _apply_filters(\n    all_algos: dict[str, Type[Algorithm]], categories: tuple[str, ...]\n) -> dict[str, Type[Algorithm]]:\n    \"\"\"Apply filters to the algorithms based on the given categories.\n\n    Args:\n        all_algos: A dictionary mapping algorithm names to algorithm classes.\n        categories: A tuple of category names to filter by.\n\n    Returns:\n        filtered dictionary of algorithms that match all given categories.\n\n    \"\"\"\n    filtered = all_algos\n    filters = _get_filters()\n    for category in categories:\n        filter_func = filters[category]\n        filtered = {name: algo for name, algo in filtered.items() if filter_func(algo)}\n    return filtered\n\n\n# ======================================================================================\n# Functions to create code for the dataclasses\n# ======================================================================================\n\n\ndef create_dataclass_code(\n    active_categories: tuple[str, ...],\n    all_categories: list[str],\n    selection_info: dict[tuple[str, ...], dict[str, Type[Algorithm]]],\n) -> str:\n    \"\"\"Create the source code for a dataclass representing a selection of algorithms.\n\n    Args:\n        active_categories: A tuple of active category names.\n        all_categories: A list of all category names.\n        selection_info: A dictionary that maps tuples of category names to dictionaries\n            of algorithm names and their corresponding classes.\n\n    Returns:\n        A string containing the source code for the dataclass.\n\n    \"\"\"\n    # get the children of the active categories\n    children = _get_children(active_categories, all_categories, selection_info)\n\n    # get the name of the class to be generated\n    class_name = 
_get_class_name(active_categories)\n\n    # get code for the dataclass fields\n    field_template = \"    {name}: Type[{class_name}] = {class_name}\"\n    field_strings = []\n    for name, algo_class in selection_info[active_categories].items():\n        field_strings.append(\n            field_template.format(name=name, class_name=algo_class.__name__)\n        )\n    fields = \"\\n\".join(field_strings)\n\n    # get code for the properties to select children\n    child_template = textwrap.dedent(\"\"\"\n        @property\n        def {new_category}(self) -> {class_name}:\n            return {class_name}()\n    \"\"\")\n    child_template = textwrap.indent(child_template, \"    \")\n    child_strings = []\n    for new_category, categories in children.items():\n        child_class_name = _get_class_name(categories)\n        child_strings.append(\n            child_template.format(\n                new_category=new_category, class_name=child_class_name\n            )\n        )\n    children_code = \"\\n\".join(child_strings)\n\n    # assemble the class\n    out = \"@dataclass(frozen=True)\\n\"\n    out += f\"class {class_name}(AlgoSelection):\\n\"\n    out += fields + \"\\n\"\n    if children:\n        out += children_code\n\n    return out\n\n\ndef _get_class_name(active_categories: tuple[str, ...]) -> str:\n    \"\"\"Get the name of the class based on the active categories.\"\"\"\n    return \"\".join(active_categories) + \"Algorithms\"\n\n\ndef _get_children(\n    active_categories: tuple[str, ...],\n    all_categories: list[str],\n    selection_info: dict[tuple[str, ...], dict[str, Type[Algorithm]]],\n) -> dict[str, tuple[str, ...]]:\n    \"\"\"Get the children of the active categories.\n\n    Args:\n        active_categories: A tuple of active category names.\n        all_categories: A list of all category names.\n        selection_info: A dictionary that maps tuples of category names to dictionaries\n            of algorithm names and their corresponding 
classes.\n\n    Returns:\n        A dict mapping additional categories to a sorted tuple of categories\n            that contains all active categories and the additional category. Entries\n            are only included if the selected categories are in `selection_info`, i.e.\n            if there exist algorithms that are compatible with all categories.\n\n    \"\"\"\n    inactive_categories = sorted(set(all_categories) - set(active_categories))\n    out = {}\n    for new_cat in inactive_categories:\n        new_comb = tuple(sorted(active_categories + (new_cat,)))\n        if new_comb in selection_info:\n            out[new_cat] = new_comb\n    return out\n\n\n# ======================================================================================\n# Functions to create the imports\n# ======================================================================================\n\n\ndef _get_imports(modules: list[ModuleType]) -> str:\n    \"\"\"Create source code to import all algorithms.\"\"\"\n    snippets = [\n        \"from typing import Type\",\n        \"from dataclasses import dataclass\",\n        \"from optimagic.optimization.algorithm import Algorithm\",\n        \"from typing import cast\",\n    ]\n    for module in modules:\n        algorithms = _get_algorithms_in_module(module)\n        class_names = [algo.__name__ for algo in algorithms.values()]\n        for class_name in class_names:\n            snippets.append(f\"from {module.__name__} import {class_name}\")\n    return \"\\n\".join(snippets)\n\n\n# ======================================================================================\n# Functions to create the static parts of the code\n# ======================================================================================\n\n\ndef _get_base_class_code() -> str:\n    \"\"\"Get the source code for the AlgoSelection class.\"\"\"\n    out = textwrap.dedent(\"\"\"\n        @dataclass(frozen=True)\n        class AlgoSelection:\n\n            def _all(self) 
-> list[Type[Algorithm]]:\n                raw = [field.default for field in self.__dataclass_fields__.values()]\n                return cast(list[Type[Algorithm]], raw)\n\n\n            def _available(self) -> list[Type[Algorithm]]:\n                _all = self._all()\n                return [\n                    a for a in _all if a.algo_info.is_available # type: ignore\n                ]\n\n            @property\n            def All(self) -> list[Type[Algorithm]]:\n                return self._all()\n\n            @property\n            def Available(self) -> list[Type[Algorithm]]:\n                return self._available()\n\n            @property\n            def AllNames(self) -> list[str]:\n                return [str(a.name) for a in self._all()]\n\n            @property\n            def AvailableNames(self) -> list[str]:\n                return [str(a.name) for a in self._available()]\n\n            @property\n            def _all_algorithms_dict(self) -> dict[str, Type[Algorithm]]:\n                return {str(a.name): a for a in self._all()}\n\n            @property\n            def _available_algorithms_dict(self) -> dict[str, Type[Algorithm]]:\n                return {str(a.name): a for a in self._available()}\n\n    \"\"\")\n    return out\n\n\ndef _get_docstring_code() -> str:\n    \"\"\"Get the source code for the docstring of the AlgoSelection class.\"\"\"\n    raw = (\n        '\"\"\"This code was auto-generated by a pre-commit hook and should not be '\n        \"changed.\\n\\nIf you manually change this code, all of your changes will be \"\n        \"overwritten the next time\\nthe pre-commit hook runs.\\n\\nDetailed information \"\n        \"on the purpose of the code can be found here:\\n\"\n        \"https://optimagic.readthedocs.io/en/latest/development/ep-02-typing.html#\"\n        'algorithm-selection\\n\\n\"\"\"\\n'\n    )\n    out = textwrap.dedent(raw)\n    return out\n\n\ndef _get_instantiation_code() -> str:\n    \"\"\"Get the source 
code for instantiating some classes at the end of the module.\"\"\"\n    out = textwrap.dedent(\"\"\"\n        algos = Algorithms()\n        global_algos = GlobalAlgorithms()\n\n        ALL_ALGORITHMS = algos._all_algorithms_dict\n        AVAILABLE_ALGORITHMS = algos._available_algorithms_dict\n        GLOBAL_ALGORITHMS = global_algos._available_algorithms_dict\n    \"\"\")\n    return out\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": ".tools/test_create_algo_selection_code.py",
    "content": "from create_algo_selection_code import _generate_category_combinations\n\n\ndef test_generate_category_combinations() -> None:\n    categories = [\"a\", \"b\", \"c\"]\n    got = _generate_category_combinations(categories)\n    expected = [\n        (\"a\", \"b\", \"c\"),\n        (\"a\", \"b\"),\n        (\"a\", \"c\"),\n        (\"b\", \"c\"),\n        (\"a\",),\n        (\"b\",),\n        (\"c\",),\n    ]\n    assert got == expected\n"
  },
  {
    "path": ".tools/update_algo_selection_hook.py",
    "content": "#!/usr/bin/env python\nimport importlib.util\nimport subprocess\nimport sys\nfrom pathlib import Path\nfrom typing import Any\n\nROOT = Path(__file__).resolve().parents[1]\n\n# sys.executable guarantees we stay inside the pre‑commit venv\nPYTHON = [sys.executable]\n\n\ndef run(cmd: list[str], **kwargs: Any) -> None:\n    subprocess.check_call(cmd, cwd=ROOT, **kwargs)\n\n\ndef ensure_optimagic_is_locally_installed() -> None:\n    if importlib.util.find_spec(\"optimagic\") is None:\n        run([\"uv\", \"pip\", \"install\", \"--python\", sys.executable, \"-e\", \".\"])\n\n\ndef main() -> int:\n    ensure_optimagic_is_locally_installed()\n    run(PYTHON + [\".tools/create_algo_selection_code.py\"])\n\n    ruff_args = [\n        \"--silent\",\n        \"--config\",\n        \"pyproject.toml\",\n        \"src/optimagic/algorithms.py\",\n    ]\n    run([\"ruff\", \"format\", *ruff_args])\n    run([\"ruff\", \"check\", \"--fix\", *ruff_args])\n    return 0  # explicit success code\n\n\nif __name__ == \"__main__\":\n    sys.exit(main())\n"
  },
  {
    "path": ".yamllint.yml",
    "content": "---\nyaml-files:\n  - '*.yaml'\n  - '*.yml'\n  - .yamllint\nrules:\n  braces: enable\n  brackets: enable\n  colons: enable\n  commas: enable\n  comments:\n    level: warning\n  comments-indentation:\n    level: warning\n  document-end: disable\n  document-start:\n    level: warning\n  empty-lines: enable\n  empty-values: disable\n  float-values: disable\n  hyphens: enable\n  indentation: {spaces: 2}\n  key-duplicates: enable\n  key-ordering: disable\n  line-length:\n    max: 88\n    allow-non-breakable-words: true\n    allow-non-breakable-inline-mappings: false\n  new-line-at-end-of-file: enable\n  new-lines:\n    type: unix\n  octal-values: disable\n  quoted-strings: disable\n  trailing-spaces: enable\n  truthy:\n    level: warning\n"
  },
  {
    "path": "CHANGES.md",
    "content": "# Changes\n\nThis is a record of all past optimagic releases and what went into them in reverse\nchronological order. We follow [semantic versioning](https://semver.org/) and all\nreleases are available on [Anaconda.org](https://anaconda.org/optimagic-dev/optimagic).\n\n\n## 0.5.3\n\nThis release introduces **multi-backend plotting** with support for matplotlib, bokeh,\nand altair backends (in addition to the existing plotly backend), **3D visualizations**\nof optimization problems, and several **new optimizer libraries** including PySwarms,\nPyGAD, and gradient-free-optimizers. It also adds **lazy loading** for optional\ndependencies to improve import times. Many contributions in this release were made by\nGoogle Summer of Code (GSoC) 2025 contributors.\n\n- {gh}`665` Skips nag_dfols tests when DFO-LS is not installed ({ghuser}`Swayam-maurya`).\n- {gh}`664` Adds `from __future__ import annotations` to constraints.py to fix\n  annotations issue with Python 3.13 and NumPy 2.4 ({ghuser}`timmens`).\n- {gh}`660` Renames the `bayes_opt` parameter `n_iter` to `stopping_maxiter`\n  ({ghuser}`spline2hg`).\n- {gh}`659` Removes `None` as a valid option for `stopping_criterion` in\n  `convergence_plot` and updates the docstring ({ghuser}`szd5654125`).\n- {gh}`658` Enhances documentation and minor fixes in backend plotting\n  ({ghuser}`r3kste`).\n- {gh}`654` Implements the altair plotting backend ({ghuser}`r3kste`).\n- {gh}`653` Adds `llms.txt` and `llms-full.txt` to documentation\n  ({ghuser}`mostafafaheem`).\n- {gh}`652` Implements the bokeh plotting backend ({ghuser}`r3kste`).\n- {gh}`649` Implements backend plotting for `slice_plot` ({ghuser}`r3kste`).\n- {gh}`647` Implements backend plotting for `convergence_plot` ({ghuser}`r3kste`).\n- {gh}`645` Implements backend plotting for `profile_plot` ({ghuser}`r3kste`).\n- {gh}`644` Adds a how-to guide for changing plotting backends ({ghuser}`r3kste`).\n- {gh}`643` Skips doctest that fails due to negative signed 
zero handling\n  ({ghuser}`r3kste`).\n- {gh}`641` Implements backend plotting for `params_plot` ({ghuser}`r3kste`).\n- {gh}`639` Adds optimizers from PySwarms ({ghuser}`spline2hg`).\n- {gh}`637` Adds note about `__future__` import ({ghuser}`spline2hg`).\n- {gh}`636` Wraps population-based optimizers from gradient-free-optimizers\n  ({ghuser}`gauravmanmode`).\n- {gh}`633` Migrates bayesian-optimizer docs to new documentation style\n  ({ghuser}`spline2hg`).\n- {gh}`632` Migrates nevergrad optimizers to new documentation style\n  ({ghuser}`gauravmanmode`).\n- {gh}`631` Migrates iminuit docs to new documentation style ({ghuser}`spline2hg`).\n- {gh}`624` Wraps local optimizers from gradient-free-optimizers\n  ({ghuser}`gauravmanmode`).\n- {gh}`621` Implements lazy loading for optional dependencies ({ghuser}`spline2hg`).\n- {gh}`619` Adopts the NumFOCUS code of conduct ({ghuser}`timmens`).\n- {gh}`616` Adds optimizers from PyGAD ({ghuser}`spline2hg`).\n- {gh}`600` Separates data preparation and plotting for `criterion_plot()`\n  ({ghuser}`r3kste`).\n- {gh}`599` Implements the matplotlib backend for `criterion_plot()` ({ghuser}`r3kste`).\n- {gh}`581` Adds 3D visualizations of optimization problems ({ghuser}`shammeer-s`).\n- {gh}`554` Improves documentation of algorithm options ({ghuser}`janosg`).\n\n\n## 0.5.2\n\nThis minor release adds support for two additional optimizer libraries:\n\n- [Nevergrad](https://github.com/facebookresearch/nevergrad): A library for\n  gradient-free optimization developed by Facebook Research.\n- [Bayesian\n  Optimization](https://github.com/bayesian-optimization/BayesianOptimization): A\n  library for constrained bayesian global optimization with Gaussian processes.\n\nIn addition, this release includes several bug fixes and improvements to the\ndocumentation. 
Many contributions in this release were made by Google Summer of Code\n(GSoC) 2025 applicants, with @gauravmanmode and @spline2hg being the accepted\ncontributors.\n\n- {gh}`620` Uses interactive plotly figures in documentation ({ghuser}`timmens`).\n- {gh}`618` Improves bounds processing when no bounds are specified ({ghuser}`timmens`).\n- {gh}`615` Adds pre-commit hook that checks mypy version consistency ({ghuser}`timmens`).\n- {gh}`613` Exposes converter functionality ({ghuser}`spline2hg`).\n- {gh}`612` Fixes results processing to work with new cobyla optimizer ({ghuser}`janosg`).\n- {gh}`610` Adds `needs_bounds` and `supports_infinite_bounds` fields to algorithm info ({ghuser}`gauravmanmode`).\n- {gh}`608` Adds support for plotly >= 6 ({ghuser}`hmgaudecker`, {ghuser}`timmens`).\n- {gh}`607` Returns `run_explorations` results in a dataclass ({ghuser}`r3kste`).\n- {gh}`605` Enhances batch evaluator checking and processing, introduces the internal\n  `BatchEvaluatorLiteral` literal, and updates CHANGES.md ({ghuser}`janosg`,\n  {ghuser}`timmens`).\n- {gh}`602` Adds optimizer wrapper for bayesian-optimization package ({ghuser}`spline2hg`).\n- {gh}`601` Updates pre-commit hooks and fixes mypy issues ({ghuser}`janosg`).\n- {gh}`598` Fixes and adds links to GitHub in the documentation ({ghuser}`hamogu`).\n- {gh}`594` Refines newly added optimizer wrappers ({ghuser}`janosg`).\n- {gh}`591` Adds multiple optimizers from the nevergrad package ({ghuser}`gauravmanmode`).\n- {gh}`589` Rewrites the algorithm selection pre-commit hook in pure Python to address\n  issues with bash scripts on Windows ({ghuser}`timmens`).\n- {gh}`586` and {gh}`592` Ensure the SciPy `disp` parameter is exposed for the following\n  SciPy algorithms: slsqp, neldermead, powell, conjugate_gradient, newton_cg, cobyla,\n  truncated_newton, trust_constr ({ghuser}`sefmef`, {ghuser}`TimBerti`).\n- {gh}`585` Exposes all parameters of [SciPy's\n  
BFGS](https://docs.scipy.org/doc/scipy/reference/optimize.minimize-bfgs.html)\n  optimizer in optimagic ({ghuser}`TimBerti`).\n- {gh}`582` Adds support for handling infinite gradients during optimization\n  ({ghuser}`Aziz-Shameem`).\n- {gh}`579` Implements a wrapper for the PSO optimizer from the\n  [nevergrad](https://github.com/facebookresearch/nevergrad) package ({ghuser}`r3kste`).\n- {gh}`578` Integrates the `intersphinx-registry` package into the documentation for\n  automatic linking to up-to-date external documentation\n  ({ghuser}`Schefflera-Arboricola`).\n- {gh}`576` Wraps oneplusone optimizer from nevergrad ({ghuser}`gauravmanmode`, {ghuser}`gulshan-123`).\n- {gh}`572` and {gh}`573` Fix bugs in error handling for parameter selector processing\n  and constraints checking ({ghuser}`hmgaudecker`).\n- {gh}`570` Adds a how-to guide for adding algorithms to optimagic and improves internal\n  documentation ({ghuser}`janosg`).\n- {gh}`569` Implements a threading batch evaluator ({ghuser}`spline2hg`).\n- {gh}`568` Introduces an initial wrapper for the migrad optimizer from the\n  [iminuit](https://github.com/scikit-hep/iminuit) package ({ghuser}`spline2hg`).\n- {gh}`567` Makes the `fun` argument optional when `fun_and_jac` is provided\n  ({ghuser}`gauravmanmode`).\n- {gh}`563` Fixes a bug in input harmonization for history plotting\n  ({ghuser}`gauravmanmode`).\n- {gh}`552` Refactors and extends the `History` class, removing the internal\n  `HistoryArrays` class ({ghuser}`timmens`).\n- {gh}`485` Adds bootstrap weights functionality ({ghuser}`alanlujan91`).\n\n\n## 0.5.1\n\nThis is a minor release that introduces the new algorithm selection tool and several\nsmall improvements.\n\nTo learn more about the algorithm selection feature check out the following resources:\n\n- [How to specify and configure algorithms](https://optimagic.readthedocs.io/en/latest/how_to/how_to_specify_algorithm_and_algo_options.html)\n- [How to select local 
optimizers](https://optimagic.readthedocs.io/en/latest/how_to/how_to_algorithm_selection.html)\n\n- {gh}`549` Add support for Python 3.13 ({ghuser}`timmens`)\n- {gh}`550` and {gh}`534` implement the new algorithm selection tool ({ghuser}`janosg`)\n- {gh}`548` and {gh}`531` improve the documentation ({ghuser}`ChristianZimpelmann`)\n- {gh}`544` Adjusts the results processing of the nag optimizers to be compatible\n  with the latest releases ({ghuser}`timmens`)\n- {gh}`543` Adds support for numpy 2.x ({ghuser}`timmens`)\n- {gh}`536` Adds a how-to guide for choosing local optimizers ({ghuser}`mpetrosian`)\n- {gh}`535` Allows algorithm classes and instances in estimation functions\n  ({ghuser}`timmens`)\n- {gh}`532` Makes several small improvements to the documentation.\n\n## 0.5.0\n\nThis is a major release with several breaking changes and deprecations. In this\nrelease we started implementing two major enhancement proposals and renamed the package\nfrom estimagic to optimagic (while keeping the `estimagic` namespace for the estimation\ncapabilities).\n\n- [EP-02: Static typing](https://estimagic.org/en/latest/development/ep-02-typing.html)\n- [EP-03: Alignment with SciPy](https://estimagic.org/en/latest/development/ep-03-alignment.html)\n\nThe implementation of the two enhancement proposals is not complete and will likely\ntake until version `0.6.0`. However, all breaking changes and deprecations (with the\nexception of a minor change in benchmarking) are already implemented such that updating\nto version `0.5.0` is future proof.\n\n- {gh}`500` removes the dashboard, the support for simopt optimizers and the\n  `derivative_plot` ({ghuser}`janosg`)\n- {gh}`502` renames estimagic to optimagic ({ghuser}`janosg`)\n- {gh}`504` aligns `maximize` and `minimize` more closely with scipy. All related\n  deprecations and breaking changes are listed below. 
As a result, scipy code that uses\n  minimize with the arguments `x0`, `fun`, `jac` and `method` will run without changes\n  in optimagic. Similarly, the `OptimizeResult` gets some aliases so it behaves more\n  like SciPy's.\n- {gh}`506` introduces the new `Bounds` object and deprecates `lower_bounds`,\n  `upper_bounds`, `soft_lower_bounds` and `soft_upper_bounds` ({ghuser}`janosg`)\n- {gh}`507` updates the infrastructure so we can make parallel releases under the names\n  `optimagic` and `estimagic` ({ghuser}`timmens`)\n- {gh}`508` introduces the new `ScalingOptions` object and deprecates the\n  `scaling_options` argument of `maximize` and `minimize` ({ghuser}`timmens`)\n- {gh}`512` implements the new interface for objective functions and derivatives\n  ({ghuser}`janosg`)\n- {gh}`513` implements the new `optimagic.MultistartOptions` object and deprecates the\n  `multistart_options` argument of `maximize` and `minimize` ({ghuser}`timmens`)\n- {gh}`514` and {gh}`516` introduce the `NumdiffResult` object that is returned from\n  `first_derivative` and `second_derivative`. It also fixes several bugs in the\n  pytree handling in `first_derivative` and `second_derivative` and deprecates\n  Richardson Extrapolation and the `key` ({ghuser}`timmens`)\n- {gh}`517` introduces the new `NumdiffOptions` object for configuring numerical\n  differentiation during optimization or estimation ({ghuser}`timmens`)\n- {gh}`519` rewrites the logging code and introduces new `LogOptions` objects\n  ({ghuser}`schroedk`)\n- {gh}`521` introduces the new internal algorithm interface.\n  ({ghuser}`janosg` and {ghuser}`mpetrosian`)\n- {gh}`522` introduces the new `Constraint` objects and deprecates passing\n  dictionaries or lists of dictionaries as constraints ({ghuser}`timmens`)\n\n\n### Breaking changes\n\n- When providing a path for the argument `logging` of the functions\n  `maximize` and `minimize` and the file already exists, the default\n  behavior is to raise an error now. 
Replacement or extension\n  of an existing file must be explicitly configured.\n- The argument `if_table_exists` in `log_options` has no effect anymore and a\n  corresponding warning is raised.\n- `OptimizeResult.history` is now a `optimagic.History` object instead of a\n  dictionary. Dictionary style access is implemented but deprecated. Other dictionary\n  methods might not work.\n- The result of `first_derivative` and `second_derivative` is now a\n  `optimagic.NumdiffResult` object instead of a dictionary. Dictionary style access is\n  implemented but other dictionary methods might not work.\n- The dashboard is removed\n- The `derivative_plot` is removed.\n- Optimizers from Simopt are removed.\n- Passing callables with the old internal algorithm interface as `algorithm` to\n  `minimize` and `maximize` is not supported anymore. Use the new\n  `Algorithm` objects instead. For examples see: https://tinyurl.com/24a5cner\n\n\n### Deprecations\n\n- The `criterion` argument of `maximize` and `minimize` is renamed to `fun` (as in\n  SciPy).\n- The `derivative` argument of `maximize` and `minimize` is renamed to `jac` (as\n  in SciPy)\n- The `criterion_and_derivative` argument of `maximize` and `minimize` is renamed\n  to `fun_and_jac` to align it with the other names.\n- The `criterion_kwargs` argument of `maximize` and `minimize` is renamed to\n  `fun_kwargs` to align it with the other names.\n- The `derivative_kwargs` argument of `maximize` and `minimize` is renamed to\n  `jac_kwargs` to align it with the other names.\n- The `criterion_and_derivative_kwargs` argument of `maximize` and `minimize` is\n  renamed to `fun_and_jac_kwargs` to align it with the other names.\n- Algorithm specific convergence and stopping criteria are renamed to align them more\n  with NlOpt and SciPy names.\n    - `convergence_relative_criterion_tolerance` -> `convergence_ftol_rel`\n    - `convergence_absolute_criterion_tolerance` -> `convergence_ftol_abs`\n    - 
`convergence_relative_params_tolerance` -> `convergence_xtol_rel`\n    - `convergence_absolute_params_tolerance` -> `convergence_xtol_abs`\n    - `convergence_relative_gradient_tolerance` -> `convergence_gtol_rel`\n    - `convergence_absolute_gradient_tolerance` -> `convergence_gtol_abs`\n    - `convergence_scaled_gradient_tolerance` -> `convergence_gtol_scaled`\n    - `stopping_max_criterion_evaluations` -> `stopping_maxfun`\n    - `stopping_max_iterations` -> `stopping_maxiter`\n- The arguments `lower_bounds`, `upper_bounds`, `soft_lower_bounds` and\n  `soft_upper_bounds` are deprecated and replaced by `optimagic.Bounds`. This affects\n  `maximize`, `minimize`, `estimate_ml`, `estimate_msm`, `slice_plot` and several\n  other functions.\n- The `log_options` argument of `minimize` and `maximize` is deprecated. Instead,\n  `LogOptions` objects can be passed under the `logging` argument.\n- The class `OptimizeLogReader` is deprecated and redirects to\n  `SQLiteLogReader`.\n- The `scaling_options` argument of `maximize` and `minimize` is deprecated. Instead a\n  `ScalingOptions` object can be passed under the `scaling` argument that was previously\n  just a bool.\n- Objective functions that return a dictionary with the special keys \"value\",\n  \"contributions\" and \"root_contributions\" are deprecated. Instead, likelihood and\n  least-squares functions are marked with a `mark.likelihood` or `mark.least_squares`\n  decorator. There is a detailed how-to guide that shows the new behavior. This affects\n  `maximize`, `minimize`, `slice_plot` and other functions that work with objective\n  functions.\n- The `multistart_options` argument of `minimize` and `maximize` is deprecated. 
Instead,\n  a `MultistartOptions` object can be passed under the `multistart` argument.\n- Richardson Extrapolation is deprecated in `first_derivative` and `second_derivative`\n- The `key` argument is deprecated in `first_derivative` and `second_derivative`\n- Passing dictionaries or lists of dictionaries as `constraints` to `maximize` or\n  `minimize` is deprecated. Use the new `Constraint` objects instead.\n\n## 0.4.7\n\nThis release contains minor improvements and bug fixes. It is the last release before\nthe package will be renamed to optimagic and two large enhancement proposals will be\nimplemented.\n\n- {gh}`490` adds the attribute `optimize_result` to the `MomentsResult` class\n  ({ghuser}`timmens`)\n- {gh}`483` fixes a bug in the handling of keyword arguments in `bootstrap`\n  ({ghuser}`alanlujan91`)\n- {gh}`477` allows to use an identity weighting matrix in MSM estimation\n  ({ghuser}`sidd3888`)\n- {gh}`473` fixes a bug where bootstrap keyword arguments were ignored\n  `get_moments_cov` ({ghuser}`timmens`)\n- {gh}`467`, {gh}`478`, {gh}`479` and {gh}`480` improve the documentation\n  ({ghuser}`mpetrosian`, {ghuser}`segsell`, and {ghuser}`timmens`)\n\n\n## 0.4.6\n\nThis release drastically improves the optimizer benchmarking capabilities, especially\nwith noisy functions and parallel optimizers. 
It makes tranquilo and numba optional\ndependencies and is the first version of estimagic to be compatible with Python\n3.11.\n\n\n- {gh}`464` Makes tranquilo and numba optional dependencies ({ghuser}`janosg`)\n- {gh}`461` Updates docstrings for process_benchmark_results ({ghuser}`segsell`)\n- {gh}`460` Fixes several bugs in the processing of benchmark results with noisy\n  functions ({ghuser}`janosg`)\n- {gh}`459` Prepares benchmarking functionality for parallel optimizers\n  ({ghuser}`mpetrosian` and {ghuser}`janosg`)\n- {gh}`457` Removes some unused files ({ghuser}`segsell`)\n- {gh}`455` Improves a local pre-commit hook ({ghuser}`ChristianZimpelmann`)\n\n\n## 0.4.5\n\n- {gh}`379` Improves the estimation table ({ghuser}`ChristianZimpelmann`)\n- {gh}`445` fixes line endings in local pre-commit hook ({ghuser}`ChristianZimpelmann`)\n- {gh}`443`, {gh}`444`, {gh}`445`, {gh}`446`, {gh}`448` and {gh}`449` are a major\n  refactoring of tranquilo ({ghuser}`timmens` and {ghuser}`janosg`)\n- {gh}`441` Adds an aggregated convergence plot for benchmarks ({ghuser}`mpetrosian`)\n- {gh}`435` Completes the cartis-roberts benchmark set ({ghuser}`segsell`)\n\n## 0.4.4\n\n- {gh}`437` removes fuzzywuzzy as dependency ({ghuser}`aidatak97`)\n- {gh}`432` makes logging compatible with sqlalchemy 2.x ({ghuser}`janosg`)\n- {gh}`430` refactors the getter functions in Tranquilo ({ghuser}`janosg`)\n- {gh}`427` improves pre-commit setup ({ghuser}`timmens` and {ghuser}`hmgaudecker`)\n- {gh}`425` improves handling of notebooks in documentation ({ghuser}`baharcos`)\n- {gh}`423` and {gh}`399` add code to calculate poisedness constants ({ghuser}`segsell`)\n- {gh}`420` improve CI infrastructure ({ghuser}`hmgaudecker`, {ghuser}`janosg`)\n- {gh}`407` adds global optimizers from scipy ({ghuser}`baharcos`)\n\n## 0.4.3\n\n- {gh}`416` improves documentation and packaging ({ghuser}`janosg`)\n\n## 0.4.2\n\n- {gh}`412` Improves the output of the fides optimizer among other small changes\n  
({ghuser}`janosg`)\n- {gh}`411` Fixes a bug in multistart optimizations with least squares optimizers.\n  See {gh}`410` for details ({ghuser}`janosg`)\n- {gh}`404` speeds up the gqtpar subsolver ({ghuser}`mpetrosian`)\n- {gh}`400` refactors subsolvers ({ghuser}`mpetrosian`)\n- {gh}`398`, {gh}`397`, {gh}`395`, {gh}`390`, {gh}`389`, {gh}`388` continue with the\n  implementation of tranquilo ({ghuser}`segsell`, {ghuser}`timmens`,\n  {ghuser}`mpetrosian`, {ghuser}`janosg`)\n- {gh}`391` speeds up the bntr subsolver ({ghuser}`mpetrosian`)\n\n\n## 0.4.1\n\n- {gh}`307` Adopts a code of conduct and governance model\n- {gh}`384` Polish documentation ({ghuser}`janosg` and {ghuser}`mpetrosian`)\n- {gh}`374` Moves the documentation to MyST ({ghuser}`baharcos`)\n- {gh}`365` Adds copybuttons to documentation ({ghuser}`amageh`)\n- {gh}`371` Refactors the pounders algorithm ({ghuser}`segsell`)\n- {gh}`369` Fixes CI ({ghuser}`janosg`)\n- {gh}`367` Fixes the linux environment ({ghuser}`timmens`)\n- {gh}`294` Adds the very first experimental version of tranquilo ({ghuser}`janosg`,\n  {ghuser}`timmens`, {ghuser}`segsell`, {ghuser}`mpetrosian`)\n\n\n## 0.4.0\n\n- {gh}`366` Update  ({ghuser}`segsell`)\n- {gh}`362` Polish documentation ({ghuser}`segsell`)\n\n## 0.3.4\n\n- {gh}`364` Use local random number generators ({ghuser}`timmens`)\n- {gh}`363` Fix pounders test cases ({ghuser}`segsell`)\n- {gh}`361` Update estimation code ({ghuser}`timmens`)\n- {gh}`360` Update results object documentation ({ghuser}`timmens`)\n\n## 0.3.3\n\n- {gh}`357` Adds jax support ({ghuser}`janosg`)\n- {gh}`359` Improves error handling with violated constraints ({ghuser}`timmens`)\n- {gh}`358` Improves cartis roberts set of test functions and improves the\n  default latex rendering of MultiIndex tables ({ghuser}`mpetrosian`)\n\n## 0.3.2\n\n- {gh}`355` Improves test coverage of constraints processing ({ghuser}`janosg`)\n- {gh}`354` Improves test coverage for bounds processing ({ghuser}`timmens`)\n- {gh}`353` 
Improves history plots ({ghuser}`timmens`)\n- {gh}`352` Improves scaling and benchmarking ({ghuser}`janosg`)\n- {gh}`351` Improves estimation summaries ({ghuser}`timmens`)\n- {gh}`350` Allow empty queries or selectors in constraints ({ghuser}`janosg`)\n\n## 0.3.1\n\n- {gh}`349` fixes multiple small bugs and adds test cases for all of them\n  ({ghuser}`mpetrosian`, {ghuser}`janosg` and {ghuser}`timmens`)\n\n## 0.3.0\n\nFirst release with pytree support in optimization, estimation and differentiation\nand much better result objects in optimization and estimation.\n\nBreaking changes\n\n- New `OptimizeResult` object is returned by `maximize` and `minimize`. This\n  breaks all code that expects the old result dictionary. Usage of the new result is\n  explained in the getting started tutorial on optimization.\n- New internal optimizer interface that can break optimization with custom optimizers\n- The interface of `process_constraints` changed quite drastically. This breaks\n  code that used `process_constraints` to get the number of free parameters or check\n  if constraints are valid. 
There are new high level functions\n  `estimagic.check_constraints` and `estimagic.count_free_params` instead.\n- Some functions from `estimagic.logging.read_log` are removed and replaced by\n  `estimagic.OptimizeLogReader`.\n- Convenience functions to create namedtuples are removed from `estimagic.utilities`.\n- {gh}`346` Add option to use nonlinear constraints ({ghuser}`timmens`)\n- {gh}`345` Moves estimation_table to new latex functionality of pandas\n  ({ghuser}`mpetrosian`)\n- {gh}`344` Adds pytree support to slice_plot ({ghuser}`janosg`)\n- {gh}`343` Improves the result object of estimation functions and makes msm estimation\n  pytree compatible ({ghuser}`janosg`)\n- {gh}`342` Improves default options of the fides optimizer, allows single constraints\n  and polishes the documentation ({ghuser}`janosg`)\n- {gh}`340` Enables history collection for optimizers that evaluate the criterion\n  function in parallel ({ghuser}`janosg`)\n- {gh}`339` Incorporates user feedback and polishes the documentation.\n- {gh}`338` Improves log reading functions ({ghuser}`janosg`)\n- {gh}`336` Adds pytree support to the dashboard ({ghuser}`roecla`).\n- {gh}`335` Introduces an `OptimizeResult` object and functionality for history\n  plotting ({ghuser}`janosg`).\n- {gh}`333` Uses new history collection feature to speed up benchmarking\n  ({ghuser}`segsell`).\n- {gh}`330` Is a major rewrite of the estimation code ({ghuser}`timmens`).\n- {gh}`328` Improves quadratic surrogate solvers used in pounders and tranquilo\n  ({ghuser}`segsell`).\n- {gh}`326` Improves documentation of numerical derivatives ({ghuser}`timmens`).\n- {gh}`325` Improves the slice_plot ({ghuser}`mpetrosian`)\n- {gh}`324` Adds ability to collect optimization histories without logging\n  ({ghuser}`janosg`).\n- {gh}`311` and {gh}`288` rewrite all plotting code in plotly ({ghuser}`timmens`\n  and {ghuser}`aidatak97`).\n- {gh}`306` improves quadratic surrogate solvers used in pounders and tranquilo\n  
({ghuser}`segsell`).\n- {gh}`305` allows pytrees during optimization and rewrites large parts of the\n  constraints processing ({ghuser}`janosg`).\n- {gh}`303` introduces a new optimizer interface that makes it easier to add optimizers\n  and makes it possible to access optimizer specific information outside of the\n  internal_criterion_and_derivative ({ghuser}`janosg` and {ghuser}`roecla`).\n\n## 0.2.5\n\n- {gh}`302` Drastically improves error handling during optimization ({ghuser}`janosg`).\n\n## 0.2.4\n\n- {gh}`304` Removes the chaospy dependency ({ghuser}`segsell`).\n\n## 0.2.3\n\n- {gh}`295` Fixes a small bug in estimation_table ({ghuser}`mpetrosian`).\n- {gh}`286` Adds pytree support for first and second derivative ({ghuser}`timmens`).\n- {gh}`285` Allows to use estimation functions with external optimization\n  ({ghuser}`janosg`).\n- {gh}`283` Adds fast solvers for quadratic trustregion subproblems ({ghuser}`segsell`).\n- {gh}`282` Vastly improves estimation tables ({ghuser}`mpetrosian`).\n- {gh}`281` Adds some tools to work with pytrees ({ghuser}`janosg`\n  and {ghuser}`timmens`).\n- {gh}`278` adds Estimagic Enhancement Proposal 1 for the use of Pytrees in Estimagic\n  ({ghuser}`janosg`)\n\n## 0.2.2\n\n- {gh}`276` Add parallel Nelder-Mead algorithm by {ghuser}`jacekb95`\n- {gh}`267` Update fides by {ghuser}`roecla`\n- {gh}`265` Refactor pounders algorithm by {ghuser}`segsell` and {ghuser}`janosg`.\n- {gh}`261` Add pure Python pounders algorithm by {ghuser}`segsell`.\n\n## 0.2.1\n\n- {gh}`260` Update MSM and ML notebooks by {ghuser}`timmens`.\n- {gh}`259` Several small fixes and improvements by {ghuser}`janosg` and\n  {ghuser}`roecla`.\n\n## 0.2.0\n\nAdd a lot of new functionality with a few minor breaking changes. We have more\noptimizers, better error handling, bootstrap and inference for method of simulated\nmoments. 
The breaking changes are:\n\\- logging is disabled by default during optimization.\n\\- the log_option \"if_exists\" was renamed to \"if_table_exists\"\n\\- The comparison plot function is removed.\n\\- first_derivative now returns a dictionary, independent of arguments.\n\\- structure of the logging database has changed\n\\- there is an additional boolean flag named `scaling` in minimize and maximize\n\n- {gh}`251` Allows the loading, running and visualization of benchmarks\n  ({ghuser}`janosg`, {ghuser}`mpetrosian` and {ghuser}`roecla`)\n- {gh}`196` Adds support for multistart optimizations ({ghuser}`asouther4` and\n  {ghuser}`janosg`)\n- {gh}`248` Adds the fides optimizer ({ghuser}`roecla`)\n- {gh}`146` Adds `estimate_ml` functionality ({ghuser}`janosg`, {ghuser}`LuisCald`\n  and {ghuser}`s6soverd`).\n- {gh}`235` Improves the documentation ({ghuser}`roecla`)\n- {gh}`216` Adds the ipopt optimizer ({ghuser}`roecla`)\n- {gh}`215` Adds optimizers from the pygmo library ({ghuser}`roecla` and\n  {ghuser}`janosg`)\n- {gh}`212` Adds optimizers from the nlopt library ({ghuser}`mpetrosian`)\n- {gh}`228` Restructures testing and makes changes to log_options.\n- {gh}`149` Adds `estimate_msm` functionality ({ghuser}`janosg` and {ghuser}`loikein`)\n- {gh}`219` Several enhancements by ({ghuser}`tobiasraabe`)\n- {gh}`218` Improve documentation by ({ghuser}`sofyaakimova`) and ({ghuser}`effieHan`)\n- {gh}`214` Fix bug with overlapping \"fixed\" and \"linear\" constraints ({ghuser}`janosg`)\n- {gh}`211` Improve error handling of log reading functions by ({ghuser}`janosg`)\n- {gh}`210` Automatically drop empty constraints by ({ghuser}`janosg`)\n- {gh}`192` Add option to scale optimization problems by ({ghuser}`janosg`)\n- {gh}`202` Refactoring of bootstrap code ({ghuser}`janosg`)\n- {gh}`148` Add bootstrap functionality ({ghuser}`RobinMusolff`)\n- {gh}`208` Several small improvements ({ghuser}`janosg`)\n- {gh}`206` Improve latex and html tables ({ghuser}`mpetrosian`)\n- {gh}`205` 
Add scipy's least squares optimizers (based on {gh}`197` by\n  ({ghuser}`yradeva93`)\n- {gh}`198` More unit tests for optimizers ({ghuser}`mchandra12`)\n- {gh}`200` Plot intermediate outputs of `first_derivative` ({ghuser}`timmens`)\n\n## 0.1.3 - 2021-06-25\n\n- {gh}`195` Illustrate optimizers in documentation ({ghuser}`sofyaakimova`),\n  ({ghuser}`effieHan`) and ({ghuser}`janosg`)\n- {gh}`201` More stable covariance matrix calculation ({ghuser}`janosg`)\n- {gh}`199` Return intermediate outputs of first_derivative ({ghuser}`timmens`)\n\n## 0.1.2 - 2021-02-07\n\n- {gh}`189` Improve documentation and logging ({ghuser}`roecla`)\n\n## 0.1.1 - 2021-01-13\n\nThis release greatly expands the set of available optimization algorithms, has a better\nand prettier dashboard and improves the documentation.\n\n- {gh}`187` Implement dot notation in algo_options ({ghuser}`roecla`)\n- {gh}`183` Improve documentation ({ghuser}`SofiaBadini`)\n- {gh}`182` Allow for constraints in likelihood inference ({ghuser}`janosg`)\n- {gh}`181` Add DF-OLS optimizer from Numerical Algorithm Group ({ghuser}`roecla`)\n- {gh}`180` Add pybobyqa optimizer from Numerical Algorithm Group ({ghuser}`roecla`)\n- {gh}`179` Allow base_steps and min_steps to be scalars ({ghuser}`tobiasraabe`)\n- {gh}`178` Refactoring of dashboard code ({ghuser}`roecla`)\n- {gh}`177` Add stride as a new dashboard argument ({ghuser}`roecla`)\n- {gh}`176` Minor fix of plot width in dashboard ({ghuser}`janosg`)\n- {gh}`174` Various dashboard improvements ({ghuser}`roecla`)\n- {gh}`173` Add new color palettes and use them in dashboard ({ghuser}`janosg`)\n- {gh}`172` Add high level log reading functions ({ghuser}`janosg`)\n\n## 0.1.0dev1 - 2020-09-08\n\nThis release entails a complete rewrite of the optimization code with many breaking\nchanges. In particular, some optimizers that were available before are not anymore.\nThose will be re-introduced soon. The breaking changes include:\n\n- The database is restructured. 
The new version simplifies the code,\n  makes logging faster and avoids the sql column limit.\n- Users can provide closed form derivative and/or criterion_and_derivative where\n  the latter one can exploit synergies in the calculation of criterion and derivative.\n  This is also compatible with constraints.\n- Our own (parallelized) first_derivative function is used to calculate gradients\n  during the optimization when no closed form gradients are provided.\n- Optimizer options like convergence criteria and optimization results are harmonized\n  across optimizers.\n- Users can choose from several batch evaluators whenever we parallelize\n  (e.g. for parallel optimizations or parallel function evaluations for numerical\n  derivatives) or pass in their own batch evaluator function as long as it has a\n  compatible interface. The batch evaluator interface also standardizes error handling.\n- There is a well defined internal optimizer interface. Users can select the\n  pre-implemented optimizers by algorithm=\"name_of_optimizer\" or their own optimizer\n  by algorithm=custom_minimize_function\n- Optimizers from pygmo and nlopt are no longer supported (will be re-introduced)\n- Greatly improved error handling.\n- {gh}`169` Add additional dashboard arguments\n- {gh}`168` Rename lower and upper to lower_bound and upper_bound\n  ({ghuser}`ChristianZimpelmann`)\n- {gh}`167` Improve dashboard styling ({ghuser}`roecla`)\n- {gh}`166` Re-add POUNDERS from TAO ({ghuser}`tobiasraabe`)\n- {gh}`165` Re-add the scipy optimizers with harmonized options ({ghuser}`roecla`)\n- {gh}`164` Closed form derivatives for parameter transformations ({ghuser}`timmens`)\n- {gh}`163` Complete rewrite of optimization with breaking changes ({ghuser}`janosg`)\n- {gh}`162` Improve packaging and relax version constraints ({ghuser}`tobiasraabe`)\n- {gh}`160` Generate parameter tables in tex and html ({ghuser}`mpetrosian`)\n\n## 0.0.31 - 2020-06-20\n\n- {gh}`130` Improve wrapping of POUNDERS algorithm 
({ghuser}`mo2561057`)\n- {gh}`159` Add Richardson Extrapolation to first_derivative ({ghuser}`timmens`)\n\n## 0.0.30 - 2020-04-22\n\n- {gh}`158` allows to specify a gradient in maximize and minimize ({ghuser}`janosg`)\n\n## 0.0.29 - 2020-04-16\n\n- {gh}`154` Version restrictions for pygmo ({ghuser}`janosg`)\n- {gh}`153` adds documentation for the CLI ({ghuser}`tobiasraabe`)\n- {gh}`152` makes estimagic work with pandas 1.0 ({ghuser}`SofiaBadini`)\n\n## 0.0.28 - 2020-03-17\n\n- {gh}`151` estimagic becomes a noarch package. ({ghuser}`janosg`).\n- {gh}`150` adds command line interface to the dashboard ({ghuser}`tobiasraabe`)\n"
  },
  {
    "path": "CITATION",
    "content": "\nPlease use one of the following samples to cite the optimagic version (change\nx.y) from this installation\n\nText:\n\n[optimagic]  optimagic x.y, 2024\nJanos Gabler, https://github.com/optimagic-dev/optimagic\n\nBibTeX:\n\n@Unpublished{Gabler2024,\n      Title  = {optimagic: A library for nonlinear optimization},\n      Author = {Janos Gabler},\n      Year   = {2024},\n      Url    = {https://github.com/optimagic-dev/optimagic}\n    }\n\nIf you are unsure about which version of optimagic you are using run: `conda list optimagic`.\n"
  },
  {
    "path": "LICENSE",
    "content": "Copyright 2019-2021 Janos Gabler\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this\nsoftware and associated documentation files (the \"Software\"), to deal in the Software\nwithout restriction, including without limitation the rights to use, copy, modify,\nmerge, publish, distribute, sublicense, and/or sell copies of the Software, and to\npermit persons to whom the Software is furnished to do so, subject to the following\nconditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or\nsubstantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\nINCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\nPURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\nLIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT\nOR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\nOTHER DEALINGS IN THE SOFTWARE.\n"
  },
  {
    "path": "README.md",
    "content": "<a href=\"https://optimagic.readthedocs.io\">\n    <p align=\"center\">\n        <img src=\"https://raw.githubusercontent.com/optimagic-dev/optimagic/main/docs/source/_static/images/optimagic_logo.svg\" width=50% alt=\"optimagic\">\n    </p>\n</a>\n\n______________________________________________________________________\n\n[![PyPI](https://img.shields.io/pypi/v/optimagic?color=blue)](https://pypi.org/project/optimagic)\n[![PyPI - Python Version](https://img.shields.io/pypi/pyversions/optimagic)](https://pypi.org/project/optimagic)\n[![image](https://img.shields.io/conda/vn/conda-forge/optimagic.svg)](https://anaconda.org/conda-forge/optimagic)\n[![image](https://img.shields.io/conda/pn/conda-forge/optimagic.svg)](https://anaconda.org/conda-forge/optimagic)\n[![PyPI - License](https://img.shields.io/pypi/l/optimagic)](https://pypi.org/project/optimagic)\n[![image](https://readthedocs.org/projects/optimagic/badge/?version=latest)](https://optimagic.readthedocs.io/en/latest)\n[![image](https://img.shields.io/github/actions/workflow/status/optimagic-dev/optimagic/main.yml?branch=main)](https://github.com/optimagic-dev/optimagic/actions?query=branch%3Amain)\n[![image](https://codecov.io/gh/optimagic-dev/optimagic/branch/main/graph/badge.svg)](https://codecov.io/gh/optimagic-dev/optimagic)\n[![pre-commit.ci status](https://results.pre-commit.ci/badge/github/optimagic-dev/optimagic/main.svg)](https://results.pre-commit.ci/latest/github/optimagic-dev/optimagic/main)\n[![Ruff](https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json)](https://github.com/astral-sh/ruff)\n[![Downloads](https://pepy.tech/badge/optimagic/month)](https://pepy.tech/project/optimagic)\n[![NumFOCUS](https://img.shields.io/badge/NumFOCUS-affiliated%20project-orange.svg?style=flat&colorA=E1523D&colorB=007D8A)](https://numfocus.org/sponsored-projects/affiliated-projects)\n\noptimagic is a Python package for numerical optimization. 
It is a unified interface to\noptimizers from SciPy, NlOpt, and many other Python packages. Its features include:\n\n- **SciPy-compatible API.** optimagic's `minimize` function works just like SciPy's, so\n  you don't have to adjust your code. You simply get more optimizers for free.\n- **Powerful diagnostic tools.** Visualize optimizer histories, compare runs, and\n  diagnose convergence problems.\n- **Parallel numerical derivatives.** Compute gradients, jacobians, and hessians with\n  parallel execution.\n- **Bounded, constrained, and unconstrained optimization.** Support for bounds, linear\n  constraints, nonlinear constraints, fixed parameters, and more.\n- **Statistical inference on estimated parameters.** The estimagic subpackage provides\n  functionality for confidence intervals, standard errors, and p-values.\n\n# Installation\n\noptimagic is available on [PyPI](https://pypi.org/project/optimagic) and on\n[conda-forge](https://anaconda.org/conda-forge/optimagic). Install the package with\n\n```console\n$ pip install optimagic\n```\n\nor\n\n```console\n$ conda install -c conda-forge optimagic\n```\n\noptimagic ships with all `scipy` optimizers out of the box. Additional algorithms become\navailable if you install optional packages. 
For an overview of all supported optimizers\nand how to enable them, see the\n[list of algorithms](https://optimagic.readthedocs.io/en/latest/algorithms.html).\n\n# Usage\n\n```python\nimport optimagic as om\nimport numpy as np\n\n\ndef fun(x):\n    return x @ x\n\n\nresult = om.minimize(fun, params=np.array([1, 2, 3]), algorithm=\"scipy_lbfgsb\")\nresult.params.round(9)  # np.array([0., 0., 0.])\n```\n\n# Documentation\n\nYou find the documentation at <https://optimagic.readthedocs.io> with\n[tutorials](https://optimagic.readthedocs.io/en/latest/tutorials/index.html) and\n[how-to guides](https://optimagic.readthedocs.io/en/latest/how_to/index.html).\n\n# Changes\n\nConsult the\n[release notes](https://optimagic.readthedocs.io/en/latest/development/changes.html) to\nfind out about what is new.\n\n# License\n\noptimagic is distributed under the terms of the [MIT license](LICENSE).\n\n# Citation\n\nIf you use optimagic for your research, please cite it with the following key to help\nothers discover the tool.\n\n```bibtex\n@Unpublished{Gabler2024,\n    Title  = {optimagic: A library for nonlinear optimization},\n    Author = {Janos Gabler},\n    Year   = {2024},\n    Url    = {https://github.com/optimagic-dev/optimagic}\n}\n```\n\n# Acknowledgment\n\nWe thank all institutions that have funded or supported optimagic (formerly estimagic).\n\n<table>\n  <tr>\n    <td><img src=\"docs/source/_static/images/numfocus_logo.png\" width=\"200\"></td>\n    <td><img src=\"docs/source/_static/images/aai-institute-logo.svg\" width=\"185\"></td>\n    <td><img src=\"docs/source/_static/images/tra_logo.png\" width=\"240\"></td>\n    <td><img src=\"docs/source/_static/images/hoover_logo.png\" width=\"192\"></td>\n\n</tr>\n</table>\n"
  },
  {
    "path": "codecov.yml",
    "content": "---\ncodecov:\n  notify:\n    require_ci_to_pass: true\ncoverage:\n  precision: 2\n  round: down\n  range: 50...100\n  status:\n    patch:\n      default:\n        target: 80%\n    project:\n      default:\n        target: 90%\nignore:\n  # Uses numba\n  - src/optimagic/benchmarking/cartis_roberts.py\n  - tests/**/*\n"
  },
  {
    "path": "docs/Makefile",
    "content": "# Minimal makefile for Sphinx documentation\n#\n\n# You can set these variables from the command line.\nSPHINXOPTS    =\nSPHINXBUILD   = sphinx-build\nSPHINXPROJ    = optimagic\nSOURCEDIR     = source\nBUILDDIR      = build\n\n# Put it first so that \"make\" without argument is like \"make help\".\nhelp:\n\t@$(SPHINXBUILD) -M help \"$(SOURCEDIR)\" \"$(BUILDDIR)\" $(SPHINXOPTS) $(O)\n\n.PHONY: help Makefile\n\n# Catch-all target: route all unknown targets to Sphinx using the new\n# \"make mode\" option.  $(O) is meant as a shortcut for $(SPHINXOPTS).\n%: Makefile\n\t@$(SPHINXBUILD) -M $@ \"$(SOURCEDIR)\" \"$(BUILDDIR)\" $(SPHINXOPTS) $(O)\n"
  },
  {
    "path": "docs/make.bat",
    "content": "@ECHO OFF\n\npushd %~dp0\n\nREM Command file for Sphinx documentation\n\nif \"%SPHINXBUILD%\" == \"\" (\n\tset SPHINXBUILD=sphinx-build\n)\nset SOURCEDIR=source\nset BUILDDIR=build\nset SPHINXPROJ=optimagic\n\nif \"%1\" == \"\" goto help\n\n%SPHINXBUILD% >NUL 2>NUL\nif errorlevel 9009 (\n\techo.\n\techo.The 'sphinx-build' command was not found. Make sure you have Sphinx\n\techo.installed, then set the SPHINXBUILD environment variable to point\n\techo.to the full path of the 'sphinx-build' executable. Alternatively you\n\techo.may add the Sphinx directory to PATH.\n\techo.\n\techo.If you don't have Sphinx installed, grab it from\n\techo.http://sphinx-doc.org/\n\texit /b 1\n)\n\n%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%\ngoto end\n\n:help\n%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%\n\n:end\npopd\n"
  },
  {
    "path": "docs/source/_static/css/custom.css",
    "content": "/* Remove execution count for notebook cells. */\ndiv.prompt {\n  display: none;\n}\n\n\n/* Classes for the index page. */\n.index-card-image {\n    padding-top: 1rem;\n    height: 68px;\n    text-align: center;\n}\n\n.index-card-link {\n    color: var(--sd-color-card-text);\n    font-weight: bold;\n}\n\npre {\n  padding-left: 20px\n}\n\nli pre {\n  padding-left: 20px\n}\n\n.highlight {\n    background: #f5f5f5\n}\n\n.highlight button.copybtn{\n    background-color: #f5f5f5;\n}\n\n.highlight button.copybtn:hover {\n    background-color: #f5f5f5;\n}\n"
  },
  {
    "path": "docs/source/_static/css/termynal.css",
    "content": "/**\n * termynal.js\n *\n * @author Ines Montani <ines@ines.io>\n * @version 0.0.1\n * @license MIT\n */\n\n:root {\n    --color-bg: #0c0c0c;\n    --color-text: #f2f2f2;\n    --color-text-subtle: #a2a2a2;\n}\n\n[data-termynal] {\n    width: 750px;\n    max-width: 100%;\n    background: var(--color-bg);\n    color: var(--color-text);\n    /* font-size: 18px; */\n    font-size: 15px;\n    /* font-family: 'Fira Mono', Consolas, Menlo, Monaco, 'Courier New', Courier, monospace; */\n    font-family: 'Roboto Mono', 'Fira Mono', Consolas, Menlo, Monaco, 'Courier New', Courier, monospace;\n    border-radius: 4px;\n    padding: 75px 45px 35px;\n    position: relative;\n    -webkit-box-sizing: border-box;\n            box-sizing: border-box;\n    line-height: 1.2;\n}\n\n[data-termynal]:before {\n    content: '';\n    position: absolute;\n    top: 15px;\n    left: 15px;\n    display: inline-block;\n    width: 15px;\n    height: 15px;\n    border-radius: 50%;\n    /* A little hack to display the window buttons in one pseudo element. */\n    background: #d9515d;\n    -webkit-box-shadow: 25px 0 0 #f4c025, 50px 0 0 #3ec930;\n            box-shadow: 25px 0 0 #f4c025, 50px 0 0 #3ec930;\n}\n\n[data-termynal]:after {\n    content: 'bash';\n    position: absolute;\n    color: var(--color-text-subtle);\n    top: 5px;\n    left: 0;\n    width: 100%;\n    text-align: center;\n}\n\na[data-terminal-control] {\n    text-align: right;\n    display: block;\n    color: #aebbff;\n}\n\n[data-ty] {\n    display: block;\n    line-height: 2;\n}\n\n[data-ty]:before {\n    /* Set up defaults and ensure empty lines are displayed. 
*/\n    content: '';\n    display: inline-block;\n    vertical-align: middle;\n}\n\n[data-ty=\"input\"]:before,\n[data-ty-prompt]:before {\n    margin-right: 0.75em;\n    color: var(--color-text-subtle);\n}\n\n[data-ty=\"input\"]:before {\n    content: '$';\n}\n\n[data-ty][data-ty-prompt]:before {\n    content: attr(data-ty-prompt);\n}\n\n[data-ty-cursor]:after {\n    content: attr(data-ty-cursor);\n    font-family: monospace;\n    margin-left: 0.5em;\n    -webkit-animation: blink 1s infinite;\n            animation: blink 1s infinite;\n}\n\n\n/* Cursor animation */\n\n@-webkit-keyframes blink {\n    50% {\n        opacity: 0;\n    }\n}\n\n@keyframes blink {\n    50% {\n        opacity: 0;\n    }\n}\n"
  },
  {
    "path": "docs/source/_static/css/termynal_custom.css",
    "content": ".termynal-comment {\n    color: #4a968f;\n    font-style: italic;\n    display: block;\n}\n\n.termy [data-termynal] {\n    white-space: pre-wrap;\n}\n\na.external-link::after {\n    /* \\00A0 is a non-breaking space\n        to make the mark be on the same line as the link\n    */\n    content: \"\\00A0[↪]\";\n}\n\na.internal-link::after {\n    /* \\00A0 is a non-breaking space\n        to make the mark be on the same line as the link\n    */\n    content: \"\\00A0↪\";\n}\n\n:root {\n    --termynal-green: #137C39;\n    --termynal-red: #BF2D2D;\n    --termynal-yellow: #F4C041;\n    --termynal-white: #f2f2f2;\n    --termynal-black: #0c0c0c;\n    --termynal-blue: #11a8cd;\n    --termynal-grey: #7f7f7f;\n}\n\n.termynal-failed {\n    color: var(--termynal-red);\n}\n\n.termynal-failed-textonly {\n    color: var(--termynal-white);\n    background: var(--termynal-red);\n    font-weight: bold;\n}\n\n.termynal-success {\n    color: var(--termynal-green);\n}\n\n.termynal-success-textonly {\n    color: var(--termynal-white);\n    background: var(--termynal-green);\n    font-weight: bold;\n}\n\n.termynal-skipped {\n    color: var(--termynal-yellow);\n}\n\n.termynal-skipped-textonly {\n    color: var(--termynal-black);\n    background: var(--termynal-yellow);\n    font-weight: bold;\n}\n\n.termynal-warning {\n    color: var(--termynal-yellow);\n}\n\n.termynal-command {\n    color: var(--termynal-green);\n    font-weight: bold;\n}\n\n.termynal-option {\n    color: var(--termynal-yellow);\n    font-weight: bold;\n}\n\n.termynal-switch {\n    color: var(--termynal-red);\n    font-weight: bold;\n}\n\n.termynal-metavar {\n    color: yellow;\n    font-weight: bold;\n}\n\n.termynal-dim {\n    color: var(--termynal-grey);\n}\n\n.termynal-number {\n    color: var(--termynal-blue);\n}\n"
  },
  {
    "path": "docs/source/_static/js/custom.js",
    "content": "/*\n\nThe following code is copied from https://github.com/tiangolo/typer.\n\nThe MIT License (MIT)\n\nCopyright (c) 2019 Sebastián Ramírez\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\n*/\n\ndocument.querySelectorAll(\".use-termynal\").forEach(node => {\n    node.style.display = \"block\";\n    new Termynal(node, {\n        lineDelay: 500\n    });\n});\nconst progressLiteralStart = \"---> 100%\";\nconst promptLiteralStart = \"$ \";\nconst customPromptLiteralStart = \"# \";\nconst termynalActivateClass = \"termy\";\nlet termynals = [];\n\nfunction createTermynals() {\n    document\n        .querySelectorAll(`.${termynalActivateClass} .highlight`)\n        .forEach(node => {\n            const text = node.textContent;\n            const lines = text.split(\"\\n\");\n            const useLines = [];\n            let buffer = [];\n            function saveBuffer() {\n                if (buffer.length) {\n                    let isBlankSpace = true;\n                    
buffer.forEach(line => {\n                        if (line) {\n                            isBlankSpace = false;\n                        }\n                    });\n                    dataValue = {};\n                    if (isBlankSpace) {\n                        dataValue[\"delay\"] = 0;\n                    }\n                    if (buffer[buffer.length - 1] === \"\") {\n                        // A last single <br> won't have effect\n                        // so put an additional one\n                        buffer.push(\"\");\n                    }\n                    const bufferValue = buffer.join(\"<br>\");\n                    dataValue[\"value\"] = bufferValue;\n                    useLines.push(dataValue);\n                    buffer = [];\n                }\n            }\n            for (let line of lines) {\n                if (line === progressLiteralStart) {\n                    saveBuffer();\n                    useLines.push({\n                        type: \"progress\"\n                    });\n                } else if (line.startsWith(promptLiteralStart)) {\n                    saveBuffer();\n                    const value = line.replace(promptLiteralStart, \"\").trimEnd();\n                    useLines.push({\n                        type: \"input\",\n                        value: value\n                    });\n                } else if (line.startsWith(\"// \")) {\n                    saveBuffer();\n                    const value = \"💬 \" + line.replace(\"// \", \"\").trimEnd();\n                    useLines.push({\n                        value: value,\n                        class: \"termynal-comment\",\n                        delay: 0\n                    });\n                } else if (line.startsWith(customPromptLiteralStart)) {\n                    saveBuffer();\n                    const promptStart = line.indexOf(promptLiteralStart);\n                    if (promptStart === -1) {\n                        
console.error(\"Custom prompt found but no end delimiter\", line)\n                    }\n                    const prompt = line.slice(0, promptStart).replace(customPromptLiteralStart, \"\")\n                    let value = line.slice(promptStart + promptLiteralStart.length);\n                    useLines.push({\n                        type: \"input\",\n                        value: value,\n                        prompt: prompt\n                    });\n                } else {\n                    buffer.push(line);\n                }\n            }\n            saveBuffer();\n            const div = document.createElement(\"div\");\n            node.replaceWith(div);\n            const termynal = new Termynal(div, {\n                lineData: useLines,\n                noInit: true,\n                lineDelay: 500\n            });\n            termynals.push(termynal);\n        });\n}\n\nfunction loadVisibleTermynals() {\n    termynals = termynals.filter(termynal => {\n        if (termynal.container.getBoundingClientRect().top - innerHeight <= 0) {\n            termynal.init();\n            return false;\n        }\n        return true;\n    });\n}\nwindow.addEventListener(\"scroll\", loadVisibleTermynals);\ncreateTermynals();\nloadVisibleTermynals();\n"
  },
  {
    "path": "docs/source/_static/js/require.js",
    "content": "/** vim: et:ts=4:sw=4:sts=4\n * @license RequireJS 2.3.7 Copyright jQuery Foundation and other contributors.\n * Released under MIT license, https://github.com/requirejs/requirejs/blob/master/LICENSE\n */\nvar requirejs,require,define;!function(global,setTimeout){var req,s,head,baseElement,dataMain,src,interactiveScript,currentlyAddingScript,mainScript,subPath,version=\"2.3.7\",commentRegExp=/\\/\\*[\\s\\S]*?\\*\\/|([^:\"'=]|^)\\/\\/.*$/gm,cjsRequireRegExp=/[^.]\\s*require\\s*\\(\\s*[\"']([^'\"\\s]+)[\"']\\s*\\)/g,jsSuffixRegExp=/\\.js$/,currDirRegExp=/^\\.\\//,op=Object.prototype,ostring=op.toString,hasOwn=op.hasOwnProperty,isBrowser=!(\"undefined\"==typeof window||\"undefined\"==typeof navigator||!window.document),isWebWorker=!isBrowser&&\"undefined\"!=typeof importScripts,readyRegExp=isBrowser&&\"PLAYSTATION 3\"===navigator.platform?/^complete$/:/^(complete|loaded)$/,defContextName=\"_\",isOpera=\"undefined\"!=typeof opera&&\"[object Opera]\"===opera.toString(),contexts={},cfg={},globalDefQueue=[],useInteractive=!1,disallowedProps=[\"__proto__\",\"constructor\"];function commentReplace(e,t){return t||\"\"}function isFunction(e){return\"[object Function]\"===ostring.call(e)}function isArray(e){return\"[object Array]\"===ostring.call(e)}function each(e,t){if(e)for(var i=0;i<e.length&&(!e[i]||!t(e[i],i,e));i+=1);}function eachReverse(e,t){if(e)for(var i=e.length-1;-1<i&&(!e[i]||!t(e[i],i,e));--i);}function hasProp(e,t){return hasOwn.call(e,t)}function getOwn(e,t){return hasProp(e,t)&&e[t]}function eachProp(e,t){for(var i in e)if(hasProp(e,i)&&-1==disallowedProps.indexOf(i)&&t(e[i],i))break}function mixin(i,e,r,n){e&&eachProp(e,function(e,t){!r&&hasProp(i,t)||(!n||\"object\"!=typeof e||!e||isArray(e)||isFunction(e)||e instanceof RegExp?i[t]=e:(i[t]||(i[t]={}),mixin(i[t],e,r,n)))})}function bind(e,t){return function(){return t.apply(e,arguments)}}function scripts(){return document.getElementsByTagName(\"script\")}function defaultOnError(e){throw 
e}function getGlobal(e){var t;return e&&(t=global,each(e.split(\".\"),function(e){t=t[e]}),t)}function makeError(e,t,i,r){t=new Error(t+\"\\nhttps://requirejs.org/docs/errors.html#\"+e);return t.requireType=e,t.requireModules=r,i&&(t.originalError=i),t}if(void 0===define){if(void 0!==requirejs){if(isFunction(requirejs))return;cfg=requirejs,requirejs=void 0}void 0===require||isFunction(require)||(cfg=require,require=void 0),req=requirejs=function(e,t,i,r){var n,o=defContextName;return isArray(e)||\"string\"==typeof e||(n=e,isArray(t)?(e=t,t=i,i=r):e=[]),n&&n.context&&(o=n.context),r=(r=getOwn(contexts,o))||(contexts[o]=req.s.newContext(o)),n&&r.configure(n),r.require(e,t,i)},req.config=function(e){return req(e)},req.nextTick=void 0!==setTimeout?function(e){setTimeout(e,4)}:function(e){e()},require=require||req,req.version=version,req.jsExtRegExp=/^\\/|:|\\?|\\.js$/,req.isBrowser=isBrowser,s=req.s={contexts:contexts,newContext:newContext},req({}),each([\"toUrl\",\"undef\",\"defined\",\"specified\"],function(t){req[t]=function(){var e=contexts[defContextName];return e.require[t].apply(e,arguments)}}),isBrowser&&(head=s.head=document.getElementsByTagName(\"head\")[0],baseElement=document.getElementsByTagName(\"base\")[0],baseElement)&&(head=s.head=baseElement.parentNode),req.onError=defaultOnError,req.createNode=function(e,t,i){var r=e.xhtml?document.createElementNS(\"http://www.w3.org/1999/xhtml\",\"html:script\"):document.createElement(\"script\");return r.type=e.scriptType||\"text/javascript\",r.charset=\"utf-8\",r.async=!0,r},req.load=function(t,i,r){var e,n=t&&t.config||{};if(isBrowser)return(e=req.createNode(n,i,r)).setAttribute(\"data-requirecontext\",t.contextName),e.setAttribute(\"data-requiremodule\",i),!e.attachEvent||e.attachEvent.toString&&e.attachEvent.toString().indexOf(\"[native 
code\")<0||isOpera?(e.addEventListener(\"load\",t.onScriptLoad,!1),e.addEventListener(\"error\",t.onScriptError,!1)):(useInteractive=!0,e.attachEvent(\"onreadystatechange\",t.onScriptLoad)),e.src=r,n.onNodeCreated&&n.onNodeCreated(e,n,i,r),currentlyAddingScript=e,baseElement?head.insertBefore(e,baseElement):head.appendChild(e),currentlyAddingScript=null,e;if(isWebWorker)try{setTimeout(function(){},0),importScripts(r),t.completeLoad(i)}catch(e){t.onError(makeError(\"importscripts\",\"importScripts failed for \"+i+\" at \"+r,e,[i]))}},isBrowser&&!cfg.skipDataMain&&eachReverse(scripts(),function(e){if(head=head||e.parentNode,dataMain=e.getAttribute(\"data-main\"))return mainScript=dataMain,cfg.baseUrl||-1!==mainScript.indexOf(\"!\")||(mainScript=(src=mainScript.split(\"/\")).pop(),subPath=src.length?src.join(\"/\")+\"/\":\"./\",cfg.baseUrl=subPath),mainScript=mainScript.replace(jsSuffixRegExp,\"\"),req.jsExtRegExp.test(mainScript)&&(mainScript=dataMain),cfg.deps=cfg.deps?cfg.deps.concat(mainScript):[mainScript],!0}),define=function(e,i,t){var r,n;\"string\"!=typeof e&&(t=i,i=e,e=null),isArray(i)||(t=i,i=null),!i&&isFunction(t)&&(i=[],t.length)&&(t.toString().replace(commentRegExp,commentReplace).replace(cjsRequireRegExp,function(e,t){i.push(t)}),i=(1===t.length?[\"require\"]:[\"require\",\"exports\",\"module\"]).concat(i)),useInteractive&&(r=currentlyAddingScript||getInteractiveScript())&&(e=e||r.getAttribute(\"data-requiremodule\"),n=contexts[r.getAttribute(\"data-requirecontext\")]),n?(n.defQueue.push([e,i,t]),n.defQueueMap[e]=!0):globalDefQueue.push([e,i,t])},define.amd={jQuery:!0},req.exec=function(text){return eval(text)},req(cfg)}function newContext(u){var t,e,f,c,i,b={waitSeconds:7,baseUrl:\"./\",paths:{},bundles:{},pkgs:{},shim:{},config:{}},d={},p={},r={},l=[],h={},n={},m={},g=1,x=1;function v(e,t,i){var 
r,n,o,a,s,u,c,d,p,f=t&&t.split(\"/\"),l=b.map,h=l&&l[\"*\"];if(e){t=(e=e.split(\"/\")).length-1,b.nodeIdCompat&&jsSuffixRegExp.test(e[t])&&(e[t]=e[t].replace(jsSuffixRegExp,\"\"));for(var m,g=e=\".\"===e[0].charAt(0)&&f?f.slice(0,f.length-1).concat(e):e,x=0;x<g.length;x++)\".\"===(m=g[x])?(g.splice(x,1),--x):\"..\"!==m||0===x||1===x&&\"..\"===g[2]||\"..\"===g[x-1]||0<x&&(g.splice(x-1,2),x-=2);e=e.join(\"/\")}if(i&&l&&(f||h)){e:for(o=(n=e.split(\"/\")).length;0<o;--o){if(s=n.slice(0,o).join(\"/\"),f)for(a=f.length;0<a;--a)if(r=(r=getOwn(l,f.slice(0,a).join(\"/\")))&&getOwn(r,s)){u=r,c=o;break e}!d&&h&&getOwn(h,s)&&(d=getOwn(h,s),p=o)}!u&&d&&(u=d,c=p),u&&(n.splice(0,c,u),e=n.join(\"/\"))}return getOwn(b.pkgs,e)||e}function q(t){isBrowser&&each(scripts(),function(e){if(e.getAttribute(\"data-requiremodule\")===t&&e.getAttribute(\"data-requirecontext\")===f.contextName)return e.parentNode.removeChild(e),!0})}function E(e){var t=getOwn(b.paths,e);return t&&isArray(t)&&1<t.length&&(t.shift(),f.require.undef(e),f.makeRequire(null,{skipMap:!0})([e]),1)}function w(e){var t,i=e?e.indexOf(\"!\"):-1;return-1<i&&(t=e.substring(0,i),e=e.substring(i+1,e.length)),[t,e]}function y(e,t,i,r){var n,o,a,s=null,u=t?t.name:null,c=e,d=!0,p=\"\";return e||(d=!1,e=\"_@r\"+(g+=1)),s=(a=w(e))[0],e=a[1],s&&(s=v(s,u,r),o=getOwn(h,s)),e&&(s?p=i?e:o&&o.normalize?o.normalize(e,function(e){return v(e,u,r)}):-1===e.indexOf(\"!\")?v(e,u,r):e:(s=(a=w(p=v(e,u,r)))[0],i=!0,n=f.nameToUrl(p=a[1]))),{prefix:s,name:p,parentMap:t,unnormalized:!!(e=!s||o||i?\"\":\"_unnormalized\"+(x+=1)),url:n,originalName:c,isDefine:d,id:(s?s+\"!\"+p:p)+e}}function S(e){var t=e.id;return getOwn(d,t)||(d[t]=new f.Module(e))}function k(e,t,i){var r=e.id,n=getOwn(d,r);!hasProp(h,r)||n&&!n.defineEmitComplete?(n=S(e)).error&&\"error\"===t?i(n.error):n.on(t,i):\"defined\"===t&&i(h[r])}function M(t,e){var 
i=t.requireModules,r=!1;e?e(t):(each(i,function(e){e=getOwn(d,e);e&&(e.error=t,e.events.error)&&(r=!0,e.emit(\"error\",t))}),r||req.onError(t))}function O(){globalDefQueue.length&&(each(globalDefQueue,function(e){var t=e[0];\"string\"==typeof t&&(f.defQueueMap[t]=!0),l.push(e)}),globalDefQueue=[])}function j(e){delete d[e],delete p[e]}function P(){var r,e=1e3*b.waitSeconds,n=e&&f.startTime+e<(new Date).getTime(),o=[],a=[],s=!1,u=!0;if(!t){if(t=!0,eachProp(p,function(e){var t=e.map,i=t.id;if(e.enabled&&(t.isDefine||a.push(e),!e.error))if(!e.inited&&n)E(i)?s=r=!0:(o.push(i),q(i));else if(!e.inited&&e.fetched&&t.isDefine&&(s=!0,!t.prefix))return u=!1}),n&&o.length)return(e=makeError(\"timeout\",\"Load timeout for modules: \"+o,null,o)).contextName=f.contextName,M(e);u&&each(a,function(e){!function r(n,o,a){var e=n.map.id;n.error?n.emit(\"error\",n.error):(o[e]=!0,each(n.depMaps,function(e,t){var e=e.id,i=getOwn(d,e);!i||n.depMatched[t]||a[e]||(getOwn(o,e)?(n.defineDep(t,h[e]),n.check()):r(i,o,a))}),a[e]=!0)}(e,{},{})}),n&&!r||!s||(isBrowser||isWebWorker)&&(i=i||setTimeout(function(){i=0,P()},50)),t=!1}}function a(e){hasProp(h,e[0])||S(y(e[0],null,!0)).init(e[1],e[2])}function o(e,t,i,r){e.detachEvent&&!isOpera?r&&e.detachEvent(r,t):e.removeEventListener(i,t,!1)}function s(e){e=e.currentTarget||e.srcElement;return o(e,f.onScriptLoad,\"load\",\"onreadystatechange\"),o(e,f.onScriptError,\"error\"),{node:e,id:e&&e.getAttribute(\"data-requiremodule\")}}function R(){var e;for(O();l.length;){if(null===(e=l.shift())[0])return M(makeError(\"mismatch\",\"Mismatched anonymous define() module: \"+e[e.length-1]));a(e)}f.defQueueMap={}}return c={require:function(e){return e.require||(e.require=f.makeRequire(e.map))},exports:function(e){if(e.usingExports=!0,e.map.isDefine)return e.exports?h[e.map.id]=e.exports:e.exports=h[e.map.id]={}},module:function(e){return e.module||(e.module={id:e.map.id,uri:e.map.url,config:function(){return 
getOwn(b.config,e.map.id)||{}},exports:e.exports||(e.exports={})})}},(e=function(e){this.events=getOwn(r,e.id)||{},this.map=e,this.shim=getOwn(b.shim,e.id),this.depExports=[],this.depMaps=[],this.depMatched=[],this.pluginMaps={},this.depCount=0}).prototype={init:function(e,t,i,r){r=r||{},this.inited||(this.factory=t,i?this.on(\"error\",i):this.events.error&&(i=bind(this,function(e){this.emit(\"error\",e)})),this.depMaps=e&&e.slice(0),this.errback=i,this.inited=!0,this.ignore=r.ignore,r.enabled||this.enabled?this.enable():this.check())},defineDep:function(e,t){this.depMatched[e]||(this.depMatched[e]=!0,--this.depCount,this.depExports[e]=t)},fetch:function(){if(!this.fetched){this.fetched=!0,f.startTime=(new Date).getTime();var e=this.map;if(!this.shim)return e.prefix?this.callPlugin():this.load();f.makeRequire(this.map,{enableBuildCallback:!0})(this.shim.deps||[],bind(this,function(){return e.prefix?this.callPlugin():this.load()}))}},load:function(){var e=this.map.url;n[e]||(n[e]=!0,f.load(this.map.id,e))},check:function(){if(this.enabled&&!this.enabling){var t,i,e=this.map.id,r=this.depExports,n=this.exports,o=this.factory;if(this.inited){if(this.error)this.emit(\"error\",this.error);else if(!this.defining){if(this.defining=!0,this.depCount<1&&!this.defined){if(isFunction(o)){if(this.events.error&&this.map.isDefine||req.onError!==defaultOnError)try{n=f.execCb(e,o,r,n)}catch(e){t=e}else n=f.execCb(e,o,r,n);if(this.map.isDefine&&void 0===n&&((r=this.module)?n=r.exports:this.usingExports&&(n=this.exports)),t)return t.requireMap=this.map,t.requireModules=this.map.isDefine?[this.map.id]:null,t.requireType=this.map.isDefine?\"define\":\"require\",M(this.error=t)}else 
n=o;this.exports=n,this.map.isDefine&&!this.ignore&&(h[e]=n,req.onResourceLoad)&&(i=[],each(this.depMaps,function(e){i.push(e.normalizedMap||e)}),req.onResourceLoad(f,this.map,i)),j(e),this.defined=!0}this.defining=!1,this.defined&&!this.defineEmitted&&(this.defineEmitted=!0,this.emit(\"defined\",this.exports),this.defineEmitComplete=!0)}}else hasProp(f.defQueueMap,e)||this.fetch()}},callPlugin:function(){var s=this.map,u=s.id,e=y(s.prefix);this.depMaps.push(e),k(e,\"defined\",bind(this,function(e){var o,t,i=getOwn(m,this.map.id),r=this.map.name,n=this.map.parentMap?this.map.parentMap.name:null,a=f.makeRequire(s.parentMap,{enableBuildCallback:!0});this.map.unnormalized?(e.normalize&&(r=e.normalize(r,function(e){return v(e,n,!0)})||\"\"),k(t=y(s.prefix+\"!\"+r,this.map.parentMap,!0),\"defined\",bind(this,function(e){this.map.normalizedMap=t,this.init([],function(){return e},null,{enabled:!0,ignore:!0})})),(r=getOwn(d,t.id))&&(this.depMaps.push(t),this.events.error&&r.on(\"error\",bind(this,function(e){this.emit(\"error\",e)})),r.enable())):i?(this.map.url=f.nameToUrl(i),this.load()):((o=bind(this,function(e){this.init([],function(){return e},null,{enabled:!0})})).error=bind(this,function(e){this.inited=!0,(this.error=e).requireModules=[u],eachProp(d,function(e){0===e.map.id.indexOf(u+\"_unnormalized\")&&j(e.map.id)}),M(e)}),o.fromText=bind(this,function(e,t){var i=s.name,r=y(i),n=useInteractive;t&&(e=t),n&&(useInteractive=!1),S(r),hasProp(b.config,u)&&(b.config[i]=b.config[u]);try{req.exec(e)}catch(e){return M(makeError(\"fromtexteval\",\"fromText eval for \"+u+\" failed: \"+e,e,[u]))}n&&(useInteractive=!0),this.depMaps.push(r),f.completeLoad(i),a([i],o)}),e.load(s.name,a,o,b))})),f.enable(e,this),this.pluginMaps[e.id]=e},enable:function(){(p[this.map.id]=this).enabled=!0,this.enabling=!0,each(this.depMaps,bind(this,function(e,t){var i,r;if(\"string\"==typeof 
e){if(e=y(e,this.map.isDefine?this.map:this.map.parentMap,!1,!this.skipMap),this.depMaps[t]=e,r=getOwn(c,e.id))return void(this.depExports[t]=r(this));this.depCount+=1,k(e,\"defined\",bind(this,function(e){this.undefed||(this.defineDep(t,e),this.check())})),this.errback?k(e,\"error\",bind(this,this.errback)):this.events.error&&k(e,\"error\",bind(this,function(e){this.emit(\"error\",e)}))}r=e.id,i=d[r],hasProp(c,r)||!i||i.enabled||f.enable(e,this)})),eachProp(this.pluginMaps,bind(this,function(e){var t=getOwn(d,e.id);t&&!t.enabled&&f.enable(e,this)})),this.enabling=!1,this.check()},on:function(e,t){(this.events[e]||(this.events[e]=[])).push(t)},emit:function(e,t){each(this.events[e],function(e){e(t)}),\"error\"===e&&delete this.events[e]}},(f={config:b,contextName:u,registry:d,defined:h,urlFetched:n,defQueue:l,defQueueMap:{},Module:e,makeModuleMap:y,nextTick:req.nextTick,onError:M,configure:function(e){e.baseUrl&&\"/\"!==e.baseUrl.charAt(e.baseUrl.length-1)&&(e.baseUrl+=\"/\"),\"string\"==typeof e.urlArgs&&(i=e.urlArgs,e.urlArgs=function(e,t){return(-1===t.indexOf(\"?\")?\"?\":\"&\")+i});var i,r=b.shim,n={paths:!0,bundles:!0,config:!0,map:!0};eachProp(e,function(e,t){n[t]?(b[t]||(b[t]={}),mixin(b[t],e,!0,!0)):b[t]=e}),e.bundles&&eachProp(e.bundles,function(e,t){each(e,function(e){e!==t&&(m[e]=t)})}),e.shim&&(eachProp(e.shim,function(e,t){!(e=isArray(e)?{deps:e}:e).exports&&!e.init||e.exportsFn||(e.exportsFn=f.makeShimExports(e)),r[t]=e}),b.shim=r),e.packages&&each(e.packages,function(e){var t=(e=\"string\"==typeof e?{name:e}:e).name;e.location&&(b.paths[t]=e.location),b.pkgs[t]=e.name+\"/\"+(e.main||\"main\").replace(currDirRegExp,\"\").replace(jsSuffixRegExp,\"\")}),eachProp(d,function(e,t){e.inited||e.map.unnormalized||(e.map=y(t,null,!0))}),(e.deps||e.callback)&&f.require(e.deps||[],e.callback)},makeShimExports:function(t){return function(){var 
e;return(e=t.init?t.init.apply(global,arguments):e)||t.exports&&getGlobal(t.exports)}},makeRequire:function(o,a){function s(e,t,i){var r,n;return a.enableBuildCallback&&t&&isFunction(t)&&(t.__requireJsBuild=!0),\"string\"==typeof e?isFunction(t)?M(makeError(\"requireargs\",\"Invalid require call\"),i):o&&hasProp(c,e)?c[e](d[o.id]):req.get?req.get(f,e,o,s):(r=y(e,o,!1,!0).id,hasProp(h,r)?h[r]:M(makeError(\"notloaded\",'Module name \"'+r+'\" has not been loaded yet for context: '+u+(o?\"\":\". Use require([])\")))):(R(),f.nextTick(function(){R(),(n=S(y(null,o))).skipMap=a.skipMap,n.init(e,t,i,{enabled:!0}),P()}),s)}return a=a||{},mixin(s,{isBrowser:isBrowser,toUrl:function(e){var t,i=e.lastIndexOf(\".\"),r=e.split(\"/\")[0];return-1!==i&&(!(\".\"===r||\"..\"===r)||1<i)&&(t=e.substring(i,e.length),e=e.substring(0,i)),f.nameToUrl(v(e,o&&o.id,!0),t,!0)},defined:function(e){return hasProp(h,y(e,o,!1,!0).id)},specified:function(e){return e=y(e,o,!1,!0).id,hasProp(h,e)||hasProp(d,e)}}),o||(s.undef=function(i){O();var e=y(i,o,!0),t=getOwn(d,i);t.undefed=!0,q(i),delete h[i],delete n[e.url],delete r[i],eachReverse(l,function(e,t){e[0]===i&&l.splice(t,1)}),delete f.defQueueMap[i],t&&(t.events.defined&&(r[i]=t.events),j(i))}),s},enable:function(e){getOwn(d,e.id)&&S(e).enable()},completeLoad:function(e){var t,i,r,n=getOwn(b.shim,e)||{},o=n.exports;for(O();l.length;){if(null===(i=l.shift())[0]){if(i[0]=e,t)break;t=!0}else i[0]===e&&(t=!0);a(i)}if(f.defQueueMap={},r=getOwn(d,e),!t&&!hasProp(h,e)&&r&&!r.inited){if(!(!b.enforceDefine||o&&getGlobal(o)))return E(e)?void 0:M(makeError(\"nodefine\",\"No define call for \"+e,null,[e]));a([e,n.deps||[],n.exportsFn])}P()},nameToUrl:function(e,t,i){var r,n,o,a,s,u=getOwn(b.pkgs,e);if(u=getOwn(m,e=u?u:e))return 
f.nameToUrl(u,t,i);if(req.jsExtRegExp.test(e))a=e+(t||\"\");else{for(r=b.paths,o=(n=e.split(\"/\")).length;0<o;--o)if(s=getOwn(r,n.slice(0,o).join(\"/\"))){isArray(s)&&(s=s[0]),n.splice(0,o,s);break}a=n.join(\"/\"),a=(\"/\"===(a+=t||(/^data\\:|^blob\\:|\\?/.test(a)||i?\"\":\".js\")).charAt(0)||a.match(/^[\\w\\+\\.\\-]+:/)?\"\":b.baseUrl)+a}return b.urlArgs&&!/^blob\\:/.test(a)?a+b.urlArgs(e,a):a},load:function(e,t){req.load(f,e,t)},execCb:function(e,t,i,r){return t.apply(r,i)},onScriptLoad:function(e){\"load\"!==e.type&&!readyRegExp.test((e.currentTarget||e.srcElement).readyState)||(interactiveScript=null,e=s(e),f.completeLoad(e.id))},onScriptError:function(e){var i,r=s(e);if(!E(r.id))return i=[],eachProp(d,function(e,t){0!==t.indexOf(\"_@r\")&&each(e.depMaps,function(e){if(e.id===r.id)return i.push(t),!0})}),M(makeError(\"scripterror\",'Script error for \"'+r.id+(i.length?'\", needed by: '+i.join(\", \"):'\"'),e,[r.id]))}}).require=f.makeRequire(),f}function getInteractiveScript(){return interactiveScript&&\"interactive\"===interactiveScript.readyState||eachReverse(scripts(),function(e){if(\"interactive\"===e.readyState)return interactiveScript=e}),interactiveScript}}(this,\"undefined\"==typeof setTimeout?void 0:setTimeout);\n"
  },
  {
    "path": "docs/source/_static/js/termynal.js",
    "content": "/*\n\nThe original author of the file is Ines Montani.\n\ntermynal.js\nA lightweight, modern and extensible animated terminal window, using\nasync/await.\n\n@author Ines Montani <ines@ines.io>\n@version 0.0.1\n@license MIT\n\nAdditions were made by https://github.com/tiangolo/typer.\n\nThe MIT License (MIT)\n\nCopyright (c) 2019 Sebastián Ramírez\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\n*/\n\n'use strict';\n\n/** Generate a terminal widget. 
*/\nclass Termynal {\n    /**\n     * Construct the widget's settings.\n     * @param {(string|Node)=} container - Query selector or container element.\n     * @param {Object=} options - Custom settings.\n     * @param {string} options.prefix - Prefix to use for data attributes.\n     * @param {number} options.startDelay - Delay before animation, in ms.\n     * @param {number} options.typeDelay - Delay between each typed character, in ms.\n     * @param {number} options.lineDelay - Delay between each line, in ms.\n     * @param {number} options.progressLength - Number of characters displayed as progress bar.\n     * @param {string} options.progressChar – Character to use for progress bar, defaults to █.\n\t * @param {number} options.progressPercent - Max percent of progress.\n     * @param {string} options.cursor – Character to use for cursor, defaults to ▋.\n     * @param {Object[]} lineData - Dynamically loaded line data objects.\n     * @param {boolean} options.noInit - Don't initialise the animation.\n     */\n    constructor(container = '#termynal', options = {}) {\n        this.container = (typeof container === 'string') ? 
document.querySelector(container) : container;\n        this.pfx = `data-${options.prefix || 'ty'}`;\n        this.originalStartDelay = this.startDelay = options.startDelay\n            || parseFloat(this.container.getAttribute(`${this.pfx}-startDelay`)) || 600;\n        this.originalTypeDelay = this.typeDelay = options.typeDelay\n            || parseFloat(this.container.getAttribute(`${this.pfx}-typeDelay`)) || 90;\n        this.originalLineDelay = this.lineDelay = options.lineDelay\n            || parseFloat(this.container.getAttribute(`${this.pfx}-lineDelay`)) || 1500;\n        this.progressLength = options.progressLength\n            || parseFloat(this.container.getAttribute(`${this.pfx}-progressLength`)) || 40;\n        this.progressChar = options.progressChar\n            || this.container.getAttribute(`${this.pfx}-progressChar`) || '█';\n\t\tthis.progressPercent = options.progressPercent\n            || parseFloat(this.container.getAttribute(`${this.pfx}-progressPercent`)) || 100;\n        this.cursor = options.cursor\n            || this.container.getAttribute(`${this.pfx}-cursor`) || '▋';\n        this.lineData = this.lineDataToElements(options.lineData || []);\n        this.loadLines()\n        if (!options.noInit) this.init()\n    }\n\n    loadLines() {\n        // Load all the lines and create the container so that the size is fixed\n        // Otherwise it would be changing and the user viewport would be constantly\n        // moving as she/he scrolls\n        const finish = this.generateFinish()\n        finish.style.visibility = 'hidden'\n        this.container.appendChild(finish)\n        // Appends dynamically loaded lines to existing line elements.\n        this.lines = [...this.container.querySelectorAll(`[${this.pfx}]`)].concat(this.lineData);\n        for (let line of this.lines) {\n            line.style.visibility = 'hidden'\n            this.container.appendChild(line)\n        }\n        const restart = this.generateRestart()\n        
restart.style.visibility = 'hidden'\n        this.container.appendChild(restart)\n        this.container.setAttribute('data-termynal', '');\n    }\n\n    /**\n     * Initialise the widget, get lines, clear container and start animation.\n     */\n    init() {\n        /**\n         * Calculates width and height of Termynal container.\n         * If container is empty and lines are dynamically loaded, defaults to browser `auto` or CSS.\n         */\n        const containerStyle = getComputedStyle(this.container);\n        this.container.style.width = containerStyle.width !== '0px' ?\n            containerStyle.width : undefined;\n        this.container.style.minHeight = containerStyle.height !== '0px' ?\n            containerStyle.height : undefined;\n\n        this.container.setAttribute('data-termynal', '');\n        this.container.innerHTML = '';\n        for (let line of this.lines) {\n            line.style.visibility = 'visible'\n        }\n        this.start();\n    }\n\n    /**\n     * Start the animation and rener the lines depending on their data attributes.\n     */\n    async start() {\n        this.addFinish()\n        await this._wait(this.startDelay);\n\n        for (let line of this.lines) {\n            const type = line.getAttribute(this.pfx);\n            const delay = line.getAttribute(`${this.pfx}-delay`) || this.lineDelay;\n\n            if (type == 'input') {\n                line.setAttribute(`${this.pfx}-cursor`, this.cursor);\n                await this.type(line);\n                await this._wait(delay);\n            }\n\n            else if (type == 'progress') {\n                await this.progress(line);\n                await this._wait(delay);\n            }\n\n            else {\n                this.container.appendChild(line);\n                await this._wait(delay);\n            }\n\n            line.removeAttribute(`${this.pfx}-cursor`);\n        }\n        this.addRestart()\n        this.finishElement.style.visibility = 
'hidden'\n        this.lineDelay = this.originalLineDelay\n        this.typeDelay = this.originalTypeDelay\n        this.startDelay = this.originalStartDelay\n    }\n\n    generateRestart() {\n        const restart = document.createElement('a')\n        restart.onclick = (e) => {\n            e.preventDefault()\n            this.container.innerHTML = ''\n            this.init()\n        }\n        restart.href = '#'\n        restart.setAttribute('data-terminal-control', '')\n        restart.innerHTML = \"restart ↻\"\n        return restart\n    }\n\n    generateFinish() {\n        const finish = document.createElement('a')\n        finish.onclick = (e) => {\n            e.preventDefault()\n            this.lineDelay = 0\n            this.typeDelay = 0\n            this.startDelay = 0\n        }\n        finish.href = '#'\n        finish.setAttribute('data-terminal-control', '')\n        finish.innerHTML = \"fast →\"\n        this.finishElement = finish\n        return finish\n    }\n\n    addRestart() {\n        const restart = this.generateRestart()\n        this.container.appendChild(restart)\n    }\n\n    addFinish() {\n        const finish = this.generateFinish()\n        this.container.appendChild(finish)\n    }\n\n    /**\n     * Animate a typed line.\n     * @param {Node} line - The line element to render.\n     */\n    async type(line) {\n        const chars = [...line.textContent];\n        line.textContent = '';\n        this.container.appendChild(line);\n\n        for (let char of chars) {\n            const delay = line.getAttribute(`${this.pfx}-typeDelay`) || this.typeDelay;\n            await this._wait(delay);\n            line.textContent += char;\n        }\n    }\n\n    /**\n     * Animate a progress bar.\n     * @param {Node} line - The line element to render.\n     */\n    async progress(line) {\n        const progressLength = line.getAttribute(`${this.pfx}-progressLength`)\n            || this.progressLength;\n        const progressChar = 
line.getAttribute(`${this.pfx}-progressChar`)\n            || this.progressChar;\n        const chars = progressChar.repeat(progressLength);\n\t\tconst progressPercent = line.getAttribute(`${this.pfx}-progressPercent`)\n\t\t\t|| this.progressPercent;\n        line.textContent = '';\n        this.container.appendChild(line);\n\n        for (let i = 1; i < chars.length + 1; i++) {\n            await this._wait(this.typeDelay);\n            const percent = Math.round(i / chars.length * 100);\n            line.textContent = `${chars.slice(0, i)} ${percent}%`;\n\t\t\tif (percent>progressPercent) {\n\t\t\t\tbreak;\n\t\t\t}\n        }\n    }\n\n    /**\n     * Helper function for animation delays, called with `await`.\n     * @param {number} time - Timeout, in ms.\n     */\n    _wait(time) {\n        return new Promise(resolve => setTimeout(resolve, time));\n    }\n\n    /**\n     * Converts line data objects into line elements.\n     *\n     * @param {Object[]} lineData - Dynamically loaded lines.\n     * @param {Object} line - Line data object.\n     * @returns {Element[]} - Array of line elements.\n     */\n    lineDataToElements(lineData) {\n        return lineData.map(line => {\n            let div = document.createElement('div');\n            div.innerHTML = `<span ${this._attributes(line)}>${line.value || ''}</span>`;\n\n            return div.firstElementChild;\n        });\n    }\n\n    /**\n     * Helper function for generating attributes string.\n     *\n     * @param {Object} line - Line data object.\n     * @returns {string} - String of attributes.\n     */\n    _attributes(line) {\n        let attrs = '';\n        for (let prop in line) {\n            // Custom add class\n            if (prop === 'class') {\n                attrs += ` class=${line[prop]} `\n                continue\n            }\n            if (prop === 'type') {\n                attrs += `${this.pfx}=\"${line[prop]}\" `\n            } else if (prop !== 'value') {\n                attrs += 
`${this.pfx}-${prop}=\"${line[prop]}\" `\n            }\n        }\n\n        return attrs;\n    }\n}\n\n/**\n* HTML API: If current script has container(s) specified, initialise Termynal.\n*/\nif (document.currentScript.hasAttribute('data-termynal-container')) {\n    const containers = document.currentScript.getAttribute('data-termynal-container');\n    containers.split('|')\n        .forEach(container => new Termynal(container))\n}\n"
  },
  {
    "path": "docs/source/algorithms.md",
    "content": "(list_of_algorithms)=\n\n# Optimizers\n\nCheck out {ref}`how-to-select-algorithms` to see how to select an algorithm and specify\n`algo_options` when using `maximize` or `minimize`. The default algorithm options are\ndiscussed in {ref}`algo_options` and their type hints are documented in {ref}`typing`.\n\n## Optimizers from SciPy\n\n(scipy-algorithms)=\n\noptimagic supports most [SciPy](https://scipy.org/) algorithms and SciPy is\nautomatically installed when you install optimagic.\n\n```{eval-rst}\n.. dropdown::  scipy_lbfgsb\n\n    **How to use this algorithm:**\n\n    .. code-block::\n\n        import optimagic as om\n        om.minimize(\n          ...,\n          algorithm=om.algos.scipy_lbfgsb(stopping_maxiter=1_000, ...)\n        )\n        \n    or\n        \n    .. code-block::\n\n        om.minimize(\n          ...,\n          algorithm=\"scipy_lbfgsb\",\n          algo_options={\"stopping_maxiter\": 1_000, ...}\n        )\n\n    **Description and available options:**\n\n    .. autoclass:: optimagic.optimizers.scipy_optimizers.ScipyLBFGSB\n\n```\n\n```{eval-rst}\n.. dropdown::  scipy_slsqp\n\n    .. code-block::\n\n        \"scipy_slsqp\"\n\n    Minimize a scalar function of one or more variables using the SLSQP algorithm.\n\n    SLSQP stands for Sequential Least Squares Programming.\n\n    SLSQP is a line search algorithm. It is well suited for continuously\n    differentiable scalar optimization problems with up to several hundred parameters.\n\n    The optimizer is taken from scipy which wraps the SLSQP optimization subroutine\n    originally implemented by :cite:`Kraft1988`.\n\n    .. 
note::\n        SLSQP's general nonlinear constraints are not supported yet by optimagic.\n\n    - **convergence.ftol_abs** (float): Precision goal for the value of\n      f in the stopping criterion.\n    - **stopping.maxiter** (int): If the maximum number of iterations is reached,\n      the optimization stops, but we do not count this as convergence.\n    - **display** (bool): Set to True to print convergence messages. Default is False. Scipy name: **disp**.\n\n```\n\n```{eval-rst}\n.. dropdown::  scipy_neldermead\n\n    .. code-block::\n\n      \"scipy_neldermead\"\n\n    Minimize a scalar function using the Nelder-Mead algorithm.\n\n    The Nelder-Mead algorithm is a direct search method (based on function comparison)\n    and is often applied to nonlinear optimization problems for which derivatives are\n    not known.\n    Unlike most modern optimization methods, the Nelder–Mead heuristic can converge to\n    a non-stationary point, unless the problem satisfies stronger conditions than are\n    necessary for modern methods.\n\n    Nelder-Mead is never the best algorithm to solve a problem but rarely the worst.\n    Its popularity is likely due to historic reasons and much larger than its\n    properties warrant.\n\n    The argument `initial_simplex` is not supported by optimagic as it is not\n    compatible with optimagic's handling of constraints.\n\n    - **stopping.maxiter** (int): If the maximum number of iterations is reached, the optimization stops,\n      but we do not count this as convergence.\n    - **stopping.maxfun** (int): If the maximum number of function evaluation is reached,\n      the optimization stops but we do not count this as convergence.\n    - **convergence.xtol_abs** (float): Absolute difference in parameters between iterations\n      that is tolerated to declare convergence. 
As no relative tolerances can be passed to Nelder-Mead,\n      optimagic sets a non zero default for this.\n    - **convergence.ftol_abs** (float): Absolute difference in the criterion value between\n      iterations that is tolerated to declare convergence. As no relative tolerances can be passed to Nelder-Mead,\n      optimagic sets a non zero default for this.\n    - **display** (bool): Set to True to print convergence messages. Default is False. SciPy name: **disp**.\n    - **adaptive** (bool): Adapt algorithm parameters to dimensionality of problem.\n      Useful for high-dimensional minimization (:cite:`Gao2012`, p. 259-277). scipy's default is False.\n\n```\n\n```{eval-rst}\n.. dropdown::  scipy_powell\n\n   .. code-block::\n\n       \"scipy_powell\"\n\n   Minimize a scalar function using the modified Powell method.\n\n    .. warning::\n        In our benchmark using a quadratic objective function, the Powell algorithm\n        did not find the optimum very precisely (less than 4 decimal places).\n        If you require high precision, you should refine an optimum found with Powell\n        with another local optimizer.\n\n    The criterion function need not be differentiable.\n\n    Powell's method is a conjugate direction method, minimizing the function by a\n    bi-directional search in each parameter's dimension.\n\n    The argument ``direc``, which is the initial set of direction vectors and which\n    is part of the scipy interface is not supported by optimagic because it is\n    incompatible with how optimagic handles constraints.\n\n    - **convergence.xtol_rel (float)**: Stop when the relative movement between parameter\n      vectors is smaller than this.\n    - **convergence.ftol_rel** (float): Stop when the relative improvement between two\n      iterations is smaller than this. More formally, this is expressed as\n\n        .. 
math::\n\n            \\frac{(f^k - f^{k+1})}{\\\\max{{\\{|f^k|, |f^{k+1}|, 1\\}}}} \\leq\n            \\text{relative_criterion_tolerance}\n\n    - **stopping.maxfun** (int): If the maximum number of function evaluation is reached,\n      the optimization stops but we do not count thisas convergence.\n    - **stopping.maxiter** (int): If the maximum number of iterations is reached, the optimization stops,\n      but we do not count this as convergence.\n    - **display** (bool): Set to True to print convergence messages. Default is False. SciPy name: **disp**.\n\n```\n\n```{eval-rst}\n.. dropdown::  scipy_bfgs\n\n    .. code-block::\n\n        \"scipy_bfgs\"\n\n    Minimize a scalar function of one or more variables using the BFGS algorithm.\n\n    BFGS stands for Broyden-Fletcher-Goldfarb-Shanno algorithm. It is a quasi-Newton\n    method that can be used for solving unconstrained nonlinear optimization problems.\n\n    BFGS is not guaranteed to converge unless the function has a quadratic Taylor\n    expansion near an optimum. However, BFGS can have acceptable performance even\n    for non-smooth optimization instances.\n\n    - **convergence.gtol_abs** (float): Stop if all elements of the gradient are smaller than this.\n    - **stopping.maxiter** (int): If the maximum number of iterations is reached, the optimization stops,\n      but we do not count this as convergence.\n    - **norm** (float): Order of the vector norm that is used to calculate the gradient's \"score\" that\n      is compared to the gradient tolerance to determine convergence. Default is infinite which means that\n      the largest entry of the gradient vector is compared to the gradient tolerance.\n    - **display** (bool): Set to True to print convergence messages. Default is False. SciPy name: **disp**.\n    - **convergence_xtol_rel** (float): Relative tolerance for `x`. Terminate successfully if step size is less than `xk * xrtol` where `xk` is the current parameter vector. 
Default is 1e-5. SciPy name: **xrtol**.\n    - **armijo_condition** (float): Parameter for Armijo condition rule. Default is 1e-4. Ensures \n\n        .. math::\n\n            f(x_k+\\alpha p_k) \\le f(x_k) \\;+\\mathrm{armijo\\_condition}\\,\\cdot\\,\\alpha\\,\\nabla f(x_k)^\\top p_k, \n        \n      so each step yields at least a fraction **armijo_condition** of the predicted decrease. Smaller ⇒ more aggressive steps, larger ⇒ more conservative ones. SciPy name: **c1**.\n    - **curvature_condition** (float): Parameter for curvature condition rule. Default is 0.9. Ensures \n      \n        .. math::\n\n            \\nabla f(x_k+\\alpha p_k)^\\top p_k \\ge \\mathrm{curvature\\_condition}\\,\\cdot\\,\\nabla f(x_k)^\\top p_k, \n        \n      so the new slope isn’t too negative. Smaller ⇒ stricter curvature reduction (smaller steps), larger ⇒ looser (bigger steps). SciPy name: **c2**.\n```\n\n```{eval-rst}\n.. dropdown::  scipy_conjugate_gradient\n\n    .. code-block::\n\n        \"scipy_conjugate_gradient\"\n\n    Minimize a function using a nonlinear conjugate gradient algorithm.\n\n    The conjugate gradient method finds functions' local optima using just the gradient.\n\n    This conjugate gradient algorithm is based on that of Polak and Ribiere, detailed\n    in :cite:`Nocedal2006`, pp. 
120-122.\n\n    Conjugate gradient methods tend to work better when:\n\n      - the criterion has a unique global minimizing point, and no local minima or\n        other stationary points.\n      - the criterion is, at least locally, reasonably well approximated by a\n        quadratic function.\n      - the criterion is continuous and has a continuous gradient.\n      - the gradient is not too large, e.g., has a norm less than 1000.\n      - The initial guess is reasonably close to the criterion's global minimizer.\n\n    - **convergence.gtol_abs** (float): Stop if all elements of the\n      gradient are smaller than this.\n    - **stopping.maxiter** (int): If the maximum number of iterations is reached,\n      the optimization stops, but we do not count this as convergence.\n    - **norm** (float): Order of the vector norm that is used to calculate the gradient's\n      \"score\" that is compared to the gradient tolerance to determine convergence.\n      Default is infinite which means that the largest entry of the gradient vector\n      is compared to the gradient tolerance.\n    - **display** (bool): Set to True to print convergence messages. Default is False. SciPy name: **disp**.\n\n```\n\n```{eval-rst}\n.. dropdown::  scipy_newton_cg\n\n    .. code-block::\n\n        \"scipy_newton_cg\"\n\n    Minimize a scalar function using Newton's conjugate gradient algorithm.\n\n    .. warning::\n        In our benchmark using a quadratic objective function, the truncated newton\n        algorithm did not find the optimum very precisely (less than 4 decimal places).\n        If you require high precision, you should refine an optimum found with Powell\n        with another local optimizer.\n\n    Newton's conjugate gradient algorithm uses an approximation of the Hessian to find\n    the minimum of a function. It is practical for small and large problems\n    (see :cite:`Nocedal2006`, p. 140).\n\n    Newton-CG methods are also called truncated Newton methods. 
This function differs\n    scipy_truncated_newton because\n\n    - ``scipy_newton_cg``'s algorithm is written purely in Python using NumPy\n      and scipy while ``scipy_truncated_newton``'s algorithm calls a C function.\n\n    - ``scipy_newton_cg``'s algorithm is only for unconstrained minimization\n      while ``scipy_truncated_newton``'s algorithm supports bounds.\n\n    Conjugate gradient methods tend to work better when:\n\n      - the criterion has a unique global minimizing point, and no local minima or\n        other stationary points.\n      - the criterion is, at least locally, reasonably well approximated by a\n        quadratic function.\n      - the criterion is continuous and has a continuous gradient.\n      - the gradient is not too large, e.g., has a norm less than 1000.\n      - The initial guess is reasonably close to the criterion's global minimizer.\n\n    - **convergence.xtol_rel** (float): Stop when the relative movement\n      between parameter vectors is smaller than this. Newton CG uses the average\n      relative change in the parameters for determining the convergence.\n    - **stopping.maxiter** (int): If the maximum number of iterations is reached,\n      the optimization stops, but we do not count this as convergence.\n    - **display** (bool): Set to True to print convergence messages. Default is False. SciPy name: **disp**.\n\n\n\n```\n\n```{eval-rst}\n.. dropdown::  scipy_cobyla\n\n  .. code-block::\n\n      \"scipy_cobyla\"\n\n  Minimize a scalar function of one or more variables using the COBYLA algorithm.\n\n  COBYLA stands for Constrained Optimization By Linear Approximation.\n  It is derivative-free and supports nonlinear inequality and equality constraints.\n\n  .. 
note::\n      Cobyla's general nonlinear constraints is not supported yet by optimagic.\n\n  Scipy's implementation wraps the FORTRAN implementation of the algorithm.\n\n  For more information on COBYLA see :cite:`Powell1994`, :cite:`Powell1998` and\n  :cite:`Powell2007`.\n\n  - **stopping.maxiter** (int): If the maximum number of iterations is reached,\n    the optimization stops, but we do not count this as convergence.\n  - **convergence.xtol_rel** (float): Stop when the relative movement\n    between parameter vectors is smaller than this. In case of COBYLA this is\n    a lower bound on the size of the trust region and can be seen as the\n    required accuracy in the variables but this accuracy is not guaranteed.\n  - **trustregion.initial_radius** (float): Initial value of the trust region radius.\n    Since a linear approximation is likely only good near the current simplex,\n    the linear program is given the further requirement that the solution,\n    which will become the next evaluation point must be within a radius\n    RHO_j from x_j. RHO_j only decreases, never increases. The initial RHO_j is\n    the `trustregion.initial_radius`. In this way COBYLA's iterations behave\n    like a trust region algorithm.\n  - **display** (bool): Set to True to print convergence messages. Default is False. SciPy name: **disp**.\n\n```\n\n```{eval-rst}\n.. dropdown::  scipy_truncated_newton\n\n    .. 
code-block::\n\n        \"scipy_truncated_newton\"\n\n    Minimize a scalar function using truncated Newton algorithm.\n\n    This function differs from scipy_newton_cg because\n\n    - ``scipy_newton_cg``'s algorithm is written purely in Python using NumPy\n      and scipy while ``scipy_truncated_newton``'s algorithm calls a C function.\n\n    - ``scipy_newton_cg``'s algorithm is only for unconstrained minimization\n      while ``scipy_truncated_newton``'s algorithm supports bounds.\n\n    Conjugate gradient methods tend to work better when:\n\n    - the criterion has a unique global minimizing point, and no local minima or\n      other stationary points.\n    - the criterion is, at least locally, reasonably well approximated by a\n      quadratic function.\n    - the criterion is continuous and has a continuous gradient.\n    - the gradient is not too large, e.g., has a norm less than 1000.\n    - The initial guess is reasonably close to the criterion's global minimizer.\n\n    optimagic does not support the ``scale``  nor ``offset`` argument as they are not\n    compatible with the way optimagic handles constraints. It also does not support\n    ``messg_num`` which is an additional way to control the verbosity of the optimizer.\n\n    - **func_min_estimate** (float): Minimum function value estimate. 
Defaults to 0.\n    - **stopping.maxiter** (int): If the maximum number of iterations is reached,\n      the optimization stops, but we do not count this as convergence.\n    - **stopping.maxfun** (int): If the maximum number of function\n      evaluation is reached, the optimization stops but we do not count this as\n      convergence.\n    - **convergence.xtol_abs** (float): Absolute difference in parameters\n      between iterations after scaling that is tolerated to declare convergence.\n    - **convergence.ftol_abs** (float): Absolute difference in the\n      criterion value between iterations after scaling that is tolerated\n      to declare convergence.\n    - **convergence.gtol_abs** (float): Stop if the value of the\n      projected gradient (after applying x scaling factors) is smaller than this.\n      If convergence.gtol_abs < 0.0,\n      convergence.gtol_abs is set to\n      1e-2 * sqrt(accuracy).\n    - **max_hess_evaluations_per_iteration** (int): Maximum number of hessian*vector\n      evaluations per main iteration. If ``max_hess_evaluations == 0``, the\n      direction chosen is ``- gradient``. If ``max_hess_evaluations < 0``,\n      ``max_hess_evaluations`` is set to ``max(1,min(50,n/2))`` where n is the\n      length of the parameter vector. This is also the default.\n    - **max_step_for_line_search** (float): Maximum step for the line search.\n      It may be increased during the optimization. If too small, it will be set\n      to 10.0. By default we use scipy's default.\n    - **line_search_severity** (float): Severity of the line search. If < 0 or > 1,\n      set to 0.25. optimagic defaults to scipy's default.\n    - **finitie_difference_precision** (float): Relative precision for finite difference\n      calculations. If <= machine_precision, set to sqrt(machine_precision).\n      optimagic defaults to scipy's default.\n    - **criterion_rescale_factor** (float): Scaling factor (in log10) used to trigger\n      criterion rescaling. 
If 0, rescale at each iteration. If a large value,\n      never rescale. If < 0, rescale is set to 1.3. optimagic defaults to scipy's\n      default.\n    - **display** (bool): Set to True to print convergence messages. Default is False. SciPy name: **disp**.\n\n\n```\n\n```{eval-rst}\n.. dropdown::  scipy_trust_constr\n\n    .. code-block::\n\n        \"scipy_trust_constr\"\n\n    Minimize a scalar function of one or more variables subject to constraints.\n\n    .. warning::\n        In our benchmark using a quadratic objective function, the trust_constr\n        algorithm did not find the optimum very precisely (less than 4 decimal places).\n        If you require high precision, you should refine an optimum found with trust_constr\n        with another local optimizer.\n\n    .. note::\n        Its general nonlinear constraints' handling is not supported yet by optimagic.\n\n    It switches between two implementations depending on the problem definition.\n    It is the most versatile constrained minimization algorithm\n    implemented in SciPy and the most appropriate for large-scale problems.\n    For equality constrained problems it is an implementation of Byrd-Omojokun\n    Trust-Region SQP method described in :cite:`Lalee1998` and in :cite:`Conn2000`,\n    p. 549. 
When inequality constraints  are imposed as well, it switches to the\n    trust-region interior point method described in :cite:`Byrd1999`.\n    This interior point algorithm in turn, solves inequality constraints by\n    introducing slack variables and solving a sequence of equality-constrained\n    barrier problems for progressively smaller values of the barrier parameter.\n    The previously described equality constrained SQP method is\n    used to solve the subproblems with increasing levels of accuracy\n    as the iterate gets closer to a solution.\n\n    It approximates the Hessian using the Broyden-Fletcher-Goldfarb-Shanno (BFGS)\n    Hessian update strategy.\n\n    - **convergence.gtol_abs** (float): Tolerance for termination\n      by the norm of the Lagrangian gradient. The algorithm will terminate\n      when both the infinity norm (i.e., max abs value) of the Lagrangian\n      gradient and the constraint violation are smaller than the\n      convergence.gtol_abs.\n      For this algorithm we use scipy's gradient tolerance for trust_constr.\n      This smaller tolerance is needed for the sum of squares tests to pass.\n    - **stopping.maxiter** (int): If the maximum number of iterations is reached,\n      the optimization stops, but we do not count this as convergence.\n    - **convergence.xtol_rel** (float): Tolerance for termination by\n      the change of the independent variable. The algorithm will terminate when\n      the radius of the trust region used in the algorithm is smaller than the\n      convergence.xtol_rel.\n    - **trustregion.initial_radius** (float): Initial value of the trust region radius.\n      The trust radius gives the maximum distance between solution points in\n      consecutive iterations. It reflects the trust the algorithm puts in the\n      local approximation of the optimization problem. 
For an accurate local\n      approximation the trust-region should be large and for an approximation\n      valid only close to the current point it should be a small one.\n      The trust radius is automatically updated throughout the optimization\n      process, with ``trustregion_initial_radius`` being its initial value.\n    - **display** (bool): Set to True to print convergence messages. Default is False. SciPy name: **disp**.\n\n```\n\n```{eval-rst}\n.. dropdown::  scipy_ls_dogbox\n\n    .. code-block::\n\n        \"scipy_ls_dogbox\"\n\n    Minimize a nonlinear least squares problem using a rectangular trust region method.\n\n    Typical use case is small problems with bounds. Not recommended for problems with\n    rank-deficient Jacobian.\n\n    The algorithm supports the following options:\n\n    - **convergence.ftol_rel** (float): Stop when the relative\n      improvement between two iterations is below this.\n    - **convergence.gtol_rel** (float): Stop when the gradient,\n      divided by the absolute value of the criterion function is smaller than this.\n    - **stopping.maxfun** (int): If the maximum number of function\n      evaluation is reached, the optimization stops but we do not count this as\n      convergence.\n    - **tr_solver** (str): Method for solving trust-region subproblems, relevant only\n      for 'trf' and 'dogbox' methods.\n\n      - 'exact' is suitable for not very large problems with dense\n        Jacobian matrices. The computational complexity per iteration is\n        comparable to a singular value decomposition of the Jacobian\n        matrix.\n      - 'lsmr' is suitable for problems with sparse and large Jacobian\n        matrices. 
It uses the iterative procedure\n        `scipy.sparse.linalg.lsmr` for finding a solution of a linear\n        least-squares problem and only requires matrix-vector product\n        evaluations.\n        If None (default), the solver is chosen based on the type of Jacobian\n        returned on the first iteration.\n    - **tr_solver_options** (dict):  Keyword options passed to trust-region solver.\n\n      - ``tr_solver='exact'``: `tr_options` are ignored.\n      - ``tr_solver='lsmr'``: options for `scipy.sparse.linalg.lsmr`.\n\n```\n\n```{eval-rst}\n.. dropdown::  scipy_ls_trf\n\n    .. code-block::\n\n        \"scipy_ls_trf\"\n\n    Minimize a nonlinear least squares problem using a trustregion reflective method.\n\n    Trust Region Reflective algorithm, particularly suitable for large sparse problems\n    with bounds. Generally robust method.\n\n    The algorithm supports the following options:\n\n    - **convergence.ftol_rel** (float): Stop when the relative\n      improvement between two iterations is below this.\n    - **convergence.gtol_rel** (float): Stop when the gradient,\n      divided by the absolute value of the criterion function is smaller than this.\n    - **stopping.maxfun** (int): If the maximum number of function\n      evaluation is reached, the optimization stops but we do not count this as\n      convergence.\n    - **tr_solver** (str): Method for solving trust-region subproblems, relevant only\n      for 'trf' and 'dogbox' methods.\n\n      - 'exact' is suitable for not very large problems with dense\n        Jacobian matrices. The computational complexity per iteration is\n        comparable to a singular value decomposition of the Jacobian\n        matrix.\n      - 'lsmr' is suitable for problems with sparse and large Jacobian\n        matrices. 
It uses the iterative procedure\n        `scipy.sparse.linalg.lsmr` for finding a solution of a linear\n        least-squares problem and only requires matrix-vector product\n        evaluations.\n        If None (default), the solver is chosen based on the type of Jacobian\n        returned on the first iteration.\n    - **tr_solver_options** (dict):  Keyword options passed to trust-region solver.\n\n      - ``tr_solver='exact'``: `tr_options` are ignored.\n      - ``tr_solver='lsmr'``: options for `scipy.sparse.linalg.lsmr`.\n\n```\n\n```{eval-rst}\n.. dropdown::  scipy_ls_lm\n\n    .. code-block::\n\n        \"scipy_ls_lm\"\n\n    Minimize a nonlinear least squares problem using a Levenberg-Marquardt method.\n\n    Does not handle bounds and sparse Jacobians. Usually the most efficient method for\n    small unconstrained problems.\n\n    The algorithm supports the following options:\n\n    - **convergence.ftol_rel** (float): Stop when the relative\n      improvement between two iterations is below this.\n    - **convergence.gtol_rel** (float): Stop when the gradient,\n      divided by the absolute value of the criterion function is smaller than this.\n    - **stopping.maxfun** (int): If the maximum number of function\n      evaluation is reached, the optimization stops but we do not count this as\n      convergence.\n    - **tr_solver** (str): Method for solving trust-region subproblems, relevant only\n      for 'trf' and 'dogbox' methods.\n\n      - 'exact' is suitable for not very large problems with dense\n        Jacobian matrices. The computational complexity per iteration is\n        comparable to a singular value decomposition of the Jacobian\n        matrix.\n      - 'lsmr' is suitable for problems with sparse and large Jacobian\n        matrices. 
It uses the iterative procedure\n        `scipy.sparse.linalg.lsmr` for finding a solution of a linear\n        least-squares problem and only requires matrix-vector product\n        evaluations.\n        If None (default), the solver is chosen based on the type of Jacobian\n        returned on the first iteration.\n    - **tr_solver_options** (dict):  Keyword options passed to trust-region solver.\n\n      - ``tr_solver='exact'``: `tr_options` are ignored.\n      - ``tr_solver='lsmr'``: options for `scipy.sparse.linalg.lsmr`.\n\n```\n\n```{eval-rst}\n.. dropdown::  scipy_basinhopping\n\n    .. code-block::\n\n        \"scipy_basinhopping\"\n\n    Find the global minimum of a function using the basin-hopping algorithm which combines a global stepping algorithm with local minimization at each step.\n\n    Basin-hopping is a two-phase method that combines a global stepping algorithm with local minimization at each step. Designed to mimic the natural process of energy minimization of clusters of atoms, it works well for similar problems with “funnel-like, but rugged” energy landscapes.\n\n    This is mainly supported for completeness. Consider optimagic's built in multistart\n    optimization for a similar approach that can run multiple optimizations in parallel,\n    supports all local algorithms in optimagic (as opposed to just those from scipy)\n    and allows for a better visualization of the multistart history.\n\n    When provided the derivative is passed to the local minimization method.\n\n    The algorithm supports the following options:\n\n    - **local_algorithm** (str/callable): Any scipy local minimizer: valid options are.\n      \"Nelder-Mead\". \"Powell\". \"CG\". \"BFGS\". \"Newton-CG\". \"L-BFGS-B\". \"TNC\". \"COBYLA\".\n      \"SLSQP\". \"trust-constr\". \"dogleg\". \"trust-ncg\". \"trust-exact\". 
\"trust-krylov\".\n      or a custom function for local minimization, default is \"L-BFGS-B\".\n    - **n_local_optimizations**: (int) The number local optimizations. Default is 100 as\n      in scipy's default.\n    - **temperature**: (float) Controls the randomness in the optimization process.\n      Higher the temperatures the larger jumps in function value will be accepted.\n      Default is 1.0 as in scipy's default.\n    - **stepsize**: (float) Maximum step size. Default is 0.5 as in scipy's default.\n    - **local_algo_options**: (dict) Additional keyword arguments for the local\n      minimizer. Check the documentation of the local scipy algorithms for details on\n      what is supported.\n    - **take_step**: (callable) Replaces the default step-taking routine. Default is\n      None as in scipy's default.\n    - **accept_test**: (callable) Define a test to judge the acception of steps. Default\n      is None as in scipy's default.\n    - **interval**: (int) Determined how often the step size is updated. Default is 50\n      as in scipy's default.\n    - **convergence.n_unchanged_iterations**: (int) Number of iterations the global\n      minimum estimate stays the same to stops the algorithm. Default is None as in\n      scipy's default.\n    - **seed**: (None, int, numpy.random.Generator,numpy.random.RandomState)Default is\n      None as in scipy's default.\n    - **target_accept_rate**: (float) Adjusts the step size. Default is 0.5 as in scipy's default.\n    - **stepwise_factor**: (float) Step size multiplier upon each step. Lies between (0,1), default is 0.9 as in scipy's default.\n\n```\n\n```{eval-rst}\n.. dropdown::  scipy_brute\n\n    .. 
code-block::\n\n        \"scipy_brute\"\n\n    Find the global minimum of a fuction over a given range by brute force.\n\n    Brute force evaluates the criterion at each point and that is why better suited for problems with very few parameters.\n\n    The start values are not actually used because the grid is only defined by bounds.\n    It is still necessary for optimagic to infer the number and format of the\n    parameters.\n\n    Due to the parallelization, this algorithm cannot collect a history of parameters\n    and criterion evaluations.\n\n    The algorithm supports the following options:\n\n    - **n_grid_points** (int):  the number of grid points to use for the brute force\n      search. Default is 20 as in scipy.\n    - **polishing_function** (callable):  Function to seek a more precise minimum near\n      brute-force' best gridpoint taking brute-force's result at initial guess as a\n      positional argument. Default is None providing no polishing.\n    - **n_cores** (int): The number of cores on which the function is evaluated in\n      parallel. Default 1.\n    - **batch_evaluator** (str or callable). An optimagic batch evaluator. Default\n      'joblib'.\n\n```\n\n```{eval-rst}\n.. dropdown::  scipy_differential_evolution\n\n    .. code-block::\n\n        \"scipy_differential_evolution\"\n\n    Find the global minimum of a multivariate function using differential evolution (DE). 
DE is a gradient-free method.\n\n    Due to optimagic's general parameter format the integrality and vectorized\n    arguments are not supported.\n\n    The algorithm supports the following options:\n\n    - **strategy** (str): Measure of quality to improve a candidate solution, can be one\n      of the following keywords (default 'best1bin'.)\n      - ‘best1bin’\n      - ‘best1exp’\n      - ‘rand1exp’\n      - ‘randtobest1exp’\n      - ‘currenttobest1exp’\n      - ‘best2exp’\n      - ‘rand2exp’\n      - ‘randtobest1bin’\n      - ‘currenttobest1bin’\n      - ‘best2bin’\n      - ‘rand2bin’\n      - ‘rand1bin’\n\n    - **stopping.maxiter** (int): The maximum number of criterion evaluations\n      without polishing is(stopping.maxiter + 1) * population_size * number of\n      parameters\n    - **population_size_multiplier** (int): A multiplier setting the population size.\n      The number of individuals in the population is population_size * number of\n      parameters. The default 15.\n    - **convergence.ftol_rel** (float): Default 0.01.\n    - **mutation_constant** (float/tuple): The differential weight denoted by F in\n      literature. Should be within 0 and 2.  The tuple form is used to specify\n      (min, max) dithering which can help speed convergence.  Default is (0.5, 1).\n    - **recombination_constant** (float): The crossover probability or CR in the\n      literature determines the probability that two solution vectors will be combined\n      to produce a new solution vector. Should be between 0 and 1. The default is 0.7.\n    - **seed** (int): DE is stochastic. Define a seed for reproducability.\n    - **polish** (bool): Uses scipy's L-BFGS-B for unconstrained problems and\n      trust-constr for constrained problems to slightly improve the minimization.\n      Default is True.\n    - **sampling_method** (str/np.array): Specify the sampling method for the initial\n      population. 
It can be one of the following options\n      - \"latinhypercube\"\n      - \"sobol\"\n      - \"halton\"\n      - \"random\"\n      - an array specifying the initial population of shape (total population size,\n      number of parameters). The initial population is clipped to bounds before use.\n      Default is 'latinhypercube'\n\n    - **convergence.ftol_abs** (float):\n      CONVERGENCE_SECOND_BEST_ABSOLUTE_CRITERION_TOLERANCE\n    - **n_cores** (int): The number of cores on which the function is evaluated in\n      parallel. Default 1.\n    - **batch_evaluator** (str or callable). An optimagic batch evaluator. Default\n      'joblib'.\n\n```\n\n```{eval-rst}\n.. dropdown::  scipy_shgo\n\n    .. code-block::\n\n        \"scipy_shgo\"\n\n    Find the global minimum of a fuction using simplicial homology global optimization.\n\n    The algorithm supports the following options:\n\n    - **local_algorithm** (str): The local optimization algorithm to be used. Only\n      COBYLA and SLSQP supports constraints. Valid options are\n      \"Nelder-Mead\". \"Powell\". \"CG\". \"BFGS\". \"Newton-CG\". \"L-BFGS-B\". \"TNC\". \"COBYLA\".\n      \"SLSQP\". \"trust-constr\". \"dogleg\". \"trust-ncg\". \"trust-exact\". \"trust-krylov\"\n      or a custom function for local minimization, default is \"L-BFGS-B\".\n    - **local_algo_options**: (dict) Additional keyword arguments for the local\n      minimizer. Check the documentation of the local scipy algorithms for details on\n      what is supported.\n    - **n_sampling_points** (int): Specify the number of sampling points to construct\n      the simplical complex.\n    - **n_simplex_iterations** (int): Number of iterations to construct the simplical\n      complex. Default is 1 as in scipy.\n    - **sampling_method** (str/callable): The method to use for sampling the search\n      space. 
Default 'simplicial'.\n    - **max_sampling_evaluations** (int): The maximum number of evaluations of the\n      criterion function in the sampling phase.\n    - **convergence.minimum_criterion_value** (float): Specify the global minimum when\n      it is known. Default is - np.inf. For maximization problems, flip the sign.\n    - **convergence.minimum_criterion_tolerance** (float): Specify the relative error\n      between the current best minimum and the supplied global criterion_minimum\n      allowed. Default is scipy's default, 1e-4.\n    - **stopping.maxiter** (int): The maximum number of iterations.\n    - **stopping.maxfun** (int): The maximum number of criterion\n      evaluations.\n    - **stopping.max_processing_time** (int): The maximum time allowed for the\n      optimization.\n    - **minimum_homology_group_rank_differential** (int): The minimum difference in the\n      rank of the homology group between iterations.\n    - **symmetry** (bool): Specify whether the criterion contains symetric variables.\n    - **minimize_every_iteration** (bool): Specify whether the gloabal sampling points\n      are passed to the local algorithm in every iteration.\n    - **max_local_minimizations_per_iteration** (int): The maximum number of local\n      optimizations per iteration. Default False, i.e. no limit.\n    - **infinity_constraints** (bool): Specify whether to save the sampling points\n      outside the feasible domain. Default is True.\n\n```\n\n```{eval-rst}\n.. dropdown::  scipy_dual_annealing\n\n    .. code-block::\n\n        \"scipy_dual_annealing\"\n\n    Find the global minimum of a function using dual annealing for continuous variables.\n\n    The algorithm supports the following options:\n\n    - **stopping.maxiter** (int): Specify the maximum number of global searh\n      iterations.\n    - **local_algorithm** (str): The local optimization algorithm to be used. 
valid\n      options are: \"Nelder-Mead\", \"Powell\", \"CG\", \"BFGS\", \"Newton-CG\", \"L-BFGS-B\",\n      \"TNC\", \"COBYLA\", \"SLSQP\", \"trust-constr\", \"dogleg\", \"trust-ncg\", \"trust-exact\",\n      \"trust-krylov\", Default \"L-BFGS-B\".\n    - **local_algo_options**: (dict) Additional keyword arguments for the local\n      minimizer. Check the documentation of the local scipy algorithms for details on\n      what is supported.\n    - **initial_temperature** (float): The temparature algorithm starts with. The higher values lead to a wider search space. The range is (0.01, 5.e4] and default is 5230.0.\n    - **restart_temperature_ratio** (float): Reanneling starts when the algorithm is decreased to initial_temperature * restart_temperature_ratio. Default is 2e-05.\n    - **visit** (float): Specify the thickness of visiting distribution's tails. Range is (1, 3] and default is scipy's default, 2.62.\n    - **accept** (float): Controls the probability of acceptance. Range is (-1e4, -5] and default is scipy's default, -5.0. Smaller values lead to lower acceptance probability.\n    - **stopping.maxfun** (int): soft limit for the number of criterion evaluations.\n    - **seed** (int, None or RNG): Dual annealing is a stochastic process. Seed or\n      random number generator. Default None.\n    - **no_local_search** (bool): Specify whether to apply a traditional Generalized Simulated Annealing with no local search. Default is False.\n\n```\n\n```{eval-rst}\n.. dropdown::  scipy_direct\n\n    .. code-block::\n\n        \"scipy_direct\"\n\n    Find the global minimum of a function using dividing rectangles method. It is not necessary to provide an initial guess.\n\n    The algorithm supports the following options:\n\n    - **eps** (float): Specify the minimum difference of the criterion values between the current best hyperrectangle and the next potentially best hyperrectangle to be divided determining the trade off between global and local search. 
Default is 1e-6 differing from scipy's default 1e-4.\n    - **stopping.maxfun** (int/None): Maximum number of criterion evaluations allowed. Default is None which caps the number of evaluations at 1000 * number of dimentions automatically.\n    - **stopping.maxiter** (int): Maximum number of iterations allowed.\n    - **locally_biased** (bool): Determine whether to use the locally biased variant of the algorithm DIRECT_L. Default is True.\n    - **convergence.minimum_criterion_value** (float): Specify the global minimum when it is known. Default is minus infinity. For maximization problems, flip the sign.\n    - **convergence.minimum_criterion_tolerance** (float): Specify the relative error between the current best minimum and the supplied global criterion_minimum allowed. Default is scipy's default, 1e-4.\n    - **volume_hyperrectangle_tolerance** (float): Specify the smallest volume of the hyperrectangle containing the lowest criterion value allowed. Range is (0,1). Default is 1e-16.\n    - **length_hyperrectangle_tolerance** (float): Depending on locally_biased it can refer to normalized side (True) or diagonal (False) length of the hyperrectangle containing the lowest criterion value. Range is (0,1). Default is scipy's default, 1e-6.\n\n```\n\n(own-algorithms)=\n\n## Own optimizers\n\nWe implement a few algorithms from scratch. They are currently considered experimental.\n\n```{eval-rst}\n.. dropdown:: bhhh\n\n    .. code-block::\n\n        \"bhhh\"\n\n    Minimize a likelihood function using the BHHH algorithm.\n\n    BHHH (:cite:`Berndt1974`) can - and should ONLY - be used for minimizing\n    (or maximizing) a likelihood. It is similar to the Newton-Raphson\n    algorithm, but replaces the Hessian matrix with the outer product of the\n    gradient. 
This approximation is based on the information matrix equality\n    (:cite:`Halbert1982`) and is thus only valid when minimizing (or maximizing)\n    a likelihood.
The default is True.\n\n    - **stopping.maxiter** (int): Maximum number of algorithm iterations.\n      The default is STOPPING_MAX_ITERATIONS.\n\n    - **convergence.ftol_abs** (float): maximal difference between\n      function value evaluated on simplex points.\n      The default is CONVERGENCE_SECOND_BEST_ABSOLUTE_CRITERION_TOLERANCE.\n\n    - **convergence.xtol_abs** (float): maximal distance between points\n      in the simplex. The default is CONVERGENCE_SECOND_BEST_ABSOLUTE_PARAMS_TOLERANCE.\n\n    - **batch_evaluator** (string or callable): See :ref:`batch_evaluators` for\n        details. Default \"joblib\".\n\n```\n\n```{eval-rst}\n.. dropdown:: pounders\n\n    .. code-block::\n\n        \"pounders\"\n\n    Minimize a function using the POUNDERS algorithm.\n\n    POUNDERs (:cite:`Benson2017`, :cite:`Wild2015`, `GitHub repository\n    <https://github.com/erdc/petsc4py>`_)\n\n    can be a useful tool for economists who estimate structural models using\n    indirect inference, because unlike commonly used algorithms such as Nelder-Mead,\n    POUNDERs is tailored for minimizing a non-linear sum of squares objective function,\n    and therefore may require fewer iterations to arrive at a local optimum than\n    Nelder-Mead.\n\n    Scaling the problem is necessary such that bounds correspond to the unit hypercube\n    :math:`[0, 1]^n`. For unconstrained problems, scale each parameter such that unit\n    changes in parameters result in similar order-of-magnitude changes in the criterion\n    value(s).\n\n    pounders supports the following options:\n\n\n    - **convergence.gtol_abs**: Convergence tolerance for the\n      absolute gradient norm. Stop if norm of the gradient is less than this.\n      Default is 1e-8.\n    - **convergence.gtol_rel**: Convergence tolerance for the\n      relative gradient norm. Stop if norm of the gradient relative to the criterion\n      value is less than this. 
Default is 1e-8.
Default is 0.1.\n    - **c1** (float): Treshold for accepting the norm of our current x candidate.\n      Function argument to find_affine_points() for the case where input array\n      *model_improving_points* is zero.\n    - **c2** (int): Treshold for accepting the norm of our current x candidate.\n      Equal to 10 by default. Argument to *find_affine_points()* in case\n      the input array *model_improving_points* is not zero.\n    - **trustregion_subproblem_solver** (str): Solver to use for the trust-region\n      subproblem. Two internal solvers are supported:\n      - \"bntr\": Bounded Newton Trust-Region (default, supports bound constraints)\n      - \"gqtpar\": (does not support bound constraints)\n    - **trustregion_subsolver_options** (dict): Options dictionary containing\n      the stopping criteria for the subproblem. It takes different keys depending\n      on the type of subproblem solver used. With the exception of the stopping criterion\n      \"maxiter\", which is always included.\n\n      If the subsolver \"bntr\" is used, the dictionary also contains the tolerance levels\n      \"gtol_abs\", \"gtol_rel\", and \"gtol_scaled\". Moreover, the \"conjugate_gradient_method\"\n      can be provided. Available conjugate gradient methods are:\n      - \"cg\". In this case, two additional stopping criteria are \"gtol_abs_cg\" and \"gtol_rel_cg\"\n      - \"steihaug-toint\"\n      - \"trsbox\" (default)\n\n      If the subsolver \"gqtpar\" is employed, the two stopping criteria are\n      \"k_easy\" and \"k_hard\".\n\n      None of the dictionary keys need to be specified by default, but can be.\n    - **batch_evaluator** (str or callable): Name of a pre-implemented batch evaluator\n      (currently \"joblib\" and \"pathos_mp\") or callable with the same interface\n      as the optimagic batch_evaluators. Default is \"joblib\".\n    - **n_cores (int)**: Number of processes used to parallelize the function\n      evaluations. 
Default is 1.\n\n```\n\n(tao-algorithms)=\n\n## Optimizers from the Toolkit for Advanced Optimization (TAO)\n\nWe wrap the pounders algorithm from the Toolkit of Advanced optimization. To use it you\nneed to have [petsc4py](https://pypi.org/project/petsc4py/) installed.\n\n```{eval-rst}\n.. dropdown::  tao_pounders\n\n    .. code-block::\n\n        \"tao_pounders\"\n\n    Minimize a function using the POUNDERs algorithm.\n\n    POUNDERs (:cite:`Benson2017`, :cite:`Wild2015`, `GitHub repository\n    <https://github.com/erdc/petsc4py>`_)\n\n    can be a useful tool for economists who estimate structural models using\n    indirect inference, because unlike commonly used algorithms such as Nelder-Mead,\n    POUNDERs is tailored for minimizing a non-linear sum of squares objective function,\n    and therefore may require fewer iterations to arrive at a local optimum than\n    Nelder-Mead.\n\n    Scaling the problem is necessary such that bounds correspond to the unit hypercube\n    :math:`[0, 1]^n`. For unconstrained problems, scale each parameter such that unit\n    changes in parameters result in similar order-of-magnitude changes in the criterion\n    value(s).\n\n    POUNDERs has several convergence criteria. Let :math:`X` be the current parameter\n    vector, :math:`X_0` the initial parameter vector, :math:`g` the gradient, and\n    :math:`f` the criterion function.\n\n    ``absolute_gradient_tolerance`` stops the optimization if the norm of the gradient\n    falls below :math:`\\epsilon`.\n\n    .. math::\n\n        ||g(X)|| < \\epsilon\n\n    ``relative_gradient_tolerance`` stops the optimization if the norm of the gradient\n    relative to the criterion value falls below :math:`epsilon`.\n\n    .. math::\n\n        \\frac{||g(X)||}{|f(X)|} < \\epsilon\n\n    ``scaled_gradient_tolerance`` stops the optimization if the norm of the gradient is\n    lower than some fraction :math:`epsilon` of the norm of the gradient at the initial\n    parameters.\n\n    .. 
math::\n\n        \\frac{||g(X)||}{||g(X_0)||} < \\epsilon\n\n    - **convergence.gtol_abs** (float): Stop if norm of gradient is less than this.\n      If set to False the algorithm will not consider convergence.gtol_abs.\n    - **convergence.gtol_rel** (float): Stop if relative norm of gradient is less\n      than this. If set to False the algorithm will not consider\n      convergence.gtol_rel.\n    - **convergence.scaled_gradient_tolerance** (float): Stop if scaled norm of gradient is smaller\n      than this. If set to False the algorithm will not consider\n      convergence.scaled_gradient_tolerance.\n    - **trustregion.initial_radius** (float): Initial value of the trust region radius.\n      It must be :math:`> 0`.\n    - **stopping.maxiter** (int): Alternative stopping criterion.\n      If set the routine will stop after the number of specified iterations or\n      after the step size is sufficiently small. If the variable is set the\n      default criteria will all be ignored.\n\n\n```\n\n(nag-algorithms)=\n\n## Optimizers from the Numerical Algorithms Group (NAG)\n\nWe wrap two algorithms from the numerical algorithms group. To use them, you need to\ninstall each of them separately:\n\n- `pip install DFO-LS`\n- `pip install Py-BOBYQA`\n\n```{eval-rst}\n.. dropdown::  nag_dfols\n\n    *Note*: We recommend installing `DFO-LS` version 1.5.3 or higher. Versions of 1.5.0 or lower also work but the versions `1.5.1` and `1.5.2` contain bugs that can lead to errors being raised.\n\n    .. code-block::\n\n        \"nag_dfols\"\n\n    Minimize a function with least squares structure using DFO-LS.\n\n    The DFO-LS algorithm :cite:`Cartis2018b` is designed to solve the nonlinear\n    least-squares minimization problem (with optional bound constraints).\n    Remember to cite :cite:`Cartis2018b` when using DFO-LS in addition to optimagic.\n\n    .. 
math::\n\n        \\min_{x\\in\\mathbb{R}^n}  &\\quad  f(x) := \\sum_{i=1}^{m}r_{i}(x)^2 \\\\\n        \\text{s.t.} &\\quad  \\text{lower_bounds} \\leq x \\leq \\text{upper_bounds}\n\n    The :math:`r_{i}` are called root contributions in optimagic.\n\n    DFO-LS is a derivative-free optimization algorithm, which means it does not require\n    the user to provide the derivatives of f(x) or :math:`r_{i}(x)`, nor does it\n    attempt to estimate them internally (by using finite differencing, for instance).\n\n    There are two main situations when using a derivative-free algorithm\n    (such as DFO-LS) is preferable to a derivative-based algorithm (which is the vast\n    majority of least-squares solvers):\n\n    1. If the residuals are noisy, then calculating or even estimating their derivatives\n       may be impossible (or at least very inaccurate). By noisy, we mean that if we\n       evaluate :math:`r_{i}(x)` multiple times at the same value of x, we get different\n       results. This may happen when a Monte Carlo simulation is used, for instance.\n\n    2. If the residuals are expensive to evaluate, then estimating derivatives\n       (which requires n evaluations of each :math:`r_{i}(x)` for every point of\n       interest x) may be prohibitively expensive. Derivative-free methods are designed\n       to solve the problem with the fewest number of evaluations of the criterion as\n       possible.\n\n    To read the detailed documentation of the algorithm `click here\n    <https://numericalalgorithmsgroup.github.io/dfols/>`_.\n\n    There are four possible convergence criteria:\n\n    1. when the lower trust region radius is shrunk below a minimum\n       (``convergence.minimal_trustregion_radius_tolerance``).\n\n    2. when the improvements of iterations become very small\n       (``convergence.slow_progress``). 
This is very similar to\n       ``relative_criterion_tolerance`` but ``convergence.slow_progress`` is more\n       general allowing to specify not only the threshold for convergence but also\n       a period over which the improvements must have been very small.\n\n    3. when a sufficient reduction to the criterion value at the start parameters\n       has been reached, i.e. when\n       :math:`\\frac{f(x)}{f(x_0)} \\leq\n       \\text{convergence.ftol_scaled}`\n\n    4. when all evaluations on the interpolation points fall within a scaled version of\n       the noise level of the criterion function. This is only applicable if the\n       criterion function is noisy. You can specify this criterion with\n       ``convergence.noise_corrected_criterion_tolerance``.\n\n    DF-OLS supports resetting the optimization and doing a fast start by\n    starting with a smaller interpolation set and growing it dynamically.\n    For more information see `their detailed documentation\n    <https://numericalalgorithmsgroup.github.io/dfols/>`_ and :cite:`Cartis2018b`.\n\n    - **clip_criterion_if_overflowing** (bool): see :ref:`algo_options`.\n      convergence.minimal_trustregion_radius_tolerance (float): see\n      :ref:`algo_options`.\n    - **convergence.noise_corrected_criterion_tolerance** (float): Stop when the\n      evaluations on the set of interpolation points all fall within this factor\n      of the noise level.\n      The default is 1, i.e. when all evaluations are within the noise level.\n      If you want to not use this criterion but still flag your\n      criterion function as noisy, set this tolerance to 0.0.\n\n      .. warning::\n          Very small values, as in most other tolerances don't make sense here.\n\n    - **convergence.ftol_scaled** (float):\n      Terminate if a point is reached where the ratio of the criterion value\n      to the criterion value at the start params is below this value, i.e. 
if\n      :math:`f(x_k)/f(x_0) \\leq\n      \\text{convergence.ftol_scaled}`. Note this is\n      deactivated unless the lowest mathematically possible criterion value (0.0)\n      is actually achieved.\n    - **convergence.slow_progress** (dict): Arguments for converging when the evaluations\n      over several iterations only yield small improvements on average, see\n      see :ref:`algo_options` for details.\n    - **initial_directions (str)**: see :ref:`algo_options`.\n    - **interpolation_rounding_error** (float): see :ref:`algo_options`.\n    - **noise_additive_level** (float): Used for determining the presence of noise\n      and the convergence by all interpolation points being within noise level.\n      0 means no additive noise. Only multiplicative or additive is supported.\n    - **noise_multiplicative_level** (float): Used for determining the presence of noise\n      and the convergence by all interpolation points being within noise level.\n      0 means no multiplicative noise. Only multiplicative or additive is\n      supported.\n    - **noise_n_evals_per_point** (callable): How often to evaluate the criterion\n      function at each point.\n      This is only applicable for criterion functions with noise,\n      when averaging multiple evaluations at the same point produces a more\n      accurate value.\n      The input parameters are the ``upper_trustregion_radius`` (:math:`\\Delta`),\n      the ``lower_trustregion_radius`` (:math:`\\rho`),\n      how many iterations the algorithm has been running for, ``n_iterations``\n      and how many resets have been performed, ``n_resets``.\n      The function must return an integer.\n      Default is no averaging (i.e.\n      ``noise_n_evals_per_point(...) 
= 1``).\n    - **random_directions_orthogonal** (bool): see :ref:`algo_options`.\n    - **stopping.maxfun** (int): see :ref:`algo_options`.\n    - **threshold_for_safety_step** (float): see :ref:`algo_options`.\n    - **trustregion.expansion_factor_successful** (float): see :ref:`algo_options`.\n    - **trustregion.expansion_factor_very_successful** (float): see :ref:`algo_options`.\n    - **trustregion.fast_start_options** (dict): see :ref:`algo_options`.\n    - **trustregion.initial_radius** (float): Initial value of the trust region radius.\n    - **trustregion.method_to_replace_extra_points (str)**: If replacing extra points in\n      successful iterations, whether to use geometry improving steps or the\n      momentum method. Can be \"geometry_improving\" or \"momentum\".\n    - **trustregion.n_extra_points_to_replace_successful** (int): The number of extra\n      points (other than accepting the trust region step) to replace. Useful when\n      ``trustregion.n_interpolation_points > len(x) + 1``.\n    - **trustregion.n_interpolation_points** (int): The number of interpolation points to\n      use. The default is :code:`len(x) + 1`. 
If using resets, this is the\n      number of points to use in the first run of the solver, before any resets.\n    - **trustregion.precondition_interpolation** (bool): see :ref:`algo_options`.\n    - **trustregion.shrinking_factor_not_successful** (float): see :ref:`algo_options`.\n    - **trustregion.shrinking_factor_lower_radius** (float): see :ref:`algo_options`.\n    - **trustregion.shrinking_factor_upper_radius** (float): see :ref:`algo_options`.\n    - **trustregion.threshold_successful** (float): Share of the predicted improvement\n      that has to be achieved for a trust region iteration to count as successful.\n    - **trustregion.threshold_very_successful** (float): Share of the predicted\n      improvement that has to be achieved for a trust region iteration to count\n      as very successful.\n\n```\n\n```{eval-rst}\n.. dropdown::  nag_pybobyqa\n\n    .. code-block::\n\n        \"nag_pybobyqa\"\n\n    Minimize a function using the BOBYQA algorithm.\n\n    BOBYQA (:cite:`Powell2009`, :cite:`Cartis2018`, :cite:`Cartis2018a`) is a\n    derivative-free trust-region method. It is designed to solve nonlinear local\n    minimization problems.\n\n    Remember to cite :cite:`Powell2009` and :cite:`Cartis2018` when using pybobyqa in\n    addition to optimagic. If you take advantage of the ``seek_global_optimum`` option,\n    cite :cite:`Cartis2018a` additionally.\n\n    There are two main situations when using a derivative-free algorithm like BOBYQA\n    is preferable to derivative-based algorithms:\n\n    1. The criterion function is not deterministic, i.e. if we evaluate the criterion\n       function multiple times at the same parameter vector we get different results.\n\n    2. 
The criterion function is very expensive to evaluate and only finite differences\n       are available to calculate its derivative.\n\n    The detailed documentation of the algorithm can be found `here\n    <https://numericalalgorithmsgroup.github.io/pybobyqa/>`_.\n\n    There are four possible convergence criteria:\n\n    1. when the trust region radius is shrunk below a minimum. This is\n       approximately equivalent to an absolute parameter tolerance.\n\n    2. when the criterion value falls below an absolute, user-specified value,\n       the optimization terminates successfully.\n\n    3. when insufficient improvements have been gained over a certain number of\n       iterations. The (absolute) threshold for what constitutes an insufficient\n       improvement, how many iterations have to be insufficient and with which\n       iteration to compare can all be specified by the user.\n\n    4. when all evaluations on the interpolation points fall within a scaled version of\n       the noise level of the criterion function. This is only applicable if the\n       criterion function is noisy.\n\n    - **clip_criterion_if_overflowing** (bool): see :ref:`algo_options`.\n    - **convergence.criterion_value** (float): Terminate successfully if\n      the criterion value falls below this threshold. This is deactivated\n      (i.e. set to -inf) by default.\n    - **convergence.minimal_trustregion_radius_tolerance** (float): Minimum allowed\n      value of the trust region radius, which determines when a successful\n      termination occurs.\n    - **convergence.noise_corrected_criterion_tolerance** (float): Stop when the\n      evaluations on the set of interpolation points all fall within this\n      factor of the noise level.\n      The default is 1, i.e. when all evaluations are within the noise level.\n      If you want to not use this criterion but still flag your\n      criterion function as noisy, set this tolerance to 0.0.\n\n      .. 
warning::\n          Very small values, as in most other tolerances don't make sense here.\n\n    - **convergence.slow_progress** (dict): Arguments for converging when the evaluations\n      over several iterations only yield small improvements on average, see\n      see :ref:`algo_options` for details.\n    - **initial_directions** (str)``: see :ref:`algo_options`.\n    - **interpolation_rounding_error** (float): see :ref:`algo_options`.\n    - **noise_additive_level** (float): Used for determining the presence of noise\n      and the convergence by all interpolation points being within noise level.\n      0 means no additive noise. Only multiplicative or additive is supported.\n    - **noise_multiplicative_level** (float): Used for determining the presence of noise\n      and the convergence by all interpolation points being within noise level.\n      0 means no multiplicative noise. Only multiplicative or additive is\n      supported.\n    - **noise_n_evals_per_point** (callable): How often to evaluate the criterion\n      function at each point.\n      This is only applicable for criterion functions with noise,\n      when averaging multiple evaluations at the same point produces a more\n      accurate value.\n      The input parameters are the ``upper_trustregion_radius`` (``delta``),\n      the ``lower_trustregion_radius`` (``rho``),\n      how many iterations the algorithm has been running for, ``n_iterations``\n      and how many resets have been performed, ``n_resets``.\n      The function must return an integer.\n      Default is no averaging (i.e. ``noise_n_evals_per_point(...) = 1``).\n    - **random_directions_orthogonal** (bool): see :ref:`algo_options`.\n    - **seek_global_optimum** (bool): whether to apply the heuristic to escape local\n      minima presented in :cite:`Cartis2018a`. 
Only applies for noisy criterion\n      functions.\n    - **stopping.maxfun** (int): see :ref:`algo_options`.\n    - **threshold_for_safety_step** (float): see :ref:`algo_options`.\n    - **trustregion.expansion_factor_successful** (float): see :ref:`algo_options`.\n    - **trustregion.expansion_factor_very_successful** (float): see :ref:`algo_options`.\n    - **trustregion.initial_radius** (float): Initial value of the trust region radius.\n    - **trustregion.minimum_change_hession_for_underdetermined_interpolation** (bool):\n      Whether to solve the underdetermined quadratic interpolation problem by\n      minimizing the Frobenius norm of the Hessian, or change in Hessian.\n    - **trustregion.n_interpolation_points** (int): The number of interpolation points to\n      use. With $n=len(x)$ the default is $2n+1$ if the criterion is not noisy.\n      Otherwise, it is set to $(n+1)(n+2)/2)$.\n\n      Larger values are particularly useful for noisy problems.\n      Py-BOBYQA requires\n\n      .. math::\n          n + 1 \\leq \\text{trustregion.n_interpolation_points} \\leq (n+1)(n+2)/2.\n    - **trustregion.precondition_interpolation** (bool): see :ref:`algo_options`.\n    - **trustregion.reset_options** (dict): Options for resetting the optimization,\n      see :ref:`algo_options` for details.\n    - **trustregion.shrinking_factor_not_successful** (float): see :ref:`algo_options`.\n    - **trustregion.shrinking_factor_upper_radius** (float): see :ref:`algo_options`.\n    - **trustregion.shrinking_factor_lower_radius** (float): see :ref:`algo_options`.\n    - **trustregion.threshold_successful** (float): see :ref:`algo_options`.\n    - **trustregion.threshold_very_successful** (float): see :ref:`algo_options`.\n\n\n\n```\n\n(pygmo-algorithms)=\n\n## PYGMO2 Optimizers\n\nPlease cite {cite}`Biscani2020` in addition to optimagic when using pygmo. optimagic\nsupports the following [pygmo2](https://esa.github.io/pygmo2) optimizers.\n\n```{eval-rst}\n.. 
dropdown::  pygmo_gaco\n\n    .. code-block::\n\n        \"pygmo_gaco\"\n\n    Minimize a scalar function using the generalized ant colony algorithm.\n\n    The version available through pygmo is an generalized version of the\n    original ant colony algorithm proposed by :cite:`Schlueter2009`.\n\n    This algorithm can be applied to box-bounded problems.\n\n    Ant colony optimization is a class of optimization algorithms modeled on the\n    actions of an ant colony. Artificial \"ants\" (e.g. simulation agents) locate\n    optimal solutions by moving through a parameter space representing all\n    possible solutions. Real ants lay down pheromones directing each other to\n    resources while exploring their environment. The simulated \"ants\" similarly\n    record their positions and the quality of their solutions, so that in later\n    simulation iterations more ants locate better solutions.\n\n    The generalized ant colony algorithm generates future generations of ants by\n    using a multi-kernel gaussian distribution based on three parameters (i.e.,\n    pheromone values) which are computed depending on the quality of each\n    previous solution. The solutions are ranked through an oracle penalty\n    method.\n\n    - **population_size** (int): Size of the population. If None, it's twice the\n      number of parameters but at least 64.\n    - **batch_evaluator** (str or Callable): Name of a pre-implemented batch\n      evaluator (currently 'joblib' and 'pathos_mp') or Callable with the same\n      interface as the optimagic batch_evaluators. See :ref:`batch_evaluators`.\n    - **n_cores** (int): Number of cores to use.\n    - **seed** (int): seed used by the internal random number generator.\n    - **discard_start_params** (bool): If True, the start params are not guaranteed\n      to be part of the initial population. This saves one criterion function\n      evaluation that cannot be done in parallel with other evaluations. 
Default\n      False.\n\n    - **stopping.maxiter** (int): Number of generations to evolve.\n    - **kernel_size** (int): Number of solutions stored in the solution archive.\n    - **speed_parameter_q** (float): This parameter manages the convergence speed\n      towards the found minima (the smaller the faster). In the pygmo\n      documentation it is referred to as $q$. It must be positive and can be\n      larger than 1. The default is 1.0 until **threshold** is reached. Then it\n      is set to 0.01.\n    - **oracle** (float): oracle parameter used in the penalty method.\n    - **accuracy** (float): accuracy parameter for maintaining a minimum penalty\n      function's values distances.\n    - **threshold** (int): when the iteration counter reaches the threshold the\n      convergence speed is set to 0.01 automatically. To deactivate this effect\n      set the threshold to stopping.maxiter which is the largest allowed\n      value.\n    - **speed_of_std_values_convergence** (int): parameter that determines the\n      convergence speed of the standard deviations. This must be an integer\n      (`n_gen_mark` in pygmo and pagmo).\n    - **stopping.max_n_without_improvements** (int): if a positive integer is\n      assigned here, the algorithm will count the runs without improvements, if\n      this number exceeds the given value, the algorithm will be stopped.\n    - **stopping.maxfun** (int): maximum number of function\n      evaluations.\n    - **focus** (float): this parameter makes the search for the optimum greedier\n      and more focused on local improvements (the higher the greedier). If the\n      value is very high, the search is more focused around the current best\n      solutions. Values larger than 1 are allowed.\n    - **cache** (bool): if True, memory is activated in the algorithm for multiple calls.\n\n```\n\n```{eval-rst}\n.. dropdown::  pygmo_bee_colony\n\n    .. 
code-block::\n\n        \"pygmo_bee_colony\"\n\n    Minimize a scalar function using the artifical bee colony algorithm.\n\n    The Artificial Bee Colony Algorithm was originally proposed by\n    :cite:`Karaboga2007`. The implemented version of the algorithm is proposed\n    in :cite:`Mernik2015`. The algorithm is only suited for bounded parameter\n    spaces.\n\n    - **stopping.maxiter** (int): Number of generations to evolve.\n    - **seed** (int): seed used by the internal random number generator.\n    - **discard_start_params** (bool): If True, the start params are not guaranteed\n      to be part of the initial population. This saves one criterion function\n      evaluation that cannot be done in parallel with other evaluations. Default\n      False.\n    - **max_n_trials** (int): Maximum number of trials for abandoning a source.\n      Default is 1.\n    - **population_size** (int): Size of the population. If None, it's twice the\n      number of parameters but at least 20.\n```\n\n```{eval-rst}\n.. dropdown::  pygmo_de\n\n    .. code-block::\n\n        \"pygmo_de\"\n\n    Minimize a scalar function using the differential evolution algorithm.\n\n    Differential Evolution is a heuristic optimizer originally presented in\n    :cite:`Storn1997`. The algorithm is only suited for bounded parameter\n    spaces.\n\n    - **population_size** (int): Size of the population. If None, it's twice the\n      number of parameters but at least 10.\n    - **seed** (int): seed used by the internal random number generator.\n    - **discard_start_params** (bool): If True, the start params are not guaranteed\n      to be part of the initial population. This saves one criterion function\n      evaluation that cannot be done in parallel with other evaluations. Default\n      False.\n    - **stopping.maxiter** (int): Number of generations to evolve.\n    - **weight_coefficient** (float): Weight coefficient. It is denoted by $F$ in\n      the main paper and must lie in [0, 2]. 
It controls the amplification of\n      the differential variation $(x_{r_2, G} - x_{r_3, G})$.\n    - **crossover_probability** (float): Crossover probability.\n    - **mutation_variant** (str or int): code for the mutation variant to create a\n      new candidate individual. The default is \"rand/1/exp\" (2). The following are available:\n\n        - \"best/1/exp\" (1, when specified as int)\n        - \"rand/1/exp\" (2, when specified as int)\n        - \"rand-to-best/1/exp\" (3, when specified as int)\n        - \"best/2/exp\" (4, when specified as int)\n        - \"rand/2/exp\" (5, when specified as int)\n        - \"best/1/bin\" (6, when specified as int)\n        - \"rand/1/bin\" (7, when specified as int)\n        - \"rand-to-best/1/bin\" (8, when specified as int)\n        - \"best/2/bin\" (9, when specified as int)\n        - \"rand/2/bin\" (10, when specified as int)\n    - **convergence.criterion_tolerance**: stopping criteria on the criterion\n      tolerance. Default is 1e-6. It is not clear whether this is the absolute\n      or relative criterion tolerance.\n    - **convergence.xtol_rel**: stopping criteria on the x\n      tolerance. In pygmo the default is 1e-6 but we use our default value of\n      1e-5.\n```\n\n```{eval-rst}\n.. dropdown::  pygmo_sea\n\n    .. code-block::\n\n        \"pygmo_sea\"\n\n    Minimize a scalar function using the (N+1)-ES simple evolutionary algorithm.\n\n    This algorithm represents the simplest evolutionary strategy, where a population of\n    $\\lambda$ individuals at each generation produces one offspring by mutating its best\n    individual uniformly at random within the bounds. Should the offspring be better\n    than the worst individual in the population it will substitute it.\n\n    See :cite:`Oliveto2007`.\n\n    The algorithm is only suited for bounded parameter spaces.\n\n    - **population_size** (int): Size of the population. 
If None, it's twice the number of\n      parameters but at least 10.\n    - **seed** (int): seed used by the internal random number generator.\n    - **discard_start_params** (bool): If True, the start params are not guaranteed to be\n      part of the initial population. This saves one criterion function evaluation that\n      cannot be done in parallel with other evaluations. Default False.\n    - **stopping.maxiter** (int): number of generations to consider. Each generation\n      will compute the objective function once.\n\n```\n\n```{eval-rst}\n.. dropdown::  pygmo_sga\n\n    .. code-block::\n\n        \"pygmo_sga\"\n\n    Minimize a scalar function using a simple genetic algorithm.\n\n    A detailed description of the algorithm can be found `in the pagmo2 documentation\n    <https://esa.github.io/pagmo2/docs/cpp/algorithms/sga.html>`_.\n\n    See also :cite:`Oliveto2007`.\n\n    - **population_size** (int): Size of the population. If None, it's twice the number of\n      parameters but at least 64.\n    - **seed** (int): seed used by the internal random number generator.\n    - **discard_start_params** (bool): If True, the start params are not guaranteed to be\n      part of the initial population. This saves one criterion function evaluation that\n      cannot be done in parallel with other evaluations. Default False.\n    - **stopping.maxiter** (int): Number of generations to evolve.\n    - **crossover_probability** (float): Crossover probability.\n    - **crossover_strategy** (str): the crossover strategy. One of “exponential”,“binomial”,\n      “single” or “sbx”. Default is \"exponential\".\n    - **eta_c** (float): distribution index for “sbx” crossover. This is an inactive\n      parameter if other types of crossovers are selected. Can be in [1, 100].\n    - **mutation_probability** (float): Mutation probability.\n    - **mutation_strategy** (str): Mutation strategy. Must be \"gaussian\", \"polynomial\" or\n      \"uniform\". 
Default is \"polynomial\".\n    - **mutation_polynomial_distribution_index** (float): Must be in [0, 1]. Default is 1.\n    - **mutation_gaussian_width** (float): Must be in [0, 1]. Default is 1.\n    - **selection_strategy** (str): Selection strategy. Must be \"tournament\" or \"truncated\".\n    - **selection_truncated_n_best** (int): number of best individuals to use in the\n      \"truncated\" selection mechanism.\n    - **selection_tournament_size** (int): size of the tournament in the \"tournament\"\n      selection mechanism. Default is 1.\n```\n\n```{eval-rst}\n.. dropdown::  pygmo_sade\n\n    .. code-block::\n\n        \"pygmo_sade\"\n\n    Minimize a scalar function using Self-adaptive Differential Evolution.\n\n    The original Differential Evolution algorithm (pygmo_de) can be significantly\n    improved introducing the idea of parameter self-adaptation.\n\n    Many different proposals have been made to self-adapt both the crossover and the\n    F parameters of the original differential evolution algorithm. pygmo's\n    implementation supports two different mechanisms. The first one, proposed by\n    :cite:`Brest2006`, does not make use of the differential evolution operators to\n    produce new values for the weight coefficient $F$ and the crossover probability\n    $CR$ and, strictly speaking, is thus not self-adaptation, rather parameter control.\n    The resulting differential evolution variant is often referred to as jDE.\n    The second variant is inspired by the ideas introduced by :cite:`Elsayed2011` and\n    uses a variation of the selected DE operator to produce new $CR$ and $F$ parameters\n    for each individual. This variant is referred to as iDE.\n\n    - **population_size** (int): Size of the population. 
If None, it's twice the number of\n      parameters but at least 64.\n    - **seed** (int): seed used by the internal random number generator.\n    - **discard_start_params** (bool): If True, the start params are not guaranteed to be\n      part of the initial population. This saves one criterion function evaluation that\n      cannot be done in parallel with other evaluations. Default False.\n    - **jde** (bool): Whether to use the jDE self-adaptation variant to control the $F$ and\n      $CR$ parameter. If True jDE is used, else iDE.\n    - **stopping.maxiter** (int): Number of generations to evolve.\n    - **mutation_variant** (int or str): code for the mutation variant to create a new\n      candidate individual. The default is \"rand/1/exp\". The first ten are the\n      classical mutation variants introduced in the original DE algorithm, the remaining\n      ones are, instead, considered in the work by :cite:`Elsayed2011`.\n      The following are available:\n\n        - \"best/1/exp\" or 1\n        - \"rand/1/exp\" or 2\n        - \"rand-to-best/1/exp\" or 3\n        - \"best/2/exp\" or 4\n        - \"rand/2/exp\" or 5\n        - \"best/1/bin\" or 6\n        - \"rand/1/bin\" or 7\n        - \"rand-to-best/1/bin\" or 8\n        - \"best/2/bin\" or 9\n        - \"rand/2/bin\" or 10\n        - \"rand/3/exp\" or 11\n        - \"rand/3/bin\" or 12\n        - \"best/3/exp\" or 13\n        - \"best/3/bin\" or 14\n        - \"rand-to-current/2/exp\" or 15\n        - \"rand-to-current/2/bin\" or 16\n        - \"rand-to-best-and-current/2/exp\" or 17\n        - \"rand-to-best-and-current/2/bin\" or 18\n\n    - **keep_adapted_params** (bool):  when true the adapted parameters $CR$ and $F$ are\n      not reset between successive calls to the evolve method. Default is False.\n    - **ftol** (float): stopping criteria on the f tolerance.\n    - **xtol** (float): stopping criteria on the x tolerance.\n\n\n```\n\n```{eval-rst}\n.. dropdown::  pygmo_cmaes\n\n    .. 
code-block::\n\n        \"pygmo_cmaes\"\n\n    Minimize a scalar function using the Covariance Matrix Evolutionary Strategy.\n\n    CMA-ES is one of the most successful algorithms, classified as an Evolutionary\n    Strategy, for derivative-free global optimization. The version supported by\n    optimagic is the version described in :cite:`Hansen2006`.\n\n    In contrast to the pygmo version, optimagic always sets force_bounds to True. This\n    avoids the evaluation of ill-defined parameter values.\n\n    - **population_size** (int): Size of the population. If None, it's twice the number of\n      parameters but at least 64.\n    - **seed** (int): seed used by the internal random number generator.\n    - **discard_start_params** (bool): If True, the start params are not guaranteed to be\n      part of the initial population. This saves one criterion function evaluation that\n      cannot be done in parallel with other evaluations. Default False.\n\n    - **stopping.maxiter** (int): Number of generations to evolve.\n    - **backward_horizon** (float): backward time horizon for the evolution path. It must\n      lie between 0 and 1.\n    - **variance_loss_compensation** (float): makes partly up for the small variance loss in\n      case the indicator is zero. `cs` in the MATLAB Code of :cite:`Hansen2006`. It must\n      lie between 0 and 1.\n    - **learning_rate_rank_one_update** (float): learning rate for the rank-one update of\n      the covariance matrix. `c1` in the pygmo and pagmo documentation. It must lie\n      between 0 and 1.\n    - **learning_rate_rank_mu_update** (float): learning rate for the rank-mu update of the\n      covariance matrix. `cmu` in the pygmo and pagmo documentation. 
It must lie between\n      0 and 1.\n    - **initial_step_size** (float): initial step size, :math:`\\sigma^0` in the original\n      paper.\n    - **ftol** (float): stopping criteria on the f tolerance.\n    - **xtol** (float): stopping criteria on the x tolerance.\n    - **keep_adapted_params** (bool):  when true the adapted parameters are not reset\n      between successive calls to the evolve method. Default is False.\n\n```\n\n```{eval-rst}\n.. dropdown::  pygmo_simulated_annealing\n\n    .. code-block::\n\n        \"pygmo_simulated_annealing\"\n\n    Minimize a function with the simulated annealing algorithm.\n\n    This version of the simulated annealing algorithm is, essentially, an iterative\n    random search procedure with adaptive moves along the coordinate directions. It\n    permits uphill moves under the control of the Metropolis criterion, in the hope to avoid\n    the first local minima encountered. This version is the one proposed in\n    :cite:`Corana1987`.\n\n    .. note:: When selecting the starting and final temperature values it helps to think\n        about the temperature as the deterioration in the objective function value that\n        still has a 37% chance of being accepted.\n\n    - **population_size** (int): Size of the population. If None, it's twice the number of\n      parameters but at least 64.\n    - **seed** (int): seed used by the internal random number generator.\n    - **discard_start_params** (bool): If True, the start params are not guaranteed to be\n      part of the initial population. This saves one criterion function evaluation that\n      cannot be done in parallel with other evaluations. Default False.\n    - **start_temperature** (float): starting temperature. Must be > 0.\n    - **end_temperature** (float): final temperature. Our default (0.01) is lower than in\n      pygmo and pagmo. 
The final temperature must be positive.\n    - **n_temp_adjustments** (int): number of temperature adjustments in the annealing\n      schedule.\n    - **n_range_adjustments** (int): number of adjustments of the search range performed at\n      a constant temperature.\n    - **bin_size** (int): number of mutations that are used to compute the acceptance rate.\n    - **start_range** (float): starting range for mutating the decision vector. It must lie\n      between 0 and 1.\n```\n\n```{eval-rst}\n.. dropdown::  pygmo_pso\n\n    .. code-block::\n\n        \"pygmo_pso\"\n\n    Minimize a scalar function using Particle Swarm Optimization.\n\n    Particle swarm optimization (PSO) is a population based algorithm inspired by the\n    foraging behaviour of swarms. In PSO each point has memory of the position where it\n    achieved the best performance xli (local memory) and of the best decision vector\n    :math:`x^g` in a certain neighbourhood, and uses this information to update its\n    position.\n\n    For a survey on particle swarm optimization algorithms, see :cite:`Poli2007`.\n\n    Each particle determines its future position :math:`x_{i+1} = x_i + v_i` where\n\n    .. math:: v_{i+1} = \\omega (v_i + \\eta_1 \\cdot \\mathbf{r}_1 \\cdot (x_i - x^{l}_i) +\n        \\eta_2 \\cdot \\mathbf{r}_2 \\cdot (x_i - x^g))\n\n    - **population_size** (int): Size of the population. If None, it's twice the number of\n      parameters but at least 10.\n    - **seed** (int): seed used by the internal random number generator.\n    - **discard_start_params** (bool): If True, the start params are not guaranteed to be\n      part of the initial population. This saves one criterion function evaluation that\n      cannot be done in parallel with other evaluations. 
Default False.\n    - **stopping.maxiter** (int): Number of generations to evolve.\n\n    - **omega** (float): depending on the variant chosen, :math:`\\omega` is the particles'\n      inertia weight or the construction coefficient. It must lie between 0 and 1.\n    - **force_of_previous_best** (float): :math:`\\eta_1` in the equation above. It's the\n      magnitude of the force, applied to the particle’s velocity, in the direction of\n      its previous best position. It must lie between 0 and 4.\n    - **force_of_best_in_neighborhood** (float): :math:`\\eta_2` in the equation above. It's\n      the magnitude of the force, applied to the particle’s velocity, in the direction\n      of the best position in its neighborhood. It must lie between 0 and 4.\n    - **max_velocity** (float): maximum allowed particle velocity as fraction of the box\n      bounds. It must lie between 0 and 1.\n    - **algo_variant (int or str)**: algorithm variant to be used:\n        - 1 or \"canonical_inertia\": Canonical (with inertia weight)\n        - 2 or \"social_and_cog_rand\": Same social and cognitive rand.\n        - 3 or \"all_components_rand\": Same rand. for all components\n        - 4 or \"one_rand\": Only one rand.\n        - 5 or \"canonical_constriction\": Canonical (with constriction fact.)\n        - 6 or \"fips\": Fully Informed (FIPS)\n\n    - **neighbor_definition (int or str)**: swarm topology that defines each particle's\n      neighbors that is to be used:\n\n        - 1 or \"gbest\"\n        - 2 or \"lbest\"\n        - 3 or \"Von Neumann\"\n        - 4 or \"Adaptive random\"\n\n    - **neighbor_param** (int): the neighbourhood parameter. If the lbest topology is\n      selected (neighbor_definition=2), it represents each particle's indegree (also\n      outdegree) in the swarm topology. Particles have neighbours up to a radius of k =\n      neighbor_param / 2 in the ring. 
If the Randomly-varying neighbourhood topology is\n      selected (neighbor_definition=4), it represents each particle’s maximum outdegree\n      in the swarm topology. The minimum outdegree is 1 (the particle always connects\n      back to itself). If neighbor_definition is 1 or 3 this parameter is ignored.\n    - **keep_velocities** (bool): when true the particle velocities are not reset between\n      successive calls to `evolve`.\n```\n\n```{eval-rst}\n.. dropdown::  pygmo_pso_gen\n\n    .. code-block::\n\n        \"pygmo_pso_gen\"\n\n    Minimize a scalar function with generational Particle Swarm Optimization.\n\n    Particle Swarm Optimization (generational) is identical to pso, but does update the\n    velocities of each particle before new particle positions are computed (taking into\n    consideration all updated particle velocities). Each particle is thus evaluated on\n    the same seed within a generation as opposed to the standard PSO which evaluates\n    single particle at a time. Consequently, the generational PSO algorithm is suited\n    for stochastic optimization problems.\n\n    For a survey on particle swarm optimization algorithms, see :cite:`Poli2007`.\n\n    Each particle determines its future position :math:`x_{i+1} = x_i + v_i` where\n\n    .. math:: v_{i+1} = \\omega (v_i + \\eta_1 \\cdot \\mathbf{r}_1 \\cdot (x_i - x^{l}_i) +\n        \\eta_2 \\cdot \\mathbf{r}_2 \\cdot (x_i - x^g))\n\n    - **population_size** (int): Size of the population. If None, it's twice the number of\n      parameters but at least 10.\n    - **batch_evaluator (str or Callable)**: Name of a pre-implemented batch evaluator\n      (currently 'joblib' and 'pathos_mp') or Callable with the same interface as the\n      optimagic batch_evaluators. 
See :ref:`batch_evaluators`.\n    - **n_cores** (int): Number of cores to use.\n    - **seed** (int): seed used by the internal random number generator.\n    - **discard_start_params** (bool): If True, the start params are not guaranteed to be\n      part of the initial population. This saves one criterion function evaluation that\n      cannot be done in parallel with other evaluations. Default False.\n    - **stopping.maxiter** (int): Number of generations to evolve.\n\n    - **omega** (float): depending on the variant chosen, :math:`\\omega` is the particles'\n      inertia weight or the constructuion coefficient. It must lie between 0 and 1.\n    - **force_of_previous_best** (float): :math:`\\eta_1` in the equation above. It's the\n      magnitude of the force, applied to the particle’s velocity, in the direction of\n      its previous best position. It must lie between 0 and 4.\n    - **force_of_best_in_neighborhood** (float): :math:`\\eta_2` in the equation above. It's\n      the magnitude of the force, applied to the particle’s velocity, in the direction\n      of the best position in its neighborhood. It must lie between 0 and 4.\n    - **max_velocity** (float): maximum allowed particle velocity as fraction of the box\n      bounds. It must lie between 0 and 1.\n    - **algo_variant** (int): code of the algorithm's variant to be used:\n\n        - 1 or \"canonical_inertia\": Canonical (with inertia weight)\n        - 2 or \"social_and_cog_rand\": Same social and cognitive rand.\n        - 3 or \"all_components_rand\": Same rand. 
for all components\n        - 4 or \"one_rand\": Only one rand.\n        - 5 or \"canonical_constriction\": Canonical (with constriction fact.)\n        - 6 or \"fips\": Fully Informed (FIPS)\n\n    - **neighbor_definition** (int): code for the swarm topology that defines each\n      particle's neighbors that is to be used:\n\n        - 1 or \"gbest\"\n        - 2 or \"lbest\"\n        - 3 or \"Von Neumann\"\n        - 4 or \"Adaptive random\"\n\n    - **neighbor_param** (int): the neighbourhood parameter. If the lbest topology is\n      selected (neighbor_definition=2), it represents each particle's indegree (also\n      outdegree) in the swarm topology. Particles have neighbours up to a radius of k =\n      neighbor_param / 2 in the ring. If the Randomly-varying neighbourhood topology is\n      selected (neighbor_definition=4), it represents each particle’s maximum outdegree\n      in the swarm topology. The minimum outdegree is 1 (the particle always connects\n      back to itself). If neighbor_definition is 1 or 3 this parameter is ignored.\n    - **keep_velocities** (bool): when true the particle velocities are not reset between\n      successive calls to `evolve`.\n```\n\n```{eval-rst}\n.. dropdown::  pygmo_mbh\n\n    .. code-block::\n\n        \"pygmo_mbh\"\n\n    Minimize a scalar function using generalized Monotonic Basin Hopping.\n\n    Monotonic basin hopping, or simply, basin hopping, is an algorithm rooted in the\n    idea of mapping the objective function $f(x_0)$ into the local minima found starting\n    from $x_0$. 
This simple idea allows a substantial increase of efficiency in solving\n    problems, such as the Lennard-Jones cluster or the MGA-1DSM interplanetary\n    trajectory problem that are conjectured to have a so-called funnel structure.\n\n    See :cite:`Wales1997` for the paper introducing the basin hopping idea for a\n    Lennard-Jones cluster optimization.\n\n    pygmo provides an original generalization of this concept resulting in a\n    meta-algorithm that operates on a population. When a population containing a single\n    individual is used the original method is recovered.\n\n    - **population_size** (int): Size of the population. If None, it's twice the number of\n      parameters but at least 250.\n    - **seed** (int): seed used by the internal random number generator.\n    - **discard_start_params** (bool): If True, the start params are not guaranteed to be\n      part of the initial population. This saves one criterion function evaluation that\n      cannot be done in parallel with other evaluations. Default False.\n    - **inner_algorithm** (pygmo.algorithm): an pygmo algorithm or a user-defined algorithm,\n      either C++ or Python. If None the `pygmo.compass_search` algorithm will be used.\n    - **stopping.max_inner_runs_without_improvement** (int): consecutive runs of the inner\n      algorithm that need to result in no improvement for mbh to stop.\n    - **perturbation** (float): the perturbation to be applied to each component.\n```\n\n```{eval-rst}\n.. dropdown::  pygmo_xnes\n\n    .. code-block::\n\n        \"pygmo_xnes\"\n\n    Minimize a scalar function using Exponential Evolution Strategies.\n\n    Exponential Natural Evolution Strategies is an algorithm closely related to CMAES\n    and based on the adaptation of a gaussian sampling distribution via the so-called\n    natural gradient. 
Like CMAES it is based on the idea of sampling new trial vectors\n    from a multivariate distribution and using the new sampled points to update the\n    distribution parameters. Naively this could be done following the gradient of the\n    expected fitness as approximated by a finite number of sampled points. While this\n    idea offers a powerful lead on algorithmic construction it has some major drawbacks\n    that are solved in the so-called Natural Evolution Strategies class of algorithms by\n    adopting, instead, the natural gradient. xNES is one of the most performing variants\n    in this class.\n\n    See :cite:`Glasmachers2010` and the `pagmo documentation on xNES\n    <https://esa.github.io/pagmo2/docs/cpp/algorithms/xnes.html#_CPPv4N5pagmo4xnesE>`_\n    for details.\n\n    - **population_size** (int): Size of the population. If None, it's twice the number of\n      parameters but at least 64.\n    - **seed** (int): seed used by the internal random number generator.\n    - **discard_start_params** (bool): If True, the start params are not guaranteed to be\n      part of the initial population. This saves one criterion function evaluation that\n      cannot be done in parallel with other evaluations. Default False.\n    - **stopping.maxiter** (int): Number of generations to evolve.\n\n    - **learning_rate_mean_update** (float): learning rate for the mean update\n      (:math:`\\eta_\\mu`). It must be between 0 and 1 or None.\n    - **learning_rate_step_size_update** (float): learning rate for the step-size update. It\n      must be between 0 and 1 or None.\n    - **learning_rate_cov_matrix_update** (float): learning rate for the covariance matrix\n      update. It must be between 0 and 1 or None.\n    - **initial_search_share** (float): share of the given search space that will be\n      initally searched. It must be between 0 and 1. 
Default is 1.\n    - **ftol** (float): stopping criteria on the f tolerance.\n    - **xtol** (float): stopping criteria on the x tolerance.\n    - **keep_adapted_params** (bool): when true the adapted parameters are not reset between\n      successive calls to the evolve method. Default is False.\n```\n\n```{eval-rst}\n.. dropdown::  pygmo_gwo\n\n    .. code-block::\n\n        \"pygmo_gwo\"\n\n    Minimize a scalar function using the Grey Wolf Optimizer.\n\n    The grey wolf optimizer was proposed by :cite:`Mirjalili2014`. The pygmo\n    implementation that is wrapped by optimagic is based on the pseudo code provided in\n    that paper.\n\n    This algorithm is a classic example of a highly criticizable line of search that led\n    in the first decades of our millennium to the development of an entire zoo of\n    metaphors inspiring optimization heuristics. In our opinion they, as is the case for\n    the grey wolf optimizer, are often but small variations of already existing\n    heuristics rebranded with unnecessary and convoluted biological metaphors. In the\n    case of GWO this is particularly evident as the position update rule is shockingly\n    trivial and can also be easily seen as a product of an evolutionary metaphor or a\n    particle swarm one. Such an update rule is also not particularly effective and\n    results in a rather poor performance most of the time.\n\n    - **population_size** (int): Size of the population. If None, it's twice the number of\n      parameters but at least 64.\n    - **seed** (int): seed used by the internal random number generator.\n    - **discard_start_params** (bool): If True, the start params are not guaranteed to be\n      part of the initial population. This saves one criterion function evaluation that\n      cannot be done in parallel with other evaluations. Default False.\n    - **stopping.maxiter** (int): Number of generations to evolve.\n\n```\n\n```{eval-rst}\n.. dropdown::  pygmo_compass_search\n\n    .. 
code-block::\n\n        \"pygmo_compass_search\"\n\n    Minimize a scalar function using compass search.\n\n    The algorithm is described in :cite:`Kolda2003`.\n\n    It is considered slow but reliable. It should not be used for stochastic problems.\n\n    - **population_size** (int): Size of the population. Even though the algorithm is not\n      population based the population size does affect the results of the algorithm.\n    - **seed** (int): seed used by the internal random number generator.\n    - **discard_start_params** (bool): If True, the start params are not guaranteed to be\n      part of the initial population. This saves one criterion function evaluation that\n      cannot be done in parallel with other evaluations. Default False.\n    - **stopping.maxfun** (int): maximum number of function evaluations.\n    - **start_range** (float): the start range. Must be in (0, 1].\n    - **stop_range** (float): the stop range. Must be in (0, start_range].\n    - **reduction_coeff** (float): the range reduction coefficient. Must be in (0, 1).\n```\n\n```{eval-rst}\n.. dropdown::  pygmo_ihs\n\n    .. code-block::\n\n        \"pygmo_ihs\"\n\n    Minimize a scalar function using the improved harmony search algorithm.\n\n    Improved harmony search (IHS) was introduced by :cite:`Mahdavi2007`.\n    IHS supports stochastic problems.\n\n    - **population_size** (int): Size of the population. If None, it's twice the number of\n      parameters.\n    - **seed** (int): seed used by the internal random number generator.\n    - **discard_start_params** (bool): If True, the start params are not guaranteed to be\n      part of the initial population. This saves one criterion function evaluation that\n      cannot be done in parallel with other evaluations. 
Default False.\n    - **stopping.maxiter** (int): Number of generations to evolve.\n    - **choose_from_memory_probability** (float): probability of choosing from memory\n      (similar to a crossover probability).\n    - **min_pitch_adjustment_rate** (float): minimum pitch adjustment rate. (similar to a\n      mutation rate). It must be between 0 and 1.\n    - **max_pitch_adjustment_rate** (float): maximum pitch adjustment rate. (similar to a\n      mutation rate). It must be between 0 and 1.\n    - **min_distance_bandwidth** (float): minimum distance bandwidth. (similar to a mutation\n      width). It must be positive.\n    - **max_distance_bandwidth** (float): maximum distance bandwidth. (similar to a mutation\n      width).\n```\n\n```{eval-rst}\n.. dropdown::  pygmo_de1220\n\n    .. code-block::\n\n        \"pygmo_de1220\"\n\n    Minimize a scalar function using Self-adaptive Differential Evolution, pygmo flavor.\n\n    See `the PAGMO documentation for details\n    <https://esa.github.io/pagmo2/docs/cpp/algorithms/de1220.html>`_.\n\n    - **population_size** (int): Size of the population. If None, it's twice the number of\n      parameters but at least 64.\n    - **seed** (int): seed used by the internal random number generator.\n    - **discard_start_params** (bool): If True, the start params are not guaranteed to be\n      part of the initial population. This saves one criterion function evaluation that\n      cannot be done in parallel with other evaluations. Default False.\n    - **jde** (bool): Whether to use the jDE self-adaptation variant to control the $F$ and\n      $CR$ parameter. If True jDE is used, else iDE.\n    - **stopping.maxiter** (int): Number of generations to evolve.\n    - **allowed_variants** (array-like object): allowed mutation variants (can be codes\n      or strings). Each code refers to one mutation variant to create a new candidate\n      individual. 
The first ten refer to the classical mutation variants introduced in\n      the original DE algorithm, the remaining ones are, instead, considered in the work\n      by :cite:`Elsayed2011`. The default is [\"rand/1/exp\", \"rand-to-best/1/exp\",\n      \"rand/1/bin\", \"rand/2/bin\", \"best/3/exp\", \"best/3/bin\", \"rand-to-current/2/exp\",\n      \"rand-to-current/2/bin\"]. The following are available:\n\n        - 1 or \"best/1/exp\"\n        - 2 or \"rand/1/exp\"\n        - 3 or \"rand-to-best/1/exp\"\n        - 4 or \"best/2/exp\"\n        - 5 or \"rand/2/exp\"\n        - 6 or \"best/1/bin\"\n        - 7 or \"rand/1/bin\"\n        - 8 or \"rand-to-best/1/bin\"\n        - 9 or \"best/2/bin\"\n        - 10 or \"rand/2/bin\"\n        - 11 or \"rand/3/exp\"\n        - 12 or \"rand/3/bin\"\n        - 13 or \"best/3/exp\"\n        - 14 or \"best/3/bin\"\n        - 15 or \"rand-to-current/2/exp\"\n        - 16 or \"rand-to-current/2/bin\"\n        - 17 or \"rand-to-best-and-current/2/exp\"\n        - 18 or \"rand-to-best-and-current/2/bin\"\n\n    - **keep_adapted_params** (bool):  when true the adapted parameters $CR$ anf $F$ are not\n      reset between successive calls to the evolve method. Default is False.\n    - **ftol** (float): stopping criteria on the x tolerance.\n    - **xtol** (float): stopping criteria on the f tolerance.\n\n```\n\n(ipopt-algorithm)=\n\n## The Interior Point Optimizer (ipopt)\n\noptimagic's support for the Interior Point Optimizer ({cite}`Waechter2005`,\n{cite}`Waechter2005a`, {cite}`Waechter2005b`, {cite}`Nocedal2009`) is built on\n[cyipopt](https://cyipopt.readthedocs.io/en/latest/index.html), a Python wrapper for the\n[Ipopt optimization package](https://coin-or.github.io/Ipopt/index.html).\n\nTo use ipopt, you need to have\n[cyipopt installed](https://cyipopt.readthedocs.io/en/latest/index.html)\n(`conda install cyipopt`).\n\n```{eval-rst}\n.. dropdown:: ipopt\n\n    .. 
code-block::\n\n        \"ipopt\"\n\n    Minimize a scalar function using the Interior Point Optimizer.\n\n    This implementation of the Interior Point Optimizer (:cite:`Waechter2005`,\n    :cite:`Waechter2005a`, :cite:`Waechter2005b`, :cite:`Nocedal2009`) relies on\n    `cyipopt <https://cyipopt.readthedocs.io/en/latest/index.html>`_, a Python\n    wrapper for the `Ipopt optimization package\n    <https://coin-or.github.io/Ipopt/index.html>`_.\n\n    There are two levels of termination criteria. If the usual \"desired\"\n    tolerances (see tol, dual_inf_tol etc) are satisfied at an iteration, the\n    algorithm immediately terminates with a success message. On the other hand,\n    if the algorithm encounters \"acceptable_iter\" many iterations in a row that\n    are considered \"acceptable\", it will terminate before the desired\n    convergence tolerance is met. This is useful in cases where the algorithm\n    might not be able to achieve the \"desired\" level of accuracy.\n\n    The options are analogous to the ones in the `ipopt documentation\n    <https://coin-or.github.io/Ipopt/OPTIONS.html#>`_ with the exception of the\n    linear solver options which are here bundled into a dictionary. Any argument\n    that takes \"yes\" and \"no\" in the ipopt documentation can also be passed as a\n    `True` and `False`, respectively. and any option that accepts \"none\" in\n    ipopt accepts a Python `None`.\n\n    The following options are not supported:\n      - `num_linear_variables`: since optimagic may reparametrize your problem\n        and this changes the parameter problem, we do not support this option.\n      - derivative checks\n      - print options.\n\n\n    - **convergence.ftol_rel** (float): The algorithm\n      terminates successfully, if the (scaled) non linear programming error\n      becomes smaller than this value.\n\n    - **mu_target** (float): Desired value of complementarity. 
Usually, the barrier\n      parameter is driven to zero and the termination test for complementarity\n      is measured with respect to zero complementarity. However, in some cases\n      it might be desired to have Ipopt solve barrier problem for strictly\n      positive value of the barrier parameter. In this case, the value of\n      \"mu_target\" specifies the final value of the barrier parameter, and the\n      termination tests are then defined with respect to the barrier problem for\n      this value of the barrier parameter. The valid range for this real option\n      is 0 ≤ mu_target  and its default value is 0.\n\n    - **s_max** (float): Scaling threshold for the NLP error.\n\n    - **stopping.maxiter** (int):  If the maximum number of iterations is\n      reached, the optimization stops, but we do not count this as successful\n      convergence. The difference to ``max_criterion_evaluations`` is that one\n      iteration might need several criterion evaluations, for example in a line\n      search or to determine if the trust region radius has to be shrunk.\n    - **stopping.max_wall_time_seconds** (float): Maximum number of walltime clock seconds.\n    - **stopping.max_cpu_time** (float): Maximum number of CPU seconds.\n      A limit on CPU seconds that Ipopt can use to solve one problem.\n      If during the convergence check this limit is exceeded, Ipopt will\n      terminate with a corresponding message. The valid range for this\n      real option is 0 < max_cpu_time and its default value is :math:`1e+20` .\n\n    - **dual_inf_tol** (float): Desired threshold for the dual infeasibility.\n      Absolute tolerance on the dual infeasibility. Successful termination\n      requires that the max-norm of the (unscaled) dual infeasibility is less\n      than this threshold. 
The valid range for this real option is 0 <\n      dual_inf_tol and its default value is 1.\n    - **constr_viol_tol** (float): Desired threshold for the constraint and bound\n      violation. Absolute tolerance on the constraint and variable bound\n      violation. Successful termination requires that the max-norm of the\n      (unscaled) constraint violation is less than this threshold.\n      If option ``bound_relax_factor``  is not zero 0, then Ipopt relaxes given variable bounds.\n      The value of constr_viol_tol is used to restrict the absolute amount of this bound\n      relaxation. The valid range for this real option is 0 < constr_viol_tol\n      and its default value is 0.0001.\n    - **compl_inf_tol** (float): Desired threshold for the complementarity conditions.\n      Absolute tolerance on the complementarity. Successful termination\n      requires that the max-norm of the (unscaled) complementarity is\n      less than this threshold. The valid range for this real option is\n      0 < text{compl_inf_tol and its default is 0.0001.\n    - **acceptable_iter** (int): Number of \"acceptable\" iterates before termination.\n      If the algorithm encounters this many successive \"acceptable\"\n      iterates (see above on the acceptable heuristic), it terminates, assuming\n      that the problem has been solved to best possible accuracy given\n      round-off. If it is set to zero, this heuristic is disabled. The valid\n      range for this integer option is 0 ≤ acceptable_iter.\n    - **acceptable_tol** (float):\"Acceptable\" convergence tolerance (relative).\n      Determines which (scaled) overall optimality error is considered to be \"acceptable\".\n      The valid range for this real option is 0 < acceptable_tol.\n    - **acceptable_dual_inf_tol** (float):  \"Acceptance\" threshold for the dual\n      infeasibility. Absolute tolerance on the dual infeasibility. 
\"Acceptable\"\n      termination requires that the (max-norm of the unscaled) dual\n      infeasibility is less than this threshold; see also  ``acceptable_tol`` . The\n      valid range for this real option is 0 < acceptable_dual_inf_tol and its\n      default value is :math:`1e+10.`\n    - **acceptable_constr_viol_tol** (float): \"Acceptance\" threshold for the constraint violation.\n      Absolute tolerance on the constraint violation.\n      \"Acceptable\" termination requires that the max-norm\n      of the (unscaled) constraint violation is less than this threshold; see\n      also  ``acceptable_tol`` . The valid range for this real option is 0 <\n      acceptable_constr_viol_tol and its default value is 0.01.\n    - **acceptable_compl_inf_tol** (float): \"Acceptance\" threshold for the\n      complementarity conditions. Absolute tolerance on the complementarity.\n      \"Acceptable\" termination requires that the max-norm of the (unscaled)\n      complementarity is less than this threshold; see also  ``acceptable_tol`` . The\n      valid range for this real option is 0 < text{acceptable_compl_inf_tol and its\n      default value is 0.01.\n    - **acceptable_obj_change_tol** (float): \"Acceptance\" stopping criterion based on\n      objective function change. If the relative\n      change of the objective function (scaled by :math:`max(1,|f(x)|)` ) is less than\n      this value, this part of the acceptable tolerance termination is\n      satisfied; see also  ``acceptable_tol`` . This is useful for the quasi-Newton\n      option, which has trouble to bring down the dual infeasibility. 
The valid\n      range for this real option is 0 ≤ acceptable_obj_change_tol and its\n      default value is :math:`1e+20` .\n\n    - **diverging_iterates_tol** (float): Threshold for maximal value of primal iterates.\n      If any component of the primal iterates exceeded this value (in\n      absolute terms), the optimization is aborted with the exit message that\n      the iterates seem to be diverging. The valid range for this real option is\n      0 < diverging_iterates_tol and its default value is :math:`1e+20` .\n    - **nlp_lower_bound_inf** (float): any bound less than or equal to this value will be\n      considered -inf (i.e. not lower bounded). The valid range for this real\n      option is unrestricted and its default value is :math:`-1e+19` .\n    - **nlp_upper_bound_inf** (float): any bound greater than or equal to this value will be\n      considered :math:`+\infty` (i.e. not upper bounded). The valid range for this real\n      option is unrestricted and its default value is :math:`1e+19` .\n    - **fixed_variable_treatment (str)**: Determines how fixed variables should be\n      handled. The main difference between those options is that the starting\n      point in the \"make_constraint\" case still has the fixed variables at their\n      given values, whereas in the case \"make_parameter(_nodual)\" the functions\n      are always evaluated with the fixed values for those variables. Also, for\n      \"relax_bounds\", the fixing bound constraints are relaxed (according to\n      ``bound_relax_factor`` ). For all but \"make_parameter_nodual\", bound\n      multipliers are computed for the fixed variables. The default value for\n      this string option is \"make_parameter\". 
Possible values:\n\n             - \"make_parameter\": Remove fixed variable from optimization variables\n             - \"make_parameter_nodual\": Remove fixed variable from optimization\n               variables and do not compute bound multipliers for fixed variables\n             - \"make_constraint\": Add equality constraints fixing variables\n             - \"relax_bounds\": Relax fixing bound constraints\n    - **dependency_detector (str)**: Indicates which linear solver\n      should be used to detect linearly dependent equality constraints. This is\n      experimental and does not work well. The default value for this string\n      option is \"none\". Possible values:\n\n            - \"none\" or None: don't check; no extra work at beginning\n            - \"mumps\": use MUMPS\n            - \"wsmp\": use WSMP\n            - \"ma28\": use MA28\n    - **dependency_detection_with_rhs (str or bool)**: Indicates if the right hand\n      sides of the constraints should be considered in addition to gradients\n      during dependency detection. The default value for this string option is\n      \"no\". Possible values: 'yes', 'no', True, False.\n\n    - **kappa_d** (float): Weight for linear damping term (to handle one-sided bounds).\n      See Section 3.7 in implementation paper. The valid range for this\n      real option is 0 ≤ kappa_d and its default value is :math:`1e-05` .\n    - **bound_relax_factor** (float): Factor for initial relaxation of the bounds.\n      Before start of the optimization, the bounds given by the user are\n      relaxed. This option sets the factor for this relaxation. Additional, the\n      constraint violation tolerance  ``constr_viol_tol``  is used to bound the\n      relaxation by an absolute value. If it is set to zero, then then bounds\n      relaxation is disabled. See Eqn.(35) in implementation paper. 
Note that\n      the constraint violation reported by Ipopt at the end of the solution\n      process does not include violations of the original (non-relaxed) variable\n      bounds. See also option honor_original_bounds. The valid range for this\n      real option is 0 ≤ bound_relax_factor  and its default value is :math:`1e-08` .\n    - **honor_original_bounds** (str or bool): Indicates whether final points should\n      be projected into original bunds. Ipopt might relax the bounds during the\n      optimization (see, e.g., option  ``bound_relax_factor`` ). This option\n      determines whether the final point should be projected back into the\n      user-provide original bounds after the optimization. Note that violations\n      of constraints and complementarity reported by Ipopt at the end of the\n      solution process are for the non-projected point. The default value for\n      this string option is \"no\". Possible values: 'yes', 'no', True, False\n\n    - **check_derivatives_for_naninf (str)**: whether to check for NaN / inf in the\n      derivative matrices.\n      Activating this option will cause an error if an\n      invalid number is detected in the constraint Jacobians or the Lagrangian\n      Hessian. If this is not activated, the test is skipped, and the algorithm\n      might proceed with invalid numbers and fail. If test is activated and an\n      invalid number is detected, the matrix is written to output with\n      print_level corresponding to J_MORE_DETAILED; so beware of large output!\n      The default value for this string option is \"no\".\n    - **jac_c_constant (str or bool)**: Indicates whether to assume that all equality\n      constraints are linear Activating this option will cause Ipopt to ask\n      for the Jacobian of the equality constraints only once from the NLP and\n      reuse this information later. The default value for this string option\n      is \"no\". 
Possible values: yes, no, True, False.\n    - **jac_d_constant (str or bool)**: Indicates whether to\n      assume that all inequality constraints are linear Activating this option\n      will cause Ipopt to ask for the Jacobian of the inequality constraints\n      only once from the NLP and reuse this information later. The default value\n      for this string option is \"no\". Possible values: yes, no, True, False\n    - **hessian_constant (str or bool)**: Indicates whether to assume the problem is a QP\n      (quadratic objective, linear constraints). Activating this option will\n      cause Ipopt to ask for the Hessian of the Lagrangian function only once\n      from the NLP and reuse this information later. The default value for this\n      string option is \"no\". Possible values: yes, no, True, False.\n\n    - **nlp_scaling_method (str)**: Select the technique used for scaling the NLP.\n      Selects the technique used for scaling the problem internally before it is\n      solved. For user-scaling, the parameters come from the NLP. If you are\n      using AMPL, they can be specified through suffixes (\"scaling_factor\") The\n      default value for this string option is \"gradient-based\". Possible values:\n\n            - \"none\": no problem scaling will be performed - \"user-scaling\": scaling\n              parameters will come from the user - \"gradient-based\":\n              scale the problem so the maximum gradient at the starting point is\n              ``nlp_scaling_max_gradient`` .\n            - \"equilibration-based\": scale the problem so that first derivatives are\n              of order 1 at random points (uses Harwell routine MC19)\n    - **obj_scaling_factor** (float): Scaling factor for the objective function.\n      This option sets a scaling factor for the objective function. The\n      scaling is seen internally by Ipopt but the unscaled objective is\n      reported in the console output. 
If additional scaling parameters are\n      computed (e.g. user-scaling or gradient-based), both factors are\n      multiplied. If this value is chosen to be negative, Ipopt will maximize\n      the objective function instead of minimizing it. The valid range for\n      this real option is unrestricted and its default value is 1.\n    - **nlp_scaling_max_gradient** (float): Maximum gradient after NLP scaling.\n      This is the gradient scaling cut-off. If the maximum gradient is above\n      this value, then gradient based scaling will be performed. Scaling\n      parameters are calculated to scale the maximum gradient back to this\n      value. (This is g_max in Section 3.8 of the implementation paper.) Note:\n      This option is only used if  ``nlp_scaling_method``  is chosen as\n      \"gradient-based\". The valid range for this real option is :math:`0 <\n      \\text{nlp_scaling_max_gradient}` and its default value is 100.\n    - **nlp_scaling_obj_target_gradient** (float): advanced! Target value for\n      objective function gradient size. If a positive number is chosen, the\n      scaling factor for the objective function is computed so that the\n      gradient has the max norm of the given size at the starting point. This\n      overrides  ``nlp_scaling_max_gradient``  for the objective function. The valid\n      range for this real option is 0 ≤ nlp_scaling_obj_target_gradient and\n      its default value is 0.\n    - **nlp_scaling_constr_target_gradient** (float): arget value for constraint function gradient size. If a positive number is chosen, the scaling factors for the constraint functions are computed so that the gradient has the max norm of the given size at the starting point. This overrides nlp_scaling_max_gradient for the constraint functions. The valid range for this real option is 0 ≤ nlp_scaling_constr_target_gradient and its default value is 0.\n    - **nlp_scaling_min_value** (float): Minimum value of\n      gradient-based scaling values. 
This is the lower bound for the scaling\n      factors computed by gradient-based scaling method. If some derivatives\n      of some functions are huge, the scaling factors will otherwise become\n      very small, and the (unscaled) final constraint violation, for example,\n      might then be significant. Note: This option is only used if\n      ``nlp_scaling_method`` is chosen as \"gradient-based\". The valid range for\n      this real option is 0 ≤ nlp_scaling_min_value and its default value is\n      :math:`1e-08`.\n\n    - **bound_push** (float): Desired minimum absolute distance from the initial\n      point to bound. Determines how much the initial point might have to be\n      modified in order to be sufficiently inside the bounds (together with\n      ``bound_frac`` ). (This is kappa_1 in Section 3.6 of implementation paper.)\n      The valid range for this real option is 0 < bound_push and its default\n      value is 0.01.\n    - **bound_frac** (float): Desired minimum relative distance\n      from the initial point to bound. Determines how much the initial point\n      might have to be modified in order to be sufficiently inside the bounds\n      (together with \"bound_push\"). (This is kappa_2 in Section 3.6 of\n      implementation paper.) The valid range for this real option is 0 <\n      bound_frac ≤ 0.5 and its default value is 0.01.\n    - **slack_bound_push** (float): Desired minimum absolute distance from the\n      initial slack to bound. Determines how much the initial slack\n      variables might have to be modified in order to be sufficiently inside the inequality bounds\n      (together with  ``slack_bound_frac`` ). (This is kappa_1 in Section 3.6 of\n      implementation paper.) The valid range for this real option is 0 <\n      slack_bound_push and its default value is 0.01.\n    - **slack_bound_frac** (float): Desired minimum relative distance from the\n      initial slack to bound. 
Determines how much the initial slack\n      variables might have to be modified in order to be sufficiently inside the inequality bounds\n      (together with  ``slack_bound_push`` ). (This is kappa_2 in Section 3.6 of\n      implementation paper.) The valid range for this real option is 0 <\n      slack_bound_frac ≤ 0.5 and its default value is 0.01.\n    - **constr_mult_init_max** (float): Maximum allowed least-square guess of\n      constraint multipliers. Determines how large the initial least-square\n      guesses of the constraint multipliers are allowed to be (in max-norm).\n      If the guess is larger than this value, it is discarded and all\n      constraint multipliers are set to zero. This options is also used when\n      initializing the restoration phase. By default,\n      \"resto.constr_mult_init_max\" (the one used in RestoIterateInitializer)\n      is set to zero. The valid range for this real option is 0 ≤\n      constr_mult_init_max and its default value is 1000.\n    - **bound_mult_init_val** (float): Initial value for the bound multipliers.\n      All dual variables corresponding to bound constraints are initialized\n      to this value. The valid range for this real option is\n      0 < bound_mult_init_val and its default value is 1.\n    - **bound_mult_init_method (str)**: Initialization method\n      for bound multipliers This option defines how the iterates for the bound\n      multipliers are initialized. If \"constant\" is chosen, then all bound\n      multipliers are initialized to the value of  ``bound_mult_init_val``. If\n      \"mu-based\" is chosen, the each value is initialized to the the value of\n      \"mu_init\" divided by the corresponding slack variable. This latter\n      option might be useful if the starting point is close to the optimal\n      solution. 
The default value for this string option is \"constant\".\n      Possible values:\n\n            - \"constant\": set all bound multipliers to the value of  ``bound_mult_init_val``\n            - \"mu-based\": initialize to mu_init/x_slack\n    - **least_square_init_primal (str or bool)**:\n      Least square initialization of the primal variables. If set to\n      yes, Ipopt ignores the user provided point and solves a least square\n      problem for the primal variables (x and s) to fit the linearized\n      equality and inequality constraints.This might be useful if the user\n      doesn't know anything about the starting point, or for solving an LP or\n      QP. The default value for this string option is \"no\".  Possible values:\n\n            - \"no\": take user-provided point\n            - \"yes\": overwrite user-provided point with least-square estimates\n    - **least_square_init_duals (str or bool)**: Least square\n      initialization of all dual variables If set to yes, Ipopt tries to\n      compute least-square multipliers (considering ALL dual variables). If\n      successful, the bound multipliers are possibly corrected to be at\n      least  ``bound_mult_init_val`` . This might be useful if the user doesn't\n      know anything about the starting point, or for solving an LP or QP.\n      This overwrites option  ``bound_mult_init_method`` . The default value for\n      this string option is \"no\". Possible values:\n\n            - \"no\": use  ``bound_mult_init_val``  and least-square equality constraint multipliers\n            - \"yes\": overwrite user-provided point with least-square estimates\n    - **warm_start_init_point (str or bool)**: Warm-start for initial point\n      Indicates whether this optimization should use a warm start\n      initialization, where values of primal and dual variables are given\n      (e.g., from a previous optimization of a related problem.) The default\n      value for this string option is \"no\". 
Possible values:\n\n            - \"no\" or False: do not use the warm start initialization\n            - \"yes\" or True: use the warm start initialization\n    - **warm_start_same_structure (str or bool)**:\n      Advanced feature! Indicates whether a problem with a structure\n      identical t the previous one is to be solved. If enabled, then the\n      algorithm assumes that an NLP is now to be solved whose structure is\n      identical to one that already was considered (with the same NLP\n      object). The default value for this string option is \"no\". Possible\n      values: yes, no, True, False.\n    - **warm_start_bound_push** (float): same as\n      ``bound_push`` for the regular initializer. The valid range for this real\n      option is 0 < warm_start_bound_push and its default value is 0.001.\n    - **warm_start_bound_frac** (float): same as  ``bound_frac``  for the regular\n      initializer The valid range for this real option is 0 <\n      warm_start_bound_frac ≤ 0.5 and its default value is 0.001.\n    - **warm_start_slack_bound_push** (float): same as  ``slack_bound_push``  for the\n      regular initializer The valid range for this real option is 0 <\n      warm_start_slack_bound_push and its default value is 0.001.\n    - **warm_start_slack_bound_frac** (float): same as  ``slack_bound_frac``  for the\n      regular initializer The valid range for this real option is 0 <\n      warm_start_slack_bound_frac ≤ 0.5 and its default value is 0.001.\n    - **warm_start_mult_bound_push** (float): same as  ``mult_bound_push``  for the\n      regular initializer The valid range for this real option is 0 <\n      warm_start_mult_bound_push and its default value is 0.001.\n    - **warm_start_mult_init_max** (float): Maximum initial value for the\n      equality multipliers. 
The valid range for this real option is\n      unrestricted and its default value is :math:`1e+06` .\n    - **warm_start_entire_iterate (str or bool)**: Tells algorithm whether to use the GetWarmStartIterate\n      method in the NLP. The default value for this string option is \"no\".\n      Possible values:\n\n            - \"no\": call GetStartingPoint in the NLP\n            - \"yes\": call GetWarmStartIterate in the NLP\n    - **warm_start_target_mu** (float): Advanced and experimental! The valid range\n      for this real option is unrestricted and its default value is 0.\n\n    - **option_file_name (str)**: File name of options file. By default, the name\n      of the Ipopt options file is \"ipopt.opt\" - or something else if\n      specified in the IpoptApplication::Initialize call. If this option is\n      set by SetStringValue BEFORE the options file is read, it specifies the\n      name of the options file. It does not make any sense to specify this\n      option within the options file. Setting this option to an empty string\n      disables reading of an options file.\n    - **replace_bounds (bool or str)**:\n      Whether all variable bounds should be replaced by inequality\n      constraints. This option must be set for the inexact algorithm. The\n      default value for this string option is \"no\". Possible values: \"yes\",\n      \"no\", True, False.\n    - **skip_finalize_solution_call (str or bool)**: Whether a\n      call to NLP::FinalizeSolution after optimization should be suppressed.\n      In some Ipopt applications, the user might want to call the\n      FinalizeSolution method separately. Setting this option to \"yes\" will\n      cause the IpoptApplication object to suppress the default call to that\n      method. The default value for this string option is \"no\". 
Possible\n      values: \"yes\", \"no\", True, False\n    - **timing_statistics (str or bool)**:\n      Indicates whether to measure time spend in components of Ipopt and NLP\n      evaluation.  The overall algorithm time is unaffected by this option.\n      The default value for this string option is \"no\". Possible values:\n      \"yes\", \"no\", True, False\n\n    - **mu_max_fact** (float): Factor for initialization of maximum value for\n      barrier parameter. This option determines the upper bound on the barrier\n      parameter. This upper bound is computed as the average complementarity\n      at the initial point times the value of this option. (Only used if\n      option \"mu_strategy\" is chosen as \"adaptive\".) The valid range for this\n      real option is 0 < mu_max_fact and its default value is 1000.\n    - **mu_max** (float): Maximum value for barrier parameter. This option specifies an\n      upper bound on the barrier parameter in the adaptive mu selection mode.\n      If this option is set, it overwrites the effect of mu_max_fact. (Only\n      used if option \"mu_strategy\" is chosen as \"adaptive\".) The valid range\n      for this real option is 0 < mu_max and its default value is\n      100000.\n    - **mu_min** (float): Minimum value for barrier parameter. This option\n      specifies the lower bound on the barrier parameter in the adaptive mu\n      selection mode. By default, it is set to the minimum of :math:`1e-11`  and\n      min( ``tol`` , ``compl_inf_tol`` )/( ``barrier_tol_factor`` +1), which should be a\n      reasonable value. (Only used if option  ``mu_strategy``  is chosen as\n      \"adaptive\".) The valid range for this real option is 0 < mu_min and its\n      default value is :math:`1e-11` .\n    - **adaptive_mu_globalization (str)**: Globalization\n      strategy for the adaptive mu selection mode. 
To achieve global\n      convergence of the adaptive version, the algorithm has to switch to the\n      monotone mode (Fiacco-McCormick approach) when convergence does not seem\n      to appear. This option sets the criterion used to decide when to do this\n      switch. (Only used if option \"mu_strategy\" is chosen as \"adaptive\".) The\n      default value for this string option is \"obj-constr-filter\". Possible\n      values:\n\n            - \"kkt-error\": nonmonotone decrease of kkt-error\n            - \"obj-constr-filter\": 2-dim filter for objective and constraint violation\n            - \"never-monotone-mode\": disables globalization.\n    - **adaptive_mu_kkterror_red_iters** (float): advanced feature! Maximum\n      number of iterations requiring sufficient progress. For the\n      \"kkt-error\" based globalization strategy, sufficient progress must be\n      made for \"adaptive_mu_kkterror_red_iters\" iterations. If this number\n      of iterations is exceeded, the globalization strategy switches to the\n      monotone mode. The valid range for this integer option is 0 ≤\n      adaptive_mu_kkterror_red_iters and its default value is 4.\n    - **adaptive_mu_kkterror_red_fact** (float): advanced feature! Sufficient\n      decrease factor for \"kkt-error\" globalization strategy. For the\n      \"kkt-error\" based globalization strategy, the error must decrease by\n      this factor to be deemed sufficient decrease. The valid range for this\n      real option is 0 < adaptive_mu_kkterror_red_fact < 1 and its default\n      value is 0.9999.\n    - **filter_margin_fact** (float): advanced feature! Factor\n      determining width of margin for obj-constr-filter adaptive\n      globalization strategy. 
When using the adaptive globalization\n      strategy, \"obj-constr-filter\", sufficient progress for a filter entry\n      is defined as follows: (new obj) < (filter obj) -\n      filter_margin_fact*(new constr-viol) OR (new constr-viol) < (filter\n      constr-viol) - filter_margin_fact*(new constr-viol). For the\n      description of the \"kkt-error-filter\" option see  ``filter_max_margin`` .\n      The valid range for this real option is 0 < filter_margin_fact < 1 and\n      its default value is :math:`10-05` .\n    - **filter_max_margin** (float): advanced\n      feature! Maximum width of margin in obj-constr-filter adaptive\n      globalization strategy. The valid range for this real option is 0 <\n      filter_max_margin and its default value is 1.\n    - **adaptive_mu_restore_previous_iterate (str or bool)**: advanced feature!\n      Indicates if the previous accepted iterate should be restored if the\n      monotone mode is entered. When the globalization strategy for the\n      adaptive barrier algorithm switches to the monotone mode, it can\n      either start from the most recent iterate (no), or from the last\n      iterate that was accepted (yes). The default value for this string\n      option is \"no\". Possible values: \"yes\", \"no\", True, False\n    - **adaptive_mu_monotone_init_factor** (float): advanced feature! Determines\n      the initial value of the barrier parameter when switching to the\n      monotone mode. When the globalization strategy for the adaptive\n      barrier algorithm switches to the monotone mode and fixed_mu_oracle is\n      chosen as \"average_compl\", the barrier parameter is set to the current\n      average complementarity times the value of\n      \"adaptive_mu_monotone_init_factor\". The valid range for this real\n      option is 0 < adaptive_mu_monotone_init_factor and its default value\n      is 0.8.\n    - **adaptive_mu_kkt_norm_type (str)**: advanced! 
Norm used for the KKT\n      error in the adaptive mu globalization strategies. When computing the\n      KKT error for the globalization strategies, the norm to be used is\n      specified with this option. Note, this option is also used in the\n      QualityFunctionMuOracle. The default value for this string option is\n      \"2-norm-squared\". Possible values:\n\n            - \"1-norm\": use the 1-norm (abs sum)\n            - \"2-norm-squared\": use the 2-norm squared (sum of squares)\n            - \"max-norm\": use the infinity norm (max)\n            - \"2-norm\": use 2-norm\n    - **mu_strategy (str)**: Update strategy for barrier\n      parameter. Determines which barrier parameter update strategy is to be\n      used. The default value for this string option is \"monotone\". Possible values:\n\n            - \"monotone\": use the monotone (Fiacco-McCormick) strategy\n            - \"adaptive\": use the adaptive update strategy\n    - **mu_oracle (str)**: Oracle for a new barrier parameter in the adaptive strategy.\n      Determines how a new barrier parameter is computed in each \"free-mode\" iteration of the\n      adaptive barrier parameter strategy. (Only considered if \"adaptive\" is\n      selected for option \"mu_strategy\"). The default value for this string\n      option is \"quality-function\". Possible values:\n\n            - \"probing\": Mehrotra's probing heuristic\n            - \"loqo\": LOQO's centrality rule\n            - \"quality-function\": minimize a quality function\n    - **fixed_mu_oracle (str)**:\n      Oracle for the barrier parameter when switching to fixed mode.\n      Determines how the first value of the barrier parameter should be\n      computed when switching to the \"monotone mode\" in the adaptive\n      strategy. (Only considered if \"adaptive\" is selected for option\n      \"mu_strategy\".) The default value for this string option is\n      \"average_compl\". 
Possible values:\n\n            - \"probing\": Mehrotra's probing heuristic\n            - \"loqo\": LOQO's centrality rule\n            - \"quality-function\": minimize a quality function\n            - \"average_compl\": base on current average complementarity\n    - **mu_init** (float): Initial value for the barrier parameter. This option\n      determines the initial value for the barrier parameter (mu). It is\n      only relevant in the monotone, Fiacco-McCormick version of the\n      algorithm. (i.e., if \"mu_strategy\" is chosen as \"monotone\") The valid\n      range for this real option is 0 < mu_init and its default value is 0.1.\n    - **barrier_tol_factor** (float): Factor for mu in barrier stop test.\n      The convergence tolerance for each barrier problem in the monotone\n      mode is the value of the barrier parameter times \"barrier_tol_factor\".\n      This option is also used in the adaptive mu strategy during the\n      monotone mode. This is kappa_epsilon in implementation paper. The\n      valid range for this real option is 0 < barrier_tol_factor and its\n      default value is 10.\n    - **mu_linear_decrease_factor** (float): Determines\n      linear decrease rate of barrier parameter. For the Fiacco-McCormick\n      update procedure the new barrier parameter mu is obtained by taking\n      the minimum of mu*\"mu_linear_decrease_factor\" and\n      mu^\"superlinear_decrease_power\". This is kappa_mu in implementation\n      paper. This option is also used in the adaptive mu strategy during the\n      monotone mode. The valid range for this real option is 0 <\n      mu_linear_decrease_factor < 1 and its default value is 0.2.\n    - **mu_superlinear_decrease_power** (float): Determines superlinear decrease\n      rate of barrier parameter. 
For the Fiacco-McCormick update procedure\n      the new barrier parameter mu is obtained by taking the minimum of\n      mu*\"mu_linear_decrease_factor\" and mu^\"superlinear_decrease_power\".\n      This is theta_mu in implementation paper. This option is also used in\n      the adaptive mu strategy during the monotone mode. The valid range for\n      this real option is 1 < mu_superlinear_decrease_power < 2 and its\n      default value is 1.5.\n    - **mu_allow_fast_monotone_decrease (str or bool)**:\n      Advanced feature! Allow skipping of barrier problem if barrier test i\n      already met. The default value for this string option is \"yes\".\n      Possible values:\n\n            - \"no\": Take at least one iteration per barrier problem even if the\n              barrier test is already met for the updated barrier parameter\n            - \"yes\": Allow fast decrease of mu if barrier test it met\n    - **tau_min** (float): Advanced feature! Lower bound on fraction-to-the-boundary\n      parameter tau. This is tau_min in the implementation paper. This\n      option is also used in the adaptive mu strategy during the monotone\n      mode. The valid range for this real option is 0 < tau_min < 1 and its\n      default value is 0.99.\n    - **sigma_max** (float): Advanced feature! Maximum\n      value of the centering parameter. This is the upper bound for the\n      centering parameter chosen by the quality function based barrier\n      parameter update. Only used if option \"mu_oracle\" is set to\n      \"quality-function\". The valid range for this real option is 0 <\n      sigma_max and its default value is 100.\n\n    - **sigma_min** (float): Advanced\n      feature! Minimum value of the centering parameter. This is the lower\n      bound for the centering parameter chosen by the quality function based\n      barrier parameter update. Only used if option \"mu_oracle\" is set to\n      \"quality-function\". 
The valid range for this real option is 0 ≤\n      sigma_min and its default value is :math:`10-06` .\n    - **quality_function_norm_type (str)**: Advanced feature.\n      Norm used for components of the quality\n      function. Only used if option \"mu_oracle\" is set to\n      \"quality-function\". The default value for this string option is\n      \"2-norm-squared\". Possible values:\n\n            - \"1-norm\": use the 1-norm (abs sum)\n            - \"2-norm-squared\": use the 2-norm squared (sum of squares)\n            - \"max-norm\": use the infinity norm (max)\n            - \"2-norm\": use 2-norm\n    - **quality_function_centrality (str)**: Advanced\n      feature. The penalty term for centrality that is included in quality\n      function. This determines whether a term is added to the quality\n      function to penalize deviation from centrality with respect to\n      complementarity. The complementarity measure here is the xi in the\n      Loqo update rule. Only used if option \"mu_oracle\" is set to\n      \"quality-function\". The default value for this string option is\n      \"none\". Possible values:\n\n            - \"none\": no penalty term is added\n            - \"log\": complementarity * the log of the centrality measure\n            - \"reciprocal\": complementarity * the reciprocal of the centrality\n              measure\n            - \"cubed-reciprocal\": complementarity * the reciprocal of the centrality\n              measure cubed\n    - **quality_function_balancing_term (str)**: Advanced\n      feature. The balancing term included in the quality function for\n      centrality. This determines whether a term is added to the quality\n      function that penalizes situations where the complementarity is much\n      smaller than dual and primal infeasibilities. Only used if option\n      \"mu_oracle\" is set to \"quality-function\". The default value for this\n      string option is \"none\". 
Possible values:\n\n            - \"none\": no balancing term is adde\n            - \"cubic\":  :math:`max(0,\\max(\\text{dual_inf},\\text{primal_inf})-\\text{compl})^3`\n    - **quality_function_max_section_steps** (int): Maximum number of search\n      steps during direct search procedure determining the optimal centering\n      parameter. The golden section search is performed for the quality\n      function based mu oracle. Only used if option \"mu_oracle\" is set to\n      \"quality-function\". The valid range for this integer option is 0 ≤\n      quality_function_max_section_steps and its default value is 8.\n    - **quality_function_section_sigma_tol** (float): advanced feature!\n      Tolerance for the section search procedure determining the optimal\n      centering parameter (in sigma space). The golden section search is\n      performed for the quality function based mu oracle. Only used if\n      option \"mu_oracle\" is set to \"quality-function\". The valid range for\n      this real option is 0 ≤ quality_function_section_sigma_tol < 1 and its\n      default value is 0.01.\n    - **quality_function_section_qf_tol** (float):\n      advanced feature! Tolerance for the golden section search procedure\n      determining the optimal centering parameter (in the function value\n      space). The golden section search is performed for the quality\n      function based mu oracle. Only used if option \"mu_oracle\" is set to\n      \"quality-function\". The valid range for this real option is 0 ≤\n      quality_function_section_qf_tol < 1 and its default value is 0.\n\n    - **line_search_method (str)**: Advanced feature. Globalization method used in\n      backtracking line search. Only the \"filter\" choice is officially\n      supported. But sometimes, good results might be obtained with the other\n      choices. The default value for this string option is \"filter\". 
Possible values:\n\n             - \"filter\": Filter method\n             - \"cg-penalty\": Chen-Goldfarb penalty function\n             - \"penalty\": Standard penalty function\n    - **alpha_red_factor** (float): Advanced feature.\n      Fractional reduction of the trial step size\n      in the backtracking lne search. At every step of the backtracking line\n      search, the trial step size is reduced by this factor. The valid range\n      for this real option is 0 < alpha_red_factor < 1 and its default value\n      is 0.5.\n    - **accept_every_trial_step (str or bool)**: Always accept the first\n      trial step. Setting this option to \"yes\" essentially disables the line\n      search and makes the algorithm take aggressive steps, without global\n      convergence guarantees. The default value for this string option is\n      \"no\". Possible values: \"yes\", \"no\", True, False.\n    - **accept_after_max_steps** (float): advanced feature.\n      Accept a trial point after maximal this\n      number of steps een if it does not satisfy line search conditions.\n      Setting this to -1 disables this option. The valid range for this\n      integer option is -1 ≤ accept_after_max_steps and its default value is -1.\n    - **alpha_for_y (str)**: Method to determine the step size for constraint\n      multipliers (alpha_y) . The default value for this string option is\n      \"primal\". 
Possible values:\n\n            - \"primal\": use primal step size\n            - \"bound-mult\": use step size for the bound multipliers (good for LPs)\n            - \"min\": use the min of primal and bound multipliers\n            - \"max\": use the max of primal and bound multipliers\n            - \"full\": take a full step of size one\n            - \"min-dual-infeas\": choose step size minimizing new dual infeasibility\n            - \"safer-min-dual-infeas\": like \"min_dual_infeas\", but safeguarded by\n              \"min\" and \"max\"\n            - \"primal-and-full\": use the primal step size, and full step if\n              delta_x <= alpha_for_y_tol\n            - \"dual-and-full\": use the dual step size, and full step if\n              delta_x <= alpha_for_y_tol\n            - \"acceptor\": Call LSAcceptor to get step size for y\n    - **alpha_for_y_tol** (float): Tolerance for\n      switching to full equality multiplier steps. This is only relevant if\n      \"alpha_for_y\" is chosen \"primal-and-full\" or \"dual-and-full\". The step\n      size for the equality constraint multipliers is taken to be one if the\n      max-norm of the primal step is less than this tolerance. The valid range\n      for this real option is 0 ≤ alpha_for_y_tol and its default value is 10.\n    - **tiny_step_tol** (float): Advanced feature. Tolerance for detecting\n      numerically insignificant steps. If the search direction in the primal\n      variables (x and s) is, in relative terms for each component, less than\n      this value, the algorithm accepts the full step without line search. If\n      this happens repeatedly, the algorithm will terminate with a\n      corresponding exit message. The default value is 10 times machine\n      precision. The valid range for this real option is 0 ≤ tiny_step_tol and\n      its default value is 2.22045 · :math:`1e-15`.\n    - **tiny_step_y_tol** (float): Advanced\n      feature. 
Tolerance for quitting because of numerically insignificant\n      steps. If the search direction in the primal variables (x and s) is, in\n      relative terms for each component, repeatedly less than tiny_step_tol,\n      and the step in the y variables is smaller than this threshold, the\n      algorithm will terminate. The valid range for this real option is 0 ≤\n      tiny_step_y_tol and its default value is 0.01.\n\n    - **watchdog_shortened_iter_trigger** (int): Number of shortened iterations\n      that trigger the watchdog. If the number of successive iterations in\n      which the backtracking line search did not accept the first trial point\n      exceeds this number, the watchdog procedure is activated. Choosing \"0\"\n      here disables the watchdog procedure. The valid range for this integer\n      option is 0 ≤ watchdog_shortened_iter_trigger and its default value is\n      10.\n    - **watchdog_trial_iter_max** (int): Maximum number of watchdog\n      iterations. This option determines the number of trial iterations\n      allowed before the watchdog procedure is aborted and the algorithm\n      returns to the stored point. The valid range for this integer option\n      is 1 ≤ watchdog_trial_iter_max and its default value is 3.\n      theta_max_fact (float): Advanced feature. Determines upper bound for\n      constraint violation in the filter. The algorithmic parameter\n      theta_max is determined as theta_max_fact times the maximum of 1 and\n      the constraint violation at initial point. Any point with a\n      constraint violation larger than theta_max is unacceptable to the\n      filter (see Eqn. (21) in the implementation paper). The valid range\n      for this real option is 0 < theta_max_fact and its default value is\n      10000.\n    - **theta_min_fact** (float): advanced feature. Determines\n      constraint violation threshold in the switching rule. 
The\n      algorithmic parameter theta_min is determined as\n      theta_min_fact times the maximum of 1 and the constraint\n      violation at initial point. The switching rules treats an\n      iteration as an h-type iteration whenever the current\n      constraint violation is larger than theta_min (see paragraph\n      before Eqn. (19) in the implementation paper). The valid\n      range for this real option is 0 < theta_min_fact and its\n      default value is 0.0001.\n    - **eta_phi** (float): advanced!\n      Relaxation factor in the Armijo condition. See Eqn. (20) in\n      the implementation paper. The valid range for this real\n      option is 0 < eta_phi < 0.5 and its default value is :math:`1e-08`.\n    - **delta** (float): advanced! Multiplier for constraint violation\n      in the switching rule. See Eqn. (19) in the implementation\n      paper. The valid range for this real option is 0 < delta and\n      its default value is 1.\n    - **s_phi** (float): advanced! Exponent for\n      linear barrier function model in the switching rule. See Eqn.\n      (19) in the implementation paper. The valid range for this\n      real option is 1 < s_phi and its default value is 2.3.\n    - **s_theta** (float): advanced! Exponent for current constraint\n      violation in the switching rule. See Eqn. (19) in the\n      implementation paper. The valid range for this real option is\n      1 < s_theta and its default value is 1.1.\n    - **gamma_phi** (float):\n      advanced! Relaxation factor in the filter margin for the\n      barrier function. See Eqn. (18a) in the implementation paper.\n      The valid range for this real option is 0 < gamma_phi < 1 and\n      its default value is :math:`1e-08`.\n    - **gamma_theta** (float): advanced!\n      Relaxation factor in the filter margin for the constraint\n      violation. See Eqn. (18b) in the implementation paper. 
The\n      valid range for this real option is 0 < gamma_theta < 1 and\n      its default value is :math:`1e-05`.\n    - **alpha_min_frac** (float): advanced!\n      Safety factor for the minimal step size (before switching to\n      restoration phase). This is gamma_alpha in Eqn. (20) in the\n      implementation paper. The valid range for this real option is\n      0 < alpha_min_frac < 1 and its default value is 0.05.\n    - **max_soc** (int): Maximum number of second order correction trial steps\n      at each iteration. Choosing 0 disables the second order\n      corrections. This is p^{max} of Step A-5.9 of Algorithm A in\n      the implementation paper. The valid range for this integer\n      option is 0 ≤ max_soc and its default value is 4.\n    - **kappa_soc** (float): advanced! Factor in the sufficient reduction rule\n      for second order correction. This option determines how much\n      a second order correction step must reduce the constraint\n      violation so that further correction steps are attempted. See\n      Step A-5.9 of Algorithm A in the implementation paper. The\n      valid range for this real option is 0 < kappa_soc and its\n      default value is 0.99.\n    - **obj_max_inc** (float): advanced!\n      Determines the upper bound on the acceptable increase of\n      barrier objective function. Trial points are rejected if they\n      lead to an increase in the barrier objective function by more\n      than obj_max_inc orders of magnitude. The valid range for\n      this real option is 1 < obj_max_inc and its default value is 5.\n    - **max_filter_resets** (int): advanced! Maximal allowed number\n      of filter resets. A positive number enables a heuristic\n      that resets the filter, whenever in more than\n      \"filter_reset_trigger\" successive iterations the last\n      rejected trial step size was rejected because of the\n      filter. This option determines the maximal number of resets\n      that are allowed to take place. 
The valid range for this\n      integer option is 0 ≤ max_filter_resets and its default\n      value is 5.\n    - **filter_reset_trigger** (int): Advanced! Number\n      of iterations that trigger the filter reset. If the filter\n      reset heuristic is active and the number of successive\n      iterations in which the last rejected trial step size was\n      rejected because of the filter exceeds this number, the filter is reset. The\n      valid range for this integer option is 1 ≤\n      filter_reset_trigger and its default value is 5.\n    - **corrector_type (str)**: advanced! The type of corrector steps that should\n      be taken. If \"mu_strategy\" is \"adaptive\", this option determines what\n      kind of corrector steps should be tried. Changing this option is\n      experimental. The default value for this string option is \"none\".\n      Possible values:\n\n        - \"none\" or None: no corrector\n        - \"affine\": corrector step towards mu=0\n        - \"primal-dual\": corrector step towards current mu\n    - **skip_corr_if_neg_curv (str or bool)**: advanced!\n      Whether to skip the corrector step in negative curvature\n      iteration. The corrector step is not tried if negative curvature has been\n      encountered during the computation of the search direction in the current\n      iteration. This option is only used if \"mu_strategy\" is \"adaptive\".\n      Changing this option is experimental. The default value for this string\n      option is \"yes\". Possible values: \"yes\", \"no\", True, False.\n    - **skip_corr_in_monotone_mode (str or bool)**: Advanced! Whether to skip the\n      corrector step during monotone barrier parameter mode. The corrector step\n      is not tried if the algorithm is currently in the monotone mode (see also\n      option \"barrier_strategy\"). This option is only used if \"mu_strategy\" is\n      \"adaptive\". Changing this option is experimental. The default value for\n      this string option is \"yes\". 
Possible values: \"yes\", \"no\", True, False\n    - **corrector_compl_avrg_red_fact** (float): advanced! Complementarity tolerance\n      factor for accepting corrector step. This option determines the factor by\n      which complementarity is allowed to increase for a corrector step to be\n      accepted. Changing this option is experimental. The valid range for this\n      real option is 0 < corrector_compl_avrg_red_fact and its default value is\n      1.\n    - **soc_method** (int): Ways to apply second order correction. This option\n      determines the way to apply second order correction, 0 is the method\n      described in the implementation paper. 1 is the modified way which adds\n      alpha on the rhs of x and s rows. Officially, the valid range for this\n      integer option is 0 ≤ soc_method ≤ 1 and its default value is 0 but only 0\n      and 1 are allowed.\n\n    - **nu_init** (float): advanced! Initial value of the penalty parameter. The\n      valid range for this real option is 0 < nu_init and its default value is\n      :math:`1e-06`.\n    - **nu_inc** (float): advanced! Increment of the penalty parameter. The\n      valid range for this real option is 0 < nu_inc and its default value is\n      0.0001.\n    - **rho** (float): advanced! Value in penalty parameter update formula.\n      The valid range for this real option is 0 < rho < 1 and its default value\n      is 0.1.\n    - **kappa_sigma** (float): advanced! Factor limiting the deviation of\n      dual variables from primal estimates. If the dual variables deviate from\n      their primal estimates, a correction is performed. See Eqn. (16) in the\n      implementation paper. Setting the value to less than 1 disables the\n      correction. The valid range for this real option is 0 < kappa_sigma and\n      its default value is :math:`1e+10`.\n    - **recalc_y (str or bool)**: Tells the algorithm to\n      recalculate the equality and inequality multipliers as least square\n      estimates. 
This asks the algorithm to recompute the multipliers, whenever\n      the current infeasibility is less than recalc_y_feas_tol. Choosing yes\n      might be helpful in the quasi-Newton option. However, each recalculation\n      requires an extra factorization of the linear system. If a limited memory\n      quasi-Newton option is chosen, this is used by default. The default value\n      for this string option is \"no\". Possible values:\n\n          - \"no\" or False: use the Newton step to update the multipliers\n          - \"yes\" or True: use least-square multiplier estimates\n    - **recalc_y_feas_tol** (float): Feasibility threshold for\n      recomputation of multipliers. If recalc_y is chosen and the current\n      infeasibility is less than this value, then the multipliers are\n      recomputed. The valid range for this real option is 0 < recalc_y_feas_tol\n      and its default value is :math:`1e-06`.\n    - **slack_move** (float): advanced! Correction\n      size for very small slacks. Due to numerical issues or the lack of an\n      interior, the slack variables might become very small. If a slack becomes\n      very small compared to machine precision, the corresponding bound is moved\n      slightly. This parameter determines how large the move should be. Its\n      default value is mach_eps^{3/4}. See also end of Section 3.5 in\n      implementation paper - but actual implementation might be somewhat\n      different. The valid range for this real option is 0 ≤ slack_move and its\n      default value is 1.81899 · :math:`1e-12`.\n    - **constraint_violation_norm_type (str)**: advanced!\n      Norm to be used for the constraint violation in the line search.\n      Determines which norm should be used when the algorithm computes the\n      constraint violation in the line search. The default value for this string\n      option is \"1-norm\". 
Possible values:\n\n          - \"1-norm\": use the 1-norm\n          - \"2-norm\": use the 2-norm\n          - \"max-norm\": use the infinity norm\n\n    - **mehrotra_algorithm (str or bool)**: Indicates whether to do Mehrotra's\n      predictor-corrector algorithm. If enabled, line search is disabled and the\n      (unglobalized) adaptive mu strategy is chosen with the \"probing\" oracle,\n      and \"corrector_type=affine\" is used without any safeguards; you should not\n      set any of those options explicitly in addition. Also, unless otherwise\n      specified, the values of  ``bound_push`` ,  ``bound_frac`` , and\n      ``bound_mult_init_val`` are set more aggressive, and sets\n      \"alpha_for_y=bound_mult\". The Mehrotra's predictor-corrector algorithm\n      works usually very well for LPs and convex QPs. The default value for this\n      string option is \"no\". Possible values: \"yes\", \"no\", True, False.\n    - **fast_step_computation (str or bool)**: Indicates if the linear system should\n      be solved quickly. If enabled, the algorithm assumes that the linear\n      system that is solved to obtain the search direction is solved\n      sufficiently well. In that case, no residuals are computed to verify the\n      solution and the computation of the search direction is a little faster.\n      The default value for this string option is \"no\". Possible values: \"yes\",\n      \"no\", True, False.\n    - **min_refinement_steps** (int): Minimum number of iterative\n      refinement steps per linear system solve. Iterative refinement (on the\n      full asymmetric system) is performed for each right hand side. This\n      option determines the minimum number of iterative refinements (i.e. at\n      least \"min_refinement_steps\" iterative refinement steps are enforced per\n      right hand side.) 
The valid range for this integer option is 0 ≤\n      min_refinement_steps and its default value is 1.\n    - **max_refinement_steps** (int): Maximum number of iterative refinement\n      steps per linear system\n      solve. Iterative refinement (on the full unsymmetric system) is performed\n      for each right hand side. This option determines the maximum number of\n      iterative refinement steps. The valid range for this integer option is 0 ≤\n      max_refinement_steps and its default value is 10.\n    - **residual_ratio_max** (float): advanced! Iterative refinement tolerance.\n      Iterative refinement is\n      performed until the residual test ratio is less than this tolerance (or\n      until \"max_refinement_steps\" refinement steps are performed). The valid\n      range for this real option is 0 < residual_ratio_max and its default value\n      is :math:`1e-10`.\n    - **residual_ratio_singular** (float): advanced! Threshold for\n      declaring linear system singular after failed iterative refinement. If the\n      residual test ratio is larger than this value after failed iterative\n      refinement, the algorithm pretends that the linear system is singular. The\n      valid range for this real option is 0 < residual_ratio_singular and its\n      default value is :math:`1e-05`.\n    - **residual_improvement_factor** (float): advanced!\n      Minimal required reduction of residual test ratio in iterative refinement.\n      If the improvement of the residual test ratio made by one iterative\n      refinement step is not better than this factor, iterative refinement is\n      aborted. The valid range for this real option is 0 <\n      residual_improvement_factor and its default value is 1.\n\n    - **neg_curv_test_tol** (float): Tolerance for heuristic to ignore wrong\n      inertia. 
If nonzero, incorrect inertia in the augmented system is ignored,\n      and Ipopt tests if the direction is a direction of positive curvature.\n      This tolerance is alpha_n in the paper by :cite:`Chiang2014` and it\n      determines when the direction is considered to be sufficiently positive. A\n      value in the range of [1e-12, 1e-11] is recommended. The valid range for\n      this real option is 0 ≤ neg_curv_test_tol and its default value is 0.\n    - **neg_curv_test_reg (str or bool)**: Whether to do the curvature test with the\n      primal regularization (see :cite:`Chiang2014`). The default value for\n      this string option is \"yes\". Possible values:\n\n          - \"yes\" or True: use primal regularization with the\n            inertia-free curvature test\n          - \"no\" or False: use original IPOPT approach, in which the\n            primal regularization is ignored\n    - **max_hessian_perturbation** (float): Maximum value of regularization\n      parameter for handling negative curvature. In order to guarantee that the\n      search directions are indeed proper descent directions, Ipopt requires\n      that the inertia of the (augmented) linear system for the step computation\n      has the correct number of negative and positive eigenvalues. The idea is\n      that this guides the algorithm away from maximizers and makes Ipopt more\n      likely converge to first order optimal points that are minimizers. If the\n      inertia is not correct, a multiple of the identity matrix is added to the\n      Hessian of the Lagrangian in the augmented system. This parameter gives\n      the maximum value of the regularization parameter. If a regularization of\n      that size is not enough, the algorithm skips this iteration and goes to\n      the restoration phase. 
This is delta_w^max in the implementation paper.\n      The valid range for this real option is 0 < max_hessian_perturbation and\n      its default value is :math:`1e+20`.\n    - **min_hessian_perturbation** (float): Smallest\n      perturbation of the Hessian block. The size of the perturbation of the\n      Hessian block is never selected smaller than this value, unless no\n      perturbation is necessary. This is delta_w^min in implementation paper.\n      The valid range for this real option is 0 ≤ min_hessian_perturbation and\n      its default value is :math:`1e-20`.\n    - **perturb_inc_fact_first** (float): Increase\n      factor for x-s perturbation for very first perturbation. The factor by\n      which the perturbation is increased when a trial value was not sufficient\n      - this value is used for the computation of the very first perturbation\n      and allows a different value for the first perturbation than that used\n      for the remaining perturbations. This is bar_kappa_w^+ in the\n      implementation paper. The valid range for this real option is 1 <\n      perturb_inc_fact_first and its default value is 100.\n    - **perturb_inc_fact** (float): Increase factor for x-s perturbation. The factor\n      by which the perturbation is increased when a trial value was not\n      sufficient - this value is used for the computation of all\n      perturbations except for\n      the first. This is kappa_w^+ in the implementation paper. The valid\n      range for this real option is 1 < perturb_inc_fact and its default value\n      is 8.\n    - **perturb_dec_fact** (float): Decrease factor for x-s perturbation.\n      The factor by which the perturbation is decreased when a trial value is\n      deduced from the size of the most recent successful perturbation. This\n      is kappa_w^- in the implementation paper. 
The valid range for this real\n      option is 0 < perturb_dec_fact < 1 and its default value is 0.333333.\n    - **first_hessian_perturbation** (float): Size of first x-s perturbation\n      tried. The first value tried for the x-s perturbation in the inertia\n      correction scheme. This is delta_0 in the implementation paper. The\n      valid range for this real option is 0 < first_hessian_perturbation and\n      its default value is 0.0001.\n    - **jacobian_regularization_value** (float): Size\n      of the regularization for rank-deficient constraint Jacobians. This is\n      bar delta_c in the implementation paper. The valid range for this real\n      option is 0 ≤ jacobian_regularization_value and its default value is\n      :math:`1e-08`.\n    - **jacobian_regularization_exponent** (float): advanced! Exponent for\n      mu in the regularization for rank-deficient constraint Jacobians. This is\n      kappa_c in the implementation paper. The valid range for this real\n      option is 0 ≤ jacobian_regularization_exponent and its default value is\n      0.25.\n    - **perturb_always_cd (str or bool)**: advanced! Active permanent\n      perturbation of constraint linearization. Enabling this option leads to\n      using the delta_c and delta_d perturbation for the computation of every\n      search direction. Usually, it is only used when the iteration matrix is\n      singular. The default value for this string option is \"no\". Possible\n      values: \"yes\", \"no\", True, False.\n\n    - **expect_infeasible_problem (str or bool)**: Enable heuristics to quickly\n      detect an infeasible problem. This option is meant to activate\n      heuristics that may speed up the infeasibility determination if you\n      expect that there is a good chance for the problem to be infeasible. 
In\n      the filter line search procedure, the restoration phase is called more\n      quickly than usually, and more reduction in the constraint violation is\n      enforced before the restoration phase is left. If the problem is square,\n      this option is enabled automatically. The default value for this string\n      option is \"no\". Possible values: \"yes\", \"no\", True, False.\n    - **expect_infeasible_problem_ctol** (float): Threshold for disabling\n      \"expect_infeasible_problem\" option. If the constraint violation becomes\n      smaller than this threshold, the \"expect_infeasible_problem\" heuristics\n      in the filter line search are disabled. If the problem is square, this\n      option is set to 0. The valid range for this real option is 0 ≤\n      expect_infeasible_problem_ctol and its default value is 0.001.\n    - **expect_infeasible_problem_ytol** (float): Multiplier threshold for\n      activating \"expect_infeasible_problem\" option. If the max norm of the\n      constraint multipliers becomes larger than this value and\n      \"expect_infeasible_problem\" is chosen, then the restoration phase is\n      entered. The valid range for this real option is 0 <\n      expect_infeasible_problem_ytol and its default value is :math:`1e+08`.\n    - **start_with_resto (str or bool)**: Whether to switch to restoration phase\n      in first iteration. Setting this option to \"yes\" forces the algorithm to\n      switch to the feasibility restoration phase in the first iteration. If\n      the initial point is feasible, the algorithm will abort with a failure.\n      The default value for this string option is \"no\". Possible values:\n      \"yes\", \"no\", True, False\n    - **soft_resto_pderror_reduction_factor** (float):\n      Required reduction in primal-dual error in the soft restoration phase.\n      The soft restoration phase attempts to reduce the primal-dual error with\n      regular steps. 
If the damped primal-dual step (damped only to satisfy\n      the fraction-to-the-boundary rule) is not decreasing the primal-dual\n      error by at least this factor, then the regular restoration phase is\n      called. Choosing \"0\" here disables the soft restoration phase. The valid\n      range for this real option is 0 ≤ soft_resto_pderror_reduction_factor\n      and its default value is 0.9999.\n    - **max_soft_resto_iters** (int): advanced!\n      Maximum number of iterations performed successively in soft restoration\n      phase. If the soft restoration phase is performed for more than so many\n      iterations in a row, the regular restoration phase is called. The valid\n      range for this integer option is 0 ≤ max_soft_resto_iters and its\n      default value is 10.\n    - **required_infeasibility_reduction** (float): Required\n      reduction of infeasibility before leaving restoration phase. The\n      restoration phase algorithm is performed, until a point is found that is\n      acceptable to the filter and the infeasibility has been reduced by at\n      least the fraction given by this option. The valid range for this real\n      option is 0 ≤ required_infeasibility_reduction < 1 and its default value\n      is 0.9.\n    - **max_resto_iter** (int): advanced! Maximum number of successive\n      iterations in restoration phase. The algorithm terminates with an error\n      message if the number of iterations successively taken in the\n      restoration phase exceeds this number. The valid range for this integer\n      option is 0 ≤ max_resto_iter and its default value is 3000000.\n    - **evaluate_orig_obj_at_resto_trial (str or bool)**: Determines if the\n      original objective function should be evaluated at restoration phase\n      trial points. 
Enabling this option makes the restoration phase algorithm\n      evaluate the objective function of the original problem at every trial\n      point encountered during the restoration phase, even if this value is\n      not required. In this way, it is guaranteed that the original objective\n      function can be evaluated without error at all accepted iterates;\n      otherwise the algorithm might fail at a point where the restoration\n      phase accepts an iterate that is good for the restoration phase problem,\n      but not the original problem. On the other hand, if the evaluation of\n      the original objective is expensive, this might be costly. The default\n      value for this string option is \"yes\". Possible values: \"yes\", \"no\",\n      True, False\n    - **resto_penalty_parameter** (float): advanced! Penalty parameter\n      in the restoration phase objective function. This is the parameter rho in\n      equation (31a) in the Ipopt implementation paper. The valid range for\n      this real option is 0 < resto_penalty_parameter and its default value is\n      1000.\n    - **resto_proximity_weight** (float): advanced! Weighting factor for the\n      proximity term in restoration phase objective. This determines how\n      the parameter zeta in equation (29a) in the implementation paper\n      is computed. zeta here is resto_proximity_weight*sqrt(mu), where\n      mu is the current barrier parameter. The valid range for this real\n      option is 0 ≤ resto_proximity_weight and its default value is 1.\n    - **bound_mult_reset_threshold** (float): Threshold for resetting bound\n      multipliers after the restoration phase. After returning from the\n      restoration phase, the bound multipliers are updated with a Newton\n      step for complementarity. Here, the change in the primal variables\n      during the entire restoration phase is taken to be the\n      corresponding primal Newton step. 
However, if after the update the\n      largest bound multiplier exceeds the threshold specified by this\n      option, the multipliers are all reset to 1.\n      The valid range for this real option is 0 ≤ bound_mult_reset_threshold\n      and its default value is 1000.\n    - **constr_mult_reset_threshold** (float):\n      Threshold for resetting equality and inequality multipliers after\n      restoration phase. After returning from the restoration phase, the\n      constraint multipliers are recomputed by a least square estimate. This\n      option triggers when those least-square estimates should be ignored.\n      The valid range for this real option is 0 ≤ constr_mult_reset_threshold\n      and its default value is 0.\n    - **resto_failure_feasibility_threshold** (float): advanced!\n      Threshold for primal infeasibility to declare failure\n      of restoration phase. If the restoration phase is terminated because of\n      the \"acceptable\" termination criteria and the primal infeasibility is\n      smaller than this value, the restoration phase is declared to have\n      failed. The default value is actually 1e2*tol, where tol is the general\n      termination tolerance. The valid range for this real option is 0 ≤\n      resto_failure_feasibility_threshold and its default value is 0.\n\n    - **limited_memory_aug_solver (str)**: advanced! Strategy for solving the\n      augmented system for low-rank Hessian.\n      The default value for this string option is \"sherman-morrison\".\n      Possible values:\n\n          - \"sherman-morrison\": use Sherman-Morrison formula\n          - \"extended\": use an extended augmented system\n    - **limited_memory_max_history** (int): Maximum size of the history for the\n      limited quasi-Newton Hessian approximation. This option determines the\n      number of most recent iterations that are taken into account for the\n      limited-memory quasi-Newton approximation. 
The valid range for this\n      integer option is 0 ≤ limited_memory_max_history and\n      its default value is 6.\n    - **limited_memory_update_type (str)**: Quasi-Newton update formula for the\n      limited memory quasi-Newton approximation. The default value for this\n      string option is \"bfgs\". Possible values:\n\n          - \"bfgs\": BFGS update (with skipping)\n          - \"sr1\": SR1 (not working well)\n    - **limited_memory_initialization (str)**:\n      Initialization strategy for the limited memory quasi-Newton\n      approximation. Determines how the diagonal Matrix B_0 as the first term in\n      the limited memory approximation should be computed. The default value for\n      this string option is \"scalar1\". Possible values:\n\n          - \"scalar1\": sigma = s^Ty/s^Ts\n          - \"scalar2\": sigma = y^Ty/s^Ty\n          - \"scalar3\": arithmetic average of scalar1 and scalar2\n          - \"scalar4\": geometric average of scalar1 and scalar2\n          - \"constant\": sigma = limited_memory_init_val\n    - **limited_memory_init_val** (float): Value for B0 in low-rank update. The\n      starting matrix in the low rank update, B0, is chosen to be this multiple\n      of the identity in the first iteration (when no updates have been\n      performed yet), and is constantly chosen as this value, if\n      \"limited_memory_initialization\" is \"constant\". The valid range for this\n      real option is 0 < limited_memory_init_val and its default value is 1.\n    - **limited_memory_init_val_max** (float): Upper bound on value for B0 in\n      low-rank update. The starting matrix in the low rank update, B0, is chosen\n      to be this multiple of the identity in the first iteration (when no\n      updates have been performed yet), and is constantly chosen as this value,\n      if \"limited_memory_initialization\" is \"constant\". 
The valid range for this\n      real option is 0 < limited_memory_init_val_max and its default value is\n      :math:`1e+08`.\n    - **limited_memory_init_val_min** (float): Lower bound on value for B0 in\n      low-rank update. The starting matrix in the low rank update, B0, is chosen\n      to be this multiple of the identity in the first iteration (when no\n      updates have been performed yet), and is constantly chosen as this value,\n      if \"limited_memory_initialization\" is \"constant\". The valid range for this\n      real option is 0 < limited_memory_init_val_min and its default value is\n      :math:`1e-08`.\n    - **limited_memory_max_skipping** (int): Threshold for successive\n      iterations where update is skipped. If the update is skipped more than\n      this number of successive iterations, the quasi-Newton approximation is\n      reset. The valid range for this integer option is 1 ≤\n      limited_memory_max_skipping and its default value is 2.\n    - **limited_memory_special_for_resto (str or bool)**: Determines if the\n      quasi-Newton updates should be special during the restoration phase. Until\n      Nov 2010, Ipopt used a special update during the restoration phase, but it\n      turned out that this does not work well. The new default uses the regular\n      update procedure and it improves results. If for some reason you want to\n      get back to the original update, set this option to \"yes\". The default\n      value for this string option is \"no\". Possible values: \"yes\", \"no\", True,\n      False.\n    - **hessian_approximation (str)**: Indicates what Hessian information is\n      to be used. This determines which kind of information for the Hessian of\n      the Lagrangian function is used by the algorithm. The default value for\n      this string option is \"limited-memory\". Possible values: - \"exact\": Use\n      second derivatives provided by the NLP. 
- \"limited-memory\": Perform a\n      limited-memory quasi-Newton approximation\n    - **hessian_approximation_space (str)**: advanced!\n      Indicates in which subspace the Hessian information is to\n      be approximated. The default value for this string option is\n      \"nonlinear-variables\". Possible values: - \"nonlinear-variables\": only in\n      space of nonlinear variables. - \"all-variables\": in space of all variables\n      (without slacks)\n    - **linear_solver (str)**: Linear solver used for step\n      computations. Determines which linear algebra package is to be used for\n      the solution of the augmented linear system (for obtaining the search\n      directions). The default value for this string option is \"ma27\". Possible\n      values:\n\n          - \"mumps\" (use the Mumps package, default)\n          - \"ma27\" (load the Harwell routine MA27 from library at runtime)\n          - \"ma57\" (load the Harwell routine MA57 from library at runtime)\n          - \"ma77\" (load the Harwell routine HSL_MA77 from library at runtime)\n          - \"ma86\" (load the Harwell routine MA86 from library at runtime)\n          - \"ma97\" (load the Harwell routine MA97 from library at runtime)\n          - \"pardiso\" (load the Pardiso package from pardiso-project.org\n            from user-provided library at runtime)\n          - \"custom\" (use custom linear solver (expert use))\n    - **linear_solver_options** (dict or None): dictionary with the\n      linear solver options, possibly including `linear_system_scaling`,\n      `hsllib` and `pardisolib`. See the `ipopt documentation for details\n      <https://coin-or.github.io/Ipopt/OPTIONS.html>`_. The linear solver\n      options are not automatically converted to float at the moment.]\n\n```\n\n(fides-algorithm)=\n\n## The Fides Optimizer\n\noptimagic supports the\n[Fides Optimizer](https://fides-optimizer.readthedocs.io/en/latest). 
To use Fides, you\nneed to have [the fides package](https://github.com/fides-dev/fides) installed\n(`pip install fides>=0.7.4`, make sure you have at least 0.7.4).\n\n```{eval-rst}\n.. dropdown:: fides\n\n  .. code-block::\n\n      \"fides\"\n\n  `Fides <https://fides-optimizer.readthedocs.io/en/latest>`_ implements an Interior\n  Trust Region Reflective algorithm for boundary constrained optimization problems based on the\n  papers :cite:`Coleman1994` and :cite:`Coleman1996`. Accordingly, Fides is named after\n  the Roman goddess of trust and reliability. In contrast to other optimizers, Fides\n  solves the full trust-region subproblem exactly, which can yield higher quality\n  proposal steps, but is computationally more expensive. This makes Fides particularly\n  attractive for optimization problems with objective functions that are computationally\n  expensive to evaluate and the computational cost of solving the trust-region\n  subproblem is negligible.\n\n  - **hessian_update_strategy** (str): Hessian Update Strategy to employ. You can provide\n    a lowercase or uppercase string or a\n    fides.hessian_approximation.HessianApproximation class instance. FX, SSM, TSSM and\n    GNSBFGS are not supported by optimagic. The available update strategies are:\n\n      - **bb**: Broyden's \"bad\" method as introduced in :cite:`Broyden1965`.\n      - **bfgs**: Broyden-Fletcher-Goldfarb-Shanno update strategy.\n      - **bg**: Broyden's \"good\" method as introduced in :cite:`Broyden1965`.\n      - You can use a general BroydenClass Update scheme using the Broyden class from\n        `fides.hessian_approximation`. This is a generalization of BFGS/DFP methods\n        where the parameter :math:`\\phi` controls the convex combination between the\n        two. This is a rank 2 update strategy that preserves positive-semidefiniteness\n        and symmetry (if :math:`\\phi \\in [0,1]`). 
It is described in\n        :cite:`Nocedal1999`, Chapter 6.3.\n      - **dfp**: Davidon-Fletcher-Powell update strategy.\n      - **sr1**: Symmetric Rank 1 update strategy as described in :cite:`Nocedal1999`,\n        Chapter 6.2.\n\n  - **convergence.ftol_abs** (float): absolute convergence criterion\n    tolerance. This is only the interpretation of this parameter if the relative\n    criterion tolerance is set to 0. Denoting the absolute criterion tolerance by\n    :math:`\\alpha` and the relative criterion tolerance by :math:`\\beta`, the\n    convergence condition on the criterion improvement is\n    :math:`|f(x_k) - f(x_{k-1})| < \\alpha + \\beta \\cdot |f(x_{k-1})|`\n  - **convergence.ftol_rel** (float): relative convergence criterion\n    tolerance. This is only the interpretation of this parameter if the absolute\n    criterion tolerance is set to 0 (as is the default). Denoting the absolute\n    criterion tolerance by :math:`\\alpha` and the relative criterion tolerance by\n    :math:`\\beta`, the convergence condition on the criterion improvement is\n    :math:`|f(x_k) - f(x_{k-1})| < \\alpha + \\beta \\cdot |f(x_{k-1})|`\n  - **convergence.xtol_abs** (float): The optimization terminates\n    successfully when the step size falls below this number, i.e. when\n    :math:`||x_{k+1} - x_k||` is smaller than this tolerance.\n  - **convergence.gtol_abs** (float): The optimization terminates\n    successfully when the gradient norm is less or equal than this tolerance.\n  - **convergence.gtol_rel** (float): The optimization terminates\n    successfully when the norm of the gradient divided by the absolute function value\n    is less or equal to this tolerance.\n\n  - **stopping.maxiter** (int): maximum number of allowed iterations.\n  - **stopping.max_seconds** (int): maximum number of walltime seconds, deactivated by\n    default.\n\n  - **trustregion.initial_radius** (float): Initial trust region radius. 
Default is 1.\n  - **trustregion.stepback_strategy** (str): search refinement strategy if proposed step\n    reaches a parameter bound. The default is \"truncate\". The available options are:\n\n      - \"reflect\": recursive reflections at boundary.\n      - \"reflect_single\": single reflection at boundary.\n      - \"truncate\": truncate step at boundary and re-solve the restricted subproblem\n      - \"mixed\": mix reflections and truncations\n\n  - **trustregion.subspace_dimension** (str): Subspace dimension in which the subproblem\n    will be solved. The default is \"2D\". The following values are available:\n\n      - \"2D\": Two dimensional Newton/Gradient subspace\n      - \"full\": full dimensionality\n      - \"scg\": Conjugated Gradient subspace via Steihaug's method\n\n  - **trustregion.max_stepback_fraction** (float): Stepback parameter that controls how\n    close steps are allowed to get to the boundary. It is the maximal fraction of a\n    step to take if full step would reach breakpoint.\n\n  - **trustregion.decrease_threshold** (float): Acceptance threshold for trust region\n    ratio. The default is 0.25 (:cite:`Nocedal2006`). The radius is decreased if the\n    trust region ratio is below this value. This is denoted by :math:`\\\\mu` in\n    algorithm 4.1 in :cite:`Nocedal2006`.\n  - **trustregion.increase_threshold** (float): Threshold for the trust region radius\n    ratio above which the trust region radius can be increased. This is denoted by\n    :math:`\\eta` in algorithm 4.1 in :cite:`Nocedal2006`. The default is 0.75\n    (:cite:`Nocedal2006`).\n  - **trustregion.decrease_factor** (float): factor by which trust region radius will be\n    decreased in case it is decreased. This is denoted by :math:`\\gamma_1` in\n    algorithm 4.1 in :cite:`Nocedal2006` and its default is 0.25.\n  - **trustregion.increase_factor** (float): factor by which trust region radius will be\n    increase in case it is increase. 
This is denoted by :math:`\\gamma_2` in algorithm\n    4.1 in :cite:`Nocedal2006` and its default is 2.0.\n\n  - **trustregion.refine_stepback** (bool): whether to refine stepbacks via optimization.\n    Default is False.\n  - **trustregion.scaled_gradient_as_possible_stepback** (bool): whether the scaled\n    gradient should be added to the set of possible stepback proposals. Default is\n    False.\n\n```\n\n## The NLOPT Optimizers (nlopt)\n\noptimagic supports the following [NLOPT](https://nlopt.readthedocs.io/en/latest/)\nalgorithms. Please add the\n[appropriate citations](https://nlopt.readthedocs.io/en/latest/Citing_NLopt/) in\naddition to optimagic when using an NLOPT algorithm. To install nlopt run\n`conda install nlopt`.\n\n```{eval-rst}\n.. dropdown:: nlopt_bobyqa\n\n    .. code-block::\n\n        \"nlopt_bobyqa\"\n\n    Minimize a scalar function using the BOBYQA algorithm.\n\n    The implementation is derived from the BOBYQA subroutine of M. J. D. Powell.\n\n    The algorithm performs derivative free bound-constrained optimization using\n    an iteratively constructed quadratic approximation for the objective function.\n    Due to its use of quadratic appoximation, the algorithm may perform poorly\n    for objective functions that are not twice-differentiable.\n\n    For details see :cite:`Powell2009`.\n\n    - **convergence.xtol_rel** (float):  Stop when the relative movement\n      between parameter vectors is smaller than this.\n    - **convergence.xtol_abs** (float): Stop when the absolute movement\n      between parameter vectors is smaller than this.\n    - **convergence.ftol_rel** (float): Stop when the relative\n      improvement between two iterations is smaller than this.\n    - **convergence.ftol_abs** (float): Stop when the change of the\n      criterion function between two iterations is smaller than this.\n    - **stopping.maxfun** (int): If the maximum number of function\n      evaluation is reached, the optimization stops but we do not 
count this\n      as convergence.\n```\n\n```{eval-rst}\n.. dropdown:: nlopt_neldermead\n\n    .. code-block::\n\n        \"nlopt_neldermead\"\n\n    Minimize a scalar function using the Nelder-Mead simplex algorithm.\n\n    The basic algorithm is described in :cite:`Nelder1965`.\n\n    The difference between the nlopt implementation an the original implementation is\n    that the nlopt version supports bounds. This is done by moving all new points that\n    would lie outside the bounds exactly on the bounds.\n\n    - **convergence.xtol_rel** (float):  Stop when the relative movement\n      between parameter vectors is smaller than this.\n    - **convergence.xtol_abs** (float): Stop when the absolute movement\n      between parameter vectors is smaller than this.\n    - **convergence.ftol_rel** (float): Stop when the relative\n      improvement between two iterations is smaller than this.\n    - **convergence.ftol_abs** (float): Stop when the change of the\n      criterion function between two iterations is smaller than this.\n    - **stopping.maxfun** (int): If the maximum number of function\n      evaluation is reached, the optimization stops but we do not count this\n      as convergence.\n```\n\n```{eval-rst}\n.. dropdown:: nlopt_praxis\n\n    .. 
code-block::\n\n        \"nlopt_praxis\"\n\n    Minimize a scalar function using principal-axis method.\n\n    This is a gradient-free local optimizer originally described in :cite:`Brent1972`.\n    It assumes quadratic form of the optimized function and repeatedly updates a set of conjugate\n    search directions.\n\n    The algorithm is not invariant to scaling of the objective function and may\n    fail under certain of its rank-preserving transformations (e.g., ones that lead to\n    a non-quadratic shape of the objective function).\n\n    The algorithm is not deterministic and it is not possible to achieve\n    determinism via seed setting.\n\n    The algorithm failed on a simple benchmark function with finite parameter bounds.\n    Passing arguments `lower_bounds` and `upper_bounds` has been disabled for this\n    algorithm.\n\n    The difference between the nlopt implementation and the original implementation is\n    that the nlopt version supports bounds. This is done by returning infinity (Inf)\n    when the constraints are violated. The implementation of bound constraints\n    is achieved at the cost of significantly reduced speed of convergence.\n    In case of bounded constraints, this method is dominated by `nlopt_bobyqa`\n    and `nlopt_cobyla`.\n\n    - **convergence.xtol_rel** (float):  Stop when the relative movement\n      between parameter vectors is smaller than this.\n    - **convergence.xtol_abs** (float): Stop when the absolute movement\n      between parameter vectors is smaller than this.\n    - **convergence.ftol_rel** (float): Stop when the relative\n      improvement between two iterations is smaller than this.\n    - **convergence.ftol_abs** (float): Stop when the change of the\n      criterion function between two iterations is smaller than this.\n    - **stopping.maxfun** (int): If the maximum number of function\n      evaluation is reached, the optimization stops but we do not count this\n      as convergence.\n\n```\n\n```{eval-rst}\n.. 
dropdown:: nlopt_cobyla\n\n    .. code-block::\n\n        \"nlopt_cobyla\"\n\n    Minimize a scalar function using the cobyla method.\n\n    The algorithm is derived from Powell's Constrained Optimization BY Linear\n    Approximations (COBYLA) algorithm. It is a derivative-free optimizer with\n    nonlinear inequality and equality constraints, described in :cite:`Powell1994`.\n\n    It constructs successive linear approximations of the objective function and\n    constraints via a simplex of n+1 points (in n dimensions), and optimizes these\n    approximations in a trust region at each step.\n\n    The nlopt implementation differs from the original implementation in a\n    few ways:\n    - Incorporates all of the NLopt termination criteria.\n    - Adds explicit support for bound constraints.\n    - Allows the algorithm to increase the trust-region radius if the predicted\n    improvement was approximately right and the simplex is satisfactory.\n    - Pseudo-randomizes simplex steps in the algorithm, improving robustness by\n    avoiding accidentally taking steps that don't improve conditioning, while preserving\n    the deterministic nature of the algorithm.\n    - Supports unequal initial-step sizes in the different parameters.\n\n\n    - **convergence.xtol_rel** (float):  Stop when the relative movement\n      between parameter vectors is smaller than this.\n    - **convergence.xtol_abs** (float): Stop when the absolute movement\n      between parameter vectors is smaller than this.\n    - **convergence.ftol_rel** (float): Stop when the relative\n      improvement between two iterations is smaller than this.\n    - **convergence.ftol_abs** (float): Stop when the change of the\n      criterion function between two iterations is smaller than this.\n    - **stopping.maxfun** (int): If the maximum number of function\n      evaluation is reached, the optimization stops but we do not count this\n      as convergence.\n```\n\n```{eval-rst}\n.. 
dropdown:: nlopt_sbplx\n\n    .. code-block::\n\n        \"nlopt_sbplx\"\n\n    Minimize a scalar function using the \"Subplex\" algorithm.\n\n    The algorithm is a reimplementation of Tom Rowan's \"Subplex\" algorithm.\n    See :cite:`Rowan1990`.\n    Subplex is a variant of Nelder-Mead that uses Nelder-Mead on a sequence of\n    subspaces. It is claimed to be more efficient and robust than the original\n    Nelder-Mead algorithm.\n\n    The difference between this re-implementation and the original algorithm\n    of Rowan, is that it explicitly supports bound constraints providing big\n    improvement in the case where the optimum lies against one of the constraints.\n\n    - **convergence.xtol_rel** (float):  Stop when the relative movement\n      between parameter vectors is smaller than this.\n    - **convergence.xtol_abs** (float): Stop when the absolute movement\n      between parameter vectors is smaller than this.\n    - **convergence.ftol_rel** (float): Stop when the relative\n      improvement between two iterations is smaller than this.\n    - **convergence.ftol_abs** (float): Stop when the change of the\n      criterion function between two iterations is smaller than this.\n    - **stopping.maxfun** (int): If the maximum number of function\n      evaluation is reached, the optimization stops but we do not count this\n      as convergence.\n```\n\n```{eval-rst}\n.. dropdown:: nlopt_newuoa\n\n    .. code-block::\n\n        \"nlopt_newuoa\"\n\n    Minimize a scalar function using the NEWUOA algorithm.\n\n    The algorithm is derived from the NEWUOA subroutine of M.J.D Powell which\n    uses iteratively constructed quadratic approximation of the objective function\n    to perform derivative-free unconstrained optimization. For more details see:\n    :cite:`Powell2004`.\n\n    The algorithm in `nlopt` has been modified to support bound constraints. 
If all\n    of the bound constraints are infinite, this function calls the `nlopt.LN_NEWUOA`\n    optimizers for uncsonstrained optimization. Otherwise, the `nlopt.LN_NEWUOA_BOUND`\n    optimizer for constrained problems.\n\n    `NEWUOA` requires the dimension n of the parameter space to be `≥ 2`, i.e. the\n    implementation does not handle one-dimensional optimization problems.\n\n    - **convergence.xtol_rel** (float):  Stop when the relative movement\n      between parameter vectors is smaller than this.\n    - **convergence.xtol_abs** (float): Stop when the absolute movement\n      between parameter vectors is smaller than this.\n    - **convergence.ftol_rel** (float): Stop when the relative\n      improvement between two iterations is smaller than this.\n    - **convergence.ftol_abs** (float): Stop when the change of the\n      criterion function between two iterations is smaller than this.\n    - **stopping.maxfun** (int): If the maximum number of function\n      evaluation is reached, the optimization stops but we do not count this\n      as convergence.\n```\n\n```{eval-rst}\n.. dropdown:: nlopt_tnewton\n\n    .. code-block::\n\n        \"nlopt_tnewton\"\n\n    Minimize a scalar function using the \"TNEWTON\" algorithm.\n\n    The alggorithm is based on a Fortran implementation of a preconditioned\n    inexact truncated Newton algorithm written by Prof. Ladislav Luksan.\n\n    Truncated Newton methods are a set of algorithms designed to solve large scale\n    optimization problems. 
The algorithms use (inaccurate) approximations of the\n    solutions to Newton equations, using conjugate gradient methodds, to handle the\n    expensive calculations of derivatives during each iteration.\n\n    Detailed description of algorithms is given in :cite:`Dembo1983`.\n\n    - **convergence.xtol_rel** (float):  Stop when the relative movement\n      between parameter vectors is smaller than this.\n    - **convergence.xtol_abs** (float): Stop when the absolute movement\n      between parameter vectors is smaller than this.\n    - **convergence.ftol_rel** (float): Stop when the relative\n      improvement between two iterations is smaller than this.\n    - **convergence.ftol_abs** (float): Stop when the change of the\n      criterion function between two iterations is smaller than this.\n    - **stopping.maxfun** (int): If the maximum number of function\n      evaluation is reached, the optimization stops but we do not count this\n      as convergence.\n```\n\n```{eval-rst}\n.. dropdown:: nlopt_lbfgs\n\n    .. code-block::\n\n        \"nlopt_lbfgs\"\n\n    Minimize a scalar function using the \"LBFGS\" algorithm.\n\n    The alggorithm is based on a Fortran implementation of low storage BFGS algorithm\n    written by Prof. Ladislav Luksan.\n\n    LFBGS is an approximation of the original Broyden–Fletcher–Goldfarb–Shanno algorithm\n    based on limited use of memory. 
Memory efficiency is obtained by preserving a limi-\n    ted number (<10) of past updates of candidate points and gradient values and using\n    them to approximate the hessian matrix.\n\n    Detailed description of algorithms is given in :cite:`Nocedal1989`, :cite:`Nocedal1980`.\n\n    - **convergence.xtol_rel** (float):  Stop when the relative movement\n      between parameter vectors is smaller than this.\n    - **convergence.xtol_abs** (float): Stop when the absolute movement\n      between parameter vectors is smaller than this.\n    - **convergence.ftol_rel** (float): Stop when the relative\n      improvement between two iterations is smaller than this.\n    - **convergence.ftol_abs** (float): Stop when the change of the\n      criterion function between two iterations is smaller than this.\n    - **stopping.maxfun** (int): If the maximum number of function\n      evaluation is reached, the optimization stops but we do not count this\n      as convergence.\n```\n\n```{eval-rst}\n.. dropdown:: nlopt_ccsaq\n\n    .. code-block::\n\n        \"nlopt_ccsaq\"\n\n    Minimize a scalar function using CCSAQ algorithm.\n\n    CCSAQ uses the quadratic variant of the conservative convex separable approximation.\n    The algorithm performs gradient based local optimization with equality (but not\n    inequality) constraints. At each candidate point x, a quadratic approximation\n    to the criterion faunction is computed using the value of gradient at point x. A\n    penalty term is incorporated to render optimizaion convex and conservative. 
The\n    algorithm is \"globally convergent\" in the sense that it is guaranteed to con-\n    verge to a local optimum from any feasible starting point.\n\n    The implementation is based on CCSA algorithm described in :cite:`Svanberg2002`.\n\n    - **convergence.xtol_rel** (float):  Stop when the relative movement\n      between parameter vectors is smaller than this.\n    - **convergence.xtol_abs** (float): Stop when the absolute movement\n      between parameter vectors is smaller than this.\n    - **convergence.ftol_rel** (float): Stop when the relative\n      improvement between two iterations is smaller than this.\n    - **convergence.ftol_abs** (float): Stop when the change of the\n      criterion function between two iterations is smaller than this.\n    - **stopping.maxfun** (int): If the maximum number of function\n      evaluation is reached, the optimization stops but we do not count this\n      as convergence.\n```\n\n```{eval-rst}\n.. dropdown:: nlopt_mma\n\n    .. code-block::\n\n        \"nlopt_mma\"\n\n    Minimize a scalar function using the method of moving asymptotes (MMA).\n\n    The implementation is based on an algorithm described in :cite:`Svanberg2002`.\n\n    The algorithm performs gradient based local optimization with equality (but\n    not inequality) constraints. At each candidate point x, an approximation to the\n    criterion faunction is computed using the value of gradient at point x. A quadratic\n    penalty term is incorporated to render optimizaion convex and conservative. 
The\n    algorithm is \"globally convergent\" in the sense that it is guaranteed to con-\n    verge to a local optimum from any feasible starting point.\n\n\n    - **convergence.xtol_rel** (float):  Stop when the relative movement\n      between parameter vectors is smaller than this.\n    - **convergence.xtol_abs** (float): Stop when the absolute movement\n      between parameter vectors is smaller than this.\n    - **convergence.ftol_rel** (float): Stop when the relative\n      improvement between two iterations is smaller than this.\n    - **convergence.ftol_abs** (float): Stop when the change of the\n      criterion function between two iterations is smaller than this.\n    - **stopping.maxfun** (int): If the maximum number of function\n      evaluation is reached, the optimization stops but we do not count this\n      as convergence.\n```\n\n```{eval-rst}\n.. dropdown:: nlopt_var\n\n    .. code-block::\n\n        \"nlopt_var\"\n\n    Minimize a scalar function limited memory switching variable-metric method.\n\n    The algorithm relies on saving only limited number M of past updates of the\n    gradient to approximate the inverse hessian. 
The larger M is, the more memory is\n    consumed.\n\n    Detailed explanation of the algorithm, including its two variations of rank-2 and\n    rank-1 methods, can be found in the following paper :cite:`Vlcek2006`.\n\n    - **convergence.xtol_rel** (float):  Stop when the relative movement\n      between parameter vectors is smaller than this.\n    - **convergence.xtol_abs** (float): Stop when the absolute movement\n      between parameter vectors is smaller than this.\n    - **convergence.ftol_rel** (float): Stop when the relative\n      improvement between two iterations is smaller than this.\n    - **convergence.ftol_abs** (float): Stop when the change of the\n      criterion function between two iterations is smaller than this.\n    - **stopping.maxfun** (int): If the maximum number of function\n      evaluation is reached, the optimization stops but we do not count this\n      as convergence.\n    - **rank_1_update** (bool): Whether a rank-1 or rank-2 update is used.\n```\n\n```{eval-rst}\n.. dropdown:: nlopt_slsqp\n\n    .. 
code-block::\n\n        \"nlopt_slsqp\"\n\n    Optimize a scalar function based on SLSQP method.\n\n    SLSQP solves gradient based nonlinearly constrained optimization problems.\n    The algorithm treats the optimization problem as a sequence of constrained\n    least-squares problems.\n\n    The implementation is based on the procedure described in :cite:`Kraft1988`\n    and :cite:`Kraft1994` .\n\n    - **convergence.xtol_rel** (float):  Stop when the relative movement\n      between parameter vectors is smaller than this.\n    - **convergence.xtol_abs** (float): Stop when the absolute movement\n      between parameter vectors is smaller than this.\n    - **convergence.ftol_rel** (float): Stop when the relative\n      improvement between two iterations is smaller than this.\n    - **convergence.ftol_abs** (float): Stop when the change of the\n      criterion function between two iterations is smaller than this.\n    - **stopping.maxfun** (int): If the maximum number of function\n      evaluation is reached, the optimization stops but we do not count this\n      as convergence.\n```\n\n```{eval-rst}\n.. dropdown:: nlopt_direct\n\n    .. 
code-block::\n\n        \"nlopt_direct\"\n\n    Optimize a scalar function based on DIRECT method.\n\n    DIRECT is the DIviding RECTangles algorithm for global optimization, described\n    in :cite:`Jones1993` .\n\n    Variations of the algorithm include locally biased routines (distinguished by _L\n    suffix) that prove to be more efficients for functions that have few local minima.\n    See the following for the DIRECT_L variant :cite:`Gablonsky2001` .\n\n    Locally biased algorithms can be implmented both with deterministic and random\n    (distinguished by _RAND suffix) search algorithm.\n\n    Finally, both original and locally biased variants can be implemented with and\n    without the rescaling of the bound constraints.\n\n    Boolean arguments `locally_biased`, 'random_search', and 'unscaled_bouds' can be\n    set to `True` or `False` to determine which method is run. The comprehensive list\n    of available methods are:\n    - \"DIRECT\"\n    - \"DIRECT_L\"\n    - \"DIRECT_L_NOSCAL\"\n    - \"DIRECT_L_RAND\"\n    - \"DIRECT_L_RAND_NOSCAL\"\n    - \"DIRECT_RAND\"\n\n    - **convergence.xtol_rel** (float):  Stop when the relative movement\n      between parameter vectors is smaller than this.\n    - **convergence.xtol_abs** (float): Stop when the absolute movement\n      between parameter vectors is smaller than this.\n    - **convergence.ftol_rel** (float): Stop when the relative\n      improvement between two iterations is smaller than this.\n    - **convergence.ftol_abs** (float): Stop when the change of the\n      criterion function between two iterations is smaller than this.\n    - **stopping.maxfun** (int): If the maximum number of function\n      evaluation is reached, the optimization stops but we do not count this\n      as convergence.\n    - **locally_biased** (bool): Whether the \"L\" version of the algorithm is selected.\n    - **random_search** (bool): Whether the randomized version of the algorithm is selected.\n    - **unscaled_bounds** 
(bool): Whether the \"NOSCAL\" version of the algorithm is selected.\n```\n\n```{eval-rst}\n.. dropdown:: nlopt_esch\n\n    .. code-block::\n\n        \"nlopt_esch\"\n\n    Optimize a scalar function using the ESCH algorithm.\n\n    ESCH is an evolutionary algorithm that supports bound constraints only. Specifi\n    cally, it does not support nonlinear constraints.\n\n    More information on this method can be found in\n    :cite:`DaSilva2010` , :cite:`DaSilva2010a` , :cite:`Beyer2002`  and :cite:`Vent1975` .\n\n    - **convergence.xtol_rel** (float):  Stop when the relative movement\n      between parameter vectors is smaller than this.\n    - **convergence.xtol_abs** (float): Stop when the absolute movement\n      between parameter vectors is smaller than this.\n    - **convergence.ftol_rel** (float): Stop when the relative\n      improvement between two iterations is smaller than this.\n    - **convergence.ftol_abs** (float): Stop when the change of the\n      criterion function between two iterations is smaller than this.\n    - **stopping.maxfun** (int): If the maximum number of function\n      evaluation is reached, the optimization stops but we do not count this\n      as convergence.\n```\n\n```{eval-rst}\n.. dropdown:: nlopt_isres\n\n    .. code-block::\n\n        \"nlopt_isres\"\n\n    Optimize a scalar function using the ISRES algorithm.\n\n    ISRES is an implementation of \"Improved Stochastic Evolution Strategy\"\n    written for solving optimization problems with non-linear constraints. The\n    algorithm is supposed to be a global method, in that it has heuristics to\n    avoid local minima. 
However, no convergence proof is available.\n\n    The original method and a refined version can be found, respecively, in\n    :cite:`PhilipRunarsson2005` and :cite:`Thomas2000` .\n\n\n    - **convergence.xtol_rel** (float):  Stop when the relative\n      movement between parameter vectors is smaller than this.\n    - **convergence.xtol_abs** (float): Stop when the absolute\n      movement between parameter vectors is smaller than this.\n    - **convergence.ftol_rel** (float): Stop when the relative\n      improvement between two iterations is smaller than this.\n    - **convergence.ftol_abs** (float): Stop when the change of\n      the criterion function between two iterations is smaller than this.\n    - **stopping.maxfun** (int): If the maximum number of\n      function evaluation is reached, the optimization stops but we do not count\n      this as convergence.\n```\n\n```{eval-rst}\n.. dropdown:: nlopt_crs2_lm\n\n    .. code-block::\n\n        \"nlopt_crs2_lm\"\n\n    Optimize a scalar function using the CRS2_LM algorithm.\n\n    This implementation of controlled random search method with local mutation is based\n    on :cite:`Kaelo2006` .\n\n    The original CRS method is described in :cite:`Price1978`  and :cite:`Price1983` .\n\n    CRS class of algorithms starts with random population of points and evolves the\n    points \"randomly\". The size of the initial population can be set via the param-\n    meter population_size. 
If the user doesn't specify a value, it is set to the nlopt\n    default of 10*(n+1).\n\n    - **convergence.xtol_rel** (float):  Stop when the relative movement\n      between parameter vectors is smaller than this.\n    - **convergence.xtol_abs** (float): Stop when the absolute movement\n      between parameter vectors is smaller than this.\n    - **convergence.ftol_rel** (float): Stop when the relative\n      improvement between two iterations is smaller than this.\n    - **convergence.ftol_abs** (float): Stop when the change of the\n      criterion function between two iterations is smaller than this.\n    - **stopping.maxfun** (int): If the maximum number of function\n      evaluation is reached, the optimization stops but we do not count this as\n      convergence.\n    - **population_size** (int): Size of the population. If None, it's set to be\n      10 * (number of parameters + 1).\n```\n\n## Optimizers from iminuit\n\noptimagic supports the [IMINUIT MIGRAD Optimizer](https://iminuit.readthedocs.io/). To\nuse MIGRAD, you need to have\n[the iminuit package](https://github.com/scikit-hep/iminuit) installed\n(`pip install iminuit`).\n\n```{eval-rst}\n.. dropdown::  iminuit_migrad\n\n    **How to use this algorithm:**\n\n    .. code-block::\n\n        import optimagic as om\n        om.minimize(\n          ...,\n          algorithm=om.algos.iminuit_migrad(stopping_maxfun=10_000, ...)\n        )\n        \n    or\n        \n    .. code-block::\n\n        om.minimize(\n          ...,\n          algorithm=\"iminuit_migrad\",\n          algo_options={\"stopping_maxfun\": 10_000, ...}\n        )\n\n    **Description and available options:**\n\n    .. autoclass:: optimagic.optimizers.iminuit_migrad.IminuitMigrad\n\n```\n\n## Nevergrad Optimizers\n\noptimagic supports the following algorithms from the\n[Nevergrad](https://facebookresearch.github.io/nevergrad/index.html) library. 
To use\nthese optimizers, you need to have\n[the nevergrad package](https://github.com/facebookresearch/nevergrad) installed.\n(`pip install nevergrad`).\\\nTwo algorithms from nevergrad are not available in optimagic.\\\n`SPSA (Simultaneous Perturbation Stochastic Approximation)` - This is WIP in nevergrad\nand hence imprecise.\\\n`AXP (AX-platfofm)` - Very slow and not recommended.\n\n```{eval-rst}\n.. dropdown::  nevergrad_pso\n\n    **How to use this algorithm:**\n\n    .. code-block::\n\n        import optimagic as om\n        om.minimize(\n          ...,\n          algorithm=om.algos.nevergrad_pso(stopping_maxfun=1_000, ...)\n        )\n        \n    or\n        \n    .. code-block::\n\n        om.minimize(\n          ...,\n          algorithm=\"nevergrad_pso\",\n          algo_options={\"stopping_maxfun\": 1_000, ...}\n        )\n\n    **Description and available options:**\n\n    .. autoclass:: optimagic.optimizers.nevergrad_optimizers.NevergradPSO\n\n```\n\n```{eval-rst}\n.. dropdown::  nevergrad_cmaes\n\n    **How to use this algorithm:**\n\n    .. code-block::\n\n        import optimagic as om\n        om.minimize(\n          ...,\n          algorithm=om.algos.nevergrad_cmaes(stopping_maxfun=1_000, ...)\n        )\n        \n    or\n        \n    .. code-block::\n\n        om.minimize(\n          ...,\n          algorithm=\"nevergrad_cmaes\",\n          algo_options={\"stopping_maxfun\": 1_000, ...}\n        )\n\n    **Description and available options:**\n\n    .. autoclass:: optimagic.optimizers.nevergrad_optimizers.NevergradCMAES\n\n```\n\n```{eval-rst}\n.. dropdown:: nevergrad_oneplusone\n\n    **How to use this algorithm:**\n\n    .. code-block::\n\n        import optimagic as om\n        om.minimize(\n          ...,\n          algorithm=om.algos.nevergrad_oneplusone(stopping_maxfun=1_000, ...)\n        )\n\n    or\n\n    .. 
code-block::\n\n        om.minimize(\n          ...,\n          algorithm=\"nevergrad_oneplusone\",\n          algo_options={\"stopping_maxfun\": 1_000, ...}\n        )\n\n    **Description and available options:**\n\n    .. autoclass:: optimagic.optimizers.nevergrad_optimizers.NevergradOnePlusOne\n```\n\n```{eval-rst}\n.. dropdown:: nevergrad_de\n\n    **How to use this algorithm:**\n\n    .. code-block::\n\n        import optimagic as om\n        om.minimize(\n          ...,\n          algorithm=om.algos.nevergrad_de(population_size=\"large\", ...)\n        )\n\n    or\n\n    .. code-block::\n\n        om.minimize(\n          ...,\n          algorithm=\"nevergrad_de\",\n          algo_options={\"population_size\": \"large\", ...}\n        )\n\n    **Description and available options:**\n\n    .. autoclass:: optimagic.optimizers.nevergrad_optimizers.NevergradDifferentialEvolution\n```\n\n```{eval-rst}\n.. dropdown::  nevergrad_bo\n\n    .. note::\n\n        Using this optimizer requires the `bayes-optim` package to be installed as well.\n        This can be done with `pip install bayes-optim`.\n\n    **How to use this algorithm:**\n\n    .. code-block::\n\n        import optimagic as om\n        om.minimize(\n          ...,\n          algorithm=om.algos.nevergrad_bo(stopping_maxfun=1_000, ...)\n        )\n\n    or\n\n    .. code-block::\n\n        om.minimize(\n          ...,\n          algorithm=\"nevergrad_bo\",\n          algo_options={\"stopping_maxfun\": 1_000, ...}\n        )\n\n    **Description and available options:**\n\n    .. autoclass:: optimagic.optimizers.nevergrad_optimizers.NevergradBayesOptim\n```\n\n```{eval-rst}\n.. dropdown:: nevergrad_emna\n\n    **How to use this algorithm:**\n\n    .. code-block::\n\n        import optimagic as om\n        om.minimize(\n          ...,\n          algorithm=om.algos.nevergrad_emna(noise_handling=False, ...)\n        )\n\n    or\n\n    .. 
code-block::\n\n        om.minimize(\n          ...,\n          algorithm=\"nevergrad_emna\",\n          algo_options={\"noise_handling\": False, ...}\n        )\n\n    **Description and available options:**\n\n    .. autoclass:: optimagic.optimizers.nevergrad_optimizers.NevergradEMNA\n```\n\n```{eval-rst}\n.. dropdown:: nevergrad_cga\n\n    **How to use this algorithm:**\n\n    .. code-block::\n\n        import optimagic as om\n        om.minimize(\n          ...,\n          algorithm=om.algos.nevergrad_cga(stopping_maxfun=10_000)\n        )\n\n    or\n\n    .. code-block::\n\n        om.minimize(\n          ...,\n          algorithm=\"nevergrad_cga\",\n          algo_options={\"stopping_maxfun\": 10_000}\n        )\n\n    **Description and available options:**\n\n    .. autoclass:: optimagic.optimizers.nevergrad_optimizers.NevergradCGA\n```\n\n```{eval-rst}\n.. dropdown:: nevergrad_eda\n\n    **How to use this algorithm:**\n\n    .. code-block::\n\n        import optimagic as om\n        om.minimize(\n          ...,\n          algorithm=om.algos.nevergrad_eda(stopping_maxfun=10_000)\n        )\n\n    or\n\n    .. code-block::\n\n        om.minimize(\n          ...,\n          algorithm=\"nevergrad_eda\",\n          algo_options={\"stopping_maxfun\": 10_000}\n        )\n\n    **Description and available options:**\n\n    .. autoclass:: optimagic.optimizers.nevergrad_optimizers.NevergradEDA\n```\n\n```{eval-rst}\n.. dropdown:: nevergrad_tbpsa\n\n    **How to use this algorithm:**\n\n    .. code-block::\n\n        import optimagic as om\n        om.minimize(\n          ...,\n          algorithm=om.algos.nevergrad_tbpsa(noise_handling=False, ...)\n        )\n\n    or\n\n    .. code-block::\n\n        om.minimize(\n          ...,\n          algorithm=\"nevergrad_tbpsa\",\n          algo_options={\"noise_handling\": False, ...}\n        )\n\n    **Description and available options:**\n\n    .. 
autoclass:: optimagic.optimizers.nevergrad_optimizers.NevergradTBPSA\n```\n\n```{eval-rst}\n.. dropdown:: nevergrad_randomsearch\n\n    **How to use this algorithm:**\n\n    .. code-block::\n\n        import optimagic as om\n        om.minimize(\n          ...,\n          algorithm=om.algos.nevergrad_randomsearch(opposition_mode=\"quasi\", ...)\n        )\n\n    or\n\n    .. code-block::\n\n        om.minimize(\n          ...,\n          algorithm=\"nevergrad_randomsearch\",\n          algo_options={\"opposition_mode\": \"quasi\", ...}\n        )\n\n    **Description and available options:**\n\n    .. autoclass:: optimagic.optimizers.nevergrad_optimizers.NevergradRandomSearch\n```\n\n```{eval-rst}\n.. dropdown:: nevergrad_samplingsearch\n\n    **How to use this algorithm:**\n\n    .. code-block::\n\n        import optimagic as om\n        om.minimize(\n          ...,\n          algorithm=om.algos.nevergrad_samplingsearch(sampler=\"Hammersley\", scrambled=True)\n        )\n\n    or\n\n    .. code-block::\n\n        om.minimize(\n          ...,\n          algorithm=\"nevergrad_samplingsearch\",\n          algo_options={\"sampler\": \"Hammersley\", \"scrambled\": True}\n        )\n\n    **Description and available options:**\n\n    .. autoclass:: optimagic.optimizers.nevergrad_optimizers.NevergradSamplingSearch\n```\n\n```{eval-rst}\n.. dropdown:: nevergrad_wizard\n\n    **How to use this algorithm:**\n\n    .. code-block::\n\n        import optimagic as om\n        from optimagic.optimizers.nevergrad_optimizers import Wizard\n        om.minimize(\n          ...,\n          algorithm=om.algos.nevergrad_wizard(optimizer= Wizard.NGOptRW, ...)\n        )\n\n    or\n\n    .. code-block::\n\n        om.minimize(\n          ...,\n          algorithm=\"nevergrad_wizard\",\n          algo_options={\"optimizer\": \"NGOptRW\", ...}\n        )\n\n    **Description and available options:**\n\n    .. autoclass:: optimagic.optimizers.nevergrad_optimizers.NevergradWizard\n    .. 
autoclass:: optimagic.optimizers.nevergrad_optimizers.Wizard\n```\n\n```{eval-rst}\n.. dropdown:: nevergrad_portfolio\n\n    **How to use this algorithm:**\n\n    .. code-block::\n\n        import optimagic as om\n        from optimagic.optimizers.nevergrad_optimizers import Portfolio\n        om.minimize(\n          ...,\n          algorithm=om.algos.nevergrad_portfolio(optimizer= Portfolio.BFGSCMAPlus, ...)\n        )\n\n    or\n\n    .. code-block::\n\n        om.minimize(\n          ...,\n          algorithm=\"nevergrad_portfolio\",\n          algo_options={\"optimizer\": \"BFGSCMAPlus\", ...}\n        )\n\n    **Description and available options:**\n\n    .. autoclass:: optimagic.optimizers.nevergrad_optimizers.NevergradPortfolio\n    .. autoclass:: optimagic.optimizers.nevergrad_optimizers.Portfolio\n```\n\n## Bayesian Optimization\n\nWe wrap the\n[BayesianOptimization](https://github.com/bayesian-optimization/BayesianOptimization)\npackage. To use it, you need to have\n[bayesian-optimization](https://pypi.org/project/bayesian-optimization/) installed.\nNote: This optimizer requires `bayesian_optimization > 2.0.0` to be installed which is\nincompatible with `nevergrad > 1.0.3`.\n\n```{eval-rst}\n.. dropdown::  bayes_opt\n\n    **How to use this algorithm:**\n\n    .. code-block::\n\n        import optimagic as om\n        om.minimize(\n          ...,\n          algorithm=om.algos.bayes_opt(n_iter=50, ...)\n        )\n        \n    or\n        \n    .. code-block::\n\n        om.minimize(\n          ...,\n          algorithm=\"bayes_opt\",\n          algo_options={\"n_iter\": 50, ...}\n        )\n\n    **Description and available options:**\n\n    .. autoclass:: optimagic.optimizers.bayesian_optimizer.BayesOpt\n\n```\n\n## Gradient Free Optimizers\n\nOptimizers from the\n[gradient_free_optimizers](https://github.com/SimonBlanke/Gradient-Free-Optimizers?tab=readme-ov-file)\npackage are available in optimagic. 
To use it, you need to have\n[gradient_free_optimizers](https://pypi.org/project/gradient_free_optimizers) installed.\n\n```{eval-rst}\n.. dropdown:: gfo_hillclimbing\n\n  **How to use this algorithm.**\n\n  .. code-block:: python\n\n    import optimagic as om\n    import numpy as np\n    om.minimize(\n      fun=lambda x: x @ x,\n      params=[1.0, 2.0, 3.0],\n      algorithm=om.algos.gfo_hillclimbing(stopping_maxiter=1_000, ...),\n      bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5]))\n    )\n\n  or using the string interface:\n      \n  .. code-block:: python\n\n    om.minimize(\n      fun=lambda x: x @ x,\n      params=[1.0, 2.0, 3.0],\n      algorithm=\"gfo_hillclimbing\",\n      algo_options={\"stopping_maxiter\": 1_000, ...},\n      bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5]))\n    )\n\n  **Description and available options:**\n\n  .. autoclass:: optimagic.optimizers.gfo_optimizers.GFOHillClimbing\n    :members:\n    :inherited-members: Algorithm, object\n\n```\n\n```{eval-rst}\n.. dropdown:: gfo_stochastichillclimbing\n\n  **How to use this algorithm.**\n\n  .. code-block:: python\n\n    import optimagic as om\n    import numpy as np\n    om.minimize(\n      fun=lambda x: x @ x,\n      params=[1.0, 2.0, 3.0],\n      algorithm=om.algos.gfo_stochastichillclimbing(stopping_maxiter=1_000, ...),\n      bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5]))\n    )\n\n  or using the string interface:\n      \n  .. code-block:: python\n\n    om.minimize(\n      fun=lambda x: x @ x,\n      params=[1.0, 2.0, 3.0],\n      algorithm=\"gfo_stochastichillclimbing\",\n      algo_options={\"stopping_maxiter\": 1_000, ...},\n      bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5]))\n    )\n\n  **Description and available options:**\n\n  .. 
autoclass:: optimagic.optimizers.gfo_optimizers.GFOStochasticHillClimbing\n    :members:\n    :inherited-members: Algorithm, object  \n    :member-order: bysource\n\n```\n\n```{eval-rst}\n.. dropdown:: gfo_repulsinghillclimbing\n\n  **How to use this algorithm.**\n\n  .. code-block:: python\n\n    import optimagic as om\n    import numpy as np\n    om.minimize(\n      fun=lambda x: x @ x,\n      params=[1.0, 2.0, 3.0],\n      algorithm=om.algos.gfo_repulsinghillclimbing(stopping_maxiter=1_000, ...),\n      bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5]))\n    )\n\n  or using the string interface:\n      \n  .. code-block:: python\n\n    om.minimize(\n      fun=lambda x: x @ x,\n      params=[1.0, 2.0, 3.0],\n      algorithm=\"gfo_repulsinghillclimbing\",\n      algo_options={\"stopping_maxiter\": 1_000, ...},\n      bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5]))\n    )\n\n  **Description and available options:**\n\n  .. autoclass:: optimagic.optimizers.gfo_optimizers.GFORepulsingHillClimbing\n    :members:\n    :inherited-members: Algorithm, object  \n    :member-order: bysource\n\n```\n\n```{eval-rst}\n.. dropdown:: gfo_simulatedannealing\n\n  **How to use this algorithm.**\n\n  .. code-block:: python\n\n    import optimagic as om\n    import numpy as np\n    om.minimize(\n      fun=lambda x: x @ x,\n      params=[1.0, 2.0, 3.0],\n      algorithm=om.algos.gfo_simulatedannealing(stopping_maxiter=1_000, ...),\n      bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5]))\n    )\n\n  or using the string interface:\n      \n  .. code-block:: python\n\n    om.minimize(\n      fun=lambda x: x @ x,\n      params=[1.0, 2.0, 3.0],\n      algorithm=\"gfo_simulatedannealing\",\n      algo_options={\"stopping_maxiter\": 1_000, ...},\n      bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5]))\n    )\n\n  **Description and available options:**\n\n  .. 
autoclass:: optimagic.optimizers.gfo_optimizers.GFOSimulatedAnnealing\n    :members:\n    :inherited-members: Algorithm, object  \n    :member-order: bysource\n\n```\n\n```{eval-rst}\n.. dropdown:: gfo_downhillsimplex\n\n  **How to use this algorithm.**\n\n  .. code-block:: python\n\n    import optimagic as om\n    import numpy as np\n    om.minimize(\n      fun=lambda x: x @ x,\n      params=[1.0, 2.0, 3.0],\n      algorithm=om.algos.gfo_downhillsimplex(stopping_maxiter=1_000, ...),\n      bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5]))\n    )\n\n  or using the string interface:\n      \n  .. code-block:: python\n\n    om.minimize(\n      fun=lambda x: x @ x,\n      params=[1.0, 2.0, 3.0],\n      algorithm=\"gfo_downhillsimplex\",\n      algo_options={\"stopping_maxiter\": 1_000, ...},\n      bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5]))\n    )\n\n  **Description and available options:**\n\n  .. autoclass:: optimagic.optimizers.gfo_optimizers.GFODownhillSimplex\n    :members:\n    :inherited-members: Algorithm, object  \n    :member-order: bysource\n\n```\n\n```{eval-rst}\n.. dropdown:: gfo_powells_method\n\n  **How to use this algorithm.**\n\n  .. code-block:: python\n\n    import optimagic as om\n    import numpy as np\n    om.minimize(\n      fun=lambda x: x @ x,\n      params=[1.0, 2.0, 3.0],\n      algorithm=om.algos.gfo_powells_method(stopping_maxiter=1_000, ...),\n      bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5]))\n    )\n\n  or using the string interface:\n      \n  .. code-block:: python\n\n    om.minimize(\n      fun=lambda x: x @ x,\n      params=[1.0, 2.0, 3.0],\n      algorithm=\"gfo_powells_method\",\n      algo_options={\"stopping_maxiter\": 1_000, ...},\n      bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5]))\n    )\n\n  **Description and available options:**\n\n  .. 
autoclass:: optimagic.optimizers.gfo_optimizers.GFOPowellsMethod\n    :members:\n    :inherited-members: Algorithm, object  \n    :member-order: bysource\n\n```\n\n```{eval-rst}\n.. dropdown:: gfo_pso\n\n  **How to use this algorithm.**\n\n  .. code-block:: python\n\n    import optimagic as om\n    import numpy as np\n    om.minimize(\n      fun=lambda x: x @ x,\n      params=[1.0, 2.0, 3.0],\n      algorithm=om.algos.gfo_pso(stopping_maxiter=1_000, ...),\n      bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5]))\n    )\n\n  or using the string interface:\n      \n  .. code-block:: python\n\n    om.minimize(\n      fun=lambda x: x @ x,\n      params=[1.0, 2.0, 3.0],\n      algorithm=\"gfo_pso\",\n      algo_options={\"stopping_maxiter\": 1_000, ...},\n      bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5]))\n    )\n\n  **Description and available options:**\n\n  .. autoclass:: optimagic.optimizers.gfo_optimizers.GFOParticleSwarmOptimization\n    :members:\n    :inherited-members: Algorithm, object  \n    :member-order: bysource\n\n```\n\n```{eval-rst}\n\n.. dropdown:: gfo_parallel_tempering\n\n  **How to use this algorithm.**\n\n  .. code-block:: python\n\n    import optimagic as om\n    import numpy as np\n    om.minimize(\n      fun=lambda x: x @ x,\n      params=np.array([1.0, 2.0, 3.0]),\n      algorithm=om.algos.gfo_parallel_tempering(population_size=15, n_iter_swap=5),\n      bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5]))\n    )\n\n  or using the string interface:\n      \n  .. code-block:: python\n\n    om.minimize(\n      fun=lambda x: x @ x,\n      params=np.array([1.0, 2.0, 3.0]),\n      algorithm=\"gfo_parallel_tempering\",\n      algo_options={\"population_size\": 15, \"n_iter_swap\": 5},\n      bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5]))\n    )\n\n  **Description and available options:**\n\n  .. 
autoclass:: optimagic.optimizers.gfo_optimizers.GFOParallelTempering\n    :members:\n    :inherited-members: Algorithm, object  \n    :member-order: bysource\n```\n\n```{eval-rst}\n.. dropdown:: gfo_spiral_optimization\n\n  **How to use this algorithm.**\n\n  .. code-block:: python\n\n    import optimagic as om\n    import numpy as np\n    om.minimize(\n      fun=lambda x: x @ x,\n      params=np.array([1.0, 2.0, 3.0]),\n      algorithm=om.algos.gfo_spiral_optimization(population_size=15, decay_rate=0.95),\n      bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5]))\n    )\n\n  or using the string interface:\n      \n  .. code-block:: python\n\n    om.minimize(\n      fun=lambda x: x @ x,\n      params=np.array([1.0, 2.0, 3.0]),\n      algorithm=\"gfo_spiral_optimization\",\n      algo_options={\"population_size\": 15, \"decay_rate\": 0.95},\n      bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5]))\n    )\n\n  **Description and available options:**\n\n  .. autoclass:: optimagic.optimizers.gfo_optimizers.GFOSpiralOptimization\n    :members:\n    :inherited-members: Algorithm, object  \n    :member-order: bysource\n```\n\n```{eval-rst}\n.. dropdown:: gfo_genetic_algorithm\n\n  **How to use this algorithm.**\n\n  .. code-block:: python\n\n    import optimagic as om\n    import numpy as np\n    om.minimize(\n      fun=lambda x: x @ x,\n      params=np.array([1.0, 2.0, 3.0]),\n      algorithm=om.algos.gfo_genetic_algorithm(population_size=20, mutation_rate=0.6),\n      bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5]))\n    )\n\n  or using the string interface:\n      \n  .. 
code-block:: python\n\n    om.minimize(\n      fun=lambda x: x @ x,\n      params=np.array([1.0, 2.0, 3.0]),\n      algorithm=\"gfo_genetic_algorithm\",\n      algo_options={\"population_size\": 20, \"mutation_rate\": 0.6},\n      bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5]))\n    )\n\n  **Description and available options:**\n\n  .. autoclass:: optimagic.optimizers.gfo_optimizers.GFOGeneticAlgorithm\n    :members:\n    :inherited-members: Algorithm, object  \n    :member-order: bysource\n```\n\n```{eval-rst}\n.. dropdown:: gfo_evolution_strategy\n\n  **How to use this algorithm.**\n\n  .. code-block:: python\n\n    import optimagic as om\n    import numpy as np\n    om.minimize(\n      fun=lambda x: x @ x,\n      params=np.array([1.0, 2.0, 3.0]),\n      algorithm=om.algos.gfo_evolution_strategy(population_size=15, crossover_rate=0.4),\n      bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5]))\n    )\n\n  or using the string interface:\n      \n  .. code-block:: python\n\n    om.minimize(\n      fun=lambda x: x @ x,\n      params=np.array([1.0, 2.0, 3.0]),\n      algorithm=\"gfo_evolution_strategy\",\n      algo_options={\"population_size\": 15, \"crossover_rate\": 0.4},\n      bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5]))\n    )\n\n  **Description and available options:**\n\n  .. autoclass:: optimagic.optimizers.gfo_optimizers.GFOEvolutionStrategy\n    :members:\n    :inherited-members: Algorithm, object  \n    :member-order: bysource\n```\n\n```{eval-rst}\n.. dropdown:: gfo_differential_evolution\n\n  **How to use this algorithm.**\n\n  .. 
code-block:: python\n\n    import optimagic as om\n    import numpy as np\n    om.minimize(\n      fun=lambda x: x @ x,\n      params=np.array([1.0, 2.0, 3.0]),\n      algorithm=om.algos.gfo_differential_evolution(population_size=20, mutation_rate=0.8),\n      bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5]))\n    )\n\n  or using the string interface:\n      \n  .. code-block:: python\n\n    om.minimize(\n      fun=lambda x: x @ x,\n      params=np.array([1.0, 2.0, 3.0]),\n      algorithm=\"gfo_differential_evolution\",\n      algo_options={\"population_size\": 20, \"mutation_rate\": 0.8},\n      bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5]))\n    )\n\n  **Description and available options:**\n\n  .. autoclass:: optimagic.optimizers.gfo_optimizers.GFODifferentialEvolution\n    :members:\n    :inherited-members: Algorithm, object  \n    :member-order: bysource\n\n```\n\n## Pygad Optimizer\n\nWe wrap the pygad optimizer. To use it you need to have\n[pygad](https://pygad.readthedocs.io/en/latest/) installed.\n\n```{eval-rst}\n.. dropdown::  pygad\n\n    **How to use this algorithm:**\n\n    .. code-block::\n\n        import optimagic as om\n        om.minimize(\n          ...,\n          algorithm=om.algos.pygad(num_generations=100, ...)\n        )\n        \n    or\n        \n    .. code-block::\n\n        om.minimize(\n          ...,\n          algorithm=\"pygad\",\n          algo_options={\"num_generations\": 100, ...}\n        )\n\n    **Description and available options:**\n\n    .. autoclass:: optimagic.optimizers.pygad_optimizer.Pygad\n```\n\n## PySwarms Optimizers\n\noptimagic supports the following continuous algorithms from the\n[PySwarms](https://pyswarms.readthedocs.io/en/latest/) library: (GlobalBestPSO,\nLocalBestPSO, GeneralOptimizerPSO). To use these optimizers, you need to have\n[the pyswarms package](https://github.com/ljvmiranda921/pyswarms) installed.\n(`pip install pyswarms`).\n\n```{eval-rst}\n.. 
dropdown::  pyswarms_global_best\n\n    **How to use this algorithm:**\n\n    .. code-block::\n\n        import optimagic as om\n        om.minimize(\n          ...,\n          algorithm=om.algos.pyswarms_global_best(n_particles=50, ...)\n        )\n        \n    or\n        \n    .. code-block::\n\n        om.minimize(\n          ...,\n          algorithm=\"pyswarms_global_best\",\n          algo_options={\"n_particles\": 50, ...}\n        )\n\n    **Description and available options:**\n\n    .. autoclass:: optimagic.optimizers.pyswarms_optimizers.PySwarmsGlobalBestPSO\n      :members:\n      :inherited-members: Algorithm, object\n\n```\n\n```{eval-rst}\n.. dropdown::  pyswarms_local_best\n\n    **How to use this algorithm:**\n\n    .. code-block::\n\n        import optimagic as om\n        om.minimize(\n          ...,\n          algorithm=om.algos.pyswarms_local_best(n_particles=50, k_neighbors=3, ...)\n        )\n        \n    or\n        \n    .. code-block::\n\n        om.minimize(\n          ...,\n          algorithm=\"pyswarms_local_best\",\n          algo_options={\"n_particles\": 50, \"k_neighbors\": 3, ...}\n        )\n\n    **Description and available options:**\n\n    .. autoclass:: optimagic.optimizers.pyswarms_optimizers.PySwarmsLocalBestPSO\n      :members:\n      :inherited-members: Algorithm, object\n\n```\n\n```{eval-rst}\n.. dropdown::  pyswarms_general\n\n    **How to use this algorithm:**\n\n    .. code-block::\n\n        import optimagic as om\n        om.minimize(\n          ...,\n          algorithm=om.algos.pyswarms_general(n_particles=50, topology_type=\"star\", ...)\n        )\n        \n    or\n        \n    .. code-block::\n\n        om.minimize(\n          ...,\n          algorithm=\"pyswarms_general\",\n          algo_options={\"n_particles\": 50, \"topology_type\": \"star\", ...}\n        )\n\n    **Description and available options:**\n\n    .. 
autoclass:: optimagic.optimizers.pyswarms_optimizers.PySwarmsGeneralPSO\n      :members:\n      :inherited-members: Algorithm, object\n\n```\n\n## References\n\n```{eval-rst}\n.. bibliography:: refs.bib\n    :labelprefix: algo_\n    :filter: docname in docnames\n    :style: unsrt\n```\n"
  },
  {
    "path": "docs/source/conf.py",
    "content": "#!/usr/bin/env python3\n#\n# optimagic documentation build configuration file, created by\n# sphinx-quickstart on Fri Jan 18 10:59:27 2019.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport datetime as dt\nimport os\nfrom importlib.metadata import version\n\nfrom intersphinx_registry import get_intersphinx_mapping\n\nyear = dt.datetime.now().year\n\nauthor = \"Janos Gabler\"\n\n# Set variable so that todos are shown in local build\non_rtd = os.environ.get(\"READTHEDOCS\") == \"True\"\n\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n    \"sphinx.ext.autodoc\",\n    \"sphinx.ext.todo\",\n    \"sphinx.ext.coverage\",\n    \"sphinx.ext.extlinks\",\n    \"sphinx.ext.intersphinx\",\n    \"sphinx.ext.mathjax\",\n    \"sphinx.ext.viewcode\",\n    \"sphinx.ext.napoleon\",\n    \"sphinx_copybutton\",\n    \"myst_nb\",\n    \"sphinxcontrib.bibtex\",\n    \"sphinx_design\",\n    \"sphinxcontrib.mermaid\",\n    \"sphinx_llm.txt\",\n    \"sphinx_llms_txt\",\n]\n\nmyst_enable_extensions = [\n    \"colon_fence\",\n    \"dollarmath\",\n    \"html_image\",\n]\nmyst_fence_as_directive = [\"mermaid\"]\n\n\ncopybutton_prompt_text = \">>> \"\ncopybutton_only_copy_prompt_lines = False\n\nbibtex_bibfiles = [\"refs.bib\"]\n\nautodoc_member_order = \"bysource\"\nautodoc_class_signature = \"separated\"\nautodoc_default_options = {\n    \"exclude-members\": \"__init__\",\n    \"members\": True,\n    \"undoc-members\": True,\n    \"member-order\": \"bysource\",\n    \"class-doc-from\": \"class\",\n}\nautodoc_preserve_defaults = True\nautodoc_type_aliases = {\n    \"PositiveInt\": \"optimagic.typing.PositiveInt\",\n    \"NonNegativeInt\": \"optimagic.typing.NonNegativeInt\",\n    \"PositiveFloat\": \"optimagic.typing.PositiveFloat\",\n    \"NonNegativeFloat\": \"optimagic.typing.NonNegativeFloat\",\n    \"NegativeFloat\": \"optimagic.typing.NegativeFloat\",\n    \"GtOneFloat\": \"optimagic.typing.GtOneFloat\",\n    \"UnitIntervalFloat\": \"optimagic.typing.UnitIntervalFloat\",\n    \"YesNoBool\": \"optimagic.typing.YesNoBool\",\n    \"DirectionLiteral\": \"optimagic.typing.DirectionLiteral\",\n    \"BatchEvaluatorLiteral\": \"optimagic.typing.BatchEvaluatorLiteral\",\n    \"ErrorHandlingLiteral\": \"optimagic.typing.ErrorHandlingLiteral\",\n}\n\nautodoc_mock_imports = [\n    \"bokeh\",\n    \"cloudpickle\",\n    \"cyipopt\",\n    \"fides\",\n    \"joblib\",\n    \"nlopt\",\n    \"pytest\",\n    \"pygmo\",\n    
\"scipy\",\n    \"sqlalchemy\",\n    \"tornado\",\n    \"petsc4py\",\n    \"statsmodels\",\n    \"numba\",\n]\n\nextlinks = {\n    \"ghuser\": (\"https://github.com/%s\", \"%s\"),\n    \"gh\": (\"https://github.com/optimagic-dev/optimagic/pull/%s\", \"%s\"),\n}\n\nintersphinx_mapping = get_intersphinx_mapping(\n    packages={\"numpy\", \"scipy\", \"pandas\", \"python\"}\n)\n\nlinkcheck_ignore = [\n    r\"https://tinyurl\\.com/*.\",\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\nsource_suffix = [\".rst\", \".ipynb\", \".md\"]\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = \"optimagic\"\ncopyright = f\"2019 - {year}, {author}\"  # noqa: A001\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The full version, including alpha/beta/rc tags.\nrelease = version(\"optimagic\").split(\"+\")[0]\nversion = \".\".join(release.split(\".\")[:2])\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = \"en\"\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = [\n    \"_build\",\n    \"**.ipynb_checkpoints\",\n    \"how_to/how_to_slice_plot_3d.ipynb\",\n]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\npygments_dark_style = \"monokai\"\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\nif on_rtd:\n    pass\nelse:\n    todo_include_todos = True\n    todo_emit_warnings = True\n\n# -- Options for myst-nb  ----------------------------------------\nnb_execution_mode = \"force\"  # \"off\", \"force\", \"cache\", \"auto\"\nnb_execution_allow_errors = False\nnb_merge_streams = True\nnb_scroll_outputs = True\n\n# Notebook cell execution timeout; defaults to 30.\nnb_execution_timeout = 1000\n\n# List of notebooks that will not be executed.\nnb_execution_excludepatterns = [\n    # Problem with latex rendering\n    \"estimation_tables_overview.ipynb\",\n    # too long runtime\n    \"bootstrap_montecarlo_comparison.ipynb\",\n    \"how_to_slice_plot_3d.ipynb\",\n]\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages.  See the documentation for\n# a list of builtin themes.\nhtml_theme = \"furo\"\n\n# Add any paths that contain custom static files (such as style sheets) here, relative\n# to this directory. 
They are copied after the built-in static files, so a file named\n# \"default.css\" will overwrite the built-in \"default.css\".\nhtml_css_files = [\"css/termynal.css\", \"css/termynal_custom.css\", \"css/custom.css\"]\n\nhtml_js_files = [\n    \"js/termynal.js\",\n    \"js/custom.js\",\n    \"js/require.js\",\n]\n\n\n# Add any paths that contain custom static files (such as style sheets) here, relative\n# to this directory. They are copied after the builtin static files, so a file named\n# \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# If false, no module index is generated.\nhtml_domain_indices = True\n\n# If false, no index is generated.\nhtml_use_index = True\n\n# If true, the index is split into individual pages for each letter.\nhtml_split_index = False\n\n# If true, links to the source (either copied by sphinx on on github)\nhtml_copy_source = True\n\n# If true, links to the reST sources are added to the pages.\nhtml_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\nhtml_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. 
Default is True.\nhtml_show_copyright = True\n\nhtml_title = \"optimagic\"\n\nhtml_theme_options = {\n    \"sidebar_hide_name\": True,\n    \"navigation_with_keys\": True,\n    \"light_logo\": \"images/optimagic_logo.svg\",\n    \"dark_logo\": \"images/optimagic_logo_dark_mode.svg\",\n    \"light_css_variables\": {\n        \"color-brand-primary\": \"#f04f43\",\n        \"color-brand-content\": \"#f04f43\",\n    },\n    \"dark_css_variables\": {\n        \"color-brand-primary\": \"#f04f43\",\n        \"color-brand-content\": \"#f04f43\",\n    },\n    \"source_repository\": \"https://github.com/optimagic-dev/optimagic\",\n    \"source_branch\": \"main\",\n    \"source_directory\": \"docs/source/\",\n    \"footer_icons\": [\n        {\n            \"name\": \"GitHub\",\n            \"url\": \"https://github.com/optimagic-dev/optimagic\",\n            \"html\": \"\"\"\n                <svg stroke=\"currentColor\" fill=\"currentColor\" stroke-width=\"0\" viewBox=\"0 0 16 16\">\n                    <path fill-rule=\"evenodd\" d=\"M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 7.59.4.07.55-.17.55-.38 0-.19-.01-.82-.01-1.49-2.01.37-2.53-.49-2.69-.94-.09-.23-.48-.94-.82-1.13-.28-.15-.68-.52-.01-.53.63-.01 1.08.58 1.23.82.72 1.21 1.87.87 2.33.66.07-.52.28-.87.51-1.07-1.78-.2-3.64-.89-3.64-3.95 0-.87.31-1.59.82-2.15-.08-.2-.36-1.02.08-2.12 0 0 .67-.21 2.2.82.64-.18 1.32-.27 2-.27.68 0 1.36.09 2 .27 1.53-1.04 2.2-.82 2.2-.82.44 1.1.16 1.92.08 2.12.51.56.82 1.27.82 2.15 0 3.07-1.87 3.75-3.65 3.95.29.25.54.73.54 1.48 0 1.07-.01 1.93-.01 2.2 0 .21.15.46.55.38A8.013 8.013 0 0 0 16 8c0-4.42-3.58-8-8-8z\"></path>\n                </svg>\n            \"\"\",\n            \"class\": \"\",\n        },\n        {\n            \"name\": \"Zulip\",\n            \"url\": \"https://ose.zulipchat.com/#narrow/channel/221432-optimagic\",\n            \"html\": \"\"\"\n                <svg stroke=\"currentColor\" fill=\"currentColor\" stroke-width=\"0\" role=\"img\" viewBox=\"0 0 24 24\" 
height=\"1em\" width=\"1em\"\n                xmlns=\"http://www.w3.org/2000/svg\">\n                <path d=\"M22.767 3.589c0 1.209-.543 2.283-1.37 2.934l-8.034 7.174c-.149.128-.343-.078-.235-.25l2.946-5.9c.083-.165-.024-.368-.194-.368H4.452c-1.77 0-3.219-1.615-3.219-3.59C1.233 1.616 2.682 0 4.452 0h15.096c1.77-.001 3.219 1.614 3.219 3.589zM4.452 24h15.096c1.77 0 3.219-1.616 3.219-3.59 0-1.974-1.449-3.59-3.219-3.59H8.12c-.17 0-.277-.202-.194-.367l2.946-5.9c.108-.172-.086-.378-.235-.25l-8.033 7.173c-.828.65-1.37 1.725-1.37 2.934 0 1.974 1.448 3.59 3.218 3.59z\"></path></svg>\n            \"\"\",\n            \"class\": \"\",\n        },\n    ],\n}\n"
  },
  {
    "path": "docs/source/development/changes.md",
    "content": "(changes)=\n\n```{include} ../../../CHANGES.md\n```\n"
  },
  {
    "path": "docs/source/development/code_of_conduct.md",
    "content": "(coc)=\n\n## Code of Conduct\n\nThe optimagic project has a [Code of Conduct][conduct] to which all contributors must\nadhere. See details in the [written policy statement][conduct].\n\n[conduct]: https://github.com/optimagic-dev/optimagic/blob/main/.github/CODE_OF_CONDUCT.md\n"
  },
  {
    "path": "docs/source/development/credits.md",
    "content": "# Credits\n\n## The optimagic Team\n\n```{eval-rst}\n+---------------------------------------------------------------+-------------------------------------------------------------------+-------------------------------------------------------------------+-------------------------------------------------------------------+\n+ .. figure:: ../_static/images/janos.jpg                       + .. figure:: ../_static/images/mariam.jpg                          + .. figure:: ../_static/images/tim.jpeg                            + .. figure:: ../_static/images/klara.jpg                           +\n+     :width: 120px                                             +     :width: 120px                                                 +     :width: 120px                                                 +     :width: 120px                                                 +\n+                                                               +                                                                   +                                                                   +                                                                   +\n+     `Janoś Gabler <https://github.com/janosg>`_               +     `Mariam Petrosyan <https://github.com/mpetrosian>`_           +     `Tim Mensinger <https://github.com/timmens>`_                 +     `Klara Röhrl <https://github.com/roecla>`_                    +\n+---------------------------------------------------------------+-------------------------------------------------------------------+-------------------------------------------------------------------+-------------------------------------------------------------------+\n+ .. figure:: ../_static/images/tobi.png                        + .. figure:: ../_static/images/annica.jpeg                         + .. figure:: ../_static/images/sebi.jpg                            + .. 
figure:: ../_static/images/bahar.jpg                           +\n+     :width: 120px                                             +     :width: 120px                                                 +     :width: 120px                                                 +     :width: 120px                                                 +\n+                                                               +                                                                   +                                                                   +                                                                   +\n+     `Tobias Raabe <https://github.com/tobiasraabe>`_          +     `Annica Gehlen <https://github.com/amageh>`_                  +     `Sebastian Gsell <https://github.com/segsell>`_               +     `Bahar Coskun <https://github.com/baharcos>`_                 +\n+---------------------------------------------------------------+-------------------------------------------------------------------+-------------------------------------------------------------------+-------------------------------------------------------------------+\n+ .. figure:: ../_static/images/aida.jpg                        + .. figure:: ../_static/images/hmg.jpg                             + .. 
figure:: ../_static/images/ken.jpeg                            +                                                                   +\n+     :width: 120px                                             +     :width: 120px                                                 +     :width: 120px                                                 +                                                                   +\n+                                                               +                                                                   +                                                                   +                                                                   +\n+     `Aida Takhmazova <https://github.com/aidatak97>`_         +     `Hans-Martin von Gaudecker <https://github.com/hmgaudecker>`_ +     `Kenneth L. Judd <https://kenjudd.org/>`_                     +                                                                   +\n+---------------------------------------------------------------+-------------------------------------------------------------------+-------------------------------------------------------------------+-------------------------------------------------------------------+\n```\n\nJanoś is the original developer and architect behind optimagic (formerly estimagic). All\nteam members are active contributors in terms of commits, advice or community building.\nHans-Martin and Ken support optimagic with funding and their expertise.\n\n## Contributors\n\nWe are grateful for many contributions from the community. 
In particular, we want to\nthank Moritz Mendel, Max Blesch, Christian Zimpelmann, Robin Musolff, Sofia Badini,\nSofya Akimova, Xuefei Han, Leiqiong Wan, Andrew Souther, Luis Calderon, Linda\nMaokomatanda, Madhurima Chandra, and Vijaybabu Gangaprasad.\n\n## Acknowledgements\n\nWe thank all institutions that have funded or supported optimagic (formerly estimagic)\n\n```{image} ../_static/images/aai-institute-logo.svg\n---\nwidth: 185px\n---\n```\n\n```{image} ../_static/images/numfocus_logo.png\n---\nwidth: 200\n---\n```\n\n```{image} ../_static/images/tra_logo.png\n---\nwidth: 240px\n---\n```\n\n```{image} ../_static/images/hoover_logo.png\n---\nwidth: 192px\n---\n```\n\n```{image} ../_static/images/transferlab-logo.svg\n---\nwidth: 420px\n---\n```\n"
  },
  {
    "path": "docs/source/development/enhancement_proposals.md",
    "content": "# Enhancement Proposals\n\noptimagic Enhancement Proposals (EPs) can be used to discuss and design large changes.\nEP-00 details the EP process, the optimagic governance model and the optimagic Code of\nConduct. It is the only EP that gets continuously updated.\n\nThese EPs are currently in place:\n\n```{toctree}\n---\nmaxdepth: 1\n---\nep-00-governance-model.md\nep-01-pytrees.md\nep-02-typing.md\nep-03-alignment.md\n```\n"
  },
  {
    "path": "docs/source/development/ep-00-governance-model.md",
    "content": "(ep-00)=\n\n# EP-00: Governance model & code of conduct\n\n```{eval-rst}\n+------------+------------------------------------------------------------------+\n| Author     | `Maximilian Blesch <https://github.com/MaxBlesch>`_,             |\n|            | `Janoś Gabler <https://github.com/janosg>`_,                     |\n|            | `Hans-Martin von Gaudecker <https://github.com/hmgaudecker>`_,   |\n|            | `Annica Gehlen <https://github.com/amageh>`_,                    |\n|            | `Sebastian Gsell <https://github.com/segsell>`_,                 |\n|            | `Tim Mensinger <https://github.com/timmens>`_,                   |\n|            | `Mariam Petrosyan <https://github.com/mpetrosian>`_,             |\n|            | `Tobias Raabe <https://github.com/tobiasraabe>`_,                |\n|            | `Klara Röhrl <https://github.com/roecla>`_                       |\n+------------+------------------------------------------------------------------+\n| Status     | Accepted                                                         |\n+------------+------------------------------------------------------------------+\n| Type       | Standards Track                                                  |\n+------------+------------------------------------------------------------------+\n| Created    | 2022-04-28                                                       |\n+------------+------------------------------------------------------------------+\n| Resolution |                                                                  |\n+------------+------------------------------------------------------------------+\n```\n\n## Purpose\n\nThis document formalizes the optimagic code of conduct and governance model. 
In case of\nchanges, this document can be updated following the optimagic Enhancement Proposal\nprocess detailed below.\n\n```{include} ../../../CODE_OF_CONDUCT.md\n```\n\n## optimagic governance model\n\n### Summary\n\nThe governance model strives to be lightweight and based on\n[consensus](https://numpy.org/doc/stable/dev/governance/governance.html#consensus-based-decision-making-by-the-community)\nof all interested parties. Most work happens in GitHub issues and pull requests (regular\ndecision process). Any interested party can voice their concerns or veto on proposed\nchanges. If this happens, the optimagic Enhancement Proposal (EP) process can be used to\niterate over proposals until consensus is reached (controversial decision process). If\nnecessary, members of the steering council can moderate heated debates and help to\nbroker a consensus.\n\n### Regular decision process\n\nMost changes to optimagic are additions of new functionality or strict improvements of\nexisting functionality. Such changes can be discussed in GitHub issues and discussions\nand implemented in pull requests. They do not require an optimagic Enhancement Proposal.\n\nBefore starting to work on optimagic, contributors should read\n[how to contribute](how-to) and the [styleguide](styleguide). They can also reach out to\nexisting contributors if any help is needed or anything remains unclear. We are all\nhappy to help onboarding new contributors in any way necessary. For example, we have\ngiven introductions to git and GitHub in the past to help people make a contribution to\noptimagic.\n\nPull requests should be opened as soon as work is started. They should contain a good\ndescription of the planned work such that any interested party can participate in the\ndiscussion around the changes. If planned changes turn out to be controversial, their\ndesign should be discussed in an optimagic Enhancement Proposal before the actual work\nstarts. 
When the work is finished, the author of a pull request can request a review. In\nmost cases, previous discussions will show who is a suitable reviewer. If in doubt, tag\n[janosg](https://github.com/janosg). Pull requests can be merged if there is at least\none approving review.\n\nReviewers should be polite, welcoming and helpful to the author of the pull request who\nmight have spent many hours working on the changes. Authors of pull requests should keep\nin mind that reviewers' time is valuable. Major points should be discussed publicly on\nGitHub, but very critical feedback or small details can be moved to private discussions\n— if the latter are necessary at all (see\n[the bottom section of this blog post](https://rgommers.github.io/2019/06/the-cost-of-an-open-source-contribution/)\nfor an excellent discussion of the burden that review comments place on maintainers,\nwhich might not always be obvious). Video calls can help if a discussion gets stuck. The\ncode of conduct applies to all interactions related to code reviews.\n\n### optimagic Enhancement Proposals (EPs) / Controversial decision process\n\nLarge changes to optimagic can be proposed in optimagic Enhancement Proposals, short\nEPs. They serve the purpose of summarising discussions that may happen in chats, issues,\npull requests, in person, or by any other means. Simple extensions (like adding new\noptimizers) do not need to be discussed with such a formal process.\n\nEPs are written as markdown documents that become part of the documentation. Opening an\nEP means opening a pull request that adds the markdown document to the documentation. 
It\nis not necessary to already have a working implementation for the planned changes, even\nthough it might be a good idea to have rough prototypes for solutions to the most\nchallenging parts.\n\nIf the author of an EP feels that it is ready to be accepted they need to make a post in\nthe relevant [Zulip topic](https://ose.zulipchat.com) and a comment on the PR that\ncontains the following information:\n\n1. Summary of all contentious aspects of the EP and how they have been resolved\n1. Every interested party has seven days to comment on the PR proposing the EP, either\n   with approval or objections. While only objections are relevant for the decision\n   making process, approvals are a good way to signal interest in the planned change and\n   recognize the work of the authors.\n1. If there are no unresolved objections after seven days, the EP will automatically be\n   accepted and can be merged.\n\nNote that the pull requests that actually implement the proposed enhancements still\nrequire a standard review cycle.\n\n### Steering Council\n\nThe optimagic Steering Council consists of five people who take responsibility for the\nfuture development of optimagic and the optimagic community. Being a member of the\nsteering council comes with no special rights. 
The main roles of the steering council\nare:\n\n- Facilitate the growth of optimagic and the optimagic community by organizing community\n  events, identifying funding opportunities and improving the experience of all\n  community members.\n- Develop a roadmap, break down large changes into smaller projects and find\n  contributors to work on the implementation of these projects.\n- Ensure that new contributors are onboarded and assisted and that pull requests are\n  reviewed in a timely fashion.\n- Step in as moderators when discussions get heated, help to achieve consensus on\n  controversial topics and enforce the code of conduct.\n\nThe Steering Council is elected by the optimagic community during a community meeting.\n\nCandidates need to be active community members and can be nominated by other community\nmembers or themselves until the start of the election. Nominated candidates need to\naccept the nomination before the start of the election.\n\nIf there are only five candidates, the Steering Council is elected by acclamation. Else,\nevery participant casts five votes. The 5 candidates with the most votes become elected.\nCandidates can vote for themselves. Ties are resolved by a second round of voting where\neach participant casts as many votes as there are positions left. Remaining ties are\nresolved by randomization.\n\nCurrent members of the optimagic Steering Council are:\n\n- [Janoś Gabler](https://github.com/janosg)\n- [Annica Gehlen](https://github.com/amageh)\n- [Hans-Martin von Gaudecker](https://github.com/hmgaudecker)\n- [Tim Mensinger](https://github.com/timmens)\n- [Mariam Petrosyan](https://github.com/mpetrosian)\n\n### Community meeting\n\nCommunity meetings can be held to elect a steering council, make changes to the\ngovernance model or code of conduct, or to make other decisions that affect the\ncommunity as a whole. 
Moreover, they serve to keep the community updated about the\ndevelopment of optimagic and get feedback.\n\nCommunity meetings need to be announced via our public channels (e.g. the\n[zulip workspace](https://ose.zulipchat.com) or GitHub discussions) with sufficient time\nuntil the meeting. The definition of sufficient time will increase with the size of the\ncommunity.\n"
  },
  {
    "path": "docs/source/development/ep-01-pytrees.md",
    "content": "(eppytrees)=\n\n# EP-01: Pytrees\n\n```{eval-rst}\n+------------+------------------------------------------------------------------+\n| Author     | `Janos Gabler <https://github.com/janosg>`_                      |\n+------------+------------------------------------------------------------------+\n| Status     | Accepted                                                         |\n+------------+------------------------------------------------------------------+\n| Type       | Standards Track                                                  |\n+------------+------------------------------------------------------------------+\n| Created    | 2022-01-28                                                       |\n+------------+------------------------------------------------------------------+\n| Resolution |                                                                  |\n+------------+------------------------------------------------------------------+\n```\n\n## Abstract\n\nThis EEP explains how we will use pytrees to allow for more flexible specification of\nparameters for optimization or differentiation, more convenient ways of writing moment\nfunctions for msm estimation and more. The actual code to work with pytrees is\nimplemented in [Pybaum], developed by {ghuser}`janosg` and {ghuser}`tobiasraabe`.\n\n## Backwards compatibility\n\nAll changes are fully backwards compatible.\n\n## Motivation\n\nEstimagic has many functions that require user written functions as inputs. Examples\nare:\n\n- criterion functions and their derivatives for optimization\n- functions of which numerical derivatives are taken\n- functions that calculate simulated moments\n- functions that calculate bootstrap statistics\n\nIn all cases, there are some restrictions on possible inputs and outputs of the user\nwritten functions. For example, parameters for numerical optimization need to be\nprovided as pandas.DataFrame with a `\"value\"` column. 
Simulated moments and bootstrap\nstatistics need to be returned as a pandas.Series, etc.\n\nPytrees allow to relax many of those restrictions on interfaces of user provided\nfunctions. This is not only more convenient for users, but sometimes also allows to\nreduce overhead because the user can choose optimal data structures for their problem.\n\n## Background: What is a pytree\n\nPytree is a term used in TensorFlow and JAX to refer to a tree-like structure built out\nof container-like Python objects with arbitrary levels of nesting.\n\nWhat is a container can be re-defined for each application. By default, lists, tuples\nand dicts are considered containers and everything else is a leaf. Then the following\nare examples of pytrees:\n\n```python\n[1, \"a\", np.arange(3)]  # 3 leaves\n\n[1, {\"k1\": 2, \"k2\": (3, 4)}, 5]  # 5 leaves\n\nnp.arange(5)  # 1 leaf\n```\n\nWhat makes pytrees so powerful are the operations defined for them. The most important\nones are:\n\n- `tree_flatten`: Convert any pytree into a flat list of leaves + metadata\n- `tree_unflatten`: The inverse of `tree_flatten`\n- `tree_map`: Apply a function to all leaves in a pytree\n- `leaf_names`: Generate a list of names for all leaves in a pytree\n\nThe above examples of pytrees would look as follows when flattened (with a default\ndefinition of containers):\n\n```python\n[1, \"a\", np.arange(3)]\n\n[1, 2, 3, 4, 5]\n\n[np.arange(5)]\n```\n\nBy adding numpy arrays to the registry of container like objects, each of the three\nexamples above would have five leafs. The flattened versions would look as follows:\n\n```python\n[1, \"a\", 0, 1, 2]\n\n[1, 2, 3, 4, 5]\n\n[0, 1, 2, 3, 4]\n```\n\nNeedless to say, it is possible to register anything as container. 
For example, we would\nadd pandas.Series and pandas.DataFrame (with varying definitions, depending on the\napplication).\n\n## Difference between pytrees in JAX and estimagic\n\nMost JAX functions\n[only work with Pytrees of arrays](https://jax.readthedocs.io/en/latest/pytrees.html#pytrees-and-jax-functions)\nand scalars, i.e. pytrees where container types are dicts, lists and tuples and all\nleaves are arrays or scalars. We will just call them pytrees of arrays because scalars\nare converted to arrays by JAX.\n\nThere are two ways to look at such pytrees:\n\n1. As pytree of arrays -> `tree_flatten` produces a list of arrays\n1. As pytree of numbers -> `tree_flatten` produces a list of numbers\n\nThe only difference between the two perspectives is that for the second one, arrays have\nbeen registered as container types that can be flattened. In JAX the term `ravel`\ninstead of `flatten` is sometimes used to make clear that the second perspective is\nmeant.\n\nEstimagic functions work with slightly more general pytrees. On top of arrays, they can\nalso contain scalars, pandas.Series and pandas.DataFrames.\n\nAgain, there are two possible ways to look at such pytrees:\n\n1. As pytree of arrays, numbers, Series and DataFrames -> `tree_flatten` produces a list\n   of arrays numbers, Series and DataFrames.\n1. 
As pytree of numbers -> `tree_flatten` produces a list of numbers\n\nAgain, the difference between the two is which objects are registered as container types\nand the rules to flatten and unflatten them are defined.\n\nWhile numpy arrays, scalars and pandas.Series have only one natural way of defining the\nflattening rules, this becomes more complex for DataFrames due to the way `params`\nDataFrames were used in estimagic before.\n\nWe define the following rules: If a DataFrame contains a column called `\"value\"`, we\ninterpret them as classical estimagic DataFrame and only consider the entries in the\n`\"value\"` column when flattening the DataFrame into a list of numbers. If there is no\ncolumn `\"value\"`, all numeric columns of the DataFrame are considered.\n\nNote that internally, we will sometimes define flattening rules such that only some\nother column, e.g. only `\"lower_bound\"` is considered. However we never look at more\nthan one column of a classical estimagic params DataFrame at a time.\n\nTo distinguish between the different pytrees we use the terms JAX-pytree and\nestimagic-pytree.\n\n## Optimization with pytrees\n\nIn this section we look at possible ways to specify optimizations when parameters and\nsome outputs of criterion functions can be estimagic-pytrees.\n\nAs an example we use a hypothetical criterion function with pytree inputs and outputs to\ndescribe how a user can optimize it. 
We also give a rough intuition what happens behind\nthe scenes.\n\n### The criterion function\n\nConsider a criterion function that takes parameters in the following format:\n\n```python\nparams = {\n    \"delta\": 0.95,\n    \"utility\": pd.DataFrame(\n        [[0.5, 0]] * 3, index=[\"a\", \"b\", \"c\"], columns=[\"value\", \"lower_bound\"]\n    ),\n    \"probs\": np.array([[0.8, 0.2], [0.3, 0.7]]),\n}\n```\n\nThe criterion function returns a dictionary of the form:\n\n```python\n{\n    \"value\": 1.1,\n    \"contributions\": {\"a\": np.array([0.36, 0.25]), \"b\": 0.49},\n    \"root_contributions\": {\"a\": np.array([0.6, 0.5]), \"b\": 0.7},\n}\n```\n\n### Run an optimization\n\n```python\nfrom estimagic import minimize\n\nminimize(\n    criterion=crit,\n    params=params,\n    algorithm=\"scipy_lbfgsb\",\n)\n```\n\nThe internal optimizer (in this case the lbfgsb algorithm from scipy) will see a wrapped\nversion of `crit`. That version takes a 1d numpy array as its only argument and returns\na scalar float (the `\"value\"` entry of the result of `crit`). Numerical derivatives are\nalso taken on that function.\n\nIf instead a derivative based least squares optimizer like `\"scipy_ls_dogbox\"` had been\nused, the internal optimizer would see a modified version of `crit` that takes a 1d\nnumpy array and returns a 1d numpy array (the flattened version of the\n`\"root_contributions\"` entry of the result of `crit`).\n\n### The optimization output\n\nThe following entries of the output of minimize are affected by the change:\n\n- `\"solution_params\"`: A pytree with the same structure as `params`\n- `\"solution_criterion\"`: The output dictionary of `crit` evaluated solution params\n- `solution_derivative`: Maybe we should not even have this entry.\n\n```{note}\nWe need to discuss if and in which form we want to have a solution\nderivative entry. In it's current form it is useless if constraints are used. 
This gets\nworse when we allow for pytrees and translating this into a meaningful shape might be\nvery difficult.\n```\n\n### Add bounds\n\nBounds on parameters that are inside a DataFrame with `\"value\"` column can simply be\nspecified as before. For all others, there are separate `lower_bounds` and\n`upper_bounds` arguments in `maximize` and `minimize`.\n\n`lower_bounds` and `upper_bounds` are pytrees of the same structure as `params` or a\nsubtree that preserves enough structure to match all bounds. For example:\n\n```python\nminimize(\n    criterion=crit,\n    params=params,\n    algorithm=\"scipy_lbfgsb\",\n    lower_bounds={\"delta\": 0},\n    upper_bounds={\"delta\": 1},\n)\n```\n\nThis would add bounds for delta, keep the bounds on all `\"utility\"` parameters, and\nleave the `\"probs\"` parameters unbounded.\n\n### Add a constraint\n\nCurrently, parameters to which a constraint is applied are selected via a `\"loc\"` or\n`\"query\"` entry in the constraints dictionary.\n\nThis keeps working as long as params are specified as a single DataFrame containing a\n`\"value\"` column. 
If a more general pytree is used we need a \"selector\" entry instead.\nThe value of that entry is a callable that takes the pytree and returns selected\nparameters.\n\nThe `selector` function may return the parameters in the form of an estimagic-pytree.\nShould order play a role for the constraints (e.g., increasing) the constraint will be\napplied to the flattened version of the pytree returned by the `selector` function.\nHowever, in the case that order matters, we advise users to return one-dimensional\narrays (explicit is better than implicit).\n\nAs an example, let's add probability constraints for each row of `\"probs\"`:\n\n```python\nconstraints = [\n    {\"selector\": lambda params: params[\"probs\"][0], \"type\": \"probability\"},\n    {\"selector\": lambda params: params[\"probs\"][1], \"type\": \"probability\"},\n]\n\nminimize(\n    criterion=crit,\n    params=params,\n    algorithm=\"scipy_lbfgsb\",\n    constraints=constraints,\n)\n```\n\nThe required changes to support this are relatively simple. This is because most\nfunctions that deal with constraints already work with a 1d array of parameters and the\n`\"loc\"` and `\"query\"` entries of constraints are internally translated to positions in\nthat array very early on.\n\n### Derivatives during optimization\n\nIf numerical derivatives are used, they are already taken on a modified function that\nmaps from 1d numpy array to scalars or 1d numpy arrays. Allowing for estimagic-pytrees\nin parameters and criterion outputs will not pose any difficulties here.\n\nClosed form derivatives need to have the following interface: They expect `params` in\nthe exact same format as the criterion function as first argument. 
They return a\nderivative in the same format as our numerical derivative functions or JAXs autodiff\nfunctions when applied to the criterion function.\n\n## Numerical derivatives with pytrees\n\n### Problem: Higher dimensional extensions of pytrees\n\nThe derivative of a function that maps from a 1d array to a 1d array (usually called\nJacobian) is a 2d matrix. If the 1d arrays are replaced by pytrees, we need a two\ndimensional extension of the pytrees. Below we will look at how JAX does this and why we\ncannot simply copy that solution.\n\n### The JAX solution\n\nLet's look at an example. We first define a function in terms of 1d arrays and then in\nterms of pytrees and look at a JAX calculated jacobian in both cases:\n\n```python\ndef square(x):\n    return x**2\n\n\nx = jnp.array([1, 2, 3, 4, 5, 6.0])\n\njacobian(square)(x)\n```\n\n```bash\nDeviceArray([[ 2.,  0.,  0.,  0.,  0.,  0],\n             [ 0.,  4.,  0.,  0.,  0.,  0],\n             [ 0.,  0.,  6.,  0.,  0.,  0],\n             [ 0.,  0.,  0.,  8.,  0.,  0],\n             [ 0.,  0.,  0.,  0., 10.,  0],\n             [ 0.,  0.,  0.,  0.,  0., 12]], dtype=float32)\n```\n\n```python\ndef tree_square(x):\n    out = {\n        \"c\": x[\"a\"] ** 2,\n        \"d\": x[\"b\"].flatten() ** 2,\n    }\n\n    return out\n\n\ntree_x = {\"a\": jnp.array([1, 2.0]), \"b\": jnp.array([[3, 4], [5, 6.0]])}\n\n\njacobian(tree_square)(tree_x)\n```\n\nInstead of showing the entire results, let's just look at the resulting tree structure\nand array shapes:\n\n```python\n{\n    \"c\": {\n        \"a\": (2, 2),\n        \"b\": (2, 2, 2),\n    },\n    \"d\": {\n        \"a\": (4, 2),\n        \"b\": (4, 2, 2),\n    },\n}\n```\n\nThe outputs for hessians have even deeper nesting and three dimensional arrays inside\nthe nested dictionary. 
Similarly, we would get higher dimensional arrays if one of the\noriginal pytrees had already contained a 2d array.\n\n### Extending the JAX solution to estimagic-pytrees\n\nJAX pytrees can only contain arrays, whereas estimagic-pytrees may contain scalars,\npandas.Series and pandas.DataFrames (with or without `\"value\"` column). Unfortunately,\nthis poses non-trivial challenges for numerical derivatives because those data types\nhave no natural extension in arbitrary dimensions.\n\nOur solution needs to fulfill two requirements:\n\n1\\. Compatible with JAX in the sense that whenever a derivative can be calculated with\nJAX it can also be calculated with estimagic and the result has the same structure. 2.\nCompatible with the rest of estimagic in the sense that any function that can be\noptimized can also be differentiated. In the special case of differentiating with\nrespect to a DataFrame it also needs to be backwards compatible.\n\nA solution that achieves this is to treat Series and DataFrames with `\"value\"` columns\nas 1d arrays and other DataFrames as 2d arrays, then proceed as in JAX and finally try\nto preserve as much index and column information as possible.\n\nThis leads to very natural results in the typical usecases with flat dicts of Series or\nparams DataFrames both as inputs and outputs and is backwards compatible with everything\nthat is supported already.\n\nHowever, similar to JAX, not everything that is supported will also be a good idea.\nPredicting where a pandas Object is preserved and where it will be replaced by an array\nmight be hard for very nested pytrees. However, these rules are mainly defined to avoid\nhard limitations that have to be checked and documented. Users will learn to avoid too\nmuch complexity by avoiding complex pytrees as inputs and outputs at the same time.\n\nTo see this in action, let's look at an example. We repeat the example from the JAX\ninterface above with the following changes:\n\n1. 
The 1d numpy array in x[\"a\"] is replaced by a DataFrame with `\"value\"` column\n1. The \"d\" entry in the output becomes a Series instead of a 1d numpy array.\n\n```python\ndef pd_tree_square(x):\n    out = {\n        \"c\": x[\"a\"][\"value\"] ** 2,\n        \"d\": pd.Series(x[\"b\"].flatten() ** 2, index=list(\"jklm\")),\n    }\n\n    return out\n\n\npd_tree_x = {\n    \"a\": pd.DataFrame(data=[[1], [2]], index=[\"alpha\", \"beta\"], columns=[\"value\"]),\n    \"b\": np.array([[3, 4], [5, 6]]),\n}\n\npd_tree_square(pd_tree_x)\n```\n\n```\n{\n    'c':\n        \"alpha\"    1\n        \"beta\"     4\n        dtype: int64,\n    'd':\n        \"j\"        9\n        \"k\"       16\n        \"l\"       25\n        \"m\"       36\n        dtype: int64,\n}\n```\n\nThe resulting shapes of the jacobian will be the same as before. For all arrays with\nonly two dimensions we can preserve some information from the Series and DataFrame\nindices. On the higher dimensional ones, this will be lost.\n\n```python\n{\n    \"c\": {\n        \"a\": (2, 2),  # df with columns [\"alpha\", \"beta\"], index [\"alpha\", \"beta\"]\n        \"b\": (2, 2, 2),  # numpy array without label information\n    },\n    \"d\": {\n        \"a\": (4, 2),  # columns [\"alpha\", \"beta\"], index [0, 1, 2, 3]\n        \"b\": (4, 2, 2),  # numpy array without label information\n    },\n}\n```\n\nTo get more intuition for the structure of the result, let's add a few labels to the\nvery first jacobian:\n\n```{eval-rst}\n+--------+----------+----------+----------+----------+----------+----------+----------+\n|        |          | a        |          | b        |          |          |          |\n+--------+----------+----------+----------+----------+----------+----------+----------+\n|        |          | alpha    | beta     | j        | k        | l        | m        |\n+--------+----------+----------+----------+----------+----------+----------+----------+\n| c      | alpha    | 2        | 0        | 0    
    | 0        | 0        | 0        |\n+        +----------+----------+----------+----------+----------+----------+----------+\n|        | beta     | 0        | 4        | 0        | 0        | 0        | 0        |\n+--------+----------+----------+----------+----------+----------+----------+----------+\n| d      | 0        | 0        | 0        | 6        | 0        | 0        | 0        |\n+        +----------+----------+----------+----------+----------+----------+----------+\n|        | 1        | 0        | 0        | 0        | 8        | 0        | 0        |\n+        +----------+----------+----------+----------+----------+----------+----------+\n|        | 2        | 0        | 0        | 0        | 0        | 10       | 0        |\n+        +----------+----------+----------+----------+----------+----------+----------+\n|        | 3        | 0        | 0        | 0        | 0        | 0        | 12       |\n+--------+----------+----------+----------+----------+----------+----------+----------+\n```\n\nThe indices [\"j\", \"k\", \"l\", \"m\"] unfortunately never made it into the result because\nthey were only applied to elements that already came from a 2d array and thus always\nhave a 3d Jacobian, i.e. the result entry `[\"c\"][\"b\"]` is a reshaped version of the upper\nright 2 by 4 array and the result entry `[\"d\"][\"b\"]` is a reshaped version of the lower\nright 4 by 4 array.\n\n### Implementation\n\nWe use the following terminology to describe the implementation:\n\n- input_tree: The pytree containing parameters, i.e. inputs to the function that is\n  differentiated.\n- output_tree: The pytree that is returned by the function being differentiated\n- derivative_tree: The pytree we want to generate, i.e. 
the pytree that would be\n  returned by JAX jacobian.\n- flat_derivative: The matrix version of the derivative_tree\n\nTo simply reproduce the JAX behavior with pytrees of arrays, we could proceed in the\nfollowing steps:\n\n- Create a modified function that maps from 1d array to 1d array\n- Calculate flat_derivative by taking numerical derivatives just as before\n- Calculate the shapes of all arrays in derivative_tree by concatenating the shapes of\n  the cartesian product of flattend output_tree and input_tree\n- Calculate the 2d versions of those arrays by taking the product over elements in the\n  shape tuple before concatenating.\n- Create a list of lists containing all arrays that will be in derivative_tree. The\n  values are taken from flat_derivative, using the previously calculated shapes.\n- call `tree_unflatten` on the inner lists with the treedef corresponding to input_tree.\n- call `tree_unflatten` on the result of that with the treedef corresponding to\n  output_tree.\n\nTo implement the extension to estimagic pytrees we would probably do exactly the same\nbut have a bit more preparation and post-processing to do.\n\n## General aspects of pytrees in estimation functions\n\n### Estimation summaries\n\nCurrently, estimation summaries are DataFrames. The estimated parameters are in the\n`\"value\"` column. There are other columns with standard errors, p-values, significance\nstars and confidence intervals.\n\nThis is another form of higher dimensional extension of pytrees, where we need to add\nadditional columns. There are two ways in which estimation summaries could be presented.\nI suggest we offer both. The first is more geared towards generating estimation tables\nand serving as actual summary to be looked at in a jupyter notebook. It is also\nbackwards compatible and should thus be the default. The second is more geared towards\nfurther calculations. 
There will be utility functions to convert between the two.\n\nBoth formats will be explained using the `params` pytree from the optimization example\n(reproduced here for convenience):\n\n#### Format 1: Everything becomes a DataFrame\n\nIn this approach we do the following conversions:\n\n1. numpy arrays are flattened and converted to DataFrames with one column called\n   `\"value\"`. The index contains the original positions of elements.\n1. pandas.Series are converted to DataFrames. The index remains unchanged. The column is\n   called `\"value\"`.\n1. scalars become DataFrames with one row with index 0 and one column called `\"value\"`.\n1. DataFrames without `\"value\"` column are stacked into a DataFrame with just one column\n   called `\"value\"`.\n1. DataFrames with `\"value\"` column are reduced to that column.\n\nAfter these transformations, all numbers of the original pytree are stored in DataFrames\nwith `\"value\"` column. Additional columns with standard errors and the like can then\nsimply be assigned as before.\n\nFor more intuition, let's see how this would look in an example. For simplicity we only\nadd a column with stars and ommit standard errors, p-values and confidence intervals. We\nuse the same example as in the optimization section:\n\n```python\nparams = {\n    \"delta\": 0.95,\n    \"utility\": pd.DataFrame(\n        [[0.5, 0]] * 3, index=[\"a\", \"b\", \"c\"], columns=[\"value\", \"lower_bound\"]\n    ),\n    \"probs\": np.array([[0.8, 0.2], [0.3, 0.7]]),\n}\n```\n\n```\n{\n'delta':\n          value stars\n    0     0.95   ***,\n'utility':\n          value stars\n    a     0.5    **\n    b     0.5    **\n    c     0.5    **,\n'probs':\n          value stars\n    0 0   0.8   ***\n      1   0.2     *\n    1 0   0.3    **\n      1   0.7   ***,\n}\n```\n\n#### Format 2: Dictionary of pytrees\n\nThe second solution is a dictionary of pytrees the keys are the columns of the current\nsummary but probably in plural, i.e. 
\"values\", \"standard_errors\", \"p-values\", ...;\n\nEach value is a pytree with the exact same structure as `params`. If this pytree\ncontains DataFrames with `\"value\"` column, only that column is updated. i.e. standard\nerrors would be accessed via `summary[\"standard_errors\"][\"my_df\"][\"value\"]`.\n\n### Representation of covariance matrices\n\nA covariance matrix is a two dimensional extension of a `params` pytree. We could\ntheoretically handle it exactly the same way as Jacobians. However, this would not be\nuseful for statistical tests and visualization if it contains more than 2 dimensional\narrays (as the Jacobian example does).\n\nWe thus propose to have two possible formats in which covariance matrices can be\nreturned:\n\n1. The pytree variant described in the above Jacobian example. This will be useful to\n   look at sub-matrices of the full covariance matrix as long as the `params` pytree\n   only contains one dimensional arrays, Series and DataFrames with `\"value\"` columns.\n1. A DataFrame containing the covariance matrix of the flattened parameter vector. The\n   index and columns of the DataFrames can be constructed from the `leaf_names` function\n   in `pybaum`. We could also triviall add a function there that constructs an index\n   that is easier to work with for selecting elements and let the user choose between\n   the two versions.\n\nThe function that maps from the flat version (which would be calculated internally) to\nthe pytree version is the same as we need for numerical derivatives. The inverse of that\nfunction is probably not too difficult to implement and can also be useful for\nderivatives.\n\n### params\n\nEverything that can be used as `params` in optimization and differentiation can also be\nused as `params` in estimation. 
The registries used in pytree functions are identical.\n\n## ML specific aspects of pytrees\n\nThe output of the log likelihood functions is a dictionary with the entries:\n\n- `\"value\"`: a scalar float\n- `\"contributions\"`: a 1d numpy array or pandas.Series\n\nMoreover, there can be arbitrary additional entries.\n\nThe only change is that `\"contributions\"` can now be any estimagic pytree.\n\n## MSM specific aspects of pytrees\n\n### Valid formats of empirical and simulated moments\n\nThere are three types of moments in MSM estimation:\n\n- `empirical moments`\n- The output of `simulate_moments`\n- The output of `calculate_moments`, needed to get a moments covariance matrix.\n\nWe propose that moments can be stored as any valid estimagic pytree but of course all\nthree types of moments have to be aligned, i.e. be stored in a tree of the same\nstructure. We will raise an error if the trees do not have the same structure.\n\nThis is a generalization of an interface that has already proven useful in\n[respy](https://github.com/OpenSourceEconomics/respy),\n[sid](https://github.com/covid-19-impact-lab/sid) and other applications. In the future,\nthe project specific implementations of flatten and unflatten functions could simply be\ndeleted.\n\n### Representation of the weighting matrix and moments_cov\n\nThe weighting matrix for MSM estimation is represented as a DataFrame in the same way as\nthe flat representation of the covariance matrices. Of course, the conversion functions\nthat work for covariance matrices would also work here, but it is highly unlikely that a\ndifferent representation of a weighting matrix is ever needed.\n\nNote that the user does not have to construct this weighting matrix manually. 
They can\ngenerate them using `get_moments_cov` and `get_weighting_matrix`, so they do not need\nany knowledge of how the flattening works.\n\n### Representation of sensitivity measures\n\nSensitivity measures are similar to covariance matrices in the sense that they require a\ntwo dimensional extension of pytrees. The only difference is that for covariance\nmatrices the two pytrees are the same (namely the `params`) and for sensitivity measures\nthey are different (one is `params`, the other `moments`).\n\nWe therefore suggest to use the same solution, i.e. to offer a flat representation in\nform of a DataFrame, a pytree representation and functions to convert between the two.\n\n## Compatibility with estimation tables\n\nEstimation tables are constructed from estimation summaries. This continues to work for\nsummaries where everything has been converted to DataFrames. Users will select\nindividual DataFrames from a pytree of DataFrames, possibly concatenate or filter them\nand pass them to the estimation table function.\n\n## Compatibility with plotting functions\n\nThe following functions are affected:\n\n- `plot_univariate_effects`\n- `convergence_plot`\n- `lollipop_plot`\n- `derivative_plot`\n\nMost of them can be adjusted easily to the proposed changes. On all others we will\nsimply raise errors and provide tutorials to work around the limitations.\n\n## Compatibility with Dashboard\n\nThe main challenge for the dashboard is that pytrees have no natural multi-column\nextension and thus it becomes harder to specify a group or name column. However, these\nfeatures have not been used very much anyways.\n\nWe propose to write a better automatic grouping and naming function for pytrees. That\nway it is simply not necessary to provide group and name columns and most of the users\nwill get a better dashboard experience.\n\nRules of thumb for both should be:\n\n1. Only parameters where the start values have a similar magnitude can be in the same\n   group, i.e. 
displayed in one lineplot.\n1. Parameters that are close to each other in the tree (i.e. have a common beginning in\n   their leaf_name) should be in the same group.\n1. The plot title should subsume the common parts of the tree-structure (i.e. name we\n   get from `pybaum.leaf_names`).\n1. Most line plots should have approximately 5 lines, none should have more than 8.\n\n## Advanced options for functions that work with pytrees\n\nThere are two arguments to `tree_flatten` and other pytree functions that determine which\nentries in a pytree are considered a leaf and which a container as well as how\ncontainers are flattened. 1. `registry` and 2. `is_leaf`. See the documentation of\n`pybaum` for details.\n\nTo allow for absolute flexibility, each function that works with pytrees needs to allow\na user to pass in a `registry` and an `is_leaf` argument. If a function works with\nmultiple pytrees (e.g. in `estimate_msm` the `params` are a pytree and\n`empirical_moments` are a pytree) it needs to allow users to pass in multiple\nregistries and is_leaf functions (e.g. `params_registry`, `params_is_leaf` and\n`moments_registry`, `moments_is_leaf`).\n\nHowever, we need only as many registries as there are different pytrees. For example\nsince `simulated_moments` and `empirical_moments` always need to be pytrees with the\nsame structure, they do not need separate registries and is_leaf functions.\n\n## Pytree related reasons for a switch to result objects\n\nThere will be another EEP that proposes to replace the result dictionaries we currently\nuse everywhere in estimagic by result objects. While this is not completely related to\npytrees, the switch to pytrees provides a few additional reasons:\n\n1. Since we sometimes provide results in several formats (e.g. summaries as dict\n   of pytrees and as pytree of DataFrames), the result dictionary would become too large\n   and confusing. 
Having result objects that just calculate specific formats on demand\n   can alleviate this.\n1. The result object can serve as a simplified wrapper to pytree functions and pytree\n   conversion functions between pytree formats that abstracts from registry, is_leaf and\n   treedefs.\n1. Result objects allow to define a `__repr__` which becomes really useful as soon as\n   parameters are not just one DataFrame but for example, a dict of DataFrames.\n\n## Compatibility with JAX autodiff\n\nWhile we allow for pytrees of arrays, numbers and DataFrames, JAX only allows pytrees of\narrays and numbers for automatic differentiation.\n\nIf you want to use automatic differentiation with estimagic you will thus have to\nrestrict yourself in the way you specify parameters.\n\n[pybaum]: https://github.com/OpenSourceEconomics/pybaum\n"
  },
  {
    "path": "docs/source/development/ep-02-typing.md",
    "content": "(eeptyping)=\n\n# EP-02: Static typing\n\n```{eval-rst}\n+------------+------------------------------------------------------------------+\n| Author     | `Janos Gabler <https://github.com/janosg>`_                      |\n+------------+------------------------------------------------------------------+\n| Status     | Accepted                                                         |\n+------------+------------------------------------------------------------------+\n| Type       | Standards Track                                                  |\n+------------+------------------------------------------------------------------+\n| Created    | 2024-05-02                                                       |\n+------------+------------------------------------------------------------------+\n| Resolution |                                                                  |\n+------------+------------------------------------------------------------------+\n```\n\n## Abstract\n\nThis enhancement proposal explains the adoption of static typing in optimagic. The goal\nis to reap a number of benefits:\n\n- Users will benefit from IDE tools such as easier discoverability of options and\n  autocompletion.\n- Developers and users will find code easier to read due to type hints.\n- The codebase will become more robust due to static type checking and use of stricter\n  types in internal functions.\n\nAchieving these goals requires more than adding type hints. optimagic is currently\nmostly [stringly typed](https://wiki.c2.com/?StringlyTyped). For example, optimization\nalgorithms are selected via strings. 
Another example are\n[constraints](https://estimagic.readthedocs.io/en/latest/how_to_guides/optimization/how_to_specify_constraints.html),\nwhich are dictionaries with a fixed set of required keys.\n\nThis enhancement proposal outlines how we can accommodate the changes needed to reap the\nbenefits of static typing without breaking users' code in too many places.\n\n## Motivation and resources\n\n- [Writing Python like it's Rust](https://kobzol.github.io/rust/python/2023/05/20/writing-python-like-its-rust.html).\n  A very good blogpost that summarizes the drawbacks of \"stringly-typed\" Python code and\n  shows how to incorporate typing philosophies from Rust into Python projects. Read this\n  if you don't have time to read the other resources.\n- [Robust Python](https://www.oreilly.com/library/view/robust-python/9781098100650/), an\n  excellent book that discusses how to design code around types and provides an\n  introduction to static type checkers in Python.\n- [jax enhancement proposal](https://jax.readthedocs.io/en/latest/jep/12049-type-annotations.html)\n  for adopting static typing. It has a very good discussion on benefits of static\n  typing.\n- [Subclassing in Python Redux](https://hynek.me/articles/python-subclassing-redux/)\n  explains which types of subclassing are considered harmful and was very helpful for\n  designing this proposal.\n\n(design-philosophy)=\n\n## Design Philosophy\n\nThe core principles behind this enhancement proposal can be summarized by the following\npoints. This is an extension to our existing\n[styleguide](https://estimagic.org/en/latest/development/styleguide.html) which will be\nupdated if this proposal is accepted.\n\n- User facing functions should be generous regarding their input type. Example: the\n  `algorithm` argument can be a string, `Algorithm` class or `Algorithm` instance. 
The\n  `algo_options` can be an `AlgorithmOptions` object or a dictionary of keyword\n  arguments.\n- User facing functions should be strict about their output types. A strict output type\n  does not just mean that the output type is known (and not a generous Union), but that\n  it is a proper type that enables static analysis for available attributes. Example:\n  whenever possible, public functions should not return dicts but proper result types\n  (e.g. `OptimizeResult`, `NumdiffResult`, ...)\n- Internal functions should be strict about input and output types; Typically, a public\n  function will check all arguments, convert them to a proper type and then call an\n  internal function. Example: `minimize` will convert any valid value for `algorithm`\n  into an `Algorithm` instance and then call an internal function with that type.\n- Each argument that previously accepted strings or option dictionaries now also accepts\n  input types that are more amenable to static analysis and offer better autocomplete.\n  Example: `algo_options` could just be a dict of keyword arguments. Now it can also be\n  an `AlgorithmOptions` instance that enables autocomplete and static analysis for\n  attribute access.\n- Fixed field types should only be used if all fields are known. An example where this\n  is not the case are collections of benchmark problems, where the set of fields depends\n  on the selected benchmark sets and other things. In such situations, dictionaries that\n  map strings to BenchmarkProblem objects are a good idea.\n- For backwards compatibility and compatibility with SciPy, we allow things we don't\n  find ideal (e.g. selecting algorithms via strings). However, the documentation should\n  mostly show our prefered way of doing things. Alternatives can be hidden in tabs and\n  expandable boxes.\n- Whenever possible, use immutable types. 
Whenever things need to be changeable,\n  consider using an immutable type with copy constructors for modified instances.\n  Example: instances of `Algorithm` are immutable but using `Algorithm.with_option`\n  users can create modified copies.\n- The main entry point to optimagic are functions, objects are mostly used for\n  configuration and return types. This takes the best of both worlds: we get the safety\n  and static analysis that (in Python) can only be achieved using objects but the\n  beginner friendliness and freedom provided by functions. Example: Having a `minimize`\n  function, it is very easy to add the possibility of running minimizations with\n  multiple algorithms in parallel and returning the best value. Having a `.solve` method\n  on an algorithm object would require a whole new interface for this.\n\n## Changes for optimization\n\nThe following changes apply to all functions that are directly related to optimization,\ni.e. `maximize`, `minimize`, `slice_plot`, `criterion_plot`, `params_plot`,\n`count_free_params`, `check_constraints` and `OptimizeResult`.\n\n### The objective function\n\n#### Current situation\n\nThe objective or criterion function is the function being optimized.\n\nThe same criterion function can work for scalar, least-squares and likelihood\noptimizers. Moreover, a criterion function can return additional data that is stored in\nthe log file (if logging is active). All of this is achieved by returning a dictionary\ninstead of just a scalar float.\n\nFor the simplest case, where only scalar optimizers are used, `criterion` returns a\nfloat. 
Here are two examples of this simple case.\n\nThe **first example** represents `params` as a flat numpy array and returns a float.\nThis would also be compatible with SciPy:\n\n```python\ndef sphere(params: np.ndarray) -> float:\n    return params @ params\n```\n\nThe **second example** also returns a float but uses a different format for the\nparameters:\n\n```python\ndef dict_sphere(params: dict) -> float:\n    return params[\"a\"] ** 2 + params[\"b\"] ** 2\n```\n\nIf the user wants the criterion function to be compatible with specialized optimizers\nfor least-squares problems, the criterion function needs to return a dictionary.\n\n```python\ndef least_squares_sphere(params: np.ndarray) -> dict[str, Any]:\n    return {\"root_contributions\": params}\n```\n\nHere the `\"root_contributions\"` are the least-squares residuals. The dictionary key\ntells optimagic how to interpret the output. This is needed because optimagic has no way\nof finding out whether a criterion function that returns a vector (or pytree) is a\nleast-squares function or a likelihood function. Of course all specialized problems can\nstill be solved with scalar optimizers.\n\nThe criterion function can also return a dictionary, if the user wants to store some\ninformation in the log file. This is independent of having a least-squares function or\nnot. An example is:\n\n```python\ndef logging_sphere(x: np.ndarray) -> dict[str, Any]:\n    return {\"value\": x @ x, \"mean\": x.mean(), \"std\": x.std()}\n```\n\nHere `\"value\"` is the actual scalar criterion value. All other fields are unknown to\noptimagic and therefore just logged in the database if logging is active.\n\nThe specification of likelihood functions is very analogous to least-squares functions\nand therefore omitted here.\n\n**Things we want to keep**\n\n- Allow using the same criterion function for scalar, likelihood and least-squares\n  optimizers. 
This feature makes it easy to try out and compare very different\n  algorithms with minimal code changes.\n- No restrictions on the type of additional arguments of the criterion function.\n- Maintain compatibility with scipy.optimize when the criterion function returns a\n  scalar.\n\n**Problems**\n\n- Most users of optimagic find it hard to write criterion functions that return the\n  correct dictionary. Therefore, they don't use the logging feature and we often get\n  questions about specifying least-squares problems correctly.\n- Internally we can make almost no assumptions about the output of a criterion function,\n  making the code that processes the criterion output very complex and full of if\n  conditions.\n- We only know whether the specified criterion function is compatible with the selected\n  optimizer after we evaluate it once. This means that users see errors only very late.\n- While optional, in least-squares problems it is possible that a user specifies\n  `root_contributions`, `contributions` and `value` even though any of them could be\n  constructed out of the `root_contributions`. 
This redundancy of information means that\n  we need to check the consistency of all user provided function outputs.\n\n#### Proposal\n\nIn the current situation, the dictionary return type solves two different problems that\nwill now be solved separately.\n\n##### Specifying different problem types\n\nThe simplest way of specifying a least-squares function becomes:\n\n```python\nimport optimagic as om\n\n\n@om.mark.least_squares\ndef ls_sphere(params):\n    return params\n```\n\nAnalogously, the simplest way of specifying a likelihood function becomes:\n\n```python\n@om.mark.likelihood\ndef ll_sphere(params):\n    return params**2\n```\n\nThe simplest way of specifying a scalar function stays unchanged, but optionally a\n`mark.scalar` decorator can be used:\n\n```python\n@om.mark.scalar  # this is optional\ndef sphere(params):\n    return params @ params\n```\n\nExcept for the decorators, these three functions are specified the same way as in other\npython libraries that support specialized optimizers (e.g.\n`scipy.optimize.least_squares`). 
The reason why we need the decorators is that we\nsupport all kinds of optimizers in the same interface.\n\n##### Return additional information\n\nIf users additionally want to return information that should be stored in the log file,\nthey need to use a specific Object as return type.\n\n```python\n@dataclass(frozen=True)\nclass FunctionValue:\n    value: float | PyTree\n    info: dict[str, Any]\n```\n\nAn example of a least-squares function that also returns additional info for the log\nfile would look like this:\n\n```python\nfrom optimagic import FunctionValue\n\n\n@om.mark.least_squares\ndef least_squares_sphere(params):\n    out = FunctionValue(\n        value=params, info={\"p_mean\": params.mean, \"p_std\": params.std()}\n    )\n    return out\n```\n\nAnd analogous for scalar and likelihood functions, where again the `mark.scalar`\ndecorator is optional.\n\n##### Optionally replace decorators by type hints\n\nThe purpose of the decorators is to tell us the output type of the criterion function.\nThis is necessary because there is no way of distinguishing between likelihood and\nleast-squares functions from the output alone and because we want to know the function\ntype before we evaluate the function once.\n\nAn alternative that might be more convenient for advanced Python programmers would be to\ndo this via type hints. 
In this case, the return types need to be a bit more\nfine-grained:\n\n```python\n@dataclass(frozen=True)\nclass ScalarFunctionValue(FunctionValue):\n    value: float\n    info: dict[str, Any]\n\n\n@dataclass(frozen=True)\nclass LeastSquaresFunctionValue(FunctionValue):\n    value: PyTree\n    info: dict[str, Any]\n\n\n@dataclass(frozen=True)\nclass LikelihoodFunctionValue(FunctionValue):\n    value: PyTree\n    info: dict[str, Any]\n```\n\nA least-squares function could then be specified without decorator as follows:\n\n```python\nfrom optimagic import LeastSquaresFunctionValue\n\n\ndef least_squares_sphere(params: np.ndarray) -> LeastSquaresFunctionValue:\n    out = LeastSquaresFunctionValue(\n        value=params, info={\"p_mean\": params.mean, \"p_std\": params.std()}\n    )\n    return out\n```\n\nThis approach works nicely in projects that use type hints already. However, it would be\nhard for users who have never heard about type hints. Therefore, we should implement it\nbut not use it in beginner tutorials and always make clear that this is completely\noptional.\n\n##### Summary of output types\n\nThe output type of the objective function is `float | PyTree[float] | FunctionValue`.\n\n### Bundling bounds\n\n#### Current situation\n\nCurrently we have four arguments of `maximize`, `minimize`, and related functions that\nlet the user specify bounds:\n\n```python\nom.minimize(\n    # ...\n    lower_bounds=params - 1,\n    upper_bounds=params + 1,\n    soft_lower_bounds=params - 2,\n    soft_upper_bounds=params + 2,\n    # ...\n)\n```\n\nEach of them is a pytree that mirrors the structure of `params` or `None`\n\n**Problems**\n\n- Usually, all of these arguments are used together and passing them around individually\n  is annoying.\n- The names are very long because the word `bounds` is repeated.\n\n#### Proposal\n\nWe bundle the bounds together in a `Bounds` type:\n\n```python\nbounds = om.Bounds(\n    lower=params - 1,\n    upper=params + 1,\n    
soft_lower=params - 2,\n    soft_upper=params + 2,\n)\nom.minimize(\n    # ...\n    bounds=bounds,\n    # ...\n)\n```\n\nAs a bonus feature, the `Bounds` type can do some checks on the bounds at instance\ncreation time such that users get errors before running an optimization.\n\nUsing the old arguments will be deprecated.\n\nSince there is no need to modify instances of `Bounds`, it should be immutable.\n\nTo improve the alignment with SciPy, we can also allow users to pass a\n`scipy.optimize.Bounds` object as bounds. Internally, this will be converted to our\n`Bounds` object.\n\n### Constraints\n\n#### Current situation\n\nCurrently, constraints are dictionaries with a set of required keys. The exact\nrequirements depend on the type of constraints and even on the structure of `params`.\n\nEach constraint needs a way to select the parameters to which the constraint applies.\nThere are three dictionary keys for this:\n\n- `\"loc\"`, which works if params are numpy arrays, `pandas.Series` or\n  `pandas.DataFrame`.\n- `\"query\"`, which works only if `params` are `pandas.DataFrame`\n- `\"Selector\"`, which works for all valid formats of `params`.\n\nMoreover, each constraint needs to specify its type using the `\"type\"` key.\n\nSome constraints have additional required keys:\n\n- Linear constraints have `\"weights\"`, `\"lower_bound\"`, `\"upper_bound\"`, and `\"value\"`.\n- Nonlinear constraints have `\"func\"`, `\"lower_bound\"`, `\"upper_bound\"`, and `\"value\"`.\n\nDetails and examples can be found\n[here](https://estimagic.readthedocs.io/en/latest/how_to_guides/optimization/how_to_specify_constraints.html).\n\n**Things we want to keep**\n\n- The constraints interface is very declarative; Constraints purely collect information\n  and are completely separate from the implementation.\n- All three ways of selecting parameters have their strength and can be very concise and\n  readable in specific applications.\n\n**Problems**\n\n- Constraints are hard to document 
and generally not understood by most users.\n- Having multiple ways of selecting parameters (not all compatible with all `params`\n  formats) is confusing for users and annoying when processing constraints. We have to\n  handle the case where no selection or multiple selections are specified.\n- Dicts with required keys are brittle and do not provide autocomplete. This is made\n  worse by the fact that each type of constraint requires different sets of keys.\n\n#### Proposal\n\n1. We implement simple dataclasses for each type of constraint.\n1. We get rid of `loc` and `query` as parameter selection methods. Instead, we show in\n   the documentation how both selection methods can be used inside a `selector`\n   function.\n\nExamples of the new syntax are:\n\n```python\nconstraints = [\n    om.constraints.FixedConstraint(selector=lambda x: x[0, 5]),\n    om.constraints.IncreasingConstraint(selector=lambda x: x[1:4]),\n]\n\nres = om.minimize(\n    fun=criterion,\n    params=np.array([2.5, 1, 1, 1, 1, -2.5]),\n    algorithm=\"scipy_lbfgsb\",\n    constraints=constraints,\n)\n```\n\nSince there is no need to modify instances of constraints, they should be immutable.\n\nAll constraints can subclass `Constraint` which will only have the `selector` attribute.\nDuring the deprecation phase, `Constraint` will also have `loc` and `query` attributes.\n\nThe current `cov` and `sdcorr` constraints apply to flattened covariance matrices, as\nwell as standard deviations and flattened correlation matrices. This comes from a time\nwhere optimagic only supported an essentially flat parameter format (`DataFrames` with\n`\"value\"` column). We can exploit the current deprecation cycle to rename the current\n`cov` and `sdcorr` constraints to `FlatCovConstraint` and `FlatSdcorrConstraint`. 
This\nprepares the introduction of a more natural `CovConstraint` and `SdcorrConstraint`\nlater.\n\n(algorithm-selection)=\n\n### Algorithm selection\n\n#### Current situation\n\n`algorithm` is a string or a callable that satisfies the internal algorithm interface.\nIf the user passes a string, we look up the algorithm implementation in a dictionary\ncontaining all installed algorithms. We implement suggestions for typical typos based on\nfuzzy matching of strings.\n\n**Things we want to keep**\n\n- optimagic can be used just like scipy\n\n**Problems**\n\n- There is no autocomplete.\n- It is very easy to make typos and they only get caught at runtime.\n- Users cannot select algorithms without reading the documentation.\n\n#### Proposal\n\nThe following proposal is quite ambitious and split into multiple steps. Thanks to\n[@schroedk](https://github.com/schroedk) for helpful discussions on this topic.\n\n##### Step 1: Passing algorithm classes and objects\n\nFor compatibility with SciPy we continue to allow algorithm strings. However, the\npreferred ways of selecting algorithms are now:\n\n1. Passing an algorithm class\n1. 
Passing a configured algorithm object\n\nBoth new ways become possible because of changes to the internal algorithm interface.\nSee [here](algorithm-interface) for the proposal.\n\nWe remove the possibility of passing callables that comply with the old internal\nalgorithm interface.\n\nIn a simple example, algorithm selection via algorithm classes looks as follows:\n\n```python\nom.minimize(\n    lambda x: x @ x,\n    params=np.arange(5),\n    algorithm=om.algorithms.scipy_neldermead,\n)\n```\n\nPassing a configured instance of an algorithm looks as follows:\n\n```python\nom.minimize(\n    lambda x: x @ x,\n    params=np.arange(5),\n    algorithm=om.algorithms.scipy_neldermead(adaptive=True),\n)\n```\n\n##### Step 2: Achieving autocomplete without too much typing\n\nThere are many ways in which the above behavior could be achieved with full autocomplete\nsupport. For reasons that will become clear in the next section, we choose to represent\n`algorithms` as a dataclass. Alternatives are enums, `__init__` files, NamedTuples, etc.\n\nA prototype for that dataclass looks as follows:\n\n```python\nfrom typing import Type\n\n\n@dataclass(frozen=True)\nclass Algorithms:\n    scipy_neldermead: Type[ScipyNelderMead] = ScipyNelderMead\n    scipy_lbfgsb: Type[ScipyLBFGSB] = ScipyLBFGSB\n    # ...\n    # many more\n    # ...\n\n\nalgorithms = Algorithms()\n```\n\nCurrently, all algorithms are collected in a dictionary that is created\nprogrammatically. Representing algorithms in a static data structure instead requires a\nlot more typing and therefore code to maintain. This situation will become even worse\nwith some of the features we propose below. Therefore, we want to automate the creation\nof the dataclass.\n\nTo this end, we can write a function that automatically creates the code for the\n`Algorithms` dataclass. This function can be executed in a local pre-commit hook to make\nsure all generated code is up-to-date in every commit. 
It can also be executed in a\n[pytest hook](https://docs.pytest.org/en/7.1.x/how-to/writing_hook_functions.html)\n(before the collection phase) to make sure everything is up-to-date when tests run.\n\nUsers of optimagic (and their IDEs) will never know that this code was not typed in by a\nhuman, which guarantees that autocomplete and static analysis will work without\nproblems.\n\n```{note}\nWe can also use [pytest-hooks](https://docs.pytest.org/en/7.1.x/how-to/writing_hook_functions.html)\nto make sure the generated code is up-to-date whenever the test suite runs.\n```\n\n##### Step 3: Filtered autocomplete\n\nHaving the flat `Algorithms` data structure would be enough if every user knew exactly\nwhich algorithm they want to use and just needed help typing in the name. However, this\nis very far from realistic. Most users have little knowledge about optimization\nalgorithms. In the best case, they know a few properties of their problems (e.g. whether\nit is differentiable) and their goal (e.g. do they need a local or global solution).\n\nTo exemplify what we want to achieve, assume a simplified situation with 4 algorithms.\nWe only consider whether an algorithm is gradient free or gradient based. Here is the\nfictitious list:\n\n- `neldermead`: `gradient_free`\n- `bobyqa`: `gradient_free`\n- `lbfgs`: `gradient_based`\n- `slsqp`: `gradient_based`\n\nWe want the following behavior:\n\nThe user types `om.algorithms.` and autocomplete shows\n\n|                 |\n| --------------- |\n| `GradientBased` |\n| `GradientFree`  |\n| `neldermead`    |\n| `bobyqa`        |\n| `lbfgs`         |\n| `slsqp`         |\n\nA user can either select one of the algorithms (lowercase) directly or filter further by\nselecting a category (CamelCase). This would look as follows:\n\nThe user types `om.algorithms.GradientFree.` and autocomplete shows\n\n|              |\n| ------------ |\n| `neldermead` |\n| `bobyqa`     |\n\nOnce the user arrives at an algorithm, a subclass of `Algorithm` is returned. 
This class\nwill be passed to `minimize` or `maximize`. Passing configured instances of `Algorithm`s\nwill be discussed in [Algorithm Options](algorithm-options).\n\nIn practice, we would have a lot more algorithms and a lot more categories. Some\ncategories might be mutually exclusive, in that case the second category is omitted\nafter the first one is selected.\n\nWe have the following categories:\n\n- `GradientBased` vs. `GradientFree`\n- `Local` vs. `Global`\n- `Bounded` vs. `Unbounded`\n- `Scalar` vs. `LeastSquares` vs. `Likelihood`\n- `LinearConstrained` vs. `NonlinearConstrained` vs. `Unconstrained`\n\nPotentially, we could also offer a `.All` attribute that returns a list of all currently\nselected algorithms. That way a user could for example loop over all `Bounded` and\n`GradientBased` `LeastSquares` algorithms and compare them in a criterion plot.\n\nThese categories match nicely with our\n[algorithm selection tutorials](https://effective-programming-practices.vercel.app/scientific_computing/optimization_algorithms/objectives_materials.html).\n\nTo achieve this behavior, we would have to implement something like this:\n\n```python\n@dataclass(frozen=True)\nclass GradientBasedAlgorithms:\n    lbfgs: Type[LBFGS] = LBFGS\n    slsqp: Type[SLSQP] = SLSQP\n\n    @property\n    def All(self) -> List[om.typing.Algorithm]:\n        return [LBFGS, SLSQP]\n\n\n@dataclass(frozen=True)\nclass GradientFreeAlgorithms:\n    neldermead: Type[NelderMead] = NelderMead\n    bobyqa: Type[Bobyqa] = Bobyqa\n\n    @property\n    def All(self) -> List[om.typing.Algorithm]:\n        return [NelderMead, Bobyqa]\n\n\n@dataclass(frozen=True)\nclass Algorithms:\n    lbfgs: Type[LBFGS] = LBFGS\n    slsqp: Type[SLSQP] = SLSQP\n    neldermead: Type[NelderMead] = NelderMead\n    bobyqa: Type[Bobyqa] = Bobyqa\n\n    @property\n    def GradientBased(self) -> GradientBasedAlgorithms:\n        return GradientBasedAlgorithms()\n\n    @property\n    def GradientFree(self) -> 
GradientFreeAlgorithms:\n        return GradientFreeAlgorithms()\n\n    @property\n    def All(self) -> List[om.typing.Algorithm]:\n        return [LBFGS, SLSQP, NelderMead, Bobyqa]\n```\n\nIf implemented by hand, this would require an enormous amount of typing and introduce a\nvery high maintenance burden. Whenever a new algorithm was added to optimagic, we would\nhave to register it in multiple nested dataclasses.\n\nThe code generation approach detailed in the previous section can solve this problem.\nWhile it might have been overkill to achieve basic autocomplete, it is justified to\nachieve this filtering behavior. How the relevant information for filtering (e.g.\nwhether an algorithm is gradient based) is collected, will be discussed in\n[internal algorithms](algorithm-interface).\n\n```{note}\nThe use of dataclasses is an implementation detail. This enhancement proposal only\ndefines the autocomplete behavior we want to achieve. Everything else can be changed\nlater as we see fit.\n```\n\n(algorithm-options)=\n\n### Algorithm options\n\nAlgorithm options refer to options that are not handled by optimagic but directly by the\nalgorithms. Examples are convergence criteria, stopping criteria and advanced\nconfiguration of algorithms. Some of them are supported by many algorithms (e.g.\nstopping after a maximum number of function evaluations is reached), some are supported\nby certain classes of algorithms (e.g. most genetic algorithms have a population size,\nmost trustregion algorithms allow to set an initial trustregion radius) and some of them\nare completely specific to one algorithm (e.g. ipopt has more than 100 very specific\noptions, `nag_dfols` supports very specific restarting strategies, ...).\n\nWhile nothing can be changed about the fact that every algorithm supports different\noptions (e.g. there is simply no trustregion radius in a genetic algorithm), we go very\nfar in harmonizing `algo_options` across optimizers:\n\n1. 
Options that are the same in spirit (e.g. stop after a specific number of iterations)\n   get the same name across all optimizers wrapped in optimagic. Most of them even get\n   the same default value.\n1. Options that have non-descriptive (and often heavily abbreviated) names in their\n   original implementation get more readable names, even if they appear only in a single\n   algorithm.\n1. Options that are specific to a well known optimizer (e.g. `ipopt`) are not renamed\n\n#### Current situation\n\nThe user passes `algo_options` as a dictionary of keyword arguments. All options that\nare not supported by the selected algorithm are discarded with a warning. The names of\nmost options are very descriptive (even though a bit too long at times).\n\nWe implement basic namespaces by introducing a dot notation. Example:\n\n```python\noptions = {\n    \"stopping.max_iterations\": 1000,\n    \"stopping.max_criterion_evaluations\": 1500,\n    \"convergence.relative_criterion_tolerance\": 1e-6,\n    \"convergence.scaled_gradient_tolerance\": 1e-6,\n    \"initial_radius\": 0.1,\n    \"population_size\": 100,\n}\n```\n\nThe option dictionary is then used as follows:\n\n```python\nminimize(\n    # ...\n    algorithm=\"scipy_lbfgsb\",\n    algo_options=options,\n    # ...\n)\n```\n\nIn the example, only the options `stopping.max_criterion_evaluations`,\n`stopping.max_iterations` and `convergence.relative_criterion_tolerance` are supported\nby `scipy_lbfgsb`. All other options would be ignored.\n\n```{note}\nThe `.` notation in `stopping.max_iterations` is just syntactic sugar. Internally, the\noption is called `stopping_max_iterations` because all options need to be valid\nPython variable names.\n```\n\n**Things we want to keep**\n\n- The ability to provide global options that are filtered for each optimizer. 
Mixing the\n  options for all optimizers in a single dictionary and discarding options that do not\n  apply to the selected optimizer allows to loop very efficiently over very different\n  algorithms (without `if` conditions in the user's code). This is very good for quick\n  experimentation, e.g. solving the same problem with three different optimizers and\n  limiting each optimizer to 100 function evaluations.\n- The basic namespaces help to quickly see what is influenced by a specific option. This\n  works especially well to distinguish stopping options and convergence criteria from\n  other tuning parameters of the algorithms. However, it would be enough to keep them as\n  a naming convention if we find it hard to support the `.` notation.\n- All options are documented in the optimagic documentation, i.e. we do not link to the\n  docs of original packages. Now they will also be discoverable in an IDE.\n\n**Problems**\n\n- There is no autocomplete and the only way to find out which options are supported is\n  the documentation.\n- A small typo in an option name can easily lead to the option being discarded.\n- Option dictionaries can grow very big.\n- The fact that option dictionaries are mutable can lead to errors, for example when a\n  user wants to try out a grid of values for one tuning parameter while keeping all\n  other options constant.\n\n#### Proposal\n\nWe want to offer multiple entry points for passing additional options to algorithms.\nUsers can pick the one that works best for their particular use-case. The current\nsolution remains valid but not recommended.\n\n##### Configured algorithms\n\nInstead of passing an `Algorithm` class (as described in\n[Algorithm Selection](algorithm-selection)) the user can create an instance of their\nselected algorithm. When creating the instance, they have autocompletion for all options\nsupported by the selected algorithm. 
`Algorithm`s are immutable.\n\n```python\nalgo = om.algorithms.scipy_lbfgsb(\n    stopping_max_iterations=1000,\n    stopping_max_criterion_evaluations=1500,\n    convergence_relative_criterion_tolerance=1e-6,\n)\nminimize(\n    # ...\n    algorithm=algo,\n    # ...\n)\n```\n\n##### Copy constructors on algorithms\n\nGiven an instance of an `Algorithm`, a user can easily create a modified copy of that\ninstance by using the `with_option` method.\n\n```python\n# using copy constructors to create variants\nbase_algo = om.algorithms.fides(stopping_max_iterations=1000)\nalgorithms = [base_algo.with_option(initial_radius=r) for r in [0.1, 0.2, 0.5]]\n\nfor algo in algorithms:\n    minimize(\n        # ...\n        algorithm=algo,\n        # ...\n    )\n```\n\nWe can provide additional methods `with_stopping` and `with_convergence` that call\n`with_option` internally but provide two additional features:\n\n1. They validate that the option is indeed a stopping/convergence criterion.\n1. They allow to omit the `convergence_` or `stopping_` at the beginning of the option\n   name and can thus reduce repetition in the option names. This recreates the\n   namespaces we currently achieve with the dot notation:\n\n```python\n# using copy constructors for better namespaces\nalgo = (\n    om.algorithms.scipy_lbfgsb()\n    .with_stopping(\n        max_iterations=1000,\n        max_criterion_evaluations=1500,\n    )\n    .with_convergence(\n        relative_criterion_tolerance=1e-6,\n    )\n)\n\nminimize(\n    # ...\n    algorithm=algo,\n    # ...\n)\n```\n\n##### Global option object\n\nAs before, the user can pass a global set of options to `maximize` or `minimize`. We\ncontinue to support option dictionaries but also allow `AlgorithmOption` objects that\nenable better autocomplete and immutability. 
We can construct them using a similar\npre-commit hook approach as discussed in [algorithm selection](algorithm-selection).\nGlobal options override the options that were directly passed to an optimizer. For\nconsistency, `AlgorithmOptions` can offer the `with_stopping`, `with_convergence` and\n`with_option` copy-constructors, so users can modify options safely. Probably, this\napproach should be featured less prominently in the documentation as it offers no\nguarantees that the specified options are compatible with the selected algorithm.\n\nThe previous example continues to work. Examples of the new possibilities are:\n\n```python\noptions = om.AlgorithmOptions(\n    stopping_max_iterations=1000,\n    stopping_max_criterion_evaluations=1500,\n    convergence_relative_criterion_tolerance=1e-6,\n    convergence_scaled_gradient_tolerance=1e-6,\n    initial_radius=0.1,\n    population_size=100,\n)\n\n\nminimize(\n    # ...\n    algorithm=om.algorithms.scipy_lbfgsb,\n    algo_options=options,\n    # ...\n)\n```\n\n```{note}\nIn my currently planned implementation, autocomplete will not work reliably for the\ncopy constructors (`with_option`, `with_stopping` and `with_convergence`). The main\nreason is that most editors do not play well with `functools.wraps` or any other means\nof dynamic signature creation. For more details, see the discussions about the\n[Internal Algorithm Interface](algorithm-interface).\n```\n\n### Custom derivatives\n\nProviding custom derivatives to optimagic is slightly complicated because we support\nscalar, likelihood and least-squares problems in the same interface. Moreover, we allow\nto either provide a `derivative` function or a joint `criterion_and_derivative` function\nthat allow users to exploit synergies between evaluating the criterion and the\nderivative.\n\n#### Current situation\n\nThe `derivative` argument can currently be one of three things:\n\n- A `callable`: This is assumed to be the relevant derivative of `criterion`. 
If a\n  scalar optimizer is used, it is the gradient of the criterion value w.r.t. params. If\n  a likelihood optimizer is used, it is the jacobian of the likelihood contributions\n  w.r.t. params. If a least-squares optimizer is used, it is the jacobian of the\n  residuals w.r.t. params.\n- A `dict`: The dict must have three keys `\"value\"`, `\"contributions\"` and\n  `\"root_contributions\"`. The corresponding values are the three callables described\n  above.\n- `None`: In this case, a numerical derivative is calculated.\n\nThe `criterion_and_derivative` argument exactly mirrors `derivative` but each callable\nreturns a tuple of the criterion value and the derivative instead.\n\n**Things we want to keep**\n\n- It is good that synergies between `criterion` and `derivative` can be exploited.\n- There are three arguments (`criterion`, `derivative`, `criterion_and_derivative`).\n  This makes sure that every algorithm can run efficiently when looping over algorithms\n  and keeping everything else equal. With SciPy's approach of setting `jac=True` if one\n  wants to use a joint criterion and derivative function, a gradient free optimizer\n  would have no chance of evaluating just the criterion.\n- Scalar, least-squares and likelihood problems are supported in one interface.\n\n**Problems**\n\n- A dict with required keys is brittle\n- Autodiff needs to be handled completely outside of optimagic\n- The names `criterion`, `derivative` and `criterion_and_derivative` are not aligned\n  with scipy and very long.\n- Providing derivatives to optimagic is perceived as complicated and confusing.\n\n#### Proposal\n\n```{note}\nThe following section uses the new names `fun`, `jac` and `fun_and_jac` instead of\n`criterion`, `derivative` and `criterion_and_derivative`.\n```\n\nTo improve the integration with modern automatic differentiation frameworks, `jac` or\n`fun_and_jac` can also be a string `\"jax\"` or a more autocomplete friendly enum\n`om.autodiff_backend.JAX`. 
This can be used to signal that the objective function is jax\ncompatible and jax should be used to calculate its derivatives. In the long run we can\nadd PyTorch support and more. Since this is mostly about a signal of compatibility, it\nwould be enough to set one of the two arguments to \"jax\", the other one can be left at\n`None`. Here is an example:\n\n```python\nimport jax.numpy as jnp\nimport optimagic as om\n\n\ndef jax_sphere(x):\n    return jnp.dot(x, x)\n\n\nres = om.minimize(\n    fun=jax_sphere,\n    params=jnp.arange(5),\n    algorithm=om.algorithms.scipy_lbfgsb,\n    jac=\"jax\",\n)\n```\n\nIf a custom callable is provided as `jac` or `fun_and_jac`, it needs to be decorated\nwith `@om.mark.least_squares` or `@om.mark.likelihood` if it is not the gradient of a\nscalar function value. Using the `om.mark.scalar` decorator is optional. For a simple\nleast-squares problem this looks as follows:\n\n```python\nimport numpy as np\n\n\n@om.mark.least_squares\ndef ls_sphere(params):\n    return params\n\n\n@om.mark.least_squares\ndef ls_sphere_jac(params):\n    return np.eye(len(params))\n\n\nres = om.minimize(\n    fun=ls_sphere,\n    params=np.arange(5),\n    algorithm=om.algorithms.scipy_ls_lm,\n    jac=ls_sphere_jac,\n)\n```\n\nNote that here we have a least-squares problem and solve it with a least-squares\noptimizer. However, any least-squares problem can also be solved with scalar optimizers.\n\nWhile optimagic could convert the least-squares derivative to the gradient of the scalar\nfunction value, this is generally inefficient. 
Therefore, a user can provide multiple\ncallables of the objective function in such a case, so we can pick the best one for the\nchosen optimizer.\n\n```python\n@om.mark.scalar\ndef sphere_grad(params):\n    return 2 * params\n\n\nres = om.minimize(\n    fun=ls_sphere,\n    params=np.arange(5),\n    algorithm=om.algorithms.scipy_lbfgsb,\n    jac=[ls_sphere_jac, sphere_grad],\n)\n```\n\nSince a scalar optimizer was chosen to solve the least-squares problem, optimagic would\npick the `sphere_grad` as derivative. If a least-squares solver was chosen, we would use\n`ls_sphere_jac`.\n\n### Other option dictionaries\n\n#### Current situation\n\nWe often allow to switch on some behavior with a bool or a string value and then\nconfigure the behavior with an option dictionary. Examples are:\n\n- `logging` (`str | pathlib.Path | False`) and `log_options` (dict)\n- `scaling` (`bool`) and `scaling_options` (dict)\n- `error_handling` (`Literal[\"raise\", \"continue\"]`) and `error_penalty` (dict)\n- `multistart` (`bool`) and `multistart_options`\n\nMoreover we have option dictionaries whenever we have nested invocations of optimagic\nfunctions. 
Examples are:\n\n- `numdiff_options` in `minimize` and `maximize`\n- `optimize_options` in `estimate_msm` and `estimate_ml`\n\n**Things we want to keep**\n\n- Complex behavior like logging or multistart can be switched on in extremely simple\n  ways, without importing anything and without looking up supported options.\n- The interfaces are very declarative and decoupled from our implementation.\n\n**Problems**\n\n- Option dictionaries are brittle and don't support autocomplete.\n- It can be confusing if someone provided `scaling_options` or `multistart_options` but\n  they take no effect because `scaling` or `multistart` were not set to `True`.\n\n#### Proposal\n\nWe want to keep a simple way of enabling complex behavior (with some default options)\nbut get rid of having two separate arguments (one to switch the behavior on and one to\nconfigure it). This means that we have to be generous regarding input types.\n\n##### Logging\n\nCurrently we only implement logging via an sqlite database. All `log_options` are\nspecific to this type of logging. However, logging is slow and we should support more\ntypes of logging. For this, we can implement a simple `Logger` abstraction. Advanced\nusers could implement their own logger.\n\nAfter the changes, `logging` can be any of the following:\n\n- `False` (or anything Falsy): No logging is used.\n- A `str` or `pathlib.Path`: Logging is used at default options.\n- An instance of `optimagic.Logger`. There will be multiple subclasses, e.g.\n  `SqliteLogger` which allow us to switch out the logging backend. Each subclass might\n  have different optional arguments.\n\nThe `log_options` are deprecated. Using dictionaries instead of `Option` objects will be\nsupported during a deprecation cycle.\n\n##### Scaling, error handling and multistart\n\nIn contrast to logging, scaling, error handling and multistart are deeply baked into\noptimagic's minimize function. 
Therefore, it does not make sense to create abstractions\nfor these features that would make them replaceable components that can be switched out\nfor other implementations by advanced users. Most of these features are already\nperceived as advanced and allow for a lot of configuration.\n\nWe therefore suggest the following argument types:\n\n- `scaling`: `bool | ScalingOptions`\n- `error_handling`: `bool | ErrorHandlingOptions`\n- `multistart`: `bool | MultistartOptions`\n\nAll of the Option objects are simple dataclasses that mirror the current dictionaries.\nAll `_options` arguments are deprecated.\n\n##### `numdiff_options` and similar\n\nDictionaries are still supported but we also offer more autocomplete friendly\ndataclasses as alternative.\n\n(algorithm-interface)=\n\n### The internal algorithm interface and `Algorithm` objects\n\n#### Current situation\n\nCurrently, algorithms are defined as `minimize` functions that are decorated with\n`om.mark_minimizer`. The `minimize` function returns a dictionary with a few mandatory\nand several optional keys. Algorithms can provide information to optimagic in two ways:\n\n1. The signature of the minimize function signals whether the algorithm needs\n   derivatives and whether it supports bounds and nonlinear constraints. Moreover, it\n   signals which algorithm specific options are supported. Default values for algorithm\n   specific options are also defined in the signature of the minimize function.\n1. `@mark_minimizer` collects the following information via keyword arguments:\n\n- Is the algorithm a scalar, least-squares or likelihood optimizer?\n- The algorithm name.\n- Does the algorithm require well scaled problems?\n- Is the algorithm currently installed?\n- Is the algorithm global or local?\n- Should the history tracking be disabled (e.g. 
because the algorithm tracks its own\n  history)?\n- Does the algorithm parallelize criterion evaluations?\n\nA slightly simplified example of the current internal algorithm interface is:\n\n```python\n@mark_minimizer(\n    name=\"scipy_neldermead\",\n    needs_scaling=False,\n    primary_criterion_entry=\"value\",\n    is_available=IS_SCIPY_AVAILABLE,\n    is_global=False,\n    disable_history=False,\n)\ndef scipy_neldermead(\n    criterion,\n    x,\n    lower_bounds,\n    upper_bounds,\n    *,\n    stopping_max_iterations=1_000_000,\n    stopping_max_criterion_evaluations=1_000_000,\n    convergence_absolute_criterion_tolerance=1e-8,\n    convergence_absolute_params_tolerance=1e-8,\n    adaptive=False,\n):\n    options = {\n        \"maxiter\": stopping_max_iterations,\n        \"maxfev\": stopping_max_criterion_evaluations,\n        # both tolerances seem to have to be fulfilled for Nelder-Mead to converge.\n        # if not both are specified it does not converge in our tests.\n        \"xatol\": convergence_absolute_params_tolerance,\n        \"fatol\": convergence_absolute_criterion_tolerance,\n        \"adaptive\": adaptive,\n    }\n\n    res = scipy.optimize.minimize(\n        fun=criterion,\n        x0=x,\n        bounds=_get_scipy_bounds(lower_bounds, upper_bounds),\n        method=\"Nelder-Mead\",\n        options=options,\n    )\n\n    return process_scipy_result(res)\n```\n\nThe first two arguments (`criterion` and `x`) are mandatory. The lack of any arguments\nrelated to derivatives signifies that `scipy_neldermead` is a gradient free algorithm.\nThe bounds related arguments show that it supports box constraints. The remaining\narguments define the supported stopping criteria and algorithm options as well as their\ndefault values.\n\nThe decorator simply attaches information to the function as `_algorithm_info`\nattribute. This originated as a hack but was never changed afterwards. 
The\n`AlgorithmInfo` looks as follows:\n\n```python\nclass AlgoInfo(NamedTuple):\n    primary_criterion_entry: str\n    name: str\n    parallelizes: bool\n    needs_scaling: bool\n    is_available: bool\n    arguments: list  # this is read from the signature\n    is_global: bool = False\n    disable_history: bool = False\n```\n\n**Things we want to keep**\n\n- The internal interface has proven flexible enough for many optimizers we had not\n  wrapped when we designed it. It is easy to add more optional arguments to the\n  decorator without breaking any existing code.\n- The decorator approach completely hides how we represent algorithms internally.\n- Since we read a lot of information from function signatures (as opposed to registering\n  options somewhere), there is no duplicated information. If we change the approach to\n  collecting information, we still need to ensure there is no duplication or possibility\n  to provide wrong information to optimagic.\n\n**Problems**\n\n- Type checkers complain about the `._algorithm_info` hack.\n- All computations and signature checking are done eagerly for all algorithms at import\n  time. This is one of the reasons why imports are slow.\n- The first few arguments to the minimize functions follow a naming scheme and any typo\n  in those names would lead to situations that are hard to debug (e.g. 
if `lower_bound`\nwas mistyped as `lower_buond` we would assume that the algorithm does not support\nlower bounds but has a tuning parameter called `lower_buond`).\n\n#### Proposal\n\nWe first show the proposed new algorithm interface and discuss the changes later.\n\n```python\n@om.mark.minimizer(\n    name=\"scipy_neldermead\",\n    needs_scaling=False,\n    problem_type=om.ProblemType.Scalar,\n    is_available=IS_SCIPY_AVAILABLE,\n    is_global=False,\n    disable_history=False,\n    needs_derivatives=False,\n    needs_parallelism=False,\n    supports_bounds=True,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n)\n@dataclass(frozen=True)\nclass ScipyNelderMead(Algorithm):\n    stopping_max_iterations: int = 1_000_000\n    stopping_max_criterion_evaluations: int = 1_000_000\n    convergence_absolute_criterion_tolerance: float = 1e-8\n    convergence_absolute_params_tolerance: float = 1e-8\n    adaptive: bool = False\n\n    def __post_init__(self):\n        # check everything that cannot be handled by the type system\n        assert self.convergence_absolute_criterion_tolerance > 0\n        assert self.convergence_absolute_params_tolerance > 0\n\n    def _solve_internal_problem(\n        self, problem: InternalProblem, x0: NDArray[float]\n    ) -> InternalOptimizeResult:\n        options = {\n            \"maxiter\": self.stopping_max_iterations,\n            \"maxfev\": self.stopping_max_criterion_evaluations,\n            \"xatol\": self.convergence_absolute_params_tolerance,\n            \"fatol\": self.convergence_absolute_criterion_tolerance,\n            \"adaptive\": self.adaptive,\n        }\n\n        res = minimize(\n            fun=problem.scalar.fun,\n            x0=x0,\n            bounds=_get_scipy_bounds(problem.bounds),\n            method=\"Nelder-Mead\",\n            options=options,\n        )\n\n        return process_scipy_result(res)\n```\n\n1. 
The new internal algorithms are dataclasses, where all algorithm options are\n   dataclass fields. This enables us to obtain information about the options via the\n   `__dataclass_fields__` attribute without inspecting signatures or imposing naming\n   conventions on non-option arguments.\n1. The `_solve_internal_problem` method receives an instance of `InternalProblem` and\n   `x0` (the start values) as arguments. `InternalProblem` collects the criterion\n   function, its derivatives, bounds, etc. This again avoids any potential for typos in\n   argument names.\n1. The `mark.minimizer` decorator collects all the information that was previously\n   collected via optional arguments with naming conventions. This information is\n   available while constructing the instance of `InternalProblem`. Thus we can make sure\n   that attributes that were not requested (e.g. derivatives if `needs_derivative` is\n   `False`) raise an `AttributeError` if used.\n1. The minimize function returns an `InternalOptimizeResult` instead of a dictionary.\n\nThe copy constructors (`with_option`, `with_convergence`, and `with_stopping`) are\ninherited from `optimagic.Algorithm`. This means, that they will have `**kwargs` as\nsignature and thus do not support autocomplete. 
However, they can check that all\nspecified options are actually in the `__dataclass_fields__` and thus provide feedback\nbefore an optimization is run.\n\nAll breaking changes of the internal algorithm interface are done without deprecation\ncycle.\n\n```{note}\nThe `_solve_internal_problem` method is private because users should not call it; This\nalso prepares adding a public `minimize` method that internally calls the\n`minimize` function.\n```\n\nTo make things more concrete, here are prototypes for components related to the\n`InternalProblem` and `InternalOptimizeResult`.\n\n```{note}\nThe names of the internal problem are already aligned with the new names for\nthe objective function and its derivatives.\n```\n\n```python\nfrom numpy.typing import NDArray\nfrom dataclasses import dataclass\nfrom typing import Callable, Tuple\nimport optimagic as om\n\n\n@dataclass(frozen=True)\nclass ScalarProblemFunctions:\n    fun: Callable[[NDArray[float]], float]\n    jac: Callable[[NDArray[float]], NDArray[float]]\n    fun_and_jac: Callable[[NDArray[float]], Tuple[float, NDArray[float]]]\n\n\n@dataclass(frozen=True)\nclass LeastSquaresProblemFunctions:\n    fun: Callable[[NDArray[float]], NDArray[float]]\n    jac: Callable[[NDArray[float]], NDArray[float]]\n    fun_and_jac: Callable[[NDArray[float]], Tuple[NDArray[float], NDArray[float]]]\n\n\n@dataclass(frozen=True)\nclass LikelihoodProblemFunctions:\n    fun: Callable[[NDArray[float]], NDArray[float]]\n    jac: Callable[[NDArray[float]], NDArray[float]]\n    fun_and_jac: Callable[[NDArray[float]], Tuple[NDArray[float], NDArray[float]]]\n\n\n@dataclass(frozen=True)\nclass InternalProblem:\n    scalar: ScalarProblemFunctions\n    least_squares: LeastSquaresProblemFunctions\n    likelihood: LikelihoodProblemFunctions\n    bounds: om.Bounds | None\n    linear_constraints: list[om.LinearConstraint] | None\n    nonlinear_constraints: list[om.NonlinearConstraint] | None\n```\n\nThe `InternalOptimizeResult` formalizes the 
current dictionary solution:\n\n```python\n@dataclass(frozen=True)\nclass InternalOptimizeResult:\n    solution_x: NDArray[float]\n    solution_criterion: float\n    n_criterion_evaluations: int | None\n    n_derivative_evaluations: int | None\n    n_iterations: int | None\n    success: bool | None\n    message: str | None\n```\n\n#### Alternative to `mark.minimizer`\n\nInstead of collecting information about the optimizers via the `mark.minimizer`\ndecorator, we could require the `Algorithm` subclasses to provide that information via\nclass variables. The presence of all required class variables could be enforced via\n`__init_subclass__`.\n\nThe two approaches are equivalent in terms of achievable functionality. I see the\nfollowing advantages and disadvantages:\n\n**Advantages of decorator approach**\n\n- Easier for beginners as no subtle concepts (such as the difference between instance\n  and class variables) are involved\n- Very easy way to provide default values for some of the collected variables\n- Every user of optimagic is familiar with `mark` decorators\n- Autocomplete while filling out the arguments of the mark decorator\n- Very clear visual separation of algorithm options and attributes optimagic needs to\n  know about.\n\n**Advantages of class variable approach**\n\n- More familiar for people with object oriented background\n- Possibly better ways to enforce the presence of the class variables via static\n  analysis\n\nI am personally leaning towards the decorator approach but any feedback on this topic is\nwelcome.\n\n## Numerical differentiation\n\n### Current situation\n\nThe following proposal applies to the functions `first_derivative` and\n`second_derivative`. Both functions have an interface that has grown over time and both\nreturn a relatively complex result dictionary. 
There are several arguments that govern\nwhich entries are stored in the result dictionary.\n\nThe functions `first_derivative` and `second_derivative` allow params to be arbitrary\npytrees. They work for scalar and vector valued functions and a `key` argument makes\nsure that they work for `criterion` functions that return a dict containing `\"value\"`,\n`\"contributions\"`, and `\"root_contributions\"`.\n\nIn contrast to optimization, all pytree handling (for params and function outputs) is\nmixed with the calculation of the numerical derivatives. This can produce more\ninformative error messages and save some memory. However it increases complexity\nextremely because we can make very few assumptions on types. There are many if\nconditions to deal with this situation.\n\nThe interface is further complicated by supporting Richardson Extrapolation. This\nfeature was inspired by [numdifftools](https://numdifftools.readthedocs.io/en/latest/)\nbut has not produced convincing results in benchmarks.\n\n**Things we want to keep**\n\n- `params` and function values can be pytrees\n- support for optimagic `criterion` functions (now functions that return\n  `FunctionValue`)\n- Many optional arguments to influence the details of the numerical differentiation\n- Rich output format that helps to get insights on the precision of the numerical\n  differentiation\n- Ability to optionally pass in a function evaluation at `params` or return a function\n  evaluation at `params`\n\n**Problems**\n\n- We can make no assumptions on types inside the function because pytree handling is\n  mixed with calculations\n- Support for Richardson extrapolation complicates the interface and implementation but\n  has not been convincing in benchmarks\n- Pytree handling is actually incomplete (`base_steps`, `min_steps` and `step_ratio`\n  are assumed to be flat numpy arrays)\n- Many users expect the output of a function for numerical differentiation to be just\n  the gradient, jacobian or hessian, 
not a more complex result object.\n\n### Proposal\n\n#### Separation of calculations and pytree handling\n\nAs in numerical optimization, we should implement the core functionality for first and\nsecond derivative for functions that map from 1-Dimensional numpy arrays to\n1-Dimensional numpy arrays. All pytree handling or other handling of function outputs\n(e.g. functions that return a `FunctionValue`) should be done outside of the core\nfunctions.\n\n#### Deprecate Richardson Extrapolation (and prepare alternatives)\n\nThe goal of implementing Richardson Extrapolation was to get more precise estimates of\nnumerical derivatives when it is hard to find an optimal step size. Example use-cases we\nhad in mind were:\n\n- Optimization of a function that is piecewise flat, e.g. the likelihood function of a\n  naively implemented multinomial probit\n- Optimization or standard error estimation of slightly noisy functions, e.g. functions\n  of an MSM estimation problem\n- Standard error estimation of wiggly functions where the slope and curvature at the\n  minimum does not yield reasonable standard errors and confidence intervals\n\nUnfortunately, the computational cost of Richardson extrapolation is too high for any\napplication during optimization. Moreover, our practical experience with Richardson\nExtrapolation was not positive and it seems that Richardson extrapolation is not\ndesigned for our use-cases. 
It is designed as a sequence acceleration method that\nreduces roundoff error while shrinking a step size to zero, whereas in our application\nit might often be better to take a larger step size (for example, the success of\nderivative free trust-region optimizers suggests less local slope and curvature\ninformation is more useful than actual derivatives for optimization; similarly,\nnumerical derivatives with larger step sizes could be seen as an estimate of a\n[quasi jacobian](https://arxiv.org/abs/1907.13093) and inference based on it might have\ngood statistical properties).\n\nWe therefore propose to remove Richardson extrapolation and open an Issue to work on\nalternatives. Examples for alternatives could be:\n\n- [Moré and Wild (2010)](https://www.mcs.anl.gov/papers/P1785.pdf) propose an approach\n  to calculate optimal step sizes for finite difference differentiation of noisy\n  functions\n- We could think about aggregating derivative estimates at multiple step sizes in a way\n  that produces worst case standard errors and confidence intervals\n- ...\n\n```{note}\nRichardson extrapolation was only completed for first derivatives, even though it is\nalready prepared in the interface for second derivatives.\n```\n\n#### Better `NumdiffResult` object\n\nThe result dictionary will be replaced by a `NumdiffResult` object. All arguments that\ngovern which results are stored will be removed. If some of the formerly optional\nresults require extra computation that we wanted to avoid by making them optional, they\ncan be properties or methods of the result object.\n\n#### Jax inspired high-level interfaces\n\nSince our `first_derivative` and `second_derivative` functions need to fulfill very\nspecific requirements for use during optimization, they need to return a complex result\nobject. 
However, this can be annoying in simple situations where users just want a\ngradient, jacobian or hessian.\n\nTo cover these simple situations and provide a high level interface to our numdiff\nfunctions, we can provide a set of jax inspired decorators:\n\n- `@grad`\n- `@value_and_grad`\n- `@jac` (no distinction between `@jacrev` and `jacfwd` necessary)\n- `@value_and_jac`\n- `@hessian`\n- `@value_and_hessian`\n\nAll of these will be very simple wrappers around `first_derivative` and\n`second_derivative` with very low implementation and maintenance costs.\n\n## Benchmarking\n\n### `get_benchmark_problems`\n\n#### Current situation\n\nAs other functions in optimagic, `get_benchmark_problems` follows a design where\nbehavior can be switched on by a bool and configured by an options dictionary. The\nfollowing arguments are related to this:\n\n- `additive_noise` and `additive_noise_options`\n- `multiplicative_noise` and `multiplicative_noise_options`\n- `scaling` and `scaling_options`\n\nAll of them have the purpose of adding some difficult characteristics to an existing\nbenchmark set, so we can analyze how well an optimizer can deal with this situation.\n\nThe name of the benchmark set is passed in as a string.\n\nThe return value of `get_benchmark_problems` is a nested dictionary. The keys in the\nouter dictionary are the names of benchmark problems. The inner dictionaries represent\nbenchmark problems.\n\n**Things we want to keep**\n\n- Benchmark problems are collected in a dict, not in a fixed-field data structure. 
This\n  makes it easy to merge problems from multiple benchmark sets or filter benchmark sets.\n  A fixed field data structure would not work here.\n\n**Problems**\n\n- As discussed before, having separate arguments for switching-on behavior and\n  configuring it can be dangerous\n- Each single benchmark problem should not be represented as a dictionary\n- Adding noise or scaling problems should be made more flexible and generic\n\n#### Proposal\n\n##### Add noise to benchmark problems\n\nThe four arguments `additive_noise`, `multiplicative_noise`, `additive_noise_options`,\nand `multiplicative_noise_options` are combined in one `noise` argument. This `noise`\nargument can be `bool | BenchmarkNoise`. If `False`, no noise is added. If `True`,\nstandard normal noise is added.\n\nWe implement several subclasses of `BenchmarkNoise` to cover the current use cases. As\nsyntactic sugar, we can make `BenchmarkNoise` instances addable (by implementing an\n`__add__` method) so multiple sources of noise can be combined.\n\nA rough prototype for `BenchmarkNoise` looks as follows:\n\n```python\nFvalType = TypeVar(\"FvalType\", bound=float | NDArray[float])\n\n\nclass BenchmarkNoise(ABC):\n    @abstractmethod\n    def draw_noise(\n        self, fval: FvalType, params: NDArray, size: int, rng: np.random.Generator\n    ) -> FvalType:\n        pass\n\n    def __add__(self, other: BenchmarkNoise):\n        pass\n```\n\nPassing `fval` and `params` to `draw_noise` enables us to implement multiplicative\nnoise (i.e. noise where the standard deviation scales with the function value) and\nstochastic or deterministic wiggle (e.g. a sine curve that depends on params).\nTherefore, this proposal does not just cover everything that is currently implemented\nbut also adds new functionality we wanted to implement.\n\n##### Add scaling issues to benchmark problems\n\nThe `scaling_options` argument is deprecated. The `scaling` argument can be\n`bool | BenchmarkScaler`. 
We implement `LinspaceBenchmarkScaler` to cover everything\nthat is implemented right now but more types of scaling can be implemented in the\nfuture. A rough prototype of `BenchmarkScaler` looks as follows:\n\n```python\nclass BenchmarkScaler(ABC):\n    @abstractmethod\n    def scale(self, params: NDArray) -> NDArray:\n        pass\n\n    @abstractmethod\n    def unscale(self, params: NDArray) -> NDArray:\n        pass\n```\n\n##### Representing benchmark problems\n\nInstead of the fixed-field dictionary we will have a dataclass with corresponding\nfields. This would roughly look as follows:\n\n```python\n@dataclass\nclass BenchmarkProblem:\n    fun: Callable[[NDArray], FunctionValue]\n    start_x: NDArray\n    solution_x: NDArray | None\n    start_fun: float\n    solution_fun: float\n```\n\n### `run_benchmark`\n\n#### Current situation\n\n`run_benchmark` takes `benchmark_problems` (covered in the previous section),\n`optimize_options` and a few other arguments and returns a nested dictionary\nrepresenting benchmark results.\n\n`optimize_options` can be a list of algorithm names, a dict with algorithm names as\nvalues or a nested dict of keyword arguments for `minimize`.\n\n**Things we want to keep**\n\n- Benchmark results are collected in a dict, not in a fixed-field data structure. This\n  makes it easy to merge results from multiple benchmark sets or filter benchmark\n  results. A fixed field data structure would not work here.\n\n**Problems**\n\n- `optimize_options` are super flexible but error prone and hard to write as there is no\n  autocomplete support\n- Each single benchmark result should not be represented as a dictionary\n\n#### Proposal\n\nWe restrict the type of `optimize_options` to\n`dict[str, Type[Algorithm] | Algorithm | OptimizeOptions]`. Here, `OptimizeOptions` will\nbe a simple dataclass that we need for `estimate_ml` and `estimate_msm` anyways.\n\nPassing just lists of algorithm names is deprecated. 
Passing dicts as optimize options\nis also deprecated. Most use-cases will be covered by passing dictionaries of configured\nAlgorithms as optimize options. Actually using the full power of passing\n`OptimizeOptions` will be rarely needed.\n\nThe return type of `run_benchmark` will be `dict[tuple[str], BenchmarkResult]`\n\n`BenchmarkResult` is a dataclass with fields that mirror the keys of the current\ndictionary. It will roughly look as follows:\n\n```python\n@dataclass\nclass BenchmarkResult:\n    params_history: list[NDArray]\n    fun_history: list[float]\n    time_history: list[float]\n    batches_history: list[int]\n    solution: OptimizeResult\n```\n\n## Estimation\n\nThe changes to the estimation functions `estimate_ml` and `estimate_msm` will be\nminimal:\n\n- `lower_bounds` and `upper_bounds` are replaced by `bounds` (as in optimization)\n- `numdiff_options` and `optimize_options` become dataclasses\n- `logging` and `log_options` get aligned with our proposal for optimization\n\nIn the long run we plan a general overhaul of `MSM` estimation that provides better\naccess to currently internal objects such as the MSM objective function.\n\n## Type checkers and their configuration\n\nWe choose mypy as static type checker and run it as part of our continuous integration.\n\nOnce this enhancement proposal is fully implemented, we want to use the following\nsettings:\n\n```\ncheck_untyped_defs = true\ndisallow_any_generics = true\ndisallow_untyped_defs = true\ndisallow_incomplete_defs = true\nno_implicit_optional = true\nwarn_redundant_casts = true\nwarn_unused_ignores = true\n```\n\nIn addition to CI, we could also run type-checks as part of the pre-commit hooks. 
An\nexample where this is done can be found\n[here](https://github.com/google/jax/blob/de0fd722f0c4c0c238884f0e64e4ef8da72e4c1d/.pre-commit-config.yaml#L33).\n\n## Runtime type checking\n\nSince most of our users do not use static type checkers we will still need to check the\ntype of most user inputs so we can give them early feedback when problems arise. Thus we\ncannot remove our current error handling just because many of these errors could now be\ncaught by static analysis.\n\nWe can investigate using `jaxtyping`'s pytest hooks to enable runtime typecheckers like\nbeartype during testing but it is not a priority for now.\n\n## Changes in documentation\n\nAll type information in docstrings will be removed.\n\nWhenever there are now multiple ways of doing things, we show the ones that support\nautocomplete and static analysis most prominently. We can achieve this via tabs, similar\nto how\n[pytask](https://pytask-dev.readthedocs.io/en/stable/tutorials/defining_dependencies_products.html#products)\ndoes it.\n\nThe general structure of the documentation is not affected by this enhancement proposal.\n\n## Summary of breaking changes\n\n- The internal algorithm interface changes completely without deprecations\n- The support for Richardson Extrapolation in `first_derivative` is dropped without\n  deprecation; The corresponding arguments `n_steps` and `step_ratio` are removed.\n- The return type of `first_derivative` and `second_derivative` changes from dict to\n  `NumdiffResult` without deprecations. The arguments `return_func_value` and\n  `return_info` are dropped.\n- The representation of benchmark problems and benchmark results changes without\n  deprecations\n\n## Summary of deprecations\n\nThe following deprecations become active in version `0.5.0`. The functionality will be\nremoved in version `0.6.0` which should be scheduled for approximately half a year after\nthe release of `0.5.0`.\n\n- Returning a `dict` in the objective function is deprecated. 
Return `FunctionValue`\n  instead. In addition, likelihood and least-squares problems need to be decorated with\n  `om.mark.likelihood` and `om.mark.least_squares`.\n- The arguments `lower_bounds`, `upper_bounds`, `soft_lower_bounds` and\n  `soft_upper_bounds` are deprecated. Use `bounds` instead. `bounds` can be\n  `optimagic.Bounds` or `scipy.optimize.Bounds` objects.\n- Specifying constraints with dictionaries is deprecated. Use the corresponding subclass\n  of `om.constraints.Constraint` instead. In addition, all selection methods except for\n  `selector` are deprecated.\n- The `covariance` constraint is renamed to `FlatCovConstraint` and the `sdcorr`\n  constraint is renamed to `FlatSdcorrConstraint` to prepare the introduction of more\n  natural (non-flattened) covariance and sdcorr constraints.\n- The `log_options` argument of `maximize` and `minimize` is deprecated and gets\n  subsumed in the `logging` argument.\n- The `scaling_options` argument of `maximize` and `minimize` is deprecated and gets\n  subsumed in the `scaling` argument.\n- The `error_penalty` argument of `maximize` and `minimize` is deprecated and gets\n  subsumed in the `error_handling` argument.\n- The `multistart_options` argument of `maximize` and `minimize` is deprecated and gets\n  subsumed in the `multistart` argument.\n- The arguments `additive_noise`, `additive_noise_options`, `multiplicative_noise`, and\n  `multiplicative_noise_options` in `get_benchmark_problems` are deprecated and combined\n  into `noise`.\n- The `scaling_options` argument in `get_benchmark_problems` is deprecated and subsumed\n  in the `scaling` argument.\n- Passing just a list of algorithm strings as `optimize_options` in `run_benchmark` is\n  deprecated.\n"
  },
  {
    "path": "docs/source/development/ep-03-alignment.md",
    "content": "(eepalignment)=\n\n# EP-03: Alignment with SciPy\n\n```{eval-rst}\n+------------+------------------------------------------------------------------+\n| Author     | `Janos Gabler <https://github.com/janosg>`_                      |\n+------------+------------------------------------------------------------------+\n| Status     | Accepted                                                         |\n+------------+------------------------------------------------------------------+\n| Type       | Standards Track                                                  |\n+------------+------------------------------------------------------------------+\n| Created    | 2024-07-09                                                       |\n+------------+------------------------------------------------------------------+\n| Resolution |                                                                  |\n+------------+------------------------------------------------------------------+\n```\n\n## Abstract\n\nThis enhancement proposal explains how we will better align optimagic with\n`scipy.minimize`. Scipy is the most widely used optimizer library in Python and most of\nour new users are switching over from SciPy.\n\nThe goal is therefore simple: Make it as easy as possible for SciPy users to use\noptimagic. 
In most cases this means that the only thing that has to be changed is the\nimport statement for the `minimize` function:\n\n```python\n# from scipy.optimize import minimize\nfrom optimagic import minimize\n```\n\n## Design goals\n\n- If we can make code written for SciPy run with optimagic, we should do so\n- If we cannot make it run, the user should get a helpful error message that explains\n  how the code needs to be adjusted.\n\n## Aligning names\n\n| **Old Name**                               | **Proposed Name**         | **Source** |\n| ------------------------------------------ | ------------------------- | ---------- |\n| `criterion`                                | `fun`                     | scipy      |\n| `criterion_kwargs`                         | `fun_kwargs`              |            |\n| `params`                                   | `x0`                      |            |\n| `derivative`                               | `jac`                     | scipy      |\n| `derivative_kwargs`                        | `jac_kwargs`              |            |\n| `criterion_and_derivative`                 | `fun_and_jac`             |            |\n| `criterion_and_derivative_kwargs`          | `fun_and_jac_kwargs`      |            |\n| `stopping_max_criterion_evaluations`       | `stopping_maxfun`         | scipy      |\n| `stopping_max_iterations`                  | `stopping_maxiter`        | scipy      |\n| `convergence_absolute_criterion_tolerance` | `convergence_ftol_abs`    | NlOpt      |\n| `convergence_relative_criterion_tolerance` | `convergence_ftol_rel`    | NlOpt      |\n| `convergence_absolute_params_tolerance`    | `convergence_xtol_abs`    | NlOpt      |\n| `convergence_relative_params_tolerance`    | `convergence_xtol_rel`    | NlOpt      |\n| `convergence_absolute_gradient_tolerance`  | `convergence_gtol_abs`    | NlOpt      |\n| `convergence_relative_gradient_tolerance`  | `convergence_gtol_rel`    | NlOpt      |\n| 
`convergence_scaled_gradient_tolerance`    | `convergence_gtol_scaled` |            |\n\nWhile it seems that many names are taken from NlOpt and not from SciPy, this is a bit\nmisleading. SciPy does use the words `xtol`, `ftol` and `gtol` just like NlOpt, but it\ndoes not completely harmonize them between algorithms. We therefore chose NlOpt's\nversion which is understandable for everyone who knows SciPy but more readable than\nSciPy's.\n\n## Names we do not want to align\n\n- We do not want to rename `algorithm` to `method` because our algorithm names are\n  different from SciPy, so people who switch over from SciPy need to adjust their code\n  anyways.\n- We do not want to rename `algo_options` to `options` for the same reason.\n\nInstead we can provide aliases for those.\n\n## Additional aliases\n\nTo make it even easier for SciPy users to switch to optimagic, we can provide additional\naliases in `minimize` and `maximize` that let them use their SciPy code without changes\nor help to adjust it by showing good error messages. The following arguments are\nrelevant:\n\n- `method`: In SciPy this is used instead of `algorithm` to select the optimization\n  algorithm. We opted against simply renaming `algorithm` to `method` because our naming\n  scheme of algorithms is (and has to be) different from SciPy. By using `method`\n  instead of `algorithm`, users could select SciPy algorithms by their SciPy name. If\n  `method` and `algorithm` are both provided, they would get an error.\n- `tol`: We do not want to support one `tol` argument for all kinds of different\n  convergence criteria but could raise an error for people who use it and point them to\n  the relevant parts of our documentation.\n- `args`: we can support `args` as an alternative to `fun_kwargs`\n- `options`: This is the SciPy counterpart to our `algo_options`. 
We do not want to\n  support this as our option names are different but we can provide a good error message\n  with pointers to our documentation if someone uses it.\n- `hess` and `hessp`: Currently we don't support closed form hessians. If we support\n  them they will be called `hess`. In the meantime, this can raise a\n  `NotImplementedError`.\n- `callback`: Currently we do not support `callback`s. If we support them they will be\n  called `callback` and be as compatible with SciPy as possible. In the meantime we can\n  raise a `NotImplementedError`.\n- If a user sets `jac=True` we raise an error and explain how to use `fun_and_jac`\n  instead.\n\n## Letting algorithms pick their default values\n\nCurrently we try to align default values for convergence criteria and other algorithm\noptions across algorithms and even across optimizer packages. This means that sometimes\nalgorithms that are used via optimagic produce different results than the same algorithm\nused via SciPy or other packages.\n\nMoreover, it is possible that we deviate from algorithm options that the original\nauthors carefully picked because they maximize performance on a relevant benchmark set.\n\nI therefore propose that in the future we do not try to align algorithm options across\nalgorithms and packages.\n\n## Implementation\n\nAll renamings are done with a careful deprecation cycle. The deprecations become active\nin version `0.5.0`. Old names will be removed in version `0.6.0` which should be\nscheduled for approximately half a year after the release of `0.5.0`.\n"
  },
  {
    "path": "docs/source/development/how_to_contribute.md",
    "content": "(how-to-contribute)=\n\n# How to contribute\n\n## 1. Intro\n\nWe welcome and greatly appreciate contributions of all forms and sizes! Whether it's\nupdating the documentation, adding small extensions, or implementing new features, every\neffort is valued.\n\nFor substantial changes, please contact us in advance. This allows us to discuss your\nideas and guide the development process from the beginning. You can start a conversation\nby posting an issue on GitHub or by emailing [janosg](https://github.com/janosg).\n\nTo get familiar with the codebase, we recommend checking out our\n[issue tracker](https://github.com/optimagic-dev/optimagic/issues) for some immediate\nand clearly defined tasks.\n\n## 2. Before you start\n\nOnce you've decided to contribute, please review the {ref}`style_guide` (see the next\npage) to ensure your work aligns with the project's coding standards.\n\nWe manage new features through Pull Requests (PRs). Contributors work on their local\ncopy of optimagic, modifying and extending the codebase there, before opening a PR to\npropose merging their changes into the main branch.\n\nRegular contributors gain push access to unprotected branches, which simplifies the\ncontribution process (see Notes below).\n\n## 3. Step-by-step guide\n\n1. Fork the [optimagic repository](https://github.com/optimagic-dev/optimagic/). This\n   action creates a copy of the repository with write access for you.\n\n```{note}\nFor regular contributors: **Clone** the [repository](https://github.com/optimagic-dev/optimagic/) to your local machine and create a new branch for implementing your changes. You can push your branch directly to the remote optimagic repository and open a PR from there.\n```\n\n1. Clone your forked repository to your disk. This is where you'll make all your\n   changes.\n\n1. 
Open your terminal and execute the following commands from the root directory of your\n   local optimagic repository:\n\n   ```console\n   $ prek install\n   ```\n\n   This activates pre-commit hooks for linting and style formatting.\n\n   ```{note}\n   `prek` is not managed by pixi and must be installed globally. You can find\n   installation instructions at [github.com/j178/prek](https://github.com/j178/prek).\n   ```\n\n   You can then run the test suite with:\n\n   ```console\n   $ pixi run tests\n   ```\n\n   which installs the development dependencies and runs pytest. To run the type checker,\n   use:\n\n   ```console\n   $ pixi run mypy\n   ```\n\n   To see all available pixi tasks, run:\n\n   ```console\n   $ pixi task list\n   ```\n\n1. Implement your fix or feature. Use git to add, commit, and push your changes to the\n   remote repository. For more on git and how to stage and commit your work, refer to\n   these\n   [online materials](https://effective-programming-practices.vercel.app/git/staging/objectives_materials.html).\n\n1. Contributions are validated in two main ways. We run a comprehensive test suite to\n   ensure compatibility with the existing codebase and employ\n   [pre-commit hooks](https://effective-programming-practices.vercel.app/git/pre_commits/objectives_materials.html)\n   to maintain quality and adherence to our style guidelines. Opening a PR (see below)\n   triggers optimagic's\n   [Continuous Integration (CI)](https://docs.github.com/en/actions/automating-builds-and-tests/about-continuous-integration)\n   workflow, which runs the full test suite, pre-commit hooks, and other checks on a\n   remote server.\n\n   You can also run the test suite locally for\n   [debugging](https://effective-programming-practices.vercel.app/debugging/pdbp/objectives_materials.html).\n\n   With prek installed, linters run before each commit. Commits are rejected if any\n   checks fail. 
Note that some linters may automatically fix errors by modifying the\n   code in-place. Remember to re-stage the files after such modifications.\n\n```{tip}\nSkip the next paragraph if you haven't worked on the documentation.\n```\n\n1. Assuming you have updated the documentation, verify that it builds correctly. Run:\n\n   ```console\n   $ pixi run build-docs\n   ```\n\n   This command builds the HTML documentation, saving all files in the `docs/build/html`\n   directory. You can view the documentation with your preferred web browser by opening\n   `docs/build/html/index.html` or any other file. Similar to the online documentation,\n   you can navigate to different pages simply by clicking on the links.\n\n1. Once all tests and hooks pass locally, push your changes to your forked repository\n   and create a pull request through GitHub: Go to the Github repository of your fork. A\n   banner on your fork's GitHub repository will prompt you to open a PR.\n\n   ```{note}\n   Regular contributors with push access can directly push their local branch to the remote optimagic repository and initiate a PR from there.\n   ```\n\n   Follow the steps outlined in the optimagic\n   [PR template](https://github.com/optimagic-dev/optimagic/blob/main/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md)\n   to describe your contribution, the problem it addresses, and your proposed solution.\n\n   Opening a PR initiates a complete CI run, including the `pytest` suite, linters, code\n   coverage checks, doctests, and building the HTML documentation. Monitor the CI\n   workflow status on your PR page and make necessary modifications to your code based\n   on the results, iterating until all tests pass.\n\n1. Request a review from one of the main contributors once all CI tests pass. Address\n   any feedback or suggestions by making the necessary changes and committing them.\n\n1. 
After your PR is approved, one of the main contributors will merge it into\n   optimagic's main branch.\n"
  },
  {
    "path": "docs/source/development/index.md",
    "content": "# Development\n\n```{toctree}\n---\nmaxdepth: 1\n---\ncode_of_conduct\nhow_to_contribute\nstyleguide\nenhancement_proposals\ncredits\nchanges\n```\n"
  },
  {
    "path": "docs/source/development/styleguide.md",
    "content": "(style_guide)=\n\n# Styleguide\n\nYour contribution should fulfill the criteria provided below.\n\n## Styleguide for the codebase\n\n- Functions have no side effect. : If you modify a mutable argument, make a copy at the\n  beginning of the function.\n\n- Use good names for functions and variables : *\"You should name a variable using the\n  same care with which you name a first-born child.\"*, Robert C. Martin, Clean Code: A\n  Handbook of Agile Software Craftsmanship.\n\n  A bit more concretely, this means:\n\n  The length of a variable name should be proportional to its scope. In a list\n  comprehension or short loop, i might be an acceptable name for the running variable,\n  but variables that are used at many different places should have descriptive names.\n\n  The name of variables should reflect the content or meaning of the variable and not\n  only the type. Names like `dict_list` would not have been a good name for the\n  `constraints`.\n\n  Function names should contain a verb. Moreover, the length of a function name is\n  typically inversely proportional to its scope. The public functions like `maximize`\n  and `minimize` can have very short names. At a lower level of abstraction you\n  typically need more words to describe what a function does.\n\n- User facing functions should be generous regarding their input type. Example: the\n  `algorithm` argument can be a string, `Algorithm` class or `Algorithm` instance. The\n  `algo_options` can be an `AlgorithmOptions` object or a dictionary of keyword\n  arguments.\n\n- User facing functions should be strict about their output types. A strict output type\n  does not just mean that the output type is known (and not a generous Union), but that\n  it is a proper type that enables static analysis for available attributes. Example:\n  whenever possible, public functions should not return dicts but proper result types\n  (e.g. 
`OptimizeResult`, `NumdiffResult`, ...)\n\n- Internal functions should be strict about input and output types; Typically, a public\n  function will check all arguments, convert them to a proper type and then call an\n  internal function. Example: `minimize` will convert any valid value for `algorithm`\n  into an `Algorithm` instance and then call an internal function with that type.\n\n- Fixed field types should only be used if all fields are known. An example where this\n  is not the case are collections of benchmark problems, where the set of fields depends\n  on the selected benchmark sets and other things. In such situations, dictionaries that\n  map strings to BenchmarkProblem objects are a good idea.\n\n- Think about autocomplete! If you want to accept a string as an argument (e.g. an\n  algorithm name) also accept input types that are more amenable to static analysis and\n  offer better autocomplete.\n\n- Whenever possible, use immutable types. Whenever things need to be changeable,\n  consider using an immutable type with copy constructors for modified instances.\n  Example: instances of `Algorithm` are immutable but using `Algorithm.with_option`\n  users can create modified copies.\n\n- The main entry point to optimagic are functions, objects are mostly used for\n  configuration and return types. This takes the best of both worlds: we get the safety\n  and static analysis that (in Python) can only be achieved using objects but the\n  beginner friendliness and freedom provided by functions. Example: Having a `minimize`\n  function, it is very easy to add the possibility of running minimizations with\n  multiple algorithms in parallel and returning the best value. Having a `.solve` method\n  on an algorithm object would require a whole new interface for this.\n\n- Deep modules. : This is a term coined by\n  [John Ousterhout](https://www.youtube.com/watch?v=bmSAYlu0NcY). A deep module is a\n  module that has just one public function. 
This function calls the private functions\n  (i.e. functions that start with an underscore) defined further down in the module and\n  reads almost like a table of contents to the whole module.\n\n- Never import a private function in another module : By following this strictly, you\n  can be sure that you can rename or refactor private functions without looking at other\n  modules. Of course it is also not a solution to copy paste the function! If you would\n  like to import a function that starts with an underscore, rename it.\n\n- All functions have a [Google style](https://tinyurl.com/mxams9k) docstring : The\n  docstring describes all arguments and outputs. For arrays, please document how many\n  dimensions and what shape they have. Look around in the code to find examples if you\n  are in doubt. Example:\n\n  ```python\n  def ordered_logit(formula, data):\n      \"\"\"Estimate an ordered probit model with maximum likelihood.\n\n      Args:\n          formula (str): A patsy formula.\n          data (str): A pandas DataFrame.\n\n      Returns:\n          res: optimization result.\n\n      \"\"\"\n      pass\n  ```\n\n  In particular each docstring should start with a one liner that describes very\n  concisely what the function does. The one liner should be in imperative mode, i.e. not\n  \"This function does\" ...\" , but \"Do ...\" and end with a period.\n\n- Unit tests : If you write a small helper whose interface might change during\n  refactoring, it is sufficient if the function that calls it is tested. But all\n  functions that are exposed to the user must have unit tests.\n\n- Enable pre-commit hooks by executing `prek install` in a terminal in the root of the\n  optimagic repository. This makes sure that your formatting is consistent with what we\n  expect.\n\n- Use `pathlib` for all file paths operations. : You can find the pathlib documentation\n  [here](https://docs.python.org/3/library/pathlib.html)\n\n- Object serialization. 
: Pickling and unpickling of DataFrames should be done with\n  `pd.read_pickle` and `pd.to_pickle`.\n\n- Don't use global variables unless absolutely necessary : Exceptions are global\n  variables from a config file that replace magic numbers. Never use mutable global\n  variables!\n\n## Styleguide for the documentation\n\n- General. : The documentation is rendered with\n  [Sphinx](https://www.sphinx-doc.org/en/master/) and written in **Markedly Structured\n  Text.** How-to guides are usually Jupyter notebooks.\n\n- The documentation follows the [diataxis](https://diataxis.fr) framework.\n"
  },
  {
    "path": "docs/source/estimagic/explanation/bootstrap_ci.md",
    "content": "(bootstrap-cis)=\n\n# Bootstrap Confidence Intervals\n\nWe use the notation and formulations provided in chapter 10 of {cite}`Hansen2020`.\n\nThe first supported confidence interval type is the **\"percentile\"** confidence\ninterval, as discussed in section 10.10 of the Hansen textbook. Let\n$\\{ \\hat{\\theta}_1^*, ..., \\hat{\\theta}_B^*\\}$ denote the estimates of estimator\n$\\hat{\\theta}$ for the B bootstrap samples. The idea of the percentile confidence\ninterval is to simply take the empirical quantiles $q_{p}^*$ of this distribution, so\nwe have\n\n$$\nCI^{percentile} = [q_{\\alpha/2}^*, q_{1-\\alpha/2}^*].\n$$\n\nThe second supported confidence interval **\"normal\"** is based on a normal approximation\nand discussed in Hansen's section 10.9. Let $s_{boot}$ be the sample standard error of\nthe distribution of bootstrap estimators, $z_q$ the q-quantile of a standard normal\ndistribution and $\\hat{\\theta}$ be the full sample estimate of $\\theta$. Then, the\nasymptotic normal confidence interval is given by\n\n$$\nCI^{normal} = [\\hat{\\theta} - z_{1- \\alpha/2} s_{boot},\n\\hat{\\theta} + z_{1- \\alpha/2} s_{boot}].\n$$\n\nThe bias-corrected **\"bc\"** bootstrap confidence interval addresses the issue of biased\nestimators. This problem is often present when estimating nonlinear models. Econometric\ndetails are discussed in section 10.17 of Hansen. Let\n\n$$\np^* = \\frac{1}{B} \\sum_{b=1}^B 1(\\hat{\\theta}_b^* \\leq \\hat{\\theta})\n$$\n\nand define $z_0^* = \\Phi^{-1} (p^*)$, where $\\Phi$ is the standard normal cdf. The bias\ncorrection works via correcting the significance level. Define\n$x(\\alpha) = \\Phi(z_\\alpha + 2 z_0^*)$ as the corrected significance level for a target\nsignificance level of $\\alpha$. 
Then, the bias-corrected confidence interval is given by\n\n$$\nCI^{bc} = [q_{x(\\alpha/2)}^*, q_{x(1-\\alpha/2)}^*].\n$$\n\nA further refined version of the bias-corrected confidence interval is the\nbias-corrected and accelerated interval, short **\"bca\"**, as discussed in section 10.20\nof Hansen. The general idea is to correct for skewness of the sampling distribution. Downsides\nof this confidence interval are that it takes quite a lot of time to compute, since it\nfeatures calculating leave-one-out estimates of the original sample. Formally, again,\nthe significance levels are adjusted. Define\n\n$$\n\\hat{a}=\\frac{\\sum_{i=1}^{n}\\left(\\bar{\\theta}-\\hat{\\theta}_{(-i)}\\right)^{3}}\n{6\\left(\\sum_{i=1}^{n}\\left(\\bar{\\theta}-\\hat{\\theta}_{(-i)}\\right)^{2}\n\\right)^{3 / 2}},\n$$\n\nwhere $\\bar{\\theta}=\\frac{1}{n} \\sum_{i=1}^{n} \\widehat{\\theta}_{(-i)}$. This is an\nestimator for the skewness of $\\hat{\\theta}$. Then, the corrected significance level is\ngiven by\n\n$$\nx(\\alpha)=\\Phi(z_{0}+\\frac{z_{\\alpha}+z_{0}}{1-\\hat{a}(z_{\\alpha}+z_{0})})\n$$\n\nand the bias-corrected and accelerated confidence interval is given by\n\n$$\nCI^{bca} = [q_{x(\\alpha/2)}^*, q_{x(1-\\alpha/2)}^*].\n$$\n\nThe studentized confidence interval, here called **\"t\"** type confidence interval first\nstudentizes the bootstrap parameter distribution, i.e. applies the transformation\n$\\frac{\\hat{\\theta}_b-\\hat{\\theta}}{s_{boot}}$, and then builds the confidence interval\nbased on the estimated quantile function of the studentized data $\\hat{G}$:\n\n$$\nCI^{t} = \\left[\\hat{\\theta}+\\hat{\\sigma} \\hat{G}^{-1}(\\alpha / 2),\n\\hat{\\theta}+\\hat{\\sigma} \\hat{G}^{-1}(1-\\alpha / 2)\\right]\n$$\n\nThe final supported confidence interval method is the **\"basic\"** bootstrap confidence\ninterval, which is derived in section 3.4 of {cite}`Wassermann2006`, where it is called\nthe pivotal confidence interval. 
It is given by\n\n$$\nCI^{basic} = \\left[\\hat{\\theta}+\\left(\\hat{\\theta}-\\hat{\\theta}_{u}^{\\star}\\right), \\hat{\\theta}+\\left(\\hat{\\theta}-\\hat{\\theta}_{l}^{\\star}\\right)\\right],\n$$\n\nwhere $\\hat{\\theta}_{u}^{\\star}$ denotes the $1-\\alpha/2$ empirical quantile of the\nbootstrap estimate distribution for parameter $\\theta$ and $\\hat{\\theta}_{l}^{\\star}$\ndenotes the $\\alpha/2$ quantile.\n\n```{eval-rst}\n.. bibliography:: ../../refs.bib\n    :filter: docname in docnames\n```\n"
  },
  {
    "path": "docs/source/estimagic/explanation/bootstrap_montecarlo_comparison.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"# Bootstrap Monte Carlo Comparison\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"In this Jupyter notebook, we perform a Monte Carlo exercise to illustrate the importance of using the cluster robust variant of the bootstrap when data within clusters is correlated. \\n\",\n    \"\\n\",\n    \"The main idea is to repeatedly draw clustered samples, get both uniform and clustered bootstrap estimates in these samples, and then compare how often the true null hypothesis is rejected.\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Data Generating Process\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"The true data generating process is given by\\n\",\n    \"\\n\",\n    \"$$ logit(y_{i,g}) = \\\\beta_0 + \\\\beta_1 (x_{i,g}) + \\\\epsilon_{i,g}, $$\\n\",\n    \"\\n\",\n    \"where the independent variable $x_{i,g} = x_i + x_g$ and the noise term $\\\\epsilon_{i,g} = \\\\epsilon_i + \\\\epsilon_g$ each consist of an individual and a cluster term.\\n\",\n    \"\\n\",\n    \"In the simulations we perform below, we have $\\\\beta_0 = \\\\beta_1 =0$. $x_i$ and $x_g$ are drawn from a standard normal distribution, and $\\\\epsilon_i$ and $\\\\epsilon_g$ are drawn from a normal distribution with $\\\\mu=0$ and $\\\\sigma=0.5$. 
The value of $\\\\sigma$ is chosen to not blow up rejection rates in the independent case too much.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"import matplotlib.pyplot as plt\\n\",\n    \"import numpy as np\\n\",\n    \"import pandas as pd\\n\",\n    \"import scipy\\n\",\n    \"import statsmodels.api as sm\\n\",\n    \"from joblib import Parallel, delayed\\n\",\n    \"\\n\",\n    \"import estimagic as em\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 2,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"def create_clustered_data(nclusters, nobs_per_cluster, true_beta=0):\\n\",\n    \"    \\\"\\\"\\\"Create a bivariate clustered dataset with specified number of\\n\",\n    \"    clusters and number of observations per cluster that has a population\\n\",\n    \"    value of true_beta for the logit coefficient on the independent variable.\\n\",\n    \"\\n\",\n    \"    Args:\\n\",\n    \"        nclusters (int): Number of clusters.\\n\",\n    \"        nobs_per_cluster (int): Number of observations per cluster.\\n\",\n    \"        true_beta (int): The true logit coefficient on x.\\n\",\n    \"\\n\",\n    \"    Returns:\\n\",\n    \"        pd.DataFrame: Clustered dataset.\\n\",\n    \"    \\\"\\\"\\\"\\n\",\n    \"    x_cluster = np.random.normal(size=nclusters)\\n\",\n    \"    x_ind = np.random.normal(size=nobs_per_cluster * nclusters)\\n\",\n    \"    eps_cluster = np.random.normal(size=nclusters, scale=0.5)\\n\",\n    \"    eps_ind = np.random.normal(size=nobs_per_cluster * nclusters, scale=0.5)\\n\",\n    \"\\n\",\n    \"    y = []\\n\",\n    \"    x = []\\n\",\n    \"    cluster = []\\n\",\n    \"\\n\",\n    \"    for g in range(nclusters):\\n\",\n    \"        for i in range(nobs_per_cluster):\\n\",\n    \"            key = (i + 1) * (g + 1) - 1\\n\",\n    \"\\n\",\n    \"            arg = (\\n\",\n    \"             
   true_beta * (x_cluster[g] + x_ind[key]) + eps_ind[key] + eps_cluster[g]\\n\",\n    \"            )\\n\",\n    \"\\n\",\n    \"            y_prob = 1 / (1 + np.exp(-arg))\\n\",\n    \"            y.append(np.random.binomial(n=1, p=y_prob))\\n\",\n    \"            x.append(x_cluster[g] + x_ind[(i + 1) * (g + 1) - 1])\\n\",\n    \"            cluster.append(g)\\n\",\n    \"\\n\",\n    \"    y = np.array(y)\\n\",\n    \"    x = np.array(x)\\n\",\n    \"    cluster = np.array(cluster)\\n\",\n    \"\\n\",\n    \"    return pd.DataFrame({\\\"y\\\": y, \\\"x\\\": x, \\\"cluster\\\": cluster})\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Monte Carlo Simulation Code\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"The following function computes bootstrap t-values. As suggested by Cameron and Miller (2015), critical values are the 0.975 quantiles from a t distribution with `n_clusters` -1 degrees of freedom.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 3,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"def get_t_values(data, sample_size=200, hyp_beta=0, cluster=False):\\n\",\n    \"    \\\"\\\"\\\"Get bootstrap t-values for testing the hypothesis that beta == hyp_beta.\\n\",\n    \"\\n\",\n    \"    Args:\\n\",\n    \"        data (pd.DataFrame): Original dataset.\\n\",\n    \"        sample_size (int): Number of bootstrap samples to draw.\\n\",\n    \"        hyp_beta (float): Hypothesised value of beta.\\n\",\n    \"        cluster (bool): Whether or not to cluster on the cluster column.\\n\",\n    \"\\n\",\n    \"    Returns:\\n\",\n    \"        float: T-Value of hypothesis.\\n\",\n    \"    \\\"\\\"\\\"\\n\",\n    \"\\n\",\n    \"    def logit_wrap(df):\\n\",\n    \"        y = df[\\\"y\\\"]\\n\",\n    \"        x = df[\\\"x\\\"]\\n\",\n    \"\\n\",\n    \"        result = sm.Logit(y, 
sm.add_constant(x)).fit(disp=0).params\\n\",\n    \"\\n\",\n    \"        return pd.Series(result, index=[\\\"constant\\\", \\\"x\\\"])\\n\",\n    \"\\n\",\n    \"    if cluster is False:\\n\",\n    \"        result = em.bootstrap(data=data, outcome=logit_wrap, n_draws=sample_size)\\n\",\n    \"        estimates = pd.DataFrame(result.outcomes)[\\\"x\\\"]\\n\",\n    \"\\n\",\n    \"    else:\\n\",\n    \"        result = em.bootstrap(\\n\",\n    \"            data=data,\\n\",\n    \"            outcome=logit_wrap,\\n\",\n    \"            n_draws=sample_size,\\n\",\n    \"            cluster_by=\\\"cluster\\\",\\n\",\n    \"        )\\n\",\n    \"        estimates = pd.DataFrame(result.outcomes)[\\\"x\\\"]\\n\",\n    \"\\n\",\n    \"    return (estimates.mean() - hyp_beta) / estimates.std()\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 4,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"def monte_carlo(nsim, nclusters, nobs_per_cluster, true_beta=0, n_cores=8):\\n\",\n    \"    \\\"\\\"\\\"Run a simulation for rejection rates and a logit data generating process.\\n\",\n    \"\\n\",\n    \"    Rejection rates are based on a t distribution with nclusters-1 degrees of freedom.\\n\",\n    \"\\n\",\n    \"    Args:\\n\",\n    \"        nsim (int): Number of Monte Carlo draws.\\n\",\n    \"        nclusters (int): Number of clusters in each generated dataset.\\n\",\n    \"        nobs_per_cluster (int) Number of observations per cluster.\\n\",\n    \"        true_beta (int): Population value of logit coefficient on x.\\n\",\n    \"        n_cores (int): Number of jobs for Parallelization.\\n\",\n    \"\\n\",\n    \"    Returns:\\n\",\n    \"        pd.DataFrame: DataFrame of average rejection rates.\\n\",\n    \"    \\\"\\\"\\\"\\n\",\n    \"    np.zeros(nsim)\\n\",\n    \"\\n\",\n    \"    np.zeros(nsim)\\n\",\n    \"\\n\",\n    \"    def loop():\\n\",\n    \"        df = create_clustered_data(nclusters, nobs_per_cluster, 
true_beta)\\n\",\n    \"\\n\",\n    \"        return [get_t_values(df), get_t_values(df, cluster=True)]\\n\",\n    \"\\n\",\n    \"    t_value_array = np.array(\\n\",\n    \"        Parallel(n_jobs=n_cores)(delayed(loop)() for _ in range(nsim))\\n\",\n    \"    )\\n\",\n    \"    t_value_array = np.array([loop() for _ in range(nsim)])\\n\",\n    \"\\n\",\n    \"    crit = scipy.stats.t.ppf(0.975, nclusters - 1)\\n\",\n    \"\\n\",\n    \"    result = pd.DataFrame(np.abs(t_value_array) > crit, columns=[\\\"uniform\\\", \\\"cluster\\\"])\\n\",\n    \"\\n\",\n    \"    return result\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Results\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"Here, we perform Monte Carlo simulations with the above functions. In each simulation, the sample size is 200, but the number of clusters varies across simulations. Be warned that the code below takes a long time to run.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 5,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"np.random.seed(505)\\n\",\n    \"\\n\",\n    \"results_list = []\\n\",\n    \"\\n\",\n    \"for g, k in [[20, 50], [100, 10], [500, 2]]:\\n\",\n    \"    results_list.append(monte_carlo(nsim=100, nclusters=g, nobs_per_cluster=k))\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 6,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"mean_rejection_data = pd.DataFrame([x.mean() for x in results_list])\\n\",\n    \"mean_rejection_data[\\\"nclusters\\\"] = [20, 100, 500]\\n\",\n    \"mean_rejection_data.set_index(\\\"nclusters\\\", inplace=True)\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 7,\n   \"metadata\": {},\n   \"outputs\": [\n    {\n     \"data\": {\n      \"text/plain\": [\n       \"Text(0.5, 0.98, 'Comparison of Rejection Rates')\"\n      ]\n     },\n    
 \"execution_count\": 7,\n     \"metadata\": {},\n     \"output_type\": \"execute_result\"\n    },\n    {\n     \"data\": {\n      \"image/png\": \"iVBORw0KGgoAAAANSUhEUgAAAsAAAAH2CAYAAAB+5DrCAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjYuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/P9b71AAAACXBIWXMAAA9hAAAPYQGoP6dpAACI/UlEQVR4nOzdd1xTV/8H8E/YG1RARFAcOEARJ+JCXLharQutdba2ddRZV5+6+rR11121S619qtbVVmu11gEOcFGse6I4AEVliAqSnN8f95dATEAIgQTyeb9eeZHce+7NNwnoh8O558iEEAJERERERCbCzNAFEBERERGVJAZgIiIiIjIpDMBEREREZFIYgImIiIjIpDAAExEREZFJYQAmIiIiIpPCAExEREREJoUBmIiIiIhMCgMwEREREZkUBmCiEpCRkYGvvvoKoaGhqFixIqysrFCuXDkEBwdj5syZiI+PN3SJRs3HxwcymczQZRjEpk2b0LhxY9jZ2UEmk8HHx+e1xyjfr9w3R0dHNGzYEHPmzMHTp0/1Utvs2bMhk8mwfv16vZxPX27dugWZTIa2bdsauhQNbdu21fhs7O3t4efnh0mTJuHhw4eGLpHIJFgYugCisu748ePo3bs3EhMTYWdnh+bNm6NixYpITU3FqVOnEB0djQULFmD37t3o0KGDocslI3Lq1Cm88847sLGxQadOneDi4gJXV9cCH9+7d284ODhACIE7d+4gKioKs2fPxvbt23Hs2DE4OjoWY/XFZ/369Rg2bBhmzZqF2bNnG7ocnYSFhcHDwwMAkJCQgOjoaHz11VfYsmULTpw4gcqVKxfp/IcPH0ZoaCiGDBlidL+gEBkDBmCiYhQbG4v27dvjxYsXmDp1KmbMmAF7e3vVfoVCgV9//RVTpkzB3bt3DVipcTtw4ABevnxp6DJK3K5du6BQKLBixQoMHz680McvWrRIrcf42rVraNWqFc6dO4dly5bh008/LVJ9Y8aMQf/+/VGpUqUinUffKleujEuXLsHOzs7QpeRp2rRpaj3UCQkJaN++PS5duoRZs2bhu+++M1xxRCaAQyCIiokQAoMGDcKLFy8we/ZszJs3Ty38AoCZmRl69eqFM2fOoEmTJgaq1PjVqFEDderUMXQZJU75S1H16tX1cj5fX19MnDgRALBv374in8/V1RV16tSBs7Nzkc+lT5aWlqhTpw6qVKli6FIKrFKlSpg1axYA/Xw2RJQ/BmCiYrJ3716cP38eXl5e+M9//pNvW2dnZ9SrV09t27Nnz/Df//4X9erVg62tLZydndGmTRts3rxZ6zlyj5NdtWqV6rhq1aphwYIFEEIAAGJiYvDGG2+gfPnycHBwQI8ePXD79m2N8w0dOhQymQyHDx/Gn3/+iVatWsHBwQHlypVDr169cPnyZY1jXrx4ge+//x49evRA9erVYWtrCxcXl3zrzv08+/btQ2hoKFxcXCCTyZCSkqLx2nI7f/483nnnHVSvXh02NjZwc3NDYGAgxo8fj4SEBI32e/bsQceOHVGuXDnY2Nigdu3amDZtmup5css9vvXcuXN48803Ua5cOdjb2yMkJATHjx/X+nry8+jRI0yePBm+vr6wsbFB+fLl0blzZ/z1119q7davXw+ZTIZ169YBAEJDQ1XjRYv652x/f38AwIMHD7Tu37t3L7p16wY3NzdYW1ujevXqmDhxIh49eqTRNr8xwNnZ2Vi9ejWCg4Ph5OQEW1tbBAYGYunSpcjOztb63BkZGZg/fz6aNGkCJycn2Nvbo06dOhg9ejSuXr0KQBpDO2zYMADAnDlz1MbSKut43RjgjRs3olWrVnBycoKdnR0CAgIwd+5c
vHjxQqNt7u/PyMhItGvXDo6OjnByckK3bt1w8eJFrc+hi/w+myNHjmDMmDEICAhAuXLlYGtrizp16mj9/h06dChCQ0MBABs2bFB7j14dMnLnzh2MGTMGNWrUUH1Pdu/ePc/v7+PHj6Nnz56oWrUqrK2t4eHhgWbNmmHatGl6G1tOVCIEERWL0aNHCwBiwoQJhT42LS1NNG7cWAAQbm5uok+fPqJLly7C2tpaABBjx47VOKZq1aoCgBg/frywtbUVXbt2Fd27dxeOjo4CgJg5c6Y4evSosLOzE40aNRL9+vUTNWvWFABEjRo1xLNnz9TON2TIEAFAjBo1SshkMtG0aVPRv39/4efnJwAIZ2dnERsbq3bMpUuXBADh6ekpQkNDRXh4uAgJCRGWlpYCgJg1a5ZG3crnGTFihNrzNG3aVKSkpKi9ttxOnz4tbGxsBAAREBAg+vXrJ7p3766q79ChQ2rtv/zySwFAWFhYiPbt24vw8HDh5eUlAIhatWqJxMREtfazZs0SAMTo0aOFnZ2dqF+/vggPDxcNGjQQAISNjY04d+5cQT9ScffuXVG9enUBQFSpUkWEh4eLdu3aCXNzcwFAfPXVV6q2R44cEUOGDBE1atQQAERYWJgYMmSIGDJkiDhy5Mhrn0v5fsXFxWns+/nnnwUA0apVK419U6dOFQCElZWVaNmypejTp4/w9fVVfY/k9R6tW7dObfuzZ89EaGioACDKly8vOnbsKN544w3h7u4uAIg333xTyOVytWPu378v/P39BQBRrlw58cYbb4g+ffqIRo0aCTMzM7FkyRIhhBBz584VLVu2FABEgwYNVO9L7vcmLi5OABAhISEar/H9999XfX5du3YVffr0Ea6urgKACA4OFhkZGWrtld+fEydOFObm5iIoKEj069dP1KpVSwAQFSpUEAkJCa/5RHKEhIRo/f4UQojjx48LAMLLy0tjX1BQkLCxsRHNmjUTvXv3Ft26dROVKlUSAIS/v79IT09Xtf32229FWFiY6nPL/R7t3LlT7fnKlSsnAIjatWuLXr16idatWwsLCwthbm4uNm/erFbD77//LszMzIRMJhNBQUGif//+onPnzqrvU23fb0TGigGYqJgo/5PeuHFjoY8dM2aMACBCQ0NFWlqaavulS5dUIWLXrl1qxyhDj6enp7h+/braMdbW1sLOzk74+PiI1atXq/ZlZmaKdu3aCQDihx9+UDuf8j9+AOKbb75RbVcoFKqgFBgYqHZMcnKy2L9/v1AoFGrbb968KXx8fISZmZnGf5K5n+fV/3BffW25DR48WAAQixYt0mh/6dIlcf/+fdXjkydPCjMzM+Hg4CCio6NV21+8eCH69u0rAIjevXurnUMZ7gCIZcuWqe0bP368ACAGDRqktV5tunfvLgCIt99+W2RmZqq2HzlyRNjZ2Qlzc3Pxzz//qB2jfG+0haX85BeAle/b559/rrb9l19+EQBEvXr1xLVr11TbFQqFmDlzpgAgwsPD1Y7JKwCPGjVK1V75S4wQ0i92Xbt2FQDUvg+FEKJ9+/YCgOjXr59amBNCCrRnz55VPV63bl2ev1Ap22sLwNu2bVP9jFy9elW1PSUlRbRq1UoAEJMmTVI7RvkZmJmZqYXH7Oxs0bt3bwFAzJgxQ2sd2uQXgJXv83vvvaexb8+ePWrvpRDS968y0M+ZM0dt36FDhwQAMWTIEK11pKamikqVKglzc3Px008/qe07deqUKFeunHBwcBAPHjxQbW/Tpo0AILZt26ZxvpMnT6r9W0Vk7BiAiYpJnTp1BACxd+/eQh339OlTYWtrK8zMzMSlS5c09i9fvlwAEB06dFDbrgw93333ncYxb731Vp69fr/99pvW/yiV//G3aNFC45isrCxV72lBeiSFkHqlAIjly5drfZ5u3brleay2ANylSxcBQKMXWhtl6Js+fbrGvqSkJNX7HR8fr9quDHctW7bUOCY5OVkAEFWrVn3tcwshxI0bNwQA
4eDgIB49eqSxf+LEiVqDj74CsEKhELdv3xazZs1S9d69GjKVPdvaerUVCoUIDAwU5ubm4uHDh6rt2gJwUlKSsLS0FN7e3hp/VRBCiISEBGFlZSUCAgJU206cOCEACHd39wKFKF0DsDLArV27VuOYs2fPCplMJhwcHMTz589V25WfwcCBAzWOOX36dJ49zXnRFoDv378vVqxYIWxsbETNmjXVfnl7nWfPngkLCwvRqFEjte2vC8BLlizRGviVvvrqK42/TNStW1cA0AjiRKURxwATGZkzZ87g+fPnaNSokdYLvwYNGgQAOHbsGBQKhcb+Tp06aWxTXkSV3z5tY2YBoH///hrbLC0t0adPHwDS2MRXHT16FJ9//jlGjhyJYcOGYejQodi6dSsAaSYCbd58802t2/PSuHFjAMDo0aNx+PDhPMeV5q5x4MCBGvvc3d3RqVMnKBQKHDt2TGO/tvesQoUKKF++fJ7v2auOHj0KAOjcuTPKly+vsV/5mWp7L4uiWrVqkMlkMDMzQ9WqVTFnzhx07twZR44cgYODg6rdgwcPcPbsWfj6+mqMRQcAmUyGli1bQi6X48yZM/k+5+HDh/Hy5Ut07twZtra2Gvs9PDzg6+uLc+fO4fnz5wCAv//+GwAwYMCAYpua7eXLl4iOjgag/fsgICAAAQEBePr0KWJjYzX2a/s+qFWrFoC8f3byk3tct6enJz766CP4+fnhzJkzec6qce/ePaxZswbjx4/H8OHDMXToUIwcORJWVlZ5/lzlRTnuvFevXlr3t27dGgBw8uRJ1Tblz9ygQYNw6tQprf/+EJUWnAaNqJhUqFABAAo9sf39+/cBIM8FD1xcXODs7IzU1FQ8efJE9TxK2uYPVYad/PZlZmZqfb6qVatq3a6sT1kvAKSmpqJXr144ePCg1mMAID09Xev2wl6xP3nyZBw9elQ136mDgwOCg4PRrVs3DB06VG1mgte9p8rt9+7d09jn5eWl9RhHR0c8fvy4QLUW5fmLQjkPcFZWFq5cuYKYmBj8+eef+PLLL1UzDgDSRWOA9MvJ6xYcSU5Ozne/8lzffvstvv3223zbPn78GJUrV8adO3cASLN9FJdHjx4hKysLrq6uGrOxKPn4+ODs2bMF/j5QhvW8fnbyo5wHWC6XIy4uDsePH0dMTAzGjRunuvgxt6+++grTpk3T23SAys+pZcuW+bbL/Xl/+eWXOHfuHHbt2oVdu3ahXLlyaNWqFd58803VfNVEpQUDMFExCQwMxLFjxxATE4N33nlHr+fOL6SYmeX9h5389unD1KlTcfDgQYSEhGDOnDmoV68eXFxcYG5ujr/++gthYWGq2SheVdj/PJ2cnHDw4EEcO3YMu3btwuHDh3Hw4EHs378fc+fOxZEjR+Dr61ugc+n6fupLca1y9+o8wFu2bMGAAQPw2WefoXPnzggKCgIAVU+eh4cHwsLC8j1nXr8QKSnPFRgYiAYNGuTb1tra+nUvoUSV5PfBq/MAR0ZGIiwsDOvXr0e3bt1Uf2EBgOjoaEyaNAnOzs5YtmwZ2rZtCw8PD9X75+npWeheaOXn1KdPnzx/IQCg9lcob29vnD59GgcPHsTu3bsRERGhCsMLFixAVFSUxi/kRMaKAZiomHTr1g2rVq3C1q1bsWDBAlhYFOzHzdPTEwC0Tk0GSL2sKSkpsLW1Rbly5fRWb17yqkO5XVkvAOzcuRPm5ub4/fff4eTkpNb+5s2beq9NJpOhVatWaNWqFQDpT/njx4/Hpk2b8J///Ae//PKLqsa4uDjcvn0bfn5+GudR9oYVdfWtvLzuMy3u51cKDw/HwYMH8c0332D69Omqnnpl76arq2uRp1lTnqtVq1ZYsWJFgY7x9vYGANy4caNIz52fChUqwMrKCsnJycjIyNAa+krqc9CmTZs2mDlzJj755BN88skneOutt2Bubg5A+rkCgC+++AJDhgxRO+758+dITEws9PN5eXnhypUrmDZtmmpoQ0FY
WFigU6dOqiEht2/fxvDhw3Hw4EHMnz8fCxYsKHQtRIbAMcBExaRz587w9/fH3bt38cUXX+TbNi0tDRcuXAAgjbOztbXFmTNntI7r++mnnwBIf7osid5JZYjMLTs7G9u3bwcAVfgEgCdPnsDJyUkj/OZ1Hn1zd3dXzXN6/vx51XbleMZNmzZpHPPw4UPs27dPNc61OCjfo71792qdc1j5mSrrLE6zZ8+GjY0NDh06pJrr1cvLC3Xq1MHFixdV8+3qKjQ0FObm5ti9e3eB/1yvXAJ806ZNBZpL1srKCgDyHff9KktLSzRv3hwAtM5Jff78eZw9exYODg4IDAws8Hn1afz48fDw8MC1a9ewZcsW1fYnT54A0D4MY+vWrVr/qvK696hjx44AcsK1rqpWrYqpU6cCUP+ZIzJ2DMBExUQmk+Gnn36CjY0NZs+ejenTpyMjI0OtjRACv//+O5o0aYJTp04BAOzt7TF8+HAoFAqMHj1a7ZirV6/i888/BwCMHTu2RF7H0aNH8cMPP6htmzVrFuLj4xEQEKAW2mrVqoUnT56o/ecNAEuWLMGhQ4f0WteaNWsQFxensX3Pnj0AcnoVAelCOTMzMyxfvhynT59Wbc/KysJHH32E58+fo1evXmrH6FP16tXRrVs3pKenY9y4cWrBMCoqCqtXr4a5uTlGjx5dLM+fW6VKlfDhhx8CgNovZjNmzIBCoUDv3r21XgT26NGj147pBaTe0+HDh+PWrVsYMGAAkpKSNNpcv35d9QsUADRr1gyhoaF48OAB3n//fY2fk1u3buHcuXOqx8oe9StXrry2ntw++ugjANIvAbn/IpGeno4xY8ZACIEPPvjAYGNZbW1tMW3aNADA3LlzVcFWebHd999/r/a9c/HiRVX4fNXr3qMPPvgA7u7uWLBgAb755huNC9qys7Oxb98+tVC7ZMkSrb3N2n7miIyeQeegIDIBR48eFRUrVhQAhJ2dnWjfvr14++23Rbdu3VTbbWxsxN9//606JvdCGO7u7qJv376ia9euqoUf8lsIQ5u85msVIu8po5TTP40cOVLIZDLRrFkzMWDAANViBU5OTiImJkbtmJ9++kk1d27r1q3FgAEDhJ+fnzAzMxMTJkzId7q1/Kb60vbalNN2+fn5id69e2ssUnH06FG19l988YVqIYwOHTqI/v37C29vbwFA+Pr6FniRh/xqys/du3dFtWrVVNOn9e/fX7Rv3161EMbixYs1jimOeYCFkKYis7W1FQDU5h7+5JNPVHPeNmrUSPTt21f06dNHNGzYUJibmwtnZ2e18+S3EEbHjh0FAGFvby9atmwpBgwYIN58803V4is9evTQeH9q166tWjzjzTffFH379tVYCEMIIZ4/f66aDzskJEQMGzZMvPvuu+LYsWNCiIIthGFrayu6desm+vbtK9zc3AQA0bx58zwXwsjrM1B+ngWV3zzAytemXODi119/FUJI0+55eHgIAKJatWqiX79+okOHDsLS0lL07ds3z+/FgIAAAUA0bdpUDB06VLz77rvit99+U+2PiopSLQLi7e0tunTpIt5++23Rrl074eLiIgCozX3s7OwszMzMRMOGDUW/fv1E3759VQuClC9fXm1uZSJjxwBMVALS09PFokWLREhIiHBzcxMWFhbCxcVFBAUFiVmzZok7d+5oHPP06VMxZ84c4efnJ6ytrYWjo6No1aqV+Pnnn7U+R3EF4EOHDoldu3aJ4OBgYWdnJ5ydnUWPHj3EhQsXtD7XH3/8IZo3by4cHR2Fi4uL6NChgzh8+HCe85LqGoB///13MXz4cOHv7y9cXFyEnZ2dqFWrlnjvvffE5cuXtZ5n9+7don379sLZ2VlYWVmJmjVriilTpojHjx8X6j3Lq6bXSU5OFpMmTRI1atQQVlZWwsXFRXTq1Ens27dPa/viCsBC5Mw93LdvX7XtERERom/fvsLT01NYWlqKChUqiICAADFmzBgRERGh1lb5Hq1fv17j
/NnZ2WLDhg2iXbt2onz58sLS0lJ4enqK4OBgMWfOHHHlyhWNY9LS0sRnn30mAgIChK2trXBwcBB16tQRY8aMUVucQwhpsYaOHTsKZ2dnIZPJ1D6r/AKwEEL8+OOPokWLFsLBwUHY2NgIf39/8cUXX2idt7ikA7AQOXN9N23aVLXtzp074u233xaVK1cWNjY2om7dumLevHkiOzs7z+/Fa9euiZ49e4oKFSoIMzMzrXMnJyQkiClTpgh/f39hZ2cn7OzsRI0aNUSPHj3E+vXr1eaL/vHHH8Xbb78tateuLRwdHYWjo6Pw8/MTEydOFHfv3i3we0BkDGRC5HFJNhGZtKFDh2LDhg04dOiQ2tXqREpTp07FggUL8Msvv6Bv376GLoeIqMA4BpiIiHSiXBSjOOfvJSIqDgzARERUKDNmzECzZs1w4MAB1K1bFw0bNjR0SUREhcIATEREhbJ7925cvHgRXbp0wW+//VZsC3kQERUXjgEmIiIiIpPCHmAiIiIiMikMwERERERkUhiAiYiIiMikMAATERERkUlhACYiIiIik8IATEREREQmhQGYiIiIiEwKAzARERERmRQGYCIiIiIyKQzARERERGRSGICJiIiIyKQwABMRERGRSWEAJiIiIiKTwgBMRERERCaFAZiIiIiITAoDMBERERGZFAZgIiIiIjIpDMBEREREZFIYgImIiIjIpDAAExEREZFJYQAmIiIiIpPCAExEREREJoUBmIiIiIhMCgMwEREREZkUBmAiIiIiMikMwERERERkUhiAiYiIiMikMAATERERkUmxMHQBpYFCocD9+/fh6OgImUxm6HKIiIiI6BVCCKSnp8PT0xNmZvn38TIAF8D9+/fh7e1t6DKIiIiI6DXu3LkDLy+vfNswABeAo6MjAOkNdXJyMnA1RERERPSqtLQ0eHt7q3JbfhiAC0A57MHJyYkBmIiIiMiIFWS4Ki+CIyIiIiKTwgBMRERERCaFAZiIiIiITArHABMREZkQIQSys7Mhl8sNXQpRoZibm8PCwkIvU9IyABMREZmIrKwsJCQk4NmzZ4YuhUgndnZ2qFSpEqysrIp0HgZgIiIiE6BQKBAXFwdzc3N4enrCysqKiztRqSGEQFZWFh4+fIi4uDj4+vq+drGL/DAAExERmYCsrCwoFAp4e3vDzs7O0OUQFZqtrS0sLS1x+/ZtZGVlwcbGRudz8SI4IiIiE1KUXjMiQ9PX9y9/CoiIiIjIpDAAGxm5HDh8GNi0SfrKi3SJiIj0x8fHB0uXLlU9TkxMRMeOHWFvbw8XFxeD1UUliwHYiOzYAfj4AKGhwNtvS199fKTtRERExqKkO2vatm2L8ePHa2xfv359oUPrqVOn8P7776seL1myBAkJCYiNjcXVq1eLWKl+yGQy1c3CwgJVqlTBxIkTkZmZqdfnmT17NgIDAwt1zKu/QJRWvAjOSOzYAfTpAwihvv3ePWn7tm1Ar16GqY2IiEhpxw5g3Djg7t2cbV5ewLJlpeP/KTc3N7XHN27cQOPGjeHr66vzObOysoo8Lder1q1bh86dO+Ply5c4e/Yshg0bBnt7e/z3v//V6/MUB7lcDplMZtTjzY23MhMil0v/mLwafoGcbePHczgEEREZlrKzJnf4BXI6awz9F8uhQ4eiZ8+eWLRoESpVqoQKFSpg9OjRePnypapN7h5MHx8fbN++HT/++CNkMhmGDh0KAIiPj0ePHj3g4OAAJycn9OvXD0lJSapzKHtOv/vuO1SrVk01G4FMJsPatWvRvXt32NnZoW7duoiKisL169fRtm1b2Nvbo0WLFrhx48ZrX4uLiws8PDzg7e2N7t27o0ePHoiJiVFrs3r1atSoUQNWVlaoXbs2Nm7cqLY/v9exfv16zJkzB2fPnlX1Nq9fvx5CCMyePRtVqlSBtbU1PD09MXbsWABST/zt27cxYcIE1THKc7m4uOD333+Hn58frK2tER8fj1OnTqFjx45wdXWFs7Mz
QkJCNF6DTCbD6tWr0aVLF9ja2qJ69erYtm3ba9+fomIANgJHjmj+Y5KbEMCdO1I7IiIifRECyMgo2C0tDRg7Nv/OmnHjpHYFOZ+28+jDoUOHcOPGDRw6dAgbNmzA+vXrsX79eq1tT506hc6dO6Nfv35ISEjAsmXLoFAo0KNHDzx+/BgRERHYv38/bt68ifDwcLVjr1+/ju3bt2PHjh2IjY1Vbf/vf/+LwYMHIzY2FnXq1MHbb7+NDz74ANOnT8fp06chhMCYMWMK9ZquXr2KgwcPIigoSLVt586dGDduHCZNmoTz58/jgw8+wLBhw3Do0CEAeO3rCA8Px6RJk+Dv74+EhAQkJCQgPDwc27dvx5IlS7B27Vpcu3YNv/76K+rXrw8A2LFjB7y8vPDZZ5+pjlF69uwZ5s+fj++++w4XLlyAu7s70tPTMWTIEBw9ehTR0dHw9fVF165dkZ6ervb6ZsyYgd69e+Ps2bMYOHAg+vfvj0uXLhXqPSo0Qa+VmpoqAIjU1NRiOf/PPwsh/VOQ/+3nn4vl6YmIyAQ8f/5cXLx4UTx//ly17enTgv3/Uxy3p08LXntISIgYN26cxvZ169YJZ2dn1eMhQ4aIqlWriuzsbNW2vn37ivDwcNXjqlWriiVLlqge9+jRQwwZMkT1+K+//hLm5uYiPj5ete3ChQsCgDh58qQQQohZs2YJS0tL8eDBA7V6AIhPP/1U9TgqKkoAEN9//71q26ZNm4SNjU2+rxeAsLGxEfb29sLa2loAEN27dxdZWVmqNi1atBAjRoxQO65v376ia9euhXodDRo0UDvH4sWLRa1atdSeK7dX3z8hpM8BgIiNjc33dcnlcuHo6Ch27dql9lo//PBDtXZBQUFi5MiRWs+h7ftYqTB5jT3ARqBSJf22IyIiMlX+/v4wNzdXPa5UqRIePHhQ4OMvXboEb29veHt7q7b5+fnBxcVFrVeyatWqGuOJASAgIEB1v2LFigCg6kFVbnvx4gXS0tLyrWPJkiWIjY3F2bNnsXv3bly9ehWDBg1Sq7Nly5Zqx7Rs2VJVY0Ffx6v69u2L58+fo3r16hgxYgR27tyJ7OzsfGsFACsrK7XXDgBJSUkYMWIEfH194ezsDCcnJzx9+hTx8fFq7YKDgzUeF3cPMC+CMwKtW0sXENy7p/1PQjKZtL9165KvjYiIyi47O+Dp04K1jYwEunZ9fbs9e4A2bQr23AXl5OSE1NRUje0pKSlwdnZW22Zpaan2WCaTQaFQFPzJCsje3l7r9tzPrxwjq23b62ry8PBAzZo1AQC1a9dGeno6BgwYgM8//1y1vTh4e3vjypUr+Pvvv7F//36MGjUKCxcuREREhMZ7m5utra3G0tpDhgzBo0ePsGzZMlStWhXW1tYIDg5GVlZWsdVfUOwBNgLm5tLVs4AUdrVZulRqR0REpC8yGWBvX7Bbp05SZ0xe/0/JZIC3t9SuIOfL6zza1K5dW+PiKQCIiYlBrVq1dHz12tWtWxd37tzBnTt3VNsuXryIlJQU+Pn56fW5CkPZq/38+XMAUp3Hjh1Ta3Ps2DFVjQV5HVZWVpBrucLe1tYWb7zxBpYvX47Dhw8jKioK586dy/cYbY4dO4axY8eia9eu8Pf3h7W1NZKTkzXaRUdHazyuW7dugZ5DV+wBNhK9eklTnb06tQwAfPxx6ZhahoiIyi5lZ02fPlJ4zf0XS2WYLa7OmpEjR2LlypUYO3Ys3nvvPVhbW+OPP/7Apk2bsGvXLr0+V4cOHVC/fn0MHDgQS5cuRXZ2NkaNGoWQkBA0adJEr8+Vn5SUFCQmJkKhUODatWv47LPPUKtWLVUwnDx5Mvr164eGDRuiQ4cO2LVrF3bs2IG///67wK/Dx8cHcXFxiI2NhZeXFxwdHbFp0ybI5XIEBQXBzs4OP/30E2xtbVG1alXVMZGRkejfvz+sra3h6uqa52vw9fXFxo0b0aRJE6SlpWHy5MmwtbXVaLd161Y0adIErVq1
wv/+9z+cPHkS33//vb7fUjXsATYivXoBt24Bhw4BP/8MvPOOtD0ioviuliUiIiooZWdN5crq2728ine++urVqyMyMhKXL19Ghw4dEBQUhF9++QVbt25F586d9fpcMpkMv/32G8qVK4c2bdqgQ4cOqF69OrZs2aLX53mdYcOGoVKlSvDy8sKAAQPg7++PP//8ExYWUt9lz549sWzZMixatAj+/v5Yu3Yt1q1bh7Zt2xb4dfTu3RudO3dGaGgo3NzcsGnTJri4uODbb79Fy5YtERAQgL///hu7du1ChQoVAACfffYZbt26hRo1amgdA53b999/jydPnqBRo0YYNGgQxo4dC3d3d412c+bMwebNmxEQEIAff/wRmzZtKvbedtn/X4FH+UhLS4OzszNSU1Ph5ORUYs/74AFQpQqQmSmF4IKMqSIiItLmxYsXiIuLU5u3VldyuTQ1Z0KCdIF269Ycpke6kclk2LlzJ3r27Fmg9vl9Hxcmr7EH2Ii5uwPDhkn3FywwbC1ERERK5uZA27bAgAHSV4ZfKm0YgI3cpEnS2Ko//gAuXDB0NURERESlHwOwkatZM2dM1aJFhq2FiIiISJ+EEAUe/qBPDMClwOTJ0tf//S//JZOJiIiI6PUYgEuBoCAgJAR4+TJnvmAiIiIi0g0DcCkxZYr0de1aQMtiOERERERUQAzApUSXLkC9ekB6uhSCiYiIiEg3DMClhEwmrQgHSCvtZGYatBwiIiKiUosBuBQZMEBafSchQbogjoiIiIgKjwG4FLGyAiZMkO4vXAgoFIath4iIyJjIZDL8+uuvhi6DSgEG4FJmxAjA2Rm4fBnYvdvQ1RARkUmSy4HDh4FNm6SvcnmxP2ViYiI++ugjVK9eHdbW1vD29sYbb7yBAwcOFMvzHT58GDKZDCkpKcVyfkAK7MqbhYUFqlSpgokTJyJTz+McZ8+ejcDAwEId4+Pjg6VLl+q1DmPCAFzKODkBI0dK9xcuNGwtRERkgnbsAHx8gNBQ4O23pa8+PtL2YnLr1i00btwYBw8exMKFC3Hu3Dns3bsXoaGhGD16dLE9rz4IIZCdnZ3n/nXr1iEhIQFxcXH4+uuvsXHjRnz++eclWKHu5HI5FKX0z9EMwKXQ2LHScIijR4Hjxw1dDRERmYwdO4A+fTRXZbp3T9peTCF41KhRkMlkOHnyJHr37o1atWrB398fEydORHR0tNZjtPXgxsbGQiaT4datWwCA27dv44033kC5cuVgb28Pf39/7NmzB7du3UJoaCgAoFy5cpDJZBg6dCgAQKFQYO7cuahWrRpsbW3RoEEDbNu2TeN5//zzTzRu3BjW1tY4evRonq/NxcUFHh4e8Pb2Rvfu3dGjRw/ExMSotVm9ejVq1KgBKysr1K5dGxs3blTbHx8fjx49esDBwQFOTk7o168fkpKSAADr16/HnDlzcPbsWVVv8/r16yGEwOzZs1GlShVYW1vD09MTY8eOBQC0bdsWt2/fxoQJE1THKM/l4uKC33//HX5+frC2tkZ8fDxOnTqFjh07wtXVFc7OzggJCdF4DTKZDKtXr0aXLl1ga2uL6tWrq71vJY0BuBSqVAkYNEi6z15gIiLSmRBARkbBbmlpUg+MENrPAwDjxkntCnI+befR4vHjx9i7dy9Gjx4Ne3t7jf0uLi46v/zRo0cjMzMTkZGROHfuHObPnw8HBwd4e3tj+/btAIArV64gISEBy/5/Jaq5c+fixx9/xJo1a3DhwgVMmDAB77zzDiIiItTOPW3aNMybNw+XLl1CQEBAgeq5evUqDh48iKCgINW2nTt3Yty4cZg0aRLOnz+PDz74AMOGDcOhQ4cASIG8R48eePz4MSIiIrB//37cvHkT4eHhAIDw8HBMmjQJ/v7+SEhIQEJCAsLDw7F9+3YsWbIEa9euxbVr1/Drr7+ifv36AIAdO3bAy8sLn332meoYpWfPnmH+/Pn47rvvcOHCBbi7uyM9PR1DhgzB0aNHER0d
DV9fX3Tt2hXp6elqr2/GjBno3bs3zp49i4EDB6J///64dOlSYT4y/RH0WqmpqQKASE1NNXQpKpcuCQEIIZNJ94mIiPLz/PlzcfHiRfH8+fOcjU+fSv+ZGOL29GmB6j5x4oQAIHbs2PHatgDEzp07hRBCHDp0SAAQT548Ue3/559/BAARFxcnhBCifv36Yvbs2VrPpe34Fy9eCDs7O3H8+HG1tu+++64YMGCA2nG//vprgeq1sbER9vb2wtraWgAQ3bt3F1lZWao2LVq0ECNGjFA7rm/fvqJr165CCCH++usvYW5uLuLj41X7L1y4IACIkydPCiGEmDVrlmjQoIHaORYvXixq1aql9ly5Va1aVSxZskRt27p16wQAERsbm+/rksvlwtHRUezatUvttX744Ydq7YKCgsTIkSPzPdertH4f/7/C5DX2AJdSdeoAPXpI/4osXmzoaoiIiIqHKGBPsS7Gjh2Lzz//HC1btsSsWbPw77//5tv++vXrePbsGTp27AgHBwfV7ccff8SNGzfU2jZp0qRANSxZsgSxsbE4e/Ysdu/ejatXr2KQ8s+8AC5duoSWLVuqHdOyZUtVz+mlS5fg7e0Nb29v1X4/Pz+4uLjk27vat29fPH/+HNWrV8eIESOwc+fOfMcqK1lZWWn0aCclJWHEiBHw9fWFs7MznJyc8PTpU8THx6u1Cw4O1nhsqB5gBuBSbPJk6euPPwKJiYathYiISiE7O+Dp04Ld9uwp2Dn37CnY+ezsCnQ6X19fyGQyXL58uVAvzcxMiji5A/TLly/V2rz33nu4efMmBg0ahHPnzqFJkyZYsWJFnud8+vQpAOCPP/5AbGys6nbx4kWN8azahmto4+HhgZo1a6J27dro1q0b5syZgy1btuD69esFOl5X3t7euHLlCr7++mvY2tpi1KhRaNOmjcZ79CpbW1vVmGClIUOGIDY2FsuWLcPx48cRGxuLChUqICsrqzhfQpEwAJdiLVsCLVoAWVnA8uWGroaIiEodmQywty/YrVMnwMtLOiavc3l7S+0Kcr68zvOK8uXLIywsDKtWrUJGRobG/rymKXNzcwMAtfGrsbGxGu28vb3x4YcfYseOHZg0aRK+/fZbAFJPJyDNdKCU+8KvmjVrqt1y98AWhbm5OQDg+fPnAIC6devi2LFjam2OHTsGPz8/1f47d+7gzp07qv0XL15ESkqKqo2VlZXa61CytbXFG2+8geXLl+Pw4cOIiorCuXPn8j1Gm2PHjmHs2LHo2rUr/P39YW1tjeTkZI12r16wGB0djbp16xboOfTN6ALwqlWr4OPjAxsbGwQFBeHkyZN5tr1w4QJ69+4NHx8fyGQyrfPVzZ07F02bNoWjoyPc3d3Rs2dPXLlypRhfQcmaMkX6+vXXwCtjzYmIiPTH3Bz4/wvBNMKr8vHSpVI7PVu1ahXkcjmaNWuG7du349q1a7h06RKWL1+u8Wd1JWUonT17Nq5du4Y//vgDi18ZMzh+/Hjs27cPcXFxiImJwaFDh1SBrGrVqpDJZNi9ezcePnyIp0+fwtHRER9//DEmTJiADRs24MaNG4iJicGKFSuwYcMGnV5bSkoKEhMTcf/+fUREROCzzz5DrVq1VHVMnjwZ69evx+rVq3Ht2jV89dVX2LFjBz7++GMAQIcOHVC/fn0MHDgQMTExOHnyJAYPHoyQkBDVMAwfHx/ExcUhNjYWycnJyMzMxPr16/H999/j/PnzuHnzJn766SfY2tqiatWqqmMiIyNx7949rWE2N19fX2zcuBGXLl3CiRMnMHDgQNja2mq027p1K3744QdcvXoVs2bNwsmTJzFmzBid3rciK9TI42K2efNmYWVlJX744Qdx4cIFMWLECOHi4iKSkpK0tj958qT4+OOPxaZNm4SHh4fGYG0hhAgLCxPr1q0T58+fF7GxsaJr166iSpUq4mkBB98LYZwXwSnJ5ULUri1dT7B4saGrISIiY5XfxUOFsn27EF5e6he0
eXtL24vR/fv3xejRo0XVqlWFlZWVqFy5snjzzTfFoUOHVG2Q6yI4IYQ4evSoqF+/vrCxsRGtW7cWW7duVbsIbsyYMaJGjRrC2tpauLm5iUGDBonk5GTV8Z999pnw8PAQMplMDBkyRAghhEKhEEuXLhW1a9cWlpaWws3NTYSFhYmIiAghhPaL5/ICQHWTyWSiUqVKIjw8XNy4cUOt3ddffy2qV68uLC0tRa1atcSPP/6otv/27dvizTffFPb29sLR0VH07dtXJCYmqva/ePFC9O7dW7i4uAgAYt26dWLnzp0iKChIODk5CXt7e9G8eXPx999/q46JiooSAQEBqovzhJAugnN2dtZ4HTExMaJJkybCxsZG+Pr6iq1bt2pcRAdArFq1SnTs2FFYW1sLHx8fsWXLlte+R6/S10Vwsv8vyigEBQWhadOmWLlyJQBpag9vb2989NFHmDZtWr7H+vj4YPz48Rg/fny+7R4+fAh3d3dERESgTZs2BaorLS0Nzs7OSE1NhZOTU4GOKUnffw+89570l6mbNwFLS0NXRERExubFixeIi4tDtWrVYGNjU7STyeXAkSNAQoI0N2fr1sXS80tlh0wmw86dO9GzZ88inSe/7+PC5DWjGQKRlZWFM2fOoEOHDqptZmZm6NChA6KiovT2PKmpqQCkMUV5yczMRFpamtrNmL3zDuDhIc1LvnmzoashIqIyz9wcaNsWGDBA+srwS6WM0QTg5ORkyOVyVKxYUW17xYoVkainKQ4UCgXGjx+Pli1bol69enm2mzt3LpydnVU3fQ1sLy7W1tLc4wCwYEGB5xYnIiIiMklGE4BLwujRo3H+/Hlsfk036fTp05Gamqq65b6y0lh9+CHg4ACcPw/s3WvoaoiIiIhyCCGKPPxBn4wmALu6usLc3Fy1drVSUlISPDw8inz+MWPGYPfu3Th06BC8vLzybWttbQ0nJye1m7FzcQE++EC6v2CBQUshIiIiMmpGE4CtrKzQuHFjHDhwQLVNoVDgwIEDeU5xUhBCCIwZMwY7d+7EwYMHUa1aNX2Ua5TGjwcsLIDDh4FTpwxdDREREZFxMpoADAATJ07Et99+iw0bNuDSpUsYOXIkMjIyMGzYMADA4MGDMX36dFX7rKws1SosWVlZuHfvHmJjY9VWTxk9ejR++ukn/Pzzz3B0dERiYiISExNVE0yXJV5ewNtvS/cXLjRsLUREZJyMaPInokLT1/evUU2DBgArV67EwoULkZiYiMDAQCxfvhxBQUEAgLZt28LHxwfr168HANy6dUtrj25ISAgOHz4MABrL9SmtW7cOQ4cOLVBNxj4NWm7nzgEBAYCZGXDlClCzpqErIiIiYyCXy3H16lW4u7ujQoUKhi6HSCePHj3CgwcPUKtWLdWqeUqFyWtGF4CNUWkKwADQrZu0FPvIkdIKcURERIC0LHBKSgrc3d1hZ2eXZycRkbERQuDZs2d48OABXFxcUKlSJY02DMB6VtoCcESENC2jjQ1w+zbg7m7oioiIyBgIIZCYmIiUlBRDl0KkExcXF3h4eGj95a0wec2iuAokw2nTBmjWDDh5Eli1Cpgzx9AVERGRMZDJZKhUqRLc3d3x8uVLQ5dDVCiWlpYawx50xR7gAihtPcAAsG0b0LcvUL48EB8P2NsbuiIiIiKi4lMql0Im/XrrLaBGDeDxY+CHHwxdDREREZHxYAAuo8zNgY8/lu4vXgxkZxu2HiIiIiJjwQBchg0ZAri5SRfCbd1q6GqIiIiIjAMDcBlmawt89JF0f+FCgKO9iYiIiBiAy7xRowA7O+Cff4Bcq0wTERERmSwG4DKuQgXgvfek+wsWGLYWIiIiImPAAGwCJkyQLorbv1/qCSYiIiIyZQzAJsDHBwgPl+4vXGjQUoiIiIgMjgHYREyeLH395Rfg1i2DlkJERERkUAzAJiIwEOjYEZDLgSVLDF0NERERkeEwAJuQKVOkr999Bzx6ZNhaiIiIiAyFAdiEtG8P
NGwIPHsGfP21oashIiIiMgwGYBMik+X0Aq9YATx/bth6iIiIiAyBAdjE9OkjzQrx8CGwYYOhqyEiIiIqeQzAJsbCApg4Ubq/aJF0URwRERGRKWEANkHDhwPlywM3bgA7dxq6GiIiIqKSxQBsguztgTFjpPsLFgBCGLYeIiIiopLEAGyixowBbGyAU6eAyEhDV0NERERUchiATZSbGzBsmHR/wQLD1kJERERUkhiATdjEiYCZGbBnD3DunKGrISIiIioZDMAmrGZNoHdv6f6iRYathYiIiKikMACbuMmTpa8//wzcuWPYWoiIiIhKAgOwiWvaFGjbFsjOBpYtM3Q1RERERMWPAZhUyyOvXQukpBi0FCIiIqJixwBM6NwZqFcPePoUWLPG0NUQERERFS8GYIJMltMLvGwZ8OKFYeshIiIiKk4MwAQA6N8f8PYGEhOBn34ydDVERERExYcBmAAAlpbA+PHS/UWLAIXCoOUQERERFRsGYFIZMQJwdgauXAF27TJ0NURERETFgwGYVBwdgVGjpPtcHpmIiIjKKgZgUjN2LGBlBRw/Dhw7ZuhqiIiIiPSPAZjUeHgAQ4ZI9xcuNGwtRERERMWBAZg0TJokTY3222/A5cuGroaIiIhIvxiASUPt2kCPHtL9RYsMWwsRERGRvjEAk1bKhTE2bgTu3zdsLURERET6xABMWgUHA61aAVlZwPLlhq6GiIiISH8YgClPkydLX9esAdLSDFsLERERkb4wAFOeuncH6tQBUlOBb781dDVERERE+sEATHkyM8vpBV6yRBoOQURERFTaMQBTvgYOBCpVAu7dAzZtMnQ1REREREXHAEz5srYGxo+X7i9cCAhh0HKIiIiIiowBmF7rgw8AR0fgwgXgzz8NXQ0RERFR0TAA02s5O0shGAAWLDBsLURERERFxQBMBTJuHGBpCUREACdOGLoaIiIiIt0ZXQBetWoVfHx8YGNjg6CgIJw8eTLPthcuXEDv3r3h4+MDmUyGpUuXFvmcpJ2Xl3RBHCCNBSYiIiIqrYwqAG/ZsgUTJ07ErFmzEBMTgwYNGiAsLAwPHjzQ2v7Zs2eoXr065s2bBw8PD72ck/L28cfS1x07gGvXDFsLERERka6MKgB/9dVXGDFiBIYNGwY/Pz+sWbMGdnZ2+OGHH7S2b9q0KRYuXIj+/fvD2tpaL+ekvPn7A926STNBfPWVoashIiIi0o3RBOCsrCycOXMGHTp0UG0zMzNDhw4dEBUVZTTnNHVTpkhf160DkpIMWwsRERGRLowmACcnJ0Mul6NixYpq2ytWrIjExMQSPWdmZibS0tLUbiRp3RoICgIyM4GVKw1dDREREVHhGU0ANiZz586Fs7Oz6ubt7W3okoyGTJbTC7xqFfD0qWHrISIiIiosownArq6uMDc3R9Irf1dPSkrK8wK34jrn9OnTkZqaqrrduXNHp+cvq3r0AHx9gSdPgO+/N3Q1RERERIVjNAHYysoKjRs3xoEDB1TbFAoFDhw4gODg4BI9p7W1NZycnNRulMPcHJg0Sbr/1VfAy5eGrYeIiIioMIwmAAPAxIkT8e2332LDhg24dOkSRo4ciYyMDAwbNgwAMHjwYEyfPl3VPisrC7GxsYiNjUVWVhbu3buH2NhYXL9+vcDnJN0MHgy4uwPx8cDWrYauhoiIiKjgLAxdQG7h4eF4+PAhZs6cicTERAQGBmLv3r2qi9ji4+NhZpaT2e/fv4+GDRuqHi9atAiLFi1CSEgIDh8+XKBzkm5sbYGxY4FPP5WWRx4wQBofTERERGTsZEIIYegijF1aWhqcnZ2RmprK4RC5PH4MVKkCZGQA+/YBnToZuiIiIiIyVYXJa0Y1BIJKl/Llgffek+5zeWQiIiIqLRiAqUgmTJAuivv7byAmxtDVEBEREb0eAzAVSdWqQP/+0n32AhMREVFpwABMRTZ5svT1l1+AuDjD1kJERET0OgzAVGQNGgBhYYBCIc0LTERERGTMGIBJ
L5S9wN9/DyQnG7YWIiIiovwwAJNetGsHNGoEPH8OfP21oashIiIiyhsDMOmFTAZMmSLdX7ECePbMsPUQERER5YUBmPSmd2+gWjVpCMT69YauhoiIiEg7BmDSGwsLYNIk6f7ixYBcbth6iIiIiLRhACa9GjYMqFABuHkT2LHD0NUQERERaWIAJr2yswPGjJHuz58PCGHYeoiIiIhexQBMejd6NGBrC5w5Axw+bOhqiIiIiNQxAJPeubkBw4dL9xcsMGwtRERERK9iAKZiMXEiYGYG7N0L/PuvoashIiIiysEATMWienWgTx/p/qJFhq2FiIiIKDcGYCo2yuWRN20C4uMNWwsRERGREgMwFZsmTaQlkrOzgaVLDV0NERERkYQBmIqVcnnkb74BnjwxbC1EREREAAMwFbNOnYCAACAjA1izxtDVEBERETEAUzGTyXLGAi9bBrx4Ydh6iIiIiBiAqdiFhwPe3kBSErBxo6GrISIiIlPHAEzFztJSmhcYkKZEk8sNWw8RERGZNgZgKhHvvQeUKwdcvQr8/ruhqyEiIiJTxgBMJcLBARg5Urq/YAEghGHrISIiItOlcwCOj4/Hhx9+iNq1a6N8+fKIjIwEACQnJ2Ps2LH4559/9FYklQ0ffQRYWwPR0cCxY4auhoiIiEyVTgH44sWLaNiwIbZs2YJq1aohNTUV2dnZAABXV1ccPXoUK1eu1GuhVPp5eABDhkj3FywwbC1ERERkunQKwFOmTIGLiwuuXr2Kn376CeKVv2d369YNR44c0UuBVLZMmiRNjbZrF3DxoqGrISIiIlOkUwCOjIzEyJEj4ebmBplMprG/SpUquHfvXpGLo7KnVi3grbek+4sWGbYWIiIiMk06BWCFQgE7O7s89z98+BDW1tY6F0Vlm3JhjJ9+Au7fN2wtREREZHp0CsCNGjXCH3/8oXVfdnY2Nm/ejObNmxepMCq7mjcHWrcGXr6UVocjIiIiKkk6BeDp06dj7969GDlyJM6fPw8ASEpKwt9//41OnTrh0qVLmDZtml4LpbJlyhTp65o1QGqqYWshIiIi0yITr17BVkAbN27EuHHjkJqaCiEEZDIZhBBwcnLC6tWrMWDAAH3XajBpaWlwdnZGamoqnJycDF1OmaBQAPXrSxfCLViQMyyCiIiISBeFyWs6B2AAyMjIwP79+3Ht2jUoFArUqFEDYWFhcHR01PWURokBuHisWwcMHw54egJxcYCVlaErIiIiotKq2ANwZGQk6tatCzc3N637k5OTcfHiRbRp06awpzZKDMDFIzMTqF5duhBu3Tpg6FBDV0RERESlVWHymk5jgENDQ7F///489x84cAChoaG6nJpMiLU1MH68dH/hQmlYBBEREVFx0ykAv67TODMzE+bm5joVRKbl/fcBJydpLPCePYauhoiIiEyBRUEbxsfH49atW6rHly9fRmRkpEa7lJQUrF27FlWrVtVLgVS2OTsDH34oXQi3YAHQvbuhKyIiIqKyrsBjgOfMmYM5c+ZoXfktNyEEzM3NsXbtWgwfPlwvRRoaxwAXr/v3AR8faV7gqChpnmAiIiKiwihMXitwD3C/fv1Qr149CCHQr18/jB07Fq1bt1ZrI5PJYG9vj8DAQFSsWFG36snkeHoC77wjXQi3cCGwfbuhKyIiIqKyTKdZIDZs2ICQkBD4+PgUQ0nGhz3Axe/iRcDfH5DJgMuXgVq1DF0RERERlSbFPgvEkCFDTCb8Usnw8wPeeAMQAli82NDVEBERUVmm80IYL168wPbt2xETE4PU1FQoXpnDSiaT4fvvv9dLkYbGHuCScfQo0Lq1ND3a7dsAR9EQERFRQRXLGODcbt++jdDQUNy6dQsuLi5ITU1F+fLlkZKSArlcDldXVzg4OOhUPJmuli2lC+Cio4EVK4DPPzd0RURERFQW6TQEYvLkyUhNTUV0dDSuXr0KIQS2bNmCp0+fYv78+bC1tcW+ffv0XSuVcTIZMGWKdH/VKuDp
U8PWQ0RERGWTTgH44MGDGDVqFJo1awYzM+kUQghYW1tj8uTJaN++PcYrl/giKoQ335QugEtJAb77ztDVEBERUVmkUwB+9uyZ6iI4JycnyGQypKamqvYHBwfj6NGjeimQTIu5OfDxx9L9r76S5gYmIiIi0iedAnCVKlVw9+5dAICFhQUqV66M6Oho1f6LFy/CxsZGPxWSyRk0SLoA7s4dYMsWQ1dDREREZY1OAbhdu3b47bffVI+HDh2KJUuWYMSIEXj33XexatUqvPHGGzoVtGrVKvj4+MDGxgZBQUE4efJkvu23bt2KOnXqwMbGBvXr18eePXvU9j99+hRjxoyBl5cXbG1t4efnhzVr1uhUG5UMGxtg7Fjp/sKF0tRoRERERHojdHD79m2xbds28eLFCyGEEM+fPxfvvvuucHFxERUqVBBDhgwRqamphT7v5s2bhZWVlfjhhx/EhQsXxIgRI4SLi4tISkrS2v7YsWPC3NxcLFiwQFy8eFF8+umnwtLSUpw7d07VZsSIEaJGjRri0KFDIi4uTqxdu1aYm5uL3377rcB1paamCgA6vSbSzePHQtjbCwEIsXevoashIiIiY1eYvKbzPMDFISgoCE2bNsXKlSsBAAqFAt7e3vjoo48wbdo0jfbh4eHIyMjA7t27VduaN2+OwMBAVS9vvXr1EB4ejhkzZqjaNG7cGF26dMHnBZxni/MAG8bEicCSJUC7dsCBA4auhoiIiIxZsa4E9+zZM1SoUAELFy7UuUBtsrKycObMGXTo0CGnODMzdOjQAVFRUVqPiYqKUmsPAGFhYWrtW7Rogd9//x337t2DEAKHDh3C1atX0alTpzxryczMRFpamtqNSt748YCFBXDwIHD6tKGrISIiorKi0AHYzs4OFhYWsLe312shycnJkMvlqPjK8l8VK1ZEYmKi1mMSExNf237FihXw8/ODl5cXrKys0LlzZ6xatQpt2rTJs5a5c+fC2dlZdfP29i7CKyNdVakC9O8v3dfz71tERERkwnS6CK53797Ytm0bjGj0RJ5WrFiB6Oho/P777zhz5gwWL16M0aNH4++//87zmOnTpyM1NVV1u3PnTglWTLlNnix93bYNuHnTsLUQERFR2aDTUsj9+/fHqFGjEBoaihEjRsDHxwe2trYa7Ro1alTgc7q6usLc3BxJSUlq25OSkuDh4aH1GA8Pj3zbP3/+HJ988gl27tyJbt26AQACAgIQGxuLRYsWaQyfULK2toa1tXWBa6fiExAAdO4M7N0rzQv8/8PDiYiIiHSmUwBu27at6v6RI0c09gshIJPJIJfLC3xOKysrNG7cGAcOHEDPnj0BSBfBHThwAGPGjNF6THBwMA4cOKC26tz+/fsRHBwMAHj58iVevnypWq1OydzcHAqFosC1kWFNmSIF4B9+AGbNAtzcDF0RERERlWY6BeB169bpuw4AwMSJEzFkyBA0adIEzZo1w9KlS5GRkYFhw4YBAAYPHozKlStj7ty5AIBx48YhJCQEixcvRrdu3bB582acPn0a33zzDQBplbqQkBBMnjwZtra2qFq1KiIiIvDjjz/iq6++KpbXQPrXti3QpIl0IdyqVcDs2YauiIiIiEozo5oGDQBWrlyJhQsXIjExEYGBgVi+fDmCgoIASD3PPj4+WL9+var91q1b8emnn+LWrVvw9fXFggUL0LVrV9X+xMRETJ8+HX/99RceP36MqlWr4v3338eECRMgk8kKVBOnQTO8X34BwsOBChWA+HjAzs7QFREREZExKUxeM7oAbIwYgA0vOxuoXVu6EG7lSmD0aENXRERERMakWOcBJjIECwtg0iTp/uLFUiAmIiIi0gUDMJUaQ4cCrq5AXBywfbuhqyEiIqLSigGYSg07O+Cjj6T7CxYAHLxDREREumAAplJl1CjA1haIiQEOHTJ0NURERFQaMQBTqeLqCrz7rnR/wQLD1kJERESlk86zQMjlcuzbtw83b97EkydPNJZFlslk
mDFjhl6KNDTOAmFc4uKAmjUBhQKIjQUaNDB0RURERGRoxT4N2unTp9G7d2/cvXtXI/iqTlzIleCMGQOw8RkwANi8GRg4EPjpJ0NXQ0RERIZW7NOgjRo1Cs+fP8evv/6Kx48fQ6FQaNzKSvgl4zR5svR182bg9m3D1kJERESli04B+N9//8XUqVPxxhtvwMXFRc8lEb1eo0ZA+/aAXA4sXWroaoiIiKg00SkAe3l55Tn0gaikTJkiff32W+DxY8PWQkRERKWHTgF46tSp+Pbbb5GWlqbveogKrGNH6QK4jAxg9WpDV0NERESlhYUuB6Wnp8PBwQE1a9ZE//794e3tDXNzc7U2MpkMEyZM0EuRRNrIZFIv8MCBwPLlwMSJ0hzBRERERPnRaRYIM7PXdxxzFggqCS9fSlOixccDa9YAH3xg6IqIiIjIEAqT13TqAY6Li9OpMCJ9s7SUen7HjwcWLwbeew945Y8RRERERGp0XgjDlLAH2Lg9fQpUqQI8eQJs3w706mXoioiIiKikFXsPsFJGRgYiIiJw+/8nYq1atSpCQkJgb29flNMSFYqDAzB6NPD558D8+cBbb0njg4mIiIi00bkHeMWKFfj000/x9OlTtSnRHB0d8cUXX2DMmDF6K9LQ2ANs/B48kHqBMzOBiAigTRtDV0REREQlqdhXgvvxxx8xbtw41KtXDz///DNiY2MRGxuLTZs2oX79+hg3bhw2btyoU/FEunB3B4YOle4vWGDQUoiIiEyePEuO2KWHcfyjTYhdehjyLOOaGEGnHuDAwEC4uLjgwIEDGtOfyeVytG/fHikpKYiNjdVXnQbFHuDS4do1oHZtQAjg/HnA39/QFREREZme6Ck7UOWrcfCU31Vtu2/uhfiJy9B8QfFdqFPsPcBXrlxB3759NcIvAJibm6Nv3764cuWKLqcm0pmvb84FcIsWGbYWIiIiUxQ9ZQeaLewDj1zhFwA85PfQbGEfRE/ZYaDK1OkUgJ2dnXHr1q0899+6dYs9pWQQkydLX//3P+Du3fzbEhERkf7Is+So8tU4AEIjYJpBGnDg/dV4oxgOodMsEN26dcOKFSvQuHFj9O/fX23fli1bsHLlSgwcOFAvBRIVRlAQEBIiXQi3bBmwcKGhKyIiIir9FAogPR1IScm5paaqP5YfOII58rx7n8wgUFl+B7FfH0Hg+LYlUXaedBoD/PDhQ4SEhODKlSvw8PCAr68vAODatWtITExEnTp1EBERAVdXV70XbAgcA1y6/PEH0L074OgI3LkDODsbuiIiIiLDUiiAtDT1wJrX7dVgq9ymLTHa4yla4hja4jB6Yztq4dprazk+5me0WDFAPy8sl2KfB9jNzQ0xMTFYu3Yt/vzzT9U8wPXr18fUqVPx/vvvw8bGRpdTExVZly7SBXAXLgBr1wJTphi6IiIioqLJzs47wGoLrK/e0tL0U0d5y3R0sjuKULMItMg6jLrPTsNcFG5Ig12NSvoppgi4ElwBsAe49NmwQZoWrVIlIC4OsLY2dEVERGTKXr5UD6oFCa2526Wn66cOGxvAxSXvm7Oz+uMKlmnwuH4U5f49DLsTh2EWGwPIXwm81aoBISFQtGqDh+9/AjdFkmrMb24KyJBg7gWPZ3Ewt9KcSKGoSmwlOCJjNWAA8J//APfuSRfEDR9u6IqIiKg0y8oqWGjNq01Ghn7qsLPLP7DmF2ydnaUAnK+UFODoUeDwYemCmpgYafxEbtWrA23bShfdhIQAVasCkGZWiLviDLeFfaCATC0EKyAt0Xpn4lJULobwW1gF6gEODQ2FmZkZ9u3bBwsLC7Rr1+71J5bJcODAAb0UaWjsAS6dFi2SZoWoU0caDmGm05wnRERUFmRmFj605r49f66fOhwcChdaX31sZaWfOlSePAGOHJHC7uHDQGysZuCtWVMKusrQ6+2d7ym1zQN8z9wbdyYuNZp5gAvUAyyEgCLXm6FQKCCTyV57DJEhvf8+8N//ApcvSxfG
vfGGoSsiIiJdCAG8eFH40Jq7zYsX+qnF0bHggfXVm5MTYGmpnzp09vixFHiVPbyxsZpXt9WqpR54K1cu1FM0X9AL8s97IPbrI3h2IwF2NSqh/qjWRtHzq8QxwAXAHuDSa9o0YP58oFUr6eediIhKnhBSD6quMxCkpEhDEPRBW69qQXtjnZwAi9I2ePTRIyAyMqeH999/NQNv7drqQxo8PQ1RaZEVJq/pFIAjIyNRt25duLm5ad2fnJyMixcvok2bNoU9tVFiAC697t+XxuZnZQHHjgEtWhi6IiKi0kcIaQyrrsMHUlKkWQyKSiYreGjV1sbREdCyiG3Z8vCheuA9d06zTd266j28Hh4lXWWxKPaL4EJDQ7Fx40a8/fbbWvcfOHAAb7/9NuSvXiVIVMI8PYFBg4Dvv5cWxdi509AVERGVPCHUFzEo7AwEKSmaF/7rwsys8KE1983BgddzaHjwQAq8yiEN589rtvHzk8Ju27ZAmzZAxYolXKTx0SkAv67TODMzE+Zl/lcsKi0mTZIC8G+/AVeuSH/pISIqTbStwlWY3tjUVM3rmnRhYVH40Jq7nYOD1ItLRZCUJAVdZQ/vxYuaberVy+ndbdMGcHcv6SqNXoEDcHx8PG7duqV6fPnyZURGRmq0S0lJwdq1a1H1/6fEIDK0unWBN98Efv8dWLwY+OYbQ1dERKZGLi/6Igb6uGLH0rJgYTWvYGtnxwBb4hIS1APv5cuabQICcoY0tGkDlJGVeItTgccAz5kzB3PmzCnQ7A/m5uZYu3YthpeRyVc5Brj0O3ZMuhDOygq4fbvMDHciohKSnZ0TVHUZPqCvVbisrIBy5XQfQmBjwwBr9O7fzwm7ERHSny5zk8mkwKsc0tC6NVChggEKNT7FMga4X79+qFevHoQQ6NevH8aOHYvWrVurtZHJZLC3t0dgYCAqcnwJGZGWLaUL4I4fB5YvB7780tAVEVFJenUVrsIOIXj6VD912NrqPnxAGWCpjLl7V72H99o19f0yGRAYmDOkoXVroHx5AxRatug0C8SGDRvQpk0bVKtWrThqMjrsAS4bfvsN6NlT+o/kzh3pamAiKh2ysoo2A8GzZ/qpw96+aIsYcFl2wp07OWH38GHgxg31/WZmQMOGOUMaWrWSuv3ptYp9FoiBAwfiWT7/mqSlpcHOzg4WpW6yPCrL3nhDugDuyhXg22+BiRMNXRGR6ci9iIEuPbH6XIVL11kInJ2NYBEDKn1u31Yf0nDzpvp+MzOgUaOcIQ2tWknfbFSsdOoBHjVqFCIjI3Fe21QbAOrXr4927dph2bJlRS7QGLAHuOz47jtgxAjAy0v6N4j/mRG9nrZVuArbG5uZqZ9anJx0H0Lg7FwKFzGg0ufWrZywe/iw9Dg3c3OgceOcIQ2tWknf2FRkxd4DvHfvXgwePDjP/X369MFPP/1UZgIwlR3vvAPMmCENudq8WZojmKisE0IaAqDrEILUVP2swiWT5QRRXWYgcHIygUUMqHQRAoiLUx/SEB+v3sbcHGjaNGdIQ8uWHINnBHQKwPfv30flfNaF9vT0xL1793Quiqi42NgA48YB06cDCxZIgZhXRJOxE0K6CEvXGQhSUvSzCpeZ2euHCOQXah0duYgBlXJCSH8+VIbdiAhpTG9uFhZS4FUOaWjRQhp7Q0ZFpwBcoUIFXHl1Wo5cLl26xKECZLQ+/BD44gtpsZy9e4EuXQxdEZV1CkVOgNVl+EBqqn5W4TI3L/oiBgywZFKEAK5fVx/S8GoHn6Ul0KxZzpCGFi2kqyXJqOkUgDt37oy1a9di4MCBaNiwodq+mJgYfPPNN+jbt69eCiTSNxcX4P33ga++kpZHZgCm11EotC9iUNDe2LQ0/a3CVa6c7kMI7O35Fw+ifAkBXL2qPqQhIUG9jaUl0Lx5zpCG4GBphRAqVXS6CO7+/fto2rQpHjx4gDfffBP+/v4AgPPnz2PXrl1wd3fHiRMn4OXlpfeCDYEXwZU9d+4A
1atLfxY+eVL6axWVXXJ54RYxeLWNPlfhKsoiBra2DLBEeiWENDVQ7iENiYnqbayspMCr7OFt3pyB10gVJq/pFIABICEhAdOmTcNvv/2GtP9f4sbJyQk9e/bEl19+CU9PT11Oa5QYgMumIUOAH38E+vYFfvnF0NVQfnKvwqXLEIL0dP3UYWOj2xCC3IsYMMASGZAQwKVLOWE3IgJISlJvY20t9eoqA29QkPTbJxm9EgnASkIIPHz4EADg5ub22qWSSyMG4LLp3DlpNUkzM6kDoGZNQ1dUdmVlaQbUwlzMpa9VuOzsiraIAVfhIiplFArg4kX1eXj/P7Oo2NhI43aVQxqaNeMPeylV7NOg5SaTyWBtbQ0HB4cyGX6p7KpfXxr/++ef0njgr782dEXGKzOzaDMQ6HMVrqIsYmBlpZ86iMhIKRTAhQs5QxoiI4HkZPU2trZS4FX28DZrxiX6TJDOAfj06dP49NNPERkZiaysLPz1119o164dkpOT8e6772LChAlo27atHksl0r8pU6QA/MMPQKdO0mpTlSpJS62XpflGX7eIweuC7YsX+qnD0VH3IQRchYuINCgU0p/zcg9pePxYvY2dnTT3rjLwNm3K34ZJtwB8/PhxtGvXDpUrV8Y777yD7777TrXP1dUVqampWLt2rU4BeNWqVVi4cCESExPRoEEDrFixAs2aNcuz/datWzFjxgzcunULvr6+mD9/Prp27arW5tKlS5g6dSoiIiKQnZ0NPz8/bN++HVWqVCl0fVS2hIQANWpIS7G/9VbOdi8vYNkyoFcvw9WmJIQUzHWdgSA1VX+rcBV0EQNtwdbJiatwEVERyeXAv//mDGmIjASePFFvY28vra6mHNLQuDEDL2nQ6b+jTz75BHXr1kV0dDTS09PVAjAAhIaGYsOGDYU+75YtWzBx4kSsWbMGQUFBWLp0KcLCwnDlyhW4u7trtD9+/DgGDBiAuXPnonv37vj555/Rs2dPxMTEoF69egCAGzduoFWrVnj33XcxZ84cODk54cKFC7Dh+B4CsHOnFH5fde8e0KcPsG1b0UOwEEBGhu7DB1JSgJcvi1YDkLMKV1EWMShLveJEVArI5cDZszlDGo4ckf5RzM3BQQq8yh7exo355yJ6LZ0ugrO3t8fcuXMxduxYPHr0CG5ubvj777/Rrl07AMB3332HsWPH4lkhB/4FBQWhadOmWLlyJQBAoVDA29sbH330EaZNm6bRPjw8HBkZGdi9e7dqW/PmzREYGIg1a9YAAPr37w9LS0ts3LixsC9ThRfBlU1yOeDjIy2LrI1MJvUE37ypvQe2ML2x+ljEwMysaIsYcBUuIjJ62dlAbGzOkIYjR6R/YHNzdJTGqSl7eBs14p+XCEAJXARnaWkJRT6zut+7dw8OhVz2LysrC2fOnMH06dNV28zMzNChQwdERUVpPSYqKgoTJ05U2xYWFoZff/0VgBSg//jjD0yZMgVhYWH4559/UK1aNUyfPh09e/bMs5bMzExk5vqbsXKaNypbjhzJO/wCUs/tnTvSX870MQesuXnBFzHQFm4dHDiFFhGVMdnZQExMzpCGo0elibdzc3IC2rTJCbyBgQy8VGQ6fQc1b94c27Ztw/jx4zX2ZWRkYN26dQgJCSnUOZOTkyGXy1GxYkW17RUrVsTly5e1HpOYmKi1feL/T2L94MEDPH36FPPmzcPnn3+O+fPnY+/evejVqxcOHTqUZ41z587FnDlzClU/lT6vLu6TF2X4tbTUfQYCFxfpOgwGWCIyaS9fSoFX2cN79KjmRN3OzlLgVQ5pCAzk+CvSO50C8Jw5cxASEoJu3bphwIABAICzZ8/i5s2bWLRoER4+fIgZM2botVBdKHupe/TogQkTJgAAAgMDcfz4caxZsybPADx9+nS1nuW0tDR4e3sXf8FUoipVKli7rVuBrl25ChcRUaG9fAmcPq3ew5uRod6mXDn1Ht6AAAZeKnY6BeCgoCDs2bMHI0eOxODB
gwEAkyZNAgDUqFEDe/bsQUBAQKHO6erqCnNzcyS9siJLUlISPDw8tB7j4eGRb3tXV1dYWFjAz89PrU3dunVx9OjRPGuxtraGNecELPNat5bG+N67p32Ig3IM8Ftv8d9iIqICycoCTp3KCbzHj2sG3vLlc3p427aVJmXnBQpUwnQeRNOuXTtcuXIFsbGxuHbtGhQKBWrUqIHGjRvrtCCGlZUVGjdujAMHDqjG5yoUChw4cABjxozRekxwcDAOHDigNhRj//79CA4OVp2zadOmuHLlitpxV69eRdWqVQtdI5Ut5ubSVGd9+khhN3cIVn4LL13K8EtElKfMTCnwKoc0HDsmXTWcW4UKOb27ISFAvXoMvGRwRR5FHhgYiMDAQD2UAkycOBFDhgxBkyZN0KxZMyxduhQZGRkYNmwYAGDw4MGoXLky5s6dCwAYN24cQkJCsHjxYnTr1g2bN2/G6dOn8c0336jOOXnyZISHh6NNmzYIDQ3F3r17sWvXLhw+fFgvNVPp1quXNNXZuHHqF8R5eUnh1xjmASYiMhqZmcCJE+o9vK+ulOPqmhN227YF/PwYeMnoFCgAR0ZGAgDatGmj9vi1J7ewgKurK2rVqlWg9uHh4Xj48CFmzpyJxMREBAYGYu/evaoL3eLj42GW64eoRYsW+Pnnn/Hpp5/ik08+ga+vL3799VfVHMAA8NZbb2HNmjWqadtq166N7du3o1WrVgWqicq+Xr2AHj2kWSESEsrmSnBERDp58QKIjs4JvNHRmoHX3T0n7LZtC9StywsmyOgVaB5gMzMzyGQyPH/+HFZWVqrHBeXt7Y3t27ejcePGRSrWUDgPMBERmYTnz6WQqxzSEB2tuZRkxYo5YTckBKhTh4GXjILe5wE+dOgQAGlMbe7HryOXy3H//n3MmzcPo0aNwokTJwp0HBEREZWAZ8+AqKicHt4TJ6QL2XKrVEl9SEOtWgy8VOoVKAC/Ol1YYef4ffbsGcaOHVuoY4iIiEjPMjKkcbvKwHvypOZa65Urqw9pqFmTgZfKnCJfBJeQkIAHDx6gZs2asLe319rmnXfeQVhYWFGfioiIiArj6VMp8CqHNJw8Ka2+lpuXl/qQhho1GHipzNM5AP/222+YOnUqrl27BkCafqxdu3ZITk5Gx44dMWvWLNV0ZnZ2dpx2jIiIqLilp0tTkSl7eE+f1gy8VaqoD2moVo2Bl0yOTgF4165d6NWrF4KDg/H2229j9uzZqn2urq6oXLky1q1bpwrAREREVAzS0qTAe/iwdDtzBpDL1dv4+KgPafDxKekqiYyOTgH4s88+Q5s2bXDo0CE8evRILQAD0gIVa9eu1Ud9REREpJSaKi0nrBzScOYMoFCot6lWTX1IA/8CS6RBpwB8/vx5fPXVV3nur1ixIh48eKBzUURERAQgJUWapFw5pOGffzQDb40aOWE3JEQa4kBE+dIpANvZ2SHj1bW9c7l58yYqVKigc1FEREQm6ckTKfAqhzTExqqv0w4Avr7qSwt7eZV8nUSlnE4BODQ0FBs2bMD48eM19iUmJuLbb79F9+7di1obERFR2fb4MRAZmTOk4exZzcBbq5Z6D2/lyoaolKhM0SkAf/HFF2jevDmaNm2Kvn37QiaTYd++fTh48CDWrl0LIQRmzZql71qJiIhKt+RkKfAqhzScO6cZeOvUUQ+8lSoZolKiMq1ASyFrc+HCBYwbNw6HDh1C7lO0bdsWq1atQt26dfVWpKFxKWQiItLJw4c5PbyHDwPnz2u28fPLGdLQpg3g4VHCRRKVDXpfClkbf39//P3333jy5AmuX78OhUKB6tWrw83NDQAghICM8woSEZEpefAgp3c3IgK4cEGzjb9/Tg9vmzZAxYolXSWRySvySnDlypVD06ZNVY+zsrKwfv16LFq0CFevXi3q6YmIiIxXYqIUdJWh99IlzTb166sH3v/vKCIiwylUAM7KysLvv/+OGzduoFy5cujevTs8PT0BAM+ePcPK
lSuxdOlSJCYmokaNGsVSMBERkcEkJKj38F6+rNmmQYOcIQ2tWwOuriVdJRG9RoED8P3799G2bVvcuHFDNebX1tYWv//+O6ysrPD222/j3r17aNasGVasWIFevXoVW9FEREQl4t499cD76l82ZTIp8Cp7eFu3BjgNKJHRK3AA/s9//oO4uDhMmTIFrVu3RlxcHD777DO8//77SE5Ohr+/P3766SeEhIQUZ71ERETF5+7dnLB7+DBw/br6fpkMaNhQPfCWK2eAQomoKAocgPfv349hw4Zh7ty5qm0eHh7o27cvunXrht9++w1mZmbFUiQREVGxiI9X7+G9cUN9v5kZ0KhRzpCGVq0AFxcDFEpE+lTgAJyUlITmzZurbVM+Hj58OMMvEREZv9u3c6Yki4gA4uLU95uZAY0b5/TwtmoFODsboFAiKk4FDsByuRw2NjZq25SPnfmPAxERGRshgFu31Ic03L6t3sbcHGjSJKeHt2VLgPO9E5V5hZoF4tatW4iJiVE9Tk1NBQBcu3YNLlr+JNSoUaOiVUdERFRQQgA3b6oPaYiPV29jYSEF3rZtpVuLFoCjowGKJSJDKvBKcGZmZloXttC24IVym1wu10+VBsaV4IiIjJAQ0pjd3D28d++qt7GwAJo1yxnS0KIF4OBggGKJqLgVy0pw69atK3JhREREOhMCuHZNPfDev6/extISCArKGdIQHAzY2xugWCIyZgUOwEOGDCnOOoiIiNQJAVy5oj6kISFBvY2VlRR4lUMamjcH7OwMUCwRlSZFXgqZiIhIL4SQVlbL3cOblKTextpaCrnKIQ3NmwO2tgYolohKMwZgIiIyDCGAixfVe3gfPFBvY2MjDWNQDmkICpK2EREVAQMwERGVDIVCCrzKeXgjI4GHD9Xb2NhIF6ophzQ0ayb1+hIR6REDMBERFQ+FAjh/Pqd3NyICePRIvY2trTT3rnJIQ9OmDLxEVOwYgImISD8UCuDff3OGNERGAo8fq7exs5NWV1MOaWjSRLqQjYioBDEAExGRbuRyKfAqhzQcOQI8eaLext5eCrzKHt4mTaSpyoiIDIgBmIiICkYuB2Jjc4Y0REYC/78iqIqDA9C6dU7gbdSIgZeIjA4DMBERaZedDfzzT86QhiNHgLQ09TZOTlLgVQ5paNhQWn2NiMiI8V8pIiKSZGcDMTE5QxqOHgXS09XbODur9/AGBjLwElGpw3+1iIhM1cuXwJkzOUMajh4Fnj5Vb+PiArRpkxN4GzQAzM0NUCwRkf4wABMRmYqsLOD06ZwhDceOARkZ6m3KlZOCrnJIQ/36DLxEVOYwABMRlVVZWcCpUzlDGo4fB549U29ToYJ6D2/9+oCZmQGKJSIqOQzARERlRWYmcPJkzpCG48eB58/V27i65vTuhoQA/v4MvERkchiAiYhKqxcvgBMncoY0REVJ23Jzc8sJu23bAnXrMvASkcljACYiKi1evACio3OGNERHS72+uVWsqN7DW7cuIJMZoFgiIuPFAExEZKyeP5d6dZVDGqKjpXG9uXl4qPfw1q7NwEtE9BoMwERExuLZM2ncrnJIw8mTmoHX01M98Pr6MvASERUSAzARkaFkZEiBV9nDe/KkNDdvbpUrS0FXGXpr1mTgJSIqIgZgIqKS8vSpNPeuMvCeOiWtvpabt7d6D2/16gy8RER6xgBMRFRc0tOl1dWUQxpOnwbkcvU2VaoAoaE5gdfHh4GXiKiYMQATEelLWpoUeJU9vGfOaAbeatXUZ2nw8TFAoUREpo0BmIhIVykp6oE3JgZQKNTbVK+eE3ZDQoCqVQ1QKBER5cYATERUUCkpwJEjOfPwxsZqBt6aNdV7eL29S7xMIiLKHwMwEVFeHj/OCbwREVLgFUK9Ta1a6oG3cmUDFEpERIXBAExEpPToERAZmXPR2r//agbe2rXVhzR4ehqiUiIiKgKjXBB+1apV8PHxgY2NDYKCgnDy5Ml822/duhV16tSBjY0N6tevjz17
9uTZ9sMPP4RMJsPSpUv1XDURlTrJycCOHcDYsUBAAODqCvTqBSxbBpw9K4XfunWBDz8ENm8G7t8HLl8G1qwBBgxg+CUiKqWMrgd4y5YtmDhxItasWYOgoCAsXboUYWFhuHLlCtzd3TXaHz9+HAMGDMDcuXPRvXt3/Pzzz+jZsydiYmJQr149tbY7d+5EdHQ0PPmfFpFpevBA6uFVDmk4f16zjZ9fzsITbdoAFSuWcJFERFTcZEK8+vc9wwoKCkLTpk2xcuVKAIBCoYC3tzc++ugjTJs2TaN9eHg4MjIysHv3btW25s2bIzAwEGvWrFFtu3fvHoKCgrBv3z5069YN48ePx/jx4wtUU1paGpydnZGamgonJ6eivUAiKjlJSVLQVQ5puHhRs029ejlDGtq0AbT8ok1ERMavMHnNqHqAs7KycObMGUyfPl21zczMDB06dEBUVJTWY6KiojBx4kS1bWFhYfj1119VjxUKBQYNGoTJkyfD39//tXVkZmYiMzNT9TgtLa2Qr4SIDCIxMSfsHj4sDVd4VUBAzkVrbdpIwx6IiMikGFUATk5OhlwuR8VX/uRYsWJFXNb2HxmAxMREre0TExNVj+fPnw8LCwuMHTu2QHXMnTsXc+bMKWT1RFTi7t/PCbwREcCVK+r7ZTIp8CqHNLRuDVSoYIBCiYjImBhVAC4OZ86cwbJlyxATEwNZAZcXnT59ulqvclpaGrw5lyeR4d29qz6k4do19f0yGRAYmDOkoXVroHx5AxRKRETGzKgCsKurK8zNzZGUlKS2PSkpCR4eHlqP8fDwyLf9kSNH8ODBA1SpUkW1Xy6XY9KkSVi6dClu3bqlcU5ra2tYW1sX8dUQUZHduaM+pOHGDfX9ZmZAw4Y5QxpatQLKlTNAoUREVJoYVQC2srJC48aNceDAAfTs2ROANH73wIEDGDNmjNZjgoODceDAAbUL2vbv34/g4GAAwKBBg9ChQwe1Y8LCwjBo0CAMGzasWF4HEeno9m31IQ03b6rvNzMDGjXKGdLQqhXg7GyAQomIqDQzqgAMABMnTsSQIUPQpEkTNGvWDEuXLkVGRoYqrA4ePBiVK1fG3LlzAQDjxo1DSEgIFi9ejG7dumHz5s04ffo0vvnmGwBAhQoVUOGVMX+Wlpbw8PBA7dq1S/bFEZG6W7dywu7hw9Lj3MzNgcaNc4Y0tGoFcCYWIiIqIqMLwOHh4Xj48CFmzpyJxMREBAYGYu/evaoL3eLj42FmlrN+R4sWLfDzzz/j008/xSeffAJfX1/8+uuvGnMAE5GBCQHExan38N6+rd7G3Bxo2jRnSEPLloCjoyGqJSKiMszo5gE2RpwHmEgHQkhDGJTjdyMipDG9uVlYSIFXOaShRQvAwaHkayUiolKv1M4DTESlmBDA9evqQxru3VNvY2kJNGuWM6ShRQvA3t4AxRIRkSljACYi3QgBXL2qPqTh/n31NpaWQPPmOUMagoMBOztDVEtERKTCAExEBSOEtNBE7iENuRacAQBYWUmBV9nD27w5Ay8RERkdBmAi0k4I4NKlnLAbEQG8Muc2rK2lXl1lD29QEGBra4hqiYiICowBmIgkCgVw8aL6kIaHD9Xb2NhI43aVgbdZM2kbERFRKcIATGSqFArgwoWcIQ2RkUBysnobW1sp8CqHNDRrJvX6EhERlWIMwESmQqEAzp1TH9Lw+LF6Gzs7ae5dZQ9v06bSuF4iIqIyhAGYqKySy4F//80Z0hAZCTx5ot7G3l5aXU0ZeBs3ZuAlIqIyjwGYqKyQy4GzZ3OGNBw5AqSkqLdxcJACr3JIQ+PG0lRlREREJoQBmKi0ys4GYmNzhjQcOQKkpqq3cXQEWrfO6eFt1EhafY2IiMiE8X9CotIiOxuIickZ0nD0KJCWpt7GyQlo0yYn8AYGMvASERG9gv8zEhmrly+lwKvs4T16FEhPV2/j7CwFXuWQhsBAwNzcAMUSERGVHgzARMbi5Uvg9Gn1Ht6MDPU25cqp
9/AGBDDwEhERFRIDMJGhZGUBp07lBN7jxzUDb/nyOT28bdsC9esDZmYGKJaIiKjsYAAmKimZmVLgVQ5pOHYMeP5cvU2FCjm9uyEhQL16DLxERER6xgBMVFwyM4ETJ3J6eKOiNAOvq2tO2G3bFvDzY+AlIiIqZgzARPry4gUQHZ0TeKOjpW25ubvnhN22bYG6dQGZzADFEhERmS4GYCJAWkTiyBEgIQGoVEmaO/d1F5c9fy6FXOWQhuhoqdc3t4oVc8JuSAhQpw4DLxERkYExABPt2AGMGwfcvZuzzcsLWLYM6NUrZ9uzZ9IwBmUP74kT0oVsuVWqpD6koVYtBl4iIiIjwwBMpm3HDqBPH0AI9e337knbZ8yQeocPHwZOnpSmKsvN0zOnh7dtW6BmTQZeIiIiIycT4tX/+elVaWlpcHZ2RmpqKpycnAxdDumLXA74+Kj3/L6Ol5d6D2+NGgy8RERERqAweY09wGS6jhwpWPjt1AkID5cCb7VqDLxERESlHAMwma6EhIK1GzoUGDCgWEshIiKiksMJR8k0vXwpXcxWEJUqFW8tREREVKLYA0ymJyoK+OAD4Ny5/NvJZNKY39atS6YuIiIiKhHsASbT8eQJ8OGHQMuWUvitUAEYPVoKuq+O61U+Xrr09fMBExERUanCAExlnxDAzz9Li1CsXSs9HjoUuHwZWLkS2LYNqFxZ/RgvL2l77nmAiYiIqEzgEAgq265fB0aNAvbvlx7XqQOsWSNNY6bUqxfQo0fhV4IjIiKiUokBmMqmzExgwQLgiy+k+9bWwKefApMnS/dfZW4uTXNGREREZR4DMJU9hw9LY32vXJEed+wIfP21tEobERERmTyOAaayIzlZGtsbGiqF34oVpbG/+/Yx/BIREZEKAzCVfgoF8MMPQO3awIYN0gwOI0dKF7kNGMCV24iIiEgNh0BQ6XbxojTc4cgR6XFAgDTTQ/Pmhq2LiIiIjBZ7gKl0ev4c+M9/gMBAKfza2QELFwKnTzP8EhERUb7YA0ylz9690gIWN29Kj994A1ixAqha1bB1ERERUanAHmAqPRISgPBwoEsXKfx6eQE7dwK//cbwS0RERAXGAEzGTy4HVq2SFrH45RfAzAyYMEEa/9uzJy9yIyIiokLhEAgybrGxwAcfACdPSo+bNpUucmvY0KBlERERUenFHmAyTk+fAhMnAo0bS+HX0RFYuRKIimL4JSIioiJhDzAZn19/BT76CLh7V3rcrx+wZAng6WnQsoiIiKhsYAAm4xEfLwXf33+XHlerJi1h3LmzYesiIiKiMoVDIMjwsrOBxYsBPz8p/FpYANOnA+fPM/wSERGR3rEHmAzrxAnpIrezZ6XHrVoBa9YA/v6GrYuIiIjKLPYAk2GkpACjRgHBwVL4LV8e+O47ICKC4ZeIiIiKFXuAqWQJAWzZIs3jm5gobRs8GFi0CHBzM2xtREREZBIYgKnk3Lgh9fr+9Zf0uFYtabhDaKhh6yIiIiKTwiEQVPyysoAvvgDq1ZPCr7U1MGcO8O+/DL9ERERU4owyAK9atQo+Pj6wsbFBUFAQTipXAcvD1q1bUadOHdjY2KB+/frYs2ePat/Lly8xdepU1K9fH/b29vD09MTgwYNx//794n4ZBACRkUBgIPDpp8CLF0D79sC5c8DMmVIQJiIiIiphRheAt2zZgokTJ2LWrFmIiYlBgwYNEBYWhgcPHmhtf/z4cQwYMADvvvsu/vnnH/Ts2RM9e/bE+fPnAQDPnj1DTEwMZsyYgZiYGOzYsQNXrlzBm2++WZIvy/QkJwPDhwMhIcClS4C7O/DTT8D+/YCvr6GrIyIiIhMmE0IIQxeRW1BQEJo2bYqVK1cCABQKBby9vfHRRx9h2rRpGu3Dw8ORkZGB3bt3q7Y1b94cgYGBWLNmjdbnOHXqFJo1a4bbt2+jSpUqr60pLS0Nzs7OSE1NhZOTk46vzEQIAWzYAHz8MfDokbTt/feBefOAcuUMWxsRERGV
WYXJa0bVA5yVlYUzZ86gQ4cOqm1mZmbo0KEDoqKitB4TFRWl1h4AwsLC8mwPAKmpqZDJZHBxcdG6PzMzE2lpaWo3KoBLl6QxvcOGSeG3Xj3g2DFg7VqGXyIiIjIaRhWAk5OTIZfLUbFiRbXtFStWRKJyyqxXJCYmFqr9ixcvMHXqVAwYMCDP3w7mzp0LZ2dn1c3b21uHV2NCnj8HZswAGjSQ5vG1tQXmzwdiYoAWLQxdHREREZEaowrAxe3ly5fo168fhBBYvXp1nu2mT5+O1NRU1e3OnTslWGUps38/UL8+8PnnwMuXQLduwMWLwJQpgKWloasjIiIi0mBU8wC7urrC3NwcSUlJatuTkpLg4eGh9RgPD48CtVeG39u3b+PgwYP5jg2xtraGNWcoyF9iIjBxIrBpk/TY0xNYvhzo1QuQyQxbGxEREVE+jKoH2MrKCo0bN8aBAwdU2xQKBQ4cOIDg4GCtxwQHB6u1B4D9+/ertVeG32vXruHvv/9GhQoViucFmAKFQlq8ok4dKfyamQFjx0rjf3v3ZvglIiIio2dUPcAAMHHiRAwZMgRNmjRBs2bNsHTpUmRkZGDYsGEAgMGDB6Ny5cqYO3cuAGDcuHEICQnB4sWL0a1bN2zevBmnT5/GN998A0AKv3369EFMTAx2794NuVyuGh9cvnx5WFlZGeaFlkZnzwIffACcOCE9btxYusCtcWPD1kVERERUCEYXgMPDw/Hw4UPMnDkTiYmJCAwMxN69e1UXusXHx8PMLKfjukWLFvj555/x6aef4pNPPoGvry9+/fVX1KtXDwBw7949/P777wCAwMBAtec6dOgQ2rZtWyKvq1R7+hSYPRtYuhSQywFHR2llt1GjAHNzQ1dHREREVChGNw+wMTLpeYB37QLGjAHi46XHffpIQbhyZYOWRURERJRbYfKa0fUAk5G4cwcYNw7YuVN6XLUqsGqVNMsDERERUSlmVBfBkRHIzgaWLAH8/KTwa2EBTJ0KXLjA8EtERERlAnuAKcepU9JFbv/8Iz1u0UKa8aF+fcPWRURERKRH7AEmIDVVGucbFCSFXxcX4JtvgCNHGH6JiIiozGEPsCkTAti6FRg/HkhIkLa98w6weDHg7m7Q0oiIiIiKCwOwqbp5Exg9Gti7V3rs6wusXg20b2/YuoiIiIiKGYdAmJqsLGDuXMDfXwq/VlbArFnAv/8y/BIREZFJYA+wKTl6FPjwQ2lGBwAIDZV6fWvXNmxdRERERCWIPcCm4NEj4L33gNatpfDr6gr8+CNw4ADDLxEREZkc9gCXZUIAGzcCkyYBycnStvfeA+bPB8qXN2xtRERERAbCAFxWXbkCjBwJHDokPfb3l+b0bdXKsHURERERGRiHQJQ1L15IF7UFBEjh19ZWuugtJobhl4iIiAjsAS5b/v5b6vW9fl163KULsGoVUK2aYesiIiIiMiLsAS4LkpKkBSw6dpTCb6VKwC+/AH/8wfBLRERE9AoG4NJMoZCWLK5TB/jf/wCZTFrS+NIloG9f6TERERERqeEQiNLq3Dnggw+AqCjpccOGwNq1QNOmhq2LiIiIyMixB7i0ycgApkyRAm9UFODgACxZApw8yfBLREREVADsATY2cjlw5AiQkCCN5W3dGjA3l/bt3i0Ncbh9W3rcqxewbBng5WW4eomIiIhKGQZgY7JjBzBuHHD3bs42Ly9g5kxg3z5g+3ZpW5Uq0uwO3bsbpk4iIiKiUowB2Fjs2AH06SOt3pbb3bvA++9L983NgQkTgNmzAXv7Ei+RiIiIqCxgADYGcrnU8/tq+M3Nykoa89uoUcnVRURERFQG8SI4Y3DkiPqwB22ysoC0tJKph4iIiKgMYwA2BgkJ+m1HRERERHliADYGlSrptx0RERER5YkB2Bi0bi3N9pDXym0yGeDtLbUjIiIioiJhADYG5ubSfL6AZghWPl66NGc+YCIiIiLSGQOwsejVC9i2DahcWX27l5e0vVcvw9RFRERE
VMZwGjRj0qsX0KNH3ivBEREREVGRMQAbG3NzoG1bQ1dBREREVGZxCAQRERERmRQGYCIiIiIyKQzARERERGRSGICJiIiIyKQwABMRERGRSWEAJiIiIiKTwgBMRERERCaFAZiIiIiITAoDMBERERGZFAZgIiIiIjIpXAq5AIQQAIC0tDQDV0JERERE2ihzmjK35YcBuADS09MBAN7e3gauhIiIiIjyk56eDmdn53zbyERBYrKJUygUuH//PhwdHSGTyQBIv2V4e3vjzp07cHJyMnCFpA/8TMsmfq5lDz/TsoefadlU0p+rEALp6enw9PSEmVn+o3zZA1wAZmZm8PLy0rrPycmJP6xlDD/Tsomfa9nDz7Ts4WdaNpXk5/q6nl8lXgRHRERERCaFAZiIiIiITAoDsI6sra0xa9YsWFtbG7oU0hN+pmUTP9eyh59p2cPPtGwy5s+VF8ERERERkUlhDzARERERmRQGYCIiIiIyKQzARERERGRSGICJiIiIyKQwAOtg1apV8PHxgY2NDYKCgnDy5ElDl0T5iIyMxBtvvAFPT0/IZDL8+uuvavuFEJg5cyYqVaoEW1tbdOjQAdeuXVNr8/jxYwwcOBBOTk5wcXHBu+++i6dPn5bgqyCluXPnomnTpnB0dIS7uzt69uyJK1euqLV58eIFRo8ejQoVKsDBwQG9e/dGUlKSWpv4+Hh069YNdnZ2cHd3x+TJk5GdnV2SL4VyWb16NQICAlQT5gcHB+PPP/9U7ednWvrNmzcPMpkM48ePV23j51r6zJ49GzKZTO1Wp04d1f7S8pkyABfSli1bMHHiRMyaNQsxMTFo0KABwsLC8ODBA0OXRnnIyMhAgwYNsGrVKq37FyxYgOXLl2PNmjU4ceIE7O3tERYWhhcvXqjaDBw4EBcuXMD+/fuxe/duREZG4v333y+pl0C5REREYPTo0YiOjsb+/fvx8uVLdOrUCRkZGao2EyZMwK5du7B161ZERETg/v376NWrl2q/XC5Ht27dkJWVhePHj2PDhg1Yv349Zs6caYiXRAC8vLwwb948nDlzBqdPn0a7du3Qo0cPXLhwAQA/09Lu1KlTWLt2LQICAtS283Mtnfz9/ZGQkKC6HT16VLWv1HymggqlWbNmYvTo0arHcrlceHp6irlz5xqwKiooAGLnzp2qxwqFQnh4eIiFCxeqtqWkpAhra2uxadMmIYQQFy9eFADEqVOnVG3+/PNPIZPJxL1790qsdtLuwYMHAoCIiIgQQkifn6Wlpdi6dauqzaVLlwQAERUVJYQQYs+ePcLMzEwkJiaq2qxevVo4OTmJzMzMkn0BlKdy5cqJ7777jp9pKZeeni58fX3F/v37RUhIiBg3bpwQgj+rpdWsWbNEgwYNtO4rTZ8pe4ALISsrC2fOnEGHDh1U28zMzNChQwdERUUZsDLSVVxcHBITE9U+U2dnZwQFBak+06ioKLi4uKBJkyaqNh06dICZmRlOnDhR4jWTutTUVABA+fLlAQBnzpzBy5cv1T7TOnXqoEqVKmqfaf369VGxYkVVm7CwMKSlpal6HMlw5HI5Nm/ejIyMDAQHB/MzLeVGjx6Nbt26qX1+AH9WS7Nr167B09MT1atXx8CBAxEfHw+gdH2mFiX2TGVAcnIy5HK52ocGABUrVsTly5cNVBUVRWJiIgBo/UyV+xITE+Hu7q6238LCAuXLl1e1IcNQKBQYP348WrZsiXr16gGQPi8rKyu4uLiotX31M9X2mSv3kWGcO3cOwcHBePHiBRwcHLBz5074+fkhNjaWn2kptXnzZsTExODUqVMa+/izWjoFBQVh/fr1qF27NhISEjBnzhy0bt0a58+fL1WfKQMwEZVao0ePxvnz59XGn1HpVbt2bcTGxiI1NRXbtm3DkCFDEBERYeiySEd37tzBuHHjsH//ftjY2Bi6HNKTLl26qO4HBAQgKCgIVatWxS+//AJbW1sDVlY4HAJRCK6urjA3N9e4mjEpKQke
Hh4GqoqKQvm55feZenh4aFzkmJ2djcePH/NzN6AxY8Zg9+7dOHToELy8vFTbPTw8kJWVhZSUFLX2r36m2j5z5T4yDCsrK9SsWRONGzfG3Llz0aBBAyxbtoyfaSl15swZPHjwAI0aNYKFhQUsLCwQERGB5cuXw8LCAhUrVuTnWga4uLigVq1auH79eqn6WWUALgQrKys0btwYBw4cUG1TKBQ4cOAAgoODDVgZ6apatWrw8PBQ+0zT0tJw4sQJ1WcaHByMlJQUnDlzRtXm4MGDUCgUCAoKKvGaTZ0QAmPGjMHOnTtx8OBBVKtWTW1/48aNYWlpqfaZXrlyBfHx8Wqf6blz59R+sdm/fz+cnJzg5+dXMi+EXkuhUCAzM5OfaSnVvn17nDt3DrGxsapbkyZNMHDgQNV9fq6l39OnT3Hjxg1UqlSpdP2sltjldmXE5s2bhbW1tVi/fr24ePGieP/994WLi4va1YxkXNLT08U///wj/vnnHwFAfPXVV+Kff/4Rt2/fFkIIMW/ePOHi4iJ+++038e+//4oePXqIatWqiefPn6vO0blzZ9GwYUNx4sQJcfToUeHr6ysGDBhgqJdk0kaOHCmcnZ3F4cOHRUJCgur27NkzVZsPP/xQVKlSRRw8eFCcPn1aBAcHi+DgYNX+7OxsUa9ePdGpUycRGxsr9u7dK9zc3MT06dMN8ZJICDFt2jQREREh4uLixL///iumTZsmZDKZ+Ouvv4QQ/EzLityzQAjBz7U0mjRpkjh8+LCIi4sTx44dEx06dBCurq7iwYMHQojS85kyAOtgxYoVokqVKsLKyko0a9ZMREdHG7okysehQ4cEAI3bkCFDhBDSVGgzZswQFStWFNbW1qJ9+/biypUraud49OiRGDBggHBwcBBOTk5i2LBhIj093QCvhrR9lgDEunXrVG2eP38uRo0aJcqVKyfs7OzEW2+9JRISEtTOc+vWLdGlSxdha2srXF1dxaRJk8TLly9L+NWQ0vDhw0XVqlWFlZWVcHNzE+3bt1eFXyH4mZYVrwZgfq6lT3h4uKhUqZKwsrISlStXFuHh4eL69euq/aXlM5UJIUTJ9TcTERERERkWxwATERERkUlhACYiIiIik8IATEREREQmhQGYiIiIiEwKAzARERERmRQGYCIiIiIyKQzARERERGRSGICJiIrR4cOHIZPJsG3bNkOXUiBJSUno06cPKlSoAJlMhqVLlxbpfD4+Phg6dKheaiMi0hcGYCIq9davXw+ZTAYbGxvcu3dPY3/btm1Rr149A1RW+kyYMAH79u3D9OnTsXHjRnTu3NnQJeXp2bNnmD17Ng4fPmzoUoiolLEwdAFERPqSmZmJefPmYcWKFYYupdQ6ePAgevTogY8//tjQpbzWs2fPMGfOHADSLzlERAXFHmAiKjMCAwPx7bff4v79+4YupcRlZGTo5TwPHjyAi4uLXs5VWunrvSQi48UATERlxieffAK5XI558+bl2+7WrVuQyWRYv369xj6ZTIbZs2erHs+ePRsymQxXr17FO++8A2dnZ7i5uWHGjBkQQuDOnTvo0aMHnJyc4OHhgcWLF2t9Trlcjk8++QQeHh6wt7fHm2++iTt37mi0O3HiBDp37gxnZ2fY2dkhJCQEx44dU2ujrOnixYt4++23Ua5cObRq1Srf13zz5k307dsX5cuXh52dHZo3b44//vhDtV85jEQIgVWrVkEmk0Emk+V7ToVCgWXLlqF+/fqwsbGBm5sbOnfujNOnT+d5jLL2Vymf/9atW6ptp0+fRlhYGFxdXWFra4tq1aph+PDhAKTP0M3NDQAwZ84cVb25P7vLly+jT58+KF++PGxsbNCkSRP8/vvvWp83IiICo0aNgru7O7y8vAAA6enpGD9+PHx8fGBtbQ13d3d07NgRMTEx+b4vRGT8OASCiMqMatWqYfDgwfj2228xbdo0eHp66u3c4eHhqFu3LubNm4c//vgDn3/+OcqXL4+1a9eiXbt2
mD9/Pv73v//h448/RtOmTdGmTRu147/44gvIZDJMnToVDx48wNKlS9GhQwfExsbC1tYWgDT8oEuXLmjcuDFmzZoFMzMzrFu3Du3atcORI0fQrFkztXP27dsXvr6++PLLLyGEyLP2pKQktGjRAs+ePcPYsWNRoUIFbNiwAW+++Sa2bduGt956C23atMHGjRsxaNAgdOzYEYMHD37te/Luu+9i/fr16NKlC9577z1kZ2fjyJEjiI6ORpMmTXR4l3M8ePAAnTp1gpubG6ZNmwYXFxfcunULO3bsAAC4ublh9erVGDlyJN566y306tULABAQEAAAuHDhAlq2bInKlStj2rRpsLe3xy+//IKePXti+/bteOutt9Seb9SoUXBzc8PMmTNVPcAffvghtm3bhjFjxsDPzw+PHj3C0aNHcenSJTRq1KhIr4+IDEwQEZVy69atEwDEqVOnxI0bN4SFhYUYO3asan9ISIjw9/dXPY6LixMAxLp16zTOBUDMmjVL9XjWrFkCgHj//fdV27Kzs4WXl5eQyWRi3rx5qu1PnjwRtra2YsiQIapthw4dEgBE5cqVRVpammr7L7/8IgCIZcuWCSGEUCgUwtfXV4SFhQmFQqFq9+zZM1GtWjXRsWNHjZoGDBhQoPdn/PjxAoA4cuSIalt6erqoVq2a8PHxEXK5XO31jx49+rXnPHjwoACg9j4r5a6/atWqau+HsvZXKT/DuLg4IYQQO3fuVH2meXn48KHG56XUvn17Ub9+ffHixQu1ulq0aCF8fX01nrdVq1YiOztb7RzOzs4Fei+IqPThEAgiKlOqV6+OQYMG4ZtvvkFCQoLezvvee++p7pubm6NJkyYQQuDdd99VbXdxcUHt2rVx8+ZNjeMHDx4MR0dH1eM+ffqgUqVK2LNnDwAgNjYW165dw9tvv41Hjx4hOTkZycnJyMjIQPv27REZGQmFQqF2zg8//LBAte/ZswfNmjVTGybh4OCA999/H7du3cLFixcL9ibksn37dshkMsyaNUtj3+uGThSEchzy7t278fLly0Id+/jxYxw8eBD9+vVDenq66r189OgRwsLCcO3aNY3ZQkaMGAFzc3ONGk6cOGGSY8qJyjoGYCIqcz799FNkZ2e/dixwYVSpUkXtsbOzM2xsbODq6qqx/cmTJxrH+/r6qj2WyWSoWbOmaszrtWvXAABDhgyBm5ub2u27775DZmYmUlNT1c5RrVq1AtV++/Zt1K5dW2N73bp1VfsL68aNG/D09ET58uULfWxBhISEoHfv3pgzZw5cXV3Ro0cPrFu3DpmZma899vr16xBCYMaMGRrvpTKwP3jwQO0Ybe/lggULcP78eXh7e6NZs2aYPXu21l9uiKj04RhgIipzqlevjnfeeQfffPMNpk2bprE/rx5KuVye5zlf7R3MaxuAfMfj5kXZu7tw4UIEBgZqbePg4KD2WDl2uDQp6HuvXDwkOjoau3btwr59+zB8+HAsXrwY0dHRGu9Fbsr38uOPP0ZYWJjWNjVr1lR7rO297NevH1q3bo2dO3fir7/+wsKFCzF//nzs2LEDXbp0yfd1EpFxYwAmojLp008/xU8//YT58+dr7CtXrhwAICUlRW27Lj2hBaXs4VUSQuD69euqi7Zq1KgBAHByckKHDh30+txVq1bFlStXNLZfvnxZtb+watSogX379uHx48eF6gXO/d7nnm4tr/e+efPmaN68Ob744gv8/PPPGDhwIDZv3oz33nsvzzBdvXp1AIClpWWR38tKlSph1KhRGDVqFB48eIBGjRrhiy++YAAmKuU4BIKIyqQaNWrgnXfewdq1a5GYmKi2z8nJCa6uroiMjFTb/vXXXxdbPT/++CPS09NVj7dt24aEhARVkGrcuDFq1KiBRYsW4enTpxrHP3z4UOfn7tq1K06ePImoqCjVtoyMDHzzzTfw8fGBn59foc/Zu3dvCCFUC1Hkll8PuDLo537vMzIysGHDBrV2T5480TiPsmdc
OQzCzs4OgOYvMu7u7mjbti3Wrl2rdRx4Qd5LuVyuMeTE3d0dnp6eBRqGQUTGjT3ARFRm/ec//8HGjRtx5coV+Pv7q+177733MG/ePLz33nto0qQJIiMjcfXq1WKrpXz58mjVqhWGDRuGpKQkLF26FDVr1sSIESMAAGZmZvjuu+/QpUsX+Pv7Y9iwYahcuTLu3buHQ4cOwcnJCbt27dLpuadNm4ZNmzahS5cuGDt2LMqXL48NGzYgLi4O27dvh5lZ4ftCQkNDMWjQICxfvhzXrl1D586doVAocOTIEYSGhmLMmDFaj+vUqROqVKmCd999F5MnT4a5uTl++OEHuLm5IT4+XtVuw4YN+Prrr/HWW2+hRo0aSE9Px7fffgsnJyd07doVgDRswc/PD1u2bEGtWrVQvnx51KtXD/Xq1cOqVavQqlUr1K9fHyNGjED16tWRlJSEqKgo3L17F2fPns339aWnp8PLywt9+vRBgwYN4ODggL///hunTp3Kc65nIio9GICJqMyqWbMm3nnnHY3eRQCYOXMmHj58iG3btuGXX35Bly5d8Oeff8Ld3b1Yavnkk0/w77//Yu7cuUhPT0f79u3x9ddfq3oxAWk536ioKPz3v//FypUr8fTpU3h4eCAoKAgffPCBzs9dsWJFHD9+HFOnTsWKFSvw4sULBAQEYNeuXejWrZvO5123bh0CAgLw/fffY/LkyXB2dkaTJk3QokWLPI+xtLTEzp07MWrUKMyYMQMeHh4YP348ypUrh2HDhqnahYSE4OTJk9i8eTOSkpLg7OyMZs2a4X//+5/aBWvfffcdPvroI0yYMAFZWVmYNWsW6tWrBz8/P5w+fRpz5szB+vXr8ejRI7i7u6Nhw4aYOXPma1+bnZ0dRo0ahb/++gs7duyAQqFAzZo18fXXX2PkyJE6v2dEZBxkQperNYiIiIiISimOASYiIiIik8IATEREREQmhQGYiIiIiEwKAzARERERmRQGYCIiIiIyKQzARPR/7daBAAAAAIAgf+tBLooAYEWAAQBYEWAAAFYEGACAFQEGAGBFgAEAWBFgAABWBBgAgJUAmg7gZZcpejUAAAAASUVORK5CYII=\",\n      \"text/plain\": [\n       \"<Figure size 800x500 with 1 Axes>\"\n      ]\n     },\n     \"metadata\": {},\n     \"output_type\": \"display_data\"\n    }\n   ],\n   \"source\": [\n    \"y = mean_rejection_data\\n\",\n    \"x = y.index.values\\n\",\n    \"\\n\",\n    \"plt.rcParams[\\\"figure.figsize\\\"] = (8, 5)\\n\",\n    \"plt.xlabel(\\\"Number of clusters\\\", fontsize=12)\\n\",\n    \"plt.ylabel(\\\"Rejection rate\\\", fontsize=12)\\n\",\n    \"plt.plot(x, y[\\\"uniform\\\"], label=\\\"Uniform Bootstrap\\\", color=\\\"blue\\\", marker=\\\"o\\\")\\n\",\n    \"plt.plot(x, y[\\\"cluster\\\"], label=\\\"Cluster Bootstrap\\\", color=\\\"red\\\", marker=\\\"o\\\")\\n\",\n    \"plt.legend()\\n\",\n    \"plt.suptitle(\\\"Comparison of Rejection Rates\\\", fontsize=15)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"We can see 
that when the number of clusters is low, it is particularly important to use the cluster robust bootstrap, since rejection with the regular bootstrap is excessive. For a large number of clusters, clustering naturally becomes less important. \"\n   ]\n  }\n ],\n \"metadata\": {\n  \"kernelspec\": {\n   \"display_name\": \"Python 3 (ipykernel)\",\n   \"language\": \"python\",\n   \"name\": \"python3\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 3\n   },\n   \"file_extension\": \".py\",\n   \"mimetype\": \"text/x-python\",\n   \"name\": \"python\",\n   \"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": \"ipython3\",\n   \"version\": \"3.10.8\"\n  },\n  \"vscode\": {\n   \"interpreter\": {\n    \"hash\": \"e8a16b1bdcc80285313db4674a5df2a5a80c75795379c5d9f174c7c712f05b3a\"\n   }\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 4\n}\n"
  },
  {
    "path": "docs/source/estimagic/explanation/cluster_robust_likelihood_inference.md",
    "content": "(robust_likelihood_inference)=\n\n# Robust Likelihood inference\n\n(to be written.)\n\nIn case of an urgent request for this guide, feel free to open an issue\n[here](https://github.com/optimagic-dev/optimagic/issues).\n"
  },
  {
    "path": "docs/source/estimagic/explanation/index.md",
    "content": "# Explanation\n\n```{toctree}\n---\nmaxdepth: 1\n---\nbootstrap_ci\nbootstrap_montecarlo_comparison\ncluster_robust_likelihood_inference\n```\n"
  },
  {
    "path": "docs/source/estimagic/index.md",
    "content": "(estimagic)=\n\n# Estimagic\n\n*estimagic* is a subpackage of *optimagic* that helps you to fit nonlinear statistical\nmodels to data and perform inference on the estimated parameters.\n\nAs a user, you need to code up the objective function that defines the estimator. This\nis either a likelihood (ML) function or a Method of Simulated Moments (MSM) objective\nfunction. Everything else is done by *estimagic*.\n\nEverything else means:\n\n- Optimize your objective function\n- Calculate asymptotic or bootstrapped standard errors and confidence intervals\n- Create publication quality tables\n- Perform sensitivity analysis on MSM models\n\n`````{grid} 1 2 2 2\n---\ngutter: 3\n---\n````{grid-item-card}\n:text-align: center\n:img-top: ../_static/images/light-bulb.svg\n:class-img-top: index-card-image\n:shadow: md\n\n```{button-link} tutorials/index.html\n---\nclick-parent:\nref-type: ref\nclass: stretched-link index-card-link sd-text-primary\n---\nTutorials\n```\n\nNew users of estimagic should read this first.\n\n````\n\n\n\n````{grid-item-card}\n:text-align: center\n:img-top: ../_static/images/books.svg\n:class-img-top: index-card-image\n:shadow: md\n\n```{button-link} explanation/index.html\n---\nclick-parent:\nref-type: ref\nclass: stretched-link index-card-link sd-text-primary\n---\nExplanations\n```\n\nBackground information on key topics central to the package.\n\n````\n\n````{grid-item-card}\n:text-align: center\n:columns: 12\n:img-top: ../_static/images/coding.svg\n:class-img-top: index-card-image\n:shadow: md\n\n```{button-link} reference/index.html\n---\nclick-parent:\nref-type: ref\nclass: stretched-link index-card-link sd-text-primary\n---\nAPI Reference\n```\n\nDetailed description of the estimagic API.\n\n````\n\n\n\n`````\n\n```{toctree}\n---\nhidden: true\nmaxdepth: 1\n---\ntutorials/index\nexplanation/index\nreference/index\n```\n"
  },
  {
    "path": "docs/source/estimagic/reference/index.md",
    "content": "# estimagic API\n\n```{eval-rst}\n.. currentmodule:: estimagic\n```\n\n(estimation)=\n\n## Estimation\n\n```{eval-rst}\n.. dropdown:: estimate_ml\n\n    .. autofunction:: estimate_ml\n\n```\n\n```{eval-rst}\n.. dropdown:: estimate_msm\n\n    .. autofunction:: estimate_msm\n\n```\n\n```{eval-rst}\n.. dropdown:: get_moments_cov\n\n    .. autofunction:: get_moments_cov\n\n```\n\n```{eval-rst}\n.. dropdown:: lollipop_plot\n\n    .. autofunction:: lollipop_plot\n\n```\n\n```{eval-rst}\n.. dropdown:: estimation_table\n\n    .. autofunction:: estimation_table\n\n```\n\n```{eval-rst}\n.. dropdown:: render_html\n\n    .. autofunction:: render_html\n\n```\n\n```{eval-rst}\n.. dropdown:: render_latex\n\n    .. autofunction:: render_latex\n\n```\n\n```{eval-rst}\n.. dropdown:: LikelihoodResult\n\n    .. autoclass:: LikelihoodResult\n        :members:\n\n```\n\n```{eval-rst}\n.. dropdown:: MomentsResult\n\n    .. autoclass:: MomentsResult\n        :members:\n\n\n\n```\n\n(bootstrap)=\n\n## Bootstrap\n\n```{eval-rst}\n.. dropdown:: bootstrap\n\n    .. autofunction:: bootstrap\n```\n\n```{eval-rst}\n.. dropdown::  BootstrapResult\n\n    .. autoclass:: BootstrapResult\n        :members:\n\n\n```\n"
  },
  {
    "path": "docs/source/estimagic/tutorials/bootstrap_overview.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"# Bootstrap Tutorial\\n\",\n    \"\\n\",\n    \"This notebook contains a tutorial on how to use the bootstrap functionality provided by estimagic. We start with the simplest possible example of calculating standard errors and confidence intervals for an OLS estimator without as well as with clustering. Then we progress to more advanced examples.\\n\",\n    \"\\n\",\n    \"In the example here, we will work with the \\\"exercise\\\" example dataset taken from the seaborn library.\\n\",\n    \"\\n\",\n    \"The working example will be a linear regression to investigate the effects of exercise time on pulse.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"import numpy as np\\n\",\n    \"import pandas as pd\\n\",\n    \"import seaborn as sns\\n\",\n    \"import statsmodels.api as sm\\n\",\n    \"\\n\",\n    \"import estimagic as em\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Prepare the dataset\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"df = sns.load_dataset(\\\"exercise\\\", index_col=0)\\n\",\n    \"replacements = {\\\"1 min\\\": 1, \\\"15 min\\\": 15, \\\"30 min\\\": 30}\\n\",\n    \"df = df.replace({\\\"time\\\": replacements})\\n\",\n    \"df[\\\"constant\\\"] = 1\\n\",\n    \"\\n\",\n    \"df.head()\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Doing a very simple bootstrap\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"The first thing we need is a function that calculates the bootstrap outcome, given an empirical or re-sampled dataset. 
The bootstrap outcome is the quantity for which you want to calculate standard errors and confidence intervals. In most applications those are just parameter estimates.\\n\",\n    \"\\n\",\n    \"In our case, we want to regress \\\"pulse\\\" on \\\"time\\\" and a constant. Our outcome function looks as follows:\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"def ols_fit(data):\\n\",\n    \"    y = data[\\\"pulse\\\"]\\n\",\n    \"    x = data[[\\\"constant\\\", \\\"time\\\"]]\\n\",\n    \"    params = sm.OLS(y, x).fit().params\\n\",\n    \"\\n\",\n    \"    return params\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"In general, the user-specified outcome function may return any pytree (e.g. numpy.ndarray, pandas.DataFrame, dict etc.). In the example here, it returns a pandas.Series.\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"Now we are ready to calculate confidence intervals and standard errors.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"results_without_cluster = em.bootstrap(data=df, outcome=ols_fit)\\n\",\n    \"results_without_cluster.ci()\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"results_without_cluster.se()\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"The above function call represents the minimum that a user has to specify, making full use of the default options, such as drawing a 1_000 bootstrap draws, using the \\\"percentile\\\" bootstrap confidence interval, not making use of parallelization, etc.\\n\",\n    \"\\n\",\n    \"If, for example, we wanted to take 10_000 draws, while parallelizing 
on two cores, and using a \\\"bc\\\" type confidence interval, we would simply call the following:\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"results_without_cluster2 = em.bootstrap(\\n\",\n    \"    data=df, outcome=ols_fit, n_draws=10_000, n_cores=2\\n\",\n    \")\\n\",\n    \"\\n\",\n    \"results_without_cluster2.ci(ci_method=\\\"bc\\\")\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Doing a clustered bootstrap\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"In the cluster robust variant of the bootstrap, the original dataset is divided into clusters according to the values of some user-specified variable, and then clusters are drawn uniformly with replacement in order to create the different bootstrap samples. \\n\",\n    \"\\n\",\n    \"In order to use the cluster robust bootstrap, we simply specify which variable to cluster by. In the example we are working with, it seems sensible to cluster on individuals, i.e. on the column \\\"id\\\" of our dataset.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"results_with_cluster = em.bootstrap(data=df, outcome=ols_fit, cluster_by=\\\"id\\\")\\n\",\n    \"\\n\",\n    \"results_with_cluster.se()\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"We can see that the estimated standard errors are indeed of smaller magnitude when we use the cluster robust bootstrap. 
\\n\",\n    \"\\n\",\n    \"Finally, we can compare our bootstrap results to a regression on the full sample using statsmodels' OLS function.\\n\",\n    \"We see that the cluster robust bootstrap yields standard error estimates very close to the ones of the cluster robust regression, while the regular bootstrap seems to overestimate the standard errors of both coefficients.\\n\",\n    \"\\n\",\n    \"**Note**: We would not expect the asymptotic statsmodels standard errors to be exactly the same as the bootstrapped standard errors.\\n\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"y = df[\\\"pulse\\\"]\\n\",\n    \"x = df[[\\\"constant\\\", \\\"time\\\"]]\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"cluster_robust_ols = sm.OLS(y, x).fit(cov_type=\\\"cluster\\\", cov_kwds={\\\"groups\\\": df[\\\"id\\\"]})\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Splitting up the process\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"In many situations, the above procedure is enough. However, sometimes it may be important to split the bootstrapping process up into smaller steps. Examples for such situations are:\\n\",\n    \"\\n\",\n    \"1. You want to look at the bootstrap estimates\\n\",\n    \"2. You want to do a bootstrap with a low number of draws first and add more draws later without duplicated calculations\\n\",\n    \"3. You have more bootstrap outcomes than just the parameters\\n\",\n    \"\\n\",\n    \"### 1. Accessing bootstrap outcomes\\n\",\n    \"\\n\",\n    \"The bootstrap outcomes are stored in the results object you get back when calling the bootstrap function. 
\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"result = em.bootstrap(data=df, outcome=ols_fit, seed=1234)\\n\",\n    \"my_outcomes = result.outcomes\\n\",\n    \"\\n\",\n    \"my_outcomes[:5]\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"To further compare the cluster bootstrap to the uniform bootstrap, let's plot the sampling distribution of the parameters on time. We can again see that the standard error is smaller when we cluster on the subject id. \"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"result_clustered = em.bootstrap(data=df, outcome=ols_fit, seed=1234, cluster_by=\\\"id\\\")\\n\",\n    \"my_outcomes_clustered = result_clustered.outcomes\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"# clustered distribution in blue\\n\",\n    \"sns.histplot(\\n\",\n    \"    pd.DataFrame(my_outcomes_clustered)[\\\"time\\\"], kde=True, stat=\\\"density\\\", linewidth=0\\n\",\n    \")\\n\",\n    \"\\n\",\n    \"# non-clustered distribution in orange\\n\",\n    \"sns.histplot(\\n\",\n    \"    pd.DataFrame(my_outcomes)[\\\"time\\\"],\\n\",\n    \"    kde=True,\\n\",\n    \"    stat=\\\"density\\\",\\n\",\n    \"    linewidth=0,\\n\",\n    \"    color=\\\"orange\\\",\\n\",\n    \");\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"### Calculating standard errors and confidence intervals from existing bootstrap result\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"If you've already run ``bootstrap`` once, you can simply pass the existing result object to a new call of ``bootstrap``. 
Estimagic reuses the existing bootstrap outcomes and now only draws ``n_draws`` - ``n_existing`` outcomes instead of drawing entirely new ``n_draws``. Depending on the ``n_draws`` you specified (this is set to 1_000 by default), this may save considerable computation time. \\n\",\n    \"\\n\",\n    \"We can go on and compute confidence intervals and standard errors, just the same way as before, with several methods (e.g. \\\"percentile\\\" and \\\"bc\\\"), yet without duplicated evaluations of the bootstrap outcome function. \"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"my_results = em.bootstrap(\\n\",\n    \"    data=df,\\n\",\n    \"    outcome=ols_fit,\\n\",\n    \"    existing_result=result,\\n\",\n    \")\\n\",\n    \"my_results.ci(ci_method=\\\"t\\\")\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"You can use this to calculate confidence intervals with several methods (e.g. \\\"percentile\\\" and \\\"bc\\\") without duplicated evaluations of the bootstrap outcome function.\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## 2. Extending bootstrap results with more draws\\n\",\n    \"\\n\",\n    \"It is often the case that, for speed reasons, you set the number of bootstrap draws quite low, so you can look at the results earlier and later decide that you need more draws. \\n\",\n    \"\\n\",\n    \"As an example, we will take an initial sample of 500 draws. We then extend it with another 1500 draws. 
\\n\",\n    \"\\n\",\n    \"*Note*: It is very important to use a different random seed when you calculate the additional outcomes!!!\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"initial_result = em.bootstrap(data=df, outcome=ols_fit, seed=5471, n_draws=500)\\n\",\n    \"initial_result.ci(ci_method=\\\"t\\\")\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"combined_result = em.bootstrap(\\n\",\n    \"    data=df, outcome=ols_fit, existing_result=initial_result, seed=2365, n_draws=2000\\n\",\n    \")\\n\",\n    \"combined_result.ci(ci_method=\\\"t\\\")\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## 3. Using less draws than totally available bootstrap outcomes\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"You have a large sample of bootstrap outcomes but want to compute summary statistics only on a subset? No problem! Estimagic got you covered. You can simply pass any number of ``n_draws`` to your next call of ``bootstrap``, regardless of the size of the existing sample you want to use. We already covered the case where ``n_draws`` > ``n_existing`` above, in which case estimagic draws the remaining bootstrap outcomes for you.\\n\",\n    \"\\n\",\n    \"If ``n_draws`` <= ``n_existing``, estimagic takes a random subset of the existing outcomes - and voilà! 
\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"subset_result = em.bootstrap(\\n\",\n    \"    data=df, outcome=ols_fit, existing_result=combined_result, seed=4632, n_draws=500\\n\",\n    \")\\n\",\n    \"subset_result.ci(ci_method=\\\"t\\\")\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Accessing the bootstrap samples\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"It is also possible to just access the bootstrap samples. You may do so, for example, if you want to calculate your bootstrap outcomes in parallel in a way that is not yet supported by estimagic (e.g. on a large cluster or super-computer).\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"from estimagic.bootstrap_samples import get_bootstrap_samples\\n\",\n    \"\\n\",\n    \"rng = np.random.default_rng(1234)\\n\",\n    \"my_samples = get_bootstrap_samples(data=df, rng=rng)\\n\",\n    \"my_samples[0]\"\n   ]\n  }\n ],\n \"metadata\": {\n  \"kernelspec\": {\n   \"display_name\": \"estimagic\",\n   \"language\": \"python\",\n   \"name\": \"python3\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 3\n   },\n   \"file_extension\": \".py\",\n   \"mimetype\": \"text/x-python\",\n   \"name\": \"python\",\n   \"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": \"ipython3\",\n   \"version\": \"3.10.14\"\n  },\n  \"vscode\": {\n   \"interpreter\": {\n    \"hash\": \"e8a16b1bdcc80285313db4674a5df2a5a80c75795379c5d9f174c7c712f05b3a\"\n   }\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 4\n}\n"
  },
  {
    "path": "docs/source/estimagic/tutorials/estimation_tables_overview.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"# How to generate publication quality tables\\n\"\n   ]\n  },\n  {\n   \"attachments\": {},\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"Estimagic can create publication quality tables of parameter estimates in LaTeX or HTML. It works with the results from `estimate_ml` and `estimate_msm` but also supports statsmodels results out of the box. \\n\",\n    \"\\n\",\n    \"You can get almost limitless flexibility if you split the table generation into two steps. The first generates a DataFrame which you can customize to your liking, the second renders that DataFrame in LaTeX or HTML. If you are interested in this feature, search for \\\"render_inputs\\\" below.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 24,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"# Make necessary imports\\n\",\n    \"import pandas as pd\\n\",\n    \"import statsmodels.formula.api as sm\\n\",\n    \"from IPython.core.display import HTML\\n\",\n    \"\\n\",\n    \"import estimagic as em\\n\",\n    \"from estimagic.config import EXAMPLE_DIR\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Create tables from statsmodels results\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 25,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"df = pd.read_csv(EXAMPLE_DIR / \\\"diabetes.csv\\\", index_col=0)\\n\",\n    \"mod1 = sm.ols(\\\"target ~ Age + Sex\\\", data=df).fit()\\n\",\n    \"mod2 = sm.ols(\\\"target ~ Age + Sex + BMI + ABP\\\", data=df).fit()\\n\",\n    \"models = [mod1, mod2]\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 26,\n   \"metadata\": {},\n   \"outputs\": [\n    {\n     \"data\": {\n      \"text/html\": [\n       \"<table>\\n\",\n       \"  <thead>\\n\",\n       \"    
<tr>\\n\",\n       \"      <th >&nbsp;</th>\\n\",\n       \"      <th colspan=\\\"2\\\">target</th>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >&nbsp;</th>\\n\",\n       \"      <th >(1)</th>\\n\",\n       \"      <th >(2)</th>\\n\",\n       \"    </tr>\\n\",\n       \"  </thead>\\n\",\n       \"  <tbody>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >Intercept</th>\\n\",\n       \"      <td >152.00$^{*** }$</td>\\n\",\n       \"      <td >152.00$^{*** }$</td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th ></th>\\n\",\n       \"      <td >(3.61)</td>\\n\",\n       \"      <td >(2.85)</td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >Age</th>\\n\",\n       \"      <td >301.00$^{*** }$</td>\\n\",\n       \"      <td >37.20$^{ }$</td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th ></th>\\n\",\n       \"      <td >(77.10)</td>\\n\",\n       \"      <td >(64.10)</td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >Sex</th>\\n\",\n       \"      <td >17.40$^{ }$</td>\\n\",\n       \"      <td >-107.00$^{* }$</td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th ></th>\\n\",\n       \"      <td >(77.10)</td>\\n\",\n       \"      <td >(62.10)</td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >BMI</th>\\n\",\n       \"      <td ></td>\\n\",\n       \"      <td >787.00$^{*** }$</td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th ></th>\\n\",\n       \"      <td ></td>\\n\",\n       \"      <td >(65.40)</td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >ABP</th>\\n\",\n       \"      <td ></td>\\n\",\n       \"      <td >417.00$^{*** }$</td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th ></th>\\n\",\n       \"      <td ></td>\\n\",\n       \"      <td 
>(69.50)</td>\\n\",\n       \"    </tr>\\n\",\n       \"  <tr><td colspan=\\\"3\\\" style=\\\"border-bottom: 1px solid black\\\">\\n\",\n       \"            </td></tr>  <tbody>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >Observations</th>\\n\",\n       \"      <td >442</td>\\n\",\n       \"      <td >442</td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >R$^2$</th>\\n\",\n       \"      <td >0.04</td>\\n\",\n       \"      <td >0.40</td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >Adj. R$^2$</th>\\n\",\n       \"      <td >0.03</td>\\n\",\n       \"      <td >0.40</td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >Residual Std. Error</th>\\n\",\n       \"      <td >75.90</td>\\n\",\n       \"      <td >60</td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >F Statistic</th>\\n\",\n       \"      <td >8.06$^{***}$</td>\\n\",\n       \"      <td >72.90$^{***}$</td>\\n\",\n       \"    </tr>\\n\",\n       \"  <tr><td colspan=\\\"3\\\" style=\\\"border-bottom: 1px solid black\\\">\\n\",\n       \"        </td></tr>\\n\",\n       \"        <tr><td style=\\\"text-align: left\\\">Note:</td><td colspan=\\\"2\\\"\\n\",\n       \"        style=\\\"text-align: right\\\"><sup>***</sup>p&lt;0.01; <sup>**</sup>p&lt;0.05; <sup>*</sup>p&lt;0.1 </td></tbody>\\n\",\n       \"</table>\"\n      ],\n      \"text/plain\": [\n       \"<IPython.core.display.HTML object>\"\n      ]\n     },\n     \"execution_count\": 26,\n     \"metadata\": {},\n     \"output_type\": \"execute_result\"\n    }\n   ],\n   \"source\": [\n    \"HTML(em.estimation_table(models, return_type=\\\"html\\\"))\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Adding estimagic results\\n\",\n    \"\\n\",\n    \"`estimate_ml` and `estimate_msm` can both generate summaries of estimation results. 
Those summaries are either DataFrames with the columns `\\\"value\\\"`, `\\\"standard_error\\\"`, `\\\"p_value\\\"` and `\\\"stars\\\"` or pytrees containing such DataFrames. \\n\",\n    \"\\n\",\n    \"For examples, check out our tutorials on [`estimate_ml`](likelihood_overview.ipynb) and [`estimate_msm`](msm_overview.ipynb).\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"Assume we got the following DataFrame from an estimation summary:\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 27,\n   \"metadata\": {},\n   \"outputs\": [\n    {\n     \"data\": {\n      \"text/html\": [\n       \"<div>\\n\",\n       \"<style scoped>\\n\",\n       \"    .dataframe tbody tr th:only-of-type {\\n\",\n       \"        vertical-align: middle;\\n\",\n       \"    }\\n\",\n       \"\\n\",\n       \"    .dataframe tbody tr th {\\n\",\n       \"        vertical-align: top;\\n\",\n       \"    }\\n\",\n       \"\\n\",\n       \"    .dataframe thead th {\\n\",\n       \"        text-align: right;\\n\",\n       \"    }\\n\",\n       \"</style>\\n\",\n       \"<table border=\\\"1\\\" class=\\\"dataframe\\\">\\n\",\n       \"  <thead>\\n\",\n       \"    <tr style=\\\"text-align: right;\\\">\\n\",\n       \"      <th></th>\\n\",\n       \"      <th>value</th>\\n\",\n       \"      <th>standard_error</th>\\n\",\n       \"      <th>p_value</th>\\n\",\n       \"    </tr>\\n\",\n       \"  </thead>\\n\",\n       \"  <tbody>\\n\",\n       \"    <tr>\\n\",\n       \"      <th>Intercept</th>\\n\",\n       \"      <td>142.123</td>\\n\",\n       \"      <td>3.14150</td>\\n\",\n       \"      <td>1.000000e-08</td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th>Age</th>\\n\",\n       \"      <td>51.456</td>\\n\",\n       \"      <td>2.71828</td>\\n\",\n       \"      <td>1.000000e-08</td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th>Sex</th>\\n\",\n       \"      <td>-33.789</td>\\n\",\n       \"      
<td>1.61800</td>\\n\",\n       \"      <td>1.000000e-08</td>\\n\",\n       \"    </tr>\\n\",\n       \"  </tbody>\\n\",\n       \"</table>\\n\",\n       \"</div>\"\n      ],\n      \"text/plain\": [\n       \"             value  standard_error       p_value\\n\",\n       \"Intercept  142.123         3.14150  1.000000e-08\\n\",\n       \"Age         51.456         2.71828  1.000000e-08\\n\",\n       \"Sex        -33.789         1.61800  1.000000e-08\"\n      ]\n     },\n     \"execution_count\": 27,\n     \"metadata\": {},\n     \"output_type\": \"execute_result\"\n    }\n   ],\n   \"source\": [\n    \"params = pd.DataFrame(\\n\",\n    \"    {\\n\",\n    \"        \\\"value\\\": [142.123, 51.456, -33.789],\\n\",\n    \"        \\\"standard_error\\\": [3.1415, 2.71828, 1.6180],\\n\",\n    \"        \\\"p_value\\\": [1e-8] * 3,\\n\",\n    \"    },\\n\",\n    \"    index=[\\\"Intercept\\\", \\\"Age\\\", \\\"Sex\\\"],\\n\",\n    \")\\n\",\n    \"params\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"You can either use just the params DataFrame or a dictionary containing \\\"params\\\" and additional information in `estimation_table`.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 28,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"mod3 = {\\\"params\\\": params, \\\"name\\\": \\\"target\\\", \\\"info\\\": {\\\"n_obs\\\": 445}}\\n\",\n    \"models = [mod1, mod2, mod3]\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 29,\n   \"metadata\": {},\n   \"outputs\": [\n    {\n     \"data\": {\n      \"text/html\": [\n       \"<table>\\n\",\n       \"  <thead>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >&nbsp;</th>\\n\",\n       \"      <th colspan=\\\"3\\\">target</th>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >&nbsp;</th>\\n\",\n       \"      <th >(1)</th>\\n\",\n       \"      <th >(2)</th>\\n\",\n       \"      
<th >(3)</th>\\n\",\n       \"    </tr>\\n\",\n       \"  </thead>\\n\",\n       \"  <tbody>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >Intercept</th>\\n\",\n       \"      <td >152.00$^{*** }$</td>\\n\",\n       \"      <td >152.00$^{*** }$</td>\\n\",\n       \"      <td >142.00$^{*** }$</td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th ></th>\\n\",\n       \"      <td >(3.61)</td>\\n\",\n       \"      <td >(2.85)</td>\\n\",\n       \"      <td >(3.14)</td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >Age</th>\\n\",\n       \"      <td >301.00$^{*** }$</td>\\n\",\n       \"      <td >37.20$^{ }$</td>\\n\",\n       \"      <td >51.50$^{*** }$</td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th ></th>\\n\",\n       \"      <td >(77.10)</td>\\n\",\n       \"      <td >(64.10)</td>\\n\",\n       \"      <td >(2.72)</td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >Sex</th>\\n\",\n       \"      <td >17.40$^{ }$</td>\\n\",\n       \"      <td >-107.00$^{* }$</td>\\n\",\n       \"      <td >-33.80$^{*** }$</td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th ></th>\\n\",\n       \"      <td >(77.10)</td>\\n\",\n       \"      <td >(62.10)</td>\\n\",\n       \"      <td >(1.62)</td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >BMI</th>\\n\",\n       \"      <td ></td>\\n\",\n       \"      <td >787.00$^{*** }$</td>\\n\",\n       \"      <td ></td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th ></th>\\n\",\n       \"      <td ></td>\\n\",\n       \"      <td >(65.40)</td>\\n\",\n       \"      <td ></td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >ABP</th>\\n\",\n       \"      <td ></td>\\n\",\n       \"      <td >417.00$^{*** }$</td>\\n\",\n       \"      <td ></td>\\n\",\n       \"    </tr>\\n\",\n       \"   
 <tr>\\n\",\n       \"      <th ></th>\\n\",\n       \"      <td ></td>\\n\",\n       \"      <td >(69.50)</td>\\n\",\n       \"      <td ></td>\\n\",\n       \"    </tr>\\n\",\n       \"  <tr><td colspan=\\\"4\\\" style=\\\"border-bottom: 1px solid black\\\">\\n\",\n       \"            </td></tr>  <tbody>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >Observations</th>\\n\",\n       \"      <td >442</td>\\n\",\n       \"      <td >442</td>\\n\",\n       \"      <td >445</td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >R$^2$</th>\\n\",\n       \"      <td >0.04</td>\\n\",\n       \"      <td >0.40</td>\\n\",\n       \"      <td ></td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >Adj. R$^2$</th>\\n\",\n       \"      <td >0.03</td>\\n\",\n       \"      <td >0.40</td>\\n\",\n       \"      <td ></td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >Residual Std. Error</th>\\n\",\n       \"      <td >75.90</td>\\n\",\n       \"      <td >60</td>\\n\",\n       \"      <td ></td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >F Statistic</th>\\n\",\n       \"      <td >8.06$^{***}$</td>\\n\",\n       \"      <td >72.90$^{***}$</td>\\n\",\n       \"      <td ></td>\\n\",\n       \"    </tr>\\n\",\n       \"  <tr><td colspan=\\\"4\\\" style=\\\"border-bottom: 1px solid black\\\">\\n\",\n       \"        </td></tr>\\n\",\n       \"        <tr><td style=\\\"text-align: left\\\">Note:</td><td colspan=\\\"3\\\"\\n\",\n       \"        style=\\\"text-align: right\\\"><sup>***</sup>p&lt;0.01; <sup>**</sup>p&lt;0.05; <sup>*</sup>p&lt;0.1 </td></tbody>\\n\",\n       \"</table>\"\n      ],\n      \"text/plain\": [\n       \"<IPython.core.display.HTML object>\"\n      ]\n     },\n     \"execution_count\": 29,\n     \"metadata\": {},\n     \"output_type\": \"execute_result\"\n    }\n   ],\n   \"source\": [\n    \"HTML(em.estimation_table(models, 
return_type=\\\"html\\\"))\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Selecting the right return_type\\n\",\n    \"\\n\",\n    \"The following return types are supported:\\n\",\n    \"- `\\\"latex\\\"`: Returns a string that you can save and import into a LaTeX document\\n\",\n    \"- `\\\"html\\\"`: Returns a string that you can save and import into a HTML document.\\n\",\n    \"- `\\\"render_inputs\\\"`: Returns a dictionary with the following entries:\\n\",\n    \"    - `\\\"body\\\"`: A DataFrame containing the main table\\n\",\n    \"    - `\\\"footer\\\"`: A DataFrame containing the statisics\\n\",\n    \"    - other stuff that you should ignore\\n\",\n    \"- `\\\"dataframe\\\"`: Returns a DataFrame you can look at in a notebook\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Use `render_inputs` for maximum flexibility\\n\",\n    \"\\n\",\n    \"As an example, let's assume we want to remove a few rows from the footer.\\n\",\n    \"\\n\",\n    \"Let's first look at the footer we get from `estimation_table`\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 30,\n   \"metadata\": {},\n   \"outputs\": [\n    {\n     \"data\": {\n      \"text/html\": [\n       \"<div>\\n\",\n       \"<style scoped>\\n\",\n       \"    .dataframe tbody tr th:only-of-type {\\n\",\n       \"        vertical-align: middle;\\n\",\n       \"    }\\n\",\n       \"\\n\",\n       \"    .dataframe tbody tr th {\\n\",\n       \"        vertical-align: top;\\n\",\n       \"    }\\n\",\n       \"\\n\",\n       \"    .dataframe thead tr th {\\n\",\n       \"        text-align: left;\\n\",\n       \"    }\\n\",\n       \"</style>\\n\",\n       \"<table border=\\\"1\\\" class=\\\"dataframe\\\">\\n\",\n       \"  <thead>\\n\",\n       \"    <tr>\\n\",\n       \"      <th></th>\\n\",\n       \"      <th colspan=\\\"3\\\" halign=\\\"left\\\">target</th>\\n\",\n      
 \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th></th>\\n\",\n       \"      <th>(1)</th>\\n\",\n       \"      <th>(2)</th>\\n\",\n       \"      <th>(3)</th>\\n\",\n       \"    </tr>\\n\",\n       \"  </thead>\\n\",\n       \"  <tbody>\\n\",\n       \"    <tr>\\n\",\n       \"      <th>Observations</th>\\n\",\n       \"      <td>442</td>\\n\",\n       \"      <td>442</td>\\n\",\n       \"      <td>445</td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th>R$^2$</th>\\n\",\n       \"      <td>0.04</td>\\n\",\n       \"      <td>0.40</td>\\n\",\n       \"      <td></td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th>Adj. R$^2$</th>\\n\",\n       \"      <td>0.03</td>\\n\",\n       \"      <td>0.40</td>\\n\",\n       \"      <td></td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th>Residual Std. Error</th>\\n\",\n       \"      <td>75.90</td>\\n\",\n       \"      <td>60</td>\\n\",\n       \"      <td></td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th>F Statistic</th>\\n\",\n       \"      <td>8.06$^{***}$</td>\\n\",\n       \"      <td>72.90$^{***}$</td>\\n\",\n       \"      <td></td>\\n\",\n       \"    </tr>\\n\",\n       \"  </tbody>\\n\",\n       \"</table>\\n\",\n       \"</div>\"\n      ],\n      \"text/plain\": [\n       \"                           target                    \\n\",\n       \"                              (1)            (2)  (3)\\n\",\n       \"Observations                  442            442  445\\n\",\n       \"R$^2$                        0.04           0.40     \\n\",\n       \"Adj. R$^2$                   0.03           0.40     \\n\",\n       \"Residual Std. 
Error         75.90             60     \\n\",\n       \"F Statistic          8.06$^{***}$  72.90$^{***}$     \"\n      ]\n     },\n     \"execution_count\": 30,\n     \"metadata\": {},\n     \"output_type\": \"execute_result\"\n    }\n   ],\n   \"source\": [\n    \"render_inputs = em.estimation_table(models, return_type=\\\"render_inputs\\\")\\n\",\n    \"footer = render_inputs[\\\"footer\\\"]\\n\",\n    \"footer\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"Now we can remove the rows we don't need and render it to html. \"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 31,\n   \"metadata\": {},\n   \"outputs\": [\n    {\n     \"data\": {\n      \"text/html\": [\n       \"<table>\\n\",\n       \"  <thead>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >&nbsp;</th>\\n\",\n       \"      <th colspan=\\\"3\\\">target</th>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >&nbsp;</th>\\n\",\n       \"      <th >(1)</th>\\n\",\n       \"      <th >(2)</th>\\n\",\n       \"      <th >(3)</th>\\n\",\n       \"    </tr>\\n\",\n       \"  </thead>\\n\",\n       \"  <tbody>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >Intercept</th>\\n\",\n       \"      <td >152.00$^{*** }$</td>\\n\",\n       \"      <td >152.00$^{*** }$</td>\\n\",\n       \"      <td >142.00$^{*** }$</td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th ></th>\\n\",\n       \"      <td >(3.61)</td>\\n\",\n       \"      <td >(2.85)</td>\\n\",\n       \"      <td >(3.14)</td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >Age</th>\\n\",\n       \"      <td >301.00$^{*** }$</td>\\n\",\n       \"      <td >37.20$^{ }$</td>\\n\",\n       \"      <td >51.50$^{*** }$</td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th ></th>\\n\",\n       \"      <td >(77.10)</td>\\n\",\n       \"      <td >(64.10)</td>\\n\",\n  
     \"      <td >(2.72)</td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >Sex</th>\\n\",\n       \"      <td >17.40$^{ }$</td>\\n\",\n       \"      <td >-107.00$^{* }$</td>\\n\",\n       \"      <td >-33.80$^{*** }$</td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th ></th>\\n\",\n       \"      <td >(77.10)</td>\\n\",\n       \"      <td >(62.10)</td>\\n\",\n       \"      <td >(1.62)</td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >BMI</th>\\n\",\n       \"      <td ></td>\\n\",\n       \"      <td >787.00$^{*** }$</td>\\n\",\n       \"      <td ></td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th ></th>\\n\",\n       \"      <td ></td>\\n\",\n       \"      <td >(65.40)</td>\\n\",\n       \"      <td ></td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >ABP</th>\\n\",\n       \"      <td ></td>\\n\",\n       \"      <td >417.00$^{*** }$</td>\\n\",\n       \"      <td ></td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th ></th>\\n\",\n       \"      <td ></td>\\n\",\n       \"      <td >(69.50)</td>\\n\",\n       \"      <td ></td>\\n\",\n       \"    </tr>\\n\",\n       \"  <tr><td colspan=\\\"4\\\" style=\\\"border-bottom: 1px solid black\\\">\\n\",\n       \"            </td></tr>  <tbody>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >R$^2$</th>\\n\",\n       \"      <td >0.04</td>\\n\",\n       \"      <td >0.40</td>\\n\",\n       \"      <td ></td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >Observations</th>\\n\",\n       \"      <td >442</td>\\n\",\n       \"      <td >442</td>\\n\",\n       \"      <td >445</td>\\n\",\n       \"    </tr>\\n\",\n       \"  <tr><td colspan=\\\"4\\\" style=\\\"border-bottom: 1px solid black\\\">\\n\",\n       \"        </td></tr>\\n\",\n       \"        <tr><td style=\\\"text-align: 
left\\\">Note:</td><td colspan=\\\"3\\\"\\n\",\n       \"        style=\\\"text-align: right\\\"><sup>***</sup>p&lt;0.01; <sup>**</sup>p&lt;0.05; <sup>*</sup>p&lt;0.1 </td></tbody>\\n\",\n       \"</table>\"\n      ],\n      \"text/plain\": [\n       \"<IPython.core.display.HTML object>\"\n      ]\n     },\n     \"execution_count\": 31,\n     \"metadata\": {},\n     \"output_type\": \"execute_result\"\n    }\n   ],\n   \"source\": [\n    \"render_inputs[\\\"footer\\\"] = footer.loc[[\\\"R$^2$\\\", \\\"Observations\\\"]]\\n\",\n    \"HTML(em.render_html(**render_inputs))\"\n   ]\n  },\n  {\n   \"attachments\": {},\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"Using this 2-step-procedure, we can also easily add additional rows to the footer.\\n\",\n    \"\\n\",\n    \"Note that we add the row using `.loc[(\\\"Statsmodels\\\", )]` since the index of `render_inputs[\\\"footer\\\"]` is a MultiIndex.\\n\",\n    \"\\n\",\n    \"\\n\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 32,\n   \"metadata\": {},\n   \"outputs\": [\n    {\n     \"data\": {\n      \"text/html\": [\n       \"<table>\\n\",\n       \"  <thead>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >&nbsp;</th>\\n\",\n       \"      <th colspan=\\\"3\\\">target</th>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >&nbsp;</th>\\n\",\n       \"      <th >(1)</th>\\n\",\n       \"      <th >(2)</th>\\n\",\n       \"      <th >(3)</th>\\n\",\n       \"    </tr>\\n\",\n       \"  </thead>\\n\",\n       \"  <tbody>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >Intercept</th>\\n\",\n       \"      <td >152.00$^{*** }$</td>\\n\",\n       \"      <td >152.00$^{*** }$</td>\\n\",\n       \"      <td >142.00$^{*** }$</td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th ></th>\\n\",\n       \"      <td >(3.61)</td>\\n\",\n       \"      <td >(2.85)</td>\\n\",\n       \"      <td 
>(3.14)</td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >Age</th>\\n\",\n       \"      <td >301.00$^{*** }$</td>\\n\",\n       \"      <td >37.20$^{ }$</td>\\n\",\n       \"      <td >51.50$^{*** }$</td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th ></th>\\n\",\n       \"      <td >(77.10)</td>\\n\",\n       \"      <td >(64.10)</td>\\n\",\n       \"      <td >(2.72)</td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >Sex</th>\\n\",\n       \"      <td >17.40$^{ }$</td>\\n\",\n       \"      <td >-107.00$^{* }$</td>\\n\",\n       \"      <td >-33.80$^{*** }$</td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th ></th>\\n\",\n       \"      <td >(77.10)</td>\\n\",\n       \"      <td >(62.10)</td>\\n\",\n       \"      <td >(1.62)</td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >BMI</th>\\n\",\n       \"      <td ></td>\\n\",\n       \"      <td >787.00$^{*** }$</td>\\n\",\n       \"      <td ></td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th ></th>\\n\",\n       \"      <td ></td>\\n\",\n       \"      <td >(65.40)</td>\\n\",\n       \"      <td ></td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >ABP</th>\\n\",\n       \"      <td ></td>\\n\",\n       \"      <td >417.00$^{*** }$</td>\\n\",\n       \"      <td ></td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th ></th>\\n\",\n       \"      <td ></td>\\n\",\n       \"      <td >(69.50)</td>\\n\",\n       \"      <td ></td>\\n\",\n       \"    </tr>\\n\",\n       \"  <tr><td colspan=\\\"4\\\" style=\\\"border-bottom: 1px solid black\\\">\\n\",\n       \"            </td></tr>  <tbody>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >R$^2$</th>\\n\",\n       \"      <td >0.04</td>\\n\",\n       \"      <td >0.40</td>\\n\",\n       \"      <td ></td>\\n\",\n      
 \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >Observations</th>\\n\",\n       \"      <td >442</td>\\n\",\n       \"      <td >442</td>\\n\",\n       \"      <td >445</td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >Statsmodels</th>\\n\",\n       \"      <td >Yes</td>\\n\",\n       \"      <td >Yes</td>\\n\",\n       \"      <td >No</td>\\n\",\n       \"    </tr>\\n\",\n       \"  <tr><td colspan=\\\"4\\\" style=\\\"border-bottom: 1px solid black\\\">\\n\",\n       \"        </td></tr>\\n\",\n       \"        <tr><td style=\\\"text-align: left\\\">Note:</td><td colspan=\\\"3\\\"\\n\",\n       \"        style=\\\"text-align: right\\\"><sup>***</sup>p&lt;0.01; <sup>**</sup>p&lt;0.05; <sup>*</sup>p&lt;0.1 </td></tbody>\\n\",\n       \"</table>\"\n      ],\n      \"text/plain\": [\n       \"<IPython.core.display.HTML object>\"\n      ]\n     },\n     \"execution_count\": 32,\n     \"metadata\": {},\n     \"output_type\": \"execute_result\"\n    }\n   ],\n   \"source\": [\n    \"render_inputs[\\\"footer\\\"].loc[(\\\"Statsmodels\\\",)] = [\\\"Yes\\\"] * 2 + [\\\"No\\\"]\\n\",\n    \"HTML(em.render_html(**render_inputs))\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Advanced options \\n\",\n    \"\\n\",\n    \"Below is an example that demonstrates how to use advanced options to customize your table.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 33,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"stats_dict = {\\n\",\n    \"    \\\"n_obs\\\": \\\"Observations\\\",\\n\",\n    \"    \\\"rsquared\\\": \\\"R$^2$\\\",\\n\",\n    \"    \\\"rsquared_adj\\\": \\\"Adj. R$^2$\\\",\\n\",\n    \"    \\\"resid_std_err\\\": \\\"Residual Std. 
Error\\\",\\n\",\n    \"    \\\"fvalue\\\": \\\"F Statistic\\\",\\n\",\n    \"    \\\"show_dof\\\": True,\\n\",\n    \"}\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 34,\n   \"metadata\": {},\n   \"outputs\": [\n    {\n     \"data\": {\n      \"text/html\": [\n       \"<table>\\n\",\n       \"  <caption>Table Latex(render_latex(**render_inputs))Title</caption>\\n\",\n       \"  <thead>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >&nbsp;</th>\\n\",\n       \"      <th colspan=\\\"3\\\">Dependent variable: target</th>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >&nbsp;</th>\\n\",\n       \"      <th >Model 1</th>\\n\",\n       \"      <th >Model 2</th>\\n\",\n       \"      <th >Model 3</th>\\n\",\n       \"    </tr>\\n\",\n       \"  </thead>\\n\",\n       \"  <tbody>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >Constant</th>\\n\",\n       \"      <td >152.133$^{*** }$</td>\\n\",\n       \"      <td >152.133$^{*** }$</td>\\n\",\n       \"      <td >142.123$^{*** }$</td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th ></th>\\n\",\n       \"      <td >(3.610)</td>\\n\",\n       \"      <td >(2.853)</td>\\n\",\n       \"      <td >(3.142)</td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >Age</th>\\n\",\n       \"      <td >301.161$^{*** }$</td>\\n\",\n       \"      <td >37.241$^{ }$</td>\\n\",\n       \"      <td >51.456$^{*** }$</td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th ></th>\\n\",\n       \"      <td >(77.060)</td>\\n\",\n       \"      <td >(64.117)</td>\\n\",\n       \"      <td >(2.718)</td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >Gender</th>\\n\",\n       \"      <td >17.392$^{ }$</td>\\n\",\n       \"      <td >-106.578$^{* }$</td>\\n\",\n       \"      <td >-33.789$^{*** }$</td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"   
   <th ></th>\\n\",\n       \"      <td >(77.060)</td>\\n\",\n       \"      <td >(62.125)</td>\\n\",\n       \"      <td >(1.618)</td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >BMI</th>\\n\",\n       \"      <td ></td>\\n\",\n       \"      <td >787.179$^{*** }$</td>\\n\",\n       \"      <td ></td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th ></th>\\n\",\n       \"      <td ></td>\\n\",\n       \"      <td >(65.424)</td>\\n\",\n       \"      <td ></td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >ABP</th>\\n\",\n       \"      <td ></td>\\n\",\n       \"      <td >416.674$^{*** }$</td>\\n\",\n       \"      <td ></td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th ></th>\\n\",\n       \"      <td ></td>\\n\",\n       \"      <td >(69.495)</td>\\n\",\n       \"      <td ></td>\\n\",\n       \"    </tr>\\n\",\n       \"  <tr><td colspan=\\\"4\\\" style=\\\"border-bottom: 1px solid black\\\">\\n\",\n       \"            </td></tr>  <tbody>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >Observations</th>\\n\",\n       \"      <td >442</td>\\n\",\n       \"      <td >442</td>\\n\",\n       \"      <td >445</td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >R$^2$</th>\\n\",\n       \"      <td >0.035</td>\\n\",\n       \"      <td >0.400</td>\\n\",\n       \"      <td ></td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >Adj. R$^2$</th>\\n\",\n       \"      <td >0.031</td>\\n\",\n       \"      <td >0.395</td>\\n\",\n       \"      <td ></td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >Residual Std. 
Error</th>\\n\",\n       \"      <td >75.888(df=439)</td>\\n\",\n       \"      <td >59.976(df=437)</td>\\n\",\n       \"      <td ></td>\\n\",\n       \"    </tr>\\n\",\n       \"    <tr>\\n\",\n       \"      <th >F Statistic</th>\\n\",\n       \"      <td >8.059$^{***}$(df=2;439)</td>\\n\",\n       \"      <td >72.913$^{***}$(df=4;437)</td>\\n\",\n       \"      <td ></td>\\n\",\n       \"    </tr>\\n\",\n       \"  <tr><td colspan=\\\"4\\\" style=\\\"border-bottom: 1px solid black\\\">\\n\",\n       \"        </td></tr>\\n\",\n       \"        <tr><td style=\\\"text-align: left\\\">Note:</td><td colspan=\\\"3\\\"\\n\",\n       \"        style=\\\"text-align: right\\\"><sup>***</sup>p&lt;0.01; <sup>**</sup>p&lt;0.05; <sup>*</sup>p&lt;0.1 </td></tbody>\\n\",\n       \"</table>\"\n      ],\n      \"text/plain\": [\n       \"<IPython.core.display.HTML object>\"\n      ]\n     },\n     \"execution_count\": 34,\n     \"metadata\": {},\n     \"output_type\": \"execute_result\"\n    }\n   ],\n   \"source\": [\n    \"HTML(\\n\",\n    \"    em.estimation_table(\\n\",\n    \"        models=models,\\n\",\n    \"        return_type=\\\"html\\\",\\n\",\n    \"        custom_param_names={\\\"Intercept\\\": \\\"Constant\\\", \\\"Sex\\\": \\\"Gender\\\"},\\n\",\n    \"        custom_col_names=[\\\"Model 1\\\", \\\"Model 2\\\", \\\"Model 3\\\"],\\n\",\n    \"        custom_col_groups={\\\"target\\\": \\\"Dependent variable: target\\\"},\\n\",\n    \"        render_options={\\\"caption\\\": \\\"Table Latex(render_latex(**render_inputs))Title\\\"},\\n\",\n    \"        stats_options=stats_dict,\\n\",\n    \"        number_format=\\\"{0:.3f}\\\",\\n\",\n    \"    )\\n\",\n    \")\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"***Note 1***: You can pass a dictionary for `custom_col_names` to rename specific columns, e.g. 
`custom_col_names={\\\"(1)\\\": \\\"Model 1\\\"}`, leaving names of the other columns at default values.\\n\",\n    \"\\n\",\n    \"***Note 2***: In addition to renaming the default column groups by passing a dictionary for `custom_col_groups`, you can also pass a list to create custom column groups, e.g. `custom_col_groups=[\\\"target\\\", \\\"target\\\", \\\"not target\\\"]` will group the first two columns under the name `\\\"target\\\"`, and the last column under the name `\\\"not target\\\"`.\\n\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## LaTeX peculiarities\\n\",\n    \"\\n\",\n    \"By default, tables in `render_latex` are structured in compliance with the `siunitx` package. This is done by setting column formats to `S` in the default rendering options defined internally. \\n\",\n    \"To get nicely formatted tables, you need to add the following to your LaTeX preamble:\\n\",\n    \"```latex\\n\",\n    \"\\\\usepackage{siunitx}\\n\",\n    \"\\\\sisetup{\\n\",\n    \"        input-symbols            = (),\\n\",\n    \"        table-align-text-post    = false,\\n\",\n    \"        group-digits             = false,\\n\",\n    \"    }\\n\",\n    \"```\\n\",\n    \"The first line in `\\\\sisetup` is necessary if you have parentheses in your table cells (e.g. when displaying standard errors or confidence intervals), otherwise LaTeX will raise an error.\\n\",\n    \"\\n\",\n    \"The second argument is necessary so that there is no spacing between the significance stars and the numerical values.\\n\",\n    \"\\n\",\n    \"The third line prevents digits in numbers being grouped into groups of threes, which is the default behaviour.\\n\",\n    \"This line is optional, but recommended.\\n\",\n    \"\\n\",\n    \"By default, whenever calling `render_latex`, a warning will be raised about this. 
To silence the warning, set `siunitx_warning=False` in the relevant function calls (when calling `estimation_table` with `return_type=tex` or when calling `render_latex`)\\n\",\n    \"\\n\",\n    \"If you don't want to generate `siunitx` style tables, you can pass `render_options={\\\"column_format\\\":<desired formats>}` to your function calls. \\n\",\n    \"\\n\",\n    \"You can influence the format of the output table with keyword arguments passed via `render_options`. For the list of supported keyword arguments see [the documentation of pandas.io.formats.style.Styler.to_latex](https://pandas.pydata.org/docs/reference/api/pandas.io.formats.style.Styler.to_latex.html)\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"By default, `siunitx` will center table columns around the decimal point. This means that if there is a number in a column that has a comparatively large number of symbols after the decimal point (e.g. when there is a number with scientific notation), there will be extra spacing between that column and the preceding one, since there is as much space reserved for the column before the decimal point, as there is after it. \\n\",\n    \"\\n\",\n    \"You can adjust the spacing between columns, by using the format `S[table-format =x.y]` for the numeric columns, where `x` and `y` control the space pre and post the decimal point, respectively. We further show a case with the described problem and the solution to that problem. 
For number with scientific notations, use `S[table-format=x.yez]`, where `y` reserves the space for the exponential, and `z` reserves the space for the column after the decimal point.\\n\",\n    \"\\n\",\n    \"Compiling the following LaTex table will result in extra spacing between columns `(2)` and `(3)`:\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"```latex\\n\",\n    \"\\n\",\n    \"\\\\begin{tabular}{lSSS}\\n\",\n    \"  \\\\toprule\\n\",\n    \"  & \\\\multicolumn{3}{c}{target} \\\\\\\\\\n\",\n    \"  \\\\cmidrule(lr){2-4}\\n\",\n    \"\\n\",\n    \"               & (1)                     & (2)                     & (3)                     \\\\\\\\\\n\",\n    \"  \\\\midrule\\n\",\n    \"  Intercept    & 152.00$^{*** }$         & 152.00$^{*** }$         & 1.43e08$^{*** }$        \\\\\\\\\\n\",\n    \"               & (3.61)                  & (2.85)                  & (3.14)                  \\\\\\\\\\n\",\n    \"  Age          & 301.00$^{*** }$         & 37.20$^{ }$             & 51.50$^{*** }$          \\\\\\\\\\n\",\n    \"               & (77.10)                 & (64.10)                 & (2.72)                  \\\\\\\\\\n\",\n    \"  Sex          & 17.40$^{ }$             & -107.00$^{* }$          & -33.80$^{*** }$         \\\\\\\\\\n\",\n    \"               & (77.10)                 & (62.10)                 & (1.62)                  \\\\\\\\\\n\",\n    \"  BMI          &                         & 787.00$^{*** }$         &                         \\\\\\\\\\n\",\n    \"               &                         & (65.40)                 &                         \\\\\\\\\\n\",\n    \"  ABP          &                         & 417.00$^{*** }$         &                         \\\\\\\\\\n\",\n    \"               &                         & (69.50)                 &                         \\\\\\\\\\n\",\n    \"  \\\\midrule\\n\",\n    \"  R$^2$        & 0.04                    & 0.40                  
  &                         \\\\\\\\\\n\",\n    \"  Observations & \\\\multicolumn{1}{c}{442} & \\\\multicolumn{1}{c}{442} & \\\\multicolumn{1}{c}{445} \\\\\\\\\\n\",\n    \"  \\\\midrule\\n\",\n    \"  \\\\textit{Note:} & \\\\multicolumn{3}{r}{$^{***}$p$<$0.01;$^{**}$p$<$0.05;$^{*}$p$<$0.1} \\\\\\\\\\n\",\n    \"  \\\\bottomrule\\n\",\n    \"\\\\end{tabular}\\n\",\n    \"```\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"We can get a nicer output by setting the format of the last column to, for example, `S[table-format=3.2e4]`, via passing `render_options={'column_format':'lSSS[table-format = 3.2e4]'}`. The resulting table of `render_latex` will look like the following:\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"```latex\\n\",\n    \"\\n\",\n    \"\\\\begin{tabular}{lSSS[table-format = 3.2e4]}\\n\",\n    \"  \\\\toprule\\n\",\n    \"  & \\\\multicolumn{3}{c}{target} \\\\\\\\\\n\",\n    \"  \\\\cmidrule(lr){2-4}\\n\",\n    \"\\n\",\n    \"               & (1)                     & (2)                     & (3)                     \\\\\\\\\\n\",\n    \"  \\\\midrule\\n\",\n    \"  Intercept    & 152.00$^{*** }$         & 152.00$^{*** }$         & 1.43e08$^{*** }$        \\\\\\\\\\n\",\n    \"               & (3.61)                  & (2.85)                  & (3.14)                  \\\\\\\\\\n\",\n    \"  Age          & 301.00$^{*** }$         & 37.20$^{ }$             & 51.50$^{*** }$          \\\\\\\\\\n\",\n    \"               & (77.10)                 & (64.10)                 & (2.72)                  \\\\\\\\\\n\",\n    \"  Sex          & 17.40$^{ }$             & -107.00$^{* }$          & -33.80$^{*** }$         \\\\\\\\\\n\",\n    \"               & (77.10)                 & (62.10)                 & (1.62)                  \\\\\\\\\\n\",\n    \"  BMI          &                         & 787.00$^{*** }$         &                         
\\\\\\\\\\n\",\n    \"               &                         & (65.40)                 &                         \\\\\\\\\\n\",\n    \"  ABP          &                         & 417.00$^{*** }$         &                         \\\\\\\\\\n\",\n    \"               &                         & (69.50)                 &                         \\\\\\\\\\n\",\n    \"  \\\\midrule\\n\",\n    \"  R$^2$        & 0.04                    & 0.40                    &                         \\\\\\\\\\n\",\n    \"  Observations & \\\\multicolumn{1}{c}{442} & \\\\multicolumn{1}{c}{442} & \\\\multicolumn{1}{c}{445} \\\\\\\\\\n\",\n    \"  \\\\midrule\\n\",\n    \"  \\\\textit{Note:} & \\\\multicolumn{3}{r}{$^{***}$p$<$0.01;$^{**}$p$<$0.05;$^{*}$p$<$0.1} \\\\\\\\\\n\",\n    \"  \\\\bottomrule\\n\",\n    \"\\\\end{tabular}\\n\",\n    \"```\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": []\n  }\n ],\n \"metadata\": {\n  \"@webio\": {\n   \"lastCommId\": null,\n   \"lastKernelId\": null\n  },\n  \"interpreter\": {\n   \"hash\": \"5cdb9867252288f10687117449de6ad870b49795ca695c868016dc0022895cce\"\n  },\n  \"kernelspec\": {\n   \"display_name\": \"Python 3 (ipykernel)\",\n   \"language\": \"python\",\n   \"name\": \"python3\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 3\n   },\n   \"file_extension\": \".py\",\n   \"mimetype\": \"text/x-python\",\n   \"name\": \"python\",\n   \"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": \"ipython3\",\n   \"version\": \"3.10.10\"\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 4\n}\n"
  },
  {
    "path": "docs/source/estimagic/tutorials/index.md",
    "content": "# Estimagic Tutorials\n\nEstimagic has functions to estimate the parameters of maximum likelihood or simulation\nmodels. You provide a likelihood or moment simulation function. Estimagic produces\nparameter estimates and standard errors in a format that can be easily used to create\npublication quality latex or html tables.\n\n```{toctree}\n---\nmaxdepth: 1\n---\nlikelihood_overview\nmsm_overview\nbootstrap_overview\nestimation_tables_overview\n```\n"
  },
  {
    "path": "docs/source/estimagic/tutorials/likelihood_overview.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"# Likelihood estimation\\n\",\n    \"\\n\",\n    \"This notebook shows how to do a simple maximum likelihood (ml) estimation with estimagic. As an illustrating example, we implement a simple linear regression model. This is the same example model used in the method of moments notebook.\\n\",\n    \"\\n\",\n    \"We proceed in 4 steps:\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"1. Create a data generating process\\n\",\n    \"2. Set up a likelihood function\\n\",\n    \"3. Maximize the likelihood function\\n\",\n    \"4. Calculate standard errors, confidence intervals, and p-values\\n\",\n    \"\\n\",\n    \"The user only needs to do step 1 and 2. The rest is done by `estimate_ml`. \\n\",\n    \"\\n\",\n    \"To be very clear: Estimagic is not a package to estimate linear models or other models that are implemented in Stata, statsmodels or anywhere else. Its purpose is to estimate parameters with custom likelihood or method of simulated moments functions. We just use a linear regression model as an example of a very simple likelihood function.\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"## Model:\\n\",\n    \"\\n\",\n    \"$$ y = \\\\beta_0 + \\\\beta_1 x + \\\\epsilon, \\\\text{ where } \\\\epsilon \\\\sim N(0, \\\\sigma^2)$$\\n\",\n    \"\\n\",\n    \"We aim to estimate $\\\\beta_0, \\\\beta_1, \\\\sigma^2$.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"import numpy as np\\n\",\n    \"import pandas as pd\\n\",\n    \"from scipy.stats import norm\\n\",\n    \"\\n\",\n    \"import estimagic as em\\n\",\n    \"\\n\",\n    \"rng = np.random.default_rng(seed=0)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## 1. 
Create a data generating process\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"def simulate_data(params, n_draws):\\n\",\n    \"    x = rng.normal(0, 1, size=n_draws)\\n\",\n    \"    e = rng.normal(0, params.loc[\\\"sd\\\", \\\"value\\\"], size=n_draws)\\n\",\n    \"    y = params.loc[\\\"intercept\\\", \\\"value\\\"] + params.loc[\\\"slope\\\", \\\"value\\\"] * x + e\\n\",\n    \"    return pd.DataFrame({\\\"y\\\": y, \\\"x\\\": x})\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"true_params = pd.DataFrame(\\n\",\n    \"    data=[[2, -np.inf], [-1, -np.inf], [1, 1e-10]],\\n\",\n    \"    columns=[\\\"value\\\", \\\"lower_bound\\\"],\\n\",\n    \"    index=[\\\"intercept\\\", \\\"slope\\\", \\\"sd\\\"],\\n\",\n    \")\\n\",\n    \"true_params\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"data = simulate_data(true_params, n_draws=100)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## 2. 
Define the `loglike` function\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"def normal_loglike(params, data):\\n\",\n    \"    norm_rv = norm(\\n\",\n    \"        loc=params.loc[\\\"intercept\\\", \\\"value\\\"] + params.loc[\\\"slope\\\", \\\"value\\\"] * data[\\\"x\\\"],\\n\",\n    \"        scale=params.loc[\\\"sd\\\", \\\"value\\\"],\\n\",\n    \"    )\\n\",\n    \"    contributions = norm_rv.logpdf(data[\\\"y\\\"])\\n\",\n    \"    return {\\\"contributions\\\": contributions, \\\"value\\\": contributions.sum()}\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"A few remarks before we move on:\\n\",\n    \"\\n\",\n    \"1. There are numerically better ways to calculate the likelihood; we chose this implementation for brevity and readability. \\n\",\n    \"2. The loglike function takes params and other arguments. You are completely flexible with respect to the number and names of the other arguments as long as the first argument is params. \\n\",\n    \"3. The loglike function returns a dictionary with the entries \\\"contributions\\\" and \\\"value\\\". The \\\"contributions\\\" are the log likelihood evaluations of each individual in the dataset. The \\\"value\\\" are their sum. The \\\"value\\\" entry could be omitted, the \\\"contributions\\\" entry, however, is mandatory. \"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## 3. 
Estimate the model\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"start_params = true_params.assign(value=[100, 100, 100])\\n\",\n    \"\\n\",\n    \"res = em.estimate_ml(\\n\",\n    \"    loglike=normal_loglike,\\n\",\n    \"    params=start_params,\\n\",\n    \"    optimize_options={\\\"algorithm\\\": \\\"scipy_lbfgsb\\\"},\\n\",\n    \"    loglike_kwargs={\\\"data\\\": data},\\n\",\n    \")\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"res.summary().round(3)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## 4. What's in the results?\\n\",\n    \"\\n\",\n    \"`LikelihoodResult` objects provide attributes and methods to calculate standard errors, confidence intervals, and p-values. For all three, several methods are available. You can even calculate cluster robust standard errors. 
\\n\",\n    \"\\n\",\n    \"A few examples are:\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"res.params\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"res.cov(method=\\\"robust\\\")\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"res.se()\"\n   ]\n  }\n ],\n \"metadata\": {\n  \"kernelspec\": {\n   \"display_name\": \"optimagic\",\n   \"language\": \"python\",\n   \"name\": \"python3\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 3\n   },\n   \"file_extension\": \".py\",\n   \"mimetype\": \"text/x-python\",\n   \"name\": \"python\",\n   \"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": \"ipython3\",\n   \"version\": \"3.10.14\"\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 2\n}\n"
  },
  {
    "path": "docs/source/estimagic/tutorials/msm_overview.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"0\",\n   \"metadata\": {},\n   \"source\": [\n    \"# Method of Simulated Moments (MSM)\\n\",\n    \"\\n\",\n    \"This tutorial shows you how to do a Method of Simulated Moments estimation in estimagic. The Method of Simulated Moments (MSM) is a nonlinear estimation principle that is very useful for fitting complicated models to the data. The only ingredient required is a function that simulates the model outcomes you observe in some empirical dataset. \\n\",\n    \"\\n\",\n    \"In the tutorial here, we will use a simple linear regression model. This is the same model which we use in the tutorial on maximum likelihood estimation.\\n\",\n    \"\\n\",\n    \"Throughout the tutorial, we only talk about MSM estimation. However, the more general case of indirect inference estimation works exactly the same way. \\n\",\n    \"\\n\",\n    \"\\n\",\n    \"## Steps of MSM estimation\\n\",\n    \"\\n\",\n    \"1. Load (simulate) empirical data \\n\",\n    \"2. Define a function to calculate estimation moments on the data \\n\",\n    \"3. Calculate the covariance matrix of the empirical moments (with ``get_moments_cov``)\\n\",\n    \"4. Define a function to simulate moments from the model \\n\",\n    \"5. Estimate the model, calculate standard errors, do sensitivity analysis (with ``estimate_msm``)\\n\",\n    \"\\n\",\n    \"## Example: Estimate the parameters of a regression model\\n\",\n    \"\\n\",\n    \"The model we consider here is a simple regression model with only one explanatory variable (plus a constant). The goal is to estimate the slope coefficients and the error variance from a simulated data set.\\n\",\n    \"\\n\",\n    \"The estimation mechanics are exactly the same for more complicated models. 
A model is always defined by a function that can take parameters (here: the intercept, slope and standard deviation) and returns a number of simulated moments (here: the means of y, x, y*x, y squared and x squared).
Simulate data\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"3\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"def simulate_data(params, n_draws, rng):\\n\",\n    \"    x = rng.normal(0, 1, size=n_draws)\\n\",\n    \"    e = rng.normal(0, params.loc[\\\"sd\\\", \\\"value\\\"], size=n_draws)\\n\",\n    \"    y = params.loc[\\\"intercept\\\", \\\"value\\\"] + params.loc[\\\"slope\\\", \\\"value\\\"] * x + e\\n\",\n    \"    return pd.DataFrame({\\\"y\\\": y, \\\"x\\\": x})\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"4\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"true_params = pd.DataFrame(\\n\",\n    \"    data=[[2, -np.inf], [-1, -np.inf], [1, 1e-10]],\\n\",\n    \"    columns=[\\\"value\\\", \\\"lower_bound\\\"],\\n\",\n    \"    index=[\\\"intercept\\\", \\\"slope\\\", \\\"sd\\\"],\\n\",\n    \")\\n\",\n    \"\\n\",\n    \"data = simulate_data(true_params, n_draws=100, rng=rng)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"5\",\n   \"metadata\": {},\n   \"source\": [\n    \"## 2. 
Calculate Moments\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"6\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"def calculate_moments(sample):\\n\",\n    \"    moments = {\\n\",\n    \"        \\\"y_mean\\\": sample[\\\"y\\\"].mean(),\\n\",\n    \"        \\\"x_mean\\\": sample[\\\"x\\\"].mean(),\\n\",\n    \"        \\\"yx_mean\\\": (sample[\\\"y\\\"] * sample[\\\"x\\\"]).mean(),\\n\",\n    \"        \\\"y_sqrd_mean\\\": (sample[\\\"y\\\"] ** 2).mean(),\\n\",\n    \"        \\\"x_sqrd_mean\\\": (sample[\\\"x\\\"] ** 2).mean(),\\n\",\n    \"    }\\n\",\n    \"    return pd.Series(moments)\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"7\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"empirical_moments = calculate_moments(data)\\n\",\n    \"empirical_moments\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"8\",\n   \"metadata\": {},\n   \"source\": [\n    \"## 3. Calculate the covariance matrix of empirical moments\\n\",\n    \"\\n\",\n    \"The covariance matrix of the empirical moments (``moments_cov``) is needed for three things:\\n\",\n    \"1. to calculate the weighting matrix\\n\",\n    \"2. to calculate standard errors\\n\",\n    \"3. to calculate sensitivity measures\\n\",\n    \"\\n\",\n    \"We will calculate ``moments_cov`` via a bootstrap. 
Depending on your problem, there can be other ways to calculate the covariance matrix.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"9\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"moments_cov = em.get_moments_cov(\\n\",\n    \"    data, calculate_moments, bootstrap_kwargs={\\\"n_draws\\\": 5_000, \\\"seed\\\": 0}\\n\",\n    \")\\n\",\n    \"\\n\",\n    \"moments_cov\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"10\",\n   \"metadata\": {},\n   \"source\": [\n    \"``get_moments_cov`` mainly just calls estimagic's bootstrap function. See our [bootstrap_tutorial](bootstrap_overview.ipynb) for background information. \\n\",\n    \"\\n\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"11\",\n   \"metadata\": {},\n   \"source\": [\n    \"## 4. Define a function to calculate simulated moments\\n\",\n    \"\\n\",\n    \"In a real world application, this is the step that would take most of the time. However, in our very simple example, all the work is already done by numpy.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"12\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"def simulate_moments(params, n_draws=10_000, seed=0):\\n\",\n    \"    rng = np.random.default_rng(seed)\\n\",\n    \"    sim_data = simulate_data(params, n_draws, rng)\\n\",\n    \"    sim_moments = calculate_moments(sim_data)\\n\",\n    \"    return sim_moments\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"13\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"simulate_moments(true_params)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"14\",\n   \"metadata\": {},\n   \"source\": [\n    \"## 5. 
Estimate the model parameters\\n\",\n    \"\\n\",\n    \"Estimating a model consists of the following steps:\\n\",\n    \"\\n\",\n    \"- Building a criterion function that measures a distance between simulated and empirical moments\\n\",\n    \"- Minimizing this criterion function\\n\",\n    \"- Calculating the Jacobian of the model\\n\",\n    \"- Calculating standard errors, confidence intervals and p-values\\n\",\n    \"- Calculating sensitivity measures\\n\",\n    \"\\n\",\n    \"This can all be done in one go with the ``estimate_msm`` function. This function has sensible default values, so you only need a minimum number of inputs. However, you can configure almost any aspect of the workflow via optional arguments. If you need even more control, you can call the lower level functions, which the now famliliar``estimate_msm`` function is built on, directly. \"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"15\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"start_params = true_params.assign(value=[100, 100, 100])\\n\",\n    \"\\n\",\n    \"res = em.estimate_msm(\\n\",\n    \"    simulate_moments,\\n\",\n    \"    empirical_moments,\\n\",\n    \"    moments_cov,\\n\",\n    \"    start_params,\\n\",\n    \"    optimize_options=\\\"scipy_lbfgsb\\\",\\n\",\n    \")\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"16\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"res.summary()\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"17\",\n   \"metadata\": {},\n   \"source\": [\n    \"## What's in the result?\\n\",\n    \"\\n\",\n    \"`MomentsResult` objects provide attributes and methods to calculate standard errors, confidence intervals and p-values. For all three, several methods are available. 
You can even calculate cluster robust standard errors.\\n\",\n    \"\\n\",\n    \"A few examples are:\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"18\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"res.params\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"19\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"res.cov(method=\\\"robust\\\")\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"20\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"res.se()\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"21\",\n   \"metadata\": {},\n   \"source\": [\n    \"## How to visualize sensitivity measures?\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"22\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"from estimagic import lollipop_plot\\n\",\n    \"\\n\",\n    \"sensitivity_data = res.sensitivity(kind=\\\"bias\\\").abs().T\\n\",\n    \"\\n\",\n    \"fig = lollipop_plot(sensitivity_data)\\n\",\n    \"\\n\",\n    \"fig = fig.update_layout(height=500, width=900)\\n\",\n    \"fig.show()\"\n   ]\n  }\n ],\n \"metadata\": {\n  \"kernelspec\": {\n   \"display_name\": \"estimagic\",\n   \"language\": \"python\",\n   \"name\": \"python3\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 3\n   },\n   \"file_extension\": \".py\",\n   \"mimetype\": \"text/x-python\",\n   \"name\": \"python\",\n   \"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": \"ipython3\",\n   \"version\": \"3.10.14\"\n  },\n  \"vscode\": {\n   \"interpreter\": {\n    \"hash\": \"e8a16b1bdcc80285313db4674a5df2a5a80c75795379c5d9f174c7c712f05b3a\"\n   }\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 5\n}\n"
  },
  {
    "path": "docs/source/explanation/explanation_of_numerical_optimizers.md",
    "content": "(explanation-of-numerical-optimizers)=\n\n# Introduction to basic types of numerical optimization algorithms\n\nThere are hundreds of different numerical optimization algorithm. However, most of them\nbuild on a few basic principles. Knowing those principles helps to classify algorithms\nand thus allows you to connect information about new algorithms with the stuff you\nalready know.\n\nThe main principles we describe here are:\n\n- Derivative based line search algorithms\n- Derivative based trust region algorithms\n- Derivative free trust region algorithms\n- Derivative free direct search algorithms\n\nThis covers a large range of the algorithms that come with optimagic. In contrast, the\nfollowing classes of optimizers are also accessible via optimagic, but not yet covered\nin this overview:\n\n- Conjugate gradient methods\n- Genetic algorithms\n- Grid or random search\n- Bayesian Optimization\n\nFor each class of algorithms we describe the basic idea, show a gif of a stylized\nimplementation with a graphical explanation of each iteration and a gif that shows how a\nreal algorithm of the class converges.\n\nAll of the above algorithms are local optimization algorithms that can (and will in\nfact) get stuck in local optima. If you need a global optimum, you will need to start\nthem from several starting points and take the best result.\n\n## Derivative based line search algorithms\n\n### Basic idea\n\n1. Use first derivative to get search direction\n1. Use approximated second derivative to guess step length\n1. Use a line search algorithm to see how far to go in the search direction\n\nIn other words, the algorithm first fixes a promising direction and then figures out how\nfar it should go in that direction. The important insight here is that even though the\nparameter space might be high dimensional, the line search problem remains one\ndimensional and thus simple to solve. 
Moreover, the line search problem is typically not\nsolved exactly but only approximately. The exact termination conditions for the line\nsearch problem are complicated, but most of the time the initial guess for the step\nlength is accepted.\n\n### Stylized implementation\n\n```{image} ../../_static/images/stylized_line_search.gif\n```\n\n### Convergence of a real algorithm\n\n```{image} ../../_static/images/history_l-bfgs-b.gif\n```\n\n## Derivative based trust-region algorithms\n\n### Basic idea\n\n1. Fix a trust region radius\n1. Construct a Taylor expansion of the function based on function value, gradient, and\n   (approximation to) Hessian\n1. Minimize the Taylor expansion within the trust region\n1. Evaluate function again at the argmin of the Taylor expansion\n1. Compare expected and actual improvement\n1. Accept the new parameters if actual vs. expected improvement is good enough.\n1. Potentially modify the trust region radius\n1. Go back to 2.\n\nIn other words, the algorithm first fixes a maximum step length (the trust region\nradius) and then figures out in which direction to go. If the surrogate model (usually a\nquadratic taylor expansion) approximates the function well, trust region algorithms can\nconverge extremely fast. The main insight here is that evaluating the surrogate model is\nusually much cheaper than evaluating the actual criterion function and thus the trust\nregion subproblem can be solved very fast.\n\nAs can be seen in the stylized implementation, the approximation does not actually have\nto be very good. 
The only thing that matters is that it points the optimizer in the\nright direction.\n\n### Stylized implementation\n\n```{image} ../../_static/images/stylized_gradient_based_trust_region.gif\n```\n\n### Convergence of a real algorithm\n\n```{image} ../../_static/images/history_trust-ncg.gif\n```\n\n## Derivative free trust region algorithms\n\n### Basic Idea\n\nThe basic idea is very similar to derivative based trust region algorithms. The only\ndifference is that instead of a Taylor approximation which requires derivatives, we need\nto come up with another type of surrogate model.\n\nIn order to fit this model, the algorithm evaluates the criterion function at a few\npoints inside the trust region. Depending on how many points those are the surrogate\nmodel is a interpolation or regression model. If there are very few points it might even\nbe an underdetermined interpolation model. In that case some kind of regularization is\nneeded.\n\nNote that for differentiable functions without closed form derivatives, one way to\ndefine the surrogate model would be a Taylor approximation calculated from numerical\nderivatives. However, this would be a rather inefficient choice because points that are\nspaced more evenly throughout the trust region provide more information about the\ncriterion function than the numerical derivatives.\n\n### Stylized implementation\n\n```{image} ../../_static/images/stylized_gradient_free_trust_region.gif\n```\n\n### Convergence of a real algorithm\n\n```{image} ../../_static/images/history_cobyla.gif\n```\n\n## Derivative free direct search algorithms\n\n### Basic Idea\n\n1. Evaluate function at points lying in a fixed pattern around the current point\n1. Accept the best point as new current point\n1. Potentially modify the size or spread of the pattern\n1. Go back to 1.\n\nDirect search algorithms are also called pattern search algorithms. 
They can typically\ndeal well with small amounts of noise, because only the ordering of function values is\nused, not the magnitudes. However, they are relatively slow compared to the other\nalgorithms.\n\n### Stylized implementation\n\n```{image} ../../_static/images/stylized_direct_search.gif\n```\n\n### Convergence of a real algorithm\n\n```{image} ../../_static/images/history_nelder-mead.gif\n```\n"
  },
  {
    "path": "docs/source/explanation/implementation_of_constraints.md",
    "content": "(implementation_of_constraints)=\n\n# How constraints are implemented\n\nMost of the optimizers wrapped in optimagic cannot deal natively with anything but box\nconstraints. So the problem they can solve is:\n\n$$\n\\min_{x \\in \\mathbb{R}^k} f(x) \\quad \\text{s.t.} \\hspace{0.5cm} l \\leq x \\leq u\n$$\n\nHowever, in most econometric applications, we also need other constraints. For example,\nwe may require that some parameters sum to a value, form a covariance matrix, or are\nprobabilities. More abstractly, the problem becomes:\n\n$$\n\\min_{x \\in \\mathbb{R}^k} f(x) \\quad \\text{s.t.} \\hspace{0.5cm} l \\leq x \\leq u\n\\text{  and  } C(x) = 0\n$$\n\nThere are two basic ways of converting optimizers, which, natively, can only deal with\nbox constraints, into constrained optimizers: Reparametrization and penalties. Below, we\nexplain what both approaches are, why we chose the reparametrization approach over\npenalties, and which reparametrizations we are using for each type of constraint.\n\nIn this text, we focus on constraints that can be solved by optimagic via bijective and\ndifferentiable transformations. General nonlinear constraints do not fall into this\ncategory. If you want to use nonlinear constraints, you can still do so, but optimagic\nwill simply pass the constraints to your chosen optimizer. See {ref}`constraints` for\nmore details.\n\n## Possible approaches\n\n### Reparametrizations\n\nIn the reparametrization approach, we need to find an invertible mapping\n$g : \\mathbb{R}^{k'} \\to \\mathbb{R}^k$, and two new bounds $l'$ and $u'$ such that:\n\n$$\nl' \\leq \\tilde{x} \\leq u' \\iff l \\leq g(\\tilde{x}) \\leq u \\text {  and  }\nC(g(\\tilde{x})) = 0\n$$\n\nThis means that:\n\n$$\n\\min_{\\tilde{x} \\in \\mathbb{R}^{k'}} f(g(\\tilde{x})) \\quad \\text{s.t.}\n\\hspace{0.5cm} l' \\leq \\tilde{x} \\leq u'\\\\\n$$\n\nis equivalent to the original minimization problem.\n\nThis sounds more complicated than it is. 
Let's look at the simple example of a two\ndimensional parameter vector, where our constraint is that the two parameters have to\nsum to 5.\n\n$$\nx = (x_1, x_2)\n\nf(x) = x_1^2 + 2 x_2^2\n\nc(x) = x_1 + x_2 - 5\n\n\\tilde{x} = x_1\n\ng(\\tilde{x}) = (\\tilde{x}, 5 - \\tilde{x})\n$$\n\nTypically, users implement such reparametrizations manually and write functions to\nconvert between the parameters of interest and their reparametrized version. optimagic\ndoes this for you, for a large number of constraints that are typically used in\neconometric applications.\n\nFor this approach to be efficient, it is crucial that the reparametrizations preserve\ndesirable properties of the original problem. In particular, the mapping $g$ should be\ndifferentiable and if possible linear. Moreover, the dimensionality of $\\tilde{x}$\nshould be chosen as small as possible. optimagic only implements constraints that can be\nenforced with differentiable transformations and always achieves full dimensionality\nreduction.\n\n### Penalties\n\nThe penalty approach is conceptually much simpler. Whenever $C(x) \\neq 0$, a penalty\nterm is added to the criterion function. If the penalty term is large enough (e.g. as\nlarge as the criterion function at the start values), this penalty ensures that any x\nthat does not satisfy the constraints can not be optimal.\n\nWhile the generality and conceptual simplicity of this approach is attractive, it also\nhas its drawbacks. Applying penalties in a naive way can introduce kinks,\ndiscontinuities, and even local optima into the penalized criterion.\n\n## What optimagic does\n\nWe chose to implement constraints via reparametrizations for the following reasons:\n\n- Reparametrizations ensure that the criterion function is only evaluated at parameters\n  that satisfy all constraints. 
This is not only efficient, but essential if the\n  criterion function is only defined for such parameters.\n- Reparametrizations can often achieve a substantial dimensionality reduction. In\n  particular, fixes and equality constraints are implemented at zero cost, i.e. as\n  efficiently as if you directly plugged them into your original problem. This is\n  important because fixes and equality constraints often make user code much nicer and\n  more flexible.\n- It is easier to preserve desirable properties such as convexity and differentiability\n  with reparametrizations rather than penalties.\n\nThe constraints that can be implemented via reparametrizations are available for all\noptimizers. More general constraints are only available with optimizers that can deal\nnatively with them. This includes all optimizers from the `nlopt` and `ipopt` libraries.\n\n## The non-trivial reparametrizations\n\nFixed parameters, equality, and pairwise equality constraints can be implemented\ntrivially with reparametrizations by simply plugging them into the criterion function.\nIncreasing and decreasing constraints are internally implemented as linear constraints.\nThe following section explains how the other types of constraints are implemented:\n\n### Covariance and sdcorr constraints\n\nThe main difficulty with covariance and sdcorr constraints is to keep the (implied)\ncovariance matrix valid, i.e. positive semi-definite. In both cases, $\\tilde{x}$\ncontains the non-zero elements of the lower triangular cholesky factor of the (implied)\ncovariance matrix. For covariance constraints, $g$ is then simply the product of the\ncholesky factor with its transpose. For the sdcorr covariance matrix, the product is\nfurther converted to standard deviations and the unique elements of a covariance matrix.\n\nSeveral papers show that the cholesky reparametrization is a very efficient way to\noptimize over covariance matrices. 
Examples are {cite}`Pinheiro1996` and\n{cite}`Groeneveld1994`.\n\nA limitation of this approach is that there can be no additional fixes, box constraints,\nor other constraints on any of the involved parameters.\n\n(linear-constraint-implementation)=\n\n### Linear constraints\n\nAssume we have m linear constraints on an n-dimensional parameter vector. Then the set\nof all parameter vectors that satisfies the constraints can be written as:\n\n$$\n\\mathbf{X} \\equiv \\{\\mathbf{x} \\in \\mathbb{R}^n \\mid \\mathbf{l} \\leq \\mathbf{Ax}\n\\leq \\mathbf{u}\\}\n$$\n\nWe are looking for a set $\\mathbf{\\tilde{X}}$ that only satisfies box constraints and\nreparametrizations. The reparametrizations will turn out to be a linear mapping, and\nthus have a matrix representation, say M. We are good if the following holds:\n\n$$\nx \\in \\mathbf{X} \\iff \\exists \\mathbf{\\tilde{x}} \\in \\mathbf{\\tilde{X}} \\text{s.t.}\n\\mathbf{x} = \\mathbf{M\\tilde{x}}\n$$\n\nSuitable choices of $\\mathbf{\\tilde{X}}$ and $\\mathbf{M}$ are:\n\n$$\n\\mathbf{\\tilde{X}} \\equiv \\{(\\tilde{x}_1, \\tilde{x}_2)^T \\mid \\mathbf{\\tilde{x}}_1\n\\in \\mathbb{R}^{k} \\text{ and } \\mathbf{l} \\leq \\mathbf{\\tilde{x}}_2 \\leq \\mathbf{l}\\}\n\n\\mathbf{M} =\n    \\left[ {\\begin{array}{cc}\n    \\mathbb{I}_n[k] \\\\\n    A \\\\\n    \\end{array} } \\right]^{-1}\n$$\n\nwhere $k = m - n$ and $\\mathbb{I}_n[k]$ are the k rows of the identity matrix that make\nall rows of $\\mathbf{M}$ linearly independent.\n\n**Proof:**\n\n\"$\\Rightarrow$\":\n\nLet $x\\in \\mathbf{X}$, then we define $\\mathbf{\\tilde{x}} = \\mathbf{M}^{-1} x$. 
Claim:\n$\\mathbf{\\tilde{x}}  \\in \\mathbf{\\tilde{X}}$: \\\\\n\n$$\n\\mathbf{\\tilde{x}}  = \\mathbf{M}^{-1} x =   \\left[ {\\begin{array}{cc}      \\mathbb{I}_n[k]x \\\\      Ax \\\\     \\end{array} } \\right]     = (\\tilde{x}_1, \\tilde{x}_2)^T\n$$\n\nwhere $\\tilde{x}_1 \\in \\mathbb{R}^k$ and\n$\\mathbf{l} \\leq \\mathbf{\\tilde{x}}_2 \\leq \\mathbf{u}$ because\n$\\mathbf{l} \\leq \\mathbf{Ax} \\leq \\mathbf{u}$. Thus\n$\\mathbf{\\tilde{x}} \\in \\mathbf{\\tilde{X}}$.\n\n\"$\\Leftarrow$\" (Proof by negation):\n\nLet $x \\not\\in \\mathbf{X}$ and define $\\mathbf{\\tilde{x}} = \\mathbf{M}^{-1} x$. Claim\n$\\mathbf{\\tilde{x}}  \\not\\in \\mathbf{\\tilde{X}}$.\n\nBy the same argument as above we can show, that, because\n$\\neg(\\mathbf{l} \\leq \\mathbf{Ax} \\leq \\mathbf{u})$,\n$\\mathbf{\\tilde{x}}  \\not\\in \\mathbf{\\tilde{X}}$.\n\nThe rank condition on M makes it clear that there can be at most as many linear\nconstraints as involved parameters. This includes any box constraints on the involved\nparameters.\n\n### Probability constraints\n\nA probability constraint on k parameters means that all parameters lie in $[0, 1]$ and\ntheir sum equals one. While those are all linear constraints, they cannot be implemented\nin the way described above, because there are k + 1 constraints for k parameters.\n\nInstead we do the following\n\n$$\n\\tilde{x} = (\\tilde{x}_1, \\tilde{x}_2, \\ldots, \\tilde{x}_{k - 1})\\\\ g(\\tilde{x}) = (\\frac{\\tilde{x}_1}{1 + \\sum_{i=1}^{k-1}\\tilde{x}_i}, \\frac{\\tilde{x}_2}{1 + \\sum_{i=1}^{k-1}\\tilde{x}_i}, \\ldots, \\frac{1}{1 + \\sum_{i=1}^{k-1}\\tilde{x}_i})\\\\ l' = (0, 0, \\ldots, 0)\n$$\n\nA limitation of this approach is that there can be no additional fixes, box constraints\nor other constraints on any of the involved parameters.\n\n**References**\n\n```{eval-rst}\n.. bibliography:: ../refs.bib\n    :filter: docname in docnames\n```\n"
  },
  {
    "path": "docs/source/explanation/index.md",
    "content": "# Explanation\n\nThis section provides background information on numerical topics and details of\noptimagic. It is completely optional and not necessary if you are just starting out.\n\n```{toctree}\n---\nmaxdepth: 1\n---\nimplementation_of_constraints\ninternal_optimizers\nwhy_optimization_is_hard.ipynb\nexplanation_of_numerical_optimizers\ntests_for_supported_optimizers\nnumdiff_background\n```\n"
  },
  {
    "path": "docs/source/explanation/internal_optimizers.md",
    "content": "(internal_optimizer_interface)=\n\n# Internal optimizers for optimagic\n\noptimagic provides a large collection of optimization algorithm that can be used by\npassing the algorithm name as `algorithm` into `maximize` or `minimize`. Advanced users\ncan also use optimagic with their own algorithm, as long as it conforms with the\ninternal optimizer interface.\n\nThe advantages of using the algorithm with optimagic over using it directly are:\n\n- You can collect the optimizer history and create criterion_plots and params_plots.\n- You can use flexible formats for your start parameters (e.g. nested dicts or\n  namedtuples)\n- optimagic turns unconstrained optimizers into constrained ones.\n- You can use logging.\n- You get great error handling for exceptions in the criterion function or gradient.\n- You get a parallelized and customizable numerical gradient if you don't have a closed\n  form gradient.\n- You can compare your optimizer with all the other optimagic optimizers on our\n  benchmark sets.\n\nAll of this functionality is achieved by transforming a more complicated user provided\nproblem into a simpler problem and then calling \"internal optimizers\" to solve the\ntransformed problem.\n\n(functions_and_classes_for_internal_optimizers)=\n\n## Functions and classes for internal optimizers\n\nThe functions and classes below are everything you need to know to add an optimizer to\noptimagic. To see them in action look at\n[this guide](../how_to/how_to_add_optimizers.ipynb)\n\n```{eval-rst}\n.. currentmodule:: optimagic.mark\n```\n\n```{eval-rst}\n.. dropdown:: mark.minimizer\n\n    The `mark.minimizer` decorator is used to provide algorithm specific information to\n    optimagic. This information is used in the algorithm selection tool, for better\n    error handling and for processing of the user provided optimization problem.\n\n    .. autofunction:: minimizer\n```\n\n```{eval-rst}\n.. 
currentmodule:: optimagic.optimization.internal_optimization_problem\n```\n\n```{eval-rst}\n\n\n.. dropdown:: InternalOptimizationProblem\n\n    The `InternalOptimizationProblem` is optimagic's internal representation of objective\n    functions, derivatives, bounds, constraints, and more. This representation is already\n    pretty close to what most algorithms expect (e.g. parameters and bounds are flat\n    numpy arrays, no matter which format the user provided).\n\n    .. autoclass:: InternalOptimizationProblem()\n        :members:\n\n```\n\n```{eval-rst}\n.. currentmodule:: optimagic.optimization.algorithm\n```\n\n```{eval-rst}\n\n.. dropdown:: InternalOptimizeResult\n\n    This is what you need to create from the output of a wrapped algorithm.\n\n    .. autoclass:: InternalOptimizeResult\n        :members:\n\n```\n\n```{eval-rst}\n\n.. dropdown:: Algorithm\n\n    .. autoclass:: Algorithm\n        :members:\n        :exclude-members: with_option_if_applicable\n\n```\n\n(naming-conventions)=\n\n## Naming conventions for algorithm specific arguments\n\nTo make switching between different algorithm as simple as possible, we align the names\nof commonly used convergence and stopping criteria. We also align the default values for\nstopping and convergence criteria as much as possible.\n\n```{eval-rst}\nYou can find the harmonized names and value here: :ref:`algo_options`.\n```\n\nTo align the names of other tuning parameters as much as possible with what is already\nthere, simple have a look at the optimizers we already wrapped. 
For example, if you are\nwrapping a bfgs or lbfgs algorithm from some library, try to look at all existing\nwrappers of bfgs algorithms and use the same names for the same options.\n\n## Algorithms that parallelize\n\nAlgorithms that evaluate the objective function or derivatives in parallel should only\ndo so via `InternalOptimizationProblem.batch_fun`,\n`InternalOptimizationProblem.batch_jac` or\n`InternalOptimizationProblem.batch_fun_and_jac`.\n\nIf you parallelize in any other way, the automatic history collection will stop working.\n\nIn that case, call `om.mark.minimizer` with `disable_history=True`. Then you can\neither do your own history collection and add that history to `InternalOptimizeResult`\nor the user has to rely on logging.\n\n## Nonlinear constraints\n\n(to be written)\n"
  },
  {
    "path": "docs/source/explanation/numdiff_background.md",
    "content": "# Numerical differentiation: methods\n\nIn this section we explain the mathematical background of forward, backward and central\ndifferences. The main ideas in this chapter are taken from {cite}`Dennis1996`. x is used\nfor the pandas DataFrame with parameters. We index the entries of x as a n-dimensional\nvector, where n is the number of variables in params_sr. The forward difference for the\ngradient is given by:\n\n$$\n\\nabla f(x) = \\begin{pmatrix}\\frac{f(x + e_0 * h_0) - f(x)}{h_0}\\\\\n\\frac{f(x + e_1 * h_1) - f(x)}{h_1}\\\\.\\\\.\\\\.\\\\ \\frac{f(x + e_n * h_n)\n- f(x)}{h_n} \\end{pmatrix}\n$$\n\nThe backward difference for the gradient is given by:\n\n$$\n\\nabla f(x) = \\begin{pmatrix}\\frac{f(x) - f(x - e_0 * h_0)}{h_0}\\\\ \\frac{f(x) -\nf(x - e_1 * h_1)}{h_1}\\\\.\\\\.\\\\.\\\\ \\frac{f(x) - f(x - e_n * h_n)}{h_n}\n\\end{pmatrix}\n$$\n\nThe central difference for the gradient is given by:\n\n$$\n\\nabla f(x) =\n\\begin{pmatrix}\\frac{f(x + e_0 * h_0) - f(x - e_0 * h_0)}{2 h_0}\\\\\n\\frac{f(x + e_1 * h_1) - f(x - e_1 * h_1)}{2 h_1}\\\\.\\\\.\\\\.\\\\ \\frac{f(x + e_n * h_n)\n- f(x - e_n * h_n)}{2 h_n} \\end{pmatrix}\n$$\n\nFor the optimal stepsize h the following rule of thumb is applied:\n\n$$\nh_i = (1 + |x[i]|) * \\sqrt\\epsilon\n$$\n\nWith the above in mind it is easy to calculate the Jacobian matrix. The calculation of\nthe finite difference w.r.t. each variable of params_sr yields a vector, which is the\ncorresponding column of the Jacobian matrix. The optimal stepsize remains the same.\n\nFor the Hessian matrix, we repeatedly call the finite differences functions. 
As we allow\nfor central finite differences in the second order derivative only, the deductions for\nforward and backward are left to the interested reader:\n\n$$\nf_{i,j}(x)\n    = &\\frac{f_i(x + e_j * h_j) - f_i(x - e_j * h_j)}{2 h_j} \\\\\n    = &\\frac{\\frac{f(x + e_j * h_j + e_i * h_i) - f(x + e_j * h_j - e_i * h_i)}{2 h_i}\n       - \\frac{\n             f(x - e_j * h_j + e_i * h_i) - f(x - e_j * h_j - e_i * h_i)\n         }{2 h_i}}{2 h_j} \\\\\n    = &\\frac{\n           f(x + e_j * h_j + e_i * h_i) - f(x + e_j * h_j - e_i * h_i)\n       }{4 h_j * h_i} \\\\\n      &+ \\frac{\n             - f(x - e_j * h_j + e_i * h_i) + f(x - e_j * h_j - e_i * h_i)\n         }{4 h_j * h_i}\n$$\n\nFor the optimal stepsize a different rule is used:\n\n$$\nh_i = (1 + |x[i]|) * \\sqrt[3]\\epsilon\n$$\n\nSimilar derivations lead to the elements of the Hessian matrix calculated by forward and\nbackward differences.\n\n**References:**\n\n```{eval-rst}\n.. bibliography:: ../refs.bib\n    :filter: docname in docnames\n```\n"
  },
  {
    "path": "docs/source/explanation/tests_for_supported_optimizers.md",
    "content": "# How supported optimization algorithms are tested\n\noptimagic provides a unified interface that supports a large number of optimization\nalgorithms from different libraries. Additionally, it allows putting constraints on the\noptimization problem. To test the external interface of all supported algorithms, we\nconsider different criterion (benchmark) functions and test each algorithm with every\ntype of constraint.\n\n## Benchmark functions for testing\n\n### Trid function\n\n> $f({x}) = \\Sigma^{D}_{i=1}(x_{i} - 1)^2 - \\Sigma^{D}_{i=2}(x_i x_{i-1})$\n\n### Rotated Hyper Ellipsoid function\n\n> $f({x}) = \\Sigma^{D}_{i=1} \\Sigma^{i}_{j=1}x_j^2$\n\n### Rosenbrock function\n\n> $\\Sigma^{D-1}_{i=1}(100(x_i+1 - x_i^2)^2 + (x_i - 1)^2)$\n\n### Sphere function\n\n> $f({x}) = \\Sigma^{D}_{i=1} ix_{i}^2$\n\n## How testcases are implemented\n\nWe consider different implementations of each criterion and its gradient. All algorithms\naccept criterion functions specified in a dictionary, while a subset also accepts the\ncriterion specified in scalar form. Likewise, if specified, the gradient of a criterion\ncan be an np.ndarray or a pandas object. We test for all possible cases. For instance,\nfor rotated hyper ellipsoid, we implement the following functions:\n\n- rotated_hyper_ellipsoid_scalar_criterion\n- rotated_hyper_ellipsoid_dict_criterion: This provides a dictionary wherein the\n  `contributions` and `root_contributions` keys present the criterion as a least squares\n  problem, relevant when we are testing a least squares algorithm.\n- rotated_hyper_ellipsoid_gradient\n- rotated_hyper_ellipsoid_pandas_gradient: Computes the gradient of the rotated hyper\n  ellipsoid function, as a pandas object.\n- rotated_hyper_ellipsoid_criterion_and_gradient\n\nThese criterion functions are specified in the `examples` directory. 
For an overview of\nall constraints supported in optimagic, please see [this how-to guide].\n\nWe write several test functions, each corresponding to the case of one constraint. Given\nthe constraint, the test function considers all possible combinations of the algorithm,\nwhether to maximize or to minimize, criterion function implementation, gradient\nimplementation for that criterion (if provided), and whether `criterion_and_derivative`\nhas been provided or not.\n\nBelow, we show the calculations behind the true values, for each testcase (one criterion\nand one constraint).\n\n### Trid: Solutions for three-dimension case\n\n> $f({x}) = (x_1-1)^2 + (x_2-1)^2 + (x_3-1)^2 - x_2 x_1 - x_3 x_2$\n\n```{eval-rst}\n.. dropdown::  No constraints\n\n    .. code-block:: python\n\n        constraints = []\n\n    :math:`x* = (3, 4, 3)`\n```\n\n```{eval-rst}\n.. dropdown:: Fixed constraints\n\n    .. code-block:: python\n\n        constraints = [{\"loc\": \"x_1\", \"type\": \"fixed\", \"value\": 1}]\n\n    :math:`x_{1} = 1 \\rightarrow f(x) = (x_2 - 1)^2 + (x_3 - 1)^2 - x_2 - x_3 x_2 \\\\\n    \\Rightarrow \\frac{\\delta f({x})}{\\delta x_2} = 2x_2 - 3 - x_3 = 0\n    \\Rightarrow x_3 = 2x_2 - 3\\\\\n    \\Rightarrow \\frac{\\delta f({x})}{\\delta x_3} = 2x_3 - 2 - x_2 = 0\n    \\Rightarrow x_2 = 2x_3 - 2\\\\\n    \\Rightarrow x_2 = \\frac{8}{3} , \\quad x_3 = \\frac{7}{3}\\\\\n    \\rightarrow x* = (1,\\frac{8}{3}, \\frac{7}{3})`\n```\n\n```{eval-rst}\n.. dropdown::  Probability constraint\n\n    .. 
code-block:: python\n\n        constraints = [{\"loc\": [\"x_1\", \"x_2\"], \"type\": \"probability\"}]\n\n    :math:`x_{1} + x_{2} = 1, \\quad 0 \\leq x_1 \\leq 1, \\quad 0 \\leq x_2 \\leq 1 \\\\\n    \\rightarrow f({x}) = 3x_1^2 - 3x_1 - 3x_3 + x_3^2 + x_1 x_3 + 2 \\\\\n    \\Rightarrow \\frac{\\delta f({x})}{\\delta x_1} = 6x_1 - 3 + x_3 = 0\n    \\Rightarrow x_3 = 3 - 6x_1\\\\\n    \\Rightarrow \\frac{\\delta f({x})}{\\delta x_3} = 2x_3 - 3 + x_1 = 0\n    \\Rightarrow x_1 = 3 - 2x_3\\\\\n    \\Rightarrow x_1 = \\frac{3}{11}, \\quad x_3 = \\frac{15}{11}\\\\\n    \\rightarrow x* = (\\frac{3}{11}, \\frac{8}{11}, \\frac{15}{11})`\n```\n\n```{eval-rst}\n.. dropdown:: Increasing constraint\n\n    .. code-block:: python\n\n        constraints = [{\"loc\": [\"x_2\", \"x_3\"], \"type\": \"increasing\"}]\n\n    :math:`\\mathcal{L}({x_i}) = (x_1 - 1)^2 + (x_2 - 1)^2 + (x_3 - 1)^2 - x_1 x_2 -\n    x_3 x_2 - \\lambda(x_3 - x_2)\\\\\n    \\Rightarrow \\frac{\\delta \\mathcal{L}}{\\delta x_1} = 2(x_1 - 1) - x_2 = 0\\\\\n    \\Rightarrow \\frac{\\delta \\mathcal{L}}{\\delta x_2} = 2(x_2 - 1) - x_1 - x_3 +\n    \\lambda = 0\\\\\n    \\Rightarrow \\frac{\\delta \\mathcal{L}}{\\delta x_3} = 2(x_3 - 1) - x_2 - \\lambda\n    = 0\\\\\n    \\Rightarrow \\frac{\\delta \\mathcal{L}}{\\delta \\lambda} = - x_3 + x_2 = 0\\\\\n    \\Rightarrow x_2 = 2(x_1 - 1) = x_3 = \\frac{10}{3}\\\\\n    \\Rightarrow 2(x_2 - 1) - x_1 - 2 = 0\\\\\n    \\Rightarrow 4(x_1 - 1) - 2 - x_1 - 2 = 0\\\\\n    \\Rightarrow 3x_1 - 8 = 0 \\Rightarrow x_1 = \\frac{8}{3}\\\\\n    \\rightarrow x* = (\\frac{8}{3}, \\frac{10}{3}, \\frac{10}{3})`\n```\n\n```{eval-rst}\n.. dropdown::  Decreasing constraint\n\n    .. code-block:: python\n\n        constraints = [{\"loc\": [\"x_1\", \"x_2\"], \"type\": \"decreasing\"}]\n\n    Solution unavailable.\n```\n\n```{eval-rst}\n.. dropdown::  Equality constraint\n\n    .. 
code-block:: python\n\n        constraints = [{\"loc\": [\"x_1\", \"x_2\", \"x_3\"], \"type\": \"equality\"}]\n\n    :math:`x_{1} = x_{2} = x_{3} = x \\\\\n    \\rightarrow f({x}) = x^2 - 6x + 3\\\\\n    \\Rightarrow \\frac{\\delta f({x})}{\\delta x} = 2x - 6 = 0\\\\\n    \\Rightarrow x = 3\\\\\n    \\rightarrow x* = (3,3,3)`\n```\n\n```{eval-rst}\n.. dropdown::   Pairwise equality constraint\n\n    .. code-block:: python\n\n        constraints = [{\"locs\": [\"x_1\", \"x_2\"], \"type\": \"pairwise_equality\"}]\n\n    :math:`x_{1} = x_{2} \\\\\n    \\rightarrow f({x}) = 2(x_1 - 1)^2 + (x_3 - 1)^2 - x_1^2 - x_3 x_1\\\\\n    \\Rightarrow \\frac{\\delta f({x})}{\\delta x_1} = 2x_1 - x_3 - 4 = 0 \\Rightarrow x_3\n    = 2x_1 - 4\\\\\n    \\Rightarrow \\frac{\\delta f({x})}{\\delta x_3} = 2x_3 - x_1 - 2 = 0 \\Rightarrow x_1\n    = 2x_3 - 2\\\\\n    \\Rightarrow x_1 = \\frac{10}{3}, x_3 = \\frac{8}{3}\\\\\n    \\rightarrow x* = (\\frac{10}{3},\\frac{10}{3},\\frac{8}{3})`\n```\n\n```{eval-rst}\n.. dropdown::   Covariance constraint\n\n    .. code-block:: python\n\n        constraints = [{\"loc\": [\"x_1\", \"x_2\", \"x_3\"], \"type\": \"covariance\"}]\n\n    Solution unavailable.\n\n```\n\n```{eval-rst}\n.. dropdown::  sdcorr constraint\n\n    .. code-block:: python\n\n        constraints = [{\"loc\": [\"x_1\", \"x_2\", \"x_3\"], \"type\": \"sdcorr\"}]\n\n    Solution unavailable.\n```\n\n```{eval-rst}\n.. dropdown::  Linear constraint\n\n    .. 
code-block:: python\n\n        constraints = [{\"loc\": [\"x_1\", \"x_2\"], \"type\": \"linear\", \"weights\": [1, 2], \"value\": 4}]\n\n    :math:`x_1 + 2x_2 = 4\\\\\n    \\mathcal{L}({x_i}) = (x_1 - 1)^2 + (x_2 - 1)^2 + (x_3 - 1)^2 - x_1 x_2 - x_3 x_2\n    - \\lambda(x_1 +2x_2-4)\\\\\n    \\Rightarrow \\frac{\\delta \\mathcal{L}}{\\delta x_1} = 2(x_1 - 1) - x_2 - \\lambda = 0\\\\\n    \\Rightarrow \\frac{\\delta \\mathcal{L}}{\\delta x_2} = 2(x_2 - 1) - x_1 - x_3 -\n    2\\lambda = 0\\\\\n    \\Rightarrow \\frac{\\delta \\mathcal{L}}{\\delta x_3} = 2(x_3 - 1) - x_2 = 0 \\\\\n    \\Rightarrow \\frac{\\delta \\mathcal{L}}{\\delta \\lambda} = - x_1 - 2x_2 + 4 = 0\\\\\n    \\Rightarrow x_2 = 2(x_3 - 1), \\quad x_1 = 4 - 2x_2\\\\\n    \\Rightarrow 2(4 - 2x_2 - 1) - x_2 = x_2 - 1 - 2 + x_2 - \\frac{x_2}{4} -\n    \\frac{1}{2}\\\\\n    \\rightarrow x* = (\\frac{32}{27}, \\frac{38}{27}, \\frac{46}{27})`\n\n\n\n\n\n```\n\n### Rotated Hyper Ellipsoid: Solutions for three-dimension case\n\n> $f({x}) = x^2_1 + (x^2_1 + x^2_2) + (x^2_1 + x^2_2 + x^2_3)$\n>\n> > ```{eval-rst}\n> > .. dropdown::   No constraints\n> >\n> >     .. code-block:: python\n> >\n> >         constraints = []\n> >\n> >     :math:`x* = (0, 0, 0)`\n> > ```\n> >\n> > ```{eval-rst}\n> > .. dropdown::   Fixed constraints\n> >\n> >     .. code-block:: python\n> >\n> >         constraints = [{\"loc\": \"x_1\", \"type\": \"fixed\", \"value\": 1}]\n> >\n> >     :math:`x_{1} = 1\n> >     \\rightarrow x* = (1, 0, 0)`\n> > ```\n> >\n> > ```{eval-rst}\n> > .. dropdown::   Probability constraints\n> >\n> >     .. 
code-block:: python\n> >\n> >         constraints = [{\"loc\": [\"x_1\", \"x_2\"], \"type\": \"probability\"}]\n> >\n> >     :math:`x_{1} + x_{2} = 1, \\quad 0 \\leq x_1 \\leq 1, \\quad 0 \\leq x_2 \\leq 1 \\\\\n> >     \\mathcal{L}({x_i}) = x^2_1 + (x^2_1 + x^2_2) + (x^2_1 + x^2_2 + x^2_3)\\\\\n> >     -\\lambda(x_1 +x_2-1)\\\\\n> >     \\Rightarrow \\frac{\\delta \\mathcal{L}}{\\delta x_1}\\\\\n> >     = 6x_1 - \\lambda = 0\\\\\n> >     \\Rightarrow \\frac{\\delta \\mathcal{L}}{\\delta x_2}\\\\\n> >     = 4x_2 - \\lambda = 0\\\\\n> >     \\Rightarrow \\frac{\\delta \\mathcal{L}}{\\delta x_3}\\\\\n> >     = 2 x_3 = 0\\\\\n> >     \\Rightarrow \\frac{\\delta \\mathcal{L}}{\\delta \\lambda} \\\\\n> >     = -x_1 - x_2 + 1 = 0\\\\\n> >     \\rightarrow x* = (\\frac{2}{5}, \\frac{3}{5}, 0),\\\\\n> >     \\quad f({x*}) = \\frac{6}{5}`\n> > ```\n> >\n> > ```{eval-rst}\n> > .. dropdown::  Increasing  constraints\n> >\n> >     .. code-block:: python\n> >\n> >         constraints = [{\"loc\": [\"x_2\", \"x_3\"], \"type\": \"increasing\"}]\n> >\n> >     Not binding :math:`\\rightarrow x* = (0, 0, 0)`\n> >\n> > ```\n> >\n> > ```{eval-rst}\n> > .. dropdown::   Decreasing  constraints\n> >\n> >     .. code-block:: python\n> >\n> >         constraints = [{\"loc\": [\"x_1\", \"x_2\"], \"type\": \"decreasing\"}]\n> >\n> >     Not binding :math:`\\rightarrow x* = (0, 0, 0)`\n> >\n> > ```\n> >\n> > ```{eval-rst}\n> > .. dropdown::   Equality  constraints\n> >\n> >     .. code-block:: python\n> >\n> >         constraints = [{\"loc\": [\"x_1\", \"x_2\", \"x_3\"], \"type\": \"equality\"}]\n> >\n> >     Not binding :math:`\\rightarrow x* = (0, 0, 0)`\n> >\n> > ```\n> >\n> > ```{eval-rst}\n> > .. dropdown::  Pairwise equality  constraints\n> >\n> >     .. code-block:: python\n> >\n> >         constraints = [{\"locs\": [\"x_1\", \"x_2\"], \"type\": \"pairwise_equality\"}]\n> >\n> >     Not binding :math:`\\rightarrow x* = (0, 0, 0)`\n> >\n> > ```\n> >\n> > ```{eval-rst}\n> > .. 
dropdown::   Covariance constraints\n> >\n> >     .. code-block:: python\n> >\n> >         constraints = [{\"loc\": [\"x_1\", \"x_2\", \"x_3\"], \"type\": \"covariance\"}]\n> >\n> >     Not binding :math:`\\rightarrow x* = (0, 0, 0)`\n> >\n> >\n> > ```\n> >\n> > ```{eval-rst}\n> > .. dropdown::   sdcorr constraints\n> >\n> >     .. code-block:: python\n> >\n> >         constraints = [{\"loc\": [\"x_1\", \"x_2\", \"x_3\"], \"type\": \"sdcorr\"}]\n> >\n> >     Not binding :math:`\\rightarrow x* = (0, 0, 0)`\n> >\n> > ```\n> >\n> > ```{eval-rst}\n> > .. dropdown::  Linear constraints\n> >\n> >     .. code-block:: python\n> >\n> >         constraints = [{\"loc\": [\"x_1\", \"x_2\"], \"type\": \"linear\", \"weights\": [1, 2], \"value\": 4}]\n> >\n> >     :math:`x_1 + 2x_2 = 4\\\\\\mathcal{L}({x_i}) = x^2_1 + (x^2_1 + x^2_2) +\n> >     (x^2_1 + x^2_2 + x^2_3) -\\lambda(x_1 +2x_2-4)\\\\\n> >     \\Rightarrow \\frac{\\delta\\mathcal{L}}{\\delta x_1} = 6x_1 - \\lambda = 0\\\\\n> >     \\Rightarrow \\frac{\\delta \\mathcal{L}}{\\delta x_2} = 4x_2 - 2\\lambda = 0\\\\\n> >     \\Rightarrow \\frac{\\delta \\mathcal{L}}{\\delta x_3} = 2 x_3 = 0\\\\\n> >     \\Rightarrow \\frac{\\delta \\mathcal{L}}{\\delta \\lambda} = -x_1 - 2x_2 + 4 = 0\\\\\n> >     \\rightarrow x* = (\\frac{4}{7}, \\frac{12}{7}, 0)`\n> >\n> >\n> >\n> >\n> >\n> >\n> > ```\n\n### Rosenbrock: Solutions for three-dimension case\n\n> $f({x}) = 100(x_2 - x_1^2)^2 + (x_1 - 1)^2$\n\nGlobal minimum: $x* = (1, 1, 1)$\n\n> ```{eval-rst}\n> .. dropdown::  No constraints\n>\n>     .. code-block:: python\n>\n>         constraints = []\n>\n>     :math:`x* = (1, 1, 1)`\n>\n> ```\n>\n> ```{eval-rst}\n> .. dropdown::  Fixed constraints\n>\n>     .. code-block:: python\n>\n>        constraints = [{\"loc\": \"x_1\", \"type\": \"fixed\", \"value\": 1}]\n>\n>     :math:`x_{1} = 1 \\rightarrow x* = (1, 1, 1)`\n> ```\n>\n> ```{eval-rst}\n> .. dropdown::  Probability constraints\n>\n>     .. 
code-block:: python\n>\n>         constraints = [{\"loc\": [\"x_1\", \"x_2\"], \"type\": \"probability\"}]\n>\n>     No solution available.\n> ```\n>\n> ```{eval-rst}\n> .. dropdown::  Increasing constraints\n>\n>     .. code-block:: python\n>\n>         constraints = [{\"loc\": [\"x_2\", \"x_3\"], \"type\": \"increasing\"}]\n>\n>     Not binding :math:`\\rightarrow x* = (1, 1, 1)`\n>\n> ```\n>\n> ```{eval-rst}\n> .. dropdown::  Decreasing constraints\n>\n>     .. code-block:: python\n>\n>         constraints = [{\"loc\": [\"x_1\", \"x_2\"], \"type\": \"decreasing\"}]\n>\n>     Not binding :math:`\\rightarrow x* = (1, 1, 1)`\n> ```\n>\n> ```{eval-rst}\n> .. dropdown::  Equality constraints\n>\n>     .. code-block:: python\n>\n>         constraints = [{\"loc\": [\"x_1\", \"x_2\", \"x_3\"], \"type\": \"equality\"}]\n>\n>     Not binding :math:`\\rightarrow x* = (1, 1, 1)`\n> ```\n>\n> ```{eval-rst}\n> .. dropdown::  Pairwise equality constraints\n>\n>     .. code-block:: python\n>\n>         constraints = [{\"locs\": [\"x_1\", \"x_2\"], \"type\": \"pairwise_equality\"}]\n>\n>     Not binding :math:`\\rightarrow x* = (1, 1, 1)`\n> ```\n>\n> ```{eval-rst}\n> .. dropdown::  Covariance constraints\n>\n>     .. code-block:: python\n>\n>         constraints = [{\"loc\": [\"x_1\", \"x_2\", \"x_3\"], \"type\": \"covariance\"}]\n>\n>     Not binding :math:`\\rightarrow x* = (1, 1, 1)`\n> ```\n>\n> ```{eval-rst}\n> .. dropdown::  sdcorr constraints\n>\n>     .. code-block:: python\n>\n>         constraints = [{\"loc\": [\"x_1\", \"x_2\", \"x_3\"], \"type\": \"sdcorr\"}]\n>\n>     Not binding :math:`\\rightarrow x* = (1, 1, 1)`\n> ```\n>\n> ```{eval-rst}\n> .. dropdown::  Linear constraints\n>\n>     .. code-block:: python\n>\n>         constraints = [{\"loc\": [\"x_1\", \"x_2\"], \"type\": \"linear\", \"weights\": [1, 2], \"value\": 4}]\n>\n>     No solution available.\n> ```\n\n[this how-to guide]: ../how_to/how_to_constraints.md\n"
  },
  {
    "path": "docs/source/explanation/why_optimization_is_hard.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"# Why optimization is difficult\\n\",\n    \"\\n\",\n    \"This tutorial shows why optimization is difficult and why you need some knowledge in order to solve optimization problems efficiently. It is meant for people who have no previous experience with numerical optimization and wonder why there are so many optimization algorithms and still none that works for all problems. For each potential problem we highlight, we also give some ideas on how to solve it. \\n\",\n    \"\\n\",\n    \"\\n\",\n    \"If you simply want to learn the mechanics of doing optimization with optimagic, check out the [quickstart guide](../tutorials/optimization_overview.ipynb)\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"The take-home message of this notebook can be summarized as follows:\\n\",\n    \"\\n\",\n    \"- The only algorithms that are guaranteed to solve all problems are grid search or other algorithms that evaluate the criterion function almost everywhere in the parameter space.\\n\",\n    \"- If you have more than a hand full of parameters, these methods would take too long.\\n\",\n    \"- Thus, you have to know the properties of your optimization problem and have knowledge about different optimization algorithms in order to choose the right algorithm for your problem. 
\\n\",\n    \"\\n\",\n    \"This tutorial uses variants of the sphere function from the [quickstart guide](../tutorials/optimization_overview.ipynb).\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"import numpy as np\\n\",\n    \"import seaborn as sns\\n\",\n    \"\\n\",\n    \"import optimagic as om\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"def sphere(x):\\n\",\n    \"    return x @ x\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"def sphere_gradient(x):\\n\",\n    \"    return 2 * x\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Why grid search is infeasible\\n\",\n    \"\\n\",\n    \"Sampling based optimizers and grid search require the parameter space to be bounded in all directions. Let's assume we know that the optimum of the sphere function lies between -0.5 and 0.5, but don't know where it is exactly. \\n\",\n    \"\\n\",\n    \"In order to get a precision of 2 digits with grid search, we require the following number of function evaluations (depending on the number of parameters):\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"dimensions = np.arange(10) + 1\\n\",\n    \"n_evals = 100**dimensions\\n\",\n    \"sns.lineplot(x=dimensions, y=n_evals);\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"If you have 10 dimensions and evaluating your criterion function takes one second, you need about 3 billion years on a 1000 core cluster. Many of the real world criterion functions have hundreds of parameters and take minutes to evaluate once. 
This is called the curse of dimensionality.\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"Sampling based algorithms typically fix the number of criterion evaluations and apply them a bit smarter than algorithms that rummage the search space randomly. However, these smart tricks only work under additional assumptions. Thus, either you need to make assumptions on your problem or you will get the curse of dimensionality through the backdoor again. For easier analysis, assume we fix the number of function evaluations in a grid search instead of a sampling based algorithm and want to know which precision we can get, depending on the dimension:\\n\",\n    \"\\n\",\n    \"For 1 million function evaluations, we can expect the following precision:\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"dimensions = np.arange(10) + 1\\n\",\n    \"precision = 1e-6 ** (1 / dimensions)\\n\",\n    \"sns.lineplot(x=dimensions, y=precision);\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## How derivatives can solve the curse of dimensionality\\n\",\n    \"\\n\",\n    \"Derivative based methods do not try to evaluate the criterion function everywhere in the search space. Instead, they start at some point and go \\\"downhill\\\" from there. The gradient of the criterion function indicates which direction is downhill. Then there are different ways of determining how far to go in that direction. The time it takes to evaluate a derivative increases at most linearly in the number of parameters. 
Using the derivative information, optimizers can often find an optimum with very few function evaluations.\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## How derivative based methods can fail\\n\",\n    \"\\n\",\n    \"To see how derivative based methods can fail, we use simple modifications of the sphere function. \"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"rng = np.random.default_rng(seed=0)\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"def sphere_with_noise(x, rng):\\n\",\n    \"    return sphere(x) + rng.normal(scale=0.02)\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"start_params = np.arange(5)\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"grid = np.linspace(-1, 1, 1000)\\n\",\n    \"sns.lineplot(\\n\",\n    \"    x=grid,\\n\",\n    \"    y=(grid**2) + rng.normal(scale=0.02, size=len(grid)),\\n\",\n    \");\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"res = om.minimize(\\n\",\n    \"    fun=sphere_with_noise,\\n\",\n    \"    params=start_params,\\n\",\n    \"    algorithm=\\\"scipy_lbfgsb\\\",\\n\",\n    \"    logging=False,\\n\",\n    \"    fun_kwargs={\\\"rng\\\": rng},\\n\",\n    \")\\n\",\n    \"\\n\",\n    \"res.success\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"res.params\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   
\"source\": [\n    \"res.message\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"So the algorithm failed, but at least tells you that it did not succed. Let's look at a different kind of numerical noise that could come from rounding. \"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"def piecewise_constant_sphere(x):\\n\",\n    \"    return sphere(x.round(2))\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"sns.lineplot(x=grid, y=grid.round(2) ** 2);\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"res = om.minimize(\\n\",\n    \"    fun=piecewise_constant_sphere,\\n\",\n    \"    params=start_params,\\n\",\n    \"    algorithm=\\\"scipy_lbfgsb\\\",\\n\",\n    \")\\n\",\n    \"\\n\",\n    \"res\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"This time, the algorithm failed silently.\"\n   ]\n  }\n ],\n \"metadata\": {\n  \"kernelspec\": {\n   \"display_name\": \"Python 3 (ipykernel)\",\n   \"language\": \"python\",\n   \"name\": \"python3\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 3\n   },\n   \"file_extension\": \".py\",\n   \"mimetype\": \"text/x-python\",\n   \"name\": \"python\",\n   \"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": \"ipython3\",\n   \"version\": \"3.10.14\"\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 4\n}\n"
  },
  {
    "path": "docs/source/how_to/how_to_add_optimizers.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {\n    \"vscode\": {\n     \"languageId\": \"plaintext\"\n    }\n   },\n   \"source\": [\n    \"# How to add optimizers to optimagic\\n\",\n    \"\\n\",\n    \"This is a hands-on guide that shows you how to use custom optimizers with optimagic or\\n\",\n    \"how to contribute an optimizer to the optimagic library.\\n\",\n    \"\\n\",\n    \"We have many [examples of optimizers](https://github.com/optimagic-dev/optimagic/tree/main/src/optimagic/optimizers) that are already part of optimagic and you can learn a lot from looking at \\n\",\n    \"those. However, only looking at the final results might be a bit intimidating and does\\n\",\n    \"not show the process of exploring a new optimizer library and gradually developing a \\n\",\n    \"wrapper. \\n\",\n    \"\\n\",\n    \"This guide is there to fill the gap. It tells the story of how the `pygmo_gaco`\\n\",\n    \"optimizer was added to optimagic by someone who was unfamiliar with pygmo or the \\n\",\n    \"gaco algorithm. \\n\",\n    \"\\n\",\n    \"The steps of adding an algorithm are roughly as follows:\\n\",\n    \"\\n\",\n    \"1. **Understand how to use the algorithm**: Play around with the algorithm you want to \\n\",\n    \"add in a notebook and solve some simple problems with it. Only move on to the next step \\n\",\n    \"after you have a solid understanding. This is completely unrelated to optimagic and only\\n\",\n    \"about he algorithm implementation you want to wrap. \\n\",\n    \"2. **Understand how the algorithm works**: Read documentation,\\n\",\n    \"research papers and other resources to find out why this algorithm was created and what \\n\",\n    \"problems it is supposed to solve really well. \\n\",\n    \"3. **Implement the minimal wrapper**: Learn about the `om.mark.minimizer` decorator as \\n\",\n    \"well as the `om.InternalOptimizationProblem` and the `om.Algorithm` classes. 
Implement a \\n\",\n    \"minimal version of your wrapper and test it.\\n\",\n    \"4. **Complete and refactor the wrapper**: Make sure that all convergence criteria, \\n\",\n    \"stopping criteria, and tuning parameters the algorithm supports can be passed to your \\n\",\n    \"wrapper. Also check that the algorithm gets everything it needs to achieve maximum \\n\",\n    \"performance (e.g. closed form derivatives and batch function evaluators). Now is also \\n\",\n    \"the time to clean-up and refactor your code, especially if you wrap multiple optimizers \\n\",\n    \"from a library.\\n\",\n    \"5. **Align the wrapper with optimagic conventions**: Use harmonized names wherever \\n\",\n    \"a convention exists. Think about good names everywhere else. Set stopping criteria \\n\",\n    \"similar to other optimizers and try to adhere to our [design philosophy](style_guide) \\n\",\n    \"when it comes to tuning parameters. \\n\",\n    \"6. **Integrate your code into optimagic**: Learn how to add an optional dependency to \\n\",\n    \"optimagic, where you need to put your code and how to add tests and documentation. \\n\",\n    \"\\n\",\n    \"\\n\",\n    \"## Gen AI Policy \\n\",\n    \"\\n\",\n    \"It is ok to use GenAI and AI based coding assistants to speed up the process of adding \\n\",\n    \"an optimizer to optimagic. They can be very useful for step 1 and 2. However, AI models \\n\",\n    \"often fail completely when filling out the arguments of `om.mark.minimizer`, when you \\n\",\n    \"ask them to come up with good names for tuning parameters or when you auto-generate the \\n\",\n    \"documentation.  \\n\",\n    \"\\n\",\n    \"Even for step 1 and 2 you should not use an AI Model naively, but upload a paper or \\n\",\n    \"documentation page to provide context to the AI.\\n\",\n    \"\\n\",\n    \"Our policy is therefore:\\n\",\n    \"1. Only use AI for drafts that you double-check; Never rely on AI producing correct results \\n\",\n    \"2. 
Be transparent about your use of AI \\n\",\n    \"\\n\",\n    \"We will reject all Pull Requests that violate this policy. \"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## 1. Understand how to use the algorithm\\n\",\n    \"\\n\",\n    \"Understanding how to use an algorithm means that you are at least able to solve a \\n\",\n    \"simple optimization problem (like a sphere function or a rosenbrock function). \\n\",\n    \"\\n\",\n    \"The best starting point for this are usually tutorials or example notebooks from the \\n\",\n    \"documentation. An AI model can also be a good idea. \\n\",\n    \"\\n\",\n    \"The things you need to find out for any new algorithm are:\\n\",\n    \"\\n\",\n    \"1. How to code up the objective function \\n\",\n    \"2. How to run an optimization at default values\\n\",\n    \"3. How to pass tuning parameters \\n\",\n    \"4. How to pass bounds, constraints, derivatives, batch evaluators, etc. \\n\",\n    \"5. How to get results back from the optimizer\\n\",\n    \"\\n\",\n    \"### Objective functions in pygmo\\n\",\n    \"\\n\",\n    \"To add pygmo_gaco, let's start by looking at the pygmo [tutorials](https://esa.github.io/pygmo2/tutorials/tutorials.html). Objective functions are coded up via the Problem class. 
We skip using [pre-defined problems](https://esa.github.io/pygmo2/tutorials/using_problem.html) because they will not help us and directly go to [user defined problems](https://esa.github.io/pygmo2/tutorials/coding_udp_simple.html).\\n\",\n    \"\\n\",\n    \"The following is copied from the documentation:\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"import numpy as np\\n\",\n    \"import pygmo as pg\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"class sphere_function:\\n\",\n    \"    def fitness(self, x):\\n\",\n    \"        return [sum(x * x)]\\n\",\n    \"\\n\",\n    \"    def get_bounds(self):\\n\",\n    \"        return ([-1, -1], [1, 1])\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"prob = pg.problem(sphere_function())\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"This looks simple enough. No subclassing is required, `fitness` implements the objective\\n\",\n    \"function, which returns the objective value as a list of a scalar and `get_bounds` returns \\n\",\n    \"the bounds. We can immediately see how we would adjust this for any scalar objective \\n\",\n    \"function. 
\\n\",\n    \"\\n\",\n    \"### How to run an optimization at default values\\n\",\n    \"\\n\",\n    \"After copy pasting from a few tutorials we find the following:\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"# The initial population\\n\",\n    \"pop = pg.population(prob, size=20)\\n\",\n    \"# The algorithm; ker needs to be at most the population size to avoid errors\\n\",\n    \"algo = pg.algorithm(pg.gaco(ker=20))\\n\",\n    \"# The actual optimization process\\n\",\n    \"pop = algo.evolve(pop)\\n\",\n    \"# Getting the best individual in the population\\n\",\n    \"best_fitness = pop.get_f()[pop.best_idx()]\\n\",\n    \"print(best_fitness)\\n\",\n    \"best_x = pop.get_x()[pop.best_idx()]\\n\",\n    \"print(np.round(best_x, 4))\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"It looks like the optimization worked, even though the precision is not great. The true optimal function value is 0 and the true optimal parameters are [0, 0]. But global algorithms like gaco are almost never precise, so this is good enough. \\n\",\n    \"\\n\",\n    \"We can also see that pygmo is really organized around concepts that are specific to genetic optimizers. Examples are `population` and `evolve`. The optimagic wrapper will hide the details (i.e. users don't have to create a population) but still allow full customization (the population size will be an algorithm specific option that can be set by the user).\\n\",\n    \"\\n\",\n    \"### How to pass tuning parameters\\n\",\n    \"\\n\",\n    \"We already saw in the previous step that tuning parameters like `ker` are passed when the \\n\",\n    \"algorithm is created. \\n\",\n    \"\\n\",\n    \"All supported tuning parameters of gaco are listed and described \\n\",\n    \"[here](https://esa.github.io/pygmo2/algorithms.html#pygmo.gaco). 
Unfortunately, the \\n\",\n    \"description is not great so we'll have to look into the [paper](https://digital.csic.es/bitstream/10261/54957/3/Extended_ant_colony_2009.pdf) for details. \\n\",\n    \"\\n\",\n    \"\\n\",\n    \"### How to pass bounds, constraints, derivatives, batch evaluators, etc. \\n\",\n    \"\\n\",\n    \"- We already saw how to pass bounds via the Problem class \\n\",\n    \"- gaco does not support any other constraints, so we don't need to pass them \\n\",\n    \"- gaco is derivative free, so we don't need to pass derivatives \\n\",\n    \"- gaco can parallelize, so we need to find out how to pass a batch version of the \\n\",\n    \"objective function\\n\",\n    \"\\n\",\n    \"After searching around in the pygmo documentation, we find out that our Problem needs to \\n\",\n    \"be extended with a [`batch_fitness`](https://esa.github.io/pygmo2/problem.html#pygmo.problem.batch_fitness)\\n\",\n    \"and our algorithm needs to know about [`pg.bfe()`](https://esa.github.io/pygmo2/bfe.html).\\n\",\n    \"In our previous example it will look like this:\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"import numpy as np\\n\",\n    \"import pygmo as pg\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"class sphere_function:\\n\",\n    \"    def fitness(self, x):\\n\",\n    \"        return [sum(x * x)]\\n\",\n    \"\\n\",\n    \"    def get_bounds(self):\\n\",\n    \"        return ([-1, -1], [1, 1])\\n\",\n    \"\\n\",\n    \"    # dvs represents a batch of parameter vectors at which the objective function is\\n\",\n    \"    # evaluated. 
However it is stored in an unintuitive format that needs to be reshaped\\n\",\n    \"    # to get at the actual parameter vectors.\\n\",\n    \"    def batch_fitness(self, dvs):\\n\",\n    \"        dim = len(self.get_bounds()[0])\\n\",\n    \"        x_list = list(dvs.reshape(-1, dim))\\n\",\n    \"        # we don't actually need to parallelize to find out how batch evaluators work\\n\",\n    \"        # and optimagic will make it really easy to parallelize this later on.\\n\",\n    \"        eval_list = [self.fitness(x)[0] for x in x_list]\\n\",\n    \"        evals = np.array(eval_list)\\n\",\n    \"        return evals\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"prob = pg.problem(sphere_function())\\n\",\n    \"\\n\",\n    \"pop = pg.population(prob, size=20)\\n\",\n    \"\\n\",\n    \"# creating the algorithm now requires 3 steps\\n\",\n    \"pygmo_uda = pg.gaco(ker=20)\\n\",\n    \"pygmo_uda.set_bfe(pg.bfe())\\n\",\n    \"algo = pg.algorithm(pygmo_uda)\\n\",\n    \"\\n\",\n    \"pop = algo.evolve(pop)\\n\",\n    \"best_fitness = pop.get_f()[pop.best_idx()]\\n\",\n    \"print(best_fitness)\\n\",\n    \"best_x = pop.get_x()[pop.best_idx()]\\n\",\n    \"print(np.round(best_x, 4))\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"For this how-to guide we leave it at this basic exploration of the pygmo library. If you actually contributed an optimizer to optimagic, you would have to explore much more and document your exploration to convince us that you understand the library you wrap in detail. 
\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"### How to get results back \\n\",\n    \"\\n\",\n    \"The results are stored as part of the evolved population\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"print(\\\"Best function value: \\\", pop.get_f()[pop.best_idx()][0])\\n\",\n    \"print(\\\"Best parameters: \\\", pop.get_x()[pop.best_idx()])\\n\",\n    \"print(\\\"Number of function evaluations: \\\", pop.problem.get_fevals())\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## 2. Understand how the algorithm works\\n\",\n    \"\\n\",\n    \"Here we want to find out as much as possible about the algorithm. Common questions \\n\",\n    \"that should be answered are:\\n\",\n    \"- For which kind of problems and situations was it designed?\\n\",\n    \"- How does it work (intuitively)?\\n\",\n    \"- Are there any papers, blogposts or other sources of information on the algorithm? \\n\",\n    \"- Which tuning parameters does it have and what do they mean? \\n\",\n    \"- Are there known limitations? \\n\",\n    \"\\n\",\n    \"### For which kind of problems and situations was it designed \\n\",\n    \"\\n\",\n    \"gaco is a global optimizer that does not use derivative information. It should not be\\n\",\n    \"used if you only need a local optimum or if you have derivatives. Other algorithms would \\n\",\n    \"be more efficient and more precise there. \\n\",\n    \"\\n\",\n    \"Since gaco can evaluate the objective function in parallel it is designed for problems \\n\",\n    \"with expensive objective functions. \\n\",\n    \"\\n\",\n    \"\\n\",\n    \"### How does it work (intuitively)\\n\",\n    \"\\n\",\n    \"Ant colony optimization is a class of optimization algorithms modeled on the\\n\",\n    \"actions of an ant colony. Artificial \\\"ants\\\" (e.g. 
simulation agents) locate\\n\",\n    \"optimal solutions by moving through a parameter space representing all\\n\",\n    \"possible solutions. Real ants lay down pheromones directing each other to\\n\",\n    \"resources while exploring their environment. The simulated \\\"ants\\\" similarly\\n\",\n    \"record their positions and the quality of their solutions, so that in later\\n\",\n    \"simulation iterations more ants locate better solutions.\\n\",\n    \"\\n\",\n    \"The generalized ant colony algorithm generates future generations of ants by\\n\",\n    \"using a multi-kernel gaussian distribution based on three parameters (i.e.,\\n\",\n    \"pheromone values) which are computed depending on the quality of each\\n\",\n    \"previous solution. The solutions are ranked through an oracle penalty\\n\",\n    \"method.\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"### Are there any papers, blogposts or other sources of information on the algorithm? \\n\",\n    \"\\n\",\n    \"gaco was proposed in M. Schlueter, et al. (2009). Extended ant colony optimization for \\n\",\n    \"non-convex mixed integer non-linear programming. Computers & Operations Research.\\n\",\n    \"\\n\",\n    \"See [here](https://digital.csic.es/bitstream/10261/54957/3/Extended_ant_colony_2009.pdf) for a free pdf. \\n\",\n    \"\\n\",\n    \"### Which tuning parameters does it have and what do they mean? \\n\",\n    \"\\n\",\n    \"The following is not just copied from the documentation but extended by reading the\\n\",\n    \"paper. It is super important to provide as much information as possible for every \\n\",\n    \"tuning parameter: \\n\",\n    \"\\n\",\n    \"- gen (int): number of generations.\\n\",\n    \"- ker (int): number of solutions stored in the solution archive. Must be <= the population\\n\",\n    \"    size. \\n\",\n    \"- q (float): convergence speed parameter. This parameter manages the convergence speed\\n\",\n    \"    towards the found minima (the smaller the faster). 
It must be positive and can be\\n\",\n    \"    larger than 1. The default is 1.0 until **threshold** is reached. Then it\\n\",\n    \"    is set to 0.01.\\n\",\n    \"- oracle (float): oracle parameter used in the penalty method.\\n\",\n    \"- acc (float): accuracy parameter for maintaining a minimum penalty\\n\",\n    \"    function's values distances.\\n\",\n    \"- threshold (int): when the iteration counter reaches the threshold the\\n\",\n    \"    convergence speed is set to 0.01 automatically. To deactivate this effect\\n\",\n    \"    set the threshold to stopping.maxiter which is the largest allowed\\n\",\n    \"    value.\\n\",\n    \"- n_gen_mark (int): parameter that determines the convergence speed of the standard \\n\",\n    \"    deviations. This must be an integer.\\n\",\n    \"- impstop (int): if a positive integer is assigned here, the algorithm will count the \\n\",\n    \"    runs without improvements, if this number exceeds the given value, the algorithm \\n\",\n    \"    will be stopped.\\n\",\n    \"- evalstop (int): maximum number of function evaluations.\\n\",\n    \"- focus (float): this parameter makes the search for the optimum greedier\\n\",\n    \"    and more focused on local improvements (the higher the greedier). If the\\n\",\n    \"    value is very high, the search is more focused around the current best\\n\",\n    \"    solutions. Values larger than 1 are allowed.\\n\",\n    \"- memory (bool): if True, memory is activated in the algorithm for multiple calls.\\n\",\n    \"- seed (int): seed used by the internal random number generator (default is random).\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"### Are there known limitations \\n\",\n    \"\\n\",\n    \"No. \\n\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## 3. 
Implement the minimal wrapper\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"### Learn the relevant functions and classes\\n\",\n    \"\\n\",\n    \"Before you implement a minimal wrapper, you need to familiarize yourself with a few\\n\",\n    \"important [classes and functions](functions_and_classes_for_internal_optimizers) \\n\",\n    \"you will need. \\n\",\n    \"\\n\",\n    \"- The `mark.minimizer` decorator \\n\",\n    \"- The `Algorithm` class \\n\",\n    \"- The `InternalOptimizationProblem` class \\n\",\n    \"- The `InternalOptimizeResult` class \\n\",\n    \"\\n\",\n    \"**Your task will be to subclass `Algorithm`. Your subclass must be decorated with\\n\",\n    \"`mark.minimizer` and override `Algorithm._solve_internal_problem`. `_solve_internal_problem`\\n\",\n    \"takes an `InternalOptimizationProblem` and returns an `InternalOptimizeResult`**\\n\",\n    \"\\n\",\n    \"```{note}\\n\",\n    \"Users of optimagic never create instances of `InternalOptimizationProblem` nor \\n\",\n    \"do they call the `_solve_internal_problem` methods of algorithms. Instead they call \\n\",\n    \"`minimize` or `maximize` which are much more convenient and flexible. \\n\",\n    \"\\n\",\n    \"`minimize` and `maximize` will then create an `InternalOptimizationProblem` from the \\n\",\n    \"user's inputs, call the `_solve_internal_problem` method and postprocess it to create an \\n\",\n    \"OptimizeResult. \\n\",\n    \"\\n\",\n    \"To summarize: The public `minimize` interface is optimized for user-friendliness. The \\n\",\n    \"`InternalOptimizationProblem` is optimized for easy wrapping of external libraries. \\n\",\n    \"```\\n\",\n    \"\\n\",\n    \"Below we define a heavily commented minimal version of a wrapper for pygmo's gaco \\n\",\n    \"algorithm. We stay as close as possible to the pygmo examples we have worked with \\n\",\n    \"before and ignore most tuning parameters for now. 
\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"### Write the minimal implementation\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"from dataclasses import dataclass\\n\",\n    \"\\n\",\n    \"from numpy.typing import NDArray\\n\",\n    \"\\n\",\n    \"import optimagic as om\\n\",\n    \"from optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult\\n\",\n    \"from optimagic.optimization.internal_optimization_problem import (\\n\",\n    \"    InternalOptimizationProblem,\\n\",\n    \")\\n\",\n    \"from optimagic.typing import AggregationLevel, PositiveInt\\n\",\n    \"\\n\",\n    \"try:\\n\",\n    \"    import pygmo as pg\\n\",\n    \"\\n\",\n    \"    IS_PYGMO_INSTALLED = True\\n\",\n    \"except ImportError:\\n\",\n    \"    IS_PYGMO_INSTALLED = False\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"@om.mark.minimizer(\\n\",\n    \"    # you can pick the name; convention is lowercase with underscores\\n\",\n    \"    name=\\\"pygmo_gaco\\\",\\n\",\n    \"    # the type of problem this optimizer can solve -> scalar problems; Other optimizers\\n\",\n    \"    # solve likelihood or least_squares problems.\\n\",\n    \"    solver_type=AggregationLevel.SCALAR,\\n\",\n    \"    # is the optimizer available? -> only if pygmo is installed\\n\",\n    \"    is_available=IS_PYGMO_INSTALLED,\\n\",\n    \"    # is the optimizer a global optimizer? -> yes\\n\",\n    \"    is_global=True,\\n\",\n    \"    # does the optimizer need the jacobian? -> no, gaco is derivative free\\n\",\n    \"    needs_jac=False,\\n\",\n    \"    # does the optimizer need the hessian? -> no, gaco is derivative free\\n\",\n    \"    needs_hess=False,\\n\",\n    \"    # does the optimizer support parallelism? -> yes\\n\",\n    \"    supports_parallelism=True,\\n\",\n    \"    # does the optimizer support bounds? 
-> yes\\n\",\n    \"    supports_bounds=True,\\n\",\n    \"    # does the optimizer support linear constraints? -> no\\n\",\n    \"    supports_linear_constraints=False,\\n\",\n    \"    # does the optimizer support nonlinear constraints? -> no\\n\",\n    \"    supports_nonlinear_constraints=False,\\n\",\n    \"    # should the history be disabled? -> no\\n\",\n    \"    disable_history=False,\\n\",\n    \")\\n\",\n    \"# All algorithms need to be frozen dataclasses.\\n\",\n    \"@dataclass(frozen=True)\\n\",\n    \"class PygmoGaco(Algorithm):\\n\",\n    \"    # for now only set one parameter to get things running. The rest will come later.\\n\",\n    \"    stopping_maxiter: PositiveInt = 1000\\n\",\n    \"    n_cores: int = 1\\n\",\n    \"\\n\",\n    \"    def _solve_internal_problem(\\n\",\n    \"        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\\n\",\n    \"    ) -> InternalOptimizeResult:\\n\",\n    \"        # create a pygmo problem from the internal optimization problem\\n\",\n    \"        # This is just slightly more abstract than before and actually simpler because\\n\",\n    \"        # we have problem.batch_fun.\\n\",\n    \"\\n\",\n    \"        n_cores = self.n_cores\\n\",\n    \"\\n\",\n    \"        class PygmoProblem:\\n\",\n    \"            def fitness(self, x):\\n\",\n    \"                # problem.fun is not just the `fun` that was passed to `minimize` by\\n\",\n    \"                # the user. It is a wrapper around fun with added error handling,\\n\",\n    \"                # history collection, and reparametrization to enforce constraints.\\n\",\n    \"                # Moreover, it always works on flat numpy arrays as parameters and\\n\",\n    \"                # does not have additional arguments. 
The magic of optimagic is to\\n\",\n    \"                # create this internal `fun` from the user's `fun`, so you don't have\\n\",\n    \"                # to deal with constraints, weird parameter formats and similar when\\n\",\n    \"                # implementing the wrapper.\\n\",\n    \"                return [problem.fun(x)]\\n\",\n    \"\\n\",\n    \"            def get_bounds(self):\\n\",\n    \"                # problem.bounds is not just the `bounds` that was passed to `minimize`\\n\",\n    \"                # by the user, which could have been a dictionary or some other non-flat\\n\",\n    \"                # format. `problem.bounds` always contains flat arrays with lower and\\n\",\n    \"                # upper bounds because this makes it easy to write wrappers.\\n\",\n    \"                return (problem.bounds.lower, problem.bounds.upper)\\n\",\n    \"\\n\",\n    \"            def batch_fitness(self, dvs):\\n\",\n    \"                # The processing of dvs is pygmo specific.\\n\",\n    \"                dim = len(self.get_bounds()[0])\\n\",\n    \"                x_list = list(dvs.reshape(-1, dim))\\n\",\n    \"                # problem.batch_fun is a parallelized version of problem.fun.\\n\",\n    \"                eval_list = problem.batch_fun(x_list, n_cores)\\n\",\n    \"                evals = np.array(eval_list)\\n\",\n    \"                return evals\\n\",\n    \"\\n\",\n    \"        prob = pg.problem(PygmoProblem())\\n\",\n    \"        pop = pg.population(prob, size=20)\\n\",\n    \"        pygmo_uda = pg.gaco(ker=20)\\n\",\n    \"        pygmo_uda.set_bfe(pg.bfe())\\n\",\n    \"        algo = pg.algorithm(pygmo_uda)\\n\",\n    \"        pop = algo.evolve(pop)\\n\",\n    \"        best_fun = pop.get_f()[pop.best_idx()][0]\\n\",\n    \"        best_x = pop.get_x()[pop.best_idx()]\\n\",\n    \"        n_fun_evals = pop.problem.get_fevals()\\n\",\n    \"        # For now we only use a few fields of the InternalOptimizeResult.\\n\",\n   
 \"        out = InternalOptimizeResult(\\n\",\n    \"            x=best_x,\\n\",\n    \"            fun=best_fun,\\n\",\n    \"            n_fun_evals=n_fun_evals,\\n\",\n    \"        )\\n\",\n    \"        return out\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"### Test the minimal wrapper directly\\n\",\n    \"\\n\",\n    \"So now that we have a wrapper, what do we do with it? And how can we be sure it works?\\n\",\n    \"\\n\",\n    \"We'll first try it out directly with the `SphereExampleInternalOptimizationProblem`. \\n\",\n    \"This is only for debugging and testing purposes. A user would never create an \\n\",\n    \"InternalOptimizationProblem and call an algorithm with it. It's called \\\"Internal\\\" for \\n\",\n    \"a reason!\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"from optimagic.optimization.internal_optimization_problem import (\\n\",\n    \"    SphereExampleInternalOptimizationProblem,\\n\",\n    \")\\n\",\n    \"\\n\",\n    \"problem = SphereExampleInternalOptimizationProblem()\\n\",\n    \"\\n\",\n    \"gaco = PygmoGaco()\\n\",\n    \"\\n\",\n    \"result = gaco._solve_internal_problem(problem, x0=np.array([1.0, 1.0]))\\n\",\n    \"\\n\",\n    \"print(result.fun)\\n\",\n    \"print(result.x)\\n\",\n    \"print(result.n_fun_evals)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"### Use the minimal wrapper in minimize\\n\",\n    \"\\n\",\n    \"The internal testing gives us some confidence that the wrapper works correctly and would \\n\",\n    \"have been good for debugging if it didn't. 
But now we want to test the wrapper in the\\n\",\n    \"way it would be used later: via `minimize`\\n\",\n    \"\\n\",\n    \"With this we also get all the benefits of optimagic, from history collection and \\n\",\n    \"criterion plots to flexible parameter formats. \"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"res = om.minimize(\\n\",\n    \"    fun=lambda x: x @ x,\\n\",\n    \"    params=np.arange(5),\\n\",\n    \"    algorithm=PygmoGaco,\\n\",\n    \"    bounds=om.Bounds(lower=-np.ones(5), upper=np.ones(5)),\\n\",\n    \")\\n\",\n    \"\\n\",\n    \"om.criterion_plot(res, monotone=True)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## 4 Complete and refactor the wrapper\\n\",\n    \"\\n\",\n    \"To keep things simple, we left out almost all tuning parameters of the gaco algorithm \\n\",\n    \"when we wrote the minimal wrapper. \\n\",\n    \"\\n\",\n    \"Now it's time to add them. You can add them one by one and make sure nothing breaks by \\n\",\n    \"testing your wrapper after each change - both with the internal problem and via \\n\",\n    \"minimize. \\n\",\n    \"\\n\",\n    \"Moreover, our code looks quite messy currently. Despite being a minimal wrapper, the \\n\",\n    \"`_solve_internal_problem` method is quite long, unstructured and hard to read. \\n\",\n    \"\\n\",\n    \"The result of completing and refactoring the wrapper is too long to be repeated in the \\n\",\n    \"notebook. Instead you can look at the actual [implementation in optimagic](\\n\",\n    \"https://github.com/optimagic-dev/optimagic/blob/ba2678753587f91cea54de69ff76cb3dcb4257d4/src/optimagic/optimizers/pygmo_optimizers.py#L70)\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"The PygmoGaco class now contains all tuning parameters we identified in step 2 as\\n\",\n    \"dataclass fields. 
They all have very useful type-hints that don't just show whether\\n\",\n    \"a parameter is an int, str or float but also which values it can take (e.g. PositiveInt).\\n\",\n    \"\\n\",\n    \"`_solve_internal_problem` is now also much cleaner. It mainly maps our more descriptive \\n\",\n    \"names of tuning parameters to the old pygmo names and then calls a function called \\n\",\n    \"`_minimize_pygmo` that does all the heavy lifting and can be re-used for other pygmo \\n\",\n    \"optimizers. \\n\",\n    \"\\n\",\n    \"The arguments to `mark.minimizer` have not changed. They always need to be set correctly,\\n\",\n    \"even for minimal working examples. \"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## 5. Align the wrapper with optimagic conventions\\n\",\n    \"\\n\",\n    \"To make switching between different algorithms as simple as possible, we align the names \\n\",\n    \"of commonly used convergence and stopping criteria. We also align the default values for \\n\",\n    \"stopping and convergence criteria as much as possible. \\n\",\n    \"\\n\",\n    \"You can find the harmonized names and values [here](algo_options_docs). \\n\",\n    \"\\n\",\n    \"To align the names of other tuning parameters as much as possible with what is already \\n\",\n    \"there, simply have a look at the optimizers we already wrapped. For example, if you are \\n\",\n    \"wrapping a bfgs or lbfgs algorithm from some library, try to look at all existing wrappers \\n\",\n    \"of bfgs algorithms and use the same names for the same options. \\n\",\n    \"\\n\",\n    \"You can see what this means for the gaco algorithm [here](\\n\",\n    \"https://github.com/optimagic-dev/optimagic/blob/ba2678753587f91cea54de69ff76cb3dcb4257d4/src/optimagic/optimizers/pygmo_optimizers.py#L70)\\n\",\n    \"\\n\",\n    \"In the future we will provide much more extensive guidelines for harmonization. 
\\n\",\n    \"\\n\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": \"## 6. Integrate your code into optimagic\\n\\nSo far you could have worked in a Jupyter Notebook. Integrating your code into\\noptimagic only requires a few small changes:\\n\\n1. Add new dependencies to the `[tool.pixi.feature.test.dependencies]` section of\\n`pyproject.toml` and run `pixi install` to update the lock file. Then re-create the\\nenvironment to make sure that the environment is the same as we will use for continuous\\nintegration. If your dependencies don't work on all platforms (e.g. linux only packages),\\nskip this entire step and reach out to a core contributor for help.\\n2. Save the code for your algorithm wrapper in a .py file in `optimagic.algorithms`.\\nUse an existing file if you wrap another algorithm from a library we already had.\\nOtherwise, create a new file.\\n3. Run `pre-commit run --all-files`. This will trigger an automatic code generation\\nthat fully integrates your wrapper into our algorithm selection tool.\\n4. Run `pytest`. This will run at least a few tests for your new algorithm. Add more\\ntests for algorithm specific things (e.g. tests that make sure tuning parameters have\\nthe intended effects).\\n5. Write documentation. The documentation should contain everything you figured out in\\nstep 2. You can either write it into the docstring of your algorithm class (preferred,\\nas this is what we will do for all algorithms in the long run) or in `algorithms.md`\\nin the documentation.\\n6. 
Create a pull request [in the optimagic repository](https://github.com/optimagic-dev/optimagic)\\nand ask for a review.\"\n  }\n ],\n \"metadata\": {\n  \"kernelspec\": {\n   \"display_name\": \"optimagic\",\n   \"language\": \"python\",\n   \"name\": \"python3\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 3\n   },\n   \"file_extension\": \".py\",\n   \"mimetype\": \"text/x-python\",\n   \"name\": \"python\",\n   \"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": \"ipython3\",\n   \"version\": \"3.10.15\"\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 2\n}\n"
  },
  {
    "path": "docs/source/how_to/how_to_algorithm_selection.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"(how-to-select-algorithms)=\\n\",\n    \"# How to select a local optimizer\\n\",\n    \"\\n\",\n    \"This guide explains how to choose a local optimizer that works well for your problem. \\n\",\n    \"Depending on your [strategy for global optimization](how_to_globalization.ipynb) it \\n\",\n    \"is also relevant for global optimization problems. \\n\",\n    \"\\n\",\n    \"## Important facts \\n\",\n    \"\\n\",\n    \"- There is no optimizer that works well for all problems \\n\",\n    \"- Making the right choice can lead to enormous speedups\\n\",\n    \"- Making the wrong choice can mean that you [don't solve your problem at all](algo-selection-how-important). Sometimes,\\n\",\n    \"optimizers fail silently!\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"## The three steps for selecting algorithms\\n\",\n    \"\\n\",\n    \"Algorithm selection is a mix of theory and experimentation. We recommend the following \\n\",\n    \"steps:\\n\",\n    \"\\n\",\n    \"1. **Theory**: Based on the properties of your problem, start with 3 to 5 candidate algorithms. \\n\",\n    \"You may use the decision tree below.\\n\",\n    \"2. **Experiments**: Run the candidate algorithms for a small number of function \\n\",\n    \"evaluations and compare the results in a *criterion plot*. As a rule of thumb, use \\n\",\n    \"between `n_params` and `10 * n_params` evaluations. \\n\",\n    \"3. **Optimization**: Re-run the algorithm with the best results until \\n\",\n    \"convergence. Use the best parameter vector from the experiments as start parameters.\\n\",\n    \"\\n\",\n    \"We will walk you through the steps in an [example](algo-selection-example-problem)\\n\",\n    \"below. 
These steps work well for most problems but sometimes you need \\n\",\n    \"[variations](algo-selection-steps-variations).\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"## A decision tree \\n\",\n    \"\\n\",\n    \"This is a practical guide for narrowing down the set of algorithms to experiment with:\\n\",\n    \"\\n\",\n    \"```{mermaid}\\n\",\n    \"graph LR\\n\",\n    \"    classDef highlight fill:#FF4500;\\n\",\n    \"    A[\\\"Do you have<br/>nonlinear<br/>constraints?\\\"] -- yes --> B[\\\"differentiable?\\\"]\\n\",\n    \"    B[\\\"Is your objective function differentiable?\\\"] -- yes --> C[\\\"ipopt<br/>nlopt_slsqp<br/>scipy_trust_constr\\\"]\\n\",\n    \"    B[\\\"differentiable?\\\"] -- no --> D[\\\"scipy_cobyla<br/>nlopt_cobyla\\\"]\\n\",\n    \"\\n\",\n    \"    A[\\\"Do you have<br/>nonlinear constraints?\\\"] -- no --> E[\\\"Can you exploit<br/>a least-squares<br/>structure?\\\"]\\n\",\n    \"    E[\\\"Can you exploit<br/>a least-squares<br/>structure?\\\"] -- yes --> F[\\\"differentiable?\\\"]\\n\",\n    \"    E[\\\"Can you exploit<br/>a least-squares<br/>structure?\\\"] -- no --> G[\\\"differentiable?\\\"]\\n\",\n    \"\\n\",\n    \"    F[\\\"differentiable?\\\"] -- yes --> H[\\\"scipy_ls_lm<br/>scipy_ls_trf<br/>scipy_ls_dogbox\\\"]\\n\",\n    \"    F[\\\"differentiable?\\\"] -- no --> I[\\\"nag_dfols<br/>pounders<br/>tao_pounders\\\"]\\n\",\n    \"\\n\",\n    \"    G[\\\"differentiable?\\\"] -- yes --> J[\\\"scipy_lbfgsb<br/>nlopt_lbfgsb<br/>fides\\\"]\\n\",\n    \"    G[\\\"differentiable?\\\"] -- no --> K[\\\"nlopt_bobyqa<br/>nlopt_neldermead<br/>neldermead_parallel\\\"]\\n\",\n    \"\\n\",\n    \"```\\n\",\n    \"\\n\",\n    \"Going through the different questions will give you a list of candidate algorithms. \\n\",\n    \"All algorithms in that list are designed for the same problem class but use different \\n\",\n    \"approaches to solve the problem. 
Which of them works best for your problem can only be \\n\",\n    \"found out through experimentation.\\n\",\n    \"\\n\",\n    \"```{note}\\n\",\n    \"Many books on numerical optimization focus strongly on the inner workings of algorithms.\\n\",\n    \"They will, for example, describe the difference between a trust-region algorithm and a \\n\",\n    \"line-search algorithm in a lot of detail. We have an [intuitive explanation](../explanation/explanation_of_numerical_optimizers.md) of this too. Understanding these details is important for configuring and\\n\",\n    \"troubleshooting optimizations, but not for algorithm selection. For example, if you have\\n\",\n    \"a scalar, differentiable problem without nonlinear constraints, the decision tree \\n\",\n    \"suggests `fides` and two variants of `lbfgsb`. `fides` is a trust-region algorithm, \\n\",\n    \"`lbfgsb` is a line-search algorithm. Both are designed to solve the same kinds of \\n\",\n    \"problems and which one works best needs to be found out through experimentation.\\n\",\n    \"```\\n\",\n    \"\\n\",\n    \"## Filtering algorithms \\n\",\n    \"\\n\",\n    \"An even more fine-grained version of the decision tree is built into optimagic's \\n\",\n    \"algorithm selection tool, which can filter algorithms based on the properties of \\n\",\n    \"your problem. To make this concrete, assume we are looking for a **local** optimizer for \\n\",\n    \"a **differentiable** problem with a **scalar** objective function and \\n\",\n    \"**bound constraints**. 
\\n\",\n    \"\\n\",\n    \"To find all algorithms that match our criteria, we can simply type:\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"import optimagic as om\\n\",\n    \"\\n\",\n    \"om.algos.Local.GradientBased.Scalar.Bounded.All\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"The available filters are: GradientBased, GradientFree, Global, Local, Bounded, \\n\",\n    \"LinearConstrained, NonlinearConstrained, Scalar, LeastSquares, Likelihood, and Parallel.\\n\",\n    \"You can apply them in any order your want. They are also discoverable, i.e. the \\n\",\n    \"autocomplete feature of your editor will show you all filters you can apply on top of \\n\",\n    \"your current selection.\\n\",\n    \"\\n\",\n    \"Using `.All` after applying filters shows you all algorithms optimagic knows of that \\n\",\n    \"satisfy your criteria. Some of them require optional dependencies. To show only the \\n\",\n    \"algorithms that are available with the packages you have currently installed, use \\n\",\n    \"`.Available` instead of `.All`.\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"An even more fine-grained way of filtering is described in [Filtering Algorithms Using Bounds](filtering_algorithms_using_bounds).\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"(algo-selection-example-problem)=\\n\",\n    \"\\n\",\n    \"## An example problem\\n\",\n    \"\\n\",\n    \"As an example we use the [Trid function](https://www.sfu.ca/~ssurjano/trid.html). The Trid function has no local minimum except \\n\",\n    \"the global one. It is defined for any number of dimensions, we will pick 20. As starting \\n\",\n    \"values we will pick the vector [0, 1, ..., 19]. 
\\n\",\n    \"\\n\",\n    \"A Python implementation of the function and its gradient looks like this:\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"import warnings\\n\",\n    \"\\n\",\n    \"warnings.filterwarnings(\\\"ignore\\\")\\n\",\n    \"\\n\",\n    \"import plotly.io as pio\\n\",\n    \"\\n\",\n    \"pio.renderers.default = \\\"notebook_connected\\\"\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"import numpy as np\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"def trid_scalar(x):\\n\",\n    \"    \\\"\\\"\\\"Implement Trid function: https://www.sfu.ca/~ssurjano/trid.html.\\\"\\\"\\\"\\n\",\n    \"    return ((x - 1) ** 2).sum() - (x[1:] * x[:-1]).sum()\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"def trid_gradient(x):\\n\",\n    \"    \\\"\\\"\\\"Calculate gradient of trid function.\\\"\\\"\\\"\\n\",\n    \"    l1 = np.insert(x, 0, 0)\\n\",\n    \"    l1 = np.delete(l1, [-1])\\n\",\n    \"    l2 = np.append(x, 0)\\n\",\n    \"    l2 = np.delete(l2, [0])\\n\",\n    \"    return 2 * (x - 1) - l1 - l2\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"### Step 1: Theory\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"Let's go through the decision tree for the Trid function:\\n\",\n    \"\\n\",\n    \"1. **No** nonlinear constraints our solution needs to satisfy\\n\",\n    \"2.  **No** least-squares structure we can exploit \\n\",\n    \"3.  **Yes**, the function is differentiable. We even have a closed form gradient that \\n\",\n    \"we would like to use. 
\\n\",\n    \"\\n\",\n    \"We therefore end up with the candidate algorithms `scipy_lbfgsb`, `nlopt_lbfgsb`, and \\n\",\n    \"`fides`.\\n\",\n    \"\\n\",\n    \"```{note}\\n\",\n    \"If your function is differentiable but you do not have a closed form gradient (yet), \\n\",\n    \"we suggest to use at least one gradient based optimizer and one gradient free optimizer.\\n\",\n    \"in your experiments. Optimagic will use numerical gradients in that case. For details, \\n\",\n    \"see [here](how_to_derivatives.ipynb).\\n\",\n    \"```\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"### Step 2: Experiments\\n\",\n    \"\\n\",\n    \"To find out which algorithms work well for our problem, we simply run optimizations with\\n\",\n    \"all candidate algorithms in a loop and store the result in a dictionary. We limit the \\n\",\n    \"number of function evaluations to 8. Since some algorithms only support a maximum number\\n\",\n    \"of iterations as stopping criterion we also limit the number of iterations to 8.\\n\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"results = {}\\n\",\n    \"for algo in [\\\"scipy_lbfgsb\\\", \\\"nlopt_lbfgsb\\\", \\\"fides\\\"]:\\n\",\n    \"    results[algo] = om.minimize(\\n\",\n    \"        fun=trid_scalar,\\n\",\n    \"        jac=trid_gradient,\\n\",\n    \"        params=np.arange(20),\\n\",\n    \"        algorithm=algo,\\n\",\n    \"        algo_options={\\\"stopping_maxfun\\\": 8, \\\"stopping_maxiter\\\": 8},\\n\",\n    \"    )\\n\",\n    \"\\n\",\n    \"fig = om.criterion_plot(results, max_evaluations=8)\\n\",\n    \"fig.show()\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"All optimizers work pretty well here and since this is a very simple problem, any of them \\n\",\n    \"would probably find the optimum in a reasonable time. 
However, `nlopt_lbfgsb` is a bit \\n\",\n    \"better than the others, so we will select it for the next step. In more difficult\\n\",\n    \"examples, the difference between optimizers can be much more pronounced.\\n\",\n    \"\\n\",\n    \"### Step 3: Optimization \\n\",\n    \"\\n\",\n    \"All that is left to do is to run the optimization until convergence with the best \\n\",\n    \"optimizer. To avoid duplicated calculations, we can already start from the previously \\n\",\n    \"best parameter vector:\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"best_x = results[\\\"nlopt_lbfgsb\\\"].params\\n\",\n    \"results[\\\"nlopt_lbfgsb_complete\\\"] = om.minimize(\\n\",\n    \"    fun=trid_scalar,\\n\",\n    \"    jac=trid_gradient,\\n\",\n    \"    params=best_x,\\n\",\n    \"    algorithm=\\\"nlopt_lbfgsb\\\",\\n\",\n    \")\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"Looking at the result in a criterion plot we can see that the optimizer converges after \\n\",\n    \"a bit more than 30 function evaluations. \"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"fig = om.criterion_plot(results)\\n\",\n    \"fig.show()\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"(algo-selection-steps-variations)=\\n\",\n    \"\\n\",\n    \"## Variations of the three steps\\n\",\n    \"\\n\",\n    \"The three steps described above work very well in most situations. However, sometimes \\n\",\n    \"it makes sense to deviate: \\n\",\n    \"\\n\",\n    \"- If you are unsure about some of the questions in step 1, select more algorithms for \\n\",\n    \"the experimentation phase and run more than 1 algorithm until convergence. 
\\n\",\n    \"- If it is very important to find a precise optimum, run more than 1 algorithm until \\n\",\n    \"convergence. \\n\",\n    \"- If you have a very fast objective function, simply run all candidate algorithms until \\n\",\n    \"convergence. \\n\",\n    \"- If you have a differentiable objective function but no closed form derivative, use \\n\",\n    \"at least one gradient based optimizer and one gradient free optimizer in the \\n\",\n    \"experiments. See [here](how_to_derivatives.ipynb) to learn more about derivatives.\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"(algo-selection-how-important)=\\n\",\n    \"\\n\",\n    \"## How important was it?\\n\",\n    \"\\n\",\n    \"The Trid function is differentiable and very well behaved in almost every aspect. \\n\",\n    \"Moreover, it has a very short runtime. One would think that any optimizer can find its \\n\",\n    \"optimum. So let's compare the selected optimizer with a few others:\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"results = {}\\n\",\n    \"for algo in [\\\"nlopt_lbfgsb\\\", \\\"scipy_neldermead\\\", \\\"scipy_cobyla\\\"]:\\n\",\n    \"    results[algo] = om.minimize(\\n\",\n    \"        fun=trid_scalar,\\n\",\n    \"        jac=trid_gradient,\\n\",\n    \"        params=np.arange(20),\\n\",\n    \"        algorithm=algo,\\n\",\n    \"    )\\n\",\n    \"\\n\",\n    \"fig = om.criterion_plot(results)\\n\",\n    \"fig.show()\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"We can see that our chosen optimizer solves the problem with less than 35 function \\n\",\n    \"evaluations. At this point, the two gradient-free optimizers have not yet made \\n\",\n    \"significant progress. CoByLA gets reasonably close to an optimum after about 4k \\n\",\n    \"evaluations. Nelder-Mead gets stuck after 8k evaluations and fails to solve the problem. 
\\n\",\n    \"\\n\",\n    \"This example shows not only that the choice of optimizer is important but that the commonly \\n\",\n    \"held belief that gradient free optimizers are generally more robust than gradient based \\n\",\n    \"ones is dangerous! The Nelder-Mead algorithm did \\\"converge\\\" and reports success, but\\n\",\n    \"did not find the optimum. It did not even get stuck in a local optimum because we know \\n\",\n    \"that the Trid function does not have local optima except the global one. It just got \\n\",\n    \"stuck somewhere. \"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"results[\\\"scipy_neldermead\\\"].success\"\n   ]\n  }\n ],\n \"metadata\": {\n  \"kernelspec\": {\n   \"display_name\": \"optimagic-docs\",\n   \"language\": \"python\",\n   \"name\": \"python3\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 3\n   },\n   \"file_extension\": \".py\",\n   \"mimetype\": \"text/x-python\",\n   \"name\": \"python\",\n   \"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": \"ipython3\",\n   \"version\": \"3.10.16\"\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 4\n}\n"
  },
  {
    "path": "docs/source/how_to/how_to_benchmarking.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"0\",\n   \"metadata\": {},\n   \"source\": [\n    \"# How to Benchmark Optimization Algorithms\\n\",\n    \"\\n\",\n    \"Benchmarking optimization algorithms is an important step when developing a new algorithm or when searching for an algorithm that is good at solving a particular problem. \\n\",\n    \"\\n\",\n    \"In general, benchmarking constists of the following steps:\\n\",\n    \"\\n\",\n    \"1. Define the test problems (or get pre-implemented ones)\\n\",\n    \"2. Define the optimization algorithms and the tuning parameters you want to try\\n\",\n    \"3. Run the benchmark\\n\",\n    \"4. Plot the results\\n\",\n    \"\\n\",\n    \"optimagic helps you with all of these steps!\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"1\",\n   \"metadata\": {},\n   \"source\": [\n    \"## 1. Get Test Problems\\n\",\n    \"\\n\",\n    \"optimagic includes the problems of [Moré and Wild (2009)](https://doi.org/10.1137/080724083) as well as [Cartis and Roberts](https://arxiv.org/abs/1710.11005).\\n\",\n    \"\\n\",\n    \"Each problem consist of the `inputs` (the criterion function and the start parameters) and the `solution` (the optimal parameters and criterion value) and optionally provides more information.\\n\",\n    \"\\n\",\n    \"Below we load a subset of the Moré and Wild problems and look at one particular Rosenbrock problem that has difficult start parameters.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"2\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"import plotly.io as pio\\n\",\n    \"\\n\",\n    \"pio.renderers.default = \\\"notebook_connected\\\"\\n\",\n    \"\\n\",\n    \"import optimagic as om\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"3\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"problems = 
om.get_benchmark_problems(\\\"example\\\")\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"4\",\n   \"metadata\": {},\n   \"source\": [\n    \"## 2. Specify the Optimizers\\n\",\n    \"\\n\",\n    \"To select optimizers you want to benchmark on the set of problems, you can simply specify them as a list. Advanced examples - that do not only compare algorithms but also vary the `algo_options` - can be found below. \"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"5\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"optimizers = [\\n\",\n    \"    \\\"nag_dfols\\\",\\n\",\n    \"    \\\"scipy_neldermead\\\",\\n\",\n    \"    \\\"scipy_truncated_newton\\\",\\n\",\n    \"]\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"6\",\n   \"metadata\": {},\n   \"source\": [\n    \"## 3. Run the Benchmark\\n\",\n    \"\\n\",\n    \"Once you have your problems and your optimizers set up, you can simply use `run_benchmark`. The results are a dictionary with one entry for each (problem, algorithm) combination. Each entry not only saves the solution but also the history of the algorithm's criterion and parameter history. \"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"7\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"results = om.run_benchmark(\\n\",\n    \"    problems,\\n\",\n    \"    optimizers,\\n\",\n    \")\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"8\",\n   \"metadata\": {},\n   \"source\": [\n    \"## 4a. Profile plots\\n\",\n    \"\\n\",\n    \"**Profile Plots** compare optimizers over a whole problem set. \\n\",\n    \"\\n\",\n    \"The literature distinguishes **data profiles** and **performance profiles**. Data profiles use a normalized runtime measure whereas performance profiles use an absolute one. The profile plot does not normalize runtime by default. 
To do this, simply set `normalize_runtime` to True. For background information, check [Moré and Wild (2009)](https://doi.org/10.1137/080724083). \"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"9\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"fig = om.profile_plot(\\n\",\n    \"    problems=problems,\\n\",\n    \"    results=results,\\n\",\n    \")\\n\",\n    \"\\n\",\n    \"fig.show()\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"10\",\n   \"metadata\": {},\n   \"source\": [\n    \":::{note}\\n\",\n    \"\\n\",\n    \"For details on using other plotting backends, see [How to change the plotting backend](how_to_change_plotting_backend.ipynb).\\n\",\n    \"\\n\",\n    \":::\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"11\",\n   \"metadata\": {},\n   \"source\": [\n    \"The x axis shows runtime per problem. The y axis shows the share of problems each algorithm solved within that runtime. Thus, higher and further to the left values are desirable. Higher means more problems were solved and further to the left means the algorithm found the solutions earlier. \\n\",\n    \"\\n\",\n    \"You can choose:\\n\",\n    \"\\n\",\n    \"- whether to use `n_evaluations` or `walltime` as **`runtime_measure`**\\n\",\n    \"- whether to normalize runtime such that the runtime of each problem is shown as a multiple of the fastest algorithm on that problem\\n\",\n    \"- how to determine when an evaluation is close enough to the optimum to be counted as converged. Convergence is always based on some measure of distance between the true solution and the solution found by an optimizer. Whether distance is measured in parameter space, function space, or a combination of both can be specified. 
\\n\",\n    \"\\n\",\n    \"Below, we consider a problem to be solved if the distance between the parameters found by the optimizer and the true solution parameters are at most 0.1% of the distance between the start parameters and true solution parameters. \"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"12\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"fig = om.profile_plot(\\n\",\n    \"    problems=problems,\\n\",\n    \"    results=results,\\n\",\n    \"    runtime_measure=\\\"n_evaluations\\\",\\n\",\n    \"    stopping_criterion=\\\"x\\\",\\n\",\n    \"    x_precision=0.001,\\n\",\n    \")\\n\",\n    \"\\n\",\n    \"fig.show()\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"13\",\n   \"metadata\": {},\n   \"source\": [\n    \"## 4b. Convergence plots\\n\",\n    \"\\n\",\n    \"**Convergence Plots** look at particular problems and show the convergence of each optimizer on each problem. \"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"14\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"fig = om.convergence_plot(\\n\",\n    \"    problems=problems,\\n\",\n    \"    results=results,\\n\",\n    \"    n_cols=2,\\n\",\n    \"    problem_subset=[\\\"rosenbrock_good_start\\\", \\\"box_3d\\\"],\\n\",\n    \")\\n\",\n    \"\\n\",\n    \"fig.show()\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"15\",\n   \"metadata\": {},\n   \"source\": [\n    \"The further to the left and the lower the curve of an algorithm, the better that algorithm performed.\\n\",\n    \"\\n\",\n    \"Often we are more interested in how close each algorithm got to the true solution in parameter space, not in criterion space as above. For this. we simply set the **`distance_measure`** to `parameter_space`. 
\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"16\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"fig = om.convergence_plot(\\n\",\n    \"    problems=problems,\\n\",\n    \"    results=results,\\n\",\n    \"    n_cols=2,\\n\",\n    \"    problem_subset=[\\\"rosenbrock_good_start\\\", \\\"box_3d\\\"],\\n\",\n    \"    distance_measure=\\\"parameter_distance\\\",\\n\",\n    \"    stopping_criterion=\\\"x\\\",\\n\",\n    \")\\n\",\n    \"\\n\",\n    \"fig.show()\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"17\",\n   \"metadata\": {},\n   \"source\": [\n    \"## 5a. Convergence report\\n\",\n    \"\\n\",\n    \"The **Convergence Report** shows for each problem and optimizer which problems the optimizer solved successfully, failed to do so, or where it stopped with an error. The respective strings are \\\"success\\\", \\\"failed\\\", or \\\"error\\\".\\n\",\n    \"Moreover, the last column of the ```pd.DataFrame``` displays the number of dimensions of the benchmark problem.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"18\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"df = om.convergence_report(\\n\",\n    \"    problems=problems,\\n\",\n    \"    results=results,\\n\",\n    \"    stopping_criterion=\\\"y\\\",\\n\",\n    \"    x_precision=1e-4,\\n\",\n    \"    y_precision=1e-4,\\n\",\n    \")\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"19\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"df\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"20\",\n   \"metadata\": {},\n   \"source\": [\n    \"## 5b. Rank report\\n\",\n    \"\\n\",\n    \"The **Rank Report** shows the ranks of the algorithms for each problem; where 0 means the algorithm was the fastest on a given benchmark problem, 1 means it was the second fastest and so on. 
If an algorithm did not converge on a problem, the value is \\\"failed\\\". If an algorithm did encounter an error during optimization, the value is \\\"error\\\".\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"21\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"df = om.rank_report(\\n\",\n    \"    problems=problems,\\n\",\n    \"    results=results,\\n\",\n    \"    runtime_measure=\\\"n_evaluations\\\",\\n\",\n    \"    stopping_criterion=\\\"y\\\",\\n\",\n    \"    x_precision=1e-4,\\n\",\n    \"    y_precision=1e-4,\\n\",\n    \")\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"22\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"df\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"23\",\n   \"metadata\": {},\n   \"source\": [\n    \"## 5c. Traceback report\\n\",\n    \"\\n\",\n    \"The **Traceback Report** shows the tracebacks returned by the optimizers if they encountered an error during optimization. 
The resulting ```pd.DataFrame``` is empty if none of the optimizers terminated with an error, as in the example below.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"24\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"df = om.traceback_report(problems=problems, results=results)\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"25\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"df\"\n   ]\n  }\n ],\n \"metadata\": {\n  \"kernelspec\": {\n   \"display_name\": \"Python 3 (ipykernel)\",\n   \"language\": \"python\",\n   \"name\": \"python3\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 3\n   },\n   \"file_extension\": \".py\",\n   \"mimetype\": \"text/x-python\",\n   \"name\": \"python\",\n   \"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": \"ipython3\",\n   \"version\": \"3.10.14\"\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 5\n}\n"
  },
  {
    "path": "docs/source/how_to/how_to_bounds.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"0\",\n   \"metadata\": {},\n   \"source\": [\n    \"(how-to-bounds)=\\n\",\n    \"\\n\",\n    \"# How to specify bounds\\n\",\n    \"\\n\",\n    \"## Constraints vs bounds \\n\",\n    \"\\n\",\n    \"optimagic distinguishes between bounds and constraints. Bounds are lower and upper bounds for parameters. In the literature, they are sometimes called box constraints. Examples for general constraints are linear constraints, probability constraints, or nonlinear constraints. You can find out more about general constraints in the next section on [How to specify constraints](how_to_constraints.md).\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"1\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Example objective function\\n\",\n    \"\\n\",\n    \"Let’s again look at the sphere function:\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"2\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"import numpy as np\\n\",\n    \"\\n\",\n    \"import optimagic as om\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"3\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"def fun(x):\\n\",\n    \"    return x @ x\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"4\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"res = om.minimize(fun=fun, params=np.arange(3), algorithm=\\\"scipy_lbfgsb\\\")\\n\",\n    \"res.params.round(5)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"5\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Array params\\n\",\n    \"\\n\",\n    \"For params that are a `numpy.ndarray`, one can specify the lower and/or upper-bounds as an array of the same length.\\n\",\n    \"\\n\",\n    \"**Lower bounds**\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   
\"execution_count\": null,\n   \"id\": \"6\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"res = om.minimize(\\n\",\n    \"    fun=fun,\\n\",\n    \"    params=np.arange(3),\\n\",\n    \"    bounds=om.Bounds(lower=np.ones(3)),\\n\",\n    \"    algorithm=\\\"scipy_lbfgsb\\\",\\n\",\n    \")\\n\",\n    \"res.params\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"7\",\n   \"metadata\": {},\n   \"source\": [\n    \"**Lower & upper-bounds**\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"8\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"res = om.minimize(\\n\",\n    \"    fun=fun,\\n\",\n    \"    params=np.arange(3),\\n\",\n    \"    algorithm=\\\"scipy_lbfgsb\\\",\\n\",\n    \"    bounds=om.Bounds(\\n\",\n    \"        lower=np.array([-2, -np.inf, 1]),\\n\",\n    \"        upper=np.array([-1, np.inf, np.inf]),\\n\",\n    \"    ),\\n\",\n    \")\\n\",\n    \"res.params\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"9\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Pytree params\\n\",\n    \"\\n\",\n    \"Now let's look at a case where params is a more general pytree. We also update the sphere function by adding an intercept. Since the criterion always decreases when decreasing the intercept, there is no unrestricted solution. 
Let's fix a lower bound only for the intercept.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"10\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"params = {\\\"x\\\": np.arange(3), \\\"intercept\\\": 3}\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"def fun(params):\\n\",\n    \"    return params[\\\"x\\\"] @ params[\\\"x\\\"] + params[\\\"intercept\\\"]\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"11\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"res = om.minimize(\\n\",\n    \"    fun=fun,\\n\",\n    \"    params=params,\\n\",\n    \"    algorithm=\\\"scipy_lbfgsb\\\",\\n\",\n    \"    bounds=om.Bounds(lower={\\\"intercept\\\": -2}),\\n\",\n    \")\\n\",\n    \"res.params\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"12\",\n   \"metadata\": {},\n   \"source\": [\n    \"optimagic tries to match the user provided bounds with the structure of params. This allows you to specify bounds for subtrees of params. In case your subtree specification results in an unidentified matching, optimagic will tell you so with an `InvalidBoundsError`.  \"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"13\",\n   \"metadata\": {},\n   \"source\": [\n    \"## params data frame\\n\",\n    \"\\n\",\n    \"It often makes sense to specify your parameters in a `pandas.DataFrame`, where you can utilize the multiindex for parameter naming. In this case, you can specify bounds as extra columns `lower_bound` and `upper_bound`.\\n\",\n    \"\\n\",\n    \"> **Note**\\n\",\n    \"> The columns are called `*_bound` instead of `*_bounds` like the argument passed to `minimize` or `maximize`. 
\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"14\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"import pandas as pd\\n\",\n    \"\\n\",\n    \"params = pd.DataFrame(\\n\",\n    \"    {\\\"value\\\": [0, 1, 2, 3], \\\"lower_bound\\\": [0, 1, 1, -2]},\\n\",\n    \"    index=pd.MultiIndex.from_tuples([(\\\"x\\\", k) for k in range(3)] + [(\\\"intercept\\\", 0)]),\\n\",\n    \")\\n\",\n    \"params\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"15\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"def fun(params):\\n\",\n    \"    x = params.loc[\\\"x\\\"][\\\"value\\\"].to_numpy()\\n\",\n    \"    intercept = params.loc[\\\"intercept\\\"][\\\"value\\\"].iloc[0]\\n\",\n    \"    value = x @ x + intercept\\n\",\n    \"    return float(value)\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"16\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"res = om.minimize(\\n\",\n    \"    fun,\\n\",\n    \"    params=params,\\n\",\n    \"    algorithm=\\\"scipy_lbfgsb\\\",\\n\",\n    \")\\n\",\n    \"res.params\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"17\",\n   \"metadata\": {},\n   \"source\": [\n    \"(filtering_algorithms_using_bounds)=\\n\",\n    \"\\n\",\n    \"## Filtering algorithms\\n\",\n    \"\\n\",\n    \"It is further possible to filter algorithms based on whether they support bounds, if bounds are required to run, and if infinite bounds are supported. The AlgoInfo class provides all information about the chosen algorithm, which can be accessed with algo.algo_info... . 
Suppose we are looking for an optimizer that supports bounds and strictly requires them for the algorithm to run properly.\\n\",\n    \"\\n\",\n    \"To find all algorithms that support bounds and cannot run without bounds, we can simply do:\\n\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"18\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"from optimagic.algorithms import AVAILABLE_ALGORITHMS\\n\",\n    \"\\n\",\n    \"algos_with_bounds_support = [\\n\",\n    \"    algo\\n\",\n    \"    for name, algo in AVAILABLE_ALGORITHMS.items()\\n\",\n    \"    if algo.algo_info.supports_bounds\\n\",\n    \"]\\n\",\n    \"my_selection = [\\n\",\n    \"    algo for algo in algos_with_bounds_support if algo.algo_info.needs_bounds\\n\",\n    \"]\\n\",\n    \"my_selection[0:3]\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"19\",\n   \"metadata\": {},\n   \"source\": [\n    \"Similarly, to find all algorithms that support infinite values in bounds, we can do:\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"20\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"my_selection2 = [\\n\",\n    \"    algo\\n\",\n    \"    for algo in algos_with_bounds_support\\n\",\n    \"    if algo.algo_info.supports_infinite_bounds\\n\",\n    \"]\\n\",\n    \"my_selection2[0:3]\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"21\",\n   \"metadata\": {},\n   \"source\": [\n    \"In case you forget to specify bounds for an optimizer that strictly requires them or pass infinite values in bounds to an optimizer which does not support them, optimagic will raise an `IncompleteBoundsError`. 
\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"22\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Coming from scipy\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"23\",\n   \"metadata\": {},\n   \"source\": [\n    \"If `params` is a flat numpy array, you can also provide bounds in any format that \\n\",\n    \"is supported by [`scipy.optimize.minimize`](\\n\",\n    \"https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html). \"\n   ]\n  }\n ],\n \"metadata\": {\n  \"kernelspec\": {\n   \"display_name\": \"optimagic-docs\",\n   \"language\": \"python\",\n   \"name\": \"python3\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 3\n   },\n   \"file_extension\": \".py\",\n   \"mimetype\": \"text/x-python\",\n   \"name\": \"python\",\n   \"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": \"ipython3\",\n   \"version\": \"3.12.11\"\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 5\n}\n"
  },
  {
    "path": "docs/source/how_to/how_to_change_plotting_backend.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"0\",\n   \"metadata\": {},\n   \"source\": [\n    \"# How to change the plotting backend\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"1\",\n   \"metadata\": {},\n   \"source\": [\n    \"optimagic supports various visualization libraries as plotting backends, which can be\\n\",\n    \"selected using the `backend` argument. In the following guide, we showcase the \\n\",\n    \"`criterion_plot` visualized using different backends.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"2\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"import numpy as np\\n\",\n    \"\\n\",\n    \"import optimagic as om\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"def sphere(x):\\n\",\n    \"    return x @ x\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"results = {}\\n\",\n    \"for algo in [\\\"scipy_lbfgsb\\\", \\\"scipy_neldermead\\\"]:\\n\",\n    \"    results[algo] = om.minimize(sphere, params=np.arange(5), algorithm=algo)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"3\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Backends\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"4\",\n   \"metadata\": {},\n   \"source\": [\n    \"### Plotly\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"5\",\n   \"metadata\": {},\n   \"source\": [\n    \"The default plotting library. 
To select the Plotly backend explicitly, set `backend=\\\"plotly\\\"`.\\n\",\n    \"\\n\",\n    \"The returned figure object is a [`plotly.graph_objects.Figure`](https://plotly.com/python-api-reference/generated/plotly.graph_objects.Figure.html).\\n\",\n    \"\\n\",\n    \"```{note}\\n\",\n    \"**Choose the Plotly renderer according to your environment:**\\n\",\n    \"\\n\",\n    \"- Use `plotly.io.renderers.default = \\\"notebook_connected\\\"` in Jupyter notebooks for interactive plots.\\n\",\n    \"- Use `plotly.io.renderers.default = \\\"browser\\\"` to open plots in your default web browser when running as a script.\\n\",\n    \"\\n\",\n    \"Refer to the [Plotly documentation](https://plotly.com/python/renderers/) for more details.\\n\",\n    \"```\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"6\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"import plotly.io as pio\\n\",\n    \"\\n\",\n    \"pio.renderers.default = \\\"notebook_connected\\\"\\n\",\n    \"\\n\",\n    \"fig = om.criterion_plot(results, backend=\\\"plotly\\\")  # Also the default\\n\",\n    \"fig.show()\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"7\",\n   \"metadata\": {},\n   \"source\": [\n    \"### Matplotlib\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"8\",\n   \"metadata\": {},\n   \"source\": [\n    \"To select the Matplotlib backend, set `backend=\\\"matplotlib\\\"`.\\n\",\n    \"\\n\",\n    \"The returned figure object is a [`matplotlib.axes.Axes`](https://matplotlib.org/stable/api/_as_gen/matplotlib.axes.Axes.html).\\n\",\n    \"\\n\",\n    \"In case of grid plots (such as `convergence_plot` or `slice_plot`), the returned object is a 2-dimensional numpy array of `Axes` objects: [`numpy.ndarray`](https://numpy.org/doc/stable/reference/generated/numpy.ndarray.html)[[`matplotlib.axes.Axes`]](https://matplotlib.org/stable/api/_as_gen/matplotlib.axes.Axes.html) of shape 
`(n_rows, n_cols)`.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"9\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"ax = om.criterion_plot(results, backend=\\\"matplotlib\\\")\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"10\",\n   \"metadata\": {},\n   \"source\": [\n    \"### Bokeh\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"11\",\n   \"metadata\": {},\n   \"source\": [\n    \"To select the Bokeh backend, set `backend=\\\"bokeh\\\"`.\\n\",\n    \"\\n\",\n    \"The returned figure object is a [`bokeh.plotting.figure`](https://docs.bokeh.org/en/latest/docs/reference/plotting/figure.html).\\n\",\n    \"\\n\",\n    \"In case of grid plots (such as `convergence_plot` or `slice_plot`), the returned object is a [`bokeh.models.GridPlot`](https://docs.bokeh.org/en/latest/docs/reference/models/plots.html#bokeh.models.GridPlot) object.\\n\",\n    \"\\n\",\n    \"```{warning}\\n\",\n    \"- Bokeh applies themes globally. Passing the `template` parameter to a plotting function updates the theme for all existing and future Bokeh plots. If you do not pass `template`, a default template is applied, which will also change the global theme.\\n\",\n    \"- Bokeh doesn't support titles for grid plots. 
So, the `title` parameter in `slice_plot` is ignored when using the Bokeh backend.\\n\",\n    \"```\\n\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"12\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"from bokeh.io import output_notebook, show\\n\",\n    \"\\n\",\n    \"output_notebook()\\n\",\n    \"\\n\",\n    \"p = om.criterion_plot(results, backend=\\\"bokeh\\\")\\n\",\n    \"show(p)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"13\",\n   \"metadata\": {},\n   \"source\": [\n    \"### Altair\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"14\",\n   \"metadata\": {},\n   \"source\": [\n    \"To select the Altair backend, set `backend=\\\"altair\\\"`.\\n\",\n    \"\\n\",\n    \"The returned figure object is an [`altair.Chart`](https://altair-viz.github.io/user_guide/generated/toplevel/altair.Chart.html).\\n\",\n    \"\\n\",\n    \"In case of grid plots (such as `convergence_plot` or `slice_plot`), the returned object is either an [`altair.Chart`](https://altair-viz.github.io/user_guide/generated/toplevel/altair.Chart.html) if there is only one subplot, an [`altair.HConcatChart`](https://altair-viz.github.io/user_guide/generated/toplevel/altair.HConcatChart.html) if there is only one row, or an [`altair.VConcatChart`](https://altair-viz.github.io/user_guide/generated/toplevel/altair.VConcatChart.html) otherwise.\\n\",\n    \"\\n\",\n    \"```{warning}\\n\",\n    \"Altair applies themes globally. Passing the `template` parameter to a plotting function updates the theme for all existing and future Altair plots. If you do not pass `template`, a default template is applied, which will also change the global theme.\\n\",\n    \"```\\n\",\n    \"\\n\",\n    \"```{note}\\n\",\n    \"It is mostly not required to set the renderer manually, as Altair automatically\\n\",\n    \"selects the appropriate renderer based on your environment. 
In this example,\\n\",\n    \"we explicitly set the renderer to ensure correct display within the documentation.\\n\",\n    \"\\n\",\n    \"Refer to the [Altair documentation](https://altair-viz.github.io/user_guide/display_frontends.html) for more details.\\n\",\n    \"```\\n\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"15\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"import altair as alt\\n\",\n    \"\\n\",\n    \"# Setting the renderer is mostly not required. See note above.\\n\",\n    \"alt.renderers.enable(\\\"jupyter\\\")\\n\",\n    \"\\n\",\n    \"chart = om.criterion_plot(results, backend=\\\"altair\\\")\\n\",\n    \"chart.show()\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"16\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Customizing plots\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"17\",\n   \"metadata\": {},\n   \"source\": [\n    \"Here, we provide a simple example of how to customize plots created with different backends.\\n\",\n    \"\\n\",\n    \"::::{tab-set}\\n\",\n    \"\\n\",\n    \":::{tab-item} Plotly\\n\",\n    \"\\n\",\n    \"```python\\n\",\n    \"fig = om.criterion_plot(results, backend=\\\"plotly\\\")\\n\",\n    \"\\n\",\n    \"# Configure Axes\\n\",\n    \"fig.update_yaxes(title_text=\\\"Custom Y Label\\\", title_font_size=20)\\n\",\n    \"fig.update_xaxes(range=[0, 100])\\n\",\n    \"\\n\",\n    \"# Change legend position\\n\",\n    \"fig.update_layout(legend=dict(xanchor=\\\"left\\\", yanchor=\\\"top\\\", x=1, y=0.6))\\n\",\n    \"\\n\",\n    \"# Configure line properties\\n\",\n    \"# The index corresponding to a line, can be inferred from the legend\\n\",\n    \"# In case of criterion_plot, it is the order of optimizers in `results`\\n\",\n    \"fig.data[0].update(line=dict(width=4))\\n\",\n    \"fig.data[1].update(line=dict(dash=\\\"dashdot\\\"))\\n\",\n    \"\\n\",\n    \"fig.show()\\n\",\n    \"```\\n\",\n    
\":::\\n\",\n    \"\\n\",\n    \":::{tab-item} Matplotlib\\n\",\n    \"\\n\",\n    \"```python\\n\",\n    \"ax = om.criterion_plot(results, backend=\\\"matplotlib\\\")\\n\",\n    \"\\n\",\n    \"# Configure Axis\\n\",\n    \"ax.set_ylabel(ylabel=\\\"Custom Y Label\\\", fontsize=20)\\n\",\n    \"ax.set_xlim(0, 100)\\n\",\n    \"\\n\",\n    \"# Change legend position\\n\",\n    \"ax.figure.legends[0].set_loc(\\\"outside center right\\\")\\n\",\n    \"\\n\",\n    \"# Configure line properties\\n\",\n    \"# The index corresponding to a line, can be inferred from the legend\\n\",\n    \"# In case of criterion_plot, it is the order of optimizers in `results`\\n\",\n    \"ax.lines[0].set_linewidth(4)\\n\",\n    \"ax.lines[1].set_linestyle(\\\"dashdot\\\")\\n\",\n    \"```\\n\",\n    \"\\n\",\n    \":::\\n\",\n    \"\\n\",\n    \":::{tab-item} Bokeh\\n\",\n    \"\\n\",\n    \"```python\\n\",\n    \"from bokeh.models import Range1d\\n\",\n    \"\\n\",\n    \"p = om.criterion_plot(results, backend=\\\"bokeh\\\")\\n\",\n    \"\\n\",\n    \"# Configure Axes\\n\",\n    \"p.yaxis.axis_label = \\\"Custom Y Label\\\"\\n\",\n    \"p.yaxis.axis_label_text_font_size = \\\"20pt\\\"\\n\",\n    \"p.x_range = Range1d(0, 100)\\n\",\n    \"\\n\",\n    \"# Change legend position\\n\",\n    \"p.add_layout(p.legend[0], \\\"right\\\")\\n\",\n    \"p.legend[0].location = \\\"center\\\"\\n\",\n    \"\\n\",\n    \"# Configure line properties\\n\",\n    \"# The index corresponding to a line, can be inferred from the legend\\n\",\n    \"# In case of criterion_plot, it is the order of optimizers in `results`\\n\",\n    \"p.renderers[0].glyph.line_width = 4\\n\",\n    \"p.renderers[1].glyph.line_dash = \\\"dashdot\\\"\\n\",\n    \"\\n\",\n    \"show(p)\\n\",\n    \"```\\n\",\n    \"\\n\",\n    \":::\\n\",\n    \"\\n\",\n    \":::{tab-item} Altair\\n\",\n    \"\\n\",\n    \"```{note}\\n\",\n    \"Due to the nature of Altair charts, top-level configuration may not work as expected. 
In these cases, it might be necessary to override the encoding.\\n\",\n    \"```\\n\",\n    \"\\n\",\n    \"```python\\n\",\n    \"import altair as alt\\n\",\n    \"\\n\",\n    \"chart = om.criterion_plot(results, backend=\\\"altair\\\")\\n\",\n    \"\\n\",\n    \"# Configure Axes\\n\",\n    \"chart = chart.encode(\\n\",\n    \"    y=alt.Y(\\\"y\\\", axis=alt.Axis(title=\\\"Custom Y Label\\\", titleFontSize=20)),\\n\",\n    \"    x=alt.X(\\\"x\\\", scale=alt.Scale(domain=(0, 100))),\\n\",\n    \")\\n\",\n    \"\\n\",\n    \"# Configure lines\\n\",\n    \"chart = chart.encode(\\n\",\n    \"    strokeWidth=alt.condition(\\n\",\n    \"        alt.datum.name == \\\"scipy_lbfgsb\\\", alt.value(4), alt.value(2)\\n\",\n    \"    ),\\n\",\n    \"    strokeDash=alt.condition(\\n\",\n    \"        alt.datum.name == \\\"scipy_neldermead\\\", alt.value([8, 4, 2, 4]), alt.value([1, 0])\\n\",\n    \"    ),\\n\",\n    \")\\n\",\n    \"\\n\",\n    \"chart.show()\\n\",\n    \"```\\n\",\n    \"\\n\",\n    \":::\\n\",\n    \"\\n\",\n    \"::::\"\n   ]\n  }\n ],\n \"metadata\": {\n  \"kernelspec\": {\n   \"display_name\": \"optimagic\",\n   \"language\": \"python\",\n   \"name\": \"python3\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 3\n   },\n   \"file_extension\": \".py\",\n   \"mimetype\": \"text/x-python\",\n   \"name\": \"python\",\n   \"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": \"ipython3\",\n   \"version\": \"3.10.17\"\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 5\n}\n"
  },
  {
    "path": "docs/source/how_to/how_to_constraints.md",
    "content": "(constraints)=\n\n# How to specify constraints\n\n## Constraints vs bounds\n\noptimagic distinguishes between bounds and constraints. Bounds are lower and upper\nbounds for parameters. In the literature, they are sometimes called box constraints.\nBounds are specified as `lower_bounds` and `upper_bounds` argument to `maximize` and\n`minimize`.\n\nExamples with bounds can be found in [this tutorial].\n\nTo specify more general constraints on your parameters, you can use the argument\n`constraints`. The variety of constraints you can impose ranges from rather simple ones\n(e.g. parameters are fixed to a value, a group of parameters is required to be equal) to\nmore complex ones (like general linear constraints, or even nonlinear constraints).\n\n## Can you use constraints with all optimizers?\n\nWith the exception of general nonlinear constraints, we implement constraints via\nreparametrizations. Details are explained [here]. This means that you can use all of the\nconstraints with any optimizer that supports bounds. Some constraints (e.g. fixing\nparameters) can even be used with optimizers that do not support bounds.\n\n## Example criterion function\n\nLet's look at a variation of the sphere function to illustrate what kinds of constraints\nyou can impose and how you specify them in optimagic:\n\n```{eval-rst}\n\n.. code-block:: python\n\n    >>> import numpy as np\n    >>> import optimagic as om\n    >>> def fun(params):\n    ...     offset = np.linspace(1, 0, len(params))\n    ...     x = params - offset\n    ...     return x @ x\n\n```\n\nThe unconstrained optimum of a six-dimensional version of this problem is:\n\n```{eval-rst}\n\n.. code-block:: python\n\n    >>> res = om.minimize(\n    ...    fun=fun,\n    ...    params=np.array([2.5, 1, 1, 1, 1, -2.5]),\n    ...    algorithm=\"scipy_lbfgsb\",\n    ... )\n    >>> res.params.round(3) # doctest: +SKIP\n    array([1. , 0.8, 0.6, 0.4, 0.2, 0. 
])\n\n```\n\nThe unconstrained optimum is usually easy to see because all parameters enter the\ncriterion function in an additively separable way.\n\n## Types of constraints\n\nBelow, we show a very simple example of each type of constraint implemented in\noptimagic. For each constraint, we will select a subset of the parameters on which the\nconstraint is imposed via the `selector` argument, which is a function that takes in the\nfull parameter vector and returns the subset of parameters that should be constrained.\n\n```{eval-rst}\n.. dropdown:: fixed\n\n    The simplest (but very useful) constraint fixes parameters at their start values.\n\n    Let's take the above example and fix the first and last parameter to 2.5 and\n    -2.5, respectively.\n\n    .. code-block:: python\n\n        >>> res = om.minimize(\n        ...    fun=fun,\n        ...    params=np.array([2.5, 1, 1, 1, 1, -2.5]),\n        ...    algorithm=\"scipy_lbfgsb\",\n        ...    constraints=om.FixedConstraint(\n        ...        selector=lambda params: params[[0, 5]]\n        ...    ),\n        ... )\n\n    Looking at the optimization result, we get:\n\n    >>> res.params.round(3)\n    array([ 2.5,  0.8,  0.6,  0.4,  0.2, -2.5])\n\n    Which is indeed the correct constrained optimum. Fixes are compatible with all\n    optimizers.\n\n```\n\n```{eval-rst}\n.. dropdown:: increasing\n\n    In our unconstrained example, the optimal parameters are decreasing from left to\n    right. Let's impose the constraint that the second, third and fourth parameter\n    increase (weakly):\n\n    .. code-block:: python\n\n\n        >>> res = om.minimize(\n        ...    fun=fun,\n        ...    params=np.array([1, 1, 1, 1, 1, 1]),\n        ...    algorithm=\"scipy_lbfgsb\",\n        ...    constraints=om.IncreasingConstraint(\n        ...        selector=lambda params: params[[1, 2, 3]]\n        ...    ),\n        ... 
)\n\n\n    Imposing the constraint on positions ``params[[1, 2, 3]]`` means that the parameter value\n    at index position ``2`` has to be (weakly) greater than the value at position ``1``.\n    Likewise, the parameter value at index position ``3`` has to be (weakly) greater than the\n    value at position ``2``. Hence, imposing an increasing constraint with\n    only one selected parameter has no effect. We need to specify at least two parameters to make\n    a meaningful *relative* comparison.\n    Note that the increasing constraint affects all three parameters, i.e. ``params[1]``,\n    ``params[2]``, and ``params[3]`` because the optimal parameters in the unconstrained case\n    are decreasing from left to right.\n\n    Looking at the optimization result, we get:\n\n    >>> res.params.round(3)\n    array([1. , 0.6, 0.6, 0.6, 0.2, 0. ])\n\n    Which is indeed the correct constrained optimum. Increasing constraints are only\n    compatible with optimizers that support bounds.\n\n```\n\n```{eval-rst}\n.. dropdown:: decreasing\n\n    In our unconstrained example, the optimal parameters are decreasing from left to\n    right already - without imposing any constraints. If we imposed a decreasing constraint\n    without changing the order, it would simply have no effect.\n\n    So let's impose one in a different order:\n\n    .. code-block:: python\n\n        >>> res = om.minimize(\n        ...    fun=fun,\n        ...    params=np.array([1, 1, 1, 1, 1, 1]),\n        ...    algorithm=\"scipy_lbfgsb\",\n        ...    constraints=om.DecreasingConstraint(\n        ...        selector=lambda params: params[[3, 0, 4]]\n        ...    ),\n        ... )\n\n    Imposing the constraint on positions ``params[[3, 0, 4]]`` means that the parameter value\n    at index position ``0`` has to be (weakly) smaller than the value at position ``3``.\n    Likewise, the parameter value at index position ``4`` has to be (weakly) smaller than the\n    value at position ``0``. 
Hence, imposing a decreasing constraint with\n    only one selected parameter has no effect. We need to specify at least two parameters to make\n    a meaningful *relative* comparison.\n    Note that the decreasing constraint should have no effect on ``params[4]`` because it is\n    smaller than the other two anyways in the unconstrained optimum, but it will change\n    the optimal values of ``params[3]`` and ``params[0]``. Indeed we get:\n\n    >>> res.params.round(3)\n    array([ 0.7,  0.8,  0.6,  0.7,  0.2, -0. ])\n\n    Which is the correct optimum. Decreasing constraints are only compatible with\n    optimizers that support bounds.\n```\n\n```{eval-rst}\n.. dropdown:: equality\n\n    In our example, all optimal parameters are different. Let's constrain the first\n    and last to be equal to each other:\n\n    .. code-block:: python\n\n        >>> res = om.minimize(\n        ...    fun=fun,\n        ...    params=np.array([1, 1, 1, 1, 1, 1]),\n        ...    algorithm=\"scipy_lbfgsb\",\n        ...    constraints=om.EqualityConstraint(\n        ...        selector=lambda params: params[[0, 5]]\n        ...    ),\n        ... )\n\n    This yields:\n\n    >>> res.params.round(3)\n    array([0.5, 0.8, 0.6, 0.4, 0.2, 0.5])\n\n    Which is the correct solution. Equality constraints are compatible with all\n    optimizers.\n\n```\n\n```{eval-rst}\n.. dropdown:: pairwise_equality\n\n    Pairwise equality constraints are similar to equality constraints but impose that\n    two or more groups of parameters are pairwise equal. Let's look at an example:\n\n    .. code-block:: python\n\n        >>> res = om.minimize(\n        ...    fun=fun,\n        ...    params=np.array([1, 1, 1, 1, 1, 1]),\n        ...    algorithm=\"scipy_lbfgsb\",\n        ...    constraints=om.PairwiseEqualityConstraint(\n        ...        selectors=[\n        ...            lambda params: params[[0, 1]],\n        ...            lambda params: params[[2, 3]]\n        ...        ],\n        ...    
),\n        ... )\n\n\n\n    This constraint imposes that ``params[0] == params[2]`` and\n    ``params[1] == params[3]``. The optimal parameters with this constraint are:\n\n    >>> res.params.round(3)\n    array([ 0.8,  0.6,  0.8,  0.6,  0.2, -0. ])\n\n```\n\n```{eval-rst}\n.. dropdown:: probability\n\n    Let's impose the constraint that the first four parameters form valid\n    probabilities, i.e. they should add up to one and be between zero and one.\n\n    .. code-block:: python\n\n        >>> res = om.minimize(\n        ...    fun=fun,\n        ...    params=np.array([0.3, 0.2, 0.25, 0.25, 1, 1]),\n        ...    algorithm=\"scipy_lbfgsb\",\n        ...    constraints=om.ProbabilityConstraint(\n        ...        selector=lambda params: params[:4]\n        ...    ),\n        ... )\n\n    This yields again the correct result:\n\n    .. code-block:: python\n\n        >>> res.params.round(2) # doctest: +SKIP\n        array([0.53, 0.33, 0.13, 0.  , 0.2 , 0.  ])\n\n\n```\n\n```{eval-rst}\n.. dropdown:: covariance\n\n    In many estimation problems, particularly when doing a maximum likelihood estimation,\n    one has to estimate the covariance matrix of a random variable. The\n    ``covariance`` constraint ensures that such a covariance matrix is always valid,\n    i.e. positive semi-definite and symmetric. Due to its symmetry, only the lower\n    triangle of a covariance matrix actually has to be estimated.\n\n    Let's look at an example. We want to impose that the first three elements form the\n    lower triangle of a valid covariance matrix.\n\n    .. code-block:: python\n\n        >>> res = om.minimize(\n        ...    fun=fun,\n        ...    params=np.ones(6),\n        ...    algorithm=\"scipy_lbfgsb\",\n        ...    constraints=om.FlatCovConstraint(\n        ...        selector=lambda params: params[:3]\n        ...    ),\n        ... 
)\n\n    This yields the same solution as an unconstrained estimation because the constraint\n    is not binding:\n\n    >>> res.params.round(3)\n    array([ 1.006,  0.784,  0.61 ,  0.4  ,  0.2  , -0.   ])\n\n    We can now use one of optimagic's utility functions to actually build the covariance\n    matrix out of the first three parameters:\n\n    .. code-block:: python\n\n        >>> from optimagic.utilities import cov_params_to_matrix\n        >>> cov_params_to_matrix(res.params[:3]).round(2) # doctest: +NORMALIZE_WHITESPACE\n        array([[1.01, 0.78],\n               [0.78, 0.61]])\n\n\n```\n\n```{eval-rst}\n.. dropdown:: sdcorr\n\n    ``sdcorr`` constraints are very similar to ``covariance`` constraints. The only\n    difference is that instead of estimating a covariance matrix, we estimate\n    standard deviations and the correlation matrix of random variables.\n\n    Let's look at an example. We want to impose that the first three elements form valid\n    standard deviations and a correlation matrix.\n\n    .. code-block:: python\n\n        >>> res = om.minimize(\n        ...    fun=fun,\n        ...    params=np.ones(6),\n        ...    algorithm=\"scipy_lbfgsb\",\n        ...    constraints=om.FlatSDCorrConstraint(\n        ...        selector=lambda params: params[:3]\n        ...    ),\n        ... )\n\n\n    This yields the same solution as an unconstrained estimation because the constraint\n    is not binding:\n\n    >>> res.params.round(3) # doctest: +SKIP\n    array([ 1. ,  0.8,  0.6,  0.4,  0.2, -0. ])\n\n    We can now use one of optimagic's utility functions to actually build the standard\n    deviations and the correlation matrix:\n\n    .. code-block:: python\n\n        >>> from optimagic.utilities import sdcorr_params_to_sds_and_corr\n        >>> sd, corr = sdcorr_params_to_sds_and_corr(res.params[:3])\n        >>> sd.round(2)\n        array([1. , 0.8])\n        >>> corr.round(2) # doctest: +NORMALIZE_WHITESPACE\n        array([[1. 
, 0.6],\n               [0.6, 1. ]])\n\n\n```\n\n```{eval-rst}\n.. dropdown:: linear\n\n    Linear constraints are the most difficult but also the most powerful constraints\n    in your toolkit. They can be used to express constraints of the form\n    ``lower_bound <= weights.dot(x) <= upper_bound`` or\n    ``weights.dot(x) = value`` where ``x`` are the selected parameters.\n\n    Linear constraints have many of the other constraint types as special cases, but\n    typically it is more convenient to use the special cases instead of expressing\n    them as a linear constraint. Internally, it will make no difference.\n\n    Let's impose the constraint that the average of the first four parameters is at\n    least 0.95.\n\n    .. code-block:: python\n\n        >>> res = om.minimize(\n        ...    fun=fun,\n        ...    params=np.ones(6),\n        ...    algorithm=\"scipy_lbfgsb\",\n        ...    constraints=om.LinearConstraint(\n        ...        selector=lambda params: params[:4],\n        ...        lower_bound=0.95,\n        ...        weights=0.25,\n        ...    ),\n        ... )\n\n    This yields:\n\n    >>> res.params.round(2)\n    array([ 1.25,  1.05,  0.85,  0.65,  0.2 , -0.  ])\n\n    Where the first four parameters have an average of 0.95.\n\n    In the above example, ``lower_bound`` and ``weights`` are scalars. They may, however,\n    also be arrays (or even pytrees) with bounds and weights for each selected\n    parameter.\n```\n\n```{eval-rst}\n.. dropdown:: nonlinear\n\n    .. 
warning::\n\n        General nonlinear constraints that are specified via a black-box constraint\n        function can only be used if you choose an optimizer that supports it.\n        This feature is currently supported by the algorithms:\n\n        * ``ipopt``\n        * ``nlopt``: ``cobyla``, ``slsqp``, ``isres``, ``mma``\n        * ``scipy``: ``cobyla``, ``slsqp``, ``trust_constr``\n\n    You can use nonlinear constraints to express restrictions of the form\n    ``lower_bound <= func(x) <= upper_bound`` or\n    ``func(x) = value`` where ``x`` are the selected parameters and ``func`` is the\n    constraint function.\n\n    Let's impose the constraint that the product of all but the last parameter is 1.\n\n    .. code-block:: python\n\n        >>> res = om.minimize(\n        ...    fun=fun,\n        ...    params=np.ones(6),\n        ...    algorithm=\"scipy_slsqp\",\n        ...    constraints=om.NonlinearConstraint(\n        ...        selector=lambda params: params[:-1],\n        ...        func=lambda x: np.prod(x),\n        ...        value=1.0,\n        ...    ),\n        ... )\n\n    This yields:\n\n    >>> res.params.round(2)\n    array([ 1.31,  1.16,  1.01,  0.87,  0.75, -0.  ])\n\n    Where the product of all but the last parameters is equal to 1.\n\n    If you have a function that calculates the derivative of your constraint, you can\n    add this under the key `\"derivative\"` to the constraint dictionary. Otherwise,\n    numerical derivatives are calculated for you if needed.\n\n```\n\n## Imposing multiple constraints at once\n\nThe above examples all just impose one constraint at a time. To impose multiple\nconstraints simultaneously, simply pass in a list of constraints. For example:\n\n```{eval-rst}\n\n.. code-block:: python\n\n    >>> res = om.minimize(\n    ...    fun=fun,\n    ...    params=np.ones(6),\n    ...    algorithm=\"scipy_lbfgsb\",\n    ...    constraints=[\n    ...        
om.EqualityConstraint(selector=lambda params: params[:2]),\n    ...        om.LinearConstraint(\n    ...            selector=lambda params: params[2:5],\n    ...            weights=1,\n    ...            value=3,\n    ...        ),\n    ...    ],\n    ... )\n\n    This yields:\n\n    >>> res.params.round(2)\n    array([0.9, 0.9, 1.2, 1. , 0.8, 0. ])\n\nThere are limits regarding the compatibility of overlapping constraints. You will\nget a descriptive error message if your constraints are not compatible.\n\n```\n\n## How to select the parameters?\n\nThe parameters can be selected via a `selector` function. This function takes in the\nfull parameter vector and returns the subset of parameters that should be constrained.\n\nLet's assume we have defined parameters in a nested dictionary:\n\n```python\nparams = {\"a\": np.ones(2), \"b\": {\"c\": 3, \"d\": pd.Series([4, 5])}}\n```\n\nIt is probably not a good idea to use a nested dictionary for so few parameters, but\nlet's ignore that.\n\nNow assume we want to fix the parameters in the pandas Series at their start values. We\ncan do so as follows:\n\n```python\nres = om.minimize(\n    fun=some_fun,\n    params=params,\n    algorithm=\"scipy_lbfgsb\",\n    constraints=om.FixedConstraint(selector=lambda params: params[\"b\"][\"d\"]),\n)\n```\n\nI.e. the value corresponding to `selector` is a python function that takes the full\n`params` and returns a subset. The selected subset does not have to be a numpy array, it\ncan be an arbitrary pytree.\n\nUsing lambda functions is often convenient, but we could have just as well defined the\nselector function using def.\n\n```python\ndef my_selector(params):\n    return params[\"b\"][\"d\"]\n\n\nres = om.minimize(\n    fun=some_fun,\n    params=params,\n    algorithm=\"scipy_lbfgsb\",\n    constraints=om.FixedConstraint(selector=my_selector),\n)\n```\n\n[here]: ../../explanation/implementation_of_constraints.md\n[this tutorial]: ../tutorials/optimization_overview.ipynb\n"
  },
  {
    "path": "docs/source/how_to/how_to_criterion_function.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"(how-to-fun)=\\n\",\n    \"\\n\",\n    \"# How to write objective functions\\n\",\n    \"\\n\",\n    \"optimagic is very flexible when it comes to the objective function and its derivatives. \\n\",\n    \"In this how-to guide we start with simple examples, that would also work with \\n\",\n    \"scipy.optimize before we show advanced options and their advantages. \\n\",\n    \"\\n\",\n    \"## The simplest case\\n\",\n    \"\\n\",\n    \"In the simplest case, `fun` maps a numpy array into a scalar objective value. The name\\n\",\n    \"of first argument of `fun` is arbitrary. \"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"import numpy as np\\n\",\n    \"\\n\",\n    \"import optimagic as om\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"def sphere(x):\\n\",\n    \"    return x @ x\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"res = om.minimize(\\n\",\n    \"    fun=sphere,\\n\",\n    \"    params=np.arange(3),\\n\",\n    \"    algorithm=\\\"scipy_lbfgsb\\\",\\n\",\n    \")\\n\",\n    \"res.params.round(6)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## More flexible `params`\\n\",\n    \"\\n\",\n    \"In all but the most simple problems, a flat numpy array is not ideal to keep track of \\n\",\n    \"all the different parameters one wants to optimize over. Therefore, optimagic accepts \\n\",\n    \"objective functions that work with other parameter formats. Below we show a simple \\n\",\n    \"example. 
More examples can be found [here](how_to_start_parameters.md).\\n\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"def dict_fun(x):\\n\",\n    \"    return x[\\\"a\\\"] ** 2 + x[\\\"b\\\"] ** 4\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"res = om.minimize(\\n\",\n    \"    fun=dict_fun,\\n\",\n    \"    params={\\\"a\\\": 1, \\\"b\\\": 2},\\n\",\n    \"    algorithm=\\\"scipy_lbfgsb\\\",\\n\",\n    \")\\n\",\n    \"\\n\",\n    \"res.params\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"The important thing is that the `params` provided to `minimize` need to have the format \\n\",\n    \"that is expected by the objective function.\\n\",\n    \"\\n\",\n    \"## Functions with additional arguments\\n\",\n    \"\\n\",\n    \"In many applications, the objective function takes more than `params` as argument. \\n\",\n    \"This can be achieved via `fun_kwargs`. Take the following simplified example:\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"def shifted_sphere(x, offset):\\n\",\n    \"    return (x - offset) @ (x - offset)\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"res = om.minimize(\\n\",\n    \"    fun=shifted_sphere,\\n\",\n    \"    params=np.arange(3),\\n\",\n    \"    algorithm=\\\"scipy_lbfgsb\\\",\\n\",\n    \"    fun_kwargs={\\\"offset\\\": np.ones(3)},\\n\",\n    \")\\n\",\n    \"res.params\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"`fun_kwargs` is a dictionary with keyword arguments for `fun`. 
There is no constraint\\n\",\n    \"on the number or names of those arguments.\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Least-Squares problems\\n\",\n    \"\\n\",\n    \"Many estimation problems have a least-squares structure. If so, specialized optimizers that exploit this structure can be much faster than standard optimizers. The `sphere` function from above is the simplest possible least-squares problem you could imagine: the least-squares residuals are just the params. \\n\",\n    \"\\n\",\n    \"To use least-squares optimizers in optimagic, you need to mark your function with \\n\",\n    \"a decorator and return the least-squares residuals instead of the aggregated function value. \"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"@om.mark.least_squares\\n\",\n    \"def ls_sphere(params):\\n\",\n    \"    return params\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"res = om.minimize(\\n\",\n    \"    fun=ls_sphere,\\n\",\n    \"    params=np.arange(3),\\n\",\n    \"    algorithm=\\\"pounders\\\",\\n\",\n    \")\\n\",\n    \"res.params.round(5)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"Any least-squares optimization problem is also a standard optimization problem. 
You \\n\",\n    \"can therefore optimize least-squares functions with scalar optimizers as well:\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"res = om.minimize(\\n\",\n    \"    fun=ls_sphere,\\n\",\n    \"    params=np.arange(3),\\n\",\n    \"    algorithm=\\\"scipy_lbfgsb\\\",\\n\",\n    \")\\n\",\n    \"res.params.round(5)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Returning additional information\\n\",\n    \"\\n\",\n    \"You can return additional information such as intermediate results, debugging information, etc. in your objective function. This information will be stored in a database if you use [logging](how_to_logging.ipynb).\\n\",\n    \"\\n\",\n    \"To do so, you need to return a `FunctionValue` object.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"def sphere_with_info(x):\\n\",\n    \"    return om.FunctionValue(value=x @ x, info={\\\"avg\\\": x.mean()})\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"res = om.minimize(\\n\",\n    \"    fun=sphere_with_info,\\n\",\n    \"    params=np.arange(3),\\n\",\n    \"    algorithm=\\\"scipy_lbfgsb\\\",\\n\",\n    \")\\n\",\n    \"\\n\",\n    \"res.params.round(6)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"The `info` can be an arbitrary dictionary. In the oversimplified example we returned the \\n\",\n    \"mean of the parameters, which could have been recovered from the params history that \\n\",\n    \"is collected anyways but in real applications this feature can be helpful. 
\"\n   ]\n  }\n ],\n \"metadata\": {\n  \"kernelspec\": {\n   \"display_name\": \"optimagic\",\n   \"language\": \"python\",\n   \"name\": \"python3\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 3\n   },\n   \"file_extension\": \".py\",\n   \"mimetype\": \"text/x-python\",\n   \"name\": \"python\",\n   \"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": \"ipython3\",\n   \"version\": \"3.10.14\"\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 2\n}\n"
  },
  {
    "path": "docs/source/how_to/how_to_derivatives.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"(how-to-jac)=\\n\",\n    \"\\n\",\n    \"# How to speed up your optimization using derivatives\\n\",\n    \"\\n\",\n    \"Many optimization algorithms use derivatives to find good search directions. If you \\n\",\n    \"use a derivative based optimizer but do not provide derivatives of your objective \\n\",\n    \"function, optimagic calculates a numerical derivative for you. \\n\",\n    \"\\n\",\n    \"While this numerical derivative is usually precise enough to find good search directions \\n\",\n    \"it requires `n + 1` evaluations of the objective function (where `n` is the number of \\n\",\n    \"free parameters). For large `n` this becomes very slow.\\n\",\n    \"\\n\",\n    \"This how-to guide shows how you can speed up your optimization by parallelizing \\n\",\n    \"numerical derivatives or by providing closed form derivatives. \"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Parallel numerical derivatives\\n\",\n    \"\\n\",\n    \"If you have a computer with a few idle cores, the easiest way to speed up your\\n\",\n    \"optimization with a gradient based optimizer is to calculate numerical derivatives \\n\",\n    \"in parallel:\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"import numpy as np\\n\",\n    \"import plotly.io as pio\\n\",\n    \"\\n\",\n    \"pio.renderers.default = \\\"notebook_connected\\\"\\n\",\n    \"\\n\",\n    \"import optimagic as om\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"def sphere(x):\\n\",\n    \"    return x @ x\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"res = om.minimize(\\n\",\n    \"    fun=sphere,\\n\",\n    \"    params=np.arange(5),\\n\",\n    \"    algorithm=\\\"scipy_lbfgsb\\\",\\n\",\n    \"    numdiff_options=om.NumdiffOptions(n_cores=6),\\n\",\n    
\")\\n\",\n    \"res.params.round(6)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"Of course, for this super fast objective function, parallelizing will not yield an actual \\n\",\n    \"speedup. But if your objective function takes 100 milliseconds or longer to evaluate, \\n\",\n    \"you can parallelize efficiently to up to `n + 1` cores. \"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Custom derivatives\\n\",\n    \"\\n\",\n    \"If you don't want to solve your speed problem by throwing more compute at it, you can \\n\",\n    \"provide a derivative to optimagic that is faster than doing `n + 1` evaluations of `fun`. \\n\",\n    \"Here we show you how to hand-code it, but in practice you would usually use JAX or another \\n\",\n    \"autodiff framework to create the derivative.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"def sphere_gradient(x):\\n\",\n    \"    return 2 * x\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"res = om.minimize(\\n\",\n    \"    fun=sphere,\\n\",\n    \"    params=np.arange(5),\\n\",\n    \"    algorithm=\\\"scipy_lbfgsb\\\",\\n\",\n    \"    jac=sphere_gradient,\\n\",\n    \")\\n\",\n    \"res.params.round(6)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"In this example, the evaluation of `sphere_gradient` is even faster than evaluating `sphere`. \\n\",\n    \"\\n\",\n    \"In non-trivial functions, there are synergies between calculating the objective value and \\n\",\n    \"its derivative. Therefore, you can also provide a function that evaluates both at the same time. 
In such a case, providing fun is optional.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"def sphere_fun_and_gradient(x):\\n\",\n    \"    return x @ x, 2 * x\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"res = om.minimize(\\n\",\n    \"    fun=sphere,  # optional when fun_and_jac is provided\\n\",\n    \"    params=np.arange(5),\\n\",\n    \"    algorithm=\\\"scipy_lbfgsb\\\",\\n\",\n    \"    fun_and_jac=sphere_fun_and_gradient,\\n\",\n    \")\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"`fun_and_jac` can be provided in addition to or instead of `jac` or `fun`. Providing them \\n\",\n    \"together gives optimagic more opportunities to save \\n\",\n    \"time by evaluating just the function that is needed for a given optimizer. \"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Derivatives with flexible params\\n\",\n    \"\\n\",\n    \"Derivatives are compatible with any format of params. In general, the gradients have \\n\",\n    \"just the same structure as your params. 
\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"def dict_fun(x):\\n\",\n    \"    return x[\\\"a\\\"] ** 2 + x[\\\"b\\\"] ** 4\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"def dict_gradient(x):\\n\",\n    \"    return {\\\"a\\\": 2 * x[\\\"a\\\"], \\\"b\\\": 4 * x[\\\"b\\\"] ** 3}\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"res = om.minimize(\\n\",\n    \"    fun=dict_fun,\\n\",\n    \"    params={\\\"a\\\": 1, \\\"b\\\": 2},\\n\",\n    \"    algorithm=\\\"scipy_lbfgsb\\\",\\n\",\n    \"    jac=dict_gradient,\\n\",\n    \")\\n\",\n    \"res.params\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"This is also the convention that JAX uses, so any derivative you get via JAX will be \\n\",\n    \"compatible with optimagic. \\n\",\n    \"\\n\",\n    \"## Derivatives for least-squares functions\\n\",\n    \"\\n\",\n    \"When minimizing least-squares functions, you don't need the gradient of the objective \\n\",\n    \"value but the jacobian of the least-squares residuals. Moreover, this jacobian function \\n\",\n    \"needs to be decorated with the `mark.least_squares` decorator. 
\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"@om.mark.least_squares\\n\",\n    \"def ls_sphere(params):\\n\",\n    \"    return params\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"@om.mark.least_squares\\n\",\n    \"def ls_sphere_jac(params):\\n\",\n    \"    return np.eye(len(params))\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"res = om.minimize(\\n\",\n    \"    fun=ls_sphere,\\n\",\n    \"    params=np.arange(3),\\n\",\n    \"    algorithm=\\\"scipy_ls_lm\\\",\\n\",\n    \"    jac=ls_sphere_jac,\\n\",\n    \")\\n\",\n    \"res.params.round(5)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"The `fun_and_jac` argument works just analogous to the scalar case. \\n\",\n    \"\\n\",\n    \"Derivatives of least-squares functions again work with all valid formats of params. \\n\",\n    \"However, the structure of the jacobian can be a bit complicated. Again, JAX will do \\n\",\n    \"the right thing here, so we strongly suggest you calculate all your jacobians via JAX,\\n\",\n    \"especially if your params are not a flat numpy array. \\n\",\n    \"\\n\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Derivatives that work for scalar and least-squares optimizers\\n\",\n    \"\\n\",\n    \"If you want to seamlessly switch between scalar and least-squares optimizers, you can \\n\",\n    \"do so by providing even more versions of derivatives to `minimize`. You probably won't \\n\",\n    \"ever need this, but here is how you would do it. 
To pretend that this can be useful, \\n\",\n    \"we compare a scalar and a least squares optimizer in a criterion_plot:\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"results = {}\\n\",\n    \"for algorithm in [\\\"scipy_lbfgsb\\\", \\\"scipy_ls_lm\\\"]:\\n\",\n    \"    results[algorithm] = om.minimize(\\n\",\n    \"        fun=ls_sphere,\\n\",\n    \"        params=np.arange(5),\\n\",\n    \"        algorithm=algorithm,\\n\",\n    \"        jac=[sphere_gradient, ls_sphere_jac],\\n\",\n    \"    )\\n\",\n    \"\\n\",\n    \"fig = om.criterion_plot(results)\\n\",\n    \"fig.show()\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"We see that both optimizers were super fast in solving this problem (mainly because the problem is so simple) and in this case the scalar optimizer was even faster. However, in non-trivial problems it almost always pays off to exploit the least-squares structure if you can.\"\n   ]\n  }\n ],\n \"metadata\": {\n  \"kernelspec\": {\n   \"display_name\": \"optimagic\",\n   \"language\": \"python\",\n   \"name\": \"python3\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 3\n   },\n   \"file_extension\": \".py\",\n   \"mimetype\": \"text/x-python\",\n   \"name\": \"python\",\n   \"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": \"ipython3\",\n   \"version\": \"3.10.14\"\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 2\n}\n"
  },
  {
    "path": "docs/source/how_to/how_to_document_optimizers.md",
    "content": "# How to document optimizers\n\nThis guide shows you how to document algorithms in optimagic using our new documentation\nsystem. We'll walk through the process step-by-step using the `ScipyLBFGSB` optimizer as\na complete example.\n\n## When to Use This Guide\n\nUse this guide when you need to:\n\n- Document a new algorithm you've added to optimagic\n- Migrate existing algorithm documentation from the old split system (docstrings +\n  `algorithms.md`) to the new system\n- Update or improve existing algorithm documentation\n\nIf you're adding a completely new optimizer to optimagic, start with the \"How to Add\nOptimizers guide\" first, then use this guide to document your algorithm properly.\n\n## Why the New Documentation System?\n\nPreviously, algorithm documentation was scattered across multiple places:\n\n- Basic descriptions in the algorithm class docstrings\n- Detailed parameter descriptions in `algorithms.md`\n- Usage examples separate from the algorithm definitions\n\nThis made it hard to maintain consistency and keep documentation up-to-date. The new\nsystem centralizes nearly all documentation in the algorithm code itself, making it:\n\n- Easier to maintain (documentation lives next to code)\n- More consistent (unified format across all algorithms)\n- Auto-generated (parameter lists appear automatically in docs)\n- Type-safe (documentation matches actual parameter types)\n\n## The Documentation System Components\n\nOur documentation system has three main parts:\n\n1. **Algorithm Class Documentation**: A comprehensive docstring in the algorithm\n   dataclass that explains what the algorithm does, how it works, and when to use it\n1. **Parameter Documentation**: Detailed docstrings for each parameter with mathematical\n   formulations when needed\n1. 
**Usage Integration**: A section in `algorithms.md` that shows how to use the\n   algorithm\n\nLet's walk through documenting an algorithm from start to finish.\n\n## Example: Documenting ScipyLBFGSB\n\nWe'll use the `ScipyLBFGSB` optimizer to show you exactly how to document an algorithm.\nThis is a real example from the optimagic codebase, so you can follow along and see the\nresults.\n\n### Step 1: Understand Your Algorithm\n\nBefore writing documentation, make sure you understand:\n\n- What the algorithm does mathematically\n- What problems it's designed to solve\n- How its parameters affect behavior\n- Any performance characteristics or limitations\n\nFor L-BFGS-B, this means understanding it's a quasi-Newton method for bound-constrained\noptimization that approximates the Hessian using gradient history.\n\n```{eval-rst}\n\n.. note::\n    If you are simply migrating an existing algorithm, you can mostly rely on the\n    existing documentation in the algorithm class docstring and `algorithms.md`.\n\n```\n\n### Step 2: Write the Algorithm Class Documentation\n\nThe algorithm class docstring is the most important part. It should give users\neverything they need to decide whether to use this algorithm.\n\nHere's how we document `ScipyLBFGSB`:\n\n```python\n# src/optimagic/optimizers/scipy_optimizers.py\nclass ScipyLBFGSB(Algorithm):\n    \"\"\"Minimize a scalar differentiable function using the L-BFGS-B algorithm.\n\n    The optimizer is taken from scipy, which calls the Fortran code written by the\n    original authors of the algorithm. 
The Fortran code includes the corrections\n    and improvements that were introduced in a follow up paper.\n\n    lbfgsb is a limited memory version of the original bfgs algorithm, that deals with\n    lower and upper bounds via an active set approach.\n\n    The lbfgsb algorithm is well suited for differentiable scalar optimization problems\n    with up to several hundred parameters.\n\n    It is a quasi-newton line search algorithm. At each trial point it evaluates the\n    criterion function and its gradient to find a search direction. It then approximates\n    the hessian using the stored history of gradients and uses the hessian to calculate\n    a candidate step size. Then it uses a gradient based line search algorithm to\n    determine the actual step length. Since the algorithm always evaluates the gradient\n    and criterion function jointly, the user should provide a ``fun_and_jac`` function\n    that exploits the synergies in the calculation of criterion and gradient.\n\n    The lbfgsb algorithm is almost perfectly scale invariant. 
Thus, it is not necessary\n    to scale the parameters.\n\n    \"\"\"\n```\n\n**What makes this docstring effective:**\n\n- **Clear first line**: States exactly what the algorithm does\n- **Implementation details**: Explains it uses scipy's Fortran implementation\n- **Algorithm classification**: Identifies it as a quasi-Newton method\n- **Problem suitability**: Explains what problems it's good for\n- **How it works**: Brief explanation of the algorithm's approach\n- **Performance characteristics**: Mentions scale invariance\n- **Usage advice**: Suggests using `fun_and_jac` for efficiency\n\n### Step 3: Document Individual Parameters\n\nEach parameter needs clear documentation explaining what it controls and how it affects\nthe algorithm's behavior.\n\n```python\n# Basic parameter documentation\nstopping_maxiter: PositiveInt = STOPPING_MAXITER\n\"\"\"Maximum number of iterations.\"\"\"\n\n# Parameter with mathematical formulation\nconvergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL\nr\"\"\"Converge if the relative change in the objective function is less than this\nvalue. More formally, this is expressed as.\n\n.. math::\n\n    \\frac{f^k - f^{k+1}}{\\max\\{|f^k|, |f^{k+1}|, 1\\}} \\leq\n    \\textsf{convergence_ftol_rel}.\n\n\"\"\"\n\n# Parameter with external library context\nlimited_memory_storage_length: PositiveInt = LIMITED_MEMORY_STORAGE_LENGTH\n\"\"\"The maximum number of variable metric corrections used to define the limited\nmemory matrix. This is the 'maxcor' parameter in the SciPy documentation.\n\nThe default value is taken from SciPy's L-BFGS-B implementation. 
Larger values use\nmore memory but may converge faster for some problems.\n\n\"\"\"\n```\n\n**Key principles for parameter documentation:**\n\n- **Start with a clear description** of what the parameter controls\n- **Add mathematical formulations** when they clarify the exact meaning (use `r\"\"\"` for\n  raw strings with LaTeX)\n- **Include external library context** when relevant (e.g., \"Default value is taken from\n  SciPy\")\n- **Explain performance implications** when they matter\n- **Use proper type annotations** that match the parameter's constraints\n\n```{eval-rst}\n\n.. warning::\n    If your optimizer module uses type hints (e.g., ``PositiveInt``,\n    ``NonNegativeInt``), include the following at the top of your optimizer module:\n\n    .. code-block:: python\n\n        from __future__ import annotations\n\n    Without this, type hints such as ``PositiveInt`` may appear decomposed in the\n    documentation (e.g., as ``Annotated[int, Gt(gt=0)]``).\n\n```\n\n### Step 4: Integrate into `algorithms.md`\n\nThe final step is integrating your documented algorithm into the main documentation.\nThis creates a dropdown section that shows users how to use the algorithm.\n\nAdd the following to `docs/source/algorithms.md` in an `eval-rst` block:\n\n```text\n.. dropdown::  scipy_lbfgsb\n\n    **How to use this algorithm:**\n\n    .. code-block:: python\n\n        import optimagic as om\n        om.minimize(\n          fun=lambda x: x @ x,\n          params=[1.0, 2.0, 3.0],\n          algorithm=om.algos.scipy_lbfgsb(stopping_maxiter=1_000, ...),\n        )\n        \n    or using the string interface:\n        \n    .. code-block:: python\n\n        om.minimize(\n          fun=lambda x: x @ x,\n          params=[1.0, 2.0, 3.0],\n          algorithm=\"scipy_lbfgsb\",\n          algo_options={\"stopping_maxiter\": 1_000, ...},\n        )\n\n    **Description and available options:**\n\n    .. 
autoclass:: optimagic.optimizers.scipy_optimizers.ScipyLBFGSB\n```\n\n**What this section provides:**\n\n- **The dropdown button and title**: Makes it easy to find the algorithm\n- **Concrete usage examples** showing both the object and string interfaces\n- **Algorithm-specific parameter** in the usage example\n- **Auto-generated documentation** via the `autoclass` directive that pulls in your\n  docstrings\n\n## Working with Existing Documentation\n\nIf you're migrating an algorithm that already has documentation:\n\n### Finding Existing Content\n\nLook for existing documentation in:\n\n- **Algorithm class docstrings**: Usually basic descriptions\n- **`docs/source/algorithms.md`**: Detailed parameter descriptions and examples\n- **Research papers**: For mathematical formulations and background\n- **External library docs**: For default values and parameter meanings\n\n### Migration Strategy\n\n1. **Start with the algorithm class**: Move the best description from `algorithms.md` to\n   the class docstring\n1. **Update and expand**: Add missing information about performance, usage, etc.\n1. **Move parameter docs**: Transfer parameter descriptions from `algorithms.md` to\n   individual parameter docstrings\n1. **Verify accuracy**: Check that all information is current and correct\n1. 
**Create new integration**: Replace the old `algorithms.md` section with the new\n   dropdown format\n\n## Common Pitfalls to Avoid\n\n- **Don't copy-paste generic descriptions**: Each algorithm needs specific, detailed\n  documentation\n- **Don't skip mathematical formulations**: When convergence criteria or parameters have\n  precise mathematical definitions, include them\n- **Don't ignore external library context**: Always mention where default values come\n  from\n- **Don't use vague parameter descriptions**: \"Controls the algorithm behavior\" is not\n  helpful\n- **Don't forget performance implications**: Users need to understand trade-offs between\n  parameters\n\n## Getting Help\n\nIf you're stuck or need clarification:\n\n- Look at existing well-documented algorithms like `ScipyLBFGSB`\n- Check the {ref}`style_guide` for coding conventions\n- Ask questions in GitHub issues or discussions\n\nThe goal is to make optimagic's algorithm documentation the best resource for\nunderstanding and using optimization algorithms effectively.\n"
  },
  {
    "path": "docs/source/how_to/how_to_errors_during_optimization.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"0\",\n   \"metadata\": {},\n   \"source\": [\n    \"(how-to-errors)=\\n\",\n    \"\\n\",\n    \"# How to handle errors during optimization\\n\",\n    \"\\n\",\n    \"## Try to avoid errors\\n\",\n    \"\\n\",\n    \"Often, optimizers try quite extreme parameter vectors, which then can raise errors in your criterion function or derivative. Often, there are simple tricks to make your code more robust. Avoiding errors is always better than dealing with errors after they occur.  \\n\",\n    \"\\n\",\n    \"- Avoid taking ``np.exp`` without further safeguards. With 64 bit floating point numbers, the exponential function is only well defined roughly between -700 and 700. Below it is 0, above it is inf. Sometimes you can use ``scipy.special.logsumexp`` to avoid unsafe evaluations of the exponential. Read [this](https://en.wikipedia.org/wiki/LogSumExp) for background information on the logsumexp trick.\\n\",\n    \"- Set bounds for your parameters that prevent extreme parameter constellations.\\n\",\n    \"- Use the ``bounds_distance`` option with a not too small value for ``covariance`` and ``sdcorr`` constraints.\\n\",\n    \"- Use `optimagic.utilities.robust_cholesky` instead of normal\\n\",\n    \"  cholesky decompositions or try to avoid cholesky decompositions.\\n\",\n    \"- Use a less aggressive optimizer. Trust region optimizers like `fides` usually choose less extreme steps in the beginning than line search optimizers like `scipy_bfgs` and `scipy_lbfgsb`. \\n\",\n    \"\\n\",\n    \"## Do not use clipping\\n\",\n    \"\\n\",\n    \"A commonly chosen solution to numerical problems is clipping of extreme values. Naive clipping leads to flat areas in your criterion function and can cause spurious convergence. Only use clipping if you know that your optimizer can deal with flat parts. 
\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"1\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Let optimagic do its magic\\n\",\n    \"\\n\",\n    \"Instead of avoiding errors in your criterion function, you can raise them and let optimagic deal with them. If you are using numerical derivatives, errors will automatically be raised if any entry in the derivative is not finite. \\n\",\n    \"\\n\",\n    \"### An example\\n\",\n    \"\\n\",\n    \"Let's look at a simple example from the Moré-Wild benchmark set that has a numerical instability. \"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"2\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"import warnings\\n\",\n    \"\\n\",\n    \"import numpy as np\\n\",\n    \"import plotly.io as pio\\n\",\n    \"from scipy.optimize import minimize as scipy_minimize\\n\",\n    \"\\n\",\n    \"pio.renderers.default = \\\"notebook_connected\\\"\\n\",\n    \"\\n\",\n    \"import optimagic as om\\n\",\n    \"\\n\",\n    \"warnings.simplefilter(\\\"ignore\\\")\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"3\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"def jennrich_sampson(x):\\n\",\n    \"    dim_out = 10\\n\",\n    \"    fvec = (\\n\",\n    \"        2 * (1.0 + np.arange(1, dim_out + 1))\\n\",\n    \"        - np.exp(np.arange(1, dim_out + 1) * x[0])\\n\",\n    \"        - np.exp(np.arange(1, dim_out + 1) * x[1])\\n\",\n    \"    )\\n\",\n    \"    return fvec @ fvec\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"correct_params = np.array([0.2578252135686162, 0.2578252135686162])\\n\",\n    \"correct_criterion = 124.3621823556148\\n\",\n    \"\\n\",\n    \"start_x = np.array([0.3, 0.4])\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"4\",\n   \"metadata\": {},\n   \"source\": [\n    \"### What would scipy do?\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n 
  \"execution_count\": null,\n   \"id\": \"5\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"scipy_res = scipy_minimize(jennrich_sampson, x0=start_x, method=\\\"L-BFGS-B\\\")\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"6\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"scipy_res.success\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"7\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"correct_params.round(4), scipy_res.x.round(4)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"8\",\n   \"metadata\": {},\n   \"source\": [\n    \"So, scipy thinks it solved the problem successfully but the result is far off. (Note that scipy would have given us a warning, but we disabled warnings in order to not clutter the output).\\n\",\n    \"\\n\",\n    \"### optimagic's error handling magic\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"9\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"res = om.minimize(\\n\",\n    \"    fun=jennrich_sampson,\\n\",\n    \"    params=start_x,\\n\",\n    \"    algorithm=\\\"scipy_lbfgsb\\\",\\n\",\n    \"    error_handling=\\\"continue\\\",\\n\",\n    \")\\n\",\n    \"\\n\",\n    \"correct_params, res.params\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"10\",\n   \"metadata\": {},\n   \"source\": [\n    \"### How does the magic work\\n\",\n    \"\\n\",\n    \"When an error occurs and `error_handling` is set to `\\\"continue\\\"`, optimagic replaces your criterion with a dummy function (and adjusts the derivative accordingly). \\n\",\n    \"\\n\",\n    \"The dummy function has two important properties:\\n\",\n    \"\\n\",\n    \"1. Its value is always higher than criterion at start params. \\n\",\n    \"2. Its slope guides the optimizer back towards the start parameters. 
I.e., if you are minimizing, the direction of strongest decrease is towards the start parameters; if you are maximizing, the direction of strongest increase is towards the start parameters. \\n\",\n    \"\\n\",\n    \"Therefore, when hitting an undefined area, an optimizer can take a few steps back until it is in better territory and then continue its work. \\n\",\n    \"\\n\",\n    \"Importantly, the optimizer will not simply go back to a previously evaluated point (which would just lead to cyclical behavior). It will just go back in the direction it originally came from.\\n\",\n    \"\\n\",\n    \"In the concrete example, the dummy function would look similar to the following:\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"11\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"def dummy(params):\\n\",\n    \"    start_params = np.array([0.3, 0.4])\\n\",\n    \"    # this is close to the actual value used by optimagic\\n\",\n    \"    constant = 8000\\n\",\n    \"    # the actual slope used by optimagic would be even smaller\\n\",\n    \"    slope = 10_000\\n\",\n    \"    diff = params - start_params\\n\",\n    \"    return constant + slope * np.linalg.norm(diff)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"12\",\n   \"metadata\": {},\n   \"source\": [\n    \"Now, let's plot the two functions. For better illustration, we assume that the jennrich_sampson function is only defined until it reaches a value of 100_000 and the dummy function takes over from there.  
\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"13\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"from plotly import graph_objects as go\\n\",\n    \"\\n\",\n    \"grid = np.linspace(0, 1)\\n\",\n    \"params = [np.full(2, val) for val in grid]\\n\",\n    \"values = np.array([jennrich_sampson(p) for p in params])\\n\",\n    \"values = np.where(values <= 1e5, values, np.nan)\\n\",\n    \"dummy_values = np.array([dummy(p) for p in params])\\n\",\n    \"dummy_values = np.where(np.isfinite(values), np.nan, dummy_values)\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"14\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"fig = go.Figure()\\n\",\n    \"fig.add_trace(go.Scatter(x=grid, y=values))\\n\",\n    \"fig.add_trace(go.Scatter(x=grid, y=dummy_values))\\n\",\n    \"fig.show()\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"15\",\n   \"metadata\": {},\n   \"source\": [\n    \"We can see that the dummy function is lower than the highest achieved value of `jennrich_sampson` but higher than the start values. It is also rather flat. Fortunately, that is all we need. \"\n   ]\n  }\n ],\n \"metadata\": {\n  \"kernelspec\": {\n   \"display_name\": \"Python 3 (ipykernel)\",\n   \"language\": \"python\",\n   \"name\": \"python3\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 3\n   },\n   \"file_extension\": \".py\",\n   \"mimetype\": \"text/x-python\",\n   \"name\": \"python\",\n   \"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": \"ipython3\",\n   \"version\": \"3.10.14\"\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 5\n}\n"
  },
  {
    "path": "docs/source/how_to/how_to_globalization.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"# How to choose a strategy for global optimization\\n\",\n    \"\\n\",\n    \"(to be written)\"\n   ]\n  }\n ],\n \"metadata\": {\n  \"language_info\": {\n   \"name\": \"python\"\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 2\n}\n"
  },
  {
    "path": "docs/source/how_to/how_to_logging.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"(how-to-logging)=\\n\",\n    \"\\n\",\n    \"# How to use logging\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"optimagic can keep a persistent log of the parameter and criterion values tried out by an optimizer in a sqlite database. \\n\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Turn logging on or off\\n\",\n    \"\\n\",\n    \"To enable logging, it suffices to provide a path to an sqlite database when calling ``maximize`` or ``minimize``. The database does not have to exist, optimagic will generate it for you. \"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"from pathlib import Path\\n\",\n    \"\\n\",\n    \"import numpy as np\\n\",\n    \"import plotly.io as pio\\n\",\n    \"\\n\",\n    \"pio.renderers.default = \\\"notebook_connected\\\"\\n\",\n    \"\\n\",\n    \"import optimagic as om\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"def sphere(params):\\n\",\n    \"    return params @ params\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"# Remove the log file if it exists (just needed for the example)\\n\",\n    \"log_file = Path(\\\"my_log.db\\\")\\n\",\n    \"if log_file.exists():\\n\",\n    \"    log_file.unlink()\\n\",\n    \"\\n\",\n    \"res = om.minimize(\\n\",\n    \"    fun=sphere,\\n\",\n    \"    params=np.arange(5),\\n\",\n    \"    algorithm=\\\"scipy_lbfgsb\\\",\\n\",\n    \"    logging=\\\"my_log.db\\\",\\n\",\n    \")\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"In case the SQLite file already exists, this will raise a 
`FileExistsError` to prevent you from accidentally polluting an existing database. If you want to reuse\n\",\n    \"an existing database on purpose, you must explicitly provide the corresponding option for `if_database_exists`:\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"log_options = om.SQLiteLogOptions(\\n\",\n    \"    \\\"my_log.db\\\", if_database_exists=om.ExistenceStrategy.EXTEND\\n\",\n    \")\\n\",\n    \"\\n\",\n    \"res = om.minimize(\\n\",\n    \"    fun=sphere,\\n\",\n    \"    params=np.arange(5),\\n\",\n    \"    algorithm=\\\"scipy_lbfgsb\\\",\\n\",\n    \"    logging=log_options,\\n\",\n    \")\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Make logging faster\\n\",\n    \"\\n\",\n    \"By default, we use a very safe mode of sqlite that makes it almost impossible to corrupt the database. Even if your computer is suddenly shut down or unplugged. \\n\",\n    \"\\n\",\n    \"However, this makes writing logs rather slow, which becomes notable when the criterion function is very fast. 
\\n\",\n    \"\\n\",\n    \"In that case, you can enable `fast_logging`, which is still quite safe!\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"log_options = om.SQLiteLogOptions(\\n\",\n    \"    \\\"my_log.db\\\",\\n\",\n    \"    fast_logging=True,\\n\",\n    \"    if_database_exists=om.ExistenceStrategy.REPLACE,\\n\",\n    \")\\n\",\n    \"\\n\",\n    \"res = om.minimize(\\n\",\n    \"    fun=sphere,\\n\",\n    \"    params=np.arange(5),\\n\",\n    \"    algorithm=\\\"scipy_lbfgsb\\\",\\n\",\n    \"    logging=log_options,\\n\",\n    \")\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Reading the log\\n\",\n    \"To read the log after an optimization, extract the logger from the optimization result:\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"reader = res.logger\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"Alternatively, you can create the reader like this:\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"reader = om.SQLiteLogReader(\\\"my_log.db\\\")\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"Read the start params\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"reader.read_start_params()\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"Read a specific iteration (use -1 for the last)\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"reader.read_iteration(-1)\"\n 
  ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"Read the full history\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"reader.read_history().keys()\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Plot the history from a log\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"fig = om.criterion_plot(\\\"my_log.db\\\")\\n\",\n    \"fig.show()\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"fig = om.params_plot(\\\"my_log.db\\\", selector=lambda x: x[1:3])\\n\",\n    \"fig.show()\"\n   ]\n  }\n ],\n \"metadata\": {\n  \"interpreter\": {\n   \"hash\": \"5cdb9867252288f10687117449de6ad870b49795ca695c868016dc0022895cce\"\n  },\n  \"kernelspec\": {\n   \"display_name\": \"Python 3 (ipykernel)\",\n   \"language\": \"python\",\n   \"name\": \"python3\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 3\n   },\n   \"file_extension\": \".py\",\n   \"mimetype\": \"text/x-python\",\n   \"name\": \"python\",\n   \"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": \"ipython3\",\n   \"version\": \"3.10.14\"\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 2\n}\n"
  },
  {
    "path": "docs/source/how_to/how_to_multistart.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"0\",\n   \"metadata\": {},\n   \"source\": [\n    \"(how-to-multistart)=\\n\",\n    \"\\n\",\n    \"# How to do multistart optimizations\\n\",\n    \"\\n\",\n    \"Sometimes you want to make sure that your optimization is robust to the initial\\n\",\n    \"parameter values, i.e. that it does not get stuck at a local optimum. This is where\\n\",\n    \"multistart comes in handy.\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"## What does multistart (not) do\\n\",\n    \"\\n\",\n    \"In short, multistart iteratively runs local optimizations from different initial\\n\",\n    \"conditions. If enough local optimization convergence to the same point, it stops.\\n\",\n    \"Importantly, it cannot guarantee that the result is the global optimum, but it can\\n\",\n    \"increase your confidence in the result.\\n\",\n    \"\\n\",\n    \"## TL;DR\\n\",\n    \"\\n\",\n    \"To activate multistart at the default options, pass `multistart=True` to the `minimize`\\n\",\n    \"or `maximize` function, as well as finite bounds on the parameters (which are used to\\n\",\n    \"sample the initial points). 
The default options are discussed below.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"1\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"import numpy as np\\n\",\n    \"import plotly.io as pio\\n\",\n    \"\\n\",\n    \"pio.renderers.default = \\\"notebook_connected\\\"\\n\",\n    \"\\n\",\n    \"import optimagic as om\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"def fun(x):\\n\",\n    \"    return x @ x\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"x0 = np.arange(7) - 4\\n\",\n    \"\\n\",\n    \"bounds = om.Bounds(\\n\",\n    \"    lower=np.full_like(x0, -5),\\n\",\n    \"    upper=np.full_like(x0, 10),\\n\",\n    \")\\n\",\n    \"\\n\",\n    \"algo_options = {\\\"stopping_maxfun\\\": 1_000}\\n\",\n    \"\\n\",\n    \"res = om.minimize(\\n\",\n    \"    fun=fun,\\n\",\n    \"    x0=x0,\\n\",\n    \"    algorithm=\\\"scipy_neldermead\\\",\\n\",\n    \"    algo_options=algo_options,\\n\",\n    \"    bounds=bounds,\\n\",\n    \"    multistart=True,\\n\",\n    \")\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"2\",\n   \"metadata\": {},\n   \"source\": [\n    \"In this example, we limited each local optimization to 1_000 function evaluations. In\\n\",\n    \"general, it is a good idea to limit the number of iterations and function evaluations\\n\",\n    \"for the local optimization. Because of the iterative nature of multistart, this\\n\",\n    \"limitation will usually not result in a precision issue.\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"3\",\n   \"metadata\": {},\n   \"source\": [\n    \"## What does multistart mean in optimagic?\\n\",\n    \"\\n\",\n    \"Our multistart optimizations are inspired by the [TikTak algorithm](https://github.com/serdarozkan/TikTak) and consist of the following steps:\\n\",\n    \"\\n\",\n    \"1. Draw a large exploration sample of parameter vectors randomly or using a\\n\",\n    \"   low-discrepancy sequence.\\n\",\n    \"1. 
Evaluate the objective function in parallel on the exploration sample.\\n\",\n    \"1. Sort the parameter vectors from best to worst according to their objective function\\n\",\n    \"   values. \\n\",\n    \"1. Run local optimizations iteratively. That is, the first local optimization is started\\n\",\n    \"   from the best parameter vector in the sample. All subsequent ones are started from a\\n\",\n    \"   convex combination of the currently best known parameter vector and the next sample\\n\",\n    \"   point. \"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"4\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Visualizing multistart results\\n\",\n    \"\\n\",\n    \"To illustrate the multistart results, we will consider the optimization of a slightly\\n\",\n    \"more complex objective function, compared to `fun` from above. We also limit the\\n\",\n    \"number of exploration samples to 100.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"5\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"def alpine(x):\\n\",\n    \"    return np.sum(np.abs(x * np.sin(x) + 0.1 * x))\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"res = om.minimize(\\n\",\n    \"    alpine,\\n\",\n    \"    x0=x0,\\n\",\n    \"    algorithm=\\\"scipy_neldermead\\\",\\n\",\n    \"    bounds=bounds,\\n\",\n    \"    algo_options=algo_options,\\n\",\n    \"    multistart=om.MultistartOptions(n_samples=100, seed=0),\\n\",\n    \")\\n\",\n    \"\\n\",\n    \"fig = om.criterion_plot(res, monotone=True)\\n\",\n    \"fig.show()\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"6\",\n   \"metadata\": {},\n   \"source\": [\n    \"In the above image we see the optimization history for all of the local optimizations\\n\",\n    \"that have been run by multistart. 
The turquoise line represents the history\n\",\n    \"corresponding to the local optimization that found the overall best parameter.\\n\",\n    \"\\n\",\n    \"We see that running a single optimization would not have sufficed, as some local\\n\",\n    \"optimizations are stuck.\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"7\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Multistart does not always run many optimizations\\n\",\n    \"\\n\",\n    \"Since the local optimizations are run iteratively by multistart, it is possible that\\n\",\n    \"only a handful of optimizations are actually run if all of them converge to the same\\n\",\n    \"point. This convergence is determined by the `convergence_max_discoveries` option,\\n\",\n    \"which defaults to 2. This means that if 2 local optimizations report the same point,\\n\",\n    \"multistart will stop. Below we see that if we use the simpler objective function\\n\",\n    \"(`fun`), and the `scipy_lbfgsb` algorithm, multistart runs only 2 local optimizations,\\n\",\n    \"and then stops, as both of them converge to the same point. 
Note that the\n\",\n    \"`scipy_lbfgsb` algorithm can solve this simple problem precisely, without reaching the\\n\",\n    \"maximum number of function evaluations.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"8\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"res = om.minimize(\\n\",\n    \"    fun,\\n\",\n    \"    x0=x0,\\n\",\n    \"    algorithm=\\\"scipy_lbfgsb\\\",\\n\",\n    \"    bounds=bounds,\\n\",\n    \"    algo_options=algo_options,\\n\",\n    \"    multistart=om.MultistartOptions(n_samples=100, seed=0),\\n\",\n    \")\\n\",\n    \"\\n\",\n    \"fig = om.criterion_plot(res)\\n\",\n    \"fig.show()\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"9\",\n   \"metadata\": {},\n   \"source\": [\n    \"## How to configure multistart\\n\",\n    \"\\n\",\n    \"Configuration of multistart can be done by passing an instance of\\n\",\n    \"`optimagic.MultistartOptions` to `minimize` or `maximize`. Let's look at a few example\\n\",\n    \"configurations.\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"10\",\n   \"metadata\": {},\n   \"source\": [\n    \"### How to run a specific number of optimizations\\n\",\n    \"\\n\",\n    \"To run a specific number of local optimizations, you need to set the `stopping_maxopt`\\n\",\n    \"option. Note that this does not set the number of exploration samples, which is\\n\",\n    \"controlled by the `n_samples` option. The number of exploration samples always needs\\n\",\n    \"to be at least as large as the number of local optimizations.\\n\",\n    \"\\n\",\n    \"Note that, as long as `convergence_max_discoveries` is smaller than `stopping_maxopt`,\\n\",\n    \"it is possible that a smaller number of local optimizations are run. 
To avoid this,\\n\",\n    \"set `convergence_max_discoveries` to a value at least as large as `stopping_maxopt`.\\n\",\n    \"\\n\",\n    \"To run, for example, 10 local optimizations from 15 exploration samples, do:\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"11\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"res = om.minimize(\\n\",\n    \"    alpine,\\n\",\n    \"    x0=x0,\\n\",\n    \"    algorithm=\\\"scipy_neldermead\\\",\\n\",\n    \"    bounds=bounds,\\n\",\n    \"    algo_options=algo_options,\\n\",\n    \"    multistart=om.MultistartOptions(\\n\",\n    \"        n_samples=15,\\n\",\n    \"        stopping_maxopt=10,\\n\",\n    \"        convergence_max_discoveries=10,\\n\",\n    \"    ),\\n\",\n    \")\\n\",\n    \"\\n\",\n    \"res.multistart_info.n_optimizations\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"12\",\n   \"metadata\": {},\n   \"source\": [\n    \"### How to set a custom exploration sample\\n\",\n    \"\\n\",\n    \"If you want to start the multistart algorithm with a custom exploration sample, you can\\n\",\n    \"do so by passing a sequence of parameters to the `sample` option. 
Note that sequence\\n\",\n    \"elements must be of the same type as your parameter.\\n\",\n    \"\\n\",\n    \"To generate a sample of 100 random parameters and run them through the multistart\\n\",\n    \"algorithm, do:\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"13\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"rng = np.random.default_rng(12345)\\n\",\n    \"\\n\",\n    \"sample = [x0 + rng.uniform(-1, 1, size=len(x0)) for _ in range(100)]\\n\",\n    \"\\n\",\n    \"res = om.minimize(\\n\",\n    \"    alpine,\\n\",\n    \"    x0=x0,\\n\",\n    \"    algorithm=\\\"scipy_neldermead\\\",\\n\",\n    \"    bounds=bounds,\\n\",\n    \"    algo_options=algo_options,\\n\",\n    \"    multistart=om.MultistartOptions(sample=sample),\\n\",\n    \")\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"14\",\n   \"metadata\": {},\n   \"source\": [\n    \"### How to run multistart in parallel\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"The multistart algorithm can be run in parallel by setting the `n_cores` option to a\\n\",\n    \"value greater than 1. This will run the algorithm in batches. By default, the batch\\n\",\n    \"size is set to `n_cores`, but can be controlled by setting the `batch_size` option. 
The\\n\",\n    \"default batch evaluator is `joblib`, but can be controlled by setting the\\n\",\n    \"`batch_evaluator` option to `\\\"pathos\\\"` or a custom callable.\\n\",\n    \"\\n\",\n    \"To run the multistart algorithm in parallel, do:\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"15\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"res = om.minimize(\\n\",\n    \"    alpine,\\n\",\n    \"    x0=x0,\\n\",\n    \"    algorithm=\\\"scipy_lbfgsb\\\",\\n\",\n    \"    bounds=bounds,\\n\",\n    \"    algo_options=algo_options,\\n\",\n    \"    multistart=om.MultistartOptions(n_cores=2),\\n\",\n    \")\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"16\",\n   \"metadata\": {},\n   \"source\": [\n    \"## What to do if you do not have bounds\\n\",\n    \"\\n\",\n    \"Multistart requires finite bounds on the parameters. If your optimization problem is not\\n\",\n    \"bounded, you can set soft lower and upper bounds. These bounds will only be used to\\n\",\n    \"draw the exploration sample, and will not be used to constrain the local optimizations.\\n\",\n    \"\\n\",\n    \"To set soft bounds, do:\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"17\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"res = om.minimize(\\n\",\n    \"    alpine,\\n\",\n    \"    x0=x0,\\n\",\n    \"    algorithm=\\\"scipy_lbfgsb\\\",\\n\",\n    \"    bounds=om.Bounds(soft_lower=np.full_like(x0, -3), soft_upper=np.full_like(x0, 8)),\\n\",\n    \"    multistart=True,\\n\",\n    \")\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"18\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Understanding multistart results\\n\",\n    \"\\n\",\n    \"When activating multistart, the optimization result object corresponds to the local\\n\",\n    \"optimization that found the best objective function value. 
The result object has the\\n\",\n    \"additional attribute `multistart_info`, where all of the additional information is\\n\",\n    \"stored. It has the following attributes:\\n\",\n    \"\\n\",\n    \"- `local_optima`: A list with the results from all local optimizations that were performed.\\n\",\n    \"- `start_parameters`: A list with the start parameters from those optimizations \\n\",\n    \"- `exploration_sample`: A list with parameter vectors at which the objective function was evaluated in an initial exploration phase. \\n\",\n    \"- `exploration_results`: The corresponding objective values.\\n\",\n    \"- `n_optimizations`: The number of local optimizations that were run.\\n\",\n    \"\\n\",\n    \"To illustrate the multistart results, let us consider the optimization of the simple\\n\",\n    \"`fun` objective function from above.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"19\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"res = om.minimize(\\n\",\n    \"    fun,\\n\",\n    \"    x0=x0,\\n\",\n    \"    algorithm=\\\"scipy_lbfgsb\\\",\\n\",\n    \"    bounds=bounds,\\n\",\n    \"    algo_options=algo_options,\\n\",\n    \"    multistart=om.MultistartOptions(n_samples=100, convergence_max_discoveries=2),\\n\",\n    \")\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"20\",\n   \"metadata\": {},\n   \"source\": [\n    \"### Start parameters\\n\",\n    \"\\n\",\n    \"The start parameters are the parameter vectors from which the local optimizations were\\n\",\n    \"started. 
Since the default number of `convergence_max_discoveries` is 2, and both\n\",\n    \"local optimizations were successful, the start parameters have 2 rows.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"21\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"res.multistart_info.start_parameters\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"22\",\n   \"metadata\": {},\n   \"source\": [\n    \"### Local Optima\\n\",\n    \"\\n\",\n    \"The local optima are the results from the local optimizations. Since in this example\\n\",\n    \"only two local optimizations were run, the local optima list has two elements, each of\\n\",\n    \"which is an optimization result object.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"23\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"len(res.multistart_info.local_optima)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"24\",\n   \"metadata\": {},\n   \"source\": [\n    \"### Exploration sample\\n\",\n    \"\\n\",\n    \"The exploration sample is a list of parameter vectors at which the objective function\\n\",\n    \"was evaluated. 
Above, we chose a random exploration sample of 100 parameter vectors.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"25\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"np.vstack(res.multistart_info.exploration_sample).shape\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"26\",\n   \"metadata\": {},\n   \"source\": [\n    \"### Exploration results\\n\",\n    \"\\n\",\n    \"The exploration results are the objective function values at the exploration sample.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"27\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"len(res.multistart_info.exploration_results)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"28\",\n   \"metadata\": {},\n   \"source\": [\n    \"### Number of local optimizations\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"29\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"res.multistart_info.n_optimizations\"\n   ]\n  }\n ],\n \"metadata\": {\n  \"kernelspec\": {\n   \"display_name\": \"Python 3 (ipykernel)\",\n   \"language\": \"python\",\n   \"name\": \"python3\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 3\n   },\n   \"file_extension\": \".py\",\n   \"mimetype\": \"text/x-python\",\n   \"name\": \"python\",\n   \"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": \"ipython3\",\n   \"version\": \"3.10.14\"\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 5\n}\n"
  },
  {
    "path": "docs/source/how_to/how_to_scaling.md",
    "content": "(scaling)=\n\n# How to scale optimization problems\n\nReal world optimization problems often comprise parameters of vastly different orders of\nmagnitudes. This is typically not a problem for gradient based optimization algorithms\nbut can considerably slow down derivative free optimizers. Below we describe three\nsimple heuristics to improve the scaling of optimization problems and discuss the pros\nand cons of each approach.\n\n## What does well scaled mean\n\nIn short, an optimization problem is well scaled if a fixed step in any direction yields\na roughly similar sized change in the objective function.\n\nIn practice, this can never be achieved perfectly (at least for nonlinear problems).\nHowever, one can easily improve over simply ignoring the problem altogether.\n\n## TL;DR\n\nTo activate scaling at the default options, pass `scaling=True` to the `minimize` or\n`maximize` function. This uses the start values heuristic explained below. The default\noptions are discussed in the section {ref}`scaling-default-values`.\n\n```{code-block} python\n---\nemphasize-lines: 13\n---\nimport numpy as np\nimport optimagic as om\n\n\ndef fun(x):\n    return x @ x\n\n\nres = om.minimize(\n    fun=fun,\n    x0=np.arange(5),\n    algorithm=\"scipy_lbfgsb\",\n    scaling=True,\n)\n```\n\n## Heuristics to improve scaling\n\n(scaling-start-values-heuristic)=\n\n### Divide by absolute value of start parameters\n\nIn many applications, parameters with very large start values will vary over a wide\nrange and a change in that parameter will only lead to a relatively small change in the\nobjective function. 
If this is the case, the scaling of the optimization problem can be\nimproved by simply dividing all parameter vectors by the start parameters.\n\n**Advantages:**\n\n- Straightforward\n- Works with any type of constraints\n\n**Disadvantages:**\n\n- Makes scaling dependent on start values\n- Parameters with zero start value need special treatment\n\n**How to specify this scaling:**\n\n```{code-block} python\n---\nemphasize-lines: 5\n---\nres = om.minimize(\n    fun=fun,\n    x0=np.arange(5),\n    algorithm=\"scipy_lbfgsb\",\n    scaling=om.ScalingOptions(method=\"start_values\", clipping_value=0.1),\n)\n```\n\n### Divide by bounds\n\nIn many optimization problems, one has additional information on bounds of the parameter\nspace. Some of these bounds are hard (e.g. probabilities or variances are non negative),\nothers are soft and derived from simple considerations (e.g. if a time discount factor\nwere smaller than 0.7, we would not observe anyone to pursue a university degree in a\nstructural model of educational choices; or if an infection probability was higher than\n20% for distant contacts, the covid pandemic would have been over after a month). For\nparameters that strongly influence the objective function, the bounds stemming from\nthese considerations are typically tighter than for parameters that have a small effect\non the objective function.\n\nThus, a natural approach to improve the scaling of the optimization problem is to re-map\nall parameters such that the bounds are [0, 1] for all parameters. 
This has the\nadditional advantage that absolute and relative convergence criteria on parameter\nchanges become the same.\n\n**Advantages:**\n\n- Straightforward\n- Works well in many practical applications\n- Scaling is independent of start values\n- No problems with division by zero\n\n**Disadvantages:**\n\n- Only works if all parameters have bounds\n- This prohibits some kinds of other constraints in optimagic\n\n**How to specify this scaling:**\n\n```{code-block} python\n---\nemphasize-lines: 5,6\n---\nres = om.minimize(\n    fun=fun,\n    x0=np.arange(5),\n    algorithm=\"scipy_lbfgsb\",\n    bounds=om.Bounds(lower=np.zeros(5), upper=2 * np.arange(5) + 1),\n    scaling=om.ScalingOptions(method=\"bounds\", clipping_value=0.0),\n)\n```\n\n## Influencing the magnitude of parameters\n\nThe above approaches align the scale of parameters relative to each other. However, the\noverall magnitude is set rather arbitrarily. For example, when dividing by start values,\nthe magnitude of the scaled parameters is around one. When dividing by bounds, it is\nsomewhere between zero and one.\n\nFor the performance of numerical optimizers, only the relative scales are important.\n\nHowever, influencing the overall magnitude can be helpful to trick some optimizers into\ndoing things they do not want to do. For example, when there is a minimal allowed\ninitial trust region radius, increasing the magnitude of parameters allows to\neffectively make the trust region radius smaller.\n\nSetting the magnitude means simply adding one more entry to the scaling options. 
For\nexample, if you want to scale by bounds and increase the magnitude by a factor of five:\n\n```{code-block} python\n---\nemphasize-lines: 6\n---\nres = om.minimize(\n    fun=fun,\n    x0=np.arange(5),\n    algorithm=\"scipy_lbfgsb\",\n    bounds=om.Bounds(lower=np.zeros(5), upper=2 * np.arange(5) + 1),\n    scaling=om.ScalingOptions(method=\"bounds\", clipping_value=0.0, magnitude=5),\n)\n```\n\n## Remarks\n\n### What is the `clipping_value`\n\nIn all of the above heuristics, the parameter vector is divided (elementwise) by some\nother vector and it is possible that some entries of the divisor are zero or close to\nzero.\n\nThe clipping value bounds the elements of the divisor away from zero. It should be set\nto a strictly non-zero number for the `\"start_values\"` and `\"gradient\"` approach. The\n`\"bounds\"` approach avoids division by exact zeros by construction. The\n`\"clipping_value\"` can still be used to avoid extreme upscaling of parameters with very\ntight bounds. However, this means that the bounds of the re-scaled problem are not\nexactly [0, 1] for all parameters.\n\n(scaling-default-values)=\n\n### Default values\n\nScaling is disabled by default. By passing `scaling=True`, we enable scaling at the\ndefault values. We use the `\"start_values\"` method with a `\"clipping_value\"` of 0.1 and\na magnitude of 1.0. This is the default method because it can be used for all\noptimization problems and has low computational cost. We strongly recommend you read the\nabove guidelines and choose the method that is most suitable for your problem.\n"
  },
  {
    "path": "docs/source/how_to/how_to_slice_plot.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"# How to visualize an optimization problem\\n\",\n    \"\\n\",\n    \"Plotting the criterion function of an optimization problem can answer important questions\\n\",\n    \"- Is the function smooth?\\n\",\n    \"- Is the function flat in some directions?\\n\",\n    \"- Should the optimization problem be scaled?\\n\",\n    \"- Is a candidate optimum a global one?\\n\",\n    \"\\n\",\n    \"Below we show how to make a slice plot of the criterion function.\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## The simple sphere function (again)\\n\",\n    \"\\n\",\n    \"Let's look at the simple sphere function again. This time, we specify params as dictionary, but of course, any other params format (recall [pytrees](https://jax.readthedocs.io/en/latest/pytrees.html)) would work just as well. \"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"import numpy as np\\n\",\n    \"import plotly.io as pio\\n\",\n    \"\\n\",\n    \"pio.renderers.default = \\\"notebook_connected\\\"\\n\",\n    \"\\n\",\n    \"import optimagic as om\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"def sphere(params):\\n\",\n    \"    x = np.array(list(params.values()))\\n\",\n    \"    return x @ x\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"params = {\\\"alpha\\\": 0, \\\"beta\\\": 0, \\\"gamma\\\": 0, \\\"delta\\\": 0}\\n\",\n    \"bounds = om.Bounds(\\n\",\n    \"    lower={name: -5 for name in params},\\n\",\n    \"    upper={name: i + 2 for i, name in enumerate(params)},\\n\",\n    \")\"\n   ]\n  },\n  {\n   \"cell_type\": 
\"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Creating a simple slice plot\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"fig = om.slice_plot(\\n\",\n    \"    func=sphere,\\n\",\n    \"    params=params,\\n\",\n    \"    bounds=bounds,\\n\",\n    \")\\n\",\n    \"fig.show()\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \":::{note}\\n\",\n    \"\\n\",\n    \"For details on using other plotting backends, see [How to change the plotting backend](how_to_change_plotting_backend.ipynb).\\n\",\n    \"\\n\",\n    \":::\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Interpreting the plot\\n\",\n    \"\\n\",\n    \"The plot gives us the following insights:\\n\",\n    \" \\n\",\n    \"- There is no sign of local optima. \\n\",\n    \"- There is no sign of noise or non-differentiabilities (careful, grid might not be fine enough).\\n\",\n    \"- The problem seems to be convex.\\n\",\n    \"\\n\",\n    \"-> We would expect almost any derivative based optimizer to work well here (which we know to be correct in that case)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Using advanced options\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"fig = om.slice_plot(\\n\",\n    \"    func=sphere,\\n\",\n    \"    params=params,\\n\",\n    \"    bounds=bounds,\\n\",\n    \"    # selecting a subset of params\\n\",\n    \"    selector=lambda x: [x[\\\"alpha\\\"], x[\\\"beta\\\"]],\\n\",\n    \"    # evaluate func in parallel\\n\",\n    \"    n_cores=4,\\n\",\n    \"    # rename the parameters\\n\",\n    \"    param_names={\\\"alpha\\\": \\\"Alpha\\\", \\\"beta\\\": \\\"Beta\\\"},\\n\",\n    \"    title=\\\"Amazing 
Plot\\\",\\n\",\n    \"    # number of gridpoints in each dimension\\n\",\n    \"    n_gridpoints=50,\\n\",\n    \")\\n\",\n    \"fig.show()\"\n   ]\n  }\n ],\n \"metadata\": {\n  \"kernelspec\": {\n   \"display_name\": \"optimagic\",\n   \"language\": \"python\",\n   \"name\": \"python3\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 3\n   },\n   \"file_extension\": \".py\",\n   \"mimetype\": \"text/x-python\",\n   \"name\": \"python\",\n   \"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": \"ipython3\",\n   \"version\": \"3.10.18\"\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 4\n}\n"
  },
  {
    "path": "docs/source/how_to/how_to_slice_plot_3d.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"0\",\n   \"metadata\": {},\n   \"source\": [\n    \"# Visualizing Objective Functions with `slice_plot_3d`\\n\",\n    \"\\n\",\n    \"In optimization, understanding the shape of your objective function is a key step toward choosing the right algorithm.\\n\",\n    \"\\n\",\n    \"This notebook introduces the `slice_plot_3d` tool, which provides flexible ways to visualize:\\n\",\n    \"- Single-parameter sensitivity through **univariate slice plots**,\\n\",\n    \"- Pairwise interactions through **contour** or **surface plots**,\\n\",\n    \"- Full parameter relationships through **subplot grids**.\\n\",\n    \"\\n\",\n    \"We will progress from basic to advanced usage, learning how to create clean and insightful plots easily.\\n\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"1\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Univariate slice Plot\\n\",\n    \"\\n\",\n    \"We start with a **univariate slice plot**.\\n\",\n    \"This plots the function value along each parameter individually,\\n\",\n    \"while fixing others at their current values. This provides a clean view of how sensitive the function is to each parameter separately. 
We use the **Sphere function**, which sums the squares of each input.\\n\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"2\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"import numpy as np\\n\",\n    \"\\n\",\n    \"import optimagic as om\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"3\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"# Define the Sphere function\\n\",\n    \"def sphere(params):\\n\",\n    \"    x = np.array(list(params.values()))\\n\",\n    \"    return np.sum(x**2)\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"4\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"params = {\\\"alpha\\\": 0, \\\"beta\\\": 0, \\\"gamma\\\": 0, \\\"delta\\\": 0}\\n\",\n    \"bounds = om.Bounds(\\n\",\n    \"    lower={name: -5 for name in params},\\n\",\n    \"    upper={name: i + 2 for i, name in enumerate(params)},\\n\",\n    \")\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"5\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"fig = om.sandbox.slice_plot_3d(\\n\",\n    \"    func=sphere,\\n\",\n    \"    params=params,\\n\",\n    \"    bounds=bounds,\\n\",\n    \")\\n\",\n    \"fig.show()\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"6\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Univariate slice plot with selected parameters\\n\",\n    \"\\n\",\n    \"In many situations, we are interested in exploring only specific parameters.\\n\",\n    \"Using the `selector` argument, we can restrict the univariate plots to\\n\",\n    \"chosen parameters — here, we select `\\\"alpha\\\"` and `\\\"beta\\\"`.\\n\",\n    \"\\n\",\n    \"This focuses our visualization on dimensions of interest.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": 
\"7\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"fig = om.sandbox.slice_plot_3d(\\n\",\n    \"    func=sphere,\\n\",\n    \"    params=params,\\n\",\n    \"    bounds=bounds,\\n\",\n    \"    selector=lambda p: [p[\\\"alpha\\\"], p[\\\"beta\\\"]],\\n\",\n    \"    projection=\\\"univariate\\\",\\n\",\n    \")\\n\",\n    \"fig.show()\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"8\",\n   \"metadata\": {},\n   \"source\": [\n    \"## 3D Surface Plot for Two Parameters\\n\",\n    \"\\n\",\n    \"To better understand interaction between parameters,\\n\",\n    \"we can switch to a **3D surface plot**.\\n\",\n    \"\\n\",\n    \"Surface plots reveal valleys, ridges, and general landscape shapes clearly.\\n\",\n    \"Here, we vary `\\\"alpha\\\"` and `\\\"beta\\\"` simultaneously and plot the resulting surface.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"9\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"fig = om.sandbox.slice_plot_3d(\\n\",\n    \"    func=sphere,\\n\",\n    \"    params=params,\\n\",\n    \"    bounds=bounds,\\n\",\n    \"    selector=lambda p: [p[\\\"alpha\\\"], p[\\\"beta\\\"]],\\n\",\n    \"    projection=\\\"surface\\\",\\n\",\n    \"    n_gridpoints=30,\\n\",\n    \")\\n\",\n    \"fig.show()\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"10\",\n   \"metadata\": {},\n   \"source\": [\n    \"## 2D Contour Plot for Two Parameters\\n\",\n    \"\\n\",\n    \"Contour plots offer a 2D view with iso-function-value curves.\\n\",\n    \"\\n\",\n    \"They are especially useful for:\\n\",\n    \"- Finding basins or valleys.\\n\",\n    \"- Visualizing optimization paths.\\n\",\n    \"- Detecting steep or flat regions easily.\\n\",\n    \"\\n\",\n    \"Again, we use `\\\"alpha\\\"` and `\\\"beta\\\"` to generate the plot.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"11\",\n   
\"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"fig = om.sandbox.slice_plot_3d(\\n\",\n    \"    func=sphere,\\n\",\n    \"    params=params,\\n\",\n    \"    bounds=bounds,\\n\",\n    \"    selector=lambda p: [p[\\\"alpha\\\"], p[\\\"beta\\\"]],\\n\",\n    \"    projection=\\\"contour\\\",\\n\",\n    \"    n_gridpoints=30,\\n\",\n    \")\\n\",\n    \"fig.show()\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"12\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Grid View for Multiple Parameters\\n\",\n    \"When selecting more than two parameters, the slice_plot_3d function automatically constructs a grid-based visualization to analyze both individual and pairwise parameter effects.\\n\",\n    \"\\n\",\n    \"- **Diagonal** cells display 1D univariate slice plots, representing the isolated\\n\",\n    \"effect of each parameter on the function output.\\n\",\n    \"- **Off-diagonal** cells visualize pairwise interactions between parameters using\\n\",\n    \"either 3D surface or contour plots.\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"### Single projection type\\n\",\n    \"##### (eg: `projection: \\\"surface\\\"`)\\n\",\n    \"\\n\",\n    \"By default, when a single projection type is specified (e.g., \\\"surface\\\" or \\\"contour\\\"), the following behavior is applied:\\n\",\n    \"\\n\",\n    \"- The **lower triangle** of the grid (i.e., plots below the diagonal) displays the\\n\",\n    \"specified projection type.\\n\",\n    \"- The **upper triangle** remains empty to avoid redundancy.\\n\",\n    \"\\n\",\n    \"This allows for a quick and uncluttered visualization of pairwise parameter interactions.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"13\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"fig = om.sandbox.slice_plot_3d(\\n\",\n    \"    func=sphere,\\n\",\n    \"    params=params,\\n\",\n    \"    bounds=bounds,\\n\",\n    \"    
projection=\\\"surface\\\",\\n\",\n    \"    n_gridpoints=20,\\n\",\n    \")\\n\",\n    \"fig.show()\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"14\",\n   \"metadata\": {},\n   \"source\": [\n    \"### Multiple projection types\\n\",\n    \"##### (eg: `projection: {\\\"lower\\\": \\\"surface\\\", \\\"upper\\\": \\\"contour\\\"}`)\\n\",\n    \"\\n\",\n    \"For enhanced flexibility, slice_plot_3d also supports customizing projection types independently for the upper and lower halves of the grid. This is done by passing a dictionary to the projection argument:\\n\",\n    \"\\n\",\n    \"- The **\\\"lower\\\"** key controls the projection type for plots below the diagonal.\\n\",\n    \"- The **\\\"upper\\\"** key controls the projection type for plots above the diagonal.\\n\",\n    \"\\n\",\n    \"For example, setting \\\"lower\\\" to \\\"surface\\\" and \\\"upper\\\" to \\\"contour\\\" enables simultaneous display of both 3D and 2D representations, maximizing interpretability.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"15\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"fig = om.sandbox.slice_plot_3d(\\n\",\n    \"    func=sphere,\\n\",\n    \"    params=params,\\n\",\n    \"    bounds=bounds,\\n\",\n    \"    projection={\\\"lower\\\": \\\"surface\\\", \\\"upper\\\": \\\"contour\\\"},\\n\",\n    \"    n_gridpoints=20,\\n\",\n    \")\\n\",\n    \"fig.show()\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"16\",\n   \"metadata\": {},\n   \"source\": [\n    \"This **dual-projection** layout is particularly useful when analyzing high-dimensional\\n\",\n    \"functions, as it provides both detailed surface representations and compact contour visualizations in a single coherent grid.\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"17\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Full Customization of the Visualization\\n\",\n    
\"\\n\",\n    \"`slice_plot_3d` allows fine control over plot styling:\\n\",\n    \"\\n\",\n    \"- `layout_kwargs` adjusts figure size, titles, background themes.\\n\",\n    \"- `plot_kwargs` controls color maps, marker options, and plot styles.\\n\",\n    \"- `make_subplot_kwargs` configures grid spacing, axis sharing, and more.\\n\",\n    \"\\n\",\n    \"Here, we demonstrate a fully customized plot combining all these features.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"18\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"fig = om.sandbox.slice_plot_3d(\\n\",\n    \"    func=sphere,\\n\",\n    \"    params=params,\\n\",\n    \"    bounds=bounds,\\n\",\n    \"    selector=lambda p: [p[\\\"alpha\\\"], p[\\\"beta\\\"], p[\\\"gamma\\\"]],\\n\",\n    \"    projection=\\\"surface\\\",\\n\",\n    \"    n_gridpoints=40,\\n\",\n    \"    layout_kwargs={\\n\",\n    \"        \\\"width\\\": 800,\\n\",\n    \"        \\\"height\\\": 800,\\n\",\n    \"        \\\"title\\\": {\\\"text\\\": \\\"Customized Sphere Function Visualization\\\"},\\n\",\n    \"        \\\"template\\\": \\\"plotly_dark\\\",\\n\",\n    \"    },\\n\",\n    \"    make_subplot_kwargs={\\n\",\n    \"        \\\"horizontal_spacing\\\": 0.1,\\n\",\n    \"        \\\"vertical_spacing\\\": 0.1,\\n\",\n    \"    },\\n\",\n    \"    plot_kwargs={\\n\",\n    \"        \\\"surface_plot\\\": {\\\"colorscale\\\": \\\"Viridis\\\", \\\"opacity\\\": 0.7},\\n\",\n    \"    },\\n\",\n    \")\\n\",\n    \"fig.show()\"\n   ]\n  }\n ],\n \"metadata\": {\n  \"kernelspec\": {\n   \"display_name\": \"Python 3 (ipykernel)\",\n   \"language\": \"python\",\n   \"name\": \"python3\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 3\n   },\n   \"file_extension\": \".py\",\n   \"mimetype\": \"text/x-python\",\n   \"name\": \"python\",\n   \"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": 
\"ipython3\",\n   \"version\": \"3.10.17\"\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 5\n}\n"
  },
  {
    "path": "docs/source/how_to/how_to_specify_algorithm_and_algo_options.md",
    "content": "(specify-algorithm)=\n\n# How to specify and configure algorithms\n\nThis how-to guide is about the mechanics of specifying and configuring optimizers in\noptimagic. It is not about choosing the right algorithm for your problem. For a\ndiscussion on choosing algorithms, see\n[this how-to guide](how_to_algorithm_selection.ipynb)\n\nThere are two ways to specify and configure optimizers. The *optimagic way* and the\n*scipy way*. Both use the `algorithm` argument of `minimize` and `maximize` to specify\nan optimizer and both are super easy to use.\n\nAs the name suggests, the *scipy way* is more familiar for users of scipy.optimize. The\n*optimagic way* adds discoverability and autocomplete. Using the *optimagic\nway*, you don't need to look things up in the documentation and rarely have to leave\nyour editor, notebook or IDE.\n\n::::{tab-set}\n:::{tab-item} The optimagic way\n:sync: optimagic\n\n## Selecting an algorithm\n\n```python\nimport optimagic as om\nimport numpy as np\n\n\ndef fun(x):\n    return x @ x\n\n\nom.minimize(\n    fun=fun,\n    params=np.arange(3),\n    algorithm=om.algos.scipy_neldermead,\n)\n```\n\nThe algorithm is selected by passing an algorithm class. This class is usually not\nimported manually, but discovered using `om.algos`. After typing `om.algos.`, your\neditor will show you all algorithms you can choose from.\n\n## Configuring an algorithm\n\nTo configure an algorithm with advanced options, you can create an instance of the\nclass:\n\n```python\nalgo = om.algos.scipy_neldermead(\n    stopping_maxiter=100,\n    adaptive=True,\n)\n\nom.minimize(\n    fun=fun,\n    params=np.arange(3),\n    algorithm=algo,\n)\n```\n\nAgain, you can use your editor's autocomplete to discover all options that your chosen\nalgorithm supports. When the instance is created, the types and values of all options\nare checked. 
Should you make a mistake, you will get an error before you run your\noptimization.\n\n## Advanced autocomplete in action\n\nAssume you need a gradient-free optimizer that supports bounds on the parameters.\nMoreover, you have a fixed computational budget, so you want to set stopping options.\n\nIf you type `om.algos.`, your editor will show you all available optimizers and a list\nof categories you can use to filter the results. In our case, we select `GradientFree`\nand `Bounded`, and we could do that in any order we want.\n\n![autocomplete_1](../_static/images/autocomplete_1.png)\n\nAfter selecting one of the displayed algorithms, in our case `scipy_neldermead`, the\neditor shows all tuning parameters of that optimizer. If you start to type `stopping`,\nyou will see all stopping criteria that are available.\n\n![autocomplete_2](../_static/images/autocomplete_2.png)\n\n## Modifying an algorithm\n\nGiven an algorithm, you can easily create a **modified copy** by using the `with_option`\nmethod.\n\n```python\n# using copy constructors to create variants\nbase_algo = om.algorithms.fides(stopping_maxiter=1000)\nalgorithms = [\n    base_algo.with_option(trustregion_initial_radius=r) for r in [0.1, 0.2, 0.5]\n]\n\nfor algo in algorithms:\n    minimize(\n        fun=fun,\n        params=np.arange(3),\n        algorithm=algo,\n    )\n```\n\n:::\n:::{tab-item} The scipy way\n:sync: scipy\n\n## Selecting an algorithm\n\n```python\nimport optimagic as om\nimport numpy as np\n\n\ndef fun(x):\n    return x @ x\n\n\nom.minimize(\n    fun=fun,\n    params=np.arange(3),\n    algorithm=\"scipy_lbfgsb\",\n)\n```\n\n\nFor a list of all supported algorithm names, see {ref}`list_of_algorithms`.\n\n```{note}\nTo provide full compatibility with scipy, you can also select algorithms with the\nargument `method` under their original scipy name, e.g. 
`method=\"L-BFGS-B\"` instead\nof `algorithm=\"scipy_lbfgsb\"`.\n```\n\n## Configuring an algorithm\n\nTo configure an algorithm, you can pass a dictionary to the `algo_options` argument.\n\n```python\noptions = {\n    \"stopping_maxiter\": 100,\n    \"adaptive\": True,\n}\n\nom.minimize(\n    fun=fun,\n    params=np.arange(3),\n    algorithm=\"scipy_neldermead\",\n    algo_options=options,\n)\n```\n\nIf `algo_options` contains options that are not supported by the optimizer, they will be\nignored and you get a warning.\n\nTo find out which options are supported by an optimizer, see {ref}`list_of_algorithms`.\n\n:::\n::::\n"
  },
  {
    "path": "docs/source/how_to/how_to_start_parameters.md",
    "content": "(params)=\n\n# How to specify `params`\n\n`params` is the first argument of any criterion function in optimagic. It collects all\nthe parameters to estimate, optimize, or differentiate over. In many optimization\nlibraries, `params` must be a one-dimensional numpy array. In optimagic, it can be an\narbitrary pytree (think nested dictionary) containing numbers, arrays, pandas.Series,\nand/or pandas.DataFrames.\n\nBelow, we show a few examples of what is possible in optimagic and discuss the\nadvantages and drawbacks of each of them.\n\nAgain, we use the simple `sphere` function you know from other tutorials as an example.\n\n```{eval-rst}\n.. tab-set::\n\n  .. tab-item:: Array\n\n    A frequent choice of ``params`` is a one-dimensional numpy array. This is\n    because one-dimensional numpy arrays are all that is supported by most optimizer\n    libraries.\n\n    In our opinion, it is rarely a good choice to represent parameters as flat numpy arrays\n    and then access individual parameters or slices by position. The only exceptions\n    are simple optimization problems with very-fast-to-evaluate criterion functions where\n    any overhead must be avoided.\n\n    If you still want to use one-dimensional numpy arrays, here is how:\n\n    .. code-block:: python\n\n        import optimagic as om\n\n\n        def sphere(params):\n            return params @ params\n\n\n        om.minimize(\n            fun=sphere,\n            params=np.arange(3),\n            algorithm=\"scipy_lbfgsb\",\n        )\n\n  .. tab-item:: DataFrame\n\n    Originally, pandas DataFrames were the mandatory format for ``params`` in optimagic.\n    They are still highly recommended and have a few special features. For example,\n    they allow to bundle information on start parameters and bounds together into one\n    data structure.\n\n    Let's look at an example where we do that:\n\n    .. 
code-block:: python\n\n        def sphere(params):\n            return (params[\"value\"] ** 2).sum()\n\n\n        params = pd.DataFrame(\n            data={\"value\": [1, 2, 3], \"lower_bound\": [-np.inf, 1.5, 0]},\n            index=[\"a\", \"b\", \"c\"],\n        )\n\n        om.minimize(\n            fun=sphere,\n            params=params,\n            algorithm=\"scipy_lbfgsb\",\n        )\n\n    DataFrames have many advantages:\n\n    - It is easy to select single parameters or groups of parameters or work with\n      the entire parameter vector. Especially, if you use a well designed MultiIndex.\n    - It is very easy to produce publication quality LaTeX tables from them.\n    - If you have nested models, you can easily update the parameter vector of a larger\n      model with the values from a smaller one (e.g. to get good start parameters).\n    - You can bundle information on bounds and values in one place.\n    - It is easy to compare two params vectors for equality.\n\n\n    If you are sure you won't have bounds on your parameter, you can also use a\n    pandas.Series instead of a pandas.DataFrame.\n\n    A drawback of DataFrames is that they are not JAX compatible. Another one is that\n    they are a bit slower than numpy arrays.\n\n  .. tab-item:: Dict\n\n    ``params`` can also be a (nested) dictionary containing all of the above and more.\n\n    .. code-block:: python\n\n        def sphere(params):\n            return params[\"a\"] ** 2 + params[\"b\"] ** 2 + (params[\"c\"] ** 2).sum()\n\n\n        res = om.minimize(\n            fun=sphere,\n            params={\"a\": 0, \"b\": 1, \"c\": pd.Series([2, 3, 4])},\n            algorithm=\"scipy_neldermead\",\n        )\n\n    Dictionaries of arrays are ideal if you want to do vectorized computations with\n    groups of parameters. They are also a good choice if you calculate derivatives\n    with JAX.\n\n    While optimagic won't stop you, don't go too far! 
Having parameters in very deeply\n    nested dictionaries makes it hard to visualize results and/or even to compare two\n    estimation results.\n\n  .. tab-item:: Scalar\n\n    If you have a one-dimensional optimization problem, the natural way to represent\n    your params is a float:\n\n    .. code-block:: python\n\n        def sphere(params):\n            return params**2\n\n\n        om.minimize(\n            fun=sphere,\n            params=3,\n            algorithm=\"scipy_lbfgsb\",\n        )\n```\n"
  },
  {
    "path": "docs/source/how_to/how_to_visualize_histories.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"0\",\n   \"metadata\": {},\n   \"source\": [\n    \"# How to visualize optimizer histories\\n\",\n    \"\\n\",\n    \"optimagic's `criterion_plot` can visualize the history of function values for one or multiple optimizations. \\n\",\n    \"optimagic's `params_plot` can visualize the history of parameter values for one optimization. \\n\",\n    \"\\n\",\n    \"This can help you to understand whether your optimization actually converged and if not, which parameters are problematic. \\n\",\n    \"\\n\",\n    \"It can also help you to find the fastest optimizer for a given optimization problem. \"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"1\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"import numpy as np\\n\",\n    \"import plotly.io as pio\\n\",\n    \"\\n\",\n    \"pio.renderers.default = \\\"notebook_connected\\\"\\n\",\n    \"\\n\",\n    \"import optimagic as om\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"2\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Run two optimizations to get example results\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"3\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"def sphere(x):\\n\",\n    \"    return x @ x\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"results = {}\\n\",\n    \"for algo in [\\\"scipy_lbfgsb\\\", \\\"scipy_neldermead\\\"]:\\n\",\n    \"    results[algo] = om.minimize(sphere, params=np.arange(5), algorithm=algo)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"4\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Make a single criterion plot\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"5\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"fig = 
om.criterion_plot(results[\\\"scipy_neldermead\\\"])\\n\",\n    \"fig.show()\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"6\",\n   \"metadata\": {},\n   \"source\": [\n    \":::{note}\\n\",\n    \"\\n\",\n    \"For details on using other plotting backends, see [How to change the plotting backend](how_to_change_plotting_backend.ipynb).\\n\",\n    \"\\n\",\n    \":::\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"7\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Compare two optimizations in a criterion plot\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"8\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"fig = om.criterion_plot(results)\\n\",\n    \"fig.show()\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"9\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Use some advanced options of criterion plot\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"10\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"fig = om.criterion_plot(\\n\",\n    \"    results,\\n\",\n    \"    # cut off after 180 evaluations\\n\",\n    \"    max_evaluations=180,\\n\",\n    \"    # show only the current best function value\\n\",\n    \"    monotone=True,\\n\",\n    \")\\n\",\n    \"fig.show()\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"11\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Make a params plot\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"12\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"fig = om.params_plot(results[\\\"scipy_neldermead\\\"])\\n\",\n    \"fig.show()\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"13\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Use advanced options of params plot\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 
null,\n   \"id\": \"14\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"fig = om.params_plot(\\n\",\n    \"    results[\\\"scipy_neldermead\\\"],\\n\",\n    \"    # cut off after 180 evaluations\\n\",\n    \"    max_evaluations=180,\\n\",\n    \"    # select only the last three parameters\\n\",\n    \"    selector=lambda x: x[2:],\\n\",\n    \")\\n\",\n    \"fig.show()\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"15\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Criterion plot with multistart optimization\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"16\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"def alpine(x):\\n\",\n    \"    return np.sum(np.abs(x * np.sin(x) + 0.1 * x))\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"res = om.minimize(\\n\",\n    \"    alpine,\\n\",\n    \"    params=np.arange(7),\\n\",\n    \"    bounds=om.Bounds(soft_lower=np.full(7, -3), soft_upper=np.full(7, 10)),\\n\",\n    \"    algorithm=\\\"scipy_neldermead\\\",\\n\",\n    \"    multistart=om.MultistartOptions(n_samples=100, convergence_max_discoveries=3),\\n\",\n    \")\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"17\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"fig = om.criterion_plot(res, max_evaluations=1000, monotone=True)\\n\",\n    \"fig.show()\"\n   ]\n  }\n ],\n \"metadata\": {\n  \"kernelspec\": {\n   \"display_name\": \"Python 3\",\n   \"language\": \"python\",\n   \"name\": \"python3\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 3\n   },\n   \"file_extension\": \".py\",\n   \"mimetype\": \"text/x-python\",\n   \"name\": \"python\",\n   \"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": \"ipython3\",\n   \"version\": \"3.10.17\"\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 5\n}\n"
  },
  {
    "path": "docs/source/how_to/index.md",
    "content": "(how-to)=\n\n# How-to Guides\n\nHow-to Guides show how to achieve specific tasks. In many cases they show you how to use\nadvanced options. For a more basic introduction, check out the [tutorials](tutorials).\n\n```{toctree}\n---\nmaxdepth: 1\n---\nhow_to_criterion_function\nhow_to_start_parameters\nhow_to_derivatives\nhow_to_specify_algorithm_and_algo_options\nhow_to_algorithm_selection\nhow_to_bounds\nhow_to_constraints\nhow_to_globalization\nhow_to_multistart\nhow_to_visualize_histories\nhow_to_change_plotting_backend\nhow_to_scaling\nhow_to_logging\nhow_to_errors_during_optimization\nhow_to_slice_plot\nhow_to_benchmarking\nhow_to_add_optimizers\nhow_to_document_optimizers\n```\n"
  },
  {
    "path": "docs/source/index.md",
    "content": "# \n\n<div style=\"padding-top: 50px;\">\n</div>\n\n```{raw} html\n<img src=\"_static/images/optimagic_logo.svg\" class=\"only-light center\" style=\"display:block; margin-left:auto; margin-right:auto; width:300px; height:auto;\"/>\n\n<img src=\"_static/images/optimagic_logo_dark_mode.svg\" class=\"only-dark center\" style=\"display:block; margin-left:auto; margin-right:auto; width:300px; height:auto;\"/>\n```\n\n<br>\n<br>\n\n*optimagic* is a Python package for numerical optimization. It is a unified interface to\noptimizers from SciPy, NlOpt and many other Python packages.\n\n*optimagic*'s `minimize` function works just like SciPy's, so you don't have to adjust\nyour code. You simply get more optimizers for free. On top you get powerful diagnostic\ntools, parallel numerical derivatives and more. If you want to see what *optimagic* can\ndo, check out this [tutorial](tutorials/optimization_overview.ipynb)\n\n*optimagic* was formerly called *estimagic*, because it also provides functionality to\nperform statistical inference on estimated parameters. 
*estimagic* is now a subpackage\nof *optimagic*, which is documented [here](estimagic).\n\n`````{grid} 1 2 2 2\n---\ngutter: 3\n---\n````{grid-item-card}\n:text-align: center\n:img-top: _static/images/light-bulb.svg\n:class-img-top: index-card-image\n:shadow: md\n\n```{button-link} tutorials/index.html\n---\nclick-parent:\nref-type: ref\nclass: stretched-link index-card-link sd-text-primary\n---\nTutorials\n```\n\nNew users of optimagic should read this first.\n\n````\n\n````{grid-item-card}\n:text-align: center\n:img-top: _static/images/book.svg\n:class-img-top: index-card-image\n:shadow: md\n\n```{button-link} how_to/index.html\n---\nclick-parent:\nref-type: ref\nclass: stretched-link index-card-link sd-text-primary\n---\nHow-to Guides\n```\n\nDetailed instructions for specific and advanced tasks.\n\n````\n\n````{grid-item-card}\n:text-align: center\n:img-top: _static/images/installation.svg\n:class-img-top: index-card-image\n:shadow: md\n\n```{button-link} installation.html\n---\nclick-parent:\nref-type: ref\nclass: stretched-link index-card-link sd-text-primary\n---\nInstallation\n```\n\nInstallation instructions for optimagic and optional dependencies.\n\n````\n\n````{grid-item-card}\n:text-align: center\n:img-top: _static/images/optimization.svg\n:class-img-top: index-card-image\n:shadow: md\n\n```{button-link} algorithms.html\n---\nclick-parent:\nref-type: ref\nclass: stretched-link index-card-link sd-text-primary\n---\nOptimization Algorithms\n```\n\nList of numerical optimizers and their optional parameters.\n\n````\n\n\n````{grid-item-card}\n:text-align: center\n:img-top: _static/images/books.svg\n:class-img-top: index-card-image\n:shadow: md\n\n```{button-link} explanation/index.html\n---\nclick-parent:\nref-type: ref\nclass: stretched-link index-card-link sd-text-primary\n---\nExplanations\n```\n\nBackground information on key topics central to the package.\n\n````\n\n````{grid-item-card}\n:text-align: center\n:img-top: 
_static/images/coding.svg\n:class-img-top: index-card-image\n:shadow: md\n\n```{button-link} reference/index.html\n---\nclick-parent:\nref-type: ref\nclass: stretched-link index-card-link sd-text-primary\n---\nAPI Reference\n```\n\nDetailed description of the optimagic API.\n\n````\n\n````{grid-item-card}\n:text-align: center\n:columns: 12\n:img-top: _static/images/video.svg\n:class-img-top: index-card-image\n:shadow: md\n\n```{button-link} videos.html\n---\nclick-parent:\nref-type: ref\nclass: stretched-link index-card-link sd-text-primary\n---\nVideos\n```\n\nCollection of tutorials, talks, and screencasts on optimagic.\n\n````\n\n`````\n\n```{toctree}\n---\nhidden: true\nmaxdepth: 1\n---\ntutorials/index\nhow_to/index\nexplanation/index\nreference/index\ndevelopment/index\nvideos\nalgorithms\nestimagic/index\ninstallation\n```\n\n______________________________________________________________________\n\nWe thank all institutions that have funded or supported optimagic (formerly estimagic)\n\n```{image} _static/images/aai-institute-logo.svg\n---\nwidth: 185px\n---\n```\n\n```{image} _static/images/numfocus_logo.png\n---\nwidth: 200\n---\n```\n\n```{image} _static/images/tra_logo.png\n---\nwidth: 240px\n---\n```\n\n```{image} _static/images/hoover_logo.png\n---\nwidth: 192px\n---\n```\n\n```{image} _static/images/transferlab-logo.svg\n---\nwidth: 420px\n---\n```\n\n______________________________________________________________________\n\n**Useful links for search:** {ref}`genindex` | {ref}`modindex` | {ref}`search`\n"
  },
  {
    "path": "docs/source/installation.md",
    "content": "# Installation\n\n## Basic installation\n\nThe preferred way to install optimagic is via `conda` or `mamba`. To do so, open a\nterminal and type:\n\n```\nconda install -c conda-forge optimagic\n```\n\nAlternatively, you can install optimagic via pip:\n\n```\npip install optimagic\n```\n\nIn both cases, you get optimagic and all of its mandatory dependencies.\n\n## Installing optional dependencies\n\nOnly `scipy` is a mandatory dependency of optimagic. Other algorithms become available\nif you install more packages. We make this optional because you will rarely need all of\nthem in the same project.\n\nFor an overview of all optimizers and the packages you need to install to enable them,\nsee {ref}`list_of_algorithms`.\n\nTo enable all algorithms at once, do the following:\n\n```\nconda -c conda-forge install nlopt\n```\n\n```\npip install Py-BOBYQA\n```\n\n```\npip install DFO-LS\n```\n\n*Note*: We recommend to install `DFO-LS` version 1.5.3 or higher. Versions of 1.5.0 or\nlower also work but the versions `1.5.1` and `1.5.2` contain bugs that can lead to\nerrors being raised.\n\n```\nconda install -c conda-forge petsc4py\n```\n\n*Note*: `` `petsc4py` `` is not available on Windows.\n\n```\nconda install -c conda-forge cyipopt\n```\n\n*Note*: Make sure you have at least `cyipopt` 1.4.\n\n```\nconda install -c conda-forge pygmo\n```\n\n```\npip install fides>=0.7.4\n```\n\n*Note*: Make sure you have at least `fides` 0.7.4.\n"
  },
  {
    "path": "docs/source/reference/algo_options.md",
    "content": "(algo_options)=\n\n# The default algorithm options\n\n```{eval-rst}\n.. automodule:: optimagic.optimization.algo_options\n    :members:\n```\n"
  },
  {
    "path": "docs/source/reference/batch_evaluators.md",
    "content": "(batch_evaluators)=\n\n# Batch evaluators\n\n```{eval-rst}\n.. automodule:: optimagic.batch_evaluators\n    :members:\n```\n"
  },
  {
    "path": "docs/source/reference/index.md",
    "content": "# optimagic API\n\n```{eval-rst}\n.. currentmodule:: optimagic\n```\n\n(maximize-and-minimize)=\n\n## Optimization\n\n```{eval-rst}\n.. dropdown:: maximize\n\n    .. autofunction:: maximize\n```\n\n```{eval-rst}\n.. dropdown:: minimize\n\n    .. autofunction:: minimize\n\n```\n\n```{eval-rst}\n.. dropdown:: slice_plot\n\n    .. autofunction:: slice_plot\n\n```\n\n```{eval-rst}\n.. dropdown:: criterion_plot\n\n    .. autofunction:: criterion_plot\n\n```\n\n```{eval-rst}\n.. dropdown:: params_plot\n\n    .. autofunction:: params_plot\n\n\n```\n\n```{eval-rst}\n.. dropdown:: OptimizeResult\n\n    .. autoclass:: OptimizeResult\n        :members:\n\n```\n\n```{eval-rst}\n.. dropdown:: Bounds\n\n    .. autoclass:: Bounds\n        :members:\n\n```\n\n```{eval-rst}\n.. dropdown:: Constraints\n\n    .. autoclass:: FixedConstraint\n        :members:\n\n    .. autoclass:: IncreasingConstraint\n        :members:\n\n    .. autoclass:: DecreasingConstraint\n        :members:\n\n    .. autoclass:: EqualityConstraint\n        :members:\n\n    .. autoclass:: ProbabilityConstraint\n        :members:\n\n    .. autoclass:: PairwiseEqualityConstraint\n        :members:\n\n    .. autoclass:: FlatCovConstraint\n        :members:\n\n    .. autoclass:: FlatSDCorrConstraint\n        :members:\n\n    .. autoclass:: LinearConstraint\n        :members:\n\n    .. autoclass:: NonlinearConstraint\n        :members:\n\n```\n\n```{eval-rst}\n.. dropdown:: NumdiffOptions\n\n    .. autoclass:: NumdiffOptions\n        :members:\n\n```\n\n```{eval-rst}\n.. dropdown:: MultistartOptions\n\n    .. autoclass:: MultistartOptions\n        :members:\n\n```\n\n```{eval-rst}\n.. dropdown:: ScalingOptions\n\n    .. autoclass:: ScalingOptions\n        :members:\n\n```\n\n```{eval-rst}\n.. dropdown:: LogOptions\n\n    .. autoclass:: SQLiteLogOptions\n        :members:\n\n```\n\n```{eval-rst}\n.. dropdown:: History\n\n    .. autoclass:: History\n        :members:\n\n```\n\n```{eval-rst}\n.. 
dropdown:: count_free_params\n\n    .. autofunction:: count_free_params\n\n```\n\n```{eval-rst}\n.. dropdown:: check_constraints\n\n    .. autofunction:: check_constraints\n\n```\n\n(first_derivative)=\n\n## Derivatives\n\n```{eval-rst}\n.. dropdown:: first_derivative\n\n    .. autofunction:: first_derivative\n\n```\n\n```{eval-rst}\n.. dropdown:: second_derivative\n\n    .. autofunction:: second_derivative\n\n```\n\n(benchmarking)=\n\n## Benchmarks\n\n```{eval-rst}\n.. dropdown:: get_benchmark_problems\n\n    .. autofunction:: get_benchmark_problems\n```\n\n```{eval-rst}\n.. dropdown:: run_benchmark\n\n    .. autofunction:: run_benchmark\n```\n\n```{eval-rst}\n.. dropdown:: profile_plot\n\n    .. autofunction:: profile_plot\n```\n\n```{eval-rst}\n.. dropdown:: convergence_plot\n\n    .. autofunction:: convergence_plot\n\n\n```\n\n(logreading)=\n\n## Log reading\n\n```{eval-rst}\n.. dropdown:: OptimizeLogReader\n\n    .. autoclass:: OptimizeLogReader\n\n\n\n```\n\n## Other:\n\n```{toctree}\n---\nmaxdepth: 1\n---\nutilities\nalgo_options\nbatch_evaluators\ntyping\n```\n"
  },
  {
    "path": "docs/source/reference/typing.md",
    "content": "(typing)=\n\n# Types\n\n```{eval-rst}\n\n.. automodule:: optimagic.typing\n    :members:\n\n```\n"
  },
  {
    "path": "docs/source/reference/utilities.md",
    "content": "(utilities)=\n\n# Utility functions\n\n```{eval-rst}\n.. automodule:: optimagic.utilities\n    :members:\n```\n"
  },
  {
    "path": "docs/source/refs.bib",
    "content": "% Encoding: UTF-8\n\n\n\n@Book{Dennis1996,\n  Title                    = {Numerical Methods for Unconstrained Optimization and Nonlinear Equations},\n  Author                   = {Dennis, J.E. and Schnabel, R.B.},\n  Publisher                = {Society for Industrial and Applied Mathematics},\n  Year                     = {1996},\n  Series                   = {Classics in Applied Mathematics},\n\n  ISBN                     = {9780898713640},\n  Lccn                     = {lc95051776},\n  Url                      = {https://books.google.de/books?id=RtxcWd0eBD0C&redir_esc=y}\n}\n\n@book{Hansen2019,\n  title     = {Econometrics},\n  author    = {Bruce E. Hansen},\n  editor    = {Bruce E. Hansen},\n  publisher = {Unpublished},\n  year      = {2019},\n  url       = {https://www.ssc.wisc.edu/~bhansen/econometrics/},\n  owner     = {janos},\n  timestamp = {2019.10.03}\n}\n\n@book{Hansen2020,\n  title     = {Econometrics},\n  author    = {Bruce E. Hansen},\n  editor    = {Bruce E. 
Hansen},\n  publisher = {Unpublished},\n  year      = {2020},\n  address   = {https://www.ssc.wisc.edu/~bhansen/econometrics/},\n  owner     = {janos},\n  timestamp = {2020.03.04}\n}\n\n\n@book{Verbeek2008,\n  title     = {A Guide to Modern Econometrics},\n  author    = {Verbeek, M.},\n  publisher = {Wiley},\n  year      = {2008},\n  isbn      = {9780470517697},\n  lccn      = {2007050167},\n  url       = {https://books.google.com/books?id=uEFm6pAJZhoC}\n}\n\n@book{Wassermann2006,\n  title     = {All of nonparametric statistics},\n  author    = {Wasserman, Larry},\n  year      = {2006},\n  publisher = {Springer Science \\& Business Media}\n}\n\n@article{Groeneveld1994,\n  author  = {Eildert Groeneveld},\n  title   = {A reparameterization to improve numerical optimization in multivariate REML (co)variance component estimation},\n  journal = {Genetics, Selection, Evolution : GSE},\n  year    = {1994},\n  volume  = {26},\n  pages   = {537 - 545}\n}\n\n@article{Pinheiro1996,\n  author  = {José C. Pinheiro and Douglas M. 
Bates},\n  title   = {Unconstrained Parameterizations for Variance-Covariance Matrices},\n  journal = {Statistics and Computing},\n  year    = {1996},\n  volume  = {6},\n  pages   = {289--296}\n}\n\n@techreport{Kraft1988,\n  author      = {Kraft, Dieter},\n  institution = {DLR German Aerospace Center – Institute for Flight Mechanics},\n  title       = {A software package for sequential quadratic programming},\n  year        = {1988},\n  address     = {Köln, Germany},\n  url         = {http://degenerateconic.com/wp-content/uploads/2018/03/DFVLR_FB_88_28.pdf}\n}\n\n@book{Nocedal2006,\n  author    = {Nocedal, Jorge and Wright, Stephen},\n  publisher = {Springer Science \\& Business Media},\n  title     = {Numerical optimization},\n  year      = {2006}\n}\n\n@incollection{Conn2000,\n  author    = {Conn, AR and Gould, NI and Toint, PL},\n  booktitle = {Trust region methods},\n  publisher = {Siam},\n  title     = {Nonlinear equations and nonlinear fitting},\n  year      = {2000},\n  pages     = {749--774},\n  volume    = {1}\n}\n\n@article{Byrd1999,\n  author    = {Byrd, Richard H and Hribar, Mary E and Nocedal, Jorge},\n  journal   = {SIAM Journal on Optimization},\n  title     = {An interior point algorithm for large-scale nonlinear programming},\n  year      = {1999},\n  number    = {4},\n  pages     = {877--900},\n  volume    = {9},\n  publisher = {SIAM}\n}\n\n@article{Lalee1998,\n  author    = {Lalee, Marucha and Nocedal, Jorge and Plantenga, Todd},\n  journal   = {SIAM Journal on Optimization},\n  title     = {On the implementation of an algorithm for large-scale equality constrained optimization},\n  year      = {1998},\n  number    = {3},\n  pages     = {682--706},\n  volume    = {8},\n  publisher = {SIAM}\n}\n\n@article{Gao2012,\n  author    = {Gao, Fuchang and Han, Lixing},\n  journal   = {Computational Optimization and Applications},\n  title     = {Implementing the Nelder-Mead simplex algorithm with adaptive parameters},\n  year      = {2012},\n  number    = 
{1},\n  pages     = {259--277},\n  volume    = {51},\n  publisher = {Springer}\n}\n\n\n@article{Powell1998,\n  author    = {Powell, Michael JD},\n  journal   = {Acta numerica},\n  title     = {Direct search algorithms for optimization calculations},\n  year      = {1998},\n  pages     = {287--336},\n  publisher = {Cambridge University Press}\n}\n\n@article{Powell2007,\n  author    = {Powell, Michael JD},\n  journal   = {Mathematics Today-Bulletin of the Institute of Mathematics and its Applications},\n  title     = {A view of algorithms for optimization without derivatives},\n  year      = {2007},\n  number    = {5},\n  pages     = {170--174},\n  volume    = {43},\n  publisher = {Citeseer}\n}\n\n@techreport{Benson2017,\n  author      = {Benson, S and McInnes, LC and Mor{\\'e}, JJ and Munson, T and Sarich, J},\n  institution = {Technical Report ANL/MCS-TM-322, Argonne National Laboratory},\n  title       = {TAO user manual (revision 3.7)},\n  year        = {2017},\n  url         = {http://web.mit.edu/tao-petsc_v3.7/tao_manual.pdf}\n}\n\n@techreport{Wild2015,\n  author      = {Wild, Stefan M.},\n  institution = {Argonne National Laboratory},\n  title       = {Solving Derivative-Free Nonlinear Least Squares Problems with POUNDERS},\n  year        = {2015},\n  url         = {https://doi.org/10.1137/1.9781611974683.ch40}\n}\n\n@InProceedings{Wild2008,\n  author    = {Wild, Stefan M.},\n  title     = {{MNH: A} Derivative-Free Optimization Algorithm Using Minimal Norm {Hessians}},\n  booktitle = {Tenth Copper Mountain Conference on Iterative Methods},\n  year      = {2008},\n  month     = {April},\n  gurl      = {https://scholar.google.com/scholar?cluster=6407907761614456217},\n  url       = {http://grandmaster.colorado.edu/~copper/2008/SCWinners/Wild.pdf},\n}\n\n@misc{Cartis2018,\n  author        = {Coralia Cartis and Jan Fiala and Benjamin Marteau and Lindon Roberts},\n  title         = {Improving the Flexibility and Robustness of Model-Based Derivative-Free 
Optimization Solvers},\n  year          = {2018},\n  archiveprefix = {arXiv},\n  eprint        = {1804.00154},\n  primaryclass  = {math.OC}\n}\n\n@misc{Cartis2018a,\n  author        = {Coralia Cartis and Lindon Roberts and Oliver Sheridan-Methven},\n  title         = {Escaping local minima with derivative-free methods: a numerical investigation},\n  year          = {2018},\n  archiveprefix = {arXiv},\n  eprint        = {1812.11343},\n  primaryclass  = {math.OC}\n}\n\n@article{Powell2009,\n  author  = {Powell, Michael JD},\n  journal = {Cambridge NA Report NA2009/06, University of Cambridge, Cambridge},\n  title   = {The BOBYQA algorithm for bound constrained optimization without derivatives},\n  year    = {2009},\n  pages   = {26--46}\n}\n\n@misc{Cartis2018b,\n  author        = {Coralia Cartis and Jan Fiala and Benjamin Marteau and Lindon Roberts},\n  title         = {Improving the Flexibility and Robustness of Model-Based Derivative-Free Optimization Solvers},\n  year          = {2018},\n  archiveprefix = {arXiv},\n  eprint        = {1804.00154},\n  primaryclass  = {math.OC}\n}\n\n@unpublished{Saxton2018,\n  title  = {resample: Randomization-based inference in Python.},\n  author = {Daniel Saxton},\n  year   = {2018},\n  note   = {unpublished},\n  url    = {https://github.com/dsaxton/resample}\n}\n\n@article{CameronMiller2015,\n  title     = {A practitioner’s guide to cluster-robust inference},\n  author    = {Cameron, A Colin and Miller, Douglas L},\n  journal   = {Journal of human resources},\n  volume    = {50},\n  number    = {2},\n  pages     = {317--372},\n  year      = {2015},\n  publisher = {University of Wisconsin Press}\n}\n\n@article{Waechter2005,\n  author    = {Andreas Wächter and Lorenz T. 
Biegler},\n  journal   = {{SIAM} Journal on Optimization},\n  title     = {Line Search Filter Methods for Nonlinear Programming: Local Convergence},\n  year      = {2005},\n  month     = {jan},\n  number    = {1},\n  pages     = {32--48},\n  volume    = {16},\n  doi       = {10.1137/s1052623403426544},\n  publisher = {Society for Industrial {\\&} Applied Mathematics ({SIAM})}\n}\n\n@article{Waechter2005a,\n  author    = {Andreas Wächter and Lorenz T. Biegler},\n  journal   = {{SIAM} Journal on Optimization},\n  title     = {Line Search Filter Methods for Nonlinear Programming: Motivation and Global Convergence},\n  year      = {2005},\n  month     = {jan},\n  number    = {1},\n  pages     = {1--31},\n  volume    = {16},\n  doi       = {10.1137/s1052623403426556},\n  publisher = {Society for Industrial {\\&} Applied Mathematics ({SIAM})}\n}\n\n@article{Waechter2005b,\n  author    = {Andreas Wächter and Lorenz T. Biegler},\n  journal   = {Mathematical Programming},\n  title     = {On the implementation of an interior-point filter line-search algorithm for large-scale nonlinear programming},\n  year      = {2005},\n  month     = {apr},\n  number    = {1},\n  pages     = {25--57},\n  volume    = {106},\n  doi       = {10.1007/s10107-004-0559-y},\n  publisher = {Springer Science and Business Media {LLC}}\n}\n\n@article{Nocedal2009,\n  author    = {Jorge Nocedal and Andreas Wächter and Richard A. Waltz},\n  journal   = {{SIAM} Journal on Optimization},\n  title     = {Adaptive Barrier Update Strategies for Nonlinear Interior Methods},\n  year      = {2009},\n  month     = {jan},\n  number    = {4},\n  pages     = {1674--1693},\n  volume    = {19},\n  doi       = {10.1137/060649513},\n  publisher = {Society for Industrial {\\&} Applied Mathematics ({SIAM})}\n}\n\n@article{Schlueter2009,\n  author    = {Martin Schlüter and Jose A. Egea and Julio R. 
Banga},\n  journal   = {Computers {\\&} Operations Research},\n  title     = {Extended ant colony optimization for non-convex mixed integer nonlinear programming},\n  year      = {2009},\n  month     = {jul},\n  number    = {7},\n  pages     = {2217--2229},\n  volume    = {36},\n  doi       = {10.1016/j.cor.2008.08.015},\n  publisher = {Elsevier {BV}}\n}\n\n@article{Karaboga2007,\n  author    = {Dervis Karaboga and Bahriye Basturk},\n  journal   = {Journal of Global Optimization},\n  title     = {A powerful and efficient algorithm for numerical function optimization: artificial bee colony ({ABC}) algorithm},\n  year      = {2007},\n  month     = {apr},\n  number    = {3},\n  pages     = {459--471},\n  volume    = {39},\n  doi       = {10.1007/s10898-007-9149-x},\n  publisher = {Springer Science and Business Media {LLC}}\n}\n\n@article{Mernik2015,\n  author    = {Marjan Mernik and Shih-Hsi Liu and Dervis Karaboga and Matej {\\v{C}}repin{\\v{s}}ek},\n  journal   = {Information Sciences},\n  title     = {On clarifying misconceptions when comparing variants of the Artificial Bee Colony Algorithm by offering a new implementation},\n  year      = {2015},\n  month     = {jan},\n  pages     = {115--127},\n  volume    = {291},\n  doi       = {10.1016/j.ins.2014.08.040},\n  publisher = {Elsevier {BV}}\n}\n\n@article{Storn1997,\n  author    = {Rainer Storn and Kenneth Price},\n  journal   = {Journal of Global Optimization},\n  title     = {Differential Evolution – A Simple and Efficient Heuristic for Global Optimization over Continuous Spaces},\n  year      = {1997},\n  number    = {4},\n  pages     = {341--359},\n  volume    = {11},\n  url       = {https://link.springer.com/article/10.1023/A:1008202821328},\n  publisher = {Springer Science and Business Media {LLC}}\n}\n\n@article{Oliveto2007,\n  author    = {Pietro S. 
Oliveto and Jun He and Xin Yao},\n  journal   = {International Journal of Automation and Computing},\n  title     = {Time complexity of evolutionary algorithms for combinatorial optimization: A decade of results},\n  year      = {2007},\n  month     = {jul},\n  number    = {3},\n  pages     = {281--293},\n  volume    = {4},\n  doi       = {10.1007/s11633-007-0281-3},\n  publisher = {Springer Science and Business Media {LLC}}\n}\n\n\n@article{Brest2006,\n  author  = {Brest, Janez and Greiner, Sao and Boskovic, Borko and Mernik, Marjan and Zumer, Viljem},\n  journal = {IEEE Transactions on Evolutionary Computation},\n  title   = {Self-Adapting Control Parameters in Differential Evolution: A Comparative Study on Numerical Benchmark Problems},\n  year    = {2006},\n  number  = {6},\n  pages   = {646-657},\n  volume  = {10},\n  doi     = {10.1109/TEVC.2006.872133}\n}\n\n@inproceedings{Elsayed2011,\n  author    = {Elsayed, Saber M. and Sarker, Ruhul A. and Essam, Daryl L.},\n  booktitle = {2011 IEEE Congress of Evolutionary Computation (CEC)},\n  title     = {Differential evolution with multiple strategies for solving CEC2011 real-world numerical optimization problems},\n  year      = {2011},\n  pages     = {1041-1048},\n  doi       = {10.1109/CEC.2011.5949732}\n}\n\n@incollection{Hansen2006,\n  author    = {Nikolaus Hansen},\n  booktitle = {Towards a New Evolutionary Computation},\n  publisher = {Springer Berlin Heidelberg},\n  title     = {The {CMA} Evolution Strategy: A Comparing Review},\n  year      = {2006},\n  pages     = {75--102},\n  doi       = {10.1007/3-540-32494-1_4}\n}\n\n@article{Corana1987,\n  author     = {Corana, A. and Marchesi, M. and Martini, C. and Ridella, S.},\n  title      = {Minimizing Multimodal Functions of Continuous Variables with the “Simulated Annealing” Algorithm—Corrigenda for This Article is Available Here},\n  year       = {1987},\n  issue_date = {Sept. 
1987},\n  publisher  = {Association for Computing Machinery},\n  address    = {New York, NY, USA},\n  volume     = {13},\n  number     = {3},\n  issn       = {0098-3500},\n  url        = {https://doi.org/10.1145/29380.29864},\n  doi        = {10.1145/29380.29864},\n  abstract   = {A new global optimization algorithm for functions of continuous variables is presented,\nderived from the “Simulated Annealing” algorithm recently introduced in combinatorial\noptimization.The algorithm is essentially an iterative random search procedure with\nadaptive moves along the coordinate directions. It permits uphill moves under the\ncontrol of a probabilistic criterion, thus tending to avoid the first local minima\nencountered.The algorithm has been tested against the Nelder and Mead simplex method\nand against a version of Adaptive Random Search. The test functions were Rosenbrock\nvalleys and multiminima functions in 2,4, and 10 dimensions.The new method proved\nto be more reliable than the others, being always able to find the optimum, or at\nleast a point very close to it. It is quite costly in term of function evaluations,\nbut its cost can be predicted in advance, depending only slightly on the starting\npoint.},\n  journal    = {ACM Trans. Math. Softw.},\n  month      = sep,\n  pages      = {262–280},\n  numpages   = {19}\n}\n\n@Article{Poli2007,\n  author    = {Riccardo Poli and James Kennedy and Tim Blackwell},\n  journal   = {Swarm Intelligence},\n  title     = {Particle swarm optimization},\n  year      = {2007},\n  month     = {aug},\n  number    = {1},\n  pages     = {33--57},\n  volume    = {1},\n  doi       = {10.1007/s11721-007-0002-0},\n  publisher = {Springer Science and Business Media {LLC}},\n}\n\n@Article{Wales1997,\n  author    = {David J. Wales and Jonathan P. K. 
Doye},\n  journal   = {The Journal of Physical Chemistry A},\n  title     = {Global Optimization by Basin-Hopping and the Lowest Energy Structures of Lennard-Jones Clusters Containing up to 110 Atoms},\n  year      = {1997},\n  month     = {jul},\n  number    = {28},\n  pages     = {5111--5116},\n  volume    = {101},\n  publisher = {American Chemical Society ({ACS})},\n}\n\n@InProceedings{Glasmachers2010,\n  author    = {Tobias Glasmachers and Tom Schaul and Sun Yi and Daan Wierstra and Jürgen Schmidhuber},\n  booktitle = {Proceedings of the 12th annual conference on Genetic and evolutionary computation - {GECCO} {\\textquotesingle}10},\n  title     = {Exponential natural evolution strategies},\n  year      = {2010},\n  publisher = {{ACM} Press},\n  doi       = {10.1145/1830483.1830557},\n}\n\n@Article{Mahdavi2007,\n  author    = {M. Mahdavi and M. Fesanghary and E. Damangir},\n  journal   = {Applied Mathematics and Computation},\n  title     = {An improved harmony search algorithm for solving optimization problems},\n  year      = {2007},\n  month     = {may},\n  number    = {2},\n  pages     = {1567--1579},\n  volume    = {188},\n  doi       = {10.1016/j.amc.2006.11.033},\n  publisher = {Elsevier {BV}},\n}\n\n@Article{Mirjalili2014,\n  author    = {Seyedali Mirjalili and Seyed Mohammad Mirjalili and Andrew Lewis},\n  journal   = {Advances in Engineering Software},\n  title     = {Grey Wolf Optimizer},\n  year      = {2014},\n  month     = {mar},\n  pages     = {46--61},\n  volume    = {69},\n  doi       = {10.1016/j.advengsoft.2013.12.007},\n  publisher = {Elsevier {BV}},\n}\n\n@Article{Kolda2003,\n  author    = {Tamara G. 
Kolda and Robert Michael Lewis and Virginia Torczon},\n  journal   = {{SIAM} Review},\n  title     = {Optimization by Direct Search: New Perspectives on Some Classical and Modern Methods},\n  year      = {2003},\n  month     = {jan},\n  number    = {3},\n  pages     = {385--482},\n  volume    = {45},\n  doi       = {10.1137/s003614450242889},\n}\n\n@Article{Biscani2020,\n  doi = {10.21105/joss.02338},\n  url = {https://doi.org/10.21105/joss.02338},\n  year = {2020},\n  publisher = {The Open Journal},\n  volume = {5},\n  number = {53},\n  pages = {2338},\n  author = {Francesco Biscani and Dario Izzo},\n  title = {A parallel global multiobjective framework for optimization: pagmo},\n  journal = {Journal of Open Source Software}\n}\n\n@Article{Coleman1996,\n  author    = {Thomas F. Coleman and Yuying Li},\n  journal   = {SIAM Journal on Optimization},\n  title     = {An Interior Trust Region Approach for Nonlinear Minimization Subject to Bounds},\n  year      = {1996},\n  month     = {may},\n  number    = {2},\n  pages     = {418--445},\n  volume    = {6},\n  doi       = {10.1137/0806023},\n  publisher = {Society for Industrial {\\&} Applied Mathematics ({SIAM})},\n}\n\n@Article{Coleman1994,\n  author    = {Thomas F. Coleman and Yuying Li},\n  journal   = {Mathematical Programming},\n  title     = {On the convergence of interior-reflective Newton methods for nonlinear minimization subject to bounds},\n  year      = {1994},\n  month     = {oct},\n  number    = {1-3},\n  pages     = {189--224},\n  volume    = {67},\n  doi       = {10.1007/bf01582221},\n  publisher = {Springer Science and Business Media {LLC}},\n}\n\n@Article{Broyden1965,\n  author    = {C. G. 
Broyden},\n  journal   = {Mathematics of Computation},\n  title     = {A class of methods for solving nonlinear simultaneous equations},\n  year      = {1965},\n  number    = {92},\n  pages     = {577--577},\n  volume    = {19},\n  publisher = {American Mathematical Society ({AMS})},\n}\n\n@Book{Nocedal1999,\n  editor    = {Jorge Nocedal and Stephen J. Wright},\n  publisher = {Springer-Verlag},\n  title     = {Numerical Optimization},\n  year      = {1999},\n  doi       = {10.1007/b98874},\n}\n\n@Article{Fletcher1987,\n  author    = {R. Fletcher and C. Xu},\n  journal   = {IMA Journal of Numerical Analysis},\n  title     = {Hybrid Methods for Nonlinear Least Squares},\n  year      = {1987},\n  number    = {3},\n  pages     = {371--389},\n  volume    = {7},\n  doi       = {10.1093/imanum/7.3.371},\n  publisher = {Oxford University Press ({OUP})},\n}\n\n@Article{Kaelo2006,\n  author  = {Kaelo, P and Ali, M},\n  journal = {J. Optim. Theory Appl},\n  title   = {Some variants of the controlled random search algorithm for global optimization},\n  year    = {2006},\n  number  = {2},\n  pages   = {253--264},\n  volume  = {130},\n}\n\n@InBook{Price1978,\n  author    = {Price, W},\n  editor    = {L. C. W. Dixon and G. P. Szego},\n  pages     = {71--84},\n  publisher = {North-Holland Press},\n  title     = {A controlled random search procedure for global optimization},\n  year      = {1978},\n  address   = {Amsterdam},\n  volume    = {2},\n  booktitle = {Towards Global Optimization},\n}\n\n@Article{Price1983,\n  author  = {Price, W},\n  journal = {J. Optim. 
Theory Appl},\n  title   = {Global optimization by controlled random search},\n  year    = {1983},\n  number  = {3},\n  pages   = {333--348},\n  volume  = {40},\n}\n\n@Article{Kraft1994,\n  author  = {Kraft, Dieter},\n  journal = {ACM Transactions on Mathematical Software},\n  title   = {Algorithm 733: TOMP-Fortran modules for optimal control calculations},\n  year    = {1994},\n  number  = {3},\n  pages   = {262--281},\n  volume  = {20},\n}\n\n@Article{Jones1993,\n  author  = {Jones, D and Perttunen, C and Stuckmann, B},\n  journal = {J. Optimization Theory and Applications},\n  title   = {Lipschitzian optimization without the Lipschitz constant},\n  year    = {1993},\n  pages   = {157},\n  volume  = {79},\n}\n\n@Article{Gablonsky2001,\n  author  = {Gablonsky, J and Kelley, C},\n  journal = {J. Global Optimization},\n  title   = {A locally-biased form of the DIRECT algorithm},\n  year    = {2001},\n  number  = {1},\n  pages   = {27--37},\n  volume  = {21},\n}\n\n@Article{DaSilva2010,\n  author  = {Da Silva, C and Santos, M and Goncalves, H and Hernandez-Figueroa},\n  journal = {IEEE Photonics Technology Letters},\n  title   = {Designing Novel Photonic Devices by Bio-Inspired Computing},\n  year    = {2010},\n  number  = {15},\n  pages   = {1177--1179},\n  volume  = {22},\n}\n\n@Misc{DaSilva2010a,\n  author = {Da Silva, C and Santos},\n  title  = {Parallel and Bio-Inspired Computing Applied to Analyze Microwave and Photonic Metamaterial Structures},\n  year   = {2010},\n}\n\n@Article{Beyer2002,\n  author  = {Beyer, H.-G and Schwefel, H.-P},\n  journal = {Journal Natural Computing},\n  title   = {Evolution Strategies: A Comprehensive Introduction},\n  year    = {2002},\n  number  = {1},\n  pages   = {3--52},\n  volume  = {1},\n}\n\n@article{Vent1975,\nauthor = {Vent, W.},\ntitle = {Rechenberg, Ingo, Evolutionsstrategie — Optimierung technischer Systeme nach Prinzipien der biologischen Evolution. 170 S. mit 36 Abb. Frommann-Holzboog-Verlag. Stuttgart 1973. 
Broschiert},\njournal = {Feddes Repertorium},\nvolume = {86},\nnumber = {5},\npages = {337-337},\nyear = {1975}\n}\n\n\n@Article{PhilipRunarsson2005,\n  author  = {Philip Runarsson, Thomas and Yao, Xin},\n  journal = {IEEE Trans. on Systems, Man, and Cybernetics Part C: Applications and Reviews},\n  title   = {Search biases in constrained evolutionary optimization},\n  year    = {2005},\n  number  = {2},\n  pages   = {233--243},\n  volume  = {35},\n}\n\n@Article{Thomas2000,\n  author  = {Thomas, P and Runarsson, Xin and Yao},\n  journal = {IEEE Trans. Evolutionary Computation},\n  title   = {Stochastic ranking for constrained evolutionary optimization},\n  year    = {2000},\n  number  = {3},\n  pages   = {284--294},\n  volume  = {4},\n}\n\n\n@Article{Nelder1965,\n  author  = {Nelder, J and Mead, R},\n  journal = {The Computer Journal},\n  title   = {A simplex method for function minimization},\n  year    = {1965},\n  pages   = {308--313},\n  volume  = {7},\n}\n\n@Misc{Brent1972,\n  author    = {Brent, Richard},\n  title     = {Algorithms for Minimization without Derivatives},\n  year      = {1972},\n  publisher = {Prentice-Hall},\n}\n\n@InBook{Powell1994,\n  author    = {Powell, M},\n  editor    = {S. Gomez and J.-P. Hennart},\n  pages     = {51--67},\n  publisher = {Kluwer Academic},\n  title     = {A direct search optimization method that models the objective and constraint functions by linear interpolation},\n  year      = {1994},\n  address   = {Dordrecht},\n  booktitle = {Advances in Optimization and Numerical Analysis},\n}\n\n@Misc{Rowan1990,\n  author = {Rowan, T},\n  title  = {Functional Stability Analysis of Numerical Algorithms},\n  year   = {1990},\n}\n\n@InProceedings{Powell2004,\n  author    = {Powell, M},\n  booktitle = {Proc. 
40th Workshop on Large Scale Nonlinear Optimization},\n  title     = {The NEWUOA software for unconstrained optimization without derivatives},\n  year      = {2004},\n  address   = {Erice, Italy},\n}\n\n@Article{Dembo1983,\n  author  = {Dembo, R and Steihaug, T},\n  journal = {Math. Programming},\n  title   = {Truncated Newton algorithms for large-scale optimization},\n  year    = {1983},\n  pages   = {190--212},\n  volume  = {26},\n  doi     = {10.1007/BF02592055},\n}\n\n@Article{Nocedal1989,\n  author  = {Nocedal ; D, J and Liu, J},\n  journal = {Math. Comput},\n  title   = {On the limited memory BFGS method for large scale optimization},\n  year    = {1989},\n  pages   = {503--528},\n  volume  = {35},\n}\n\n@Article{Svanberg2002,\n  author  = {Svanberg, Krister},\n  journal = {SIAM J. Optim},\n  title   = {A class of globally convergent optimization methods based on conservative convex separable approximations},\n  year    = {2002},\n  number  = {2},\n  pages   = {555--573},\n  volume  = {12},\n}\n\n@Article{Vlcek2006,\n  author  = {Vlcek, J and Luksan, L},\n  journal = {J. Computational Appl. Math},\n  title   = {Shifted limited-memory variable metric methods for large-scale unconstrained minimization},\n  year    = {2006},\n  pages   = {365--390},\n  volume  = {186},\n}\n\n@Article{Nocedal1980,\n  author  = {Nocedal, J.},\n  journal = {Math. 
Comput},\n  title   = {Updating quasi-Newton matrices with limited storage},\n  year    = {1980},\n  pages   = {773--782},\n  volume  = {35},\n\n}\n\n@InProceedings{Chiang2014,\n  author    = {Chiang, N and Petra, C and Zavala, V},\n  booktitle = {Proceedings of the 18th power systems computation conference (PSCC)},\n  title     = {Structured nonconvex optimization of large-scale energy systems using PIPS-NLP},\n  year      = {2014},\n  address   = {Wroclaw, Poland},\n}\n\n\n@Article{Zhou2010,\n  author    = {Weijun Zhou and Xiaojun Chen},\n  journal   = {SIAM Journal on Optimization},\n  title     = {Global Convergence of a New Hybrid Gauss{\\textendash}Newton Structured {BFGS} Method for Nonlinear Least Squares Problems},\n  year      = {2010},\n  month     = {jan},\n  number    = {5},\n  pages     = {2422--2441},\n  volume    = {20},\n  doi       = {10.1137/090748470},\n  publisher = {Society for Industrial {\\&} Applied Mathematics ({SIAM})},\n}\n\n@Article{Dennis1989,\n  author    = {J. E. Dennis and H. J. Martinez and R. A. Tapia},\n  journal   = {Journal of Optimization Theory and Applications},\n  title     = {Convergence theory for the structured {BFGS} secant method with an application to nonlinear least squares},\n  year      = {1989},\n  month     = {may},\n  number    = {2},\n  pages     = {161--178},\n  volume    = {61},\n  doi       = {10.1007/bf00962795},\n  publisher = {Springer Science and Business Media {LLC}},\n}\n\n@Article{Huschens1994,\n  author    = {J. 
Huschens},\n  journal   = {SIAM Journal on Optimization},\n  title     = {On the Use of Product Structure in Secant Methods for Nonlinear Least Squares Problems},\n  year      = {1994},\n  month     = {feb},\n  number    = {1},\n  pages     = {108--129},\n  volume    = {4},\n  doi       = {10.1137/0804005},\n  publisher = {Society for Industrial {\\&} Applied Mathematics ({SIAM})},\n}\n\n@Article{Berndt1974,\n  title = {Estimation and Inference in Nonlinear Structural Models},\n  journal = {Annals of Economic and Social Measurement},\n  author = {Berndt, Ernst R. and Hall, Bronwyn and Hall, Robert and Hausman, Jerry},\n  year = {1974},\n  pages = {653-665},\n  booktitle = {Annals of Economic and Social Measurement, Volume 3, number 4},\n  publisher = {National Bureau of Economic Research, Inc},\n}\n\n@Article{Halbert1982,\n author = {Halbert White},\n journal = {Econometrica},\n number = {1},\n pages = {1--25},\n publisher = {[Wiley, Econometric Society]},\n title = {Maximum Likelihood Estimation of Misspecified Models},\n volume = {50},\n year = {1982},\n}\n\n\n@article{More1983,\n  title={Computing a Trust Region Step},\n  author={Jorge J. Mor{\\'e} and Danny C. 
Sorensen},\n  journal={Siam Journal on Scientific and Statistical Computing},\n  year={1983},\n  volume={4},\n  pages={553-572}\n}\n\n@article{Bertsekas1982,\nauthor = {Bertsekas, Dimitri P.},\ntitle = {Projected Newton Methods for Optimization Problems with Simple Constraints},\njournal = {SIAM Journal on Control and Optimization},\nvolume = {20},\nnumber = {2},\npages = {221-246},\nyear = {1982},\ndoi = {10.1137/0320018},\nURL = {https://doi.org/10.1137/0320018},\n}\n\n@article{Steihaug1983,\nauthor = {Steihaug, Trond},\ntitle = {The Conjugate Gradient Method and Trust Regions in Large Scale Optimization},\njournal = {SIAM Journal on Numerical Analysis},\nvolume = {20},\nnumber = {3},\npages = {626-637},\nyear = {1983},\ndoi = {10.1137/0720042},\nURL = {https://doi.org/10.1137/0720042},\n}\n\n@InBook{Toint1981,\n  title      = {Towards an Efficient Sparsity Exploiting Newton Method for Minimization},\n  author     = {Toint, {\\relax Ph}ilippe L.},\n  booktitle  = {Sparse Matrices and Their Uses},\n  publisher  = {Academic Press},\n  year       = {1981},\n  address    = {London, England},\n  editor     = {I. S. Duff},\n  pages      = {57--88},\n}\n\n@article{Zhang2010,\nauthor = {Zhang, Hongchao and Conn, Andrew R. and Scheinberg, Katya},\ntitle = {A Derivative-Free Algorithm for Least-Squares Minimization},\njournal = {SIAM Journal on Optimization},\nvolume = {20},\nnumber = {6},\npages = {3555-3576},\nyear = {2010},\ndoi = {10.1137/09075531X},\nURL = {https://doi.org/10.1137/09075531X},\n}\n\n\n@book{Conn2009,\nauthor = {Conn, Andrew R. 
and Scheinberg, Katya and Vicente, Luis N.},\ntitle = {Introduction to Derivative-Free Optimization},\npublisher = {Society for Industrial and Applied Mathematics},\nyear = {2009},\ndoi = {10.1137/1.9780898718768},\nURL = {https://epubs.siam.org/doi/abs/10.1137/1.9780898718768},\n}\n\n@article{JAMES1975343,\ntitle = {Minuit - a system for function minimization and analysis of the parameter errors and correlations},\njournal = {Computer Physics Communications},\nvolume = {10},\nnumber = {6},\npages = {343-367},\nyear = {1975},\nissn = {0010-4655},\ndoi = {https://doi.org/10.1016/0010-4655(75)90039-9},\nurl = {https://www.sciencedirect.com/science/article/pii/0010465575900399},\nauthor = {F. James and M. Roos}\n}\n\n\n@misc{Hansen2023,\ntitle={The CMA Evolution Strategy: A Tutorial}, \nauthor={Nikolaus Hansen},\nyear={2023},\neprint={1604.00772},\narchivePrefix={arXiv},\nprimaryClass={cs.LG},\nurl={https://arxiv.org/abs/1604.00772}, \n}\n\n@InProceedings{Kennedy1995,\n  author={Kennedy, J. 
and Eberhart, R.},\n  booktitle={Proceedings of ICNN'95 - International Conference on Neural Networks}, \n  title={Particle swarm optimization}, \n  year={1995},\n  volume={4},\n  pages={1942-1948 vol.4},\n  keywords={Particle swarm optimization;Birds;Educational institutions;Marine animals;Testing;Humans;Genetic algorithms;Optimization methods;Artificial neural networks;Performance evaluation},\n  doi={10.1109/ICNN.1995.488968},\n}\n\n@InProceedings{Zambrano2013,\n  author = {Zambrano-Bigiarini, Mauricio and Clerc, Maurice and Rojas, Rodrigo},\n  booktitle = {2013 IEEE Congress on Evolutionary Computation}, \n  title = {Standard Particle Swarm Optimisation 2011 at CEC-2013: A baseline for future PSO improvements}, \n  year = {2013},\n  pages = {2337-2344},\n  keywords = {Optimization;Standards;Benchmark testing;Topology;Algorithm design and analysis;Convergence;Equations;particle swarm optimization;SPSO-2011;CEC-2013;random topology;rotational invariance;benchmark testing;evolutionary computation;optimization},\n  doi = {10.1109/CEC.2013.6557848},\n}\n\n@inbook{randomsearch2010,\nauthor = {Zabinsky, Zelda},\nyear = {2010},\nmonth = {06},\npages = {},\ntitle = {Random Search Algorithms},\nisbn = {9780470400531},\ndoi = {10.1002/9780470400531.eorms0704}\n}\n\n@INPROCEEDINGS{spsaimpl,\n  author={Rastogi, Pushpendre and Zhu, Jingyi and Spall, James C.},\n  booktitle={2016 Annual Conference on Information Science and Systems (CISS)}, \n  title={Efficient implementation of enhanced adaptive simultaneous perturbation algorithms}, \n  year={2016},\n  volume={},\n  number={},\n  pages={298-303},\n  keywords={Estimation;Algorithm design and analysis;Adaptive Estimation;Simultaneous Perturbation Stochastic Approximation (SPSA);Woodbury Matrix Identity},\n  doi={10.1109/CISS.2016.7460518}}\n\n@inproceedings{tbpsaimpl,\nauthor = {Hellwig, Michael and Beyer, Hans-Georg},\nyear = {2016},\nmonth = {09},\npages = {},\ntitle     = {Evolution under Strong Noise: A Self-Adaptive 
Evolution Strategy Can Reach the Lower Performance Bound - the pcCMSA-ES},\nbooktitle = {Parallel Problem Solving from Nature -- PPSN XIII},volume = {9921},\nisbn = {9783319458229},\ndoi = {10.1007/978-3-319-45823-6_3}\n}\n\n@ARTICLE{cgaimpl,\n  author={Harik, G.R. and Lobo, F.G. and Goldberg, D.E.},\n  journal={IEEE Transactions on Evolutionary Computation}, \n  title={The compact genetic algorithm}, \n  year={1999},\n  volume={3},\n  number={4},\n  pages={287-297},\n  keywords={Genetic algorithms;Algorithm design and analysis;Laboratories;Computer simulation;Genetic engineering;Probability distribution;Computational modeling;History;Convergence;Mathematical model},\n  doi={10.1109/4235.797971}}\n\n@inproceedings{bayesoptimimpl,\nauthor = {Raponi, Elena and Wang, Hao and Bujny, Mariusz and Boria, Simonetta and Doerr, Carola},\ntitle = {High Dimensional Bayesian Optimization Assisted by Principal Component Analysis},\nyear = {2020},\nisbn = {978-3-030-58111-4},\npublisher = {Springer-Verlag},\naddress = {Berlin, Heidelberg},\nurl = {https://doi.org/10.1007/978-3-030-58112-1_12},\ndoi = {10.1007/978-3-030-58112-1_12},\nabstract = {Bayesian Optimization (BO) is a surrogate-assisted global optimization technique that has been successfully applied in various fields, e.g., automated machine learning and design optimization. Built upon a so-called infill-criterion and Gaussian Process regression (GPR), the BO technique suffers from a substantial computational complexity and hampered convergence rate as the dimension of the search spaces increases. Scaling up BO for high-dimensional optimization problems remains a challenging task.In this paper, we propose to tackle the scalability of BO by hybridizing it with a Principal Component Analysis (PCA), resulting in a novel PCA-assisted BO (PCA-BO) algorithm. 
Specifically, the PCA procedure learns a linear transformation from all the evaluated points during the run and selects dimensions in the transformed space according to the variability of evaluated points. We then construct the GPR model, and the infill-criterion in the space spanned by the selected dimensions.We assess the performance of our PCA-BO in terms of the empirical convergence rate and CPU time on multi-modal problems from the COCO benchmark framework. The experimental results show that PCA-BO can effectively reduce the CPU time incurred on high-dimensional problems, and maintains the convergence rate on problems with an adequate global structure. PCA-BO therefore provides a satisfactory trade-off between the convergence rate and computational efficiency opening new ways to benefit from the strength of BO approaches in high dimensional numerical optimization.},\nbooktitle = {Parallel Problem Solving from Nature – PPSN XVI: 16th International Conference, PPSN 2020, Leiden, The Netherlands, September 5-9, 2020, Proceedings, Part I},\npages = {169–183},\nnumpages = {15},\nkeywords = {Dimensionality reduction, Principal Component Analysis, Black-box optimization, Bayesian optimization},\nlocation = {Leiden, The Netherlands}\n}\n\n@book{Rechenberg1973,\n  author = {Rechenberg, Ingo},\n  title = {Evolutionsstrategie: Optimierung technischer Systeme nach Prinzipien der biologischen Evolution},\n  publisher = {Frommann-Holzboog Verlag},\n  year = {1973},\n  url = {https://gwern.net/doc/reinforcement-learning/exploration/1973-rechenberg.pdf},\n  address = {Stuttgart},\n  note = {[Evolution Strategy: Optimization of Technical Systems According to the Principles of Biological Evolution]}\n}\n\n@article{Schumer1968,\n  author={Schumer, M. 
and Steiglitz, K.},\n  journal={IEEE Transactions on Automatic Control}, \n  title={Adaptive step size random search}, \n  year={1968},\n  volume={13},\n  number={3},\n  pages={270-276},\n  keywords={Minimization methods;Gradient methods;Search methods;Adaptive control;Communication systems;Q measurement;Cost function;Newton method;Military computing},\n  doi={10.1109/TAC.1968.1098903}\n}\n\n@misc{edaimpl,\n      title={Theory of Estimation-of-Distribution Algorithms}, \n      author={Martin S. Krejca and Carsten Witt},\n      year={2018},\n      eprint={1806.05392},\n      archivePrefix={arXiv},\n      primaryClass={cs.NE},\n      url={https://arxiv.org/abs/1806.05392}, \n}\n\n@book{emnaimpl,\nauthor = {Larranaga, Pedro and Lozano, Jose},\nyear = {2002},\nmonth = {01},\npages = {},\ntitle = {Estimation of Distribution Algorithms: A New Tool for Evolutionary Computation},\nisbn = {9781461356042},\npublisher = {Springer},\njournal = {Genetic algorithms and evolutionary computation ; 2},\ndoi = {10.1007/978-1-4615-1539-5}\n}\n\n@Misc{Nogueira2014,\n  author={Fernando Nogueira},\n  title={{Bayesian Optimization}: Open source constrained global optimization tool for {Python}},\n  year={2014--},\n  url=\"https://github.com/bayesian-optimization/BayesianOptimization\"\n}\n\n@article{Stander2002,\n  author={Stander, Nielen and Craig, Kenneth},\n  year={2002},\n  month={06},\n  pages={},\n  title={On the robustness of a simple domain reduction scheme for simulation-based optimization},\n  volume={19},\n  journal={International Journal for Computer-Aided Engineering and Software (Eng. 
Comput.)},\n  doi={10.1108/02644400210430190}\n}\n\n@inproceedings{gardner2014bayesian,\n  title={Bayesian optimization with inequality constraints.},\n  author={Gardner, Jacob R and Kusner, Matt J and Xu, Zhixiang Eddie and Weinberger, Kilian Q and Cunningham, John P},\n  booktitle={ICML},\n  volume={2014},\n  pages={937--945},\n  year={2014}\n}\n\n@article{gad2023pygad,\n  title={Pygad: An intuitive genetic algorithm python library},\n  author={Gad, Ahmed Fawzy},\n  journal={Multimedia Tools and Applications},\n  pages={1--14},\n  year={2023},\n  publisher={Springer}\n}\n\n@INPROCEEDINGS{EberhartKennedy1995,\n  author    = {Eberhart, R. and Kennedy, J.},\n  booktitle = {MHS'95. Proceedings of the Sixth International Symposium on Micro Machine and Human Science},\n  title     = {A new optimizer using particle swarm theory},\n  year      = {1995},\n  pages     = {39-43},\n  keywords  = {Particle swarm optimization;Genetic algorithms;Testing;Acceleration;Particle tracking;Optimization methods;Artificial neural networks;Evolutionary computation;Performance evaluation;Statistics},\n  doi       = {10.1109/MHS.1995.494215}\n}\n\n@INPROCEEDINGS{Lane2008SpatialPSO,\n  author={Lane, James and Engelbrecht, Andries and Gain, James},\n  booktitle={2008 IEEE Swarm Intelligence Symposium}, \n  title={Particle swarm optimization with spatially meaningful neighbours}, \n  year={2008},\n  volume={},\n  number={},\n  pages={1-8},\n  keywords={Particle swarm optimization;Topology;Birds;Convergence;Computer science;USA Councils;Cities and towns;Africa;Cultural differences;Data structures;Delaunay Triangulation;Neighbour Topology;Particle Swarm Optimization;Heuristics},\n  doi={10.1109/SIS.2008.4668281}\n}\n\n@article{Ni2013,\nauthor = {Ni, Qingjian and Deng, Jianming},\ntitle = {A New Logistic Dynamic Particle Swarm Optimization Algorithm Based on Random Topology},\njournal = {The Scientific World Journal},\nvolume = {2013},\nnumber = {1},\npages = {409167},\ndoi = 
{https://doi.org/10.1155/2013/409167},\nurl = {https://onlinelibrary.wiley.com/doi/abs/10.1155/2013/409167},\neprint = {https://onlinelibrary.wiley.com/doi/pdf/10.1155/2013/409167},\nabstract = {Population topology of particle swarm optimization (PSO) will directly affect the dissemination of optimal information during the evolutionary process and will have a significant impact on the performance of PSO. Classic static population topologies are usually used in PSO, such as fully connected topology, ring topology, star topology, and square topology. In this paper, the performance of PSO with the proposed random topologies is analyzed, and the relationship between population topology and the performance of PSO is also explored from the perspective of graph theory characteristics in population topologies. Further, in a relatively new PSO variant which named logistic dynamic particle optimization, an extensive simulation study is presented to discuss the effectiveness of the random topology and the design strategies of population topology. Finally, the experimental data are analyzed and discussed. And about the design and use of population topology on PSO, some useful conclusions are proposed which can provide a basis for further discussion and research.},\nyear = {2013}\n}\n\n@Comment{jabref-meta: databaseType:bibtex;}\n"
  },
  {
    "path": "docs/source/tutorials/bayes_opt_tutorial.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"0\",\n   \"metadata\": {},\n   \"source\": [\n    \"# `bayes_opt` Optimizer in optimagic\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"1\",\n   \"metadata\": {},\n   \"source\": [\n    \"This tutorial demonstrates how to use the `\\\"bayes_opt\\\"` optimizer in optimagic. To use it, you need to have `bayesian-optimization` package installed. You can install it with the following command:\\n\",\n    \"```bash\\n\",\n    \"pip install bayesian-optimization\\n\",\n    \"```\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"2\",\n   \"metadata\": {},\n   \"source\": [\n    \"### When to use Bayesian Optimization:\\n\",\n    \"- Function evaluations are expensive (e.g., simulations, experiments)\\n\",\n    \"- The function is a black box(it cannot be expressed in closed form)\\n\",\n    \"- You have a limited budget of function evaluations\\n\",\n    \"- When gradients are unavailable or computationally expensive to obtain\\n\",\n    \"\\n\",\n    \"### Key Concepts\\n\",\n    \"\\n\",\n    \"### Gaussian Processes (GP)\\n\",\n    \"The GP serves as a probabilistic model of your objective function. 
It provides both a mean prediction and uncertainty estimates.\\n\",\n    \"### Acquisition Functions\\n\",\n    \"These functions use the GP's predictions to decide where to evaluate next.\\n\",\n    \"\\n\",\n    \"Common acquisition functions include:\\n\",\n    \"- **Upper Confidence Bound (UCB)**: Balances mean prediction with uncertainty\\n\",\n    \"- **Expected Improvement (EI)**: Expected improvement over the current best\\n\",\n    \"- **Probability of Improvement (POI)**: Probability of improving over the current best\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"3\",\n   \"metadata\": {\n    \"vscode\": {\n     \"languageId\": \"plaintext\"\n    }\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"import numpy as np\\n\",\n    \"import pandas as pd\\n\",\n    \"import matplotlib.pyplot as plt\\n\",\n    \"\\n\",\n    \"import optimagic as om\\n\",\n    \"from bayes_opt import acquisition\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"4\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Basic Usage of the `bayes_opt` Optimizer\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"5\",\n   \"metadata\": {},\n   \"source\": [\n    \"Let's start with a simple example using a sphere function\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"6\",\n   \"metadata\": {\n    \"vscode\": {\n     \"languageId\": \"plaintext\"\n    }\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"def sphere(params):\\n\",\n    \"    return params @ params\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"7\",\n   \"metadata\": {\n    \"vscode\": {\n     \"languageId\": \"plaintext\"\n    }\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# Note: bayes_opt requires bounds for all parameters\\n\",\n    \"bounds = om.Bounds(\\n\",\n    \"    lower=np.full(2, -10.0),\\n\",\n    \"    upper=np.full(2, 
10.0)\\n\",\n    \")\\n\",\n    \"bayesopt_res = om.minimize(\\n\",\n    \"    fun=sphere,\\n\",\n    \"    params=np.arange(2),\\n\",\n    \"    algorithm=\\\"bayes_opt\\\",\\n\",\n    \"    bounds=bounds,\\n\",\n    \"    algo_options={\\\"seed\\\": 1}\\n\",\n    \")\\n\",\n    \"\\n\",\n    \"bayesopt_res.params\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"8\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Acquisition Functions in the `bayes_opt` Optimizer\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"9\",\n   \"metadata\": {},\n   \"source\": [\n    \"In Bayesian optimization, the **acquisition function** decides *where to sample next*.\\n\",\n    \"It controls the trade-off between **exploration** (search new areas) and **exploitation** (focus on good areas).\\n\",\n    \"\\n\",\n    \"optimagic lets you set the acquisition function in different ways:\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"10\",\n   \"metadata\": {},\n   \"source\": [\n    \"### 1. Using No Acquisition Function (Default)\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"11\",\n   \"metadata\": {\n    \"vscode\": {\n     \"languageId\": \"plaintext\"\n    }\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# Uses package defaults: UCB for unconstrained, EI for constrained\\n\",\n    \"acquisition_function = None\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"12\",\n   \"metadata\": {},\n   \"source\": [\n    \"### 2. 
Using a String (Built-in acquisition functions)\\n\",\n    \"\\n\",\n    \"You can pass any of the following strings to select a standard acquisition function:\\n\",\n    \"\\n\",\n    \"* `\\\"ucb\\\"` / `\\\"upper_confidence_bound\\\"` – Upper Confidence Bound\\n\",\n    \"* `\\\"ei\\\"` / `\\\"expected_improvement\\\"` – Expected Improvement\\n\",\n    \"* `\\\"poi\\\"` / `\\\"probability_of_improvement\\\"` – Probability of Improvement\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"13\",\n   \"metadata\": {\n    \"vscode\": {\n     \"languageId\": \"plaintext\"\n    }\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"acquisition_function_str = \\\"ucb\\\"\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"14\",\n   \"metadata\": {},\n   \"source\": [\n    \"### 3. Using a Class (Auto-Instantiated)\\n\",\n    \"\\n\",\n    \"You can also pass the class directly, optimagic will create an instance for it:\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"15\",\n   \"metadata\": {\n    \"vscode\": {\n     \"languageId\": \"plaintext\"\n    }\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"from bayes_opt.acquisition import UpperConfidenceBound\\n\",\n    \"\\n\",\n    \"acquisition_function_class = UpperConfidenceBound\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"16\",\n   \"metadata\": {},\n   \"source\": [\n    \"### 4. 
Using an Instance\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"17\",\n   \"metadata\": {\n    \"vscode\": {\n     \"languageId\": \"plaintext\"\n    }\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"from bayes_opt.acquisition import ExpectedImprovement\\n\",\n    \"\\n\",\n    \"acquisition_function_instance = ExpectedImprovement(\\n\",\n    \"    xi=0.1,\\n\",\n    \"    exploration_decay=0.95,\\n\",\n    \"    exploration_decay_delay=5\\n\",\n    \")\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"18\",\n   \"metadata\": {},\n   \"source\": [\n    \"### Example Run with configured acquisition functions\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"19\",\n   \"metadata\": {\n    \"vscode\": {\n     \"languageId\": \"plaintext\"\n    }\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"res = om.minimize(\\n\",\n    \"    fun=sphere ,\\n\",\n    \"    params=np.arange(2),\\n\",\n    \"    algorithm=\\\"bayes_opt\\\",\\n\",\n    \"    bounds=om.Bounds(lower=np.full(2, -5.0), upper=np.full(2, 5.0)),\\n\",\n    \"    algo_options={\\\"seed\\\":1, \\\"acquisition_function\\\": acquisition_function_str,}\\n\",\n    \"        # acquisition_function can be any of:\\n\",\n    \"        #   acquisition_function_str        → e.g. \\\"ucb\\\", \\\"ei\\\", \\\"poi\\\"\\n\",\n    \"        #   acquisition_function_class      → e.g. UpperConfidenceBound\\n\",\n    \"        #   acquisition_function_instance   → e.g. 
ExpectedImprovement(xi=0.1)\\n\",\n    \"        #   None                            → defaults to \\\"ucb\\\"\\n\",\n    \"    )\\n\",\n    \"\\n\",\n    \"res.params\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"20\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Custom Acquisition Functions\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"21\",\n   \"metadata\": {},\n   \"source\": [\n    \"`bayesian-optimization` also allows us to write our own acquisition functions by subclassing its `AcquisitionFunction` class. This allows you to define exploration/exploitation strategies tailored to your specific problem.\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"22\",\n   \"metadata\": {},\n   \"source\": [\n    \"### Implementation Requirements\\n\",\n    \"\\n\",\n    \"When subclassing `AcquisitionFunction`, there are specific methods we must implement:\\n\",\n    \"\\n\",\n    \"1. **`base_acq(self, mean, std)` method (Required)**: This is the core method where you define the mathematical formula for your acquisition function. It takes the predicted mean and standard deviation from the Gaussian Process and returns the acquisition value(s).\\n\",\n    \"\\n\",\n    \"2. **`suggest` method (Optional but often needed)**: The base class provides a default implementation, but you may need to override it if you need to set up internal state (like `y_max` for EI/PI) before `base_acq` is called.\\n\",\n    \"\\n\",\n    \"3. **`get_acquisition_params` and `set_acquisition_params` methods (Optional but recommended)**: These are used for retrieving and setting the internal parameters of your acquisition function. 
Implementing them makes your acquisition function fully configurable and serializable.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"23\",\n   \"metadata\": {\n    \"vscode\": {\n     \"languageId\": \"plaintext\"\n    }\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"from bayes_opt.acquisition import AcquisitionFunction\\n\",\n    \"class CustomAcquisition(AcquisitionFunction):\\n\",\n    \"    \\\"\\\"\\\"\\n\",\n    \"    A simple custom acquisition function.\\n\",\n    \"\\n\",\n    \"    This acquisition returns half of the predicted mean.\\n\",\n    \"    It ignores the uncertainty (std), making it purely\\n\",\n    \"    exploitation-oriented.\\n\",\n    \"    \\\"\\\"\\\"\\n\",\n    \"    def __init__(self):\\n\",\n    \"        super().__init__()\\n\",\n    \"\\n\",\n    \"    def base_acq(self, mean, std):\\n\",\n    \"        return 0.5 * mean\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"24\",\n   \"metadata\": {},\n   \"source\": [\n    \"### Using the Custom Acquisition Function\\n\",\n    \"\\n\",\n    \"Once you have defined your custom acquisition function, you can use it in optimagic by passing an instance or a class to the `acquisition_function` parameter:\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"25\",\n   \"metadata\": {\n    \"vscode\": {\n     \"languageId\": \"plaintext\"\n    }\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"acquisition_function = CustomAcquisition()\\n\",\n    \"\\n\",\n    \"res = om.minimize(\\n\",\n    \"    fun=sphere ,\\n\",\n    \"    params=np.arange(2),\\n\",\n    \"    algorithm=\\\"bayes_opt\\\",\\n\",\n    \"    bounds=om.Bounds(lower=np.full(2, -5.0), upper=np.full(2, 5.0)),\\n\",\n    \"    algo_options={\\\"seed\\\":1, \\\"acquisition_function\\\": acquisition_function,}\\n\",\n    \"    )\\n\",\n    \"\\n\",\n    \"res.params\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   
\"id\": \"26\",\n   \"metadata\": {},\n   \"source\": [\n    \"### Meta Acquisition Functions\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"27\",\n   \"metadata\": {},\n   \"source\": [\n    \"The `bayesian-optimization` package also provides meta acquisition functions that operate on other acquisition functions:\\n\",\n    \"\\n\",\n    \"1. **GPHedge**: Dynamically chooses the best acquisition function from a set of candidates based on their past performance.\\n\",\n    \"2. **ConstantLiar**: Used for parallelized optimization to discourage sampling near points that have already been suggested but not yet evaluated.\\n\",\n    \"\\n\",\n    \"Here's how to use GPHedge with multiple base acquisition functions:\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"28\",\n   \"metadata\": {},\n   \"source\": [\n    \"### 1. **GPHedge**:\\n\",\n    \"Dynamically chooses the best acquisition function from a set of candidates based on their past performance.\\n\",\n    \"\\n\",\n    \"let’s define the **Branin function**, to use with Meta Acquisition functions.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"29\",\n   \"metadata\": {\n    \"vscode\": {\n     \"languageId\": \"plaintext\"\n    }\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"def branin(params):\\n\",\n    \"    \\\"\\\"\\\"The Branin function - a classic optimization test function.\\n\",\n    \"\\n\",\n    \"    Has three global minima at approximately:\\n\",\n    \"    - (-π, 12.275)\\n\",\n    \"    - (π, 2.275)\\n\",\n    \"    - (9.42478, 2.475)\\n\",\n    \"\\n\",\n    \"    Global minimum value: 0.397887\\n\",\n    \"    \\\"\\\"\\\"\\n\",\n    \"    x1, x2 = params[0], params[1]\\n\",\n    \"\\n\",\n    \"    a = 1\\n\",\n    \"    b = 5.1 / (4 * np.pi**2)\\n\",\n    \"    c = 5 / np.pi\\n\",\n    \"    r = 6\\n\",\n    \"    s = 10\\n\",\n    \"    t = 1 / (8 * np.pi)\\n\",\n    \"\\n\",\n    \"    term1 = a * (x2 
- b * x1**2 + c * x1 - r)**2\\n\",\n    \"    term2 = s * (1 - t) * np.cos(x1)\\n\",\n    \"    term3 = s\\n\",\n    \"\\n\",\n    \"    return term1 + term2 + term3\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"30\",\n   \"metadata\": {\n    \"vscode\": {\n     \"languageId\": \"plaintext\"\n    }\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"from bayes_opt.acquisition import GPHedge, UpperConfidenceBound, ExpectedImprovement\\n\",\n    \"\\n\",\n    \"# Create a list of base acquisition functions\\n\",\n    \"base_acquisitions = [\\n\",\n    \"    UpperConfidenceBound(kappa=2.576),\\n\",\n    \"    ExpectedImprovement(xi=0.01),\\n\",\n    \"    # Add more as needed\\n\",\n    \"]\\n\",\n    \"\\n\",\n    \"gphedge_acq = GPHedge(base_acquisitions)\\n\",\n    \"\\n\",\n    \"result = om.minimize(\\n\",\n    \"    fun=branin,\\n\",\n    \"    params=np.array([1.0, 1.0]),\\n\",\n    \"    algorithm=\\\"bayes_opt\\\",\\n\",\n    \"    bounds=bounds,\\n\",\n    \"    algo_options={\\n\",\n    \"        \\\"acquisition_function\\\": gphedge_acq,\\n\",\n    \"        \\\"seed\\\": 42\\n\",\n    \"    }\\n\",\n    \")\\n\",\n    \"\\n\",\n    \"result.params, result.fun\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"31\",\n   \"metadata\": {},\n   \"source\": [\n    \"### 2. ConstantLiar\\n\",\n    \"\\n\",\n    \"`ConstantLiar` is used for parallelized optimization. 
It discourages sampling near points that have already been suggested but not yet evaluated.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"32\",\n   \"metadata\": {\n    \"vscode\": {\n     \"languageId\": \"plaintext\"\n    }\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"from bayes_opt.acquisition import ConstantLiar, UpperConfidenceBound\\n\",\n    \"\\n\",\n    \"base_acq = UpperConfidenceBound(kappa=2.576)\\n\",\n    \"\\n\",\n    \"constant_liar_acq = ConstantLiar(base_acquisition=base_acq, strategy=\\\"max\\\")\\n\",\n    \"\\n\",\n    \"# Use in optimization (Note: ConstantLiar is primarily for async optimization)\\n\",\n    \"result = om.minimize(\\n\",\n    \"    fun=sphere,\\n\",\n    \"    params=np.array([1.0, 1.0]),\\n\",\n    \"    algorithm=\\\"bayes_opt\\\",\\n\",\n    \"    bounds=bounds,\\n\",\n    \"    algo_options={\\n\",\n    \"        \\\"acquisition_function\\\": constant_liar_acq,\\n\",\n    \"        \\\"seed\\\": 42\\n\",\n    \"    }\\n\",\n    \")\\n\",\n    \"\\n\",\n    \"result.params\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"33\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Exploration vs Exploitation Trade-off\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"34\",\n   \"metadata\": {},\n   \"source\": [\n    \"When using Bayesian optimization, the acquisition function decides where to sample next. 
It balances exploration (try new areas) vs exploitation (refine known good areas).\\n\",\n    \"\\n\",\n    \"- **Exploration**: Sampling in regions with high uncertainty\\n\",\n    \"- **Exploitation**: Sampling in regions with high predicted values\\n\",\n    \"\\n\",\n    \"### Related Parameters\\n\",\n    \"\\n\",\n    \"- **kappa** (UCB): Higher values → more exploration\\n\",\n    \"- **xi** (EI/POI): Higher values → more exploration\\n\",\n    \"- **exploration_decay**: Gradually shift from exploration to exploitation\\n\",\n    \"- **exploration_decay_delay**: When to start the decay\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"35\",\n   \"metadata\": {\n    \"vscode\": {\n     \"languageId\": \"plaintext\"\n    }\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"def f(x):\\n\",\n    \"  \\\"\\\"\\\"Function with multiple peaks\\\"\\\"\\\"\\n\",\n    \"  x = x[0]\\n\",\n    \"  return float(\\n\",\n    \"        np.exp(-(x - 2) ** 2) +\\n\",\n    \"        np.exp(-(x - 6) ** 2 / 10) +\\n\",\n    \"        1 / (x ** 2 + 1)\\n\",\n    \"    )\\n\",\n    \"x = np.linspace(-2, 10, 100)\\n\",\n    \"Y = [f([xi]) for xi in x]\\n\",\n    \"plt.plot(x, Y)\\n\",\n    \"plt.xlabel(\\\"x\\\")\\n\",\n    \"plt.ylabel(\\\"f(x)\\\")\\n\",\n    \"plt.show()\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"36\",\n   \"metadata\": {\n    \"vscode\": {\n     \"languageId\": \"plaintext\"\n    }\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"def plot_bayes_opt(result):\\n\",\n    \"    \\\"\\\"\\\"Plot optimization results\\\"\\\"\\\"\\n\",\n    \"    evaluated_points = np.array([p[0] for p in result.history.params])\\n\",\n    \"    function_values = np.array(result.history.fun)\\n\",\n    \"\\n\",\n    \"    plt.figure(figsize=(8,5))\\n\",\n    \"    plt.plot(x, Y, 'b-', label=\\\"Original function f(x)\\\")\\n\",\n    \"    plt.scatter(evaluated_points, function_values, 
c=\\\"red\\\", s=60, zorder=3, label=\\\"Evaluated points\\\")\\n\",\n    \"    plt.axvline(result.params[0], color=\\\"green\\\", linestyle=\\\"--\\\", label=\\\"Best param\\\")\\n\",\n    \"\\n\",\n    \"    plt.xlabel(\\\"x\\\")\\n\",\n    \"    plt.ylabel(\\\"f(x)\\\")\\n\",\n    \"    plt.legend()\\n\",\n    \"    plt.grid(True, alpha=0.3)\\n\",\n    \"    plt.show()\\n\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"37\",\n   \"metadata\": {\n    \"vscode\": {\n     \"languageId\": \"plaintext\"\n    }\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# strategy: exploitation (kappa=0.1) - focuses on known good areas\\n\",\n    \"acquisition_function = acquisition.UpperConfidenceBound(kappa=0.1)\\n\",\n    \"result = om.maximize(\\n\",\n    \"    fun=f,\\n\",\n    \"    params=np.array([0.]),\\n\",\n    \"    algorithm=\\\"bayes_opt\\\",\\n\",\n    \"    bounds=om.Bounds(lower=np.full(1, -2.0), upper=np.full(1, 10.0)),\\n\",\n    \"    algo_options={\\n\",\n    \"        \\\"acquisition_function\\\": acquisition_function,\\n\",\n    \"        \\\"seed\\\": 987234,\\n\",\n    \"    }\\n\",\n    \")\\n\",\n    \"\\n\",\n    \"# Notice: Points cluster around peaks, might also get stuck in local optimum\\n\",\n    \"plot_bayes_opt(result)\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"38\",\n   \"metadata\": {\n    \"vscode\": {\n     \"languageId\": \"plaintext\"\n    }\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# strategy: exploration (kappa=10) - explores more broadly\\n\",\n    \"acquisition_function = acquisition.UpperConfidenceBound(kappa=10)\\n\",\n    \"result = om.maximize(\\n\",\n    \"    fun=f,\\n\",\n    \"    params=np.array([0.]),\\n\",\n    \"    algorithm=\\\"bayes_opt\\\",\\n\",\n    \"    bounds=om.Bounds(lower=np.full(1, -2.0), upper=np.full(1, 10.0)),\\n\",\n    \"    algo_options={\\n\",\n    \"        \\\"acquisition_function\\\": 
acquisition_function,\\n\",\n    \"        \\\"seed\\\": 987234,\\n\",\n    \"    }\\n\",\n    \")\\n\",\n    \"\\n\",\n    \"# Notice: Points are more spread out, better chance of finding global optimum\\n\",\n    \"plot_bayes_opt(result)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"39\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Sequential Domain Reduction (SDR)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"40\",\n   \"metadata\": {},\n   \"source\": [\n    \"Sequential Domain Reduction (SDR) progressively narrows the search space around promising regions. This can significantly improve optimization, especially for high-dimensional problems.\\n\",\n    \"\\n\",\n    \"### SDR Parameters\\n\",\n    \"\\n\",\n    \"- `enable_sdr`: Enable/disable Sequential Domain Reduction\\n\",\n    \"- `sdr_gamma_osc`: Controls oscillation damping (default: 0.7)\\n\",\n    \"- `sdr_gamma_pan`: Controls panning behavior (default: 1.0)\\n\",\n    \"- `sdr_eta`: Zooming parameter for region shrinking (default: 0.9)\\n\",\n    \"- `sdr_minimum_window`:  Minimum window size (default: 0.0)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"41\",\n   \"metadata\": {},\n   \"source\": [\n    \"### SDR Example\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"42\",\n   \"metadata\": {\n    \"vscode\": {\n     \"languageId\": \"plaintext\"\n    }\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"def ackley(x):\\n\",\n    \"    \\\"\\\"\\\"Global minimum: f(x*) = 0 at x* = (0, 0)\\\"\\\"\\\"\\n\",\n    \"    x0, x1 = x\\n\",\n    \"    arg1 = -0.2 * np.sqrt(0.5 * (x0 ** 2 + x1 ** 2))\\n\",\n    \"    arg2 = 0.5 * (np.cos(2 * np.pi * x0) + np.cos(2 * np.pi * x1))\\n\",\n    \"    return -20. * np.exp(arg1) - np.exp(arg2) + 20. 
+ np.e\\n\",\n    \"\\n\",\n    \"start_params = np.array([2.0, 2.0])\\n\",\n    \"bounds = om.Bounds(\\n\",\n    \"    lower=np.array([-32.768, -32.768]),\\n\",\n    \"    upper=np.array([32.768, 32.768])\\n\",\n    \")\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"43\",\n   \"metadata\": {\n    \"vscode\": {\n     \"languageId\": \"plaintext\"\n    }\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# Standard Bayesian Optimization without SDR\\n\",\n    \"result_standard = om.minimize(\\n\",\n    \"    fun=ackley,\\n\",\n    \"    params=start_params,\\n\",\n    \"    algorithm=\\\"bayes_opt\\\",\\n\",\n    \"    bounds=bounds,\\n\",\n    \"    algo_options={\\n\",\n    \"        \\\"enable_sdr\\\": False,\\n\",\n    \"        \\\"n_iter\\\": 50,\\n\",\n    \"        \\\"init_points\\\": 2,\\n\",\n    \"        \\\"seed\\\": 1,\\n\",\n    \"        \\\"acquisition_function\\\": \\\"ucb\\\",\\n\",\n    \"    }\\n\",\n    \")\\n\",\n    \"\\n\",\n    \"print(\\\"Standard Bayesian Optimization:\\\")\\n\",\n    \"print(\\\"Best function value:\\\", result_standard.fun)\\n\",\n    \"print(\\\"Best parameters:\\\", result_standard.x)\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"44\",\n   \"metadata\": {\n    \"vscode\": {\n     \"languageId\": \"plaintext\"\n    }\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# Bayesian Optimization with SDR\\n\",\n    \"result_sdr = om.minimize(\\n\",\n    \"    fun=ackley,\\n\",\n    \"    params=start_params,\\n\",\n    \"    algorithm=\\\"bayes_opt\\\",\\n\",\n    \"    bounds=bounds,\\n\",\n    \"    algo_options={\\n\",\n    \"        \\\"enable_sdr\\\": True,\\n\",\n    \"        \\\"sdr_minimum_window\\\": 0.5,\\n\",\n    \"        \\\"sdr_gamma_osc\\\": 0.7,\\n\",\n    \"        \\\"sdr_gamma_pan\\\": 1.0,\\n\",\n    \"        \\\"sdr_eta\\\": 0.9,\\n\",\n    \"        \\\"n_iter\\\": 50,\\n\",\n    \"        
\\\"init_points\\\": 2,\\n\",\n    \"        \\\"seed\\\": 1,\\n\",\n    \"        \\\"acquisition_function\\\": \\\"ucb\\\",\\n\",\n    \"    }\\n\",\n    \")\\n\",\n    \"\\n\",\n    \"print(\\\"Bayesian Optimization with SDR:\\\")\\n\",\n    \"print(\\\"Best function value:\\\", result_sdr.fun)\\n\",\n    \"print(\\\"Best parameters:\\\", result_sdr.x)\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"45\",\n   \"metadata\": {\n    \"vscode\": {\n     \"languageId\": \"plaintext\"\n    }\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# Compare convergence behavior\\n\",\n    \"results = {\\n\",\n    \"    \\\"Standard BO\\\": result_standard,\\n\",\n    \"    \\\"BO with SDR\\\": result_sdr\\n\",\n    \"}\\n\",\n    \"\\n\",\n    \"# SDR typically converges faster than standard BO\\n\",\n    \"fig = om.criterion_plot(results)\\n\",\n    \"fig.show()\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"46\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Gaussian Process Configuration\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"47\",\n   \"metadata\": {},\n   \"source\": [\n    \"`\\\"bayesian-optimization\\\"` uses a Gaussian Process (GP) as the surrogate model. 
Its behavior can be tuned with these options via algo_options:\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"* **alpha**: noise level in function evaluations\\n\",\n    \"\\n\",\n    \"  * lower values (e.g.,`1e-6`): assumes nearly precise function evaluations\\n\",\n    \"  * higher values (e.g., `1e-2`): assumes noisy evaluations\\n\",\n    \"\\n\",\n    \"* **n\\\\_restarts**: Number of times to restart the optimization.\\n\",\n    \"\\n\",\n    \"* **seed** → ensures reproducible results.\\n\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"id\": \"48\",\n   \"metadata\": {\n    \"vscode\": {\n     \"languageId\": \"plaintext\"\n    }\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"algo_options = {\\n\",\n    \"    \\\"alpha\\\": 1e-3,\\n\",\n    \"    \\\"n_restarts\\\": 5,\\n\",\n    \"    \\\"seed\\\": 42,\\n\",\n    \"}\\n\",\n    \"\\n\",\n    \"result_configured = om.minimize(\\n\",\n    \"    fun=sphere,\\n\",\n    \"    params=np.array([3.0, 3.0]),\\n\",\n    \"    algorithm=\\\"bayes_opt\\\",\\n\",\n    \"    bounds=om.Bounds(lower=np.full(2, -5.0), upper=np.full(2, 5.0)),\\n\",\n    \"    algo_options=algo_options\\n\",\n    \")\\n\",\n    \"\\n\",\n    \"print(\\\"Configured GP results:\\\")\\n\",\n    \"print(f\\\"  Best value: {result_configured.fun}\\\")\\n\",\n    \"print(f\\\"  Function evaluations: {result_configured.n_fun_evals}\\\")\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"49\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Summary\\n\",\n    \"\\n\",\n    \"Bayesian optimization is a powerful tool for optimizing expensive black-box functions. Key takeaways:\\n\",\n    \"\\n\",\n    \"1. **Choose the right acquisition function** based on your exploration/exploitation needs\\n\",\n    \"2. **Tune acquisition parameters** like kappa (UCB) or xi (EI) to control the trade-off\\n\",\n    \"3. **Use SDR** for high-dimensional problems to focus the search\\n\",\n    \"4. 
**Configure the GP properly** with appropriate noise levels and restarts\\n\",\n    \"\\n\",\n    \"For more detailed information, check out the [bayesian-optimization documentation](https://bayesian-optimization.github.io/BayesianOptimization/3.1.0/index.html#).\"\n   ]\n  }\n ],\n \"metadata\": {\n  \"language_info\": {\n   \"name\": \"python\"\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 5\n}\n"
  },
  {
    "path": "docs/source/tutorials/index.md",
    "content": "(tutorials)=\n\n# Tutorials\n\nThis section provides an overview of optimagic. It's a good starting point if you are\nnew to optimagic. For more in-depth examples using advanced options, check out the\n[how-to guides](how-to).\n\n`````{grid} 1 2 2 3\n---\ngutter: 3\n---\n````{grid-item-card}\n:text-align: center\n:img-top: ../_static/images/optimization.svg\n:class-img-top: index-card-image\n:shadow: md\n\n```{button-link} optimization_overview.html\n---\nclick-parent:\nref-type: ref\nclass: stretched-link index-card-link sd-text-primary\n---\nOptimization\n```\n\nLearn numerical optimization with estimagic.\n\n````\n\n````{grid-item-card}\n:text-align: center\n:img-top: ../_static/images/differentiation.svg\n:class-img-top: index-card-image\n:shadow: md\n\n```{button-link} numdiff_overview.html\n---\nclick-parent:\nref-type: ref\nclass: stretched-link index-card-link sd-text-primary\n---\nDifferentiation\n```\n\nLearn numerical differentiation with estimagic.\n\n````\n\n````{grid-item-card}\n:text-align: center\n:img-top: ../_static/images/bayesian_optimization.svg\n:class-img-top: index-card-image\n:shadow: md\n\n```{button-link} bayes_opt_tutorial.html\n---\nclick-parent:\nref-type: ref\nclass: stretched-link index-card-link sd-text-primary\n---\nbayes_opt Optimizer\n```\n\nTutorial on the bayes_opt optimizer in optimagic.\n\n````\n\n`````\n\n```{toctree}\n---\nhidden: true\nmaxdepth: 1\n---\noptimization_overview\nnumdiff_overview\nbayes_opt_tutorial\n```\n"
  },
  {
    "path": "docs/source/tutorials/numdiff_overview.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"# Numerical differentiation\\n\",\n    \"\\n\",\n    \"In this tutorial, you will learn how to numerically differentiate functions with\\n\",\n    \"optimagic.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"import numpy as np\\n\",\n    \"import pandas as pd\\n\",\n    \"\\n\",\n    \"import optimagic as om\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Basic usage of `first_derivative`\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"def fun(params):\\n\",\n    \"    return params @ params\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"fd = om.first_derivative(\\n\",\n    \"    func=fun,\\n\",\n    \"    params=np.arange(5),\\n\",\n    \")\\n\",\n    \"\\n\",\n    \"fd.derivative\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Basic usage of `second_derivative`\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"sd = om.second_derivative(\\n\",\n    \"    func=fun,\\n\",\n    \"    params=np.arange(5),\\n\",\n    \")\\n\",\n    \"\\n\",\n    \"sd.derivative.round(3)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## You can parallelize\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"fd = om.first_derivative(\\n\",\n    \"    func=fun,\\n\",\n    \"    params=np.arange(5),\\n\",\n    \"    n_cores=4,\\n\",\n   
 \")\\n\",\n    \"\\n\",\n    \"fd.derivative\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"sd = om.second_derivative(\\n\",\n    \"    func=fun,\\n\",\n    \"    params=np.arange(5),\\n\",\n    \"    n_cores=4,\\n\",\n    \")\\n\",\n    \"\\n\",\n    \"sd.derivative.round(3)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## `params` do not have to be vectors\\n\",\n    \"\\n\",\n    \"In optimagic, params can be arbitrary [pytrees](https://jax.readthedocs.io/en/latest/pytrees.html). Examples are (nested) dictionaries of numbers, arrays and pandas objects. \"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"def dict_fun(params):\\n\",\n    \"    return params[\\\"a\\\"] ** 2 + params[\\\"b\\\"] ** 2 + (params[\\\"c\\\"] ** 2).sum()\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"fd = om.first_derivative(\\n\",\n    \"    func=dict_fun,\\n\",\n    \"    params={\\\"a\\\": 0, \\\"b\\\": 1, \\\"c\\\": pd.Series([2, 3, 4])},\\n\",\n    \")\\n\",\n    \"\\n\",\n    \"fd.derivative\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"### Description of the output\\n\",\n    \"\\n\",\n    \"> Note. Understanding the output of the first and second derivative requires terminolgy\\n\",\n    \"> of pytrees. Please refer to the\\n\",\n    \"> [JAX documentation of pytrees](https://jax.readthedocs.io/en/latest/pytrees.html).\\n\",\n    \"\\n\",\n    \"The output tree of `first_derivative` has the same structure as the params tree.\\n\",\n    \"Equivalent to the 1-d numpy array case, where the gradient is a vector of shape\\n\",\n    \"`(len(params),)`. 
If, however, the params tree contains non-scalar entries like\\n\",\n    \"`numpy.ndarray`'s, `pandas.Series`', or `pandas.DataFrame`'s, the output is not expanded\\n\",\n    \"but a block is created instead. In the above example, the entry `params[\\\"c\\\"]` is a\\n\",\n    \"`pandas.Series` with 3 entries. Thus, the first derivative output contains the\\n\",\n    \"corresponding 3x1-block of the gradient at the position `[\\\"c\\\"]`:\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"fd.derivative[\\\"c\\\"]\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"sd = om.second_derivative(\\n\",\n    \"    func=dict_fun,\\n\",\n    \"    params={\\\"a\\\": 0, \\\"b\\\": 1, \\\"c\\\": pd.Series([2, 3, 4])},\\n\",\n    \")\\n\",\n    \"\\n\",\n    \"sd.derivative\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"### Description of the output\\n\",\n    \"\\n\",\n    \"> Note. Understanding the output of the first and second derivative requires terminolgy\\n\",\n    \"> of pytrees. Please refer to the\\n\",\n    \"> [JAX documentation of pytrees](https://jax.readthedocs.io/en/latest/pytrees.html).\\n\",\n    \"\\n\",\n    \"The output of `second_derivative` when using a general pytrees looks more complex but\\n\",\n    \"is easy once we remember that the second derivative is equivalent to applying the first\\n\",\n    \"derivative twice.\\n\",\n    \"\\n\",\n    \"The output tree is a product of the params tree with itself. This is equivalent to the\\n\",\n    \"1-d numpy array case, where the hessian is a matrix of shape\\n\",\n    \"`(len(params), len(params))`. 
If, however, the params tree contains non-scalar entries\\n\",\n    \"like `numpy.ndarray`'s, `pandas.Series`', or `pandas.DataFrame`'s, the output is not\\n\",\n    \"expanded but a block is created instead. In the above example, the entry `params[\\\"c\\\"]`\\n\",\n    \"is a 3-dimensional `pandas.Series`. Thus, the second derivative output contains the\\n\",\n    \"corresponding 3x3-block of the hessian at the position `[\\\"c\\\"][\\\"c\\\"]`:\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"sd.derivative[\\\"c\\\"][\\\"c\\\"].round(3)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## There are many options\\n\",\n    \"\\n\",\n    \"You can choose which finite difference method to use, whether we should respect\\n\",\n    \"parameter bounds, or whether to evaluate the function in parallel. Let's go through\\n\",\n    \"some basic examples. \"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## You can choose the difference method\\n\",\n    \"\\n\",\n    \"> Note. 
A mathematical explanation of the background of the difference methods can be\\n\",\n    \"> found on the corresponding [explanation page](../explanation/numdiff_background.md).\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"fd = om.first_derivative(\\n\",\n    \"    func=fun,\\n\",\n    \"    params=np.arange(5),\\n\",\n    \"    method=\\\"backward\\\",  # default: 'central'\\n\",\n    \")\\n\",\n    \"\\n\",\n    \"fd.derivative\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"sd = om.second_derivative(\\n\",\n    \"    func=fun,\\n\",\n    \"    params=np.arange(5),\\n\",\n    \"    method=\\\"forward\\\",  # default: 'central_cross'\\n\",\n    \")\\n\",\n    \"\\n\",\n    \"sd.derivative.round(3)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## You can add bounds  \"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"params = np.arange(5)\\n\",\n    \"\\n\",\n    \"fd = om.first_derivative(\\n\",\n    \"    func=fun,\\n\",\n    \"    params=params,\\n\",\n    \"    # forces first_derivative to use forward differences\\n\",\n    \"    bounds=om.Bounds(lower=params, upper=params + 1),\\n\",\n    \")\\n\",\n    \"\\n\",\n    \"fd.derivative\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"Of course, bounds also work in `second_derivative`.\"\n   ]\n  }\n ],\n \"metadata\": {\n  \"kernelspec\": {\n   \"display_name\": \"Python 3 (ipykernel)\",\n   \"language\": \"python\",\n   \"name\": \"python3\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 3\n   },\n   \"file_extension\": \".py\",\n   \"mimetype\": 
\"text/x-python\",\n   \"name\": \"python\",\n   \"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": \"ipython3\",\n   \"version\": \"3.10.14\"\n  },\n  \"vscode\": {\n   \"interpreter\": {\n    \"hash\": \"40d3a090f54c6569ab1632332b64b2c03c39dcf918b08424e98f38b5ae0af88f\"\n   }\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 4\n}\n"
  },
  {
    "path": "docs/source/tutorials/optimization_overview.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"# Numerical optimization\\n\",\n    \"\\n\",\n    \"Using simple examples, this tutorial shows how to do an optimization with optimagic. More details on the topics covered here can be found in the [how to guides](../how_to/index.md).\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"import numpy as np\\n\",\n    \"import pandas as pd\\n\",\n    \"import plotly.io as pio\\n\",\n    \"\\n\",\n    \"pio.renderers.default = \\\"notebook_connected\\\"\\n\",\n    \"\\n\",\n    \"import optimagic as om\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Basic usage of `minimize`\\n\",\n    \"\\n\",\n    \"The basic usage of `optimagic.minimize` is very similar to `scipy.optimize.minimize`\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"def sphere(params):\\n\",\n    \"    return params @ params\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"lbfgsb_res = om.minimize(\\n\",\n    \"    fun=sphere,\\n\",\n    \"    params=np.arange(5),\\n\",\n    \"    algorithm=\\\"scipy_lbfgsb\\\",\\n\",\n    \")\\n\",\n    \"\\n\",\n    \"lbfgsb_res.params.round(5)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## `params` do not have to be vectors\\n\",\n    \"\\n\",\n    \"In optimagic, params can by arbitrary [pytrees](https://jax.readthedocs.io/en/latest/pytrees.html). Examples are (nested) dictionaries of numbers, arrays and pandas objects. 
This is very useful if you have many parameters!\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"def dict_sphere(params):\\n\",\n    \"    return params[\\\"a\\\"] ** 2 + params[\\\"b\\\"] ** 2 + (params[\\\"c\\\"] ** 2).sum()\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"nm_res = om.minimize(\\n\",\n    \"    fun=dict_sphere,\\n\",\n    \"    params={\\\"a\\\": 0, \\\"b\\\": 1, \\\"c\\\": pd.Series([2, 3, 4])},\\n\",\n    \"    algorithm=\\\"scipy_neldermead\\\",\\n\",\n    \")\\n\",\n    \"\\n\",\n    \"nm_res.params\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## You can compare optimizers\\n\",\n    \"\\n\",\n    \"In practice, it is super hard to pick the right optimizer for your problem. With optimagic, you can simply try a few and compare their results!\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"results = {\\\"lbfgsb\\\": lbfgsb_res, \\\"nelder_mead\\\": nm_res}\\n\",\n    \"fig = om.criterion_plot(results, max_evaluations=300)\\n\",\n    \"fig.show()\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \":::{note}\\n\",\n    \"\\n\",\n    \"For details on using other plotting backends, see [How to change the plotting backend](../how_to/how_to_change_plotting_backend.ipynb).\\n\",\n    \"\\n\",\n    \":::\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"You can also zoom in on the history of specific parameters. This can be super helpful to diagnose problems in the optimization. 
\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"fig = om.params_plot(\\n\",\n    \"    nm_res,\\n\",\n    \"    max_evaluations=300,\\n\",\n    \"    # optionally select a subset of parameters to plot\\n\",\n    \"    selector=lambda params: params[\\\"c\\\"],\\n\",\n    \")\\n\",\n    \"fig.show()\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## There are many optimizers\\n\",\n    \"\\n\",\n    \"By default, optimagic comes with optimizers from scipy, including global optimizers \\n\",\n    \"and least-squares optimizers. But we also have wrappers for algorithms from **NlOpt**, \\n\",\n    \"**Pygmo**, as well as several optimizers from individual packages like **fides**, \\n\",\n    \"**ipopt**, **pybobyqa** and **dfols**. \\n\",\n    \"\\n\",\n    \"To use optimizers that are not from scipy, follow our [installation guide](../installation.md) for optional dependencies. To see which optimizers we have, check out the [full list](../algorithms.md).\\n\",\n    \"\\n\",\n    \"If you are missing your favorite optimizer in the list, let us know with an [issue](https://github.com/optimagic-dev/optimagic/issues)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Amazing autocomplete \\n\",\n    \"\\n\",\n    \"Assume you need a gradient-free optimizer that supports bounds on the parameters. Moreover, you have a fixed computational budget, so you want to set stopping options. \\n\",\n    \"\\n\",\n    \"In most optimizer libraries, you would have to spend a few minutes with the docs to find an optimizer that fits your needs and the stopping options it supports. 
In optimagic, all of this is discoverable in your editor!\\n\",\n    \"\\n\",\n    \"If you type `om.algos.`, your editor will show you all available optimizers and a list of categories you can use to filter the results. In our case, we select `GradientFree` and `Bounded`, and we could do that in any order we want.\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"![autocomplete_1](../_static/images/autocomplete_1.png)\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"After selecting one of the displayed algorithms, in our case `scipy_neldermead`, the editor shows all tuning parameters of that optimizer. If you start to type `stopping`, you will see all stopping criteria that are available.\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"![autocomplete_2](../_static/images/autocomplete_2.png)\\n\",\n    \"\\n\",\n    \"\\n\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Adding bounds\\n\",\n    \"\\n\",\n    \"As any optimizer library, optimagic lets you specify bounds for the parameters.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"bounds = om.Bounds(lower=np.arange(5) - 2, upper=np.array([10, 10, 10, np.inf, np.inf]))\\n\",\n    \"\\n\",\n    \"res = om.minimize(\\n\",\n    \"    fun=sphere,\\n\",\n    \"    params=np.arange(5),\\n\",\n    \"    algorithm=\\\"scipy_lbfgsb\\\",\\n\",\n    \"    bounds=bounds,\\n\",\n    \")\\n\",\n    \"\\n\",\n    \"res.params.round(5)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Fixing parameters \\n\",\n    \"\\n\",\n    \"On top of bounds, you can also fix one or more parameters during the optimization. 
\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"res = om.minimize(\\n\",\n    \"    fun=sphere,\\n\",\n    \"    params=np.arange(5),\\n\",\n    \"    algorithm=\\\"scipy_lbfgsb\\\",\\n\",\n    \"    constraints=om.FixedConstraint(selector=lambda params: params[[1, 3]]),\\n\",\n    \")\\n\",\n    \"\\n\",\n    \"res.params.round(5)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Other constraints\\n\",\n    \"\\n\",\n    \"As an example, let's impose the constraint that the first three parameters are valid probabilities, i.e. they are between zero and one and sum to one:\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"res = om.minimize(\\n\",\n    \"    fun=sphere,\\n\",\n    \"    params=np.array([0.1, 0.5, 0.4, 4, 5]),\\n\",\n    \"    algorithm=\\\"scipy_lbfgsb\\\",\\n\",\n    \"    constraints=om.ProbabilityConstraint(selector=lambda params: params[:3]),\\n\",\n    \")\\n\",\n    \"\\n\",\n    \"res.params.round(5)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"For a full overview of the constraints we support and the corresponding syntaxes, check out [the documentation](../how_to/how_to_constraints.md).\\n\",\n    \"\\n\",\n    \"Note that `\\\"scipy_lbfgsb\\\"` is not a constrained optimizer. 
If you want to know how we achieve this, check out [the explanations](../explanation/implementation_of_constraints.md).\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## There is also maximize\\n\",\n    \"\\n\",\n    \"If you ever forgot to switch back the sign of your criterion function after doing a maximization with `scipy.optimize.minimize`, there is good news:\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"def upside_down_sphere(params):\\n\",\n    \"    return -params @ params\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"res = om.maximize(\\n\",\n    \"    fun=upside_down_sphere,\\n\",\n    \"    params=np.arange(5),\\n\",\n    \"    algorithm=\\\"scipy_bfgs\\\",\\n\",\n    \")\\n\",\n    \"\\n\",\n    \"res.params.round(5)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"optimagic got your back.\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Speeding up your optimization with derivatives \\n\",\n    \"\\n\",\n    \"You can speed up your optimization by providing closed form derivatives. 
Those derivatives can be hand-coded or calculated with JAX!\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"def sphere_gradient(params):\\n\",\n    \"    return 2 * params\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"res = om.minimize(\\n\",\n    \"    fun=sphere,\\n\",\n    \"    params=np.arange(5),\\n\",\n    \"    algorithm=\\\"scipy_lbfgsb\\\",\\n\",\n    \"    jac=sphere_gradient,\\n\",\n    \")\\n\",\n    \"res.params.round(5)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"Alternatively, you can let optimagic calculate numerical derivatives with parallelized finite differences. This is very handy if you do not want to invest the time to derive the derivatives of your criterion function. \"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"res = om.minimize(\\n\",\n    \"    fun=sphere,\\n\",\n    \"    params=np.arange(5),\\n\",\n    \"    algorithm=\\\"scipy_lbfgsb\\\",\\n\",\n    \"    numdiff_options=om.NumdiffOptions(n_cores=6),\\n\",\n    \")\\n\",\n    \"\\n\",\n    \"res.params.round(5)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"For more details and examples check out [how-to speed up your optimization with derivatives](../how_to/how_to_derivatives.ipynb)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Turn local optimizers global with multistart\\n\",\n    \"\\n\",\n    \"Multistart optimization requires finite soft bounds on all parameters. 
Those bounds will\\n\",\n    \"be used for sampling but not enforced during optimization.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"bounds = om.Bounds(soft_lower=np.full(10, -5), soft_upper=np.full(10, 15))\\n\",\n    \"\\n\",\n    \"res = om.minimize(\\n\",\n    \"    fun=sphere,\\n\",\n    \"    params=np.arange(10),\\n\",\n    \"    algorithm=\\\"scipy_neldermead\\\",\\n\",\n    \"    bounds=bounds,\\n\",\n    \"    multistart=om.MultistartOptions(convergence_max_discoveries=5),\\n\",\n    \")\\n\",\n    \"res.params.round(5)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## And plot the history of all local optimizations\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"fig = om.criterion_plot(res)\\n\",\n    \"fig.show()\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Exploit the structure of your optimization problem\\n\",\n    \"\\n\",\n    \"Many estimation problems have a least-squares structure. If so, specialized optimizers that exploit this structure can be much faster than standard optimizers. The `sphere` function from above is the simplest possible least-squares problem you could imagine: the least-squares residuals are just the params. \\n\",\n    \"\\n\",\n    \"To use least-squares optimizers in optimagic, you need to mark your function with \\n\",\n    \"a decorator and return the least-squares residuals instead of the aggregated function value. 
\\n\",\n    \"\\n\",\n    \"More details can be found [here](../how_to/how_to_criterion_function.md)\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"@om.mark.least_squares\\n\",\n    \"def ls_sphere(params):\\n\",\n    \"    return params\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"res = om.minimize(\\n\",\n    \"    fun=ls_sphere,\\n\",\n    \"    params=np.arange(5),\\n\",\n    \"    algorithm=\\\"pounders\\\",\\n\",\n    \")\\n\",\n    \"res.params.round(5)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"Of course, any least-squares problem can also be solved with a standard optimizer. \\n\",\n    \"\\n\",\n    \"There are also specialized optimizers for likelihood functions. \"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Using and reading persistent logging\\n\",\n    \"\\n\",\n    \"For long-running and difficult optimizations, it can be worthwhile to store the progress in a persistent log file. 
You can do this by providing a path to the `logging` argument:\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"res = om.minimize(\\n\",\n    \"    fun=sphere,\\n\",\n    \"    params=np.arange(5),\\n\",\n    \"    algorithm=\\\"scipy_lbfgsb\\\",\\n\",\n    \"    logging=\\\"my_log.db\\\",\\n\",\n    \"    log_options={\\\"if_database_exists\\\": \\\"replace\\\"},\\n\",\n    \")\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"You can read the entries in the log file (while the optimization is still running or after it has finished) as follows:\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"reader = om.OptimizeLogReader(\\\"my_log.db\\\")\\n\",\n    \"reader.read_history().keys()\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"For more information on what you can do with the log file and LogReader object, check out [the logging tutorial](../how_to/how_to_logging.ipynb)\\n\",\n    \"\\n\",\n    \"The persistent log file is always instantly synchronized when the optimizer tries a new parameter vector. This is very handy if an optimization has to be aborted and you want to extract the current status. It can be displayed in `criterion_plot` and `params_plot`, even while the optimization is running. 
\"\n   ]\n  }\n ],\n \"metadata\": {\n  \"kernelspec\": {\n   \"display_name\": \"Python 3 (ipykernel)\",\n   \"language\": \"python\",\n   \"name\": \"python3\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 3\n   },\n   \"file_extension\": \".py\",\n   \"mimetype\": \"text/x-python\",\n   \"name\": \"python\",\n   \"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": \"ipython3\",\n   \"version\": \"3.10.14\"\n  },\n  \"vscode\": {\n   \"interpreter\": {\n    \"hash\": \"40d3a090f54c6569ab1632332b64b2c03c39dcf918b08424e98f38b5ae0af88f\"\n   }\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 4\n}\n"
  },
  {
    "path": "docs/source/videos.md",
    "content": "(list_of_videos)=\n\n# Videos\n\nCheck out our tutorials, talks and screencasts about optimagic.\n\n## Talks and tutorials\n\n### EuroSciPy 2023 (Talk)\n\n```{raw} html\n<iframe\nsrc=\"https://www.youtube.com/embed/5xYn0v1zEsY\"\nstyle=\"width: 100%; aspect-ratio: 16 / 9;\"\nallowfullscreen>\n</iframe>\n```\n\n### EuroSciPy 2023 (Tutorial)\n\n```{raw} html\n<iframe\nsrc=\"https://www.youtube.com/embed/LQo5NDFKH1Q\"\nstyle=\"width: 100%; aspect-ratio: 16 / 9;\"\nallowfullscreen>\n</iframe>\n```\n\n### SciPy 2022 (Tutorial)\n\n```{raw} html\n<iframe\nsrc=\"https://www.youtube.com/embed/ftlw0rARrtI\"\nstyle=\"width: 100%; aspect-ratio: 16 / 9;\"\nallowfullscreen>\n</iframe>\n```\n\n## Screencasts\n\nThe screencasts are part of the course _Effective Programming Practices for Economists_,\ntaught at the University of Bonn by\n[Hans-Martin von Gaudecker](https://www.wiwi.uni-bonn.de/gaudecker/), and previously\nalso [Janoś Gabler](https://github.com/janosg). You can find all screencasts of the\ncourse on the\n[course website](https://effective-programming-practices.vercel.app/landing-page.html).\nHere, we show the screencasts about numerical optimization and optimagic.\n\n### Introduction to numerical optimization\n\n```{raw} html\n<iframe\nsrc=\"https://www.youtube.com/embed/hOZueB4Cn1Y\"\nstyle=\"width: 100%; aspect-ratio: 16 / 9;\"\nallowfullscreen>\n</iframe>\n```\n\n### Using optimagic’s minimize and maximize\n\n```{raw} html\n<iframe\nsrc=\"https://www.youtube.com/embed/QqTGE3nq0q8\"\nstyle=\"width: 100%; aspect-ratio: 16 / 9;\"\nallowfullscreen>\n</iframe>\n```\n\n### Visualizing optimizer histories\n\n```{raw} html\n<iframe\nsrc=\"https://www.youtube.com/embed/wQWWW8rlxmY\"\nstyle=\"width: 100%; aspect-ratio: 16 / 9;\"\nallowfullscreen>\n</iframe>\n```\n\n### Choosing optimization algorithms\n\n```{raw} html\n<iframe\nsrc=\"https://www.youtube.com/embed/tJ7Xba3wcxY\"\nstyle=\"width: 100%; aspect-ratio: 16 / 9;\"\nallowfullscreen>\n</iframe>\n```\n"
  },
  {
    "path": "pyproject.toml",
    "content": "# ======================================================================================\n# Project metadata\n# ======================================================================================\n[project]\nname = \"optimagic\"\ndescription = \"Tools to solve difficult numerical optimization problems.\"\nrequires-python = \">=3.12\"\ndependencies = [\n    \"cloudpickle>=2.2\",\n    \"joblib>=1.1\",\n    \"numpy>=1.26\",\n    \"pandas>=2.1\",\n    \"plotly>=5.14\",\n    \"pybaum>=0.1.2\",\n    \"scipy>=1.11\",\n    \"sqlalchemy>=2.0\",\n    \"annotated-types>=0.4\",\n    \"typing-extensions>=4.5\",\n]\ndynamic = [\"version\"]\nkeywords = [\n    \"nonlinear optimization\",\n    \"optimization\",\n    \"derivative free optimization\",\n    \"global optimization\",\n    \"parallel optimization\",\n    \"statistics\",\n    \"estimation\",\n    \"extremum estimation\",\n    \"inference\",\n    \"numerical differentiation\",\n    \"finite differences\",\n    \"method of simulated moments\",\n    \"maximum likelihood\",\n]\nclassifiers = [\n    \"Development Status :: 5 - Production/Stable\",\n    \"Intended Audience :: Science/Research\",\n    \"License :: OSI Approved :: MIT License\",\n    \"Operating System :: MacOS :: MacOS X\",\n    \"Operating System :: Microsoft :: Windows\",\n    \"Operating System :: POSIX\",\n    \"Programming Language :: Python :: 3.12\",\n    \"Programming Language :: Python :: 3.13\",\n    \"Programming Language :: Python :: 3.14\",\n    \"Topic :: Scientific/Engineering\",\n]\nauthors = [\n    { name = \"Janos Gabler\", email = \"janos.gabler@gmail.com\" },\n]\nmaintainers = [\n    { name = \"Janos Gabler\", email = \"janos.gabler@gmail.com\" },\n    { name = \"Tim Mensinger\", email = \"mensingertim@gmail.com\" },\n]\n\n[project.readme]\nfile = \"README.md\"\ncontent-type = \"text/markdown\"\n\n[project.license]\ntext = \"MIT\"\n\n[project.urls]\nRepository = \"https://github.com/optimagic-dev/optimagic\"\nGithub = 
\"https://github.com/optimagic-dev/optimagic\"\nTracker = \"https://github.com/optimagic-dev/optimagic/issues\"\n\n\n# ======================================================================================\n# Build system configuration\n# ======================================================================================\n[build-system]\nrequires = [\"hatchling\", \"hatch_vcs\"]\nbuild-backend = \"hatchling.build\"\n\n[tool.hatch.build.hooks.vcs]\nversion-file = \"src/optimagic/_version.py\"\n\n[tool.hatch.build.targets.sdist]\nexclude = [\"tests\"]\nonly-packages = true\n\n[tool.hatch.build.targets.wheel]\nonly-include = [\"src\"]\nsources = [\"src\"]\n\n[tool.hatch.version]\nsource = \"vcs\"\n\n[tool.hatch.metadata]\nallow-direct-references = true\n\n\n# ======================================================================================\n# Ruff configuration\n# ======================================================================================\n[tool.ruff]\ntarget-version = \"py312\"\nfix = true\n\n[tool.ruff.lint]\nselect = [\n  # isort\n  \"I\",\n  # pyflakes\n  \"F\",\n  # pycodestyle\n  \"E\",\n  \"W\",\n  # flake8-2020\n  \"YTT\",\n  # flake8-bugbear\n  \"B\",\n  # flake8-quotes\n  \"Q\",\n  # pylint\n  \"PLE\", \"PLR\", \"PLW\",\n  # misc lints\n  \"PIE\",\n  # tidy imports\n  \"TID\",\n  # implicit string concatenation\n  \"ISC\",\n  # pydocstyle\n  \"D\",\n]\n\nextend-ignore = [\n\n  # Missing docstrings — not enforced for now\n  \"D100\",  # public module\n  \"D101\",  # public class\n  \"D102\",  # public method\n  \"D103\",  # public function\n  \"D104\",  # public package\n  \"D105\",  # magic method\n  \"D107\",  # __init__\n  # Docstring content/style rules — too noisy to enforce for now\n  \"D205\",  # blank line between summary and description\n  \"D414\",  # section has no body\n  \"D415\",  # first line punctuation\n  \"D417\",  # missing argument descriptions\n  # allow module import not at top of file, important for notebooks\n  
\"E402\",\n  # do not assign a lambda expression, use a def\n  \"E731\",\n  # Too many arguments to function call\n  \"PLR0913\",\n  # Too many returns\n  \"PLR0911\",\n  # Too many branches\n  \"PLR0912\",\n  # Too many statements\n  \"PLR0915\",\n  # Magic number\n  \"PLR2004\",\n  # Consider `elif` instead of `else` then `if` to remove indentation level\n  \"PLR5501\",\n  # For calls to warnings.warn(): No explicit `stacklevel` keyword argument found\n  \"B028\",\n  # Incompatible with formatting\n  \"ISC001\",\n]\n\n[tool.ruff.lint.per-file-ignores]\n\"docs/source/conf.py\" = [\"E501\", \"ERA001\", \"DTZ005\"]\n\"src/optimagic/parameters/kernel_transformations.py\" = [\"ARG001\", \"N806\"]\n\"docs/source/*\" = [\"B018\"]\n\"src/optimagic/algorithms.py\" = [\"E501\"]\n\n[tool.ruff.lint.pydocstyle]\nconvention = \"google\"\n\n\n# ======================================================================================\n# Pytest configuration\n# ======================================================================================\n[tool.pytest.ini_options]\nfilterwarnings = [\n    \"ignore:Using or importing the ABCs from 'collections'\",\n    \"ignore:the imp module is deprecated\",\n    \"ignore:Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.\",\n    \"ignore:In a future version of pandas all arguments of concat except for the argument 'objs' will be keyword-only\",\n    \"ignore:Please use `MemoizeJac` from the `scipy.optimize` namespace\",\n    \"ignore:`scipy.optimize.optimize.MemoizeJac` is deprecated\",\n    \"ignore:Some algorithms did not converge. Their walltime has been set to a very high value instead of infinity because Timedeltas do notsupport infinite values\",\n    \"ignore:In a future version, the Index constructor will not infer numeric dtypes when passed object-dtype sequences\",\n    \"ignore:distutils Version classes are deprecated. 
Use packaging.version instead\",\n    \"ignore:Standard matrix inversion failed due to LinAlgError\",\n    \"ignore:delta_grad == 0.0\",\n    \"ignore:Widget._active_widgets is deprecated\",\n    \"ignore:Widget._widget_types is deprecated\",\n    \"ignore:Widget.widget_types is deprecated\",\n    \"ignore:Widget.widgets is deprecated\",\n    \"ignore:Parallelization together with\",\n    \"ignore:Conversion of an array with ndim > 0 to a scalar is deprecated\",\n    \"ignore:The following exception was caught when evaluating\",\n    \"ignore:The following exception was caught when calculating\",\n    \"ignore:Usage of the parameter log_options\",\n]\naddopts = [\"--doctest-modules\", \"--pdbcls=pdbp:Pdb\"]\nmarkers = [\n    \"wip: Tests that are work-in-progress.\",\n    \"slow: Tests that take a long time to run and are skipped in continuous integration.\",\n    \"jax: Tests that require jax to be installed and are skipped on non-Linux systems.\",\n]\nnorecursedirs = [\"docs\", \".tools\"]\n\n\n# ======================================================================================\n# Misc configuration\n# ======================================================================================\n[tool.yamlfix]\nline_length = 88\nsequence_style = \"block_style\"\nnone_representation = \"null\"\n\n\n# ======================================================================================\n# Mypy configuration\n# ======================================================================================\n[tool.mypy]\nfiles = [\"src\", \"tests\", \".tools\"]\ncheck_untyped_defs = true\ndisallow_any_generics = true\ndisallow_untyped_defs = true\ndisallow_incomplete_defs = true\nno_implicit_optional = true\nwarn_redundant_casts = true\nwarn_unused_ignores = true\n\n[[tool.mypy.overrides]]\nmodule = [\n    \"optimagic.benchmarking\",\n    \"optimagic.benchmarking.benchmark_reports\",\n    \"optimagic.benchmarking.cartis_roberts\",\n    
\"optimagic.benchmarking.get_benchmark_problems\",\n    \"optimagic.benchmarking.more_wild\",\n    \"optimagic.benchmarking.noise_distributions\",\n    \"optimagic.benchmarking.process_benchmark_results\",\n    \"optimagic.benchmarking.run_benchmark\",\n\n    \"optimagic.differentiation\",\n    \"optimagic.differentiation.derivatives\",\n    \"optimagic.differentiation.finite_differences\",\n    \"optimagic.differentiation.generate_steps\",\n    \"optimagic.differentiation.richardson_extrapolation\",\n\n    \"optimagic.examples\",\n    \"optimagic.examples.numdiff_functions\",\n\n    \"optimagic.optimization\",\n    \"optimagic.optimization.algo_options\",\n    \"optimagic.optimization.convergence_report\",\n    \"optimagic.optimization.optimization_logging\",\n    \"optimagic.optimization.optimize_result\",\n    \"optimagic.optimization.optimize\",\n    \"optimagic.optimization.multistart\",\n    \"optimagic.optimization.scipy_aliases\",\n    \"optimagic.optimization.create_optimization_problem\",\n\n    \"optimagic.optimizers._pounders\",\n    \"optimagic.optimizers._pounders.pounders_auxiliary\",\n    \"optimagic.optimizers._pounders.pounders_history\",\n    \"optimagic.optimizers._pounders._conjugate_gradient\",\n    \"optimagic.optimizers._pounders._steihaug_toint\",\n    \"optimagic.optimizers._pounders._trsbox\",\n    \"optimagic.optimizers._pounders.bntr\",\n    \"optimagic.optimizers._pounders.gqtpar\",\n    \"optimagic.optimizers._pounders.linear_subsolvers\",\n\n    \"optimagic.optimizers\",\n    \"optimagic.optimizers.tranquilo\",\n    \"optimagic.optimizers.pygmo_optimizers\",\n    \"optimagic.optimizers.scipy_optimizers\",\n    \"optimagic.optimizers.nag_optimizers\",\n    \"optimagic.optimizers.neldermead\",\n    \"optimagic.optimizers.nlopt_optimizers\",\n    \"optimagic.optimizers.ipopt\",\n    \"optimagic.optimizers.fides\",\n    \"optimagic.optimizers.pounders\",\n    \"optimagic.optimizers.tao_optimizers\",\n\n\n    \"optimagic.parameters\",\n   
 \"optimagic.parameters.block_trees\",\n    \"optimagic.parameters.check_constraints\",\n    \"optimagic.parameters.consolidate_constraints\",\n    \"optimagic.parameters.constraint_tools\",\n    \"optimagic.parameters.conversion\",\n    \"optimagic.parameters.kernel_transformations\",\n    \"optimagic.parameters.nonlinear_constraints\",\n    \"optimagic.parameters.process_constraints\",\n    \"optimagic.parameters.process_selectors\",\n    \"optimagic.parameters.space_conversion\",\n    \"optimagic.parameters.tree_conversion\",\n    \"optimagic.parameters.tree_registry\",\n\n\n    \"optimagic.shared\",\n    \"optimagic.shared.check_option_dicts\",\n    \"optimagic.shared.compat\",\n    \"optimagic.shared.process_user_function\",\n\n    \"optimagic.visualization\",\n    \"optimagic.visualization.convergence_plot\",\n    \"optimagic.visualization.backends\",\n    \"optimagic.visualization.deviation_plot\",\n    \"optimagic.visualization.history_plots\",\n    \"optimagic.visualization.plotting_utilities\",\n    \"optimagic.visualization.profile_plot\",\n    \"optimagic.visualization.slice_plot\",\n\n    \"optimagic\",\n    \"optimagic.decorators\",\n    \"optimagic.exceptions\",\n    \"optimagic.utilities\",\n    \"optimagic.deprecations\",\n\n    \"estimagic\",\n    \"estimagic.examples\",\n    \"estimagic.examples.logit\",\n    \"estimagic.estimate_ml\",\n    \"estimagic.estimate_msm\",\n    \"estimagic.estimation_summaries\",\n    \"estimagic.msm_weighting\",\n    \"estimagic.bootstrap_ci\",\n    \"estimagic.bootstrap_helpers\",\n    \"estimagic.bootstrap_outcomes\",\n    \"estimagic.bootstrap_samples\",\n    \"estimagic.bootstrap\",\n    \"estimagic.ml_covs\",\n    \"estimagic.msm_covs\",\n    \"estimagic.shared_covs\",\n    \"estimagic.msm_sensitivity\",\n    \"estimagic.estimation_table\",\n    \"estimagic.lollipop_plot\",\n\n]\ncheck_untyped_defs = false\ndisallow_any_generics = false\ndisallow_untyped_defs = false\n\n\n[[tool.mypy.overrides]]\nmodule = 
\"tests.*\"\ndisallow_untyped_defs = false\nignore_errors = true\n\n[[tool.mypy.overrides]]\nmodule = [\n    \"pybaum\",\n    \"scipy\",\n    \"scipy.linalg\",\n    \"scipy.linalg.lapack\",\n    \"scipy.stats\",\n    \"scipy.optimize\",\n    \"scipy.ndimage\",\n    \"scipy.optimize._trustregion_exact\",\n    \"plotly\",\n    \"plotly.graph_objects\",\n    \"plotly.express\",\n    \"plotly.subplots\",\n    \"matplotlib\",\n    \"matplotlib.pyplot\",\n    \"cyipopt\",\n    \"nlopt\",\n    \"bokeh\",\n    \"bokeh.layouts\",\n    \"bokeh.models\",\n    \"bokeh.plotting\",\n    \"bokeh.application\",\n    \"bokeh.application.handlers\",\n    \"bokeh.application.handlers.function\",\n    \"bokeh.server\",\n    \"bokeh.server.server\",\n    \"bokeh.command\",\n    \"bokeh.command.util\",\n    \"fides\",\n    \"petsc4py\",\n    \"petsc4py.PETSc\",\n    \"tranquilo\",\n    \"tranquilo.tranquilo\",\n    \"tranquilo.options\",\n    \"tranquilo.process_arguments\",\n    \"dfols\",\n    \"pybobyqa\",\n    \"pygmo\",\n    \"jax\",\n    \"joblib\",\n    \"cloudpickle\",\n    \"numba\",\n    \"pathos\",\n    \"pathos.pools\",\n    \"optimagic._version\",\n    \"annotated_types\",\n    \"pdbp\",\n    \"iminuit\",\n    \"nevergrad\",\n    \"nevergrad.optimization.base\",\n    \"pygad\",\n    \"pyswarms\",\n    \"pyswarms.backend.topology\",\n    \"yaml\",\n    \"gradient_free_optimizers\",\n    \"gradient_free_optimizers.optimizers.base_optimizer\",\n  ]\nignore_missing_imports = true\n\n\n# ======================================================================================\n# Pixi configuration\n# ======================================================================================\n[tool.pixi.workspace]\nchannels = [\"conda-forge\"]\nplatforms = [\"linux-64\", \"osx-arm64\", \"win-64\"]\n\n# --- Base dependencies (all environments) -------------------------------------------\n[tool.pixi.dependencies]\npython = \">=3.12,<3.15\"\n# Prefer conda-forge builds for compiled 
scientific packages\nnumpy = \">=2.0.0\"\nscipy = \">=1.11\"\npandas = \">=2.1,<3\"\njupyterlab = \">=4.0\"\ncyipopt = \">=1.4.0\"\npygmo = \">=2.19.0\"\nnlopt = \">=2.7\"\nstatsmodels = \">=0.14\"\nmatplotlib = \">=3.8\"\nbokeh = \">=3.2\"\naltair = \">=5.1\"\nseaborn = \">=0.13\"\npyyaml = \">=6.0.1\"\njinja2 = \">=3.1\"\ntranquilo = \">=0.1.1\"\niminuit = \">=2.25\"\ncma = \">=3.3\"\npygad = \">=3.2\"\npytorch-cpu = \">=2.2\"\nruff = \">=0.15.5,<0.16\"\n\n[tool.pixi.pypi-dependencies]\noptimagic = { path = \".\", editable = true }\npdbp = \"*\"\nbayesian-optimization = \">=1.4\"\ndfo-ls = \">=1.5.3\"\npy-bobyqa = \">=1.3\"\nfides = \"==0.7.4\"\nkaleido = \">=0.2.1\"\ngradient-free-optimizers = \">=1.6.0\"\npyswarms = \">=1.3\"\n\n# --- Python version features --------------------------------------------------------\n[tool.pixi.feature.py312.dependencies]\npython = \"~=3.12.0\"\n[tool.pixi.feature.py313.dependencies]\npython = \"~=3.13.0\"\n[tool.pixi.feature.py314.dependencies]\npython = \"~=3.14.0\"\n\n# --- Feature: tests (test infrastructure) --------------------------------------------\n[tool.pixi.feature.tests.dependencies]\npytest = \">=7.2\"\npytest-cov = \">=4.0\"\npytest-xdist = \">=3.2\"\n\n[tool.pixi.feature.tests.tasks]\ntests = { cmd = \"pytest\", description = \"Run the full test suite\" }\ntests-fast = { cmd = \"pytest -m 'not slow and not jax'\", description = \"Run tests excluding slow and jax tests\" }\ntests-with-cov = { cmd = \"pytest --cov-report=xml --cov=src\", description = \"Run tests with XML coverage report\" }\n\n# --- Feature: type-checking (mypy + type stubs) --------------------------------------\n[tool.pixi.feature.type-checking.dependencies]\nmypy = \"==1.19.1\"\n\n[tool.pixi.feature.type-checking.pypi-dependencies]\npandas-stubs = \"*\"\ntypes-cffi = \"*\"\ntypes-openpyxl = \"*\"\ntypes-jinja2 = \"*\"\nsqlalchemy-stubs = \"*\"\n\n[tool.pixi.feature.type-checking.tasks]\nmypy = { cmd = \"mypy\", description = \"Run mypy type 
checker\" }\n\n# --- Feature: linux (Linux-only deps) ------------------------------------------------\n[tool.pixi.feature.linux]\nplatforms = [\"linux-64\"]\n[tool.pixi.feature.linux.dependencies]\njax = \">=0.4.8\"\npetsc4py = \">=3.18\"\n\n# --- Feature: docs -------------------------------------------------------------------\n[tool.pixi.feature.docs.dependencies]\nsphinx = \">=8.2.3\"\nsphinxcontrib-bibtex = \"*\"\nsphinx-copybutton = \"*\"\nsphinx-design = \"*\"\nsphinx-llm = \"*\"\nsphinx-llms-txt = \"*\"\nipython = \"*\"\nipython_genutils = \"*\"\nmyst-nb = \"*\"\nfuro = \"*\"\nanywidget = \"*\"\npatsy = \"*\"\n\n[tool.pixi.feature.docs.pypi-dependencies]\nsphinxcontrib-mermaid = \"*\"\nintersphinx-registry = \"*\"\n\n[tool.pixi.feature.docs.tasks]\nbuild-docs = { cmd = \"make html\", cwd = \"docs\", description = \"Build the HTML documentation\" }\n\n# --- Compat features (version overrides for backward-compat CI) ---------------------\n[tool.pixi.feature.old-plotly.dependencies]\nplotly = \"<6\"\n\n[tool.pixi.feature.old-plotly.pypi-dependencies]\nkaleido = \"<0.3\"\n\n[tool.pixi.feature.nevergrad.pypi-dependencies]\nbayesian-optimization = \"==1.4.0\"\nnevergrad = \"*\"\n\n# --- Environments --------------------------------------------------------------------\n[tool.pixi.environments]\n# Default dev environment (Python 3.14, everything)\ndefault = { features = [\"tests\", \"py314\"], solve-group = \"py314\" }\n# Linux CI (with JAX + PETSc)\ntests-linux-py312 = { features = [\"tests\", \"linux\", \"py312\"], solve-group = \"linux-py312\" }\ntests-linux-py313 = { features = [\"tests\", \"linux\", \"py313\"], solve-group = \"linux-py313\" }\ntests-linux-py314 = { features = [\"tests\", \"linux\", \"py314\"], solve-group = \"linux-py314\" }\n# macOS/Windows CI\ntests-py312 = { features = [\"tests\", \"py312\"], solve-group = \"py312\" }\ntests-py313 = { features = [\"tests\", \"py313\"], solve-group = \"py313\" }\ntests-py314 = { features = [\"tests\", 
\"py314\"], solve-group = \"py314\" }\n# Backward-compat CI (Python 3.12, lowest supported)\ntests-old-plotly = { features = [\"tests\", \"old-plotly\", \"py312\"] }\n# Nevergrad CI (bayesian-optimization==1.4.0)\ntests-nevergrad-py312 = { features = [\"tests\", \"nevergrad\", \"py312\"] }\ntests-nevergrad-py313 = { features = [\"tests\", \"nevergrad\", \"py313\"] }\ntests-nevergrad-py314 = { features = [\"tests\", \"nevergrad\", \"py314\"] }\n# Type checking\ntype-checking = { features = [\"type-checking\", \"py314\"], solve-group = \"py314\" }\n# Docs\ndocs = { features = [\"docs\", \"py314\"], solve-group = \"py314\" }\n"
  },
  {
    "path": "src/estimagic/__init__.py",
    "content": "import warnings\nfrom dataclasses import dataclass\n\nfrom estimagic import utilities\nfrom estimagic.bootstrap import BootstrapResult, bootstrap\nfrom estimagic.estimate_ml import LikelihoodResult, estimate_ml\nfrom estimagic.estimate_msm import MomentsResult, estimate_msm\nfrom estimagic.estimation_table import (\n    estimation_table,\n    render_html,\n    render_latex,\n)\nfrom estimagic.lollipop_plot import lollipop_plot\nfrom estimagic.msm_weighting import get_moments_cov\nfrom optimagic import OptimizeLogReader as _OptimizeLogReader\nfrom optimagic import OptimizeResult as _OptimizeResult\nfrom optimagic import __version__\nfrom optimagic import check_constraints as _check_constraints\nfrom optimagic import convergence_plot as _convergence_plot\nfrom optimagic import convergence_report as _convergence_report\nfrom optimagic import count_free_params as _count_free_params\nfrom optimagic import criterion_plot as _criterion_plot\nfrom optimagic import first_derivative as _first_derivative\nfrom optimagic import get_benchmark_problems as _get_benchmark_problems\nfrom optimagic import maximize as _maximize\nfrom optimagic import minimize as _minimize\nfrom optimagic import params_plot as _params_plot\nfrom optimagic import profile_plot as _profile_plot\nfrom optimagic import rank_report as _rank_report\nfrom optimagic import run_benchmark as _run_benchmark\nfrom optimagic import second_derivative as _second_derivative\nfrom optimagic import slice_plot as _slice_plot\nfrom optimagic import traceback_report as _traceback_report\nfrom optimagic.decorators import deprecated\n\nMSG = (\n    \"estimagic.{name} has been deprecated in version 0.5.0. Use optimagic.{name} \"\n    \"instead. 
This function will be removed in version 0.6.0.\"\n)\n\nminimize = deprecated(_minimize, MSG.format(name=\"minimize\"))\nmaximize = deprecated(_maximize, MSG.format(name=\"maximize\"))\nfirst_derivative = deprecated(_first_derivative, MSG.format(name=\"first_derivative\"))\nsecond_derivative = deprecated(_second_derivative, MSG.format(name=\"second_derivative\"))\nrun_benchmark = deprecated(_run_benchmark, MSG.format(name=\"run_benchmark\"))\nget_benchmark_problems = deprecated(\n    _get_benchmark_problems, MSG.format(name=\"get_benchmark_problems\")\n)\nconvergence_report = deprecated(\n    _convergence_report, MSG.format(name=\"convergence_report\")\n)\nrank_report = deprecated(_rank_report, MSG.format(name=\"rank_report\"))\ntraceback_report = deprecated(_traceback_report, MSG.format(name=\"traceback_report\"))\nprofile_plot = deprecated(_profile_plot, MSG.format(name=\"profile_plot\"))\nconvergence_plot = deprecated(_convergence_plot, MSG.format(name=\"convergence_plot\"))\nslice_plot = deprecated(_slice_plot, MSG.format(name=\"slice_plot\"))\ncheck_constraints = deprecated(_check_constraints, MSG.format(name=\"check_constraints\"))\ncount_free_params = deprecated(_count_free_params, MSG.format(name=\"count_free_params\"))\ncriterion_plot = deprecated(_criterion_plot, MSG.format(name=\"criterion_plot\"))\nparams_plot = deprecated(_params_plot, MSG.format(name=\"params_plot\"))\n\n\nclass OptimizeLogReader(_OptimizeLogReader):\n    def __init__(self, path):\n        warnings.warn(\n            \"estimagic.OptimizeLogReader has been deprecated in version 0.5.0. Use \"\n            \"optimagic.OptimizeLogReader instead. This class will be removed in version\"\n            \" 0.6.0.\",\n            FutureWarning,\n        )\n        super().__init__(path)\n\n\n@dataclass\nclass OptimizeResult(_OptimizeResult):\n    def __post_init__(self):\n        warnings.warn(\n            \"estimagic.OptimizeResult has been deprecated in version 0.5.0. 
Use \"\n            \"optimagic.OptimizeResult instead. This class will be removed in version \"\n            \"0.6.0.\",\n            FutureWarning,\n        )\n\n\n__all__ = [\n    \"LikelihoodResult\",\n    \"estimate_ml\",\n    \"MomentsResult\",\n    \"estimate_msm\",\n    \"BootstrapResult\",\n    \"bootstrap\",\n    \"get_moments_cov\",\n    \"estimation_table\",\n    \"render_html\",\n    \"render_latex\",\n    \"utilities\",\n    \"minimize\",\n    \"maximize\",\n    \"first_derivative\",\n    \"second_derivative\",\n    \"run_benchmark\",\n    \"get_benchmark_problems\",\n    \"profile_plot\",\n    \"convergence_plot\",\n    \"convergence_report\",\n    \"rank_report\",\n    \"traceback_report\",\n    \"lollipop_plot\",\n    \"slice_plot\",\n    \"check_constraints\",\n    \"count_free_params\",\n    \"OptimizeLogReader\",\n    \"OptimizeResult\",\n    \"criterion_plot\",\n    \"params_plot\",\n    \"__version__\",\n]\n"
  },
  {
    "path": "src/estimagic/batch_evaluators.py",
    "content": "from optimagic.batch_evaluators import joblib_batch_evaluator as _joblib_batch_evaluator\nfrom optimagic.batch_evaluators import (\n    pathos_mp_batch_evaluator as _pathos_mp_batch_evaluator,\n)\nfrom optimagic.batch_evaluators import (\n    process_batch_evaluator as _process_batch_evaluator,\n)\nfrom optimagic.decorators import deprecated\n\nMSG = (\n    \"estimagic.batch_evaluators.{name} has been deprecated in version 0.5.0. Use \"\n    \"optimagic.batch_evaluators.{name} instead. This function will be removed in \"\n    \"version 0.6.0.\"\n)\n\n\npathos_mp_batch_evaluator = deprecated(\n    _pathos_mp_batch_evaluator, MSG.format(name=\"pathos_mp_batch_evaluator\")\n)\n\njoblib_batch_evaluator = deprecated(\n    _joblib_batch_evaluator, MSG.format(name=\"joblib_batch_evaluator\")\n)\n\nprocess_batch_evaluator = deprecated(\n    _process_batch_evaluator, MSG.format(name=\"process_batch_evaluator\")\n)\n"
  },
  {
    "path": "src/estimagic/bootstrap.py",
    "content": "import functools\nfrom dataclasses import dataclass\nfrom functools import cached_property\nfrom typing import Any\n\nimport numpy as np\nimport pandas as pd\nfrom pybaum import leaf_names, tree_flatten, tree_just_flatten, tree_unflatten\n\nfrom estimagic.bootstrap_ci import calculate_ci\nfrom estimagic.bootstrap_helpers import check_inputs\nfrom estimagic.bootstrap_outcomes import get_bootstrap_outcomes\nfrom estimagic.shared_covs import calculate_estimation_summary\nfrom optimagic.batch_evaluators import joblib_batch_evaluator\nfrom optimagic.parameters.block_trees import matrix_to_block_tree\nfrom optimagic.parameters.tree_registry import get_registry\nfrom optimagic.utilities import get_rng\n\n\ndef bootstrap(\n    outcome,\n    data,\n    *,\n    existing_result=None,\n    outcome_kwargs=None,\n    n_draws=1_000,\n    weight_by=None,\n    cluster_by=None,\n    seed=None,\n    n_cores=1,\n    error_handling=\"continue\",\n    batch_evaluator=joblib_batch_evaluator,\n):\n    \"\"\"Use the bootstrap to calculate inference quantities.\n\n    Args:\n        outcome (callable): A function that computes the statistic of interest.\n        data (pd.DataFrame): Dataset.\n        existing_result (BootstrapResult): An existing BootstrapResult\n            object from a previous call of bootstrap(). Default is None.\n        outcome_kwargs (dict): Additional keyword arguments for outcome.\n        n_draws (int): Number of bootstrap samples to draw.\n            If len(existing_outcomes) >= n_draws, a random subset of existing_outcomes\n            is used.\n        weight_by (str): Column name of variable with weights or None.\n        cluster_by (str): Column name of variable to cluster by or None.\n        seed (Union[None, int, numpy.random.Generator]): If seed is None or int the\n            numpy.random.default_rng is used seeded with seed. 
If seed is already a\n            Generator instance then that instance is used.\n        n_cores (int): number of jobs for parallelization.\n        error_handling (str): One of \"continue\", \"raise\". Default \"continue\" which means\n            that bootstrap estimates are only calculated for those samples where no\n            errors occur and a warning is produced if any error occurs.\n        batch_evaluator (str or Callable): Name of a pre-implemented batch evaluator\n            (currently 'joblib' and 'pathos_mp') or Callable with the same interface\n            as the estimagic batch_evaluators. See :ref:`batch_evaluators`.\n\n    Returns:\n        BootstrapResult: A BootstrapResult object storing information on summary\n            statistics, the covariance matrix, and estimated boostrap outcomes.\n\n    \"\"\"\n    if callable(outcome):\n        check_inputs(data=data, weight_by=weight_by, cluster_by=cluster_by)\n\n        if outcome_kwargs is not None:\n            outcome = functools.partial(outcome, **outcome_kwargs)\n    else:\n        raise TypeError(\"outcome must be a callable.\")\n\n    if existing_result is None:\n        base_outcome = outcome(data)\n        existing_outcomes = []\n    elif isinstance(existing_result, BootstrapResult):\n        base_outcome = existing_result.base_outcome\n        existing_outcomes = existing_result.outcomes\n    else:\n        raise ValueError(\"existing_result must be None or a BootstrapResult.\")\n\n    rng = get_rng(seed)\n    n_existing = len(existing_outcomes)\n\n    if n_draws > n_existing:\n        new_outcomes = get_bootstrap_outcomes(\n            data=data,\n            outcome=outcome,\n            weight_by=weight_by,\n            cluster_by=cluster_by,\n            rng=rng,\n            n_draws=n_draws - n_existing,\n            n_cores=n_cores,\n            error_handling=error_handling,\n            batch_evaluator=batch_evaluator,\n        )\n\n        all_outcomes = existing_outcomes + 
new_outcomes\n    else:\n        random_indices = rng.choice(n_existing, n_draws, replace=False)\n        all_outcomes = [existing_outcomes[k] for k in random_indices]\n\n    # ==================================================================================\n    # Process results\n    # ==================================================================================\n\n    registry = get_registry(extended=True)\n    flat_outcomes = [\n        tree_just_flatten(_outcome, registry=registry) for _outcome in all_outcomes\n    ]\n    internal_outcomes = np.array(flat_outcomes)\n\n    result = BootstrapResult(\n        _base_outcome=base_outcome,\n        _internal_outcomes=internal_outcomes,\n        _internal_cov=np.cov(internal_outcomes, rowvar=False),\n    )\n\n    return result\n\n\n@dataclass\nclass BootstrapResult:\n    _base_outcome: Any\n    _internal_outcomes: np.ndarray\n    _internal_cov: np.ndarray\n\n    @cached_property\n    def _se(self):\n        return self.se()\n\n    @cached_property\n    def _cov(self):\n        return self.cov()\n\n    @cached_property\n    def _ci(self):\n        return self.ci()\n\n    @cached_property\n    def _p_values(self):\n        return self.p_values()\n\n    @cached_property\n    def _summary(self):\n        return self.summary()\n\n    @property\n    def base_outcome(self):\n        \"\"\"Returns the base outcome statistic(s).\n\n        Returns:\n            pytree: Pytree of base outcomes, i.e. 
the outcome statistic(s) evaluated\n                on the original data set.\n\n        \"\"\"\n        return self._base_outcome\n\n    @cached_property\n    def outcomes(self):\n        \"\"\"Returns the estimated bootstrap outcomes.\n\n        Returns:\n            List[Any]: The bootstrap outcomes as a list of pytrees.\n\n        \"\"\"\n        registry = get_registry(extended=True)\n        _, treedef = tree_flatten(self._base_outcome, registry=registry)\n\n        outcomes = [\n            tree_unflatten(treedef, out, registry=registry)\n            for out in self._internal_outcomes\n        ]\n        return outcomes\n\n    def se(self):\n        \"\"\"Calculate standard errors.\n\n        Returns:\n            Any: The standard errors of the estimated parameters as a block-pytree,\n                numpy.ndarray, or pandas.DataFrame.\n\n        \"\"\"\n        cov = self._internal_cov\n        se = np.sqrt(np.diagonal(cov))\n\n        registry = get_registry(extended=True)\n        _, treedef = tree_flatten(self._base_outcome, registry=registry)\n\n        se = tree_unflatten(treedef, se, registry=registry)\n        return se\n\n    def cov(self, return_type=\"pytree\"):\n        \"\"\"Calculate the variance-covariance matrix of the estimated parameters.\n\n        Args:\n            return_type (str): One of \"pytree\", \"array\" or \"dataframe\". Default pytree.\n                If \"array\", a 2d numpy array with the covariance is returned. 
If\n                \"dataframe\", a pandas DataFrame with parameter names in the\n                index and columns are returned.\n                The default is \"pytree\".\n\n        Returns:\n            Any: The covariance matrix of the estimated parameters as a block-pytree,\n                numpy.ndarray, or pandas.DataFrame.\n\n        \"\"\"\n        cov = self._internal_cov\n\n        if return_type == \"dataframe\":\n            registry = get_registry(extended=True)\n            names = np.array(leaf_names(self._base_outcome, registry=registry))\n            cov = pd.DataFrame(cov, columns=names, index=names)\n        elif return_type == \"pytree\":\n            cov = matrix_to_block_tree(cov, self._base_outcome, self._base_outcome)\n        elif return_type != \"array\":\n            raise ValueError(\n                \"return_type must be one of pytree, array, or dataframe, \"\n                f\"not {return_type}.\"\n            )\n        return cov\n\n    def ci(self, ci_method=\"percentile\", ci_level=0.95):\n        \"\"\"Calculate confidence intervals.\n\n        Args:\n            ci_method (str): Method of choice for computing confidence intervals.\n                The default is \"percentile\".\n            ci_level (float): Confidence level for the calculation of confidence\n                intervals. 
The default is 0.95.\n\n        Returns:\n            Any: Pytree with the same structure as base_outcome containing lower\n                bounds of confidence intervals.\n            Any: Pytree with the same structure as base_outcome containing upper\n                bounds of confidence intervals.\n\n        \"\"\"\n        registry = get_registry(extended=True)\n        base_outcome_flat, treedef = tree_flatten(self._base_outcome, registry=registry)\n\n        lower_flat, upper_flat = calculate_ci(\n            base_outcome_flat, self._internal_outcomes, ci_method, ci_level\n        )\n\n        lower = tree_unflatten(treedef, lower_flat, registry=registry)\n        upper = tree_unflatten(treedef, upper_flat, registry=registry)\n        return lower, upper\n\n    def p_values(self):\n        \"\"\"Calculate p-values.\n\n        Returns:\n            Any: A pytree with the same structure as base_outcome containing p-values\n                for the parameter estimates.\n\n        \"\"\"\n        msg = \"Bootstrap p_values are not yet implemented.\"\n        raise NotImplementedError(msg)\n\n    def summary(self, ci_method=\"percentile\", ci_level=0.95):\n        \"\"\"Create a summary of bootstrap results.\n\n        Args:\n            ci_method (str): Method of choice for confidence interval computation.\n                The default is \"percentile\".\n            ci_level (float): Confidence level for the calculation of confidence\n                intervals. 
The default is 0.95.\n\n    Returns:\n        pd.DataFrame: The estimation summary as a DataFrame containing information\n            on the mean, standard errors, as well as the confidence intervals.\n            Soon this will be a pytree.\n\n        \"\"\"\n        registry = get_registry(extended=True)\n        names = leaf_names(self.base_outcome, registry=registry)\n        summary_data = _calculate_summary_data_bootstrap(\n            self, ci_method=ci_method, ci_level=ci_level\n        )\n        summary = calculate_estimation_summary(\n            summary_data=summary_data,\n            names=names,\n            free_names=names,\n        )\n        return summary\n\n\ndef _calculate_summary_data_bootstrap(bootstrap_result, ci_method, ci_level):\n    lower, upper = bootstrap_result.ci(ci_method=ci_method, ci_level=ci_level)\n    summary_data = {\n        \"value\": bootstrap_result.base_outcome,\n        \"standard_error\": bootstrap_result.se(),\n        \"ci_lower\": lower,\n        \"ci_upper\": upper,\n        \"p_value\": np.full(len(lower), np.nan),  # p-values are not implemented yet\n    }\n    return summary_data\n"
  },
  {
    "path": "src/estimagic/bootstrap_ci.py",
    "content": "import numpy as np\nfrom scipy.stats import norm\n\nfrom estimagic.bootstrap_helpers import check_inputs\n\n\ndef calculate_ci(\n    base_outcome,\n    estimates,\n    ci_method=\"percentile\",\n    ci_level=0.95,\n):\n    \"\"\"Compute confidence interval of bootstrap estimates.\n\n    Parts of the code of the subfunctions of this function are taken from\n    Daniel Saxton's resample library, as found on\n    https://github.com/dsaxton/resample/\n\n\n    Args:\n        base_outcome (list): List of flat base outcomes, i.e. the outcome\n            statistic(s) evaluated on the original data set.\n        estimates (np.ndarray): Array of estimates computed on the bootstrapped\n            samples.\n        ci_method (str): Method of choice for computing confidence intervals.\n            The default is \"percentile\".\n        ci_level (float): Confidence level for the calculation of confidence\n            intervals. The default is 0.95.\n\n    Returns:\n        np.ndarray: 1d array of the lower confidence interval, where the k'th entry\n            contains the lower confidence interval for the k'th parameter.\n        np.ndarray: 1d array of the upper confidence interval, where the k'th entry\n            contains the upper confidence interval for the k'th parameter.\n\n    \"\"\"\n    check_inputs(ci_method=ci_method, ci_level=ci_level, skipdata=True)\n\n    alpha = 1 - ci_level\n\n    if ci_method == \"percentile\":\n        cis = _ci_percentile(estimates, alpha)\n    elif ci_method == \"bc\":\n        cis = _ci_bc(estimates, base_outcome, alpha)\n    elif ci_method == \"t\":\n        cis = _ci_t(estimates, base_outcome, alpha)\n    elif ci_method == \"basic\":\n        cis = _ci_basic(estimates, base_outcome, alpha)\n    elif ci_method == \"normal\":\n        cis = _ci_normal(estimates, base_outcome, alpha)\n\n    return cis[:, 0], cis[:, 1]\n\n\ndef _ci_percentile(estimates, alpha):\n    \"\"\"Compute percentile type confidence interval of 
bootstrap estimates.\n\n    Args:\n        estimates (np.ndarray): Array of estimates computed on the bootstrapped\n            samples.\n        alpha (float): Statistical significance level of choice.\n\n    Returns:\n        cis (np.ndarray): 2d array where k'th row contains the upper and lower CI\n            for k'th parameter.\n\n    \"\"\"\n    num_params = estimates.shape[1]\n    cis = np.zeros((num_params, 2))\n\n    for k in range(num_params):\n        q = _eqf(estimates[:, k])\n        cis[k, :] = q(alpha / 2), q(1 - alpha / 2)\n\n    return cis\n\n\ndef _ci_bc(estimates, base_outcome, alpha):\n    \"\"\"Compute bc type confidence interval of bootstrap estimates.\n\n    Args:\n        estimates (np.ndarray): Array of estimates computed on the bootstrapped\n            samples.\n        base_outcome (list): List of flat base outcomes, i.e. the outcome\n            statistics evaluated on the original data set.\n        alpha (float): Statistical significance level of choice.\n\n    Returns:\n        cis (np.ndarray): 2d array where k'th row contains the upper and lower CI\n            for k'th parameter.\n\n    \"\"\"\n    num_params = estimates.shape[1]\n    cis = np.zeros((num_params, 2))\n\n    for k in range(num_params):\n        q = _eqf(estimates[:, k])\n        params = estimates[:, k]\n\n        # Bias correction\n        z_naught = norm.ppf(np.mean(params <= base_outcome[k]))\n        z_low = norm.ppf(alpha)\n        z_high = norm.ppf(1 - alpha)\n\n        p1 = norm.cdf(z_naught + (z_naught + z_low))\n        p2 = norm.cdf(z_naught + (z_naught + z_high))\n\n        cis[k, :] = q(p1), q(p2)\n\n    return cis\n\n\ndef _ci_t(estimates, base_outcome, alpha):\n    \"\"\"Compute studentized confidence interval of bootstrap estimates.\n\n    Args:\n        estimates (np.ndarray): Array of estimates computed on the bootstrapped\n            samples.\n        base_outcome (list): List of flat base outcomes, i.e. 
the outcome\n            statistics evaluated on the original data set.\n        alpha (float): Statistical significance level of choice.\n\n    Returns:\n        cis (np.ndarray): 2d array where k'th row contains the upper and lower CI\n            for k'th parameter.\n\n    \"\"\"\n    num_params = estimates.shape[1]\n    cis = np.zeros((num_params, 2))\n\n    for k in range(num_params):\n        params = estimates[:, k]\n\n        theta_std = np.std(params)\n\n        tq = _eqf((params - base_outcome[k]) / theta_std)\n        t1 = tq(1 - alpha / 2)\n        t2 = tq(alpha / 2)\n\n        cis[k, :] = base_outcome[k] - theta_std * t1, base_outcome[k] - theta_std * t2\n\n    return cis\n\n\ndef _ci_normal(estimates, base_outcome, alpha):\n    \"\"\"Compute approximate normal confidence interval of bootstrap estimates.\n\n    Args:\n        estimates (np.ndarray): Array of estimates computed on the bootstrapped\n            samples.\n        base_outcome (list): List of flat base outcomes, i.e. the outcome\n            statistics evaluated on the original data set.\n        alpha (float): Statistical significance level of choice.\n\n    Returns:\n        cis (np.ndarray): 2d array where k'th row contains the upper and lower CI\n            for k'th parameter.\n\n    \"\"\"\n    num_params = estimates.shape[1]\n    cis = np.zeros((num_params, 2))\n\n    for k in range(num_params):\n        params = estimates[:, k]\n        theta_std = np.std(params)\n        t = norm.ppf(alpha / 2)\n\n        cis[k, :] = base_outcome[k] + theta_std * t, base_outcome[k] - theta_std * t\n\n    return cis\n\n\ndef _ci_basic(estimates, base_outcome, alpha):\n    \"\"\"Compute basic bootstrap confidence interval of bootstrap estimates.\n\n    Args:\n        estimates (np.ndarray): Array of estimates computed on the bootstrapped\n            samples.\n        base_outcome (list): List of flat base outcomes, i.e. 
the outcome\n            statistics evaluated on the original data set.\n        alpha (float): Statistical significance level of choice.\n\n    Returns:\n        cis (np.ndarray): 2d array where k'th row contains the upper and lower CI\n            for k'th parameter.\n\n    \"\"\"\n    num_params = estimates.shape[1]\n    cis = np.zeros((num_params, 2))\n\n    for k in range(num_params):\n        q = _eqf(estimates[:, k])\n\n        cis[k, :] = (\n            2 * base_outcome[k] - q(1 - alpha / 2),\n            2 * base_outcome[k] - q(alpha / 2),\n        )\n\n    return cis\n\n\ndef _eqf(sample):\n    \"\"\"Return empirical quantile function of the given sample.\n\n    Args:\n        sample (np.ndarray): Sample to base quantile function on.\n\n    Returns:\n        f (callable): Quantile function for given sample.\n\n    \"\"\"\n\n    def f(x):\n        return np.quantile(sample, x)\n\n    return f\n"
  },
  {
    "path": "src/estimagic/bootstrap_helpers.py",
    "content": "import pandas as pd\n\n\ndef check_inputs(\n    data=None,\n    weight_by=None,\n    cluster_by=None,\n    ci_method=\"percentile\",\n    ci_level=0.95,\n    skipdata=False,\n):\n    \"\"\"Check validity of inputs.\n\n    Args:\n        data (pd.DataFrame): Dataset.\n        weight_by (str): Column name of variable with weights.\n        cluster_by (str): Column name of variable to cluster by.\n        ci_method (str): Method of choice for computing confidence intervals.\n            The default is \"percentile\".\n        ci_level (float): Confidence level for the calculation of confidence\n            intervals. The default is 0.95.\n        skipdata (bool): Whether to skip all checks on the data argument.\n\n    \"\"\"\n    ci_method_list = [\"percentile\", \"bc\", \"t\", \"normal\", \"basic\"]\n\n    if not skipdata:\n        if not isinstance(data, pd.DataFrame) and not isinstance(data, pd.Series):\n            raise TypeError(\"Data must be a pandas.DataFrame or pandas.Series.\")\n        elif (weight_by is not None) and (weight_by not in data.columns.tolist()):\n            raise ValueError(\n                \"Input 'weight_by' must be None or a column name of 'data'.\"\n            )\n        elif (cluster_by is not None) and (cluster_by not in data.columns.tolist()):\n            raise ValueError(\n                \"Input 'cluster_by' must be None or a column name of 'data'.\"\n            )\n\n    if ci_method not in ci_method_list:\n        msg = (\n            \"ci_method must be 'percentile', 'bc', 't', 'basic' or 'normal', \"\n            f\"'{ci_method}' was supplied\"\n        )\n        raise ValueError(msg)\n    if ci_level > 1 or ci_level < 0:\n        raise ValueError(\"Input 'ci_level' must be in [0,1].\")\n"
  },
  {
    "path": "src/estimagic/bootstrap_outcomes.py",
    "content": "from estimagic.bootstrap_helpers import check_inputs\nfrom estimagic.bootstrap_samples import get_bootstrap_indices\nfrom optimagic.batch_evaluators import process_batch_evaluator\n\n\ndef get_bootstrap_outcomes(\n    data,\n    outcome,\n    weight_by=None,\n    cluster_by=None,\n    rng=None,\n    n_draws=1000,\n    n_cores=1,\n    error_handling=\"continue\",\n    batch_evaluator=\"joblib\",\n):\n    \"\"\"Draw bootstrap samples and calculate outcomes.\n\n    Args:\n        data (pandas.DataFrame): original dataset.\n        outcome (callable): function of the dataset calculating statistic of interest.\n            Returns a general pytree (e.g. pandas Series, dict, numpy array, etc.).\n        weight_by (str): column name of the variable with weights.\n        cluster_by (str): column name of the variable to cluster by.\n        rng (numpy.random.Generator): A random number generator.\n        n_draws (int): number of bootstrap draws.\n        n_cores (int): number of jobs for parallelization.\n        error_handling (str): One of \"continue\", \"raise\". Default \"continue\" which means\n            that bootstrap estimates are only calculated for those samples where no\n            errors occur and a warning is produced if any error occurs.\n        batch_evaluator (str or Callable): Name of a pre-implemented batch evaluator\n            (currently 'joblib' and 'pathos_mp') or Callable with the same interface\n            as the estimagic batch_evaluators. 
See :ref:`batch_evaluators`.\n\n    Returns:\n        estimates (list):  List of pytrees of estimated bootstrap outcomes.\n\n    \"\"\"\n    check_inputs(data=data, weight_by=weight_by, cluster_by=cluster_by)\n    batch_evaluator = process_batch_evaluator(batch_evaluator)\n\n    indices = get_bootstrap_indices(\n        data=data,\n        rng=rng,\n        weight_by=weight_by,\n        cluster_by=cluster_by,\n        n_draws=n_draws,\n    )\n\n    estimates = _get_bootstrap_outcomes_from_indices(\n        indices=indices,\n        data=data,\n        outcome=outcome,\n        n_cores=n_cores,\n        error_handling=error_handling,\n        batch_evaluator=batch_evaluator,\n    )\n\n    return estimates\n\n\ndef _get_bootstrap_outcomes_from_indices(\n    indices,\n    data,\n    outcome,\n    n_cores,\n    error_handling,\n    batch_evaluator,\n):\n    arguments = [{\"data\": data, \"indices\": ind, \"outcome\": outcome} for ind in indices]\n\n    raw_estimates = batch_evaluator(\n        _take_indices_and_calculate_outcome,\n        arguments,\n        n_cores=n_cores,\n        unpack_symbol=\"**\",\n        error_handling=error_handling,\n    )\n\n    estimates = [est for est in raw_estimates if not isinstance(est, str)]\n    tracebacks = [est for est in raw_estimates if isinstance(est, str)]\n\n    if not estimates:\n        msg = (\n            \"Calculating of all bootstrap outcomes failed. The tracebacks of the \"\n            \"raised Exceptions are reproduced below:\"\n        )\n        raise RuntimeError(msg + \"\\n\\n\" + \"\\n\\n\".join(tracebacks))\n\n    if tracebacks:\n        msg = (\n            \"Calculating bootstrap outcomes failed for some samples. Those samples \"\n            \"are excluded from the calculation of bootstrap standard errors and \"\n            \"confidence intervals, rendering them invalid. Do not use them for \"\n            \"anything but diagnostic purposes. Check warnings for more information. 
\"\n        )\n\n    return estimates\n\n\ndef _take_indices_and_calculate_outcome(indices, data, outcome):\n    return outcome(data.iloc[indices])\n"
  },
  {
    "path": "src/estimagic/bootstrap_samples.py",
    "content": "import numpy as np\nimport pandas as pd\n\n\ndef get_bootstrap_indices(\n    data,\n    rng,\n    weight_by=None,\n    cluster_by=None,\n    n_draws=1000,\n):\n    \"\"\"Draw positional indices for the construction of bootstrap samples.\n\n    Storing the positional indices instead of the full bootstrap samples saves a lot\n    of memory for datasets with many variables.\n\n    Args:\n        data (pandas.DataFrame): original dataset.\n        rng (numpy.random.Generator): A random number generator.\n        weight_by (str): column name of the variable with weights.\n        cluster_by (str): column name of the variable to cluster by.\n        n_draws (int): number of draws, only relevant if seeds is None.\n\n    Returns:\n        list: list of numpy arrays with positional indices\n\n    \"\"\"\n    n_obs = len(data)\n    probs = _calculate_bootstrap_indices_weights(data, weight_by, cluster_by)\n\n    if cluster_by is None:\n        bootstrap_indices = list(\n            rng.choice(n_obs, size=(n_draws, n_obs), replace=True, p=probs)\n        )\n    else:\n        clusters = data[cluster_by].unique()\n        drawn_clusters = rng.choice(\n            clusters, size=(n_draws, len(clusters)), replace=True, p=probs\n        )\n\n        bootstrap_indices = _convert_cluster_ids_to_indices(\n            data[cluster_by], drawn_clusters\n        )\n\n    return bootstrap_indices\n\n\ndef _calculate_bootstrap_indices_weights(data, weight_by, cluster_by):\n    \"\"\"Calculate weights for drawing bootstrap indices.\n\n    If weights_by is not None and cluster_by is None, the weights are normalized to sum\n    to one. 
If weights_by and cluster_by are both not None, the weights are normalized\n    to sum to one within each cluster.\n\n    Args:\n        data (pandas.DataFrame): original dataset.\n        weight_by (str): column name of the variable with weights.\n        cluster_by (str): column name of the variable to cluster by.\n\n    Returns:\n        list: None or pd.Series of weights.\n\n    \"\"\"\n    if weight_by is None:\n        probs = None\n    else:\n        if cluster_by is None:\n            probs = data[weight_by] / data[weight_by].sum()\n        else:\n            cluster_weights = data.groupby(cluster_by, sort=False)[weight_by].sum()\n            probs = cluster_weights / cluster_weights.sum()\n    return probs\n\n\ndef _convert_cluster_ids_to_indices(cluster_col, drawn_clusters):\n    \"\"\"Convert the drawn clusters to positional indices of individual observations.\n\n    Args:\n        cluster_col (pandas.Series):\n\n    \"\"\"\n    bootstrap_indices = []\n    cluster_to_locs = pd.Series(np.arange(len(cluster_col)), index=cluster_col)\n    for draw in drawn_clusters:\n        bootstrap_indices.append(cluster_to_locs[draw].to_numpy())\n    return bootstrap_indices\n\n\ndef get_bootstrap_samples(\n    data,\n    rng,\n    weight_by=None,\n    cluster_by=None,\n    n_draws=1000,\n):\n    \"\"\"Draw bootstrap samples.\n\n    If you have memory issues you should use get_bootstrap_indices instead and construct\n    the full samples only as needed.\n\n    Args:\n        data (pandas.DataFrame): original dataset.\n        rng (numpy.random.Generator): A random number generator.\n        weight_by (str): weights for the observations.\n        cluster_by (str): column name of the variable to cluster by.\n        n_draws (int): number of draws, only relevant if seeds is None.\n\n    Returns:\n        list: list of resampled datasets.\n\n    \"\"\"\n    indices = get_bootstrap_indices(\n        data=data,\n        rng=rng,\n        weight_by=weight_by,\n        
cluster_by=cluster_by,\n        n_draws=n_draws,\n    )\n    datasets = _get_bootstrap_samples_from_indices(data=data, bootstrap_indices=indices)\n    return datasets\n\n\ndef _get_bootstrap_samples_from_indices(data, bootstrap_indices):\n    \"\"\"Convert bootstrap indices into actual bootstrap samples.\n\n    Args:\n        data (pandas.DataFrame): original dataset.\n        bootstrap_indices (list): List with numpy arrays containing positional indices\n            of observations in data.\n\n    Returns:\n        list: list of DataFrames\n\n    \"\"\"\n    out = [data.iloc[idx] for idx in bootstrap_indices]\n    return out\n"
  },
  {
    "path": "src/estimagic/config.py",
    "content": "from pathlib import Path\n\nEXAMPLE_DIR = Path(__file__).parent / \"examples\"\n"
  },
  {
    "path": "src/estimagic/estimate_ml.py",
    "content": "import warnings\nfrom dataclasses import asdict, dataclass, field\nfrom functools import cached_property\nfrom typing import Any, Dict\n\nimport numpy as np\nimport pandas as pd\n\nfrom estimagic.ml_covs import (\n    cov_cluster_robust,\n    cov_hessian,\n    cov_jacobian,\n    cov_robust,\n    cov_strata_robust,\n)\nfrom estimagic.shared_covs import (\n    FreeParams,\n    calculate_ci,\n    calculate_estimation_summary,\n    calculate_free_estimates,\n    calculate_p_values,\n    calculate_summary_data_estimation,\n    get_derivative_case,\n    transform_covariance,\n    transform_free_cov_to_cov,\n    transform_free_values_to_params_tree,\n)\nfrom optimagic import deprecations, mark\nfrom optimagic.deprecations import (\n    replace_and_warn_about_deprecated_bounds,\n)\nfrom optimagic.differentiation.derivatives import first_derivative, second_derivative\nfrom optimagic.differentiation.numdiff_options import (\n    NumdiffPurpose,\n    get_default_numdiff_options,\n    pre_process_numdiff_options,\n)\nfrom optimagic.exceptions import InvalidFunctionError, NotAvailableError\nfrom optimagic.optimization.fun_value import (\n    convert_fun_output_to_function_value,\n    enforce_return_type,\n)\nfrom optimagic.optimization.optimize import maximize\nfrom optimagic.optimization.optimize_result import OptimizeResult\nfrom optimagic.parameters.block_trees import block_tree_to_matrix, matrix_to_block_tree\nfrom optimagic.parameters.bounds import Bounds, pre_process_bounds\nfrom optimagic.parameters.conversion import Converter, get_converter\nfrom optimagic.parameters.space_conversion import InternalParams\nfrom optimagic.shared.check_option_dicts import (\n    check_optimization_options,\n)\nfrom optimagic.typing import AggregationLevel\nfrom optimagic.utilities import get_rng, to_pickle\n\n\ndef estimate_ml(\n    loglike,\n    params,\n    optimize_options,\n    *,\n    bounds=None,\n    constraints=None,\n    logging=None,\n    loglike_kwargs=None,\n   
 jacobian=None,\n    jacobian_kwargs=None,\n    jacobian_numdiff_options=None,\n    hessian=None,\n    hessian_kwargs=None,\n    hessian_numdiff_options=None,\n    design_info=None,\n    # deprecated\n    log_options=None,\n    lower_bounds=None,\n    upper_bounds=None,\n    numdiff_options=None,\n):\n    \"\"\"Do a maximum likelihood (ml) estimation.\n\n    This is a high level interface of our lower level functions for maximization,\n    numerical differentiation and inference. It does the full workflow for maximum\n    likelihood estimation with just one function call.\n\n    While we have good defaults, you can still configure each aspect of each step\n    via the optional arguments of this function. If you find it easier to do the\n    maximization separately, you can do so and just provide the optimal parameters as\n    ``params`` and set ``optimize_options=False``\n\n    Args:\n        loglike (callable): Likelihood function that takes a params (and potentially\n            other keyword arguments) a pytree containing the likelihood contributions\n            for each observation or a FunctionValue object.\n        params (pytree): A pytree containing the estimated or start parameters of the\n            likelihood model. If the supplied parameters are estimated parameters, set\n            optimize_options to False. Pytrees can be a numpy array, a pandas Series, a\n            DataFrame with \"value\" column, a float and any kind of (nested) dictionary\n            or list containing these elements. See :ref:`params` for examples.\n        optimize_options (dict, Algorithm, str or False): Keyword arguments that govern\n            the numerical optimization. Valid entries are all arguments of\n            :func:`~estimagic.optimization.optimize.minimize` except for those that are\n            passed explicilty to ``estimate_ml``. 
If you pass False as optimize_options\n            you signal that ``params`` are already the optimal parameters and no\n            numerical optimization is needed. If you pass a str as optimize_options it\n            is used as the ``algorithm`` option.\n        bounds: Lower and upper bounds on the parameters. The most general and preferred\n            way to specify bounds is an `optimagic.Bounds` object that collects lower,\n            upper, soft_lower and soft_upper bounds. The soft bounds are used for\n            sampling based optimizers but are not enforced during optimization. Each\n            bound type mirrors the structure of params. Check our how-to guide on bounds\n            for examples. If params is a flat numpy array, you can also provide bounds\n            via any format that is supported by scipy.optimize.minimize.\n        constraints (list, dict): List with constraint dictionaries or single dict.\n            See :ref:`constraints`.\n        logging (pathlib.Path, str or False): Path to sqlite3 file (which typically has\n            the file extension ``.db``. If the file does not exist, it will be created.\n        log_options (dict): Additional keyword arguments to configure the logging.\n            - \"fast_logging\": A boolean that determines if \"unsafe\" settings are used\n            to speed up write processes to the database. This should only be used for\n            very short running criterion functions where the main purpose of the log\n            is monitoring and it would not be catastrophic to get a\n            corrupted database in case of a sudden system shutdown. If one evaluation\n            of the criterion function (and gradient if applicable) takes more than\n            100 ms, the logging overhead is negligible.\n            - \"if_table_exists\": (str) One of \"extend\", \"replace\", \"raise\". What to\n            do if the tables we want to write to already exist. 
Default \"extend\".\n            - \"if_database_exists\": (str): One of \"extend\", \"replace\", \"raise\". What to\n            do if the database we want to write to already exists. Default \"extend\".\n        loglike_kwargs (dict): Additional keyword arguments for loglike.\n        jacobian (callable or None): A function that takes ``params`` and potentially\n            other keyword arguments and returns the jacobian of loglike[\"contributions\"]\n            with respect to the params. Note that you only need to pass a Jacobian\n            function if you have a closed form Jacobian. If you pass None, a numerical\n            Jacobian will be calculated.\n        jacobian_kwargs (dict): Additional keyword arguments for the Jacobian function.\n        jacobian_numdiff_options (dict): Keyword arguments for the calculation of\n            numerical derivatives for the calculation of standard errors. See\n            :ref:`first_derivative` for details.\n        hessian (callable or None or False): A function that takes ``params`` and\n            potentially other keyword arguments and returns the Hessian of\n            loglike[\"value\"] with respect to the params.  If you pass None, a numerical\n            Hessian will be calculated. If you pass ``False``, you signal that no\n            Hessian should be calculated. Thus, no result that requires the Hessian will\n            be calculated.\n        hessian_kwargs (dict): Additional keyword arguments for the Hessian function.\n        hessian_numdiff_options (dict): Keyword arguments for the calculation of\n            numerical derivatives for the calculation of standard errors.\n        design_info (pandas.DataFrame): DataFrame with one row per observation that\n            contains some or all of the variables \"psu\" (primary sampling unit),\n            \"strata\" and \"fpc\" (finite population corrector). 
See\n            :ref:`robust_likelihood_inference` for details.\n\n    Returns:\n        LikelihoodResult: A LikelihoodResult object.\n\n    \"\"\"\n    # ==================================================================================\n    # handle deprecations\n    # ==================================================================================\n\n    bounds = replace_and_warn_about_deprecated_bounds(\n        lower_bounds=lower_bounds,\n        upper_bounds=upper_bounds,\n        bounds=bounds,\n    )\n\n    if numdiff_options is not None:\n        deprecations.throw_numdiff_options_deprecated_in_estimate_ml_future_warning()\n        if jacobian_numdiff_options is None:\n            jacobian_numdiff_options = numdiff_options\n        if hessian_numdiff_options is None:\n            hessian_numdiff_options = numdiff_options\n\n    deprecations.throw_dict_constraints_future_warning_if_required(constraints)\n\n    # ==================================================================================\n    # Check and process inputs\n    # ==================================================================================\n\n    loglike = mark.likelihood(loglike)\n\n    bounds = pre_process_bounds(bounds)\n    jacobian_numdiff_options = pre_process_numdiff_options(jacobian_numdiff_options)\n    hessian_numdiff_options = pre_process_numdiff_options(hessian_numdiff_options)\n    # TODO: Replace dict_constraints with constraints, once we deprecate dictionary\n    # constraints.\n    dict_constraints = deprecations.pre_process_constraints(constraints)\n\n    if jacobian_numdiff_options is None:\n        jacobian_numdiff_options = get_default_numdiff_options(\n            purpose=NumdiffPurpose.ESTIMATE_JACOBIAN\n        )\n\n    if hessian_numdiff_options is None:\n        hessian_numdiff_options = get_default_numdiff_options(\n            purpose=NumdiffPurpose.ESTIMATE_HESSIAN\n        )\n\n    is_optimized = optimize_options is False\n\n    if not is_optimized:\n  
      # If optimize_options is not a dictionary and not False, we assume it represents\n        # an algorithm. The actual testing of whether it is a valid algorithm is done\n        # when `maximize` is called.\n        if not isinstance(optimize_options, dict):\n            optimize_options = {\"algorithm\": optimize_options}\n\n        check_optimization_options(\n            optimize_options,\n            usage=\"estimate_ml\",\n            algorithm_mandatory=True,\n        )\n\n    jac_case = get_derivative_case(jacobian)\n    hess_case = get_derivative_case(hessian)\n\n    loglike_kwargs = {} if loglike_kwargs is None else loglike_kwargs\n    jacobian_kwargs = {} if jacobian_kwargs is None else jacobian_kwargs\n    hessian_kwargs = {} if hessian_kwargs is None else hessian_kwargs\n\n    # ==================================================================================\n    # Calculate estimates via maximization (if necessary)\n    # ==================================================================================\n    # Note: We do not need to handle deprecations for the optimization because that\n    # is already done inside `maximize`.\n    if is_optimized:\n        estimates = params\n        opt_res = None\n    else:\n        opt_res = maximize(\n            fun=loglike,\n            fun_kwargs=loglike_kwargs,\n            params=params,\n            bounds=bounds,\n            constraints=constraints,\n            logging=logging,\n            log_options=log_options,\n            **optimize_options,\n        )\n        estimates = opt_res.params\n\n    # ==================================================================================\n    # Do first function evaluations at estimated parameters\n    # ==================================================================================\n\n    try:\n        loglike_eval = loglike(estimates, **loglike_kwargs)\n    except (KeyboardInterrupt, SystemExit):\n        raise\n    except Exception as e:\n       
 msg = \"Error while evaluating loglike at estimated params.\"\n        raise InvalidFunctionError(msg) from e\n\n    if callable(jacobian):\n        try:\n            jacobian_eval = jacobian(estimates, **jacobian_kwargs)\n        except (KeyboardInterrupt, SystemExit):\n            raise\n        except Exception as e:\n            msg = \"Error while evaluating closed form jacobian at estimated params.\"\n            raise InvalidFunctionError(msg) from e\n    else:\n        jacobian_eval = None\n\n    if callable(hessian):\n        try:\n            hessian_eval = hessian(estimates, **hessian_kwargs)\n        except (KeyboardInterrupt, SystemExit):\n            raise\n        except Exception as e:\n            msg = \"Error while evaluating closed form hessian at estimated params.\"\n            raise InvalidFunctionError(msg) from e\n    else:\n        hessian_eval = None\n\n    # ==================================================================================\n    # Handle deprecated function output\n    # ==================================================================================\n    if deprecations.is_dict_output(loglike_eval):\n        deprecations.throw_dict_output_warning()\n        loglike_eval = deprecations.convert_dict_to_function_value(loglike_eval)\n        loglike = deprecations.replace_dict_output(loglike)\n    else:\n        loglike_eval = convert_fun_output_to_function_value(\n            loglike_eval, AggregationLevel.LIKELIHOOD\n        )\n        loglike = enforce_return_type(AggregationLevel.LIKELIHOOD)(loglike)\n\n    # ==================================================================================\n    # Get the converter for params and function outputs\n    # ==================================================================================\n\n    converter, internal_estimates = get_converter(\n        params=estimates,\n        constraints=dict_constraints,\n        bounds=bounds,\n        func_eval=loglike_eval.value,\n   
     solver_type=\"contributions\",\n        derivative_eval=jacobian_eval,\n    )\n\n    # ==================================================================================\n    # Calculate internal jacobian\n    # ==================================================================================\n\n    if jac_case == \"closed-form\":\n        int_jac = converter.derivative_to_internal(\n            jacobian_eval, internal_estimates.values\n        )\n    elif jac_case == \"numerical\":\n\n        def func(x):\n            p = converter.params_from_internal(x)\n            loglike_eval = loglike(p, **loglike_kwargs)\n            if deprecations.is_dict_output(loglike_eval):\n                deprecations.throw_dict_output_warning()\n                loglike_eval = deprecations.convert_dict_to_function_value(loglike_eval)\n\n            out = loglike_eval.internal_value(AggregationLevel.LIKELIHOOD)\n            return out\n\n        jac_res = first_derivative(\n            func=func,\n            params=internal_estimates.values,\n            bounds=Bounds(\n                lower=internal_estimates.lower_bounds,\n                upper=internal_estimates.upper_bounds,\n            ),\n            error_handling=\"continue\",\n            **asdict(jacobian_numdiff_options),\n        )\n\n        int_jac = jac_res.derivative\n    else:\n        int_jac = None\n\n    if dict_constraints in [None, []] and jacobian_eval is None and int_jac is not None:\n        loglike_contribs = loglike_eval.value\n\n        jacobian_eval = matrix_to_block_tree(\n            int_jac,\n            outer_tree=loglike_contribs,\n            inner_tree=estimates,\n        )\n\n    if jacobian_eval is None:\n        _no_jac_reason = (\n            \"no closed form jacobian was provided and there are constraints\"\n        )\n    else:\n        _no_jac_reason = None\n    # ==================================================================================\n    # Calculate internal Hessian\n    
# ==================================================================================\n\n    if hess_case == \"skip\":\n        int_hess = None\n    elif hess_case == \"numerical\":\n\n        def func(x):\n            p = converter.params_from_internal(x)\n            loglike_eval = loglike(p, **loglike_kwargs)\n            if deprecations.is_dict_output(loglike_eval):\n                deprecations.throw_dict_output_warning()\n                loglike_eval = deprecations.convert_dict_to_function_value(loglike_eval)\n\n            out = loglike_eval.internal_value(AggregationLevel.SCALAR)\n            return out\n\n        hess_res = second_derivative(\n            func=func,\n            params=internal_estimates.values,\n            bounds=Bounds(\n                lower=internal_estimates.lower_bounds,\n                upper=internal_estimates.upper_bounds,\n            ),\n            error_handling=\"continue\",\n            **asdict(hessian_numdiff_options),\n        )\n        int_hess = hess_res.derivative\n    elif hess_case == \"closed-form\" and dict_constraints:\n        raise NotImplementedError(\n            \"Closed-form Hessians are not yet compatible with constraints.\"\n        )\n    elif hess_case == \"closed-form\":\n        int_hess = block_tree_to_matrix(\n            hessian_eval,\n            outer_tree=params,\n            inner_tree=params,\n        )\n    else:\n        raise ValueError()\n\n    if dict_constraints in [None, []] and hessian_eval is None and int_hess is not None:\n        hessian_eval = matrix_to_block_tree(\n            int_hess,\n            outer_tree=params,\n            inner_tree=params,\n        )\n\n    if hessian_eval is None:\n        if hess_case == \"skip\":\n            _no_hess_reason = \"the hessian calculation was explicitly skipped.\"\n        else:\n            _no_hess_reason = (\n                \"no closed form hessian was provided and there are constraints\"\n            )\n    else:\n        
_no_hess_reason = None\n\n    # ==================================================================================\n    # create a LikelihoodResult object\n    # ==================================================================================\n\n    free_estimates = calculate_free_estimates(estimates, internal_estimates)\n\n    res = LikelihoodResult(\n        _params=estimates,\n        _converter=converter,\n        _optimize_result=opt_res,\n        _jacobian=jacobian_eval,\n        _no_jacobian_reason=_no_jac_reason,\n        _hessian=hessian_eval,\n        _no_hessian_reason=_no_hess_reason,\n        _internal_jacobian=int_jac,\n        _internal_hessian=int_hess,\n        _design_info=design_info,\n        _internal_estimates=internal_estimates,\n        _free_estimates=free_estimates,\n        _has_constraints=dict_constraints not in [None, []],\n    )\n\n    return res\n\n\n@dataclass\nclass LikelihoodResult:\n    \"\"\"Likelihood estimation results object.\"\"\"\n\n    _params: Any\n    _internal_estimates: InternalParams\n    _free_estimates: FreeParams\n    _converter: Converter\n    _has_constraints: bool\n    _optimize_result: OptimizeResult | None = None\n    _jacobian: Any = None\n    _no_jacobian_reason: str | None = None\n    _hessian: Any = None\n    _no_hessian_reason: str | None = None\n    _internal_jacobian: np.ndarray | None = None\n    _internal_hessian: np.ndarray | None = None\n    _design_info: pd.DataFrame | None = None\n    _cache: Dict = field(default_factory=dict)\n\n    def __post_init__(self):\n        if self._internal_jacobian is None and self._internal_hessian is None:\n            raise ValueError(\n                \"At least one of _internal_jacobian or _internal_hessian must be \"\n                \"not None.\"\n            )\n\n        elif self._internal_jacobian is None:\n            valid_methods = [\"hessian\"]\n        elif self._internal_hessian is None:\n            valid_methods = [\"jacobian\"]\n        else:\n     
       valid_methods = [\"jacobian\", \"hessian\", \"robust\"]\n            if self._design_info is not None:\n                if \"psu\" in self._design_info:\n                    valid_methods.append(\"cluster_robust\")\n                if {\"strata\", \"psu\", \"fpc\"}.issubset(self._design_info):\n                    valid_methods.append(\"strata_robust\")\n\n        self._valid_methods = set(valid_methods)\n\n    def _get_free_cov(\n        self,\n        method,\n        n_samples,\n        bounds_handling,\n        seed,\n    ):\n        if method not in self._valid_methods:\n            msg = f\"Invalid method: {method}. Valid methods are {self._valid_methods}.\"\n            raise ValueError(msg)\n        args = (method, n_samples, bounds_handling, seed)\n        is_cached = args in self._cache\n\n        if is_cached:\n            free_cov = self._cache[args]\n        else:\n            free_cov = _calculate_free_cov_ml(\n                method=method,\n                internal_estimates=self._internal_estimates,\n                converter=self._converter,\n                internal_jacobian=self._internal_jacobian,\n                internal_hessian=self._internal_hessian,\n                n_samples=n_samples,\n                design_info=self._design_info,\n                bounds_handling=bounds_handling,\n                seed=seed,\n            )\n            if seed is not None:\n                self._cache[args] = free_cov\n            elif self._converter.has_transforming_constraints:\n                msg = (\n                    \"seed is set to None and constraints are transforming. This leads \"\n                    \"to randomness in the result. 
To avoid random behavior, choose a \"\n                    \"non-None seed.\"\n                )\n                warnings.warn(msg)\n\n        return free_cov\n\n    @property\n    def params(self):\n        return self._params\n\n    @property\n    def optimize_result(self):\n        return self._optimize_result\n\n    @property\n    def jacobian(self):\n        if self._jacobian is None:\n            raise NotAvailableError(\n                f\"No jacobian is available because {self._no_jacobian_reason}.\"\n            )\n        return self._jacobian\n\n    @property\n    def hessian(self):\n        if self._hessian is None:\n            raise NotAvailableError(\n                f\"No hessian is available because {self._no_hessian_reason}.\"\n            )\n        return self._hessian\n\n    @cached_property\n    def _se(self):\n        return self.se()\n\n    @cached_property\n    def _cov(self):\n        return self.cov()\n\n    @cached_property\n    def _summary(self):\n        return self.summary()\n\n    @cached_property\n    def _ci(self):\n        return self.ci()\n\n    @cached_property\n    def _p_values(self):\n        return self.p_values()\n\n    def se(\n        self,\n        method=\"jacobian\",\n        n_samples=10_000,\n        bounds_handling=\"clip\",\n        seed=None,\n    ):\n        \"\"\"Calculate standard errors.\n\n        Args:\n            method (str): One of \"jacobian\", \"hessian\", \"robust\", \"cluster_robust\",\n                \"strata_robust\". Default \"jacobian\". \"cluster_robust\" is only available\n                if design_info contains a column called \"psu\" that identifies the\n                primary sampling unit. 
\"strata_robust\" is only available if the columns\n                \"strata\", \"fpc\" and \"psu\" are in design_info.\n            n_samples (int): Number of samples used to transform the covariance matrix\n                of the internal parameter vector into the covariance matrix of the\n                external parameters. For background information about internal and\n                external params see :ref:`implementation_of_constraints`. This is only\n                used if you are using constraints.\n            bounds_handling (str): One of \"clip\", \"raise\", \"ignore\". Determines how\n                bounds are handled. If \"clip\", confidence intervals are clipped at the\n                bounds. Standard errors are only adjusted if a sampling step is\n                necessary due to additional constraints. If \"raise\" and any lower or\n                upper bound is binding, we raise an Error. If \"ignore\", boundary\n                problems are simply ignored.\n            seed (int): Seed for the random number generator. 
Only used if there are\n                transforming constraints.\n\n        Returns:\n            Any: A pytree with the same structure as params containing standard errors\n                for the parameter estimates.\n\n        \"\"\"\n        free_cov = self._get_free_cov(\n            method=method,\n            n_samples=n_samples,\n            bounds_handling=bounds_handling,\n            seed=seed,\n        )\n\n        free_se = np.sqrt(np.diagonal(free_cov))\n\n        se = transform_free_values_to_params_tree(\n            values=free_se,\n            free_params=self._free_estimates,\n            params=self._params,\n        )\n        return se\n\n    def cov(\n        self,\n        method=\"jacobian\",\n        n_samples=10_000,\n        bounds_handling=\"clip\",\n        return_type=\"pytree\",\n        seed=None,\n    ):\n        \"\"\"Calculate the variance-covariance (matrix) of the estimated parameters.\n\n        Args:\n            method (str): One of \"jacobian\", \"hessian\", \"robust\", \"cluster_robust\",\n                \"strata_robust\". Default \"jacobian\". \"cluster_robust\" is only available\n                if design_info contains a column called \"psu\" that identifies the\n                primary sampling unit. \"strata_robust\" is only available if the columns\n                \"strata\", \"fpc\" and \"psu\" are in design_info.\n            n_samples (int): Number of samples used to transform the covariance matrix\n                of the internal parameter vector into the covariance matrix of the\n                external parameters. For background information about internal and\n                external params see :ref:`implementation_of_constraints`. This is only\n                used if you are using constraints.\n            bounds_handling (str): One of \"clip\", \"raise\", \"ignore\". Determines how\n                bounds are handled. If \"clip\", confidence intervals are clipped at the\n                bounds. 
Standard errors are only adjusted if a sampling step is\n                necessary due to additional constraints. If \"raise\" and any lower or\n                upper bound is binding, we raise an Error. If \"ignore\", boundary\n                problems are simply ignored.\n            return_type (str): One of \"pytree\", \"array\" or \"dataframe\". Default pytree.\n                If \"array\", a 2d numpy array with the covariance is returned. If\n                \"dataframe\", a pandas DataFrame with parameter names in the\n                index and columns are returned.\n            seed (int): Seed for the random number generator. Only used if there are\n                transforming constraints.\n\n        Returns:\n            Any: The covariance matrix of the estimated parameters as block-pytree,\n                numpy.ndarray or pandas.DataFrame.\n\n        \"\"\"\n        free_cov = self._get_free_cov(\n            method=method,\n            n_samples=n_samples,\n            bounds_handling=bounds_handling,\n            seed=seed,\n        )\n        cov = transform_free_cov_to_cov(\n            free_cov=free_cov,\n            free_params=self._free_estimates,\n            params=self._params,\n            return_type=return_type,\n        )\n        return cov\n\n    def summary(\n        self,\n        method=\"jacobian\",\n        n_samples=10_000,\n        ci_level=0.95,\n        bounds_handling=\"clip\",\n        seed=None,\n    ):\n        \"\"\"Create a summary of estimation results.\n\n        Args:\n            method (str): One of \"jacobian\", \"hessian\", \"robust\", \"cluster_robust\",\n                \"strata_robust\". Default \"jacobian\". \"cluster_robust\" is only available\n                if design_info contains a column called \"psu\" that identifies the\n                primary sampling unit. 
\"strata_robust\" is only available if the columns\n                \"strata\", \"fpc\" and \"psu\" are in design_info.\n            ci_level (float): Confidence level for the calculation of confidence\n                intervals. The default is 0.95.\n            n_samples (int): Number of samples used to transform the covariance matrix\n                of the internal parameter vector into the covariance matrix of the\n                external parameters. For background information about internal and\n                external params see :ref:`implementation_of_constraints`. This is only\n                used if you are using constraints.\n            bounds_handling (str): One of \"clip\", \"raise\", \"ignore\". Determines how\n                bounds are handled. If \"clip\", confidence intervals are clipped at the\n                bounds. Standard errors are only adjusted if a sampling step is\n                necessary due to additional constraints. If \"raise\" and any lower or\n                upper bound is binding, we raise an Error. If \"ignore\", boundary\n                problems are simply ignored.\n            seed (int): Seed for the random number generator. 
Only used if there are\n                transforming constraints.\n\n        Returns:\n            Any: The estimation summary as pytree of DataFrames.\n\n        \"\"\"\n        summary_data = calculate_summary_data_estimation(\n            self,\n            free_estimates=self._free_estimates,\n            method=method,\n            ci_level=ci_level,\n            n_samples=n_samples,\n            bounds_handling=bounds_handling,\n            seed=seed,\n        )\n        summary = calculate_estimation_summary(\n            summary_data=summary_data,\n            names=self._free_estimates.all_names,\n            free_names=self._free_estimates.free_names,\n        )\n        return summary\n\n    def ci(\n        self,\n        method=\"jacobian\",\n        n_samples=10_000,\n        ci_level=0.95,\n        bounds_handling=\"clip\",\n        seed=None,\n    ):\n        \"\"\"Calculate confidence intervals.\n\n        Args:\n            method (str): One of \"jacobian\", \"hessian\", \"robust\", \"cluster_robust\",\n                \"strata_robust\". Default \"jacobian\". \"cluster_robust\" is only available\n                if design_info contains a column called \"psu\" that identifies the\n                primary sampling unit. \"strata_robust\" is only available if the columns\n                \"strata\", \"fpc\" and \"psu\" are in design_info.\n            ci_level (float): Confidence level for the calculation of confidence\n                intervals. The default is 0.95.\n            n_samples (int): Number of samples used to transform the covariance matrix\n                of the internal parameter vector into the covariance matrix of the\n                external parameters. For background information about internal and\n                external params see :ref:`implementation_of_constraints`. This is only\n                used if you are using constraints.\n            bounds_handling (str): One of \"clip\", \"raise\", \"ignore\". 
Determines how\n                bounds are handled. If \"clip\", confidence intervals are clipped at the\n                bounds. Standard errors are only adjusted if a sampling step is\n                necessary due to additional constraints. If \"raise\" and any lower or\n                upper bound is binding, we raise an Error. If \"ignore\", boundary\n                problems are simply ignored.\n            seed (int): Seed for the random number generator. Only used if there are\n                transforming constraints.\n\n        Returns:\n            Any: Pytree with the same structure as params containing lower bounds of\n                confidence intervals.\n            Any: Pytree with the same structure as params containing upper bounds of\n                confidence intervals.\n\n        \"\"\"\n        free_cov = self._get_free_cov(\n            method=method,\n            n_samples=n_samples,\n            bounds_handling=bounds_handling,\n            seed=seed,\n        )\n\n        free_lower, free_upper = calculate_ci(\n            free_values=self._free_estimates.values,\n            free_standard_errors=np.sqrt(np.diagonal(free_cov)),\n            ci_level=ci_level,\n        )\n\n        lower, upper = (\n            transform_free_values_to_params_tree(\n                values, free_params=self._free_estimates, params=self._params\n            )\n            for values in (free_lower, free_upper)\n        )\n        return lower, upper\n\n    def p_values(\n        self,\n        method=\"jacobian\",\n        n_samples=10_000,\n        bounds_handling=\"clip\",\n        seed=None,\n    ):\n        \"\"\"Calculate p-values.\n\n        Args:\n            method (str): One of \"jacobian\", \"hessian\", \"robust\", \"cluster_robust\",\n                \"strata_robust\". Default \"jacobian\". 
\"cluster_robust\" is only available\n                if design_info containts a columns called \"psu\" that identifies the\n                primary sampling unit. \"strata_robust\" is only available if the columns\n                \"strata\", \"fpc\" and \"psu\" are in design_info.\n            ci_level (float): Confidence level for the calculation of confidence\n                intervals. The default is 0.95.\n            n_samples (int): Number of samples used to transform the covariance matrix\n                of the internal parameter vector into the covariance matrix of the\n                external parameters. For background information about internal and\n                external params see :ref:`implementation_of_constraints`. This is only\n                used if you are using constraints.\n            bounds_handling (str): One of \"clip\", \"raise\", \"ignore\". Determines how\n                bounds are handled. If \"clip\", confidence intervals are clipped at the\n                bounds. Standard errors are only adjusted if a sampling step is\n                necessary due to additional constraints. If \"raise\" and any lower or\n                upper bound is binding, we raise an Error. If \"ignore\", boundary\n                problems are simply ignored.\n            seed (int): Seed for the random number generator. 
Only used if there are\n                transforming constraints.\n\n        Returns:\n            Any: Pytree with the same structure as params containing p-values.\n            Any: Pytree with the same structure as params containing p-values.\n\n        \"\"\"\n        free_cov = self._get_free_cov(\n            method=method,\n            n_samples=n_samples,\n            bounds_handling=bounds_handling,\n            seed=seed,\n        )\n\n        free_p_values = calculate_p_values(\n            free_values=self._free_estimates.values,\n            free_standard_errors=np.sqrt(np.diagonal(free_cov)),\n        )\n\n        p_values = transform_free_values_to_params_tree(\n            free_p_values, free_params=self._free_estimates, params=self._params\n        )\n        return p_values\n\n    def to_pickle(self, path):\n        \"\"\"Save the LikelihoodResult object to pickle.\n\n        Args:\n            path (str, pathlib.Path): A str or pathlib.path ending in .pkl or .pickle.\n\n        \"\"\"\n        to_pickle(self, path=path)\n\n\ndef _calculate_free_cov_ml(\n    method,\n    internal_estimates,\n    converter,\n    internal_jacobian,\n    internal_hessian,\n    n_samples,\n    design_info,\n    bounds_handling,\n    seed,\n):\n    if method == \"jacobian\":\n        int_cov = cov_jacobian(internal_jacobian)\n    elif method == \"hessian\":\n        int_cov = cov_hessian(internal_hessian)\n    elif method == \"robust\":\n        int_cov = cov_robust(jac=internal_jacobian, hess=internal_hessian)\n    elif method == \"cluster_robust\":\n        int_cov = cov_cluster_robust(\n            jac=internal_jacobian, hess=internal_hessian, design_info=design_info\n        )\n    elif method == \"strata_robust\":\n        int_cov = cov_strata_robust(\n            jac=internal_jacobian, hess=internal_hessian, design_info=design_info\n        )\n\n    rng = get_rng(seed)\n\n    free_cov = transform_covariance(\n        internal_params=internal_estimates,\n        
internal_cov=int_cov,\n        converter=converter,\n        rng=rng,\n        n_samples=n_samples,\n        bounds_handling=bounds_handling,\n    )\n    return free_cov\n"
  },
  {
    "path": "src/estimagic/estimate_msm.py",
    "content": "\"\"\"Do a method of simlated moments estimation.\"\"\"\n\nimport functools\nimport warnings\nfrom collections.abc import Callable\nfrom dataclasses import asdict, dataclass, field\nfrom functools import cached_property\nfrom typing import Any, Dict, Union\n\nimport numpy as np\nimport pandas as pd\nfrom pybaum import leaf_names, tree_just_flatten\n\nfrom estimagic.msm_covs import cov_optimal, cov_robust\nfrom estimagic.msm_sensitivity import (\n    calculate_actual_sensitivity_to_noise,\n    calculate_actual_sensitivity_to_removal,\n    calculate_fundamental_sensitivity_to_noise,\n    calculate_fundamental_sensitivity_to_removal,\n    calculate_sensitivity_to_bias,\n    calculate_sensitivity_to_weighting,\n)\nfrom estimagic.msm_weighting import get_weighting_matrix\nfrom estimagic.shared_covs import (\n    FreeParams,\n    calculate_ci,\n    calculate_estimation_summary,\n    calculate_free_estimates,\n    calculate_p_values,\n    calculate_summary_data_estimation,\n    get_derivative_case,\n    transform_covariance,\n    transform_free_cov_to_cov,\n    transform_free_values_to_params_tree,\n)\nfrom optimagic import deprecations, mark\nfrom optimagic.deprecations import (\n    replace_and_warn_about_deprecated_bounds,\n)\nfrom optimagic.differentiation.derivatives import first_derivative\nfrom optimagic.differentiation.numdiff_options import (\n    NumdiffPurpose,\n    get_default_numdiff_options,\n    pre_process_numdiff_options,\n)\nfrom optimagic.exceptions import InvalidFunctionError\nfrom optimagic.optimization.fun_value import LeastSquaresFunctionValue\nfrom optimagic.optimization.optimize import minimize\nfrom optimagic.optimization.optimize_result import OptimizeResult\nfrom optimagic.parameters.block_trees import block_tree_to_matrix, matrix_to_block_tree\nfrom optimagic.parameters.bounds import Bounds, pre_process_bounds\nfrom optimagic.parameters.conversion import Converter, get_converter\nfrom optimagic.parameters.space_conversion 
import InternalParams\nfrom optimagic.parameters.tree_registry import get_registry\nfrom optimagic.shared.check_option_dicts import (\n    check_optimization_options,\n)\nfrom optimagic.utilities import get_rng, to_pickle\n\n\ndef estimate_msm(\n    simulate_moments,\n    empirical_moments,\n    moments_cov,\n    params,\n    optimize_options,\n    *,\n    bounds=None,\n    constraints=None,\n    logging=None,\n    simulate_moments_kwargs=None,\n    weights=\"diagonal\",\n    jacobian=None,\n    jacobian_kwargs=None,\n    jacobian_numdiff_options=None,\n    # deprecated\n    log_options=None,\n    lower_bounds=None,\n    upper_bounds=None,\n    numdiff_options=None,\n):\n    \"\"\"Do a method of simulated moments or indirect inference estimation.\n\n    This is a high level interface for our lower level functions for minimization,\n    numerical differentiation, inference and sensitivity analysis. It does the full\n    workflow for MSM or indirect inference estimation with just one function call.\n\n    While we have good defaults, you can still configure each aspect of each step\n    via the optional arguments of this function. If you find it easier to do the\n    minimization separately, you can do so and just provide the optimal parameters as\n    ``params`` and set ``optimize_options=False``.\n\n    Args:\n        simulate_moments (callable): Function that takes params and potentially other\n            keyword arguments and returns a pytree with simulated moments. If the\n            function returns a dict containing the key ``\"simulated_moments\"`` we only\n            use the value corresponding to that key. Other entries are stored in the\n            log database if you use logging.\n\n        empirical_moments (pandas.Series): A pytree with the same structure as the\n            result of ``simulate_moments``.\n        moments_cov (pandas.DataFrame): A block-pytree containing the covariance\n            matrix of the empirical moments. 
This is typically calculated with\n            our ``get_moments_cov`` function.\n        params (pytree): A pytree containing the estimated or start parameters of the\n            model. If the supplied parameters are estimated parameters, set\n            optimize_options to False. Pytrees can be a numpy array, a pandas Series, a\n            DataFrame with \"value\" column, a float and any kind of (nested) dictionary\n            or list containing these elements. See :ref:`params` for examples.\n        optimize_options (dict, Algorithm, str or False): Keyword arguments that govern\n            the numerical optimization. Valid entries are all arguments of\n            :func:`~estimagic.optimization.optimize.minimize` except for those that can\n            be passed explicitly to ``estimate_msm``.  If you pass False as\n            ``optimize_options`` you signal that ``params`` are already\n            the optimal parameters and no numerical optimization is needed. If you pass\n            a str as optimize_options it is used as the ``algorithm`` option.\n        bounds: Lower and upper bounds on the parameters. The most general and preferred\n            way to specify bounds is an `optimagic.Bounds` object that collects lower,\n            upper, soft_lower and soft_upper bounds. The soft bounds are used for\n            sampling based optimizers but are not enforced during optimization. Each\n            bound type mirrors the structure of params. Check our how-to guide on bounds\n            for examples. 
If params is a flat numpy array, you can also provide bounds\n            via any format that is supported by scipy.optimize.minimize.\n        simulate_moments_kwargs (dict): Additional keyword arguments for\n            ``simulate_moments``.\n        weights (str): One of \"diagonal\" (default), \"identity\" or \"optimal\".\n            Note that \"optimal\" refers to the asymptotically optimal weighting matrix\n            and is often not a good choice due to large finite sample bias.\n        constraints (list, dict): List with constraint dictionaries or single dict.\n            See :ref:`constraints`.\n        logging (pathlib.Path, str or False): Path to sqlite3 file (which typically has\n            the file extension ``.db``. If the file does not exist, it will be created.\n\n        log_options (dict): Additional keyword arguments to configure the logging.\n\n            - \"fast_logging\" (bool):\n                A boolean that determines if \"unsafe\" settings are used to speed up\n                write processes to the database. This should only be used for very short\n                running criterion functions where the main purpose of the log is a\n                monitoring and it would not be catastrophic to get a corrupted\n                database in case of a sudden system shutdown. If one evaluation of the\n                criterion function (and gradient if applicable) takes more than 100 ms,\n                the logging overhead is negligible.\n            - \"if_table_exists\" (str):\n                One of \"extend\", \"replace\", \"raise\". What to do if the tables we want to\n                write to already exist. Default \"extend\".\n            - \"if_database_exists\" (str):\n                One of \"extend\", \"replace\", \"raise\". What to do if the database we want\n                to write to already exists. 
Default \"extend\".\n        jacobian (callable): A function that takes ``params`` and\n            potentially other keyword arguments and returns the jacobian of\n            simulate_moments with respect to the params.\n        jacobian_kwargs (dict): Additional keyword arguments for the jacobian function.\n        jacobian_numdiff_options (dict): Keyword arguments for the calculation of\n            numerical derivatives for the calculation of standard errors. See\n            :ref:`first_derivative` for details. Note that by default we increase the\n            step_size by a factor of 2 compared to the rule of thumb for optimal\n            step sizes. This is because many msm criterion functions are slightly noisy.\n\n    Returns:\n            dict: The estimated parameters, standard errors and sensitivity measures\n                and covariance matrix of the parameters.\n\n    \"\"\"\n    # ==================================================================================\n    # handle deprecations\n    # ==================================================================================\n\n    bounds = replace_and_warn_about_deprecated_bounds(\n        lower_bounds=lower_bounds,\n        upper_bounds=upper_bounds,\n        bounds=bounds,\n    )\n\n    if numdiff_options is not None:\n        deprecations.throw_numdiff_options_deprecated_in_estimate_msm_future_warning()\n        if jacobian_numdiff_options is None:\n            jacobian_numdiff_options = numdiff_options\n\n    deprecations.throw_dict_constraints_future_warning_if_required(constraints)\n\n    # ==================================================================================\n    # Check and process inputs\n    # ==================================================================================\n\n    bounds = pre_process_bounds(bounds)\n    # TODO: Replace dict_constraints with constraints, once we deprecate dictionary\n    # constraints.\n    dict_constraints = 
deprecations.pre_process_constraints(constraints)\n    jacobian_numdiff_options = pre_process_numdiff_options(jacobian_numdiff_options)\n    if jacobian_numdiff_options is None:\n        jacobian_numdiff_options = get_default_numdiff_options(\n            purpose=NumdiffPurpose.ESTIMATE_JACOBIAN\n        )\n\n    if weights not in [\"diagonal\", \"optimal\", \"identity\"]:\n        raise NotImplementedError(\"Custom weighting matrices are not yet implemented.\")\n\n    is_optimized = optimize_options is False\n\n    if not is_optimized:\n        # If optimize_options is not a dictionary and not False, we assume it represents\n        # an algorithm. The actual testing of whether it is a valid algorithm is done\n        # when `minimize` is called.\n        if not isinstance(optimize_options, dict):\n            optimize_options = {\"algorithm\": optimize_options}\n\n        check_optimization_options(\n            optimize_options,\n            usage=\"estimate_msm\",\n            algorithm_mandatory=True,\n        )\n\n    jac_case = get_derivative_case(jacobian)\n\n    weights, internal_weights = get_weighting_matrix(\n        moments_cov=moments_cov,\n        method=weights,\n        empirical_moments=empirical_moments,\n        return_type=\"pytree_and_array\",\n    )\n\n    internal_moments_cov = block_tree_to_matrix(\n        moments_cov,\n        outer_tree=empirical_moments,\n        inner_tree=empirical_moments,\n    )\n\n    jacobian_kwargs = {} if jacobian_kwargs is None else jacobian_kwargs\n    simulate_moments_kwargs = (\n        {} if simulate_moments_kwargs is None else simulate_moments_kwargs\n    )\n\n    # ==================================================================================\n    # Calculate estimates via minimization (if necessary)\n    # ==================================================================================\n\n    if is_optimized:\n        estimates = params\n        opt_res = None\n    else:\n        funcs = 
get_msm_optimization_functions(\n            simulate_moments=simulate_moments,\n            empirical_moments=empirical_moments,\n            weights=weights,\n            simulate_moments_kwargs=simulate_moments_kwargs,\n            # Always pass None because we do not support closed form jacobians during\n            # optimization yet. Otherwise we would get a NotImplementedError\n            jacobian=None,\n            jacobian_kwargs=jacobian_kwargs,\n        )\n\n        opt_res = minimize(\n            bounds=bounds,\n            constraints=constraints,\n            logging=logging,\n            log_options=log_options,\n            params=params,\n            **funcs,  # contains the criterion func and possibly more\n            **optimize_options,\n        )\n\n        estimates = opt_res.params\n\n    # ==================================================================================\n    # do first function evaluations\n    # ==================================================================================\n\n    try:\n        sim_mom_eval = simulate_moments(estimates, **simulate_moments_kwargs)\n    except (KeyboardInterrupt, SystemExit):\n        raise\n    except Exception as e:\n        msg = \"Error while evaluating simulate_moments at estimated params.\"\n        raise InvalidFunctionError(msg) from e\n\n    if callable(jacobian):\n        try:\n            jacobian_eval = jacobian(estimates, **jacobian_kwargs)\n        except (KeyboardInterrupt, SystemExit):\n            raise\n        except Exception as e:\n            msg = \"Error while evaluating derivative at estimated params.\"\n            raise InvalidFunctionError(msg) from e\n\n    else:\n        jacobian_eval = None\n\n    # ==================================================================================\n    # get converter for params and function outputs\n    # ==================================================================================\n\n    if isinstance(sim_mom_eval, 
dict) and \"simulated_moments\" in sim_mom_eval:\n        func_eval = {\"contributions\": sim_mom_eval[\"simulated_moments\"]}\n    else:\n        func_eval = {\"contributions\": sim_mom_eval}\n\n    converter, internal_estimates = get_converter(\n        params=estimates,\n        constraints=dict_constraints,\n        bounds=bounds,\n        func_eval=func_eval,\n        solver_type=\"contributions\",\n        derivative_eval=jacobian_eval,\n    )\n\n    # ==================================================================================\n    # Calculate internal jacobian\n    # ==================================================================================\n\n    if jac_case == \"closed-form\":\n        x = converter.params_to_internal(estimates)\n        int_jac = converter.derivative_to_internal(jacobian_eval, x)\n    else:\n\n        def func(x):\n            params = converter.params_from_internal(x)\n            sim_mom = simulate_moments(params, **simulate_moments_kwargs)\n            if isinstance(sim_mom, dict) and \"simulated_moments\" in sim_mom:\n                sim_mom = sim_mom[\"simulated_moments\"]\n            registry = get_registry(extended=True)\n            out = np.array(tree_just_flatten(sim_mom, registry=registry))\n            return out\n\n        int_jac = first_derivative(\n            func=func,\n            params=internal_estimates.values,\n            bounds=Bounds(\n                lower=internal_estimates.lower_bounds,\n                upper=internal_estimates.upper_bounds,\n            ),\n            error_handling=\"continue\",\n            **asdict(jacobian_numdiff_options),\n        ).derivative\n\n    # ==================================================================================\n    # Calculate external jac (if no constraints and not closed form )\n    # ==================================================================================\n\n    if dict_constraints in [None, []] and jacobian_eval is None and int_jac 
is not None:\n        jacobian_eval = matrix_to_block_tree(\n            int_jac,\n            outer_tree=empirical_moments,\n            inner_tree=estimates,\n        )\n\n    if jacobian_eval is None:\n        _no_jac_reason = (\n            \"no closed form jacobian was provided and there are constraints\"\n        )\n    else:\n        _no_jac_reason = None\n\n    # ==================================================================================\n    # Create MomentsResult\n    # ==================================================================================\n\n    free_estimates = calculate_free_estimates(estimates, internal_estimates)\n\n    res = MomentsResult(\n        _params=estimates,\n        _weights=weights,\n        _converter=converter,\n        _optimize_result=opt_res,\n        _internal_weights=internal_weights,\n        _internal_moments_cov=internal_moments_cov,\n        _internal_jacobian=int_jac,\n        _jacobian=jacobian_eval,\n        _no_jacobian_reason=_no_jac_reason,\n        _empirical_moments=empirical_moments,\n        _internal_estimates=internal_estimates,\n        _free_estimates=free_estimates,\n        _has_constraints=dict_constraints not in [None, []],\n    )\n    return res\n\n\ndef get_msm_optimization_functions(\n    simulate_moments,\n    empirical_moments,\n    weights,\n    *,\n    simulate_moments_kwargs=None,\n    jacobian=None,\n    jacobian_kwargs=None,\n):\n    \"\"\"Construct criterion functions and their derivatives for msm estimation.\n\n    Args:\n        simulate_moments (callable): Function that takes params and potentially other\n            keyword arguments and returns simulated moments as a pandas Series.\n            Alternatively, the function can return a dict with any number of entries\n            as long as one of those entries is \"simulated_moments\".\n        empirical_moments (pandas.Series): A pandas series with the empirical\n            equivalents of the simulated moments.\n        
weights (pytree): The weighting matrix as block pytree.\n        simulate_moments_kwargs (dict): Additional keyword arguments for\n            ``simulate_moments``.\n        jacobian (callable or pandas.DataFrame): A function that take ``params`` and\n            potentially other keyword arguments and returns the jacobian of\n            simulate_moments with respect to the params. Alternatively you can pass\n            a pandas.DataFrame with the jacobian at the optimal parameters. This is\n            only possible if you pass ``optimize_options=False``.\n        jacobian_kwargs (dict): Additional keyword arguments for jacobian.\n\n    Returns:\n        dict: Dictionary containing at least the entry \"fun\". If enough inputs\n            are provided it also contains the entries \"jac\" and\n            \"fun_and_jac\". All values are functions that take params\n            as only argument.\n\n    \"\"\"\n    flat_weights = block_tree_to_matrix(\n        weights,\n        outer_tree=empirical_moments,\n        inner_tree=empirical_moments,\n    )\n\n    chol_weights = np.linalg.cholesky(flat_weights)\n\n    registry = get_registry(extended=True)\n    flat_emp_mom = tree_just_flatten(empirical_moments, registry=registry)\n\n    _simulate_moments = _partial_kwargs(simulate_moments, simulate_moments_kwargs)\n    _jacobian = _partial_kwargs(jacobian, jacobian_kwargs)\n\n    criterion = mark.least_squares(\n        functools.partial(\n            _msm_criterion,\n            simulate_moments=_simulate_moments,\n            flat_empirical_moments=flat_emp_mom,\n            chol_weights=chol_weights,\n            registry=registry,\n        )\n    )\n\n    out = {\"fun\": criterion}\n\n    if _jacobian is not None:\n        raise NotImplementedError(\n            \"Closed form jacobians are not yet supported in estimate_msm\"\n        )\n\n    return out\n\n\ndef _msm_criterion(\n    params, simulate_moments, flat_empirical_moments, chol_weights, registry\n):\n    
\"\"\"Calculate msm criterion given parameters and building blocks.\"\"\"\n    simulated = simulate_moments(params)\n    if isinstance(simulated, dict) and \"simulated_moments\" in simulated:\n        simulated = simulated[\"simulated_moments\"]\n    if isinstance(simulated, np.ndarray) and simulated.ndim == 1:\n        simulated_flat = simulated\n    else:\n        simulated_flat = np.array(tree_just_flatten(simulated, registry=registry))\n\n    deviations = simulated_flat - flat_empirical_moments\n    residuals = deviations @ chol_weights\n\n    return LeastSquaresFunctionValue(value=residuals)\n\n\ndef _partial_kwargs(func, kwargs):\n    \"\"\"Partial keyword arguments into a function.\n\n    In contrast to normal partial this works if kwargs in None. If func is not a\n    callable it simply returns None.\n\n    \"\"\"\n    if isinstance(func, Callable):\n        if kwargs not in (None, {}):\n            out = functools.partial(func, **kwargs)\n        else:\n            out = func\n    else:\n        out = None\n\n    return out\n\n\n@dataclass\nclass MomentsResult:\n    \"\"\"Method of moments estimation results object.\"\"\"\n\n    _params: Any\n    _internal_estimates: InternalParams\n    _free_estimates: FreeParams\n    _weights: Any\n    _converter: Converter\n    _internal_moments_cov: np.ndarray\n    _internal_weights: np.ndarray\n    _internal_jacobian: np.ndarray\n    _empirical_moments: Any\n    _has_constraints: bool\n    _optimize_result: Union[OptimizeResult, None] = None\n    _jacobian: Any = None\n    _no_jacobian_reason: Union[str, None] = None\n    _cache: Dict = field(default_factory=dict)\n\n    def _get_free_cov(self, method, n_samples, bounds_handling, seed):\n        if method not in {\"optimal\", \"robust\"}:\n            msg = f\"Invalid method {method}. 
method must be in {'optimal', 'robust'}\"\n            raise ValueError(msg)\n        args = (method, n_samples, bounds_handling, seed)\n        is_cached = args in self._cache\n\n        if is_cached:\n            free_cov = self._cache[args]\n        else:\n            free_cov = _calculate_free_cov_msm(\n                internal_estimates=self._internal_estimates,\n                internal_jacobian=self._internal_jacobian,\n                internal_moments_cov=self._internal_moments_cov,\n                internal_weights=self._internal_weights,\n                converter=self._converter,\n                method=method,\n                n_samples=n_samples,\n                bounds_handling=bounds_handling,\n                seed=seed,\n            )\n            if seed is not None:\n                self._cache[args] = free_cov\n            elif self._converter.has_transforming_constraints:\n                msg = (\n                    \"seed is set to None and constraints are transforming. This leads \"\n                    \"to randomness in the result. 
To avoid random behavior, choose a \"\n                    \"non-None seed.\"\n                )\n                warnings.warn(msg)\n\n        return free_cov\n\n    @property\n    def params(self):\n        return self._params\n\n    @property\n    def optimize_result(self):\n        return self._optimize_result\n\n    @property\n    def weights(self):\n        return self._weights\n\n    @property\n    def jacobian(self):\n        return self._jacobian\n\n    @cached_property\n    def _se(self):\n        return self.se()\n\n    @cached_property\n    def _cov(self):\n        return self.cov()\n\n    @cached_property\n    def _summary(self):\n        return self.summary()\n\n    @cached_property\n    def _ci(self):\n        return self.ci()\n\n    @cached_property\n    def _p_values(self):\n        return self.p_values()\n\n    def se(\n        self,\n        method=\"robust\",\n        n_samples=10_000,\n        bounds_handling=\"clip\",\n        seed=None,\n    ):\n        \"\"\"Calculate standard errors.\n\n        Args:\n            method (str): One of \"robust\", \"optimal\". Despite the name, \"optimal\" is\n                not recommended in finite samples and \"optimal\" standard errors are\n                only valid if the asymptotically optimal weighting matrix has been\n                used. It is only supported because it is needed to calculate\n                sensitivity measures.\n            n_samples (int): Number of samples used to transform the covariance matrix\n                of the internal parameter vector into the covariance matrix of the\n                external parameters. For background information about internal and\n                external params see :ref:`implementation_of_constraints`. This is only\n                used if you are using constraints.\n            bounds_handling (str): One of \"clip\", \"raise\", \"ignore\". Determines how\n                bounds are handled. 
If \"clip\", confidence intervals are clipped at the\n                bounds. Standard errors are only adjusted if a sampling step is\n                necessary due to additional constraints. If \"raise\" and any lower or\n                upper bound is binding, we raise an Error. If \"ignore\", boundary\n                problems are simply ignored.\n            seed (int): Seed for the random number generator. Only used if there are\n                transforming constraints.\n\n\n        Returns:\n            Any: A pytree with the same structure as params containing standard errors\n                for the parameter estimates.\n\n        \"\"\"\n        free_cov = self._get_free_cov(\n            method=method,\n            n_samples=n_samples,\n            bounds_handling=bounds_handling,\n            seed=seed,\n        )\n\n        free_se = np.sqrt(np.diagonal(free_cov))\n\n        se = transform_free_values_to_params_tree(\n            values=free_se,\n            free_params=self._free_estimates,\n            params=self._params,\n        )\n        return se\n\n    def cov(\n        self,\n        method=\"robust\",\n        n_samples=10_000,\n        bounds_handling=\"clip\",\n        return_type=\"pytree\",\n        seed=None,\n    ):\n        \"\"\"Calculate the variance-covariance matrix of the estimated parameters.\n\n        Args:\n            method (str): One of \"robust\", \"optimal\". Despite the name, \"optimal\" is\n                not recommended in finite samples and \"optimal\" standard errors are\n                only valid if the asymptotically optimal weighting matrix has been\n                used. It is only supported because it is needed to calculate\n                sensitivity measures.\n            n_samples (int): Number of samples used to transform the covariance matrix\n                of the internal parameter vector into the covariance matrix of the\n                external parameters. 
For background information about internal and\n                external params see :ref:`implementation_of_constraints`. This is only\n                used if you are using constraints.\n            bounds_handling (str): One of \"clip\", \"raise\", \"ignore\". Determines how\n                bounds are handled. If \"clip\", confidence intervals are clipped at the\n                bounds. Standard errors are only adjusted if a sampling step is\n                necessary due to additional constraints. If \"raise\" and any lower or\n                upper bound is binding, we raise an Error. If \"ignore\", boundary\n                problems are simply ignored.\n            return_type (str): One of \"pytree\", \"array\" or \"dataframe\". Default pytree.\n                If \"array\", a 2d numpy array with the covariance is returned. If\n                \"dataframe\", a pandas DataFrame with parameter names in the\n                index and columns are returned.\n            seed (int): Seed for the random number generator. Only used if there are\n                transforming constraints.\n\n\n        Returns:\n            Any: The covariance matrix of the estimated parameters as block-pytree or\n                numpy array.\n\n        \"\"\"\n        free_cov = self._get_free_cov(\n            method=method,\n            n_samples=n_samples,\n            bounds_handling=bounds_handling,\n            seed=seed,\n        )\n        cov = transform_free_cov_to_cov(\n            free_cov=free_cov,\n            free_params=self._free_estimates,\n            params=self._params,\n            return_type=return_type,\n        )\n        return cov\n\n    def summary(\n        self,\n        method=\"robust\",\n        n_samples=10_000,\n        ci_level=0.95,\n        bounds_handling=\"clip\",\n        seed=None,\n    ):\n        \"\"\"Create a summary of estimation results.\n\n        Args:\n            method (str): One of \"robust\", \"optimal\". 
Despite the name, \"optimal\" is\n                not recommended in finite samples and \"optimal\" standard errors are\n                only valid if the asymptotically optimal weighting matrix has been\n                used. It is only supported because it is needed to calculate\n                sensitivity measures.\n            ci_level (float): Confidence level for the calculation of confidence\n                intervals. The default is 0.95.\n            n_samples (int): Number of samples used to transform the covariance matrix\n                of the internal parameter vector into the covariance matrix of the\n                external parameters. For background information about internal and\n                external params see :ref:`implementation_of_constraints`. This is only\n                used if you are using constraints.\n            bounds_handling (str): One of \"clip\", \"raise\", \"ignore\". Determines how\n                bounds are handled. If \"clip\", confidence intervals are clipped at the\n                bounds. Standard errors are only adjusted if a sampling step is\n                necessary due to additional constraints. If \"raise\" and any lower or\n                upper bound is binding, we raise an Error. If \"ignore\", boundary\n                problems are simply ignored.\n            seed (int): Seed for the random number generator. 
Only used if there are\n                transforming constraints.\n\n        Returns:\n            Any: The estimation summary as pytree of DataFrames.\n\n        \"\"\"\n        summary_data = calculate_summary_data_estimation(\n            self,\n            free_estimates=self._free_estimates,\n            method=method,\n            ci_level=ci_level,\n            n_samples=n_samples,\n            bounds_handling=bounds_handling,\n            seed=seed,\n        )\n        summary = calculate_estimation_summary(\n            summary_data=summary_data,\n            names=self._free_estimates.all_names,\n            free_names=self._free_estimates.free_names,\n        )\n        return summary\n\n    def ci(\n        self,\n        method=\"robust\",\n        n_samples=10_000,\n        ci_level=0.95,\n        bounds_handling=\"clip\",\n        seed=None,\n    ):\n        \"\"\"Calculate confidence intervals.\n\n        Args:\n            method (str): One of \"robust\", \"optimal\". Despite the name, \"optimal\" is\n                not recommended in finite samples and \"optimal\" standard errors are\n                only valid if the asymptotically optimal weighting matrix has been\n                used. It is only supported because it is needed to calculate\n                sensitivity measures.\n            ci_level (float): Confidence level for the calculation of confidence\n                intervals. The default is 0.95.\n            n_samples (int): Number of samples used to transform the covariance matrix\n                of the internal parameter vector into the covariance matrix of the\n                external parameters. For background information about internal and\n                external params see :ref:`implementation_of_constraints`. This is only\n                used if you are using constraints.\n            bounds_handling (str): One of \"clip\", \"raise\", \"ignore\". Determines how\n                bounds are handled. 
If \"clip\", confidence intervals are clipped at the\n                bounds. Standard errors are only adjusted if a sampling step is\n                necessary due to additional constraints. If \"raise\" and any lower or\n                upper bound is binding, we raise an Error. If \"ignore\", boundary\n                problems are simply ignored.\n            seed (int): Seed for the random number generator. Only used if there are\n                transforming constraints.\n\n\n        Returns:\n            Any: Pytree with the same structure as params containing lower bounds of\n                confidence intervals.\n            Any: Pytree with the same structure as params containing upper bounds of\n                confidence intervals.\n\n        \"\"\"\n        free_cov = self._get_free_cov(\n            method=method,\n            n_samples=n_samples,\n            bounds_handling=bounds_handling,\n            seed=seed,\n        )\n\n        free_lower, free_upper = calculate_ci(\n            free_values=self._free_estimates.values,\n            free_standard_errors=np.sqrt(np.diagonal(free_cov)),\n            ci_level=ci_level,\n        )\n\n        lower, upper = (\n            transform_free_values_to_params_tree(\n                values, free_params=self._free_estimates, params=self._params\n            )\n            for values in (free_lower, free_upper)\n        )\n        return lower, upper\n\n    def p_values(\n        self,\n        method=\"robust\",\n        n_samples=10_000,\n        bounds_handling=\"clip\",\n        seed=None,\n    ):\n        \"\"\"Calculate p-values.\n\n        Args:\n            method (str): One of \"robust\", \"optimal\". Despite the name, \"optimal\" is\n                not recommended in finite samples and \"optimal\" standard errors are\n                only valid if the asymptotically optimal weighting matrix has been\n                used. 
It is only supported because it is needed to calculate\n                sensitivity measures.\n            n_samples (int): Number of samples used to transform the covariance matrix\n                of the internal parameter vector into the covariance matrix of the\n                external parameters. For background information about internal and\n                external params see :ref:`implementation_of_constraints`. This is only\n                used if you are using constraints.\n            bounds_handling (str): One of \"clip\", \"raise\", \"ignore\". Determines how\n                bounds are handled. If \"clip\", confidence intervals are clipped at the\n                bounds. Standard errors are only adjusted if a sampling step is\n                necessary due to additional constraints. If \"raise\" and any lower or\n                upper bound is binding, we raise an Error. If \"ignore\", boundary\n                problems are simply ignored.\n            seed (int): Seed for the random number generator. 
Only used if there are\n                transforming constraints.\n\n        Returns:\n            Any: Pytree with the same structure as params containing p-values.\n            Any: Pytree with the same structure as params containing p-values.\n\n        \"\"\"\n        free_cov = self._get_free_cov(\n            method=method,\n            n_samples=n_samples,\n            bounds_handling=bounds_handling,\n            seed=seed,\n        )\n\n        free_p_values = calculate_p_values(\n            free_values=self._free_estimates.values,\n            free_standard_errors=np.sqrt(np.diagonal(free_cov)),\n        )\n\n        p_values = transform_free_values_to_params_tree(\n            free_p_values, free_params=self._free_estimates, params=self._params\n        )\n        return p_values\n\n    def sensitivity(\n        self,\n        kind=\"bias\",\n        n_samples=10_000,\n        bounds_handling=\"clip\",\n        seed=None,\n        return_type=\"pytree\",\n    ):\n        \"\"\"Calculate sensitivity measures for moments estimates.\n\n        The sensitivity measures are based on the following papers:\n\n        Andrews, Gentzkow & Shapiro (2017, Quarterly Journal of Economics)\n\n        Honore, Jorgensen & de Paula\n        (https://onlinelibrary.wiley.com/doi/full/10.1002/jae.2779)\n\n        In the papers the different kinds of sensitivity measures are just called\n        m1, e2, e3, e4, e5 and e6. We try to give them more informative names, but\n        list the original names for references.\n\n        Args:\n            kind (str): The following kinds are supported:\n\n                - \"bias\":\n                    Origally m1. How strongly would the parameter estimates be biased if\n                    the kth moment was misspecified, i.e not zero in expectation?\n                - \"noise_fundamental\":\n                    Originally e2. 
How much precision would be lost if the kth moment\n                    was subject to a little additional noise if the optimal weighting\n                    matrix was used?\n                - \"noise\":\n                    Originally e3. How much precision would be lost if the kth moment\n                    was subjet to a little additional noise?\n                - \"removal\":\n                    Originally e4. How much precision would be lost if the kth moment\n                    was excluded from the estimation?\n                - \"removal_fundamental\":\n                    Originally e5. How much precision would be lost if the kth moment\n                    was excluded from the estimation if the asymptotically optimal\n                    weighting matrix was used.\n                - \"weighting\":\n                    Originally e6. How would the precision change if the weight of the\n                    kth moment is increased a little?\n            n_samples (int): Number of samples used to transform the covariance matrix\n                of the internal parameter vector into the covariance matrix of the\n                external parameters. For background information about internal and\n                external params see :ref:`implementation_of_constraints`. This is only\n                used if you are using constraints.\n            bounds_handling (str): One of \"clip\", \"raise\", \"ignore\". Determines how\n                bounds are handled. If \"clip\", confidence intervals are clipped at the\n                bounds. Standard errors are only adjusted if a sampling step is\n                necessary due to additional constraints. If \"raise\" and any lower or\n                upper bound is binding, we raise an Error. If \"ignore\", boundary\n                problems are simply ignored.\n            seed (int): Seed for the random number generator. 
Only used if there are\n                transforming constraints.\n            return_type (str): One of \"array\", \"dataframe\" or \"pytree\". Default pytree.\n                If your params or moments have a very nested format, return_type\n                \"dataframe\" might be the better choice.\n\n        Returns:\n            Any: The sensitivity measure as a pytree, numpy array or DataFrame.\n                In 2d formats, the sensitivity measures have one row per estimated\n                parameter and one column per moment.\n\n        \"\"\"\n        if self._has_constraints:\n            raise NotImplementedError(\n                \"Sensitivity measures with constraints are not yet implemented.\"\n            )\n        jac = self._internal_jacobian\n        weights = self._internal_weights\n        moments_cov = self._internal_moments_cov\n        params_cov = self._get_free_cov(\n            method=\"robust\",\n            n_samples=n_samples,\n            bounds_handling=bounds_handling,\n            seed=seed,\n        )\n\n        weights_opt = get_weighting_matrix(\n            moments_cov=moments_cov,\n            method=\"optimal\",\n            empirical_moments=self._empirical_moments,\n        )\n        params_cov_opt = cov_optimal(jac, weights_opt)\n\n        if kind == \"bias\":\n            raw = calculate_sensitivity_to_bias(jac=jac, weights=weights)\n        elif kind == \"noise_fundamental\":\n            raw = calculate_fundamental_sensitivity_to_noise(\n                jac=jac,\n                weights=weights_opt,\n                moments_cov=moments_cov,\n                params_cov_opt=params_cov_opt,\n            )\n        elif kind == \"noise\":\n            m1 = calculate_sensitivity_to_bias(jac=jac, weights=weights)\n            raw = calculate_actual_sensitivity_to_noise(\n                sensitivity_to_bias=m1,\n                weights=weights,\n                moments_cov=moments_cov,\n                
params_cov=params_cov,\n            )\n        elif kind == \"removal\":\n            raw = calculate_actual_sensitivity_to_removal(\n                jac=jac,\n                weights=weights,\n                moments_cov=moments_cov,\n                params_cov=params_cov,\n            )\n        elif kind == \"removal_fundamental\":\n            raw = calculate_fundamental_sensitivity_to_removal(\n                jac=jac,\n                moments_cov=moments_cov,\n                params_cov_opt=params_cov_opt,\n            )\n\n        elif kind == \"weighting\":\n            raw = calculate_sensitivity_to_weighting(\n                jac=jac,\n                weights=weights,\n                moments_cov=moments_cov,\n                params_cov=params_cov,\n            )\n        else:\n            raise ValueError(f\"Invalid kind: {kind}\")\n\n        if return_type == \"array\":\n            out = raw\n        elif return_type == \"pytree\":\n            out = matrix_to_block_tree(\n                raw,\n                outer_tree=self._params,\n                inner_tree=self._empirical_moments,\n            )\n        elif return_type == \"dataframe\":\n            registry = get_registry(extended=True)\n            row_names = self._internal_estimates.names\n            col_names = leaf_names(self._empirical_moments, registry=registry)\n            out = pd.DataFrame(\n                data=raw,\n                index=row_names,\n                columns=col_names,\n            )\n        else:\n            msg = (\n                f\"Invalid return type: {return_type}. 
Valid are 'pytree', 'array' \"\n                \"and 'dataframe'\"\n            )\n            raise ValueError(msg)\n        return out\n\n    def to_pickle(self, path):\n        \"\"\"Save the MomentsResult object to pickle.\n\n        Args:\n            path (str, pathlib.Path): A str or pathlib.path ending in .pkl or .pickle.\n\n        \"\"\"\n        to_pickle(self, path=path)\n\n\ndef _calculate_free_cov_msm(\n    internal_estimates,\n    internal_jacobian,\n    internal_moments_cov,\n    internal_weights,\n    converter,\n    method,\n    n_samples,\n    bounds_handling,\n    seed,\n):\n    if method == \"optimal\":\n        internal_cov = cov_optimal(internal_jacobian, internal_weights)\n    else:\n        internal_cov = cov_robust(\n            internal_jacobian, internal_weights, internal_moments_cov\n        )\n\n    rng = get_rng(seed)\n\n    free_cov = transform_covariance(\n        internal_params=internal_estimates,\n        internal_cov=internal_cov,\n        converter=converter,\n        n_samples=n_samples,\n        rng=rng,\n        bounds_handling=bounds_handling,\n    )\n    return free_cov\n"
  },
  {
    "path": "src/estimagic/estimation_summaries.py",
    "content": ""
  },
  {
    "path": "src/estimagic/estimation_table.py",
    "content": "import re\nfrom copy import deepcopy\nfrom functools import partial\nfrom pathlib import Path\nfrom warnings import warn\n\nimport numpy as np\nimport pandas as pd\n\nfrom optimagic.shared.compat import pd_df_map\n\nsuppress_performance_warnings = np.testing.suppress_warnings()\nsuppress_performance_warnings.filter(category=pd.errors.PerformanceWarning)\n\n\n@suppress_performance_warnings\ndef estimation_table(\n    models,\n    *,\n    return_type=\"dataframe\",\n    render_options=None,\n    show_col_names=True,\n    show_col_groups=None,\n    show_index_names=False,\n    show_inference=True,\n    show_stars=True,\n    show_footer=True,\n    custom_param_names=None,\n    custom_col_names=None,\n    custom_col_groups=None,\n    custom_index_names=None,\n    custom_notes=None,\n    confidence_intervals=False,\n    significance_levels=(0.1, 0.05, 0.01),\n    append_notes=True,\n    notes_label=\"Note:\",\n    stats_options=None,\n    number_format=(\"{0:.3g}\", \"{0:.5f}\", \"{0:.4g}\"),\n    add_trailing_zeros=True,\n    escape_special_characters=True,\n    siunitx_warning=True,\n):\n    r\"\"\"Generate html or LaTex tables provided (lists of) of models.\n\n    The function can create publication quality tables in various formats from\n    statsmodels or estimagic results.\n\n    It allows for extensive customization via optional arguments and almost limitless\n    flexibility when using a two-stage approach where the ``return_type`` is set to\n    ``\"render_inputs\"``, the resulting dictionary representation of the table is\n    modified and that modified version is then passed to ``render_latex`` or\n    ``render_html``.\n\n    The formatting of the numbers in the table is completely configurable via the\n    ``number_format`` argument. By default we round to three significant digits (i.e.\n    the three leftmost non-zero digits are displayed). 
This is very different from\n    other table packages and motivated by the fact that most estimation tables give\n    a wrong feeling of precision by showing too many decimal points.\n\n    Args:\n        models (list): list of estimation results. The models can come from\n            statmodels or be constructed from the outputs of `estimagic.estimate_ml`\n            or `estimagic.estimate_msm`. With a little bit of work it is also possible\n            to construct them out of R or other results. If a model is not a\n            statsmodels results they must be dictionaries with the following entries:\n            \"params\" (a DataFrame with value column), \"info\" (a dictionary with summary\n            statistics such as \"n_obs\", \"rsquared\", ...) and \"name\" (a string), or a\n            DataFrame with value column. If a models is a statsmodels result,\n            model.endog_names is used as name and the rest is extracted from\n            corresponding statsmodels attributes. The model names do not have to be\n            unique but if they are not, models with the same name need to be grouped\n            together.\n        return_type (str): Can be \"dataframe\", \"latex\", \"html\", \"render_inputs\" or a\n            file path with the extension .tex or .html. If \"render_inputs\" is passed,\n            a dictionary with the entries \"body\", \"footer\" and other\n            information is returned. The entries can be modified by the user (\n            e.g. change formatting, renameof columns or index, ...) and then passed\n            to ``render_latex`` or ``render_html``. Default \"dataframe\".\n        render_options (dict): a dictionary with keyword arguments that are passed to\n            df.style.to_latex or df.style.to_html, depending on the return_type.\n            The default is None.\n        show_col_names (bool): If True, the column names are displayed. 
The default\n            column names are the model names if the model names are unique, otherwise\n            (1), (2), etc.. Default True.\n        show_col_groups (bool): If True, the column groups are displayed. The default\n            column groups are the model names if the model names are not unique and\n            undefined otherwise. Default None. None means that the column groups are\n            displayed if they are defined.\n        show_index_names (bool): If True, the index names are displayed. Default False.\n            This is mostly relevant when working with estimagic style params DataFrames\n            with a MultiIndex.\n        show_inference(bool): If True, inference (standard errors or confidence\n            intervals) are displayed below parameter values. Default True.\n        show_stars (bool): a boolean variable for displaying significance stars.\n            Default is True.\n        show_footer (bool): a boolean variable for displaying statistics, e.g. R2,\n            Obs numbers. Default is True. Which statistics are displayed and how they\n            are labeled can be determined via ``stats_options``.\n        custom_param_names (dict): Dictionary that is used to rename parameters. The\n            keys are the old parameter names or index entries. The values are\n            the new names. Default None.\n        custom_col_names (dict or list): A list of column names or dict to rename the\n            default column names. The default column names are the model names if the\n            model names are unique, otherwise (1), (2), etc..\n        custom_col_groups (dict or list): A list of column group or dict to rename\n            the default column groups. The default column groups are the model names\n            if the model names are not unique and undefined otherwise.\n        custom_index_names (dict or list): Dictionary or list to set the names of the\n            index levels of the parameters. 
This is mostly relevant when working with\n            estimagic style params DataFrames with a MultiIndex and only used if\n            \"index_names\" is set to True in the render_options. Default None.\n        custom_notes (list): A list of strings for additional notes. Default is None.\n        confidence_intervals (bool): If True, display confidence intervals as inference\n            values. Display standard errors otherwise. Default False.\n        significance_levels (list): a list of floats for p value's significance cut-off\n            values. This is used to generate the significance stars. Default is\n            [0.1,0.05,0.01].\n        append_notes (bool): A boolean variable for printing p value cutoff explanation\n            and additional notes, if applicable. Default is True.\n        notes_label (str): A sting to print as the title of the notes section, if\n            applicable. Default is 'Notes'\n        stats_options (dict): A dictionary that determines which statistics (e.g.\n            R-Squared, No. of Observations) are displayed and how they are labeled.\n            The keys are the names of the statistics inside the model['info'] dictionary\n            or attribute names of a statsmodels results object. The values are the new\n            labels to be displayed for those statistics, i.e. the set of the values is\n            used as row names in the table.\n        number_format (int, str, iterable or callable): A callable, iterable, integer\n            or string that is used to apply string formatter(s) to floats in the\n            table. Default (\"{0:.3g}\", \"{0:.5f}\", \"{0:.4g}\").\n        add_trailing_zeros (bool): If True, format floats such that they have same\n            number of digits after the decimal point. Default True.\n        siunitx_warning (bool): If True, print warning about LaTex preamble to add for\n            proper compilation of  when working with siunitx package. 
Default True.\n        escape_special_characters (bool): If True, replaces special characters\n            in parameter and model names with LaTeX or HTML safe sequences.\n\n    Returns:\n        res_table (data frame, str or dictionary): depending on the rerturn type,\n            data frame with formatted strings, a string for html or latex tables,\n            or a dictionary with statistics and parameters dataframes, and strings\n            for footers is returned. If the return type is a path, the function saves\n            the resulting table at the given path.\n\n    \"\"\"\n    if not isinstance(models, (tuple, list)):\n        raise TypeError(f\"models must be a list or tuple. Not: {type(models)}\")\n    models = [_process_model(model) for model in models]\n    model_names = _get_model_names(models)\n    default_col_names, default_col_groups = _get_default_column_names_and_groups(\n        model_names\n    )\n    column_groups = _customize_col_groups(\n        default_col_groups=default_col_groups, custom_col_groups=custom_col_groups\n    )\n    column_names = _customize_col_names(\n        default_col_names=default_col_names, custom_col_names=custom_col_names\n    )\n    show_col_groups = _update_show_col_groups(show_col_groups, column_groups)\n    stats_options = _set_default_stats_options(stats_options)\n    body, footer = _get_estimation_table_body_and_footer(\n        models,\n        column_names,\n        column_groups,\n        custom_param_names,\n        custom_index_names,\n        significance_levels,\n        stats_options,\n        show_col_names,\n        show_col_groups,\n        show_stars,\n        show_inference,\n        confidence_intervals,\n        number_format,\n        add_trailing_zeros,\n    )\n\n    render_inputs = {\n        \"body\": body,\n        \"footer\": footer,\n        \"render_options\": render_options,\n    }\n    if return_type == \"render_inputs\":\n        out = render_inputs\n    elif 
str(return_type).endswith(\"tex\"):\n        out = _render_latex(\n            **render_inputs,\n            show_footer=show_footer,\n            append_notes=append_notes,\n            notes_label=notes_label,\n            significance_levels=significance_levels,\n            custom_notes=custom_notes,\n            siunitx_warning=siunitx_warning,\n            show_index_names=show_index_names,\n            show_col_names=show_col_names,\n            escape_special_characters=escape_special_characters,\n        )\n    elif str(return_type).endswith(\"html\"):\n        out = render_html(\n            **render_inputs,\n            show_footer=show_footer,\n            append_notes=append_notes,\n            notes_label=notes_label,\n            custom_notes=custom_notes,\n            significance_levels=significance_levels,\n            show_index_names=show_index_names,\n            show_col_names=show_col_names,\n            escape_special_characters=escape_special_characters,\n        )\n\n    elif return_type == \"dataframe\":\n        if show_footer:\n            footer.index.names = body.index.names\n            out = pd.concat([body.reset_index(), footer.reset_index()]).set_index(\n                body.index.names\n            )\n        else:\n            out = body\n    else:\n        raise ValueError(\n            f\"\"\"Value of return type can be either of\n            ['data_frame', 'render_inputs','latex' ,'html']\n            or a path ending with '.html' or '.tex'. 
Not: {return_type}.\"\"\"\n        )\n\n    return_type = Path(return_type)\n    if return_type.suffix not in (\".html\", \".tex\"):\n        return out\n    else:\n        return_type.write_text(out)\n\n\n@suppress_performance_warnings\ndef render_latex(\n    body,\n    footer,\n    render_options=None,\n    show_footer=True,\n    append_notes=True,\n    notes_label=\"Note:\",\n    significance_levels=(0.1, 0.05, 0.01),\n    custom_notes=None,\n    siunitx_warning=True,\n    show_index_names=False,\n    show_col_names=True,\n    show_col_groups=True,\n    escape_special_characters=True,\n):\n    r\"\"\"Return estimation table in LaTeX format as string.\n\n    Args:\n        body (pandas.DataFrame): DataFrame with formatted strings of parameter\n            values, inferences (standard errors or confidence intervals, if\n            applicable) and significance stars (if applicable).\n        footer (pandas.DataFrame): DataFrame with formatted strings of summary\n            statistics (such as number of observations, r-squared, etc.)\n        render_options(dict): A dictionary with custom kwargs to pass to\n            pd.Styler.to_latex(), to update the default options. An example keyword\n            argument is:\n            - siunitx (bool): If True, the table is structured to be compatible\n            with siunitx package. Default is set to True internally.\n            For the list of all possible arguments, see documentation of\n            `pandas.io.formats.style.Styler.to_latex`.\n        show_footer (bool): a boolean variable for displaying footer_df. Default True.\n        append_notes (bool): A boolean variable for printing p value cutoff explanation\n            and additional notes, if applicable. Default is True.\n        notes_label (str): A sting to print as the title of the notes section, if\n            applicable. 
Default is 'Notes'\n        significance_levels (list or tuple): a list of floats for p value's significance\n            cutt-off values. Default is [0.1,0.05,0.01].\n        custom_notes (list): A list of strings for additional notes. Default is None.\n        siunitx_warning (bool): If True, print warning about LaTex preamble to add for\n            proper compilation of  when working with siunitx package. Default True.\n        show_index_names (bool): If True, display index names in the table.\n        show_col_names (bool): If True, the column names are displayed.\n        show_col_groups (bool): If True, the column groups are displayed.\n        escape_special_characters (bool): If True, replaces the characters &, %,\n            $, #, _, {, }, ~, ^, and \\ in parameter and model names with\n            LaTeX-safe sequences.\n\n    Returns:\n        latex_str (str): The resulting string with Latex tabular code.\n\n    \"\"\"\n    return _render_latex(\n        body=body,\n        footer=footer,\n        render_options=render_options,\n        show_footer=show_footer,\n        append_notes=append_notes,\n        notes_label=notes_label,\n        significance_levels=significance_levels,\n        custom_notes=custom_notes,\n        siunitx_warning=siunitx_warning,\n        show_index_names=show_index_names,\n        show_col_names=show_col_names,\n        show_col_groups=show_col_groups,\n        escape_special_characters=escape_special_characters,\n    )\n\n\ndef _render_latex(\n    body,\n    footer,\n    render_options=None,\n    show_footer=True,\n    append_notes=True,\n    notes_label=\"Note:\",\n    significance_levels=(0.1, 0.05, 0.01),\n    custom_notes=None,\n    siunitx_warning=True,\n    show_index_names=False,\n    show_col_names=True,\n    show_col_groups=True,\n    escape_special_characters=True,\n):\n    \"\"\"See docstring of render_latex for more information.\"\"\"\n    if not pd.__version__ >= \"1.4.0\":\n        raise ValueError(\n           
 r\"\"\"render_latex or estimation_table with return_type=\"latex\" requires\n            pandas 1.4.0 or higher. Update to a newer version of pandas or use\n            estimation_table with return_type=\"render_inputs\" and manually render those\n            results using the DataFrame.to_latex method.\n        \"\"\"\n        )\n    if siunitx_warning:\n        warn(\n            r\"\"\"Proper LaTeX compilation requires the package siunitx and adding\n                   \\sisetup{\n                       input-symbols            = (),\n                       table-align-text-post    = false,\n                       group-digits             = false,\n                    }\n                    to your main tex file. To turn\n                    this warning off set value of siunitx_warning = False\"\"\"\n        )\n    body = body.copy(deep=True)\n    try:\n        ci_in_body = body.loc[(\"\",)][body.columns[0]].str.contains(\";\").any()\n    except KeyError:\n        ci_in_body = False\n\n    if ci_in_body:\n        body.loc[(\"\",)] = pd_df_map(body.loc[(\"\",)], \"{{{}}}\".format).values\n    if body.columns.nlevels > 1:\n        column_groups = body.columns.get_level_values(0)\n    else:\n        column_groups = None\n\n    group_to_col_position = _create_group_to_col_position(column_groups)\n    n_levels = body.index.nlevels\n    n_columns = len(body.columns)\n\n    if escape_special_characters:\n        escape_special_characters = \"latex\"\n    else:\n        escape_special_characters = None\n    body_styler = _get_updated_styler(\n        body,\n        show_index_names=show_index_names,\n        show_col_names=show_col_names,\n        show_col_groups=show_col_groups,\n        escape_special_characters=escape_special_characters,\n    )\n    default_options = {\n        \"multicol_align\": \"c\",\n        \"hrules\": True,\n        \"siunitx\": True,\n        \"column_format\": \"l\" * n_levels + \"S\" * n_columns,\n        \"multirow_align\": \"t\",\n    
}\n    if render_options:\n        default_options.update(render_options)\n    latex_str = body_styler.to_latex(**default_options)\n\n    if group_to_col_position:\n        temp_str = \"\\n\"\n        for k in group_to_col_position:\n            max_col = max(group_to_col_position[k]) + n_levels + 1\n            min_col = min(group_to_col_position[k]) + n_levels + 1\n            temp_str += f\"\\\\cmidrule(lr){{{min_col}-{max_col}}}\"\n            temp_str += \"\\n\"\n        latex_str = (\n            latex_str.split(\"\\\\\\\\\", 1)[0]\n            + \"\\\\\\\\\"\n            + temp_str\n            + latex_str.split(\"\\\\\\\\\", 1)[1]\n        )\n    latex_str = latex_str.split(\"\\\\bottomrule\")[0]\n    if show_footer:\n        footer = footer.copy(deep=True)\n        footer = footer.apply(_center_align_integers_and_non_numeric_strings, axis=1)\n        footer_styler = footer.style\n        stats_str = footer_styler.to_latex(**default_options)\n        if \"\\\\midrule\" in stats_str:\n            stats_str = (\n                \"\\\\midrule\" + stats_str.split(\"\\\\midrule\")[1].split(\"\\\\bottomrule\")[0]\n            )\n        else:\n            stats_str = (\n                \"\\\\midrule\" + stats_str.split(\"\\\\toprule\")[1].split(\"\\\\bottomrule\")[0]\n            )\n        latex_str += stats_str\n    notes = _generate_notes_latex(\n        append_notes, notes_label, significance_levels, custom_notes, body\n    )\n    latex_str += notes\n    latex_str += \"\\\\bottomrule\\n\\\\end{tabular}\\n\"\n    if latex_str.startswith(\"\\\\begin{table}\"):\n        latex_str += \"\\n\\\\end{table}\\n\"\n    return latex_str\n\n\ndef render_html(\n    body,\n    footer,\n    render_options=None,\n    show_footer=True,\n    append_notes=True,\n    notes_label=\"Note:\",\n    custom_notes=None,\n    significance_levels=(0.1, 0.05, 0.01),\n    show_index_names=False,\n    show_col_names=True,\n    show_col_groups=True,\n    escape_special_characters=True,\n    
**kwargs,  # noqa: ARG001\n):\n    \"\"\"Return estimation table in html format as string.\n\n    Args:\n        body (pandas.DataFrame): DataFrame with formatted strings of parameter\n            values, inferences (standard errors or confidence intervals, if\n            applicable) and significance stars (if applicable).\n        footer (pandas.DataFrame): DataFrame with formatted strings of summary\n            statistics (such as number of observations, r-squared, etc.)\n        notes (str): The html string with notes with additional information\n            (e.g. mapping from pvalues to significance stars) to append to the footer\n            of the estimation table string with LaTex code for the notes section.\n        render_options(dict): A dictionary with custom kwargs to pass to pd.to_latex(),\n            to update the default options. An example is `{header: False}` that\n            disables displaying column names.\n        show_footer (bool): a boolean variable for displaying footer_df. Default True.\n        append_notes (bool): A boolean variable for printing p value cutoff explanation\n            and additional notes, if applicable. Default is True.\n        notes_label (str): A sting to print as the title of the notes section, if\n            applicable. Default is 'Notes'\n        significance_levels (list or tuple): a list of floats for p value's significance\n            cutt-off values. 
Default is [0.1,0.05,0.01].\n        show_index_names (bool): If True, display index names in the table.\n        show_col_names (bool): If True, the column names are displayed.\n        show_col_groups (bool): If True, the column groups are displayed.\n        escape_special_characters (bool): If True,  replace the characters &, <, >, ',\n            and \" in parameter and model names with HTML-safe sequences.\n\n    Returns:\n        html_str (str): The resulting string with html tabular code.\n\n    \"\"\"\n    if not pd.__version__ >= \"1.4.0\":\n        raise ValueError(\n            r\"\"\"render_html or estimation_table with return_type=\"html\" requires\n            pandas 1.4.0 or higher. Update to a newer version of pandas or use\n            estimation_table with return_type=\"render_inputs\" and manually render those\n            results using the DataFrame.to_html method.\n        \"\"\"\n        )\n    n_levels = body.index.nlevels\n    n_columns = len(body.columns)\n    html_str = \"\"\n    if escape_special_characters:\n        escape_special_characters = \"html\"\n    else:\n        escape_special_characters = None\n    body_styler = _get_updated_styler(\n        body,\n        show_index_names=show_index_names,\n        show_col_names=show_col_names,\n        show_col_groups=show_col_groups,\n        escape_special_characters=escape_special_characters,\n    )\n    default_options = {\"exclude_styles\": True}\n    if render_options:\n        default_options.update(render_options)\n    html_str = body_styler.to_html(**default_options).split(\"</tbody>\\n</table>\")[0]\n    if show_footer:\n        stats_str = \"\"\"<tr><td colspan=\"{}\" style=\"border-bottom: 1px solid black\">\n            </td></tr>\"\"\".format(n_levels + n_columns)\n        stats_str += (\n            footer.style.to_html(**default_options)\n            .split(\"</thead>\\n\")[1]\n            .split(\"</tbody>\\n</table>\")[0]\n        )\n        stats_str = 
re.sub(r\"(?<=[\\d)}{)])}\", \"\", re.sub(r\"{(?=[}\\d(])\", \"\", stats_str))\n        html_str += stats_str\n    notes = _generate_notes_html(\n        append_notes, notes_label, significance_levels, custom_notes, body\n    )\n    html_str += notes\n    html_str += \"</tbody>\\n</table>\"\n    return html_str\n\n\ndef _process_model(model):\n    \"\"\"Check model validity, convert to dictionary.\n\n    Args:\n        model: Estimation result. See docstring of estimation_table for more info.\n\n    Returns:\n        processed_model: A dictionary with keys params, info and name.\n\n    \"\"\"\n    if isinstance(model, dict):\n        params = model[\"params\"].copy(deep=True)\n        info = model.get(\"info\", {})\n        name = model.get(\"name\", \"\")\n    elif isinstance(model, pd.DataFrame):\n        params = model.copy(deep=True)\n        info = {}\n        name = None\n    else:\n        try:\n            params = _extract_params_from_sm(model)\n            info = {**_extract_info_from_sm(model)}\n            name = info.pop(\"name\")\n        except (KeyboardInterrupt, SystemExit):\n            raise\n        except Exception as e:\n            raise TypeError(\n                f\"\"\"Model can  be of type dict,  pd.DataFrame\n                or a statsmodels result. 
Model {model} is of type {type(model)}.\"\"\"\n            ) from e\n    if \"pvalue\" in params.columns:\n        params = params.rename(columns={\"pvalue\": \"p_value\"})\n    processed_model = {\"params\": params, \"info\": info, \"name\": name}\n    return processed_model\n\n\ndef _get_estimation_table_body_and_footer(\n    models,\n    column_names,\n    column_groups,\n    custom_param_names,\n    custom_index_names,\n    significance_levels,\n    stats_options,\n    show_col_names,\n    show_col_groups,\n    show_stars,\n    show_inference,\n    confidence_intervals,\n    number_format,\n    add_trailing_zeros,\n):\n    \"\"\"Create body and footer blocs with significance stars and inference values.\n\n    Applies number formatting to parameters and summary statitistics.\n    Concatinates infere values to parameter values if applicable,\n    Adds significance stars if applicable.\n\n    Args:\n        models (list): List of dictionaries with keys 'params', 'info' and 'name'.\n        column_names (list): List of strigs to display as names of the model columns in\n            estimation table.\n        column_groups (list or NoneType): If defined, list of strings to display as\n            names of groups of model columns in estimation table.\n        custom_param_names (dict or list): A list of strings to display as parameter\n            names or a mapping from original to custom paramter names.\n        custom_index_names (dict or list): Dictionary or list to set the names of the\n            index levels of the parameters.\n        significance_levels (list): a list of floats for p value's significance\n            cutt-off values.\n        stats_options (dict): A dictionary with displayed statistics names as keys,\n            and statistics names to be retrieved from model['info'] as values\n        show_col_names (bool): If True, the column names are displayed.\n        show_col_groups (bool): If True, the column groups are displayed.\n        
show_stars (bool): a boolean variable for printing significance stars.\n        show_inference(bool): If True, inference (standard errors or confidence\n            intervals) below param values.\n        confidence_intervals (bool): If True, display confidence intervals as inference\n            values.\n        number_format (int, str, iterable or callable): A callable, iterable, integer\n            or callable that is used to apply string formatter(s) to floats in the\n            table.\n        add_trailing_zeros (bool): If True, format floats such that they have same\n            number of digits after the decimal point.\n\n    Returns:\n        body (DataFrame): DataFrame data frame with formatted strings of parameter\n            and inference values and significance stars to display in estimation table.\n        footer (DataFrame): DataFrame with formatted strings of summary statistics to\n            display at the bottom of estimation table.\n\n    \"\"\"\n    body, max_trail = _build_estimation_table_body(\n        models,\n        column_names,\n        column_groups,\n        custom_param_names,\n        custom_index_names,\n        show_col_names,\n        show_col_groups,\n        show_inference,\n        show_stars,\n        confidence_intervals,\n        significance_levels,\n        number_format,\n        add_trailing_zeros,\n    )\n    footer = _build_estimation_table_footer(\n        models,\n        stats_options,\n        significance_levels,\n        show_stars,\n        number_format,\n        add_trailing_zeros,\n        max_trail,\n    )\n    footer.columns = body.columns\n    return body, footer\n\n\ndef _build_estimation_table_body(\n    models,\n    column_names,\n    column_groups,\n    custom_param_names,\n    custom_index_names,\n    show_col_names,\n    show_col_groups,\n    show_inference,\n    show_stars,\n    confidence_intervals,\n    significance_levels,\n    number_format,\n    add_trailing_zeros,\n):\n    \"\"\"Create body 
bloc significance stars and inference values.\n\n    Applies number formatting to parameters. Concatinates inference values\n    to parameter values if applicable. Adds significance stars if applicable.\n\n    Args:\n        models (list): List of dictionaries with keys 'params', 'info' and 'name'.\n        column_names (list): List of strigs to display as names of the model columns in\n            estimation table.\n        column_groups (list or NoneType): If defined, list of strings to display as\n            names of groups of model columns in estimation table.\n        custom_param_names (dict or list): A list of strings to display as parameter\n            names or a mapping from original to custom paramter names.\n        custom_index_names (dict or list): Dictionary or list to set the names of the\n            index levels of the parameters.\n        significance_levels (list): a list of floats for p value's significance\n            cutt-off values.\n        show_col_names (bool): If True, the column names are displayed.\n        show_col_groups (bool): If True, the column groups are displayed.\n        show_stars (bool): a boolean variable for printing significance stars.\n        show_inference(bool): If True, inference (standard errors or confidence\n            intervals) below param values.\n        confidence_intervals (bool): If True, display confidence intervals as inference\n            values.\n        number_format (int, str, iterable or callable): A callable, iterable, integer\n            or callable that is used to apply string formatter(s) to floats in the\n            table.\n        add_trailing_zeros (bool): If True, format floats such that they have same\n            number of digits after the decimal point.\n\n    Returns:\n        body (DataFrame): DataFrame data frame with formatted strings of parameter\n            and inference values and significance stars to display in estimation table.\n        max_trail (int): Integer that shows 
the maximum number of digits after a decimal\n            point in the parameters DataFrame. Is passed to\n            `_build_estimation_table_footer` to get same number of trailing zeros as in\n            parameters DataFrame and torender_latex for formatting tables in siunitx\n            package.\n\n    \"\"\"\n    dfs, max_trail = _reindex_and_float_format_params(\n        models, show_inference, confidence_intervals, number_format, add_trailing_zeros\n    )\n    to_convert = []\n    if show_stars:\n        for df, mod in zip(dfs, models, strict=False):\n            to_convert.append(\n                pd.concat([df, mod[\"params\"].reindex(df.index)[\"p_value\"]], axis=1)\n            )\n    else:\n        to_convert = dfs\n    # convert DataFrames to string series with inference and siginificance\n    # information.\n    to_concat = [\n        _convert_frame_to_string_series(\n            df,\n            significance_levels,\n            show_stars,\n        )\n        for df in to_convert\n    ]\n    df = pd.concat(to_concat, axis=1)\n    df = _process_frame_indices(\n        df=df,\n        custom_param_names=custom_param_names,\n        custom_index_names=custom_index_names,\n        show_col_names=show_col_names,\n        show_col_groups=show_col_groups,\n        column_names=column_names,\n        column_groups=column_groups,\n    )\n    return df, max_trail\n\n\ndef _build_estimation_table_footer(\n    models,\n    stats_options,\n    significance_levels,\n    show_stars,\n    number_format,\n    add_trailing_zeros,\n    max_trail,\n):\n    \"\"\"Create footer bloc of estimation table.\n\n    Applies number formatting to parameters and summary statitistics.\n    Concatinates infere values to parameter values if applicable,\n    Adds significance stars if applicable.\n\n    Args:\n        models (list): List of dictionaries with keys 'params', 'info' and 'name'.\n        stats_options (dict): A dictionary with displayed statistics names as keys,\n      
      and statistics names to be retrieved from model['info'] as values\n        significance_levels (list): a list of floats for p value's significance cutt-off\n            values.\n        number_format (int, str, iterable or callable): A callable, iterable, integer\n            or callable that is used to apply string formatter(s) to floats in the\n            table.\n        add_trailing_zeros (bool): If True, format floats such that they haave same\n            number of digits after the decimal point.\n        max_trail (int): If add_trailing_zeros is True, add corresponding number of\n            trailing zeros to floats in the stats DataFrame to have number of digits\n            after a decimal point equal to max_trail for each float.\n\n    Returns:\n        footer (DataFrame): DataFrame with formatted strings of summary statistics to\n            display at the bottom of estimation table.\n\n    \"\"\"\n    to_concat = [\n        _create_statistics_sr(\n            mod,\n            stats_options,\n            significance_levels,\n            show_stars,\n            number_format,\n            add_trailing_zeros,\n            max_trail,\n        )\n        for mod in models\n    ]\n    stats = pd.concat(to_concat, axis=1)\n    return stats\n\n\ndef _reindex_and_float_format_params(\n    models, show_inference, confidence_intervals, number_format, add_trailing_zeros\n):\n    \"\"\"Reindex all params DataFrames with a common index and apply number formatting.\"\"\"\n    dfs = _get_params_frames_with_common_index(models)\n    cols_to_format = _get_cols_to_format(show_inference, confidence_intervals)\n    formatted_frames, max_trail = _apply_number_formatting_frames(\n        dfs, cols_to_format, number_format, add_trailing_zeros\n    )\n    return formatted_frames, max_trail\n\n\ndef _get_params_frames_with_common_index(models):\n    \"\"\"Get a list of params frames, reindexed with a common index.\"\"\"\n    dfs = [model[\"params\"] for model in 
models]\n    common_index = _get_common_index(dfs)\n    out = [model[\"params\"].reindex(common_index) for model in models]\n    return out\n\n\ndef _get_common_index(dfs):\n    \"\"\"Get common index from a list of DataFrames.\"\"\"\n    common_index = []\n    for d_ in dfs:\n        common_index += [ind for ind in d_.index.to_list() if ind not in common_index]\n    return common_index\n\n\ndef _get_cols_to_format(show_inference, confidence_intervals):\n    \"\"\"Get the list of names of columns that need to be formatted.\n\n    By default, formatting is applied to  parameter values. If inference values need to\n    displayed, adds confidence intervals or standard erros to the list.\n\n    \"\"\"\n    cols = [\"value\"]\n    if show_inference:\n        if confidence_intervals:\n            cols += [\"ci_lower\", \"ci_upper\"]\n        else:\n            cols.append(\"standard_error\")\n    return cols\n\n\ndef _apply_number_formatting_frames(dfs, columns, number_format, add_trailing_zeros):\n    \"\"\"Apply string formatter to specific columns of a list of DataFrames.\"\"\"\n    raw_formatted = [\n        _apply_number_format(df[columns], number_format, format_integers=False)\n        for df in dfs\n    ]\n    max_trail = int(max([_get_digits_after_decimal(df) for df in raw_formatted]))\n    if add_trailing_zeros:\n        formatted = [\n            _apply_number_format(df, max_trail, format_integers=True)\n            for df in raw_formatted\n        ]\n    else:\n        formatted = raw_formatted\n    return formatted, max_trail\n\n\ndef _update_show_col_groups(show_col_groups, column_groups):\n    \"\"\"Set the value of show_col_groups to False or True given column_groups.\n\n    Updates the default None to True if column_groups is not None. 
Sets to False\n    otherwise.\n\n    \"\"\"\n    if show_col_groups is None:\n        if column_groups is not None:\n            show_col_groups = True\n        else:\n            show_col_groups = False\n    return show_col_groups\n\n\ndef _set_default_stats_options(stats_options):\n    \"\"\"Define some default summary statistics to display in estimation table.\"\"\"\n    if stats_options is None:\n        stats_options = {\n            \"n_obs\": \"Observations\",\n            \"rsquared\": \"R$^2$\",\n            \"rsquared_adj\": \"Adj. R$^2$\",\n            \"resid_std_err\": \"Residual Std. Error\",\n            \"fvalue\": \"F Statistic\",\n        }\n    else:\n        if not isinstance(stats_options, dict):\n            raise TypeError(\n                f\"\"\"stats_options can be of types dict or NoneType.\n            Not: {type(stats_options)}.\"\"\"\n            )\n    return stats_options\n\n\ndef _get_model_names(processed_models):\n    \"\"\"Get names of model names if defined, set based on position otherwise.\n\n    Args:\n        processed_models (list): List of estimation results processed to dictionaries.\n\n    Returns:\n        names (list): List of model names given either by name attribute of each model\n            if defined or the position (counting from 1) of each model in parentheses.\n\n    \"\"\"\n    names = []\n    for i, mod in enumerate(processed_models):\n        if mod.get(\"name\"):\n            names.append(mod[\"name\"])\n        else:\n            names.append(f\"({i + 1})\")\n    _check_order_of_model_names(names)\n    return names\n\n\ndef _check_order_of_model_names(model_names):\n    \"\"\"Check identically named models are adjacent.\n\n    Args:\n        model_names (list): List of model names.\n\n    Raises:\n        ValueError: if models that share a name are not next to each other.\n\n    \"\"\"\n    group_to_col_index = _create_group_to_col_position(model_names)\n    for positions in group_to_col_index.values():\n  
      if positions != list(range(positions[0], positions[-1] + 1)):\n            raise ValueError(\n                \"If there are repetitions in model_names, models with the \"\n                f\"same name need to be adjacent. You provided: {model_names}\"\n            )\n\n\ndef _get_default_column_names_and_groups(model_names):\n    \"\"\"Get column names and groups to display in the estimation table.\n\n    Args:\n        model_names (list): List of model names.\n\n    Returns:\n        col_names (list): List of estimation column names to display in estimation\n            table. Same as model_names if model_names are unique. Given by column\n            position (counting from 1) in braces otherwise.\n        col_groups (list or NoneType): If defined, list of strings unique values\n            of which will define column groups. Not defined if model_names are unique.\n\n    \"\"\"\n    if len(set(model_names)) == len(model_names):\n        col_groups = None\n        col_names = model_names\n    else:\n        col_groups = model_names\n        col_names = [f\"({i + 1})\" for i in range(len(model_names))]\n\n    return col_names, col_groups\n\n\ndef _customize_col_groups(default_col_groups, custom_col_groups):\n    \"\"\"Change default (inferred) column group titles using custom column groups.\n\n    Args:\n        default_col_groups (list or NoneType): The inferred column groups.\n        custom_col_groups (list or dict): Dictionary mapping defautl column group\n            titles to custom column group titles, if the defautl column groups are\n            defined. 
Must be a list of the same lenght as models otherwise.\n\n    Returns:\n        col_groups (list): Column groups to display in estimation table.\n\n    \"\"\"\n    if custom_col_groups:\n        if not default_col_groups:\n            if not isinstance(custom_col_groups, list):\n                raise ValueError(\n                    \"\"\"With unique model names, multiple models can't be grouped\n                under common group name. Provide list of unique group names instead,\n                if you wish to add column level.\"\"\"\n                )\n            col_groups = custom_col_groups\n        else:\n            if isinstance(custom_col_groups, list):\n                col_groups = custom_col_groups\n            elif isinstance(custom_col_groups, dict):\n                col_groups = (\n                    pd.Series(default_col_groups).replace(custom_col_groups).to_list()\n                )\n            else:\n                raise TypeError(\n                    f\"\"\"Invalid type for custom_col_groups. Can be either list\n                    or dictionary, or NoneType. 
Not: {type(col_groups)}.\"\"\"\n                )\n    else:\n        col_groups = default_col_groups\n    return col_groups\n\n\ndef _customize_col_names(default_col_names, custom_col_names):\n    \"\"\"Change default (inferred) column names using custom column names.\n\n    Args:\n        deafult_col_names (list): The default (inferred) column names.\n        custom_col_names (list or dict): Dictionary mapping default column names\n            to custom column names, or list to display as the name of each\n            model column.\n\n    Returns:\n        column_names (list): The column names to display in the estimatino table.\n\n    \"\"\"\n    if not custom_col_names:\n        col_names = default_col_names\n    elif isinstance(custom_col_names, dict):\n        col_names = list(pd.Series(default_col_names).replace(custom_col_names))\n    elif isinstance(custom_col_names, list):\n        if not len(custom_col_names) == len(default_col_names):\n            raise ValueError(\n                f\"\"\"If provided as a list, custom_col_names should have same length as\n                default_col_names. Lenght of custom_col_names {len(custom_col_names)}\n                !=length of default_col_names {len(default_col_names)}\"\"\"\n            )\n        elif any(isinstance(i, list) for i in custom_col_names):\n            raise ValueError(\"Custom_col_names cannot be a nested list\")\n        col_names = custom_col_names\n    else:\n        raise TypeError(\n            f\"\"\"Invalid type for custom_col_names.\n            Can be either list or dictionary, or NoneType. 
Not: {col_names}.\"\"\"\n        )\n    return col_names\n\n\ndef _create_group_to_col_position(column_groups):\n    \"\"\"Get mapping from column groups to column positions.\n\n    Args:\n        column_names (list): The column groups to display in the estimatino table.\n\n    Returns:\n        group_to_col_index(dict): The mapping from column group titles to column\n            positions.\n\n    \"\"\"\n    if column_groups is not None:\n        group_to_col_index = {group: [] for group in list(set(column_groups))}\n        for i, group in enumerate(column_groups):\n            group_to_col_index[group].append(i)\n    else:\n        group_to_col_index = None\n    return group_to_col_index\n\n\ndef _convert_frame_to_string_series(\n    df,\n    significance_levels,\n    show_stars,\n):\n    \"\"\"Return processed value series with significance stars and inference information.\n\n    Args:\n        df (DataFrame): params DataFrame of the model\n        significance_levels (list): see main docstring\n        number_format (int, str, iterable or callable): see main docstring\n        show_inference (bool): see main docstring\n        confidence_intervals (bool): see main docstring\n        show_stars (bool): see main docstring\n\n    Returns:\n        sr (pd.Series): string series with values and inferences.\n\n    \"\"\"\n    value_sr = df[\"value\"]\n    if show_stars:\n        sig_bins = [-1, *sorted(significance_levels)] + [2]\n        value_sr += \"$^{\"\n        value_sr += (\n            pd.cut(\n                df[\"p_value\"],\n                bins=sig_bins,\n                labels=[\n                    \"*\" * (len(significance_levels) - i)\n                    for i in range(len(significance_levels) + 1)\n                ],\n            )\n            .astype(\"str\")\n            .replace(\"nan\", \"\")\n            .replace(np.nan, \"\")\n        )\n        value_sr += \" }$\"\n    if \"ci_lower\" in df:\n        ci_lower = df[\"ci_lower\"]\n        
ci_upper = df[\"ci_upper\"]\n        inference_sr = \"(\"\n        inference_sr += ci_lower\n        inference_sr += r\";\"\n        inference_sr += ci_upper\n        inference_sr += \")\"\n        sr = _combine_series(value_sr, inference_sr)\n    elif \"standard_error\" in df:\n        standard_error = df[\"standard_error\"]\n        inference_sr = \"(\" + standard_error + \")\"\n        sr = _combine_series(value_sr, inference_sr)\n    else:\n        sr = value_sr\n    # replace empty braces with empty string\n    sr = sr.where(sr.apply(lambda x: bool(re.search(r\"\\d\", x))), \"\")\n    sr.name = \"\"\n    return sr\n\n\ndef _combine_series(value_sr, inference_sr):\n    \"\"\"Merge value and inference series.\n\n    Return string series with parameter values and precision values below respective\n    param values.\n\n    Args:\n        values_sr (Series): string series of estimated parameter values\n        inference_sr (Series): string series of inference values\n\n    Returns:\n        series: combined string series of param and inference values\n\n    \"\"\"\n    value_df = value_sr.to_frame(name=\"\")\n    original_cols = value_df.columns\n    value_df.reset_index(drop=False, inplace=True)\n    index_names = [item for item in value_df.columns if item not in original_cols]\n    # set the index to even numbers, starting at 0\n    value_df.index = value_df.index * 2\n    inference_df = inference_sr.to_frame(name=\"\")\n    inference_df.reset_index(drop=False, inplace=True)\n    # set the index to odd numbers, starting at 1\n    inference_df.index = (inference_df.index * 2) + 1\n    inference_df[index_names[-1]] = \"\"\n    df = pd.concat([value_df, inference_df]).sort_index()\n    df.set_index(index_names, inplace=True, drop=True)\n    return df[\"\"]\n\n\ndef _create_statistics_sr(\n    model,\n    stats_options,\n    significance_levels,\n    show_stars,\n    number_format,\n    add_trailing_zeros,\n    max_trail,\n):\n    \"\"\"Process statistics values, 
return string series.\n\n    Args:\n        model (estimation result): see main docstring\n        stats_options (dict): see main docstring\n        significance_levels (list): see main docstring\n        show_stars (bool): see main docstring\n        number_format (int, str, iterable or callable): see main docstring\n        add_trailing_zeros (bool): If True, format floats such that they haave same\n            number of digits after the decimal point.\n        max_trail (int): If add_trailing_zeros is True, add corresponding number of\n            trailing zeros to floats in the stats DataFrame to have number of digits\n            after a decimal point equal to max_trail for each float.\n\n    Returns:\n        series: string series with summary statistics values and additional info\n            if applicable.\n\n    \"\"\"\n    stats_values = {}\n    stats_options = deepcopy(stats_options)\n    if \"show_dof\" in stats_options:\n        show_dof = stats_options.pop(\"show_dof\")\n    else:\n        show_dof = None\n    for k in stats_options:\n        stats_values[stats_options[k]] = model[\"info\"].get(k, np.nan)\n\n    raw_formatted = _apply_number_format(\n        pd.DataFrame(pd.Series(stats_values)), number_format, format_integers=False\n    )\n    if add_trailing_zeros:\n        formatted = _apply_number_format(\n            raw_formatted, max_trail, format_integers=False\n        )\n    else:\n        formatted = raw_formatted\n    stats_values = formatted.to_dict()[0]\n    if \"fvalue\" in model[\"info\"] and \"F Statistic\" in stats_values:\n        if show_stars and \"f_pvalue\" in model[\"info\"]:\n            sig_bins = [-1, *sorted(significance_levels)] + [2]\n            sig_icon_fstat = \"*\" * (\n                len(significance_levels)\n                - np.digitize(model[\"info\"][\"f_pvalue\"], sig_bins)\n                + 1\n            )\n            stats_values[\"F Statistic\"] = (\n                stats_values[\"F Statistic\"] + \"$^{\" 
+ sig_icon_fstat + \"}$\"\n            )\n        if show_dof:\n            fstat_str = \"{{{}(df={};{})}}\"\n            stats_values[\"F Statistic\"] = fstat_str.format(\n                stats_values[\"F Statistic\"],\n                int(model[\"info\"][\"df_model\"]),\n                int(model[\"info\"][\"df_resid\"]),\n            )\n    if \"resid_std_err\" in model[\"info\"] and \"Residual Std. Error\" in stats_values:\n        if show_dof:\n            rse_str = \"{{{}(df={})}}\"\n            stats_values[\"Residual Std. Error\"] = rse_str.format(\n                stats_values[\"Residual Std. Error\"], int(model[\"info\"][\"df_resid\"])\n            )\n    stat_sr = pd.Series(stats_values)\n    # the following is to make sure statistics dataframe has as many levels of\n    # indices as the parameters dataframe.\n    stat_ind = np.empty((len(stat_sr), model[\"params\"].index.nlevels - 1), dtype=str)\n    stat_ind = np.concatenate(\n        [stat_sr.index.values.reshape(len(stat_sr), 1), stat_ind], axis=1\n    ).T\n    stat_sr.index = pd.MultiIndex.from_arrays(stat_ind)\n    return stat_sr.astype(\"str\").replace(\"nan\", \"\")\n\n\ndef _process_frame_indices(\n    df,\n    custom_param_names,\n    custom_index_names,\n    show_col_names,\n    show_col_groups,\n    column_names,\n    column_groups,\n):\n    \"\"\"Process body DataFrame, customize the header.\n\n    Args:\n        df (DataFrame): string DataFrame with parameter values and inferences.\n        custom_param_names (dict): see main docstring\n        custom_index_names (list): see main docstring\n        show_col_names (bool): see main docstring\n        column_names (list): List of column names to display in estimation table.\n        column_groups (list): List of column group titles to display in estimation\n            table.\n\n    Returns:\n        processed_df (DataFrame): string DataFrame with customized header.\n\n    \"\"\"\n    # The column names of the  df are empty strings.\n    # If 
show_col_names is True, rename columns using column_names.\n    # Add column level if show col_groups is True.\n    if show_col_names:\n        if show_col_groups:\n            df.columns = pd.MultiIndex.from_tuples(\n                [(i, j) for i, j in zip(column_groups, column_names, strict=False)]\n            )\n        else:\n            df.columns = column_names\n    if custom_index_names:\n        if isinstance(custom_index_names, list):\n            df.index.names = custom_index_names\n        elif isinstance(custom_index_names, dict):\n            df.rename_axis(index=custom_index_names, inplace=True)\n        else:\n            raise TypeError(\n                f\"\"\"Invalid custom_index_names can be of type either list or dict,\n                or NoneType. Not: {type(custom_index_names)}.\"\"\"\n            )\n    if custom_param_names:\n        ind = df.index.to_frame()\n        ind = ind.replace(custom_param_names)\n        df.index = pd.MultiIndex.from_frame(ind)\n    return df\n\n\ndef _generate_notes_latex(\n    append_notes, notes_label, significance_levels, custom_notes, df\n):\n    \"\"\"Generate the LaTex script of the notes section.\n\n    Args:\n        append_notes (bool): see main docstring\n        notes_label (str): see main docstring\n        significance_levels (list): see main docstring\n        custom_notes (str): see main docstring\n        df (DataFrame): params DataFrame of estimation model\n\n    Returns:\n        notes_latex (str): a string with LaTex script\n\n    \"\"\"\n    n_levels = df.index.nlevels\n    n_columns = len(df.columns)\n    significance_levels = sorted(significance_levels)\n    notes_text = \"\"\n    if append_notes:\n        notes_text += \"\\\\midrule\\n\"\n        notes_text += \"\\\\textit{{{}}} & \\\\multicolumn{{{}}}{{r}}{{\".format(\n            notes_label, str(n_columns + n_levels - 1)\n        )\n        # iterate over penultimate significance_lelvels since last item of legend\n        # is not 
followed by a semi column\n        for i in range(len(significance_levels) - 1):\n            star = \"*\" * (len(significance_levels) - i)\n            notes_text += f\"$^{{{star}}}$p$<${significance_levels[i]};\"\n        notes_text += \"$^{*}$p$<$\" + str(significance_levels[-1]) + \"} \\\\\\\\\\n\"\n        if custom_notes:\n            amp_n = \"&\" * n_levels\n            if isinstance(custom_notes, list):\n                if not all(isinstance(n, str) for n in custom_notes):\n                    not_str_notes = [n for n in custom_notes if not isinstance(n, str)]\n                    not_str_notes_types = [type(n) for n in not_str_notes]\n                    raise ValueError(\n                        f\"\"\"Each custom note can only be of string type.\n                        The following notes:\n                        {not_str_notes} are of types {not_str_notes_types}\n                        respectively.\"\"\"\n                    )\n                for n in custom_notes:\n                    notes_text += \"\"\"\n                    {}\\\\multicolumn{{{}}}{{r}}\\\\textit{{{}}}\\\\\\\\\\n\"\"\".format(\n                        amp_n, n_columns, n\n                    )\n            elif isinstance(custom_notes, str):\n                notes_text += \"{}\\\\multicolumn{{{}}}{{r}}\\\\textit{{{}}}\\\\\\\\\\n\".format(\n                    amp_n, n_columns, custom_notes\n                )\n            else:\n                raise TypeError(\n                    f\"\"\"Custom notes can be either a string or a list of strings.\n                    Not: {type(custom_notes)}.\"\"\"\n                )\n    return notes_text\n\n\ndef _generate_notes_html(\n    append_notes, notes_label, significance_levels, custom_notes, df\n):\n    \"\"\"Generate the html script of the notes section of the estimation table.\n\n    Args:\n        append_notes (bool): see main docstring\n        notes_label (str): see main docstring\n        significance_levels (list): see main 
docstring\n        custom_notes (str): see main docstring\n        df (DataFrame): params DataFrame of estimation model\n\n    Returns:\n        notes_latex (str): a string with html script\n\n    \"\"\"\n    n_levels = df.index.nlevels\n    n_columns = len(df.columns)\n    significance_levels = sorted(significance_levels)\n    notes_text = \"\"\"<tr><td colspan=\"{}\" style=\"border-bottom: 1px solid black\">\n        </td></tr>\"\"\".format(n_columns + n_levels)\n    if append_notes:\n        notes_text += \"\"\"\n        <tr><td style=\"text-align: left\">{}</td><td colspan=\"{}\"\n        style=\"text-align: right\">\"\"\".format(notes_label, n_columns + n_levels - 1)\n        for i in range(len(significance_levels) - 1):\n            stars = \"*\" * (len(significance_levels) - i)\n            notes_text += f\"<sup>{stars}</sup>p&lt;{significance_levels[i]}; \"\n        notes_text += f\"\"\"<sup>*</sup>p&lt;{significance_levels[-1]} </td>\"\"\"\n        if custom_notes:\n            if isinstance(custom_notes, list):\n                if not all(isinstance(n, str) for n in custom_notes):\n                    not_str_notes = [n for n in custom_notes if not isinstance(n, str)]\n                    not_str_notes_types = [type(n) for n in not_str_notes]\n                    raise ValueError(\n                        f\"\"\"Each custom note can only be of string type.\n                        The following notes:\n                        {not_str_notes} are of types {not_str_notes_types}\n                        respectively.\"\"\"\n                    )\n                notes_text += \"\"\"\n                    <tr><td></td><td colspan=\"{}\"style=\"text-align: right\">{}</td></tr>\n                    \"\"\".format(n_columns + n_levels - 1, custom_notes[0])\n                if len(custom_notes) > 1:\n                    for i in range(1, len(custom_notes)):\n                        notes_text += \"\"\"\n                        <tr><td></td><td 
colspan=\"{}\"style=\"text-align: right\">\n                        {}</td></tr>\n                        \"\"\".format(n_columns + n_levels - 1, custom_notes[i])\n            elif isinstance(custom_notes, str):\n                notes_text += \"\"\"\n                    <tr><td></td><td colspan=\"{}\"style=\"text-align: right\">{}</td></tr>\n                    \"\"\".format(n_columns + n_levels - 1, custom_notes)\n            else:\n                raise TypeError(\n                    f\"\"\"Custom notes can be either a string or a list of strings,\n                    not {type(custom_notes)}.\"\"\"\n                )\n\n    return notes_text\n\n\ndef _extract_params_from_sm(model):\n    \"\"\"Convert statsmodels like estimation result to estimagic like params dataframe.\"\"\"\n    to_concat = []\n    params_list = [\"params\", \"pvalues\", \"bse\"]\n    for col in params_list:\n        to_concat.append(getattr(model, col))\n    to_concat.append(model.conf_int())\n    params_df = pd.concat(to_concat, axis=1)\n    params_df.columns = [\"value\", \"p_value\", \"standard_error\", \"ci_lower\", \"ci_upper\"]\n    return params_df\n\n\ndef _extract_info_from_sm(model):\n    \"\"\"Process statsmodels estimation result to retrieve summary statistics as dict.\"\"\"\n    info = {}\n    key_values = [\n        \"rsquared\",\n        \"rsquared_adj\",\n        \"fvalue\",\n        \"f_pvalue\",\n        \"df_model\",\n        \"df_resid\",\n    ]\n    for kv in key_values:\n        info[kv] = getattr(model, kv)\n    info[\"name\"] = model.model.endog_names\n    info[\"resid_std_err\"] = np.sqrt(model.scale)\n    info[\"n_obs\"] = model.df_model + model.df_resid + 1\n    return info\n\n\ndef _apply_number_format(df_raw, number_format, format_integers):\n    \"\"\"Apply string format to DataFrame cells.\n\n    Args:\n        df_raw (DataFrame): The DataFrame with float values to format.\n        number_format (str, list, tuple, callable or int): User defined number format\n  
          to apply to the DataFrame.\n        format_integers (bool): Apply number format also to integers\n\n    Returns:\n        df_formatted (DataFrame): Formatted DataFrame.\n\n    \"\"\"\n    processed_format = _process_number_format(number_format)\n    df_raw = df_raw.copy(deep=True)\n    if isinstance(processed_format, (list, tuple)):\n        df_formatted = df_raw.copy(deep=True).astype(\"float\")\n        for formatter in processed_format[:-1]:\n            df_formatted = pd_df_map(df_formatted, formatter.format).astype(\"float\")\n        df_formatted = pd_df_map(\n            df_formatted.astype(\"float\"), processed_format[-1].format\n        )\n    elif isinstance(processed_format, str):\n        df_formatted = pd_df_map(\n            df_raw.astype(\"str\"),\n            partial(_format_non_scientific_numbers, format_string=processed_format),\n        )\n    elif callable(processed_format):\n        df_formatted = pd_df_map(df_raw, processed_format)\n\n    # Don't format integers: set to original value\n    if not format_integers:\n        integer_locs = pd_df_map(df_raw, _is_integer)\n        df_formatted[integer_locs] = pd_df_map(\n            df_raw[integer_locs].astype(float), \"{:.0f}\".format\n        )\n    return df_formatted\n\n\ndef _format_non_scientific_numbers(number_string, format_string):\n    \"\"\"Apply number format if the number string is not in scientific format.\"\"\"\n    if \"e\" in number_string:\n        out = number_string\n    else:\n        out = format_string.format(float(number_string))\n    return out\n\n\ndef _process_number_format(raw_format):\n    \"\"\"Process the user define formatter.\n\n    Reduces cases for number format in apply_number_format.\n\n    \"\"\"\n    if isinstance(raw_format, str):\n        processed_format = [raw_format]\n    elif isinstance(raw_format, int):\n        processed_format = f\"{{0:.{raw_format}f}}\"\n    elif callable(raw_format) or isinstance(raw_format, (list, tuple)):\n        
processed_format = raw_format\n    else:\n        raise TypeError(\n            f\"\"\"Number format can be either of [str, int, tuple, list, callable] types.\n           Not: {type(raw_format)}.\"\"\"\n        )\n    return processed_format\n\n\ndef _get_digits_after_decimal(df):\n    \"\"\"Get the maximum number of digits after a decimal point in a DataFrame.\"\"\"\n    max_trail = 0\n    for c in df.columns:\n        try:\n            trail_length = (\n                (\n                    df[c][~df[c].astype(\"str\").str.contains(\"e\")]\n                    .astype(\"str\")\n                    .str.split(\".\", expand=True)[1]\n                    .astype(\"str\")\n                    .replace(\"None\", \"\")\n                )\n                .str.len()\n                .max()\n            )\n        except KeyError:\n            trail_length = 0\n        max_trail = max(trail_length, max_trail)\n    return max_trail\n\n\ndef _center_align_integers_and_non_numeric_strings(sr):\n    \"\"\"Align integer numbers and strings at the center of model column.\"\"\"\n    sr = deepcopy(sr)\n    for i in sr.index:\n        if _is_integer(sr[i]):\n            sr[i] = f\"\\\\multicolumn{{1}}{{c}}{{{str(int(float(sr[i])))}}}\"\n        else:\n            string_without_stars = sr[i].split(\"$\", 1)[0]\n            if not string_without_stars.replace(\".\", \"\").isnumeric():\n                sr[i] = f\"\\\\multicolumn{{1}}{{c}}{{{sr[i]}}}\"\n    return sr\n\n\ndef _get_updated_styler(\n    df, show_index_names, show_col_names, show_col_groups, escape_special_characters\n):\n    \"\"\"Return pandas.Styler object based ont the data and styling options.\"\"\"\n    styler = df.style\n    if not show_index_names:\n        styler = styler.hide(names=True)\n    if not show_col_names:\n        styler = styler.hide(axis=1)\n    if not show_col_groups:\n        styler = styler.hide(axis=1, level=0)\n    for ax in [0, 1]:\n        styler = 
styler.format_index(escape=escape_special_characters, axis=ax)\n    return styler\n\n\ndef _is_integer(num):\n    \"\"\"Check if number is an integer (including a float with only zeros as digits)\"\"\"\n    try:\n        out = int(float(num)) == float(num)\n    except ValueError:\n        out = False\n    return out\n"
  },
  {
    "path": "src/estimagic/examples/__init__.py",
    "content": ""
  },
  {
    "path": "src/estimagic/examples/diabetes.csv",
    "content": ",Age,Sex,BMI,ABP,S1,S2,S3,S4,S5,S6,target\n0,0.0380759064334241,0.0506801187398187,0.0616962065186885,0.0218723549949558,-0.0442234984244464,-0.0348207628376986,-0.0434008456520269,-0.00259226199818282,0.0199084208763183,-0.0176461251598052,151.0\n1,-0.00188201652779104,-0.044641636506989,-0.0514740612388061,-0.0263278347173518,-0.00844872411121698,-0.019163339748222,0.0744115640787594,-0.0394933828740919,-0.0683297436244215,-0.09220404962683,75.0\n2,0.0852989062966783,0.0506801187398187,0.0444512133365941,-0.00567061055493425,-0.0455994512826475,-0.0341944659141195,-0.0323559322397657,-0.00259226199818282,0.00286377051894013,-0.0259303389894746,141.0\n3,-0.0890629393522603,-0.044641636506989,-0.0115950145052127,-0.0366564467985606,0.0121905687618,0.0249905933641021,-0.0360375700438527,0.0343088588777263,0.0226920225667445,-0.0093619113301358,206.0\n4,0.00538306037424807,-0.044641636506989,-0.0363846922044735,0.0218723549949558,0.00393485161259318,0.0155961395104161,0.0081420836051921,-0.00259226199818282,-0.0319914449413559,-0.0466408735636482,135.0\n5,-0.0926954778032799,-0.044641636506989,-0.0406959404999971,-0.0194420933298793,-0.0689906498720667,-0.0792878444118122,0.0412768238419757,-0.076394503750001,-0.0411803851880079,-0.0963461565416647,97.0\n6,-0.0454724779400257,0.0506801187398187,-0.0471628129432825,-0.015999222636143,-0.040095639849843,-0.0248000120604336,0.000778807997017968,-0.0394933828740919,-0.0629129499162512,-0.0383566597339788,138.0\n7,0.063503675590561,0.0506801187398187,-0.00189470584028465,0.0666296740135272,0.0906198816792644,0.108914381123697,0.0228686348215404,0.0177033544835672,-0.0358167281015492,0.00306440941436832,63.0\n8,0.0417084448844436,0.0506801187398187,0.0616962065186885,-0.0400993174922969,-0.0139525355440215,0.00620168565673016,-0.0286742944356786,-0.00259226199818282,-0.0149564750249113,0.0113486232440377,110.0\n9,-0.0709002470971626,-0.044641636506989,0.0390621529671896,-0.0332135761048244,-0.012576582685820
4,-0.034507614375909,-0.0249926566315915,-0.00259226199818282,0.0677363261102861,-0.0135040182449705,310.0\n10,-0.0963280162542995,-0.044641636506989,-0.0838084234552331,0.0081008722200108,-0.103389471327095,-0.0905611890362353,-0.0139477432193303,-0.076394503750001,-0.0629129499162512,-0.0342145528191441,101.0\n11,0.0271782910803654,0.0506801187398187,0.0175059114895716,-0.0332135761048244,-0.00707277125301585,0.0459715403040008,-0.0654906724765493,0.0712099797536354,-0.096433222891784,-0.0590671943081523,69.0\n12,0.0162806757273067,-0.044641636506989,-0.0288400076873072,-0.00911348124867051,-0.00432086553661359,-0.00976888589453599,0.0449584616460628,-0.0394933828740919,-0.0307512098645563,-0.0424987666488135,179.0\n13,0.00538306037424807,0.0506801187398187,-0.00189470584028465,0.0081008722200108,-0.00432086553661359,-0.0157187066685371,-0.0029028298070691,-0.00259226199818282,0.0383932482116977,-0.0135040182449705,185.0\n14,0.0453409833354632,-0.044641636506989,-0.0256065714656645,-0.0125563519424068,0.0176943801946045,-6.12835790604833e-05,0.0817748396869335,-0.0394933828740919,-0.0319914449413559,-0.0756356219674911,118.0\n15,-0.0527375548420648,0.0506801187398187,-0.0180618869484982,0.0804011567884723,0.0892439288210632,0.107661787276539,-0.0397192078479398,0.108111100629544,0.0360557900898319,-0.0424987666488135,171.0\n16,-0.00551455497881059,-0.044641636506989,0.0422955891888323,0.0494153205448459,0.0245741444856101,-0.0238605666750649,0.0744115640787594,-0.0394933828740919,0.0522799997967812,0.0279170509033766,166.0\n17,0.0707687524926,0.0506801187398187,0.0121168511201671,0.0563010619323185,0.034205814493018,0.0494161733836856,-0.0397192078479398,0.0343088588777263,0.027367707542609,-0.00107769750046639,144.0\n18,-0.0382074010379866,-0.044641636506989,-0.0105172024313319,-0.0366564467985606,-0.0373437341334407,-0.0194764882100115,-0.0286742944356786,-0.00259226199818282,-0.0181182673078967,-0.0176461251598052,97.0\n19,-0.0273097856849279,-0.044641636506989
,-0.0180618869484982,-0.0400993174922969,-0.00294491267841247,-0.0113346282034837,0.0375951860378887,-0.0394933828740919,-0.0089440189577978,-0.0549250873933176,168.0\n20,-0.0491050163910452,-0.044641636506989,-0.0568631216082106,-0.0435421881860331,-0.0455994512826475,-0.043275771306016,0.000778807997017968,-0.0394933828740919,-0.0119006848015081,0.0154907301588724,68.0\n21,-0.0854304009012408,0.0506801187398187,-0.0223731352440218,0.00121513083253827,-0.0373437341334407,-0.0263657543693812,0.0155053592133662,-0.0394933828740919,-0.072128454601956,-0.0176461251598052,49.0\n22,-0.0854304009012408,-0.044641636506989,-0.00405032998804645,-0.00911348124867051,-0.00294491267841247,0.00776742796567782,0.0228686348215404,-0.0394933828740919,-0.0611765950943345,-0.0135040182449705,68.0\n23,0.0453409833354632,0.0506801187398187,0.0606183944448076,0.0310533436263482,0.0287020030602135,-0.0473467013092799,-0.0544457590642881,0.0712099797536354,0.133598980013008,0.135611830689079,245.0\n24,-0.0636351701951234,-0.044641636506989,0.0358287167455469,-0.0228849640236156,-0.0304639698424351,-0.0188501912864324,-0.00658446761115617,-0.00259226199818282,-0.0259524244351894,-0.0549250873933176,184.0\n25,-0.067267708646143,0.0506801187398187,-0.0126728265790937,-0.0400993174922969,-0.0153284884022226,0.0046359433477825,-0.0581273968683752,0.0343088588777263,0.0191990330785671,-0.0342145528191441,202.0\n26,-0.107225631607358,-0.044641636506989,-0.0773415510119477,-0.0263278347173518,-0.0896299427450836,-0.0961978613484469,0.0265502726256275,-0.076394503750001,-0.0425721049227942,-0.0052198044153011,137.0\n27,-0.0236772472339084,-0.044641636506989,0.0595405823709267,-0.0400993174922969,-0.0428475455662452,-0.0435889197678055,0.0118237214092792,-0.0394933828740919,-0.0159982677581387,0.0403433716478807,85.0\n28,0.0526060602375023,-0.044641636506989,-0.0212953231701409,-0.0745280244296595,-0.040095639849843,-0.0376390989938044,-0.00658446761115617,-0.0394933828740919,-0.000609254186102297,
-0.0549250873933176,131.0\n29,0.0671362140415805,0.0506801187398187,-0.00620595413580824,0.063186803319791,-0.0428475455662452,-0.0958847128866574,0.052321737254237,-0.076394503750001,0.0594238004447941,0.0527696923923848,283.0\n30,-0.0600026317441039,-0.044641636506989,0.0444512133365941,-0.0194420933298793,-0.00982467696941811,-0.00757684666200928,0.0228686348215404,-0.0394933828740919,-0.0271286455543265,-0.0093619113301358,129.0\n31,-0.0236772472339084,-0.044641636506989,-0.0654856181992578,-0.081413765817132,-0.0387196869916418,-0.0536096705450705,0.0596850128624111,-0.076394503750001,-0.0371283460104736,-0.0424987666488135,59.0\n32,0.0344433679824045,0.0506801187398187,0.125287118877662,0.0287580963824284,-0.0538551684318543,-0.0129003705124313,-0.10230705051742,0.108111100629544,0.000271485727907132,0.0279170509033766,341.0\n33,0.030810829531385,-0.044641636506989,-0.0503962491649252,-0.00222773986119799,-0.0442234984244464,-0.0899348921126563,0.118591217727804,-0.076394503750001,-0.0181182673078967,0.00306440941436832,87.0\n34,0.0162806757273067,-0.044641636506989,-0.063329994051496,-0.0573136709609782,-0.0579830270064577,-0.0489124436182275,0.0081420836051921,-0.0394933828740919,-0.0594726974107223,-0.0673514081378217,65.0\n35,0.0489735217864827,0.0506801187398187,-0.030995631835069,-0.0492803060204031,0.0493412959332305,-0.00413221358232442,0.133317768944152,-0.0535158088069373,0.0213108465682448,0.0196328370737072,102.0\n36,0.0126481372762872,-0.044641636506989,0.0228949718589761,0.0528581912385822,0.00806271018719657,-0.0285577936019079,0.0375951860378887,-0.0394933828740919,0.0547240033481791,-0.0259303389894746,265.0\n37,-0.00914709342983014,-0.044641636506989,0.0110390390462862,-0.0573136709609782,-0.0249601584096305,-0.0429626228442264,0.0302319104297145,-0.0394933828740919,0.01703713241478,-0.0052198044153011,276.0\n38,-0.00188201652779104,0.0506801187398187,0.0713965151836166,0.0976155102571536,0.0878679759628621,0.0754074957122168,-0.0213110188275
045,0.0712099797536354,0.0714240327805764,0.0237749439885419,252.0\n39,-0.00188201652779104,0.0506801187398187,0.0142724752679289,-0.0745280244296595,0.00255889875439205,0.00620168565673016,-0.0139477432193303,-0.00259226199818282,0.0191990330785671,0.00306440941436832,90.0\n40,0.00538306037424807,0.0506801187398187,-0.00836157828357004,0.0218723549949558,0.054845107366035,0.07321545647969,-0.0249926566315915,0.0343088588777263,0.0125531528133893,0.094190761540732,100.0\n41,-0.099960554705319,-0.044641636506989,-0.0676412423470196,-0.108956731367022,-0.0744944613048712,-0.072711726714232,0.0155053592133662,-0.0394933828740919,-0.0498684677352306,-0.0093619113301358,55.0\n42,-0.0600026317441039,0.0506801187398187,-0.0105172024313319,-0.0148515990830405,-0.0497273098572509,-0.0235474182132754,-0.0581273968683752,0.0158582984397717,-0.00991895736315477,-0.0342145528191441,61.0\n43,0.0199132141783263,-0.044641636506989,-0.0234509473179027,-0.0710851537359232,0.0204462859110067,-0.0100820343563255,0.118591217727804,-0.076394503750001,-0.0425721049227942,0.0734802269665584,92.0\n44,0.0453409833354632,0.0506801187398187,0.068163078961974,0.0081008722200108,-0.0167044412604238,0.0046359433477825,-0.0765355858888105,0.0712099797536354,0.0324332257796019,-0.0176461251598052,259.0\n45,0.0271782910803654,0.0506801187398187,-0.0353068801305926,0.0322009670761646,-0.0112006298276192,0.00150445872988718,-0.0102661054152432,-0.00259226199818282,-0.0149564750249113,-0.0507829804784829,53.0\n46,-0.0563700932930843,-0.044641636506989,-0.0115950145052127,-0.0332135761048244,-0.0469754041408486,-0.0476598497710694,0.00446044580110504,-0.0394933828740919,-0.00797939755454164,-0.0880619427119953,190.0\n47,-0.0781653239992017,-0.044641636506989,-0.0730303027164241,-0.0573136709609782,-0.0841261313122791,-0.0742774690231797,-0.0249926566315915,-0.0394933828740919,-0.0181182673078967,-0.0839198357971606,142.0\n48,0.0671362140415805,0.0506801187398187,-0.041773752573878,0.0115437429137471,0.0
0255889875439205,0.00588853719494063,0.0412768238419757,-0.0394933828740919,-0.0594726974107223,-0.0217882320746399,75.0\n49,-0.0418399394890061,0.0506801187398187,0.0142724752679289,-0.00567061055493425,-0.0125765826858204,0.00620168565673016,-0.0728539480847234,0.0712099797536354,0.0354619386607697,-0.0135040182449705,142.0\n50,0.0344433679824045,-0.044641636506989,-0.00728376620968916,0.0149866136074833,-0.0442234984244464,-0.0373259505320149,-0.0029028298070691,-0.0394933828740919,-0.02139368094036,0.00720651632920303,155.0\n51,0.0598711371395414,0.0506801187398187,0.0164280994156907,0.0287580963824284,-0.0414715927080441,-0.029184090525487,-0.0286742944356786,-0.00259226199818282,-0.00239668149341427,-0.0217882320746399,225.0\n52,-0.0527375548420648,-0.044641636506989,-0.00943939035745095,-0.00567061055493425,0.0397096259258226,0.0447189464568426,0.0265502726256275,-0.00259226199818282,-0.0181182673078967,-0.0135040182449705,59.0\n53,-0.00914709342983014,-0.044641636506989,-0.0159062628007364,0.0700725447072635,0.0121905687618,0.0221722572079963,0.0155053592133662,-0.00259226199818282,-0.0332487872476258,0.0486275854775501,104.0\n54,-0.0491050163910452,-0.044641636506989,0.0250505960067379,0.0081008722200108,0.0204462859110067,0.0177881787429428,0.052321737254237,-0.0394933828740919,-0.0411803851880079,0.00720651632920303,182.0\n55,-0.0418399394890061,-0.044641636506989,-0.0493184370910443,-0.0366564467985606,-0.00707277125301585,-0.0226079728279068,0.0854564774910206,-0.0394933828740919,-0.0664881482228354,0.00720651632920303,128.0\n56,-0.0418399394890061,-0.044641636506989,0.0412177771149514,-0.0263278347173518,-0.0318399227006362,-0.0304366843726451,-0.0360375700438527,0.00294290613320356,0.0336568129023847,-0.0176461251598052,52.0\n57,-0.0273097856849279,-0.044641636506989,-0.063329994051496,-0.0504279295735057,-0.0896299427450836,-0.104339721354975,0.052321737254237,-0.076394503750001,-0.0561575730950062,-0.0673514081378217,37.0\n58,0.0417084448844436,-0.0
44641636506989,-0.064407806125377,0.0356438377699009,0.0121905687618,-0.057993749010124,0.181179060397284,-0.076394503750001,-0.000609254186102297,-0.0507829804784829,170.0\n59,0.063503675590561,0.0506801187398187,-0.0256065714656645,0.0115437429137471,0.0644767773734429,0.048476727998317,0.0302319104297145,-0.00259226199818282,0.0383932482116977,0.0196328370737072,170.0\n60,-0.0709002470971626,-0.044641636506989,-0.00405032998804645,-0.0400993174922969,-0.0662387441556644,-0.0786615474882331,0.052321737254237,-0.076394503750001,-0.0514005352605825,-0.0342145528191441,61.0\n61,-0.0418399394890061,0.0506801187398187,0.00457216660300077,-0.0538708002672419,-0.0442234984244464,-0.0273051997547498,-0.0802172236928976,0.0712099797536354,0.0366457977933988,0.0196328370737072,144.0\n62,-0.0273097856849279,0.0506801187398187,-0.00728376620968916,-0.0400993174922969,-0.0112006298276192,-0.0138398158977999,0.0596850128624111,-0.0394933828740919,-0.0823814832581028,-0.0259303389894746,52.0\n63,-0.034574862586967,-0.044641636506989,-0.0374625042783544,-0.0607565416547144,0.0204462859110067,0.0434663526096845,-0.0139477432193303,-0.00259226199818282,-0.0307512098645563,-0.0714935150526564,128.0\n64,0.0671362140415805,0.0506801187398187,-0.0256065714656645,-0.0400993174922969,-0.0634868384392622,-0.0598726397808612,-0.0029028298070691,-0.0394933828740919,-0.0191970476139445,0.0113486232440377,71.0\n65,-0.0454724779400257,0.0506801187398187,-0.0245287593917836,0.0597439326260547,0.00531080447079431,0.0149698425868371,-0.0544457590642881,0.0712099797536354,0.0423448954496075,0.0154907301588724,163.0\n66,-0.00914709342983014,0.0506801187398187,-0.0180618869484982,-0.0332135761048244,-0.0208322998350272,0.0121515064307313,-0.0728539480847234,0.0712099797536354,0.000271485727907132,0.0196328370737072,150.0\n67,0.0417084448844436,0.0506801187398187,-0.0148284507268555,-0.0171468461892456,-0.00569681839481472,0.00839372488925688,-0.0139477432193303,-0.00185423958066465,-0.01190068480150
81,0.00306440941436832,97.0\n68,0.0380759064334241,0.0506801187398187,-0.0299178197611881,-0.0400993174922969,-0.0332158755588373,-0.0241737151368545,-0.0102661054152432,-0.00259226199818282,-0.0129079422541688,0.00306440941436832,160.0\n69,0.0162806757273067,-0.044641636506989,-0.0460850008694016,-0.00567061055493425,-0.0758704141630723,-0.0614383820898088,-0.0139477432193303,-0.0394933828740919,-0.0514005352605825,0.0196328370737072,178.0\n70,-0.00188201652779104,-0.044641636506989,-0.0697968664947814,-0.0125563519424068,-0.000193006962010205,-0.00914258897095694,0.0707299262746723,-0.0394933828740919,-0.0629129499162512,0.0403433716478807,48.0\n71,-0.00188201652779104,-0.044641636506989,0.0336730925977851,0.125158475807044,0.0245741444856101,0.0262431872112602,-0.0102661054152432,-0.00259226199818282,0.0267142576335128,0.0610539062220542,270.0\n72,0.063503675590561,0.0506801187398187,-0.00405032998804645,-0.0125563519424068,0.103003457403075,0.0487898764601065,0.056003375058324,-0.00259226199818282,0.0844952822124031,-0.0176461251598052,202.0\n73,0.0126481372762872,0.0506801187398187,-0.02021751109626,-0.00222773986119799,0.0383336730676214,0.05317395492516,-0.00658446761115617,0.0343088588777263,-0.00514530798026311,-0.0093619113301358,111.0\n74,0.0126481372762872,0.0506801187398187,0.00241654245523897,0.0563010619323185,0.0273260502020124,0.0171618818193638,0.0412768238419757,-0.0394933828740919,0.00371173823343597,0.0734802269665584,85.0\n75,-0.00914709342983014,0.0506801187398187,-0.030995631835069,-0.0263278347173518,-0.0112006298276192,-0.00100072896442909,-0.0213110188275045,-0.00259226199818282,0.0062093156165054,0.0279170509033766,42.0\n76,-0.0309423241359475,0.0506801187398187,0.0282840322283806,0.0700725447072635,-0.126780669916514,-0.106844909049291,-0.0544457590642881,-0.047980640675551,-0.0307512098645563,0.0154907301588724,170.0\n77,-0.0963280162542995,-0.044641636506989,-0.0363846922044735,-0.0745280244296595,-0.0387196869916418,-0.027618348216539
3,0.0155053592133662,-0.0394933828740919,-0.0740888714915354,-0.00107769750046639,200.0\n78,0.00538306037424807,-0.044641636506989,-0.0579409336820915,-0.0228849640236156,-0.0676146970138656,-0.0683276482491785,-0.0544457590642881,-0.00259226199818282,0.0428956878925287,-0.0839198357971606,252.0\n79,-0.103593093156339,-0.044641636506989,-0.0374625042783544,-0.0263278347173518,0.00255889875439205,0.0199802179754696,0.0118237214092792,-0.00259226199818282,-0.0683297436244215,-0.0259303389894746,113.0\n80,0.0707687524926,-0.044641636506989,0.0121168511201671,0.0425295791573734,0.0713565416644485,0.0534871033869495,0.052321737254237,-0.00259226199818282,0.0253931349154494,-0.0052198044153011,143.0\n81,0.0126481372762872,0.0506801187398187,-0.0223731352440218,-0.0297707054110881,0.0108146159035988,0.0284352264437869,-0.0213110188275045,0.0343088588777263,-0.00608024819631442,-0.00107769750046639,51.0\n82,-0.0164121703318693,-0.044641636506989,-0.0353068801305926,-0.0263278347173518,0.0328298616348169,0.0171618818193638,0.100183028707369,-0.0394933828740919,-0.0702093127286876,-0.0797777288823259,52.0\n83,-0.0382074010379866,-0.044641636506989,0.00996122697240527,-0.0469850588797694,-0.0593589798646588,-0.0529833736214915,-0.0102661054152432,-0.0394933828740919,-0.0159982677581387,-0.0424987666488135,210.0\n84,0.00175052192322852,-0.044641636506989,-0.0396181284261162,-0.100923366426447,-0.0290880169842339,-0.0301235359108556,0.0449584616460628,-0.0501947079281055,-0.0683297436244215,-0.129483011860342,65.0\n85,0.0453409833354632,-0.044641636506989,0.0713965151836166,0.00121513083253827,-0.00982467696941811,-0.00100072896442909,0.0155053592133662,-0.0394933828740919,-0.0411803851880079,-0.0714935150526564,141.0\n86,-0.0709002470971626,0.0506801187398187,-0.0751859268641859,-0.0400993174922969,-0.051103262715452,-0.015092409744958,-0.0397192078479398,-0.00259226199818282,-0.096433222891784,-0.0342145528191441,55.0\n87,0.0453409833354632,-0.044641636506989,-0.00620595413580
824,0.0115437429137471,0.0631008245152418,0.0162224364339952,0.0965013909032818,-0.0394933828740919,0.0428956878925287,-0.0383566597339788,134.0\n88,-0.0527375548420648,0.0506801187398187,-0.0406959404999971,-0.067642283042187,-0.0318399227006362,-0.0370128020702253,0.0375951860378887,-0.0394933828740919,-0.0345237153303495,0.0693381200517237,42.0\n89,-0.0454724779400257,-0.044641636506989,-0.0482406250171634,-0.0194420933298793,-0.000193006962010205,-0.0160318551303266,0.0670482884705852,-0.0394933828740919,-0.0247911874324607,0.0196328370737072,111.0\n90,0.0126481372762872,-0.044641636506989,-0.0256065714656645,-0.0400993174922969,-0.0304639698424351,-0.0451546620767532,0.0780932018828464,-0.076394503750001,-0.072128454601956,0.0113486232440377,98.0\n91,0.0453409833354632,-0.044641636506989,0.0519958978537604,-0.0538708002672419,0.0631008245152418,0.0647604480113727,-0.0102661054152432,0.0343088588777263,0.0372320112089689,0.0196328370737072,164.0\n92,-0.0200447087828888,-0.044641636506989,0.00457216660300077,0.0976155102571536,0.00531080447079431,-0.0207290820571696,0.0633666506664982,-0.0394933828740919,0.0125531528133893,0.0113486232440377,48.0\n93,-0.0491050163910452,-0.044641636506989,-0.064407806125377,-0.10207098997955,-0.00294491267841247,-0.0154055582067476,0.0633666506664982,-0.0472426182580328,-0.0332487872476258,-0.0549250873933176,96.0\n94,-0.0781653239992017,-0.044641636506989,-0.0169840748746173,-0.0125563519424068,-0.000193006962010205,-0.0135266674360104,0.0707299262746723,-0.0394933828740919,-0.0411803851880079,-0.09220404962683,90.0\n95,-0.0709002470971626,-0.044641636506989,-0.0579409336820915,-0.081413765817132,-0.0455994512826475,-0.0288709420636975,-0.0434008456520269,-0.00259226199818282,0.00114379737951254,-0.0052198044153011,162.0\n96,0.0562385986885218,0.0506801187398187,0.00996122697240527,0.0494153205448459,-0.00432086553661359,-0.0122740735888523,-0.0434008456520269,0.0343088588777263,0.060787754150744,0.0320591578182113,150.0\n97,-0.
0273097856849279,-0.044641636506989,0.088641508365711,-0.0251802111642493,0.0218222387692079,0.0425269072243159,-0.0323559322397657,0.0343088588777263,0.00286377051894013,0.0776223338813931,279.0\n98,0.00175052192322852,0.0506801187398187,-0.00512814206192736,-0.0125563519424068,-0.0153284884022226,-0.0138398158977999,0.0081420836051921,-0.0394933828740919,-0.00608024819631442,-0.0673514081378217,92.0\n99,-0.00188201652779104,-0.044641636506989,-0.064407806125377,0.0115437429137471,0.0273260502020124,0.0375165318356834,-0.0139477432193303,0.0343088588777263,0.0117839003835759,-0.0549250873933176,83.0\n100,0.0162806757273067,-0.044641636506989,0.0175059114895716,-0.0228849640236156,0.0603489187988395,0.0444057979950531,0.0302319104297145,-0.00259226199818282,0.0372320112089689,-0.00107769750046639,128.0\n101,0.0162806757273067,0.0506801187398187,-0.0450071887955207,0.063186803319791,0.0108146159035988,-0.00037443204085002,0.0633666506664982,-0.0394933828740919,-0.0307512098645563,0.036201264733046,102.0\n102,-0.0926954778032799,-0.044641636506989,0.0282840322283806,-0.015999222636143,0.0369577202094203,0.0249905933641021,0.056003375058324,-0.0394933828740919,-0.00514530798026311,-0.00107769750046639,302.0\n103,0.0598711371395414,0.0506801187398187,0.0412177771149514,0.0115437429137471,0.0410855787840237,0.0707102687853738,-0.0360375700438527,0.0343088588777263,-0.0109044358473771,-0.0300724459043093,198.0\n104,-0.0273097856849279,-0.044641636506989,0.0649296427403312,-0.00222773986119799,-0.0249601584096305,-0.0172844489774848,0.0228686348215404,-0.0394933828740919,-0.0611765950943345,-0.063209301222987,95.0\n105,0.0235457526293458,0.0506801187398187,-0.0320734439089499,-0.0400993174922969,-0.0318399227006362,-0.0216685274425382,-0.0139477432193303,-0.00259226199818282,-0.0109044358473771,0.0196328370737072,53.0\n106,-0.0963280162542995,-0.044641636506989,-0.0762637389380668,-0.0435421881860331,-0.0455994512826475,-0.0348207628376986,0.0081420836051921,-0.03949338287
40919,-0.0594726974107223,-0.0839198357971606,134.0\n107,0.0271782910803654,-0.044641636506989,0.0498402737059986,-0.0550184238203444,-0.00294491267841247,0.0406480164535787,-0.0581273968683752,0.0527594193156808,-0.0529587932392004,-0.0052198044153011,144.0\n108,0.0199132141783263,0.0506801187398187,0.045529025410475,0.0299057198322448,-0.062110885581061,-0.0558017097775973,-0.0728539480847234,0.0269286347025444,0.0456008084141249,0.0403433716478807,232.0\n109,0.0380759064334241,0.0506801187398187,-0.00943939035745095,0.0023627543856408,0.00118294589619092,0.0375165318356834,-0.0544457590642881,0.0501763408543672,-0.0259524244351894,0.106617082285236,81.0\n110,0.0417084448844436,0.0506801187398187,-0.0320734439089499,-0.0228849640236156,-0.0497273098572509,-0.0401442866881206,0.0302319104297145,-0.0394933828740919,-0.126097385560409,0.0154907301588724,104.0\n111,0.0199132141783263,-0.044641636506989,0.00457216660300077,-0.0263278347173518,0.023198191627409,0.0102726156599941,0.0670482884705852,-0.0394933828740919,-0.0236445575721341,-0.0466408735636482,59.0\n112,-0.0854304009012408,-0.044641636506989,0.0207393477112143,-0.0263278347173518,0.00531080447079431,0.01966706951368,-0.0029028298070691,-0.00259226199818282,-0.0236445575721341,0.00306440941436832,246.0\n113,0.0199132141783263,0.0506801187398187,0.0142724752679289,0.063186803319791,0.0149424744782022,0.0202933664372591,-0.0470824834561139,0.0343088588777263,0.0466607723568145,0.0900486546258972,297.0\n114,0.0235457526293458,-0.044641636506989,0.110197749843329,0.063186803319791,0.0135665216200011,-0.0329418720669614,-0.0249926566315915,0.0206554441536399,0.09924022573399,0.0237749439885419,258.0\n115,-0.0309423241359475,0.0506801187398187,0.00133873038135806,-0.00567061055493425,0.0644767773734429,0.0494161733836856,-0.0470824834561139,0.108111100629544,0.0837967663655224,0.00306440941436832,229.0\n116,0.0489735217864827,0.0506801187398187,0.0584627702970458,0.0700725447072635,0.0135665216200011,0.0206065148
990486,-0.0213110188275045,0.0343088588777263,0.0220040504561505,0.0279170509033766,275.0\n117,0.0598711371395414,-0.044641636506989,-0.0212953231701409,0.0872868981759448,0.0452134373586271,0.0315667110616823,-0.0470824834561139,0.0712099797536354,0.0791210813896579,0.135611830689079,281.0\n118,-0.0563700932930843,0.0506801187398187,-0.0105172024313319,0.0253152256886921,0.023198191627409,0.0400217195299996,-0.0397192078479398,0.0343088588777263,0.0206123307213641,0.0569117993072195,179.0\n119,0.0162806757273067,-0.044641636506989,-0.0471628129432825,-0.00222773986119799,-0.019456346976826,-0.0429626228442264,0.0339135482338016,-0.0394933828740919,0.027367707542609,0.0279170509033766,200.0\n120,-0.0491050163910452,-0.044641636506989,0.00457216660300077,0.0115437429137471,-0.0373437341334407,-0.0185370428246429,-0.0176293810234174,-0.00259226199818282,-0.0398095943643375,-0.0217882320746399,200.0\n121,0.063503675590561,-0.044641636506989,0.0175059114895716,0.0218723549949558,0.00806271018719657,0.0215459602844172,-0.0360375700438527,0.0343088588777263,0.0199084208763183,0.0113486232440377,173.0\n122,0.0489735217864827,0.0506801187398187,0.0810968238485447,0.0218723549949558,0.0438374845004259,0.0641341510877936,-0.0544457590642881,0.0712099797536354,0.0324332257796019,0.0486275854775501,180.0\n123,0.00538306037424807,0.0506801187398187,0.034750904671666,-0.00108011630809546,0.152537760298315,0.198787989657293,-0.0618090346724622,0.185234443260194,0.0155668445407018,0.0734802269665584,84.0\n124,-0.00551455497881059,-0.044641636506989,0.023972783932857,0.0081008722200108,-0.0345918284170385,-0.0388916928409625,0.0228686348215404,-0.0394933828740919,-0.0159982677581387,-0.0135040182449705,121.0\n125,-0.00551455497881059,0.0506801187398187,-0.00836157828357004,-0.00222773986119799,-0.0332158755588373,-0.0636304213223356,-0.0360375700438527,-0.00259226199818282,0.0805854642386665,0.00720651632920303,161.0\n126,-0.0890629393522603,-0.044641636506989,-0.0611743699037342,-0
.0263278347173518,-0.0552311212900554,-0.0545491159304391,0.0412768238419757,-0.076394503750001,-0.0939356455087147,-0.0549250873933176,99.0\n127,0.0344433679824045,0.0506801187398187,-0.00189470584028465,-0.0125563519424068,0.0383336730676214,0.0137172487396789,0.0780932018828464,-0.0394933828740919,0.00455189046612778,-0.0963461565416647,109.0\n128,-0.0527375548420648,-0.044641636506989,-0.0622521819776151,-0.0263278347173518,-0.00569681839481472,-0.005071658967693,0.0302319104297145,-0.0394933828740919,-0.0307512098645563,-0.0714935150526564,115.0\n129,0.00901559882526763,-0.044641636506989,0.0164280994156907,0.00465800152627453,0.0094386630453977,0.0105857641217836,-0.0286742944356786,0.0343088588777263,0.0389683660308856,0.11904340302974,268.0\n130,-0.0636351701951234,0.0506801187398187,0.0961861928828773,0.104501251644626,-0.00294491267841247,-0.00475851050590347,-0.00658446761115617,-0.00259226199818282,0.0226920225667445,0.0734802269665584,274.0\n131,-0.0963280162542995,-0.044641636506989,-0.0697968664947814,-0.067642283042187,-0.019456346976826,-0.0107083312799046,0.0155053592133662,-0.0394933828740919,-0.0468794828442166,-0.0797777288823259,158.0\n132,0.0162806757273067,0.0506801187398187,-0.0212953231701409,-0.00911348124867051,0.034205814493018,0.047850431074738,0.000778807997017968,-0.00259226199818282,-0.0129079422541688,0.0237749439885419,107.0\n133,-0.0418399394890061,0.0506801187398187,-0.0536296853865679,-0.0400993174922969,-0.0841261313122791,-0.0717722813288634,-0.0029028298070691,-0.0394933828740919,-0.072128454601956,-0.0300724459043093,83.0\n134,-0.0745327855481821,-0.044641636506989,0.0433734012627132,-0.0332135761048244,0.0121905687618,0.000251864882729031,0.0633666506664982,-0.0394933828740919,-0.0271286455543265,-0.0466408735636482,103.0\n135,-0.00551455497881059,-0.044641636506989,0.056307146149284,-0.0366564467985606,-0.0483513569990498,-0.0429626228442264,-0.0728539480847234,0.0379989709653172,0.0507815133629732,0.0569117993072195,272.0
\n136,-0.0926954778032799,-0.044641636506989,-0.0816527993074713,-0.0573136709609782,-0.0607349327228599,-0.068014499787389,0.0486400994501499,-0.076394503750001,-0.0664881482228354,-0.0217882320746399,85.0\n137,0.00538306037424807,-0.044641636506989,0.0498402737059986,0.0976155102571536,-0.0153284884022226,-0.0163450035921162,-0.00658446761115617,-0.00259226199818282,0.01703713241478,-0.0135040182449705,280.0\n138,0.0344433679824045,0.0506801187398187,0.11127556191721,0.076958286094736,-0.0318399227006362,-0.03388131745233,-0.0213110188275045,-0.00259226199818282,0.028016506523264,0.0734802269665584,336.0\n139,0.0235457526293458,-0.044641636506989,0.0616962065186885,0.0528581912385822,-0.0345918284170385,-0.0489124436182275,-0.0286742944356786,-0.00259226199818282,0.0547240033481791,-0.0052198044153011,281.0\n140,0.0417084448844436,0.0506801187398187,0.0142724752679289,0.0425295791573734,-0.0304639698424351,-0.00131387742621863,-0.0434008456520269,-0.00259226199818282,-0.0332487872476258,0.0154907301588724,118.0\n141,-0.0273097856849279,-0.044641636506989,0.0476846495582368,-0.0469850588797694,0.034205814493018,0.0572448849284239,-0.0802172236928976,0.13025177315509,0.0450661683362615,0.131469723774244,317.0\n142,0.0417084448844436,0.0506801187398187,0.0121168511201671,0.0390867084636372,0.054845107366035,0.0444057979950531,0.00446044580110504,-0.00259226199818282,0.0456008084141249,-0.00107769750046639,235.0\n143,-0.0309423241359475,-0.044641636506989,0.00564997867688165,-0.00911348124867051,0.0190703330528056,0.00682798258030921,0.0744115640787594,-0.0394933828740919,-0.0411803851880079,-0.0424987666488135,60.0\n144,0.030810829531385,0.0506801187398187,0.0466068374843559,-0.015999222636143,0.0204462859110067,0.0506687672308438,-0.0581273968683752,0.0712099797536354,0.0062093156165054,0.00720651632920303,174.0\n145,-0.0418399394890061,-0.044641636506989,0.128520555099304,0.063186803319791,-0.0332158755588373,-0.0326287236051719,0.0118237214092792,-0.03949338287409
19,-0.0159982677581387,-0.0507829804784829,259.0\n146,-0.0309423241359475,0.0506801187398187,0.0595405823709267,0.00121513083253827,0.0121905687618,0.0315667110616823,-0.0434008456520269,0.0343088588777263,0.0148227108412663,0.00720651632920303,178.0\n147,-0.0563700932930843,-0.044641636506989,0.0929527566612346,-0.0194420933298793,0.0149424744782022,0.0234248510551544,-0.0286742944356786,0.0254525898675081,0.0260560896336847,0.0403433716478807,128.0\n148,-0.0600026317441039,0.0506801187398187,0.0153502873418098,-0.0194420933298793,0.0369577202094203,0.0481635795365275,0.0191869970174533,-0.00259226199818282,-0.0307512098645563,-0.00107769750046639,96.0\n149,-0.0491050163910452,0.0506801187398187,-0.00512814206192736,-0.0469850588797694,-0.0208322998350272,-0.0204159335953801,-0.0691723102806364,0.0712099797536354,0.061237907519701,-0.0383566597339788,126.0\n150,0.0235457526293458,-0.044641636506989,0.0703187031097357,0.0253152256886921,-0.0345918284170385,-0.014466112821379,-0.0323559322397657,-0.00259226199818282,-0.0191970476139445,-0.0093619113301358,288.0\n151,0.00175052192322852,-0.044641636506989,-0.00405032998804645,-0.00567061055493425,-0.00844872411121698,-0.0238605666750649,0.052321737254237,-0.0394933828740919,-0.0089440189577978,-0.0135040182449705,88.0\n152,-0.034574862586967,0.0506801187398187,-0.000816893766403737,0.0700725447072635,0.0397096259258226,0.0669524872438994,-0.0654906724765493,0.108111100629544,0.0267142576335128,0.0734802269665584,292.0\n153,0.0417084448844436,0.0506801187398187,-0.0439293767216398,0.063186803319791,-0.00432086553661359,0.0162224364339952,-0.0139477432193303,-0.00259226199818282,-0.0345237153303495,0.0113486232440377,71.0\n154,0.0671362140415805,0.0506801187398187,0.0207393477112143,-0.00567061055493425,0.0204462859110067,0.0262431872112602,-0.0029028298070691,-0.00259226199818282,0.00864028293306308,0.00306440941436832,197.0\n155,-0.0273097856849279,0.0506801187398187,0.0606183944448076,0.0494153205448459,0.08511607024
64598,0.0863676918748504,-0.0029028298070691,0.0343088588777263,0.0378144788263439,0.0486275854775501,186.0\n156,-0.0164121703318693,-0.044641636506989,-0.0105172024313319,0.00121513083253827,-0.0373437341334407,-0.0357602082230672,0.0118237214092792,-0.0394933828740919,-0.02139368094036,-0.0342145528191441,25.0\n157,-0.00188201652779104,0.0506801187398187,-0.0331512559828308,-0.0182944697767768,0.0314539087766158,0.0428400556861055,-0.0139477432193303,0.0199174217361217,0.0102256424049578,0.0279170509033766,84.0\n158,-0.0127796318808497,-0.044641636506989,-0.0654856181992578,-0.0699375301828207,0.00118294589619092,0.0168487333575743,-0.0029028298070691,-0.00702039650329191,-0.0307512098645563,-0.0507829804784829,96.0\n159,-0.00551455497881059,-0.044641636506989,0.0433734012627132,0.0872868981759448,0.0135665216200011,0.00714113104209875,-0.0139477432193303,-0.00259226199818282,0.0423448954496075,-0.0176461251598052,195.0\n160,-0.00914709342983014,-0.044641636506989,-0.0622521819776151,-0.0745280244296595,-0.0235842055514294,-0.0132135189742209,0.00446044580110504,-0.0394933828740919,-0.0358167281015492,-0.0466408735636482,53.0\n161,-0.0454724779400257,0.0506801187398187,0.0638518306664503,0.0700725447072635,0.133274420283499,0.131461070372543,-0.0397192078479398,0.108111100629544,0.0757375884575476,0.0859065477110625,217.0\n162,-0.0527375548420648,-0.044641636506989,0.0304396563761424,-0.0745280244296595,-0.0235842055514294,-0.0113346282034837,-0.0029028298070691,-0.00259226199818282,-0.0307512098645563,-0.00107769750046639,172.0\n163,0.0162806757273067,0.0506801187398187,0.0724743272574975,0.076958286094736,-0.00844872411121698,0.00557538873315109,-0.00658446761115617,-0.00259226199818282,-0.0236445575721341,0.0610539062220542,131.0\n164,0.0453409833354632,-0.044641636506989,-0.019139699022379,0.0218723549949558,0.0273260502020124,-0.0135266674360104,0.100183028707369,-0.0394933828740919,0.0177634778671173,-0.0135040182449705,214.0\n165,-0.0418399394890061,-0.0446
41636506989,-0.0665634302731387,-0.0469850588797694,-0.0373437341334407,-0.043275771306016,0.0486400994501499,-0.0394933828740919,-0.0561575730950062,-0.0135040182449705,59.0\n166,-0.0563700932930843,0.0506801187398187,-0.0600965578298533,-0.0366564467985606,-0.0882539898868825,-0.0708328359434948,-0.0139477432193303,-0.0394933828740919,-0.0781409106690696,-0.104630370371334,70.0\n167,0.0707687524926,-0.044641636506989,0.0692408910358548,0.0379390850138207,0.0218222387692079,0.00150445872988718,-0.0360375700438527,0.0391060045915944,0.0776327891955595,0.106617082285236,220.0\n168,0.00175052192322852,0.0506801187398187,0.0595405823709267,-0.00222773986119799,0.0617248716570406,0.063194705702425,-0.0581273968683752,0.108111100629544,0.0689822116363026,0.12732761685941,268.0\n169,-0.00188201652779104,-0.044641636506989,-0.0266843835395454,0.0494153205448459,0.0589729659406384,-0.0160318551303266,-0.0470824834561139,0.0712099797536354,0.133598980013008,0.0196328370737072,152.0\n170,0.0235457526293458,0.0506801187398187,-0.02021751109626,-0.0366564467985606,-0.0139525355440215,-0.015092409744958,0.0596850128624111,-0.0394933828740919,-0.096433222891784,-0.0176461251598052,47.0\n171,-0.0200447087828888,-0.044641636506989,-0.0460850008694016,-0.0986281192858133,-0.0758704141630723,-0.0598726397808612,-0.0176293810234174,-0.0394933828740919,-0.0514005352605825,-0.0466408735636482,74.0\n172,0.0417084448844436,0.0506801187398187,0.0713965151836166,0.0081008722200108,0.0383336730676214,0.0159092879722056,-0.0176293810234174,0.0343088588777263,0.0734100780491161,0.0859065477110625,295.0\n173,-0.0636351701951234,0.0506801187398187,-0.0794971751597095,-0.00567061055493425,-0.071742555588469,-0.0664487574784414,-0.0102661054152432,-0.0394933828740919,-0.0181182673078967,-0.0549250873933176,101.0\n174,0.0162806757273067,0.0506801187398187,0.00996122697240527,-0.0435421881860331,-0.0965097070360893,-0.0946321190394993,-0.0397192078479398,-0.0394933828740919,0.01703713241478,0.007206
51632920303,151.0\n175,0.0671362140415805,-0.044641636506989,-0.0385403163522353,-0.0263278347173518,-0.0318399227006362,-0.0263657543693812,0.0081420836051921,-0.0394933828740919,-0.0271286455543265,0.00306440941436832,127.0\n176,0.0453409833354632,0.0506801187398187,0.0196615356373334,0.0390867084636372,0.0204462859110067,0.0259300387494707,0.0081420836051921,-0.00259226199818282,-0.003303712578677,0.0196328370737072,237.0\n177,0.0489735217864827,-0.044641636506989,0.0272062201544997,-0.0251802111642493,0.023198191627409,0.0184144756665219,-0.0618090346724622,0.0800662487638535,0.0722236508199124,0.0320591578182113,225.0\n178,0.0417084448844436,-0.044641636506989,-0.00836157828357004,-0.0263278347173518,0.0245741444856101,0.0162224364339952,0.0707299262746723,-0.0394933828740919,-0.0483617248028919,-0.0300724459043093,81.0\n179,-0.0236772472339084,-0.044641636506989,-0.0159062628007364,-0.0125563519424068,0.0204462859110067,0.0412743133771578,-0.0434008456520269,0.0343088588777263,0.0140724525157685,-0.0093619113301358,151.0\n180,-0.0382074010379866,0.0506801187398187,0.00457216660300077,0.0356438377699009,-0.0112006298276192,0.00588853719494063,-0.0470824834561139,0.0343088588777263,0.0163049527999418,-0.00107769750046639,107.0\n181,0.0489735217864827,-0.044641636506989,-0.0428515646477589,-0.0538708002672419,0.0452134373586271,0.0500424703072647,0.0339135482338016,-0.00259226199818282,-0.0259524244351894,-0.063209301222987,64.0\n182,0.0453409833354632,0.0506801187398187,0.00564997867688165,0.0563010619323185,0.0644767773734429,0.0891860280309562,-0.0397192078479398,0.0712099797536354,0.0155668445407018,-0.0093619113301358,138.0\n183,0.0453409833354632,0.0506801187398187,-0.0353068801305926,0.063186803319791,-0.00432086553661359,-0.00162702588800815,-0.0102661054152432,-0.00259226199818282,0.0155668445407018,0.0569117993072195,185.0\n184,0.0162806757273067,-0.044641636506989,0.023972783932857,-0.0228849640236156,-0.0249601584096305,-0.0260526059075917,-0.03235593
22397657,-0.00259226199818282,0.0372320112089689,0.0320591578182113,265.0\n185,-0.0745327855481821,0.0506801187398187,-0.0180618869484982,0.0081008722200108,-0.019456346976826,-0.0248000120604336,-0.0654906724765493,0.0343088588777263,0.0673172179146849,-0.0176461251598052,101.0\n186,-0.0817978624502212,0.0506801187398187,0.0422955891888323,-0.0194420933298793,0.0397096259258226,0.0575580333902134,-0.0691723102806364,0.108111100629544,0.0471861678860197,-0.0383566597339788,137.0\n187,-0.067267708646143,-0.044641636506989,-0.0547074974604488,-0.0263278347173518,-0.0758704141630723,-0.082106180567918,0.0486400994501499,-0.076394503750001,-0.0868289932162924,-0.104630370371334,143.0\n188,0.00538306037424807,-0.044641636506989,-0.00297251791416553,0.0494153205448459,0.0741084473808508,0.0707102687853738,0.0449584616460628,-0.00259226199818282,-0.00149858682029207,-0.0093619113301358,141.0\n189,-0.00188201652779104,-0.044641636506989,-0.0665634302731387,0.00121513083253827,-0.00294491267841247,0.00307020103883484,0.0118237214092792,-0.00259226199818282,-0.0202887477516296,-0.0259303389894746,79.0\n190,0.00901559882526763,-0.044641636506989,-0.0126728265790937,0.0287580963824284,-0.0180803941186249,-0.005071658967693,-0.0470824834561139,0.0343088588777263,0.0233748412798208,-0.0052198044153011,292.0\n191,-0.00551455497881059,0.0506801187398187,-0.041773752573878,-0.0435421881860331,-0.0799982727376757,-0.0761563597939169,-0.0323559322397657,-0.0394933828740919,0.0102256424049578,-0.0093619113301358,178.0\n192,0.0562385986885218,0.0506801187398187,-0.030995631835069,0.0081008722200108,0.0190703330528056,0.0212328118226277,0.0339135482338016,-0.0394933828740919,-0.0295276227417736,-0.0590671943081523,91.0\n193,0.00901559882526763,0.0506801187398187,-0.00512814206192736,-0.0641994123484507,0.0699805888062474,0.0838625041805342,-0.0397192078479398,0.0712099797536354,0.0395398780720242,0.0196328370737072,116.0\n194,-0.067267708646143,-0.044641636506989,-0.0590187457559724,0.03
22009670761646,-0.051103262715452,-0.0495387405418066,-0.0102661054152432,-0.0394933828740919,0.00200784054982379,0.0237749439885419,86.0\n195,0.0271782910803654,0.0506801187398187,0.0250505960067379,0.0149866136074833,0.0259500973438113,0.048476727998317,-0.0397192078479398,0.0343088588777263,0.00783714230182385,0.0237749439885419,122.0\n196,-0.0236772472339084,-0.044641636506989,-0.0460850008694016,-0.0332135761048244,0.0328298616348169,0.0362639379885253,0.0375951860378887,-0.00259226199818282,-0.0332487872476258,0.0113486232440377,72.0\n197,0.0489735217864827,0.0506801187398187,0.00349435452911985,0.0700725447072635,-0.00844872411121698,0.0134041002778894,-0.0544457590642881,0.0343088588777263,0.0133159679089277,0.036201264733046,129.0\n198,-0.0527375548420648,-0.044641636506989,0.0541515220015222,-0.0263278347173518,-0.0552311212900554,-0.03388131745233,-0.0139477432193303,-0.0394933828740919,-0.0740888714915354,-0.0590671943081523,142.0\n199,0.0417084448844436,-0.044641636506989,-0.0450071887955207,0.0344962143200845,0.0438374845004259,-0.0157187066685371,0.0375951860378887,-0.0144006206784737,0.089898693277671,0.00720651632920303,90.0\n200,0.0562385986885218,-0.044641636506989,-0.0579409336820915,-0.00796585769556799,0.0520932016496327,0.0491030249218961,0.056003375058324,-0.0214118336448964,-0.0283202425479987,0.0444854785627154,158.0\n201,-0.034574862586967,0.0506801187398187,-0.0557853095343297,-0.015999222636143,-0.00982467696941811,-0.00788999512379879,0.0375951860378887,-0.0394933828740919,-0.0529587932392004,0.0279170509033766,39.0\n202,0.0816663678456587,0.0506801187398187,0.00133873038135806,0.0356438377699009,0.126394655992494,0.0910649188016934,0.0191869970174533,0.0343088588777263,0.0844952822124031,-0.0300724459043093,196.0\n203,-0.00188201652779104,0.0506801187398187,0.0304396563761424,0.0528581912385822,0.0397096259258226,0.0566185880048449,-0.0397192078479398,0.0712099797536354,0.0253931349154494,0.0279170509033766,222.0\n204,0.110726675453815
,0.0506801187398187,0.00672779075076256,0.0287580963824284,-0.0277120641260328,-0.00726369820021974,-0.0470824834561139,0.0343088588777263,0.00200784054982379,0.0776223338813931,277.0\n205,-0.0309423241359475,-0.044641636506989,0.0466068374843559,0.0149866136074833,-0.0167044412604238,-0.0470335528474903,0.000778807997017968,-0.00259226199818282,0.0634559213720654,-0.0259303389894746,99.0\n206,0.00175052192322852,0.0506801187398187,0.0261284080806188,-0.00911348124867051,0.0245741444856101,0.038455977221052,-0.0213110188275045,0.0343088588777263,0.00943640914607987,0.00306440941436832,196.0\n207,0.00901559882526763,-0.044641636506989,0.045529025410475,0.0287580963824284,0.0121905687618,-0.0138398158977999,0.0265502726256275,-0.0394933828740919,0.0461323310394148,0.036201264733046,202.0\n208,0.030810829531385,-0.044641636506989,0.0401399650410705,0.076958286094736,0.0176943801946045,0.0378296802974729,-0.0286742944356786,0.0343088588777263,-0.00149858682029207,0.11904340302974,155.0\n209,0.0380759064334241,0.0506801187398187,-0.0180618869484982,0.0666296740135272,-0.051103262715452,-0.0166581520539057,-0.0765355858888105,0.0343088588777263,-0.0119006848015081,-0.0135040182449705,77.0\n210,0.00901559882526763,-0.044641636506989,0.0142724752679289,0.0149866136074833,0.054845107366035,0.0472241341511589,0.0707299262746723,-0.0394933828740919,-0.0332487872476258,-0.0590671943081523,191.0\n211,0.0925639831987174,-0.044641636506989,0.0369065288194278,0.0218723549949558,-0.0249601584096305,-0.0166581520539057,0.000778807997017968,-0.0394933828740919,-0.0225121719296605,-0.0217882320746399,70.0\n212,0.0671362140415805,-0.044641636506989,0.00349435452911985,0.0356438377699009,0.0493412959332305,0.0312535625998928,0.0707299262746723,-0.0394933828740919,-0.000609254186102297,0.0196328370737072,73.0\n213,0.00175052192322852,-0.044641636506989,-0.0708746785686623,-0.0228849640236156,-0.00156895982021134,-0.00100072896442909,0.0265502726256275,-0.0394933828740919,-0.02251217192966
05,0.00720651632920303,49.0\n214,0.030810829531385,-0.044641636506989,-0.0331512559828308,-0.0228849640236156,-0.0469754041408486,-0.0811667351825494,0.103864666511456,-0.076394503750001,-0.0398095943643375,-0.0549250873933176,65.0\n215,0.0271782910803654,0.0506801187398187,0.0940305687351156,0.0976155102571536,-0.0345918284170385,-0.0320024266815928,-0.0434008456520269,-0.00259226199818282,0.0366457977933988,0.106617082285236,263.0\n216,0.0126481372762872,0.0506801187398187,0.0358287167455469,0.0494153205448459,0.0534691545078339,0.0741549018650587,-0.0691723102806364,0.145012221505454,0.0456008084141249,0.0486275854775501,248.0\n217,0.0744012909436196,-0.044641636506989,0.0315174684500233,0.10105838095089,0.0465893902168282,0.0368902349121043,0.0155053592133662,-0.00259226199818282,0.0336568129023847,0.0444854785627154,296.0\n218,-0.0418399394890061,-0.044641636506989,-0.0654856181992578,-0.0400993174922969,-0.00569681839481472,0.014343545663258,-0.0434008456520269,0.0343088588777263,0.00702686254915195,-0.0135040182449705,214.0\n219,-0.0890629393522603,-0.044641636506989,-0.041773752573878,-0.0194420933298793,-0.0662387441556644,-0.0742774690231797,0.0081420836051921,-0.0394933828740919,0.00114379737951254,-0.0300724459043093,185.0\n220,0.0235457526293458,0.0506801187398187,-0.0396181284261162,-0.00567061055493425,-0.0483513569990498,-0.0332550205287509,0.0118237214092792,-0.0394933828740919,-0.101643547945512,-0.0673514081378217,78.0\n221,-0.0454724779400257,-0.044641636506989,-0.0385403163522353,-0.0263278347173518,-0.0153284884022226,0.000878161806308105,-0.0323559322397657,-0.00259226199818282,0.00114379737951254,-0.0383566597339788,93.0\n222,-0.0236772472339084,0.0506801187398187,-0.0256065714656645,0.0425295791573734,-0.0538551684318543,-0.0476598497710694,-0.0213110188275045,-0.0394933828740919,0.00114379737951254,0.0196328370737072,252.0\n223,-0.099960554705319,-0.044641636506989,-0.0234509473179027,-0.0641994123484507,-0.0579830270064577,-0.0601857882426
507,0.0118237214092792,-0.0394933828740919,-0.0181182673078967,-0.0507829804784829,150.0\n224,-0.0273097856849279,-0.044641636506989,-0.0665634302731387,-0.112399602060758,-0.0497273098572509,-0.0413968805352788,0.000778807997017968,-0.0394933828740919,-0.0358167281015492,-0.0093619113301358,77.0\n225,0.030810829531385,0.0506801187398187,0.0325952805239042,0.0494153205448459,-0.040095639849843,-0.0435889197678055,-0.0691723102806364,0.0343088588777263,0.0630166151147464,0.00306440941436832,208.0\n226,-0.103593093156339,0.0506801187398187,-0.0460850008694016,-0.0263278347173518,-0.0249601584096305,-0.0248000120604336,0.0302319104297145,-0.0394933828740919,-0.0398095943643375,-0.0549250873933176,77.0\n227,0.0671362140415805,0.0506801187398187,-0.0299178197611881,0.0574486853821349,-0.000193006962010205,-0.0157187066685371,0.0744115640787594,-0.0505637191368646,-0.0384591123013538,0.00720651632920303,108.0\n228,-0.0527375548420648,-0.044641636506989,-0.0126728265790937,-0.0607565416547144,-0.000193006962010205,0.00808057642746734,0.0118237214092792,-0.00259226199818282,-0.0271286455543265,-0.0507829804784829,160.0\n229,-0.0273097856849279,0.0506801187398187,-0.0159062628007364,-0.0297707054110881,0.00393485161259318,-0.000687580502639557,0.0412768238419757,-0.0394933828740919,-0.0236445575721341,0.0113486232440377,53.0\n230,-0.0382074010379866,0.0506801187398187,0.0713965151836166,-0.0573136709609782,0.153913713156516,0.155886650392127,0.000778807997017968,0.0719480021711535,0.0502764933899896,0.0693381200517237,220.0\n231,0.00901559882526763,-0.044641636506989,-0.030995631835069,0.0218723549949558,0.00806271018719657,0.00870687335104641,0.00446044580110504,-0.00259226199818282,0.00943640914607987,0.0113486232440377,154.0\n232,0.0126481372762872,0.0506801187398187,0.000260918307477141,-0.0114087283893043,0.0397096259258226,0.0572448849284239,-0.0397192078479398,0.0560805201945126,0.024052583226893,0.0320591578182113,259.0\n233,0.0671362140415805,-0.044641636506989,0.03
69065288194278,-0.0504279295735057,-0.0235842055514294,-0.034507614375909,0.0486400994501499,-0.0394933828740919,-0.0259524244351894,-0.0383566597339788,90.0\n234,0.0453409833354632,-0.044641636506989,0.0390621529671896,0.0459724498511097,0.00668675732899544,-0.0241737151368545,0.0081420836051921,-0.0125555646346783,0.0643282330236709,0.0569117993072195,246.0\n235,0.0671362140415805,0.0506801187398187,-0.0148284507268555,0.0585963091762383,-0.0593589798646588,-0.034507614375909,-0.0618090346724622,0.012906208769699,-0.00514530798026311,0.0486275854775501,124.0\n236,0.0271782910803654,-0.044641636506989,0.00672779075076256,0.0356438377699009,0.0796122588136553,0.0707102687853738,0.0155053592133662,0.0343088588777263,0.0406722637144977,0.0113486232440377,67.0\n237,0.0562385986885218,-0.044641636506989,-0.0687190544209005,-0.0687899065952895,-0.000193006962010205,-0.00100072896442909,0.0449584616460628,-0.0376483268302965,-0.0483617248028919,-0.00107769750046639,72.0\n238,0.0344433679824045,0.0506801187398187,-0.00943939035745095,0.0597439326260547,-0.0359677812752396,-0.00757684666200928,-0.0765355858888105,0.0712099797536354,0.0110081010458725,-0.0217882320746399,257.0\n239,0.0235457526293458,-0.044641636506989,0.0196615356373334,-0.0125563519424068,0.0837401173882587,0.0387691256828415,0.0633666506664982,-0.00259226199818282,0.0660482061630984,0.0486275854775501,262.0\n240,0.0489735217864827,0.0506801187398187,0.0746299514052593,0.0666296740135272,-0.00982467696941811,-0.00225332281158722,-0.0434008456520269,0.0343088588777263,0.0336568129023847,0.0196328370737072,275.0\n241,0.030810829531385,0.0506801187398187,-0.00836157828357004,0.00465800152627453,0.0149424744782022,0.0274957810584184,0.0081420836051921,-0.00812743012956918,-0.0295276227417736,0.0569117993072195,177.0\n242,-0.103593093156339,0.0506801187398187,-0.0234509473179027,-0.0228849640236156,-0.0868780370286814,-0.0677013513255995,-0.0176293810234174,-0.0394933828740919,-0.0781409106690696,-0.07149351505
26564,71.0\n243,0.0162806757273067,0.0506801187398187,-0.0460850008694016,0.0115437429137471,-0.0332158755588373,-0.0160318551303266,-0.0102661054152432,-0.00259226199818282,-0.0439854025655911,-0.0424987666488135,47.0\n244,-0.0600026317441039,0.0506801187398187,0.0541515220015222,-0.0194420933298793,-0.0497273098572509,-0.0489124436182275,0.0228686348215404,-0.0394933828740919,-0.0439854025655911,-0.0052198044153011,187.0\n245,-0.0273097856849279,-0.044641636506989,-0.0353068801305926,-0.0297707054110881,-0.0566070741482565,-0.058620045933703,0.0302319104297145,-0.0394933828740919,-0.0498684677352306,-0.129483011860342,125.0\n246,0.0417084448844436,-0.044641636506989,-0.0320734439089499,-0.061904165207817,0.0796122588136553,0.0509819156926333,0.056003375058324,-0.00997248617336464,0.0450661683362615,-0.0590671943081523,78.0\n247,-0.0817978624502212,-0.044641636506989,-0.0816527993074713,-0.0400993174922969,0.00255889875439205,-0.0185370428246429,0.0707299262746723,-0.0394933828740919,-0.0109044358473771,-0.09220404962683,51.0\n248,-0.0418399394890061,-0.044641636506989,0.0476846495582368,0.0597439326260547,0.127770608850695,0.128016437292858,-0.0249926566315915,0.108111100629544,0.0638931206368394,0.0403433716478807,258.0\n249,-0.0127796318808497,-0.044641636506989,0.0606183944448076,0.0528581912385822,0.0479653430750293,0.0293746718291555,-0.0176293810234174,0.0343088588777263,0.0702112981933102,0.00720651632920303,215.0\n250,0.0671362140415805,-0.044641636506989,0.056307146149284,0.0735154154009998,-0.0139525355440215,-0.039204841302752,-0.0323559322397657,-0.00259226199818282,0.0757375884575476,0.036201264733046,303.0\n251,-0.0527375548420648,0.0506801187398187,0.098341817030639,0.0872868981759448,0.0603489187988395,0.0487898764601065,-0.0581273968683752,0.108111100629544,0.0844952822124031,0.0403433716478807,243.0\n252,0.00538306037424807,-0.044641636506989,0.0595405823709267,-0.0561660474078757,0.0245741444856101,0.0528608064633705,-0.0434008456520269,0.050914
3632718854,-0.00421985970694603,-0.0300724459043093,91.0\n253,0.0816663678456587,-0.044641636506989,0.0336730925977851,0.0081008722200108,0.0520932016496327,0.0566185880048449,-0.0176293810234174,0.0343088588777263,0.0348641930961596,0.0693381200517237,150.0\n254,0.030810829531385,0.0506801187398187,0.056307146149284,0.076958286094736,0.0493412959332305,-0.0122740735888523,-0.0360375700438527,0.0712099797536354,0.120053382001538,0.0900486546258972,310.0\n255,0.00175052192322852,-0.044641636506989,-0.0654856181992578,-0.00567061055493425,-0.00707277125301585,-0.0194764882100115,0.0412768238419757,-0.0394933828740919,-0.003303712578677,0.00720651632920303,153.0\n256,-0.0491050163910452,-0.044641636506989,0.160854917315731,-0.0469850588797694,-0.0290880169842339,-0.019789636671801,-0.0470824834561139,0.0343088588777263,0.028016506523264,0.0113486232440377,346.0\n257,-0.0273097856849279,0.0506801187398187,-0.0557853095343297,0.0253152256886921,-0.00707277125301585,-0.0235474182132754,0.052321737254237,-0.0394933828740919,-0.00514530798026311,-0.0507829804784829,63.0\n258,0.0780338293946392,0.0506801187398187,-0.0245287593917836,-0.0423945646329306,0.00668675732899544,0.0528608064633705,-0.0691723102806364,0.0808042711813717,-0.0371283460104736,0.0569117993072195,89.0\n259,0.0126481372762872,-0.044641636506989,-0.0363846922044735,0.0425295791573734,-0.0139525355440215,0.0129343775852051,-0.0268334755336351,0.00515697338575809,-0.0439854025655911,0.00720651632920303,50.0\n260,0.0417084448844436,-0.044641636506989,-0.00836157828357004,-0.0573136709609782,0.00806271018719657,-0.0313761297580137,0.151725957964588,-0.076394503750001,-0.0802365402489018,-0.0176461251598052,39.0\n261,0.0489735217864827,-0.044641636506989,-0.041773752573878,0.104501251644626,0.0355817673512192,-0.0257394574458021,0.177497422593197,-0.076394503750001,-0.0129079422541688,0.0154907301588724,103.0\n262,-0.0164121703318693,0.0506801187398187,0.127442743025423,0.0976155102571536,0.0163184273364034,0.0
174750302811533,-0.0213110188275045,0.0343088588777263,0.0348641930961596,0.00306440941436832,308.0\n263,-0.0745327855481821,0.0506801187398187,-0.0773415510119477,-0.0469850588797694,-0.0469754041408486,-0.0326287236051719,0.00446044580110504,-0.0394933828740919,-0.072128454601956,-0.0176461251598052,116.0\n264,0.0344433679824045,0.0506801187398187,0.0282840322283806,-0.0332135761048244,-0.0455994512826475,-0.00976888589453599,-0.050764121260201,-0.00259226199818282,-0.0594726974107223,-0.0217882320746399,145.0\n265,-0.034574862586967,0.0506801187398187,-0.0256065714656645,-0.0171468461892456,0.00118294589619092,-0.00287961973516629,0.0081420836051921,-0.015507654304751,0.0148227108412663,0.0403433716478807,74.0\n266,-0.0527375548420648,0.0506801187398187,-0.0622521819776151,0.0115437429137471,-0.00844872411121698,-0.0366996536084358,0.122272855531891,-0.076394503750001,-0.0868289932162924,0.00306440941436832,45.0\n267,0.0598711371395414,-0.044641636506989,-0.000816893766403737,-0.0848566365108683,0.075484400239052,0.0794784257154807,0.00446044580110504,0.0343088588777263,0.0233748412798208,0.0279170509033766,115.0\n268,0.063503675590561,0.0506801187398187,0.088641508365711,0.0700725447072635,0.0204462859110067,0.0375165318356834,-0.050764121260201,0.0712099797536354,0.0293004132685869,0.0734802269665584,264.0\n269,0.00901559882526763,-0.044641636506989,-0.0320734439089499,-0.0263278347173518,0.0424615316422248,-0.0103951828181151,0.159089233572762,-0.076394503750001,-0.0119006848015081,-0.0383566597339788,87.0\n270,0.00538306037424807,0.0506801187398187,0.0304396563761424,0.0838440274822086,-0.0373437341334407,-0.0473467013092799,0.0155053592133662,-0.0394933828740919,0.00864028293306308,0.0154907301588724,202.0\n271,0.0380759064334241,0.0506801187398187,0.00888341489852436,0.0425295791573734,-0.0428475455662452,-0.0210422305189592,-0.0397192078479398,-0.00259226199818282,-0.0181182673078967,0.00720651632920303,127.0\n272,0.0126481372762872,-0.044641636506989,0.00
672779075076256,-0.0561660474078757,-0.0758704141630723,-0.0664487574784414,-0.0213110188275045,-0.0376483268302965,-0.0181182673078967,-0.09220404962683,182.0\n273,0.0744012909436196,0.0506801187398187,-0.02021751109626,0.0459724498511097,0.0741084473808508,0.0328193049088404,-0.0360375700438527,0.0712099797536354,0.106354276741726,0.036201264733046,241.0\n274,0.0162806757273067,-0.044641636506989,-0.0245287593917836,0.0356438377699009,-0.00707277125301585,-0.00319276819695581,-0.0139477432193303,-0.00259226199818282,0.0155668445407018,0.0154907301588724,66.0\n275,-0.00551455497881059,0.0506801187398187,-0.0115950145052127,0.0115437429137471,-0.0222082526932283,-0.0154055582067476,-0.0213110188275045,-0.00259226199818282,0.0110081010458725,0.0693381200517237,94.0\n276,0.0126481372762872,-0.044641636506989,0.0261284080806188,0.063186803319791,0.125018703134293,0.0916912157252725,0.0633666506664982,-0.00259226199818282,0.057572856202426,-0.0217882320746399,283.0\n277,-0.034574862586967,-0.044641636506989,-0.0590187457559724,0.00121513083253827,-0.0538551684318543,-0.078035250564654,0.0670482884705852,-0.076394503750001,-0.02139368094036,0.0154907301588724,64.0\n278,0.0671362140415805,0.0506801187398187,-0.0363846922044735,-0.0848566365108683,-0.00707277125301585,0.01966706951368,-0.0544457590642881,0.0343088588777263,0.00114379737951254,0.0320591578182113,102.0\n279,0.0380759064334241,0.0506801187398187,-0.0245287593917836,0.00465800152627453,-0.0263361112678317,-0.0263657543693812,0.0155053592133662,-0.0394933828740919,-0.0159982677581387,-0.0259303389894746,200.0\n280,0.00901559882526763,0.0506801187398187,0.0185837235634525,0.0390867084636372,0.0176943801946045,0.0105857641217836,0.0191869970174533,-0.00259226199818282,0.0163049527999418,-0.0176461251598052,265.0\n281,-0.0926954778032799,0.0506801187398187,-0.0902752958985185,-0.0573136709609782,-0.0249601584096305,-0.0304366843726451,-0.00658446761115617,-0.00259226199818282,0.024052583226893,0.00306440941436832,
94.0\n282,0.0707687524926,-0.044641636506989,-0.00512814206192736,-0.00567061055493425,0.0878679759628621,0.102964560349696,0.0118237214092792,0.0343088588777263,-0.0089440189577978,0.0279170509033766,230.0\n283,-0.0164121703318693,-0.044641636506989,-0.052551873312687,-0.0332135761048244,-0.0442234984244464,-0.0363865051466462,0.0191869970174533,-0.0394933828740919,-0.0683297436244215,-0.0300724459043093,181.0\n284,0.0417084448844436,0.0506801187398187,-0.0223731352440218,0.0287580963824284,-0.0662387441556644,-0.0451546620767532,-0.0618090346724622,-0.00259226199818282,0.00286377051894013,-0.0549250873933176,156.0\n285,0.0126481372762872,-0.044641636506989,-0.02021751109626,-0.015999222636143,0.0121905687618,0.0212328118226277,-0.0765355858888105,0.108111100629544,0.0598807230654812,-0.0217882320746399,233.0\n286,-0.0382074010379866,-0.044641636506989,-0.0547074974604488,-0.0779708951233958,-0.0332158755588373,-0.0864902590329714,0.140681044552327,-0.076394503750001,-0.0191970476139445,-0.0052198044153011,60.0\n287,0.0453409833354632,-0.044641636506989,-0.00620595413580824,-0.015999222636143,0.125018703134293,0.125198101136752,0.0191869970174533,0.0343088588777263,0.0324332257796019,-0.0052198044153011,219.0\n288,0.0707687524926,0.0506801187398187,-0.0169840748746173,0.0218723549949558,0.0438374845004259,0.0563054395430553,0.0375951860378887,-0.00259226199818282,-0.0702093127286876,-0.0176461251598052,80.0\n289,-0.0745327855481821,0.0506801187398187,0.0552293340754031,-0.0400993174922969,0.0534691545078339,0.05317395492516,-0.0434008456520269,0.0712099797536354,0.061237907519701,-0.0342145528191441,68.0\n290,0.0598711371395414,0.0506801187398187,0.0767855755530211,0.0253152256886921,0.00118294589619092,0.0168487333575743,-0.0544457590642881,0.0343088588777263,0.0299356483965325,0.0444854785627154,332.0\n291,0.0744012909436196,-0.044641636506989,0.0185837235634525,0.063186803319791,0.0617248716570406,0.0428400556861055,0.0081420836051921,-0.00259226199818282,0.0580
391276638951,-0.0590671943081523,248.0\n292,0.00901559882526763,-0.044641636506989,-0.0223731352440218,-0.0320659525517218,-0.0497273098572509,-0.0686407967109681,0.0780932018828464,-0.0708593356186146,-0.0629129499162512,-0.0383566597339788,84.0\n293,-0.0709002470971626,-0.044641636506989,0.0929527566612346,0.0126913664668496,0.0204462859110067,0.0425269072243159,0.000778807997017968,0.000359827671889909,-0.0545441527110952,-0.00107769750046639,200.0\n294,0.0235457526293458,0.0506801187398187,-0.030995631835069,-0.00567061055493425,-0.0167044412604238,0.0177881787429428,-0.0323559322397657,-0.00259226199818282,-0.0740888714915354,-0.0342145528191441,55.0\n295,-0.0527375548420648,0.0506801187398187,0.0390621529671896,-0.0400993174922969,-0.00569681839481472,-0.0129003705124313,0.0118237214092792,-0.0394933828740919,0.0163049527999418,0.00306440941436832,85.0\n296,0.0671362140415805,-0.044641636506989,-0.0611743699037342,-0.0400993174922969,-0.0263361112678317,-0.024486863598644,0.0339135482338016,-0.0394933828740919,-0.0561575730950062,-0.0590671943081523,89.0\n297,0.00175052192322852,-0.044641636506989,-0.00836157828357004,-0.0641994123484507,-0.0387196869916418,-0.024486863598644,0.00446044580110504,-0.0394933828740919,-0.0646830224644503,-0.0549250873933176,31.0\n298,0.0235457526293458,0.0506801187398187,-0.0374625042783544,-0.0469850588797694,-0.0910058956032848,-0.0755300628703378,-0.0323559322397657,-0.0394933828740919,-0.0307512098645563,-0.0135040182449705,129.0\n299,0.0380759064334241,0.0506801187398187,-0.0137506386529745,-0.015999222636143,-0.0359677812752396,-0.0219816759043277,-0.0139477432193303,-0.00259226199818282,-0.0259524244351894,-0.00107769750046639,83.0\n300,0.0162806757273067,-0.044641636506989,0.0735521393313785,-0.0412469410453994,-0.00432086553661359,-0.0135266674360104,-0.0139477432193303,-0.00111621716314646,0.0428956878925287,0.0444854785627154,275.0\n301,-0.00188201652779104,0.0506801187398187,-0.0245287593917836,0.0528581912385822,0.02
73260502020124,0.0300009687527346,0.0302319104297145,-0.00259226199818282,-0.02139368094036,0.036201264733046,65.0\n302,0.0126481372762872,-0.044641636506989,0.0336730925977851,0.0333485905259811,0.0300779559184146,0.0271826325966288,-0.0029028298070691,0.00884708547334898,0.0311929907028023,0.0279170509033766,198.0\n303,0.0744012909436196,-0.044641636506989,0.034750904671666,0.0941726395634173,0.0575970130824372,0.0202933664372591,0.0228686348215404,-0.00259226199818282,0.0738021469200488,-0.0217882320746399,236.0\n304,0.0417084448844436,0.0506801187398187,-0.0385403163522353,0.0528581912385822,0.0768603530972531,0.116429944206646,-0.0397192078479398,0.0712099797536354,-0.0225121719296605,-0.0135040182449705,253.0\n305,-0.00914709342983014,0.0506801187398187,-0.0396181284261162,-0.0400993174922969,-0.00844872411121698,0.0162224364339952,-0.0654906724765493,0.0712099797536354,0.0177634778671173,-0.0673514081378217,124.0\n306,0.00901559882526763,0.0506801187398187,-0.00189470584028465,0.0218723549949558,-0.0387196869916418,-0.0248000120604336,-0.00658446761115617,-0.0394933828740919,-0.0398095943643375,-0.0135040182449705,44.0\n307,0.0671362140415805,0.0506801187398187,-0.030995631835069,0.00465800152627453,0.0245741444856101,0.0356376410649462,-0.0286742944356786,0.0343088588777263,0.0233748412798208,0.0817644407962278,172.0\n308,0.00175052192322852,-0.044641636506989,-0.0460850008694016,-0.0332135761048244,-0.07311850844667,-0.0814798836443389,0.0449584616460628,-0.0693832907835783,-0.0611765950943345,-0.0797777288823259,114.0\n309,-0.00914709342983014,0.0506801187398187,0.00133873038135806,-0.00222773986119799,0.0796122588136553,0.0700839718617947,0.0339135482338016,-0.00259226199818282,0.0267142576335128,0.0817644407962278,142.0\n310,-0.00551455497881059,-0.044641636506989,0.0649296427403312,0.0356438377699009,-0.00156895982021134,0.0149698425868371,-0.0139477432193303,0.000728838880648992,-0.0181182673078967,0.0320591578182113,109.0\n311,0.096196521649737,-0.044
641636506989,0.0401399650410705,-0.0573136709609782,0.0452134373586271,0.0606895180081088,-0.0213110188275045,0.0361539149215217,0.0125531528133893,0.0237749439885419,180.0\n312,-0.0745327855481821,-0.044641636506989,-0.0234509473179027,-0.00567061055493425,-0.0208322998350272,-0.0141529643595894,0.0155053592133662,-0.0394933828740919,-0.0384591123013538,-0.0300724459043093,144.0\n313,0.0598711371395414,0.0506801187398187,0.0530737099276413,0.0528581912385822,0.0328298616348169,0.01966706951368,-0.0102661054152432,0.0343088588777263,0.0552050380896167,-0.00107769750046639,163.0\n314,-0.0236772472339084,-0.044641636506989,0.0401399650410705,-0.0125563519424068,-0.00982467696941811,-0.00100072896442909,-0.0029028298070691,-0.00259226199818282,-0.0119006848015081,-0.0383566597339788,147.0\n315,0.00901559882526763,-0.044641636506989,-0.02021751109626,-0.0538708002672419,0.0314539087766158,0.0206065148990486,0.056003375058324,-0.0394933828740919,-0.0109044358473771,-0.00107769750046639,97.0\n316,0.0162806757273067,0.0506801187398187,0.0142724752679289,0.00121513083253827,0.00118294589619092,-0.0213553789807487,-0.0323559322397657,0.0343088588777263,0.0749683360277342,0.0403433716478807,220.0\n317,0.0199132141783263,-0.044641636506989,-0.0342290680567117,0.055153438482502,0.0672286830898452,0.0741549018650587,-0.00658446761115617,0.0328328140426899,0.0247253233428045,0.0693381200517237,190.0\n318,0.0889314447476978,-0.044641636506989,0.00672779075076256,0.0253152256886921,0.0300779559184146,0.00870687335104641,0.0633666506664982,-0.0394933828740919,0.00943640914607987,0.0320591578182113,109.0\n319,0.0199132141783263,-0.044641636506989,0.00457216660300077,0.0459724498511097,-0.0180803941186249,-0.0545491159304391,0.0633666506664982,-0.0394933828740919,0.0286607203138089,0.0610539062220542,191.0\n320,-0.0236772472339084,-0.044641636506989,0.0304396563761424,-0.00567061055493425,0.0823641645300576,0.092004364187062,-0.0176293810234174,0.0712099797536354,0.0330470723549341,0.
00306440941436832,122.0\n321,0.096196521649737,-0.044641636506989,0.0519958978537604,0.0792535333386559,0.054845107366035,0.0365770864503148,-0.0765355858888105,0.141322109417863,0.098646374304928,0.0610539062220542,230.0\n322,0.0235457526293458,0.0506801187398187,0.0616962065186885,0.0620391798699746,0.0245741444856101,-0.0360733566848567,-0.0912621371051588,0.155344535350708,0.133395733837469,0.0817644407962278,242.0\n323,0.0707687524926,0.0506801187398187,-0.00728376620968916,0.0494153205448459,0.0603489187988395,-0.00444536204411395,-0.0544457590642881,0.108111100629544,0.129019411600168,0.0569117993072195,248.0\n324,0.030810829531385,-0.044641636506989,0.00564997867688165,0.0115437429137471,0.0782363059554542,0.077912683406533,-0.0434008456520269,0.108111100629544,0.0660482061630984,0.0196328370737072,249.0\n325,-0.00188201652779104,-0.044641636506989,0.0541515220015222,-0.0664946594890845,0.0727324945226497,0.0566185880048449,-0.0434008456520269,0.0848633944777217,0.0844952822124031,0.0486275854775501,192.0\n326,0.0453409833354632,0.0506801187398187,-0.00836157828357004,-0.0332135761048244,-0.00707277125301585,0.00119131026809764,-0.0397192078479398,0.0343088588777263,0.0299356483965325,0.0279170509033766,131.0\n327,0.0744012909436196,-0.044641636506989,0.114508998138853,0.0287580963824284,0.0245741444856101,0.0249905933641021,0.0191869970174533,-0.00259226199818282,-0.000609254186102297,-0.0052198044153011,237.0\n328,-0.0382074010379866,-0.044641636506989,0.067085266888093,-0.0607565416547144,-0.0290880169842339,-0.0232342697514859,-0.0102661054152432,-0.00259226199818282,-0.00149858682029207,0.0196328370737072,78.0\n329,-0.0127796318808497,0.0506801187398187,-0.0557853095343297,-0.00222773986119799,-0.0277120641260328,-0.029184090525487,0.0191869970174533,-0.0394933828740919,-0.0170521046047435,0.0444854785627154,135.0\n330,0.00901559882526763,0.0506801187398187,0.0304396563761424,0.0425295791573734,-0.00294491267841247,0.0368902349121043,-0.0654906724765493
,0.0712099797536354,-0.0236445575721341,0.0154907301588724,244.0\n331,0.0816663678456587,0.0506801187398187,-0.0256065714656645,-0.0366564467985606,-0.0703666027302678,-0.0464072559239113,-0.0397192078479398,-0.00259226199818282,-0.0411803851880079,-0.0052198044153011,199.0\n332,0.030810829531385,-0.044641636506989,0.104808689473925,0.076958286094736,-0.0112006298276192,-0.0113346282034837,-0.0581273968683752,0.0343088588777263,0.0571041874478439,0.036201264733046,270.0\n333,0.0271782910803654,0.0506801187398187,-0.00620595413580824,0.0287580963824284,-0.0167044412604238,-0.00162702588800815,-0.0581273968683752,0.0343088588777263,0.0293004132685869,0.0320591578182113,164.0\n334,-0.0600026317441039,0.0506801187398187,-0.0471628129432825,-0.0228849640236156,-0.071742555588469,-0.0576806005483345,-0.00658446761115617,-0.0394933828740919,-0.0629129499162512,-0.0549250873933176,72.0\n335,0.00538306037424807,-0.044641636506989,-0.0482406250171634,-0.0125563519424068,0.00118294589619092,-0.00663740127664067,0.0633666506664982,-0.0394933828740919,-0.0514005352605825,-0.0590671943081523,96.0\n336,-0.0200447087828888,-0.044641636506989,0.0854080721440683,-0.0366564467985606,0.0919958345374655,0.0894991764927457,-0.0618090346724622,0.145012221505454,0.0809479135112756,0.0527696923923848,306.0\n337,0.0199132141783263,0.0506801187398187,-0.0126728265790937,0.0700725447072635,-0.0112006298276192,0.00714113104209875,-0.0397192078479398,0.0343088588777263,0.00538436996854573,0.00306440941436832,91.0\n338,-0.0636351701951234,-0.044641636506989,-0.0331512559828308,-0.0332135761048244,0.00118294589619092,0.0240511479787335,-0.0249926566315915,-0.00259226199818282,-0.0225121719296605,-0.0590671943081523,214.0\n339,0.0271782910803654,-0.044641636506989,-0.00728376620968916,-0.0504279295735057,0.075484400239052,0.0566185880048449,0.0339135482338016,-0.00259226199818282,0.0434431722527813,0.0154907301588724,95.0\n340,-0.0164121703318693,-0.044641636506989,-0.0137506386529745,0.13204421719
4516,-0.00982467696941811,-0.00381906512053488,0.0191869970174533,-0.0394933828740919,-0.0358167281015492,-0.0300724459043093,216.0\n341,0.030810829531385,0.0506801187398187,0.0595405823709267,0.0563010619323185,-0.0222082526932283,0.00119131026809764,-0.0323559322397657,-0.00259226199818282,-0.0247911874324607,-0.0176461251598052,263.0\n342,0.0562385986885218,0.0506801187398187,0.0218171597850952,0.0563010619323185,-0.00707277125301585,0.0181013272047324,-0.0323559322397657,-0.00259226199818282,-0.0236445575721341,0.0237749439885419,178.0\n343,-0.0200447087828888,-0.044641636506989,0.0185837235634525,0.090729768869681,0.00393485161259318,0.00870687335104641,0.0375951860378887,-0.0394933828740919,-0.0578000656756125,0.00720651632920303,113.0\n344,-0.107225631607358,-0.044641636506989,-0.0115950145052127,-0.0400993174922969,0.0493412959332305,0.0644472995495832,-0.0139477432193303,0.0343088588777263,0.00702686254915195,-0.0300724459043093,200.0\n345,0.0816663678456587,0.0506801187398187,-0.00297251791416553,-0.0332135761048244,0.0424615316422248,0.057871181852003,-0.0102661054152432,0.0343088588777263,-0.000609254186102297,-0.00107769750046639,139.0\n346,0.00538306037424807,0.0506801187398187,0.0175059114895716,0.0322009670761646,0.127770608850695,0.127390140369279,-0.0213110188275045,0.0712099797536354,0.062575181458056,0.0154907301588724,139.0\n347,0.0380759064334241,0.0506801187398187,-0.0299178197611881,-0.0745280244296595,-0.0125765826858204,-0.0125872220506418,0.00446044580110504,-0.00259226199818282,0.00371173823343597,-0.0300724459043093,88.0\n348,0.030810829531385,-0.044641636506989,-0.02021751109626,-0.00567061055493425,-0.00432086553661359,-0.0294972389872765,0.0780932018828464,-0.0394933828740919,-0.0109044358473771,-0.00107769750046639,148.0\n349,0.00175052192322852,0.0506801187398187,-0.0579409336820915,-0.0435421881860331,-0.0965097070360893,-0.0470335528474903,-0.098625412713333,0.0343088588777263,-0.0611765950943345,-0.0714935150526564,88.0\n350,-0.0
273097856849279,0.0506801187398187,0.0606183944448076,0.107944122338362,0.0121905687618,-0.0175975974392743,-0.0029028298070691,-0.00259226199818282,0.0702112981933102,0.135611830689079,243.0\n351,-0.0854304009012408,0.0506801187398187,-0.0406959404999971,-0.0332135761048244,-0.0813742255958769,-0.0695802420963367,-0.00658446761115617,-0.0394933828740919,-0.0578000656756125,-0.0424987666488135,71.0\n352,0.0126481372762872,0.0506801187398187,-0.0719524906425432,-0.0469850588797694,-0.051103262715452,-0.0971373067338155,0.118591217727804,-0.076394503750001,-0.0202887477516296,-0.0383566597339788,77.0\n353,-0.0527375548420648,-0.044641636506989,-0.0557853095343297,-0.0366564467985606,0.0892439288210632,-0.00319276819695581,0.0081420836051921,0.0343088588777263,0.132372649338676,0.00306440941436832,109.0\n354,-0.0236772472339084,0.0506801187398187,0.045529025410475,0.0218723549949558,0.10988322169408,0.0888728795691667,0.000778807997017968,0.0343088588777263,0.0741925366900307,0.0610539062220542,272.0\n355,-0.0745327855481821,0.0506801187398187,-0.00943939035745095,0.0149866136074833,-0.0373437341334407,-0.0216685274425382,-0.0139477432193303,-0.00259226199818282,-0.0332487872476258,0.0113486232440377,60.0\n356,-0.00551455497881059,0.0506801187398187,-0.0331512559828308,-0.015999222636143,0.00806271018719657,0.0162224364339952,0.0155053592133662,-0.00259226199818282,-0.0283202425479987,-0.0756356219674911,54.0\n357,-0.0600026317441039,0.0506801187398187,0.0498402737059986,0.0184294843012196,-0.0167044412604238,-0.0301235359108556,-0.0176293810234174,-0.00259226199818282,0.049768659920749,-0.0590671943081523,221.0\n358,-0.0200447087828888,-0.044641636506989,-0.084886235529114,-0.0263278347173518,-0.0359677812752396,-0.0341944659141195,0.0412768238419757,-0.0516707527631419,-0.0823814832581028,-0.0466408735636482,90.0\n359,0.0380759064334241,0.0506801187398187,0.00564997867688165,0.0322009670761646,0.00668675732899544,0.0174750302811533,-0.0249926566315915,0.0343088588777
263,0.0148227108412663,0.0610539062220542,311.0\n360,0.0162806757273067,-0.044641636506989,0.0207393477112143,0.0218723549949558,-0.0139525355440215,-0.0132135189742209,-0.00658446761115617,-0.00259226199818282,0.0133159679089277,0.0403433716478807,281.0\n361,0.0417084448844436,-0.044641636506989,-0.00728376620968916,0.0287580963824284,-0.0428475455662452,-0.0482861466946485,0.052321737254237,-0.076394503750001,-0.072128454601956,0.0237749439885419,182.0\n362,0.0199132141783263,0.0506801187398187,0.104808689473925,0.0700725447072635,-0.0359677812752396,-0.0266789028311707,-0.0249926566315915,-0.00259226199818282,0.00371173823343597,0.0403433716478807,321.0\n363,-0.0491050163910452,0.0506801187398187,-0.0245287593917836,6.75072794357462e-05,-0.0469754041408486,-0.0282446451401184,-0.0654906724765493,0.0284046795375808,0.0191990330785671,0.0113486232440377,58.0\n364,0.00175052192322852,0.0506801187398187,-0.00620595413580824,-0.0194420933298793,-0.00982467696941811,0.00494909180957202,-0.0397192078479398,0.0343088588777263,0.0148227108412663,0.0983328684555666,262.0\n365,0.0344433679824045,-0.044641636506989,-0.0385403163522353,-0.0125563519424068,0.0094386630453977,0.00526224027136155,-0.00658446761115617,-0.00259226199818282,0.0311929907028023,0.0983328684555666,206.0\n366,-0.0454724779400257,0.0506801187398187,0.137143051690352,-0.015999222636143,0.0410855787840237,0.0318798595234718,-0.0434008456520269,0.0712099797536354,0.0710215779459822,0.0486275854775501,233.0\n367,-0.00914709342983014,0.0506801187398187,0.17055522598066,0.0149866136074833,0.0300779559184146,0.033758750294209,-0.0213110188275045,0.0343088588777263,0.0336568129023847,0.0320591578182113,242.0\n368,-0.0164121703318693,0.0506801187398187,0.00241654245523897,0.0149866136074833,0.0218222387692079,-0.0100820343563255,-0.0249926566315915,0.0343088588777263,0.085533121187439,0.0817644407962278,123.0\n369,-0.00914709342983014,-0.044641636506989,0.0379843408933087,-0.0400993174922969,-0.0249601584096305,
-0.00381906512053488,-0.0434008456520269,0.0158582984397717,-0.00514530798026311,0.0279170509033766,167.0\n370,0.0199132141783263,-0.044641636506989,-0.0579409336820915,-0.0573136709609782,-0.00156895982021134,-0.0125872220506418,0.0744115640787594,-0.0394933828740919,-0.0611765950943345,-0.0756356219674911,63.0\n371,0.0526060602375023,0.0506801187398187,-0.00943939035745095,0.0494153205448459,0.0507172487914316,-0.019163339748222,-0.0139477432193303,0.0343088588777263,0.119343994203787,-0.0176461251598052,197.0\n372,-0.0273097856849279,0.0506801187398187,-0.0234509473179027,-0.015999222636143,0.0135665216200011,0.0127778033543103,0.0265502726256275,-0.00259226199818282,-0.0109044358473771,-0.0217882320746399,71.0\n373,-0.0745327855481821,-0.044641636506989,-0.0105172024313319,-0.00567061055493425,-0.0662387441556644,-0.0570543036247554,-0.0029028298070691,-0.0394933828740919,-0.0425721049227942,-0.00107769750046639,168.0\n374,-0.107225631607358,-0.044641636506989,-0.0342290680567117,-0.067642283042187,-0.0634868384392622,-0.0705196874817053,0.0081420836051921,-0.0394933828740919,-0.000609254186102297,-0.0797777288823259,140.0\n375,0.0453409833354632,0.0506801187398187,-0.00297251791416553,0.107944122338362,0.0355817673512192,0.0224854056697859,0.0265502726256275,-0.00259226199818282,0.028016506523264,0.0196328370737072,217.0\n376,-0.00188201652779104,-0.044641636506989,0.068163078961974,-0.00567061055493425,0.119514891701488,0.130208476525385,-0.0249926566315915,0.0867084505215172,0.0461323310394148,-0.00107769750046639,121.0\n377,0.0199132141783263,0.0506801187398187,0.00996122697240527,0.0184294843012196,0.0149424744782022,0.0447189464568426,-0.0618090346724622,0.0712099797536354,0.00943640914607987,-0.063209301222987,235.0\n378,0.0162806757273067,0.0506801187398187,0.00241654245523897,-0.00567061055493425,-0.00569681839481472,0.0108989125835731,-0.050764121260201,0.0343088588777263,0.0226920225667445,-0.0383566597339788,245.0\n379,-0.00188201652779104,-0.0446416
36506989,-0.0385403163522353,0.0218723549949558,-0.108893282759899,-0.115613065979398,0.0228686348215404,-0.076394503750001,-0.0468794828442166,0.0237749439885419,40.0\n380,0.0162806757273067,-0.044641636506989,0.0261284080806188,0.0585963091762383,-0.0607349327228599,-0.0442152166913845,-0.0139477432193303,-0.0339582147427055,-0.0514005352605825,-0.0259303389894746,52.0\n381,-0.0709002470971626,0.0506801187398187,-0.0891974838246376,-0.0745280244296595,-0.0428475455662452,-0.0257394574458021,-0.0323559322397657,-0.00259226199818282,-0.0129079422541688,-0.0549250873933176,104.0\n382,0.0489735217864827,-0.044641636506989,0.0606183944448076,-0.0228849640236156,-0.0235842055514294,-0.072711726714232,-0.0434008456520269,-0.00259226199818282,0.104137611358979,0.036201264733046,132.0\n383,0.00538306037424807,0.0506801187398187,-0.0288400076873072,-0.00911348124867051,-0.0318399227006362,-0.0288709420636975,0.0081420836051921,-0.0394933828740919,-0.0181182673078967,0.00720651632920303,88.0\n384,0.0344433679824045,0.0506801187398187,-0.0299178197611881,0.00465800152627453,0.0933717873956666,0.0869939887984295,0.0339135482338016,-0.00259226199818282,0.024052583226893,-0.0383566597339788,69.0\n385,0.0235457526293458,0.0506801187398187,-0.019139699022379,0.0494153205448459,-0.0634868384392622,-0.0611252336280193,0.00446044580110504,-0.0394933828740919,-0.0259524244351894,-0.0135040182449705,219.0\n386,0.0199132141783263,-0.044641636506989,-0.0406959404999971,-0.015999222636143,-0.00844872411121698,-0.0175975974392743,0.052321737254237,-0.0394933828740919,-0.0307512098645563,0.00306440941436832,72.0\n387,-0.0454724779400257,-0.044641636506989,0.0153502873418098,-0.0745280244296595,-0.0497273098572509,-0.0172844489774848,-0.0286742944356786,-0.00259226199818282,-0.104364820832166,-0.0756356219674911,201.0\n388,0.0526060602375023,0.0506801187398187,-0.0245287593917836,0.0563010619323185,-0.00707277125301585,-0.005071658967693,-0.0213110188275045,-0.00259226199818282,0.02671425763
35128,-0.0383566597339788,110.0\n389,-0.00551455497881059,0.0506801187398187,0.00133873038135806,-0.0848566365108683,-0.0112006298276192,-0.0166581520539057,0.0486400994501499,-0.0394933828740919,-0.0411803851880079,-0.0880619427119953,51.0\n390,0.00901559882526763,0.0506801187398187,0.0692408910358548,0.0597439326260547,0.0176943801946045,-0.0232342697514859,-0.0470824834561139,0.0343088588777263,0.103292264911524,0.0734802269665584,277.0\n391,-0.0236772472339084,-0.044641636506989,-0.0697968664947814,-0.0641994123484507,-0.0593589798646588,-0.0504781859271752,0.0191869970174533,-0.0394933828740919,-0.0891368600793477,-0.0507829804784829,63.0\n392,-0.0418399394890061,0.0506801187398187,-0.0299178197611881,-0.00222773986119799,0.0218222387692079,0.0365770864503148,0.0118237214092792,-0.00259226199818282,-0.0411803851880079,0.065196013136889,118.0\n393,-0.0745327855481821,-0.044641636506989,-0.0460850008694016,-0.0435421881860331,-0.0290880169842339,-0.0232342697514859,0.0155053592133662,-0.0394933828740919,-0.0398095943643375,-0.0217882320746399,69.0\n394,0.0344433679824045,-0.044641636506989,0.0185837235634525,0.0563010619323185,0.0121905687618,-0.0545491159304391,-0.0691723102806364,0.0712099797536354,0.130080609521753,0.00720651632920303,273.0\n395,-0.0600026317441039,-0.044641636506989,0.00133873038135806,-0.0297707054110881,-0.00707277125301585,-0.0216685274425382,0.0118237214092792,-0.00259226199818282,0.0318152175007986,-0.0549250873933176,258.0\n396,-0.0854304009012408,0.0506801187398187,-0.030995631835069,-0.0228849640236156,-0.0634868384392622,-0.0542359674686496,0.0191869970174533,-0.0394933828740919,-0.096433222891784,-0.0342145528191441,43.0\n397,0.0526060602375023,-0.044641636506989,-0.00405032998804645,-0.0309183289641906,-0.0469754041408486,-0.0583068974719135,-0.0139477432193303,-0.0258399681500055,0.0360557900898319,0.0237749439885419,198.0\n398,0.0126481372762872,-0.044641636506989,0.0153502873418098,-0.0332135761048244,0.0410855787840237,0.032193
0079852613,-0.0029028298070691,-0.00259226199818282,0.0450661683362615,-0.0673514081378217,242.0\n399,0.0598711371395414,0.0506801187398187,0.0228949718589761,0.0494153205448459,0.0163184273364034,0.0118383579689417,-0.0139477432193303,-0.00259226199818282,0.0395398780720242,0.0196328370737072,232.0\n400,-0.0236772472339084,-0.044641636506989,0.045529025410475,0.090729768869681,-0.0180803941186249,-0.0354470597612776,0.0707299262746723,-0.0394933828740919,-0.0345237153303495,-0.0093619113301358,175.0\n401,0.0162806757273067,-0.044641636506989,-0.0450071887955207,-0.0573136709609782,-0.0345918284170385,-0.05392281900686,0.0744115640787594,-0.076394503750001,-0.0425721049227942,0.0403433716478807,93.0\n402,0.110726675453815,0.0506801187398187,-0.0331512559828308,-0.0228849640236156,-0.00432086553661359,0.0202933664372591,-0.0618090346724622,0.0712099797536354,0.0155668445407018,0.0444854785627154,168.0\n403,-0.0200447087828888,-0.044641636506989,0.0972640049567582,-0.00567061055493425,-0.00569681839481472,-0.0238605666750649,-0.0213110188275045,-0.00259226199818282,0.0616858488238662,0.0403433716478807,275.0\n404,-0.0164121703318693,-0.044641636506989,0.0541515220015222,0.0700725447072635,-0.0332158755588373,-0.0279314966783289,0.0081420836051921,-0.0394933828740919,-0.0271286455543265,-0.0093619113301358,293.0\n405,0.0489735217864827,0.0506801187398187,0.1231314947299,0.0838440274822086,-0.104765424185296,-0.10089508827529,-0.0691723102806364,-0.00259226199818282,0.0366457977933988,-0.0300724459043093,281.0\n406,-0.0563700932930843,-0.044641636506989,-0.0805749872335904,-0.0848566365108683,-0.0373437341334407,-0.0370128020702253,0.0339135482338016,-0.0394933828740919,-0.0561575730950062,-0.137767225690012,72.0\n407,0.0271782910803654,-0.044641636506989,0.0929527566612346,-0.0527231767141394,0.00806271018719657,0.0397085710682101,-0.0286742944356786,0.021024455362399,-0.0483617248028919,0.0196328370737072,140.0\n408,0.063503675590561,-0.044641636506989,-0.050396249164
9252,0.107944122338362,0.0314539087766158,0.0193539210518905,-0.0176293810234174,0.0236075338237126,0.0580391276638951,0.0403433716478807,189.0\n409,-0.0527375548420648,0.0506801187398187,-0.0115950145052127,0.0563010619323185,0.0562210602242361,0.0729023080179005,-0.0397192078479398,0.0712099797536354,0.0305664873984148,-0.0052198044153011,181.0\n410,-0.00914709342983014,0.0506801187398187,-0.0277621956134263,0.0081008722200108,0.0479653430750293,0.0372033833738938,-0.0286742944356786,0.0343088588777263,0.0660482061630984,-0.0424987666488135,209.0\n411,0.00538306037424807,-0.044641636506989,0.0584627702970458,-0.0435421881860331,-0.07311850844667,-0.0723985782524425,0.0191869970174533,-0.076394503750001,-0.0514005352605825,-0.0259303389894746,136.0\n412,0.0744012909436196,-0.044641636506989,0.0854080721440683,0.063186803319791,0.0149424744782022,0.0130909518160999,0.0155053592133662,-0.00259226199818282,0.0062093156165054,0.0859065477110625,261.0\n413,-0.0527375548420648,-0.044641636506989,-0.000816893766403737,-0.0263278347173518,0.0108146159035988,0.00714113104209875,0.0486400994501499,-0.0394933828740919,-0.0358167281015492,0.0196328370737072,113.0\n414,0.0816663678456587,0.0506801187398187,0.00672779075076256,-0.00452298700183173,0.10988322169408,0.117056241130225,-0.0323559322397657,0.0918746074441444,0.0547240033481791,0.00720651632920303,131.0\n415,-0.00551455497881059,-0.044641636506989,0.00888341489852436,-0.0504279295735057,0.0259500973438113,0.0472241341511589,-0.0434008456520269,0.0712099797536354,0.0148227108412663,0.00306440941436832,174.0\n416,-0.0273097856849279,-0.044641636506989,0.0800190117746638,0.09876313370697,-0.00294491267841247,0.0181013272047324,-0.0176293810234174,0.00331191734196264,-0.0295276227417736,0.036201264733046,257.0\n417,-0.0527375548420648,-0.044641636506989,0.0713965151836166,-0.0745280244296595,-0.0153284884022226,-0.00131387742621863,0.00446044580110504,-0.0214118336448964,-0.0468794828442166,0.00306440941436832,55.0\n418,0
.00901559882526763,-0.044641636506989,-0.0245287593917836,-0.0263278347173518,0.0988755988284711,0.0941964034195887,0.0707299262746723,-0.00259226199818282,-0.02139368094036,0.00720651632920303,84.0\n419,-0.0200447087828888,-0.044641636506989,-0.0547074974604488,-0.0538708002672419,-0.0662387441556644,-0.0573674520865449,0.0118237214092792,-0.0394933828740919,-0.0740888714915354,-0.0052198044153011,42.0\n420,0.0235457526293458,-0.044641636506989,-0.0363846922044735,6.75072794357462e-05,0.00118294589619092,0.0346981956795776,-0.0434008456520269,0.0343088588777263,-0.0332487872476258,0.0610539062220542,146.0\n421,0.0380759064334241,0.0506801187398187,0.0164280994156907,0.0218723549949558,0.0397096259258226,0.0450320949186321,-0.0434008456520269,0.0712099797536354,0.049768659920749,0.0154907301588724,212.0\n422,-0.0781653239992017,0.0506801187398187,0.077863387626902,0.0528581912385822,0.0782363059554542,0.0644472995495832,0.0265502726256275,-0.00259226199818282,0.0406722637144977,-0.0093619113301358,233.0\n423,0.00901559882526763,0.0506801187398187,-0.0396181284261162,0.0287580963824284,0.0383336730676214,0.0735286049414796,-0.0728539480847234,0.108111100629544,0.0155668445407018,-0.0466408735636482,91.0\n424,0.00175052192322852,0.0506801187398187,0.0110390390462862,-0.0194420933298793,-0.0167044412604238,-0.00381906512053488,-0.0470824834561139,0.0343088588777263,0.024052583226893,0.0237749439885419,111.0\n425,-0.0781653239992017,-0.044641636506989,-0.0406959404999971,-0.081413765817132,-0.100637565610693,-0.112794729823292,0.0228686348215404,-0.076394503750001,-0.0202887477516296,-0.0507829804784829,152.0\n426,0.030810829531385,0.0506801187398187,-0.0342290680567117,0.0436772026071898,0.0575970130824372,0.0688313780146366,-0.0323559322397657,0.057556565029549,0.0354619386607697,0.0859065477110625,120.0\n427,-0.034574862586967,0.0506801187398187,0.00564997867688165,-0.00567061055493425,-0.07311850844667,-0.062690975936967,-0.00658446761115617,-0.0394933828740919,-0.0
45420957777041,0.0320591578182113,67.0\n428,0.0489735217864827,0.0506801187398187,0.088641508365711,0.0872868981759448,0.0355817673512192,0.0215459602844172,-0.0249926566315915,0.0343088588777263,0.0660482061630984,0.131469723774244,310.0\n429,-0.0418399394890061,-0.044641636506989,-0.0331512559828308,-0.0228849640236156,0.0465893902168282,0.0415874618389473,0.056003375058324,-0.0247329345237283,-0.0259524244351894,-0.0383566597339788,94.0\n430,-0.00914709342983014,-0.044641636506989,-0.0568631216082106,-0.0504279295735057,0.0218222387692079,0.0453452433804217,-0.0286742944356786,0.0343088588777263,-0.00991895736315477,-0.0176461251598052,183.0\n431,0.0707687524926,0.0506801187398187,-0.030995631835069,0.0218723549949558,-0.0373437341334407,-0.0470335528474903,0.0339135482338016,-0.0394933828740919,-0.0149564750249113,-0.00107769750046639,66.0\n432,0.00901559882526763,-0.044641636506989,0.0552293340754031,-0.00567061055493425,0.0575970130824372,0.0447189464568426,-0.0029028298070691,0.0232385226149535,0.0556835477026737,0.106617082285236,173.0\n433,-0.0273097856849279,-0.044641636506989,-0.0600965578298533,-0.0297707054110881,0.0465893902168282,0.0199802179754696,0.122272855531891,-0.0394933828740919,-0.0514005352605825,-0.0093619113301358,72.0\n434,0.0162806757273067,-0.044641636506989,0.00133873038135806,0.0081008722200108,0.00531080447079431,0.0108989125835731,0.0302319104297145,-0.0394933828740919,-0.045420957777041,0.0320591578182113,49.0\n435,-0.0127796318808497,-0.044641636506989,-0.0234509473179027,-0.0400993174922969,-0.0167044412604238,0.0046359433477825,-0.0176293810234174,-0.00259226199818282,-0.0384591123013538,-0.0383566597339788,64.0\n436,-0.0563700932930843,-0.044641636506989,-0.074108114790305,-0.0504279295735057,-0.0249601584096305,-0.0470335528474903,0.0928197530991947,-0.076394503750001,-0.0611765950943345,-0.0466408735636482,48.0\n437,0.0417084448844436,0.0506801187398187,0.0196615356373334,0.0597439326260547,-0.00569681839481472,-0.002566471273
37676,-0.0286742944356786,-0.00259226199818282,0.0311929907028023,0.00720651632920303,178.0\n438,-0.00551455497881059,0.0506801187398187,-0.0159062628007364,-0.067642283042187,0.0493412959332305,0.0791652772536912,-0.0286742944356786,0.0343088588777263,-0.0181182673078967,0.0444854785627154,104.0\n439,0.0417084448844436,0.0506801187398187,-0.0159062628007364,0.0172818607481171,-0.0373437341334407,-0.0138398158977999,-0.0249926566315915,-0.0110795197996419,-0.0468794828442166,0.0154907301588724,132.0\n440,-0.0454724779400257,-0.044641636506989,0.0390621529671896,0.00121513083253827,0.0163184273364034,0.0152829910486266,-0.0286742944356786,0.0265596234937854,0.0445283740214053,-0.0259303389894746,220.0\n441,-0.0454724779400257,-0.044641636506989,-0.0730303027164241,-0.081413765817132,0.0837401173882587,0.0278089295202079,0.17381578478911,-0.0394933828740919,-0.00421985970694603,0.00306440941436832,57.0\n"
  },
  {
    "path": "src/estimagic/examples/exam_points.csv",
    "content": "points\n275.5\n351.5\n346.25\n228.25\n108.25\n380.75\n346.25\n360.75\n196\n414.75\n370.5\n371.75\n143.75\n333.5\n397.5\n405.75\n154.75\n321\n279\n326.5\n49.5\n402.75\n389.75\n382.25\n337.75\n311\n105.5\n380.5\n236\n326.5\n343.75\n328.75\n316.25\n348.25\n338.75\n375.75\n410\n17\n414.25\n21.25\n369.625\n318.875\n336.125\n429.875\n407.5\n415.75\n332.375\n397\n375.875\n419.125\n270.125\n299.25\n384.125\n335\n408.5\n414.25\n253.5\n339.25\n338.75\n355.375\n326.375\n240.375\n385\n435\n317.25\n365.625\n372.75\n365.125\n349.625\n366.75\n386.5\n391.75\n403\n258.5\n386\n411\n350.25\n402.25\n294.625\n291.125\n378.125\n442.0\n428.1\n347.3\n431.8\n430.4\n426.0\n433.5\n331.1\n405.7\n415.5\n406.4\n418.6\n400.7\n408.8\n404.8\n409.4\n410.8\n402.5\n401.0\n415.3\n390.8\n394.6\n399.0\n380.0\n397.5\n368.7\n394.7\n304.3\n391.1\n388.4\n370.3\n384.6\n383.5\n305.6\n286.5\n367.9\n329.8\n288.2\n338.5\n333.6\n268.6\n335.2\n296.3\n269.1\n243.2\n159.4\n448.4\n449.8\n435.9\n429.4\n428.3\n427.5\n422.5\n409.8\n415.8\n413.4\n416.8\n406.7\n383.9\n389.0\n387.2\n368.6\n399.5\n382.6\n355.9\n389.9\n342.5\n365.2\n320.3\n341.5\n248.1\n305.0\n279.2\n275.7\n204.5\n235.0\n102.2\n112.3\n130.6\n60.2\n"
  },
  {
    "path": "src/estimagic/examples/logit.py",
    "content": "\"\"\"Likelihood functions and derivatives of a logit model.\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\nfrom optimagic import mark\n\n\ndef logit_loglike_and_derivative(params, y, x):\n    return logit_loglike(params, y, x), logit_jac(params, y, x)\n\n\n@mark.scalar\ndef scalar_logit_fun_and_jac(params, y, x):\n    return logit_loglike(params, y, x).sum(), logit_grad(params, y, x)\n\n\n@mark.likelihood\ndef logit_loglike(params, y, x):\n    \"\"\"Log-likelihood function of a logit model.\n\n    Args:\n        params (pd.DataFrame): The index consists of the parameter names,\n            the \"value\" column are the parameter values.\n        y (np.array): 1d numpy array with the dependent variable\n        x (np.array): 2d numpy array with the independent variables\n\n    Returns:\n        loglike (np.array): 1d numpy array with likelihood contribution  per individual\n\n    \"\"\"\n    if isinstance(params, pd.DataFrame):\n        p = params[\"value\"].to_numpy()\n    else:\n        p = params\n    q = 2 * y - 1\n    contribs = np.log(1 / (1 + np.exp(-(q * np.dot(x, p)))))\n\n    return contribs\n\n\n@mark.scalar\ndef logit_grad(params, y, x):\n    return logit_jac(params, y, x).sum(axis=0)\n\n\ndef logit_jac(params, y, x):\n    \"\"\"Derivative of the log-likelihood for each observation of a logit model.\n\n    Args:\n        params (pd.DataFrame): The index consists of the parmater names,\n            the \"value\" column are the parameter values.\n        y (np.array): 1d numpy array with the dependent variable\n        x (np.array): 2d numpy array with the independent variables\n\n    Returns:\n        jac : array-like\n            The derivative of the loglikelihood for each observation evaluated\n            at `params`.\n\n    \"\"\"\n    if isinstance(params, pd.DataFrame):\n        p = params[\"value\"].to_numpy()\n    else:\n        p = params\n    y = y.to_numpy()\n    c = 1 / (1 + np.exp(-(np.dot(x, p))))\n    jac = (y - c)[:, 
None] * x\n    return jac\n\n\ndef logit_hess(params, y, x):  # noqa: ARG001\n    \"\"\"Hessian matrix of the log-likelihood.\n\n    Args:\n        params (pd.DataFrame): The index consists of the parmater names,\n            the \"value\" column are the parameter values.\n        y (np.array): 1d numpy array with the dependent variable\n        x (np.array): 2d numpy array with the independent variables\n\n    Returns:\n        hessian (np.array) : 2d numpy array with the hessian of the\n            logl-ikelihood function evaluated at `params`\n\n    \"\"\"\n    if isinstance(params, pd.DataFrame):\n        p = params[\"value\"].to_numpy()\n    else:\n        p = params\n    c = 1 / (1 + np.exp(-(np.dot(x, p))))\n    return -np.dot(c * (1 - c) * x.T, x)\n"
  },
  {
    "path": "src/estimagic/examples/sensitivity_probit_example_data.csv",
    "content": ",y,intercept,x1,x2\n0,1,1.0,2.967339833505456,0.7105279305877271\n1,1,1.0,-0.4737153743988922,-1.1947183078244987\n2,0,1.0,-1.1011968596889783,-1.1704333745431343\n3,0,1.0,-1.1832573322549391,-1.812714817628745\n4,1,1.0,-2.3917863439314444,-0.2947731027029936\n5,1,1.0,0.0908366872724484,0.3628735729212425\n6,1,1.0,0.5150137863290288,0.5485807069534177\n7,1,1.0,0.5220340116294889,1.748757460776195\n8,1,1.0,0.5485275001956246,-0.5687296924071432\n9,1,1.0,1.43973351826228,-0.6278313683011209\n10,1,1.0,1.8281942689784905,0.5833740898183319\n11,0,1.0,0.6203701506873812,-1.2327182433057997\n12,0,1.0,0.2076431867285613,-0.8558012930544818\n13,1,1.0,-0.713737602411397,-0.4573876589872307\n14,1,1.0,0.9673009144681528,0.4266483456470918\n15,0,1.0,-0.4631899865771679,-1.0403265409190658\n16,1,1.0,0.2599554285953085,0.6341859007451927\n17,1,1.0,-0.4562329870545088,-0.1583203259067126\n18,1,1.0,1.1682151881456315,0.1039509925891936\n19,0,1.0,-0.0605745077022622,-1.8645801044006725\n20,0,1.0,0.5822247107503228,-0.2371636329667403\n21,0,1.0,0.4520876106211226,-0.9460673311759388\n22,1,1.0,0.0409344380538476,0.0426628982425797\n23,0,1.0,-0.143137988496383,0.0679483511882124\n24,1,1.0,0.9013176034221748,0.304171639050923\n25,1,1.0,-0.40511856420813,-1.1427561325612587\n26,0,1.0,-1.5053126843329996,-2.884391731316892\n27,0,1.0,-1.1331512623824767,-1.051132720307086\n28,1,1.0,0.8375281213414355,2.579436350691107\n29,1,1.0,1.0007174023555803,-0.3155487287071573\n30,1,1.0,0.9387951669370302,0.1871519140067916\n31,1,1.0,0.6863269382319725,0.1518721944545467\n32,0,1.0,-1.3296603207436988,-1.4590430175050315\n33,0,1.0,0.2695905640603205,-1.1971290308909963\n34,1,1.0,-0.767164882859859,0.2577870141677244\n35,1,1.0,-0.3378354875372247,-0.9501433638483396\n36,1,1.0,-0.5069580021169584,-0.875829581774297\n37,1,1.0,-0.6167194328609338,-0.5736531300036655\n38,1,1.0,-1.6214767172342575,0.188139381199072\n39,0,1.0,-1.1672730585489146,-0.6365164563768158\n40,1,1.0,1.172776386377527,
1.0297981695080152\n41,0,1.0,-1.297569295949586,-1.7718674689141647\n42,1,1.0,0.1157608402516829,1.628218918792883\n43,0,1.0,-1.7134262798084272,-1.1743038064390103\n44,1,1.0,-0.3378301805529568,-0.2910465476647137\n45,0,1.0,-1.2507660936638987,0.5192700189880949\n46,1,1.0,0.1852781797882933,0.532037838399578\n47,1,1.0,0.4713992108946831,-0.0537546275328069\n48,1,1.0,0.4179504108182624,-0.3475018384774174\n49,1,1.0,-0.9778983061644152,-0.729408252986756\n50,0,1.0,0.7944699277825029,0.329545003238688\n51,1,1.0,1.2343066783419687,1.5491790039309807\n52,1,1.0,0.1235621702894244,0.0592456128791801\n53,1,1.0,1.269120052278218,-0.1843042492235104\n54,1,1.0,-1.2634617606506475,0.3337263439422797\n55,0,1.0,-0.7040770738291899,0.6044788486986316\n56,1,1.0,0.8193210871328203,-0.0202599958298661\n57,0,1.0,-1.1187665172260364,-0.7329443748308223\n58,1,1.0,0.7733653804552899,0.9515755686139656\n59,0,1.0,-1.4284200762508827,-0.1591355401498399\n60,1,1.0,1.2557808993112447,0.7783356023914118\n61,1,1.0,1.0954237482760552,1.3705251159656435\n62,1,1.0,-0.1691717408839249,0.7651427516343878\n63,1,1.0,0.1626843344471919,1.6872421302843787\n64,0,1.0,-0.7163214429613063,0.0419288919170111\n65,0,1.0,-0.162170564451405,-0.304954808305126\n66,1,1.0,-1.0327080821266987,0.702572524175719\n67,0,1.0,-0.0696354589891792,-0.2855075672616649\n68,1,1.0,-0.2624176936441514,0.1752919443310249\n69,1,1.0,2.9393342559065134,1.137135462818222\n70,1,1.0,1.4841485099826488,1.712258677673838\n71,1,1.0,0.4565596145128141,-0.1027463605459767\n72,1,1.0,0.2084141086516602,1.2085839475816187\n73,0,1.0,-0.2347957644651526,0.1513618892279431\n74,1,1.0,1.9542771245178288,1.1043410659097217\n75,1,1.0,-0.3135962069181063,-0.2517000246000336\n76,1,1.0,0.2555445644824194,-0.0730217916434685\n77,1,1.0,-0.2059823410868791,-0.0543716903474794\n78,1,1.0,-0.0248905950961883,-0.5150388954448659\n79,0,1.0,1.225105650926888,-0.3862704052917259\n80,1,1.0,2.896474517346035,2.1344493415925743\n81,0,1.0,-0.9491912347452498,-0.8480
648749834054\n82,1,1.0,1.876352942575416,0.5530603658361252\n83,0,1.0,-0.0174570333207706,-0.3103318292477376\n84,0,1.0,-1.019760355761109,-1.0971349945509807\n85,1,1.0,0.4084657488393273,0.794047731322371\n86,1,1.0,-0.0458605880080085,-0.0353863139287917\n87,1,1.0,-0.6693379543480799,0.1142902898413986\n88,1,1.0,0.1779490476364521,-0.4603061273110742\n89,0,1.0,-0.9547283329747902,-1.8625332801880468\n90,1,1.0,0.3239303547353877,0.2107081373806453\n91,1,1.0,1.6972498476778457,1.044410604093328\n92,1,1.0,0.6813433805395099,1.4921237344313034\n93,1,1.0,0.4461024414775839,0.0654578597019615\n94,1,1.0,0.2078533896018308,-0.2541940646803443\n95,1,1.0,0.3263353239247097,0.4982692324763858\n96,0,1.0,-0.7302304887271422,-1.2242819044689828\n97,0,1.0,-2.134148270229161,-1.4070260108821095\n98,1,1.0,0.7663482764745027,0.4445176623754732\n99,1,1.0,1.1178424200542096,1.146653429079442\n100,1,1.0,0.9365769455461784,0.7638840833057274\n101,1,1.0,-0.0006918433006383,-1.2705885563349224\n102,0,1.0,-1.6115798626589557,-1.0135984193972905\n103,1,1.0,1.1393695947132625,0.292807998613878\n104,0,1.0,-1.4110909753020493,-2.0360703145826013\n105,0,1.0,-0.2591328569667465,0.9303415754138996\n106,1,1.0,2.1297240912820303,0.9722628513110064\n107,1,1.0,0.1969352464276115,0.2044264256090287\n108,1,1.0,0.5632045286945059,2.166210352951132\n109,1,1.0,-0.8431918326214936,0.0646403189429225\n110,1,1.0,-0.0462805349133515,0.629760884331796\n111,1,1.0,0.8478655164781554,-0.324044211049318\n112,0,1.0,-0.3430960696190223,-0.1083513310120653\n113,1,1.0,-0.8412163461781723,-0.3694768855277115\n114,1,1.0,0.0425250043881166,2.5822006195163314\n115,0,1.0,-2.1038431417665224,-1.9043419555741448\n116,0,1.0,-0.3549849715649531,-0.8638934105406288\n117,0,1.0,-0.7386323058296131,-0.8546395079376573\n118,1,1.0,1.2668048563539456,0.0781058994909303\n119,1,1.0,0.9845806200984912,0.5908293048839913\n120,1,1.0,1.1762057615240264,-1.2267758372574409\n121,1,1.0,-0.9524628689796972,0.1346436887974137\n122,1,1.0,0.18334
94108495044,-1.0260862933604546\n123,1,1.0,0.133916753789661,1.296504893146537\n124,0,1.0,0.1578065358086548,0.2694749105461692\n125,0,1.0,-1.296910200945708,-1.1032412046335796\n126,1,1.0,1.2892943166086595,-0.1787385210775674\n127,1,1.0,0.9431499935730242,-0.0326729938331903\n128,0,1.0,-2.4371635709848047,-0.92093226123648\n129,1,1.0,-0.2628329413698394,-0.93694947924651\n130,1,1.0,0.6031729148792794,0.6189866518971492\n131,1,1.0,-0.90559012177888,-1.5529327070681278\n132,1,1.0,-0.855044509066238,-1.2704022111290432\n133,1,1.0,-0.5705964537327763,0.3076153691824254\n134,1,1.0,0.5608257110081025,0.6174472584833083\n135,1,1.0,-0.0228631974187066,1.2042243267456483\n136,0,1.0,0.0530696858084396,0.0558334991389864\n137,1,1.0,0.297809951960623,1.3739524610207354\n138,0,1.0,0.3429162534394854,-0.5915842517979316\n139,1,1.0,0.1734106016517921,1.507517484625247\n140,0,1.0,0.33821118682033,1.0102552429771807\n141,1,1.0,1.02653644515944,1.02143778909253\n142,1,1.0,0.3027320514309517,-1.4960041452449413\n143,1,1.0,-1.2697668292847764,-0.5618196019415743\n144,1,1.0,-0.5485272823778224,-2.198463918159258\n145,1,1.0,0.693512418939432,0.0015694927958309\n146,1,1.0,-0.0784353030320485,0.0376932580498911\n147,0,1.0,-0.4037594846701222,-1.9366725167024248\n148,1,1.0,-1.6694098774422772,-0.0899218514434318\n149,1,1.0,0.8971866095072558,-0.2810372876345518\n150,1,1.0,0.7863694950315233,0.2404933928094482\n151,0,1.0,-0.3557892260599152,0.2877744618702097\n152,1,1.0,-0.3264627056735612,-1.3272226462525392\n153,1,1.0,1.7931975011096195,0.5312626320716513\n154,1,1.0,0.7713546449156672,0.4081133931417659\n155,1,1.0,1.172176474950308,-0.7681777891551985\n156,0,1.0,-1.7397413150964665,-1.3331921217915137\n157,1,1.0,-0.7642442274527602,-0.3543317697385149\n158,1,1.0,-0.7622973505390092,-1.0043654423227852\n159,0,1.0,0.2183761115134746,0.1012303526877383\n160,0,1.0,0.757650591005793,-1.3193289959201857\n161,1,1.0,1.5339011909795075,0.1427797797290031\n162,1,1.0,0.3428802701803621,-0.670916617
7455135\n163,0,1.0,-0.6695756656569956,-0.1236548642449643\n164,0,1.0,-2.670817685024346,-2.872780248512667\n165,1,1.0,1.2968864520966965,-0.734234141896909\n166,1,1.0,1.3055762611896382,0.5622459948440263\n167,1,1.0,-1.3767653403773457,0.091126858522262\n168,1,1.0,0.3896937795124568,0.0943941400849941\n169,1,1.0,-0.774479613424448,-0.3019000802803239\n170,0,1.0,-0.0634110861092311,-0.1665790405124566\n171,1,1.0,-0.5236970783533639,1.219647820786846\n172,1,1.0,1.6421650319269059,2.0004080783764366\n173,0,1.0,-0.9528542209170808,-1.688639170444758\n174,0,1.0,0.2367776663421717,-0.9764936985442892\n175,1,1.0,-0.1103685422432995,1.2263870453620218\n176,1,1.0,0.6468029014209795,-0.1125491795350973\n177,1,1.0,0.433514024457416,-0.699606770429479\n178,1,1.0,0.5557093563379646,1.5376372410355468\n179,1,1.0,0.0684287864291487,0.2549539224012551\n180,0,1.0,-1.6020562130119167,-3.1523422992631676\n181,1,1.0,2.530993840708988,2.0089350789073968\n182,0,1.0,-2.1789047186506245,-1.5661421903098047\n183,0,1.0,-1.0143967120165147,1.553384547849153\n184,1,1.0,-0.3766649261347968,-1.0992087933659138\n185,0,1.0,-0.3264425692830396,-0.5393112782332404\n186,1,1.0,0.0992541561086452,0.1564331665687704\n187,1,1.0,0.0779009573487326,0.1297158875442746\n188,0,1.0,-1.2011229146210671,-0.6769386208397218\n189,0,1.0,-1.034835688105053,-0.6932541343113545\n190,1,1.0,-1.0538139954195052,-0.6644377275004232\n191,0,1.0,-0.0787614364836631,-0.7218602561575269\n192,0,1.0,-0.2312767149706146,-0.0291788116972061\n193,1,1.0,0.5368286496648684,-0.4652339662035581\n194,1,1.0,-0.5161769087658401,-0.5950592586145473\n195,0,1.0,-1.6891790799062465,-1.5863280071109278\n196,1,1.0,0.2171321954941918,1.6213288020007652\n197,0,1.0,-0.9128648541202304,0.4576788535533095\n198,0,1.0,-1.492745460495128,-0.6209847326587016\n199,1,1.0,0.0575406705969811,0.1938543753585582\n200,1,1.0,2.1172076399797217,-0.1236919908770869\n201,1,1.0,-0.1626427486852519,-0.0418562938180431\n202,1,1.0,0.6054438030974071,1.806229748941276
4\n203,1,1.0,0.3028163105308905,-0.015132002316731\n204,1,1.0,0.5093766380457077,0.5200349901127361\n205,0,1.0,-0.9777859179248424,-0.6048699537019336\n206,1,1.0,1.550736237638344,1.8122982462119728\n207,1,1.0,0.8357124233487145,-1.3200050086406558\n208,0,1.0,0.0523292492226149,0.3103116279689318\n209,1,1.0,-0.2433775165368695,-0.2069161324931882\n210,1,1.0,0.9772189727418472,-0.6958513049018134\n211,0,1.0,-1.3899868885523656,-0.850863428787955\n212,1,1.0,0.3558687272455471,1.0763750887530195\n213,1,1.0,0.1825344690271067,0.6058034898038257\n214,0,1.0,-1.7230734924219526,-1.3982647789591447\n215,1,1.0,-0.893682326457443,-0.2559151634337065\n216,1,1.0,1.6124510712779236,0.9274695947325436\n217,1,1.0,0.8533514042158431,-0.5173271109299739\n218,1,1.0,-0.5940285064261097,-0.8869896231033375\n219,0,1.0,-1.1571085386996875,-0.4007227965229337\n220,1,1.0,-0.4811524615163916,-0.9157792540196568\n221,1,1.0,0.7577697909855564,0.3726344661588177\n222,1,1.0,0.8336022247491442,0.1830281488597728\n223,0,1.0,-0.1475136397050963,-0.7707608952355066\n224,0,1.0,-3.997368053852816,-2.101625256349573\n225,0,1.0,-0.6322397999095749,-1.3741277039341029\n226,1,1.0,1.1718199437536057,-0.2596835687715374\n227,1,1.0,1.0383038297649536,-0.2241598969261504\n228,1,1.0,-0.0663217464490231,-0.9663854691419248\n229,0,1.0,-0.2620401802843139,-0.6551163537399683\n230,1,1.0,-0.8711551943827907,1.6526234409045248\n231,1,1.0,-0.454503170626116,-0.3992887139743629\n232,1,1.0,-0.3398309735153191,0.5915035882354636\n233,1,1.0,-0.0941102884706825,0.5781564535766524\n234,1,1.0,-0.0050339294241279,0.0585440231134717\n235,1,1.0,0.9312014517072728,0.4425799319986031\n236,1,1.0,0.7967252896162756,2.569724989482353\n237,1,1.0,0.8944925587361772,1.1566950632271873\n238,1,1.0,-0.7521110245807455,-0.4267067547412025\n239,1,1.0,-0.0535873810152297,-0.7360222350516838\n240,1,1.0,1.2521867392737955,0.056428559644252\n241,1,1.0,0.1033942998202911,0.5605960667292729\n242,1,1.0,0.5833348969824241,0.2531813880173922\n243,
0,1.0,-1.0645438621752024,-1.303562039254952\n244,1,1.0,0.4885443174441546,0.7233863844230481\n245,1,1.0,-0.1604764282411145,0.105122448427376\n246,1,1.0,-1.300957177234422,-1.5532019347076516\n247,0,1.0,-1.7044162128692306,-2.0542500697482384\n248,0,1.0,1.3276652048384383,0.9682809898945218\n249,1,1.0,-0.1830327437266867,0.2037877709707388\n250,0,1.0,-0.5272269625826158,-0.209700992281532\n251,0,1.0,0.0259507800846553,0.0411236100334752\n252,1,1.0,0.2710353305750642,0.2943050293540375\n253,1,1.0,1.5041294473761584,0.5946513596450765\n254,1,1.0,0.0145544352540653,0.3705696340714013\n255,0,1.0,-0.1291168438974311,0.111426386746702\n256,1,1.0,0.0817896748680334,0.0606794408675246\n257,1,1.0,0.7621088732237331,-0.8905255802050267\n258,0,1.0,-0.8322133747312974,-0.7790859392929107\n259,1,1.0,0.0355942880687503,0.3009959974810477\n260,1,1.0,0.6369944436845489,0.6189645756553264\n261,1,1.0,1.2670501959399332,0.391628198512087\n262,0,1.0,-0.4787291522723686,-0.5703539678097848\n263,0,1.0,-0.826517804170331,-0.4972232873404489\n264,0,1.0,-1.718542348135264,-1.224754253077296\n265,1,1.0,1.0484752763280232,0.5893242036287374\n266,1,1.0,0.4474382854026535,-0.2669761606754156\n267,1,1.0,-1.7636646176498751,-0.581777714938195\n268,0,1.0,-0.1044211730895233,0.1915387906660626\n269,1,1.0,0.1009447676484083,-0.3522684357081524\n270,0,1.0,-1.2350658136514983,-1.9464534855344024\n271,0,1.0,-0.92056925302896,-1.2931521291522536\n272,0,1.0,-1.3220685255590627,-0.9675978863071708\n273,1,1.0,0.6052114261269296,0.728102938213813\n274,1,1.0,0.5149992113311551,0.0165534507833951\n275,0,1.0,0.1586237445585029,-0.313239766080771\n276,1,1.0,1.8345306001451973,1.12546911647858\n277,0,1.0,-1.790005298850818,-1.7223741187038832\n278,0,1.0,-0.8219700326667985,0.3612843693356167\n279,0,1.0,-0.4396539011852441,-0.9215054194243268\n280,1,1.0,0.5123834603118314,0.0455702091769979\n281,1,1.0,0.7899626513107794,1.0660503420463314\n282,1,1.0,0.4338554957801895,-0.6210919770787423\n283,0,1.0,-0.0788488896
6718,-1.563819819117406\n284,0,1.0,-0.4082491508367082,0.9470829546722284\n285,1,1.0,1.3678165391481167,2.4329547329193164\n286,1,1.0,2.1374523981366247,1.9690326249256476\n287,1,1.0,0.4036209992141275,1.7000818372076283\n288,0,1.0,-2.316725988882649,-1.3224712601990403\n289,1,1.0,2.9225629006916485,0.7067649055710895\n290,1,1.0,-0.3178591434216822,-0.9781152975546747\n291,0,1.0,-1.7814341389327732,0.4358027604157619\n292,1,1.0,-0.7759742860112459,0.4343797947519167\n293,0,1.0,-1.521144008128242,-1.8770242441720129\n294,1,1.0,1.207933413154748,0.5848609575113036\n295,0,1.0,-0.5195977105007731,-1.9801780905893072\n296,0,1.0,-1.2670988239980543,0.0464519360412306\n297,1,1.0,1.7391157801536254,1.5648059307712128\n298,1,1.0,1.8341510139252708,1.0819962650092607\n299,0,1.0,-0.956008516719905,-1.0790439674454115\n300,0,1.0,-0.2283707586108963,-0.8154219598905247\n301,0,1.0,0.1027208659533498,-1.209479193874335\n302,1,1.0,0.2342677470450889,-0.3841667249881214\n303,0,1.0,-0.9822848116666076,-0.1730465612300329\n304,1,1.0,-0.2736723014269126,-0.9943192348217365\n305,1,1.0,0.6694745607141676,-0.7283559417298938\n306,1,1.0,-0.5030360205988657,2.42714457383548\n307,1,1.0,-0.5091672703742999,-0.2853721752296898\n308,0,1.0,-0.5248330968832379,-1.6180776625709798\n309,1,1.0,-1.5004986329180396,-0.484153601771506\n310,1,1.0,-0.6158479387505932,0.0178753874195085\n311,0,1.0,0.1222948381328843,0.1178646894817386\n312,0,1.0,0.533319323502044,0.1500645438542796\n313,1,1.0,1.2984280643034,0.1410441764464916\n314,0,1.0,-1.2076469395463743,-0.2149425674625211\n315,1,1.0,0.85504548218145,0.7231904990548563\n316,0,1.0,-1.344968676687029,-0.1620686622510434\n317,0,1.0,-1.4043650973680473,-0.6189631658982028\n318,1,1.0,-0.3957641275287884,-0.3723833259463206\n319,1,1.0,-0.8685195500942487,-0.8328101737390551\n320,1,1.0,0.4547358035067283,0.2561435661792704\n321,1,1.0,0.792675209341637,1.1277798330473867\n322,1,1.0,0.7382319891040461,-0.3199441621407357\n323,1,1.0,-0.168298776723828,0.2744198
530881772\n324,0,1.0,-1.3209228037598135,-1.8641796960926813\n325,1,1.0,0.443657137582849,1.1550768990864873\n326,1,1.0,0.011542739833444,-0.7094220262449857\n327,1,1.0,0.5773609086306638,-0.7259515479675905\n328,1,1.0,0.8879340843067987,1.4070505205555208\n329,1,1.0,0.6200477359005288,1.1345345467203014\n330,1,1.0,0.7310595252555993,0.7455911761286926\n331,0,1.0,-1.7658619849888864,-1.3720928516281985\n332,0,1.0,-0.1341473456163144,-0.994298239199161\n333,0,1.0,-0.6744754495951865,-0.3490680106918694\n334,1,1.0,0.3879026726978911,0.3258163255631186\n335,1,1.0,0.7540381493807256,0.6777241712068732\n336,0,1.0,-1.3716973324326427,-0.4155500665400312\n337,0,1.0,-0.534645259819218,-0.0229027180032056\n338,0,1.0,-1.1086912800257407,-0.0672810453228422\n339,1,1.0,-0.0252833781001102,-0.2917860158218366\n340,0,1.0,-0.732104812450567,-1.4723941014147677\n341,1,1.0,-0.2918157185783673,0.2069278085364787\n342,1,1.0,-1.0591658056569309,-0.1148390749014321\n343,1,1.0,1.157863530635342,0.1520750836099127\n344,1,1.0,0.2324638798226632,-0.7465587815502271\n345,0,1.0,-0.2067659239640128,-1.0167996994927333\n346,0,1.0,-0.3971782657873483,-1.5076667897825005\n347,1,1.0,0.953349979616936,1.7390279032350708\n348,1,1.0,0.0700395737287643,-1.3279362540064\n349,1,1.0,-0.6789129387004738,0.3332036016237296\n350,0,1.0,-0.4391369271827264,-0.7074565150801869\n351,1,1.0,-0.3443075117824245,1.8097972610869313\n352,1,1.0,-0.1284660003310376,-0.5801554388335964\n353,1,1.0,1.34829720408885,0.4320826453797517\n354,1,1.0,0.8318590446012307,0.6864361974022913\n355,0,1.0,-1.5564319014907797,-0.6033157273688537\n356,1,1.0,1.1595761619710239,0.2178616340559158\n357,1,1.0,0.7048647888684799,0.8918527296532835\n358,1,1.0,0.6951026378263953,1.2800949387118135\n359,0,1.0,0.2926265412028538,0.3070625379533885\n360,0,1.0,0.8741458802788883,-0.7216229201473695\n361,0,1.0,-1.42192945762898,-0.6202074352425955\n362,0,1.0,-1.9656085754759007,-3.1766209803054286\n363,1,1.0,0.3574557075096347,-1.0496463677400432\n
364,0,1.0,-0.8922492072567841,-1.479611796991133\n365,0,1.0,-0.0832253490197983,-0.6592784984364464\n366,1,1.0,0.995113052382914,1.5947636437525377\n367,1,1.0,0.3382176201063367,1.1104992284320743\n368,1,1.0,1.0212880016810113,0.8654728026738276\n369,0,1.0,-1.1426015797190967,0.0662036821644343\n370,1,1.0,0.4506288328915611,-0.558854137371921\n371,0,1.0,-2.104889350035209,-2.362407390816069\n372,1,1.0,1.1024369159275051,-0.9347324212512744\n373,1,1.0,-1.519101980822113,-0.2600589435428634\n374,1,1.0,1.142816304263952,0.2951330740252899\n375,1,1.0,-0.7371888587016935,0.9359396425381616\n376,0,1.0,-1.5065209256280567,-0.410848272122933\n377,1,1.0,-0.0595100402309776,0.5550810233373265\n378,1,1.0,-0.188660503511723,0.7186023351995943\n379,1,1.0,-0.7452214093124996,-0.7450187122196846\n380,1,1.0,-0.531544435414745,0.3138769959444683\n381,1,1.0,2.048227866003794,1.5083554913635764\n382,1,1.0,0.6001940600803981,-0.7264227059830543\n383,0,1.0,0.1519383904003125,-1.2891480708140328\n384,0,1.0,-0.3545910695625223,-2.192961497477754\n385,0,1.0,-0.3057643966312685,-0.585921640264153\n386,1,1.0,0.2471959359997532,0.048414904403108\n387,0,1.0,0.2543414216850956,-1.2469656323452512\n388,0,1.0,-0.7609394037421923,-0.8448954377142418\n389,0,1.0,-0.9163726974412084,0.9882031619427212\n390,1,1.0,0.1508650108106266,0.8568553921820786\n391,0,1.0,-0.6642978950140246,-0.8889735434778726\n392,0,1.0,0.3548133507721483,-0.7779884413338854\n393,1,1.0,-0.8600548840980936,-0.4539912247700959\n394,1,1.0,-0.7429537738065075,0.0114437356426277\n395,1,1.0,-0.2535340844255676,0.9076532949641268\n396,1,1.0,0.8001937976744286,0.5722618352767321\n397,1,1.0,-0.3883565308771623,-0.8259023842662157\n398,1,1.0,0.5977958997071127,-0.275801624651131\n399,1,1.0,0.6387069878621207,1.3655600918843147\n400,1,1.0,1.0911760733371816,1.5791502556562729\n401,0,1.0,-0.9805425769609684,-0.3666617549156375\n402,0,1.0,-1.369441385924611,-0.9847889416331238\n403,1,1.0,1.1582984487228056,-1.2266167224361129\n404,1,1.0,0.
4440907238874604,-0.3936073933953013\n405,1,1.0,0.6611205983388556,1.060123465219715\n406,1,1.0,-0.8335795139696339,-0.9755222594150011\n407,0,1.0,-1.6439914475814803,-0.8675406395128159\n408,1,1.0,-0.8819368683329126,-0.1957812104822939\n409,1,1.0,0.8354705639144281,0.1287775822586926\n410,0,1.0,-0.2609988170970853,1.0192787770404355\n411,0,1.0,0.4776824481723035,-1.6632972544475249\n412,1,1.0,0.7352530908904895,0.1464596422096873\n413,1,1.0,1.7754145709866491,0.5240941887032902\n414,0,1.0,-0.6624419960181424,-0.7988957278612207\n415,1,1.0,-0.4443604762316816,-1.642515989493213\n416,1,1.0,0.2813076346511557,0.5221328197258545\n417,1,1.0,0.4047248032129417,-0.2519314931457125\n418,0,1.0,-1.4394125219510452,-0.6468744429238464\n419,1,1.0,-0.858863344498971,-1.0834174787617243\n420,1,1.0,-1.8221972955681007,-0.4423770310388989\n421,1,1.0,0.0340291364481049,0.0535410677891895\n422,1,1.0,0.1389924160873612,1.6561826698924016\n423,1,1.0,1.3137538710602334,0.2668931722847939\n424,0,1.0,-1.5823800259906016,-0.4843119202077253\n425,0,1.0,-0.9884121757552115,-1.017275176721108\n426,1,1.0,0.1891846729526719,0.7202276734240844\n427,1,1.0,0.5549509525985887,0.6555349243663006\n428,1,1.0,0.743046629972897,-0.2620526326348232\n429,1,1.0,-0.7994828361716214,0.0674717100057284\n430,1,1.0,1.3452888092027804,1.2993383495036197\n431,1,1.0,0.593434907945471,0.5542402806505377\n432,1,1.0,-0.3604995799057854,0.0717109903593987\n433,1,1.0,-0.4386099200416233,-0.8651858607251545\n434,0,1.0,-0.4402702669756882,-0.195029986235245\n435,0,1.0,0.3032458645675097,-1.0372346857526795\n436,1,1.0,-0.4333408019199423,-0.5682367355085381\n437,1,1.0,1.5358280369397963,1.1336084514545677\n438,1,1.0,1.0444229598809338,-1.706007604872449\n439,0,1.0,-0.6895667267398463,0.2197286472743179\n440,1,1.0,-1.0835887859738067,-0.5133199774445829\n441,1,1.0,-1.435345781022083,-0.3507862424218248\n442,1,1.0,1.9297909746680324,0.8369372594479194\n443,0,1.0,-0.338182401181062,-1.4017763122942484\n444,0,1.0,-1.6733718
505492423,-1.1502748432880705\n445,1,1.0,0.6428480304549449,-0.8565841265118209\n446,1,1.0,1.625043954688396,0.2684616284515225\n447,1,1.0,0.2876272826778734,0.1478022099240175\n448,1,1.0,0.3368801552005952,0.893746682993275\n449,1,1.0,-0.0654948493457917,0.1471922583989302\n450,0,1.0,-1.3491325809773853,-1.5042520420301004\n451,1,1.0,2.19922820165968,0.9176696435031358\n452,0,1.0,-1.0334672799120719,1.0862360341907649\n453,0,1.0,-0.9915011452555988,-0.8899359464273447\n454,0,1.0,-1.860269273300948,-1.4938459110286948\n455,0,1.0,1.2143771399004315,-0.1574495810904875\n456,0,1.0,-0.0257266277321376,-2.763283716356398\n457,1,1.0,0.8793234918769298,-0.4790330268995254\n458,1,1.0,-0.6848896091284365,-0.6895061753139731\n459,0,1.0,-0.5058512330043357,-1.0735551021865912\n460,0,1.0,-0.2349362672485728,-0.7195382062522444\n461,0,1.0,-1.525836738077298,0.3768448376636971\n462,1,1.0,1.343277042835544,1.4922969894105973\n463,0,1.0,-1.1425390778723443,-0.951202897699515\n464,0,1.0,-0.3226463317225551,-0.7402452426696127\n465,0,1.0,-1.2404890638754444,-1.5531842506526792\n466,1,1.0,-0.3536059317590624,0.3771460530559369\n467,0,1.0,-1.3282742623104344,0.5339134714454185\n468,0,1.0,-0.9258748330652584,-1.3421387779066227\n469,0,1.0,-1.4665246512288688,-0.5451657549430262\n470,1,1.0,-1.1812080835635677,-1.3493029874057316\n471,0,1.0,0.4135585813164385,-0.025350021067494\n472,0,1.0,-1.3243619524797448,0.409534672879704\n473,0,1.0,-0.7228225038935124,-1.514869732841602\n474,0,1.0,-2.20980025728101,-1.0093006645488696\n475,1,1.0,1.238750423806423,-0.7446564777409403\n476,1,1.0,-0.4186349451861238,-0.5618885655764582\n477,1,1.0,1.0857714428735843,1.8243557159731605\n478,0,1.0,-1.799327737479811,-0.2480898901303862\n479,0,1.0,-0.3253674708553154,-0.3914478506480767\n480,1,1.0,-0.5136395015989216,-0.9317499359334656\n481,0,1.0,-0.3081096323101271,-0.5011679303275849\n482,1,1.0,-0.2208773209943117,1.4017034313293963\n483,0,1.0,0.02167470940763,-0.3736350816473629\n484,1,1.0,0.08961694337
21431,1.2400084680697327\n485,1,1.0,0.4025748274273325,-0.8595955345264295\n486,1,1.0,0.0028195413080264,1.4772960576588925\n487,1,1.0,-0.0397108752283274,0.0221461207268718\n488,1,1.0,0.2719210349381246,0.6933413861564819\n489,0,1.0,-0.8869306477742079,-0.57935496421845\n490,0,1.0,-0.1052532576360905,-1.1272101493683535\n491,1,1.0,-0.3964232816485298,0.2347032941233135\n492,1,1.0,0.7519728559295678,-0.1073490103732342\n493,0,1.0,-0.8375571507510022,-0.0193217276753288\n494,0,1.0,-0.6531098282499894,0.4829876741298714\n495,0,1.0,-0.7959626582390963,-1.5185537150051427\n496,0,1.0,-2.230755547576115,-2.015629118756931\n497,1,1.0,0.1662432821120798,1.92140063364716\n498,0,1.0,-1.3813185589498524,-1.7651345098203135\n499,0,1.0,0.5973042353337923,-0.4216421296311065\n"
  },
  {
    "path": "src/estimagic/lollipop_plot.py",
    "content": "import math\n\nimport pandas as pd\nimport plotly.graph_objects as go\n\nfrom optimagic.config import PLOTLY_PALETTE, PLOTLY_TEMPLATE\nfrom optimagic.visualization.plotting_utilities import create_grid_plot, create_ind_dict\n\n\ndef lollipop_plot(\n    data,\n    *,\n    sharex=True,\n    plot_bar=True,\n    n_rows=1,\n    scatterplot_kws=None,\n    barplot_kws=None,\n    combine_plots_in_grid=True,\n    template=PLOTLY_TEMPLATE,\n    palette=PLOTLY_PALETTE,\n):\n    \"\"\"Make a lollipop plot.\n\n    Args:\n        data (pandas.DataFrame): The datapoints to be plotted. The whole data will be\n        plotted. Thus if you want to plot just some variables or rows you need\n        to restrict the dataset before passing it.\n        sharex (bool): Whether the x-axis is shared across variables, default True.\n        plot_bar (bool): Whether thin bars are plotted, default True.\n        n_rows (int): Number of rows for a grid if plots are combined\n            in a grid, default 1. The number of columns is determined automatically.\n        scatterplot_kws (dict): Keyword arguments to plot the dots of the lollipop plot\n            via the scatter function.\n        barplot_kws (dict): Keyword arguments to plot the lines of the lollipop plot\n            via the barplot function.\n        combine_plots_in_grid (bool): decide whether to return a one\n        figure containing subplots for each factor pair or a dictionary\n        of individual plots. Default True.\n        template (str): The template for the figure. Default is \"plotly_white\".\n        palette: The coloring palette for traces. 
Default is \"qualitative.Plotly\".\n\n    Returns:\n        plotly.Figure: The grid plot or dict of individual plots\n\n    \"\"\"\n    data, varnames = _harmonize_data(data)\n\n    scatter_dict = {\n        \"mode\": \"markers\",\n        \"marker\": {\"color\": palette[0]},\n        \"showlegend\": False,\n    }\n\n    bar_dict = {\n        \"orientation\": \"h\",\n        \"width\": 0.03,\n        \"marker\": {\"color\": palette[0]},\n        \"showlegend\": False,\n    }\n\n    scatterplot_kws = (\n        scatter_dict\n        if scatterplot_kws is None\n        else scatter_dict.update(\n            {k: v for k, v in scatterplot_kws.items() if k not in scatter_dict}\n        )\n    )\n    barplot_kws = (\n        bar_dict\n        if barplot_kws is None\n        else bar_dict.update(\n            {k: v for k, v in barplot_kws.items() if k not in bar_dict}\n        )\n    )\n\n    # container for individual plots\n    g_list = []\n    # container for titles\n    titles = []\n\n    # creating data traces for plotting faceted/individual plots\n    for indep_name in varnames:\n        g_ind = []\n        # dot plot using the scatter function\n        to_plot = data[data[\"indep\"] == indep_name]\n        trace_1 = go.Scatter(x=to_plot[\"values\"], y=to_plot[\"__name__\"], **scatter_dict)\n        g_ind.append(trace_1)\n\n        # bar plot\n        if plot_bar:\n            trace_2 = go.Bar(x=to_plot[\"values\"], y=to_plot[\"__name__\"], **bar_dict)\n        g_ind.append(trace_2)\n\n        g_list.append(g_ind)\n        titles.append(indep_name)\n\n    # common x range\n    lower_candidate = data[[\"indep\", \"values\"]].groupby(\"indep\").min().min()\n    upper_candidate = data[[\"indep\", \"values\"]].groupby(\"indep\").max().max()\n    padding = (upper_candidate - lower_candidate) / 10\n    lower = lower_candidate - padding\n    upper = upper_candidate + padding\n\n    common_dependencies = {\n        \"ind_list\": g_list,\n        \"names\": titles,\n        
\"share_xax\": sharex,\n        \"x_min\": lower,\n        \"x_max\": upper,\n    }\n    common_layout = {\n        \"template\": template,\n        \"margin\": {\"l\": 10, \"r\": 10, \"t\": 30, \"b\": 10},\n    }\n\n    # Plot with subplots\n    if combine_plots_in_grid:\n        n_cols = math.ceil(len(varnames) / n_rows)\n\n        g = create_grid_plot(\n            rows=n_rows,\n            cols=n_cols,\n            **common_dependencies,\n            kws={\"height\": 150 * n_rows, \"width\": 150 * n_cols, **common_layout},\n        )\n        out = g\n\n    # Dictionary for individual plots\n    else:\n        ind_dict = create_ind_dict(\n            **common_dependencies,\n            kws={\"height\": 150, \"width\": 150, \"title_x\": 0.5, **common_layout},\n        )\n        out = ind_dict\n\n    return out\n\n\ndef _harmonize_data(data):\n    if not isinstance(data, list):\n        data = [data]\n\n    to_concat = []\n    for i, _df in enumerate(data):\n        df = _df.copy()\n        df.columns = _make_string_index(df.columns)\n        df.index = _make_string_index(df.index)\n        df[\"__name__\"] = df.index\n        df[\"__hue__\"] = i\n        to_concat.append(df)\n\n    combined = pd.concat(to_concat)\n    # so that it is possibel to facet the strip plot\n    new_data = pd.melt(\n        combined, id_vars=[\"__name__\", \"__hue__\"], var_name=\"indep\", value_name=\"values\"\n    )\n\n    varnames = new_data[\"indep\"].unique()\n\n    return new_data, varnames\n\n\ndef _make_string_index(ind):\n    if isinstance(ind, pd.MultiIndex):\n        out = ind.map(lambda tup: \"_\".join(str(name) for name in tup)).tolist()\n    else:\n        out = ind.map(str).tolist()\n    return out\n"
  },
  {
    "path": "src/estimagic/ml_covs.py",
    "content": "\"\"\"Functions for inferences in maximum likelihood models.\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\nfrom estimagic.shared_covs import process_pandas_arguments\nfrom optimagic.exceptions import INVALID_INFERENCE_MSG\nfrom optimagic.utilities import robust_inverse\n\n\ndef cov_hessian(hess):\n    \"\"\"Covariance based on the negative inverse of the hessian of loglike.\n\n    While this method makes slightly weaker statistical assumptions than a covariance\n    estimate based on the outer product of gradients, it is numerically much more\n    problematic for the following reasons:\n\n    - It is much more difficult to estimate a hessian numerically or with automatic\n      differentiation than it is to estimate the gradient / jacobian\n    - The resulting hessian might not be positive definite and thus not invertible.\n\n    Args:\n        hess (numpy.ndarray): 2d array hessian matrix of dimension (nparams, nparams)\n\n    Returns:\n       numpy.ndarray: covariance matrix (nparams, nparams)\n\n\n    Resources: Marno Verbeek - A guide to modern econometrics :cite:`Verbeek2008`\n\n    \"\"\"\n    _hess, names = process_pandas_arguments(hess=hess)\n    info_matrix = -_hess\n    cov = robust_inverse(info_matrix, msg=INVALID_INFERENCE_MSG)\n\n    if \"params\" in names:\n        cov = pd.DataFrame(cov, columns=names[\"params\"], index=names[\"params\"])\n\n    return cov\n\n\ndef cov_jacobian(jac):\n    \"\"\"Covariance based on outer product of jacobian of loglikeobs.\n\n    Args:\n        jac (numpy.ndarray): 2d array jacobian matrix of dimension (nobs, nparams)\n\n    Returns:\n        numpy.ndarray: covariance matrix of size (nparams, nparams)\n\n\n    Resources: Marno Verbeek - A guide to modern econometrics.\n\n    \"\"\"\n    _jac, names = process_pandas_arguments(jac=jac)\n\n    info_matrix = _jac.T @ _jac\n    cov = robust_inverse(info_matrix, msg=INVALID_INFERENCE_MSG)\n\n    if \"params\" in names:\n        cov = pd.DataFrame(cov, 
columns=names[\"params\"], index=names[\"params\"])\n\n    return cov\n\n\ndef cov_robust(jac, hess):\n    \"\"\"Covariance of parameters based on HJJH dot product.\n\n    H stands for Hessian of the log likelihood function and J for Jacobian,\n    of the log likelihood per individual.\n\n    Args:\n        jac (numpy.ndarray): 2d array jacobian matrix of dimension (nobs, nparams)\n        hess (numpy.ndarray): 2d array hessian matrix of dimension (nparams, nparams)\n\n\n    Returns:\n        numpy.ndarray: covariance HJJH matrix (nparams, nparams)\n\n    Resources:\n        https://tinyurl.com/yym5d4cw\n\n    \"\"\"\n    _jac, _hess, names = process_pandas_arguments(jac=jac, hess=hess)\n\n    info_matrix_hess = -_hess\n    cov_hess = robust_inverse(info_matrix_hess, msg=INVALID_INFERENCE_MSG)\n\n    cov = cov_hess @ _jac.T @ _jac @ cov_hess\n\n    if \"params\" in names:\n        cov = pd.DataFrame(cov, columns=names[\"params\"], index=names[\"params\"])\n\n    return cov\n\n\ndef se_from_cov(cov):\n    \"\"\"Standard deviation of parameter estimates based on the function of choice.\n\n    Args:\n        cov (numpy.ndarray): Covariance matrix\n\n    Returns:\n        standard_errors (numpy.ndarray): 1d array with standard errors\n\n    \"\"\"\n    standard_errors = np.sqrt(np.diag(cov))\n\n    if isinstance(cov, pd.DataFrame):\n        standard_errors = pd.Series(standard_errors, index=cov.index)\n\n    return standard_errors\n\n\ndef cov_cluster_robust(jac, hess, design_info):\n    \"\"\"Cluster robust standard errors.\n\n    A cluster is a group of observations that correlate amongst each other,\n    but not between groups. Each cluster is seen as independent. As the number\n    of clusters increase, the standard errors approach robust standard errors.\n\n    Args:\n        jac (np.array): \"jacobian\" - an n x k + 1-dimensional array of first\n            derivatives of the pseudo-log-likelihood function w.r.t. 
the parameters\n        hess (np.array): \"hessian\" - a k + 1 x k + 1-dimensional array of\n            second derivatives of the pseudo-log-likelihood function w.r.t.\n            the parameters\n        design_info (pd.DataFrame): dataframe containing psu, stratum,\n            population/design weight and/or a finite population corrector (fpc)\n\n    Returns:\n        cluster_robust_se (np.array): a 1d array of k + 1 standard errors\n        cluster_robust_var (np.array): 2d variance-covariance matrix\n\n    \"\"\"\n    _jac, _hess, names = process_pandas_arguments(jac=jac, hess=hess)\n\n    cluster_meat = _clustering(_jac, design_info)\n    cov = _sandwich_step(_hess, cluster_meat)\n\n    if \"params\" in names:\n        cov = pd.DataFrame(cov, columns=names[\"params\"], index=names[\"params\"])\n\n    return cov\n\n\ndef cov_strata_robust(jac, hess, design_info):\n    \"\"\"Cluster robust standard errors.\n\n    A stratum is a group of observations that share common information. Each\n    stratum can be constructed based on age, gender, education, region, etc.\n    The function runs the same formulation for cluster_robust_se for each\n    stratum and returns the sum. Each stratum contain primary sampling units\n    (psu) or clusters. If observations are independent, but wish to have to\n    strata, make the psu column take the values of the index.\n\n    Args:\n        jac (np.array): \"jacobian\" - an n x k + 1-dimensional array of first\n            derivatives of the pseudo-log-likelihood function w.r.t. 
the parameters\n        hess (np.array): \"hessian\" - a k + 1 x k + 1-dimensional array of\n            second derivatives of the pseudo-log-likelihood function w.r.t.\n            the parameters\n        design_info (pd.DataFrame): dataframe containing psu, stratum,\n            population/design weight and/or a finite population corrector (fpc)\n\n    Returns:\n        strata_robust_se (np.array): a 1d array of k + 1 standard errors\n        strata_robust_var (np.array): 2d variance-covariance matrix\n\n    \"\"\"\n    _jac, _hess, names = process_pandas_arguments(jac=jac, hess=hess)\n    strata_meat = _stratification(_jac, design_info)\n    cov = _sandwich_step(_hess, strata_meat)\n\n    if \"params\" in names:\n        cov = pd.DataFrame(cov, columns=names[\"params\"], index=names[\"params\"])\n\n    return cov\n\n\ndef _sandwich_step(hess, meat):\n    \"\"\"The sandwich estimator for variance estimation.\n\n    This is used in several robust covariance formulae.\n\n    Args:\n        hess (np.array): \"hessian\" - a k + 1 x k + 1-dimensional array of\n            second derivatives of the pseudo-log-likelihood function w.r.t.\n            the parameters\n        meat (np.array): the variance of the total scores\n\n    Returns:\n        se (np.array): a 1d array of k + 1 standard errors\n        var (np.array): 2d variance-covariance matrix\n\n    \"\"\"\n    invhessian = robust_inverse(hess, INVALID_INFERENCE_MSG)\n    var = np.dot(np.dot(invhessian, meat), invhessian)\n    return var\n\n\ndef _clustering(jac, design_info):\n    \"\"\"Variance estimation for each cluster.\n\n    The function takes the sum of the jacobian observations for each cluster.\n    The result is the meat of the sandwich estimator.\n\n    Args:\n        jac (np.array): \"jacobian\" - an n x k + 1-dimensional array of first\n            derivatives of the pseudo-log-likelihood function w.r.t. 
the parameters\n        design_info (pd.DataFrame): dataframe containing psu, stratum,\n            population/design weight and/or a finite population corrector (fpc)\n\n    Returns:\n        cluster_meat (np.array): 2d square array of length k + 1. Variance of\n            the likelihood equation (Pg.557, 14-10, Greene 7th edition)\n\n    \"\"\"\n    list_of_clusters = design_info[\"psu\"].unique()\n    meat = np.zeros([len(jac[0, :]), len(jac[0, :])])\n    for psu in list_of_clusters:\n        psu_scores = jac[design_info[\"psu\"] == psu]\n        psu_scores_sum = psu_scores.sum(axis=0)\n        meat += np.dot(psu_scores_sum[:, None], psu_scores_sum[:, None].T)\n    cluster_meat = len(list_of_clusters) / (len(list_of_clusters) - 1) * meat\n    return cluster_meat\n\n\ndef _stratification(jac, design_info):\n    \"\"\"Variance estimation for each stratum.\n\n    The function takes the sum of the jacobian observations for each cluster\n    within strata. The result is the meat of the sandwich estimator.\n\n    Args:\n        design_options (pd.DataFrame): dataframe containing psu, stratum,\n            population/design weight and/or a finite population corrector (fpc)\n        jac (np.array): \"jacobian\" - an n x k + 1-dimensional array of first\n            derivatives of the pseudo-log-likelihood function w.r.t. the parameters\n\n    Returns:\n        strata_meat (np.array): 2d square array of length k + 1. 
Variance of\n        the likelihood equation\n\n    \"\"\"\n    n_params = len(jac[0, :])\n    stratum_col = design_info[\"strata\"]\n    # Stratification does not require clusters\n    if \"psu\" not in design_info:\n        design_info[\"psu\"] = design_info.index\n    else:\n        pass\n    psu_col = design_info[\"psu\"]\n    strata_meat = np.zeros([n_params, n_params])\n    # Variance estimation per stratum\n    for stratum in stratum_col.unique():\n        psu_in_strata = psu_col[stratum_col == stratum].unique()\n        psu_jac = np.zeros([n_params])\n        if \"fpc\" in design_info:\n            fpc = design_info[\"fpc\"][stratum_col == stratum].unique()\n        else:\n            fpc = 1\n        # psu_jac stacks the sum of the observations for each cluster.\n        for psu in psu_in_strata:\n            psu_jac = np.vstack([psu_jac, np.sum(jac[psu_col == psu], axis=0)])\n        psu_jac_mean = np.sum(psu_jac, axis=0) / len(psu_in_strata)\n        if len(psu_in_strata) > 1:\n            mid_step = np.dot(\n                (psu_jac[1:] - psu_jac_mean).T, (psu_jac[1:] - psu_jac_mean)\n            )\n            strata_meat += (\n                fpc * (len(psu_in_strata) / (len(psu_in_strata) - 1)) * mid_step\n            )\n        # Apply \"grand-mean\" method for single unit stratum\n        elif len(psu_in_strata) == 1:\n            strata_meat += fpc * np.dot(psu_jac[1:].T, psu_jac[1:])\n\n    return strata_meat\n"
  },
  {
    "path": "src/estimagic/msm_covs.py",
    "content": "import pandas as pd\n\nfrom estimagic.shared_covs import process_pandas_arguments\nfrom optimagic.exceptions import INVALID_INFERENCE_MSG\nfrom optimagic.utilities import robust_inverse\n\n\ndef cov_robust(jac, weights, moments_cov):\n    \"\"\"Calculate the cov of msm estimates with asymptotically non-efficient weights.\n\n    Note that asymptotically non-efficient weights are typically preferrable because\n    they lead to less finite sample bias.\n\n    Args:\n        jac (np.ndarray or pandas.DataFrame): Numpy array or DataFrame with the jacobian\n            of simulate_moments with respect to params. The derivative needs to be taken\n            at the estimated parameters. Has shape n_moments, n_params.\n        weights (np.ndarray): The weighting matrix for msm estimation.\n        moments_cov (np.ndarray): The covariance matrix of the empirical moments.\n\n    Returns:\n        numpy.ndarray: numpy array with covariance matrix.\n\n    \"\"\"\n    _jac, _weights, _moments_cov, names = process_pandas_arguments(\n        jac=jac, weights=weights, moments_cov=moments_cov\n    )\n\n    bread = robust_inverse(\n        _jac.T @ _weights @ _jac,\n        msg=INVALID_INFERENCE_MSG,\n    )\n\n    butter = _jac.T @ _weights @ _moments_cov @ _weights @ _jac\n\n    cov = bread @ butter @ bread\n\n    if names:\n        cov = pd.DataFrame(cov, columns=names.get(\"params\"), index=names.get(\"params\"))\n\n    return cov\n\n\ndef cov_optimal(jac, weights):\n    \"\"\"Calculate the cov of msm estimates with asymptotically efficient weights.\n\n    Note that asymptotically efficient weights have substantial finite sample\n    bias and are typically not a good choice.\n\n    Args:\n        jac (np.ndarray or pandas.DataFrame): Numpy array or DataFrame with the jacobian\n            of simulate_moments with respect to params. The derivative needs to be taken\n            at the estimated parameters. 
Has shape n_moments, n_params.\n        weights (np.ndarray): The weighting matrix for msm estimation.\n        moments_cov (np.ndarray): The covariance matrix of the empirical moments.\n\n    Returns:\n        numpy.ndarray: numpy array with covariance matrix.\n\n    \"\"\"\n    _jac, _weights, names = process_pandas_arguments(jac=jac, weights=weights)\n\n    cov = robust_inverse(_jac.T @ _weights @ _jac, msg=INVALID_INFERENCE_MSG)\n\n    if names:\n        cov = pd.DataFrame(cov, columns=names.get(\"params\"), index=names.get(\"params\"))\n\n    return cov\n"
  },
  {
    "path": "src/estimagic/msm_sensitivity.py",
    "content": "\"\"\"Implement local sensitivity measures for method of moments.\n\nmeasures:\nm1: Andrews, Gentzkow & Shapiro (2017, Quarterly Journal of Economics)\n\nepsilon 2-6: Honore, Jorgensen & de Paula\n(https://onlinelibrary.wiley.com/doi/full/10.1002/jae.2779)\n\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\nfrom estimagic.msm_covs import cov_robust\nfrom estimagic.shared_covs import process_pandas_arguments\nfrom optimagic.exceptions import INVALID_SENSITIVITY_MSG\nfrom optimagic.utilities import robust_inverse\n\n\ndef calculate_sensitivity_to_bias(jac, weights):\n    \"\"\"Calculate the sensitivity to bias.\n\n    The sensitivity measure is calculated for each parameter wrt each moment.\n\n    It answers the following question: How strongly would the parameter estimates be\n        biased if the kth moment was misspecified, i.e not zero in expectation?\n\n    Args:\n        jac (np.ndarray or pandas.DataFrame): The jacobian of simulate_moments with\n            respect to params, evaluated at the  point estimates.\n        weights (np.ndarray or pandas.DataFrame): The weighting matrix used for\n            msm estimation.\n\n    Returns:\n        np.ndarray or pd.DataFrame: Sensitivity measure with shape (n_params, n_moments)\n\n    \"\"\"\n    _jac, _weights, names = process_pandas_arguments(jac=jac, weights=weights)\n    gwg = _sandwich(_jac, _weights)\n    gwg_inverse = robust_inverse(gwg, INVALID_SENSITIVITY_MSG)\n    m1 = -gwg_inverse @ _jac.T @ _weights\n\n    if names:\n        m1 = pd.DataFrame(m1, index=names.get(\"params\"), columns=names.get(\"moments\"))\n\n    return m1\n\n\ndef calculate_fundamental_sensitivity_to_noise(\n    jac, weights, moments_cov, params_cov_opt\n):\n    \"\"\"Calculate the fundamental sensitivity to noise.\n\n    The sensitivity measure is calculated for each parameter wrt each moment.\n\n    It answers the following question: How much precision would be lost if the kth\n        moment was subject to a 
little additional noise if the optimal weighting matrix\n        is used?\n\n    Args:\n        jac (np.ndarray or pandas.DataFrame): The jacobian of simulate_moments with\n            respect to params, evaluated at the  point estimates.\n        weights (np.ndarray or pandas.DataFrame): The weighting matrix used for\n            msm estimation.\n        moments_cov (numpy.ndarray or pandas.DataFrame): The covariance matrix of the\n            empirical moments.\n        params_cov_opt (numpy.ndarray or pandas.DataFrame): The covariance matrix of the\n            parameter estimates. Note that this needs to be the parameter covariance\n            matrix using the formula for asymptotically optimal MSM.\n\n    Returns:\n        np.ndarray or pd.DataFrame: Sensitivity measure with shape (n_params, n_moments)\n\n    \"\"\"\n    _jac, _weights, _moments_cov, _params_cov_opt, names = process_pandas_arguments(\n        jac=jac, weights=weights, moments_cov=moments_cov, params_cov_opt=params_cov_opt\n    )\n\n    m2 = []\n\n    for k in range(len(_weights)):\n        mask_matrix_o = np.zeros(shape=_weights.shape)\n        mask_matrix_o[k, k] = 1\n\n        meat = _sandwich_plus(_jac, _weights, mask_matrix_o)\n\n        m2k = _params_cov_opt @ meat @ _params_cov_opt\n        m2k = np.diagonal(m2k)\n\n        m2.append(m2k)\n\n    m2 = np.array(m2).T\n\n    moments_variances = np.diagonal(_moments_cov)\n    params_variances = np.diagonal(_params_cov_opt)\n\n    e2 = m2 / params_variances.reshape(-1, 1)\n    e2 = e2 * moments_variances\n\n    if names:\n        e2 = pd.DataFrame(e2, index=names.get(\"params\"), columns=names.get(\"moments\"))\n\n    return e2\n\n\ndef calculate_actual_sensitivity_to_noise(\n    sensitivity_to_bias, weights, moments_cov, params_cov\n):\n    \"\"\"Calculate the actual sensitivity to noise.\n\n    The sensitivity measure is calculated for each parameter wrt each moment.\n\n    It answers the following question: How much precision would be 
lost if the kth\n        moment was subjet to a little additional noise if \"weights\" is used as\n        weighting matrix?\n\n    Args:\n        sensitivity_to_bias (np.ndarray or pandas.DataFrame): See\n            ``calculate_sensitivity_to_bias`` for details.\n        weights (np.ndarray or pandas.DataFrame): The weighting matrix used for\n            msm estimation.\n        moments_cov (numpy.ndarray or pandas.DataFrame): The covariance matrix of the\n            empirical moments.\n        params_cov (numpy.ndarray or pandas.DataFrame): The covariance matrix of the\n            parameter estimates.\n\n    Returns:\n        np.ndarray or pd.DataFrame: Sensitivity measure with shape (n_params, n_moments)\n\n    \"\"\"\n    if isinstance(sensitivity_to_bias, pd.DataFrame):\n        sensitivity_to_bias = sensitivity_to_bias.to_numpy()\n\n    _weights, _moments_cov, _params_cov, names = process_pandas_arguments(\n        weights=weights, moments_cov=moments_cov, params_cov=params_cov\n    )\n\n    m3 = []\n\n    for k in range(len(_weights)):\n        mask_matrix_o = np.zeros(shape=_weights.shape)\n        mask_matrix_o[k, k] = 1\n\n        m3k = _sandwich(sensitivity_to_bias.T, mask_matrix_o)\n        m3k = np.diagonal(m3k)\n\n        m3.append(m3k)\n\n    m3 = np.array(m3).T\n\n    moments_variances = np.diagonal(_moments_cov)\n    params_variances = np.diagonal(_params_cov)\n\n    e3 = m3 / params_variances.reshape(-1, 1)\n    e3 = e3 * moments_variances\n\n    if names:\n        e3 = pd.DataFrame(e3, index=names.get(\"params\"), columns=names.get(\"moments\"))\n\n    return e3\n\n\ndef calculate_actual_sensitivity_to_removal(jac, weights, moments_cov, params_cov):\n    \"\"\"Calculate the actual sensitivity to removal.\n\n    The sensitivity measure is calculated for each parameter wrt each moment.\n\n    It answers the following question: How much precision would be lost if the kth\n        moment was excluded from the estimation if \"weights\" is used as 
weighting\n        matrix?\n\n    Args:\n        sensitivity_to_bias (np.ndarray or pandas.DataFrame): See\n            ``calculate_sensitivity_to_bias`` for details.\n        weights (np.ndarray or pandas.DataFrame): The weighting matrix used for\n            msm estimation.\n        moments_cov (numpy.ndarray or pandas.DataFrame): The covariance matrix of the\n            empirical moments.\n        params_cov (numpy.ndarray or pandas.DataFrame): The covariance matrix of the\n            parameter estimates.\n\n    Returns:\n        np.ndarray or pd.DataFrame: Sensitivity measure with shape (n_params, n_moments)\n\n    \"\"\"\n    m4 = []\n\n    _jac, _weights, _moments_cov, _params_cov, names = process_pandas_arguments(\n        jac=jac, weights=weights, moments_cov=moments_cov, params_cov=params_cov\n    )\n\n    for k in range(len(_weights)):\n        weight_tilde_k = np.copy(_weights)\n        weight_tilde_k[k, :] = 0\n        weight_tilde_k[:, k] = 0\n\n        sigma_tilde_k = cov_robust(_jac, weight_tilde_k, _moments_cov)\n\n        m4k = sigma_tilde_k - _params_cov\n        m4k = m4k.diagonal()\n\n        m4.append(m4k)\n\n    m4 = np.array(m4).T\n\n    params_variances = np.diagonal(_params_cov)\n    e4 = m4 / params_variances.reshape(-1, 1)\n\n    if names:\n        e4 = pd.DataFrame(e4, index=names.get(\"params\"), columns=names.get(\"moments\"))\n\n    return e4\n\n\ndef calculate_fundamental_sensitivity_to_removal(jac, moments_cov, params_cov_opt):\n    \"\"\"Calculate the fundamental sensitivity to removal.\n\n    The sensitivity measure is calculated for each parameter wrt each moment.\n\n    It answers the following question: How much precision would be lost if the kth\n        moment was excluded from the estimation with if the optimal weighting matrix is\n        used?\n\n    Args:\n        jac (np.ndarray or pandas.DataFrame): The jacobian of simulate_moments with\n            respect to params, evaluated at the  point estimates.\n        
weights (np.ndarray or pandas.DataFrame): The weighting matrix used for\n            msm estimation.\n        moments_cov (numpy.ndarray or pandas.DataFrame): The covariance matrix of the\n            empirical moments.\n        params_cov_opt (numpy.ndarray or pandas.DataFrame): The covariance matrix of the\n            parameter estimates. Note that this needs to be the parameter covariance\n            matrix using the formula for asymptotically optimal MSM.\n\n    Returns:\n        np.ndarray or pd.DataFrame: Sensitivity measure with shape (n_params, n_moments)\n\n    \"\"\"\n    _jac, _moments_cov, _params_cov_opt, names = process_pandas_arguments(\n        jac=jac,\n        moments_cov=moments_cov,\n        params_cov_opt=params_cov_opt,\n    )\n    m5 = []\n\n    for k in range(len(_moments_cov)):\n        g_k = np.copy(_jac)\n        g_k = np.delete(g_k, k, axis=0)\n\n        s_k = np.copy(_moments_cov)\n        s_k = np.delete(s_k, k, axis=0)\n        s_k = np.delete(s_k, k, axis=1)\n\n        sigma_k = _sandwich(g_k, robust_inverse(s_k, INVALID_SENSITIVITY_MSG))\n        sigma_k = robust_inverse(sigma_k, INVALID_SENSITIVITY_MSG)\n\n        m5k = sigma_k - _params_cov_opt\n        m5k = m5k.diagonal()\n\n        m5.append(m5k)\n\n    m5 = np.array(m5).T\n\n    params_variances = np.diagonal(_params_cov_opt)\n    e5 = m5 / params_variances.reshape(-1, 1)\n\n    if names:\n        e5 = pd.DataFrame(e5, index=names.get(\"params\"), columns=names.get(\"moments\"))\n\n    return e5\n\n\ndef calculate_sensitivity_to_weighting(jac, weights, moments_cov, params_cov):\n    \"\"\"Calculate the sensitivity to weighting.\n\n    The sensitivity measure is calculated for each parameter wrt each moment.\n\n    It answers the following question: How would the precision change if the weight of\n        the kth moment is increased a little?\n\n    Args:\n        sensitivity_to_bias (np.ndarray or pandas.DataFrame): See\n            ``calculate_sensitivity_to_bias`` for 
details.\n        weights (np.ndarray or pandas.DataFrame): The weighting matrix used for\n            msm estimation.\n        moments_cov (numpy.ndarray or pandas.DataFrame): The covariance matrix of the\n            empirical moments.\n        params_cov (numpy.ndarray or pandas.DataFrame): The covariance matrix of the\n            parameter estimates.\n\n    Returns:\n        np.ndarray or pd.DataFrame: Sensitivity measure with shape (n_params, n_moments)\n\n    \"\"\"\n    _jac, _weights, _moments_cov, _params_cov, names = process_pandas_arguments(\n        jac=jac, weights=weights, moments_cov=moments_cov, params_cov=params_cov\n    )\n    gwg_inverse = _sandwich(_jac, _weights)\n    gwg_inverse = robust_inverse(gwg_inverse, INVALID_SENSITIVITY_MSG)\n\n    m6 = []\n\n    for k in range(len(_weights)):\n        mask_matrix_o = np.zeros(shape=_weights.shape)\n        mask_matrix_o[k, k] = 1\n\n        m6k_1 = gwg_inverse @ _sandwich(_jac, mask_matrix_o) @ _params_cov\n        m6k_2 = (\n            gwg_inverse\n            @ _jac.T\n            @ mask_matrix_o\n            @ _moments_cov\n            @ _weights\n            @ _jac\n            @ gwg_inverse\n        )\n        m6k_3 = (\n            gwg_inverse\n            @ _jac.T\n            @ _weights\n            @ _moments_cov\n            @ mask_matrix_o\n            @ _jac\n            @ gwg_inverse\n        )\n        m6k_4 = _params_cov @ _sandwich(_jac, mask_matrix_o) @ gwg_inverse\n\n        m6k = -m6k_1 + m6k_2 + m6k_3 - m6k_4\n        m6k = m6k.diagonal()\n\n        m6.append(m6k)\n\n    m6 = np.array(m6).T\n\n    weights_diagonal = np.diagonal(_weights)\n    params_variances = np.diagonal(_params_cov)\n\n    e6 = m6 / params_variances.reshape(-1, 1)\n    e6 = e6 * weights_diagonal\n\n    if names:\n        e6 = pd.DataFrame(e6, index=names.get(\"params\"), columns=names.get(\"moments\"))\n\n    return e6\n\n\ndef _sandwich(a, b):\n    \"\"\"Calculate the sandwich product of two matrices: a.T * b 
* a.\"\"\"\n    sandwich = a.T @ b @ a\n    return sandwich\n\n\ndef _sandwich_plus(a, b, c):\n    \"\"\"Calculate the sandwich product of three matrices: a.T * b.T * c * b * a.\"\"\"\n    sandwich = a.T @ b.T @ c @ b @ a\n    return sandwich\n"
  },
  {
    "path": "src/estimagic/msm_weighting.py",
    "content": "import functools\n\nimport numpy as np\nimport pandas as pd\nfrom pybaum import tree_just_flatten\nfrom scipy.linalg import block_diag\n\nfrom estimagic.bootstrap import bootstrap\nfrom optimagic.parameters.block_trees import block_tree_to_matrix, matrix_to_block_tree\nfrom optimagic.parameters.tree_registry import get_registry\nfrom optimagic.utilities import robust_inverse\n\n\ndef get_moments_cov(\n    data, calculate_moments, *, moment_kwargs=None, bootstrap_kwargs=None\n):\n    \"\"\"Bootstrap the covariance matrix of the moment conditions.\n\n    Args:\n        data (pandas.DataFrame): DataFrame with empirical data.\n        calculate_moments (callable): Function that calculates that takes data and\n            moment_kwargs as arguments and returns a 1d numpy array or pandas Series\n            with moment conditions.\n        moment_kwargs (dict): Additional keyword arguments for calculate_moments.\n        bootstrap_kwargs (dict): Additional keyword arguments that govern the\n            bootstrapping. 
Allowed arguments are \"n_draws\", \"seed\", \"n_cores\",\n            \"batch_evaluator\", \"weight_by\", \"cluster_by\" and \"error_handling\".\n            For details see the bootstrap function.\n\n    Returns:\n        pandas.DataFrame or numpy.ndarray: The covariance matrix of the moment\n            conditions for msm estimation.\n\n    \"\"\"\n    moment_kwargs = {} if moment_kwargs is None else moment_kwargs\n    bootstrap_kwargs = {} if bootstrap_kwargs is None else bootstrap_kwargs\n    valid_bs_kwargs = {\n        \"n_cores\",\n        \"n_draws\",\n        \"seed\",\n        \"batch_evaluator\",\n        \"weight_by\",\n        \"cluster_by\",\n        \"error_handling\",\n        \"existing_result\",\n        \"outcome_kwargs\",\n    }\n    problematic = set(bootstrap_kwargs).difference(valid_bs_kwargs)\n    if problematic:\n        raise ValueError(f\"Invalid bootstrap_kwargs: {problematic}\")\n\n    first_eval = calculate_moments(data, **moment_kwargs)\n\n    registry = get_registry(extended=True)\n\n    @functools.wraps(calculate_moments)\n    def func(data, **kwargs):\n        raw = calculate_moments(data, **kwargs)\n        out = pd.Series(\n            tree_just_flatten(raw, registry=registry)\n        )  # xxxx won't be necessary soon!\n        return out\n\n    cov_arr = bootstrap(\n        data=data, outcome=func, outcome_kwargs=moment_kwargs, **bootstrap_kwargs\n    ).cov()\n\n    if isinstance(cov_arr, pd.DataFrame):\n        cov_arr = cov_arr.to_numpy()  # xxxx won't be necessary soon\n\n    cov = matrix_to_block_tree(cov_arr, first_eval, first_eval)\n\n    return cov\n\n\ndef get_weighting_matrix(\n    moments_cov, method, empirical_moments, clip_value=1e-6, return_type=\"pytree\"\n):\n    \"\"\"Calculate a weighting matrix from moments_cov.\n\n    Args:\n        moments_cov (pandas.DataFrame or numpy.ndarray): Square DataFrame or Array\n            with the covariance matrix of the moment conditions for msm estimation.\n        method 
(str): One of \"optimal\", \"diagonal\", or \"identity\".\n        empirical_moments (pytree): Pytree containing empirical moments. Used to get\n            the tree structure\n        clip_value (float): Bound at which diagonal elements of the moments_cov are\n            clipped to avoid dividing by zero.\n        return_type (str): One of \"pytree\", \"array\" or \"pytree_and_array\"\n\n    Returns:\n        pandas.DataFrame or numpy.ndarray: Weighting matrix with the same shape as\n            moments_cov.\n\n    \"\"\"\n    fast_path = isinstance(moments_cov, np.ndarray) and moments_cov.ndim == 2\n\n    if fast_path:\n        _internal_cov = moments_cov\n    else:\n        _internal_cov = block_tree_to_matrix(\n            moments_cov,\n            outer_tree=empirical_moments,\n            inner_tree=empirical_moments,\n        )\n\n    if method == \"optimal\":\n        array_weights = robust_inverse(_internal_cov)\n    elif method == \"diagonal\":\n        diagonal_values = 1 / np.clip(np.diagonal(_internal_cov), clip_value, np.inf)\n        array_weights = np.diag(diagonal_values)\n    elif method == \"identity\":\n        array_weights = np.identity(_internal_cov.shape[0])\n    else:\n        raise ValueError(f\"Invalid method: {method}\")\n\n    if return_type == \"array\" or (fast_path and \"_and_\" not in return_type):\n        out = array_weights\n    elif fast_path:\n        out = (array_weights, array_weights)\n    else:\n        tree_weights = matrix_to_block_tree(\n            array_weights,\n            outer_tree=empirical_moments,\n            inner_tree=empirical_moments,\n        )\n        if return_type == \"pytree\":\n            out = tree_weights\n        else:\n            out = (tree_weights, array_weights)\n\n    return out\n\n\ndef _assemble_block_diagonal_matrix(matrices):\n    \"\"\"Build a block diagonal matrix out of matrices.\n\n    Args:\n        matrices (list): List of square numpy arrays or DataFrames with the building\n     
       blocks for the block diagonal matrix.\n\n    Returns:\n        pandas.DataFrame or numpy.ndarray: The block diagonal matrix.\n\n    \"\"\"\n    values = block_diag(*matrices)\n\n    if all(isinstance(mat, pd.DataFrame) for mat in matrices):\n        to_concat = [pd.Series(index=mat.index, dtype=float) for mat in matrices]\n        combined_index = pd.concat(to_concat).index\n        out = pd.DataFrame(values, index=combined_index, columns=combined_index)\n    else:\n        out = values\n    return out\n"
  },
  {
    "path": "src/estimagic/py.typed",
    "content": ""
  },
  {
    "path": "src/estimagic/shared_covs.py",
    "content": "from typing import NamedTuple\n\nimport numpy as np\nimport pandas as pd\nimport scipy\nfrom pybaum import tree_just_flatten, tree_unflatten\n\nfrom optimagic.parameters.block_trees import matrix_to_block_tree\nfrom optimagic.parameters.tree_registry import get_registry\n\n\ndef transform_covariance(\n    internal_params,\n    internal_cov,\n    converter,\n    rng,\n    n_samples,\n    bounds_handling,\n):\n    \"\"\"Transform the internal covariance matrix to an external one, given constraints.\n\n    Args:\n        internal_params (InternalParams): NamedTuple with entries:\n            - value (np.ndarray): Internal parameter values.\n            - lower_bounds (np.ndarray): Lower bounds on the internal params.\n            - upper_bounds (np.ndarray): Upper bounds on the internal params.\n            - soft_lower_bounds (np.ndarray): Soft lower bounds on the internal params.\n            - soft_upper_bounds (np.ndarray): Soft upper bounds on the internal params.\n            - name (list): List of names of the external parameters.\n            - free_mask (np.ndarray): Boolean mask representing which external parameter\n              is free.\n        internal_cov (np.ndarray or pandas.DataFrame) with a covariance matrix of the\n            internal parameter vector. For background information about internal and\n            external params see :ref:`implementation_of_constraints`.\n        constraints (list): List with constraint dictionaries.\n            See :ref:`constraints`.\n        rng (numpy.random.Generator): A random number generator.\n        n_samples (int): Number of samples used to transform the covariance matrix of\n            the internal parameter vector into the covariance matrix of the external\n            parameters.\n        bounds_handling (str): One of \"clip\", \"raise\", \"ignore\". Determines how bounds\n            are handled. 
If \"clip\", confidence intervals are clipped at the bounds.\n            Standard errors are only adjusted if a sampling step is necessary due to\n            additional constraints. If \"raise\" and any lower or upper bound is binding,\n            we raise an error. If \"ignore\", boundary problems are simply ignored.\n\n    Returns:\n        pd.DataFrame: Quadratic DataFrame containing the covariance matrix of the free\n            parameters. If parameters were fixed (explicitly or by other constraints),\n            the index is a subset of params.index. The columns are the same as the\n            index.\n\n    \"\"\"\n    if converter.has_transforming_constraints:\n        _from_internal = converter.params_from_internal\n\n        is_free = internal_params.free_mask\n        lower_bounds = internal_params.lower_bounds\n        upper_bounds = internal_params.upper_bounds\n\n        sample = rng.multivariate_normal(\n            mean=internal_params.values,\n            cov=internal_cov,\n            size=n_samples,\n        )\n        transformed_free = []\n        for params in sample:\n            if bounds_handling == \"clip\":\n                x = np.clip(params, a_min=lower_bounds, a_max=upper_bounds)\n            elif bounds_handling == \"raise\":\n                if (params < lower_bounds).any() or (params > upper_bounds).any():\n                    raise ValueError()\n            else:\n                x = params\n\n            transformed = _from_internal(x=x, return_type=\"flat\")\n            transformed_free.append(transformed[is_free])\n\n        free_cov = np.cov(\n            np.array(transformed_free),\n            rowvar=False,\n        )\n\n    else:\n        free_cov = internal_cov\n\n    return free_cov\n\n\ndef calculate_summary_data_estimation(\n    estimation_result,\n    free_estimates,\n    ci_level,\n    method,\n    n_samples,\n    bounds_handling,\n    seed,\n):\n    se = estimation_result.se(\n        method=method, 
n_samples=n_samples, bounds_handling=bounds_handling, seed=seed\n    )\n    lower, upper = estimation_result.ci(\n        method=method,\n        n_samples=n_samples,\n        ci_level=ci_level,\n        bounds_handling=bounds_handling,\n        seed=seed,\n    )\n    p_values = estimation_result.p_values(\n        method=method, n_samples=n_samples, bounds_handling=bounds_handling, seed=seed\n    )\n    summary_data = {\n        \"value\": estimation_result.params,\n        \"standard_error\": se,\n        \"ci_lower\": lower,\n        \"ci_upper\": upper,\n        \"p_value\": p_values,\n        \"free\": free_estimates.free_mask,\n    }\n    return summary_data\n\n\ndef calculate_estimation_summary(\n    summary_data,\n    names,\n    free_names,\n):\n    \"\"\"Create estimation summary using pre-calculated results.\n\n    Args:\n        summary_data (dict): Dictionary with entries ['params', 'p_value', 'ci_lower',\n        'ci_upper', 'standard_error'].\n        names (List[str]): List of parameter names, corresponding to result_object.\n        free_names (List[str]): List of parameter names for free parameters.\n\n    Returns:\n        pytree: A pytree with the same structure as params. Each leaf in the params\n            tree is replaced by a DataFrame containing columns \"value\",\n            \"standard_error\", \"pvalue\", \"ci_lower\" and \"ci_upper\".  Parameters that do\n            not have a standard error (e.g. because they were fixed during estimation)\n            contain NaNs in all but the \"value\" column. 
The value column is only\n            reproduced for convenience.\n\n    \"\"\"\n    # ==================================================================================\n    # Flatten summary and construct data frame for flat estimates\n    # ==================================================================================\n\n    registry = get_registry(extended=True)\n    flat_data = {\n        key: tree_just_flatten(val, registry=registry)\n        for key, val in summary_data.items()\n    }\n\n    df = pd.DataFrame(flat_data, index=names)\n\n    stars = pd.cut(\n        df.loc[free_names, \"p_value\"],\n        bins=[-1, 0.01, 0.05, 0.1, 2],\n        labels=[\"***\", \"**\", \"*\", \"\"],\n    )\n\n    df[\"stars\"] = stars\n\n    # ==================================================================================\n    # Map summary data into params tree structure\n    # ==================================================================================\n\n    # create tree with values corresponding to indices of df\n    indices = tree_unflatten(summary_data[\"value\"], names, registry=registry)\n\n    estimates_flat = tree_just_flatten(summary_data[\"value\"])\n    indices_flat = tree_just_flatten(indices)\n\n    # use index chunks in indices_flat to access the corresponding sub data frame of df,\n    # and use the index information stored in estimates_flat to form the correct (multi)\n    # index for the resulting leaf.\n    summary_flat = []\n    for index_leaf, params_leaf in zip(indices_flat, estimates_flat, strict=False):\n        if np.isscalar(params_leaf):\n            loc = [index_leaf]\n            index = [0]\n        elif isinstance(params_leaf, pd.DataFrame) and \"value\" in params_leaf:\n            loc = index_leaf[\"value\"].to_numpy().flatten()\n            index = params_leaf.index\n        elif isinstance(params_leaf, pd.DataFrame):\n            loc = index_leaf.to_numpy().flatten()\n            # use product of existing index and columns 
for regular pd.DataFrame\n            index = pd.MultiIndex.from_tuples(\n                [\n                    (*row, col) if isinstance(row, tuple) else (row, col)\n                    for row in params_leaf.index\n                    for col in params_leaf.columns\n                ]\n            )\n        elif isinstance(params_leaf, pd.Series):\n            loc = index_leaf.to_numpy().flatten()\n            index = params_leaf.index\n        else:\n            # array case (numpy or jax)\n            loc = index_leaf.flatten()\n            if params_leaf.ndim == 1:\n                index = pd.RangeIndex(stop=params_leaf.size)\n            else:\n                index = pd.MultiIndex.from_arrays(\n                    np.unravel_index(np.arange(params_leaf.size), params_leaf.shape)\n                )\n\n        df_chunk = df.loc[loc]\n        df_chunk.index = index\n\n        summary_flat.append(df_chunk)\n\n    summary = tree_unflatten(summary_data[\"value\"], summary_flat)\n    return summary\n\n\ndef process_pandas_arguments(**kwargs):\n    \"\"\"Convert pandas objects to arrays and extract names of moments and parameters.\n\n    This works for any number of keyword arguments. The result is a tuple containing\n    numpy arrays in same order as the keyword arguments and a dictionary with the\n    separated index objects as last entry. This dictionary contains the entries\n    \"moments\" and \"params\" for the identified moment names and parameter names.\n\n    The keyword arguments \"jac\", \"hess\", \"weights\" and \"moments_cov\" are used to extract\n    the names. 
Other keyword arguments are simply converted to numpy arrays.\n\n    \"\"\"\n    param_name_candidates = {}\n    moment_name_candidates = {}\n\n    if \"jac\" in kwargs:\n        jac = kwargs[\"jac\"]\n        if isinstance(jac, pd.DataFrame):\n            param_name_candidates[\"jac\"] = jac.columns\n            moment_name_candidates[\"jac\"] = jac.index\n\n    if \"hess\" in kwargs:\n        hess = kwargs[\"hess\"]\n        if isinstance(hess, pd.DataFrame):\n            param_name_candidates[\"hess\"] = hess.index\n\n    if \"weights\" in kwargs:\n        weights = kwargs[\"weights\"]\n        if isinstance(weights, pd.DataFrame):\n            moment_name_candidates[\"weights\"] = weights.index\n\n    if \"moments_cov\" in kwargs:\n        moments_cov = kwargs[\"moments_cov\"]\n        if isinstance(moments_cov, pd.DataFrame):\n            moment_name_candidates[\"moments_cov\"] = moments_cov.index\n\n    names = {}\n    if param_name_candidates:\n        _check_names_coincide(param_name_candidates)\n        names[\"params\"] = list(param_name_candidates.values())[0]\n    if moment_name_candidates:\n        _check_names_coincide(moment_name_candidates)\n        names[\"moments\"] = list(moment_name_candidates.values())[0]\n\n    # order of outputs is same as order of inputs; names are last.\n    out_list = [_to_numpy(val, name=key) for key, val in kwargs.items()] + [names]\n    return tuple(out_list)\n\n\ndef _to_numpy(df_or_array, name):\n    if isinstance(df_or_array, pd.DataFrame):\n        arr = df_or_array.to_numpy()\n    elif isinstance(df_or_array, np.ndarray):\n        arr = df_or_array\n    else:\n        raise TypeError(\n            f\"{name} must be a DataFrame or numpy array, not {type(df_or_array)}.\"\n        )\n    return arr\n\n\ndef _check_names_coincide(name_dict):\n    if len(name_dict) >= 2:\n        first_key = list(name_dict)[0]\n        first_names = name_dict[first_key]\n\n        for key, names in name_dict.items():\n            if not 
first_names.equals(names):\n                msg = f\"Ambiguous parameter or moment names from {first_key} and {key}.\"\n                raise ValueError(msg)\n\n\ndef get_derivative_case(derivative):\n    \"\"\"Determine which kind of derivative should be used.\"\"\"\n    if callable(derivative):\n        case = \"closed-form\"\n    elif derivative is False:\n        case = \"skip\"\n    else:\n        case = \"numerical\"\n    return case\n\n\ndef calculate_ci(free_values, free_standard_errors, ci_level):\n    alpha = 1 - ci_level\n    scale = scipy.stats.norm.ppf(1 - alpha / 2)\n    lower = free_values - scale * free_standard_errors\n    upper = free_values + scale * free_standard_errors\n    return lower, upper\n\n\ndef calculate_p_values(free_values, free_standard_errors):\n    tvalues = free_values / np.clip(free_standard_errors, 1e-300, np.inf)\n    pvalues = 2 * scipy.stats.norm.sf(np.abs(tvalues))\n    return pvalues\n\n\ndef calculate_free_estimates(estimates, internal_estimates):\n    mask = internal_estimates.free_mask\n    names = internal_estimates.names\n\n    registry = get_registry(extended=True)\n    external_flat = np.array(tree_just_flatten(estimates, registry=registry))\n\n    free_estimates = FreeParams(\n        values=external_flat[mask],\n        free_mask=mask,\n        all_names=names,\n        free_names=np.array(names)[mask].tolist(),\n    )\n    return free_estimates\n\n\ndef transform_free_cov_to_cov(free_cov, free_params, params, return_type):\n    \"\"\"Fill non-free values and project to params block-tree.\"\"\"\n    mask = free_params.free_mask\n    cov = np.full((len(mask), len(mask)), np.nan)\n    cov[np.ix_(mask, mask)] = free_cov\n    if return_type == \"dataframe\":\n        names = free_params.all_names\n        cov = pd.DataFrame(cov, columns=names, index=names)\n    elif return_type == \"pytree\":\n        cov = matrix_to_block_tree(cov, params, params)\n    elif return_type != \"array\":\n        raise ValueError(\n        
    \"return_type must be one of pytree, array, or dataframe, \"\n            f\"not {return_type}.\"\n        )\n    return cov\n\n\ndef transform_free_values_to_params_tree(values, free_params, params):\n    \"\"\"Fill non-free values and project to params tree structure.\"\"\"\n    mask = free_params.free_mask\n    flat = np.full(len(mask), np.nan)\n    flat[np.ix_(mask)] = values\n    registry = get_registry(extended=True)\n    pytree = tree_unflatten(params, flat, registry=registry)\n    return pytree\n\n\nclass FreeParams(NamedTuple):\n    values: np.ndarray  # free external parameter values\n    free_mask: np.ndarray  # boolean mask to filter free params from external params\n    free_names: list  # names of free external parameters\n    all_names: list  # names of all external parameters\n"
  },
  {
    "path": "src/estimagic/utilities.py",
    "content": "from optimagic.decorators import deprecated\nfrom optimagic.utilities import (\n    calculate_trustregion_initial_radius as _calculate_trustregion_initial_radius,\n)\nfrom optimagic.utilities import (\n    chol_params_to_lower_triangular_matrix as _chol_params_to_lower_triangular_matrix,\n)\nfrom optimagic.utilities import cov_matrix_to_params as _cov_matrix_to_params\nfrom optimagic.utilities import (\n    cov_matrix_to_sdcorr_params as _cov_matrix_to_sdcorr_params,\n)\nfrom optimagic.utilities import cov_params_to_matrix as _cov_params_to_matrix\nfrom optimagic.utilities import cov_to_sds_and_corr as _cov_to_sds_and_corr\nfrom optimagic.utilities import (\n    dimension_to_number_of_triangular_elements as _dimension_to_number_of_triangular_elements,  # noqa: E501\n)\nfrom optimagic.utilities import get_rng as _get_rng\nfrom optimagic.utilities import hash_array as _hash_array\nfrom optimagic.utilities import isscalar as _isscalar\nfrom optimagic.utilities import (\n    number_of_triangular_elements_to_dimension as _number_of_triangular_elements_to_dimension,  # noqa: E501\n)\nfrom optimagic.utilities import propose_alternatives as _propose_alternatives\nfrom optimagic.utilities import read_pickle as _read_pickle\nfrom optimagic.utilities import robust_cholesky as _robust_cholesky\nfrom optimagic.utilities import robust_inverse as _robust_inverse\nfrom optimagic.utilities import sdcorr_params_to_matrix as _sdcorr_params_to_matrix\nfrom optimagic.utilities import (\n    sdcorr_params_to_sds_and_corr as _sdcorr_params_to_sds_and_corr,\n)\nfrom optimagic.utilities import sds_and_corr_to_cov as _sds_and_corr_to_cov\nfrom optimagic.utilities import to_pickle as _to_pickle\n\nMSG = (\n    \"estimagic.utilities.{name} has been deprecated in version 0.5.0. Use optimagic.\"\n    \"utilities.{name} instead. 
This function will be removed in version 0.6.0.\"\n)\n\n\nchol_params_to_lower_triangular_matrix = deprecated(\n    _chol_params_to_lower_triangular_matrix,\n    MSG.format(name=\"chol_params_to_lower_triangular_matrix\"),\n)\ncov_params_to_matrix = deprecated(\n    _cov_params_to_matrix, MSG.format(name=\"cov_params_to_matrix\")\n)\ncov_matrix_to_params = deprecated(\n    _cov_matrix_to_params, MSG.format(name=\"cov_matrix_to_params\")\n)\nsdcorr_params_to_sds_and_corr = deprecated(\n    _sdcorr_params_to_sds_and_corr, MSG.format(name=\"sdcorr_params_to_sds_and_corr\")\n)\nsds_and_corr_to_cov = deprecated(\n    _sds_and_corr_to_cov, MSG.format(name=\"sds_and_corr_to_cov\")\n)\ncov_to_sds_and_corr = deprecated(\n    _cov_to_sds_and_corr, MSG.format(name=\"cov_to_sds_and_corr\")\n)\nsdcorr_params_to_matrix = deprecated(\n    _sdcorr_params_to_matrix, MSG.format(name=\"sdcorr_params_to_matrix\")\n)\ncov_matrix_to_sdcorr_params = deprecated(\n    _cov_matrix_to_sdcorr_params, MSG.format(name=\"cov_matrix_to_sdcorr_params\")\n)\nnumber_of_triangular_elements_to_dimension = deprecated(\n    _number_of_triangular_elements_to_dimension,\n    MSG.format(name=\"number_of_triangular_elements_to_dimension\"),\n)\ndimension_to_number_of_triangular_elements = deprecated(\n    _dimension_to_number_of_triangular_elements,\n    MSG.format(name=\"dimension_to_number_of_triangular_elements\"),\n)\npropose_alternatives = deprecated(\n    _propose_alternatives, MSG.format(name=\"propose_alternatives\")\n)\nrobust_cholesky = deprecated(_robust_cholesky, MSG.format(name=\"robust_cholesky\"))\nrobust_inverse = deprecated(_robust_inverse, MSG.format(name=\"robust_inverse\"))\nhash_array = deprecated(_hash_array, MSG.format(name=\"hash_array\"))\ncalculate_trustregion_initial_radius = deprecated(\n    _calculate_trustregion_initial_radius,\n    MSG.format(name=\"calculate_trustregion_initial_radius\"),\n)\nto_pickle = deprecated(_to_pickle, MSG.format(name=\"to_pickle\"))\nread_pickle = 
deprecated(_read_pickle, MSG.format(name=\"read_pickle\"))\nisscalar = deprecated(_isscalar, MSG.format(name=\"isscalar\"))\nget_rng = deprecated(_get_rng, MSG.format(name=\"get_rng\"))\n\n__all__ = [\n    \"chol_params_to_lower_triangular_matrix\",\n    \"cov_params_to_matrix\",\n    \"cov_matrix_to_params\",\n    \"sdcorr_params_to_sds_and_corr\",\n    \"sds_and_corr_to_cov\",\n    \"cov_to_sds_and_corr\",\n    \"sdcorr_params_to_matrix\",\n    \"cov_matrix_to_sdcorr_params\",\n    \"number_of_triangular_elements_to_dimension\",\n    \"dimension_to_number_of_triangular_elements\",\n    \"propose_alternatives\",\n    \"robust_cholesky\",\n    \"robust_inverse\",\n    \"hash_array\",\n    \"calculate_trustregion_initial_radius\",\n    \"to_pickle\",\n    \"read_pickle\",\n    \"isscalar\",\n    \"get_rng\",\n]\n"
  },
  {
    "path": "src/optimagic/__init__.py",
    "content": "from __future__ import annotations\n\nfrom optimagic import constraints, mark, sandbox, timing, utilities\nfrom optimagic.algorithms import algos\nfrom optimagic.benchmarking.benchmark_reports import (\n    convergence_report,\n    rank_report,\n    traceback_report,\n)\nfrom optimagic.benchmarking.get_benchmark_problems import get_benchmark_problems\nfrom optimagic.benchmarking.run_benchmark import run_benchmark\nfrom optimagic.constraints import (\n    DecreasingConstraint,\n    EqualityConstraint,\n    FixedConstraint,\n    FlatCovConstraint,\n    FlatSDCorrConstraint,\n    IncreasingConstraint,\n    LinearConstraint,\n    NonlinearConstraint,\n    PairwiseEqualityConstraint,\n    ProbabilityConstraint,\n)\nfrom optimagic.differentiation.derivatives import first_derivative, second_derivative\nfrom optimagic.differentiation.numdiff_options import NumdiffOptions\nfrom optimagic.logging import (\n    ExistenceStrategy as ExistenceStrategy,\n)\nfrom optimagic.logging import (\n    SQLiteLogOptions as SQLiteLogOptions,\n)\nfrom optimagic.logging import (\n    SQLiteLogReader as SQLiteLogReader,\n)\nfrom optimagic.logging.read_log import OptimizeLogReader\nfrom optimagic.optimization.fun_value import (\n    FunctionValue,\n    LeastSquaresFunctionValue,\n    LikelihoodFunctionValue,\n    ScalarFunctionValue,\n)\nfrom optimagic.optimization.history import History\nfrom optimagic.optimization.multistart_options import MultistartOptions\nfrom optimagic.optimization.optimize import maximize, minimize\nfrom optimagic.optimization.optimize_result import OptimizeResult\nfrom optimagic.optimizers import pygad\nfrom optimagic.parameters.bounds import Bounds\nfrom optimagic.parameters.constraint_tools import check_constraints, count_free_params\nfrom optimagic.parameters.scaling import ScalingOptions\nfrom optimagic.visualization.convergence_plot import convergence_plot\nfrom optimagic.visualization.history_plots import criterion_plot, params_plot\nfrom 
optimagic.visualization.profile_plot import profile_plot\nfrom optimagic.visualization.slice_plot import slice_plot\n\ntry:\n    from ._version import version as __version__\nexcept ImportError:\n    # broken installation, we don't even try unknown only works because we do poor mans\n    # version compare\n    __version__ = \"unknown\"\n\n\n__all__ = [\n    \"maximize\",\n    \"minimize\",\n    \"utilities\",\n    \"first_derivative\",\n    \"second_derivative\",\n    \"run_benchmark\",\n    \"get_benchmark_problems\",\n    \"profile_plot\",\n    \"convergence_plot\",\n    \"convergence_report\",\n    \"rank_report\",\n    \"traceback_report\",\n    \"slice_plot\",\n    \"criterion_plot\",\n    \"params_plot\",\n    \"count_free_params\",\n    \"check_constraints\",\n    \"OptimizeLogReader\",\n    \"OptimizeResult\",\n    \"Bounds\",\n    \"mark\",\n    \"ScalingOptions\",\n    \"MultistartOptions\",\n    \"NumdiffOptions\",\n    \"FunctionValue\",\n    \"LeastSquaresFunctionValue\",\n    \"ScalarFunctionValue\",\n    \"LikelihoodFunctionValue\",\n    \"constraints\",\n    \"FlatCovConstraint\",\n    \"FlatSDCorrConstraint\",\n    \"IncreasingConstraint\",\n    \"DecreasingConstraint\",\n    \"FixedConstraint\",\n    \"NonlinearConstraint\",\n    \"LinearConstraint\",\n    \"ProbabilityConstraint\",\n    \"PairwiseEqualityConstraint\",\n    \"EqualityConstraint\",\n    \"History\",\n    \"__version__\",\n    \"algos\",\n    \"pygad\",\n    \"timing\",\n    \"sandbox\",\n]\n"
  },
  {
    "path": "src/optimagic/algorithms.py",
    "content": "\"\"\"This code was auto-generated by a pre-commit hook and should not be changed.\n\nIf you manually change this code, all of your changes will be overwritten the next time\nthe pre-commit hook runs.\n\nDetailed information on the purpose of the code can be found here:\nhttps://optimagic.readthedocs.io/en/latest/development/ep-02-typing.html#algorithm-selection\n\n\"\"\"\n\nfrom dataclasses import dataclass\nfrom typing import Type, cast\n\nfrom optimagic.optimization.algorithm import Algorithm\nfrom optimagic.optimizers.bayesian_optimizer import BayesOpt\nfrom optimagic.optimizers.bhhh import BHHH\nfrom optimagic.optimizers.fides import Fides\nfrom optimagic.optimizers.gfo_optimizers import (\n    GFODifferentialEvolution,\n    GFODownhillSimplex,\n    GFOEvolutionStrategy,\n    GFOGeneticAlgorithm,\n    GFOHillClimbing,\n    GFOParallelTempering,\n    GFOParticleSwarmOptimization,\n    GFOPowellsMethod,\n    GFORepulsingHillClimbing,\n    GFOSimulatedAnnealing,\n    GFOSpiralOptimization,\n    GFOStochasticHillClimbing,\n)\nfrom optimagic.optimizers.iminuit_migrad import IminuitMigrad\nfrom optimagic.optimizers.ipopt import Ipopt\nfrom optimagic.optimizers.nag_optimizers import NagDFOLS, NagPyBOBYQA\nfrom optimagic.optimizers.neldermead import NelderMeadParallel\nfrom optimagic.optimizers.nevergrad_optimizers import (\n    NevergradBayesOptim,\n    NevergradCGA,\n    NevergradCMAES,\n    NevergradDifferentialEvolution,\n    NevergradEDA,\n    NevergradEMNA,\n    NevergradMeta,\n    NevergradNGOpt,\n    NevergradOnePlusOne,\n    NevergradPSO,\n    NevergradRandomSearch,\n    NevergradSamplingSearch,\n    NevergradTBPSA,\n)\nfrom optimagic.optimizers.nlopt_optimizers import (\n    NloptBOBYQA,\n    NloptCCSAQ,\n    NloptCOBYLA,\n    NloptCRS2LM,\n    NloptDirect,\n    NloptESCH,\n    NloptISRES,\n    NloptLBFGSB,\n    NloptMMA,\n    NloptNelderMead,\n    NloptNEWUOA,\n    NloptPRAXIS,\n    NloptSbplx,\n    NloptSLSQP,\n    NloptTNewton,\n    
NloptVAR,\n)\nfrom optimagic.optimizers.pounders import Pounders\nfrom optimagic.optimizers.pygad_optimizer import Pygad\nfrom optimagic.optimizers.pygmo_optimizers import (\n    PygmoBeeColony,\n    PygmoCmaes,\n    PygmoCompassSearch,\n    PygmoDe,\n    PygmoDe1220,\n    PygmoGaco,\n    PygmoGwo,\n    PygmoIhs,\n    PygmoMbh,\n    PygmoPso,\n    PygmoPsoGen,\n    PygmoSade,\n    PygmoSea,\n    PygmoSga,\n    PygmoSimulatedAnnealing,\n    PygmoXnes,\n)\nfrom optimagic.optimizers.pyswarms_optimizers import (\n    PySwarmsGeneralPSO,\n    PySwarmsGlobalBestPSO,\n    PySwarmsLocalBestPSO,\n)\nfrom optimagic.optimizers.scipy_optimizers import (\n    ScipyBasinhopping,\n    ScipyBFGS,\n    ScipyBrute,\n    ScipyCOBYLA,\n    ScipyConjugateGradient,\n    ScipyDifferentialEvolution,\n    ScipyDirect,\n    ScipyDualAnnealing,\n    ScipyLBFGSB,\n    ScipyLSDogbox,\n    ScipyLSLM,\n    ScipyLSTRF,\n    ScipyNelderMead,\n    ScipyNewtonCG,\n    ScipyPowell,\n    ScipySHGO,\n    ScipySLSQP,\n    ScipyTruncatedNewton,\n    ScipyTrustConstr,\n)\nfrom optimagic.optimizers.tao_optimizers import TAOPounders\nfrom optimagic.optimizers.tranquilo import Tranquilo, TranquiloLS\n\n\n@dataclass(frozen=True)\nclass AlgoSelection:\n    def _all(self) -> list[Type[Algorithm]]:\n        raw = [field.default for field in self.__dataclass_fields__.values()]\n        return cast(list[Type[Algorithm]], raw)\n\n    def _available(self) -> list[Type[Algorithm]]:\n        _all = self._all()\n        return [\n            a\n            for a in _all\n            if a.algo_info.is_available  # type: ignore\n        ]\n\n    @property\n    def All(self) -> list[Type[Algorithm]]:\n        return self._all()\n\n    @property\n    def Available(self) -> list[Type[Algorithm]]:\n        return self._available()\n\n    @property\n    def AllNames(self) -> list[str]:\n        return [str(a.name) for a in self._all()]\n\n    @property\n    def AvailableNames(self) -> list[str]:\n        return [str(a.name) 
for a in self._available()]\n\n    @property\n    def _all_algorithms_dict(self) -> dict[str, Type[Algorithm]]:\n        return {str(a.name): a for a in self._all()}\n\n    @property\n    def _available_algorithms_dict(self) -> dict[str, Type[Algorithm]]:\n        return {str(a.name): a for a in self._available()}\n\n\n@dataclass(frozen=True)\nclass BoundedGlobalGradientFreeNonlinearConstrainedParallelScalarAlgorithms(\n    AlgoSelection\n):\n    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (\n        ScipyDifferentialEvolution\n    )\n\n\n@dataclass(frozen=True)\nclass BoundedGlobalGradientBasedNonlinearConstrainedScalarAlgorithms(AlgoSelection):\n    scipy_shgo: Type[ScipySHGO] = ScipySHGO\n\n\n@dataclass(frozen=True)\nclass BoundedGradientBasedLocalNonlinearConstrainedScalarAlgorithms(AlgoSelection):\n    ipopt: Type[Ipopt] = Ipopt\n    nlopt_mma: Type[NloptMMA] = NloptMMA\n    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP\n    scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP\n    scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr\n\n\n@dataclass(frozen=True)\nclass BoundedGlobalGradientFreeNonlinearConstrainedScalarAlgorithms(AlgoSelection):\n    nlopt_isres: Type[NloptISRES] = NloptISRES\n    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (\n        ScipyDifferentialEvolution\n    )\n\n    @property\n    def Parallel(\n        self,\n    ) -> BoundedGlobalGradientFreeNonlinearConstrainedParallelScalarAlgorithms:\n        return BoundedGlobalGradientFreeNonlinearConstrainedParallelScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass BoundedGlobalGradientFreeNonlinearConstrainedParallelAlgorithms(AlgoSelection):\n    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (\n        ScipyDifferentialEvolution\n    )\n\n    @property\n    def Scalar(\n        self,\n    ) -> BoundedGlobalGradientFreeNonlinearConstrainedParallelScalarAlgorithms:\n        return 
BoundedGlobalGradientFreeNonlinearConstrainedParallelScalarAlgorithms()

# ---------------------------------------------------------------------------
# NOTE(review): this section appears to be auto-generated (confirm against the
# project's code generator before hand-editing; edits here would likely be
# overwritten on regeneration).
#
# Pattern, as visible in this section: each frozen dataclass below is an
# AlgoSelection whose class name is a conjunction of algorithm-category
# filters (Bounded, Global/Local, GradientBased/GradientFree, LeastSquares,
# NonlinearConstrained, Parallel, Scalar). Its fields enumerate, in
# alphabetical order, the optimizer classes that satisfy every filter in the
# name, and each property returns the selection obtained by adding exactly one
# more filter to that conjunction (e.g. `.Parallel` on a
# BoundedGlobal...Algorithms selection returns the corresponding
# BoundedGlobal...ParallelAlgorithms selection).
# ---------------------------------------------------------------------------

@dataclass(frozen=True)
class BoundedGlobalGradientFreeParallelScalarAlgorithms(AlgoSelection):
    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
    nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt
    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
    pygad: Type[Pygad] = Pygad
    pygmo_gaco: Type[PygmoGaco] = PygmoGaco
    pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
    pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO
    pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO
    pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO
    scipy_brute: Type[ScipyBrute] = ScipyBrute
    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (
        ScipyDifferentialEvolution
    )

    @property
    def NonlinearConstrained(
        self,
    ) -> BoundedGlobalGradientFreeNonlinearConstrainedParallelScalarAlgorithms:
        return BoundedGlobalGradientFreeNonlinearConstrainedParallelScalarAlgorithms()


@dataclass(frozen=True)
class GlobalGradientFreeNonlinearConstrainedParallelScalarAlgorithms(AlgoSelection):
    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (
        ScipyDifferentialEvolution
    )

    @property
    def Bounded(
        self,
    ) -> BoundedGlobalGradientFreeNonlinearConstrainedParallelScalarAlgorithms:
        return BoundedGlobalGradientFreeNonlinearConstrainedParallelScalarAlgorithms()


@dataclass(frozen=True)
class BoundedGradientFreeLocalNonlinearConstrainedScalarAlgorithms(AlgoSelection):
    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA


@dataclass(frozen=True)
class BoundedGradientFreeLocalParallelScalarAlgorithms(AlgoSelection):
    tranquilo: Type[Tranquilo] = Tranquilo


@dataclass(frozen=True)
class BoundedGradientFreeLeastSquaresLocalParallelAlgorithms(AlgoSelection):
    pounders: Type[Pounders] = Pounders
    tranquilo_ls: Type[TranquiloLS] = TranquiloLS


@dataclass(frozen=True)
class BoundedGradientFreeNonlinearConstrainedParallelScalarAlgorithms(AlgoSelection):
    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (
        ScipyDifferentialEvolution
    )

    @property
    def Global(
        self,
    ) -> BoundedGlobalGradientFreeNonlinearConstrainedParallelScalarAlgorithms:
        return BoundedGlobalGradientFreeNonlinearConstrainedParallelScalarAlgorithms()


@dataclass(frozen=True)
class BoundedGlobalNonlinearConstrainedParallelScalarAlgorithms(AlgoSelection):
    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (
        ScipyDifferentialEvolution
    )

    @property
    def GradientFree(
        self,
    ) -> BoundedGlobalGradientFreeNonlinearConstrainedParallelScalarAlgorithms:
        return BoundedGlobalGradientFreeNonlinearConstrainedParallelScalarAlgorithms()


@dataclass(frozen=True)
class BoundedGlobalGradientBasedNonlinearConstrainedAlgorithms(AlgoSelection):
    scipy_shgo: Type[ScipySHGO] = ScipySHGO

    @property
    def Scalar(self) -> BoundedGlobalGradientBasedNonlinearConstrainedScalarAlgorithms:
        return BoundedGlobalGradientBasedNonlinearConstrainedScalarAlgorithms()


@dataclass(frozen=True)
class BoundedGlobalGradientBasedScalarAlgorithms(AlgoSelection):
    scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping
    scipy_dual_annealing: Type[ScipyDualAnnealing] = ScipyDualAnnealing
    scipy_shgo: Type[ScipySHGO] = ScipySHGO

    @property
    def NonlinearConstrained(
        self,
    ) -> BoundedGlobalGradientBasedNonlinearConstrainedScalarAlgorithms:
        return BoundedGlobalGradientBasedNonlinearConstrainedScalarAlgorithms()


@dataclass(frozen=True)
class GlobalGradientBasedNonlinearConstrainedScalarAlgorithms(AlgoSelection):
    scipy_shgo: Type[ScipySHGO] = ScipySHGO

    @property
    def Bounded(self) -> BoundedGlobalGradientBasedNonlinearConstrainedScalarAlgorithms:
        return BoundedGlobalGradientBasedNonlinearConstrainedScalarAlgorithms()


@dataclass(frozen=True)
class BoundedGradientBasedLocalNonlinearConstrainedAlgorithms(AlgoSelection):
    ipopt: Type[Ipopt] = Ipopt
    nlopt_mma: Type[NloptMMA] = NloptMMA
    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP
    scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP
    scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr

    @property
    def Scalar(self) -> BoundedGradientBasedLocalNonlinearConstrainedScalarAlgorithms:
        return BoundedGradientBasedLocalNonlinearConstrainedScalarAlgorithms()


@dataclass(frozen=True)
class BoundedGradientBasedLocalScalarAlgorithms(AlgoSelection):
    fides: Type[Fides] = Fides
    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
    ipopt: Type[Ipopt] = Ipopt
    nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ
    nlopt_lbfgsb: Type[NloptLBFGSB] = NloptLBFGSB
    nlopt_mma: Type[NloptMMA] = NloptMMA
    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP
    nlopt_tnewton: Type[NloptTNewton] = NloptTNewton
    nlopt_var: Type[NloptVAR] = NloptVAR
    scipy_lbfgsb: Type[ScipyLBFGSB] = ScipyLBFGSB
    scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP
    scipy_truncated_newton: Type[ScipyTruncatedNewton] = ScipyTruncatedNewton
    scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr

    @property
    def NonlinearConstrained(
        self,
    ) -> BoundedGradientBasedLocalNonlinearConstrainedScalarAlgorithms:
        return BoundedGradientBasedLocalNonlinearConstrainedScalarAlgorithms()


@dataclass(frozen=True)
class BoundedGradientBasedLeastSquaresLocalAlgorithms(AlgoSelection):
    scipy_ls_dogbox: Type[ScipyLSDogbox] = ScipyLSDogbox
    scipy_ls_trf: Type[ScipyLSTRF] = ScipyLSTRF


@dataclass(frozen=True)
class GradientBasedLocalNonlinearConstrainedScalarAlgorithms(AlgoSelection):
    ipopt: Type[Ipopt] = Ipopt
    nlopt_mma: Type[NloptMMA] = NloptMMA
    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP
    scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP
    scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr

    @property
    def Bounded(self) -> BoundedGradientBasedLocalNonlinearConstrainedScalarAlgorithms:
        return BoundedGradientBasedLocalNonlinearConstrainedScalarAlgorithms()


@dataclass(frozen=True)
class BoundedGradientBasedNonlinearConstrainedScalarAlgorithms(AlgoSelection):
    ipopt: Type[Ipopt] = Ipopt
    nlopt_mma: Type[NloptMMA] = NloptMMA
    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP
    scipy_shgo: Type[ScipySHGO] = ScipySHGO
    scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP
    scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr

    @property
    def Global(self) -> BoundedGlobalGradientBasedNonlinearConstrainedScalarAlgorithms:
        return BoundedGlobalGradientBasedNonlinearConstrainedScalarAlgorithms()

    @property
    def Local(self) -> BoundedGradientBasedLocalNonlinearConstrainedScalarAlgorithms:
        return BoundedGradientBasedLocalNonlinearConstrainedScalarAlgorithms()


@dataclass(frozen=True)
class BoundedGlobalGradientFreeNonlinearConstrainedAlgorithms(AlgoSelection):
    nlopt_isres: Type[NloptISRES] = NloptISRES
    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (
        ScipyDifferentialEvolution
    )

    @property
    def Parallel(
        self,
    ) -> BoundedGlobalGradientFreeNonlinearConstrainedParallelAlgorithms:
        return BoundedGlobalGradientFreeNonlinearConstrainedParallelAlgorithms()

    @property
    def Scalar(self) -> BoundedGlobalGradientFreeNonlinearConstrainedScalarAlgorithms:
        return BoundedGlobalGradientFreeNonlinearConstrainedScalarAlgorithms()


@dataclass(frozen=True)
class BoundedGlobalGradientFreeScalarAlgorithms(AlgoSelection):
    bayes_opt: Type[BayesOpt] = BayesOpt
    gfo_differential_evolution: Type[GFODifferentialEvolution] = (
        GFODifferentialEvolution
    )
    gfo_downhillsimplex: Type[GFODownhillSimplex] = GFODownhillSimplex
    gfo_evolution_strategy: Type[GFOEvolutionStrategy] = GFOEvolutionStrategy
    gfo_genetic_algorithm: Type[GFOGeneticAlgorithm] = GFOGeneticAlgorithm
    gfo_hillclimbing: Type[GFOHillClimbing] = GFOHillClimbing
    gfo_parallel_tempering: Type[GFOParallelTempering] = GFOParallelTempering
    gfo_pso: Type[GFOParticleSwarmOptimization] = GFOParticleSwarmOptimization
    gfo_powells_method: Type[GFOPowellsMethod] = GFOPowellsMethod
    gfo_repulsinghillclimbing: Type[GFORepulsingHillClimbing] = GFORepulsingHillClimbing
    gfo_simulatedannealing: Type[GFOSimulatedAnnealing] = GFOSimulatedAnnealing
    gfo_spiral_optimization: Type[GFOSpiralOptimization] = GFOSpiralOptimization
    gfo_stochastichillclimbing: Type[GFOStochasticHillClimbing] = (
        GFOStochasticHillClimbing
    )
    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
    nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt
    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
    nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM
    nlopt_direct: Type[NloptDirect] = NloptDirect
    nlopt_esch: Type[NloptESCH] = NloptESCH
    nlopt_isres: Type[NloptISRES] = NloptISRES
    pygad: Type[Pygad] = Pygad
    pygmo_bee_colony: Type[PygmoBeeColony] = PygmoBeeColony
    pygmo_cmaes: Type[PygmoCmaes] = PygmoCmaes
    pygmo_compass_search: Type[PygmoCompassSearch] = PygmoCompassSearch
    pygmo_de: Type[PygmoDe] = PygmoDe
    pygmo_de1220: Type[PygmoDe1220] = PygmoDe1220
    pygmo_gaco: Type[PygmoGaco] = PygmoGaco
    pygmo_gwo: Type[PygmoGwo] = PygmoGwo
    pygmo_ihs: Type[PygmoIhs] = PygmoIhs
    pygmo_mbh: Type[PygmoMbh] = PygmoMbh
    pygmo_pso: Type[PygmoPso] = PygmoPso
    pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
    pygmo_sade: Type[PygmoSade] = PygmoSade
    pygmo_sea: Type[PygmoSea] = PygmoSea
    pygmo_sga: Type[PygmoSga] = PygmoSga
    pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing
    pygmo_xnes: Type[PygmoXnes] = PygmoXnes
    pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO
    pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO
    pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO
    scipy_brute: Type[ScipyBrute] = ScipyBrute
    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (
        ScipyDifferentialEvolution
    )
    scipy_direct: Type[ScipyDirect] = ScipyDirect

    @property
    def NonlinearConstrained(
        self,
    ) -> BoundedGlobalGradientFreeNonlinearConstrainedScalarAlgorithms:
        return BoundedGlobalGradientFreeNonlinearConstrainedScalarAlgorithms()

    @property
    def Parallel(self) -> BoundedGlobalGradientFreeParallelScalarAlgorithms:
        return BoundedGlobalGradientFreeParallelScalarAlgorithms()


@dataclass(frozen=True)
class BoundedGlobalGradientFreeParallelAlgorithms(AlgoSelection):
    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
    nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt
    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
    pygad: Type[Pygad] = Pygad
    pygmo_gaco: Type[PygmoGaco] = PygmoGaco
    pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
    pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO
    pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO
    pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO
    scipy_brute: Type[ScipyBrute] = ScipyBrute
    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (
        ScipyDifferentialEvolution
    )

    @property
    def NonlinearConstrained(
        self,
    ) -> BoundedGlobalGradientFreeNonlinearConstrainedParallelAlgorithms:
        return BoundedGlobalGradientFreeNonlinearConstrainedParallelAlgorithms()

    @property
    def Scalar(self) -> BoundedGlobalGradientFreeParallelScalarAlgorithms:
        return BoundedGlobalGradientFreeParallelScalarAlgorithms()


@dataclass(frozen=True)
class GlobalGradientFreeNonlinearConstrainedScalarAlgorithms(AlgoSelection):
    nlopt_isres: Type[NloptISRES] = NloptISRES
    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (
        ScipyDifferentialEvolution
    )

    @property
    def Bounded(self) -> BoundedGlobalGradientFreeNonlinearConstrainedScalarAlgorithms:
        return BoundedGlobalGradientFreeNonlinearConstrainedScalarAlgorithms()

    @property
    def Parallel(
        self,
    ) -> GlobalGradientFreeNonlinearConstrainedParallelScalarAlgorithms:
        return GlobalGradientFreeNonlinearConstrainedParallelScalarAlgorithms()


@dataclass(frozen=True)
class GlobalGradientFreeNonlinearConstrainedParallelAlgorithms(AlgoSelection):
    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (
        ScipyDifferentialEvolution
    )

    @property
    def Bounded(
        self,
    ) -> BoundedGlobalGradientFreeNonlinearConstrainedParallelAlgorithms:
        return BoundedGlobalGradientFreeNonlinearConstrainedParallelAlgorithms()

    @property
    def Scalar(self) -> GlobalGradientFreeNonlinearConstrainedParallelScalarAlgorithms:
        return GlobalGradientFreeNonlinearConstrainedParallelScalarAlgorithms()


@dataclass(frozen=True)
class GlobalGradientFreeParallelScalarAlgorithms(AlgoSelection):
    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
    nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt
    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
    pygad: Type[Pygad] = Pygad
    pygmo_gaco: Type[PygmoGaco] = PygmoGaco
    pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
    pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO
    pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO
    pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO
    scipy_brute: Type[ScipyBrute] = ScipyBrute
    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (
        ScipyDifferentialEvolution
    )

    @property
    def Bounded(self) -> BoundedGlobalGradientFreeParallelScalarAlgorithms:
        return BoundedGlobalGradientFreeParallelScalarAlgorithms()

    @property
    def NonlinearConstrained(
        self,
    ) -> GlobalGradientFreeNonlinearConstrainedParallelScalarAlgorithms:
        return GlobalGradientFreeNonlinearConstrainedParallelScalarAlgorithms()


@dataclass(frozen=True)
class BoundedGradientFreeLocalNonlinearConstrainedAlgorithms(AlgoSelection):
    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA

    @property
    def Scalar(self) -> BoundedGradientFreeLocalNonlinearConstrainedScalarAlgorithms:
        return BoundedGradientFreeLocalNonlinearConstrainedScalarAlgorithms()


@dataclass(frozen=True)
class BoundedGradientFreeLocalScalarAlgorithms(AlgoSelection):
    nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
    nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA
    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA
    nlopt_newuoa: Type[NloptNEWUOA] = NloptNEWUOA
    nlopt_neldermead: Type[NloptNelderMead] = NloptNelderMead
    nlopt_sbplx: Type[NloptSbplx] = NloptSbplx
    scipy_neldermead: Type[ScipyNelderMead] = ScipyNelderMead
    scipy_powell: Type[ScipyPowell] = ScipyPowell
    tranquilo: Type[Tranquilo] = Tranquilo

    @property
    def NonlinearConstrained(
        self,
    ) -> BoundedGradientFreeLocalNonlinearConstrainedScalarAlgorithms:
        return BoundedGradientFreeLocalNonlinearConstrainedScalarAlgorithms()

    @property
    def Parallel(self) -> BoundedGradientFreeLocalParallelScalarAlgorithms:
        return BoundedGradientFreeLocalParallelScalarAlgorithms()


@dataclass(frozen=True)
class BoundedGradientFreeLeastSquaresLocalAlgorithms(AlgoSelection):
    nag_dfols: Type[NagDFOLS] = NagDFOLS
    pounders: Type[Pounders] = Pounders
    tao_pounders: Type[TAOPounders] = TAOPounders
    tranquilo_ls: Type[TranquiloLS] = TranquiloLS

    @property
    def Parallel(self) -> BoundedGradientFreeLeastSquaresLocalParallelAlgorithms:
        return BoundedGradientFreeLeastSquaresLocalParallelAlgorithms()


@dataclass(frozen=True)
class BoundedGradientFreeLocalParallelAlgorithms(AlgoSelection):
    pounders: Type[Pounders] = Pounders
    tranquilo: Type[Tranquilo] = Tranquilo
    tranquilo_ls: Type[TranquiloLS] = TranquiloLS

    @property
    def LeastSquares(self) -> BoundedGradientFreeLeastSquaresLocalParallelAlgorithms:
        return BoundedGradientFreeLeastSquaresLocalParallelAlgorithms()

    @property
    def Scalar(self) -> BoundedGradientFreeLocalParallelScalarAlgorithms:
        return BoundedGradientFreeLocalParallelScalarAlgorithms()


@dataclass(frozen=True)
class GradientFreeLocalNonlinearConstrainedScalarAlgorithms(AlgoSelection):
    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA
    scipy_cobyla: Type[ScipyCOBYLA] = ScipyCOBYLA

    @property
    def Bounded(self) -> BoundedGradientFreeLocalNonlinearConstrainedScalarAlgorithms:
        return BoundedGradientFreeLocalNonlinearConstrainedScalarAlgorithms()


@dataclass(frozen=True)
class GradientFreeLocalParallelScalarAlgorithms(AlgoSelection):
    neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel
    tranquilo: Type[Tranquilo] = Tranquilo

    @property
    def Bounded(self) -> BoundedGradientFreeLocalParallelScalarAlgorithms:
        return BoundedGradientFreeLocalParallelScalarAlgorithms()


@dataclass(frozen=True)
class GradientFreeLeastSquaresLocalParallelAlgorithms(AlgoSelection):
    pounders: Type[Pounders] = Pounders
    tranquilo_ls: Type[TranquiloLS] = TranquiloLS

    @property
    def Bounded(self) -> BoundedGradientFreeLeastSquaresLocalParallelAlgorithms:
        return BoundedGradientFreeLeastSquaresLocalParallelAlgorithms()


@dataclass(frozen=True)
class BoundedGradientFreeNonlinearConstrainedScalarAlgorithms(AlgoSelection):
    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA
    nlopt_isres: Type[NloptISRES] = NloptISRES
    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (
        ScipyDifferentialEvolution
    )

    @property
    def Global(self) -> BoundedGlobalGradientFreeNonlinearConstrainedScalarAlgorithms:
        return BoundedGlobalGradientFreeNonlinearConstrainedScalarAlgorithms()

    @property
    def Local(self) -> BoundedGradientFreeLocalNonlinearConstrainedScalarAlgorithms:
        return BoundedGradientFreeLocalNonlinearConstrainedScalarAlgorithms()

    @property
    def Parallel(
        self,
    ) -> BoundedGradientFreeNonlinearConstrainedParallelScalarAlgorithms:
        return BoundedGradientFreeNonlinearConstrainedParallelScalarAlgorithms()


@dataclass(frozen=True)
class BoundedGradientFreeNonlinearConstrainedParallelAlgorithms(AlgoSelection):
    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (
        ScipyDifferentialEvolution
    )

    @property
    def Global(self) -> BoundedGlobalGradientFreeNonlinearConstrainedParallelAlgorithms:
        return BoundedGlobalGradientFreeNonlinearConstrainedParallelAlgorithms()

    @property
    def Scalar(self) -> BoundedGradientFreeNonlinearConstrainedParallelScalarAlgorithms:
        return BoundedGradientFreeNonlinearConstrainedParallelScalarAlgorithms()


@dataclass(frozen=True)
class BoundedGradientFreeParallelScalarAlgorithms(AlgoSelection):
    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
    nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt
    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
    pygad: Type[Pygad] = Pygad
    pygmo_gaco: Type[PygmoGaco] = PygmoGaco
    pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
    pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO
    pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO
    pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO
    scipy_brute: Type[ScipyBrute] = ScipyBrute
    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (
        ScipyDifferentialEvolution
    )
    tranquilo: Type[Tranquilo] = Tranquilo

    @property
    def Global(self) -> BoundedGlobalGradientFreeParallelScalarAlgorithms:
        return BoundedGlobalGradientFreeParallelScalarAlgorithms()

    @property
    def Local(self) -> BoundedGradientFreeLocalParallelScalarAlgorithms:
        return BoundedGradientFreeLocalParallelScalarAlgorithms()

    @property
    def NonlinearConstrained(
        self,
    ) -> BoundedGradientFreeNonlinearConstrainedParallelScalarAlgorithms:
        return BoundedGradientFreeNonlinearConstrainedParallelScalarAlgorithms()


@dataclass(frozen=True)
class BoundedGradientFreeLeastSquaresParallelAlgorithms(AlgoSelection):
    pounders: Type[Pounders] = Pounders
    tranquilo_ls: Type[TranquiloLS] = TranquiloLS

    @property
    def Local(self) -> BoundedGradientFreeLeastSquaresLocalParallelAlgorithms:
        return BoundedGradientFreeLeastSquaresLocalParallelAlgorithms()


@dataclass(frozen=True)
class GradientFreeNonlinearConstrainedParallelScalarAlgorithms(AlgoSelection):
    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (
        ScipyDifferentialEvolution
    )

    @property
    def Bounded(
        self,
    ) -> BoundedGradientFreeNonlinearConstrainedParallelScalarAlgorithms:
        return BoundedGradientFreeNonlinearConstrainedParallelScalarAlgorithms()

    @property
    def Global(self) -> GlobalGradientFreeNonlinearConstrainedParallelScalarAlgorithms:
        return GlobalGradientFreeNonlinearConstrainedParallelScalarAlgorithms()


@dataclass(frozen=True)
class BoundedGlobalNonlinearConstrainedScalarAlgorithms(AlgoSelection):
    nlopt_isres: Type[NloptISRES] = NloptISRES
    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (
        ScipyDifferentialEvolution
    )
    scipy_shgo: Type[ScipySHGO] = ScipySHGO

    @property
    def GradientBased(
        self,
    ) -> BoundedGlobalGradientBasedNonlinearConstrainedScalarAlgorithms:
        return BoundedGlobalGradientBasedNonlinearConstrainedScalarAlgorithms()

    @property
    def GradientFree(
        self,
    ) -> BoundedGlobalGradientFreeNonlinearConstrainedScalarAlgorithms:
        return BoundedGlobalGradientFreeNonlinearConstrainedScalarAlgorithms()

    @property
    def Parallel(self) -> BoundedGlobalNonlinearConstrainedParallelScalarAlgorithms:
        return BoundedGlobalNonlinearConstrainedParallelScalarAlgorithms()


@dataclass(frozen=True)
class BoundedGlobalNonlinearConstrainedParallelAlgorithms(AlgoSelection):
    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (
        ScipyDifferentialEvolution
    )

    @property
    def GradientFree(
        self,
    ) -> BoundedGlobalGradientFreeNonlinearConstrainedParallelAlgorithms:
        return BoundedGlobalGradientFreeNonlinearConstrainedParallelAlgorithms()

    @property
    def Scalar(self) -> BoundedGlobalNonlinearConstrainedParallelScalarAlgorithms:
        return BoundedGlobalNonlinearConstrainedParallelScalarAlgorithms()


@dataclass(frozen=True)
class BoundedGlobalParallelScalarAlgorithms(AlgoSelection):
    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim
    nevergrad_cga: Type[NevergradCGA] = NevergradCGA
    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES
    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution
    nevergrad_eda: Type[NevergradEDA] = NevergradEDA
    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA
    nevergrad_meta: Type[NevergradMeta] = NevergradMeta
    nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt
    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne
    nevergrad_pso: Type[NevergradPSO] = NevergradPSO
    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch
    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch
    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA
    pygad: Type[Pygad] = Pygad
    pygmo_gaco: Type[PygmoGaco] = PygmoGaco
    pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen
    pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO
    pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO
    pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO
    scipy_brute: Type[ScipyBrute] = ScipyBrute
    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (
        ScipyDifferentialEvolution
    )

    @property
    def GradientFree(self) -> BoundedGlobalGradientFreeParallelScalarAlgorithms:
        return BoundedGlobalGradientFreeParallelScalarAlgorithms()

    @property
    def NonlinearConstrained(
        self,
    ) -> BoundedGlobalNonlinearConstrainedParallelScalarAlgorithms:
        return BoundedGlobalNonlinearConstrainedParallelScalarAlgorithms()


@dataclass(frozen=True)
class GlobalNonlinearConstrainedParallelScalarAlgorithms(AlgoSelection):
    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (
        ScipyDifferentialEvolution
    )

    @property
    def Bounded(self) -> BoundedGlobalNonlinearConstrainedParallelScalarAlgorithms:
        return BoundedGlobalNonlinearConstrainedParallelScalarAlgorithms()

    @property
    def GradientFree(
        self,
    ) -> GlobalGradientFreeNonlinearConstrainedParallelScalarAlgorithms:
        return GlobalGradientFreeNonlinearConstrainedParallelScalarAlgorithms()


@dataclass(frozen=True)
class BoundedLocalNonlinearConstrainedScalarAlgorithms(AlgoSelection):
    ipopt: Type[Ipopt] = Ipopt
    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA
    nlopt_mma: Type[NloptMMA] = NloptMMA
    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP
    scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP
    scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr

    @property
    def GradientBased(
        self,
    ) -> BoundedGradientBasedLocalNonlinearConstrainedScalarAlgorithms:
        return BoundedGradientBasedLocalNonlinearConstrainedScalarAlgorithms()

    @property
    def GradientFree(
        self,
    ) -> BoundedGradientFreeLocalNonlinearConstrainedScalarAlgorithms:
        return BoundedGradientFreeLocalNonlinearConstrainedScalarAlgorithms()


@dataclass(frozen=True)
class BoundedLocalParallelScalarAlgorithms(AlgoSelection):
    tranquilo: Type[Tranquilo] = Tranquilo

    @property
    def GradientFree(self) -> BoundedGradientFreeLocalParallelScalarAlgorithms:
        return BoundedGradientFreeLocalParallelScalarAlgorithms()


@dataclass(frozen=True)
class BoundedLeastSquaresLocalParallelAlgorithms(AlgoSelection):
    pounders: Type[Pounders] = Pounders
    tranquilo_ls: Type[TranquiloLS] = TranquiloLS

    @property
    def GradientFree(self) -> BoundedGradientFreeLeastSquaresLocalParallelAlgorithms:
        return BoundedGradientFreeLeastSquaresLocalParallelAlgorithms()


@dataclass(frozen=True)
class BoundedNonlinearConstrainedParallelScalarAlgorithms(AlgoSelection):
    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (
        ScipyDifferentialEvolution
    )

    @property
    def Global(self) -> BoundedGlobalNonlinearConstrainedParallelScalarAlgorithms:
        return BoundedGlobalNonlinearConstrainedParallelScalarAlgorithms()

    @property
    def GradientFree(
        self,
    ) -> BoundedGradientFreeNonlinearConstrainedParallelScalarAlgorithms:
        return BoundedGradientFreeNonlinearConstrainedParallelScalarAlgorithms()


@dataclass(frozen=True)
class BoundedGlobalGradientBasedAlgorithms(AlgoSelection):
    scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping
    scipy_dual_annealing: Type[ScipyDualAnnealing] = ScipyDualAnnealing
    scipy_shgo: Type[ScipySHGO] = ScipySHGO

    @property
    def NonlinearConstrained(
        self,
    ) -> BoundedGlobalGradientBasedNonlinearConstrainedAlgorithms:
        return BoundedGlobalGradientBasedNonlinearConstrainedAlgorithms()

    @property
    def Scalar(self) -> BoundedGlobalGradientBasedScalarAlgorithms:
        return BoundedGlobalGradientBasedScalarAlgorithms()


@dataclass(frozen=True)
class GlobalGradientBasedNonlinearConstrainedAlgorithms(AlgoSelection):
    scipy_shgo: Type[ScipySHGO] = ScipySHGO

    @property
    def Bounded(self) -> BoundedGlobalGradientBasedNonlinearConstrainedAlgorithms:
        return BoundedGlobalGradientBasedNonlinearConstrainedAlgorithms()

    @property
    def Scalar(self) -> GlobalGradientBasedNonlinearConstrainedScalarAlgorithms:
        return GlobalGradientBasedNonlinearConstrainedScalarAlgorithms()


@dataclass(frozen=True)
class GlobalGradientBasedScalarAlgorithms(AlgoSelection):
    scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping
    scipy_dual_annealing: Type[ScipyDualAnnealing] = ScipyDualAnnealing
    scipy_shgo: Type[ScipySHGO] = ScipySHGO

    @property
    def Bounded(self) -> BoundedGlobalGradientBasedScalarAlgorithms:
        return BoundedGlobalGradientBasedScalarAlgorithms()

    @property
    def NonlinearConstrained(
        self,
    ) -> GlobalGradientBasedNonlinearConstrainedScalarAlgorithms:
        return GlobalGradientBasedNonlinearConstrainedScalarAlgorithms()


@dataclass(frozen=True)
class BoundedGradientBasedLocalAlgorithms(AlgoSelection):
    fides: Type[Fides] = Fides
    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
    ipopt: Type[Ipopt] = Ipopt
    nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ
    nlopt_lbfgsb: Type[NloptLBFGSB] = NloptLBFGSB
    nlopt_mma: Type[NloptMMA] = NloptMMA
    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP
    nlopt_tnewton: Type[NloptTNewton] = NloptTNewton
    nlopt_var: Type[NloptVAR] = NloptVAR
    scipy_lbfgsb: Type[ScipyLBFGSB] = ScipyLBFGSB
    scipy_ls_dogbox: Type[ScipyLSDogbox] = ScipyLSDogbox
    scipy_ls_trf: Type[ScipyLSTRF] = ScipyLSTRF
    scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP
    scipy_truncated_newton: Type[ScipyTruncatedNewton] = 
ScipyTruncatedNewton\n    scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr\n\n    @property\n    def LeastSquares(self) -> BoundedGradientBasedLeastSquaresLocalAlgorithms:\n        return BoundedGradientBasedLeastSquaresLocalAlgorithms()\n\n    @property\n    def NonlinearConstrained(\n        self,\n    ) -> BoundedGradientBasedLocalNonlinearConstrainedAlgorithms:\n        return BoundedGradientBasedLocalNonlinearConstrainedAlgorithms()\n\n    @property\n    def Scalar(self) -> BoundedGradientBasedLocalScalarAlgorithms:\n        return BoundedGradientBasedLocalScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass GradientBasedLocalNonlinearConstrainedAlgorithms(AlgoSelection):\n    ipopt: Type[Ipopt] = Ipopt\n    nlopt_mma: Type[NloptMMA] = NloptMMA\n    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP\n    scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP\n    scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr\n\n    @property\n    def Bounded(self) -> BoundedGradientBasedLocalNonlinearConstrainedAlgorithms:\n        return BoundedGradientBasedLocalNonlinearConstrainedAlgorithms()\n\n    @property\n    def Scalar(self) -> GradientBasedLocalNonlinearConstrainedScalarAlgorithms:\n        return GradientBasedLocalNonlinearConstrainedScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass GradientBasedLocalScalarAlgorithms(AlgoSelection):\n    fides: Type[Fides] = Fides\n    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad\n    ipopt: Type[Ipopt] = Ipopt\n    nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ\n    nlopt_lbfgsb: Type[NloptLBFGSB] = NloptLBFGSB\n    nlopt_mma: Type[NloptMMA] = NloptMMA\n    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP\n    nlopt_tnewton: Type[NloptTNewton] = NloptTNewton\n    nlopt_var: Type[NloptVAR] = NloptVAR\n    scipy_bfgs: Type[ScipyBFGS] = ScipyBFGS\n    scipy_conjugate_gradient: Type[ScipyConjugateGradient] = ScipyConjugateGradient\n    scipy_lbfgsb: Type[ScipyLBFGSB] = ScipyLBFGSB\n    scipy_newton_cg: Type[ScipyNewtonCG] 
= ScipyNewtonCG\n    scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP\n    scipy_truncated_newton: Type[ScipyTruncatedNewton] = ScipyTruncatedNewton\n    scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr\n\n    @property\n    def Bounded(self) -> BoundedGradientBasedLocalScalarAlgorithms:\n        return BoundedGradientBasedLocalScalarAlgorithms()\n\n    @property\n    def NonlinearConstrained(\n        self,\n    ) -> GradientBasedLocalNonlinearConstrainedScalarAlgorithms:\n        return GradientBasedLocalNonlinearConstrainedScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass GradientBasedLeastSquaresLocalAlgorithms(AlgoSelection):\n    scipy_ls_dogbox: Type[ScipyLSDogbox] = ScipyLSDogbox\n    scipy_ls_lm: Type[ScipyLSLM] = ScipyLSLM\n    scipy_ls_trf: Type[ScipyLSTRF] = ScipyLSTRF\n\n    @property\n    def Bounded(self) -> BoundedGradientBasedLeastSquaresLocalAlgorithms:\n        return BoundedGradientBasedLeastSquaresLocalAlgorithms()\n\n\n@dataclass(frozen=True)\nclass GradientBasedLikelihoodLocalAlgorithms(AlgoSelection):\n    bhhh: Type[BHHH] = BHHH\n\n\n@dataclass(frozen=True)\nclass BoundedGradientBasedNonlinearConstrainedAlgorithms(AlgoSelection):\n    ipopt: Type[Ipopt] = Ipopt\n    nlopt_mma: Type[NloptMMA] = NloptMMA\n    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP\n    scipy_shgo: Type[ScipySHGO] = ScipySHGO\n    scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP\n    scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr\n\n    @property\n    def Global(self) -> BoundedGlobalGradientBasedNonlinearConstrainedAlgorithms:\n        return BoundedGlobalGradientBasedNonlinearConstrainedAlgorithms()\n\n    @property\n    def Local(self) -> BoundedGradientBasedLocalNonlinearConstrainedAlgorithms:\n        return BoundedGradientBasedLocalNonlinearConstrainedAlgorithms()\n\n    @property\n    def Scalar(self) -> BoundedGradientBasedNonlinearConstrainedScalarAlgorithms:\n        return 
BoundedGradientBasedNonlinearConstrainedScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass BoundedGradientBasedScalarAlgorithms(AlgoSelection):\n    fides: Type[Fides] = Fides\n    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad\n    ipopt: Type[Ipopt] = Ipopt\n    nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ\n    nlopt_lbfgsb: Type[NloptLBFGSB] = NloptLBFGSB\n    nlopt_mma: Type[NloptMMA] = NloptMMA\n    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP\n    nlopt_tnewton: Type[NloptTNewton] = NloptTNewton\n    nlopt_var: Type[NloptVAR] = NloptVAR\n    scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping\n    scipy_dual_annealing: Type[ScipyDualAnnealing] = ScipyDualAnnealing\n    scipy_lbfgsb: Type[ScipyLBFGSB] = ScipyLBFGSB\n    scipy_shgo: Type[ScipySHGO] = ScipySHGO\n    scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP\n    scipy_truncated_newton: Type[ScipyTruncatedNewton] = ScipyTruncatedNewton\n    scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr\n\n    @property\n    def Global(self) -> BoundedGlobalGradientBasedScalarAlgorithms:\n        return BoundedGlobalGradientBasedScalarAlgorithms()\n\n    @property\n    def Local(self) -> BoundedGradientBasedLocalScalarAlgorithms:\n        return BoundedGradientBasedLocalScalarAlgorithms()\n\n    @property\n    def NonlinearConstrained(\n        self,\n    ) -> BoundedGradientBasedNonlinearConstrainedScalarAlgorithms:\n        return BoundedGradientBasedNonlinearConstrainedScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass BoundedGradientBasedLeastSquaresAlgorithms(AlgoSelection):\n    scipy_ls_dogbox: Type[ScipyLSDogbox] = ScipyLSDogbox\n    scipy_ls_trf: Type[ScipyLSTRF] = ScipyLSTRF\n\n    @property\n    def Local(self) -> BoundedGradientBasedLeastSquaresLocalAlgorithms:\n        return BoundedGradientBasedLeastSquaresLocalAlgorithms()\n\n\n@dataclass(frozen=True)\nclass GradientBasedNonlinearConstrainedScalarAlgorithms(AlgoSelection):\n    ipopt: Type[Ipopt] = Ipopt\n    nlopt_mma: 
Type[NloptMMA] = NloptMMA\n    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP\n    scipy_shgo: Type[ScipySHGO] = ScipySHGO\n    scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP\n    scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr\n\n    @property\n    def Bounded(self) -> BoundedGradientBasedNonlinearConstrainedScalarAlgorithms:\n        return BoundedGradientBasedNonlinearConstrainedScalarAlgorithms()\n\n    @property\n    def Global(self) -> GlobalGradientBasedNonlinearConstrainedScalarAlgorithms:\n        return GlobalGradientBasedNonlinearConstrainedScalarAlgorithms()\n\n    @property\n    def Local(self) -> GradientBasedLocalNonlinearConstrainedScalarAlgorithms:\n        return GradientBasedLocalNonlinearConstrainedScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass BoundedGlobalGradientFreeAlgorithms(AlgoSelection):\n    bayes_opt: Type[BayesOpt] = BayesOpt\n    gfo_differential_evolution: Type[GFODifferentialEvolution] = (\n        GFODifferentialEvolution\n    )\n    gfo_downhillsimplex: Type[GFODownhillSimplex] = GFODownhillSimplex\n    gfo_evolution_strategy: Type[GFOEvolutionStrategy] = GFOEvolutionStrategy\n    gfo_genetic_algorithm: Type[GFOGeneticAlgorithm] = GFOGeneticAlgorithm\n    gfo_hillclimbing: Type[GFOHillClimbing] = GFOHillClimbing\n    gfo_parallel_tempering: Type[GFOParallelTempering] = GFOParallelTempering\n    gfo_pso: Type[GFOParticleSwarmOptimization] = GFOParticleSwarmOptimization\n    gfo_powells_method: Type[GFOPowellsMethod] = GFOPowellsMethod\n    gfo_repulsinghillclimbing: Type[GFORepulsingHillClimbing] = GFORepulsingHillClimbing\n    gfo_simulatedannealing: Type[GFOSimulatedAnnealing] = GFOSimulatedAnnealing\n    gfo_spiral_optimization: Type[GFOSpiralOptimization] = GFOSpiralOptimization\n    gfo_stochastichillclimbing: Type[GFOStochasticHillClimbing] = (\n        GFOStochasticHillClimbing\n    )\n    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim\n    nevergrad_cga: Type[NevergradCGA] = NevergradCGA\n    
nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES\n    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution\n    nevergrad_eda: Type[NevergradEDA] = NevergradEDA\n    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA\n    nevergrad_meta: Type[NevergradMeta] = NevergradMeta\n    nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt\n    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne\n    nevergrad_pso: Type[NevergradPSO] = NevergradPSO\n    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch\n    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch\n    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA\n    nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM\n    nlopt_direct: Type[NloptDirect] = NloptDirect\n    nlopt_esch: Type[NloptESCH] = NloptESCH\n    nlopt_isres: Type[NloptISRES] = NloptISRES\n    pygad: Type[Pygad] = Pygad\n    pygmo_bee_colony: Type[PygmoBeeColony] = PygmoBeeColony\n    pygmo_cmaes: Type[PygmoCmaes] = PygmoCmaes\n    pygmo_compass_search: Type[PygmoCompassSearch] = PygmoCompassSearch\n    pygmo_de: Type[PygmoDe] = PygmoDe\n    pygmo_de1220: Type[PygmoDe1220] = PygmoDe1220\n    pygmo_gaco: Type[PygmoGaco] = PygmoGaco\n    pygmo_gwo: Type[PygmoGwo] = PygmoGwo\n    pygmo_ihs: Type[PygmoIhs] = PygmoIhs\n    pygmo_mbh: Type[PygmoMbh] = PygmoMbh\n    pygmo_pso: Type[PygmoPso] = PygmoPso\n    pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen\n    pygmo_sade: Type[PygmoSade] = PygmoSade\n    pygmo_sea: Type[PygmoSea] = PygmoSea\n    pygmo_sga: Type[PygmoSga] = PygmoSga\n    pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing\n    pygmo_xnes: Type[PygmoXnes] = PygmoXnes\n    pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO\n    pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO\n    pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO\n    scipy_brute: 
Type[ScipyBrute] = ScipyBrute\n    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (\n        ScipyDifferentialEvolution\n    )\n    scipy_direct: Type[ScipyDirect] = ScipyDirect\n\n    @property\n    def NonlinearConstrained(\n        self,\n    ) -> BoundedGlobalGradientFreeNonlinearConstrainedAlgorithms:\n        return BoundedGlobalGradientFreeNonlinearConstrainedAlgorithms()\n\n    @property\n    def Parallel(self) -> BoundedGlobalGradientFreeParallelAlgorithms:\n        return BoundedGlobalGradientFreeParallelAlgorithms()\n\n    @property\n    def Scalar(self) -> BoundedGlobalGradientFreeScalarAlgorithms:\n        return BoundedGlobalGradientFreeScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass GlobalGradientFreeNonlinearConstrainedAlgorithms(AlgoSelection):\n    nlopt_isres: Type[NloptISRES] = NloptISRES\n    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (\n        ScipyDifferentialEvolution\n    )\n\n    @property\n    def Bounded(self) -> BoundedGlobalGradientFreeNonlinearConstrainedAlgorithms:\n        return BoundedGlobalGradientFreeNonlinearConstrainedAlgorithms()\n\n    @property\n    def Parallel(self) -> GlobalGradientFreeNonlinearConstrainedParallelAlgorithms:\n        return GlobalGradientFreeNonlinearConstrainedParallelAlgorithms()\n\n    @property\n    def Scalar(self) -> GlobalGradientFreeNonlinearConstrainedScalarAlgorithms:\n        return GlobalGradientFreeNonlinearConstrainedScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass GlobalGradientFreeScalarAlgorithms(AlgoSelection):\n    bayes_opt: Type[BayesOpt] = BayesOpt\n    gfo_differential_evolution: Type[GFODifferentialEvolution] = (\n        GFODifferentialEvolution\n    )\n    gfo_downhillsimplex: Type[GFODownhillSimplex] = GFODownhillSimplex\n    gfo_evolution_strategy: Type[GFOEvolutionStrategy] = GFOEvolutionStrategy\n    gfo_genetic_algorithm: Type[GFOGeneticAlgorithm] = GFOGeneticAlgorithm\n    gfo_hillclimbing: Type[GFOHillClimbing] = 
GFOHillClimbing\n    gfo_parallel_tempering: Type[GFOParallelTempering] = GFOParallelTempering\n    gfo_pso: Type[GFOParticleSwarmOptimization] = GFOParticleSwarmOptimization\n    gfo_powells_method: Type[GFOPowellsMethod] = GFOPowellsMethod\n    gfo_repulsinghillclimbing: Type[GFORepulsingHillClimbing] = GFORepulsingHillClimbing\n    gfo_simulatedannealing: Type[GFOSimulatedAnnealing] = GFOSimulatedAnnealing\n    gfo_spiral_optimization: Type[GFOSpiralOptimization] = GFOSpiralOptimization\n    gfo_stochastichillclimbing: Type[GFOStochasticHillClimbing] = (\n        GFOStochasticHillClimbing\n    )\n    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim\n    nevergrad_cga: Type[NevergradCGA] = NevergradCGA\n    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES\n    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution\n    nevergrad_eda: Type[NevergradEDA] = NevergradEDA\n    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA\n    nevergrad_meta: Type[NevergradMeta] = NevergradMeta\n    nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt\n    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne\n    nevergrad_pso: Type[NevergradPSO] = NevergradPSO\n    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch\n    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch\n    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA\n    nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM\n    nlopt_direct: Type[NloptDirect] = NloptDirect\n    nlopt_esch: Type[NloptESCH] = NloptESCH\n    nlopt_isres: Type[NloptISRES] = NloptISRES\n    pygad: Type[Pygad] = Pygad\n    pygmo_bee_colony: Type[PygmoBeeColony] = PygmoBeeColony\n    pygmo_cmaes: Type[PygmoCmaes] = PygmoCmaes\n    pygmo_compass_search: Type[PygmoCompassSearch] = PygmoCompassSearch\n    pygmo_de: Type[PygmoDe] = PygmoDe\n    pygmo_de1220: Type[PygmoDe1220] = PygmoDe1220\n    pygmo_gaco: Type[PygmoGaco] = 
PygmoGaco\n    pygmo_gwo: Type[PygmoGwo] = PygmoGwo\n    pygmo_ihs: Type[PygmoIhs] = PygmoIhs\n    pygmo_mbh: Type[PygmoMbh] = PygmoMbh\n    pygmo_pso: Type[PygmoPso] = PygmoPso\n    pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen\n    pygmo_sade: Type[PygmoSade] = PygmoSade\n    pygmo_sea: Type[PygmoSea] = PygmoSea\n    pygmo_sga: Type[PygmoSga] = PygmoSga\n    pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing\n    pygmo_xnes: Type[PygmoXnes] = PygmoXnes\n    pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO\n    pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO\n    pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO\n    scipy_brute: Type[ScipyBrute] = ScipyBrute\n    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (\n        ScipyDifferentialEvolution\n    )\n    scipy_direct: Type[ScipyDirect] = ScipyDirect\n\n    @property\n    def Bounded(self) -> BoundedGlobalGradientFreeScalarAlgorithms:\n        return BoundedGlobalGradientFreeScalarAlgorithms()\n\n    @property\n    def NonlinearConstrained(\n        self,\n    ) -> GlobalGradientFreeNonlinearConstrainedScalarAlgorithms:\n        return GlobalGradientFreeNonlinearConstrainedScalarAlgorithms()\n\n    @property\n    def Parallel(self) -> GlobalGradientFreeParallelScalarAlgorithms:\n        return GlobalGradientFreeParallelScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass GlobalGradientFreeParallelAlgorithms(AlgoSelection):\n    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim\n    nevergrad_cga: Type[NevergradCGA] = NevergradCGA\n    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES\n    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution\n    nevergrad_eda: Type[NevergradEDA] = NevergradEDA\n    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA\n    nevergrad_meta: Type[NevergradMeta] = NevergradMeta\n    nevergrad_ngopt: Type[NevergradNGOpt] = 
NevergradNGOpt\n    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne\n    nevergrad_pso: Type[NevergradPSO] = NevergradPSO\n    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch\n    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch\n    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA\n    pygad: Type[Pygad] = Pygad\n    pygmo_gaco: Type[PygmoGaco] = PygmoGaco\n    pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen\n    pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO\n    pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO\n    pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO\n    scipy_brute: Type[ScipyBrute] = ScipyBrute\n    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (\n        ScipyDifferentialEvolution\n    )\n\n    @property\n    def Bounded(self) -> BoundedGlobalGradientFreeParallelAlgorithms:\n        return BoundedGlobalGradientFreeParallelAlgorithms()\n\n    @property\n    def NonlinearConstrained(\n        self,\n    ) -> GlobalGradientFreeNonlinearConstrainedParallelAlgorithms:\n        return GlobalGradientFreeNonlinearConstrainedParallelAlgorithms()\n\n    @property\n    def Scalar(self) -> GlobalGradientFreeParallelScalarAlgorithms:\n        return GlobalGradientFreeParallelScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass BoundedGradientFreeLocalAlgorithms(AlgoSelection):\n    nag_dfols: Type[NagDFOLS] = NagDFOLS\n    nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA\n    nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA\n    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA\n    nlopt_newuoa: Type[NloptNEWUOA] = NloptNEWUOA\n    nlopt_neldermead: Type[NloptNelderMead] = NloptNelderMead\n    nlopt_sbplx: Type[NloptSbplx] = NloptSbplx\n    pounders: Type[Pounders] = Pounders\n    scipy_neldermead: Type[ScipyNelderMead] = ScipyNelderMead\n    scipy_powell: Type[ScipyPowell] = ScipyPowell\n    
tao_pounders: Type[TAOPounders] = TAOPounders\n    tranquilo: Type[Tranquilo] = Tranquilo\n    tranquilo_ls: Type[TranquiloLS] = TranquiloLS\n\n    @property\n    def LeastSquares(self) -> BoundedGradientFreeLeastSquaresLocalAlgorithms:\n        return BoundedGradientFreeLeastSquaresLocalAlgorithms()\n\n    @property\n    def NonlinearConstrained(\n        self,\n    ) -> BoundedGradientFreeLocalNonlinearConstrainedAlgorithms:\n        return BoundedGradientFreeLocalNonlinearConstrainedAlgorithms()\n\n    @property\n    def Parallel(self) -> BoundedGradientFreeLocalParallelAlgorithms:\n        return BoundedGradientFreeLocalParallelAlgorithms()\n\n    @property\n    def Scalar(self) -> BoundedGradientFreeLocalScalarAlgorithms:\n        return BoundedGradientFreeLocalScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass GradientFreeLocalNonlinearConstrainedAlgorithms(AlgoSelection):\n    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA\n    scipy_cobyla: Type[ScipyCOBYLA] = ScipyCOBYLA\n\n    @property\n    def Bounded(self) -> BoundedGradientFreeLocalNonlinearConstrainedAlgorithms:\n        return BoundedGradientFreeLocalNonlinearConstrainedAlgorithms()\n\n    @property\n    def Scalar(self) -> GradientFreeLocalNonlinearConstrainedScalarAlgorithms:\n        return GradientFreeLocalNonlinearConstrainedScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass GradientFreeLocalScalarAlgorithms(AlgoSelection):\n    nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA\n    neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel\n    nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA\n    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA\n    nlopt_newuoa: Type[NloptNEWUOA] = NloptNEWUOA\n    nlopt_neldermead: Type[NloptNelderMead] = NloptNelderMead\n    nlopt_praxis: Type[NloptPRAXIS] = NloptPRAXIS\n    nlopt_sbplx: Type[NloptSbplx] = NloptSbplx\n    scipy_cobyla: Type[ScipyCOBYLA] = ScipyCOBYLA\n    scipy_neldermead: Type[ScipyNelderMead] = ScipyNelderMead\n    scipy_powell: 
Type[ScipyPowell] = ScipyPowell\n    tranquilo: Type[Tranquilo] = Tranquilo\n\n    @property\n    def Bounded(self) -> BoundedGradientFreeLocalScalarAlgorithms:\n        return BoundedGradientFreeLocalScalarAlgorithms()\n\n    @property\n    def NonlinearConstrained(\n        self,\n    ) -> GradientFreeLocalNonlinearConstrainedScalarAlgorithms:\n        return GradientFreeLocalNonlinearConstrainedScalarAlgorithms()\n\n    @property\n    def Parallel(self) -> GradientFreeLocalParallelScalarAlgorithms:\n        return GradientFreeLocalParallelScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass GradientFreeLeastSquaresLocalAlgorithms(AlgoSelection):\n    nag_dfols: Type[NagDFOLS] = NagDFOLS\n    pounders: Type[Pounders] = Pounders\n    tao_pounders: Type[TAOPounders] = TAOPounders\n    tranquilo_ls: Type[TranquiloLS] = TranquiloLS\n\n    @property\n    def Bounded(self) -> BoundedGradientFreeLeastSquaresLocalAlgorithms:\n        return BoundedGradientFreeLeastSquaresLocalAlgorithms()\n\n    @property\n    def Parallel(self) -> GradientFreeLeastSquaresLocalParallelAlgorithms:\n        return GradientFreeLeastSquaresLocalParallelAlgorithms()\n\n\n@dataclass(frozen=True)\nclass GradientFreeLocalParallelAlgorithms(AlgoSelection):\n    neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel\n    pounders: Type[Pounders] = Pounders\n    tranquilo: Type[Tranquilo] = Tranquilo\n    tranquilo_ls: Type[TranquiloLS] = TranquiloLS\n\n    @property\n    def Bounded(self) -> BoundedGradientFreeLocalParallelAlgorithms:\n        return BoundedGradientFreeLocalParallelAlgorithms()\n\n    @property\n    def LeastSquares(self) -> GradientFreeLeastSquaresLocalParallelAlgorithms:\n        return GradientFreeLeastSquaresLocalParallelAlgorithms()\n\n    @property\n    def Scalar(self) -> GradientFreeLocalParallelScalarAlgorithms:\n        return GradientFreeLocalParallelScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass 
BoundedGradientFreeNonlinearConstrainedAlgorithms(AlgoSelection):\n    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA\n    nlopt_isres: Type[NloptISRES] = NloptISRES\n    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (\n        ScipyDifferentialEvolution\n    )\n\n    @property\n    def Global(self) -> BoundedGlobalGradientFreeNonlinearConstrainedAlgorithms:\n        return BoundedGlobalGradientFreeNonlinearConstrainedAlgorithms()\n\n    @property\n    def Local(self) -> BoundedGradientFreeLocalNonlinearConstrainedAlgorithms:\n        return BoundedGradientFreeLocalNonlinearConstrainedAlgorithms()\n\n    @property\n    def Parallel(self) -> BoundedGradientFreeNonlinearConstrainedParallelAlgorithms:\n        return BoundedGradientFreeNonlinearConstrainedParallelAlgorithms()\n\n    @property\n    def Scalar(self) -> BoundedGradientFreeNonlinearConstrainedScalarAlgorithms:\n        return BoundedGradientFreeNonlinearConstrainedScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass BoundedGradientFreeScalarAlgorithms(AlgoSelection):\n    bayes_opt: Type[BayesOpt] = BayesOpt\n    gfo_differential_evolution: Type[GFODifferentialEvolution] = (\n        GFODifferentialEvolution\n    )\n    gfo_downhillsimplex: Type[GFODownhillSimplex] = GFODownhillSimplex\n    gfo_evolution_strategy: Type[GFOEvolutionStrategy] = GFOEvolutionStrategy\n    gfo_genetic_algorithm: Type[GFOGeneticAlgorithm] = GFOGeneticAlgorithm\n    gfo_hillclimbing: Type[GFOHillClimbing] = GFOHillClimbing\n    gfo_parallel_tempering: Type[GFOParallelTempering] = GFOParallelTempering\n    gfo_pso: Type[GFOParticleSwarmOptimization] = GFOParticleSwarmOptimization\n    gfo_powells_method: Type[GFOPowellsMethod] = GFOPowellsMethod\n    gfo_repulsinghillclimbing: Type[GFORepulsingHillClimbing] = GFORepulsingHillClimbing\n    gfo_simulatedannealing: Type[GFOSimulatedAnnealing] = GFOSimulatedAnnealing\n    gfo_spiral_optimization: Type[GFOSpiralOptimization] = GFOSpiralOptimization\n    
gfo_stochastichillclimbing: Type[GFOStochasticHillClimbing] = (\n        GFOStochasticHillClimbing\n    )\n    nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA\n    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim\n    nevergrad_cga: Type[NevergradCGA] = NevergradCGA\n    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES\n    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution\n    nevergrad_eda: Type[NevergradEDA] = NevergradEDA\n    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA\n    nevergrad_meta: Type[NevergradMeta] = NevergradMeta\n    nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt\n    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne\n    nevergrad_pso: Type[NevergradPSO] = NevergradPSO\n    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch\n    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch\n    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA\n    nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA\n    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA\n    nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM\n    nlopt_direct: Type[NloptDirect] = NloptDirect\n    nlopt_esch: Type[NloptESCH] = NloptESCH\n    nlopt_isres: Type[NloptISRES] = NloptISRES\n    nlopt_newuoa: Type[NloptNEWUOA] = NloptNEWUOA\n    nlopt_neldermead: Type[NloptNelderMead] = NloptNelderMead\n    nlopt_sbplx: Type[NloptSbplx] = NloptSbplx\n    pygad: Type[Pygad] = Pygad\n    pygmo_bee_colony: Type[PygmoBeeColony] = PygmoBeeColony\n    pygmo_cmaes: Type[PygmoCmaes] = PygmoCmaes\n    pygmo_compass_search: Type[PygmoCompassSearch] = PygmoCompassSearch\n    pygmo_de: Type[PygmoDe] = PygmoDe\n    pygmo_de1220: Type[PygmoDe1220] = PygmoDe1220\n    pygmo_gaco: Type[PygmoGaco] = PygmoGaco\n    pygmo_gwo: Type[PygmoGwo] = PygmoGwo\n    pygmo_ihs: Type[PygmoIhs] = PygmoIhs\n    pygmo_mbh: Type[PygmoMbh] = PygmoMbh\n    pygmo_pso: Type[PygmoPso] = PygmoPso\n    
pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen\n    pygmo_sade: Type[PygmoSade] = PygmoSade\n    pygmo_sea: Type[PygmoSea] = PygmoSea\n    pygmo_sga: Type[PygmoSga] = PygmoSga\n    pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing\n    pygmo_xnes: Type[PygmoXnes] = PygmoXnes\n    pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO\n    pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO\n    pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO\n    scipy_brute: Type[ScipyBrute] = ScipyBrute\n    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (\n        ScipyDifferentialEvolution\n    )\n    scipy_direct: Type[ScipyDirect] = ScipyDirect\n    scipy_neldermead: Type[ScipyNelderMead] = ScipyNelderMead\n    scipy_powell: Type[ScipyPowell] = ScipyPowell\n    tranquilo: Type[Tranquilo] = Tranquilo\n\n    @property\n    def Global(self) -> BoundedGlobalGradientFreeScalarAlgorithms:\n        return BoundedGlobalGradientFreeScalarAlgorithms()\n\n    @property\n    def Local(self) -> BoundedGradientFreeLocalScalarAlgorithms:\n        return BoundedGradientFreeLocalScalarAlgorithms()\n\n    @property\n    def NonlinearConstrained(\n        self,\n    ) -> BoundedGradientFreeNonlinearConstrainedScalarAlgorithms:\n        return BoundedGradientFreeNonlinearConstrainedScalarAlgorithms()\n\n    @property\n    def Parallel(self) -> BoundedGradientFreeParallelScalarAlgorithms:\n        return BoundedGradientFreeParallelScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass BoundedGradientFreeLeastSquaresAlgorithms(AlgoSelection):\n    nag_dfols: Type[NagDFOLS] = NagDFOLS\n    pounders: Type[Pounders] = Pounders\n    tao_pounders: Type[TAOPounders] = TAOPounders\n    tranquilo_ls: Type[TranquiloLS] = TranquiloLS\n\n    @property\n    def Local(self) -> BoundedGradientFreeLeastSquaresLocalAlgorithms:\n        return BoundedGradientFreeLeastSquaresLocalAlgorithms()\n\n    @property\n  
  def Parallel(self) -> BoundedGradientFreeLeastSquaresParallelAlgorithms:\n        return BoundedGradientFreeLeastSquaresParallelAlgorithms()\n\n\n@dataclass(frozen=True)\nclass BoundedGradientFreeParallelAlgorithms(AlgoSelection):\n    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim\n    nevergrad_cga: Type[NevergradCGA] = NevergradCGA\n    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES\n    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution\n    nevergrad_eda: Type[NevergradEDA] = NevergradEDA\n    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA\n    nevergrad_meta: Type[NevergradMeta] = NevergradMeta\n    nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt\n    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne\n    nevergrad_pso: Type[NevergradPSO] = NevergradPSO\n    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch\n    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch\n    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA\n    pounders: Type[Pounders] = Pounders\n    pygad: Type[Pygad] = Pygad\n    pygmo_gaco: Type[PygmoGaco] = PygmoGaco\n    pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen\n    pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO\n    pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO\n    pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO\n    scipy_brute: Type[ScipyBrute] = ScipyBrute\n    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (\n        ScipyDifferentialEvolution\n    )\n    tranquilo: Type[Tranquilo] = Tranquilo\n    tranquilo_ls: Type[TranquiloLS] = TranquiloLS\n\n    @property\n    def Global(self) -> BoundedGlobalGradientFreeParallelAlgorithms:\n        return BoundedGlobalGradientFreeParallelAlgorithms()\n\n    @property\n    def LeastSquares(self) -> BoundedGradientFreeLeastSquaresParallelAlgorithms:\n        
return BoundedGradientFreeLeastSquaresParallelAlgorithms()\n\n    @property\n    def Local(self) -> BoundedGradientFreeLocalParallelAlgorithms:\n        return BoundedGradientFreeLocalParallelAlgorithms()\n\n    @property\n    def NonlinearConstrained(\n        self,\n    ) -> BoundedGradientFreeNonlinearConstrainedParallelAlgorithms:\n        return BoundedGradientFreeNonlinearConstrainedParallelAlgorithms()\n\n    @property\n    def Scalar(self) -> BoundedGradientFreeParallelScalarAlgorithms:\n        return BoundedGradientFreeParallelScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass GradientFreeNonlinearConstrainedScalarAlgorithms(AlgoSelection):\n    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA\n    nlopt_isres: Type[NloptISRES] = NloptISRES\n    scipy_cobyla: Type[ScipyCOBYLA] = ScipyCOBYLA\n    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (\n        ScipyDifferentialEvolution\n    )\n\n    @property\n    def Bounded(self) -> BoundedGradientFreeNonlinearConstrainedScalarAlgorithms:\n        return BoundedGradientFreeNonlinearConstrainedScalarAlgorithms()\n\n    @property\n    def Global(self) -> GlobalGradientFreeNonlinearConstrainedScalarAlgorithms:\n        return GlobalGradientFreeNonlinearConstrainedScalarAlgorithms()\n\n    @property\n    def Local(self) -> GradientFreeLocalNonlinearConstrainedScalarAlgorithms:\n        return GradientFreeLocalNonlinearConstrainedScalarAlgorithms()\n\n    @property\n    def Parallel(self) -> GradientFreeNonlinearConstrainedParallelScalarAlgorithms:\n        return GradientFreeNonlinearConstrainedParallelScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass GradientFreeNonlinearConstrainedParallelAlgorithms(AlgoSelection):\n    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (\n        ScipyDifferentialEvolution\n    )\n\n    @property\n    def Bounded(self) -> BoundedGradientFreeNonlinearConstrainedParallelAlgorithms:\n        return 
BoundedGradientFreeNonlinearConstrainedParallelAlgorithms()\n\n    @property\n    def Global(self) -> GlobalGradientFreeNonlinearConstrainedParallelAlgorithms:\n        return GlobalGradientFreeNonlinearConstrainedParallelAlgorithms()\n\n    @property\n    def Scalar(self) -> GradientFreeNonlinearConstrainedParallelScalarAlgorithms:\n        return GradientFreeNonlinearConstrainedParallelScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass GradientFreeParallelScalarAlgorithms(AlgoSelection):\n    neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel\n    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim\n    nevergrad_cga: Type[NevergradCGA] = NevergradCGA\n    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES\n    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution\n    nevergrad_eda: Type[NevergradEDA] = NevergradEDA\n    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA\n    nevergrad_meta: Type[NevergradMeta] = NevergradMeta\n    nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt\n    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne\n    nevergrad_pso: Type[NevergradPSO] = NevergradPSO\n    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch\n    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch\n    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA\n    pygad: Type[Pygad] = Pygad\n    pygmo_gaco: Type[PygmoGaco] = PygmoGaco\n    pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen\n    pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO\n    pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO\n    pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO\n    scipy_brute: Type[ScipyBrute] = ScipyBrute\n    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (\n        ScipyDifferentialEvolution\n    )\n    tranquilo: Type[Tranquilo] = Tranquilo\n\n    
@property\n    def Bounded(self) -> BoundedGradientFreeParallelScalarAlgorithms:\n        return BoundedGradientFreeParallelScalarAlgorithms()\n\n    @property\n    def Global(self) -> GlobalGradientFreeParallelScalarAlgorithms:\n        return GlobalGradientFreeParallelScalarAlgorithms()\n\n    @property\n    def Local(self) -> GradientFreeLocalParallelScalarAlgorithms:\n        return GradientFreeLocalParallelScalarAlgorithms()\n\n    @property\n    def NonlinearConstrained(\n        self,\n    ) -> GradientFreeNonlinearConstrainedParallelScalarAlgorithms:\n        return GradientFreeNonlinearConstrainedParallelScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass GradientFreeLeastSquaresParallelAlgorithms(AlgoSelection):\n    pounders: Type[Pounders] = Pounders\n    tranquilo_ls: Type[TranquiloLS] = TranquiloLS\n\n    @property\n    def Bounded(self) -> BoundedGradientFreeLeastSquaresParallelAlgorithms:\n        return BoundedGradientFreeLeastSquaresParallelAlgorithms()\n\n    @property\n    def Local(self) -> GradientFreeLeastSquaresLocalParallelAlgorithms:\n        return GradientFreeLeastSquaresLocalParallelAlgorithms()\n\n\n@dataclass(frozen=True)\nclass BoundedGlobalNonlinearConstrainedAlgorithms(AlgoSelection):\n    nlopt_isres: Type[NloptISRES] = NloptISRES\n    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (\n        ScipyDifferentialEvolution\n    )\n    scipy_shgo: Type[ScipySHGO] = ScipySHGO\n\n    @property\n    def GradientBased(self) -> BoundedGlobalGradientBasedNonlinearConstrainedAlgorithms:\n        return BoundedGlobalGradientBasedNonlinearConstrainedAlgorithms()\n\n    @property\n    def GradientFree(self) -> BoundedGlobalGradientFreeNonlinearConstrainedAlgorithms:\n        return BoundedGlobalGradientFreeNonlinearConstrainedAlgorithms()\n\n    @property\n    def Parallel(self) -> BoundedGlobalNonlinearConstrainedParallelAlgorithms:\n        return BoundedGlobalNonlinearConstrainedParallelAlgorithms()\n\n    @property\n    
def Scalar(self) -> BoundedGlobalNonlinearConstrainedScalarAlgorithms:\n        return BoundedGlobalNonlinearConstrainedScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass BoundedGlobalScalarAlgorithms(AlgoSelection):\n    bayes_opt: Type[BayesOpt] = BayesOpt\n    gfo_differential_evolution: Type[GFODifferentialEvolution] = (\n        GFODifferentialEvolution\n    )\n    gfo_downhillsimplex: Type[GFODownhillSimplex] = GFODownhillSimplex\n    gfo_evolution_strategy: Type[GFOEvolutionStrategy] = GFOEvolutionStrategy\n    gfo_genetic_algorithm: Type[GFOGeneticAlgorithm] = GFOGeneticAlgorithm\n    gfo_hillclimbing: Type[GFOHillClimbing] = GFOHillClimbing\n    gfo_parallel_tempering: Type[GFOParallelTempering] = GFOParallelTempering\n    gfo_pso: Type[GFOParticleSwarmOptimization] = GFOParticleSwarmOptimization\n    gfo_powells_method: Type[GFOPowellsMethod] = GFOPowellsMethod\n    gfo_repulsinghillclimbing: Type[GFORepulsingHillClimbing] = GFORepulsingHillClimbing\n    gfo_simulatedannealing: Type[GFOSimulatedAnnealing] = GFOSimulatedAnnealing\n    gfo_spiral_optimization: Type[GFOSpiralOptimization] = GFOSpiralOptimization\n    gfo_stochastichillclimbing: Type[GFOStochasticHillClimbing] = (\n        GFOStochasticHillClimbing\n    )\n    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim\n    nevergrad_cga: Type[NevergradCGA] = NevergradCGA\n    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES\n    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution\n    nevergrad_eda: Type[NevergradEDA] = NevergradEDA\n    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA\n    nevergrad_meta: Type[NevergradMeta] = NevergradMeta\n    nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt\n    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne\n    nevergrad_pso: Type[NevergradPSO] = NevergradPSO\n    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch\n    nevergrad_samplingsearch: 
Type[NevergradSamplingSearch] = NevergradSamplingSearch\n    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA\n    nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM\n    nlopt_direct: Type[NloptDirect] = NloptDirect\n    nlopt_esch: Type[NloptESCH] = NloptESCH\n    nlopt_isres: Type[NloptISRES] = NloptISRES\n    pygad: Type[Pygad] = Pygad\n    pygmo_bee_colony: Type[PygmoBeeColony] = PygmoBeeColony\n    pygmo_cmaes: Type[PygmoCmaes] = PygmoCmaes\n    pygmo_compass_search: Type[PygmoCompassSearch] = PygmoCompassSearch\n    pygmo_de: Type[PygmoDe] = PygmoDe\n    pygmo_de1220: Type[PygmoDe1220] = PygmoDe1220\n    pygmo_gaco: Type[PygmoGaco] = PygmoGaco\n    pygmo_gwo: Type[PygmoGwo] = PygmoGwo\n    pygmo_ihs: Type[PygmoIhs] = PygmoIhs\n    pygmo_mbh: Type[PygmoMbh] = PygmoMbh\n    pygmo_pso: Type[PygmoPso] = PygmoPso\n    pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen\n    pygmo_sade: Type[PygmoSade] = PygmoSade\n    pygmo_sea: Type[PygmoSea] = PygmoSea\n    pygmo_sga: Type[PygmoSga] = PygmoSga\n    pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing\n    pygmo_xnes: Type[PygmoXnes] = PygmoXnes\n    pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO\n    pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO\n    pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO\n    scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping\n    scipy_brute: Type[ScipyBrute] = ScipyBrute\n    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (\n        ScipyDifferentialEvolution\n    )\n    scipy_direct: Type[ScipyDirect] = ScipyDirect\n    scipy_dual_annealing: Type[ScipyDualAnnealing] = ScipyDualAnnealing\n    scipy_shgo: Type[ScipySHGO] = ScipySHGO\n\n    @property\n    def GradientBased(self) -> BoundedGlobalGradientBasedScalarAlgorithms:\n        return BoundedGlobalGradientBasedScalarAlgorithms()\n\n    @property\n    def GradientFree(self) -> 
BoundedGlobalGradientFreeScalarAlgorithms:\n        return BoundedGlobalGradientFreeScalarAlgorithms()\n\n    @property\n    def NonlinearConstrained(self) -> BoundedGlobalNonlinearConstrainedScalarAlgorithms:\n        return BoundedGlobalNonlinearConstrainedScalarAlgorithms()\n\n    @property\n    def Parallel(self) -> BoundedGlobalParallelScalarAlgorithms:\n        return BoundedGlobalParallelScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass BoundedGlobalParallelAlgorithms(AlgoSelection):\n    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim\n    nevergrad_cga: Type[NevergradCGA] = NevergradCGA\n    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES\n    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution\n    nevergrad_eda: Type[NevergradEDA] = NevergradEDA\n    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA\n    nevergrad_meta: Type[NevergradMeta] = NevergradMeta\n    nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt\n    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne\n    nevergrad_pso: Type[NevergradPSO] = NevergradPSO\n    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch\n    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch\n    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA\n    pygad: Type[Pygad] = Pygad\n    pygmo_gaco: Type[PygmoGaco] = PygmoGaco\n    pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen\n    pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO\n    pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO\n    pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO\n    scipy_brute: Type[ScipyBrute] = ScipyBrute\n    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (\n        ScipyDifferentialEvolution\n    )\n\n    @property\n    def GradientFree(self) -> BoundedGlobalGradientFreeParallelAlgorithms:\n        return 
BoundedGlobalGradientFreeParallelAlgorithms()\n\n    @property\n    def NonlinearConstrained(\n        self,\n    ) -> BoundedGlobalNonlinearConstrainedParallelAlgorithms:\n        return BoundedGlobalNonlinearConstrainedParallelAlgorithms()\n\n    @property\n    def Scalar(self) -> BoundedGlobalParallelScalarAlgorithms:\n        return BoundedGlobalParallelScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass GlobalNonlinearConstrainedScalarAlgorithms(AlgoSelection):\n    nlopt_isres: Type[NloptISRES] = NloptISRES\n    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (\n        ScipyDifferentialEvolution\n    )\n    scipy_shgo: Type[ScipySHGO] = ScipySHGO\n\n    @property\n    def Bounded(self) -> BoundedGlobalNonlinearConstrainedScalarAlgorithms:\n        return BoundedGlobalNonlinearConstrainedScalarAlgorithms()\n\n    @property\n    def GradientBased(self) -> GlobalGradientBasedNonlinearConstrainedScalarAlgorithms:\n        return GlobalGradientBasedNonlinearConstrainedScalarAlgorithms()\n\n    @property\n    def GradientFree(self) -> GlobalGradientFreeNonlinearConstrainedScalarAlgorithms:\n        return GlobalGradientFreeNonlinearConstrainedScalarAlgorithms()\n\n    @property\n    def Parallel(self) -> GlobalNonlinearConstrainedParallelScalarAlgorithms:\n        return GlobalNonlinearConstrainedParallelScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass GlobalNonlinearConstrainedParallelAlgorithms(AlgoSelection):\n    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (\n        ScipyDifferentialEvolution\n    )\n\n    @property\n    def Bounded(self) -> BoundedGlobalNonlinearConstrainedParallelAlgorithms:\n        return BoundedGlobalNonlinearConstrainedParallelAlgorithms()\n\n    @property\n    def GradientFree(self) -> GlobalGradientFreeNonlinearConstrainedParallelAlgorithms:\n        return GlobalGradientFreeNonlinearConstrainedParallelAlgorithms()\n\n    @property\n    def Scalar(self) -> 
GlobalNonlinearConstrainedParallelScalarAlgorithms:\n        return GlobalNonlinearConstrainedParallelScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass GlobalParallelScalarAlgorithms(AlgoSelection):\n    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim\n    nevergrad_cga: Type[NevergradCGA] = NevergradCGA\n    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES\n    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution\n    nevergrad_eda: Type[NevergradEDA] = NevergradEDA\n    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA\n    nevergrad_meta: Type[NevergradMeta] = NevergradMeta\n    nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt\n    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne\n    nevergrad_pso: Type[NevergradPSO] = NevergradPSO\n    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch\n    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch\n    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA\n    pygad: Type[Pygad] = Pygad\n    pygmo_gaco: Type[PygmoGaco] = PygmoGaco\n    pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen\n    pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO\n    pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO\n    pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO\n    scipy_brute: Type[ScipyBrute] = ScipyBrute\n    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (\n        ScipyDifferentialEvolution\n    )\n\n    @property\n    def Bounded(self) -> BoundedGlobalParallelScalarAlgorithms:\n        return BoundedGlobalParallelScalarAlgorithms()\n\n    @property\n    def GradientFree(self) -> GlobalGradientFreeParallelScalarAlgorithms:\n        return GlobalGradientFreeParallelScalarAlgorithms()\n\n    @property\n    def NonlinearConstrained(\n        self,\n    ) -> GlobalNonlinearConstrainedParallelScalarAlgorithms:\n        
return GlobalNonlinearConstrainedParallelScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass BoundedLocalNonlinearConstrainedAlgorithms(AlgoSelection):\n    ipopt: Type[Ipopt] = Ipopt\n    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA\n    nlopt_mma: Type[NloptMMA] = NloptMMA\n    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP\n    scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP\n    scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr\n\n    @property\n    def GradientBased(self) -> BoundedGradientBasedLocalNonlinearConstrainedAlgorithms:\n        return BoundedGradientBasedLocalNonlinearConstrainedAlgorithms()\n\n    @property\n    def GradientFree(self) -> BoundedGradientFreeLocalNonlinearConstrainedAlgorithms:\n        return BoundedGradientFreeLocalNonlinearConstrainedAlgorithms()\n\n    @property\n    def Scalar(self) -> BoundedLocalNonlinearConstrainedScalarAlgorithms:\n        return BoundedLocalNonlinearConstrainedScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass BoundedLocalScalarAlgorithms(AlgoSelection):\n    fides: Type[Fides] = Fides\n    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad\n    ipopt: Type[Ipopt] = Ipopt\n    nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA\n    nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA\n    nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ\n    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA\n    nlopt_lbfgsb: Type[NloptLBFGSB] = NloptLBFGSB\n    nlopt_mma: Type[NloptMMA] = NloptMMA\n    nlopt_newuoa: Type[NloptNEWUOA] = NloptNEWUOA\n    nlopt_neldermead: Type[NloptNelderMead] = NloptNelderMead\n    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP\n    nlopt_sbplx: Type[NloptSbplx] = NloptSbplx\n    nlopt_tnewton: Type[NloptTNewton] = NloptTNewton\n    nlopt_var: Type[NloptVAR] = NloptVAR\n    scipy_lbfgsb: Type[ScipyLBFGSB] = ScipyLBFGSB\n    scipy_neldermead: Type[ScipyNelderMead] = ScipyNelderMead\n    scipy_powell: Type[ScipyPowell] = ScipyPowell\n    scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP\n    
scipy_truncated_newton: Type[ScipyTruncatedNewton] = ScipyTruncatedNewton\n    scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr\n    tranquilo: Type[Tranquilo] = Tranquilo\n\n    @property\n    def GradientBased(self) -> BoundedGradientBasedLocalScalarAlgorithms:\n        return BoundedGradientBasedLocalScalarAlgorithms()\n\n    @property\n    def GradientFree(self) -> BoundedGradientFreeLocalScalarAlgorithms:\n        return BoundedGradientFreeLocalScalarAlgorithms()\n\n    @property\n    def NonlinearConstrained(self) -> BoundedLocalNonlinearConstrainedScalarAlgorithms:\n        return BoundedLocalNonlinearConstrainedScalarAlgorithms()\n\n    @property\n    def Parallel(self) -> BoundedLocalParallelScalarAlgorithms:\n        return BoundedLocalParallelScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass BoundedLeastSquaresLocalAlgorithms(AlgoSelection):\n    nag_dfols: Type[NagDFOLS] = NagDFOLS\n    pounders: Type[Pounders] = Pounders\n    scipy_ls_dogbox: Type[ScipyLSDogbox] = ScipyLSDogbox\n    scipy_ls_trf: Type[ScipyLSTRF] = ScipyLSTRF\n    tao_pounders: Type[TAOPounders] = TAOPounders\n    tranquilo_ls: Type[TranquiloLS] = TranquiloLS\n\n    @property\n    def GradientBased(self) -> BoundedGradientBasedLeastSquaresLocalAlgorithms:\n        return BoundedGradientBasedLeastSquaresLocalAlgorithms()\n\n    @property\n    def GradientFree(self) -> BoundedGradientFreeLeastSquaresLocalAlgorithms:\n        return BoundedGradientFreeLeastSquaresLocalAlgorithms()\n\n    @property\n    def Parallel(self) -> BoundedLeastSquaresLocalParallelAlgorithms:\n        return BoundedLeastSquaresLocalParallelAlgorithms()\n\n\n@dataclass(frozen=True)\nclass BoundedLocalParallelAlgorithms(AlgoSelection):\n    pounders: Type[Pounders] = Pounders\n    tranquilo: Type[Tranquilo] = Tranquilo\n    tranquilo_ls: Type[TranquiloLS] = TranquiloLS\n\n    @property\n    def GradientFree(self) -> BoundedGradientFreeLocalParallelAlgorithms:\n        return 
BoundedGradientFreeLocalParallelAlgorithms()\n\n    @property\n    def LeastSquares(self) -> BoundedLeastSquaresLocalParallelAlgorithms:\n        return BoundedLeastSquaresLocalParallelAlgorithms()\n\n    @property\n    def Scalar(self) -> BoundedLocalParallelScalarAlgorithms:\n        return BoundedLocalParallelScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass LocalNonlinearConstrainedScalarAlgorithms(AlgoSelection):\n    ipopt: Type[Ipopt] = Ipopt\n    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA\n    nlopt_mma: Type[NloptMMA] = NloptMMA\n    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP\n    scipy_cobyla: Type[ScipyCOBYLA] = ScipyCOBYLA\n    scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP\n    scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr\n\n    @property\n    def Bounded(self) -> BoundedLocalNonlinearConstrainedScalarAlgorithms:\n        return BoundedLocalNonlinearConstrainedScalarAlgorithms()\n\n    @property\n    def GradientBased(self) -> GradientBasedLocalNonlinearConstrainedScalarAlgorithms:\n        return GradientBasedLocalNonlinearConstrainedScalarAlgorithms()\n\n    @property\n    def GradientFree(self) -> GradientFreeLocalNonlinearConstrainedScalarAlgorithms:\n        return GradientFreeLocalNonlinearConstrainedScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass LocalParallelScalarAlgorithms(AlgoSelection):\n    neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel\n    tranquilo: Type[Tranquilo] = Tranquilo\n\n    @property\n    def Bounded(self) -> BoundedLocalParallelScalarAlgorithms:\n        return BoundedLocalParallelScalarAlgorithms()\n\n    @property\n    def GradientFree(self) -> GradientFreeLocalParallelScalarAlgorithms:\n        return GradientFreeLocalParallelScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass LeastSquaresLocalParallelAlgorithms(AlgoSelection):\n    pounders: Type[Pounders] = Pounders\n    tranquilo_ls: Type[TranquiloLS] = TranquiloLS\n\n    @property\n    def Bounded(self) -> 
BoundedLeastSquaresLocalParallelAlgorithms:\n        return BoundedLeastSquaresLocalParallelAlgorithms()\n\n    @property\n    def GradientFree(self) -> GradientFreeLeastSquaresLocalParallelAlgorithms:\n        return GradientFreeLeastSquaresLocalParallelAlgorithms()\n\n\n@dataclass(frozen=True)\nclass BoundedNonlinearConstrainedScalarAlgorithms(AlgoSelection):\n    ipopt: Type[Ipopt] = Ipopt\n    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA\n    nlopt_isres: Type[NloptISRES] = NloptISRES\n    nlopt_mma: Type[NloptMMA] = NloptMMA\n    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP\n    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (\n        ScipyDifferentialEvolution\n    )\n    scipy_shgo: Type[ScipySHGO] = ScipySHGO\n    scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP\n    scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr\n\n    @property\n    def Global(self) -> BoundedGlobalNonlinearConstrainedScalarAlgorithms:\n        return BoundedGlobalNonlinearConstrainedScalarAlgorithms()\n\n    @property\n    def GradientBased(self) -> BoundedGradientBasedNonlinearConstrainedScalarAlgorithms:\n        return BoundedGradientBasedNonlinearConstrainedScalarAlgorithms()\n\n    @property\n    def GradientFree(self) -> BoundedGradientFreeNonlinearConstrainedScalarAlgorithms:\n        return BoundedGradientFreeNonlinearConstrainedScalarAlgorithms()\n\n    @property\n    def Local(self) -> BoundedLocalNonlinearConstrainedScalarAlgorithms:\n        return BoundedLocalNonlinearConstrainedScalarAlgorithms()\n\n    @property\n    def Parallel(self) -> BoundedNonlinearConstrainedParallelScalarAlgorithms:\n        return BoundedNonlinearConstrainedParallelScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass BoundedNonlinearConstrainedParallelAlgorithms(AlgoSelection):\n    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (\n        ScipyDifferentialEvolution\n    )\n\n    @property\n    def Global(self) -> 
BoundedGlobalNonlinearConstrainedParallelAlgorithms:\n        return BoundedGlobalNonlinearConstrainedParallelAlgorithms()\n\n    @property\n    def GradientFree(self) -> BoundedGradientFreeNonlinearConstrainedParallelAlgorithms:\n        return BoundedGradientFreeNonlinearConstrainedParallelAlgorithms()\n\n    @property\n    def Scalar(self) -> BoundedNonlinearConstrainedParallelScalarAlgorithms:\n        return BoundedNonlinearConstrainedParallelScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass BoundedParallelScalarAlgorithms(AlgoSelection):\n    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim\n    nevergrad_cga: Type[NevergradCGA] = NevergradCGA\n    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES\n    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution\n    nevergrad_eda: Type[NevergradEDA] = NevergradEDA\n    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA\n    nevergrad_meta: Type[NevergradMeta] = NevergradMeta\n    nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt\n    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne\n    nevergrad_pso: Type[NevergradPSO] = NevergradPSO\n    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch\n    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch\n    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA\n    pygad: Type[Pygad] = Pygad\n    pygmo_gaco: Type[PygmoGaco] = PygmoGaco\n    pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen\n    pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO\n    pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO\n    pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO\n    scipy_brute: Type[ScipyBrute] = ScipyBrute\n    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (\n        ScipyDifferentialEvolution\n    )\n    tranquilo: Type[Tranquilo] = Tranquilo\n\n    @property\n    def 
Global(self) -> BoundedGlobalParallelScalarAlgorithms:\n        return BoundedGlobalParallelScalarAlgorithms()\n\n    @property\n    def GradientFree(self) -> BoundedGradientFreeParallelScalarAlgorithms:\n        return BoundedGradientFreeParallelScalarAlgorithms()\n\n    @property\n    def Local(self) -> BoundedLocalParallelScalarAlgorithms:\n        return BoundedLocalParallelScalarAlgorithms()\n\n    @property\n    def NonlinearConstrained(\n        self,\n    ) -> BoundedNonlinearConstrainedParallelScalarAlgorithms:\n        return BoundedNonlinearConstrainedParallelScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass BoundedLeastSquaresParallelAlgorithms(AlgoSelection):\n    pounders: Type[Pounders] = Pounders\n    tranquilo_ls: Type[TranquiloLS] = TranquiloLS\n\n    @property\n    def GradientFree(self) -> BoundedGradientFreeLeastSquaresParallelAlgorithms:\n        return BoundedGradientFreeLeastSquaresParallelAlgorithms()\n\n    @property\n    def Local(self) -> BoundedLeastSquaresLocalParallelAlgorithms:\n        return BoundedLeastSquaresLocalParallelAlgorithms()\n\n\n@dataclass(frozen=True)\nclass NonlinearConstrainedParallelScalarAlgorithms(AlgoSelection):\n    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (\n        ScipyDifferentialEvolution\n    )\n\n    @property\n    def Bounded(self) -> BoundedNonlinearConstrainedParallelScalarAlgorithms:\n        return BoundedNonlinearConstrainedParallelScalarAlgorithms()\n\n    @property\n    def Global(self) -> GlobalNonlinearConstrainedParallelScalarAlgorithms:\n        return GlobalNonlinearConstrainedParallelScalarAlgorithms()\n\n    @property\n    def GradientFree(self) -> GradientFreeNonlinearConstrainedParallelScalarAlgorithms:\n        return GradientFreeNonlinearConstrainedParallelScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass GlobalGradientBasedAlgorithms(AlgoSelection):\n    scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping\n    scipy_dual_annealing: 
Type[ScipyDualAnnealing] = ScipyDualAnnealing\n    scipy_shgo: Type[ScipySHGO] = ScipySHGO\n\n    @property\n    def Bounded(self) -> BoundedGlobalGradientBasedAlgorithms:\n        return BoundedGlobalGradientBasedAlgorithms()\n\n    @property\n    def NonlinearConstrained(self) -> GlobalGradientBasedNonlinearConstrainedAlgorithms:\n        return GlobalGradientBasedNonlinearConstrainedAlgorithms()\n\n    @property\n    def Scalar(self) -> GlobalGradientBasedScalarAlgorithms:\n        return GlobalGradientBasedScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass GradientBasedLocalAlgorithms(AlgoSelection):\n    bhhh: Type[BHHH] = BHHH\n    fides: Type[Fides] = Fides\n    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad\n    ipopt: Type[Ipopt] = Ipopt\n    nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ\n    nlopt_lbfgsb: Type[NloptLBFGSB] = NloptLBFGSB\n    nlopt_mma: Type[NloptMMA] = NloptMMA\n    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP\n    nlopt_tnewton: Type[NloptTNewton] = NloptTNewton\n    nlopt_var: Type[NloptVAR] = NloptVAR\n    scipy_bfgs: Type[ScipyBFGS] = ScipyBFGS\n    scipy_conjugate_gradient: Type[ScipyConjugateGradient] = ScipyConjugateGradient\n    scipy_lbfgsb: Type[ScipyLBFGSB] = ScipyLBFGSB\n    scipy_ls_dogbox: Type[ScipyLSDogbox] = ScipyLSDogbox\n    scipy_ls_lm: Type[ScipyLSLM] = ScipyLSLM\n    scipy_ls_trf: Type[ScipyLSTRF] = ScipyLSTRF\n    scipy_newton_cg: Type[ScipyNewtonCG] = ScipyNewtonCG\n    scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP\n    scipy_truncated_newton: Type[ScipyTruncatedNewton] = ScipyTruncatedNewton\n    scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr\n\n    @property\n    def Bounded(self) -> BoundedGradientBasedLocalAlgorithms:\n        return BoundedGradientBasedLocalAlgorithms()\n\n    @property\n    def LeastSquares(self) -> GradientBasedLeastSquaresLocalAlgorithms:\n        return GradientBasedLeastSquaresLocalAlgorithms()\n\n    @property\n    def Likelihood(self) -> 
GradientBasedLikelihoodLocalAlgorithms:\n        return GradientBasedLikelihoodLocalAlgorithms()\n\n    @property\n    def NonlinearConstrained(self) -> GradientBasedLocalNonlinearConstrainedAlgorithms:\n        return GradientBasedLocalNonlinearConstrainedAlgorithms()\n\n    @property\n    def Scalar(self) -> GradientBasedLocalScalarAlgorithms:\n        return GradientBasedLocalScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass BoundedGradientBasedAlgorithms(AlgoSelection):\n    fides: Type[Fides] = Fides\n    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad\n    ipopt: Type[Ipopt] = Ipopt\n    nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ\n    nlopt_lbfgsb: Type[NloptLBFGSB] = NloptLBFGSB\n    nlopt_mma: Type[NloptMMA] = NloptMMA\n    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP\n    nlopt_tnewton: Type[NloptTNewton] = NloptTNewton\n    nlopt_var: Type[NloptVAR] = NloptVAR\n    scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping\n    scipy_dual_annealing: Type[ScipyDualAnnealing] = ScipyDualAnnealing\n    scipy_lbfgsb: Type[ScipyLBFGSB] = ScipyLBFGSB\n    scipy_ls_dogbox: Type[ScipyLSDogbox] = ScipyLSDogbox\n    scipy_ls_trf: Type[ScipyLSTRF] = ScipyLSTRF\n    scipy_shgo: Type[ScipySHGO] = ScipySHGO\n    scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP\n    scipy_truncated_newton: Type[ScipyTruncatedNewton] = ScipyTruncatedNewton\n    scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr\n\n    @property\n    def Global(self) -> BoundedGlobalGradientBasedAlgorithms:\n        return BoundedGlobalGradientBasedAlgorithms()\n\n    @property\n    def LeastSquares(self) -> BoundedGradientBasedLeastSquaresAlgorithms:\n        return BoundedGradientBasedLeastSquaresAlgorithms()\n\n    @property\n    def Local(self) -> BoundedGradientBasedLocalAlgorithms:\n        return BoundedGradientBasedLocalAlgorithms()\n\n    @property\n    def NonlinearConstrained(\n        self,\n    ) -> BoundedGradientBasedNonlinearConstrainedAlgorithms:\n        return 
BoundedGradientBasedNonlinearConstrainedAlgorithms()\n\n    @property\n    def Scalar(self) -> BoundedGradientBasedScalarAlgorithms:\n        return BoundedGradientBasedScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass GradientBasedNonlinearConstrainedAlgorithms(AlgoSelection):\n    ipopt: Type[Ipopt] = Ipopt\n    nlopt_mma: Type[NloptMMA] = NloptMMA\n    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP\n    scipy_shgo: Type[ScipySHGO] = ScipySHGO\n    scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP\n    scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr\n\n    @property\n    def Bounded(self) -> BoundedGradientBasedNonlinearConstrainedAlgorithms:\n        return BoundedGradientBasedNonlinearConstrainedAlgorithms()\n\n    @property\n    def Global(self) -> GlobalGradientBasedNonlinearConstrainedAlgorithms:\n        return GlobalGradientBasedNonlinearConstrainedAlgorithms()\n\n    @property\n    def Local(self) -> GradientBasedLocalNonlinearConstrainedAlgorithms:\n        return GradientBasedLocalNonlinearConstrainedAlgorithms()\n\n    @property\n    def Scalar(self) -> GradientBasedNonlinearConstrainedScalarAlgorithms:\n        return GradientBasedNonlinearConstrainedScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass GradientBasedScalarAlgorithms(AlgoSelection):\n    fides: Type[Fides] = Fides\n    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad\n    ipopt: Type[Ipopt] = Ipopt\n    nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ\n    nlopt_lbfgsb: Type[NloptLBFGSB] = NloptLBFGSB\n    nlopt_mma: Type[NloptMMA] = NloptMMA\n    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP\n    nlopt_tnewton: Type[NloptTNewton] = NloptTNewton\n    nlopt_var: Type[NloptVAR] = NloptVAR\n    scipy_bfgs: Type[ScipyBFGS] = ScipyBFGS\n    scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping\n    scipy_conjugate_gradient: Type[ScipyConjugateGradient] = ScipyConjugateGradient\n    scipy_dual_annealing: Type[ScipyDualAnnealing] = ScipyDualAnnealing\n    scipy_lbfgsb: 
Type[ScipyLBFGSB] = ScipyLBFGSB\n    scipy_newton_cg: Type[ScipyNewtonCG] = ScipyNewtonCG\n    scipy_shgo: Type[ScipySHGO] = ScipySHGO\n    scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP\n    scipy_truncated_newton: Type[ScipyTruncatedNewton] = ScipyTruncatedNewton\n    scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr\n\n    @property\n    def Bounded(self) -> BoundedGradientBasedScalarAlgorithms:\n        return BoundedGradientBasedScalarAlgorithms()\n\n    @property\n    def Global(self) -> GlobalGradientBasedScalarAlgorithms:\n        return GlobalGradientBasedScalarAlgorithms()\n\n    @property\n    def Local(self) -> GradientBasedLocalScalarAlgorithms:\n        return GradientBasedLocalScalarAlgorithms()\n\n    @property\n    def NonlinearConstrained(self) -> GradientBasedNonlinearConstrainedScalarAlgorithms:\n        return GradientBasedNonlinearConstrainedScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass GradientBasedLeastSquaresAlgorithms(AlgoSelection):\n    scipy_ls_dogbox: Type[ScipyLSDogbox] = ScipyLSDogbox\n    scipy_ls_lm: Type[ScipyLSLM] = ScipyLSLM\n    scipy_ls_trf: Type[ScipyLSTRF] = ScipyLSTRF\n\n    @property\n    def Bounded(self) -> BoundedGradientBasedLeastSquaresAlgorithms:\n        return BoundedGradientBasedLeastSquaresAlgorithms()\n\n    @property\n    def Local(self) -> GradientBasedLeastSquaresLocalAlgorithms:\n        return GradientBasedLeastSquaresLocalAlgorithms()\n\n\n@dataclass(frozen=True)\nclass GradientBasedLikelihoodAlgorithms(AlgoSelection):\n    bhhh: Type[BHHH] = BHHH\n\n    @property\n    def Local(self) -> GradientBasedLikelihoodLocalAlgorithms:\n        return GradientBasedLikelihoodLocalAlgorithms()\n\n\n@dataclass(frozen=True)\nclass GlobalGradientFreeAlgorithms(AlgoSelection):\n    bayes_opt: Type[BayesOpt] = BayesOpt\n    gfo_differential_evolution: Type[GFODifferentialEvolution] = (\n        GFODifferentialEvolution\n    )\n    gfo_downhillsimplex: Type[GFODownhillSimplex] = GFODownhillSimplex\n    
gfo_evolution_strategy: Type[GFOEvolutionStrategy] = GFOEvolutionStrategy\n    gfo_genetic_algorithm: Type[GFOGeneticAlgorithm] = GFOGeneticAlgorithm\n    gfo_hillclimbing: Type[GFOHillClimbing] = GFOHillClimbing\n    gfo_parallel_tempering: Type[GFOParallelTempering] = GFOParallelTempering\n    gfo_pso: Type[GFOParticleSwarmOptimization] = GFOParticleSwarmOptimization\n    gfo_powells_method: Type[GFOPowellsMethod] = GFOPowellsMethod\n    gfo_repulsinghillclimbing: Type[GFORepulsingHillClimbing] = GFORepulsingHillClimbing\n    gfo_simulatedannealing: Type[GFOSimulatedAnnealing] = GFOSimulatedAnnealing\n    gfo_spiral_optimization: Type[GFOSpiralOptimization] = GFOSpiralOptimization\n    gfo_stochastichillclimbing: Type[GFOStochasticHillClimbing] = (\n        GFOStochasticHillClimbing\n    )\n    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim\n    nevergrad_cga: Type[NevergradCGA] = NevergradCGA\n    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES\n    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution\n    nevergrad_eda: Type[NevergradEDA] = NevergradEDA\n    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA\n    nevergrad_meta: Type[NevergradMeta] = NevergradMeta\n    nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt\n    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne\n    nevergrad_pso: Type[NevergradPSO] = NevergradPSO\n    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch\n    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch\n    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA\n    nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM\n    nlopt_direct: Type[NloptDirect] = NloptDirect\n    nlopt_esch: Type[NloptESCH] = NloptESCH\n    nlopt_isres: Type[NloptISRES] = NloptISRES\n    pygad: Type[Pygad] = Pygad\n    pygmo_bee_colony: Type[PygmoBeeColony] = PygmoBeeColony\n    pygmo_cmaes: Type[PygmoCmaes] = PygmoCmaes\n    
pygmo_compass_search: Type[PygmoCompassSearch] = PygmoCompassSearch\n    pygmo_de: Type[PygmoDe] = PygmoDe\n    pygmo_de1220: Type[PygmoDe1220] = PygmoDe1220\n    pygmo_gaco: Type[PygmoGaco] = PygmoGaco\n    pygmo_gwo: Type[PygmoGwo] = PygmoGwo\n    pygmo_ihs: Type[PygmoIhs] = PygmoIhs\n    pygmo_mbh: Type[PygmoMbh] = PygmoMbh\n    pygmo_pso: Type[PygmoPso] = PygmoPso\n    pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen\n    pygmo_sade: Type[PygmoSade] = PygmoSade\n    pygmo_sea: Type[PygmoSea] = PygmoSea\n    pygmo_sga: Type[PygmoSga] = PygmoSga\n    pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing\n    pygmo_xnes: Type[PygmoXnes] = PygmoXnes\n    pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO\n    pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO\n    pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO\n    scipy_brute: Type[ScipyBrute] = ScipyBrute\n    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (\n        ScipyDifferentialEvolution\n    )\n    scipy_direct: Type[ScipyDirect] = ScipyDirect\n\n    @property\n    def Bounded(self) -> BoundedGlobalGradientFreeAlgorithms:\n        return BoundedGlobalGradientFreeAlgorithms()\n\n    @property\n    def NonlinearConstrained(self) -> GlobalGradientFreeNonlinearConstrainedAlgorithms:\n        return GlobalGradientFreeNonlinearConstrainedAlgorithms()\n\n    @property\n    def Parallel(self) -> GlobalGradientFreeParallelAlgorithms:\n        return GlobalGradientFreeParallelAlgorithms()\n\n    @property\n    def Scalar(self) -> GlobalGradientFreeScalarAlgorithms:\n        return GlobalGradientFreeScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass GradientFreeLocalAlgorithms(AlgoSelection):\n    nag_dfols: Type[NagDFOLS] = NagDFOLS\n    nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA\n    neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel\n    nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA\n    
nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA\n    nlopt_newuoa: Type[NloptNEWUOA] = NloptNEWUOA\n    nlopt_neldermead: Type[NloptNelderMead] = NloptNelderMead\n    nlopt_praxis: Type[NloptPRAXIS] = NloptPRAXIS\n    nlopt_sbplx: Type[NloptSbplx] = NloptSbplx\n    pounders: Type[Pounders] = Pounders\n    scipy_cobyla: Type[ScipyCOBYLA] = ScipyCOBYLA\n    scipy_neldermead: Type[ScipyNelderMead] = ScipyNelderMead\n    scipy_powell: Type[ScipyPowell] = ScipyPowell\n    tao_pounders: Type[TAOPounders] = TAOPounders\n    tranquilo: Type[Tranquilo] = Tranquilo\n    tranquilo_ls: Type[TranquiloLS] = TranquiloLS\n\n    @property\n    def Bounded(self) -> BoundedGradientFreeLocalAlgorithms:\n        return BoundedGradientFreeLocalAlgorithms()\n\n    @property\n    def LeastSquares(self) -> GradientFreeLeastSquaresLocalAlgorithms:\n        return GradientFreeLeastSquaresLocalAlgorithms()\n\n    @property\n    def NonlinearConstrained(self) -> GradientFreeLocalNonlinearConstrainedAlgorithms:\n        return GradientFreeLocalNonlinearConstrainedAlgorithms()\n\n    @property\n    def Parallel(self) -> GradientFreeLocalParallelAlgorithms:\n        return GradientFreeLocalParallelAlgorithms()\n\n    @property\n    def Scalar(self) -> GradientFreeLocalScalarAlgorithms:\n        return GradientFreeLocalScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass BoundedGradientFreeAlgorithms(AlgoSelection):\n    bayes_opt: Type[BayesOpt] = BayesOpt\n    gfo_differential_evolution: Type[GFODifferentialEvolution] = (\n        GFODifferentialEvolution\n    )\n    gfo_downhillsimplex: Type[GFODownhillSimplex] = GFODownhillSimplex\n    gfo_evolution_strategy: Type[GFOEvolutionStrategy] = GFOEvolutionStrategy\n    gfo_genetic_algorithm: Type[GFOGeneticAlgorithm] = GFOGeneticAlgorithm\n    gfo_hillclimbing: Type[GFOHillClimbing] = GFOHillClimbing\n    gfo_parallel_tempering: Type[GFOParallelTempering] = GFOParallelTempering\n    gfo_pso: Type[GFOParticleSwarmOptimization] = 
GFOParticleSwarmOptimization\n    gfo_powells_method: Type[GFOPowellsMethod] = GFOPowellsMethod\n    gfo_repulsinghillclimbing: Type[GFORepulsingHillClimbing] = GFORepulsingHillClimbing\n    gfo_simulatedannealing: Type[GFOSimulatedAnnealing] = GFOSimulatedAnnealing\n    gfo_spiral_optimization: Type[GFOSpiralOptimization] = GFOSpiralOptimization\n    gfo_stochastichillclimbing: Type[GFOStochasticHillClimbing] = (\n        GFOStochasticHillClimbing\n    )\n    nag_dfols: Type[NagDFOLS] = NagDFOLS\n    nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA\n    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim\n    nevergrad_cga: Type[NevergradCGA] = NevergradCGA\n    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES\n    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution\n    nevergrad_eda: Type[NevergradEDA] = NevergradEDA\n    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA\n    nevergrad_meta: Type[NevergradMeta] = NevergradMeta\n    nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt\n    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne\n    nevergrad_pso: Type[NevergradPSO] = NevergradPSO\n    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch\n    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch\n    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA\n    nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA\n    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA\n    nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM\n    nlopt_direct: Type[NloptDirect] = NloptDirect\n    nlopt_esch: Type[NloptESCH] = NloptESCH\n    nlopt_isres: Type[NloptISRES] = NloptISRES\n    nlopt_newuoa: Type[NloptNEWUOA] = NloptNEWUOA\n    nlopt_neldermead: Type[NloptNelderMead] = NloptNelderMead\n    nlopt_sbplx: Type[NloptSbplx] = NloptSbplx\n    pounders: Type[Pounders] = Pounders\n    pygad: Type[Pygad] = Pygad\n    pygmo_bee_colony: Type[PygmoBeeColony] = 
PygmoBeeColony\n    pygmo_cmaes: Type[PygmoCmaes] = PygmoCmaes\n    pygmo_compass_search: Type[PygmoCompassSearch] = PygmoCompassSearch\n    pygmo_de: Type[PygmoDe] = PygmoDe\n    pygmo_de1220: Type[PygmoDe1220] = PygmoDe1220\n    pygmo_gaco: Type[PygmoGaco] = PygmoGaco\n    pygmo_gwo: Type[PygmoGwo] = PygmoGwo\n    pygmo_ihs: Type[PygmoIhs] = PygmoIhs\n    pygmo_mbh: Type[PygmoMbh] = PygmoMbh\n    pygmo_pso: Type[PygmoPso] = PygmoPso\n    pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen\n    pygmo_sade: Type[PygmoSade] = PygmoSade\n    pygmo_sea: Type[PygmoSea] = PygmoSea\n    pygmo_sga: Type[PygmoSga] = PygmoSga\n    pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing\n    pygmo_xnes: Type[PygmoXnes] = PygmoXnes\n    pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO\n    pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO\n    pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO\n    scipy_brute: Type[ScipyBrute] = ScipyBrute\n    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (\n        ScipyDifferentialEvolution\n    )\n    scipy_direct: Type[ScipyDirect] = ScipyDirect\n    scipy_neldermead: Type[ScipyNelderMead] = ScipyNelderMead\n    scipy_powell: Type[ScipyPowell] = ScipyPowell\n    tao_pounders: Type[TAOPounders] = TAOPounders\n    tranquilo: Type[Tranquilo] = Tranquilo\n    tranquilo_ls: Type[TranquiloLS] = TranquiloLS\n\n    @property\n    def Global(self) -> BoundedGlobalGradientFreeAlgorithms:\n        return BoundedGlobalGradientFreeAlgorithms()\n\n    @property\n    def LeastSquares(self) -> BoundedGradientFreeLeastSquaresAlgorithms:\n        return BoundedGradientFreeLeastSquaresAlgorithms()\n\n    @property\n    def Local(self) -> BoundedGradientFreeLocalAlgorithms:\n        return BoundedGradientFreeLocalAlgorithms()\n\n    @property\n    def NonlinearConstrained(self) -> BoundedGradientFreeNonlinearConstrainedAlgorithms:\n        return 
BoundedGradientFreeNonlinearConstrainedAlgorithms()\n\n    @property\n    def Parallel(self) -> BoundedGradientFreeParallelAlgorithms:\n        return BoundedGradientFreeParallelAlgorithms()\n\n    @property\n    def Scalar(self) -> BoundedGradientFreeScalarAlgorithms:\n        return BoundedGradientFreeScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass GradientFreeNonlinearConstrainedAlgorithms(AlgoSelection):\n    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA\n    nlopt_isres: Type[NloptISRES] = NloptISRES\n    scipy_cobyla: Type[ScipyCOBYLA] = ScipyCOBYLA\n    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (\n        ScipyDifferentialEvolution\n    )\n\n    @property\n    def Bounded(self) -> BoundedGradientFreeNonlinearConstrainedAlgorithms:\n        return BoundedGradientFreeNonlinearConstrainedAlgorithms()\n\n    @property\n    def Global(self) -> GlobalGradientFreeNonlinearConstrainedAlgorithms:\n        return GlobalGradientFreeNonlinearConstrainedAlgorithms()\n\n    @property\n    def Local(self) -> GradientFreeLocalNonlinearConstrainedAlgorithms:\n        return GradientFreeLocalNonlinearConstrainedAlgorithms()\n\n    @property\n    def Parallel(self) -> GradientFreeNonlinearConstrainedParallelAlgorithms:\n        return GradientFreeNonlinearConstrainedParallelAlgorithms()\n\n    @property\n    def Scalar(self) -> GradientFreeNonlinearConstrainedScalarAlgorithms:\n        return GradientFreeNonlinearConstrainedScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass GradientFreeScalarAlgorithms(AlgoSelection):\n    bayes_opt: Type[BayesOpt] = BayesOpt\n    gfo_differential_evolution: Type[GFODifferentialEvolution] = (\n        GFODifferentialEvolution\n    )\n    gfo_downhillsimplex: Type[GFODownhillSimplex] = GFODownhillSimplex\n    gfo_evolution_strategy: Type[GFOEvolutionStrategy] = GFOEvolutionStrategy\n    gfo_genetic_algorithm: Type[GFOGeneticAlgorithm] = GFOGeneticAlgorithm\n    gfo_hillclimbing: Type[GFOHillClimbing] = 
GFOHillClimbing\n    gfo_parallel_tempering: Type[GFOParallelTempering] = GFOParallelTempering\n    gfo_pso: Type[GFOParticleSwarmOptimization] = GFOParticleSwarmOptimization\n    gfo_powells_method: Type[GFOPowellsMethod] = GFOPowellsMethod\n    gfo_repulsinghillclimbing: Type[GFORepulsingHillClimbing] = GFORepulsingHillClimbing\n    gfo_simulatedannealing: Type[GFOSimulatedAnnealing] = GFOSimulatedAnnealing\n    gfo_spiral_optimization: Type[GFOSpiralOptimization] = GFOSpiralOptimization\n    gfo_stochastichillclimbing: Type[GFOStochasticHillClimbing] = (\n        GFOStochasticHillClimbing\n    )\n    nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA\n    neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel\n    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim\n    nevergrad_cga: Type[NevergradCGA] = NevergradCGA\n    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES\n    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution\n    nevergrad_eda: Type[NevergradEDA] = NevergradEDA\n    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA\n    nevergrad_meta: Type[NevergradMeta] = NevergradMeta\n    nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt\n    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne\n    nevergrad_pso: Type[NevergradPSO] = NevergradPSO\n    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch\n    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch\n    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA\n    nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA\n    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA\n    nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM\n    nlopt_direct: Type[NloptDirect] = NloptDirect\n    nlopt_esch: Type[NloptESCH] = NloptESCH\n    nlopt_isres: Type[NloptISRES] = NloptISRES\n    nlopt_newuoa: Type[NloptNEWUOA] = NloptNEWUOA\n    nlopt_neldermead: Type[NloptNelderMead] = NloptNelderMead\n   
 nlopt_praxis: Type[NloptPRAXIS] = NloptPRAXIS\n    nlopt_sbplx: Type[NloptSbplx] = NloptSbplx\n    pygad: Type[Pygad] = Pygad\n    pygmo_bee_colony: Type[PygmoBeeColony] = PygmoBeeColony\n    pygmo_cmaes: Type[PygmoCmaes] = PygmoCmaes\n    pygmo_compass_search: Type[PygmoCompassSearch] = PygmoCompassSearch\n    pygmo_de: Type[PygmoDe] = PygmoDe\n    pygmo_de1220: Type[PygmoDe1220] = PygmoDe1220\n    pygmo_gaco: Type[PygmoGaco] = PygmoGaco\n    pygmo_gwo: Type[PygmoGwo] = PygmoGwo\n    pygmo_ihs: Type[PygmoIhs] = PygmoIhs\n    pygmo_mbh: Type[PygmoMbh] = PygmoMbh\n    pygmo_pso: Type[PygmoPso] = PygmoPso\n    pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen\n    pygmo_sade: Type[PygmoSade] = PygmoSade\n    pygmo_sea: Type[PygmoSea] = PygmoSea\n    pygmo_sga: Type[PygmoSga] = PygmoSga\n    pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing\n    pygmo_xnes: Type[PygmoXnes] = PygmoXnes\n    pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO\n    pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO\n    pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO\n    scipy_brute: Type[ScipyBrute] = ScipyBrute\n    scipy_cobyla: Type[ScipyCOBYLA] = ScipyCOBYLA\n    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (\n        ScipyDifferentialEvolution\n    )\n    scipy_direct: Type[ScipyDirect] = ScipyDirect\n    scipy_neldermead: Type[ScipyNelderMead] = ScipyNelderMead\n    scipy_powell: Type[ScipyPowell] = ScipyPowell\n    tranquilo: Type[Tranquilo] = Tranquilo\n\n    @property\n    def Bounded(self) -> BoundedGradientFreeScalarAlgorithms:\n        return BoundedGradientFreeScalarAlgorithms()\n\n    @property\n    def Global(self) -> GlobalGradientFreeScalarAlgorithms:\n        return GlobalGradientFreeScalarAlgorithms()\n\n    @property\n    def Local(self) -> GradientFreeLocalScalarAlgorithms:\n        return GradientFreeLocalScalarAlgorithms()\n\n    @property\n    def 
NonlinearConstrained(self) -> GradientFreeNonlinearConstrainedScalarAlgorithms:\n        return GradientFreeNonlinearConstrainedScalarAlgorithms()\n\n    @property\n    def Parallel(self) -> GradientFreeParallelScalarAlgorithms:\n        return GradientFreeParallelScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass GradientFreeLeastSquaresAlgorithms(AlgoSelection):\n    nag_dfols: Type[NagDFOLS] = NagDFOLS\n    pounders: Type[Pounders] = Pounders\n    tao_pounders: Type[TAOPounders] = TAOPounders\n    tranquilo_ls: Type[TranquiloLS] = TranquiloLS\n\n    @property\n    def Bounded(self) -> BoundedGradientFreeLeastSquaresAlgorithms:\n        return BoundedGradientFreeLeastSquaresAlgorithms()\n\n    @property\n    def Local(self) -> GradientFreeLeastSquaresLocalAlgorithms:\n        return GradientFreeLeastSquaresLocalAlgorithms()\n\n    @property\n    def Parallel(self) -> GradientFreeLeastSquaresParallelAlgorithms:\n        return GradientFreeLeastSquaresParallelAlgorithms()\n\n\n@dataclass(frozen=True)\nclass GradientFreeParallelAlgorithms(AlgoSelection):\n    neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel\n    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim\n    nevergrad_cga: Type[NevergradCGA] = NevergradCGA\n    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES\n    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution\n    nevergrad_eda: Type[NevergradEDA] = NevergradEDA\n    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA\n    nevergrad_meta: Type[NevergradMeta] = NevergradMeta\n    nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt\n    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne\n    nevergrad_pso: Type[NevergradPSO] = NevergradPSO\n    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch\n    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch\n    nevergrad_tbpsa: Type[NevergradTBPSA] = 
NevergradTBPSA\n    pounders: Type[Pounders] = Pounders\n    pygad: Type[Pygad] = Pygad\n    pygmo_gaco: Type[PygmoGaco] = PygmoGaco\n    pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen\n    pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO\n    pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO\n    pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO\n    scipy_brute: Type[ScipyBrute] = ScipyBrute\n    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (\n        ScipyDifferentialEvolution\n    )\n    tranquilo: Type[Tranquilo] = Tranquilo\n    tranquilo_ls: Type[TranquiloLS] = TranquiloLS\n\n    @property\n    def Bounded(self) -> BoundedGradientFreeParallelAlgorithms:\n        return BoundedGradientFreeParallelAlgorithms()\n\n    @property\n    def Global(self) -> GlobalGradientFreeParallelAlgorithms:\n        return GlobalGradientFreeParallelAlgorithms()\n\n    @property\n    def LeastSquares(self) -> GradientFreeLeastSquaresParallelAlgorithms:\n        return GradientFreeLeastSquaresParallelAlgorithms()\n\n    @property\n    def Local(self) -> GradientFreeLocalParallelAlgorithms:\n        return GradientFreeLocalParallelAlgorithms()\n\n    @property\n    def NonlinearConstrained(\n        self,\n    ) -> GradientFreeNonlinearConstrainedParallelAlgorithms:\n        return GradientFreeNonlinearConstrainedParallelAlgorithms()\n\n    @property\n    def Scalar(self) -> GradientFreeParallelScalarAlgorithms:\n        return GradientFreeParallelScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass BoundedGlobalAlgorithms(AlgoSelection):\n    bayes_opt: Type[BayesOpt] = BayesOpt\n    gfo_differential_evolution: Type[GFODifferentialEvolution] = (\n        GFODifferentialEvolution\n    )\n    gfo_downhillsimplex: Type[GFODownhillSimplex] = GFODownhillSimplex\n    gfo_evolution_strategy: Type[GFOEvolutionStrategy] = GFOEvolutionStrategy\n    gfo_genetic_algorithm: Type[GFOGeneticAlgorithm] = 
GFOGeneticAlgorithm\n    gfo_hillclimbing: Type[GFOHillClimbing] = GFOHillClimbing\n    gfo_parallel_tempering: Type[GFOParallelTempering] = GFOParallelTempering\n    gfo_pso: Type[GFOParticleSwarmOptimization] = GFOParticleSwarmOptimization\n    gfo_powells_method: Type[GFOPowellsMethod] = GFOPowellsMethod\n    gfo_repulsinghillclimbing: Type[GFORepulsingHillClimbing] = GFORepulsingHillClimbing\n    gfo_simulatedannealing: Type[GFOSimulatedAnnealing] = GFOSimulatedAnnealing\n    gfo_spiral_optimization: Type[GFOSpiralOptimization] = GFOSpiralOptimization\n    gfo_stochastichillclimbing: Type[GFOStochasticHillClimbing] = (\n        GFOStochasticHillClimbing\n    )\n    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim\n    nevergrad_cga: Type[NevergradCGA] = NevergradCGA\n    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES\n    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution\n    nevergrad_eda: Type[NevergradEDA] = NevergradEDA\n    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA\n    nevergrad_meta: Type[NevergradMeta] = NevergradMeta\n    nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt\n    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne\n    nevergrad_pso: Type[NevergradPSO] = NevergradPSO\n    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch\n    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch\n    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA\n    nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM\n    nlopt_direct: Type[NloptDirect] = NloptDirect\n    nlopt_esch: Type[NloptESCH] = NloptESCH\n    nlopt_isres: Type[NloptISRES] = NloptISRES\n    pygad: Type[Pygad] = Pygad\n    pygmo_bee_colony: Type[PygmoBeeColony] = PygmoBeeColony\n    pygmo_cmaes: Type[PygmoCmaes] = PygmoCmaes\n    pygmo_compass_search: Type[PygmoCompassSearch] = PygmoCompassSearch\n    pygmo_de: Type[PygmoDe] = PygmoDe\n    pygmo_de1220: 
Type[PygmoDe1220] = PygmoDe1220\n    pygmo_gaco: Type[PygmoGaco] = PygmoGaco\n    pygmo_gwo: Type[PygmoGwo] = PygmoGwo\n    pygmo_ihs: Type[PygmoIhs] = PygmoIhs\n    pygmo_mbh: Type[PygmoMbh] = PygmoMbh\n    pygmo_pso: Type[PygmoPso] = PygmoPso\n    pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen\n    pygmo_sade: Type[PygmoSade] = PygmoSade\n    pygmo_sea: Type[PygmoSea] = PygmoSea\n    pygmo_sga: Type[PygmoSga] = PygmoSga\n    pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing\n    pygmo_xnes: Type[PygmoXnes] = PygmoXnes\n    pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO\n    pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO\n    pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO\n    scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping\n    scipy_brute: Type[ScipyBrute] = ScipyBrute\n    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (\n        ScipyDifferentialEvolution\n    )\n    scipy_direct: Type[ScipyDirect] = ScipyDirect\n    scipy_dual_annealing: Type[ScipyDualAnnealing] = ScipyDualAnnealing\n    scipy_shgo: Type[ScipySHGO] = ScipySHGO\n\n    @property\n    def GradientBased(self) -> BoundedGlobalGradientBasedAlgorithms:\n        return BoundedGlobalGradientBasedAlgorithms()\n\n    @property\n    def GradientFree(self) -> BoundedGlobalGradientFreeAlgorithms:\n        return BoundedGlobalGradientFreeAlgorithms()\n\n    @property\n    def NonlinearConstrained(self) -> BoundedGlobalNonlinearConstrainedAlgorithms:\n        return BoundedGlobalNonlinearConstrainedAlgorithms()\n\n    @property\n    def Parallel(self) -> BoundedGlobalParallelAlgorithms:\n        return BoundedGlobalParallelAlgorithms()\n\n    @property\n    def Scalar(self) -> BoundedGlobalScalarAlgorithms:\n        return BoundedGlobalScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass GlobalNonlinearConstrainedAlgorithms(AlgoSelection):\n    nlopt_isres: Type[NloptISRES] 
= NloptISRES\n    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (\n        ScipyDifferentialEvolution\n    )\n    scipy_shgo: Type[ScipySHGO] = ScipySHGO\n\n    @property\n    def Bounded(self) -> BoundedGlobalNonlinearConstrainedAlgorithms:\n        return BoundedGlobalNonlinearConstrainedAlgorithms()\n\n    @property\n    def GradientBased(self) -> GlobalGradientBasedNonlinearConstrainedAlgorithms:\n        return GlobalGradientBasedNonlinearConstrainedAlgorithms()\n\n    @property\n    def GradientFree(self) -> GlobalGradientFreeNonlinearConstrainedAlgorithms:\n        return GlobalGradientFreeNonlinearConstrainedAlgorithms()\n\n    @property\n    def Parallel(self) -> GlobalNonlinearConstrainedParallelAlgorithms:\n        return GlobalNonlinearConstrainedParallelAlgorithms()\n\n    @property\n    def Scalar(self) -> GlobalNonlinearConstrainedScalarAlgorithms:\n        return GlobalNonlinearConstrainedScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass GlobalScalarAlgorithms(AlgoSelection):\n    bayes_opt: Type[BayesOpt] = BayesOpt\n    gfo_differential_evolution: Type[GFODifferentialEvolution] = (\n        GFODifferentialEvolution\n    )\n    gfo_downhillsimplex: Type[GFODownhillSimplex] = GFODownhillSimplex\n    gfo_evolution_strategy: Type[GFOEvolutionStrategy] = GFOEvolutionStrategy\n    gfo_genetic_algorithm: Type[GFOGeneticAlgorithm] = GFOGeneticAlgorithm\n    gfo_hillclimbing: Type[GFOHillClimbing] = GFOHillClimbing\n    gfo_parallel_tempering: Type[GFOParallelTempering] = GFOParallelTempering\n    gfo_pso: Type[GFOParticleSwarmOptimization] = GFOParticleSwarmOptimization\n    gfo_powells_method: Type[GFOPowellsMethod] = GFOPowellsMethod\n    gfo_repulsinghillclimbing: Type[GFORepulsingHillClimbing] = GFORepulsingHillClimbing\n    gfo_simulatedannealing: Type[GFOSimulatedAnnealing] = GFOSimulatedAnnealing\n    gfo_spiral_optimization: Type[GFOSpiralOptimization] = GFOSpiralOptimization\n    gfo_stochastichillclimbing: 
Type[GFOStochasticHillClimbing] = (\n        GFOStochasticHillClimbing\n    )\n    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim\n    nevergrad_cga: Type[NevergradCGA] = NevergradCGA\n    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES\n    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution\n    nevergrad_eda: Type[NevergradEDA] = NevergradEDA\n    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA\n    nevergrad_meta: Type[NevergradMeta] = NevergradMeta\n    nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt\n    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne\n    nevergrad_pso: Type[NevergradPSO] = NevergradPSO\n    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch\n    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch\n    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA\n    nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM\n    nlopt_direct: Type[NloptDirect] = NloptDirect\n    nlopt_esch: Type[NloptESCH] = NloptESCH\n    nlopt_isres: Type[NloptISRES] = NloptISRES\n    pygad: Type[Pygad] = Pygad\n    pygmo_bee_colony: Type[PygmoBeeColony] = PygmoBeeColony\n    pygmo_cmaes: Type[PygmoCmaes] = PygmoCmaes\n    pygmo_compass_search: Type[PygmoCompassSearch] = PygmoCompassSearch\n    pygmo_de: Type[PygmoDe] = PygmoDe\n    pygmo_de1220: Type[PygmoDe1220] = PygmoDe1220\n    pygmo_gaco: Type[PygmoGaco] = PygmoGaco\n    pygmo_gwo: Type[PygmoGwo] = PygmoGwo\n    pygmo_ihs: Type[PygmoIhs] = PygmoIhs\n    pygmo_mbh: Type[PygmoMbh] = PygmoMbh\n    pygmo_pso: Type[PygmoPso] = PygmoPso\n    pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen\n    pygmo_sade: Type[PygmoSade] = PygmoSade\n    pygmo_sea: Type[PygmoSea] = PygmoSea\n    pygmo_sga: Type[PygmoSga] = PygmoSga\n    pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing\n    pygmo_xnes: Type[PygmoXnes] = PygmoXnes\n    pyswarms_general: 
Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO\n    pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO\n    pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO\n    scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping\n    scipy_brute: Type[ScipyBrute] = ScipyBrute\n    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (\n        ScipyDifferentialEvolution\n    )\n    scipy_direct: Type[ScipyDirect] = ScipyDirect\n    scipy_dual_annealing: Type[ScipyDualAnnealing] = ScipyDualAnnealing\n    scipy_shgo: Type[ScipySHGO] = ScipySHGO\n\n    @property\n    def Bounded(self) -> BoundedGlobalScalarAlgorithms:\n        return BoundedGlobalScalarAlgorithms()\n\n    @property\n    def GradientBased(self) -> GlobalGradientBasedScalarAlgorithms:\n        return GlobalGradientBasedScalarAlgorithms()\n\n    @property\n    def GradientFree(self) -> GlobalGradientFreeScalarAlgorithms:\n        return GlobalGradientFreeScalarAlgorithms()\n\n    @property\n    def NonlinearConstrained(self) -> GlobalNonlinearConstrainedScalarAlgorithms:\n        return GlobalNonlinearConstrainedScalarAlgorithms()\n\n    @property\n    def Parallel(self) -> GlobalParallelScalarAlgorithms:\n        return GlobalParallelScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass GlobalParallelAlgorithms(AlgoSelection):\n    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim\n    nevergrad_cga: Type[NevergradCGA] = NevergradCGA\n    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES\n    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution\n    nevergrad_eda: Type[NevergradEDA] = NevergradEDA\n    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA\n    nevergrad_meta: Type[NevergradMeta] = NevergradMeta\n    nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt\n    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne\n    nevergrad_pso: Type[NevergradPSO] = NevergradPSO\n    
nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch\n    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch\n    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA\n    pygad: Type[Pygad] = Pygad\n    pygmo_gaco: Type[PygmoGaco] = PygmoGaco\n    pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen\n    pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO\n    pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO\n    pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO\n    scipy_brute: Type[ScipyBrute] = ScipyBrute\n    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (\n        ScipyDifferentialEvolution\n    )\n\n    @property\n    def Bounded(self) -> BoundedGlobalParallelAlgorithms:\n        return BoundedGlobalParallelAlgorithms()\n\n    @property\n    def GradientFree(self) -> GlobalGradientFreeParallelAlgorithms:\n        return GlobalGradientFreeParallelAlgorithms()\n\n    @property\n    def NonlinearConstrained(self) -> GlobalNonlinearConstrainedParallelAlgorithms:\n        return GlobalNonlinearConstrainedParallelAlgorithms()\n\n    @property\n    def Scalar(self) -> GlobalParallelScalarAlgorithms:\n        return GlobalParallelScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass BoundedLocalAlgorithms(AlgoSelection):\n    fides: Type[Fides] = Fides\n    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad\n    ipopt: Type[Ipopt] = Ipopt\n    nag_dfols: Type[NagDFOLS] = NagDFOLS\n    nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA\n    nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA\n    nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ\n    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA\n    nlopt_lbfgsb: Type[NloptLBFGSB] = NloptLBFGSB\n    nlopt_mma: Type[NloptMMA] = NloptMMA\n    nlopt_newuoa: Type[NloptNEWUOA] = NloptNEWUOA\n    nlopt_neldermead: Type[NloptNelderMead] = NloptNelderMead\n    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP\n    
nlopt_sbplx: Type[NloptSbplx] = NloptSbplx\n    nlopt_tnewton: Type[NloptTNewton] = NloptTNewton\n    nlopt_var: Type[NloptVAR] = NloptVAR\n    pounders: Type[Pounders] = Pounders\n    scipy_lbfgsb: Type[ScipyLBFGSB] = ScipyLBFGSB\n    scipy_ls_dogbox: Type[ScipyLSDogbox] = ScipyLSDogbox\n    scipy_ls_trf: Type[ScipyLSTRF] = ScipyLSTRF\n    scipy_neldermead: Type[ScipyNelderMead] = ScipyNelderMead\n    scipy_powell: Type[ScipyPowell] = ScipyPowell\n    scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP\n    scipy_truncated_newton: Type[ScipyTruncatedNewton] = ScipyTruncatedNewton\n    scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr\n    tao_pounders: Type[TAOPounders] = TAOPounders\n    tranquilo: Type[Tranquilo] = Tranquilo\n    tranquilo_ls: Type[TranquiloLS] = TranquiloLS\n\n    @property\n    def GradientBased(self) -> BoundedGradientBasedLocalAlgorithms:\n        return BoundedGradientBasedLocalAlgorithms()\n\n    @property\n    def GradientFree(self) -> BoundedGradientFreeLocalAlgorithms:\n        return BoundedGradientFreeLocalAlgorithms()\n\n    @property\n    def LeastSquares(self) -> BoundedLeastSquaresLocalAlgorithms:\n        return BoundedLeastSquaresLocalAlgorithms()\n\n    @property\n    def NonlinearConstrained(self) -> BoundedLocalNonlinearConstrainedAlgorithms:\n        return BoundedLocalNonlinearConstrainedAlgorithms()\n\n    @property\n    def Parallel(self) -> BoundedLocalParallelAlgorithms:\n        return BoundedLocalParallelAlgorithms()\n\n    @property\n    def Scalar(self) -> BoundedLocalScalarAlgorithms:\n        return BoundedLocalScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass LocalNonlinearConstrainedAlgorithms(AlgoSelection):\n    ipopt: Type[Ipopt] = Ipopt\n    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA\n    nlopt_mma: Type[NloptMMA] = NloptMMA\n    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP\n    scipy_cobyla: Type[ScipyCOBYLA] = ScipyCOBYLA\n    scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP\n    scipy_trust_constr: 
Type[ScipyTrustConstr] = ScipyTrustConstr\n\n    @property\n    def Bounded(self) -> BoundedLocalNonlinearConstrainedAlgorithms:\n        return BoundedLocalNonlinearConstrainedAlgorithms()\n\n    @property\n    def GradientBased(self) -> GradientBasedLocalNonlinearConstrainedAlgorithms:\n        return GradientBasedLocalNonlinearConstrainedAlgorithms()\n\n    @property\n    def GradientFree(self) -> GradientFreeLocalNonlinearConstrainedAlgorithms:\n        return GradientFreeLocalNonlinearConstrainedAlgorithms()\n\n    @property\n    def Scalar(self) -> LocalNonlinearConstrainedScalarAlgorithms:\n        return LocalNonlinearConstrainedScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass LocalScalarAlgorithms(AlgoSelection):\n    fides: Type[Fides] = Fides\n    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad\n    ipopt: Type[Ipopt] = Ipopt\n    nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA\n    neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel\n    nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA\n    nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ\n    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA\n    nlopt_lbfgsb: Type[NloptLBFGSB] = NloptLBFGSB\n    nlopt_mma: Type[NloptMMA] = NloptMMA\n    nlopt_newuoa: Type[NloptNEWUOA] = NloptNEWUOA\n    nlopt_neldermead: Type[NloptNelderMead] = NloptNelderMead\n    nlopt_praxis: Type[NloptPRAXIS] = NloptPRAXIS\n    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP\n    nlopt_sbplx: Type[NloptSbplx] = NloptSbplx\n    nlopt_tnewton: Type[NloptTNewton] = NloptTNewton\n    nlopt_var: Type[NloptVAR] = NloptVAR\n    scipy_bfgs: Type[ScipyBFGS] = ScipyBFGS\n    scipy_cobyla: Type[ScipyCOBYLA] = ScipyCOBYLA\n    scipy_conjugate_gradient: Type[ScipyConjugateGradient] = ScipyConjugateGradient\n    scipy_lbfgsb: Type[ScipyLBFGSB] = ScipyLBFGSB\n    scipy_neldermead: Type[ScipyNelderMead] = ScipyNelderMead\n    scipy_newton_cg: Type[ScipyNewtonCG] = ScipyNewtonCG\n    scipy_powell: Type[ScipyPowell] = ScipyPowell\n    
scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP\n    scipy_truncated_newton: Type[ScipyTruncatedNewton] = ScipyTruncatedNewton\n    scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr\n    tranquilo: Type[Tranquilo] = Tranquilo\n\n    @property\n    def Bounded(self) -> BoundedLocalScalarAlgorithms:\n        return BoundedLocalScalarAlgorithms()\n\n    @property\n    def GradientBased(self) -> GradientBasedLocalScalarAlgorithms:\n        return GradientBasedLocalScalarAlgorithms()\n\n    @property\n    def GradientFree(self) -> GradientFreeLocalScalarAlgorithms:\n        return GradientFreeLocalScalarAlgorithms()\n\n    @property\n    def NonlinearConstrained(self) -> LocalNonlinearConstrainedScalarAlgorithms:\n        return LocalNonlinearConstrainedScalarAlgorithms()\n\n    @property\n    def Parallel(self) -> LocalParallelScalarAlgorithms:\n        return LocalParallelScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass LeastSquaresLocalAlgorithms(AlgoSelection):\n    nag_dfols: Type[NagDFOLS] = NagDFOLS\n    pounders: Type[Pounders] = Pounders\n    scipy_ls_dogbox: Type[ScipyLSDogbox] = ScipyLSDogbox\n    scipy_ls_lm: Type[ScipyLSLM] = ScipyLSLM\n    scipy_ls_trf: Type[ScipyLSTRF] = ScipyLSTRF\n    tao_pounders: Type[TAOPounders] = TAOPounders\n    tranquilo_ls: Type[TranquiloLS] = TranquiloLS\n\n    @property\n    def Bounded(self) -> BoundedLeastSquaresLocalAlgorithms:\n        return BoundedLeastSquaresLocalAlgorithms()\n\n    @property\n    def GradientBased(self) -> GradientBasedLeastSquaresLocalAlgorithms:\n        return GradientBasedLeastSquaresLocalAlgorithms()\n\n    @property\n    def GradientFree(self) -> GradientFreeLeastSquaresLocalAlgorithms:\n        return GradientFreeLeastSquaresLocalAlgorithms()\n\n    @property\n    def Parallel(self) -> LeastSquaresLocalParallelAlgorithms:\n        return LeastSquaresLocalParallelAlgorithms()\n\n\n@dataclass(frozen=True)\nclass LikelihoodLocalAlgorithms(AlgoSelection):\n    bhhh: Type[BHHH] = 
BHHH\n\n    @property\n    def GradientBased(self) -> GradientBasedLikelihoodLocalAlgorithms:\n        return GradientBasedLikelihoodLocalAlgorithms()\n\n\n@dataclass(frozen=True)\nclass LocalParallelAlgorithms(AlgoSelection):\n    neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel\n    pounders: Type[Pounders] = Pounders\n    tranquilo: Type[Tranquilo] = Tranquilo\n    tranquilo_ls: Type[TranquiloLS] = TranquiloLS\n\n    @property\n    def Bounded(self) -> BoundedLocalParallelAlgorithms:\n        return BoundedLocalParallelAlgorithms()\n\n    @property\n    def GradientFree(self) -> GradientFreeLocalParallelAlgorithms:\n        return GradientFreeLocalParallelAlgorithms()\n\n    @property\n    def LeastSquares(self) -> LeastSquaresLocalParallelAlgorithms:\n        return LeastSquaresLocalParallelAlgorithms()\n\n    @property\n    def Scalar(self) -> LocalParallelScalarAlgorithms:\n        return LocalParallelScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass BoundedNonlinearConstrainedAlgorithms(AlgoSelection):\n    ipopt: Type[Ipopt] = Ipopt\n    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA\n    nlopt_isres: Type[NloptISRES] = NloptISRES\n    nlopt_mma: Type[NloptMMA] = NloptMMA\n    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP\n    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (\n        ScipyDifferentialEvolution\n    )\n    scipy_shgo: Type[ScipySHGO] = ScipySHGO\n    scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP\n    scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr\n\n    @property\n    def Global(self) -> BoundedGlobalNonlinearConstrainedAlgorithms:\n        return BoundedGlobalNonlinearConstrainedAlgorithms()\n\n    @property\n    def GradientBased(self) -> BoundedGradientBasedNonlinearConstrainedAlgorithms:\n        return BoundedGradientBasedNonlinearConstrainedAlgorithms()\n\n    @property\n    def GradientFree(self) -> BoundedGradientFreeNonlinearConstrainedAlgorithms:\n        return 
BoundedGradientFreeNonlinearConstrainedAlgorithms()\n\n    @property\n    def Local(self) -> BoundedLocalNonlinearConstrainedAlgorithms:\n        return BoundedLocalNonlinearConstrainedAlgorithms()\n\n    @property\n    def Parallel(self) -> BoundedNonlinearConstrainedParallelAlgorithms:\n        return BoundedNonlinearConstrainedParallelAlgorithms()\n\n    @property\n    def Scalar(self) -> BoundedNonlinearConstrainedScalarAlgorithms:\n        return BoundedNonlinearConstrainedScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass BoundedScalarAlgorithms(AlgoSelection):\n    bayes_opt: Type[BayesOpt] = BayesOpt\n    fides: Type[Fides] = Fides\n    gfo_differential_evolution: Type[GFODifferentialEvolution] = (\n        GFODifferentialEvolution\n    )\n    gfo_downhillsimplex: Type[GFODownhillSimplex] = GFODownhillSimplex\n    gfo_evolution_strategy: Type[GFOEvolutionStrategy] = GFOEvolutionStrategy\n    gfo_genetic_algorithm: Type[GFOGeneticAlgorithm] = GFOGeneticAlgorithm\n    gfo_hillclimbing: Type[GFOHillClimbing] = GFOHillClimbing\n    gfo_parallel_tempering: Type[GFOParallelTempering] = GFOParallelTempering\n    gfo_pso: Type[GFOParticleSwarmOptimization] = GFOParticleSwarmOptimization\n    gfo_powells_method: Type[GFOPowellsMethod] = GFOPowellsMethod\n    gfo_repulsinghillclimbing: Type[GFORepulsingHillClimbing] = GFORepulsingHillClimbing\n    gfo_simulatedannealing: Type[GFOSimulatedAnnealing] = GFOSimulatedAnnealing\n    gfo_spiral_optimization: Type[GFOSpiralOptimization] = GFOSpiralOptimization\n    gfo_stochastichillclimbing: Type[GFOStochasticHillClimbing] = (\n        GFOStochasticHillClimbing\n    )\n    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad\n    ipopt: Type[Ipopt] = Ipopt\n    nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA\n    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim\n    nevergrad_cga: Type[NevergradCGA] = NevergradCGA\n    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES\n    nevergrad_de: 
Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution\n    nevergrad_eda: Type[NevergradEDA] = NevergradEDA\n    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA\n    nevergrad_meta: Type[NevergradMeta] = NevergradMeta\n    nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt\n    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne\n    nevergrad_pso: Type[NevergradPSO] = NevergradPSO\n    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch\n    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch\n    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA\n    nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA\n    nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ\n    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA\n    nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM\n    nlopt_direct: Type[NloptDirect] = NloptDirect\n    nlopt_esch: Type[NloptESCH] = NloptESCH\n    nlopt_isres: Type[NloptISRES] = NloptISRES\n    nlopt_lbfgsb: Type[NloptLBFGSB] = NloptLBFGSB\n    nlopt_mma: Type[NloptMMA] = NloptMMA\n    nlopt_newuoa: Type[NloptNEWUOA] = NloptNEWUOA\n    nlopt_neldermead: Type[NloptNelderMead] = NloptNelderMead\n    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP\n    nlopt_sbplx: Type[NloptSbplx] = NloptSbplx\n    nlopt_tnewton: Type[NloptTNewton] = NloptTNewton\n    nlopt_var: Type[NloptVAR] = NloptVAR\n    pygad: Type[Pygad] = Pygad\n    pygmo_bee_colony: Type[PygmoBeeColony] = PygmoBeeColony\n    pygmo_cmaes: Type[PygmoCmaes] = PygmoCmaes\n    pygmo_compass_search: Type[PygmoCompassSearch] = PygmoCompassSearch\n    pygmo_de: Type[PygmoDe] = PygmoDe\n    pygmo_de1220: Type[PygmoDe1220] = PygmoDe1220\n    pygmo_gaco: Type[PygmoGaco] = PygmoGaco\n    pygmo_gwo: Type[PygmoGwo] = PygmoGwo\n    pygmo_ihs: Type[PygmoIhs] = PygmoIhs\n    pygmo_mbh: Type[PygmoMbh] = PygmoMbh\n    pygmo_pso: Type[PygmoPso] = PygmoPso\n    pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen\n    pygmo_sade: 
Type[PygmoSade] = PygmoSade\n    pygmo_sea: Type[PygmoSea] = PygmoSea\n    pygmo_sga: Type[PygmoSga] = PygmoSga\n    pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing\n    pygmo_xnes: Type[PygmoXnes] = PygmoXnes\n    pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO\n    pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO\n    pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO\n    scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping\n    scipy_brute: Type[ScipyBrute] = ScipyBrute\n    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (\n        ScipyDifferentialEvolution\n    )\n    scipy_direct: Type[ScipyDirect] = ScipyDirect\n    scipy_dual_annealing: Type[ScipyDualAnnealing] = ScipyDualAnnealing\n    scipy_lbfgsb: Type[ScipyLBFGSB] = ScipyLBFGSB\n    scipy_neldermead: Type[ScipyNelderMead] = ScipyNelderMead\n    scipy_powell: Type[ScipyPowell] = ScipyPowell\n    scipy_shgo: Type[ScipySHGO] = ScipySHGO\n    scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP\n    scipy_truncated_newton: Type[ScipyTruncatedNewton] = ScipyTruncatedNewton\n    scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr\n    tranquilo: Type[Tranquilo] = Tranquilo\n\n    @property\n    def Global(self) -> BoundedGlobalScalarAlgorithms:\n        return BoundedGlobalScalarAlgorithms()\n\n    @property\n    def GradientBased(self) -> BoundedGradientBasedScalarAlgorithms:\n        return BoundedGradientBasedScalarAlgorithms()\n\n    @property\n    def GradientFree(self) -> BoundedGradientFreeScalarAlgorithms:\n        return BoundedGradientFreeScalarAlgorithms()\n\n    @property\n    def Local(self) -> BoundedLocalScalarAlgorithms:\n        return BoundedLocalScalarAlgorithms()\n\n    @property\n    def NonlinearConstrained(self) -> BoundedNonlinearConstrainedScalarAlgorithms:\n        return BoundedNonlinearConstrainedScalarAlgorithms()\n\n    @property\n    def Parallel(self) -> 
BoundedParallelScalarAlgorithms:\n        return BoundedParallelScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass BoundedLeastSquaresAlgorithms(AlgoSelection):\n    nag_dfols: Type[NagDFOLS] = NagDFOLS\n    pounders: Type[Pounders] = Pounders\n    scipy_ls_dogbox: Type[ScipyLSDogbox] = ScipyLSDogbox\n    scipy_ls_trf: Type[ScipyLSTRF] = ScipyLSTRF\n    tao_pounders: Type[TAOPounders] = TAOPounders\n    tranquilo_ls: Type[TranquiloLS] = TranquiloLS\n\n    @property\n    def GradientBased(self) -> BoundedGradientBasedLeastSquaresAlgorithms:\n        return BoundedGradientBasedLeastSquaresAlgorithms()\n\n    @property\n    def GradientFree(self) -> BoundedGradientFreeLeastSquaresAlgorithms:\n        return BoundedGradientFreeLeastSquaresAlgorithms()\n\n    @property\n    def Local(self) -> BoundedLeastSquaresLocalAlgorithms:\n        return BoundedLeastSquaresLocalAlgorithms()\n\n    @property\n    def Parallel(self) -> BoundedLeastSquaresParallelAlgorithms:\n        return BoundedLeastSquaresParallelAlgorithms()\n\n\n@dataclass(frozen=True)\nclass BoundedParallelAlgorithms(AlgoSelection):\n    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim\n    nevergrad_cga: Type[NevergradCGA] = NevergradCGA\n    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES\n    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution\n    nevergrad_eda: Type[NevergradEDA] = NevergradEDA\n    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA\n    nevergrad_meta: Type[NevergradMeta] = NevergradMeta\n    nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt\n    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne\n    nevergrad_pso: Type[NevergradPSO] = NevergradPSO\n    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch\n    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch\n    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA\n    pounders: Type[Pounders] = 
Pounders\n    pygad: Type[Pygad] = Pygad\n    pygmo_gaco: Type[PygmoGaco] = PygmoGaco\n    pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen\n    pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO\n    pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO\n    pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO\n    scipy_brute: Type[ScipyBrute] = ScipyBrute\n    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (\n        ScipyDifferentialEvolution\n    )\n    tranquilo: Type[Tranquilo] = Tranquilo\n    tranquilo_ls: Type[TranquiloLS] = TranquiloLS\n\n    @property\n    def Global(self) -> BoundedGlobalParallelAlgorithms:\n        return BoundedGlobalParallelAlgorithms()\n\n    @property\n    def GradientFree(self) -> BoundedGradientFreeParallelAlgorithms:\n        return BoundedGradientFreeParallelAlgorithms()\n\n    @property\n    def LeastSquares(self) -> BoundedLeastSquaresParallelAlgorithms:\n        return BoundedLeastSquaresParallelAlgorithms()\n\n    @property\n    def Local(self) -> BoundedLocalParallelAlgorithms:\n        return BoundedLocalParallelAlgorithms()\n\n    @property\n    def NonlinearConstrained(self) -> BoundedNonlinearConstrainedParallelAlgorithms:\n        return BoundedNonlinearConstrainedParallelAlgorithms()\n\n    @property\n    def Scalar(self) -> BoundedParallelScalarAlgorithms:\n        return BoundedParallelScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass NonlinearConstrainedScalarAlgorithms(AlgoSelection):\n    ipopt: Type[Ipopt] = Ipopt\n    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA\n    nlopt_isres: Type[NloptISRES] = NloptISRES\n    nlopt_mma: Type[NloptMMA] = NloptMMA\n    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP\n    scipy_cobyla: Type[ScipyCOBYLA] = ScipyCOBYLA\n    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (\n        ScipyDifferentialEvolution\n    )\n    scipy_shgo: Type[ScipySHGO] = ScipySHGO\n    scipy_slsqp: 
Type[ScipySLSQP] = ScipySLSQP\n    scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr\n\n    @property\n    def Bounded(self) -> BoundedNonlinearConstrainedScalarAlgorithms:\n        return BoundedNonlinearConstrainedScalarAlgorithms()\n\n    @property\n    def Global(self) -> GlobalNonlinearConstrainedScalarAlgorithms:\n        return GlobalNonlinearConstrainedScalarAlgorithms()\n\n    @property\n    def GradientBased(self) -> GradientBasedNonlinearConstrainedScalarAlgorithms:\n        return GradientBasedNonlinearConstrainedScalarAlgorithms()\n\n    @property\n    def GradientFree(self) -> GradientFreeNonlinearConstrainedScalarAlgorithms:\n        return GradientFreeNonlinearConstrainedScalarAlgorithms()\n\n    @property\n    def Local(self) -> LocalNonlinearConstrainedScalarAlgorithms:\n        return LocalNonlinearConstrainedScalarAlgorithms()\n\n    @property\n    def Parallel(self) -> NonlinearConstrainedParallelScalarAlgorithms:\n        return NonlinearConstrainedParallelScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass NonlinearConstrainedParallelAlgorithms(AlgoSelection):\n    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (\n        ScipyDifferentialEvolution\n    )\n\n    @property\n    def Bounded(self) -> BoundedNonlinearConstrainedParallelAlgorithms:\n        return BoundedNonlinearConstrainedParallelAlgorithms()\n\n    @property\n    def Global(self) -> GlobalNonlinearConstrainedParallelAlgorithms:\n        return GlobalNonlinearConstrainedParallelAlgorithms()\n\n    @property\n    def GradientFree(self) -> GradientFreeNonlinearConstrainedParallelAlgorithms:\n        return GradientFreeNonlinearConstrainedParallelAlgorithms()\n\n    @property\n    def Scalar(self) -> NonlinearConstrainedParallelScalarAlgorithms:\n        return NonlinearConstrainedParallelScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass ParallelScalarAlgorithms(AlgoSelection):\n    neldermead_parallel: Type[NelderMeadParallel] = 
NelderMeadParallel\n    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim\n    nevergrad_cga: Type[NevergradCGA] = NevergradCGA\n    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES\n    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution\n    nevergrad_eda: Type[NevergradEDA] = NevergradEDA\n    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA\n    nevergrad_meta: Type[NevergradMeta] = NevergradMeta\n    nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt\n    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne\n    nevergrad_pso: Type[NevergradPSO] = NevergradPSO\n    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch\n    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch\n    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA\n    pygad: Type[Pygad] = Pygad\n    pygmo_gaco: Type[PygmoGaco] = PygmoGaco\n    pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen\n    pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO\n    pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO\n    pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO\n    scipy_brute: Type[ScipyBrute] = ScipyBrute\n    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (\n        ScipyDifferentialEvolution\n    )\n    tranquilo: Type[Tranquilo] = Tranquilo\n\n    @property\n    def Bounded(self) -> BoundedParallelScalarAlgorithms:\n        return BoundedParallelScalarAlgorithms()\n\n    @property\n    def Global(self) -> GlobalParallelScalarAlgorithms:\n        return GlobalParallelScalarAlgorithms()\n\n    @property\n    def GradientFree(self) -> GradientFreeParallelScalarAlgorithms:\n        return GradientFreeParallelScalarAlgorithms()\n\n    @property\n    def Local(self) -> LocalParallelScalarAlgorithms:\n        return LocalParallelScalarAlgorithms()\n\n    @property\n    def NonlinearConstrained(self) -> 
NonlinearConstrainedParallelScalarAlgorithms:\n        return NonlinearConstrainedParallelScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass LeastSquaresParallelAlgorithms(AlgoSelection):\n    pounders: Type[Pounders] = Pounders\n    tranquilo_ls: Type[TranquiloLS] = TranquiloLS\n\n    @property\n    def Bounded(self) -> BoundedLeastSquaresParallelAlgorithms:\n        return BoundedLeastSquaresParallelAlgorithms()\n\n    @property\n    def GradientFree(self) -> GradientFreeLeastSquaresParallelAlgorithms:\n        return GradientFreeLeastSquaresParallelAlgorithms()\n\n    @property\n    def Local(self) -> LeastSquaresLocalParallelAlgorithms:\n        return LeastSquaresLocalParallelAlgorithms()\n\n\n@dataclass(frozen=True)\nclass GradientBasedAlgorithms(AlgoSelection):\n    bhhh: Type[BHHH] = BHHH\n    fides: Type[Fides] = Fides\n    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad\n    ipopt: Type[Ipopt] = Ipopt\n    nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ\n    nlopt_lbfgsb: Type[NloptLBFGSB] = NloptLBFGSB\n    nlopt_mma: Type[NloptMMA] = NloptMMA\n    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP\n    nlopt_tnewton: Type[NloptTNewton] = NloptTNewton\n    nlopt_var: Type[NloptVAR] = NloptVAR\n    scipy_bfgs: Type[ScipyBFGS] = ScipyBFGS\n    scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping\n    scipy_conjugate_gradient: Type[ScipyConjugateGradient] = ScipyConjugateGradient\n    scipy_dual_annealing: Type[ScipyDualAnnealing] = ScipyDualAnnealing\n    scipy_lbfgsb: Type[ScipyLBFGSB] = ScipyLBFGSB\n    scipy_ls_dogbox: Type[ScipyLSDogbox] = ScipyLSDogbox\n    scipy_ls_lm: Type[ScipyLSLM] = ScipyLSLM\n    scipy_ls_trf: Type[ScipyLSTRF] = ScipyLSTRF\n    scipy_newton_cg: Type[ScipyNewtonCG] = ScipyNewtonCG\n    scipy_shgo: Type[ScipySHGO] = ScipySHGO\n    scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP\n    scipy_truncated_newton: Type[ScipyTruncatedNewton] = ScipyTruncatedNewton\n    scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr\n\n    
@property\n    def Bounded(self) -> BoundedGradientBasedAlgorithms:\n        return BoundedGradientBasedAlgorithms()\n\n    @property\n    def Global(self) -> GlobalGradientBasedAlgorithms:\n        return GlobalGradientBasedAlgorithms()\n\n    @property\n    def LeastSquares(self) -> GradientBasedLeastSquaresAlgorithms:\n        return GradientBasedLeastSquaresAlgorithms()\n\n    @property\n    def Likelihood(self) -> GradientBasedLikelihoodAlgorithms:\n        return GradientBasedLikelihoodAlgorithms()\n\n    @property\n    def Local(self) -> GradientBasedLocalAlgorithms:\n        return GradientBasedLocalAlgorithms()\n\n    @property\n    def NonlinearConstrained(self) -> GradientBasedNonlinearConstrainedAlgorithms:\n        return GradientBasedNonlinearConstrainedAlgorithms()\n\n    @property\n    def Scalar(self) -> GradientBasedScalarAlgorithms:\n        return GradientBasedScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass GradientFreeAlgorithms(AlgoSelection):\n    bayes_opt: Type[BayesOpt] = BayesOpt\n    gfo_differential_evolution: Type[GFODifferentialEvolution] = (\n        GFODifferentialEvolution\n    )\n    gfo_downhillsimplex: Type[GFODownhillSimplex] = GFODownhillSimplex\n    gfo_evolution_strategy: Type[GFOEvolutionStrategy] = GFOEvolutionStrategy\n    gfo_genetic_algorithm: Type[GFOGeneticAlgorithm] = GFOGeneticAlgorithm\n    gfo_hillclimbing: Type[GFOHillClimbing] = GFOHillClimbing\n    gfo_parallel_tempering: Type[GFOParallelTempering] = GFOParallelTempering\n    gfo_pso: Type[GFOParticleSwarmOptimization] = GFOParticleSwarmOptimization\n    gfo_powells_method: Type[GFOPowellsMethod] = GFOPowellsMethod\n    gfo_repulsinghillclimbing: Type[GFORepulsingHillClimbing] = GFORepulsingHillClimbing\n    gfo_simulatedannealing: Type[GFOSimulatedAnnealing] = GFOSimulatedAnnealing\n    gfo_spiral_optimization: Type[GFOSpiralOptimization] = GFOSpiralOptimization\n    gfo_stochastichillclimbing: Type[GFOStochasticHillClimbing] = (\n        
GFOStochasticHillClimbing\n    )\n    nag_dfols: Type[NagDFOLS] = NagDFOLS\n    nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA\n    neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel\n    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim\n    nevergrad_cga: Type[NevergradCGA] = NevergradCGA\n    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES\n    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution\n    nevergrad_eda: Type[NevergradEDA] = NevergradEDA\n    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA\n    nevergrad_meta: Type[NevergradMeta] = NevergradMeta\n    nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt\n    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne\n    nevergrad_pso: Type[NevergradPSO] = NevergradPSO\n    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch\n    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch\n    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA\n    nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA\n    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA\n    nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM\n    nlopt_direct: Type[NloptDirect] = NloptDirect\n    nlopt_esch: Type[NloptESCH] = NloptESCH\n    nlopt_isres: Type[NloptISRES] = NloptISRES\n    nlopt_newuoa: Type[NloptNEWUOA] = NloptNEWUOA\n    nlopt_neldermead: Type[NloptNelderMead] = NloptNelderMead\n    nlopt_praxis: Type[NloptPRAXIS] = NloptPRAXIS\n    nlopt_sbplx: Type[NloptSbplx] = NloptSbplx\n    pounders: Type[Pounders] = Pounders\n    pygad: Type[Pygad] = Pygad\n    pygmo_bee_colony: Type[PygmoBeeColony] = PygmoBeeColony\n    pygmo_cmaes: Type[PygmoCmaes] = PygmoCmaes\n    pygmo_compass_search: Type[PygmoCompassSearch] = PygmoCompassSearch\n    pygmo_de: Type[PygmoDe] = PygmoDe\n    pygmo_de1220: Type[PygmoDe1220] = PygmoDe1220\n    pygmo_gaco: Type[PygmoGaco] = PygmoGaco\n    pygmo_gwo: Type[PygmoGwo] = PygmoGwo\n   
 pygmo_ihs: Type[PygmoIhs] = PygmoIhs\n    pygmo_mbh: Type[PygmoMbh] = PygmoMbh\n    pygmo_pso: Type[PygmoPso] = PygmoPso\n    pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen\n    pygmo_sade: Type[PygmoSade] = PygmoSade\n    pygmo_sea: Type[PygmoSea] = PygmoSea\n    pygmo_sga: Type[PygmoSga] = PygmoSga\n    pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing\n    pygmo_xnes: Type[PygmoXnes] = PygmoXnes\n    pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO\n    pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO\n    pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO\n    scipy_brute: Type[ScipyBrute] = ScipyBrute\n    scipy_cobyla: Type[ScipyCOBYLA] = ScipyCOBYLA\n    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (\n        ScipyDifferentialEvolution\n    )\n    scipy_direct: Type[ScipyDirect] = ScipyDirect\n    scipy_neldermead: Type[ScipyNelderMead] = ScipyNelderMead\n    scipy_powell: Type[ScipyPowell] = ScipyPowell\n    tao_pounders: Type[TAOPounders] = TAOPounders\n    tranquilo: Type[Tranquilo] = Tranquilo\n    tranquilo_ls: Type[TranquiloLS] = TranquiloLS\n\n    @property\n    def Bounded(self) -> BoundedGradientFreeAlgorithms:\n        return BoundedGradientFreeAlgorithms()\n\n    @property\n    def Global(self) -> GlobalGradientFreeAlgorithms:\n        return GlobalGradientFreeAlgorithms()\n\n    @property\n    def LeastSquares(self) -> GradientFreeLeastSquaresAlgorithms:\n        return GradientFreeLeastSquaresAlgorithms()\n\n    @property\n    def Local(self) -> GradientFreeLocalAlgorithms:\n        return GradientFreeLocalAlgorithms()\n\n    @property\n    def NonlinearConstrained(self) -> GradientFreeNonlinearConstrainedAlgorithms:\n        return GradientFreeNonlinearConstrainedAlgorithms()\n\n    @property\n    def Parallel(self) -> GradientFreeParallelAlgorithms:\n        return GradientFreeParallelAlgorithms()\n\n    @property\n    def 
Scalar(self) -> GradientFreeScalarAlgorithms:\n        return GradientFreeScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass GlobalAlgorithms(AlgoSelection):\n    bayes_opt: Type[BayesOpt] = BayesOpt\n    gfo_differential_evolution: Type[GFODifferentialEvolution] = (\n        GFODifferentialEvolution\n    )\n    gfo_downhillsimplex: Type[GFODownhillSimplex] = GFODownhillSimplex\n    gfo_evolution_strategy: Type[GFOEvolutionStrategy] = GFOEvolutionStrategy\n    gfo_genetic_algorithm: Type[GFOGeneticAlgorithm] = GFOGeneticAlgorithm\n    gfo_hillclimbing: Type[GFOHillClimbing] = GFOHillClimbing\n    gfo_parallel_tempering: Type[GFOParallelTempering] = GFOParallelTempering\n    gfo_pso: Type[GFOParticleSwarmOptimization] = GFOParticleSwarmOptimization\n    gfo_powells_method: Type[GFOPowellsMethod] = GFOPowellsMethod\n    gfo_repulsinghillclimbing: Type[GFORepulsingHillClimbing] = GFORepulsingHillClimbing\n    gfo_simulatedannealing: Type[GFOSimulatedAnnealing] = GFOSimulatedAnnealing\n    gfo_spiral_optimization: Type[GFOSpiralOptimization] = GFOSpiralOptimization\n    gfo_stochastichillclimbing: Type[GFOStochasticHillClimbing] = (\n        GFOStochasticHillClimbing\n    )\n    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim\n    nevergrad_cga: Type[NevergradCGA] = NevergradCGA\n    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES\n    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution\n    nevergrad_eda: Type[NevergradEDA] = NevergradEDA\n    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA\n    nevergrad_meta: Type[NevergradMeta] = NevergradMeta\n    nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt\n    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne\n    nevergrad_pso: Type[NevergradPSO] = NevergradPSO\n    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch\n    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch\n    
nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA\n    nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM\n    nlopt_direct: Type[NloptDirect] = NloptDirect\n    nlopt_esch: Type[NloptESCH] = NloptESCH\n    nlopt_isres: Type[NloptISRES] = NloptISRES\n    pygad: Type[Pygad] = Pygad\n    pygmo_bee_colony: Type[PygmoBeeColony] = PygmoBeeColony\n    pygmo_cmaes: Type[PygmoCmaes] = PygmoCmaes\n    pygmo_compass_search: Type[PygmoCompassSearch] = PygmoCompassSearch\n    pygmo_de: Type[PygmoDe] = PygmoDe\n    pygmo_de1220: Type[PygmoDe1220] = PygmoDe1220\n    pygmo_gaco: Type[PygmoGaco] = PygmoGaco\n    pygmo_gwo: Type[PygmoGwo] = PygmoGwo\n    pygmo_ihs: Type[PygmoIhs] = PygmoIhs\n    pygmo_mbh: Type[PygmoMbh] = PygmoMbh\n    pygmo_pso: Type[PygmoPso] = PygmoPso\n    pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen\n    pygmo_sade: Type[PygmoSade] = PygmoSade\n    pygmo_sea: Type[PygmoSea] = PygmoSea\n    pygmo_sga: Type[PygmoSga] = PygmoSga\n    pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing\n    pygmo_xnes: Type[PygmoXnes] = PygmoXnes\n    pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO\n    pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO\n    pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO\n    scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping\n    scipy_brute: Type[ScipyBrute] = ScipyBrute\n    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (\n        ScipyDifferentialEvolution\n    )\n    scipy_direct: Type[ScipyDirect] = ScipyDirect\n    scipy_dual_annealing: Type[ScipyDualAnnealing] = ScipyDualAnnealing\n    scipy_shgo: Type[ScipySHGO] = ScipySHGO\n\n    @property\n    def Bounded(self) -> BoundedGlobalAlgorithms:\n        return BoundedGlobalAlgorithms()\n\n    @property\n    def GradientBased(self) -> GlobalGradientBasedAlgorithms:\n        return GlobalGradientBasedAlgorithms()\n\n    @property\n    def GradientFree(self) -> 
GlobalGradientFreeAlgorithms:\n        return GlobalGradientFreeAlgorithms()\n\n    @property\n    def NonlinearConstrained(self) -> GlobalNonlinearConstrainedAlgorithms:\n        return GlobalNonlinearConstrainedAlgorithms()\n\n    @property\n    def Parallel(self) -> GlobalParallelAlgorithms:\n        return GlobalParallelAlgorithms()\n\n    @property\n    def Scalar(self) -> GlobalScalarAlgorithms:\n        return GlobalScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass LocalAlgorithms(AlgoSelection):\n    bhhh: Type[BHHH] = BHHH\n    fides: Type[Fides] = Fides\n    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad\n    ipopt: Type[Ipopt] = Ipopt\n    nag_dfols: Type[NagDFOLS] = NagDFOLS\n    nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA\n    neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel\n    nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA\n    nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ\n    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA\n    nlopt_lbfgsb: Type[NloptLBFGSB] = NloptLBFGSB\n    nlopt_mma: Type[NloptMMA] = NloptMMA\n    nlopt_newuoa: Type[NloptNEWUOA] = NloptNEWUOA\n    nlopt_neldermead: Type[NloptNelderMead] = NloptNelderMead\n    nlopt_praxis: Type[NloptPRAXIS] = NloptPRAXIS\n    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP\n    nlopt_sbplx: Type[NloptSbplx] = NloptSbplx\n    nlopt_tnewton: Type[NloptTNewton] = NloptTNewton\n    nlopt_var: Type[NloptVAR] = NloptVAR\n    pounders: Type[Pounders] = Pounders\n    scipy_bfgs: Type[ScipyBFGS] = ScipyBFGS\n    scipy_cobyla: Type[ScipyCOBYLA] = ScipyCOBYLA\n    scipy_conjugate_gradient: Type[ScipyConjugateGradient] = ScipyConjugateGradient\n    scipy_lbfgsb: Type[ScipyLBFGSB] = ScipyLBFGSB\n    scipy_ls_dogbox: Type[ScipyLSDogbox] = ScipyLSDogbox\n    scipy_ls_lm: Type[ScipyLSLM] = ScipyLSLM\n    scipy_ls_trf: Type[ScipyLSTRF] = ScipyLSTRF\n    scipy_neldermead: Type[ScipyNelderMead] = ScipyNelderMead\n    scipy_newton_cg: Type[ScipyNewtonCG] = ScipyNewtonCG\n    scipy_powell: 
Type[ScipyPowell] = ScipyPowell\n    scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP\n    scipy_truncated_newton: Type[ScipyTruncatedNewton] = ScipyTruncatedNewton\n    scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr\n    tao_pounders: Type[TAOPounders] = TAOPounders\n    tranquilo: Type[Tranquilo] = Tranquilo\n    tranquilo_ls: Type[TranquiloLS] = TranquiloLS\n\n    @property\n    def Bounded(self) -> BoundedLocalAlgorithms:\n        return BoundedLocalAlgorithms()\n\n    @property\n    def GradientBased(self) -> GradientBasedLocalAlgorithms:\n        return GradientBasedLocalAlgorithms()\n\n    @property\n    def GradientFree(self) -> GradientFreeLocalAlgorithms:\n        return GradientFreeLocalAlgorithms()\n\n    @property\n    def LeastSquares(self) -> LeastSquaresLocalAlgorithms:\n        return LeastSquaresLocalAlgorithms()\n\n    @property\n    def Likelihood(self) -> LikelihoodLocalAlgorithms:\n        return LikelihoodLocalAlgorithms()\n\n    @property\n    def NonlinearConstrained(self) -> LocalNonlinearConstrainedAlgorithms:\n        return LocalNonlinearConstrainedAlgorithms()\n\n    @property\n    def Parallel(self) -> LocalParallelAlgorithms:\n        return LocalParallelAlgorithms()\n\n    @property\n    def Scalar(self) -> LocalScalarAlgorithms:\n        return LocalScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass BoundedAlgorithms(AlgoSelection):\n    bayes_opt: Type[BayesOpt] = BayesOpt\n    fides: Type[Fides] = Fides\n    gfo_differential_evolution: Type[GFODifferentialEvolution] = (\n        GFODifferentialEvolution\n    )\n    gfo_downhillsimplex: Type[GFODownhillSimplex] = GFODownhillSimplex\n    gfo_evolution_strategy: Type[GFOEvolutionStrategy] = GFOEvolutionStrategy\n    gfo_genetic_algorithm: Type[GFOGeneticAlgorithm] = GFOGeneticAlgorithm\n    gfo_hillclimbing: Type[GFOHillClimbing] = GFOHillClimbing\n    gfo_parallel_tempering: Type[GFOParallelTempering] = GFOParallelTempering\n    gfo_pso: 
Type[GFOParticleSwarmOptimization] = GFOParticleSwarmOptimization\n    gfo_powells_method: Type[GFOPowellsMethod] = GFOPowellsMethod\n    gfo_repulsinghillclimbing: Type[GFORepulsingHillClimbing] = GFORepulsingHillClimbing\n    gfo_simulatedannealing: Type[GFOSimulatedAnnealing] = GFOSimulatedAnnealing\n    gfo_spiral_optimization: Type[GFOSpiralOptimization] = GFOSpiralOptimization\n    gfo_stochastichillclimbing: Type[GFOStochasticHillClimbing] = (\n        GFOStochasticHillClimbing\n    )\n    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad\n    ipopt: Type[Ipopt] = Ipopt\n    nag_dfols: Type[NagDFOLS] = NagDFOLS\n    nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA\n    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim\n    nevergrad_cga: Type[NevergradCGA] = NevergradCGA\n    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES\n    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution\n    nevergrad_eda: Type[NevergradEDA] = NevergradEDA\n    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA\n    nevergrad_meta: Type[NevergradMeta] = NevergradMeta\n    nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt\n    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne\n    nevergrad_pso: Type[NevergradPSO] = NevergradPSO\n    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch\n    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch\n    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA\n    nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA\n    nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ\n    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA\n    nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM\n    nlopt_direct: Type[NloptDirect] = NloptDirect\n    nlopt_esch: Type[NloptESCH] = NloptESCH\n    nlopt_isres: Type[NloptISRES] = NloptISRES\n    nlopt_lbfgsb: Type[NloptLBFGSB] = NloptLBFGSB\n    nlopt_mma: Type[NloptMMA] = NloptMMA\n    nlopt_newuoa: 
Type[NloptNEWUOA] = NloptNEWUOA\n    nlopt_neldermead: Type[NloptNelderMead] = NloptNelderMead\n    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP\n    nlopt_sbplx: Type[NloptSbplx] = NloptSbplx\n    nlopt_tnewton: Type[NloptTNewton] = NloptTNewton\n    nlopt_var: Type[NloptVAR] = NloptVAR\n    pounders: Type[Pounders] = Pounders\n    pygad: Type[Pygad] = Pygad\n    pygmo_bee_colony: Type[PygmoBeeColony] = PygmoBeeColony\n    pygmo_cmaes: Type[PygmoCmaes] = PygmoCmaes\n    pygmo_compass_search: Type[PygmoCompassSearch] = PygmoCompassSearch\n    pygmo_de: Type[PygmoDe] = PygmoDe\n    pygmo_de1220: Type[PygmoDe1220] = PygmoDe1220\n    pygmo_gaco: Type[PygmoGaco] = PygmoGaco\n    pygmo_gwo: Type[PygmoGwo] = PygmoGwo\n    pygmo_ihs: Type[PygmoIhs] = PygmoIhs\n    pygmo_mbh: Type[PygmoMbh] = PygmoMbh\n    pygmo_pso: Type[PygmoPso] = PygmoPso\n    pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen\n    pygmo_sade: Type[PygmoSade] = PygmoSade\n    pygmo_sea: Type[PygmoSea] = PygmoSea\n    pygmo_sga: Type[PygmoSga] = PygmoSga\n    pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing\n    pygmo_xnes: Type[PygmoXnes] = PygmoXnes\n    pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO\n    pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO\n    pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO\n    scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping\n    scipy_brute: Type[ScipyBrute] = ScipyBrute\n    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (\n        ScipyDifferentialEvolution\n    )\n    scipy_direct: Type[ScipyDirect] = ScipyDirect\n    scipy_dual_annealing: Type[ScipyDualAnnealing] = ScipyDualAnnealing\n    scipy_lbfgsb: Type[ScipyLBFGSB] = ScipyLBFGSB\n    scipy_ls_dogbox: Type[ScipyLSDogbox] = ScipyLSDogbox\n    scipy_ls_trf: Type[ScipyLSTRF] = ScipyLSTRF\n    scipy_neldermead: Type[ScipyNelderMead] = ScipyNelderMead\n    scipy_powell: Type[ScipyPowell] = 
ScipyPowell\n    scipy_shgo: Type[ScipySHGO] = ScipySHGO\n    scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP\n    scipy_truncated_newton: Type[ScipyTruncatedNewton] = ScipyTruncatedNewton\n    scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr\n    tao_pounders: Type[TAOPounders] = TAOPounders\n    tranquilo: Type[Tranquilo] = Tranquilo\n    tranquilo_ls: Type[TranquiloLS] = TranquiloLS\n\n    @property\n    def Global(self) -> BoundedGlobalAlgorithms:\n        return BoundedGlobalAlgorithms()\n\n    @property\n    def GradientBased(self) -> BoundedGradientBasedAlgorithms:\n        return BoundedGradientBasedAlgorithms()\n\n    @property\n    def GradientFree(self) -> BoundedGradientFreeAlgorithms:\n        return BoundedGradientFreeAlgorithms()\n\n    @property\n    def LeastSquares(self) -> BoundedLeastSquaresAlgorithms:\n        return BoundedLeastSquaresAlgorithms()\n\n    @property\n    def Local(self) -> BoundedLocalAlgorithms:\n        return BoundedLocalAlgorithms()\n\n    @property\n    def NonlinearConstrained(self) -> BoundedNonlinearConstrainedAlgorithms:\n        return BoundedNonlinearConstrainedAlgorithms()\n\n    @property\n    def Parallel(self) -> BoundedParallelAlgorithms:\n        return BoundedParallelAlgorithms()\n\n    @property\n    def Scalar(self) -> BoundedScalarAlgorithms:\n        return BoundedScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass NonlinearConstrainedAlgorithms(AlgoSelection):\n    ipopt: Type[Ipopt] = Ipopt\n    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA\n    nlopt_isres: Type[NloptISRES] = NloptISRES\n    nlopt_mma: Type[NloptMMA] = NloptMMA\n    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP\n    scipy_cobyla: Type[ScipyCOBYLA] = ScipyCOBYLA\n    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (\n        ScipyDifferentialEvolution\n    )\n    scipy_shgo: Type[ScipySHGO] = ScipySHGO\n    scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP\n    scipy_trust_constr: Type[ScipyTrustConstr] = 
ScipyTrustConstr\n\n    @property\n    def Bounded(self) -> BoundedNonlinearConstrainedAlgorithms:\n        return BoundedNonlinearConstrainedAlgorithms()\n\n    @property\n    def Global(self) -> GlobalNonlinearConstrainedAlgorithms:\n        return GlobalNonlinearConstrainedAlgorithms()\n\n    @property\n    def GradientBased(self) -> GradientBasedNonlinearConstrainedAlgorithms:\n        return GradientBasedNonlinearConstrainedAlgorithms()\n\n    @property\n    def GradientFree(self) -> GradientFreeNonlinearConstrainedAlgorithms:\n        return GradientFreeNonlinearConstrainedAlgorithms()\n\n    @property\n    def Local(self) -> LocalNonlinearConstrainedAlgorithms:\n        return LocalNonlinearConstrainedAlgorithms()\n\n    @property\n    def Parallel(self) -> NonlinearConstrainedParallelAlgorithms:\n        return NonlinearConstrainedParallelAlgorithms()\n\n    @property\n    def Scalar(self) -> NonlinearConstrainedScalarAlgorithms:\n        return NonlinearConstrainedScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass ScalarAlgorithms(AlgoSelection):\n    bayes_opt: Type[BayesOpt] = BayesOpt\n    fides: Type[Fides] = Fides\n    gfo_differential_evolution: Type[GFODifferentialEvolution] = (\n        GFODifferentialEvolution\n    )\n    gfo_downhillsimplex: Type[GFODownhillSimplex] = GFODownhillSimplex\n    gfo_evolution_strategy: Type[GFOEvolutionStrategy] = GFOEvolutionStrategy\n    gfo_genetic_algorithm: Type[GFOGeneticAlgorithm] = GFOGeneticAlgorithm\n    gfo_hillclimbing: Type[GFOHillClimbing] = GFOHillClimbing\n    gfo_parallel_tempering: Type[GFOParallelTempering] = GFOParallelTempering\n    gfo_pso: Type[GFOParticleSwarmOptimization] = GFOParticleSwarmOptimization\n    gfo_powells_method: Type[GFOPowellsMethod] = GFOPowellsMethod\n    gfo_repulsinghillclimbing: Type[GFORepulsingHillClimbing] = GFORepulsingHillClimbing\n    gfo_simulatedannealing: Type[GFOSimulatedAnnealing] = GFOSimulatedAnnealing\n    gfo_spiral_optimization: 
Type[GFOSpiralOptimization] = GFOSpiralOptimization\n    gfo_stochastichillclimbing: Type[GFOStochasticHillClimbing] = (\n        GFOStochasticHillClimbing\n    )\n    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad\n    ipopt: Type[Ipopt] = Ipopt\n    nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA\n    neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel\n    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim\n    nevergrad_cga: Type[NevergradCGA] = NevergradCGA\n    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES\n    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution\n    nevergrad_eda: Type[NevergradEDA] = NevergradEDA\n    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA\n    nevergrad_meta: Type[NevergradMeta] = NevergradMeta\n    nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt\n    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne\n    nevergrad_pso: Type[NevergradPSO] = NevergradPSO\n    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch\n    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch\n    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA\n    nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA\n    nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ\n    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA\n    nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM\n    nlopt_direct: Type[NloptDirect] = NloptDirect\n    nlopt_esch: Type[NloptESCH] = NloptESCH\n    nlopt_isres: Type[NloptISRES] = NloptISRES\n    nlopt_lbfgsb: Type[NloptLBFGSB] = NloptLBFGSB\n    nlopt_mma: Type[NloptMMA] = NloptMMA\n    nlopt_newuoa: Type[NloptNEWUOA] = NloptNEWUOA\n    nlopt_neldermead: Type[NloptNelderMead] = NloptNelderMead\n    nlopt_praxis: Type[NloptPRAXIS] = NloptPRAXIS\n    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP\n    nlopt_sbplx: Type[NloptSbplx] = NloptSbplx\n    nlopt_tnewton: Type[NloptTNewton] = NloptTNewton\n    nlopt_var: 
Type[NloptVAR] = NloptVAR\n    pygad: Type[Pygad] = Pygad\n    pygmo_bee_colony: Type[PygmoBeeColony] = PygmoBeeColony\n    pygmo_cmaes: Type[PygmoCmaes] = PygmoCmaes\n    pygmo_compass_search: Type[PygmoCompassSearch] = PygmoCompassSearch\n    pygmo_de: Type[PygmoDe] = PygmoDe\n    pygmo_de1220: Type[PygmoDe1220] = PygmoDe1220\n    pygmo_gaco: Type[PygmoGaco] = PygmoGaco\n    pygmo_gwo: Type[PygmoGwo] = PygmoGwo\n    pygmo_ihs: Type[PygmoIhs] = PygmoIhs\n    pygmo_mbh: Type[PygmoMbh] = PygmoMbh\n    pygmo_pso: Type[PygmoPso] = PygmoPso\n    pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen\n    pygmo_sade: Type[PygmoSade] = PygmoSade\n    pygmo_sea: Type[PygmoSea] = PygmoSea\n    pygmo_sga: Type[PygmoSga] = PygmoSga\n    pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing\n    pygmo_xnes: Type[PygmoXnes] = PygmoXnes\n    pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO\n    pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO\n    pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO\n    scipy_bfgs: Type[ScipyBFGS] = ScipyBFGS\n    scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping\n    scipy_brute: Type[ScipyBrute] = ScipyBrute\n    scipy_cobyla: Type[ScipyCOBYLA] = ScipyCOBYLA\n    scipy_conjugate_gradient: Type[ScipyConjugateGradient] = ScipyConjugateGradient\n    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (\n        ScipyDifferentialEvolution\n    )\n    scipy_direct: Type[ScipyDirect] = ScipyDirect\n    scipy_dual_annealing: Type[ScipyDualAnnealing] = ScipyDualAnnealing\n    scipy_lbfgsb: Type[ScipyLBFGSB] = ScipyLBFGSB\n    scipy_neldermead: Type[ScipyNelderMead] = ScipyNelderMead\n    scipy_newton_cg: Type[ScipyNewtonCG] = ScipyNewtonCG\n    scipy_powell: Type[ScipyPowell] = ScipyPowell\n    scipy_shgo: Type[ScipySHGO] = ScipySHGO\n    scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP\n    scipy_truncated_newton: Type[ScipyTruncatedNewton] = 
ScipyTruncatedNewton\n    scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr\n    tranquilo: Type[Tranquilo] = Tranquilo\n\n    @property\n    def Bounded(self) -> BoundedScalarAlgorithms:\n        return BoundedScalarAlgorithms()\n\n    @property\n    def Global(self) -> GlobalScalarAlgorithms:\n        return GlobalScalarAlgorithms()\n\n    @property\n    def GradientBased(self) -> GradientBasedScalarAlgorithms:\n        return GradientBasedScalarAlgorithms()\n\n    @property\n    def GradientFree(self) -> GradientFreeScalarAlgorithms:\n        return GradientFreeScalarAlgorithms()\n\n    @property\n    def Local(self) -> LocalScalarAlgorithms:\n        return LocalScalarAlgorithms()\n\n    @property\n    def NonlinearConstrained(self) -> NonlinearConstrainedScalarAlgorithms:\n        return NonlinearConstrainedScalarAlgorithms()\n\n    @property\n    def Parallel(self) -> ParallelScalarAlgorithms:\n        return ParallelScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass LeastSquaresAlgorithms(AlgoSelection):\n    nag_dfols: Type[NagDFOLS] = NagDFOLS\n    pounders: Type[Pounders] = Pounders\n    scipy_ls_dogbox: Type[ScipyLSDogbox] = ScipyLSDogbox\n    scipy_ls_lm: Type[ScipyLSLM] = ScipyLSLM\n    scipy_ls_trf: Type[ScipyLSTRF] = ScipyLSTRF\n    tao_pounders: Type[TAOPounders] = TAOPounders\n    tranquilo_ls: Type[TranquiloLS] = TranquiloLS\n\n    @property\n    def Bounded(self) -> BoundedLeastSquaresAlgorithms:\n        return BoundedLeastSquaresAlgorithms()\n\n    @property\n    def GradientBased(self) -> GradientBasedLeastSquaresAlgorithms:\n        return GradientBasedLeastSquaresAlgorithms()\n\n    @property\n    def GradientFree(self) -> GradientFreeLeastSquaresAlgorithms:\n        return GradientFreeLeastSquaresAlgorithms()\n\n    @property\n    def Local(self) -> LeastSquaresLocalAlgorithms:\n        return LeastSquaresLocalAlgorithms()\n\n    @property\n    def Parallel(self) -> LeastSquaresParallelAlgorithms:\n        return 
LeastSquaresParallelAlgorithms()\n\n\n@dataclass(frozen=True)\nclass LikelihoodAlgorithms(AlgoSelection):\n    bhhh: Type[BHHH] = BHHH\n\n    @property\n    def GradientBased(self) -> GradientBasedLikelihoodAlgorithms:\n        return GradientBasedLikelihoodAlgorithms()\n\n    @property\n    def Local(self) -> LikelihoodLocalAlgorithms:\n        return LikelihoodLocalAlgorithms()\n\n\n@dataclass(frozen=True)\nclass ParallelAlgorithms(AlgoSelection):\n    neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel\n    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim\n    nevergrad_cga: Type[NevergradCGA] = NevergradCGA\n    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES\n    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution\n    nevergrad_eda: Type[NevergradEDA] = NevergradEDA\n    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA\n    nevergrad_meta: Type[NevergradMeta] = NevergradMeta\n    nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt\n    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne\n    nevergrad_pso: Type[NevergradPSO] = NevergradPSO\n    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch\n    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch\n    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA\n    pounders: Type[Pounders] = Pounders\n    pygad: Type[Pygad] = Pygad\n    pygmo_gaco: Type[PygmoGaco] = PygmoGaco\n    pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen\n    pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO\n    pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO\n    pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO\n    scipy_brute: Type[ScipyBrute] = ScipyBrute\n    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (\n        ScipyDifferentialEvolution\n    )\n    tranquilo: Type[Tranquilo] = Tranquilo\n    
tranquilo_ls: Type[TranquiloLS] = TranquiloLS\n\n    @property\n    def Bounded(self) -> BoundedParallelAlgorithms:\n        return BoundedParallelAlgorithms()\n\n    @property\n    def Global(self) -> GlobalParallelAlgorithms:\n        return GlobalParallelAlgorithms()\n\n    @property\n    def GradientFree(self) -> GradientFreeParallelAlgorithms:\n        return GradientFreeParallelAlgorithms()\n\n    @property\n    def LeastSquares(self) -> LeastSquaresParallelAlgorithms:\n        return LeastSquaresParallelAlgorithms()\n\n    @property\n    def Local(self) -> LocalParallelAlgorithms:\n        return LocalParallelAlgorithms()\n\n    @property\n    def NonlinearConstrained(self) -> NonlinearConstrainedParallelAlgorithms:\n        return NonlinearConstrainedParallelAlgorithms()\n\n    @property\n    def Scalar(self) -> ParallelScalarAlgorithms:\n        return ParallelScalarAlgorithms()\n\n\n@dataclass(frozen=True)\nclass Algorithms(AlgoSelection):\n    bayes_opt: Type[BayesOpt] = BayesOpt\n    bhhh: Type[BHHH] = BHHH\n    fides: Type[Fides] = Fides\n    gfo_differential_evolution: Type[GFODifferentialEvolution] = (\n        GFODifferentialEvolution\n    )\n    gfo_downhillsimplex: Type[GFODownhillSimplex] = GFODownhillSimplex\n    gfo_evolution_strategy: Type[GFOEvolutionStrategy] = GFOEvolutionStrategy\n    gfo_genetic_algorithm: Type[GFOGeneticAlgorithm] = GFOGeneticAlgorithm\n    gfo_hillclimbing: Type[GFOHillClimbing] = GFOHillClimbing\n    gfo_parallel_tempering: Type[GFOParallelTempering] = GFOParallelTempering\n    gfo_pso: Type[GFOParticleSwarmOptimization] = GFOParticleSwarmOptimization\n    gfo_powells_method: Type[GFOPowellsMethod] = GFOPowellsMethod\n    gfo_repulsinghillclimbing: Type[GFORepulsingHillClimbing] = GFORepulsingHillClimbing\n    gfo_simulatedannealing: Type[GFOSimulatedAnnealing] = GFOSimulatedAnnealing\n    gfo_spiral_optimization: Type[GFOSpiralOptimization] = GFOSpiralOptimization\n    gfo_stochastichillclimbing: 
Type[GFOStochasticHillClimbing] = (\n        GFOStochasticHillClimbing\n    )\n    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad\n    ipopt: Type[Ipopt] = Ipopt\n    nag_dfols: Type[NagDFOLS] = NagDFOLS\n    nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA\n    neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel\n    nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim\n    nevergrad_cga: Type[NevergradCGA] = NevergradCGA\n    nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES\n    nevergrad_de: Type[NevergradDifferentialEvolution] = NevergradDifferentialEvolution\n    nevergrad_eda: Type[NevergradEDA] = NevergradEDA\n    nevergrad_emna: Type[NevergradEMNA] = NevergradEMNA\n    nevergrad_meta: Type[NevergradMeta] = NevergradMeta\n    nevergrad_ngopt: Type[NevergradNGOpt] = NevergradNGOpt\n    nevergrad_oneplusone: Type[NevergradOnePlusOne] = NevergradOnePlusOne\n    nevergrad_pso: Type[NevergradPSO] = NevergradPSO\n    nevergrad_randomsearch: Type[NevergradRandomSearch] = NevergradRandomSearch\n    nevergrad_samplingsearch: Type[NevergradSamplingSearch] = NevergradSamplingSearch\n    nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA\n    nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA\n    nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ\n    nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA\n    nlopt_crs2_lm: Type[NloptCRS2LM] = NloptCRS2LM\n    nlopt_direct: Type[NloptDirect] = NloptDirect\n    nlopt_esch: Type[NloptESCH] = NloptESCH\n    nlopt_isres: Type[NloptISRES] = NloptISRES\n    nlopt_lbfgsb: Type[NloptLBFGSB] = NloptLBFGSB\n    nlopt_mma: Type[NloptMMA] = NloptMMA\n    nlopt_newuoa: Type[NloptNEWUOA] = NloptNEWUOA\n    nlopt_neldermead: Type[NloptNelderMead] = NloptNelderMead\n    nlopt_praxis: Type[NloptPRAXIS] = NloptPRAXIS\n    nlopt_slsqp: Type[NloptSLSQP] = NloptSLSQP\n    nlopt_sbplx: Type[NloptSbplx] = NloptSbplx\n    nlopt_tnewton: Type[NloptTNewton] = NloptTNewton\n    nlopt_var: Type[NloptVAR] = NloptVAR\n    pounders: 
Type[Pounders] = Pounders\n    pygad: Type[Pygad] = Pygad\n    pygmo_bee_colony: Type[PygmoBeeColony] = PygmoBeeColony\n    pygmo_cmaes: Type[PygmoCmaes] = PygmoCmaes\n    pygmo_compass_search: Type[PygmoCompassSearch] = PygmoCompassSearch\n    pygmo_de: Type[PygmoDe] = PygmoDe\n    pygmo_de1220: Type[PygmoDe1220] = PygmoDe1220\n    pygmo_gaco: Type[PygmoGaco] = PygmoGaco\n    pygmo_gwo: Type[PygmoGwo] = PygmoGwo\n    pygmo_ihs: Type[PygmoIhs] = PygmoIhs\n    pygmo_mbh: Type[PygmoMbh] = PygmoMbh\n    pygmo_pso: Type[PygmoPso] = PygmoPso\n    pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen\n    pygmo_sade: Type[PygmoSade] = PygmoSade\n    pygmo_sea: Type[PygmoSea] = PygmoSea\n    pygmo_sga: Type[PygmoSga] = PygmoSga\n    pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing\n    pygmo_xnes: Type[PygmoXnes] = PygmoXnes\n    pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO\n    pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO\n    pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO\n    scipy_bfgs: Type[ScipyBFGS] = ScipyBFGS\n    scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping\n    scipy_brute: Type[ScipyBrute] = ScipyBrute\n    scipy_cobyla: Type[ScipyCOBYLA] = ScipyCOBYLA\n    scipy_conjugate_gradient: Type[ScipyConjugateGradient] = ScipyConjugateGradient\n    scipy_differential_evolution: Type[ScipyDifferentialEvolution] = (\n        ScipyDifferentialEvolution\n    )\n    scipy_direct: Type[ScipyDirect] = ScipyDirect\n    scipy_dual_annealing: Type[ScipyDualAnnealing] = ScipyDualAnnealing\n    scipy_lbfgsb: Type[ScipyLBFGSB] = ScipyLBFGSB\n    scipy_ls_dogbox: Type[ScipyLSDogbox] = ScipyLSDogbox\n    scipy_ls_lm: Type[ScipyLSLM] = ScipyLSLM\n    scipy_ls_trf: Type[ScipyLSTRF] = ScipyLSTRF\n    scipy_neldermead: Type[ScipyNelderMead] = ScipyNelderMead\n    scipy_newton_cg: Type[ScipyNewtonCG] = ScipyNewtonCG\n    scipy_powell: Type[ScipyPowell] = ScipyPowell\n    
scipy_shgo: Type[ScipySHGO] = ScipySHGO\n    scipy_slsqp: Type[ScipySLSQP] = ScipySLSQP\n    scipy_truncated_newton: Type[ScipyTruncatedNewton] = ScipyTruncatedNewton\n    scipy_trust_constr: Type[ScipyTrustConstr] = ScipyTrustConstr\n    tao_pounders: Type[TAOPounders] = TAOPounders\n    tranquilo: Type[Tranquilo] = Tranquilo\n    tranquilo_ls: Type[TranquiloLS] = TranquiloLS\n\n    @property\n    def Bounded(self) -> BoundedAlgorithms:\n        return BoundedAlgorithms()\n\n    @property\n    def Global(self) -> GlobalAlgorithms:\n        return GlobalAlgorithms()\n\n    @property\n    def GradientBased(self) -> GradientBasedAlgorithms:\n        return GradientBasedAlgorithms()\n\n    @property\n    def GradientFree(self) -> GradientFreeAlgorithms:\n        return GradientFreeAlgorithms()\n\n    @property\n    def LeastSquares(self) -> LeastSquaresAlgorithms:\n        return LeastSquaresAlgorithms()\n\n    @property\n    def Likelihood(self) -> LikelihoodAlgorithms:\n        return LikelihoodAlgorithms()\n\n    @property\n    def Local(self) -> LocalAlgorithms:\n        return LocalAlgorithms()\n\n    @property\n    def NonlinearConstrained(self) -> NonlinearConstrainedAlgorithms:\n        return NonlinearConstrainedAlgorithms()\n\n    @property\n    def Parallel(self) -> ParallelAlgorithms:\n        return ParallelAlgorithms()\n\n    @property\n    def Scalar(self) -> ScalarAlgorithms:\n        return ScalarAlgorithms()\n\n\nalgos = Algorithms()\nglobal_algos = GlobalAlgorithms()\n\nALL_ALGORITHMS = algos._all_algorithms_dict\nAVAILABLE_ALGORITHMS = algos._available_algorithms_dict\nGLOBAL_ALGORITHMS = global_algos._available_algorithms_dict\n"
  },
  {
    "path": "src/optimagic/batch_evaluators.py",
    "content": "\"\"\"A collection of batch evaluators for process based parallelism.\n\nAll batch evaluators have the same interface and any function with the same interface\ncan be used as a batch evaluator in optimagic.\n\n\"\"\"\n\nfrom joblib import Parallel, delayed\n\ntry:\n    from pathos.pools import ProcessPool\n\n    pathos_is_available = True\nexcept ImportError:\n    pathos_is_available = False\n\nimport threading\nfrom typing import Any, Callable, Literal, TypeVar, cast\n\nfrom optimagic import deprecations\nfrom optimagic.config import DEFAULT_N_CORES as N_CORES\nfrom optimagic.decorators import catch, unpack\nfrom optimagic.typing import BatchEvaluator, BatchEvaluatorLiteral, ErrorHandling\n\nT = TypeVar(\"T\")\n\n\ndef pathos_mp_batch_evaluator(\n    func: Callable[..., T],\n    arguments: list[Any],\n    *,\n    n_cores: int = N_CORES,\n    error_handling: ErrorHandling\n    | Literal[\"raise\", \"continue\"] = ErrorHandling.CONTINUE,\n    unpack_symbol: Literal[\"*\", \"**\"] | None = None,\n) -> list[T]:\n    \"\"\"Batch evaluator based on pathos.multiprocess.ProcessPool.\n\n    This uses a patched but older version of python multiprocessing that replaces\n    pickling with dill and can thus handle decorated functions.\n\n    Args:\n        func (Callable): The function that is evaluated.\n        arguments (Iterable): Arguments for the functions. Their interpretation\n            depends on the unpack argument.\n        n_cores (int): Number of cores used to evaluate the function in parallel.\n            Values below one are interpreted as one. If only one core is used, the\n            batch evaluator disables everything that could cause problems, i.e. 
in that\n            case func and arguments are never pickled and func is executed in the main\n            process.\n        error_handling (str): Can take the values \"raise\" (raise the error and stop all\n            tasks as soon as one task fails) and \"continue\" (catch exceptions and set\n            the traceback of the raised exception.\n            KeyboardInterrupt and SystemExit are always raised.\n        unpack_symbol (str or None). Can be \"**\", \"*\" or None. If None, func just takes\n            one argument. If \"*\", the elements of arguments are positional arguments for\n            func. If \"**\", the elements of arguments are keyword arguments for func.\n\n\n    Returns:\n        list: The function evaluations.\n\n    \"\"\"\n    if not pathos_is_available:\n        raise NotImplementedError(\n            \"To use the pathos_mp_batch_evaluator, install pathos with \"\n            \"conda install -c conda-forge pathos.\"\n        )\n\n    _check_inputs(func, arguments, n_cores, error_handling, unpack_symbol)\n    n_cores = int(n_cores)\n\n    reraise = error_handling in [\n        \"raise\",\n        ErrorHandling.RAISE,\n        ErrorHandling.RAISE_STRICT,\n    ]\n\n    @unpack(symbol=unpack_symbol)\n    @catch(default=\"__traceback__\", reraise=reraise)\n    def internal_func(*args: Any, **kwargs: Any) -> T:\n        return func(*args, **kwargs)\n\n    if n_cores <= 1:\n        res = [internal_func(arg) for arg in arguments]\n    else:\n        p = ProcessPool(nodes=n_cores)\n        try:\n            res = p.map(internal_func, arguments)\n        except Exception as e:\n            p.terminate()\n            raise e\n\n    return res\n\n\ndef joblib_batch_evaluator(\n    func: Callable[..., T],\n    arguments: list[Any],\n    *,\n    n_cores: int = N_CORES,\n    error_handling: ErrorHandling\n    | Literal[\"raise\", \"continue\"] = ErrorHandling.CONTINUE,\n    unpack_symbol: Literal[\"*\", \"**\"] | None = None,\n) -> list[T]:\n    
\"\"\"Batch evaluator based on joblib's Parallel.\n\n    Args:\n        func (Callable): The function that is evaluated.\n        arguments (Iterable): Arguments for the functions. Their interpretation\n            depends on the unpack argument.\n        n_cores (int): Number of cores used to evaluate the function in parallel.\n            Values below one are interpreted as one. If only one core is used, the\n            batch evaluator disables everything that could cause problems, i.e. in that\n            case func and arguments are never pickled and func is executed in the main\n            process.\n        error_handling (str): Can take the values \"raise\" (raise the error and stop all\n            tasks as soon as one task fails) and \"continue\" (catch exceptions and set\n            the output of failed tasks to the traceback of the raised exception.\n            KeyboardInterrupt and SystemExit are always raised.\n        unpack_symbol (str or None). Can be \"**\", \"*\" or None. If None, func just takes\n            one argument. If \"*\", the elements of arguments are positional arguments for\n            func. 
If \"**\", the elements of arguments are keyword arguments for func.\n\n\n    Returns:\n        list: The function evaluations.\n\n    \"\"\"\n    _check_inputs(func, arguments, n_cores, error_handling, unpack_symbol)\n    n_cores = int(n_cores) if int(n_cores) >= 2 else 1\n\n    reraise = error_handling in [\n        \"raise\",\n        ErrorHandling.RAISE,\n        ErrorHandling.RAISE_STRICT,\n    ]\n\n    @unpack(symbol=unpack_symbol)\n    @catch(default=\"__traceback__\", reraise=reraise)\n    def internal_func(*args: Any, **kwargs: Any) -> T:\n        return func(*args, **kwargs)\n\n    if n_cores == 1:\n        res = [internal_func(arg) for arg in arguments]\n    else:\n        res = Parallel(n_jobs=n_cores)(delayed(internal_func)(arg) for arg in arguments)\n\n    return res\n\n\ndef threading_batch_evaluator(\n    func: Callable[..., T],\n    arguments: list[Any],\n    *,\n    n_cores: int = N_CORES,\n    error_handling: ErrorHandling\n    | Literal[\"raise\", \"continue\"] = ErrorHandling.CONTINUE,\n    unpack_symbol: Literal[\"*\", \"**\"] | None = None,\n) -> list[T]:\n    \"\"\"Batch evaluator based on Python's threading.\n\n    Args:\n        func (Callable): The function that is evaluated.\n        arguments (Iterable): Arguments for the functions. Their interperation\n            depends on the unpack argument.\n        n_cores (int): Number of threads used to evaluate the function in parallel.\n            Value below one are interpreted as one.\n        error_handling (str): Can take the values \"raise\" (raise the error and stop all\n            tasks as soon as one task fails) and \"continue\" (catch exceptions and set\n            the output of failed tasks to the traceback of the raised exception.\n            KeyboardInterrupt and SystemExit are always raised.\n        unpack_symbol (str or None). Can be \"**\", \"*\" or None. If None, func just takes\n            one argument. 
If \"*\", the elements of arguments are positional arguments for\n            func. If \"**\", the elements of arguments are keyword arguments for func.\n\n    Returns:\n        list: The function evaluations.\n\n    \"\"\"\n    _check_inputs(func, arguments, n_cores, error_handling, unpack_symbol)\n    n_cores = int(n_cores) if int(n_cores) >= 2 else 1\n\n    reraise = error_handling in [\n        \"raise\",\n        ErrorHandling.RAISE,\n        ErrorHandling.RAISE_STRICT,\n    ]\n\n    @unpack(symbol=unpack_symbol)\n    @catch(default=\"__traceback__\", reraise=reraise)\n    def internal_func(*args: Any, **kwargs: Any) -> T:\n        return func(*args, **kwargs)\n\n    if n_cores == 1:\n        res = [internal_func(arg) for arg in arguments]\n    else:\n        results = [None] * len(arguments)\n        threads = []\n        errors = []\n        error_lock = threading.Lock()\n\n        def thread_func(index: int, arg: Any) -> None:\n            try:\n                results[index] = internal_func(arg)\n            except Exception as e:\n                with error_lock:\n                    errors.append(e)\n\n        for i, arg in enumerate(arguments):\n            thread = threading.Thread(target=thread_func, args=(i, arg))\n            threads.append(thread)\n            thread.start()\n\n        for thread in threads:\n            thread.join()\n\n        if errors:\n            raise errors[0]\n\n        res = cast(list[T], results)\n    return res\n\n\ndef _check_inputs(\n    func: Callable[..., T],\n    arguments: list[Any],\n    n_cores: int,\n    error_handling: ErrorHandling | Literal[\"raise\", \"continue\"],\n    unpack_symbol: Literal[\"*\", \"**\"] | None,\n) -> None:\n    if not callable(func):\n        raise TypeError(\"func must be callable.\")\n\n    try:\n        arguments = list(arguments)\n    except Exception as e:\n        raise ValueError(\"arguments must be list like.\") from e\n\n    try:\n        int(n_cores)\n    except Exception as 
e:\n        raise ValueError(\"n_cores must be an integer.\") from e\n\n    if unpack_symbol not in (None, \"*\", \"**\"):\n        raise ValueError(\n            f\"unpack_symbol must be None, '*' or '**', not {unpack_symbol}\"\n        )\n\n    if error_handling not in [\n        \"raise\",\n        \"continue\",\n        ErrorHandling.RAISE,\n        ErrorHandling.CONTINUE,\n        ErrorHandling.RAISE_STRICT,\n    ]:\n        raise ValueError(\n            \"error_handling must be 'raise' or 'continue' or ErrorHandling not \"\n            f\"{error_handling}\"\n        )\n\n\ndef process_batch_evaluator(\n    batch_evaluator: BatchEvaluatorLiteral | BatchEvaluator = \"joblib\",\n) -> BatchEvaluator:\n    if batch_evaluator is None:\n        deprecations.throw_none_valued_batch_evaluator_warning()\n        batch_evaluator = \"joblib\"\n\n    if callable(batch_evaluator):\n        out = batch_evaluator\n    elif isinstance(batch_evaluator, str):\n        if batch_evaluator == \"joblib\":\n            out = cast(BatchEvaluator, joblib_batch_evaluator)\n        elif batch_evaluator == \"pathos\":\n            out = cast(BatchEvaluator, pathos_mp_batch_evaluator)\n        elif batch_evaluator == \"threading\":\n            out = cast(BatchEvaluator, threading_batch_evaluator)\n        else:\n            raise ValueError(\n                \"Invalid batch evaluator requested. Currently only 'pathos', 'joblib', \"\n                \"and 'threading' are supported.\"\n            )\n    else:\n        raise TypeError(\"batch_evaluator must be a callable or string.\")\n\n    return out\n"
  },
  {
    "path": "src/optimagic/benchmarking/__init__.py",
    "content": ""
  },
  {
    "path": "src/optimagic/benchmarking/benchmark_reports.py",
    "content": "import pandas as pd\n\nfrom optimagic.benchmarking.process_benchmark_results import (\n    process_benchmark_results,\n)\nfrom optimagic.visualization.profile_plot import create_solution_times\n\n\ndef convergence_report(\n    problems, results, *, stopping_criterion=\"y\", x_precision=1e-4, y_precision=1e-4\n):\n    \"\"\"Create a DataFrame with convergence information for a set of problems.\n\n    Args:\n        problems (dict): optimagic benchmarking problems dictionary. Keys are the\n            problem names. Values contain information on the problem, including the\n            solution value.\n        results (dict): optimagic benchmarking results dictionary. Keys are\n            tuples of the form (problem, algorithm), values are dictionaries of the\n            collected information on the benchmark run, including 'criterion_history'\n            and 'time_history'.\n        stopping_criterion (str): one of \"x_and_y\", \"x_or_y\", \"x\", \"y\". Determines\n            how convergence is determined from the two precisions. Default is \"y\".\n        x_precision (float or None): how close an algorithm must have gotten to the\n            true parameter values (as percent of the Euclidean distance between start\n            and solution parameters) before the criterion for clipping and convergence\n            is fulfilled. Default is 1e-4.\n        y_precision (float or None): how close an algorithm must have gotten to the\n            true criterion values (as percent of the distance between start\n            and solution criterion value) before the criterion for clipping and\n            convergence is fulfilled. Default is 1e-4.\n\n    Returns:\n        pandas.DataFrame: indexes are the problems, columns are the algorithms and\n            the dimensionality of the benchmark problems. 
For the algorithms column,\n            the values are strings that are either \"success\", \"failed\", or \"error\".\n            For the dimensionality column, the values denote the number of dimensions\n            of the problem.\n\n    \"\"\"\n    _, converged_info = process_benchmark_results(\n        problems=problems,\n        results=results,\n        stopping_criterion=stopping_criterion,\n        x_precision=x_precision,\n        y_precision=y_precision,\n    )\n\n    report = _get_success_info(results, converged_info)\n    report[\"dimensionality\"] = report.index.map(_get_problem_dimensions(problems))\n\n    return report\n\n\ndef rank_report(\n    problems,\n    results,\n    *,\n    runtime_measure=\"n_evaluations\",\n    stopping_criterion=\"y\",\n    x_precision=1e-4,\n    y_precision=1e-4,\n):\n    \"\"\"Create a DataFrame with rank information for a set of problems.\n\n    Args:\n        problems (dict): optimagic benchmarking problems dictionary. Keys are the\n            problem names. Values contain information on the problem, including the\n            solution value.\n        results (dict): optimagic benchmarking results dictionary. Keys are\n            tuples of the form (problem, algorithm), values are dictionaries of the\n            collected information on the benchmark run, including 'criterion_history'\n            and 'time_history'.\n        runtime_measure (str): \"n_evaluations\", \"n_batches\" or \"walltime\".\n            This is the runtime until the desired convergence was reached by an\n            algorithm. This is called performance measure by Moré and Wild (2009).\n            Default is \"n_evaluations\".\n        stopping_criterion (str): one of \"x_and_y\", \"x_or_y\", \"x\", \"y\". 
Determines\n            how convergence is determined from the two precisions.\n        x_precision (float or None): how close an algorithm must have gotten to the\n            true parameter values (as percent of the Euclidean distance between start\n            and solution parameters) before the criterion for clipping and convergence\n            is fulfilled. Default is 1e-4.\n        y_precision (float or None): how close an algorithm must have gotten to the\n            true criterion values (as percent of the distance between start\n            and solution criterion value) before the criterion for clipping and\n            convergence is fulfilled. Default is 1e-4.\n\n    Returns:\n        pandas.DataFrame: indexes are the problems, columns are the algorithms and the\n            dimensionality of the problems. The values are the ranks of the algorithms\n            for each problem, where 0 means the algorithm was the fastest, 1 means it\n            was the second fastest and so on. If an algorithm did not converge on a\n            problem, the value is \"failed\". 
If an algorithm did encounter an error\n            during optimization, the value is \"error\".\n\n    \"\"\"\n    histories, converged_info = process_benchmark_results(\n        problems=problems,\n        results=results,\n        stopping_criterion=stopping_criterion,\n        x_precision=x_precision,\n        y_precision=y_precision,\n    )\n\n    solution_times = create_solution_times(\n        histories, runtime_measure, converged_info, return_tidy=False\n    )\n    solution_times[\"rank\"] = (\n        solution_times.groupby(\"problem\")[runtime_measure].rank(\n            method=\"dense\", ascending=True\n        )\n        - 1\n    ).astype(\"Int64\")\n\n    success_info = _get_success_info(results, converged_info)\n\n    df_wide = solution_times.pivot(index=\"problem\", columns=\"algorithm\", values=\"rank\")\n    report = df_wide.astype(str)\n    report.columns.name = None\n\n    report[~converged_info] = success_info\n    report[\"dimensionality\"] = report.index.map(_get_problem_dimensions(problems))\n\n    return report\n\n\ndef traceback_report(problems, results, return_type=\"dataframe\"):\n    \"\"\"Create traceback report for all problems that have not been solved.\n\n    Args:\n        results (dict): optimagic benchmarking results dictionary. Keys are\n            tuples of the form (problem, algorithm), values are dictionaries of the\n            collected information on the benchmark run, including 'criterion_history'\n            and 'time_history'.\n        return_type (str): either \"text\", \"markdown\", \"dict\" or \"dataframe\".\n            If \"text\", the traceback report is returned as a string. If \"markdown\",\n            it is a markdown string. If \"dict\", it is returned as a dictionary.\n            If \"dataframe\", it is a tidy pandas DataFrame, where indexes are the\n            algorithm and problem names, the columns are the tracebacks and the\n            dimensionality of the problem. 
Default is \"dataframe\".\n\n    Returns:\n        (list or str or dict or pandas.DataFrame): traceback report. If return_type\n            is \"text\", the report is a list of strings. If \"markdown\", it is a\n            formatted markdown string with algorithms and problem names as headers.\n            If return_type is \"dict\", the report is a dictionary. If return_type is\n            \"dataframe\", it is a tidy pandas DataFrame. In the latter case, indexes\n            are the algorithm and problem names, the columns are the tracebacks and\n            the dimensionality of the problems. The values are the tracebacks of the\n            algorithms for problems where they stopped with an error.\n\n    \"\"\"\n    if return_type == \"text\":\n        report = []\n        for result in results.values():\n            if isinstance(result[\"solution\"], str):\n                report.append(result[\"solution\"])\n\n    elif return_type == \"markdown\":\n        report = \"```python\"\n        for (problem_name, algorithm_name), result in results.items():\n            if isinstance(result[\"solution\"], str):\n                if f\"### {algorithm_name}\" not in report:\n                    report += f\"\\n### {algorithm_name} \\n\"\n                report += f\"\\n#### {problem_name} \\n\"\n                report += f\"\\n{result['solution']} \\n\"\n        report += \"\\n```\"\n\n    elif return_type == \"dict\":\n        report = {}\n        for (problem_name, algorithm_name), result in results.items():\n            if isinstance(result[\"solution\"], str):\n                report[(problem_name, algorithm_name)] = result[\"solution\"]\n\n    elif return_type == \"dataframe\":\n        tracebacks = {}\n        for (problem_name, algorithm_name), result in results.items():\n            if isinstance(result[\"solution\"], str):\n                tracebacks[algorithm_name] = tracebacks.setdefault(algorithm_name, {})\n                
tracebacks[algorithm_name][problem_name] = result[\"solution\"]\n\n        report = pd.DataFrame.from_dict(tracebacks, orient=\"index\").stack().to_frame()\n        report.index.set_names([\"algorithm\", \"problem\"], inplace=True)\n        report.columns = [\"traceback\"]\n        report[\"dimensionality\"] = 0\n\n        for problem_name, dim in _get_problem_dimensions(problems).items():\n            if problem_name in report.index.get_level_values(\"problem\"):\n                report.loc[(slice(None), problem_name), \"dimensionality\"] = dim\n\n    else:\n        raise ValueError(\n            f\"return_type {return_type} is not supported. Must be one of \"\n            f\"'text', 'markdown', 'dict' or 'dataframe'.\"\n        )\n\n    return report\n\n\ndef _get_success_info(results, converged_info):\n    \"\"\"Create a DataFrame with information on whether an algorithm succeeded or not.\n\n    Args:\n        results (dict): optimagic benchmarking results dictionary. Keys are\n            tuples of the form (problem, algorithm), values are dictionaries of the\n            collected information on the benchmark run, including 'criterion_history'\n            and 'time_history'.\n        converged_info (pandas.DataFrame): columns are the algorithms, indexes are the\n            problems. 
The values are boolean and True when the algorithm arrived at\n            the solution with the desired precision.\n\n    Returns:\n        pandas.DataFrame: indexes are the problems, columns are the algorithms.\n           values are strings that are either \"success\", \"failed\", or \"error\".\n\n    \"\"\"\n    success_info = converged_info.replace({True: \"success\", False: \"failed\"})\n\n    for key, value in results.items():\n        if isinstance(value[\"solution\"], str):\n            success_info.at[key] = \"error\"\n\n    return success_info\n\n\ndef _get_problem_dimensions(problems):\n    \"\"\"Get the dimension of each problem.\n\n    Args:\n        problems (dict): dictionary of problems. keys are problem names, values are\n            dictionaries with the problem information.\n\n    Returns:\n        dict: keys are problem names, values are the dimension of the problem.\n\n    \"\"\"\n    return {prob: len(problems[prob][\"inputs\"][\"params\"]) for prob in problems}\n"
  },
  {
    "path": "src/optimagic/benchmarking/cartis_roberts.py",
    "content": "\"\"\"Define the medium scale CUTEst Benchmark Set.\n\nThis benchmark set contains 60 test cases for nonlinear least squares\nsolvers. It was used to benchmark all modern model based non-linear\nderivative free least squares solvers (e.g. POUNDERS, DFOGN, DFOLS).\n\nThe parameter dimensions are of medium scale, varying between 25 and 100.\n\nThe benchmark set is based on Table 3 in Cartis and Roberts (2019).\nImplementation is based on\n- the original SIF files: https://bitbucket.org/optrove/sif/src/master/\n- on sources cited in the SIF files or,\n- where available, on AMPL implementations available here:\n- https://vanderbei.princeton.edu/ampl/nlmodels/cute/index.html\n\n\"\"\"\n\nfrom functools import partial\n\nimport numpy as np\n\nfrom optimagic import mark\nfrom optimagic.config import IS_NUMBA_INSTALLED\nfrom optimagic.parameters.bounds import Bounds\n\nif IS_NUMBA_INSTALLED:\n    from numba import njit\nelse:\n\n    def njit(func):\n        return func\n\n\nfrom optimagic.benchmarking.more_wild import (\n    brown_almost_linear,\n    linear_full_rank,\n    linear_rank_one,\n    watson,\n)\n\n\n@mark.least_squares\ndef luksan11(x):\n    dim_in = len(x)\n    fvec = np.zeros(2 * (dim_in - 1))\n    fvec[::2] = 20 * x[:-1] / (1 + x[:-1] ** 2) - 10 * x[1:]\n    fvec[1::2] = x[:-1] - 1\n    return fvec\n\n\n@mark.least_squares\ndef luksan12(x):\n    dim_in = len(x)\n    n = (dim_in - 2) // 3\n    i = np.arange(0, 3 * n, 3)\n    fvec = np.zeros(6 * n)\n    fvec[::6] = 10 * (x[i] ** 2 - x[i + 1])\n    fvec[1::6] = x[i + 2] - 1\n    fvec[2::6] = (x[i + 3] - 1) ** 2\n    fvec[3::6] = (x[i + 4] - 1) ** 3\n    fvec[4::6] = x[i] ** 2 * x[i + 3] + np.sin(x[i + 3] - x[i + 4]) - 10\n    fvec[5::6] = x[i + 1] + (x[i + 2] ** 4) * (x[i + 3] ** 2) - 20\n    return fvec\n\n\n@mark.least_squares\ndef luksan13(x):\n    dim_in = len(x)\n    n = (dim_in - 2) // 3\n    fvec = np.zeros(n * 7)\n    i = np.arange(n)\n    k = i * 7\n    fvec[k] = 10 * (x[3 * i] ** 2 - 
x[3 * i + 1])\n    fvec[k + 1] = 10 * (x[3 * i + 1] ** 2 - x[3 * i + 2])\n    fvec[k + 2] = (x[3 * i + 2] - x[3 * i + 3]) ** 2\n    fvec[k + 3] = (x[3 * i + 3] - x[3 * i + 4]) ** 2\n    fvec[k + 4] = x[3 * i] + x[3 * i + 1] ** 2 + x[3 * i + 2] - 30\n    fvec[k + 5] = x[3 * i + 1] - x[3 * i + 2] ** 2 + x[3 * i + 3] - 10\n    fvec[k + 6] = x[3 * i + 1] * x[3 * i + 4] - 10\n\n    return fvec\n\n\n@mark.least_squares\ndef luksan14(x):\n    dim_in = len(x)\n    dim_out = 7 * (dim_in - 2) // 3\n    fvec = np.zeros(dim_out, dtype=np.float64)\n\n    for i in range(0, dim_in - 2, 3):\n        k = (i // 3) * 7\n        fvec[k : k + 7] = [\n            10 * (x[i] ** 2 - x[i + 1]),\n            x[i + 1] + x[i + 2] - 2,\n            x[i + 3] - 1,\n            x[i + 4] - 1,\n            x[i] + 3 * x[i + 1],\n            x[i + 2] + x[i + 3] - 2 * x[i + 4],\n            10 * (x[i + 1] ** 2 - x[i + 4]),\n        ]\n\n    return fvec\n\n\n@mark.least_squares\ndef luksan15(x):\n    dim_in = len(x)\n    dim_out = (dim_in - 2) * 2\n    temp = np.zeros((dim_out, 3), dtype=np.float64)\n    y = np.tile([35.8, 11.2, 6.2, 4.4], dim_out // 4)\n\n    for p in range(1, 4):\n        k = 0\n        for i in range(0, dim_in - 2, 2):\n            for j in range(1, 5):\n                temp[k, p - 1] = (p**2 / j) * np.abs(\n                    x[i] * (x[i + 1] ** 2) * (x[i + 2] ** 3) * (x[i + 3] ** 4)\n                ) ** (1 / (p * j))\n\n                k += 1\n\n    fvec = y - np.sum(temp, axis=1)\n\n    return fvec\n\n\n@mark.least_squares\ndef luksan16(x):\n    dim_in = len(x)\n    dim_out = (dim_in - 2) * 2\n    temp = np.zeros((dim_out, 3), dtype=np.float64)\n    y = np.tile([35.8, 11.2, 6.2, 4.4], dim_out // 4)\n\n    for p in range(1, 4):\n        k = 0\n        for i in range(0, dim_in - 2, 2):\n            for j in range(1, 5):\n                temp[k, p - 1] = (p**2 / j) * np.exp(\n                    (x[i] + 2 * x[i + 1] + 3 * x[i + 2] + 4 * x[i + 3]) * (1 / (p * j))\n                
)\n                k += 1\n\n    fvec = y - np.sum(temp, axis=1)\n\n    return fvec\n\n\n@mark.least_squares\ndef luksan17(x):\n    dim_in = len(x)\n    dim_out = (dim_in - 2) * 2\n    temp = np.zeros((dim_out, 4), dtype=np.float64)\n    y = np.tile([30.6, 72.2, 124.4, 187.4], dim_out // 4)\n\n    for q in range(1, 5):\n        k = 0\n        for i in range(-1, dim_in - 4, 2):\n            for j in range(1, 5):\n                temp[k, q - 1] += -j * q**2 * np.sin(x[i + q]) + j**2 * q * np.cos(\n                    x[i + q]\n                )\n                k += 1\n\n    fvec = y - np.sum(temp, axis=1)\n\n    return fvec\n\n\n@mark.least_squares\ndef luksan21(x):\n    dim_out = len(x)\n    h = 1 / (dim_out + 1)\n    fvec = np.zeros(dim_out, dtype=np.float64)\n\n    fvec[0] = 2 * x[0] + 0.5 * h**2 * (x[0] + h + 1) ** 3 - x[1] + 1\n    for i in range(1, dim_out - 1):\n        fvec[i] = (\n            2 * x[i]\n            + 0.5 * h**2 * (x[i] + h * (i + 1) + 1) ** 3\n            - x[i - 1]\n            - x[i + 1]\n            + 1\n        )\n    fvec[-1] = 2 * x[-1] + 0.5 * h**2 * (x[-1] + h * dim_out + 1) ** 3 - x[-2] + 1\n\n    return fvec\n\n\n@mark.least_squares\ndef luksan22(x):\n    dim_out = 2 * len(x) - 2\n    fvec = np.zeros(dim_out)\n    fvec[0] = x[0] - 1\n    fvec[1:-1:2] = 10 * (x[:-2] ** 2 - x[1:-1])\n    fvec[2:-1:2] = 2 * np.exp(-((x[:-2] - x[1:-1]) ** 2)) + np.exp(\n        -2 * (x[1:-1] - x[2:]) ** 2\n    )\n    fvec[-1] = -10 * (x[-2] ** 2)\n    return fvec\n\n\n@mark.least_squares\ndef morebvne(x):\n    dim_in = len(x)\n    h = 1 / (dim_in + 1)\n    i = np.arange(1, dim_in + 1)\n    fvec = np.zeros(dim_in)\n\n    fvec[0] = 2 * x[0] - x[1] + h**2 / 2 * (x[0] + i[0] * h + 1) ** 3\n    fvec[1:-1] = (\n        2 * x[1:-1] - x[:-2] - x[2:] + h**2 / 2 * (x[1:-1] + i[1:-1] * h + 1) ** 3\n    )\n    fvec[-1] = 2 * x[-2] - x[-2] + h**2 / 2 * (x[-1] + i[-1] * h + 1) ** 3\n\n    return fvec\n\n\n@mark.least_squares\n@njit\ndef flosp2(x, a, b, ra=1.0e7):\n  
  n = 5\n    xvec = np.ones((3, n, n), dtype=np.float64)\n    xvec[0] = x[: n**2].reshape(n, n)\n    xvec[1] = x[n**2 : 2 * n**2].reshape(n, n)\n    xvec[2, 1:-1, 1:-1] = x[2 * n**2 :].reshape(n - 2, n - 2)\n\n    h = 1 / 2\n    ax = 1.0\n    axx = ax**2\n    theta = 0.5 * np.pi\n    pi1 = -0.5 * ax * ra * np.cos(theta)\n    pi2 = 0.5 * ax * ra * np.sin(theta)\n\n    fvec = np.empty(59, dtype=np.float64)\n\n    temp = np.empty((n - 2, n - 2, n - 2), dtype=np.float64)\n    for j in range(1, n - 1):\n        for i in range(1, n - 1):\n            temp[0, i - 1, j - 1] = (\n                xvec[0, i, j] * -2 * (1 / h) ** 2\n                + xvec[0, i + 1, j] * (1 / h) ** 2\n                + xvec[0, i - 1, j] * (1 / h) ** 2\n                + xvec[0, i, j] * -2 * axx * (1 / h) ** 2\n                + xvec[0, i, j + 1] * axx * (1 / h) ** 2\n                + xvec[0, i, j - 1] * ax * (1 / h) ** 2\n                + xvec[1, i + 1, j] * -pi1 / (2 * h)\n                + xvec[1, i - 1, j] * pi1 / (2 * h)\n                + xvec[1, i, j + 1] * -pi2 / (2 * h)\n                + xvec[1, i, j - 1] * pi2 / (2 * h)\n            )\n\n            temp[1, i - 1, j - 1] = (\n                xvec[2, i, j] * -2 * (1 / h) ** 2\n                + xvec[2, i + 1, j] * (1 / h) ** 2\n                + xvec[2, i - 1, j] * (1 / h) ** 2\n                + xvec[2, i, j] * -2 * axx * (1 / h) ** 2\n                + xvec[2, i, j + 1] * axx * (1 / h) ** 2\n                + xvec[2, i, j - 1] * axx * (1 / h) ** 2\n                + xvec[0, i, j] * axx * 0.25\n            )\n\n            temp[2, i - 1, j - 1] = (\n                xvec[1, i, j] * -2 * (1 / h) ** 2\n                + xvec[1, i + 1, j] * (1 / h) ** 2\n                + xvec[1, i - 1, j] * (1 / h) ** 2\n                + xvec[1, i, j] * -2 * axx * (1 / h) ** 2\n                + xvec[1, i, j + 1] * axx * (1 / h) ** 2\n                + xvec[1, i, j - 1] * axx * (1 / h) ** 2\n                - 0.25\n                * ax\n               
 * (1 / h) ** 2\n                * (xvec[2, i, j + 1] - xvec[2, i, j - 1])\n                * (xvec[1, i + 1, j] - xvec[1, i - 1, j])\n                + 0.25\n                * ax\n                * (1 / h) ** 2\n                * (xvec[2, i + 1, j] - xvec[2, i - 1, j])\n                * (xvec[1, i, j + 1] - xvec[1, i, j - 1])\n            )\n    fvec[:27] = temp.flatten()\n\n    temp = np.zeros((n, n), dtype=np.float64)\n    for k in range(n):\n        temp[k, -1] = a[2]\n        temp[k, 0] = b[2]\n        temp[0, k] = 0\n    temp[-1, -1] = 0\n\n    for k in range(n):\n        temp[k, -1] += (\n            xvec[1, k, -1] * 2 * a[0] * (1 / h)\n            + xvec[1, k, -2] * -2 * a[0] * (1 / h)\n            + xvec[1, k, -1] * a[1]\n        )\n        temp[k, 0] += (\n            xvec[1, k, 1] * 2 * b[0] * (1 / h)\n            + xvec[1, k, 0] * -2 * b[0] * (1 / h)\n            + xvec[1, k, 0] * b[1]\n        )\n        temp[-1, k] += xvec[1, -1, k] * 2 * (1 / (ax * h)) + xvec[1, -2, k] * -2 * (\n            1 / (ax * h)\n        )\n        temp[0, k] += xvec[1, 1, k] * 2 * (1 / (ax * h)) + xvec[1, 0, k] * -2 * (\n            1 / (ax * h)\n        )\n\n    fvec[27:32] = temp[0]\n    fvec[32:37] = temp[-1]\n    fvec[37:40] = temp[1:-1, 0]\n    fvec[40:43] = temp[1:-1, -1]\n\n    temp = np.zeros((n, n), dtype=np.float64)\n    for k in range(n):\n        temp[k, -1] += xvec[2, k, -1] * -2 * (1 / h) + xvec[2, k, -2] * 2 * (1 / h)\n        temp[k, 0] += xvec[2, k, 1] * 2 * (1 / h) + xvec[2, k, 0] * -2 * (1 / h)\n        temp[-1, k] += xvec[2, -1, k] * -2 * (1 / (ax * h)) + xvec[2, -2, k] * 2 * (\n            1 / (ax * h)\n        )\n        temp[0, k] += xvec[2, 1, k] * 2 * (1 / (ax * h)) + xvec[2, 0, k] * -2 * (\n            1 / (ax * h)\n        )\n\n    fvec[43:48] = temp[0]\n    fvec[48:53] = temp[-1]\n    fvec[53:56] = temp[1:-1, 0]\n    fvec[56:] = temp[1:-1, -1]\n\n    return fvec\n\n\n@mark.least_squares\ndef oscigrne(x):\n    dim_in = len(x)\n    rho = 500\n\n    
fvec = np.zeros(dim_in)\n    fvec[0] = 0.5 * x[0] - 0.5 - 4 * rho * (x[1] - 2.0 * x[0] ** 2 + 1.0) * x[0]\n    fvec[1:-1] = (\n        2 * rho * (x[1:-1] - 2.0 * x[:-2] ** 2 + 1.0)\n        - 4 * rho * (x[2:] - 2.0 * x[:-2] ** 2 + 1.0) * x[2:]\n    )\n    fvec[-1] = 2 * rho * (x[-1] - 2.0 * x[-2] ** 2 + 1.0)\n\n    return fvec\n\n\n@mark.least_squares\ndef spmsqrt(x):\n    m = (len(x) + 2) // 3\n    xmat = np.diag(x[2:-1:3], -1) + np.diag(x[::3], 0) + np.diag(x[1:-2:3], 1)\n\n    b = np.zeros((m, m), dtype=np.float64)\n    b[0, 0] = np.sin(1)\n    b[0, 1] = np.sin(4)\n    k = 2\n    for i in range(1, m - 1):\n        k += 1\n        b[i, i - 1] = np.sin(k**2)\n        k += 1\n        b[i, i] = np.sin(k**2)\n        k += 1\n        b[i, i + 1] = np.sin(k**2)\n    k += 1\n    b[-1, -2] = np.sin(k**2)\n    k += 1\n    b[-1, -1] = np.sin(k**2)\n\n    fmat = np.zeros((m, m), dtype=np.float64)\n    fmat[0, 0] = xmat[0, 0] ** 2 + xmat[0, 1] * xmat[1, 0]\n    fmat[0, 1] = xmat[0, 0] * xmat[0, 1] + xmat[0, 1] * xmat[1, 1]\n    fmat[0, 2] = xmat[0, 1] * xmat[1, 2]\n\n    fmat[1, 0] = xmat[1, 0] * xmat[0, 0] + xmat[1, 1] * xmat[1, 0]\n    fmat[1, 1] = xmat[1, 0] * xmat[0, 1] + xmat[1, 1] ** 2 + xmat[1, 2] * xmat[2, 1]\n    fmat[1, 2] = xmat[1, 1] * xmat[1, 2] + xmat[1, 2] * xmat[2, 2]\n    fmat[1, 3] = xmat[1, 2] * xmat[2, 3]\n\n    for i in range(2, m - 2):\n        fmat[i, i - 2] = xmat[i, i - 1] * xmat[i - 1, i - 2]\n        fmat[i, i - 1] = (\n            xmat[i, i - 1] * xmat[i - 1, i - 1] + xmat[i, i] * xmat[i, i - 1]\n        )\n        fmat[i, i] = (\n            xmat[i, i - 1] * xmat[i - 1, i]\n            + xmat[i, i] ** 2\n            + xmat[i, i + 1] * xmat[i + 1, i]\n        )\n        fmat[i, i + 1] = (\n            xmat[i, i] * xmat[i, i + 1] + xmat[i, i + 1] * xmat[i + 1, i + 1]\n        )\n        fmat[i, i + 2] = xmat[i, i + 1] * xmat[i + 1, i + 2]\n\n    fmat[-2, -4] = xmat[-2, -3] * xmat[-3, -4]\n    fmat[-2, -3] = xmat[-2, -3] * xmat[-3, -3] + xmat[-2, 
@mark.least_squares
def semicon2(x):
    """Residuals of the SEMICON2 benchmark (a discretized two-point boundary
    value problem with exponential source terms).

    Args:
        x: 1d array of values at the n interior grid points.

    Returns:
        1d array of n residuals.
    """
    n = len(x) // 1  # integer length of x (the // 1 is a no-op)
    ln = 9 * n // 10  # index separating the two residual regimes below

    # Fixed problem parameters.
    lambda_ = 0.2
    a = -0.00009
    b = 0.00001
    ua = 0.0
    ub = 700.0
    ca = 1e12
    cb = 1e13
    beta = 40.0

    h = (b - a) / (n + 1)  # mesh width of the n + 2 point grid on [a, b]
    lb = lambda_ * beta
    lua = lambda_ * ua
    lub = lambda_ * ub

    # Pad the variables with the fixed boundary values lua and lub.
    xvec = np.zeros(n + 2, dtype=np.float64)
    xvec[0] = lua
    xvec[1:-1] = x
    xvec[-1] = lub

    fvec = np.zeros(n, dtype=np.float64)
    # First ln equations: second-difference stencil plus two exponential
    # source terms, shifted by the constant -lambda * h**2 * ca.
    for i in range(1, ln + 1):
        fvec[i - 1] = (
            xvec[i - 1]
            - 2 * xvec[i]
            + xvec[i + 1]
            + lambda_ * (h**2) * ca * np.exp(-lb * (xvec[i] - lua))
            - lambda_ * (h**2) * cb * np.exp(lb * (xvec[i] - lub))
            - lambda_ * (h**2) * ca
        )
    # Remaining equations: same stencil and exponentials but shifted by
    # +lambda * h**2 * cb instead.
    for i in range(ln + 1, n + 1):
        fvec[i - 1] = (
            xvec[i - 1]
            - 2 * xvec[i]
            + xvec[i + 1]
            - lambda_ * (h**2) * cb * np.exp(lb * (xvec[i] - lub))
            + lambda_ * (h**2) * ca * np.exp(-lb * (xvec[i] - lua))
            + lambda_ * (h**2) * cb
        )

    return fvec


@mark.least_squares
def qr3d(x, m=5):
    """Residuals of the QR3D benchmark: find q and upper-triangular r whose
    product reproduces the fixed tridiagonal matrix a, with q q^T = I.

    Args:
        x: flat vector with the m*m entries of q followed by the
            m*(m + 1)/2 upper-triangular entries of r (row-major).
        m: matrix dimension (the benchmark uses 5).

    Returns:
        Concatenation of the upper-triangular entries of q q^T - I and the
        flattened entries of q r - a.
    """
    q = x[: m**2].reshape(m, m)
    r = np.zeros((m, m), dtype=np.float64)
    r[np.triu_indices_from(r)] = x[m**2 :]

    # Fixed tridiagonal target matrix of the benchmark.
    a = (
        np.diag((1 - np.arange(2, m + 1)) / m, -1)
        + np.diag(2 * np.arange(1, m + 1) / m, 0)
        + np.diag((1 - np.arange(1, m)) / m, 1)
    )
    a[0, 1] = 0
    a[-1, -2] = (1 - m) / m
    # NOTE(review): 2 * m breaks the 2 * i / m pattern of the diagonal —
    # confirm against the reference problem definition.
    a[-1, -1] = 2 * m

    omat = np.zeros((m, m), dtype=np.float64)  # triu
    fmat = np.zeros((m, m), dtype=np.float64)

    # Upper triangle of q @ q.T (row-by-row dot products).
    for i in range(m):
        for j in range(i, m):
            for k in range(m):
                omat[i, j] += q[i, k] * q[j, k]

    # q @ r, exploiting that r is upper triangular (k only runs up to j).
    for i in range(m):
        for j in range(m):
            for k in range(j + 1):
                fmat[i, j] += q[i, k] * r[k, j]

    # Subtract the identity from the orthogonality part ...
    for i in range(m):
        omat[i, i] -= 1
    # ... and the tridiagonal bands of a from the factorization part.
    fmat[0, 0] -= a[0, 0]
    fmat[0, 1] -= a[0, 1]
    for i in range(1, m - 1):
        fmat[i, i - 1] -= a[i, i - 1]
        fmat[i, i] -= a[i, i]
        fmat[i, i + 1] -= a[i, i + 1]
    fmat[-1, -2] -= a[-1, -2]
    fmat[-1, -1] -= a[-1, -1]

    return np.concatenate((omat[np.triu_indices_from(omat)].flatten(), fmat.flatten()))
@mark.least_squares
def eigen(x, param):
    """Residuals of the EIGEN(A/B/C) benchmarks: find eigenvalues ``dvec`` and
    an orthogonal matrix ``qmat`` with ``qmat.T @ diag(dvec) @ qmat == param``.

    Args:
        x: flat vector holding the dim_in eigenvalues followed by the
            dim_in * dim_in entries of the eigenvector matrix (row-major).
        param: the dim_in x dim_in matrix to be decomposed.

    Returns:
        Concatenation of the flattened decomposition residual and the
        flattened orthogonality residual.
    """
    dim_in = int(np.sqrt(len(x) + 0.25))
    dvec = x[:dim_in]
    qmat = x[dim_in:].reshape(dim_in, dim_in)
    # Bug fix: the eigen benchmarks are defined through Q^T D Q - A and the
    # orthogonality residual Q^T Q - I; the transpose was previously missing
    # from both products (Q @ Q - I is not an orthogonality condition).
    emat = qmat.T @ np.diag(dvec) @ qmat - param
    omat = qmat.T @ qmat - np.eye(dim_in)
    return np.concatenate((emat.flatten(), omat.flatten()))


@mark.least_squares
def powell_singular(x):
    """Extended Powell singular residuals (More-Garbow-Hillstrom problem 13).

    Args:
        x: 1d array whose length is a multiple of 4; each block of four
            variables contributes four residuals.

    Returns:
        1d array of residuals, same length as ``x``.
    """
    dim_in = len(x)
    fvec = np.zeros(dim_in)
    fvec[::4] = x[::4] + 10 * x[1::4]
    # Bug fix: MGH defines these residuals with sqrt(5) and sqrt(10) factors
    # (so the squared residuals carry the weights 5 and 10); the plain
    # factors 5 and 10 used before over-weighted both terms.
    fvec[1::4] = np.sqrt(5) * (x[2::4] - x[3::4])
    fvec[2::4] = (x[1::4] - 2 * x[2::4]) ** 2
    fvec[3::4] = np.sqrt(10) * (x[0::4] - x[3::4]) ** 2
    return fvec
dtype=np.float64)\n    tf = 100.0\n    b = 40.0\n    d = 60.0\n    q = 2500000.0\n\n    out = np.empty(n * 5 - 1, dtype=np.float64)\n    fvec1 = np.zeros(m, dtype=np.float64)\n    fvec3 = np.zeros(m, dtype=np.float64)\n    fvec2 = np.zeros((n - 2, m), dtype=np.float64)\n    fvec7 = np.zeros(n, dtype=np.float64)\n    fvec8 = 0\n    fvec9 = np.zeros(n - 2, dtype=np.float64)\n\n    for j in range(m):\n        fvec1[j] += x[0, j] * b\n        fvec3[j] += -x[n - 1, j]\n\n    for j in range(m):\n        fvec1[j] += -1 * x[1, j] * (v[0] + b)\n        fvec1[j] += v[0] * x[0, j] * np.exp(avec[j] + (bvec[j] / (t[0] + cvec[j])))\n        fvec3[j] += x[n - 2, j] * np.exp(avec[j] + (bvec[j] / (t[n - 2] + cvec[j])))\n\n        fvec8 += (\n            (\n                v[0]\n                * x[0, j]\n                * np.exp(avec[j] + (bvec[j] / (t[0] + cvec[j])))\n                * (be[j] + bep[j] * t[0])\n            )\n            + b * x[0, j] * (alp[j] * t[0])\n            - x[1, j] * (b + v[0]) * (alp[j] * t[1])\n        )\n\n        for i in range(1, n - 1):\n            fvec2[i - 1, j] += (\n                v[i - 1]\n                * x[i - 1, j]\n                * (-1)\n                * np.exp(avec[j] + (bvec[j] / (t[i - 1] + cvec[j])))\n            )\n            fvec2[i - 1, j] += (\n                v[i] * x[i, j] * np.exp(avec[j] + (bvec[j] / (t[i] + cvec[j])))\n            )\n\n            fvec9[i - 1] += (\n                v[i]\n                * x[i, j]\n                * np.exp(avec[j] + (bvec[j] / (t[i] + cvec[j])))\n                * (be[j] + bep[j] * t[i])\n            )\n            fvec9[i - 1] += (\n                v[i - 1]\n                * x[i - 1, j]\n                * (-1)\n                * np.exp(avec[j] + (bvec[j] / (t[i - 1] + cvec[j])))\n                * (be[j] + bep[j] * t[i - 1])\n            )\n\n        for i in range(n):\n            fvec7[i] += x[i, j] * np.exp(avec[j] + (bvec[j] / (t[i] + cvec[j])))\n\n    for j in range(m):\n        for 
i in range(1, k):\n            fvec2[i - 1, j] += -1 * x[i + 1, j] * (v[i] + b)\n            fvec2[i - 1, j] += x[i, j] * (v[i - 1] + b)\n\n        fvec2[k - 1, j] += -1 * x[k + 1, j] * (v[k] - d)\n        fvec2[k - 1, j] += x[k, j] * (v[k - 1] + b)\n\n        for i in range(k + 1, n - 1):\n            fvec2[i - 1, j] += -1 * x[i + 1, j] * (v[i] - d)\n            fvec2[i - 1, j] += x[i, j] * (v[i - 1] - d)\n\n    for j in range(m):\n        for i in range(1, k):\n            fvec9[i - 1] += 1 * x[i, j] * (v[i - 1] + b) * (alp[j] * t[i])\n            fvec9[i - 1] += (-1) * x[i + 1, j] * (v[i] + b) * (alp[j] * t[i + 1])\n\n        fvec9[k - 1] += 1 * x[k, j] * (v[k - 1] + b) * (alp[j] * t[i])\n        fvec9[k - 1] += (-1) * x[k + 1, j] * (v[k] - d) * (alp[j] * t[k + 1])\n\n        for i in range(k + 1, n - 1):\n            fvec9[i - 1] += 1 * x[i, j] * (v[i - 1] - d) * (alp[j] * t[i])\n            fvec9[i - 1] += (-1) * x[i + 1, j] * (v[i] - d) * (alp[j] * t[i + 1])\n\n    smallhf = 0\n    for j in range(m):\n        fvec2[k - 1, j] -= fl[j]\n        smallhf += (tf * alp[j]) * fl[j]\n    fvec7 -= 1\n    fvec8 -= q\n    fvec9[k - 1] -= smallhf\n\n    out[:m] = fvec1 * 1e-2\n    out[m : 2 * m] = fvec3\n    out[2 * m : (n - 2) * m + 2 * m] = fvec2.flatten() * 1e-2\n    out[(n - 2) * m + 2 * m : (n - 2) * m + 2 * m + n] = fvec7\n    out[(n - 2) * m + 2 * m + n] = fvec8 * 1e-5\n    out[-(n - 2) :] = fvec9 * 1e-5\n\n    return out\n\n\n@mark.least_squares\ndef methane(x):\n    fvec = np.zeros(31, dtype=np.float64)\n    fvec[0] = 0.01 * (\n        0.000826446280991736\n        * x[24]\n        * x[1]\n        * np.exp(18.5751 - 3632.649 / (239.2 + x[0]))\n        - x[4] * (693.37 + x[24])\n        + 693.37 * x[1]\n    )\n    fvec[1] = (\n        0.000869565217391304 * np.exp(18.5751 - 3632.649 / (239.2 + x[18])) * x[19]\n        - x[22]\n    )\n    fvec[2] = 0.01 * (\n        -0.000826446280991736\n        * x[24]\n        * x[1]\n        * np.exp(18.5751 - 3632.649 / 
(239.2 + x[0]))\n        - x[7] * (693.37 + x[25])\n        + x[4] * (693.37 + x[24])\n        + 0.000833333333333333\n        * x[25]\n        * x[4]\n        * np.exp(18.5751 - 3632.649 / (239.2 + x[3]))\n    )\n    fvec[3] = -4.5125 + 0.01 * (\n        -0.000833333333333333\n        * x[25]\n        * x[4]\n        * np.exp(18.5751 - 3632.649 / (239.2 + x[3]))\n        - x[10] * (-442.13 + x[26])\n        + x[7] * (693.37 + x[25])\n        + 0.000840336134453782\n        * x[26]\n        * x[7]\n        * np.exp(18.5751 - 3632.649 / (239.2 + x[6]))\n    )\n    fvec[4] = 0.01 * (\n        -0.000840336134453782\n        * x[26]\n        * x[7]\n        * np.exp(18.5751 - 3632.649 / (239.2 + x[6]))\n        - x[13] * (-442.13 + x[27])\n        + x[10] * (-442.13 + x[26])\n        + 0.000847457627118644\n        * x[27]\n        * x[10]\n        * np.exp(18.5751 - 3632.649 / (239.2 + x[9]))\n    )\n    fvec[5] = 0.01 * (\n        -0.000847457627118644\n        * x[27]\n        * x[10]\n        * np.exp(18.5751 - 3632.649 / (239.2 + x[9]))\n        - x[16] * (-442.13 + x[28])\n        + x[13] * (-442.13 + x[27])\n        + 0.000854700854700855\n        * x[28]\n        * x[13]\n        * np.exp(18.5751 - 3632.649 / (239.2 + x[12]))\n    )\n    fvec[6] = 0.01 * (\n        -0.000854700854700855\n        * x[28]\n        * x[13]\n        * np.exp(18.5751 - 3632.649 / (239.2 + x[12]))\n        - x[19] * (-442.13 + x[29])\n        + x[16] * (-442.13 + x[28])\n        + 0.000862068965517241\n        * x[29]\n        * x[16]\n        * np.exp(18.5751 - 3632.649 / (239.2 + x[15]))\n    )\n    fvec[7] = 0.01 * (\n        -0.000862068965517241\n        * x[29]\n        * x[16]\n        * np.exp(18.5751 - 3632.649 / (239.2 + x[15]))\n        - x[22] * (-442.13 + x[30])\n        + x[19] * (-442.13 + x[29])\n        + 0.000869565217391304\n        * x[30]\n        * x[19]\n        * np.exp(18.5751 - 3632.649 / (239.2 + x[18]))\n    )\n    fvec[8] = 0.01 * (\n        
0.000826446280991736 * x[24] * x[2] * np.exp(18.3443 - 3841.2203 / (228 + x[0]))\n        - x[5] * (693.37 + x[24])\n        + 693.37 * x[2]\n    )\n    fvec[9] = (\n        0.000869565217391304 * np.exp(18.3443 - 3841.2203 / (228 + x[18])) * x[20]\n        - x[23]\n    )\n    fvec[10] = 0.01 * (\n        -0.000826446280991736\n        * x[24]\n        * x[2]\n        * np.exp(18.3443 - 3841.2203 / (228 + x[0]))\n        - x[8] * (693.37 + x[25])\n        + x[5] * (693.37 + x[24])\n        + 0.000833333333333333\n        * x[25]\n        * x[5]\n        * np.exp(18.3443 - 3841.2203 / (228 + x[3]))\n    )\n    fvec[11] = -6.8425 + 0.01 * (\n        -0.000833333333333333\n        * x[25]\n        * x[5]\n        * np.exp(18.3443 - 3841.2203 / (228 + x[3]))\n        - x[11] * (-442.13 + x[26])\n        + x[8] * (693.37 + x[25])\n        + 0.000840336134453782\n        * x[26]\n        * x[8]\n        * np.exp(18.3443 - 3841.2203 / (228 + x[6]))\n    )\n    fvec[12] = 0.01 * (\n        -0.000840336134453782\n        * x[26]\n        * x[8]\n        * np.exp(18.3443 - 3841.2203 / (228 + x[6]))\n        - x[14] * (-442.13 + x[27])\n        + x[11] * (-442.13 + x[26])\n        + 0.000847457627118644\n        * x[27]\n        * x[11]\n        * np.exp(18.3443 - 3841.2203 / (228 + x[9]))\n    )\n    fvec[13] = 0.01 * (\n        -0.000847457627118644\n        * x[27]\n        * x[11]\n        * np.exp(18.3443 - 3841.2203 / (228 + x[9]))\n        - x[17] * (-442.13 + x[28])\n        + x[14] * (-442.13 + x[27])\n        + 0.000854700854700855\n        * x[28]\n        * x[14]\n        * np.exp(18.3443 - 3841.2203 / (228 + x[12]))\n    )\n    fvec[14] = 0.01 * (\n        -0.000854700854700855\n        * x[28]\n        * x[14]\n        * np.exp(18.3443 - 3841.2203 / (228 + x[12]))\n        - x[20] * (-442.13 + x[29])\n        + x[17] * (-442.13 + x[28])\n        + 0.000862068965517241\n        * x[29]\n        * x[17]\n        * np.exp(18.3443 - 3841.2203 / (228 + x[15]))\n    
)\n    fvec[15] = 0.01 * (\n        -0.000862068965517241\n        * x[29]\n        * x[17]\n        * np.exp(18.3443 - 3841.2203 / (228 + x[15]))\n        - x[23] * (-442.13 + x[30])\n        + x[20] * (-442.13 + x[29])\n        + 0.000869565217391304\n        * x[30]\n        * x[20]\n        * np.exp(18.3443 - 3841.2203 / (228 + x[18]))\n    )\n    fvec[16] = (\n        -1\n        + 0.000826446280991736 * np.exp(18.5751 - 3632.649 / (239.2 + x[0])) * x[1]\n        + 0.000826446280991736 * np.exp(18.3443 - 3841.2203 / (228 + x[0])) * x[2]\n    )\n    fvec[17] = (\n        -1\n        + 0.000833333333333333 * np.exp(18.5751 - 3632.649 / (239.2 + x[3])) * x[4]\n        + 0.000833333333333333 * np.exp(18.3443 - 3841.2203 / (228 + x[3])) * x[5]\n    )\n    fvec[18] = (\n        -1\n        + 0.000840336134453782 * np.exp(18.5751 - 3632.649 / (239.2 + x[6])) * x[7]\n        + 0.000840336134453782 * np.exp(18.3443 - 3841.2203 / (228 + x[6])) * x[8]\n    )\n    fvec[19] = (\n        -1\n        + 0.000847457627118644 * np.exp(18.5751 - 3632.649 / (239.2 + x[9])) * x[10]\n        + 0.000847457627118644 * np.exp(18.3443 - 3841.2203 / (228 + x[9])) * x[11]\n    )\n    fvec[20] = (\n        -1\n        + 0.000854700854700855 * np.exp(18.5751 - 3632.649 / (239.2 + x[12])) * x[13]\n        + 0.000854700854700855 * np.exp(18.3443 - 3841.2203 / (228 + x[12])) * x[14]\n    )\n    fvec[21] = (\n        -1\n        + 0.000862068965517241 * np.exp(18.5751 - 3632.649 / (239.2 + x[15])) * x[16]\n        + 0.000862068965517241 * np.exp(18.3443 - 3841.2203 / (228 + x[15])) * x[17]\n    )\n    fvec[22] = (\n        -1\n        + 0.000869565217391304 * np.exp(18.5751 - 3632.649 / (239.2 + x[18])) * x[19]\n        + 0.000869565217391304 * np.exp(18.3443 - 3841.2203 / (228 + x[18])) * x[20]\n    )\n    fvec[23] = (\n        -1\n        + 0.00087719298245614 * np.exp(18.5751 - 3632.649 / (239.2 + x[21])) * x[22]\n        + 0.00087719298245614 * np.exp(18.3443 - 3841.2203 / (228 + x[21])) * 
x[23]\n    )\n    fvec[24] = -83.862 + 1e-5 * (\n        0.000826446280991736\n        * x[24]\n        * x[1]\n        * np.exp(18.5751 - 3632.649 / (239.2 + x[0]))\n        * (9566.67 + 0.0422 * x[0] * x[0] - 1.59 * x[0])\n        + 693.37 * (0.0422 * x[0] * x[0] + 15.97 * x[0]) * x[1]\n        - x[4] * (693.37 + x[24]) * (0.0422 * x[3] * x[3] + 15.97 * x[3])\n        + 0.000826446280991736\n        * x[24]\n        * x[2]\n        * np.exp(18.3443 - 3841.2203 / (228 + x[0]))\n        * (10834.67 + 8.74 * x[0])\n        + 12549.997 * x[2] * x[0]\n        - 18.1 * x[5] * (693.37 + x[24]) * x[3]\n    )\n    fvec[25] = 1e-5 * (\n        0.000833333333333333\n        * x[25]\n        * x[4]\n        * np.exp(18.5751 - 3632.649 / (239.2 + x[3]))\n        * (9566.67 + 0.0422 * x[3] * x[3] - 1.59 * x[3])\n        + x[4] * (693.37 + x[24]) * (0.0422 * x[3] * x[3] + 15.97 * x[3])\n        - 0.000826446280991736\n        * x[24]\n        * x[1]\n        * np.exp(18.5751 - 3632.649 / (239.2 + x[0]))\n        * (9566.67 + 0.0422 * x[0] * x[0] - 1.59 * x[0])\n        - x[7] * (693.37 + x[25]) * (0.0422 * x[6] * x[6] + 15.97 * x[6])\n        + 0.000833333333333333\n        * x[25]\n        * x[5]\n        * np.exp(18.3443 - 3841.2203 / (228 + x[3]))\n        * (10834.67 + 8.74 * x[3])\n        + 18.1 * x[5] * (693.37 + x[24]) * x[3]\n        - 0.000826446280991736\n        * x[24]\n        * x[2]\n        * np.exp(18.3443 - 3841.2203 / (228 + x[0]))\n        * (10834.67 + 8.74 * x[0])\n        - 18.1 * x[8] * (693.37 + x[25]) * x[6]\n    )\n    fvec[26] = -18.9447111025 + 1e-5 * (\n        0.000840336134453782\n        * x[26]\n        * x[7]\n        * np.exp(18.5751 - 3632.649 / (239.2 + x[6]))\n        * (9566.67 + 0.0422 * x[6] * x[6] - 1.59 * x[6])\n        + x[7] * (693.37 + x[25]) * (0.0422 * x[6] * x[6] + 15.97 * x[6])\n        - 0.000833333333333333\n        * x[25]\n        * x[4]\n        * np.exp(18.5751 - 3632.649 / (239.2 + x[3]))\n        * (9566.67 + 0.0422 * 
x[3] * x[3] - 1.59 * x[3])\n        - x[10] * (-442.13 + x[26]) * (0.0422 * x[9] * x[9] + 15.97 * x[9])\n        + 0.000840336134453782\n        * x[26]\n        * x[8]\n        * np.exp(18.3443 - 3841.2203 / (228 + x[6]))\n        * (10834.67 + 8.74 * x[6])\n        + 18.1 * x[8] * (693.37 + x[25]) * x[6]\n        - 0.000833333333333333\n        * x[25]\n        * x[5]\n        * np.exp(18.3443 - 3841.2203 / (228 + x[3]))\n        * (10834.67 + 8.74 * x[3])\n        - 18.1 * x[11] * (-442.13 + x[26]) * x[9]\n    )\n    fvec[27] = 1e-5 * (\n        0.000847457627118644\n        * x[27]\n        * x[10]\n        * np.exp(18.5751 - 3632.649 / (239.2 + x[9]))\n        * (9566.67 + 0.0422 * x[9] * x[9] - 1.59 * x[9])\n        + x[10] * (-442.13 + x[26]) * (0.0422 * x[9] * x[9] + 15.97 * x[9])\n        - 0.000840336134453782\n        * x[26]\n        * x[7]\n        * np.exp(18.5751 - 3632.649 / (239.2 + x[6]))\n        * (9566.67 + 0.0422 * x[6] * x[6] - 1.59 * x[6])\n        - x[13] * (-442.13 + x[27]) * (0.0422 * x[12] * x[12] + 15.97 * x[12])\n        + 0.000847457627118644\n        * x[27]\n        * x[11]\n        * np.exp(18.3443 - 3841.2203 / (228 + x[9]))\n        * (10834.67 + 8.74 * x[9])\n        + 18.1 * x[11] * (-442.13 + x[26]) * x[9]\n        - 0.000840336134453782\n        * x[26]\n        * x[8]\n        * np.exp(18.3443 - 3841.2203 / (228 + x[6]))\n        * (10834.67 + 8.74 * x[6])\n        - 18.1 * x[14] * (-442.13 + x[27]) * x[12]\n    )\n    fvec[28] = 1e-5 * (\n        0.000854700854700855\n        * x[28]\n        * x[13]\n        * np.exp(18.5751 - 3632.649 / (239.2 + x[12]))\n        * (9566.67 + 0.0422 * x[12] * x[12] - 1.59 * x[12])\n        + x[13] * (-442.13 + x[27]) * (0.0422 * x[12] * x[12] + 15.97 * x[12])\n        - 0.000847457627118644\n        * x[27]\n        * x[10]\n        * np.exp(18.5751 - 3632.649 / (239.2 + x[9]))\n        * (9566.67 + 0.0422 * x[9] * x[9] - 1.59 * x[9])\n        - x[16] * (-442.13 + x[28]) * (0.0422 * x[15] 
* x[15] + 15.97 * x[15])\n        + 0.000854700854700855\n        * x[28]\n        * x[14]\n        * np.exp(18.3443 - 3841.2203 / (228 + x[12]))\n        * (10834.67 + 8.74 * x[12])\n        + 18.1 * x[14] * (-442.13 + x[27]) * x[12]\n        - 0.000847457627118644\n        * x[27]\n        * x[11]\n        * np.exp(18.3443 - 3841.2203 / (228 + x[9]))\n        * (10834.67 + 8.74 * x[9])\n        - 18.1 * x[17] * (-442.13 + x[28]) * x[15]\n    )\n    fvec[29] = 1e-5 * (\n        0.000862068965517241\n        * x[29]\n        * x[16]\n        * np.exp(18.5751 - 3632.649 / (239.2 + x[15]))\n        * (9566.67 + 0.0422 * x[15] * x[15] - 1.59 * x[15])\n        + x[16] * (-442.13 + x[28]) * (0.0422 * x[15] * x[15] + 15.97 * x[15])\n        - 0.000854700854700855\n        * x[28]\n        * x[13]\n        * np.exp(18.5751 - 3632.649 / (239.2 + x[12]))\n        * (9566.67 + 0.0422 * x[12] * x[12] - 1.59 * x[12])\n        - x[19] * (-442.13 + x[29]) * (0.0422 * x[18] * x[18] + 15.97 * x[18])\n        + 0.000862068965517241\n        * x[29]\n        * x[17]\n        * np.exp(18.3443 - 3841.2203 / (228 + x[15]))\n        * (10834.67 + 8.74 * x[15])\n        + 18.1 * x[17] * (-442.13 + x[28]) * x[15]\n        - 0.000854700854700855\n        * x[28]\n        * x[14]\n        * np.exp(18.3443 - 3841.2203 / (228 + x[12]))\n        * (10834.67 + 8.74 * x[12])\n        - 18.1 * x[20] * (-442.13 + x[29]) * x[18]\n    )\n    fvec[30] = 1e-5 * (\n        0.000869565217391304\n        * x[30]\n        * x[19]\n        * np.exp(18.5751 - 3632.649 / (239.2 + x[18]))\n        * (9566.67 + 0.0422 * x[18] * x[18] - 1.59 * x[18])\n        + x[19] * (-442.13 + x[29]) * (0.0422 * x[18] * x[18] + 15.97 * x[18])\n        - 0.000862068965517241\n        * x[29]\n        * x[16]\n        * np.exp(18.5751 - 3632.649 / (239.2 + x[15]))\n        * (9566.67 + 0.0422 * x[15] * x[15] - 1.59 * x[15])\n        - x[22] * (-442.13 + x[30]) * (0.0422 * x[21] * x[21] + 15.97 * x[21])\n        + 
@mark.least_squares
def argtrig(x):
    """ARGTRIG / MGH trigonometric system of nonlinear equations:

        f_i = n - sum_j cos(x_j) + i * (1 - cos(x_i)) - sin(x_i)

    with 1-based index i.
    """
    dim_in = len(x)
    # Bug fix: sin(x_i) enters with unit weight in the problem definition;
    # it was previously multiplied by the index i as well.
    fvec = (
        dim_in
        - np.sum(np.cos(x))
        + np.arange(1, dim_in + 1) * (1 - np.cos(x))
        - np.sin(x)
    )
    return fvec


@mark.least_squares
def artif(x):
    """ARTIF residuals: a three-point average plus an arctan-sine source term.

    ``x`` is zero-padded on both ends, so the stencil is defined for every
    residual.
    """
    dim_in = len(x)
    xvec = np.zeros(dim_in + 2, dtype=np.float64)
    xvec[1:-1] = x
    fvec = np.zeros(dim_in, dtype=np.float64)
    for i in range(dim_in):
        # np.mod(i + 1, 100): the (1-based) equation index wraps every 100.
        fvec[i] = -0.05 * (xvec[i + 1] + xvec[i + 2] + xvec[i]) + np.arctan(
            np.sin(np.mod(i + 1, 100) * xvec[i + 1])
        )
    return fvec


@mark.least_squares
def arwhdne(x):
    """ARWHDNE residuals: n - 1 quadratic terms coupling each variable with
    the last one, followed by n - 1 affine terms.
    """
    dim_in = len(x)
    fvec = np.zeros(2 * (dim_in - 1))
    fvec[: dim_in - 1] = x[:-1] ** 2 + x[-1] ** 2
    fvec[dim_in - 1 :] = 4 * x[:-1] - 3
    return fvec


@mark.least_squares
@njit
def bdvalues(x):
    """BDVALUES residuals: discretized second-order boundary value problem
    with zero values at both boundaries.
    """
    dim_in = len(x)
    h = 1 / (dim_in + 1)
    # Pad with the (zero) boundary values; the explicit loop is njit-friendly.
    xvec = np.zeros(dim_in + 2, dtype=np.float64)
    for i in range(dim_in):
        xvec[i + 1] = x[i]
    fvec = np.zeros(dim_in, dtype=np.float64)
    for i in range(2, dim_in + 2):
        # NOTE(review): the source term uses i * h while the stencil centre
        # xvec[i - 1] sits at grid position (i - 1) * h — confirm against the
        # reference problem before changing.
        fvec[i - 2] = (
            -xvec[i - 2]
            + 2 * xvec[i - 1]
            - xvec[i]
            + 0.5 * h**2 * (xvec[i - 1] + i * h + 1) ** 3
        )
    return fvec
@mark.least_squares
def bratu_3d(x, alpha):
    """Residuals of the 3d Bratu problem: 7-point discrete Laplacian minus the
    exponential source term, on a cube with zero boundary values.

    Args:
        x: flat vector of interior grid values; its length must be a perfect
            cube.
        alpha: Bratu parameter scaling the source term.

    Returns:
        Flattened array of one residual per interior grid point.
    """
    n = int(np.cbrt(len(x)))
    x = x.reshape((n, n, n))
    p = x.shape[0] + 2
    h = 1 / (p - 1)
    c = h**2 * alpha
    # Embed the interior values in a zero-padded (boundary) grid.
    xvec = np.zeros((x.shape[0] + 2, x.shape[1] + 2, x.shape[2] + 2), dtype=np.float64)
    xvec[1 : x.shape[0] + 1, 1 : x.shape[1] + 1, 1 : x.shape[2] + 1] = x
    fvec = np.zeros(x.shape, dtype=np.float64)
    for i in range(2, p):
        for j in range(2, p):
            for k in range(2, p):
                fvec[i - 2, j - 2, k - 2] = (
                    6 * xvec[i - 1, j - 1, k - 1]
                    - xvec[i, j - 1, k - 1]
                    - xvec[i - 2, j - 1, k - 1]
                    - xvec[i - 1, j, k - 1]
                    - xvec[i - 1, j - 2, k - 1]
                    - xvec[i - 1, j - 1, k]
                    - xvec[i - 1, j - 1, k - 2]
                    # Bug fix: the source term is evaluated at the stencil
                    # centre (matching the 2d variant bratu_2d); it previously
                    # used the off-centre point xvec[i, j, k].
                    - c * np.exp(xvec[i - 1, j - 1, k - 1])
                )
    return fvec.flatten()


@mark.least_squares
def broydn_3d(x):
    """Broyden tridiagonal residuals:
    (3 - kappa_1 * x_i) * x_i - x_{i-1} - 2 * x_{i+1} + kappa_2,
    with the out-of-range neighbours treated as zero.
    """
    kappa_1 = 2
    kappa_2 = 1
    fvec = np.zeros_like(x)
    fvec[0] = -2 * x[1] + kappa_2 + (3 - kappa_1 * x[0]) * x[0]
    fvec[1 : len(x) - 1] = (
        -x[:-2] - 2 * x[2:] + kappa_2 + (3 - kappa_1 * x[1:-1]) * x[1:-1]
    )
    fvec[-1] = -x[-2] + kappa_2 + (3 - kappa_1 * x[-1]) * x[-1]
    return fvec
@mark.least_squares
def cbratu_2d(x):
    """Residuals of the complex 2d Bratu problem: real and imaginary parts of
    the discretized PDE for z = u + i*v on a square grid with zero boundary
    values; plane 0 of ``x`` holds u, plane 1 holds v.
    """
    n = int(np.sqrt(len(x) / 2))
    x = x.reshape((2, n, n))
    # Embed both planes in zero-padded (boundary) grids.
    xvec = np.zeros((x.shape[0], x.shape[1] + 2, x.shape[2] + 2), dtype=np.float64)
    xvec[0, 1 : x.shape[1] + 1, 1 : x.shape[2] + 1] = x[0, :, :]
    xvec[1, 1 : x.shape[1] + 1, 1 : x.shape[2] + 1] = x[1, :, :]
    p = x.shape[1] + 2
    h = 1 / (p - 1)
    alpha = 5
    c = h**2 * alpha
    fvec = np.zeros(x.shape, dtype=np.float64)
    for i in range(2, p):
        for j in range(2, p):
            # Bug fix: the complex source term e^{u + i v} expands to
            # e^u cos(v) + i e^u sin(v), so BOTH components use exp of the
            # real plane (0) and cos/sin of the imaginary plane (1).
            # Previously the first equation used cos(u) and the second e^v.
            fvec[0, i - 2, j - 2] = (
                4 * xvec[0, i - 1, j - 1]
                - xvec[0, i, j - 1]
                - xvec[0, i - 2, j - 1]
                - xvec[0, i - 1, j]
                - xvec[0, i - 1, j - 2]
                - c * np.exp(xvec[0, i - 1, j - 1]) * np.cos(xvec[1, i - 1, j - 1])
            )
            fvec[1, i - 2, j - 2] = (
                4 * xvec[1, i - 1, j - 1]
                - xvec[1, i, j - 1]
                - xvec[1, i - 2, j - 1]
                - xvec[1, i - 1, j]
                - xvec[1, i - 1, j - 2]
                - c * np.exp(xvec[0, i - 1, j - 1]) * np.sin(xvec[1, i - 1, j - 1])
            )
    return fvec.flatten()


@mark.least_squares
def chandheq(x):
    """Residuals of the discretized Chandrasekhar H-equation benchmark.

    NOTE(review): here ``x`` supplies the quadrature points while the
    H-values ``h`` are fixed at one — confirm this reading against the
    reference problem definition.
    """
    dim_in = len(x)
    constant = 1  # the "c" parameter of the H-equation
    # Equal quadrature weights; integer arrays upcast to float on division.
    w = np.ones(dim_in, dtype=np.int64) / dim_in
    h = np.ones(dim_in, dtype=np.int64)
    fvec = np.zeros(dim_in, dtype=np.float64)
    for i in range(dim_in):
        fvec[i] = (-0.5 * constant * w * x[i] / (x[i] + x) * h[i] * h + h[i] - 1).sum()
    return fvec
@mark.least_squares
@njit
def chemrctb(x):
    """Residuals of the CHEMRCTB benchmark (a discretized reactor-type
    boundary value problem on dim_in grid points).

    Args:
        x: 1d array of grid values; x[i] == 0 would divide by zero inside
            the exponential term.

    Returns:
        1d array of dim_in residuals: one left boundary equation, dim_in - 2
        interior equations and one right boundary equation.
    """
    dim_in = int(len(x))
    fvec = np.zeros(dim_in, dtype=np.float64)

    # Fixed problem parameters of the benchmark.
    pe = 5.0
    d = 0.135
    b = 0.5
    gamma = 25.0
    h = 1 / (dim_in - 1)  # mesh width
    # Coefficients of the three-point finite-difference stencil.
    ct1 = -h * pe
    cti1 = 1 / (h**2 * pe) + 1 / h
    cti = -1 / h - 2 / (h**2 * pe)

    # Left boundary equation.
    fvec[0] = ct1 * x[1] - x[0] + h * pe
    for i in range(2, dim_in):
        # Interior equation i - 1: stencil terms plus an exponential
        # reaction-style source term in x[i - 1].
        fvec[i - 1] = (
            d * (b + 1 - x[i - 1]) * np.exp(gamma - gamma / x[i - 1])
            + cti1 * x[i - 2]
            + cti * x[i - 1]
            + x[i] / (h**2 * pe)
        )
    # Right boundary equation: zero difference between the last two values.
    fvec[-1] = x[-1] - x[-2]
    return fvec
   1.20,\n            1.20,\n            1.40,\n            0.50,\n            0.50,\n            1.25,\n            1.80,\n            0.75,\n            1.25,\n            1.40,\n            1.60,\n            2.00,\n            1.00,\n            1.60,\n            1.25,\n            2.75,\n            1.25,\n            1.25,\n            1.25,\n            3.00,\n            1.50,\n            2.00,\n            1.25,\n            1.40,\n            1.80,\n            1.50,\n            2.20,\n            1.40,\n            1.50,\n            1.25,\n            2.00,\n            1.50,\n            1.25,\n            1.40,\n            0.60,\n            1.50,\n        ]\n    )\n    dim_in = len(x)\n    fvec = np.zeros(2 * (dim_in - 1))\n    fvec[: dim_in - 1] = 4 * alfa[1:] * (x[:-1] - x[1:] ** 2)\n    fvec[dim_in - 1 :] = x[1:] - 1\n    return fvec\n\n\n@mark.least_squares\n@njit\ndef drcavty(x, r):\n    m = int(np.sqrt(len(x)))\n    x = x.reshape((m, m))\n    h = 1 / (m + 2)\n    xvec = np.zeros((m + 4, m + 4), dtype=np.float64)\n    xvec[2 : m + 2, 2 : m + 2] = x\n    xvec[-2, :] = -h / 2\n    xvec[-1, :] = h / 2\n    fvec = np.zeros(x.shape, dtype=np.float64)\n    for i in range(m):\n        for j in range(m):\n            fvec[i, j] = (\n                20 * xvec[i + 2, j + 2]\n                - 8 * xvec[i + 1, j + 2]\n                - 8 * xvec[i + 3, j + 2]\n                - 8 * xvec[i + 2, j + 1]\n                - 8 * xvec[i + 2, j + 3]\n                + 2 * xvec[i + 1, j + 3]\n                + 2 * xvec[i + 3, j + 2]\n                + 2 * xvec[i + 1, j + 1]\n                + 2 * xvec[i + 3, j + 3]\n                + xvec[i, j + 2]\n                + xvec[i + 4, j + 2]\n                + xvec[i + 2, j]\n                + xvec[i + 2, j + 4]\n                + (r / 4)\n                * (xvec[i + 2, j + 3] - xvec[i + 2, j + 1])\n                * (\n                    xvec[i, j + 2]\n                    + xvec[i + 1, j + 1]\n                    + 
xvec[i + 1, j + 3]\n                    - 4 * xvec[i + 1, j + 2]\n                    - 4 * xvec[i + 3, j + 2]\n                    - xvec[i + 3, j + 2]\n                    - xvec[i + 3, j + 3]\n                    - xvec[i + 4, j + 2]\n                )\n                - (r / 4)\n                * (xvec[i + 3, j + 2] - xvec[i + 1, j + 2])\n                * (\n                    xvec[i + 2, j]\n                    + xvec[i + 1, j + 1]\n                    + xvec[i + 3, j + 1]\n                    - 4 * xvec[i + 2, j + 1]\n                    - 4 * xvec[i + 2, j + 3]\n                    - xvec[i + 1, j + 3]\n                    - xvec[i + 3, j + 3]\n                    - xvec[i + 2, j + 4]\n                )\n            )\n\n    return fvec.flatten()\n\n\n@mark.least_squares\ndef freurone(x):\n    dim_in = len(x)\n    fvec = np.zeros((2, dim_in - 1), dtype=np.float64)\n    for i in range(dim_in - 1):\n        fvec[0, i] = (5.0 - x[i + 1]) * x[i + 1] ** 2 + x[i] - 2 * x[i + 1] - 13.0\n        fvec[1, i] = (1.0 + x[i + 1]) * x[i + 1] ** 2 + x[i] - 14 * x[i + 1] - 29.0\n    return fvec.flatten()\n\n\n@mark.least_squares\ndef hatfldg(x):\n    dim_in = len(x)\n    fvec = np.zeros(dim_in, dtype=np.float64)\n    for i in range(1, dim_in - 1):\n        fvec[i - 1] = x[i] * (x[i - 1] - x[i + 1]) + x[i] - x[12] + 1\n    fvec[-2] = x[0] - x[12] + 1 - x[0] * x[1]\n    fvec[-1] = x[-1] - x[12] + 1 + x[-2] * x[-1]\n    return fvec\n\n\n@mark.least_squares\ndef integreq(x):\n    dim_in = len(x)\n    h = 1 / (dim_in + 1)\n    t = np.arange(1, dim_in + 1) * h\n    xvec = np.zeros(dim_in + 2, dtype=np.float64)\n    xvec[1:-1] = x\n    fvec = np.zeros_like(x)\n    for i in range(1, dim_in):\n        fvec[i - 1] = (\n            xvec[i]\n            + h\n            * (\n                (1 - t[i - 1]) * (t[:i] * (xvec[1 : i + 1] + t[:i] + 1) ** 3).sum()\n                + t[i - 1] * ((1 - t[i:]) * (xvec[i + 1 : -1] + t[i:] + 1) ** 3).sum()\n            )\n            / 2\n        
)\n    fvec[-1] = (\n        xvec[-2]\n        + h\n        * (\n            (1 - t[-1]) * (t * (xvec[1:-1] + t + 1) ** 3).sum()\n            + t[-1] * ((1 - t[-1]) * (xvec[-2] + t[-1] + 1) ** 3)\n        )\n        / 2\n    )\n    return fvec\n\n\n@mark.least_squares\ndef msqrta(x):\n    dim_in = int(np.sqrt(len(x)))\n    xmat = x.reshape((dim_in, dim_in))\n    bmat = 5 * xmat\n    amat = np.zeros((dim_in, dim_in), dtype=np.float64)\n    for i in range(1, dim_in + 1):\n        for j in range(1, dim_in + 1):\n            amat[i - 1, j - 1] = (bmat[i - 1, :] * bmat[:, j - 1]).sum()\n    fmat = np.zeros((dim_in, dim_in))\n    for i in range(1, dim_in + 1):\n        for j in range(1, dim_in + 1):\n            fmat[i - 1, j - 1] = (xmat[i - 1, :] * xmat[:, j - 1]).sum() - amat[\n                i - 1, j - 1\n            ]\n    return fmat.flatten()\n\n\n@mark.least_squares\ndef penalty_1(x, a=1e-5):\n    fvec = np.sqrt(a) * (x - 2)\n    fvec = np.concatenate([fvec, [x @ x - 1 / 4]])\n    return fvec\n\n\n@mark.least_squares\ndef penalty_2(x, a=1e-10):\n    dim_in = len(x)\n    y = np.exp(np.arange(1, 2 * dim_in + 1) / 10) + np.exp(np.arange(2 * dim_in) / 10)\n    fvec = np.zeros(2 * dim_in)\n    fvec[0] = x[0] - 0.2\n    fvec[1:dim_in] = np.sqrt(a) * (\n        np.exp(x[1:] / 10) + np.exp(x[:-1] / 10) - y[1:dim_in]\n    )\n    fvec[dim_in:-1] = np.sqrt(a) * (np.exp(x[1:] / 10) - np.exp(-1 / 10))\n    fvec[-1] = (np.arange(1, dim_in + 1)[::-1] * x**2).sum() - 1\n    return fvec\n\n\n@mark.least_squares\ndef vardimne(x):\n    dim_in = len(x)\n    fvec = np.zeros(dim_in + 2)\n    fvec[:-2] = x - 1\n    fvec[-2] = (np.arange(1, dim_in + 1) * (x - 1)).sum()\n    fvec[-1] = ((np.arange(1, dim_in + 1) * (x - 1)).sum()) ** 2\n    return fvec\n\n\n@mark.least_squares\ndef yatpsq_1(x, dim_in):\n    xvec = x[: dim_in**2]\n    xvec = xvec.reshape((dim_in, dim_in))\n    yvec = x[dim_in**2 : dim_in**2 + dim_in]\n    zvec = x[dim_in**2 + dim_in : dim_in**2 + 2 * dim_in]\n    fvec = 
np.zeros((dim_in, dim_in), dtype=np.float64)\n    for i in range(dim_in):\n        for j in range(dim_in):\n            fvec[i, j] = (\n                xvec[i, j] ** 3\n                - 10 * xvec[i, j] ** 2\n                - (yvec[i] + zvec[j])\n                * (xvec[i, j] * np.cos(xvec[i, j]) - np.sin(xvec[i, j]))\n            )\n    fvec = fvec.flatten()\n    temp = (np.sin(xvec) / xvec).sum(axis=0) - 1\n    fvec = np.concatenate((fvec, temp))\n    temp = (np.sin(xvec) / xvec).sum(axis=1) - 1\n    fvec = np.concatenate((fvec, temp))\n    return fvec\n\n\n@mark.least_squares\ndef yatpsq_2(x, dim_in):\n    xvec = x[: dim_in**2]\n    xvec = xvec.reshape((dim_in, dim_in))\n    yvec = x[dim_in**2 : dim_in**2 + dim_in]\n    zvec = x[dim_in**2 + dim_in : dim_in**2 + 2 * dim_in]\n    fvec = np.zeros((dim_in, dim_in), dtype=np.float64)\n    for i in range(dim_in):\n        for j in range(dim_in):\n            fvec[i, j] = xvec[i, j] - (yvec[i] + zvec[j]) * (1 + np.cos(xvec[i, j])) - 1\n    fvec = fvec.flatten()\n    temp = (np.sin(xvec) + xvec).sum(axis=0) - 1\n    fvec = np.concatenate((fvec, temp))\n    temp = (np.sin(xvec) + xvec).sum(axis=1) - 1\n    fvec = np.concatenate((fvec, temp))\n    return fvec\n\n\ndef get_start_points_msqrta(dim_in, flag=1):\n    bmat = np.zeros((dim_in, dim_in))\n    for i in range(1, dim_in + 1):\n        for j in range(1, dim_in + 1):\n            bmat[i - 1, j - 1] = np.sin(((i - 1) * dim_in + j) ** 2)\n    if flag == 2:\n        bmat[2, 0] = 0\n    xmat = 0.2 * bmat\n    return xmat.flatten().tolist()\n\n\ndef get_start_points_bdvalues(n, a=1):\n    h = 1 / (n + 1)\n    x = np.zeros(n)\n    for i in range(n):\n        x[i] = (i + 1) * h * ((i + 1) * h - 1)\n    return (x * a).tolist()\n\n\ndef get_start_points_spmsqrt(m):\n    b = np.zeros((m, m))\n    b[0, 0] = np.sin(1)\n    b[0, 1] = np.sin(4)\n    k = 2\n    for i in range(1, m - 1):\n        k += 1\n        b[i, i - 1] = np.sin(k**2)\n        k += 1\n        b[i, i] = 
np.sin(k**2)\n        k += 1\n        b[i, i + 1] = np.sin(k**2)\n    k += 1\n    b[-1, -2] = np.sin(k**2)\n    k += 1\n    b[-1, -1] = np.sin(k**2)\n\n    x = np.zeros((m, m))\n    x[:, :2] = 0.2 * b[:, :2]\n    x[1:-1, :-2] = 0.2 * b[1:-1, :-2]\n    x[1:-1, 1:-1] = 0.2 * b[1:-1, 1:-1]\n    x[1:-1, 2:] = 0.2 * b[1:-1, 2:]\n    x[-1, -2:] = 0.2 * b[-1, -2:]\n\n    x_out = x[x != 0]\n\n    return x_out.tolist()\n\n\ndef get_start_points_qr3d(m):\n    r = np.diag(2 * np.arange(1, m + 1) / m, 0) + np.diag((1 - np.arange(1, m)) / m, 1)\n    r[0, 1] = 0\n    r[-1, -1] = 2 * m\n    return np.concatenate([np.eye(m).flatten(), r[np.triu_indices_from(r)]]).tolist()\n\n\ndef get_start_points_qr3dbd(m):\n    r = np.diag(2 * np.arange(1, m + 1) / m, 0) + np.diag((1 - np.arange(1, m)) / m, 1)\n    r[0, 1] = 0\n    r[-1, -1] = 2 * m\n    return np.concatenate(\n        [np.eye(m).flatten(), r[0, :-2], r[1, 1:-1], r[2, 2:], r[3, 3:], [r[4, 4]]]\n    ).tolist()\n\n\ndef get_start_points_hydcar20():\n    x = [\n        0.0,\n        0.3,\n        0.1,\n        0.0,\n        0.3,\n        0.9,\n        0.01,\n        0.3,\n        0.9,\n        0.02,\n        0.4,\n        0.8,\n        0.05,\n        0.4,\n        0.8,\n        0.07,\n        0.45,\n        0.8,\n        0.09,\n        0.5,\n        0.7,\n        0.1,\n        0.5,\n        0.7,\n        0.15,\n        0.5,\n        0.6,\n        0.2,\n        0.5,\n        0.6,\n        0.25,\n        0.6,\n        0.5,\n        0.3,\n        0.6,\n        0.5,\n        0.35,\n        0.6,\n        0.5,\n        0.4,\n        0.6,\n        0.4,\n        0.4,\n        0.7,\n        0.4,\n        0.42,\n        0.7,\n        0.3,\n        0.45,\n        0.75,\n        0.3,\n        0.45,\n        0.75,\n        0.2,\n        0.5,\n        0.8,\n        0.1,\n        0.5,\n        0.8,\n        0.0,\n    ]\n    return x + [100] * 20 + [300] * 19\n\n\ndef get_start_points_hydcar6():\n    x = [\n        0.0,\n        0.2,\n        
0.9,\n        0.0,\n        0.2,\n        0.8,\n        0.05,\n        0.3,\n        0.8,\n        0.1,\n        0.3,\n        0.6,\n        0.3,\n        0.5,\n        0.3,\n        0.6,\n        0.6,\n        0.0,\n    ]\n    return x + [100] * 6 + [300] * 5\n\n\ndef get_start_points_methanb8():\n    return [\n        107.47,\n        0.09203,\n        0.908,\n        102.4,\n        0.1819,\n        0.8181,\n        97.44,\n        0.284,\n        0.716,\n        96.3,\n        0.3051,\n        0.6949,\n        93.99,\n        0.3566,\n        0.6434,\n        89.72,\n        0.468,\n        0.532,\n        83.71,\n        0.6579,\n        0.3421,\n        78.31,\n        0.8763,\n        0.1237,\n        886.37,\n        910.01,\n        922.52,\n        926.46,\n        935.56,\n        952.83,\n        975.73,\n    ]\n\n\ndef get_start_points_methanl8():\n    return [\n        120,\n        0.09203,\n        0.908,\n        110,\n        0.1819,\n        0.8181,\n        100,\n        0.284,\n        0.716,\n        88,\n        0.3051,\n        0.6949,\n        86,\n        0.3566,\n        0.6434,\n        84,\n        0.468,\n        0.532,\n        80,\n        0.6579,\n        0.3421,\n        76,\n        0.8763,\n        0.1237,\n        886.37,\n        910.01,\n        922.52,\n        926.46,\n        935.56,\n        952.83,\n        975.73,\n    ]\n\n\nsolution_x_bdvalues = [\n    -0.00501717,\n    -0.00998312,\n    -0.01489709,\n    -0.01975833,\n    -0.02456605,\n    -0.02931945,\n    -0.03401771,\n    -0.03866001,\n    -0.0432455,\n    -0.04777331,\n    -0.05224255,\n    -0.05665232,\n    -0.0610017,\n    -0.06528975,\n    -0.06951549,\n    -0.07367795,\n    -0.07777612,\n    -0.08180898,\n    -0.08577546,\n    -0.08967451,\n    -0.09350501,\n    -0.09726585,\n    -0.10095589,\n    -0.10457394,\n    -0.10811881,\n    -0.11158927,\n    -0.11498406,\n    -0.1183019,\n    -0.12154147,\n    -0.12470143,\n    -0.1277804,\n    -0.13077697,\n    
-0.13368969,\n    -0.1365171,\n    -0.13925766,\n    -0.14190984,\n    -0.14447205,\n    -0.14694265,\n    -0.14931997,\n    -0.15160232,\n    -0.15378794,\n    -0.15587503,\n    -0.15786175,\n    -0.15974621,\n    -0.16152647,\n    -0.16320056,\n    -0.16476642,\n    -0.16622197,\n    -0.16756507,\n    -0.1687935,\n    -0.16990502,\n    -0.17089728,\n    -0.17176792,\n    -0.17251447,\n    -0.17313443,\n    -0.1736252,\n    -0.17398413,\n    -0.17420848,\n    -0.17429545,\n    -0.17424214,\n    -0.17404559,\n    -0.17370274,\n    -0.17321044,\n    -0.17256546,\n    -0.17176447,\n    -0.17080403,\n    -0.16968062,\n    -0.16839059,\n    -0.16693019,\n    -0.16529558,\n    -0.16348276,\n    -0.16148763,\n    -0.15930595,\n    -0.15693338,\n    -0.15436539,\n    -0.15159735,\n    -0.14862447,\n    -0.14544178,\n    -0.14204417,\n    -0.13842638,\n    -0.13458293,\n    -0.13050819,\n    -0.12619633,\n    -0.12164132,\n    -0.11683693,\n    -0.1117767,\n    -0.10645396,\n    -0.10086179,\n    -0.09499304,\n    -0.0888403,\n    -0.08239586,\n    -0.07565179,\n    -0.06859981,\n    -0.06123136,\n    -0.05353755,\n    -0.04550917,\n    -0.03713662,\n    -0.02840998,\n    -0.01931889,\n    -0.00985262,\n]\n\nsolution_x_bratu_2d = [\n    0.07234633,\n    0.11814877,\n    0.1459185,\n    0.15914495,\n    0.15914495,\n    0.1459185,\n    0.11814877,\n    0.07234633,\n    0.11814877,\n    0.19875438,\n    0.24923944,\n    0.27361473,\n    0.27361473,\n    0.24923944,\n    0.19875438,\n    0.11814877,\n    0.1459185,\n    0.24923944,\n    0.31530971,\n    0.34753593,\n    0.34753593,\n    0.31530971,\n    0.24923944,\n    0.1459185,\n    0.15914495,\n    0.27361473,\n    0.34753593,\n    0.3837784,\n    0.3837784,\n    0.34753593,\n    0.27361473,\n    0.15914495,\n    0.15914495,\n    0.27361473,\n    0.34753593,\n    0.3837784,\n    0.3837784,\n    0.34753593,\n    0.27361473,\n    0.15914495,\n    0.1459185,\n    0.24923944,\n    0.31530971,\n    0.34753593,\n    
0.34753593,\n    0.31530971,\n    0.24923944,\n    0.1459185,\n    0.11814877,\n    0.19875438,\n    0.24923944,\n    0.27361473,\n    0.27361473,\n    0.24923944,\n    0.19875438,\n    0.11814877,\n    0.07234633,\n    0.11814877,\n    0.1459185,\n    0.15914495,\n    0.15914495,\n    0.1459185,\n    0.11814877,\n    0.07234633,\n]\n\nsolution_x_bratu_2d_t = [\n    0.1933024,\n    0.33566336,\n    0.43355494,\n    0.48428111,\n    0.48428111,\n    0.43355494,\n    0.33566336,\n    0.1933024,\n    0.33566336,\n    0.59839893,\n    0.78485783,\n    0.88316504,\n    0.88316504,\n    0.78485783,\n    0.59839893,\n    0.33566336,\n    0.43355494,\n    0.78485783,\n    1.04056365,\n    1.17766089,\n    1.17766089,\n    1.04056365,\n    0.78485783,\n    0.43355494,\n    0.48428111,\n    0.88316504,\n    1.17766089,\n    1.33720634,\n    1.33720634,\n    1.17766089,\n    0.88316504,\n    0.48428111,\n    0.48428111,\n    0.88316504,\n    1.17766089,\n    1.33720634,\n    1.33720634,\n    1.17766089,\n    0.88316504,\n    0.48428111,\n    0.43355494,\n    0.78485783,\n    1.04056365,\n    1.17766089,\n    1.17766089,\n    1.04056365,\n    0.78485783,\n    0.43355494,\n    0.33566336,\n    0.59839893,\n    0.78485783,\n    0.88316504,\n    0.88316504,\n    0.78485783,\n    0.59839893,\n    0.33566336,\n    0.1933024,\n    0.33566336,\n    0.43355494,\n    0.48428111,\n    0.48428111,\n    0.43355494,\n    0.33566336,\n    0.1933024,\n]\n\n\nsolution_x_bratu_3d = [\n    0.24431369,\n    0.27785366,\n    0.19682155,\n    0.27785366,\n    0.32761664,\n    0.23878408,\n    0.19682155,\n    0.23878408,\n    0.18908409,\n    0.27785366,\n    0.32761664,\n    0.23878408,\n    0.32761664,\n    0.39611483,\n    0.29367471,\n    0.23878408,\n    0.29367471,\n    0.2314289,\n    0.19682155,\n    0.23878408,\n    0.18908409,\n    0.23878408,\n    0.29367471,\n    0.2314289,\n    0.18908409,\n    0.2314289,\n    0.18663237,\n]\n\nsolution_x_broydn_3d = [\n    -0.57076119,\n    
-0.68191013,\n    -0.70248602,\n    -0.70626058,\n    -0.70695185,\n    -0.70707842,\n    -0.70710159,\n    -0.70710583,\n    -0.70710661,\n    -0.70710675,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710678,\n    -0.70710677,\n    -0.70710675,\n    -0.70710669,\n    -0.70710654,\n    -0.70710612,\n    -0.70710498,\n    -0.70710185,\n    -0.70709332,\n    -0.70707001,\n    -0.70700634,\n    -0.70683248,\n    -0.70635771,\n    -0.70506153,\n    -0.7015252,\n    -0.69189463,\n    -0.66579752,\n    -0.59603531,\n    -0.4164123,\n]\n\n\nsolution_x_cbratu_2d = [\n    0.16692195,\n    0.2529246,\n    0.2796211,\n    0.2529246,\n    0.16692195,\n    0.2529246,\n    0.39198662,\n    0.43607163,\n    0.39198662,\n    0.2529246,\n    0.2796211,\n    
0.43607163,\n    0.48598608,\n    0.43607163,\n    0.2796211,\n    0.2529246,\n    0.39198662,\n    0.43607163,\n    0.39198662,\n    0.2529246,\n    0.16692195,\n    0.2529246,\n    0.2796211,\n    0.2529246,\n    0.16692195,\n]\n\nsolution_x_cbratu_2d = solution_x_cbratu_2d + [0] * 25\n\nsolution_x_broydn_bd = [\n    -0.00000000e00,\n    -0.00000000e00,\n    -0.00000000e00,\n    0.00000000e00,\n    0.00000000e00,\n    0.00000000e00,\n    0.00000000e00,\n    0.00000000e00,\n    0.00000000e00,\n    -0.00000000e00,\n    -0.00000000e00,\n    -0.00000000e00,\n    -0.00000000e00,\n    -0.00000000e00,\n    -0.00000000e00,\n    0.00000000e00,\n    0.00000000e00,\n    0.00000000e00,\n    0.00000000e00,\n    0.00000000e00,\n    -0.00000000e00,\n    -0.00000000e00,\n    -0.00000000e00,\n    -0.00000000e00,\n    -0.00000000e00,\n    -0.00000000e00,\n    0.00000000e00,\n    0.00000000e00,\n    0.00000000e00,\n    0.00000000e00,\n    0.00000000e00,\n    0.00000000e00,\n    -0.00000000e00,\n    -0.00000000e00,\n    -1.00000000e-10,\n    -2.00000000e-10,\n    -2.00000000e-10,\n    -1.00000000e-10,\n    4.00000000e-10,\n    1.40000000e-09,\n    3.00000000e-09,\n    4.60000000e-09,\n    4.70000000e-09,\n    -1.00000000e-10,\n    -1.43000000e-08,\n    -4.23000000e-08,\n    -8.25000000e-08,\n    -1.17600000e-07,\n    -1.00700000e-07,\n    5.54000000e-08,\n    4.68300000e-07,\n    1.22420000e-06,\n    2.22540000e-06,\n    2.92120000e-06,\n    1.96990000e-06,\n    -2.95480000e-06,\n    -1.47187000e-05,\n    -3.48246000e-05,\n    -5.90960000e-05,\n    -7.05915000e-05,\n    -3.15647000e-05,\n    1.19032300e-04,\n    4.48647900e-04,\n    9.73368600e-04,\n    1.53772420e-03,\n    1.63273940e-03,\n    2.14717600e-04,\n    -4.30681910e-03,\n    -1.36127680e-02,\n    -2.81043041e-02,\n    -4.39233903e-02,\n    -4.73306566e-02,\n    -8.45337580e-03,\n    1.04321937e-01,\n    2.74938066e-01,\n    4.54655029e-01,\n    6.22031184e-01,\n    7.74293819e-01,\n    9.11375485e-01,\n    
1.03226579e00,\n    1.13635201e00,\n    1.22498498e00,\n    1.30019836e00,\n    1.36374913e00,\n    1.41711415e00,\n    1.46168952e00,\n    1.49882961e00,\n    1.52972625e00,\n    1.55537824e00,\n    1.57663224e00,\n    1.59421664e00,\n    1.60875287e00,\n    1.62076049e00,\n    1.63067143e00,\n    1.63884538e00,\n    1.64557503e00,\n    1.65102930e00,\n    1.65461538e00,\n    1.64858082e00,\n    1.55247986e00,\n]\n\nsolution_x_chemrctb = [\n    0.05141945,\n    0.05203209,\n    0.05267567,\n    0.05335175,\n    0.05406197,\n    0.05480806,\n    0.05559182,\n    0.05641517,\n    0.05728009,\n    0.05818869,\n    0.05914317,\n    0.06014585,\n    0.06119916,\n    0.06230566,\n    0.06346804,\n    0.06468911,\n    0.06597184,\n    0.06731935,\n    0.0687349,\n    0.07022193,\n    0.07178406,\n    0.07342507,\n    0.07514894,\n    0.07695987,\n    0.07886224,\n    0.08086068,\n    0.08296004,\n    0.08516541,\n    0.08748215,\n    0.08991588,\n    0.09247251,\n    0.09515826,\n    0.09797963,\n    0.10094348,\n    0.104057,\n    0.10732776,\n    0.11076369,\n    0.11437313,\n    0.11816486,\n    0.12214807,\n    0.12633243,\n    0.13072811,\n    0.13534578,\n    0.14019665,\n    0.14529249,\n    0.15064569,\n    0.15626923,\n    0.16217677,\n    0.16838265,\n    0.17490194,\n    0.18175047,\n    0.18894487,\n    0.19650261,\n    0.20444203,\n    0.21278242,\n    0.22154402,\n    0.23074811,\n    0.24041703,\n    0.25057426,\n    0.26124447,\n    0.27245356,\n    0.28422875,\n    0.29659862,\n    0.30959322,\n    0.32324409,\n    0.33758438,\n    0.35264891,\n    0.36847425,\n    0.38509884,\n    0.40256304,\n    0.42090924,\n    0.440182,\n    0.46042812,\n    0.48169675,\n    0.50403953,\n    0.52751072,\n    0.55216731,\n    0.57806915,\n    0.60527915,\n    0.63386338,\n    0.66389124,\n    0.69543563,\n    0.72857315,\n    0.76338427,\n    0.79995348,\n    0.83836951,\n    0.87872535,\n    0.921118,\n    0.96564698,\n    1.01240975,\n    1.06148865,\n    
1.11291774,\n    1.16660739,\n    1.22219286,\n    1.27878417,\n    1.33468684,\n    1.38740197,\n    1.43443791,\n    1.47507982,\n    1.51238643,\n]\n\n\nsolution_x_drcavty3 = [\n    6.90580000e-06,\n    -3.04054000e-05,\n    -1.34595400e-04,\n    -2.98301400e-04,\n    -3.97564800e-04,\n    -2.82615200e-04,\n    -1.00791500e-04,\n    1.18693000e-05,\n    4.83418000e-05,\n    3.86272000e-05,\n    -3.61169000e-05,\n    -1.56090300e-04,\n    -3.44522400e-04,\n    -5.22159200e-04,\n    -5.02848100e-04,\n    -1.96532500e-04,\n    4.01814000e-05,\n    1.66926300e-04,\n    1.64254200e-04,\n    9.75942000e-05,\n    -1.53179900e-04,\n    -3.26999400e-04,\n    -5.35655500e-04,\n    -5.17594800e-04,\n    -2.45473400e-04,\n    2.11398200e-04,\n    3.85544900e-04,\n    4.70161600e-04,\n    3.19836200e-04,\n    1.48115900e-04,\n    -3.44263800e-04,\n    -3.05706200e-04,\n    -6.07866500e-04,\n    -1.40639000e-04,\n    3.54345200e-04,\n    1.17906180e-03,\n    1.27587890e-03,\n    6.46781700e-04,\n    2.97807400e-04,\n    9.77706000e-05,\n    -3.80139500e-04,\n    5.85784900e-04,\n    -4.34699000e-04,\n    1.15040270e-03,\n    2.93253490e-03,\n    5.19921130e-03,\n    3.26982700e-03,\n    -1.15543100e-03,\n    -3.31632400e-04,\n    -9.65743000e-05,\n    8.88011200e-04,\n    4.55121760e-03,\n    1.59257740e-03,\n    4.02608170e-03,\n    5.27395750e-03,\n    -2.05009960e-03,\n    -7.90681200e-04,\n    1.29072190e-03,\n    3.92764700e-04,\n    -7.23810000e-05,\n    1.10527329e-02,\n    1.14289463e-02,\n    1.01554380e-03,\n    -4.10803130e-03,\n    -1.39518580e-03,\n    1.43680550e-03,\n    -2.32410100e-04,\n    3.02444440e-03,\n    1.54672000e-04,\n    -3.88632200e-04,\n    4.87177720e-03,\n    -1.17441400e-03,\n    6.05647400e-04,\n    -6.18932200e-04,\n    -1.81334350e-03,\n    5.15906690e-03,\n    1.41277700e-04,\n    6.31930020e-03,\n    8.67670500e-04,\n    1.30191470e-03,\n    2.96133460e-03,\n    3.64054300e-03,\n    2.00721890e-03,\n    5.74324870e-03,\n    
2.01317600e-04,\n    5.60508670e-03,\n    1.15676060e-03,\n    8.20725550e-03,\n    -9.88774500e-04,\n    1.46054681e-02,\n    4.93810300e-04,\n    3.65006800e-04,\n    6.47333900e-04,\n    7.25182800e-04,\n    1.71821900e-04,\n    2.96466900e-04,\n    -7.95212300e-04,\n    1.80194150e-03,\n    8.79835000e-04,\n    1.17217338e-02,\n]\n\nsolution_x_drcavty2 = [\n    -8.30500000e-07,\n    1.79025100e-04,\n    4.69755400e-04,\n    6.91706100e-04,\n    7.63680500e-04,\n    6.99211100e-04,\n    5.59898000e-04,\n    4.13496000e-04,\n    2.89295400e-04,\n    1.58674600e-04,\n    1.44396300e-04,\n    6.45348200e-04,\n    1.19393250e-03,\n    1.48581000e-03,\n    1.49174680e-03,\n    1.30666740e-03,\n    1.04594130e-03,\n    7.90114600e-04,\n    5.54089300e-04,\n    2.86541200e-04,\n    4.86092200e-04,\n    1.31996230e-03,\n    1.90360630e-03,\n    2.06459340e-03,\n    1.95310610e-03,\n    1.71807210e-03,\n    1.41349750e-03,\n    1.06405470e-03,\n    6.90019300e-04,\n    2.91278200e-04,\n    1.00536400e-03,\n    1.98412810e-03,\n    2.39879170e-03,\n    2.47713040e-03,\n    2.38995890e-03,\n    2.11023730e-03,\n    1.58275540e-03,\n    9.59023300e-04,\n    3.65828300e-04,\n    -2.52386000e-05,\n    1.70639510e-03,\n    2.47798200e-03,\n    2.96272640e-03,\n    3.23424450e-03,\n    2.92194380e-03,\n    1.83925430e-03,\n    5.17510800e-04,\n    -2.72294500e-04,\n    -8.42981300e-04,\n    -6.44882600e-04,\n    3.47563010e-03,\n    3.68554070e-03,\n    4.85243520e-03,\n    4.30556650e-03,\n    2.59563830e-03,\n    1.23414300e-04,\n    -1.12148630e-03,\n    -1.16433340e-03,\n    -1.76218150e-03,\n    -5.08449600e-04,\n    1.02089465e-02,\n    9.19876750e-03,\n    7.38832940e-03,\n    8.91347900e-04,\n    -2.02918160e-03,\n    -4.35306900e-04,\n    1.74552680e-03,\n    -3.82299000e-04,\n    -1.89595900e-03,\n    1.44318390e-03,\n    8.39182720e-03,\n    2.10036430e-03,\n    -2.07708990e-03,\n    -7.15986500e-04,\n    -1.22269490e-03,\n    2.85020860e-03,\n    1.86361079e-02,\n   
 -1.85665600e-04,\n    5.77159200e-04,\n    1.16139361e-02,\n    7.12641800e-03,\n    2.28174230e-03,\n    -4.41730960e-03,\n    1.19527564e-02,\n    2.02136034e-02,\n    1.78591365e-02,\n    1.06707580e-01,\n    3.00444810e-03,\n    1.98001460e-02,\n    1.37005246e-01,\n    2.73846000e-03,\n    7.47556450e-03,\n    1.07964128e-02,\n    1.81864591e-02,\n    -1.19626975e-02,\n    -5.17858661e-02,\n    -2.97147410e-02,\n    -5.84116800e-03,\n    1.62672675e-01,\n    9.02415668e-01,\n]\n\nsolution_x_freurone = [\n    12.26912153,\n    -0.83186186,\n    -1.50692279,\n    -1.53467102,\n    -1.53579843,\n    -1.53584421,\n    -1.53584607,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584616,\n    -1.53584616,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584616,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584616,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584616,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    
-1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584616,\n    -1.53584615,\n    -1.53584616,\n    -1.53584615,\n    -1.53584615,\n    -1.53584615,\n    -1.53584617,\n    -1.53584665,\n    -1.53585846,\n    -1.53614941,\n    -1.54330584,\n]\n\n\nsolution_x_integreq = [\n    -0.0049257,\n    -0.00980164,\n    -0.01462709,\n    -0.01940127,\n    -0.02412341,\n    -0.0287927,\n    -0.03340834,\n    -0.03796949,\n    -0.04247531,\n    -0.04692493,\n    -0.05131748,\n    -0.05565204,\n    -0.0599277,\n    -0.06414352,\n    -0.06829854,\n    -0.07239178,\n    -0.07642224,\n    -0.08038888,\n    -0.08429068,\n    -0.08812655,\n    -0.09189541,\n    -0.09559613,\n    -0.09922759,\n    -0.1027886,\n    -0.10627797,\n    -0.10969449,\n    -0.11303691,\n    -0.11630394,\n    -0.11949428,\n    -0.1226066,\n    -0.12563951,\n    -0.12859164,\n    -0.13146152,\n    -0.13424771,\n    -0.1369487,\n    -0.13956294,\n    -0.14208886,\n    -0.14452484,\n    -0.14686924,\n    -0.14912034,\n    -0.15127642,\n    -0.15333569,\n    -0.15529633,\n    -0.15715646,\n    -0.15891416,\n    -0.16056747,\n    -0.16211435,\n    -0.16355275,\n    -0.16488053,\n    -0.16609551,\n    -0.16719544,\n    -0.16817804,\n    -0.16904092,\n    -0.16978168,\n    -0.17039781,\n    -0.17088676,\n    -0.17124589,\n    -0.17147249,\n    -0.1715638,\n    -0.17151695,\n    -0.171329,\n    -0.17099693,\n    -0.17051763,\n    -0.16988789,\n    -0.16910442,\n    -0.16816384,\n    -0.16706265,\n    -0.16579725,\n    -0.16436394,\n    -0.16275891,\n    -0.16097822,\n    -0.15901783,\n    -0.15687354,\n    -0.15454105,\n    -0.15201592,\n    -0.14929355,\n    -0.14636922,\n    -0.14323804,\n    -0.13989497,\n    -0.13633479,\n    -0.13255212,\n    -0.1285414,\n    -0.12429688,\n    -0.11981262,\n    -0.11508247,\n    -0.11010007,\n    -0.10485884,\n    -0.09935198,\n    -0.09357242,\n    -0.08751287,\n    -0.08116576,\n    -0.07452324,\n  
  -0.06757719,\n    -0.06031917,\n    -0.05274045,\n    -0.04483194,\n    -0.03658422,\n    -0.02798751,\n    -0.01903165,\n    -0.01008278,\n]\n\nsolution_x_msqrta = [\n    8.0e-10,\n    -5.0e-10,\n    -2.0e-10,\n    -1.0e-10,\n    1.0e-10,\n    -5.0e-10,\n    -1.0e-10,\n    -4.0e-10,\n    -2.0e-10,\n    2.0e-10,\n    2.1e-09,\n    -1.0e-09,\n    -7.0e-10,\n    -4.0e-10,\n    7.0e-10,\n    -1.4e-09,\n    -3.0e-10,\n    -8.0e-10,\n    4.0e-10,\n    6.0e-10,\n    2.2e-09,\n    -1.6e-09,\n    8.0e-10,\n    -5.0e-10,\n    -1.2e-09,\n    -5.0e-10,\n    -7.0e-10,\n    -1.7e-09,\n    -2.1e-09,\n    2.0e-10,\n    -1.2e-09,\n    -3.0e-10,\n    1.4e-09,\n    -1.0e-10,\n    -1.7e-09,\n    1.4e-09,\n    1.0e-10,\n    -9.0e-10,\n    -2.1e-09,\n    -5.0e-10,\n    -2.7e-09,\n    6.0e-10,\n    -1.0e-09,\n    2.0e-10,\n    1.2e-09,\n    -4.0e-10,\n    2.1e-09,\n    8.0e-10,\n    6.0e-10,\n    4.0e-10,\n    -2.9e-09,\n    1.0e-09,\n    -4.0e-10,\n    3.0e-10,\n    5.0e-10,\n    6.0e-10,\n    1.5e-09,\n    9.0e-10,\n    4.0e-10,\n    -1.0e-10,\n    2.9e-09,\n    -1.4e-09,\n    5.0e-10,\n    -4.0e-10,\n    -7.0e-10,\n    -7.0e-10,\n    -1.2e-09,\n    -1.4e-09,\n    -1.2e-09,\n    2.0e-10,\n    -1.3e-09,\n    9.0e-10,\n    -4.0e-10,\n    3.0e-10,\n    6.0e-10,\n    2.0e-10,\n    5.0e-10,\n    1.1e-09,\n    1.0e-09,\n    -1.0e-10,\n    2.5e-09,\n    -1.1e-09,\n    1.3e-09,\n    -3.0e-10,\n    -1.6e-09,\n    3.0e-10,\n    -1.6e-09,\n    -1.4e-09,\n    -1.7e-09,\n    -3.0e-10,\n    -3.1e-09,\n    7.0e-10,\n    4.0e-10,\n    3.0e-10,\n    -6.0e-10,\n    1.4e-09,\n    1.3e-09,\n    2.0e-10,\n    -8.0e-10,\n    -5.0e-10,\n]\n\n\nsolution_x_msqrtb = [\n    1.2e-09,\n    -3.0e-10,\n    2.0e-10,\n    -4.0e-10,\n    -3.0e-10,\n    6.0e-10,\n    -2.0e-10,\n    -8.0e-10,\n    -6.0e-10,\n    -0.0e00,\n    8.0e-10,\n    -1.6e-09,\n    -1.3e-09,\n    -5.0e-10,\n    -0.0e00,\n    -2.0e-09,\n    3.0e-10,\n    -9.0e-10,\n    -1.0e-10,\n    1.0e-09,\n    2.3e-09,\n    -0.0e00,\n    1.3e-09,\n    
-6.0e-10,\n    -1.3e-09,\n    1.1e-09,\n    -1.0e-09,\n    -1.2e-09,\n    -1.9e-09,\n    0.0e00,\n    -1.1e-09,\n    1.9e-09,\n    1.9e-09,\n    7.0e-10,\n    -5.0e-10,\n    1.0e-09,\n    -8.0e-10,\n    1.3e-09,\n    -4.0e-10,\n    -6.0e-10,\n    -3.1e-09,\n    -1.0e-10,\n    -2.2e-09,\n    7.0e-10,\n    2.2e-09,\n    -7.0e-10,\n    1.8e-09,\n    1.3e-09,\n    3.1e-09,\n    -4.0e-10,\n    -1.6e-09,\n    1.7e-09,\n    1.3e-09,\n    8.0e-10,\n    -0.0e00,\n    1.0e-09,\n    -3.0e-10,\n    1.5e-09,\n    3.0e-10,\n    -7.0e-10,\n    4.2e-09,\n    -1.0e-09,\n    5.0e-10,\n    -1.4e-09,\n    -9.0e-10,\n    2.7e-09,\n    -7.0e-10,\n    -2.7e-09,\n    -2.0e-09,\n    -3.0e-10,\n    -8.0e-10,\n    5.0e-10,\n    -4.0e-10,\n    2.0e-10,\n    1.2e-09,\n    2.2e-09,\n    7.0e-10,\n    3.0e-10,\n    1.3e-09,\n    -1.1e-09,\n    2.7e-09,\n    4.0e-10,\n    2.6e-09,\n    -5.0e-10,\n    -2.8e-09,\n    -6.0e-10,\n    -2.2e-09,\n    -9.0e-10,\n    -3.6e-09,\n    8.0e-10,\n    -2.4e-09,\n    3.1e-09,\n    2.9e-09,\n    1.4e-09,\n    -8.0e-10,\n    1.1e-09,\n    -1.1e-09,\n    2.5e-09,\n    -4.0e-10,\n    -9.0e-10,\n]\n\nsolution_x_penalty2 = [\n    1.00248452e-01,\n    -1.60000000e-09,\n    -1.40000000e-09,\n    -1.50000000e-09,\n    -1.00000000e-09,\n    -9.00000000e-10,\n    -8.00000000e-10,\n    -6.00000000e-10,\n    -1.20000000e-09,\n    -8.00000000e-10,\n    -7.00000000e-10,\n    -6.00000000e-10,\n    -4.00000000e-10,\n    -4.00000000e-10,\n    -2.00000000e-10,\n    -2.00000000e-10,\n    -3.00000000e-10,\n    -3.00000000e-10,\n    -1.00000000e-10,\n    -0.00000000e00,\n    0.00000000e00,\n    1.00000000e-10,\n    1.00000000e-10,\n    0.00000000e00,\n    0.00000000e00,\n    1.00000000e-10,\n    1.00000000e-10,\n    1.00000000e-10,\n    -1.00000000e-10,\n    -4.00000000e-10,\n    -4.00000000e-10,\n    -1.00000000e-09,\n    -7.00000000e-10,\n    -6.00000000e-10,\n    -8.00000000e-10,\n    -8.00000000e-10,\n    -1.20000000e-09,\n    -9.00000000e-10,\n    -6.00000000e-10,\n    
-4.00000000e-10,\n    -1.00000000e-10,\n    2.00000000e-10,\n    5.00000000e-10,\n    1.00000000e-09,\n    5.40000000e-09,\n    2.20000000e-09,\n    7.20000000e-09,\n    8.50000000e-09,\n    9.60000000e-09,\n    1.00000000e-08,\n    1.14000000e-08,\n    5.80000000e-09,\n    1.62000000e-08,\n    1.82000000e-08,\n    2.15000000e-08,\n    2.35000000e-08,\n    2.57000000e-08,\n    2.90000000e-08,\n    3.19000000e-08,\n    3.09000000e-08,\n    4.16000000e-08,\n    4.66000000e-08,\n    5.31000000e-08,\n    6.20000000e-08,\n    6.85000000e-08,\n    7.99000000e-08,\n    9.37000000e-08,\n    1.08300000e-07,\n    1.24300000e-07,\n    1.46700000e-07,\n    1.64900000e-07,\n    1.87800000e-07,\n    2.08200000e-07,\n    2.40000000e-07,\n    2.78900000e-07,\n    3.24200000e-07,\n    3.77700000e-07,\n    4.22400000e-07,\n    4.89200000e-07,\n    5.68200000e-07,\n    6.64900000e-07,\n    7.72800000e-07,\n    8.93500000e-07,\n    1.05180000e-06,\n    1.24180000e-06,\n    1.46270000e-06,\n    1.73540000e-06,\n    2.06050000e-06,\n    2.45940000e-06,\n    2.96690000e-06,\n    3.61230000e-06,\n    4.44100000e-06,\n    5.50230000e-06,\n    6.96620000e-06,\n    8.98120000e-06,\n    1.18847000e-05,\n    1.64570000e-05,\n    2.42465000e-05,\n    4.02062000e-05,\n    4.21655000e-05,\n]\n\n\nsolution_x_watson = [\n    -0.00000000e00,\n    1.00000000e00,\n    -9.80000000e-09,\n    3.33333416e-01,\n    3.52440000e-06,\n    1.33262416e-01,\n    5.10786700e-04,\n    5.27159393e-02,\n    -4.88557280e-03,\n    7.04208489e-02,\n    -1.76310368e-01,\n    3.59652497e-01,\n    -3.34930081e-01,\n    -1.06954704e-01,\n    7.10806973e-01,\n    -7.44769987e-01,\n    1.39770112e-01,\n    2.23466491e-01,\n    -2.30955205e-02,\n    -4.13852010e-03,\n    -2.40655722e-01,\n    2.11825083e-01,\n    -7.89005230e-02,\n    2.52472539e-02,\n    5.33065585e-02,\n    1.99652115e-01,\n    -5.38278039e-01,\n    2.27228847e-01,\n    2.79127878e-01,\n    -2.82103374e-01,\n    7.20994063e-02,\n]\n\n\nsolution_x_yatpsq_1 = 
[7.06817436] * 100 + [4011.71977601] * 10 + [-4045.83698215] * 10\n\nsolution_x_yatpsq_2 = [0.0500104219] * 100 + [31.74567612] * 10 + [-32.22096803] * 10\n\nsolution_x_arglble = [\n    1.47780425,\n    1.95560851,\n    3.92938553,\n    2.91121702,\n    8.01627094,\n    6.85877107,\n    15.21786782,\n    4.82243403,\n    6.00860058,\n    15.03254187,\n    23.28986922,\n    12.71754213,\n    -26.58525249,\n    29.43573564,\n    15.37597436,\n    8.64486806,\n    -29.5319652,\n    11.01720116,\n    -42.30702246,\n    29.06508375,\n    -6.28404714,\n    45.57973843,\n    -20.97480585,\n    24.43508427,\n    -31.03091276,\n    -54.17050498,\n    -28.61801666,\n    57.87147127,\n    51.17010631,\n    29.75194872,\n    -20.71188224,\n    16.28973612,\n    150.20741347,\n    -60.0639304,\n    -83.05993998,\n    21.03440232,\n    91.61798856,\n    -85.61404492,\n    18.37256156,\n    57.1301675,\n    37.08398118,\n    -13.56809428,\n    121.49145482,\n    90.15947687,\n    12.46203868,\n    -42.9496117,\n    -29.66369732,\n    47.87016853,\n    142.29920239,\n    -63.06182553,\n    3.443138,\n    -109.34100996,\n    -194.46227627,\n    -58.23603332,\n    74.84482245,\n    114.74294254,\n    -32.27461444,\n    101.34021261,\n    48.55025892,\n    58.50389743,\n    66.92509996,\n    -42.42376449,\n    -96.64369487,\n    31.57947224,\n    -83.36761354,\n    299.41482695,\n    -67.29433552,\n    -121.1278608,\n    -35.15915764,\n    -167.11987996,\n    -88.92036545,\n    41.06880464,\n    242.22658666,\n    182.23597711,\n    72.43687845,\n    -172.22808985,\n    156.51103949,\n    35.74512311,\n    -129.889251,\n    113.260335,\n    61.76069205,\n    73.16796235,\n    -226.09539309,\n    -28.13618857,\n    -140.01767085,\n    241.98290964,\n    -188.31315279,\n    179.31895373,\n    -102.19596978,\n    23.92407737,\n    -20.6453831,\n    -86.8992234,\n    -260.60293104,\n    -60.32739463,\n    -187.46576175,\n    94.74033707,\n    -26.40609501,\n    283.59840478,\n    
-81.00626161,\n    -6.23327543,\n]\n\n\nsolution_x_morebvne = [\n    -0.00480171244711894,\n    -0.009553656441466189,\n    -0.014255063178091082,\n    -0.018905148312847776,\n    -0.02350311156607665,\n    -0.028048136314090373,\n    -0.03253938916802462,\n    -0.03697601953959521,\n    -0.04135715919328434,\n    -0.04568192178445848,\n    -0.04994940238289979,\n    -0.054158676981210775,\n    -0.058308801987528955,\n    -0.06239881370196436,\n    -0.06642772777614718,\n    -0.07039453865524663,\n    -0.07429821900179416,\n    -0.07813771910061508,\n    -0.08191196624414197,\n    -0.0856198640973513,\n    -0.08926029204153081,\n    -0.09283210449604987,\n    -0.09633413021726782,\n    -0.09976517157367622,\n    -0.10312400379632972,\n    -0.10640937420357734,\n    -0.10962000139906009,\n    -0.11275457444189355,\n    -0.11581175198790325,\n    -0.11879016140072804,\n    -0.12168839783155079,\n    -0.12450502326615709,\n    -0.12723856553796054,\n    -0.12988751730556838,\n    -0.13245033499339215,\n    -0.13492543769373558,\n    -0.13731120602871583,\n    -0.13960598097029298,\n    -0.14180806261659795,\n    -0.1439157089226587,\n    -0.14592713438352983,\n    -0.14784050866773057,\n    -0.1496539551987893,\n    -0.15136554968258126,\n    -0.1529733185780274,\n    -0.15447523750859646,\n    -0.15586922961191965,\n    -0.15715316382468714,\n    -0.15832485309984537,\n    -0.1593820525529578,\n    -0.16032245753442315,\n    -0.1611437016240685,\n    -0.16184335454444554,\n    -0.16241891998896002,\n    -0.16286783336074998,\n    -0.16318745941800578,\n    -0.16337508982118337,\n    -0.16342794057730933,\n    -0.1633431493763049,\n    -0.16311777281396858,\n    -0.16274878349595126,\n    -0.16223306701673054,\n    -0.1615674188072447,\n    -0.16074854084447576,\n    -0.15977303821587704,\n    -0.15863741553111918,\n    -0.15733807317317933,\n    -0.15587130338031904,\n    -0.15423328614998352,\n    -0.15242008495510762,\n    -0.15042764226272823,\n    
-0.14825177484417715,\n    -0.145888168865457,\n    -0.14333237474568616,\n    -0.14057980177072898,\n    -0.13762571244830293,\n    -0.13446521658997057,\n    -0.13109326510447508,\n    -0.12750464348585883,\n    -0.12369396497871003,\n    -0.11965566340170586,\n    -0.11538398560935402,\n    -0.11087298357047293,\n    -0.10611650604048345,\n    -0.1011081898030038,\n    -0.09584145045453528,\n    -0.09030947270418657,\n    -0.08450520015839938,\n    -0.07842132455849246,\n    -0.07205027443652223,\n    -0.06538420315244962,\n    -0.058414976272886505,\n    -0.05113415824875308,\n    -0.04353299834598826,\n    -0.03560241577999431,\n    -0.027332984000740478,\n    -0.018714914071368417,\n    -0.009738037078703543,\n    -0.00039178550924665913,\n    0.009334826481040273,\n]\n\nsolution_x_oscigrne = [\n    -0.999903551150572,\n    1.000114247321587,\n    0.9998642692032601,\n    1.0001606899137698,\n    0.9998089690015318,\n    1.00022598434592,\n    0.999731099815014,\n    1.0003177559633685,\n    0.9996214137443762,\n    1.000446688078944,\n    0.9994668378325214,\n    1.0006277224039304,\n    0.999248858409288,\n    1.000881705203141,\n    0.9989411926230166,\n    1.0012376192059906,\n    0.998506407461758,\n    1.0017355588402022,\n    0.9978909678746266,\n    1.0024305943941474,\n    0.9970179188459485,\n    1.0033975851656824,\n    0.9957759910544586,\n    1.0047367560724156,\n    0.9940033482196368,\n    1.0065792892561265,\n    0.991463576275275,\n    1.0090910440981682,\n    0.9878113950447263,\n    1.0124704597271386,\n    0.9825476906558169,\n    1.016933474160337,\n    0.97497223558056,\n    1.0226745117037057,\n    0.9641665140655163,\n    1.0297915464178238,\n    0.949087499582646,\n    1.0381739331985491,\n    0.9289092394703534,\n    1.0473881814350956,\n    0.9037032879438561,\n    1.0566576332592028,\n    0.8751912025461293,\n    1.0650493325263044,\n    0.8467346025073008,\n    1.071836505625808,\n    0.8219637719978666,\n    1.0767742560792348,\n 
   0.8030052123113574,\n    1.0800668731949534,\n    0.7899438210393462,\n    1.0821271313195384,\n    0.781606186399683,\n    1.0833627968298138,\n    0.7765461227304188,\n    1.0840845613674477,\n    0.7735701654509769,\n    1.0844995317623756,\n    0.7718524469999704,\n    1.0847359223290596,\n    0.7708717537908896,\n    1.084869871714956,\n    0.7703153481248035,\n    1.0849455446733298,\n    0.770000790359205,\n    1.084988222260974,\n    0.7698233176390366,\n    1.0850122686562047,\n    0.7697233024137095,\n    1.0850258120070317,\n    0.7696669756983013,\n    1.0850334441314584,\n    0.7696352680150108,\n    1.0850377667549869,\n    0.7696174327289053,\n    1.0850402906360344,\n    0.769607435938671,\n    1.0850420199210213,\n    0.7696019492096186,\n    1.0850440403901882,\n    0.7695993333871978,\n    1.085048727912368,\n    0.7695994609478275,\n    1.0850634025349974,\n    0.769604955537232,\n    1.0851124682328854,\n    0.7696264320469576,\n    1.0852784437585632,\n    0.7697008512420949,\n    1.0858411700848751,\n    0.7699541437010324,\n    1.0877518530579322,\n    0.7708145141755096,\n    1.0942649130343773,\n    0.7737450447320231,\n    1.1167550385396223,\n    0.7838340360674543,\n    1.197692695917145,\n    0.8197587877734469,\n    0.34400894026358253,\n]\n\nsolution_x_spmsqrt = [\n    0.8414709848078964,\n    -0.7568024953079281,\n    0.41211848524175654,\n    -0.2879033166650651,\n    -0.13235175009777303,\n    -0.991778853443116,\n    -0.9537526527594719,\n    0.9200260381967906,\n    -0.6298879942744537,\n    -0.5063656411097588,\n    0.9988152247235795,\n    -0.4910215938984694,\n    -0.6019998676776046,\n    0.9395300555699313,\n    -0.9300948780045254,\n    -0.9992080341070627,\n    -0.026521020285755953,\n    -0.40406521945636065,\n    0.2793865543595699,\n    -0.8509193596391765,\n    0.923470012926003,\n    0.1935029667421232,\n    0.9364725475338365,\n    -0.8859527784925296,\n    0.176016272833866,\n    -0.5291338443628917,\n    
0.14993681711330134,\n    -0.9851359060614224,\n    -0.8115681644677004,\n    0.9978032744219705,\n    -0.32153677367579575,\n    -0.15853338004399595,\n    0.9055399984980432,\n    -0.10589758762554138,\n    -0.21933702833760824,\n    0.9956757929363228,\n    -0.6701396839379524,\n    -0.9055272090161384,\n    0.45213333953209767,\n    -0.8012247906768953,\n    -0.24539810131000517,\n    -0.9999908622413068,\n    0.9851203677373821,\n    0.7025150575473956,\n    0.97049168633502,\n    -0.9905826083622151,\n    -0.4442747122315391,\n    -0.9365254011824229,\n    0.7333337958292518,\n    -0.6501275235748957,\n    -0.236456371968843,\n    0.7902854647755708,\n    0.4042582281073567,\n    0.5663064119145462,\n    0.346394965535536,\n    0.6369471771360007,\n    0.5590140193623636,\n    0.6017832141649304,\n    0.11508425966985522,\n    -0.2620839590180966,\n    0.9766556656643753,\n    -0.9660321335212897,\n    -0.9201559227267819,\n    -0.5946419876082146,\n    0.4278557468834321,\n    0.9835224135737828,\n    0.3296208750563675,\n    -0.4117614029834671,\n    -0.9965019983464922,\n    -0.7736233386803075,\n    0.9509241545016164,\n    0.3635926207547267,\n    0.7570979728966365,\n    -0.20259269090077123,\n    0.9997657290235363,\n    0.9835006076878136,\n    -0.7274941973722288,\n    0.9535984876805766,\n    0.9745271031531163,\n    -0.5444763096196569,\n    0.9767074399435044,\n    0.8369692092360629,\n    0.49052257006311906,\n    -0.017099129324754637,\n    -0.6155654443683672,\n    0.6372259975359011,\n    -0.7853724073864938,\n    0.025888206258587287,\n    -0.8648845336882347,\n    0.8272184413093554,\n    -0.23598771211618058,\n    0.5221681399851388,\n    -0.1941831363419754,\n    0.963594168151382,\n    0.714349265095485,\n    -0.9904998832255181,\n    0.06994035488705914,\n    -0.1506818641899401,\n    -0.6954738915705097,\n    -0.3056143888882522,\n]\n\n\nsolution_x_semicon2 = [\n    -6.349124087002204e-18,\n    -2.720672286121828e-18,\n    
-2.0494594670589808e-18,\n    2.2161886201338454e-18,\n    1.2316133510181184e-18,\n    -3.2705004691851026e-18,\n    -3.807751676148402e-18,\n    3.4276320135398005e-18,\n    2.2326202305653343e-18,\n    -7.019626524587994e-18,\n    -5.858568597552852e-18,\n    -9.211616078255237e-18,\n    -7.71770563976305e-18,\n    -7.876848461287058e-18,\n    -1.6000476267872514e-18,\n    -1.734489564328033e-18,\n    1.7775061650713925e-19,\n    -5.073461303752388e-18,\n    -7.328302750507605e-18,\n    -3.1238828312289746e-18,\n    5.980268474857619e-19,\n    -1.6975325642760375e-18,\n    4.937452418560979e-18,\n    1.3511147727766607e-17,\n    5.749609112964981e-17,\n    1.801648085519756e-16,\n    5.786887187854167e-16,\n    1.878507150702686e-15,\n    6.136128040698149e-15,\n    2.0030487111145893e-14,\n    6.534292422120726e-14,\n    2.1313997714716273e-13,\n    6.952358252095008e-13,\n    2.2677726547784577e-12,\n    7.39723184163866e-12,\n    2.412907024826009e-11,\n    7.870673668386567e-11,\n    2.567338541527492e-10,\n    8.374412732822885e-10,\n    2.731653274793591e-09,\n    8.910391477241349e-09,\n    2.906484367660483e-08,\n    9.480673285799459e-08,\n    3.092504307341268e-07,\n    1.008744689447865e-06,\n    3.2904220583605265e-06,\n    1.073297189268266e-05,\n    3.500918292749676e-05,\n    0.00011418868707255388,\n    0.00037238836057455765,\n    0.001213800218967423,\n    0.003949812221062431,\n    0.012784144445260801,\n    0.040678780990535776,\n    0.12303542035650665,\n    0.32818258157808705,\n    0.7151930120566522,\n    1.2976206270033757,\n    2.0761013706015463,\n    3.050641312078585,\n    4.221240463432072,\n    5.587898824666944,\n    7.1506163957832,\n    8.90939317678084,\n    10.864229167659865,\n    13.015124368420274,\n    15.362078779062067,\n    17.905092399585243,\n    20.644165229989806,\n    23.57929727027575,\n    26.71048852044308,\n    30.037738980491792,\n    33.56104865042189,\n    37.28041753023337,\n    41.19584561992624,\n    
45.30733291950049,\n    49.61487942895613,\n    54.118485148293146,\n    58.81815007751155,\n    63.71387421661134,\n    68.80565756559251,\n    74.09350012445506,\n    79.577401893199,\n    85.25736287182431,\n    91.13338306033101,\n    97.2054624587191,\n    103.47360106698856,\n    109.93779888513943,\n    116.59805591317166,\n    123.45437215108528,\n    130.5067475988803,\n    135.59853094786146,\n    138.72972219802878,\n    139.9003970309516,\n    139.9942331482723,\n    139.99967247706724,\n    139.9999814190815,\n    139.99999894593952,\n    139.99999994020595,\n    139.9999999966189,\n]\n\nsolution_x_qr3d = [\n    0.8944271909999159,\n    0.39036002917941326,\n    0.18505699313910443,\n    0.095507370926703,\n    0.0652019862276467,\n    -0.4472135954999579,\n    0.7807200583588265,\n    0.3701139862782089,\n    0.19101474185340606,\n    0.13040397245529342,\n    0,\n    -0.4879500364742666,\n    0.7402279725564178,\n    0.3820294837068121,\n    0.26080794491058684,\n    0,\n    0,\n    -0.5299359348983446,\n    0.7003873867958222,\n    0.47814789900274257,\n    0,\n    0,\n    0,\n    -0.5638286020497468,\n    0.8258918255501917,\n    0.447213595499958,\n    -0.35777087639996635,\n    0.08944271909999157,\n    0,\n    0,\n    0.8197560612767679,\n    -0.7416840554408852,\n    0.1951800145897066,\n    0,\n    1.132212330751066,\n    -1.1439886848599183,\n    0.31796156093900735,\n    1.4188709070303882,\n    -6.058518452574962,\n    7.972029516100272,\n]\n\n\nsolution_x_qr3dbd = [\n    0.8944271909999159,\n    0.3903600291794133,\n    0.1850569931391044,\n    0.09550737092670301,\n    0.06520198622764671,\n    -0.4472135954999579,\n    0.7807200583588265,\n    0.3701139862782088,\n    0.19101474185340603,\n    0.1304039724552934,\n    0,\n    -0.48795003647426655,\n    0.7402279725564178,\n    0.38202948370681217,\n    0.26080794491058684,\n    0,\n    0,\n    -0.5299359348983446,\n    0.7003873867958221,\n    0.4781478990027425,\n    0,\n    0,\n    
0,\n    -0.5638286020497468,\n    0.8258918255501917,\n    0.447213595499958,\n    -0.3577708763999664,\n    0.08944271909999163,\n    0.819756061276768,\n    -0.7416840554408851,\n    0.19518001458970657,\n    1.1322123307510663,\n    -1.1439886848599186,\n    0.3179615609390067,\n    1.4188709070303884,\n    -6.058518452574962,\n    7.972029516100272,\n]\n\n\nsolution_x_eigenb = [\n    2.1880343004416827,\n    2.433369037415534,\n    2.4128467655642227,\n    2.4143168471412966,\n    2.4142066676219205,\n    2.414206682597956,\n    2.4143168514760567,\n    2.4128467763690584,\n    2.433369040912926,\n    2.1880343004587717,\n    0.963618832502572,\n    -0.19358332415060756,\n    -0.020571030789440843,\n    -0.0042978748491313435,\n    -0.0011189543814576976,\n    -0.0003256602342870186,\n    -0.00010143977363046447,\n    -3.309302901033653e-05,\n    -1.1092507161708885e-05,\n    -4.10236648587512e-06,\n    -9.758954956834986e-05,\n    0.9208420627147608,\n    -0.19113674746578155,\n    -0.019750356974082468,\n    -0.004087291838641473,\n    -0.0010576174015411869,\n    -0.00030654777315460436,\n    -9.524470464093356e-05,\n    -3.082138926676854e-05,\n    -1.1092450516891782e-05,\n    -2.1507171255294457e-05,\n    3.236210199985498e-05,\n    0.9240994322190695,\n    -0.19135572211934757,\n    -0.019818514674832036,\n    -0.004104765871306482,\n    -0.0010626629889412406,\n    -0.0003082323428087962,\n    -9.524506106163913e-05,\n    -3.30929923080829e-05,\n    -5.86920342958478e-06,\n    8.343154992559033e-06,\n    -1.7090950629164782e-06,\n    0.9238630466026214,\n    -0.19134060715511186,\n    -0.019813760575551793,\n    -0.004103453696629484,\n    -0.0010626616637818649,\n    -0.0003065476688168684,\n    -0.0001014390007803748,\n    -1.7629336348662287e-06,\n    2.414698832828228e-06,\n    -4.1012794468935585e-07,\n    1.7073395190219743e-07,\n    0.9238806428059877,\n    -0.19134186729883576,\n    -0.019813762593004474,\n    -0.004104766449316578,\n    
-0.001057618179089577,\n    -0.00032566103285368623,\n    -5.617848676115081e-07,\n    7.498487330424776e-07,\n    -1.1366246620226694e-07,\n    4.233248357697225e-08,\n    -1.8184553525805125e-08,\n    0.9238806405230912,\n    -0.19134060742073378,\n    -0.019818514337183922,\n    -0.004087292358165013,\n    -0.001118954298914211,\n    -1.8565094044360727e-07,\n    2.461558460853512e-07,\n    -2.9698560165568933e-08,\n    2.5691549193155624e-08,\n    4.188439547971367e-08,\n    1.7091126942261566e-07,\n    0.9238630462038411,\n    -0.19135572096178424,\n    -0.019750357283584624,\n    -0.004297874115889032,\n    -6.452367583021426e-08,\n    7.874956290802419e-08,\n    -2.175478191008932e-08,\n    -2.9611984365300305e-08,\n    -1.125976562800656e-07,\n    -4.101727958716196e-07,\n    -1.7068431405865914e-06,\n    0.9240994307748498,\n    -0.19113674668992328,\n    -0.020571030343105964,\n    -1.2720088883591045e-08,\n    5.7045828447717434e-08,\n    7.924431455583827e-08,\n    2.4584027025267894e-07,\n    7.494896735776724e-07,\n    2.41326518080458e-06,\n    8.34418638100877e-06,\n    3.236121345356557e-05,\n    0.9208420621116469,\n    -0.19358332383274754,\n    -9.410619198170363e-09,\n    -1.324699653531768e-08,\n    -6.476508265084364e-08,\n    -1.8558263456164044e-07,\n    -5.618026820278632e-07,\n    -1.7630007182357942e-06,\n    -5.868582984643352e-06,\n    -2.1506490807524302e-05,\n    -9.758904294524118e-05,\n    0.9636188326077207,\n]\n\n\nsolution_x_luksan12 = [\n    -2.6260067987163516,\n    6.896065319147662,\n    1.527892692614362,\n    1.5497440503992346,\n    2.2933547979396463,\n    1.6306208450523716,\n    1.5819176730889628,\n    2.32812470715209,\n    1.6291935486837537,\n    1.5831335363873629,\n    2.3316182375355337,\n    1.6290901468343557,\n    1.5831778731783577,\n    2.331746894500055,\n    1.6290866339965575,\n    1.5831791924745884,\n    2.3317508225088055,\n    1.6290863289641941,\n    1.5831794312419505,\n    2.3317513366797806,\n    
1.6290862573533014,\n    1.5831793883878833,\n    2.3317513422592953,\n    1.629086261632123,\n    1.5831793726762426,\n    2.3317512664646154,\n    1.6290862864991247,\n    1.5831794068996972,\n    2.331751192772075,\n    1.629086292750654,\n    1.5831793960520542,\n    2.331751254872966,\n    1.6290863143562366,\n    1.5831794727104964,\n    2.331751220286205,\n    1.6290863278083665,\n    1.5831794601591884,\n    2.3317513895695736,\n    1.6290862215089341,\n    1.583179580563746,\n    2.3317517718530487,\n    1.629086273471926,\n    1.5831794517394973,\n    2.3317514742466026,\n    1.629086326246853,\n    1.5831793647918584,\n    2.3317510961040324,\n    1.6290862893642488,\n    1.583179357650647,\n    2.3317509213902934,\n    1.629086379389047,\n    1.5831793466774813,\n    2.331751016773598,\n    1.6290862487394557,\n    1.5831795060981861,\n    2.331751350596469,\n    1.629086344775534,\n    1.583179237092884,\n    2.331750704884059,\n    1.6290861760511046,\n    1.583180031048655,\n    2.3317524781223806,\n    1.6290862227638085,\n    1.583179320520067,\n    2.3317511883963484,\n    1.62908615550431,\n    1.5831794393651841,\n    2.33175167156419,\n    1.6290861496589093,\n    1.583179491480598,\n    2.3317514438297446,\n    1.6290867286777644,\n    1.5831787218028068,\n    2.331749775035503,\n    1.629086341296889,\n    1.5831793785655173,\n    2.331751379257171,\n    1.6290862955034657,\n    1.5831794896813611,\n    2.331751501905547,\n    1.6290857775369425,\n    1.583179220493569,\n    2.331751008922027,\n    1.6290872482674832,\n    1.5831776309272405,\n    2.331747114910226,\n    1.6291104318632341,\n    1.5831327312118264,\n    2.3316489547138564,\n    1.629748307506696,\n    1.5818970811223332,\n    2.3289347056739356,\n    1.6468612471308757,\n    1.5492871064990756,\n    2.2555720960782186,\n    -1.2840500262457888,\n    2.551876632332705,\n    0.9810837136085581,\n]\n\nsolution_x_luksan13 = [\n    1.3086052241942845,\n    1.6216137111993358,\n    
2.2886144747619968,\n    1.34232518154836,\n    1.6619515759503343,\n    2.3751337553163605,\n    1.343855296385397,\n    1.6630483896975998,\n    2.377485954810846,\n    1.3438990216611229,\n    1.6630791019372382,\n    2.3775518416632186,\n    1.3439002468996393,\n    1.6630799506592178,\n    2.3775536420160925,\n    1.3439002707105718,\n    1.6630799637087308,\n    2.377553663793456,\n    1.343900274442869,\n    1.6630799703922405,\n    2.377553677510941,\n    1.343900273735809,\n    1.663079968764641,\n    2.3775536820493994,\n    1.3439002754790357,\n    1.6630799760584611,\n    2.3775537070702106,\n    1.3439002833097773,\n    1.6630799799354958,\n    2.377553710130499,\n    1.343900279016161,\n    1.6630799791188497,\n    2.377553709151317,\n    1.3439002771699002,\n    1.6630799775994893,\n    2.3775537046342916,\n    1.3439002881835356,\n    1.6630799956367304,\n    2.377553757775651,\n    1.3439002886986675,\n    1.663079991183404,\n    2.377553741510781,\n    1.3439002855650906,\n    1.6630799931177906,\n    2.3775537587908215,\n    1.3439002747672082,\n    1.6630799713724573,\n    2.377553681610918,\n    1.3439002854677142,\n    1.6630799870142512,\n    2.377553735404843,\n    1.3439002800895123,\n    1.6630799851565248,\n    2.3775537353686165,\n    1.3439002784725658,\n    1.663079982758718,\n    2.377553724300102,\n    1.3439002839408525,\n    1.663079984436528,\n    2.377553726731763,\n    1.3439002742018746,\n    1.6630799686940994,\n    2.377553673295027,\n    1.343900285055898,\n    1.663079986851812,\n    2.3775537287002404,\n    1.3439002797888209,\n    1.6630799728327539,\n    2.377553682977633,\n    1.3439002761687657,\n    1.6630799673959926,\n    2.3775536708099745,\n    1.3439002798986437,\n    1.6630799763732456,\n    2.3775537001976423,\n    1.3439002780805065,\n    1.6630799801142993,\n    2.3775537125819097,\n    1.3439002916123894,\n    1.6630800028052062,\n    2.3775537813761924,\n    1.3439003043419797,\n    1.6630800585945529,\n    
2.3775539515552806,\n    1.3439012394354732,\n    1.6630827445655045,\n    2.3775621184439015,\n    1.3439344570959444,\n    1.6631783889211809,\n    2.37785290158486,\n    1.3451213454947373,\n    1.666597519847485,\n    2.38828410134482,\n    1.3886122940189465,\n    1.7939836213544151,\n    2.885811280705054,\n    4.723092522164855,\n    5.389923139518635,\n]\n\nsolution_x_luksan14 = [\n    -0.692015438465357,\n    0.4736192819939741,\n    1.2412079054782652,\n    -0.4783782506182583,\n    0.23882842116612296,\n    1.094079486551747,\n    -0.2641170714882838,\n    0.08143516139066553,\n    1.0679283525636625,\n    -0.1494417718117012,\n    0.033925047353142675,\n    1.0809287152115696,\n    -0.13657207013818243,\n    0.02960520381904979,\n    1.0827798919700173,\n    -0.1362649003784561,\n    0.029450043690314048,\n    1.0828484754308758,\n    -0.13625660575465595,\n    0.029445194398704543,\n    1.0828506279698982,\n    -0.13625635947650283,\n    0.029445045430998882,\n    1.082850692128227,\n    -0.13625634849900412,\n    0.029445040594224484,\n    1.0828506890538272,\n    -0.13625634095713987,\n    0.029445038872369666,\n    1.0828506892763552,\n    -0.1362563401019553,\n    0.029445038661562405,\n    1.0828506865668028,\n    -0.1362563352007162,\n    0.029445038297226032,\n    1.0828506897645718,\n    -0.1362563399662581,\n    0.029445038930055314,\n    1.0828506855121347,\n    -0.13625633491870875,\n    0.029445037517807717,\n    1.0828506970059786,\n    -0.13625635176495168,\n    0.029445039882406293,\n    1.0828506971123373,\n    -0.13625635316124712,\n    0.029445040472916795,\n    1.0828506840532126,\n    -0.13625633315729457,\n    0.029445037711023858,\n    1.0828506931348947,\n    -0.13625634492000505,\n    0.029445039530404147,\n    1.082850698256033,\n    -0.1362563552104686,\n    0.029445040416000965,\n    1.0828506949203678,\n    -0.13625634915587218,\n    0.02944504055043263,\n    1.0828506838415628,\n    -0.13625633328649378,\n    
0.029445037473531936,\n    1.0828506996527274,\n    -0.1362563547825522,\n    0.029445040998217188,\n    1.0828506836226952,\n    -0.13625633328336106,\n    0.029445037480123,\n    1.0828506844275172,\n    -0.13625633160755746,\n    0.029445037363800216,\n    1.0828506910381746,\n    -0.13625634142989135,\n    0.02944503900512941,\n    1.0828506970833573,\n    -0.13625635262199126,\n    0.029445040274926663,\n    1.0828506858747455,\n    -0.13625633628442352,\n    0.029445037869996665,\n    1.0828506843744086,\n    -0.13625633006537194,\n    0.029445038276720837,\n    1.082850772934407,\n    -0.13625645049722282,\n    0.029445066824156065,\n    1.0828531756767654,\n    -0.13625960631817075,\n    0.029445905929758334,\n    1.082930785041219,\n    -0.13636157993795717,\n    0.029472948037119334,\n    1.0853841906187118,\n    -0.13957177103726676,\n    0.03033477911863786,\n    0.9997884952922044,\n    0.03012334667228848,\n    0.03001755818766747,\n]\n\n\nsolution_x_luksan15 = [\n    -2.8128120376543353,\n    1.2947106505184869,\n    -2.81440005752448,\n    0.5152023646747894,\n    -2.0089506145318516,\n    1.0515415361025104,\n    -3.4386919581246973,\n    0.5351040934480955,\n    -1.6697901903837928,\n    1.1273918094584376,\n    -3.0058465049859246,\n    0.5987020506877548,\n    -2.1273657980099876,\n    0.9192000873982219,\n    -3.4535594653315687,\n    0.5623710862956811,\n    -1.9386092963220432,\n    0.9821839081455959,\n    -3.5093911936842477,\n    0.5501687807342822,\n    -2.1778372107639,\n    0.9063872377904626,\n    -3.424790911399904,\n    0.5665652947300606,\n    -2.279179522364104,\n    0.8685030719682756,\n    -3.145617106466628,\n    0.6099286765580767,\n    -1.9284952155742618,\n    0.9691898205061202,\n    -3.016417857801295,\n    0.6212423706928658,\n    -1.9216427599092167,\n    0.9730403697373208,\n    -3.285655214260605,\n    0.5820210274753043,\n    -2.1458132202272386,\n    0.9058785725661358,\n    -2.958575186535638,\n    
0.6348101011039402,\n    -1.9602973787877798,\n    0.952916227779147,\n    -3.2178165098362,\n    0.5944426358988449,\n    -1.72911123316871,\n    1.0594397548002128,\n    -3.1978737322906863,\n    0.5844520814478443,\n    -1.7679802490196521,\n    1.0524244273162242,\n    -3.146733476316577,\n    0.5902404606592925,\n    -2.128977747267094,\n    0.914704760106901,\n    -3.2345320433357836,\n    0.5920343641247361,\n    -2.2610082037405834,\n    0.8670334110534724,\n    -3.4060367708426935,\n    0.576246511012118,\n    -2.355877477880136,\n    0.8412164300993267,\n    -3.081758650591148,\n    0.6241626014976729,\n    -1.6525049806156873,\n    1.081267498611019,\n    -2.963721310470893,\n    0.6194531536445858,\n    -1.506685332675987,\n    1.1746494074889837,\n    -2.9257857154408438,\n    0.6141113169233168,\n    -2.1021910871806577,\n    0.9219352856117872,\n    -3.3401105799593105,\n    0.5774971684592886,\n    -1.7547078621368493,\n    1.0532221932141288,\n    -3.3352090067149023,\n    0.5658942245431572,\n    -2.353376910071777,\n    0.854027290091373,\n    -3.2416948400866707,\n    0.5965555845011213,\n    -1.9356880509818681,\n    0.9699374813403614,\n    -3.292568634235838,\n    0.580974296002741,\n    -1.6861988438404256,\n    1.0857879072493273,\n    -3.1295933491980374,\n    0.5904364080984017,\n    -1.9200181272009214,\n    0.9895807082139652,\n    -3.2489040016986324,\n    0.582149002451738,\n    -0.5601900925614515,\n    2.4870578822567175,\n    -3.5588729390698672,\n    -0.4666353059476335,\n]\n\nsolution_x_luksan16 = [\n    10.23347504140921,\n    20.59876890029168,\n    -35.3558957906405,\n    14.159724748767388,\n    -6.192790944245422,\n    6.904260897808911,\n    -12.426375656338006,\n    7.916405145745939,\n    9.883332945956017,\n    -7.763552251913891,\n    9.829215962712826,\n    -5.460412966230336,\n    -2.0247375676154316,\n    2.292011784517189,\n    -8.854644657757955,\n    6.501718109308879,\n    -10.00818055426963,\n    
6.969493641831156,\n    5.794385035460475,\n    -4.827934342605883,\n    -0.3589290720778674,\n    1.7351238328795198,\n    -20.46786870905146,\n    15.073627999686598,\n    -22.38633414526394,\n    14.87045990266218,\n    -9.108444080860473,\n    5.493242761973028,\n    -4.436790715837233,\n    3.35863879246109,\n    -12.778497913669044,\n    9.51430783434134,\n    17.974890839174535,\n    -14.543141451864756,\n    -0.878903552061843,\n    3.937581796604095,\n    -37.95441551229194,\n    27.217302740157244,\n    1.9384003997032608,\n    -5.0732916754344854,\n    -9.834240436833431,\n    9.928282181783743,\n    24.901369369122225,\n    -20.6810518922668,\n    5.609710258509786,\n    0.40845702633880704,\n    7.517485147363342,\n    -6.744213821972444,\n    6.283831933809041,\n    -2.719582209866709,\n    1.1727545558714283,\n    -0.5901766790421645,\n    -10.83801088444533,\n    8.63096398021895,\n    4.262706411344401,\n    -4.302452961158821,\n    -1.1028476495022923,\n    2.413241731236865,\n    -4.9611988982883455,\n    3.2905463368286787,\n    -11.011449594878954,\n    8.354169868673353,\n    17.534906240840428,\n    -14.074846099952074,\n    1.5433478836011245,\n    1.9967416934301132,\n    -5.0398763810168425,\n    2.8962555845029465,\n    -2.503087792307807,\n    2.18971326359533,\n    -5.425121985743007,\n    4.100312921945939,\n    -7.90674647105912,\n    5.736740005101778,\n    -10.955014606035165,\n    7.825133686094932,\n    13.983943593069979,\n    -11.161214770039159,\n    4.399884927901469,\n    -0.7147360927864606,\n    16.135713247282684,\n    -12.343832004737795,\n    -6.450092935863761,\n    7.476113508806722,\n    -22.97438911325564,\n    15.605814430808019,\n    24.62552916846743,\n    -20.02790069712666,\n    32.528132112236335,\n    -20.037974911481278,\n    -1.5215783873530375,\n    3.528694334566693,\n    7.356145013974544,\n    -6.400505214585759,\n    -0.5849689371220534,\n    2.3004991729965987,\n    -7.749666453881994,\n    
5.308798604551641,\n    -12.801456405933285,\n    9.38466573197068,\n]\n\nsolution_x_luksan17 = [\n    -0.8437781517444214,\n    5.211601301412691,\n    -0.8255686986099557,\n    -1.1224808830025863,\n    -0.9115016416553087,\n    -1.0449996789508758,\n    -0.8370067076638087,\n    -1.1138351780732039,\n    -0.8829775814636974,\n    -1.0662539739396077,\n    -1.0011441498985942,\n    -0.9971962219303508,\n    -0.9921060603499269,\n    -1.0107562326286939,\n    -0.9812584988122448,\n    -1.0142511875210711,\n    -0.9884985874862595,\n    -1.0098353738374723,\n    -0.9872157109718606,\n    -1.0112133037704427,\n    -0.9864952703702744,\n    -1.0113724963264314,\n    -0.9870842717327896,\n    -1.011035340568662,\n    -0.9869391605540793,\n    -1.0111661235119436,\n    -0.9868977564988685,\n    -1.0111670258624144,\n    -0.9869449215955661,\n    -1.0111419398459263,\n    -0.9869301696083717,\n    -1.0111537677001405,\n    -0.9869283405594838,\n    -1.0111528331131907,\n    -0.986932002795563,\n    -1.0111510466790732,\n    -0.9869305922818844,\n    -1.0111520809826104,\n    -0.9869305749770071,\n    -1.011151923374323,\n    -0.9869308492975264,\n    -1.0111518042563021,\n    -0.9869307203053385,\n    -1.0111518914759727,\n    -0.9869307309805547,\n    -1.0111518713966636,\n    -0.9869307504505271,\n    -1.011151864732727,\n    -0.9869307391816526,\n    -1.0111518711394447,\n    -0.986930743051711,\n    -1.0111518679059937,\n    -0.9869307405857761,\n    -1.011151871272733,\n    -0.9869307406810994,\n    -1.0111518690271912,\n    -0.9869307442357795,\n    -1.0111518679286133,\n    -0.9869307403926516,\n    -1.0111518706700777,\n    -0.9869307426486206,\n    -1.011151868044014,\n    -0.9869307395007838,\n    -1.0111518726198288,\n    -0.9869307376627915,\n    -1.0111518716425005,\n    -0.9869307459735903,\n    -1.0111518650959377,\n    -0.9869307441838526,\n    -1.011151868558413,\n    -0.9869307370598098,\n    -1.011151873493444,\n    -0.9869307388844694,\n    
-1.011151870953494,\n    -0.9869307414667764,\n    -1.0111518699114064,\n    -0.9869307413827505,\n    -1.0111518693727195,\n    -0.9869307428780796,\n    -1.0111518683864686,\n    -0.986930742414142,\n    -1.0111518688900905,\n    -0.9869307423425782,\n    -1.0111518687612688,\n    -0.9869307388492422,\n    -1.0111518725840953,\n    -0.9869307379963231,\n    -1.0111518716476386,\n    -0.9869307390986436,\n    -1.0111518722894763,\n    -0.9869307349748873,\n    -1.011151874864258,\n    -0.986930740183915,\n    -1.0111518696551045,\n    -0.9869307419219207,\n    -1.011151869862698,\n    -0.9869307393375487,\n    -1.0111518711716971,\n    -0.98693074172968,\n    -1.0111518690786507,\n]\n\nsolution_x_luksan21 = [\n    -6.072657993990953,\n    -11.151678306331249,\n    -15.281680185264895,\n    -18.553572785910436,\n    -21.08878192552139,\n    -23.018420220928494,\n    -24.46705052798465,\n    -25.543512880064885,\n    -26.337637149910563,\n    -26.920674289315414,\n    -27.347575624380077,\n    -27.65988732407303,\n    -27.88857759276986,\n    -28.05649817926434,\n    -28.18038720009252,\n    -28.272439283160423,\n    -28.341505869674798,\n    -28.39399571405599,\n    -28.434542523545463,\n    -28.466491445203996,\n    -28.492253969229775,\n    -28.51356599746666,\n    -28.53167622506297,\n    -28.54748213206502,\n    -28.561629796383773,\n    -28.574583635599762,\n    -28.58667947877867,\n    -28.59815838663125,\n    -28.609194634285124,\n    -28.61991238997938,\n    -28.630402327046635,\n    -28.640728722950577,\n    -28.650938105649047,\n    -28.661061977374047,\n    -28.671123050731868,\n    -28.68113826085573,\n    -28.69112022311497,\n    -28.701078262087883,\n    -28.71101815506324,\n    -28.72094477397387,\n    -28.730862903992644,\n    -28.740776507637953,\n    -28.75068864243215,\n    -28.760599670061964,\n    -28.77051000987731,\n    -28.780418360465934,\n    -28.790323772732222,\n    -28.80022409430249,\n    -28.810119012418326,\n    -28.82001016509087,\n 
   -28.829900708980315,\n    -28.839793590776342,\n    -28.849689118959287,\n    -28.859586440413942,\n    -28.86948336412596,\n    -28.879378174667828,\n    -28.88926990461213,\n    -28.8991581591508,\n    -28.909043599093003,\n    -28.918925344790882,\n    -28.92880159055073,\n    -28.938669244304233,\n    -28.94852535832674,\n    -28.95836634260835,\n    -28.96818485329044,\n    -28.97796943084888,\n    -28.98770372929353,\n    -28.997369201223027,\n    -29.006941800115108,\n    -29.016389375603524,\n    -29.025665682937056,\n    -29.03470507627482,\n    -29.04341491397447,\n    -29.051663075623473,\n    -29.059265584091445,\n    -29.065967527124695,\n    -29.07141754001239,\n    -29.07512741421742,\n    -29.07641564980266,\n    -29.074334399240477,\n    -29.067568644255143,\n    -29.054295689963208,\n    -29.0319862391214,\n    -28.997130480357225,\n    -28.944864342494807,\n    -28.86845848063996,\n    -28.758620076438536,\n    -28.602552441434927,\n    -28.38270523359173,\n    -28.075144666563702,\n    -27.647479062397462,\n    -27.056331868057303,\n    -26.24447489795629,\n    -25.137976027248023,\n    -23.64410279764132,\n    -21.651318090974016,\n    -19.03331370504794,\n    -15.65922569153753,\n    -11.41085934966044,\n    -6.2035977373817195,\n]\n\n\nsolution_x_luksan22 = [\n    0.960000892597201,\n    0.9188402401000288,\n    0.8346999741225976,\n    0.6824138623323005,\n    0.44429684165068956,\n    0.16679643843737746,\n    -0.023004295169824662,\n    0.015327433907712216,\n    -0.009931088912543472,\n    0.006684223516831371,\n    -0.004411575260245885,\n    0.0029571424982889867,\n    -0.001966221180203038,\n    0.001316579307305194,\n    -0.0008787716047566526,\n    0.0005864520202429219,\n    -0.00039319743428295955,\n    0.000262756179445633,\n    -0.0001760817809970661,\n    0.00011752859355313591,\n    -7.938290213698181e-05,\n    5.4582276287576205e-05,\n    -3.676956244840184e-05,\n    2.2892784894905886e-05,\n    -1.4844959572369287e-05,\n   
 8.978467841981641e-06,\n    -7.419590428825042e-06,\n    5.0134136153247085e-06,\n    -4.8296331309899045e-06,\n    4.4148810133672814e-06,\n    -5.451937781671956e-06,\n    5.28902886393822e-06,\n    -5.486579973708986e-06,\n    4.589797533333856e-06,\n    -3.686156736885388e-06,\n    1.3813327052080713e-06,\n    -2.875286216624316e-07,\n    -1.966618190117126e-07,\n    -5.984127105225435e-08,\n    -7.936593253722943e-07,\n    2.1703743349261694e-07,\n    -8.894851154156264e-07,\n    3.573122556113253e-07,\n    8.966129390508038e-08,\n    -1.3677877479650928e-06,\n    6.43379960693636e-07,\n    -6.357331910140075e-07,\n    -1.7198971213860295e-07,\n    7.720236116499906e-07,\n    -1.5538185090359862e-06,\n    8.378707374504217e-07,\n    -2.73224811890867e-07,\n    1.0658698182255397e-07,\n    -7.054426255706567e-07,\n    5.652296338005435e-08,\n    2.3167064968509738e-07,\n    -7.1526763080569e-07,\n    1.1597873712533682e-08,\n    -2.863395818025362e-07,\n    -8.633020826805924e-07,\n    4.2697411766659695e-07,\n    -1.1541837705801389e-06,\n    4.7551953352402166e-07,\n    3.5199581610952056e-07,\n    -2.5742042022428385e-06,\n    2.499335480925905e-06,\n    -4.079663388392796e-06,\n    3.723178696281719e-06,\n    -2.919751243874922e-06,\n    1.4829783071179273e-06,\n    -1.1191190311723908e-06,\n    5.588506976522976e-07,\n    -4.825768343696162e-07,\n    9.863313432267673e-07,\n    -1.3577296762331155e-06,\n    1.0976659429101464e-06,\n    -1.3266509165639996e-06,\n    8.804177825565544e-07,\n    -1.59655791383051e-06,\n    2.2532672723180185e-07,\n    -1.5600238860251166e-06,\n    1.7351280778268136e-06,\n    -1.618517725020847e-06,\n    1.287719767348254e-06,\n    -1.2077855625401572e-06,\n    -1.226034002734674e-06,\n    1.2900045894163097e-06,\n    -1.883605391514802e-06,\n    -1.279989121101194e-06,\n    1.7054561610327153e-06,\n    -4.501630088393006e-06,\n    3.38756209930697e-06,\n    -5.221884694101911e-06,\n    6.8241273128299335e-06,\n    
-7.215782067624106e-06,\n    4.201540035034105e-06,\n    -3.7683204911021953e-06,\n    2.681881990832518e-07,\n    7.907138005741486e-07,\n    3.0687261099791043,\n]\n\n\nsolution_x_hydcar20 = [\n    2.6759127057952494e-07,\n    0.0020075973345538593,\n    0.9979921350741772,\n    1.1074382518158015e-06,\n    0.003926319294260064,\n    0.9960725732674882,\n    4.473878508666662e-06,\n    0.007432799593801248,\n    0.9925627265276902,\n    1.791855353550396e-05,\n    0.013805819144958344,\n    0.9861762623015062,\n    7.125759668355649e-05,\n    0.025273323829558065,\n    0.9746554185737586,\n    0.0002803534439901862,\n    0.04553620774789967,\n    0.9541834388081104,\n    0.001082782951168712,\n    0.08018717893154169,\n    0.9187300381172898,\n    0.0040498954829321235,\n    0.13606692468745604,\n    0.859883179829612,\n    0.014346338806928278,\n    0.21713770052872539,\n    0.7685159606643465,\n    0.04646668013286012,\n    0.31340489138177297,\n    0.640128428485367,\n    0.05166573387892863,\n    0.42190983200015447,\n    0.526424434120917,\n    0.05782376960889372,\n    0.5508311256800345,\n    0.39134510471107226,\n    0.0640697514942442,\n    0.6757845188863061,\n    0.26014572961945,\n    0.07002653957502589,\n    0.7737845233677303,\n    0.15618893705724401,\n    0.07687228280882921,\n    0.836427017007432,\n    0.08670070018373897,\n    0.08838584185008973,\n    0.8661393596329539,\n    0.04547479851695662,\n    0.11319983773418042,\n    0.8640793740698467,\n    0.022720788195972993,\n    0.16949917403437423,\n    0.8198575022045703,\n    0.010643323761055593,\n    0.2889793132934084,\n    0.7066247193552677,\n    0.004395967351324038,\n    0.4999998216058201,\n    0.4986616017769641,\n    0.0013385766172157153,\n    138.21584043438,\n    138.13772749931923,\n    137.99511335767673,\n    137.73620245773773,\n    137.27057972483576,\n    136.44580451915726,\n    135.01759125081972,\n    132.61817941469988,\n    128.72481163454674,\n    
122.65561152670969,\n    119.34453517365907,\n    115.7706674624033,\n    112.60284049804272,\n    110.23710719573587,\n    108.6126130079421,\n    107.35503374607997,\n    105.88999629459819,\n    103.3849389558914,\n    98.83590533616024,\n    92.07928040428027,\n    290.69252437645963,\n    290.65561780338373,\n    290.59025193849226,\n    290.4776378323827,\n    290.2922675253222,\n    290.0081841660331,\n    289.615549643397,\n    289.14599494303985,\n    288.7724567817456,\n    277.8716922412405,\n    280.6638306878377,\n    284.24018735028017,\n    287.7729960147694,\n    290.52761603695006,\n    292.2929262974615,\n    293.2381872872007,\n    293.71591921265576,\n    294.59496871170194,\n    298.03881543410006,\n]\n\nsolution_x_hydcar6 = [\n    0.005639001019299217,\n    0.13023671278902904,\n    0.8641242861916716,\n    0.020668138451243257,\n    0.22352172363375894,\n    0.7558101379149978,\n    0.06627260583120169,\n    0.3309386117749258,\n    0.6027887823938726,\n    0.11820814762835632,\n    0.4418686949212894,\n    0.43992315745035443,\n    0.24917284487890107,\n    0.500513105207918,\n    0.2503140499131811,\n    0.49624066598713407,\n    0.4131755248073142,\n    0.09058380920555179,\n    132.63808144202352,\n    127.84951651511513,\n    120.39649297520073,\n    113.59593052273564,\n    104.00971299398577,\n    93.2001658166645,\n    288.8739911256674,\n    288.5263717741471,\n    275.6104698527219,\n    280.00166220797604,\n    289.1545657231987,\n]\n\nsolution_x_methane = [\n    107.7653795798063,\n    0.09225723236421929,\n    0.9077427676357807,\n    102.68498908598441,\n    0.1821995285312063,\n    0.8178004714687936,\n    97.71772476055263,\n    0.28421889854366617,\n    0.7157811014563338,\n    96.57726115013135,\n    0.30530731675985745,\n    0.6946926832401424,\n    94.26309926727772,\n    0.3566490103973165,\n    0.6433509896026832,\n    89.98899748591788,\n    0.46779111102816195,\n    0.5322088889718375,\n    83.97342066984532,\n    
0.6573895966863115,\n    0.34261040331368847,\n    78.32157508655418,\n    0.8759450903481356,\n    0.12405490965186436,\n    886.7137742582912,\n    910.3656929177117,\n    922.1591291059583,\n    926.0766775727482,\n    935.1735260591255,\n    952.4236258294623,\n    975.0192103423753,\n]\n\n\nCARTIS_ROBERTS_PROBLEMS = {\n    \"arglale\": {\n        # arglale is the same as linear_full_rank with specific settings\n        \"fun\": partial(linear_full_rank, dim_out=400),\n        \"start_x\": [1] * 100,\n        \"solution_x\": [-0.99999952] * 100,\n        \"start_criterion\": 700,\n        \"solution_criterion\": 300,\n    },\n    \"arglble\": {\n        # arglble is the same as linear_rank_one with specific settings\n        \"fun\": partial(linear_rank_one, dim_out=400),\n        \"start_x\": [1] * 100,\n        \"solution_x\": solution_x_arglble,\n        \"start_criterion\": 5.460944e14,\n        \"solution_criterion\": 99.62547,\n    },\n    \"argtrig\": {\n        \"fun\": argtrig,\n        \"start_x\": [1 / 100] * 100,\n        \"solution_x\": [0] * 100,\n        \"start_criterion\": 32.99641,\n        \"solution_criterion\": 0,\n    },\n    \"artif\": {\n        \"fun\": artif,\n        \"start_x\": [1] * 100,\n        \"solution_x\": None,\n        \"start_criterion\": 36.59115,\n        \"solution_criterion\": 0,\n    },\n    \"arwhdne\": {\n        \"fun\": arwhdne,\n        \"start_x\": [1] * 100,\n        \"solution_x\": [0.706011] * 99 + [0],\n        \"start_criterion\": 495,\n        \"solution_criterion\": 27.66203,\n    },\n    \"bdvalues\": {\n        \"fun\": bdvalues,\n        \"start_x\": get_start_points_bdvalues(100, 1000),\n        \"solution_x\": solution_x_bdvalues,\n        \"start_criterion\": 1.943417e7,\n        \"solution_criterion\": 0,\n    },\n    \"bratu_2d\": {\n        \"fun\": partial(bratu_2d, alpha=4),\n        \"start_x\": [0] * 64,\n        \"solution_x\": solution_x_bratu_2d,\n        \"start_criterion\": 0.1560738,\n  
      \"solution_criterion\": 0,\n    },\n    \"bratu_2d_t\": {\n        \"fun\": partial(bratu_2d, alpha=6.80812),\n        \"start_x\": [0] * 64,\n        \"solution_x\": solution_x_bratu_2d_t,\n        \"start_criterion\": 0.4521311,\n        \"solution_criterion\": 1.8534736e-05,\n    },\n    \"bratu_3d\": {\n        \"fun\": partial(bratu_3d, alpha=6.80812),\n        \"start_x\": [0] * 27,\n        \"solution_x\": solution_x_bratu_3d,\n        \"start_criterion\": 4.888529,\n        \"solution_criterion\": 0,\n    },\n    \"brownale\": {\n        \"fun\": brown_almost_linear,\n        \"start_x\": [0.5] * 100,\n        \"solution_x\": [1] * 100,\n        \"start_criterion\": 2.524757e5,\n        \"solution_criterion\": 0,\n    },\n    \"broydn_3d\": {\n        \"fun\": broydn_3d,\n        \"start_x\": [-1] * 100,\n        \"solution_x\": solution_x_broydn_3d,\n        \"start_criterion\": 111,\n        \"solution_criterion\": 0,\n    },\n    \"cbratu_2d\": {\n        \"fun\": cbratu_2d,\n        \"start_x\": [0] * (2 * 5 * 5),\n        \"solution_x\": solution_x_cbratu_2d,\n        \"start_criterion\": 0.4822531,\n        \"solution_criterion\": 0,\n    },\n    \"broydn_bd\": {\n        \"fun\": broydn_bd,\n        \"start_x\": [1] * 100,\n        \"solution_x\": solution_x_broydn_bd,\n        \"start_criterion\": 2404,\n        \"solution_criterion\": 0,\n    },\n    \"chandheq\": {\n        \"fun\": chandheq,\n        \"start_x\": (np.arange(1, 101) / 100).tolist(),\n        \"solution_x\": None,\n        \"start_criterion\": 6.923365,\n        \"solution_criterion\": 0,\n    },\n    \"chemrcta\": {\n        \"fun\": chemrcta,\n        \"start_x\": [1] * 100,\n        \"solution_x\": None,\n        \"start_criterion\": 3.0935,\n        \"solution_criterion\": 0,\n        \"bounds\": Bounds(lower=np.concatenate([np.zeros(50), 1e-6 * np.ones(50)])),\n    },\n    \"chemrctb\": {\n        \"fun\": chemrctb,\n        \"start_x\": [1] * 100,\n        
\"solution_x\": solution_x_chemrctb,\n        \"start_criterion\": 1.446513,\n        \"solution_criterion\": 1.404424e-3,\n        \"bounds\": Bounds(lower=1e-6 * np.ones(100)),\n    },\n    \"chnrsbne\": {\n        \"fun\": chnrsbne,\n        \"start_x\": [-1] * 50,\n        \"solution_x\": [1] * 50,\n        \"start_criterion\": 7635.84,\n        \"solution_criterion\": 0,\n    },\n    \"drcavty1\": {\n        \"fun\": partial(drcavty, r=500),\n        \"start_x\": [0] * 100,\n        \"solution_x\": None,\n        \"start_criterion\": 0.4513889,\n        \"solution_criterion\": 0,\n    },\n    \"drcavty2\": {\n        \"fun\": partial(drcavty, r=1000),\n        \"start_x\": [0] * 100,\n        \"solution_x\": solution_x_drcavty2,\n        \"start_criterion\": 0.4513889,\n        \"solution_criterion\": 3.988378e-4,\n    },\n    \"drcavty3\": {\n        \"fun\": partial(drcavty, r=4500),\n        \"start_x\": [0] * 100,\n        \"solution_x\": solution_x_drcavty3,\n        \"start_criterion\": 0.4513889,\n        \"solution_criterion\": 0,\n    },\n    \"eigena\": {\n        \"fun\": partial(eigen, param=np.diag(np.arange(1, 11))),\n        \"start_x\": [1] * 10 + np.eye(10).flatten().tolist(),\n        \"solution_x\": [*np.arange(1, 11).tolist(), 1] + ([0] * 10 + [1]) * 9,\n        \"start_criterion\": 285,\n        \"solution_criterion\": 0,\n        \"bounds\": Bounds(lower=np.zeros(110)),\n    },\n    \"eigenb\": {\n        \"fun\": partial(\n            eigen, param=np.diag(2 * np.ones(10)) + np.diag(-np.ones(9), k=1)\n        ),\n        \"start_x\": [1] * 10 + np.eye(10).flatten().tolist(),\n        \"solution_x\": solution_x_eigenb,\n        \"start_criterion\": 19,\n        \"solution_criterion\": 1.55654284,\n        # we suspect a typo in Cartis and Roberts (2019);\n        # according to table 3 in their paper, the minimum is at 0.\n    },\n    \"flosp2hh\": {\n        \"fun\": partial(\n            flosp2,\n            a=np.array([1, 0, -1], 
dtype=np.int64),\n            b=np.array([1, 0, -1], dtype=np.int64),\n            ra=1e7,\n        ),\n        \"start_x\": [0] * 59,\n        \"solution_x\": None,  # multiple argmins\n        \"start_criterion\": 519,\n        \"solution_criterion\": 1 / 3,\n    },\n    \"flosp2hl\": {\n        \"fun\": partial(\n            flosp2,\n            a=np.array([1, 0, -1], dtype=np.float64),\n            b=np.array([1, 0, -1], dtype=np.float64),\n            ra=1e3,\n        ),\n        \"start_x\": [0] * 59,\n        \"solution_x\": None,  # multiple argmins\n        \"start_criterion\": 519,\n        \"solution_criterion\": 1 / 3,\n    },\n    \"flosp2hm\": {\n        \"fun\": partial(\n            flosp2,\n            a=np.array([1, 0, -1], dtype=np.float64),\n            b=np.array([1, 0, -1], dtype=np.float64),\n            ra=1e5,\n        ),\n        \"start_x\": [0] * 59,\n        \"solution_x\": None,  # multiple argmins\n        \"start_criterion\": 519,\n        \"solution_criterion\": 1 / 3,\n    },\n    \"flosp2th\": {\n        \"fun\": partial(\n            flosp2,\n            a=np.array([0, 1, 0], dtype=np.float64),\n            b=np.array([0, 1, 1], dtype=np.float64),\n            ra=1e7,\n        ),\n        \"start_x\": [0] * 59,\n        \"solution_x\": None,  # multiple argmins\n        \"start_criterion\": 516,\n        \"solution_criterion\": 0,\n    },\n    \"flosp2tl\": {\n        \"fun\": partial(\n            flosp2,\n            a=np.array([0, 1, 0], dtype=np.float64),\n            b=np.array([0, 1, 1], dtype=np.float64),\n            ra=1e3,\n        ),\n        \"start_x\": [0] * 59,\n        \"solution_x\": None,  # multiple argmins\n        \"start_criterion\": 516,\n        \"solution_criterion\": 0,\n    },\n    \"flosp2tm\": {\n        \"fun\": partial(\n            flosp2,\n            a=np.array([0, 1, 0], dtype=np.float64),\n            b=np.array([0, 1, 1], dtype=np.float64),\n            ra=1e5,\n        ),\n        
\"start_x\": [0] * 59,\n        \"solution_x\": None,  # multiple argmins\n        \"start_criterion\": 516,\n        \"solution_criterion\": 0,\n    },\n    \"freurone\": {\n        \"fun\": freurone,\n        \"start_x\": [0.5, -2] + [0] * 98,\n        \"solution_x\": solution_x_freurone,\n        \"start_criterion\": 9.95565e4,\n        \"solution_criterion\": 1.196458e4,\n    },\n    \"hatfldg\": {\n        \"fun\": hatfldg,\n        \"start_x\": [1] * 25,\n        \"solution_x\": [0] * 11 + [-1, 1] + [0] * 12,\n        \"start_criterion\": 27,\n        \"solution_criterion\": 0,\n    },\n    \"hydcar20\": {\n        \"fun\": partial(hydcar, n=20, m=3, k=9),\n        \"start_x\": get_start_points_hydcar20(),\n        \"solution_x\": solution_x_hydcar20,\n        \"start_criterion\": 1341.663,\n        \"solution_criterion\": 0,\n    },\n    \"hydcar6\": {\n        \"fun\": partial(hydcar, n=6, m=3, k=2),\n        \"start_x\": get_start_points_hydcar6(),\n        \"solution_x\": solution_x_hydcar6,\n        \"start_criterion\": 704.1073,\n        \"solution_criterion\": 0,\n    },\n    \"integreq\": {\n        \"fun\": integreq,\n        \"start_x\": (np.arange(1, 101) / 101 * (np.arange(1, 101) / 101 - 1)).tolist(),\n        \"solution_x\": solution_x_integreq,\n        \"start_criterion\": 0.5730503,\n        \"solution_criterion\": 0,\n    },\n    \"luksan11\": {\n        \"fun\": luksan11,\n        \"start_x\": [-0.8] * 100,\n        \"solution_x\": [1] * 100,\n        \"start_criterion\": 626.0640,\n        \"solution_criterion\": 0,\n    },\n    \"luksan12\": {\n        \"fun\": luksan12,\n        \"start_x\": [-1] * 98,\n        \"solution_x\": None,\n        \"start_criterion\": 3.2160e4,\n        \"solution_criterion\": None,\n        # we found a lower minimum than Cartis and Roberts (2019) at 1651.837;\n        # according to table 3 in their paper, the minimum is at 4292.197.\n        # We suspect, however, that the true optimum is even lower.\n      
  # That is why we disable this test function for the time being.\n    },\n    \"luksan13\": {\n        \"fun\": luksan13,\n        \"start_x\": [-1] * 98,\n        \"solution_x\": solution_x_luksan13,\n        \"start_criterion\": 6.4352e4,\n        \"solution_criterion\": 24949.67040503685711883,\n        # we found a lower minimum than Cartis and Roberts (2019);\n        # according to table 3 in their paper, the minimum is at 25188.86\n    },\n    \"luksan14\": {\n        \"fun\": luksan14,\n        \"start_x\": [-1] * 98,\n        \"solution_x\": solution_x_luksan14,\n        \"start_criterion\": 2.6880e4,\n        \"solution_criterion\": 123.9235,\n    },\n    \"luksan15\": {\n        \"fun\": luksan15,\n        \"start_x\": [-0.8, 1.2, -1.2, 0.8] * 25,\n        \"solution_x\": solution_x_luksan15,\n        \"start_criterion\": 2.701585e4,\n        \"solution_criterion\": 3.569697,\n    },\n    \"luksan16\": {\n        \"fun\": luksan16,\n        \"start_x\": [-0.8, 1.2, -1.2, 0.8] * 25,\n        \"solution_x\": solution_x_luksan16,\n        \"start_criterion\": 1.306848e4,\n        \"solution_criterion\": 3.569697,\n    },\n    \"luksan17\": {\n        \"fun\": luksan17,\n        \"start_x\": [-0.8, 1.2, -1.2, 0.8] * 25,\n        \"solution_x\": None,  # multiple argmins\n        \"start_criterion\": 1.687370e6,\n        \"solution_criterion\": 0.4931613,\n    },\n    \"luksan21\": {\n        \"fun\": luksan21,\n        \"start_x\": [ih * (ih - 1) for ih in np.arange(1, 101) * (1 / 101)],\n        \"solution_x\": solution_x_luksan21,\n        \"start_criterion\": 99.98751,\n        \"solution_criterion\": 0,\n    },\n    \"luksan22\": {\n        \"fun\": luksan22,\n        \"start_x\": [-1.2 if i % 2 == 0 else 1 for i in range(100)],\n        \"solution_x\": solution_x_luksan22,\n        \"start_criterion\": 2.487686e4,\n        \"solution_criterion\": 872.9230,\n    },\n    \"methanb8\": {\n        \"fun\": methane,\n        \"start_x\": 
get_start_points_methanb8(),\n        \"solution_x\": solution_x_methane,\n        \"start_criterion\": 1.043105,\n        \"solution_criterion\": 0,\n    },\n    \"methanl8\": {\n        \"fun\": methane,\n        \"start_x\": get_start_points_methanl8(),\n        \"solution_x\": solution_x_methane,\n        \"start_criterion\": 4345.100,\n        \"solution_criterion\": 0,\n    },\n    \"morebvne\": {\n        \"fun\": morebvne,\n        \"start_x\": [t * (t - 1) for t in np.arange(1, 101) * (1 / 101)],\n        \"solution_x\": solution_x_morebvne,\n        \"start_criterion\": 3.633100e-4,\n        \"solution_criterion\": 0,\n    },\n    \"msqrta\": {\n        \"fun\": msqrta,\n        \"start_x\": get_start_points_msqrta(10),\n        \"solution_x\": solution_x_msqrta,\n        \"start_criterion\": 212.7162,\n        \"solution_criterion\": 0,\n    },\n    \"msqrtb\": {\n        \"fun\": msqrta,\n        \"start_x\": get_start_points_msqrta(10, flag=2),\n        \"solution_x\": solution_x_msqrtb,\n        \"start_criterion\": 205.0753,\n        \"solution_criterion\": 0,\n    },\n    \"oscigrne\": {\n        \"fun\": oscigrne,\n        \"start_x\": [-2] + [1] * 99,\n        \"solution_x\": solution_x_oscigrne,\n        \"start_criterion\": 6.120720e8,\n        \"solution_criterion\": 0,\n    },\n    \"penalty_1\": {\n        \"fun\": penalty_1,\n        \"start_x\": list(range(1, 101)),\n        \"solution_x\": None,\n        \"start_criterion\": 1.144806e11,\n        \"solution_criterion\": 9.025000e-9,\n    },\n    \"penalty_2\": {\n        \"fun\": penalty_2,\n        \"start_x\": [0.5] * 100,\n        \"solution_x\": solution_x_penalty2,\n        \"start_criterion\": 1.591383e6,\n        \"solution_criterion\": 0.9809377,\n    },\n    \"powellse\": {\n        \"fun\": powell_singular,\n        \"start_x\": [3.0, -1.0, 0.0, 1] * 25,\n        \"solution_x\": [0] * 100,\n        \"start_criterion\": 41875,\n        \"solution_criterion\": 0,\n    },\n    
\"qr3d\": {\n        \"fun\": partial(qr3d, m=5),\n        \"start_x\": get_start_points_qr3d(5),\n        \"solution_x\": solution_x_qr3d,\n        \"start_criterion\": 1.2,\n        \"solution_criterion\": 0,\n        \"bounds\": Bounds(\n            lower=[-np.inf] * 25\n            + [0 if i == j else -np.inf for i in range(5) for j in range(5)]\n        ),\n    },\n    \"qr3dbd\": {\n        \"fun\": partial(qr3dbd, m=5),\n        \"start_x\": get_start_points_qr3dbd(5),\n        \"solution_x\": solution_x_qr3dbd,\n        \"start_criterion\": 1.2,\n        \"solution_criterion\": 0,\n        \"bounds\": Bounds(\n            lower=[-np.inf] * 25\n            + [0 if i == j else -np.inf for i in range(5) for j in range(5)]\n        ),\n    },\n    \"spmsqrt\": {\n        \"fun\": spmsqrt,\n        \"start_x\": get_start_points_spmsqrt(34),\n        \"solution_x\": solution_x_spmsqrt,\n        \"start_criterion\": 74.33542,\n        \"solution_criterion\": 0,\n    },\n    \"semicn2u\": {\n        \"fun\": semicon2,\n        \"start_x\": [0] * 100,\n        \"solution_x\": solution_x_semicon2,\n        \"start_criterion\": 2.025037e4,\n        \"solution_criterion\": 0,\n    },\n    \"semicon2\": {\n        \"fun\": semicon2,\n        \"start_x\": [0] * 100,\n        \"solution_x\": solution_x_semicon2,\n        \"start_criterion\": 2.025037e4,\n        \"solution_criterion\": 0,\n        \"bounds\": Bounds(lower=-5 * np.ones(100), upper=0.2 * 700 * np.ones(100)),\n    },\n    \"vardimne\": {\n        \"fun\": vardimne,\n        \"start_x\": [1 - i / 100 for i in range(1, 101)],\n        \"solution_x\": [1] * 100,\n        \"start_criterion\": 1.310584e14,\n        \"solution_criterion\": 0,\n    },\n    \"watsonne\": {\n        \"fun\": watson,\n        \"start_x\": [0] * 31,\n        \"solution_x\": solution_x_watson,\n        \"start_criterion\": 30,\n        \"solution_criterion\": 0,\n    },\n    \"yatpsq_1\": {\n        \"fun\": partial(yatpsq_1, 
dim_in=10),\n        \"start_x\": [6] * 100 + [0] * 20,\n        \"solution_x\": solution_x_yatpsq_1,\n        \"start_criterion\": 2.073643e6,\n        \"solution_criterion\": 0,\n    },\n    \"yatpsq_2\": {\n        \"fun\": partial(yatpsq_2, dim_in=10),\n        \"start_x\": [10] * 100 + [0] * 20,\n        \"solution_x\": solution_x_yatpsq_2,\n        \"start_criterion\": 1.831687e5,\n        \"solution_criterion\": 0,\n    },\n}\n"
  },
  {
    "path": "src/optimagic/benchmarking/get_benchmark_problems.py",
    "content": "from functools import partial, wraps\n\nimport numpy as np\n\nfrom optimagic import mark\nfrom optimagic.benchmarking.cartis_roberts import CARTIS_ROBERTS_PROBLEMS\nfrom optimagic.benchmarking.more_wild import MORE_WILD_PROBLEMS\nfrom optimagic.benchmarking.noise_distributions import NOISE_DISTRIBUTIONS\nfrom optimagic.shared.process_user_function import infer_aggregation_level\nfrom optimagic.typing import AggregationLevel\nfrom optimagic.utilities import get_rng\n\n\ndef get_benchmark_problems(\n    name,\n    *,\n    additive_noise=False,\n    additive_noise_options=None,\n    multiplicative_noise=False,\n    multiplicative_noise_options=None,\n    scaling=False,\n    scaling_options=None,\n    seed=None,\n    exclude=None,\n):\n    \"\"\"Get a dictionary of test problems for a benchmark.\n\n    Args:\n        name (str): The name of the set of test problems. Currently \"more_wild\"\n            is the only supported one.\n        additive_noise (bool): Whether to add additive noise to the problem.\n            Default False.\n        additive_noise_options (dict or None): Specifies the amount and distribution\n            of the addititve noise added to the problem. Has the entries:\n            - distribition (str): One of \"normal\", \"gumbel\", \"uniform\", \"logistic\",\n            \"laplace\". Default \"normal\".\n            - std (float): The standard deviation of the noise. This works for all\n            distributions, even if those distributions are normally not specified\n            via a standard deviation (e.g. uniform).\n            - correlation (float): Number between 0 and 1 that specifies the auto\n            correlation of the noise.\n        multiplicative_noise (bool): Whether to add multiplicative noise to the problem.\n            Default False.\n        multiplicative_noise_options (dict or None): Specifies the amount and\n            distribition of the multiplicative noise added to the problem. 
Has entries:\n            - distribition (str): One of \"normal\", \"gumbel\", \"uniform\", \"logistic\",\n            \"laplace\". Default \"normal\".\n            - std (float): The standard deviation of the noise. This works for all\n            distributions, even if those distributions are normally not specified\n            via a standard deviation (e.g. uniform).\n            - correlation (float): Number between 0 and 1 that specifies the auto\n            correlation of the noise.\n            - clipping_value (float): A non-negative float. Multiplicative noise\n            becomes zero if the function value is zero. To avoid this, we do not\n            implement multiplicative noise as `f_noisy = f * epsilon` but by\n            `f_noisy` = f + (epsilon - 1) * f_clipped` where f_clipped is bounded\n            away from zero from both sides by the clipping value.\n        scaling (bool): Whether the parameter space of the problem should be rescaled.\n        scaling_options (dict): Dict containing the keys \"min_scale\", and \"max_scale\".\n            If scaling is True, the parameters the optimizer sees are the standard\n            parameters multiplied by np.linspace(min_scale, max_scale, len(params)).\n            If min_scale and max_scale have very different orders of magnitude, the\n            problem becomes harder to solve for many optimizers.\n        seed (Union[None, int, numpy.random.Generator]): If seed is None or int the\n            numpy.random.default_rng is used seeded with seed. If seed is already a\n            Generator instance then that instance is used.\n        exclude (str or List): Problems to exclude.\n\n\n    Returns:\n        dict: Nested dictionary with benchmark problems of the structure:\n            {\"name\": {\"inputs\": {...}, \"solution\": {...}, \"info\": {...}}}\n            where \"inputs\" are keyword arguments for ``minimize`` such as the criterion\n            function and start parameters. 
\"solution\" contains the entries \"params\" and\n            \"value\" and \"info\" might contain information about the test problem.\n\n    \"\"\"\n    if exclude is None:\n        exclude = {}\n    elif isinstance(exclude, str):\n        exclude = [exclude]\n    else:\n        exclude = set(exclude)\n\n    rng = get_rng(seed)\n    raw_problems = _get_raw_problems(name)\n\n    raw_problems = {k: v for k, v in raw_problems.items() if k not in exclude}\n\n    is_noisy = bool(additive_noise or multiplicative_noise)\n\n    if additive_noise:\n        additive_options = _process_noise_options(additive_noise_options, False)\n    else:\n        additive_options = None\n\n    if multiplicative_noise:\n        multiplicative_options = _process_noise_options(\n            multiplicative_noise_options, True\n        )\n    else:\n        multiplicative_options = None\n\n    if scaling:\n        scaling_options = scaling_options if scaling_options is not None else {}\n        scaling_options = {\"min_scale\": 0.1, \"max_scale\": 10, **scaling_options}\n    else:\n        scaling_options = None\n\n    problems = {}\n    for prob_name, specification in raw_problems.items():\n        inputs = _create_problem_inputs(\n            specification,\n            additive_options=additive_options,\n            multiplicative_options=multiplicative_options,\n            scaling_options=scaling_options,\n            rng=rng,\n        )\n\n        problems[prob_name] = {\n            \"inputs\": inputs,\n            \"noise_free_fun\": specification[\"fun\"],\n            \"solution\": _create_problem_solution(\n                specification, scaling_options=scaling_options\n            ),\n            \"noisy\": is_noisy,\n            \"info\": specification.get(\"info\", {}),\n            \"start_criterion\": specification[\"start_criterion\"],\n        }\n\n    return problems\n\n\ndef _get_raw_problems(name):\n    if name == \"more_wild\":\n        raw_problems = MORE_WILD_PROBLEMS\n 
   elif name == \"cartis_roberts\":\n        raw_problems = CARTIS_ROBERTS_PROBLEMS\n    elif name == \"example\":\n        subset = {\n            \"rosenbrock_good_start\",\n            \"helical_valley_good_start\",\n            \"powell_singular_good_start\",\n            \"freudenstein_roth_good_start\",\n            \"bard_good_start\",\n            \"box_3d\",\n            \"brown_dennis_good_start\",\n            \"chebyquad_6\",\n            \"bdqrtic_8\",\n            \"mancino_5_good_start\",\n        }\n        raw_problems = {k: v for k, v in MORE_WILD_PROBLEMS.items() if k in subset}\n    elif name == \"estimagic\":\n        subset_mw = {\n            \"cube_8\",\n            \"chebyquad_6\",\n            \"bdqrtic_8\",\n            \"linear_full_rank_bad_start\",\n            \"chebyquad_7\",\n            \"osborne_two_bad_start\",\n            \"bdqrtic_10\",\n            \"bdqrtic_11\",\n            \"heart_eight_bad_start\",\n            \"mancino_5_bad_start\",\n            \"chebyquad_8\",\n            \"cube_6\",\n            \"cube_5\",\n            \"bdqrtic_12\",\n            \"chebyquad_10\",\n            \"chebyquad_9\",\n            \"chebyquad_11\",\n            \"mancino_8\",\n            \"mancino_10\",\n            \"mancino_12_bad_start\",\n        }\n        subset_cr = {\n            \"hatfldg\",\n            \"bratu_3d\",\n            \"cbratu_2d\",\n            \"chnrsbne\",\n            \"bratu_2d\",\n            \"vardimne\",\n            \"penalty_1\",\n            \"arglale\",\n            \"arglble\",\n        }\n        subset_add_steps = {\n            \"rosenbrock_good_start\",\n            \"cube_5\",\n            \"chebyquad_10\",\n        }\n        raw_problems = {}\n        for k, v in MORE_WILD_PROBLEMS.items():\n            if k in subset_mw:\n                raw_problems[k] = v\n            if k in subset_add_steps:\n                problem = v.copy()\n                raw_func = problem[\"fun\"]\n\n                
problem[\"fun\"] = wraps(raw_func)(partial(_step_func, raw_func=raw_func))\n                raw_problems[f\"{k}_with_steps\"] = problem\n\n        for k, v in CARTIS_ROBERTS_PROBLEMS.items():\n            if k in subset_cr:\n                raw_problems[k] = v\n\n    else:\n        raise NotImplementedError()\n    return raw_problems\n\n\ndef _step_func(x, raw_func):\n    return raw_func(x.round(3))\n\n\ndef _create_problem_inputs(\n    specification, additive_options, multiplicative_options, scaling_options, rng\n):\n    _x = np.array(specification[\"start_x\"])\n\n    if scaling_options is not None:\n        scaling_factor = _get_scaling_factor(_x, scaling_options)\n        _x = _x * scaling_factor\n    else:\n        scaling_factor = None\n\n    problem_type = infer_aggregation_level(specification[\"fun\"])\n\n    problem_type_to_marker = {\n        AggregationLevel.SCALAR: mark.scalar,\n        AggregationLevel.LIKELIHOOD: mark.likelihood,\n        AggregationLevel.LEAST_SQUARES: mark.least_squares,\n    }\n\n    _criterion = partial(\n        _internal_criterion_template,\n        criterion=specification[\"fun\"],\n        additive_options=additive_options,\n        multiplicative_options=multiplicative_options,\n        scaling_factor=scaling_factor,\n        rng=rng,\n    )\n\n    _criterion = problem_type_to_marker[problem_type](_criterion)\n\n    inputs = {\"fun\": _criterion, \"params\": _x}\n    return inputs\n\n\ndef _create_problem_solution(specification, scaling_options):\n    _solution_x = specification.get(\"solution_x\")\n    if _solution_x is None:\n        _solution_x = np.array(specification[\"start_x\"]) * np.nan\n    elif isinstance(_solution_x, list):\n        _solution_x = np.array(_solution_x)\n    _params = _solution_x\n    if scaling_options is not None:\n        _params = _params * _get_scaling_factor(_params, scaling_options)\n\n    _value = specification[\"solution_criterion\"]\n\n    solution = {\n        \"params\": _params,\n        
\"value\": _value,\n    }\n    return solution\n\n\ndef _get_scaling_factor(x, options):\n    return np.linspace(options[\"min_scale\"], options[\"max_scale\"], len(x))\n\n\ndef _internal_criterion_template(\n    params, criterion, additive_options, multiplicative_options, scaling_factor, rng\n):\n    if scaling_factor is not None:\n        params = params / scaling_factor\n\n    critval = criterion(params)\n\n    noise = _get_combined_noise(\n        critval,\n        additive_options=additive_options,\n        multiplicative_options=multiplicative_options,\n        rng=rng,\n    )\n\n    noisy_critval = critval + noise\n\n    return noisy_critval\n\n\ndef _get_combined_noise(fval, additive_options, multiplicative_options, rng):\n    size = len(np.atleast_1d(fval))\n    if multiplicative_options is not None:\n        options = multiplicative_options.copy()\n        std = options.pop(\"std\")\n        clipval = options.pop(\"clipping_value\")\n        scaled_std = std * _clip_away_from_zero(fval, clipval)\n        multiplicative_noise = _sample_from_distribution(\n            **options, std=scaled_std, size=size, rng=rng\n        )\n    else:\n        multiplicative_noise = 0\n\n    if additive_options is not None:\n        additive_noise = _sample_from_distribution(\n            **additive_options, size=size, rng=rng\n        )\n    else:\n        additive_noise = 0\n\n    return multiplicative_noise + additive_noise\n\n\ndef _sample_from_distribution(distribution, mean, std, size, rng, correlation=0):\n    sample = NOISE_DISTRIBUTIONS[distribution](size=size, rng=rng)\n    dim = size if isinstance(size, int) else size[1]\n    if correlation != 0 and dim > 1:\n        chol = np.linalg.cholesky(np.diag(np.ones(dim) - correlation) + correlation)\n        sample = (chol @ sample.T).T\n        sample = sample / sample.std()\n    sample *= std\n    sample += mean\n    return sample\n\n\ndef _process_noise_options(options, is_multiplicative):\n    options = {} if 
options is None else options\n\n    defaults = {\"std\": 0.01, \"distribution\": \"normal\", \"correlation\": 0, \"mean\": 0}\n    if is_multiplicative:\n        defaults[\"clipping_value\"] = 1\n\n    processed = {\n        **defaults,\n        **options,\n    }\n\n    distribution = processed[\"distribution\"]\n    if distribution not in NOISE_DISTRIBUTIONS:\n        raise ValueError(\n            f\"Invalid distribution: {distribution}. \"\n            f\"Allowed are {list(NOISE_DISTRIBUTIONS)}\"\n        )\n\n    std = processed[\"std\"]\n    if std < 0:\n        raise ValueError(f\"std must be non-negative. Not: {std}\")\n\n    corr = processed[\"correlation\"]\n    if corr < 0:\n        raise ValueError(f\"corr must be non-negative. Not: {corr}\")\n\n    if is_multiplicative:\n        clipping_value = processed[\"clipping_value\"]\n        if clipping_value < 0:\n            raise ValueError(\n                f\"clipping_value must be non-negative. Not: {clipping_value}\"\n            )\n\n    return processed\n\n\ndef _clip_away_from_zero(a, clipval):\n    is_scalar = np.isscalar(a)\n    a = np.atleast_1d(a)\n\n    is_positive = a >= 0\n\n    clipped = np.where(is_positive, np.clip(a, clipval, np.inf), a)\n    clipped = np.where(~is_positive, np.clip(clipped, -np.inf, -clipval), clipped)\n\n    if is_scalar:\n        clipped = clipped[0]\n    return clipped\n"
  },
  {
    "path": "src/optimagic/benchmarking/more_wild.py",
    "content": "\"\"\"Define the More-Wild Benchmark Set.\n\nThis benchmark set is contains 53 test cases for nonlinear least squares solvers.\nThe test cases are built out of 22 functions, originally derived from the CUTEr\nProblems. It was used to benchmark all modern model based non-linear derivative\nfree least squares solvers (e.g. POUNDERS, DFOGN, DFOLS).\n\nThe parameter dimensions are quite small, varying between 2 and 12.\n\nThe benchmark set was first described In More and Wild, 2009. Fortran and Matlab Code\nis available here. We use the following sources of information to construct the\nbenchmark set:\n\n- https://www.mcs.anl.gov/~more/dfo/fortran/dfovec.f for the function implementation\n- https://www.mcs.anl.gov/~more/dfo/fortran/dfoxs.f for the base starting points\n- https://www.mcs.anl.gov/~more/dfo/fortran/dfo.dat for:\n    - The mapping test cases to criterion functions (column 1)\n    - The dimensionalities of parameter vectors (column 2)\n    - The dimensionalities of the output (column 3)\n    - Whether the base start vector is multiplied by a factor of ten or not (column 4).\n\n\"\"\"\n\nfrom functools import partial\n\nimport numpy as np\n\nfrom optimagic import mark\n\n\n@mark.least_squares\ndef linear_full_rank(x, dim_out):\n    temp = 2 * x.sum() / dim_out + 1\n    out = np.full(dim_out, -temp)\n    out[: len(x)] += x\n    return out\n\n\n@mark.least_squares\ndef linear_rank_one(x, dim_out):\n    dim_in = len(x)\n    sm = np.arange(1, dim_in + 1) @ x\n    fvec = np.arange(1, dim_out + 1) * sm - 1.0\n    return fvec\n\n\n@mark.least_squares\ndef linear_rank_one_zero_columns_rows(x, dim_out):\n    dim_in = len(x)\n    sm = (np.arange(2, dim_in) * x[1:-1]).sum()\n    fvec = np.arange(dim_out) * sm - 1.0\n    fvec[-1] = -1.0\n    return fvec\n\n\n@mark.least_squares\ndef rosenbrock(x):\n    fvec = np.zeros(2)\n    fvec[0] = 10 * (x[1] - x[0] ** 2)\n    fvec[1] = 1.0 - x[0]\n    return fvec\n\n\n@mark.least_squares\ndef helical_valley(x):\n    
temp = 8 * np.arctan(1.0)\n    temp1 = np.sign(x[1]) * 0.25\n    if x[0] > 0:\n        temp1 = np.arctan(x[1] / x[0]) / temp\n    elif x[0] < 0:\n        temp1 = np.arctan(x[1] / x[0]) / temp + 0.5\n    temp2 = np.sqrt(x[0] ** 2 + x[1] ** 2)\n    fvec = np.zeros(3)\n    fvec[0] = 10 * (x[2] - 10 * temp1)\n    fvec[1] = 10 * (temp2 - 1.0)\n    fvec[2] = x[2]\n    return fvec\n\n\n@mark.least_squares\ndef powell_singular(x):\n    fvec = np.zeros(4)\n    fvec[0] = x[0] + 10 * x[1]\n    fvec[1] = np.sqrt(5.0) * (x[2] - x[3])\n    fvec[2] = (x[1] - 2 * x[2]) ** 2\n    fvec[3] = np.sqrt(10.0) * (x[0] - x[3]) ** 2\n    return fvec\n\n\n@mark.least_squares\ndef freudenstein_roth(x):\n    fvec = np.zeros(2)\n    fvec[0] = -13 + x[0] + ((5 - x[1]) * x[1] - 2) * x[1]\n    fvec[1] = -29 + x[0] + ((1.0 + x[1]) * x[1] - 14) * x[1]\n    return fvec\n\n\n@mark.least_squares\ndef bard(x, y):\n    fvec = np.zeros(len(y))\n    for i in range(1, round(len(y) / 2) + 1):\n        temp = len(y) + 1 - i\n        fvec[i - 1] = y[i - 1] - (x[0] + i / (x[1] * temp + x[2] * i))\n    for i in range(round(len(y) / 2) + 1, len(y) + 1):\n        temp = len(y) + 1 - i\n        fvec[i - 1] = y[i - 1] - (x[0] + i / (x[1] * temp + x[2] * temp))\n    return fvec\n\n\n@mark.least_squares\ndef kowalik_osborne(x, y1, y2):\n    temp1 = y1 * (y1 + x[1])\n    temp2 = y1 * (y1 + x[2]) + x[3]\n    fvec = y2 - x[0] * temp1 / temp2\n    return fvec\n\n\n@mark.least_squares\ndef meyer(x, y):\n    temp = 5 * np.arange(1, len(y) + 1) + 45 + x[2]\n    temp1 = x[1] / temp\n    temp2 = np.exp(temp1)\n    fvec = x[0] * temp2 - y\n    return fvec\n\n\n@mark.least_squares\ndef watson(x):\n    dim_in = len(x)\n    fvec = np.zeros(31)\n    for i in range(1, 30):\n        temp = i / 29\n        sum_1 = (np.arange(1, dim_in) * temp ** np.arange(dim_in - 1) * x[1:]).sum()\n        sum_2 = (temp ** np.arange(dim_in) * x).sum()\n        fvec[i - 1] = sum_1 - sum_2**2 - 1.0\n    fvec[29] = x[0]\n    fvec[30] = x[1] - x[0] ** 2 
- 1.0\n    return fvec\n\n\n@mark.least_squares\ndef box_3d(x, dim_out):\n    fvec = np.zeros(dim_out)\n    for i in range(1, dim_out + 1):\n        fvec[i - 1] = (\n            np.exp(-i / 10 * x[0])\n            - np.exp(-i / 10 * x[1])\n            + (np.exp(-i) - np.exp(-i / 10)) * x[2]\n        )\n    return fvec\n\n\n@mark.least_squares\ndef jennrich_sampson(x, dim_out):\n    fvec = (\n        2 * (1.0 + np.arange(1, dim_out + 1))\n        - np.exp(np.arange(1, dim_out + 1) * x[0])\n        - np.exp(np.arange(1, dim_out + 1) * x[1])\n    )\n    return fvec\n\n\n@mark.least_squares\ndef brown_dennis(x, dim_out):\n    fvec = np.zeros(dim_out)\n    for i in range(1, dim_out + 1):\n        temp = i / 5\n        temp_1 = x[0] + temp * x[1] - np.exp(temp)\n        temp_2 = x[2] + np.sin(temp) * x[3] - np.cos(temp)\n        fvec[i - 1] = temp_1**2 + temp_2**2\n    return fvec\n\n\n@mark.least_squares\ndef chebyquad(x, dim_out):\n    fvec = np.zeros(dim_out)\n    dim_in = len(x)\n    for i in range(1, dim_in + 1):\n        temp_1 = 1.0\n        temp_2 = 2 * x[i - 1] - 1.0\n        temp_3 = 2 * temp_2\n        for j in range(dim_out):\n            fvec[j] = fvec[j] + temp_2\n            temp_4 = temp_3 * temp_2 - temp_1\n            temp_1 = temp_2\n            temp_2 = temp_4\n    for i in range(1, dim_out + 1):\n        fvec[i - 1] = fvec[i - 1] / dim_in\n        if i % 2 == 0:\n            fvec[i - 1] = fvec[i - 1] + 1 / (i**2 - 1.0)\n    return fvec\n\n\n@mark.least_squares\ndef brown_almost_linear(x):\n    dim_in = len(x)\n    sm = -(dim_in + 1) + x.sum()\n    product = x.prod()\n    fvec = x + sm\n    fvec[dim_in - 1] = product - 1.0\n    return fvec\n\n\n@mark.least_squares\ndef osborne_one(x, y):\n    temp = 10 * np.arange(len(y))\n    temp_1 = np.exp(-x[3] * temp)\n    temp_2 = np.exp(-x[4] * temp)\n    fvec = y - (x[0] + x[1] * temp_1 + x[2] * temp_2)\n    return fvec\n\n\n@mark.least_squares\ndef osborne_two(x, y):\n    temp_array = np.zeros((4, len(y)))\n  
  temp = np.arange(len(y)) / 10\n    temp_array[0] = np.exp(-x[4] * temp)\n    temp_array[1] = np.exp(-x[5] * (temp - x[8]) ** 2)\n    temp_array[2] = np.exp(-x[6] * (temp - x[9]) ** 2)\n    temp_array[3] = np.exp(-x[7] * (temp - x[10]) ** 2)\n    fvec = y - (temp_array.T * x[:4]).T.sum(axis=0)\n    return fvec\n\n\n@mark.least_squares\ndef bdqrtic(x):\n    # the length of array x should be more than 5.\n    dim_in = len(x)\n    fvec = np.zeros(2 * (dim_in - 4))\n    for i in range(dim_in - 4):\n        fvec[i] = -4 * x[i] + 3\n        fvec[dim_in - 4 + i] = (\n            x[i] ** 2\n            + 2 * x[i + 1] ** 2\n            + 3 * x[i + 2] ** 2\n            + 4 * x[i + 3] ** 2\n            + 5 * x[dim_in - 1] ** 2\n        )\n    return fvec\n\n\n@mark.least_squares\ndef cube(x):\n    fvec = 10 * (x - np.roll(x, 1) ** 3)\n    fvec[0] = x[0] - 1.0\n    return fvec\n\n\n@mark.least_squares\ndef mancino(x):\n    dim_in = len(x)\n    fvec = np.zeros(dim_in)\n    for i in range(dim_in):\n        sm = 0\n        for j in range(dim_in):\n            temp = np.sqrt(x[i] ** 2 + (i + 1) / (j + 1))\n            sm += temp * ((np.sin(np.log(temp))) ** 5 + (np.cos(np.log(temp))) ** 5)\n        fvec[i] = 1400 * x[i] + (i + 1 - 50) ** 3 + sm\n    return fvec\n\n\n@mark.least_squares\ndef heart_eight(x, y):\n    dim_y = len(y)\n    fvec = np.zeros(dim_y)\n    fvec[0] = x[0] + x[1] - y[0]\n    fvec[1] = x[2] + x[3] - y[1]\n    fvec[2] = x[4] * x[0] + x[5] * x[1] - x[6] * x[2] - x[7] * x[3] - y[2]\n    fvec[3] = x[6] * x[0] + x[7] * x[1] + x[4] * x[2] + x[5] * x[3] - y[3]\n    fvec[4] = (\n        x[0] * (x[4] ** 2 - x[6] ** 2)\n        - 2 * x[2] * x[4] * x[6]\n        + x[1] * (x[5] ** 2 - x[7] ** 2)\n        - 2 * x[3] * x[5] * x[7]\n        - y[4]\n    )\n    fvec[5] = (\n        x[2] * (x[4] ** 2 - x[6] ** 2)\n        + 2 * x[0] * x[4] * x[6]\n        + x[3] * (x[5] ** 2 - x[7] ** 2)\n        + 2 * x[1] * x[5] * x[7]\n        - y[5]\n    )\n    fvec[6] = (\n        x[0] * 
x[4] * (x[4] ** 2 - 3 * x[6] ** 2)\n        + x[2] * x[6] * (x[6] ** 2 - 3 * x[4] ** 2)\n        + x[1] * x[5] * (x[5] ** 2 - 3 * x[7] ** 2)\n        + x[3] * x[7] * (x[7] ** 2 - 3 * x[5] ** 2)\n        - y[6]\n    )\n    fvec[7] = (\n        x[2] * x[4] * (x[4] ** 2 - 3 * x[6] ** 2)\n        - x[0] * x[6] * (x[6] ** 2 - 3 * x[4] ** 2)\n        + x[3] * x[5] * (x[5] ** 2 - 3 * x[7] ** 2)\n        - x[1] * x[7] * (x[7] ** 2 - 3 * x[5] ** 2)\n        - y[7]\n    )\n    return fvec\n\n\n@mark.least_squares\ndef get_start_points_mancino(n, a=1):\n    x = np.zeros(n)\n    for i in range(1, n + 1):\n        sm = 0\n        for j in range(1, n + 1):\n            sm += np.sqrt(i / j) * (\n                (np.sin(np.log(np.sqrt(i / j)))) ** 5\n                + (np.cos(np.log(np.sqrt(i / j)))) ** 5\n            )\n        x[i - 1] = -8.7110e-04 * ((i - 50) ** 3 + sm)\n    return (x * a).tolist()\n\n\ny_vec = np.array(\n    [\n        0.1400,\n        0.1800,\n        0.2200,\n        0.2500,\n        0.2900,\n        0.3200,\n        0.3500,\n        0.3900,\n        0.3700,\n        0.5800,\n        0.7300,\n        0.9600,\n        1.3400,\n        2.1000,\n        4.3900,\n    ]\n)\n\nv_vec = np.array(\n    [\n        4.0000,\n        2.0000,\n        1.0000,\n        0.5000,\n        0.2500,\n        0.1670,\n        0.1250,\n        0.1000,\n        0.0833,\n        0.0714,\n        0.0625,\n    ]\n)\n\ny2_vec = np.array(\n    [\n        0.1957,\n        0.1947,\n        0.1735,\n        0.1600,\n        0.0844,\n        0.0627,\n        0.0456,\n        0.0342,\n        0.0323,\n        0.0235,\n        0.0246,\n    ]\n)\n\ny3_vec = np.array(\n    [\n        34780,\n        28610,\n        23650,\n        19630,\n        16370,\n        13720,\n        11540,\n        9744,\n        8261,\n        7030,\n        6005,\n        5147,\n        4427,\n        3820,\n        3307,\n        2872,\n    ]\n)\ny4_vec = np.array(\n    [\n        8.44e-1,\n        9.08e-1,\n    
    9.32e-1,\n        9.36e-1,\n        9.25e-1,\n        9.08e-1,\n        8.81e-1,\n        8.5e-1,\n        8.18e-1,\n        7.84e-1,\n        7.51e-1,\n        7.18e-1,\n        6.85e-1,\n        6.58e-1,\n        6.28e-1,\n        6.03e-1,\n        5.8e-1,\n        5.58e-1,\n        5.38e-1,\n        5.22e-1,\n        5.06e-1,\n        4.9e-1,\n        4.78e-1,\n        4.67e-1,\n        4.57e-1,\n        4.48e-1,\n        4.38e-1,\n        4.31e-1,\n        4.24e-1,\n        4.2e-1,\n        4.14e-1,\n        4.11e-1,\n        4.06e-1,\n    ]\n)\ny5_vec = np.array(\n    [\n        1.366e0,\n        1.191e0,\n        1.112e0,\n        1.013e0,\n        9.91e-1,\n        8.85e-1,\n        8.31e-1,\n        8.47e-1,\n        7.86e-1,\n        7.25e-1,\n        7.46e-1,\n        6.79e-1,\n        6.08e-1,\n        6.55e-1,\n        6.16e-1,\n        6.06e-1,\n        6.02e-1,\n        6.26e-1,\n        6.51e-1,\n        7.24e-1,\n        6.49e-1,\n        6.49e-1,\n        6.94e-1,\n        6.44e-1,\n        6.24e-1,\n        6.61e-1,\n        6.12e-1,\n        5.58e-1,\n        5.33e-1,\n        4.95e-1,\n        5.0e-1,\n        4.23e-1,\n        3.95e-1,\n        3.75e-1,\n        3.72e-1,\n        3.91e-1,\n        3.96e-1,\n        4.05e-1,\n        4.28e-1,\n        4.29e-1,\n        5.23e-1,\n        5.62e-1,\n        6.07e-1,\n        6.53e-1,\n        6.72e-1,\n        7.08e-1,\n        6.33e-1,\n        6.68e-1,\n        6.45e-1,\n        6.32e-1,\n        5.91e-1,\n        5.59e-1,\n        5.97e-1,\n        6.25e-1,\n        7.39e-1,\n        7.1e-1,\n        7.29e-1,\n        7.2e-1,\n        6.36e-1,\n        5.81e-1,\n        4.28e-1,\n        2.92e-1,\n        1.62e-1,\n        9.8e-2,\n        5.4e-2,\n    ]\n)\n\n\nlinear_full_rank_solution_x = [\n    -0.9999999988839997,\n    -1.0000000177422066,\n    -1.0000000115935452,\n    -1.0000000228208163,\n    -1.0000000488884697,\n    -0.9999999970458138,\n    -0.999999957053959,\n    
-1.0000000040514776,\n    -0.9999999708374043,\n]\n\n\nfreudenstein_roth_solution_x = [11.4127789219781, -0.8968052599835741]\n\n\nbard_solution_x = [0.08241056005476516, 1.1330360796060677, 2.3436951913379658]\n\n\nkowalik_osborne_solution_x = [\n    0.19280693401647758,\n    0.19128233030789646,\n    0.12305650338704374,\n    0.1360623315234073,\n]\n\n\nmeyer_solution_x = [0.005609636453940975, 6181.3463491557495, 345.22363473367955]\n\nwatson_6_solution_x = [\n    -0.01572508595814696,\n    1.0124348692251488,\n    -0.23299161822960684,\n    1.2604300607312298,\n    -1.5137288869025518,\n    0.9929964192277573,\n]\n\n\n# Note: only nlopt_neldermead got close to the correct optimal criterion value.\n# Parameter values might be less precise than others but should be precise enough\n# for all practical purposes.\nwatson_9_solution_x = [\n    -1.5307729818292037e-05,\n    0.9997897038761921,\n    0.014763956456196943,\n    0.14634240306061744,\n    1.000820801996808,\n    -2.617730533377693,\n    4.104402503186126,\n    -3.1436119083184844,\n    1.0526263240326197,\n]\n\n\n# Note: only nlopt_nobyqa got close to the correct optimal criterion value.\n# Parameter values might be less precise than others but should be precise enough\n# for all practical purposes.\nwatson_12_solution_x = [\n    -1.257374334661004e-07,\n    1.000009574359581,\n    -0.0005801330054146337,\n    0.339181153679104,\n    -0.01717885040751319,\n    0.1133023927390161,\n    0.19016852711009063,\n    -0.21697797575421524,\n    -0.20528305553311146,\n    0.9344814896242725,\n    -0.8979508634897754,\n    0.3182351206188577,\n]\n\nbrown_dennis_solution_x = [\n    -11.594439969349615,\n    13.203630099554186,\n    -0.40343943943781074,\n    0.2367787758603151,\n]\n\nchebyquad_6_solution_x = [\n    0.06687659094608964,\n    0.2887406731194441,\n    0.36668229924164747,\n    0.6333177007583523,\n    0.7112593268805555,\n    0.9331234090539102,\n]\n\n\nchebyquad_7_solution_x = [\n    
0.0580691496209753,\n    0.23517161235742137,\n    0.3380440947400461,\n    0.49999999999999983,\n    0.6619559052599537,\n    0.7648283876425783,\n    0.9419308503790245,\n]\n\n\nchebyquad_8_solution_x = [\n    0.043152760689960816,\n    0.19309084165259105,\n    0.2663287079773684,\n    0.5000000016286815,\n    0.5000000007226908,\n    0.8069091602434582,\n    0.7336712939109635,\n    0.9568472402172841,\n]\n\nchebyquad_9_solution_x = [\n    0.04420534613578318,\n    0.19949067230988682,\n    0.23561910847105574,\n    0.4160469078926057,\n    0.5839530921074088,\n    0.4999999999999922,\n    0.800509327690123,\n    0.7643808915289372,\n    0.9557946538642177,\n]\n\n\nchebyquad_10_solution_x = [\n    0.07474816709152399,\n    0.17151817795786592,\n    0.28643415454482585,\n    0.35964645053932914,\n    0.4707505262783716,\n    0.6167383355304029,\n    0.6167383367837294,\n    0.7998108031241883,\n    0.844854641539109,\n    0.9670066274628275,\n]\n\n\nchebyquad_11_solution_x = [\n    0.02995874447661457,\n    0.1373112070822553,\n    0.18836638791417698,\n    0.3588431173822416,\n    0.3588431160884765,\n    0.5000000000242054,\n    0.6411568833224512,\n    0.6411568815391566,\n    0.8116336110470005,\n    0.8626887929155374,\n    0.9700412549151204,\n]\n\nosborne_one_solution_x = [\n    0.37541005253870485,\n    1.9358469347077125,\n    -1.4646871598379403,\n    0.012867534697214533,\n    0.02212269960299629,\n]\n\nosborne_two_solution_x = [\n    1.3099771555174913,\n    0.4315537955622272,\n    0.6336616986693765,\n    0.5994305344293098,\n    0.7541832304802704,\n    0.9042885759622441,\n    1.365811821857166,\n    4.823698851312894,\n    2.398684862961737,\n    4.568874597996633,\n    5.675341470445994,\n]\n\n\nbdqrtic_8_solution_x = [\n    0.616075443630495,\n    0.4861767187980861,\n    0.39190293828200784,\n    0.32635052133139375,\n    5.7665311977077046e-09,\n    9.348707442258251e-09,\n    7.066347917413364e-09,\n    
-2.030598138768078e-09,\n]\n\n\nbdqrtic_10_solution_x = [\n    0.6255364749479968,\n    0.4851009828850974,\n    0.3671943518989714,\n    0.28518847760113386,\n    0.33016716122418716,\n    0.37757199483645576,\n    -3.24040819296658e-09,\n    -1.8973118921921425e-08,\n    -2.2244236071548075e-08,\n    1.9263207246002433e-09,\n]\n\n\nbdqrtic_11_solution_x = [\n    0.6251418193253757,\n    0.4858196102070445,\n    0.3712502347939938,\n    0.28350403794642487,\n    0.31694697562905494,\n    0.33873300184720523,\n    0.3759208995980027,\n    -1.8942209640948616e-08,\n    3.418631657404969e-08,\n    -4.003185000628104e-09,\n    3.166166094063382e-09,\n]\n\n\nbdqrtic_12_solution_x = [\n    0.6248003622228653,\n    0.48537650602979937,\n    0.37165912289534886,\n    0.2859718523039759,\n    0.31552001728813406,\n    0.3253724392486982,\n    0.33781861543778574,\n    0.37402021737899876,\n    -4.429208872117422e-09,\n    -1.008941638491605e-08,\n    -2.5608732325955336e-08,\n    4.485976896804288e-09,\n]\n\n\nmancino_5_solution_x = [\n    84.28291101102532,\n    79.20603967293438,\n    74.3364141135311,\n    69.6711474112178,\n    65.20718113814442,\n]\n\nmancino_8_solution_x = [\n    84.43334222593528,\n    79.33454939399172,\n    74.44387011026309,\n    69.7592945870252,\n    65.27853533617875,\n    60.9988580578957,\n    56.9169379354432,\n    53.028761291567236,\n]\n\nmancino_10_solution_x = [\n    84.53434289477315,\n    79.42084435375007,\n    74.51601545241338,\n    69.81844699647671,\n    65.32637991893166,\n    61.03748806452533,\n    56.94869518846038,\n    53.056052319528746,\n    49.35469508461959,\n    45.83889035077595,\n]\n\n\nmancino_12_solution_x = [\n    84.63591921594158,\n    79.5076423105225,\n    74.5885724920863,\n    69.87791406833868,\n    65.37444824684921,\n    61.07626530788906,\n    56.98054088428213,\n    53.08338921660163,\n    49.379816810523714,\n    45.86378591833196,\n    42.52838225939789,\n    
39.36606891417026,\n]\n\nheart_eight_solution_x = [\n    -0.311626605565399,\n    -0.37837339443458845,\n    0.3282442301180765,\n    -0.3722442301180588,\n    -1.282227094270286,\n    2.4943003120854743,\n    1.5548658787873983,\n    -1.384637842863253,\n]\n\n\nMORE_WILD_PROBLEMS = {\n    \"linear_full_rank_good_start\": {\n        \"fun\": mark.least_squares(partial(linear_full_rank, dim_out=45)),\n        \"start_x\": [1] * 9,\n        \"solution_x\": linear_full_rank_solution_x,\n        \"start_criterion\": 72,\n        \"solution_criterion\": 36,\n    },\n    \"linear_full_rank_bad_start\": {\n        \"fun\": mark.least_squares(partial(linear_full_rank, dim_out=45)),\n        \"start_x\": [10] * 9,\n        \"solution_x\": linear_full_rank_solution_x,\n        \"start_criterion\": 1125,\n        \"solution_criterion\": 36,\n    },\n    \"linear_rank_one_good_start\": {\n        \"fun\": mark.least_squares(partial(linear_rank_one, dim_out=35)),\n        \"start_x\": [1] * 7,\n        # no unique solution\n        \"solution_x\": None,\n        \"start_criterion\": 1.165420e7,\n        \"solution_criterion\": 8.380281690143324,\n    },\n    \"linear_rank_one_bad_start\": {\n        \"fun\": mark.least_squares(partial(linear_rank_one, dim_out=35)),\n        \"start_x\": [10] * 7,\n        # no unique solution\n        \"solution_x\": None,\n        \"start_criterion\": 1.168591e9,\n        \"solution_criterion\": 8.380282,\n    },\n    \"linear_rank_one_zero_columns_rows_good_start\": {\n        \"fun\": mark.least_squares(\n            partial(linear_rank_one_zero_columns_rows, dim_out=35)\n        ),\n        \"start_x\": [1] * 7,\n        # no unique solution\n        \"solution_x\": None,\n        \"start_criterion\": 4.989195e6,\n        \"solution_criterion\": 9.880597014926506,\n    },\n    \"linear_rank_one_zero_columns_rows_bad_start\": {\n        \"fun\": mark.least_squares(\n            partial(linear_rank_one_zero_columns_rows, dim_out=35)\n        
),\n        \"start_x\": [10] * 7,\n        # no unique solution\n        \"solution_x\": None,\n        \"start_criterion\": 5.009356e8,\n        \"solution_criterion\": 9.880597014926506,\n    },\n    \"rosenbrock_good_start\": {\n        \"fun\": rosenbrock,\n        \"start_x\": [-1.2, 1],\n        \"solution_x\": [1, 1],\n        \"start_criterion\": 24.2,\n        \"solution_criterion\": 0,\n    },\n    \"rosenbrock_bad_start\": {\n        \"fun\": rosenbrock,\n        \"start_x\": [-12, 10],\n        \"solution_x\": [1, 1],\n        \"start_criterion\": 1.795769e6,\n        \"solution_criterion\": 0,\n    },\n    \"helical_valley_good_start\": {\n        \"fun\": helical_valley,\n        \"start_x\": [-1, 0, 0],\n        \"solution_x\": [1, 0, 0],\n        \"start_criterion\": 2500,\n        \"solution_criterion\": 0,\n    },\n    \"helical_valley_bad_start\": {\n        \"fun\": helical_valley,\n        \"start_x\": [-10, 0, 0],\n        \"solution_x\": [1, 0, 0],\n        \"start_criterion\": 10600,\n        \"solution_criterion\": 0,\n    },\n    \"powell_singular_good_start\": {\n        \"fun\": powell_singular,\n        \"start_x\": [3, -1, 0, 1],\n        \"solution_x\": [0] * 4,\n        \"start_criterion\": 215,\n        \"solution_criterion\": 0,\n    },\n    \"powell_singular_bad_start\": {\n        \"fun\": powell_singular,\n        \"start_x\": [30, -10, 0, 10],\n        \"solution_x\": [0] * 4,\n        \"start_criterion\": 1.615400e6,\n        \"solution_criterion\": 0,\n    },\n    \"freudenstein_roth_good_start\": {\n        \"fun\": freudenstein_roth,\n        \"start_x\": [0.5, -2],\n        \"solution_x\": freudenstein_roth_solution_x,\n        \"start_criterion\": 400.5,\n        \"solution_criterion\": 48.984253679240013,\n    },\n    \"freudenstein_roth_bad_start\": {\n        \"fun\": freudenstein_roth,\n        \"start_x\": [5, -20],\n        \"solution_x\": freudenstein_roth_solution_x,\n        \"start_criterion\": 1.545754e8,\n    
    \"solution_criterion\": 48.984253679240013,\n    },\n    \"bard_good_start\": {\n        \"fun\": mark.least_squares(partial(bard, y=y_vec)),\n        \"start_x\": [1] * 3,\n        \"solution_x\": bard_solution_x,\n        \"start_criterion\": 41.68170,\n        \"solution_criterion\": 0.00821487730657897,\n    },\n    \"bard_bad_start\": {\n        \"fun\": mark.least_squares(partial(bard, y=y_vec)),\n        \"start_x\": [10] * 3,\n        \"solution_x\": bard_solution_x,\n        \"start_criterion\": 1306.234,\n        \"solution_criterion\": 0.00821487730657897,\n    },\n    \"kowalik_osborne\": {\n        \"fun\": mark.least_squares(\n            partial(kowalik_osborne, y1=v_vec, y2=y2_vec)\n        ),\n        \"start_x\": [0.25, 0.39, 0.415, 0.39],\n        \"solution_x\": kowalik_osborne_solution_x,\n        \"start_criterion\": 5.313172e-3,\n        \"solution_criterion\": 0.00030750560384924,\n    },\n    \"meyer\": {\n        \"fun\": mark.least_squares(partial(meyer, y=y3_vec)),\n        \"start_x\": [0.02, 4000, 250],\n        \"solution_x\": meyer_solution_x,\n        \"start_criterion\": 1.693608e9,\n        \"solution_criterion\": 87.945855170395831,\n    },\n    \"watson_6_good_start\": {\n        \"fun\": watson,\n        \"start_x\": [0.5] * 6,\n        \"solution_x\": watson_6_solution_x,\n        \"start_criterion\": 16.43083,\n        \"solution_criterion\": 0.00228767005355236,\n    },\n    \"watson_6_bad_start\": {\n        \"fun\": watson,\n        \"start_x\": [5] * 6,\n        \"solution_x\": watson_6_solution_x,\n        \"start_criterion\": 2.323367e6,\n        \"solution_criterion\": 0.00228767005355236,\n    },\n    \"watson_9_good_start\": {\n        \"fun\": watson,\n        \"start_x\": [0.5] * 9,\n        \"solution_x\": watson_9_solution_x,\n        \"start_criterion\": 26.90417,\n        \"solution_criterion\": 1.399760e-6,\n    },\n    \"watson_9_bad_start\": {\n        \"fun\": watson,\n        
\"start_x\": [5] * 9,\n        \"solution_x\": watson_9_solution_x,\n        \"start_criterion\": 8.158877e6,\n        \"solution_criterion\": 1.399760e-6,\n    },\n    \"watson_12_good_start\": {\n        \"fun\": watson,\n        \"start_x\": [0.5] * 12,\n        \"solution_x\": watson_12_solution_x,\n        \"start_criterion\": 73.67821,\n        \"solution_criterion\": 4.722381e-10,\n    },\n    \"watson_12_bad_start\": {\n        \"fun\": watson,\n        \"start_x\": [5] * 12,\n        \"solution_x\": watson_12_solution_x,\n        \"start_criterion\": 2.059384e7,\n        \"solution_criterion\": 4.722381e-10,\n    },\n    \"box_3d\": {\n        \"fun\": mark.least_squares(partial(box_3d, dim_out=10)),\n        \"start_x\": [0, 10, 20],\n        \"solution_x\": [1, 10, 1],\n        \"start_criterion\": 1031.154,\n        \"solution_criterion\": 0,\n    },\n    \"jennrich_sampson\": {\n        \"fun\": mark.least_squares(partial(jennrich_sampson, dim_out=10)),\n        \"start_x\": [0.3, 0.4],\n        \"solution_x\": [0.2578252135686162] * 2,\n        \"start_criterion\": 4171.306,\n        \"solution_criterion\": 124.3621823556148,\n    },\n    \"brown_dennis_good_start\": {\n        \"fun\": mark.least_squares(partial(brown_dennis, dim_out=20)),\n        \"start_x\": [25, 5, -5, -1],\n        \"solution_x\": brown_dennis_solution_x,\n        \"start_criterion\": 7.926693e6,\n        \"solution_criterion\": 85822.20162635,\n    },\n    \"brown_dennis_bad_start\": {\n        \"fun\": mark.least_squares(partial(brown_dennis, dim_out=20)),\n        \"start_x\": [250, 50, -50, -10],\n        \"solution_x\": brown_dennis_solution_x,\n        \"start_criterion\": 3.081064e11,\n        \"solution_criterion\": 85822.20162635,\n    },\n    \"chebyquad_6\": {\n        \"fun\": mark.least_squares(partial(chebyquad, dim_out=6)),\n        \"start_x\": [i / 7 for i in range(1, 7)],\n        \"solution_x\": chebyquad_6_solution_x,\n        \"start_criterion\": 
4.642817e-2,\n        \"solution_criterion\": 0,\n    },\n    \"chebyquad_7\": {\n        \"fun\": mark.least_squares(partial(chebyquad, dim_out=7)),\n        \"start_x\": [i / 8 for i in range(1, 8)],\n        \"solution_x\": chebyquad_7_solution_x,\n        \"start_criterion\": 3.377064e-2,\n        \"solution_criterion\": 0,\n    },\n    \"chebyquad_8\": {\n        \"fun\": mark.least_squares(partial(chebyquad, dim_out=8)),\n        \"start_x\": [i / 9 for i in range(1, 9)],\n        \"solution_x\": chebyquad_8_solution_x,\n        \"start_criterion\": 3.861770e-2,\n        \"solution_criterion\": 0.003516873725677,\n    },\n    \"chebyquad_9\": {\n        \"fun\": mark.least_squares(partial(chebyquad, dim_out=9)),\n        \"start_x\": [i / 10 for i in range(1, 10)],\n        \"solution_x\": chebyquad_9_solution_x,\n        \"start_criterion\": 2.888298e-2,\n        \"solution_criterion\": 0,\n    },\n    \"chebyquad_10\": {\n        \"fun\": mark.least_squares(partial(chebyquad, dim_out=10)),\n        \"start_x\": [i / 11 for i in range(1, 11)],\n        \"solution_x\": chebyquad_10_solution_x,\n        \"start_criterion\": 3.376327e-2,\n        \"solution_criterion\": 0.00477271369637536,\n    },\n    \"chebyquad_11\": {\n        \"fun\": mark.least_squares(partial(chebyquad, dim_out=11)),\n        \"start_x\": [i / 12 for i in range(1, 12)],\n        \"solution_x\": chebyquad_11_solution_x,\n        \"start_criterion\": 2.674060e-2,\n        \"solution_criterion\": 0.00279976155186576,\n    },\n    \"brown_almost_linear\": {\n        \"fun\": brown_almost_linear,\n        \"start_x\": [0.5] * 10,\n        \"solution_x\": [1] * 10,\n        \"start_criterion\": 273.2480,\n        \"solution_criterion\": 0,\n    },\n    \"osborne_one\": {\n        \"fun\": mark.least_squares(partial(osborne_one, y=y4_vec)),\n        \"start_x\": [0.5, 1.5, 1, 0.01, 0.02],\n        \"solution_x\": osborne_one_solution_x,\n        \"start_criterion\": 16.17411,\n        
\"solution_criterion\": 0.00005464894697483,\n    },\n    \"osborne_two_good_start\": {\n        \"fun\": mark.least_squares(partial(osborne_two, y=y5_vec)),\n        \"start_x\": [1.3, 0.65, 0.65, 0.7, 0.6, 3, 5, 7, 2, 4.5, 5.5],\n        \"solution_x\": osborne_two_solution_x,\n        \"start_criterion\": 2.093420,\n        \"solution_criterion\": 0.0401377362935477,\n    },\n    \"osborne_two_bad_start\": {\n        \"fun\": mark.least_squares(partial(osborne_two, y=y5_vec)),\n        \"start_x\": [13, 6.5, 6.5, 7, 6, 30, 50, 70, 20, 45, 55],\n        \"solution_x\": osborne_two_solution_x,\n        \"start_criterion\": 199.6847,\n        \"solution_criterion\": 0.0401377362935477,\n    },\n    \"bdqrtic_8\": {\n        \"fun\": bdqrtic,\n        \"start_x\": [1] * 8,\n        \"solution_x\": bdqrtic_8_solution_x,\n        \"start_criterion\": 904,\n        \"solution_criterion\": 10.2389734213174,\n    },\n    \"bdqrtic_10\": {\n        \"fun\": bdqrtic,\n        \"start_x\": [1] * 10,\n        \"solution_x\": bdqrtic_10_solution_x,\n        \"start_criterion\": 1356,\n        \"solution_criterion\": 18.28116175359353,\n    },\n    \"bdqrtic_11\": {\n        \"fun\": bdqrtic,\n        \"start_x\": [1] * 11,\n        \"solution_x\": bdqrtic_11_solution_x,\n        \"start_criterion\": 1582,\n        \"solution_criterion\": 22.260591734883817,\n    },\n    \"bdqrtic_12\": {\n        \"fun\": bdqrtic,\n        \"start_x\": [1] * 12,\n        \"solution_x\": bdqrtic_12_solution_x,\n        \"start_criterion\": 1808,\n        \"solution_criterion\": 26.2727663967939,\n    },\n    \"cube_5\": {\n        \"fun\": cube,\n        \"start_x\": [0.5] * 5,\n        \"solution_x\": [1] * 5,\n        \"start_criterion\": 56.5,\n        \"solution_criterion\": 0,\n    },\n    \"cube_6\": {\n        \"fun\": cube,\n        \"start_x\": [0.5] * 6,\n        \"solution_x\": [1] * 6,\n        \"start_criterion\": 70.5625,\n        \"solution_criterion\": 0,\n    },\n    
\"cube_8\": {\n        \"fun\": cube,\n        \"start_x\": [0.5] * 8,\n        \"solution_x\": [1] * 8,\n        \"start_criterion\": 98.6875,\n        \"solution_criterion\": 0,\n    },\n    \"mancino_5_good_start\": {\n        \"fun\": mancino,\n        \"start_x\": get_start_points_mancino(5),\n        \"solution_x\": mancino_5_solution_x,\n        \"start_criterion\": 2.539084e9,\n        \"solution_criterion\": 0,\n    },\n    \"mancino_5_bad_start\": {\n        \"fun\": mancino,\n        \"start_x\": get_start_points_mancino(5, 10),\n        \"solution_x\": mancino_5_solution_x,\n        \"start_criterion\": 6.873795e12,\n        \"solution_criterion\": 0,\n    },\n    \"mancino_8\": {\n        \"fun\": mancino,\n        \"start_x\": get_start_points_mancino(8),\n        \"solution_x\": mancino_8_solution_x,\n        \"start_criterion\": 3.367961e9,\n        \"solution_criterion\": 0,\n    },\n    \"mancino_10\": {\n        \"fun\": mancino,\n        \"start_x\": get_start_points_mancino(10),\n        \"solution_x\": mancino_10_solution_x,\n        \"start_criterion\": 3.735127e9,\n        \"solution_criterion\": 0,\n    },\n    \"mancino_12_good_start\": {\n        \"fun\": mancino,\n        \"start_x\": get_start_points_mancino(12),\n        \"solution_x\": mancino_12_solution_x,\n        \"start_criterion\": 3.991072e9,\n        \"solution_criterion\": 0,\n    },\n    \"mancino_12_bad_start\": {\n        \"fun\": mancino,\n        \"start_x\": get_start_points_mancino(12, 10),\n        \"solution_x\": mancino_12_solution_x,\n        \"start_criterion\": 1.130015e13,\n        \"solution_criterion\": 0,\n    },\n    \"heart_eight_good_start\": {\n        \"fun\": mark.least_squares(\n            partial(\n                heart_eight,\n                y=np.array([-0.69, -0.044, -1.57, -1.31, -2.65, 2, -12.6, 9.48]),\n            )\n        ),\n        \"start_x\": [-0.3, -0.39, 0.3, -0.344, -1.2, 2.69, 1.59, -1.5],\n        \"solution_x\": 
heart_eight_solution_x,\n        \"start_criterion\": 9.385672,\n        \"solution_criterion\": 0,\n    },\n    \"heart_eight_bad_start\": {\n        \"fun\": mark.least_squares(\n            partial(\n                heart_eight,\n                y=np.array([-0.69, -0.044, -1.57, -1.31, -2.65, 2, -12.6, 9.48]),\n            )\n        ),\n        \"start_x\": [-3, -3.9, 3, -3.44, -12, 26.9, 15.9, -15],\n        \"solution_x\": heart_eight_solution_x,\n        \"start_criterion\": 3.365815e10,\n        \"solution_criterion\": 0,\n    },\n    \"brown_almost_linear_medium\": {\n        \"fun\": brown_almost_linear,\n        \"start_x\": [0.5] * 100,\n        \"solution_x\": [1] * 100,\n        \"start_criterion\": 2.524757e5,\n        \"solution_criterion\": 0,\n    },\n}\n"
  },
  {
    "path": "src/optimagic/benchmarking/noise_distributions.py",
    "content": "import numpy as np\n\n\ndef _standard_logistic(size, rng):\n    scale = np.sqrt(3) / np.pi\n    return rng.logistic(loc=0, scale=scale, size=size)\n\n\ndef _standard_uniform(size, rng):\n    ub = np.sqrt(3)\n    lb = -ub\n    return rng.uniform(lb, ub, size=size)\n\n\ndef _standard_normal(size, rng):\n    return rng.normal(size=size)\n\n\ndef _standard_gumbel(size, rng):\n    gamma = 0.577215664901532\n    scale = np.sqrt(6) / np.pi\n    loc = -scale * gamma\n    return rng.gumbel(loc=loc, scale=scale, size=size)\n\n\ndef _standard_laplace(size, rng):\n    return rng.laplace(scale=np.sqrt(0.5), size=size)\n\n\nNOISE_DISTRIBUTIONS = {\n    \"normal\": _standard_normal,\n    \"gumbel\": _standard_gumbel,\n    \"logistic\": _standard_logistic,\n    \"uniform\": _standard_uniform,\n    \"laplace\": _standard_laplace,\n}\n"
  },
  {
    "path": "src/optimagic/benchmarking/process_benchmark_results.py",
    "content": "import numpy as np\nimport pandas as pd\n\n\ndef process_benchmark_results(\n    problems, results, stopping_criterion, x_precision=1e-4, y_precision=1e-4\n):\n    \"\"\"Create tidy DataFrame with all information needed for the benchmarking plots.\n\n    Args:\n        problems (dict): optimagic benchmarking problems dictionary. Keys are the\n            problem names. Values contain information on the problem, including the\n            solution value.\n        results (dict): optimagic benchmarking results dictionary. Keys are\n            tuples of the form (problem, algorithm), values are dictionaries of the\n            collected information on the benchmark run, including 'criterion_history'\n            and 'time_history'.\n        stopping_criterion (str): one of \"x_and_y\", \"x_or_y\", \"x\", \"y\", or None.\n            Determines how convergence is determined from the two precisions.\n            If None, no convergence criterion is applied.\n        x_precision (float): how close an algorithm must have gotten to the\n            true parameter values (as percent of the Euclidean distance between start\n            and solution parameters) before the criterion for clipping and convergence\n            is fulfilled. Default is 1e-4.\n        y_precision (float): how close an algorithm must have gotten to the\n            true criterion values (as percent of the distance between start\n            and solution criterion value) before the criterion for clipping and\n            convergence is fulfilled. 
Default is 1e-4.\n\n    Returns:\n        pandas.DataFrame: tidy DataFrame with the following columns:\n            - problem\n            - algorithm\n            - n_evaluations\n            - walltime\n            - criterion\n            - criterion_normalized\n            - monotone_criterion\n            - monotone_criterion_normalized\n            - parameter_distance\n            - parameter_distance_normalized\n            - monotone_parameter_distance\n            - monotone_parameter_distance_normalized\n\n    \"\"\"\n    histories = []\n    infos = []\n\n    for (problem_name, algorithm_name), result in results.items():\n        history, is_converged = _process_one_result(\n            problem=problems[problem_name],\n            result=result,\n            stopping_criterion=stopping_criterion,\n            x_precision=x_precision,\n            y_precision=y_precision,\n        )\n        history[\"problem\"] = problem_name\n        history[\"algorithm\"] = algorithm_name\n        histories.append(history)\n\n        info = {\n            \"problem\": problem_name,\n            \"algorithm\": algorithm_name,\n            \"is_converged\": is_converged,\n        }\n        infos.append(info)\n\n    histories = pd.concat(histories, ignore_index=True)\n    infos = pd.DataFrame(infos).set_index([\"problem\", \"algorithm\"]).unstack()\n    infos.columns = [tup[1] for tup in infos.columns]\n\n    return histories, infos\n\n\ndef _process_one_result(\n    problem,\n    result,\n    stopping_criterion,\n    x_precision,\n    y_precision,\n):\n    # input processing\n    assert isinstance(x_precision, float)\n    assert isinstance(y_precision, float)\n\n    # extract information\n    _params_hist = result[\"params_history\"]\n    _solution_crit = problem[\"solution\"][\"value\"]\n    _start_crit = problem[\"start_criterion\"]\n    _solution_x = problem[\"solution\"].get(\"params\")\n    _start_x = problem[\"inputs\"][\"params\"]\n    _needed_step = 
(\n        np.linalg.norm(_solution_x - _start_x) if _solution_x is not None else np.nan\n    )\n    if isinstance(_solution_x, np.ndarray) and not np.isfinite(_solution_x).all():\n        _solution_x = None\n\n    # calculate the different transformations of criterion values\n    crit_hist = np.array(result[\"criterion_history\"])\n    monotone_crit_hist = np.minimum.accumulate(crit_hist)\n    normalized_crit_hist = (crit_hist - _solution_crit) / (_start_crit - _solution_crit)\n    normalized_monotone_crit_hist = (monotone_crit_hist - _solution_crit) / (\n        _start_crit - _solution_crit\n    )\n\n    # calculate the different versions of params distance if we have a solution\n    if _solution_x is not None:\n        params_dist = np.linalg.norm(np.array(_params_hist - _solution_x), axis=1)\n        monotone_params_dist = np.minimum.accumulate(params_dist)\n        params_dist_normalized = params_dist / _needed_step\n        monotone_params_dist_normalized = monotone_params_dist / _needed_step\n    else:\n        params_dist = np.full(len(_params_hist), np.nan)\n        monotone_params_dist = np.full(len(_params_hist), np.nan)\n        params_dist_normalized = np.full(len(_params_hist), np.nan)\n        monotone_params_dist_normalized = np.full(len(_params_hist), np.nan)\n\n    # put everything together in a dict\n    out_dict = {\n        \"n_evaluations\": np.arange(len(crit_hist)),\n        \"n_batches\": result[\"batches_history\"],\n        \"walltime\": result[\"time_history\"],\n        \"criterion\": crit_hist,\n        \"criterion_normalized\": normalized_crit_hist,\n        \"monotone_criterion\": monotone_crit_hist,\n        \"monotone_criterion_normalized\": normalized_monotone_crit_hist,\n        \"parameter_distance\": params_dist,\n        \"monotone_parameter_distance\": monotone_params_dist,\n        \"parameter_distance_normalized\": params_dist_normalized,\n        \"monotone_parameter_distance_normalized\": monotone_params_dist_normalized,\n    }\n\n    # calculate at which iteration the problem 
has been solved\n    is_converged = None\n    if stopping_criterion is not None:\n        is_converged_x, x_idx = _check_convergence(params_dist_normalized, x_precision)\n        is_converged_y, y_idx = _check_convergence(normalized_crit_hist, y_precision)\n\n        flag_aggregators = {\n            \"x\": lambda x, y: x,\n            \"y\": lambda x, y: y,\n            \"x_and_y\": lambda x, y: x and y,\n            \"x_or_y\": lambda x, y: x or y,\n        }\n\n        is_converged = flag_aggregators[stopping_criterion](\n            x=is_converged_x, y=is_converged_y\n        )\n\n        if is_converged:\n            idx_aggregators = {\n                \"x\": lambda x, y: x,\n                \"y\": lambda x, y: y,\n                \"x_and_y\": _aggregate_idxs_with_and,\n                \"x_or_y\": _aggregate_idxs_with_or,\n            }\n            solution_idx = idx_aggregators[stopping_criterion](x=x_idx, y=y_idx)\n            if solution_idx is not None:\n                out_dict = {k: v[: solution_idx + 1] for k, v in out_dict.items()}\n\n    # create a DataFrame and add metadata\n    out = pd.DataFrame(out_dict)\n\n    return out, is_converged\n\n\ndef _check_convergence(values, threshold):\n    boo = values <= threshold\n    if boo.any():\n        is_converged = True\n        idx = np.argmax(boo)\n    else:\n        is_converged = False\n        idx = None\n    return is_converged, idx\n\n\ndef _aggregate_idxs_with_and(x, y):\n    if x is None or y is None:\n        out = None\n    else:\n        out = max(x, y)\n    return out\n\n\ndef _aggregate_idxs_with_or(x, y):\n    if x is None and y is None:\n        out = None\n    elif x is None:\n        out = y\n    elif y is None:\n        out = x\n    else:\n        out = min(x, y)\n    return out\n"
  },
  {
    "path": "src/optimagic/benchmarking/run_benchmark.py",
    "content": "\"\"\"Functions to create, run and visualize optimization benchmarks.\n\nTO-DO:\n- Add other benchmark sets:\n    - finish medium scale problems from https://arxiv.org/pdf/1710.11005.pdf, Page 34.\n    - add scalar problems from https://github.com/AxelThevenot\n- Add option for deterministic noise or wiggle.\n\n\"\"\"\n\nimport numpy as np\nfrom pybaum import tree_just_flatten\n\nfrom optimagic import batch_evaluators\nfrom optimagic.algorithms import AVAILABLE_ALGORITHMS\nfrom optimagic.optimization.optimize import minimize\nfrom optimagic.parameters.tree_registry import get_registry\n\n\ndef run_benchmark(\n    problems,\n    optimize_options,\n    *,\n    batch_evaluator=\"joblib\",\n    n_cores=1,\n    error_handling=\"continue\",\n    max_criterion_evaluations=1_000,\n    disable_convergence=True,\n):\n    \"\"\"Run problems with different optimize options.\n\n    Args:\n        problems (dict): Nested dictionary with benchmark problems of the structure:\n            {\"name\": {\"inputs\": {...}, \"solution\": {...}, \"info\": {...}}}\n            where \"inputs\" are keyword arguments for ``minimize`` such as the criterion\n            function and start parameters. \"solution\" contains the entries \"params\" and\n            \"value\" and \"info\" might  contain information about the test problem.\n        optimize_options (list or dict): Either a list of algorithms or a Nested\n            dictionary that maps a name for optimizer settings\n            (e.g. ``\"lbfgsb_strict_criterion\"``) to a dictionary of keyword arguments\n            for arguments for ``minimize`` (e.g. 
``{\"algorithm\": \"scipy_lbfgsb\",\n            \"algo_options\": {\"convergence.ftol_rel\": 1e-12}}``).\n            Alternatively, the values can just be an algorithm which is then benchmarked\n            at default settings.\n        batch_evaluator (str or callable): See :ref:`batch_evaluators`.\n        n_cores (int): Number of optimizations that is run in parallel. Note that in\n            addition to that an optimizer might parallelize.\n        error_handling (str): One of \"raise\", \"continue\".\n        max_criterion_evaluations (int): Shortcut to set the maximum number of\n            criterion evaluations instead of passing them in via algo options. In case\n            an optimizer does not support this stopping criterion, we also use this as\n            max iterations.\n        disable_convergence (bool): If True, we set extremely strict convergence\n            convergence criteria by default, such that most optimizers will exploit\n            their full computation budget set by max_criterion_evaluations.\n\n    Returns:\n        dict: Nested Dictionary with information on the benchmark run. The outer keys\n            are tuples where the first entry is the name of the problem and the second\n            the name of the optimize options. 
The values are dicts with the entries:\n            \"params_history\", \"criterion_history\", \"time_history\" and \"solution\".\n\n    \"\"\"\n    if isinstance(batch_evaluator, str):\n        batch_evaluator = getattr(\n            batch_evaluators, f\"{batch_evaluator}_batch_evaluator\"\n        )\n    opt_options = _process_optimize_options(\n        optimize_options,\n        max_evals=max_criterion_evaluations,\n        disable_convergence=disable_convergence,\n    )\n\n    minimize_arguments, keys = _get_optimization_arguments_and_keys(\n        problems, opt_options\n    )\n\n    raw_results = batch_evaluator(\n        func=minimize,\n        arguments=minimize_arguments,\n        n_cores=n_cores,\n        error_handling=error_handling,\n        unpack_symbol=\"**\",\n    )\n\n    processing_arguments = []\n    for name, raw_result in zip(keys, raw_results, strict=False):\n        processing_arguments.append(\n            {\"optimize_result\": raw_result, \"problem\": problems[name[0]]}\n        )\n\n    results = batch_evaluator(\n        func=_process_one_result,\n        arguments=processing_arguments,\n        n_cores=n_cores,\n        error_handling=\"raise\",\n        unpack_symbol=\"**\",\n    )\n\n    results = dict(zip(keys, results, strict=False))\n\n    return results\n\n\ndef _process_optimize_options(raw_options, max_evals, disable_convergence):\n    if not isinstance(raw_options, dict):\n        dict_options = {}\n        for option in raw_options:\n            if isinstance(option, str):\n                dict_options[option] = option\n            else:\n                dict_options[option.__name__] = option\n    else:\n        dict_options = raw_options\n\n    default_algo_options = {}\n    if max_evals is not None:\n        default_algo_options[\"stopping.maxfun\"] = max_evals\n        default_algo_options[\"stopping.maxiter\"] = max_evals\n    if disable_convergence:\n        default_algo_options[\"convergence.ftol_rel\"] = 1e-14\n        
default_algo_options[\"convergence.xtol_rel\"] = 1e-14\n        default_algo_options[\"convergence.gtol_rel\"] = 1e-14\n\n    out_options = {}\n    for name, _option in dict_options.items():\n        if not isinstance(_option, dict):\n            option = {\"algorithm\": _option}\n        else:\n            option = _option.copy()\n\n        algo_options = {**default_algo_options, **option.get(\"algo_options\", {})}\n        algo_options = {k.replace(\".\", \"_\"): v for k, v in algo_options.items()}\n        option[\"algo_options\"] = algo_options\n\n        out_options[name] = option\n\n    return out_options\n\n\ndef _get_optimization_arguments_and_keys(problems, opt_options):\n    kwargs_list = []\n    names = []\n\n    for prob_name, problem in problems.items():\n        for option_name, options in opt_options.items():\n            algo = options[\"algorithm\"]\n            if isinstance(algo, str):\n                if algo not in AVAILABLE_ALGORITHMS:\n                    raise ValueError(f\"Invalid algorithm: {algo}\")\n                else:\n                    valid_options = set(AVAILABLE_ALGORITHMS[algo].__dataclass_fields__)\n\n            else:\n                valid_options = set(algo.__dataclass_fields__)\n\n            algo_options = options[\"algo_options\"]\n            algo_options = {k: v for k, v in algo_options.items() if k in valid_options}\n\n            kwargs = {**options, **problem[\"inputs\"]}\n            kwargs[\"algo_options\"] = algo_options\n            kwargs_list.append(kwargs)\n            names.append((prob_name, option_name))\n\n    return kwargs_list, names\n\n\ndef _process_one_result(optimize_result, problem):\n    \"\"\"Process the result of one optimization run.\n\n    Args:\n        
optimize_result (OptimizeResult): Result of one optimization run.\n        problem (dict): Problem specification.\n\n    Returns:\n        dict: Processed result.\n\n    \"\"\"\n    _registry = get_registry(extended=True)\n    _criterion = problem[\"noise_free_fun\"]\n    _start_x = problem[\"inputs\"][\"params\"]\n    _start_crit_value = _criterion(_start_x)\n    if isinstance(_start_crit_value, np.ndarray):\n        _start_crit_value = (_start_crit_value**2).sum()\n    _is_noisy = problem[\"noisy\"]\n    _solution_crit = problem[\"solution\"][\"value\"]\n\n    # This will happen if the optimization raised an error\n    if isinstance(optimize_result, str):\n        params_history_flat = [tree_just_flatten(_start_x, registry=_registry)]\n        criterion_history = [_start_crit_value]\n        time_history = [np.inf]\n        batches_history = [0]\n    else:\n        history = optimize_result.history\n        params_history = history.params\n        params_history_flat = [\n            tree_just_flatten(p, registry=_registry) for p in params_history\n        ]\n        if _is_noisy:\n            criterion_history = np.array([_criterion(p) for p in params_history])\n            if criterion_history.ndim == 2:\n                criterion_history = (criterion_history**2).sum(axis=1)\n        else:\n            criterion_history = history.fun\n        criterion_history = np.clip(criterion_history, _solution_crit, np.inf)\n        batches_history = history.batches\n        time_history = history.start_time\n\n    return {\n        \"params_history\": params_history_flat,\n        \"criterion_history\": criterion_history,\n        \"time_history\": time_history,\n        \"batches_history\": batches_history,\n        \"solution\": optimize_result,\n    }\n"
  },
  {
    "path": "src/optimagic/config.py",
    "content": "import importlib.util\nfrom pathlib import Path\n\nimport plotly.express as px\n\nDOCS_DIR = Path(__file__).parent.parent / \"docs\"\nOPTIMAGIC_ROOT = Path(__file__).parent\n\nPLOTLY_TEMPLATE = \"simple_white\"\nPLOTLY_PALETTE = px.colors.qualitative.Set2\n\n# The hex strings are obtained from the Plotly D3 qualitative palette.\nDEFAULT_PALETTE = [\n    \"#1F77B4\",\n    \"#FF7F0E\",\n    \"#2CA02C\",\n    \"#D62728\",\n    \"#9467BD\",\n    \"#8C564B\",\n    \"#E377C2\",\n    \"#7F7F7F\",\n    \"#BCBD22\",\n    \"#17BECF\",\n]\n\nDEFAULT_N_CORES = 1\n\nCRITERION_PENALTY_SLOPE = 0.1\nCRITERION_PENALTY_CONSTANT = 100\n\n\ndef _is_installed(module_name: str) -> bool:\n    \"\"\"Return True if the given module is installed, otherwise False.\"\"\"\n    return importlib.util.find_spec(module_name) is not None\n\n\n# ======================================================================================\n# Check Available Optimization Packages\n# ======================================================================================\n\nIS_PETSC4PY_INSTALLED = _is_installed(\"petsc4py\")\nIS_NLOPT_INSTALLED = _is_installed(\"nlopt\")\nIS_PYBOBYQA_INSTALLED = _is_installed(\"pybobyqa\")\nIS_DFOLS_INSTALLED = _is_installed(\"dfols\")\nIS_PYGMO_INSTALLED = _is_installed(\"pygmo\")\nIS_CYIPOPT_INSTALLED = _is_installed(\"cyipopt\")\nIS_FIDES_INSTALLED = _is_installed(\"fides\")\nIS_JAX_INSTALLED = _is_installed(\"jax\")\nIS_TRANQUILO_INSTALLED = _is_installed(\"tranquilo\")\nIS_NUMBA_INSTALLED = _is_installed(\"numba\")\nIS_IMINUIT_INSTALLED = _is_installed(\"iminuit\")\nIS_NEVERGRAD_INSTALLED = _is_installed(\"nevergrad\")\n# despite the similar names, the bayes_opt and bayes_optim packages are\n# completely unrelated. 
However, both of them are dependencies of nevergrad.\nIS_BAYESOPTIM_INSTALLED = _is_installed(\"bayes-optim\")\n# Note: There is a dependancy conflict with nevergrad and bayesian_optimization\n# installing nevergrad pins bayesian_optimization to 1.4.0,\n# but \"bayes_opt\" requires bayesian_optimization>=2.0.0 to work.\n# so if nevergrad is installed, bayes_opt will not work and vice-versa.\nIS_BAYESOPT_INSTALLED_AND_VERSION_NEWER_THAN_2 = (\n    _is_installed(\"bayes_opt\")\n    and importlib.metadata.version(\"bayesian_optimization\") > \"2.0.0\"\n)\nIS_GRADIENT_FREE_OPTIMIZERS_INSTALLED = _is_installed(\"gradient_free_optimizers\")\nIS_PYGAD_INSTALLED = _is_installed(\"pygad\")\nIS_PYSWARMS_INSTALLED = _is_installed(\"pyswarms\")\n\n# ======================================================================================\n# Check Available Visualization Packages\n# ======================================================================================\n\nIS_MATPLOTLIB_INSTALLED = _is_installed(\"matplotlib\")\nIS_BOKEH_INSTALLED = _is_installed(\"bokeh\")\nIS_ALTAIR_INSTALLED = _is_installed(\"altair\")\n"
  },
  {
    "path": "src/optimagic/constraints.py",
    "content": "from __future__ import annotations\n\nfrom abc import ABC, abstractmethod\nfrom dataclasses import KW_ONLY, dataclass\nfrom typing import Any, Callable\n\nimport numpy as np\nimport pandas as pd\nfrom numpy.typing import ArrayLike\n\nfrom optimagic.exceptions import InvalidConstraintError\nfrom optimagic.optimization.algo_options import CONSTRAINTS_ABSOLUTE_TOLERANCE\nfrom optimagic.typing import PyTree\n\n\nclass Constraint(ABC):\n    \"\"\"Base class for all constraints used for subtyping.\"\"\"\n\n    @abstractmethod\n    def _to_dict(self) -> dict[str, Any]:\n        pass\n\n\ndef identity_selector(x: PyTree) -> PyTree:\n    return x\n\n\n@dataclass(frozen=True)\nclass FixedConstraint(Constraint):\n    \"\"\"Constraint that fixes the selected parameters at their starting values.\n\n    Attributes:\n        selector: A function that takes as input the parameters and returns the subset\n            of parameters to be constrained. By default, all parameters are constrained.\n\n    Raises:\n        InvalidConstraintError: If the selector is not callable.\n\n    \"\"\"\n\n    selector: Callable[[PyTree], PyTree] = identity_selector\n\n    def _to_dict(self) -> dict[str, Any]:\n        return {\"type\": \"fixed\", \"selector\": self.selector}\n\n    def __post_init__(self) -> None:\n        if not callable(self.selector):\n            raise InvalidConstraintError(\"'selector' must be callable.\")\n\n\n@dataclass(frozen=True)\nclass IncreasingConstraint(Constraint):\n    \"\"\"Constraint that ensures the selected parameters are increasing.\n\n    Attributes:\n        selector: A function that takes as input the parameters and returns the subset\n            of parameters to be constrained. 
By default, all parameters are constrained.\n\n    Raises:\n        InvalidConstraintError: If the selector is not callable.\n\n    \"\"\"\n\n    selector: Callable[[PyTree], PyTree] = identity_selector\n\n    def _to_dict(self) -> dict[str, Any]:\n        return {\"type\": \"increasing\", \"selector\": self.selector}\n\n    def __post_init__(self) -> None:\n        if not callable(self.selector):\n            raise InvalidConstraintError(\"'selector' must be callable.\")\n\n\n@dataclass(frozen=True)\nclass DecreasingConstraint(Constraint):\n    \"\"\"Constraint that ensures that the selected parameters are decreasing.\n\n    Attributes:\n        selector: A function that takes as input the parameters and returns the subset\n            of parameters to be constrained. By default, all parameters are constrained.\n\n    Raises:\n        InvalidConstraintError: If the selector is not callable.\n\n    \"\"\"\n\n    selector: Callable[[PyTree], PyTree] = identity_selector\n\n    def _to_dict(self) -> dict[str, Any]:\n        return {\"type\": \"decreasing\", \"selector\": self.selector}\n\n    def __post_init__(self) -> None:\n        if not callable(self.selector):\n            raise InvalidConstraintError(\"'selector' must be callable.\")\n\n\n@dataclass(frozen=True)\nclass EqualityConstraint(Constraint):\n    \"\"\"Constraint that ensures that the selected parameters are equal.\n\n    Attributes:\n        selector: A function that takes as input the parameters and returns the subset\n            of parameters to be constrained. 
By default, all parameters are constrained.\n\n    Raises:\n        InvalidConstraintError: If the selector is not callable.\n\n    \"\"\"\n\n    selector: Callable[[PyTree], PyTree] = identity_selector\n\n    def _to_dict(self) -> dict[str, Any]:\n        return {\"type\": \"equality\", \"selector\": self.selector}\n\n    def __post_init__(self) -> None:\n        if not callable(self.selector):\n            raise InvalidConstraintError(\"'selector' must be callable.\")\n\n\n@dataclass(frozen=True)\nclass ProbabilityConstraint(Constraint):\n    \"\"\"Constraint that ensures that the selected parameters are probabilities.\n\n    This constraint ensures that each of the selected parameters is positive and that\n    the sum of the selected parameters is 1.\n\n    Attributes:\n        selector: A function that takes as input the parameters and returns the subset\n            of parameters to be constrained. By default, all parameters are constrained.\n\n    Raises:\n        InvalidConstraintError: If the selector is not callable.\n\n    \"\"\"\n\n    selector: Callable[[PyTree], PyTree] = identity_selector\n\n    def _to_dict(self) -> dict[str, Any]:\n        return {\"type\": \"probability\", \"selector\": self.selector}\n\n    def __post_init__(self) -> None:\n        if not callable(self.selector):\n            raise InvalidConstraintError(\"'selector' must be callable.\")\n\n\n@dataclass(frozen=True)\nclass PairwiseEqualityConstraint(Constraint):\n    \"\"\"Constraint that ensures that groups of selected parameters are equal.\n\n    This constraint ensures that each pair between the selected parameters is equal.\n\n    Attributes:\n        selectors: A list of functions that take as input the parameters and return the\n            subsets of parameters to be constrained.\n\n    Raises:\n        InvalidConstraintError: If the selector is not callable.\n\n    \"\"\"\n\n    selectors: list[Callable[[PyTree], PyTree]]\n\n    def _to_dict(self) -> dict[str, Any]:\n      
  return {\"type\": \"pairwise_equality\", \"selectors\": self.selectors}\n\n    def __post_init__(self) -> None:\n        if len(self.selectors) < 2:\n            raise InvalidConstraintError(\"At least two selectors must be provided.\")\n\n        if not all(callable(s) for s in self.selectors):\n            raise InvalidConstraintError(\"All selectors must be callable.\")\n\n\n@dataclass(frozen=True)\nclass FlatCovConstraint(Constraint):\n    \"\"\"Constraint that ensures the selected parameters are a valid covariance matrix.\n\n    Attributes:\n        selector: A function that takes as input the parameters and returns the subset\n            of parameters to be constrained. By default, all parameters are constrained.\n        regularization: Helps in guiding the optimization towards finding a\n            positive definite covariance matrix instead of only a positive semi-definite\n            matrix. Larger values correspond to a higher likelihood of positive\n            definiteness. 
Defaults to 0.\n\n    Raises:\n        InvalidConstraintError: If the selector is not callable or regularization is\n            not a non-negative float or int.\n\n    \"\"\"\n\n    selector: Callable[[PyTree], PyTree] = identity_selector\n    _: KW_ONLY\n    regularization: float = 0.0\n\n    def _to_dict(self) -> dict[str, Any]:\n        return {\n            \"type\": \"covariance\",\n            \"selector\": self.selector,\n            \"regularization\": self.regularization,\n        }\n\n    def __post_init__(self) -> None:\n        if not callable(self.selector):\n            raise InvalidConstraintError(\"'selector' must be callable.\")\n\n        if not isinstance(self.regularization, float | int) or self.regularization < 0:\n            raise InvalidConstraintError(\n                \"'regularization' must be a non-negative float or int.\"\n            )\n\n\n@dataclass(frozen=True)\nclass FlatSDCorrConstraint(Constraint):\n    \"\"\"Constraint that ensures the selected parameters are a valid correlation matrix.\n\n    This constraint ensures that each of the selected parameters is positive and that\n    the sum of the selected parameters is 1.\n\n    Attributes:\n        selector: A function that takes as input the parameters and returns the subset\n            of parameters to be constrained. By default, all parameters are constrained.\n        regularization: Helps in guiding the optimization towards finding a\n            positive definite covariance matrix instead of only a positive semi-definite\n            matrix. Larger values correspond to a higher likelihood of positive\n            definiteness. 
Defaults to 0.\n\n    Raises:\n        InvalidConstraintError: If the selector is not callable or regularization is\n            not a non-negative float or int.\n\n    \"\"\"\n\n    selector: Callable[[PyTree], PyTree] = identity_selector\n    _: KW_ONLY\n    regularization: float = 0.0\n\n    def _to_dict(self) -> dict[str, Any]:\n        return {\n            \"type\": \"sdcorr\",\n            \"selector\": self.selector,\n            \"regularization\": self.regularization,\n        }\n\n    def __post_init__(self) -> None:\n        if not callable(self.selector):\n            raise InvalidConstraintError(\"'selector' must be callable.\")\n\n        if not isinstance(self.regularization, float | int) or self.regularization < 0:\n            raise InvalidConstraintError(\n                \"'regularization' must be a non-negative float or int.\"\n            )\n\n\n@dataclass(frozen=True)\nclass LinearConstraint(Constraint):\n    \"\"\"Constraint that bounds a linear combination of the selected parameters.\n\n    This constraint ensures that a linear combination of the selected parameters with\n    the 'weights' is either equal to 'value', or is bounded by 'lower_bound' and\n    'upper_bound'.\n\n    Attributes:\n        selector: A function that takes as input the parameters and returns the subset\n            of parameters to be constrained. By default, all parameters are constrained.\n        weights: The weights for the linear combination. If a scalar is provided, it is\n            used for all parameters. Otherwise, it must have the same structure as the\n            selected parameters.\n        lower_bound: The lower bound for the linear combination. Defaults to None.\n        upper_bound: The upper bound for the linear combination. Defaults to None.\n        value: The value to compare the linear combination to. 
Defaults to None.\n\n    Raises:\n        InvalidConstraintError: If the selector is not callable, or if the weights,\n            lower_bound, upper_bound, or value are not valid.\n\n    \"\"\"\n\n    selector: Callable[[PyTree], ArrayLike | \"pd.Series[float]\" | float | int] = (\n        identity_selector\n    )\n    _: KW_ONLY\n    weights: ArrayLike | \"pd.Series[float]\" | float | int | None = None\n    lower_bound: float | int | None = None\n    upper_bound: float | int | None = None\n    value: float | int | None = None\n\n    def _to_dict(self) -> dict[str, Any]:\n        return {\n            \"type\": \"linear\",\n            \"selector\": self.selector,\n            \"weights\": self.weights,\n            **_select_non_none(\n                lower_bound=self.lower_bound,\n                upper_bound=self.upper_bound,\n                value=self.value,\n            ),\n        }\n\n    def __post_init__(self) -> None:\n        if not callable(self.selector):\n            raise InvalidConstraintError(\"'selector' must be callable.\")\n\n        if _all_none(self.lower_bound, self.upper_bound, self.value):\n            raise InvalidConstraintError(\n                \"At least one of 'lower_bound', 'upper_bound', or 'value' must be \"\n                \"non-None.\"\n            )\n        if self.value is not None and not _all_none(self.lower_bound, self.upper_bound):\n            raise InvalidConstraintError(\n                \"'value' cannot be used with 'lower_bound' or 'upper_bound'.\"\n            )\n\n        if not isinstance(self.weights, np.ndarray | list | pd.Series | float | int):\n            raise InvalidConstraintError(\n                \"'weights' must be an array-like, a pandas Series, a float, or an int.\"\n            )\n\n        if self.lower_bound is not None and not isinstance(\n            self.lower_bound, float | int\n        ):\n            raise InvalidConstraintError(\"'lower_bound' must be a float or an int.\")\n\n        if 
self.upper_bound is not None and not isinstance(\n            self.upper_bound, float | int\n        ):\n            raise InvalidConstraintError(\"'upper_bound' must be a float or an int.\")\n\n        if self.value is not None and not isinstance(self.value, float | int):\n            raise InvalidConstraintError(\"'value' must be a float or an int.\")\n\n\n@dataclass(frozen=True)\nclass NonlinearConstraint(Constraint):\n    \"\"\"Constraint that bounds a nonlinear function of the selected parameters.\n\n    This constraint ensures that a nonlinear function of the selected parameters is\n    either equal to 'value', or is bounded by 'lower_bound' and 'upper_bound'.\n\n    Attributes:\n        selector: A function that takes as input the parameters and returns the subset\n            of parameters to be constrained. By default, all parameters are constrained.\n        func: The constraint function which is applied to the selected parameters.\n        derivative: The derivative of the constraint function with respect to the\n            selected parameters. Defaults to None.\n        lower_bound: The lower bound for the nonlinear function. Can be a scalar or of\n            the same structure as output of the constraint function. Defaults to None.\n        upper_bound: The upper bound for the nonlinear function. Can be a scalar or of\n            the same structure as output of the constraint function. Defaults to None.\n        value: The value to compare the nonlinear function to. Can be a scalar or of\n            the same structure as output of the constraint function. Defaults to None.\n        tol: The tolerance for the constraint function. 
Defaults to\n            `optimagic.optimization.algo_options.CONSTRAINTS_ABSOLUTE_TOLERANCE`.\n\n    Raises:\n        InvalidConstraintError: If the selector is not callable, or if the func,\n            derivative, lower_bound, upper_bound, or value are not valid.\n\n    \"\"\"\n\n    selector: Callable[[PyTree], PyTree] = identity_selector\n    _: KW_ONLY\n    func: Callable[[PyTree], ArrayLike | \"pd.Series[float]\" | float] | None = None\n    derivative: Callable[[PyTree], PyTree] | None = None\n    lower_bound: ArrayLike | \"pd.Series[float]\" | float | None = None\n    upper_bound: ArrayLike | \"pd.Series[float]\" | float | None = None\n    value: ArrayLike | \"pd.Series[float]\" | float | None = None\n    tol: float = CONSTRAINTS_ABSOLUTE_TOLERANCE\n\n    def _to_dict(self) -> dict[str, Any]:\n        return {\n            \"type\": \"nonlinear\",\n            \"selector\": self.selector,\n            **_select_non_none(\n                func=self.func,\n                derivative=self.derivative,\n                # In the dict representation, we write _bounds instead of _bound.\n                lower_bounds=self.lower_bound,\n                upper_bounds=self.upper_bound,\n                value=self.value,\n                tol=self.tol,\n            ),\n        }\n\n    def __post_init__(self) -> None:\n        if not callable(self.selector):\n            raise InvalidConstraintError(\"'selector' must be callable.\")\n\n        if _all_none(self.lower_bound, self.upper_bound, self.value):\n            raise InvalidConstraintError(\n                \"At least one of 'lower_bound', 'upper_bound', or 'value' must be \"\n                \"non-None.\"\n            )\n        if self.value is not None and not _all_none(self.lower_bound, self.upper_bound):\n            raise InvalidConstraintError(\n                \"'value' cannot be used with 'lower_bound' or 'upper_bound'.\"\n            )\n\n        if self.tol is not None and (\n            not 
isinstance(self.tol, float | int) or self.tol < 0\n        ):\n            raise InvalidConstraintError(\"'tol' must be non-negative.\")\n\n        if self.func is None or not callable(self.func):\n            raise InvalidConstraintError(\"'func' must be callable.\")\n\n        if self.derivative is not None and not callable(self.derivative):\n            raise InvalidConstraintError(\"'derivative' must be callable.\")\n\n\ndef _all_none(*args: Any) -> bool:\n    return all(v is None for v in args)\n\n\ndef _select_non_none(**kwargs: Any) -> dict[str, Any]:\n    return {k: v for k, v in kwargs.items() if v is not None}\n"
  },
  {
    "path": "src/optimagic/decorators.py",
    "content": "\"\"\"This module contains various decorators.\n\nThere are two kinds of decorators defined in this module which consists of either two or\nthree nested functions. The former are decorators without and the latter with arguments.\n\nFor more information on decorators, see this `guide\n`_ on https://realpython.com\n\nwhich\nprovides a comprehensive overview.\n\n.. _guide:\n\nhttps://realpython.com/primer-on-python-decorators/\n\n\"\"\"\n\nimport functools\nimport warnings\n\nfrom optimagic.exceptions import get_traceback\n\n\ndef catch(\n    func=None,\n    *,\n    exception=Exception,\n    exclude=(KeyboardInterrupt, SystemExit),\n    onerror=None,\n    default=None,\n    warn=True,\n    reraise=False,\n):\n    \"\"\"Catch and handle exceptions.\n\n    This decorator can be used with and without additional arguments.\n\n    Args:\n        exception (Exception or tuple): One or several exceptions that\n            are caught and handled. By default all Exceptions are\n            caught and handled.\n        exclude (Exception or tuple): One or several exceptionts that\n            are not caught. By default those are KeyboardInterrupt and\n            SystemExit.\n        onerror (None or Callable): Callable that takes an Exception\n            as only argument. This is called when an exception occurs.\n        default: Value that is returned when as the output of func when\n            an exception occurs. 
Can be one of the following:\n            - a constant\n            - \"__traceback__\", in this case a string with a traceback is returned.\n            - callable with the same signature as func.\n        warn (bool): If True, the exception is converted to a warning.\n        reraise (bool): If True, the exception is raised after handling it.\n\n    \"\"\"\n\n    def decorator_catch(func):\n        @functools.wraps(func)\n        def wrapper_catch(*args, **kwargs):\n            try:\n                res = func(*args, **kwargs)\n            except exclude:\n                raise\n            except exception as e:\n                if onerror is not None:\n                    onerror(e)\n\n                if reraise:\n                    raise e\n\n                tb = get_traceback()\n\n                if warn:\n                    msg = f\"The following exception was caught:\\n\\n{tb}\"\n                    warnings.warn(msg)\n\n                if default == \"__traceback__\":\n                    res = tb\n                elif callable(default):\n                    res = default(*args, **kwargs)\n                else:\n                    res = default\n            return res\n\n        return wrapper_catch\n\n    if callable(func):\n        return decorator_catch(func)\n    else:\n        return decorator_catch\n\n\ndef unpack(func=None, symbol=None):\n    def decorator_unpack(func):\n        if symbol is None:\n\n            @functools.wraps(func)\n            def wrapper_unpack(arg):\n                return func(arg)\n\n        elif symbol == \"*\":\n\n            @functools.wraps(func)\n            def wrapper_unpack(arg):\n                return func(*arg)\n\n        elif symbol == \"**\":\n\n            @functools.wraps(func)\n            def wrapper_unpack(arg):\n                return func(**arg)\n\n        return wrapper_unpack\n\n    if callable(func):\n        return decorator_unpack(func)\n    else:\n        return decorator_unpack\n\n\ndef 
deprecated(func, msg):\n    def decorator_deprecated(func):\n        @functools.wraps(func)\n        def wrapper_deprecated(*args, **kwargs):\n            warnings.warn(msg, FutureWarning)\n            return func(*args, **kwargs)\n\n        return wrapper_deprecated\n\n    if callable(func):\n        return decorator_deprecated(func)\n    else:\n        return decorator_deprecated\n"
  },
  {
    "path": "src/optimagic/deprecations.py",
    "content": "import logging\nimport warnings\nfrom dataclasses import replace\nfrom functools import wraps\nfrom pathlib import Path\nfrom typing import Any, Callable, ParamSpec, cast\n\nfrom optimagic import mark\nfrom optimagic.constraints import Constraint, InvalidConstraintError\nfrom optimagic.logging.logger import (\n    LogOptions,\n    SQLiteLogOptions,\n)\nfrom optimagic.optimization.fun_value import (\n    LeastSquaresFunctionValue,\n    LikelihoodFunctionValue,\n    ScalarFunctionValue,\n)\nfrom optimagic.parameters.bounds import Bounds\nfrom optimagic.typing import AggregationLevel\n\n_logger = logging.getLogger(__name__)\n\n\ndef throw_criterion_future_warning():\n    msg = (\n        \"To align optimagic with scipy.optimize, the `criterion` argument has been \"\n        \"renamed to `fun`. Please use `fun` instead of `criterion`. Using `criterion` \"\n        \" will become an error in optimagic version 0.6.0 and later.\"\n    )\n    warnings.warn(msg, FutureWarning)\n\n\ndef throw_criterion_kwargs_future_warning():\n    msg = (\n        \"To align optimagic with scipy.optimize, the `criterion_kwargs` argument has \"\n        \"been renamed to `fun_kwargs`. Please use `fun_kwargs` instead of \"\n        \"`criterion_kwargs`. Using `criterion_kwargs` will become an error in \"\n        \"optimagic version 0.6.0 and later.\"\n    )\n    warnings.warn(msg, FutureWarning)\n\n\ndef throw_derivative_future_warning():\n    msg = (\n        \"To align optimagic with scipy.optimize, the `derivative` argument has been \"\n        \"renamed to `jac`. Please use `jac` instead of `derivative`. Using `derivative`\"\n        \" will become an error in optimagic version 0.6.0 and later. 
For more details \"\n        \"see the documentation: \"\n        \"https://optimagic.readthedocs.io/en/latest/how_to/how_to_derivatives.html\"\n    )\n    warnings.warn(msg, FutureWarning)\n\n\ndef throw_derivative_kwargs_future_warning():\n    msg = (\n        \"To align optimagic with scipy.optimize, the `derivative_kwargs` argument has \"\n        \"been renamed to `jac_kwargs`. Please use `jac_kwargs` instead of \"\n        \"`derivative_kwargs`. Using `derivative_kwargs` will become an error in \"\n        \"optimagic version 0.6.0 and later. For more details see the documentation: \"\n        \"https://optimagic.readthedocs.io/en/latest/how_to/how_to_derivatives.html\"\n    )\n    warnings.warn(msg, FutureWarning)\n\n\ndef throw_criterion_and_derivative_future_warning():\n    msg = (\n        \"To align optimagic with scipy.optimize, the `criterion_and_derivative` \"\n        \"argument has been renamed to `fun_and_jac`. Please use `fun_and_jac` \"\n        \"instead of `criterion_and_derivative`. Using `criterion_and_derivative` \"\n        \"will become an error in optimagic version 0.6.0 and later. For more details \"\n        \"see the documentation: \"\n        \"https://optimagic.readthedocs.io/en/latest/how_to/how_to_derivatives.html\"\n    )\n    warnings.warn(msg, FutureWarning)\n\n\ndef throw_criterion_and_derivative_kwargs_future_warning():\n    msg = (\n        \"To align optimagic with scipy.optimize, the `criterion_and_derivative_kwargs` \"\n        \"argument has been renamed to `fun_and_jac_kwargs`. Please use \"\n        \"`fun_and_jac_kwargs` instead of `criterion_and_derivative_kwargs`. Using \"\n        \"`criterion_and_derivative_kwargs` will become an error in optimagic version \"\n        \"0.6.0 and later. 
For more details see the documentation: \"\n        \"https://optimagic.readthedocs.io/en/latest/how_to/how_to_derivatives.html\"\n    )\n    warnings.warn(msg, FutureWarning)\n\n\ndef throw_scaling_options_future_warning():\n    msg = (\n        \"Specifying scaling options via the argument `scaling_options` is deprecated \"\n        \"and will be removed in optimagic version 0.6.0 and later. You can pass these \"\n        \"options directly to the `scaling` argument instead.\"\n    )\n    warnings.warn(msg, FutureWarning)\n\n\ndef throw_multistart_options_future_warning():\n    msg = (\n        \"Specifying multistart options via the argument `multistart_options` is \"\n        \"deprecated and will be removed in optimagic version 0.6.0 and later. You can \"\n        \"pass these options directly to the `multistart` argument instead. For more \"\n        \"details see the documentation: \"\n        \"https://optimagic.readthedocs.io/en/latest/how_to/how_to_multistart.html\"\n    )\n    warnings.warn(msg, FutureWarning)\n\n\ndef throw_derivatives_step_ratio_future_warning():\n    msg = (\n        \"The `step_ratio` argument is deprecated and will be removed alongside \"\n        \"Richardson extrapolation in optimagic version 0.6.0.\"\n    )\n    warnings.warn(msg, FutureWarning)\n\n\ndef throw_derivatives_n_steps_future_warning():\n    msg = (\n        \"The `n_steps` argument is deprecated and will be removed alongside \"\n        \"Richardson extrapolation in optimagic version 0.6.0.\"\n    )\n    warnings.warn(msg, FutureWarning)\n\n\ndef throw_derivatives_return_info_future_warning():\n    msg = (\n        \"The `return_info` argument is deprecated and will be removed alongside \"\n        \"Richardson extrapolation in optimagic version 0.6.0.\"\n    )\n    warnings.warn(msg, FutureWarning)\n\n\ndef throw_derivatives_return_func_value_future_warning():\n    msg = (\n        \"The `return_func_value` argument is deprecated and will be removed in \"\n        
\"optimagic version 0.6.0.\"\n    )\n    warnings.warn(msg, FutureWarning)\n\n\ndef throw_numdiff_result_func_evals_future_warning():\n    msg = (\n        \"The `func_evals` attribute is deprecated and will be removed in optimagic \"\n        \"version 0.6.0.\"\n    )\n    warnings.warn(msg, FutureWarning)\n\n\ndef throw_numdiff_result_derivative_candidates_future_warning():\n    msg = (\n        \"The `derivative_candidates` attribute is deprecated and will be removed in \"\n        \"optimagic version 0.6.0.\"\n    )\n    warnings.warn(msg, FutureWarning)\n\n\ndef throw_numdiff_options_deprecated_in_estimate_ml_future_warning():\n    msg = (\n        \"The argument `numdiff_options` is deprecated for `estimate_ml` and will be \"\n        \"removed in estimagic version 0.6.0. Please use the `jacobian_numdiff_options` \"\n        \"and `hessian_numdiff_options` arguments instead to specify the options for \"\n        \"the first and second numerical derivative estimation.\"\n    )\n    warnings.warn(msg, FutureWarning)\n\n\ndef throw_numdiff_options_deprecated_in_estimate_msm_future_warning():\n    msg = (\n        \"The argument `numdiff_options` is deprecated for `estimate_msm` and will be \"\n        \"removed in estimagic version 0.6.0. Please use the `jacobian_numdiff_options` \"\n        \"argument instead.\"\n    )\n    warnings.warn(msg, FutureWarning)\n\n\ndef throw_dict_access_future_warning(attribute, obj_name):\n    msg = (\n        f\"The dictionary access for '{attribute}' is deprecated and will be removed \"\n        \"in optimagic version 0.6.0. Please use the new attribute access instead: \"\n        f\"`{obj_name}.{attribute}`.\"\n    )\n    warnings.warn(msg, FutureWarning)\n\n\ndef throw_none_valued_batch_evaluator_warning():\n    msg = (\n        \"Passing `None` as the `batch_evaluator` is deprecated and will be \"\n        \"removed in optimagic version 0.6.0. 
Please use the string 'joblib' instead to \"\n        \"use the joblib batch evaluator by default.\"\n    )\n    warnings.warn(msg, FutureWarning)\n\n\ndef throw_make_subplot_kwargs_in_slice_plot_future_warning():\n    msg = (\n        \"The `make_subplot_kwargs` argument in `slice_plot` is deprecated and will be \"\n        \"removed in optimagic version 0.6.0. Customization of the subplots can be done \"\n        \"by modifying the returned figure.\"\n    )\n    warnings.warn(msg, FutureWarning)\n\n\ndef replace_and_warn_about_deprecated_algo_options(algo_options):\n    if not isinstance(algo_options, dict):\n        return algo_options\n\n    algo_options = {k.replace(\".\", \"_\"): v for k, v in algo_options.items()}\n\n    replacements = {\n        \"stopping_max_criterion_evaluations\": \"stopping_maxfun\",\n        \"stopping_max_iterations\": \"stopping_maxiter\",\n        \"convergence_absolute_criterion_tolerance\": \"convergence_ftol_abs\",\n        \"convergence_relative_criterion_tolerance\": \"convergence_ftol_rel\",\n        \"convergence_scaled_criterion_tolerance\": \"convergence_ftol_scaled\",\n        \"convergence_absolute_params_tolerance\": \"convergence_xtol_abs\",\n        \"convergence_relative_params_tolerance\": \"convergence_xtol_rel\",\n        \"convergence_absolute_gradient_tolerance\": \"convergence_gtol_abs\",\n        \"convergence_relative_gradient_tolerance\": \"convergence_gtol_rel\",\n        \"convergence_scaled_gradient_tolerance\": \"convergence_gtol_scaled\",\n    }\n\n    present = sorted(set(algo_options) & set(replacements))\n    if present:\n        msg = (\n            \"The following keys in `algo_options` are deprecated and will be removed \"\n            \"in optimagic version 0.6.0 and later. 
Please replace them as follows:\\n\"\n        )\n        for k in present:\n            msg += f\"  {k} -> {replacements[k]}\\n\"\n\n        warnings.warn(msg, FutureWarning)\n\n    out = {k: v for k, v in algo_options.items() if k not in present}\n    for k in present:\n        out[replacements[k]] = algo_options[k]\n\n    return out\n\n\ndef replace_and_warn_about_deprecated_bounds(\n    lower_bounds,\n    upper_bounds,\n    bounds,\n    soft_lower_bounds=None,\n    soft_upper_bounds=None,\n):\n    old_bounds = {\n        \"lower\": lower_bounds,\n        \"upper\": upper_bounds,\n        \"soft_lower\": soft_lower_bounds,\n        \"soft_upper\": soft_upper_bounds,\n    }\n\n    old_present = [k for k, v in old_bounds.items() if v is not None]\n\n    if old_present:\n        substring = \", \".join(f\"{b}_bound\" for b in old_present)\n        substring = substring.replace(\", \", \", and \", -1)\n        msg = (\n            f\"Specifying bounds via the arguments {substring} is \"\n            \"deprecated and will be removed in optimagic version 0.6.0 and later. 
\"\n            \"Please use the `bounds` argument instead.\"\n        )\n        warnings.warn(msg, FutureWarning)\n\n    if bounds is None and old_present:\n        bounds = Bounds(**old_bounds)\n\n    return bounds\n\n\ndef convert_dict_to_function_value(candidate):\n    \"\"\"Convert the deprecated dictionary output to a suitable FunctionValue object.\n\n    No warning is raised here because this function will be called repeatedly!\n\n    \"\"\"\n    special_keys = [\"value\", \"contributions\", \"root_contributions\"]\n\n    if is_dict_output(candidate):\n        info = {k: v for k, v in candidate.items() if k not in special_keys}\n        if \"root_contributions\" in candidate:\n            out = LeastSquaresFunctionValue(candidate[\"root_contributions\"], info)\n        elif \"contributions\" in candidate:\n            out = LikelihoodFunctionValue(candidate[\"contributions\"], info)\n        else:\n            out = ScalarFunctionValue(candidate[\"value\"], info)\n    else:\n        out = candidate\n\n    return out\n\n\ndef is_dict_output(candidate):\n    \"\"\"Check if the output is a dictionary with special keys.\"\"\"\n    special_keys = [\"value\", \"contributions\", \"root_contributions\"]\n    return isinstance(candidate, dict) and any(k in candidate for k in special_keys)\n\n\ndef throw_dict_output_warning():\n    msg = (\n        \"Returning a dictionary with the special keys 'value', 'contributions', or \"\n        \"'root_contributions' is deprecated and will be removed in optimagic version \"\n        \"0.6.0 and later. Please use the optimagic.mark.scalar, optimagic.mark.\"\n        \"least_squares, or optimagic.mark.likelihood decorators to indicate the type \"\n        \"of problem you are solving. Use optimagic.FunctionValue objects to return \"\n        \"additional information for the logging. 
Please see the documentation for more \"\n        \"details: https://optimagic.readthedocs.io/en/latest/how_to/how_to_criterion_function.html\"\n    )\n    warnings.warn(msg, FutureWarning)\n\n\ndef infer_problem_type_from_dict_output(output):\n    if \"root_contributions\" in output:\n        out = AggregationLevel.LEAST_SQUARES\n    elif \"contributions\" in output:\n        out = AggregationLevel.LIKELIHOOD\n    else:\n        out = AggregationLevel.SCALAR\n    return out\n\n\nP = ParamSpec(\"P\")\n\n\ndef replace_dict_output(func: Callable[P, Any]) -> Callable[P, Any]:\n    \"\"\"Replace the deprecated dictionary output by a suitable FunctionValue.\n\n    This has no effect if the function does not return a dictionary with at least one of\n    the special keys \"value\", \"contributions\" or \"root_contributions\" or a tuple where\n    the first entry is such a dictionary.\n\n    This decorator does not add a warning because the function will be evaluated many\n    times and the warning would pop up too often.\n\n    \"\"\"\n\n    @wraps(func)\n    def wrapper(*args: P.args, **kwargs: P.kwargs) -> Any:\n        raw = func(*args, **kwargs)\n        # fun and jac case\n        if isinstance(raw, tuple):\n            out = (convert_dict_to_function_value(raw[0]), raw[1])\n        # fun case\n        else:\n            out = convert_dict_to_function_value(raw)\n        return out\n\n    return wrapper\n\n\ndef throw_key_warning_in_derivatives():\n    msg = (\n        \"The `key` argument in first_derivative and second_derivative is deprecated \"\n        \"and will be removed in optimagic version 0.6.0 and later. Please use the \"\n        \"`unpacker` argument instead. 
While `key` was a string, `unpacker` is a \"\n        \"callable that takes the output of `func` and returns the desired output that \"\n        \"is then differentiated.\"\n    )\n    warnings.warn(msg, FutureWarning)\n\n\ndef throw_dict_constraints_future_warning_if_required(\n    constraints: list[dict[str, Any]] | dict[str, Any],\n) -> None:\n    replacements = {\n        \"fixed\": \"optimagic.FixedConstraint\",\n        \"increasing\": \"optimagic.IncreasingConstraint\",\n        \"decreasing\": \"optimagic.DecreasingConstraint\",\n        \"equality\": \"optimagic.EqualityConstraint\",\n        \"probability\": \"optimagic.ProbabilityConstraint\",\n        \"pairwise_equality\": \"optimagic.PairwiseEqualityConstraint\",\n        \"covariance\": \"optimagic.FlatCovConstraint\",\n        \"sdcorr\": \"optimagic.FlatSDCorrConstraint\",\n        \"linear\": \"optimagic.LinearConstraint\",\n        \"nonlinear\": \"optimagic.NonlinearConstraint\",\n    }\n\n    if not isinstance(constraints, list):\n        constraints = [constraints]\n\n    types_or_none = [\n        constraint.get(\"type\", None) if isinstance(constraint, dict) else None\n        for constraint in constraints\n    ]\n    types = [t for t in types_or_none if t is not None]\n\n    if types:\n        msg = (\n            \"Specifying constraints as a dictionary is deprecated and will be removed \"\n            \"in optimagic version 0.6.0. 
Please replace them using the new optimagic \"\n            \"constraint objects:\\n\"\n        )\n        for t in types:\n            msg += f\"  {{'type': '{t}', ...}} -> {replacements[t]}(...)\\n\"\n        msg += (\n            \"\\nFor more details see the documentation: \"\n            \"https://optimagic.readthedocs.io/en/latest/how_to/how_to_constraints.html\"\n        )\n\n        warnings.warn(msg, FutureWarning)\n\n\ndef replace_and_warn_about_deprecated_multistart_options(options):\n    \"\"\"Replace deprecated multistart options and warn about them.\n\n    Args:\n        options (MultistartOptions): The multistart options to replace.\n\n    Returns:\n        MultistartOptions: The replaced multistart options.\n\n    \"\"\"\n    replacements = {}\n\n    if options.share_optimization is not None:\n        msg = (\n            \"The `share_optimization` option is deprecated and will be removed in \"\n            \"version 0.6.0. Use `stopping_maxopt` instead to specify the number of \"\n            \"optimizations directly. For more details see the documentation: \"\n            \"https://optimagic.readthedocs.io/en/latest/how_to/how_to_multistart.html\"\n        )\n        warnings.warn(msg, FutureWarning)\n\n    if options.convergence_relative_params_tolerance is not None:\n        msg = (\n            \"The `convergence_relative_params_tolerance` option is deprecated and will \"\n            \"be removed in version 0.6.0. Use `convergence_xtol_rel` instead. 
For more \"\n            \"details see the documentation: \"\n            \"https://optimagic.readthedocs.io/en/latest/how_to/how_to_multistart.html\"\n        )\n        warnings.warn(msg, FutureWarning)\n        if options.convergence_xtol_rel is None:\n            replacements[\"convergence_xtol_rel\"] = (\n                options.convergence_relative_params_tolerance\n            )\n\n    if options.optimization_error_handling is not None:\n        msg = (\n            \"The `optimization_error_handling` option is deprecated and will be \"\n            \"removed in version 0.6.0. Setting this attribute also sets the error \"\n            \"handling for exploration. Use the new `error_handling` option to set the \"\n            \"error handling for both optimization and exploration. For more details \"\n            \"see the documentation: \"\n            \"https://optimagic.readthedocs.io/en/latest/how_to/how_to_multistart.html\"\n        )\n        warnings.warn(msg, FutureWarning)\n        if options.error_handling is None:\n            replacements[\"error_handling\"] = options.optimization_error_handling\n\n    if options.exploration_error_handling is not None:\n        msg = (\n            \"The `exploration_error_handling` option is deprecated and will be \"\n            \"removed in version 0.6.0. Setting this attribute also sets the error \"\n            \"handling for exploration. Use the new `error_handling` option to set the \"\n            \"error handling for both optimization and exploration. 
For more details \"\n            \"see the documentation: \"\n            \"https://optimagic.readthedocs.io/en/latest/how_to/how_to_multistart.html\"\n        )\n        warnings.warn(msg, FutureWarning)\n        if options.error_handling is None:\n            replacements[\"error_handling\"] = options.exploration_error_handling\n\n    return replace(options, **replacements)\n\n\ndef replace_and_warn_about_deprecated_base_steps(\n    step_size,\n    base_steps,\n):\n    if base_steps is not None:\n        msg = (\n            \"The `base_steps` argument is deprecated and will be removed alongside \"\n            \"Richardson extrapolation in optimagic version 0.6.0. To specify the \"\n            \"step size use the `step_size` argument instead. For more details see the \"\n            \"documentation: \"\n            \"https://optimagic.readthedocs.io/en/latest/how_to/how_to_step_size.html\"\n        )\n        warnings.warn(msg, FutureWarning)\n\n        if step_size is None:\n            step_size = base_steps\n\n    return step_size\n\n\ndef replace_and_warn_about_deprecated_derivatives(candidate, name):\n    msg = (\n        f\"Specifying a dictionary of {name} functions is deprecated and will be \"\n        \"removed in optimagic version 0.6.0. Please specify a single function that \"\n        \"returns the correct derivative for your optimizer or a list of functions that \"\n        \"are decorated with the `mark.scalar`, `mark.likelihood` or \"\n        \"`mark.least_squares` decorators. 
For more details see the documentation: \"\n        \"https://optimagic.readthedocs.io/en/latest/how_to/how_to_derivatives.html\"\n    )\n    warnings.warn(msg, FutureWarning)\n\n    key_to_marker = {\n        \"value\": mark.scalar,\n        \"contributions\": mark.likelihood,\n        \"root_contributions\": mark.least_squares,\n    }\n\n    out = []\n    for key, func in candidate.items():\n        if key in key_to_marker:\n            out.append(key_to_marker[key](func))\n\n    return out\n\n\ndef handle_log_options_throw_deprecated_warning(\n    log_options: dict[str, Any], logger: str | Path | LogOptions | None\n) -> str | Path | LogOptions | None:\n    msg = (\n        \"Usage of the parameter log_options is deprecated \"\n        \"and will be removed in a future version. \"\n        \"Provide a LogOptions instance for the parameter `logging`, if you need to \"\n        \"configure the logging.\"\n    )\n    warnings.warn(msg, FutureWarning)\n    logging_is_path_or_string = isinstance(logger, str) or isinstance(logger, Path)\n    log_options_is_dict = isinstance(log_options, dict)\n    compatible_keys = {\"fast_logging\", \"if_table_exists\", \"if_database_exists\"}\n    log_options_is_compatible = set(log_options.keys()).issubset(compatible_keys)\n\n    if logging_is_path_or_string:\n        if log_options_is_dict and log_options_is_compatible:\n            warnings.warn(\n                f\"\\nUsing {log_options=} to create an instance of SQLiteLogOptions. \"\n                f\"This mechanism will be removed in the future.\",\n                FutureWarning,\n            )\n            if \"if_table_exists\" in log_options:\n                warnings.warn(\n                    \"Found 'if_table_exists' in options dictionary. 
\"\n                    \"This option is deprecated and setting it has no effect.\",\n                    FutureWarning,\n                )\n                log_options = {\n                    k: v for k, v in log_options.items() if k != \"if_table_exists\"\n                }\n            return SQLiteLogOptions(cast(str | Path, logger), **log_options)\n        elif not log_options_is_compatible:\n            raise ValueError(\n                f\"Found string or path for logger argument, but parameter\"\n                f\" {log_options=} is not compatible to {compatible_keys=}.\"\n                f\"Explicitly create a Logger instance for configuration.\"\n            )\n\n    return logger\n\n\ndef pre_process_constraints(\n    constraints: list[Constraint | dict[str, Any]] | Constraint | dict[str, Any] | None,\n) -> list[dict[str, Any]]:\n    \"\"\"Convert all ways of specifying constraints to a list of dictionaries.\n\n    For the optimagic release 0.5.0 we only implemented the new constraint API, but have\n    not overhauled the internal representation of constraints yet. 
As a result, we\n    convert all ways of specifying constraints, and in particular the new interface, to\n    the old format, that is, a list of dictionaries.\n\n    Once we have refactored the internal representation of constraints, we will be able to\n    go the other way, and convert all formats to the new one.\n\n    \"\"\"\n    if constraints is None:\n        return []\n\n    if isinstance(constraints, dict | Constraint):\n        constraints = [constraints]\n\n    if isinstance(constraints, list):\n        out = []\n        invalid_types: list[type] = []\n        for constr in constraints:\n            if isinstance(constr, Constraint):\n                out.append(constr._to_dict())\n            elif isinstance(constr, dict):\n                out.append(constr)\n            else:\n                invalid_types.append(type(constr))\n\n        if invalid_types:\n            msg = (\n                f\"Invalid constraint types: {set(invalid_types)}. Must be a constraint \"\n                \"object imported from `optimagic`.\"\n            )\n            raise InvalidConstraintError(msg)\n\n    else:\n        msg = (\n            f\"Invalid constraint type: {type(constraints)}. Must be a constraint \"\n            \"object or list thereof imported from `optimagic`. For more details see \"\n            \"the documentation: \"\n            \"https://optimagic.readthedocs.io/en/latest/how_to/how_to_constraints.html\"\n        )\n        raise InvalidConstraintError(msg)\n\n    return out\n"
  },
  {
    "path": "src/optimagic/differentiation/__init__.py",
    "content": ""
  },
  {
    "path": "src/optimagic/differentiation/derivatives.py",
    "content": "import functools\nimport itertools\nimport re\nfrom dataclasses import dataclass\nfrom itertools import product\nfrom typing import Any, Callable, Literal, NamedTuple, cast\n\nimport numpy as np\nimport pandas as pd\nfrom numpy.typing import NDArray\nfrom pybaum import tree_flatten, tree_just_flatten, tree_unflatten\nfrom pybaum import tree_just_flatten as tree_leaves\n\nfrom optimagic import batch_evaluators, deprecations\nfrom optimagic.config import DEFAULT_N_CORES\nfrom optimagic.deprecations import (\n    replace_and_warn_about_deprecated_base_steps,\n    replace_and_warn_about_deprecated_bounds,\n)\nfrom optimagic.differentiation import finite_differences\nfrom optimagic.differentiation.generate_steps import generate_steps\nfrom optimagic.differentiation.richardson_extrapolation import richardson_extrapolation\nfrom optimagic.parameters.block_trees import hessian_to_block_tree, matrix_to_block_tree\nfrom optimagic.parameters.bounds import Bounds, get_internal_bounds, pre_process_bounds\nfrom optimagic.parameters.tree_registry import get_registry\nfrom optimagic.typing import BatchEvaluatorLiteral, PyTree\n\n\n@dataclass(frozen=True)\nclass NumdiffResult:\n    \"\"\"Result of a numerical differentiation.\n\n    The following relationship holds for vector-valued functions with vector-valued\n    parameters:\n\n    First Derivative:\n    -----------------\n\n    - f: R -> R leads to shape (1,), usually called derivative\n    - f: R^m -> R leads to shape (m, ), usually called Gradient\n    - f: R -> R^n leads to shape (n, 1), usually called Jacobian\n    - f: R^m -> R^n leads to shape (n, m), usually called Jacobian\n\n    Second Derivative:\n    ------------------\n\n    - f: R -> R leads to shape (1,), usually called second derivative\n    - f: R^m -> R leads to shape (m, m), usually called Hessian\n    - f: R -> R^n leads to shape (n,), usually called Hessian\n    - f: R^m -> R^n leads to shape (n, m, m), usually called Hessian tensor\n\n    
Attributes:\n        derivative: The estimated derivative at the parameters. The structure of the\n            derivative depends on the input parameters and the output of the function.\n        func_value: The value of the function at the parameters.\n\n    \"\"\"\n\n    derivative: PyTree\n    func_value: PyTree | None = None\n    # deprecated\n    _func_evals: pd.DataFrame | dict[str, pd.DataFrame | None] | None = None\n    _derivative_candidates: pd.DataFrame | None = None\n\n    @property\n    def func_evals(self) -> pd.DataFrame | dict[str, pd.DataFrame | None] | None:\n        deprecations.throw_numdiff_result_func_evals_future_warning()\n        return self._func_evals\n\n    @property\n    def derivative_candidates(self) -> pd.DataFrame | None:\n        deprecations.throw_numdiff_result_derivative_candidates_future_warning()\n        return self._derivative_candidates\n\n    def __getitem__(self, key: str) -> Any:\n        deprecations.throw_dict_access_future_warning(key, obj_name=type(self).__name__)\n        return getattr(self, key)\n\n\nclass Evals(NamedTuple):\n    pos: NDArray[np.float64]\n    neg: NDArray[np.float64]\n\n\ndef first_derivative(\n    func: Callable[[PyTree], PyTree],\n    params: PyTree,\n    *,\n    bounds: Bounds | None = None,\n    func_kwargs: dict[str, Any] | None = None,\n    method: Literal[\"central\", \"forward\", \"backward\"] = \"central\",\n    step_size: float | PyTree | None = None,\n    scaling_factor: float | PyTree = 1,\n    min_steps: float | PyTree | None = None,\n    f0: PyTree | None = None,\n    n_cores: int = DEFAULT_N_CORES,\n    error_handling: Literal[\"continue\", \"raise\", \"raise_strict\"] = \"continue\",\n    batch_evaluator: BatchEvaluatorLiteral | Callable = \"joblib\",\n    unpacker: Callable[[Any], PyTree] | None = None,\n    # deprecated\n    lower_bounds: PyTree | None = None,\n    upper_bounds: PyTree | None = None,\n    base_steps: PyTree | None = None,\n    key: str | None = None,\n    
step_ratio: float | None = None,\n    n_steps: int | None = None,\n    return_info: bool | None = None,\n    return_func_value: bool | None = None,\n) -> NumdiffResult:\n    \"\"\"Evaluate first derivative of func at params according to method and step options.\n\n    Internally, the function is converted such that it maps from a 1d array to a 1d\n    array. Then the Jacobian of that function is calculated.\n\n    The parameters and the function output can be optimagic-pytrees; for more details on\n    optimagic-pytrees see :ref:`eppytrees`. By default the resulting Jacobian will be\n    returned as a block-pytree.\n\n    For a detailed description of all options that influence the step size as well as an\n    explanation of how steps are adjusted to bounds in case of a conflict, see\n    :func:`~optimagic.differentiation.generate_steps.generate_steps`.\n\n    Args:\n        func: Function of which the derivative is calculated.\n        params: A pytree. See :ref:`params`.\n        bounds: Lower and upper bounds on the parameters. The most general and preferred\n            way to specify bounds is an `optimagic.Bounds` object that collects lower,\n            upper, soft_lower and soft_upper bounds. The soft bounds are not used during\n            numerical differentiation. Each bound type mirrors the structure of params.\n            Check our how-to guide on bounds for examples. 
If params is a flat numpy\n            array, you can also provide bounds via any format that is supported by\n            scipy.optimize.minimize.\n        func_kwargs: Additional keyword arguments for func, optional.\n        method: One of [\"central\", \"forward\", \"backward\"], default \"central\".\n        step_size: 1d array of the same length as params.\n            step_size * scaling_factor is the absolute value of the first (and possibly\n            only) step used in the finite differences approximation of the derivative.\n            If step_size * scaling_factor conflicts with bounds, the actual steps will\n            be adjusted. If step_size is not provided, it will be determined according\n            to a rule of thumb as long as this does not conflict with min_steps.\n        scaling_factor: Scaling factor which is applied to step_size. If it is an\n            numpy.ndarray, it needs to be as long as params. scaling_factor is useful if\n            you want to increase or decrease the base_step relative to the rule-of-thumb\n            or user provided base_step, for example to benchmark the effect of the step\n            size. Default 1.\n        min_steps: Minimal possible step sizes that can be chosen to accommodate bounds.\n            Must have same length as params. By default min_steps is equal to step_size,\n            i.e step size is not decreased beyond what is optimal according to the rule\n            of thumb.\n        f0: 1d numpy array with func(x), optional.\n        n_cores: Number of processes used to parallelize the function evaluations.\n            Default 1.\n        error_handling: One of \"continue\" (catch errors and continue to calculate\n            derivative estimates. 
In this case, some derivative estimates can be\n            missing but no errors are raised), \"raise\" (catch errors and continue\n            to calculate derivative estimates at first but raise an error if all\n            evaluations for one parameter failed) and \"raise_strict\" (raise an error\n            as soon as a function evaluation fails).\n        batch_evaluator (str or callable): Name of a pre-implemented batch evaluator\n            (currently 'joblib' and 'pathos_mp') or Callable with the same interface\n            as the optimagic batch_evaluators.\n        unpacker: A callable that takes the output of func and returns the part of the\n            output that is needed for the derivative calculation. If None, the output of\n            func is used as is. Default None.\n\n    Returns:\n        NumdiffResult: A numerical differentiation result.\n\n    \"\"\"\n    # ==================================================================================\n    # handle deprecations\n    # ==================================================================================\n    bounds = replace_and_warn_about_deprecated_bounds(\n        lower_bounds=lower_bounds,\n        upper_bounds=upper_bounds,\n        bounds=bounds,\n    )\n\n    step_size = replace_and_warn_about_deprecated_base_steps(\n        step_size=step_size,\n        base_steps=base_steps,\n    )\n\n    if key is not None:\n        deprecations.throw_key_warning_in_derivatives()\n        if unpacker is None:\n            unpacker = lambda x: x[key]\n\n    if step_ratio is not None:\n        deprecations.throw_derivatives_step_ratio_future_warning()\n    else:\n        step_ratio = 2\n\n    if n_steps is not None:\n        deprecations.throw_derivatives_n_steps_future_warning()\n    else:\n        n_steps = 1\n\n    if return_info is not None:\n        deprecations.throw_derivatives_return_info_future_warning()\n    else:\n        return_info = False\n\n    if return_func_value is not None:\n   
     deprecations.throw_derivatives_return_func_value_future_warning()\n    else:\n        return_func_value = True\n\n    # ==================================================================================\n\n    bounds = pre_process_bounds(bounds)\n    unpacker = _process_unpacker(unpacker)\n\n    # ==================================================================================\n    # Convert scalar | pytree arguments to 1d arrays of floats\n    # ==================================================================================\n    registry = get_registry(extended=True)\n\n    is_fast_path = _is_1d_array(params)\n\n    if not is_fast_path:\n        x, params_treedef = tree_flatten(params, registry=registry)\n        x = np.array(x, dtype=np.float64)\n\n        if scaling_factor is not None and not np.isscalar(scaling_factor):\n            scaling_factor = np.array(\n                tree_just_flatten(scaling_factor, registry=registry)\n            )\n\n        if min_steps is not None and not np.isscalar(min_steps):\n            min_steps = np.array(tree_just_flatten(min_steps, registry=registry))\n\n        if step_size is not None and not np.isscalar(step_size):\n            step_size = np.array(tree_just_flatten(step_size, registry=registry))\n    else:\n        x = params.astype(np.float64)\n\n    scaling_factor = _process_scalar_or_array_argument(\n        scaling_factor, x, \"scaling_factor\"\n    )\n    min_steps = _process_scalar_or_array_argument(min_steps, x, \"min_steps\")\n    step_size = _process_scalar_or_array_argument(step_size, x, \"step_size\")\n\n    # ==================================================================================\n\n    if np.isnan(x).any():\n        raise ValueError(\"The parameter vector must not contain NaNs.\")\n\n    internal_lb, internal_ub = get_internal_bounds(params, bounds=bounds)\n\n    # handle kwargs\n    func_kwargs = {} if func_kwargs is None else func_kwargs\n    partialed_func = functools.partial(func, 
**func_kwargs)\n\n    implemented_methods = {\"forward\", \"backward\", \"central\"}\n    if method not in implemented_methods:\n        raise ValueError(f\"Method has to be in {implemented_methods}.\")\n\n    # generate the step array\n    step_size = generate_steps(\n        x=x,\n        method=method,\n        n_steps=n_steps,\n        target=\"first_derivative\",\n        base_steps=step_size,\n        scaling_factor=scaling_factor,\n        bounds=Bounds(lower=internal_lb, upper=internal_ub),\n        step_ratio=step_ratio,\n        min_steps=min_steps,\n    )\n    step_size = cast(NDArray[np.float64], step_size)\n\n    # generate parameter vectors at which func has to be evaluated as numpy arrays\n    evaluation_points = []\n    for step_arr in step_size:\n        for i, j in product(range(n_steps), range(len(x))):\n            if np.isnan(step_arr[i, j]):\n                evaluation_points.append(np.nan)\n            else:\n                point = x.copy()\n                point[j] += step_arr[i, j]\n                evaluation_points.append(point)\n\n    # convert the numpy arrays to whatever is needed by func\n    if not is_fast_path:\n        evaluation_points = [\n            # entries are either a numpy.ndarray or np.nan\n            _unflatten_if_not_nan(p, params_treedef, registry)\n            for p in evaluation_points\n        ]\n\n    # we always evaluate f0, so we can fall back to one-sided derivatives if\n    # two-sided derivatives fail. 
The extra cost is negligible in most cases.\n    if f0 is None:\n        evaluation_points.append(params)\n\n    # do the function evaluations, including error handling\n    batch_error_handling = \"raise\" if error_handling == \"raise_strict\" else \"continue\"\n    raw_evals = _nan_skipping_batch_evaluator(\n        func=partialed_func,\n        arguments=evaluation_points,\n        n_cores=n_cores,\n        error_handling=batch_error_handling,\n        batch_evaluator=batch_evaluator,\n    )\n\n    # extract information on exceptions that occurred during function evaluations\n    exc_info = \"\\n\\n\".join([val for val in raw_evals if isinstance(val, str)])\n    raw_evals = [val if not isinstance(val, str) else np.nan for val in raw_evals]\n\n    # store full function value at params as func_value and a processed version of it\n    # that we need to calculate derivatives as f0\n    if f0 is None:\n        f0 = raw_evals[-1]\n        raw_evals = raw_evals[:-1]\n    func_value = f0\n\n    f0_tree = unpacker(f0)\n    scalar_out = np.isscalar(f0_tree)\n    vector_out = isinstance(f0_tree, np.ndarray) and f0_tree.ndim == 1\n\n    if scalar_out:\n        f0 = np.array([f0_tree], dtype=float)\n    elif vector_out:\n        f0 = f0_tree.astype(float)\n    else:\n        f0 = tree_leaves(f0_tree, registry=registry)\n        f0 = np.array(f0, dtype=np.float64)\n\n    # convert the raw evaluations to numpy arrays\n    raw_evals_arr = _convert_evals_to_numpy(\n        raw_evals=raw_evals,\n        unpacker=unpacker,\n        registry=registry,\n        is_scalar_out=scalar_out,\n        is_vector_out=vector_out,\n    )\n\n    # apply finite difference formulae\n    evals_data = np.array(raw_evals_arr).reshape(2, n_steps, len(x), -1)\n    evals_data_transposed = np.transpose(evals_data, axes=(0, 1, 3, 2))\n    evals = Evals(pos=evals_data_transposed[0], neg=evals_data_transposed[1])\n\n    jac_candidates = {}\n    for m in [\"forward\", \"backward\", \"central\"]:\n        
jac_candidates[m] = finite_differences.jacobian(evals, step_size, f0, m)\n\n    # get the best derivative estimate out of all derivative estimates that could be\n    # calculated, given the function evaluations.\n    orders = {\n        \"central\": [\"central\", \"forward\", \"backward\"],\n        \"forward\": [\"forward\", \"backward\"],\n        \"backward\": [\"backward\", \"forward\"],\n    }\n\n    if n_steps == 1:\n        jac = _consolidate_one_step_derivatives(jac_candidates, orders[method])\n        updated_candidates = None\n    else:\n        richardson_candidates = _compute_richardson_candidates(\n            jac_candidates, step_size, n_steps\n        )\n        jac, updated_candidates = _consolidate_extrapolated(richardson_candidates)\n\n    # raise error if necessary\n    if error_handling in (\"raise\", \"raise_strict\") and np.isnan(jac).any():\n        raise Exception(exc_info)\n\n    # results processing\n    if is_fast_path and vector_out:\n        derivative = jac\n    elif is_fast_path and scalar_out:\n        derivative = jac.flatten()\n    else:\n        derivative = matrix_to_block_tree(jac, f0_tree, params)\n\n    result = {\"derivative\": derivative}\n    if return_func_value:\n        result[\"func_value\"] = func_value\n    if return_info:\n        info = _collect_additional_info(\n            step_size, evals, updated_candidates, target=\"first_derivative\"\n        )\n        result = {**result, **info}\n    return NumdiffResult(**result)\n\n\ndef second_derivative(\n    func: Callable[[PyTree], PyTree],\n    params: PyTree,\n    *,\n    bounds: Bounds | None = None,\n    func_kwargs: dict[str, Any] | None = None,\n    method: Literal[\n        \"forward\", \"backward\", \"central_average\", \"central_cross\"\n    ] = \"central_cross\",\n    step_size: float | PyTree | None = None,\n    scaling_factor: float | PyTree = 1,\n    min_steps: float | PyTree | None = None,\n    f0: PyTree | None = None,\n    n_cores: int = 
DEFAULT_N_CORES,\n    error_handling: Literal[\"continue\", \"raise\", \"raise_strict\"] = \"continue\",\n    batch_evaluator: BatchEvaluatorLiteral | Callable = \"joblib\",\n    unpacker: Callable[[Any], PyTree] | None = None,\n    # deprecated\n    lower_bounds: PyTree | None = None,\n    upper_bounds: PyTree | None = None,\n    base_steps: PyTree | None = None,\n    step_ratio: float | None = None,\n    n_steps: int | None = None,\n    return_info: bool | None = None,\n    return_func_value: bool | None = None,\n    key: str | None = None,\n) -> NumdiffResult:\n    \"\"\"Evaluate second derivative of func at params according to method and step\n\n    options.\n\n    Internally, the function is converted such that it maps from a 1d array to a 1d\n    array. Then the Hessians of that function are calculated. The resulting derivative\n    estimate is always a :class:`numpy.ndarray`.\n\n    The parameters and the function output can be pandas objects (Series or DataFrames\n    with value column). In that case the output of second_derivative is also a pandas\n    object and with appropriate index and columns.\n\n    Detailed description of all options that influence the step size as well as an\n    explanation of how steps are adjusted to bounds in case of a conflict,\n    see :func:`~optimagic.differentiation.generate_steps.generate_steps`.\n\n    Args:\n        func: Function of which the derivative is calculated.\n        params: 1d numpy array or\n            :class:`pandas.DataFrame` with parameters at which the derivative is\n            calculated. If it is a DataFrame, it can contain the columns \"lower_bound\"\n            and \"upper_bound\" for bounds. See :ref:`params`.\n        bounds: Lower and upper bounds on the parameters. The most general and preferred\n            way to specify bounds is an `optimagic.Bounds` object that collects lower,\n            upper, soft_lower and soft_upper bounds. 
The soft bounds are not used during\n            numerical differentiation. Each bound type mirrors the structure of params.\n            Check our how-to guide on bounds for examples. If params is a flat numpy\n            array, you can also provide bounds via any format that is supported by\n            scipy.optimize.minimize.\n        func_kwargs: Additional keyword arguments for func, optional.\n        method: One of {\"forward\", \"backward\", \"central_average\", \"central_cross\"}\n            These correspond to the finite difference approximations defined in\n            equations [7, x, 8, 9] in Rideout [2009], where (\"backward\", x) is not found\n            in Rideout [2009] but is the natural extension of equation 7 to the backward\n            case. Default \"central_cross\".\n        step_size: 1d array of the same length as params.\n            step_size * scaling_factor is the absolute value of the first (and possibly\n            only) step used in the finite differences approximation of the derivative.\n            If step_size * scaling_factor conflicts with bounds, the actual steps will\n            be adjusted. If step_size is not provided, it will be determined according\n            to a rule of thumb as long as this does not conflict with min_steps.\n        scaling_factor: Scaling factor which is applied to\n            step_size. If it is an numpy.ndarray, it needs to be as long as params.\n            scaling_factor is useful if you want to increase or decrease the base_step\n            relative to the rule-of-thumb or user provided base_step, for example to\n            benchmark the effect of the step size. Default 1.\n        min_steps: Minimal possible step sizes that can be chosen to\n            accommodate bounds. Must have same length as params. 
By default min_steps is\n            equal to step_size, i.e step size is not decreased beyond what is optimal\n            according to the rule of thumb.\n        f0: 1d numpy array with func(x), optional.\n        n_cores: Number of processes used to parallelize the function\n            evaluations. Default 1.\n        error_handling: One of \"continue\" (catch errors and continue to calculate\n            derivative estimates. In this case, some derivative estimates can be\n            missing but no errors are raised), \"raise\" (catch errors and continue\n            to calculate derivative estimates at first but raise an error if all\n            evaluations for one parameter failed) and \"raise_strict\" (raise an error\n            as soon as a function evaluation fails).\n        batch_evaluator: Name of a pre-implemented batch evaluator\n            (currently 'joblib' and 'pathos_mp') or Callable with the same interface\n            as the optimagic batch_evaluators.\n        unpacker: A callable that takes the output of func and returns the part of the\n            output that is needed for the derivative calculation. If None, the output of\n            func is used as is. 
Default None.\n\n\n    Returns:\n        NumdiffResult: A numerical differentiation result.\n\n    \"\"\"\n    # ==================================================================================\n    # handle deprecations\n    # ==================================================================================\n    bounds = replace_and_warn_about_deprecated_bounds(\n        lower_bounds=lower_bounds,\n        upper_bounds=upper_bounds,\n        bounds=bounds,\n    )\n\n    step_size = replace_and_warn_about_deprecated_base_steps(\n        step_size=step_size,\n        base_steps=base_steps,\n    )\n\n    if step_ratio is not None:\n        deprecations.throw_derivatives_step_ratio_future_warning()\n    else:\n        step_ratio = 2\n\n    if n_steps is not None:\n        deprecations.throw_derivatives_n_steps_future_warning()\n    else:\n        n_steps = 1\n\n    if return_info is not None:\n        deprecations.throw_derivatives_return_info_future_warning()\n    else:\n        return_info = False\n\n    if return_func_value is not None:\n        deprecations.throw_derivatives_return_func_value_future_warning()\n    else:\n        return_func_value = True\n\n    if key is not None:\n        deprecations.throw_key_warning_in_derivatives()\n        if unpacker is None:\n            unpacker = lambda x: x[key]\n\n    # ==================================================================================\n    bounds = pre_process_bounds(bounds)\n    unpacker = _process_unpacker(unpacker)\n\n    # ==================================================================================\n    # Convert scalar | pytree arguments to 1d arrays of floats\n    # ==================================================================================\n    registry = get_registry(extended=True)\n\n    is_fast_path = _is_1d_array(params)\n\n    if not is_fast_path:\n        x, params_treedef = tree_flatten(params, registry=registry)\n        x = np.array(x, dtype=np.float64)\n\n        if 
scaling_factor is not None and not np.isscalar(scaling_factor):\n            scaling_factor = np.array(\n                tree_just_flatten(scaling_factor, registry=registry)\n            )\n\n        if min_steps is not None and not np.isscalar(min_steps):\n            min_steps = np.array(tree_just_flatten(min_steps, registry=registry))\n\n        if step_size is not None and not np.isscalar(step_size):\n            step_size = np.array(tree_just_flatten(step_size, registry=registry))\n    else:\n        x = params.astype(np.float64)\n\n    scaling_factor = _process_scalar_or_array_argument(\n        scaling_factor, x, \"scaling_factor\"\n    )\n    min_steps = _process_scalar_or_array_argument(min_steps, x, \"min_steps\")\n    step_size = _process_scalar_or_array_argument(step_size, x, \"step_size\")\n\n    # ==================================================================================\n\n    unpacker = _process_unpacker(unpacker)\n\n    internal_lb, internal_ub = get_internal_bounds(params, bounds=bounds)\n\n    # handle kwargs\n    func_kwargs = {} if func_kwargs is None else func_kwargs\n    partialed_func = functools.partial(func, **func_kwargs)\n\n    implemented_methods = {\"forward\", \"backward\", \"central_average\", \"central_cross\"}\n    if method not in implemented_methods:\n        raise ValueError(f\"Method has to be in {implemented_methods}.\")\n\n    # generate the step array\n    step_size = generate_steps(\n        x=x,\n        method=(\"central\" if \"central\" in method else method),\n        n_steps=n_steps,\n        target=\"second_derivative\",\n        base_steps=step_size,\n        scaling_factor=scaling_factor,\n        bounds=Bounds(lower=internal_lb, upper=internal_ub),\n        step_ratio=step_ratio,\n        min_steps=min_steps,\n    )\n    step_size = cast(NDArray[np.float64], step_size)\n\n    # generate parameter vectors at which func has to be evaluated as numpy arrays\n    evaluation_points = {  # type: ignore\n        
\"one_step\": [],\n        \"two_step\": [],\n        \"cross_step\": [],\n    }\n    for step_arr in step_size:\n        # single direction steps\n        for i, j in product(range(n_steps), range(len(x))):\n            if np.isnan(step_arr[i, j]):\n                evaluation_points[\"one_step\"].append(np.nan)\n            else:\n                point = x.copy()\n                point[j] += step_arr[i, j]\n                evaluation_points[\"one_step\"].append(point)\n        # two and cross direction steps\n        for i, j, k in product(range(n_steps), range(len(x)), range(len(x))):\n            if j > k or np.isnan(step_arr[i, j]) or np.isnan(step_arr[i, k]):\n                evaluation_points[\"two_step\"].append(np.nan)\n                evaluation_points[\"cross_step\"].append(np.nan)\n            else:\n                point = x.copy()\n                point[j] += step_arr[i, j]\n                point[k] += step_arr[i, k]\n                evaluation_points[\"two_step\"].append(point)\n                if j == k:\n                    evaluation_points[\"cross_step\"].append(np.nan)\n                else:\n                    point = x.copy()\n                    point[j] += step_arr[i, j]\n                    point[k] -= step_arr[i, k]\n                    evaluation_points[\"cross_step\"].append(point)\n\n    # convert the numpy arrays to whatever is needed by func\n    if not is_fast_path:\n        evaluation_points = {\n            # entries are either a numpy.ndarray or np.nan, we unflatten only\n            step_type: [\n                _unflatten_if_not_nan(p, params_treedef, registry) for p in points\n            ]\n            for step_type, points in evaluation_points.items()\n        }\n\n    # we always evaluate f0, so we can fall back to one-sided derivatives if\n    # two-sided derivatives fail. 
The extra cost is negligible in most cases.\n    if f0 is None:\n        evaluation_points[\"one_step\"].append(params)\n\n    # do the function evaluations for one and two step, including error handling\n    batch_error_handling = \"raise\" if error_handling == \"raise_strict\" else \"continue\"\n    raw_evals = _nan_skipping_batch_evaluator(\n        func=partialed_func,\n        arguments=list(itertools.chain.from_iterable(evaluation_points.values())),\n        n_cores=n_cores,\n        error_handling=batch_error_handling,\n        batch_evaluator=batch_evaluator,\n    )\n\n    # extract information on exceptions that occurred during function evaluations\n    exc_info = \"\\n\\n\".join([val for val in raw_evals if isinstance(val, str)])\n    raw_evals = [val if not isinstance(val, str) else np.nan for val in raw_evals]\n\n    n_one_step, n_two_step, n_cross_step = map(len, evaluation_points.values())\n    raw_evals = {\n        \"one_step\": raw_evals[:n_one_step],\n        \"two_step\": raw_evals[n_one_step : n_two_step + n_one_step],\n        \"cross_step\": raw_evals[n_two_step + n_one_step :],\n    }\n\n    # store full function value at params as func_value and a processed version of it\n    # that we need to calculate derivatives as f0\n    if f0 is None:\n        f0 = raw_evals[\"one_step\"][-1]\n        raw_evals[\"one_step\"] = raw_evals[\"one_step\"][:-1]\n    func_value = f0\n\n    f0_tree = unpacker(f0)\n    f0 = tree_leaves(f0_tree, registry=registry)\n    f0 = np.array(f0, dtype=np.float64)\n\n    # convert the raw evaluations to numpy arrays\n    raw_evals = {\n        step_type: _convert_evals_to_numpy(\n            raw_evals=evals, unpacker=unpacker, registry=registry\n        )\n        for step_type, evals in raw_evals.items()\n    }\n\n    # reshape arrays into dimension (n_steps, dim_f, dim_x) or (n_steps, dim_f, dim_x,\n    # dim_x) for finite differences\n    evals = {}\n    evals[\"one_step\"] = 
_reshape_one_step_evals(raw_evals[\"one_step\"], n_steps, len(x))\n    evals[\"two_step\"] = _reshape_two_step_evals(raw_evals[\"two_step\"], n_steps, len(x))\n    evals[\"cross_step\"] = _reshape_cross_step_evals(\n        raw_evals[\"cross_step\"], n_steps, len(x), f0\n    )\n\n    # apply finite difference formulae\n    hess_candidates = {}\n    for m in [\"forward\", \"backward\", \"central_average\", \"central_cross\"]:\n        hess_candidates[m] = finite_differences.hessian(evals, step_size, f0, m)\n\n    # get the best derivative estimate out of all derivative estimates that could be\n    # calculated, given the function evaluations.\n    orders = {\n        \"central_cross\": [\"central_cross\", \"central_average\", \"forward\", \"backward\"],\n        \"central_average\": [\"central_average\", \"central_cross\", \"forward\", \"backward\"],\n        \"forward\": [\"forward\", \"backward\", \"central_average\", \"central_cross\"],\n        \"backward\": [\"backward\", \"forward\", \"central_average\", \"central_cross\"],\n    }\n\n    if n_steps == 1:\n        hess = _consolidate_one_step_derivatives(hess_candidates, orders[method])\n        updated_candidates = None\n    else:\n        raise ValueError(\n            \"Richardson extrapolation is not implemented for the second derivative yet.\"\n        )\n\n    # raise error if necessary\n    if error_handling in (\"raise\", \"raise_strict\") and np.isnan(hess).any():\n        raise Exception(exc_info)\n\n    # results processing\n    derivative = hessian_to_block_tree(hess, f0_tree, params)\n\n    result = {\"derivative\": derivative}\n    if return_func_value:\n        result[\"func_value\"] = func_value\n    if return_info:\n        info = _collect_additional_info(\n            step_size, evals, updated_candidates, target=\"second_derivative\"\n        )\n        result = {**result, **info}\n    return NumdiffResult(**result)\n\n\ndef _is_1d_array(candidate: Any) -> bool:\n    return 
isinstance(candidate, np.ndarray) and candidate.ndim == 1\n\n\ndef _reshape_one_step_evals(raw_evals_one_step, n_steps, dim_x):\n    \"\"\"Reshape raw_evals for evaluation points with one step.\n\n    Returned object is a namedtuple with entries 'pos' and 'neg' corresponding to\n    positive and negative steps. Each entry will be a numpy array with dimension\n    (n_steps, dim_f, dim_x).\n\n    Mathematical:\n\n            evals.pos = (f(x0 + delta_jl e_j))\n            evals.neg = (f(x0 - delta_jl e_j))\n\n        for j=1,...,dim_x and l=1,...,n_steps\n\n    \"\"\"\n    evals = np.array(raw_evals_one_step).reshape(2, n_steps, dim_x, -1)\n    evals = evals.swapaxes(2, 3)\n    evals = Evals(pos=evals[0], neg=evals[1])\n    return evals\n\n\ndef _process_unpacker(\n    unpacker: None | Callable[[Any], PyTree],\n) -> Callable[[Any], PyTree]:\n    \"\"\"Process the user provided unpacker function.\n\n    If the unpacker was None, we set it to the identity.\n\n    \"\"\"\n    if unpacker is None:\n        unpacker = lambda x: x\n    else:\n        raw_unpacker = unpacker\n\n        def unpacker(x):\n            if isinstance(x, float) and np.isnan(x):\n                return x\n            return raw_unpacker(x)\n\n    return unpacker\n\n\ndef _process_scalar_or_array_argument(candidate, x, name):\n    if candidate is None:\n        return None\n\n    if np.isscalar(candidate):\n        return np.full_like(x, candidate, dtype=np.float64)\n    else:\n        try:\n            candidate = np.asarray(candidate, dtype=np.float64)\n        except (KeyboardInterrupt, SystemExit):\n            raise\n        except Exception as e:\n            msg = f\"{name} must be a scalar or have the same structure as params.\"\n            raise ValueError(msg) from e\n\n        if len(candidate) != len(x) or candidate.ndim != 1:\n            msg = f\"{name} must be a scalar or have the same structure as params.\"\n            raise ValueError(msg)\n    return candidate\n\n\ndef 
_reshape_two_step_evals(raw_evals_two_step, n_steps, dim_x):\n    \"\"\"Reshape raw_evals for evaluation points with two steps.\n\n    Returned object is a namedtuple with entries 'pos' and 'neg' corresponding to\n    positive and negative steps. Each entry will be a numpy array with dimension\n    (n_steps, dim_f, dim_x, dim_x). Since the array is, by definition, symmetric over\n    the last two dimensions, the function is not evaluated on both sides to save\n    computation time and the information is simply copied here.\n\n    Mathematical:\n\n            evals.pos = (f(x0 + delta_jl e_j + delta_kl e_k))\n            evals.neg = (f(x0 - delta_jl e_j - delta_kl e_k))\n\n        for j,k=1,...,dim_x and l=1,...,n_steps\n\n    \"\"\"\n    tril_idx = np.tril_indices(dim_x, -1)\n    evals = np.array(raw_evals_two_step).reshape(2, n_steps, dim_x, dim_x, -1)\n    evals = evals.transpose(0, 1, 4, 2, 3)\n    evals[..., tril_idx[0], tril_idx[1]] = evals[..., tril_idx[1], tril_idx[0]]\n    evals = Evals(pos=evals[0], neg=evals[1])\n    return evals\n\n\ndef _reshape_cross_step_evals(raw_evals_cross_step, n_steps, dim_x, f0):\n    \"\"\"Reshape raw_evals for evaluation points with cross steps.\n\n    Returned object is a namedtuple with entries 'pos' and 'neg' corresponding to\n    positive and negative steps. Each entry will be a numpy array with dimension\n    (n_steps, dim_f, dim_x, dim_x). Since the array is, by definition, symmetric over\n    the last two dimensions, the function is not evaluated on both sides to save\n    computation time and the information is simply copied here. In comparison to the\n    two_step case, however, this symmetry holds only over the dimension 'pos' and 'neg'.\n    That is, the lower triangular of the last two dimensions of 'pos' must equal the\n    upper triangular of the last two dimensions of 'neg'. 
Further, the diagonal of the\n    last two dimensions must be equal to f0.\n\n    Mathematical:\n\n            evals.pos = (f(x0 + delta_jl e_j - delta_kl e_k))\n            evals.neg = (f(x0 - delta_jl e_j + delta_kl e_k))\n\n        for j,k=1,...,dim_x and l=1,...,n_steps\n\n    \"\"\"\n    tril_idx = np.tril_indices(dim_x, -1)\n    diag_idx = np.diag_indices(dim_x)\n    evals = np.array(raw_evals_cross_step).reshape(2, n_steps, dim_x, dim_x, -1)\n    evals = evals.transpose(0, 1, 4, 2, 3)\n    evals[0][..., tril_idx[0], tril_idx[1]] = evals[1][..., tril_idx[1], tril_idx[0]]\n    evals[0][..., diag_idx[0], diag_idx[1]] = np.atleast_2d(f0).T[np.newaxis, ...]\n    evals = Evals(pos=evals[0], neg=evals[0].swapaxes(2, 3))\n    return evals\n\n\ndef _convert_evaluation_data_to_frame(steps, evals):\n    \"\"\"Convert evaluation data to (tidy) data frame.\n\n    Args:\n        steps (namedtuple): Namedtuple with field names pos and neg. Is generated by\n            :func:`~optimagic.differentiation.generate_steps.generate_steps`.\n        evals (namedtuple): Namedtuple with field names pos and neg. Contains function\n            evaluation corresponding to steps.\n\n    Returns:\n        df (pandas.DataFrame): Tidy data frame with index (sign, step_number, dim_x\n            dim_f), where sign corresponds to pos or neg in steps and evals, step_number\n            indexes the step, dim_x is the dimension of the input vector and dim_f is\n            the dimension of the function output. The data is given by the two columns\n            step and eval. 
The data frame has 2 * n_steps * dim_x * dim_f rows.\n\n    \"\"\"\n    n_steps, dim_f, dim_x = evals.pos.shape\n\n    dfs = []\n    for direction, step_arr, eval_arr in zip((1, -1), steps, evals, strict=False):\n        df_steps = pd.DataFrame(step_arr, columns=range(dim_x))\n        df_steps = df_steps.reset_index()\n        df_steps = df_steps.rename(columns={\"index\": \"step_number\"})\n        df_steps = df_steps.melt(\n            id_vars=\"step_number\", var_name=\"dim_x\", value_name=\"step\"\n        )\n        df_steps = df_steps.sort_values(\"step_number\")\n        df_steps = df_steps.reset_index(drop=True)\n        df_steps = df_steps.apply(lambda col: col.abs() if col.name == \"step\" else col)\n\n        reshaped_eval_arr = np.transpose(eval_arr, (0, 2, 1)).reshape(-1, dim_f)\n        df_evals = pd.concat((df_steps, pd.DataFrame(reshaped_eval_arr)), axis=1)\n        df_evals = df_evals.melt(\n            id_vars=[\"step_number\", \"dim_x\", \"step\"],\n            var_name=\"dim_f\",\n            value_name=\"eval\",\n        )\n        df_evals = df_evals.assign(sign=direction)\n        df_evals = df_evals.set_index([\"sign\", \"step_number\", \"dim_x\", \"dim_f\"])\n        df_evals = df_evals.sort_index()\n\n        dfs.append(df_evals)\n\n    df = pd.concat(dfs).astype({\"step\": float, \"eval\": float})\n    return df\n\n\ndef _convert_richardson_candidates_to_frame(jac, err):\n    \"\"\"Convert (richardson) jacobian candidates and errors to pandas data frame.\n\n    Args:\n        jac (dict): Dict with richardson jacobian candidates.\n        err (dict): Dict with errors corresponding to richardson jacobian candidates.\n\n    Returns:\n        df (pandas.DataFrame): Frame with column \"der\" and \"err\" and index [\"method\",\n            \"num_term\", \"dim_x\", \"dim_f\"] with respective meaning: type of method used,\n            e.g. central or foward; kind of value, e.g. 
derivative or error.\n\n    \"\"\"\n    dim_f, dim_x = jac[\"forward1\"].shape\n    dfs = []\n    for key, value in jac.items():\n        method, num_term = _split_into_str_and_int(key)\n        df = pd.DataFrame(value.T, columns=range(dim_f))\n        df = df.assign(dim_x=range(dim_x))\n        df = df.melt(id_vars=\"dim_x\", var_name=\"dim_f\", value_name=\"der\")\n        df = df.assign(method=method, num_term=num_term, err=err[key].T.flatten())\n        dfs.append(df)\n\n    df = pd.concat(dfs)\n    df = df.set_index([\"method\", \"num_term\", \"dim_x\", \"dim_f\"])\n    return df\n\n\ndef _convert_evals_to_numpy(\n    raw_evals, unpacker, registry, is_scalar_out=False, is_vector_out=False\n):\n    \"\"\"Harmonize the output of the function evaluations.\n\n    The raw_evals might contain dictionaries of which we only need one entry, scalar\n    np.nan where we need arrays filled with np.nan or pandas objects. The processed\n    evals only contain numpy arrays.\n\n    \"\"\"\n    # get rid of additional output\n    evals = [unpacker(val) for val in raw_evals]\n\n    # convert pytrees to arrays\n    if is_scalar_out:\n        evals = [\n            np.array([val], dtype=float) if not _is_scalar_nan(val) else val\n            for val in evals\n        ]\n\n    elif is_vector_out:\n        evals = [val.astype(float) if not _is_scalar_nan(val) else val for val in evals]\n    else:\n        evals = [\n            (\n                np.array(tree_leaves(val, registry=registry), dtype=np.float64)\n                if not _is_scalar_nan(val)\n                else val\n            )\n            for val in evals\n        ]\n\n    # find out the correct output shape\n    try:\n        array = next(x for x in evals if hasattr(x, \"shape\") or isinstance(x, dict))\n        out_shape = array.shape\n    except StopIteration:\n        out_shape = \"scalar\"\n\n    # convert to correct output shape\n    if out_shape == \"scalar\":\n        evals = [np.atleast_1d(val) for val in 
evals]\n    else:\n        for i in range(len(evals)):\n            if isinstance(evals[i], float) and np.isnan(evals[i]):\n                evals[i] = np.full(out_shape, np.nan)\n\n    return evals\n\n\ndef _consolidate_one_step_derivatives(candidates, preference_order):\n    \"\"\"Replace missing derivative estimates of preferred method with others.\n\n    Args:\n        candidates (dict): Dictionary with derivative estimates from different methods.\n        preference_order (list): Order on (a subset of) the keys in candidates. Earlier\n        entries are preferred.\n\n    Returns:\n        consolidated (np.ndarray): Array of same shape as input derivative estimates.\n\n    \"\"\"\n    preferred, others = preference_order[0], preference_order[1:]\n    consolidated = candidates[preferred].copy()\n    for other in others:\n        consolidated = np.where(np.isnan(consolidated), candidates[other], consolidated)\n\n    return consolidated.reshape(consolidated.shape[1:])\n\n\ndef _consolidate_extrapolated(candidates):\n    \"\"\"Get the best possible derivative estimate, given an error estimate.\n\n    Going through ``candidates`` select the best derivative estimate element-wise using\n    the estimated candidates, where best is defined as minimizing the error estimate\n    from the Richardson extrapolation.\n\n    See https://tinyurl.com/ubn3nv5 for corresponding code in numdifftools and\n    https://tinyurl.com/snle7mb for an explanation of how errors of Richardson\n    extrapolated derivative estimates can be estimated.\n\n    Args:\n        candidates (dict): Dictionary containing different derivative estimates and\n            their error estimates.\n\n    Returns:\n        consolidated (np.ndarray): Array of same shape as input derivative estimates.\n        candidate_der_dict (dict): Best derivative estimate given method.\n        candidate_err_dict (dict): Errors corresponding to best derivatives given method\n\n    \"\"\"\n    # first find minimum over steps 
for each method\n    candidate_der_dict = {}\n    candidate_err_dict = {}\n\n    for key in candidates:\n        _der = candidates[key][\"derivative\"]\n        _err = candidates[key][\"error\"]\n        derivative, error = _select_minimizer_along_axis(_der, _err)\n        candidate_der_dict[key] = derivative\n        candidate_err_dict[key] = error\n\n    # second find minimum over methods\n    candidate_der = np.stack(list(candidate_der_dict.values()))\n    candidate_err = np.stack(list(candidate_err_dict.values()))\n    consolidated, _ = _select_minimizer_along_axis(candidate_der, candidate_err)\n\n    updated_candidates = (candidate_der_dict, candidate_err_dict)\n    return consolidated, updated_candidates\n\n\ndef _compute_richardson_candidates(jac_candidates, steps, n_steps):\n    \"\"\"Compute derivative candidates using Richardson extrapolation.\n\n    Args:\n        jac_candidates (dict): Dictionary with (traditional) derivative estimates from\n            different methods.\n        steps (namedtuple): Namedtuple with the field names pos and neg. Each field\n            contains a numpy array of shape (n_steps, len(x)) with the steps in\n            the corresponding direction. The steps are always symmetric, in the sense\n            that steps.neg[i, j] = - steps.pos[i, j] unless one of them is NaN.\n        n_steps (int): Number of steps needed. For central methods, this is\n            the number of steps per direction. It is 1 if no Richardson extrapolation\n            is used.\n\n    Returns:\n        richardson_candidates (dict): Dictionary with derivative estimates and error\n            estimates from different methods.\n            - Keys correspond to the method used, i.e. 
forward, backward or central\n            differences and the number of terms used in the Richardson extrapolation.\n            - Values represent the corresponding derivative estimate and error\n            estimate, stored as np.ndarrays in a sub-dictionary under \"derivative\" and\n            \"error\" respectively, with the first dimensions coinciding with that of an\n            element of ``jac_candidates`` and depending on num_terms, possibly one\n            further dimension.\n\n    \"\"\"\n    richardson_candidates = {}\n    for method in [\"forward\", \"backward\", \"central\"]:\n        for num_terms in range(1, n_steps):\n            derivative, error = richardson_extrapolation(\n                jac_candidates[method], steps, method, num_terms\n            )\n            richardson_candidates[method + str(num_terms)] = {\n                \"derivative\": derivative,\n                \"error\": error,\n            }\n\n    return richardson_candidates\n\n\ndef _select_minimizer_along_axis(derivative, errors):\n    \"\"\"Select best derivative estimates element wise.\n\n    Select elements from ``derivative`` which correspond to minimum in ``errors`` along\n    first axis.\n\n    Args:\n        derivative (np.ndarray): Derivative estimates from Richardson approximation.\n            First axis (axis 0) denotes the potentially multiple estimates. Following\n            dimensions represent the dimension of the derivative, i.e. for a classical\n            gradient ``derivative`` has 2 dimensions, while for a classical jacobian\n            ``derivative`` has 3 dimensions.\n        errors (np.ndarray): Error estimates of ``derivative`` estimates. Has the same\n            shape as ``derivative``.\n\n    Returns:\n        derivative_minimal (np.ndarray): Best derivate estimates chosen with respect\n            to minimizing ``errors``. Note that the best values are selected\n            element-wise. 
Has shape ``(derivative.shape[1], derivative.shape[2])``.\n\n        error_minimal (np.ndarray): Minimal errors selected element-wise along axis\n            0 of ``errors``.\n\n    \"\"\"\n    if derivative.shape[0] == 1:\n        jac_minimal = np.squeeze(derivative, axis=0)\n        error_minimal = np.squeeze(errors, axis=0)\n    else:\n        minimizer = np.nanargmin(errors, axis=0)\n        jac_minimal = np.take_along_axis(derivative, minimizer[np.newaxis, :], axis=0)\n        jac_minimal = np.squeeze(jac_minimal, axis=0)\n        error_minimal = np.nanmin(errors, axis=0)\n\n    return jac_minimal, error_minimal\n\n\ndef _nan_skipping_batch_evaluator(\n    func, arguments, n_cores, error_handling, batch_evaluator\n):\n    \"\"\"Evaluate func at each entry in arguments, skipping np.nan entries.\n\n    The function is only evaluated at inputs that are not a scalar np.nan.\n    The outputs corresponding to skipped inputs as well as for inputs on which func\n    returns np.nan are np.nan.\n\n    Args:\n        func (function): Python function that returns a numpy array. 
The shape\n            of the output of func has to be the same for all elements in arguments.\n        arguments (list): List with inputs for func.\n        n_cores (int): Number of processes.\n\n    Returns:\n        evaluations (list): The function evaluations, same length as arguments.\n\n    \"\"\"\n    # extract information\n    nan_indices = {\n        i for i, arg in enumerate(arguments) if isinstance(arg, float) and np.isnan(arg)\n    }\n    real_args = [arg for i, arg in enumerate(arguments) if i not in nan_indices]\n\n    # get the batch evaluator if it was provided as string\n    if not callable(batch_evaluator):\n        batch_evaluator = getattr(\n            batch_evaluators, f\"{batch_evaluator}_batch_evaluator\"\n        )\n\n    # evaluate functions\n    evaluations = batch_evaluator(\n        func=func, arguments=real_args, n_cores=n_cores, error_handling=error_handling\n    )\n\n    # combine results\n    evaluations = iter(evaluations)\n    results = []\n    for i in range(len(arguments)):\n        if i in nan_indices:\n            results.append(np.nan)\n        else:\n            results.append(next(evaluations))\n\n    return results\n\n\ndef _split_into_str_and_int(s):\n    \"\"\"Splits string in str and int parts.\n\n    Args:\n        s (str): The string.\n\n    Returns:\n        str_part (str): The str part.\n        int_part (int): The int part.\n\n    Example:\n    >>> s = \"forward1\"\n    >>> _split_into_str_and_int(s)\n    ('forward', 1)\n\n    \"\"\"\n    str_part, int_part = re.findall(r\"(\\w+?)(\\d+)\", s)[0]\n    return str_part, int(int_part)\n\n\ndef _collect_additional_info(steps, evals, updated_candidates, target):\n    \"\"\"Combine additional information in dict if return_info is True.\"\"\"\n    info = {}\n    # save function evaluations to accessible data frame\n    if target == \"first_derivative\":\n        func_evals = _convert_evaluation_data_to_frame(steps, evals)\n        info[\"_func_evals\"] = func_evals\n    
else:\n        one_step = _convert_evaluation_data_to_frame(steps, evals[\"one_step\"])\n        info[\"_func_evals\"] = {\n            \"one_step\": one_step,\n            \"two_step\": None,\n            \"cross_step\": None,\n        }\n\n    if updated_candidates is not None:\n        # combine derivative candidates in accessible data frame\n        derivative_candidates = _convert_richardson_candidates_to_frame(\n            *updated_candidates\n        )\n        info[\"_derivative_candidates\"] = derivative_candidates\n\n    return info\n\n\ndef _is_scalar_nan(value):\n    return isinstance(value, float) and np.isnan(value)\n\n\ndef _unflatten_if_not_nan(leaves, treedef, registry):\n    if isinstance(leaves, np.ndarray):\n        out = tree_unflatten(treedef, leaves, registry=registry)\n    else:\n        out = leaves\n    return out\n"
  },
  {
    "path": "src/optimagic/differentiation/finite_differences.py",
    "content": "\"\"\"Finite difference formulae for jacobians and hessians.\n\nAll functions in this module should not only work for the simple case of one positive\nand/or one negative step, but also for the Richardson Extrapolation case with several\npositive and/or several negative steps.\n\nSince steps and evals contain NaNs, we have to make sure that the functions do not raise\nwarnings or errors for that case.\n\n\"\"\"\n\nfrom typing import NamedTuple\n\nimport numpy as np\n\n\nclass Evals(NamedTuple):\n    pos: np.ndarray\n    neg: np.ndarray\n\n\ndef jacobian(evals, steps, f0, method):\n    \"\"\"Calculate a Jacobian estimate with finite differences according to method.\n\n    Notation: f:R^dim_x -> R^dim_f. We compute the derivative at x0, with f0 = f(x0).\n\n    Args:\n        evals (namedtuple): It has the fields called pos and neg for evaluations with\n            positive and negative steps, respectively. Each field is a numpy array\n            of shape (n_steps, dim_f, dim_x). It contains np.nan for evaluations that\n            failed or were not attempted because a one-sided derivative rule was chosen.\n        steps (namedtuple): Namedtuple with the fields pos and neg. Each field\n            contains a numpy array of shape (n_steps, dim_x) with the steps in\n            the corresponding direction. The steps are always symmetric, in the sense\n            that steps.neg[i, j] = - steps.pos[i, j] unless one of them is NaN.\n        f0 (numpy.ndarray): Numpy array of length dim_f with the output of the function\n            at the user supplied parameters.\n        method (str): One of [\"forward\", \"backward\", \"central\"]\n\n    Returns:\n        jac (numpy.ndarray): Numpy array of shape (n_steps, dim_f, dim_x) with estimated\n            Jacobians. I.e. 
there are n_step jacobian estimates.\n\n    \"\"\"\n    n_steps, dim_f, dim_x = evals.pos.shape\n    if method == \"forward\":\n        diffs = evals.pos - f0.reshape(1, dim_f, 1)\n        jac = diffs / steps.pos.reshape(n_steps, 1, dim_x)\n    elif method == \"backward\":\n        diffs = evals.neg - f0.reshape(1, dim_f, 1)\n        jac = diffs / steps.neg.reshape(n_steps, 1, dim_x)\n    elif method == \"central\":\n        diffs = evals.pos - evals.neg\n        deltas = steps.pos - steps.neg\n        jac = diffs / deltas.reshape(n_steps, 1, dim_x)\n    else:\n        raise ValueError(\"Method has to be 'forward', 'backward' or 'central'.\")\n    return jac\n\n\ndef hessian(evals, steps, f0, method):\n    \"\"\"Calculate a Hessian estimate with finite differences according to method.\n\n    Notation: f:R^dim_x -> R^dim_f. We compute the derivative at x0, with f0 = f(x0).\n\n    The formulae in Rideout [2009] which are implemented here use three types of\n    function evaluations:\n\n    1. f(theta + delta_j e_j)\n    2. f(theta + delta_j e_j + delta_k e_k)\n    3. f(theta + delta_j e_j - delta_k e_k)\n\n    Which are called here: 1. ``evals_one``, 2. ``evals_two`` and 3. ``evals_cross``,\n    corresponding to the idea that we are moving in one direction, in two directions and\n    in two cross directions (opposite signs). Note that theta denotes x0, delta_j the\n    step size for the j-th variable and e_j the j-th standard basis vector.\n\n    Note also that the brackets in the finite difference formulae are not arbitrary but\n    improve the numerical accuracy, see Rideout [2009].\n\n    Args:\n        evals (dict[namedtuple]): Dictionary with keys \"one_step\" for function evals in\n            a single step direction, \"two_step\" for evals in two steps in the same\n            direction, and \"cross_step\" for evals in two steps in the opposite\n            direction. 
Each dict item has the fields called pos and neg for evaluations\n            with positive and negative steps, respectively. Each field is a numpy array\n            of shape (n_steps, dim_f, dim_x). It contains np.nan for evaluations that\n            failed or were not attempted because a one-sided derivative rule was chosen.\n        steps (namedtuple): Namedtuple with the fields pos and neg. Each field\n            contains a numpy array of shape (n_steps, dim_x) with the steps in\n            the corresponding direction. The steps are always symmetric, in the sense\n            that steps.neg[i, j] = - steps.pos[i, j] unless one of them is NaN.\n        f0 (numpy.ndarray): Numpy array of length dim_f with the output of the function\n            at the user supplied parameters.\n        method (str): One of {\"forward\", \"backward\", \"central_average\", \"central_cross\"}\n            These correspond to the finite difference approximations defined in\n            equations [7, x, 8, 9] in Rideout [2009], where (\"backward\", x) is not found\n            in Rideout [2009] but is the natural extension of equation 7 to the backward\n            case.\n\n    Returns:\n        hess (numpy.ndarray): Numpy array of shape (n_steps, dim_f, dim_x, dim_x) with\n            estimated Hessians. I.e. 
there are n_step hessian estimates.\n\n    \"\"\"\n    n_steps, dim_f, dim_x = evals[\"one_step\"].pos.shape\n    f0 = f0.reshape(1, dim_f, 1, 1)\n\n    # rename variables to increase readability in formulas\n    evals_one = Evals(\n        pos=np.expand_dims(evals[\"one_step\"].pos, axis=3),\n        neg=np.expand_dims(evals[\"one_step\"].neg, axis=3),\n    )\n    evals_two = evals[\"two_step\"]\n    evals_cross = evals[\"cross_step\"]\n\n    if method == \"forward\":\n        outer_product_steps = _calculate_outer_product_steps(steps.pos, n_steps, dim_x)\n        diffs = (evals_two.pos - evals_one.pos.swapaxes(2, 3)) - (evals_one.pos - f0)\n        hess = diffs / outer_product_steps\n    elif method == \"backward\":\n        outer_product_steps = _calculate_outer_product_steps(steps.neg, n_steps, dim_x)\n        diffs = (evals_two.neg - evals_one.neg.swapaxes(2, 3)) - (evals_one.neg - f0)\n        hess = diffs / outer_product_steps\n    elif method == \"central_average\":\n        outer_product_steps = _calculate_outer_product_steps(steps.pos, n_steps, dim_x)\n        forward = (evals_two.pos - evals_one.pos.swapaxes(2, 3)) - (evals_one.pos - f0)\n        backward = (evals_two.neg - evals_one.neg.swapaxes(2, 3)) - (evals_one.neg - f0)\n        hess = (forward + backward) / (2 * outer_product_steps)\n    elif method == \"central_cross\":\n        outer_product_steps = _calculate_outer_product_steps(steps.pos, n_steps, dim_x)\n        diffs = (evals_two.pos - evals_cross.pos) - (evals_cross.neg - evals_two.neg)\n        hess = diffs / (4 * outer_product_steps)\n    else:\n        raise ValueError(\n            \"Method has to be 'forward', 'backward', 'central_average' or \",\n            \"'central_cross'.\",\n        )\n    return hess\n\n\ndef _calculate_outer_product_steps(signed_steps, n_steps, dim_x):\n    \"\"\"Calculate array of outer product of steps.\n\n    Args:\n        signed_steps (np.ndarray): Square array with either pos or neg steps returned\n      
      by :func:`~optimagic.differentiation.generate_steps.generate_steps` function\n        n_steps (int): Number of steps needed. For central methods, this is\n            the number of steps per direction. It is 1 if no Richardson extrapolation\n            is used.\n        dim_x (int): Dimension of input vector x.\n\n    Returns:\n        outer_product_steps (np.ndarray): Array with outer product of steps. Has\n            dimension (n_steps, 1, dim_x, dim_x).\n\n    \"\"\"\n    outer_product_steps = np.array(\n        [np.outer(signed_steps[j], signed_steps[j]) for j in range(n_steps)]\n    ).reshape(n_steps, 1, dim_x, dim_x)\n    return outer_product_steps\n"
  },
  {
    "path": "src/optimagic/differentiation/generate_steps.py",
    "content": "import warnings\nfrom typing import NamedTuple\n\nimport numpy as np\n\nfrom optimagic.utilities import fast_numpy_full\n\n\nclass Steps(NamedTuple):\n    pos: np.ndarray\n    neg: np.ndarray\n\n\ndef generate_steps(\n    x,\n    method,\n    n_steps,\n    target,\n    base_steps,\n    scaling_factor,\n    bounds,\n    step_ratio,\n    min_steps,\n):\n    \"\"\"Generate steps for finite differences with or without Richardson Extrapolation.\n\n    steps can be used to construct x-vectors at which the function has to be evaluated\n    for finite difference formulae. How the vectors are constructed from the steps\n    differs between first and second derivative. Note that both positive and negative\n    steps are returned, even for one-sided methods, because bounds might make it\n    necessary to flip the direction of the method.\n\n    The rule of thumb for the generation of base_steps is:\n    - first_derivative: `np.finfo(float).eps ** (1 / 2) * np.maximum(np.abs(x), 0.1)`\n    - second_derivative: `np.finfo(float).eps ** (1 / 3) * np.maximum(np.abs(x), 0.1)`\n    Where `np.finfo(float).eps` is machine accuracy. This rule of thumb\n    is also used in statsmodels and scipy.\n\n    The step generation is bound aware and will try to find a good solution if\n    any step would violate a bound. For this, we use the following rules until\n    no bounds are violated:\n\n    1. If a one sided method is used, flip to the direction with more distance\n        to the bound.\n    2. Decrease the base_steps, unless this would mean to go below min_steps. By default\n        min_steps is equal to base_steps, so no squeezing happens unless explicitly\n        requested by setting a smaller min_step.\n    3. Set the conflicting steps to NaN, which means that this step won't be\n        usable in the calculation of derivatives. All derivative functions can\n        handle NaNs and will produce the best possible derivative estimate given\n        the remaining steps. 
If all steps of one parameter are set to NaN, no\n        derivative estimate will be produced for that parameter.\n\n    Args:\n        x (numpy.ndarray): 1d array at which the derivative is calculated.\n        method (str): One of [\"central\", \"forward\", \"backward\"]\n        n_steps (int): Number of steps needed. For central methods, this is\n            the number of steps per direction. It is 1 if no Richardson extrapolation\n            is used.\n        target (str): One of [\"first_derivative\", \"second_derivative\"]. This is used to\n            choose the appropriate rule of thumb for the base_steps.\n        base_steps (numpy.ndarray, optional): 1d array of the same length as x.\n            base_steps * scaling_factor is the absolute value of the first (and possibly\n            only) step used in the finite differences approximation of the derivative.\n            If base_steps * scaling_factor conflicts with bounds, the actual steps will\n            be adjusted. If base_steps is not provided, it will be determined according\n            to a rule of thumb as long as this does not conflict with min_steps.\n        scaling_factor (numpy.ndarray or float): Scaling factor which is applied to\n            base_steps. If it is an numpy.ndarray, it needs to have the same shape as x.\n            scaling_factor is useful if you want to increase or decrease the base_step\n            relative to the rule-of-thumb or user provided base_step, for example to\n            benchmark the effect of the step size.\n        lower_bounds (numpy.ndarray): 1d array with lower bounds for each parameter.\n        upper_bounds (numpy.ndarray): 1d array with upper bounds for each parameter.\n        step_ratio (float or array): Ratio between two consecutive Richardson\n            extrapolation steps in the same direction. default 2.0. Has to be larger\n            than one. 
step ratio is only used if n_steps > 1.\n        min_steps (numpy.ndarray): Minimal possible step sizes that can be chosen to\n            accommodate bounds. Needs to have same length as x. By default min_steps is\n            equal to base_steps, i.e step size is not decreased beyond what is optimal\n            according to the rule of thumb.\n\n    Returns:\n        steps (namedtuple): Namedtuple with the field names pos and neg. Each field\n            contains a numpy array of shape (n_steps, len(x)) with the steps in\n            the corresponding direction. The steps are always symmetric, in the sense\n            that steps.neg[i, j] = - steps.pos[i, j] unless one of them is NaN.\n\n    \"\"\"\n    base_steps = _calculate_or_validate_base_steps(\n        base_steps, x, target, min_steps, scaling_factor\n    )\n    min_steps = base_steps if min_steps is None else min_steps\n\n    lower_bounds = bounds.lower\n    upper_bounds = bounds.upper\n    # None-valued bounds are handled by instantiating them as an -inf and inf array. 
In\n    # the future, this should be handled more gracefully.\n    if lower_bounds is None:\n        lower_bounds = fast_numpy_full(len(x), fill_value=-np.inf)\n    if upper_bounds is None:\n        upper_bounds = fast_numpy_full(len(x), fill_value=np.inf)\n\n    assert (upper_bounds - lower_bounds >= 2 * min_steps).all(), (\n        \"min_steps is too large to fit into bounds.\"\n    )\n\n    upper_step_bounds = upper_bounds - x\n    lower_step_bounds = lower_bounds - x\n\n    pos = step_ratio ** np.arange(n_steps) * base_steps.reshape(-1, 1)\n    neg = -pos.copy()\n\n    if method in [\"forward\", \"backward\"]:\n        pos, neg = _set_unused_side_to_nan(\n            x, pos, neg, method, lower_step_bounds, upper_step_bounds\n        )\n\n    if np.isfinite(lower_bounds).any() or np.isfinite(upper_bounds).any():\n        pos, neg = _rescale_to_accomodate_bounds(\n            base_steps, pos, neg, lower_step_bounds, upper_step_bounds, min_steps\n        )\n\n    with warnings.catch_warnings():\n        warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n        pos[pos > upper_step_bounds.reshape(-1, 1)] = np.nan\n        neg[neg < lower_step_bounds.reshape(-1, 1)] = np.nan\n\n    steps = Steps(pos=pos.T, neg=neg.T)\n\n    return steps\n\n\ndef _calculate_or_validate_base_steps(base_steps, x, target, min_steps, scaling_factor):\n    \"\"\"Validate user provided base_steps or generate them with rule of thumb.\n\n    Args:\n        base_steps (numpy.ndarray, optional): 1d array of the same length as x.\n            base_steps * scaling_factor is the absolute value of the first (and possibly\n            only) step used in the finite differences approximation of the derivative.\n        x (numpy.ndarray): 1d array at which the derivative is evaluated\n        target (str): One of [\"first_derivative\", \"second_derivative\"]. 
This is used to\n            choose the appropriate rule of thumb for the base_steps.\n        min_steps (numpy.ndarray or None): Minimal possible step sizes that can be\n            chosen to accommodate bounds. Needs to have same length as x.\n        scaling_factor (numpy.ndarray or float): Scaling factor which is applied to\n            base_steps. If it is an :class:`numpy.ndarray`, it needs to have the same\n            shape as x.\n\n    Returns:\n        base_steps (numpy.ndarray): 1d array of the same length as x with the\n            absolute value of the first step.\n\n    \"\"\"\n    if np.any(scaling_factor <= 0):\n        raise ValueError(\"Scaling factor must be strictly positive.\")\n\n    if base_steps is not None:\n        if np.isscalar(base_steps):\n            base_steps = np.full(len(x), base_steps)\n\n        if base_steps.shape != x.shape:\n            raise ValueError(\"base_steps has to have the same shape as x.\")\n\n        base_steps = base_steps * scaling_factor\n\n        if np.isscalar(min_steps):\n            min_steps = np.full(len(x), min_steps)\n\n        if min_steps is not None and (base_steps <= min_steps).any():\n            raise ValueError(\n                \"scaling_factor * base_steps must be larger than min_steps.\"\n            )\n    else:\n        eps = np.finfo(float).eps\n        if target == \"first_derivative\":\n            base_steps = eps ** (1 / 2) * np.maximum(np.abs(x), 0.1) * scaling_factor\n        elif target == \"second_derivative\":\n            base_steps = eps ** (1 / 3) * np.maximum(np.abs(x), 0.1) * scaling_factor\n        else:\n            raise ValueError(f\"Invalid target: {target}.\")\n        if min_steps is not None:\n            base_steps = np.clip(base_steps, a_min=min_steps, a_max=None)\n    return base_steps\n\n\ndef _set_unused_side_to_nan(\n    x,  # noqa: ARG001\n    pos,\n    neg,\n    method,\n    lower_step_bounds,\n    upper_step_bounds,\n):\n    \"\"\"Set unused side (i.e. 
pos or neg) to np.nan.\n\n    A side is not used if:\n    - It was not requested due to one sided derivatives.\n    - It was requested but a side switch was better due to bounds.\n\n    This function does not yet guarantee that all bounds are fulfilled. It only switches\n    to the side that has more space to the bound if there is a bound violation.\n\n    Args:\n        x (numpy.ndarray): 1d array with parameters.\n        pos (numpy.ndarray): Array with positive steps of shape (n_steps, len(x))\n        neg (numpy.ndarray): Array with negative steps of shape (n_steps, len(x))\n        method (str): One of [\"forward\", \"backward\"]\n        lower_step_bounds (numpy.ndarray): Lower bounds for steps.\n        upper_step_bounds (numpy.ndarray): Upper bounds for steps.\n\n    Returns:\n        pos (numpy.ndarray): Copy of pos with additional NaNs\n        neg (numpy.ndarray): Copy of neg with additional NaNs\n\n    \"\"\"\n    pos = pos.copy()\n    neg = neg.copy()\n    better_side = np.where(upper_step_bounds >= -lower_step_bounds, 1, -1)\n    max_abs_step = pos[:, -1]\n    if method == \"forward\":\n        used_side = np.where(upper_step_bounds >= max_abs_step, 1, better_side)\n    elif method == \"backward\":\n        used_side = np.where(-lower_step_bounds >= max_abs_step, -1, better_side)\n    else:\n        raise ValueError(\"This function only works for forward or backward method.\")\n\n    pos[used_side == -1] = np.nan\n    neg[used_side == 1] = np.nan\n    return pos, neg\n\n\ndef _rescale_to_accomodate_bounds(\n    base_steps, pos, neg, lower_step_bounds, upper_step_bounds, min_steps\n):\n    \"\"\"Rescale steps to make them compatible with bounds unless this violates min_steps.\n\n    Args:\n        base_steps (np.ndarray, optional): 1d array of the same length as x.\n            base_steps * scaling_factor is the absolute value of the first (and possibly\n            only) step used in the finite differences approximation of the derivative.\n        pos 
(np.ndarray): Array with positive steps of shape (n_steps, len(x))\n        neg (np.ndarray): Array with negative steps of shape (n_steps, len(x))\n        lower_step_bounds (np.ndarray): Lower bounds for steps.\n        upper_step_bounds (np.ndarray): Upper bounds for steps.\n        min_steps (np.ndarray): Minimal possible step sizes that can be chosen\n            to accomodate bounds. Needs to have same length as x.\n\n    Returns:\n        pos (np.ndarray): Copy of pos with rescaled steps.\n        neg (np.ndarray): Copy of neg with rescaled steps.\n\n    \"\"\"\n    with warnings.catch_warnings():\n        warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n        pos_needed_scaling = _fillna(\n            upper_step_bounds / np.nanmax(pos, axis=1), 1\n        ).clip(0, 1)\n        neg_needed_scaling = _fillna(\n            lower_step_bounds / np.nanmin(neg, axis=1), 1\n        ).clip(0, 1)\n    needed_scaling = np.minimum(pos_needed_scaling, neg_needed_scaling)\n\n    min_possible_scaling = min_steps / base_steps\n\n    scaling = np.maximum(needed_scaling, min_possible_scaling).reshape(-1, 1)\n\n    pos = pos * scaling\n    neg = neg * scaling\n    return pos, neg\n\n\ndef _fillna(x, val):\n    return np.where(np.isnan(x), val, x)\n"
  },
  {
    "path": "src/optimagic/differentiation/numdiff_options.py",
    "content": "from dataclasses import dataclass\nfrom enum import Enum\nfrom typing import Callable, Literal, TypedDict\n\nfrom typing_extensions import NotRequired\n\nfrom optimagic.batch_evaluators import process_batch_evaluator\nfrom optimagic.config import DEFAULT_N_CORES\nfrom optimagic.exceptions import InvalidNumdiffOptionsError\nfrom optimagic.typing import BatchEvaluatorLiteral\n\n\n@dataclass(frozen=True)\nclass NumdiffOptions:\n    \"\"\"Options for numerical differentiation.\n\n    Attributes:\n        method: The method to use for numerical differentiation. Can be \"central\",\n            \"forward\", or \"backward\".\n        step_size: The step size to use for numerical differentiation. If None, the\n            default step size will be used.\n        scaling_factor: The scaling factor to use for numerical differentiation.\n        min_steps: The minimum step size to use for numerical differentiation. If None,\n            the default minimum step size will be used.\n        n_cores: The number of cores to use for numerical differentiation.\n        batch_evaluator: The evaluator to use for batch evaluation. Allowed are\n            \"joblib\", \"pathos\", and \"threading\", or a custom callable.\n\n    Raises:\n        InvalidNumdiffError: If the numdiff options cannot be processed, e.g. 
because\n            they do not have the correct type.\n\n    \"\"\"\n\n    method: Literal[\n        \"central\", \"forward\", \"backward\", \"central_cross\", \"central_average\"\n    ] = \"central\"\n    step_size: float | None = None\n    scaling_factor: float = 1\n    min_steps: float | None = None\n    n_cores: int = DEFAULT_N_CORES\n    batch_evaluator: BatchEvaluatorLiteral | Callable = \"joblib\"  # type: ignore\n\n    def __post_init__(self) -> None:\n        _validate_attribute_types_and_values(self)\n\n\nclass NumdiffOptionsDict(TypedDict):\n    method: NotRequired[\n        Literal[\"central\", \"forward\", \"backward\", \"central_cross\", \"central_average\"]\n    ]\n    step_size: NotRequired[float | None]\n    scaling_factor: NotRequired[float]\n    min_steps: NotRequired[float | None]\n    n_cores: NotRequired[int]\n    batch_evaluator: NotRequired[BatchEvaluatorLiteral | Callable]  # type: ignore\n\n\ndef pre_process_numdiff_options(\n    numdiff_options: NumdiffOptions | NumdiffOptionsDict | None,\n) -> NumdiffOptions | None:\n    \"\"\"Convert all valid types of Numdiff options to optimagic.NumdiffOptions class.\n\n    This just harmonizes multiple ways of specifying numdiff options into a single\n    format. It performs runtime type checks, but it does not check whether numdiff\n    options are consistent with other option choices.\n\n    Args:\n        numdiff_options: The user provided numdiff options.\n\n    Returns:\n        The numdiff options in the optimagic format.\n\n    Raises:\n        InvalidNumdiffOptionsError: If numdiff options cannot be processed, e.g. 
because\n            they do not have the correct type.\n\n    \"\"\"\n    if isinstance(numdiff_options, NumdiffOptions) or numdiff_options is None:\n        pass\n    else:\n        try:\n            numdiff_options = NumdiffOptions(**numdiff_options)\n        except (KeyboardInterrupt, SystemExit):\n            raise\n        except Exception as e:\n            if isinstance(e, InvalidNumdiffOptionsError):\n                raise e\n            raise InvalidNumdiffOptionsError(\n                f\"Invalid numdiff options of type: {type(numdiff_options)}. Numdiff \"\n                \"options must be of type optimagic.NumdiffOptions, a dictionary with a\"\n                \"subset of the keys {'method', 'step_size', 'scaling_factor', \"\n                \"'min_steps', 'n_cores', 'batch_evaluator'}, or None.\"\n            ) from e\n\n    return numdiff_options\n\n\ndef _validate_attribute_types_and_values(options: NumdiffOptions) -> None:\n    if options.method not in {\n        \"central\",\n        \"forward\",\n        \"backward\",\n        \"central_cross\",\n        \"central_average\",\n    }:\n        raise InvalidNumdiffOptionsError(\n            f\"Invalid numdiff `method`: {options.method}. Numdiff `method` must be \"\n            \"one of 'central', 'forward', 'backward', 'central_cross', or \"\n            \"'central_average'.\"\n        )\n\n    if options.step_size is not None and (\n        not isinstance(options.step_size, float) or options.step_size <= 0\n    ):\n        raise InvalidNumdiffOptionsError(\n            f\"Invalid numdiff `step_size`: {options.step_size}. Step size must be a \"\n            \"float greater than 0.\"\n        )\n\n    if (\n        not isinstance(options.scaling_factor, int | float)\n        or options.scaling_factor <= 0\n    ):\n        raise InvalidNumdiffOptionsError(\n            f\"Invalid numdiff `scaling_factor`: {options.scaling_factor}. 
Scaling \"\n            \"factor must be an integer or float greater than 0.\"\n        )\n\n    if options.min_steps is not None and (\n        not isinstance(options.min_steps, float) or options.min_steps <= 0\n    ):\n        raise InvalidNumdiffOptionsError(\n            f\"Invalid numdiff `min_steps`: {options.min_steps}. Minimum step \"\n            \"size must be a float greater than 0.\"\n        )\n\n    if not isinstance(options.n_cores, int) or options.n_cores <= 0:\n        raise InvalidNumdiffOptionsError(\n            f\"Invalid numdiff `n_cores`: {options.n_cores}. Number of cores \"\n            \"must be an integer greater than 0.\"\n        )\n\n    try:\n        process_batch_evaluator(options.batch_evaluator)\n    except Exception as e:\n        raise InvalidNumdiffOptionsError(\n            f\"Invalid batch evaluator: {options.batch_evaluator}.\"\n        ) from e\n\n\nclass NumdiffPurpose(str, Enum):\n    OPTIMIZE = \"optimize\"\n    ESTIMATE_JACOBIAN = \"estimate_jacobian\"\n    ESTIMATE_HESSIAN = \"estimate_hessian\"\n\n\ndef get_default_numdiff_options(\n    purpose: NumdiffPurpose,\n) -> NumdiffOptions:\n    \"\"\"Get default numerical derivatives options for a given purpose.\n\n    Args:\n        purpose: For what purpose the numdiff options are used.\n\n    Returns:\n        The numdiff options with defaults filled in.\n\n    \"\"\"\n    defaults: NumdiffOptionsDict = {}\n\n    if purpose == NumdiffPurpose.OPTIMIZE:\n        defaults[\"method\"] = \"forward\"\n\n    if purpose == NumdiffPurpose.ESTIMATE_JACOBIAN:\n        defaults[\"method\"] = \"central\"\n\n    if purpose == NumdiffPurpose.ESTIMATE_HESSIAN:\n        defaults[\"method\"] = \"central_cross\"\n        defaults[\"scaling_factor\"] = 2\n\n    return NumdiffOptions(**defaults)\n"
  },
  {
    "path": "src/optimagic/differentiation/richardson_extrapolation.py",
    "content": "import numpy as np\nfrom scipy import stats\nfrom scipy.linalg import pinv\nfrom scipy.ndimage import convolve1d\n\n\ndef richardson_extrapolation(sequence, steps, method=\"central\", num_terms=None):\n    \"\"\"Apply Richardson extrapolation to sequence.\n\n    Suppose you have a series expansion\n\n        L = g(h) + a0*(h**p_0) + a1*(h**p_1) + a2*(h**p_2) + ... ,\n\n    where p_i = order + exponentiation_step * i  and g(h) -> L as h -> 0, but g(0) != L.\n\n    For ``method``='central', that is, for a sequence resulting from a central\n    differences derivative approximation, we get ``order`` = 2 and\n    ``exponentiation_step`` = 2, which would result in\n\n        L = g(h) + a0*(h**2) + a1*(h**4) + a2*(h**6) + ...,\n\n    where g(h) := [f(x + h) - f(x - h)] / 2h and f the function of interest. See\n    function ``_get_order_and_exponentiation_step`` for more details.\n\n    If we evaluate the right hand side for different stepsizes h we can fit a polynomial\n    to that sequence of approximations and use the estimated intercept as a better\n    approximation for L. Further, we can compute estimation errors of our approximation.\n\n\n    Args:\n        sequence (np.ndarray): The sequence of which we want to approximate the limit.\n            Has dimension (k x n x m), where k denotes the number of sequence elements\n            and an element ``sequence[l, :, :]`` denotes the (n x m) dimensional element\n\n        steps (namedtuple): Namedtuple with the field names pos and neg. Each field\n            contains a numpy array of shape (n_steps, len(x)) with the steps in\n            the corresponding direction. The steps are always symmetric, in the sense\n            that steps.neg[i, j] = - steps.pos[i, j] unless one of them is NaN.\n\n        method (str): One of [\"central\", \"forward\", \"backward\"], default \"central\".\n\n        num_terms (int): Number of terms needed to construct one estimate. 
Default is\n            ``steps.shape[0] - 1``.\n\n    Returns:\n        limit (np.ndarray): The refined limit.\n        error (np.ndarray): The error approximation of ``limit``.\n\n    \"\"\"\n    seq_len = sequence.shape[0]\n    steps = steps.pos\n    n_steps = steps.shape[0]\n    num_terms = n_steps if num_terms is None else num_terms\n\n    assert seq_len == n_steps, (\n        \"Length of ``steps`` must coincide with length of ``sequence``.\"\n    )\n    assert num_terms > 0, \"``num_terms`` must be greater than zero.\"\n    assert seq_len - 1 >= num_terms, (\n        \"``num_terms`` cannot be greater than ``seq_len`` - 1.\"\n    )\n\n    step_ratio = _compute_step_ratio(steps)\n    order, exponentiation_step = _get_order_and_exponentiation_step(method)\n\n    richardson_coef = _richardson_coefficients(\n        num_terms,\n        step_ratio,\n        exponentiation_step,\n        order,\n    )\n\n    new_sequence = convolve1d(\n        input=sequence, weights=richardson_coef[::-1], axis=0, origin=num_terms // 2\n    )\n\n    m = seq_len - num_terms\n    mm = m + 1 if num_terms >= 2 else seq_len\n\n    abserr = _estimate_error(new_sequence[:mm], sequence, richardson_coef)\n\n    limit = new_sequence[:m]\n    error = abserr[:m]\n\n    return limit, error\n\n\ndef _richardson_coefficients(num_terms, step_ratio, exponentiation_step, order):\n    \"\"\"Compute Richardson coefficients.\n\n    Let e := ``exponentiation_step``, r := ``step_ratio``, o := ``order`` and\n    n := ``num_terms``. We build a matrix of the form\n\n            [[1      1                  ...         1                ],\n             [1    1/(s)**(2*o)         ...  1/(s)**(2*(o+n))        ],\n        R =  [1    1/(s**2)**(2*o)      ...  1/(s**2)**(2*(o+n))     ],\n             [...                       ...        ...               ],\n             [1    1/(s**(n+1))**(2*o)  ...  
1/(s**(n+1))**(2*(o+n)) ]]\n\n    which is the weighting matrix in equation 24 in https://tinyurl.com/ybtfj4pm.\n    We then return the first row of R^{-1} as the coefficients, as can be seen in\n    equation 25 in https://tinyurl.com/ybtfj4pm.\n\n\n    Args:\n        num_terms (int): Number of terms needed to construct one estimate. Default is\n            ``steps.shape[0] - 1``.\n\n        step_ratio (float): Ratio between two consecutive steps. Order is chosen such\n            that ``step_ratio`` >= 1.\n\n        exponentiation_step (int): Step representing the growth of the exponent in\n            the series expansions of the limit.\n            For central difference derivative approximation ``exponentiation_step`` = 2.\n\n        order (int): Initial order of the approximation error of sequence elements.\n            For central difference derivative approximation ``order`` = 2.\n\n    Returns:\n        coef (np.ndarray): Richardson coefficients, array has length num_terms + 1.\n\n    Example:\n    >>> import numpy as np\n    >>> num_terms = 2\n    >>> step_ratio = 2.\n    >>> exponentiation_step = 2\n    >>> order = 2\n    >>> _richardson_coefficients(num_terms, step_ratio, exponentiation_step, order)\n    array([ 0.02222222, -0.44444444,  1.42222222])\n\n    \"\"\"\n    rows, cols = np.ogrid[: num_terms + 1, :num_terms]\n\n    coef_mat = np.ones((num_terms + 1, num_terms + 1))\n    coef_mat[:, 1:] = (1.0 / step_ratio) ** (\n        rows @ (exponentiation_step * cols + order)\n    )\n\n    coef = pinv(coef_mat)[0]\n    return coef\n\n\ndef _estimate_error(new_seq, old_seq, richardson_coef):\n    \"\"\"Estimate error of multiple Richardson limit approximation.\n\n    Args:\n        new_seq (np.ndarray): Multiple estimates of the limit of ``old_seq``. The last\n            two dimensions coincide with those of ``old_seq``. 
The first dimensions\n            denotes the number of different estimates.\n\n        old_seq (np.ndarray): The sequence of which we want to approximate the limit.\n            Has dimension (k x n x m), where k denotes the number of sequence elements\n            and an element ``sequence[l, :, :]`` denotes the (n x m) dimensional element\n\n        richardson_coef (np.ndarray): Richardson coefficients. See function\n            ``_richardson_coefficient`` for details.\n\n    Returns:\n        abserr (np.ndarray): The error estimate for each limit approximation in\n            ``new_seq``.\n\n    \"\"\"\n    eps = np.finfo(float).eps\n    t_quantile = stats.t(df=1).ppf(0.975)  # 12.7062047361747 in numdifftools\n    new_seq_len = new_seq.shape[0]\n\n    unnormalized_covariance = np.sum(richardson_coef**2)\n    fact = np.maximum(t_quantile * np.sqrt(unnormalized_covariance), eps * 10.0)\n\n    if new_seq_len <= 1:\n        delta = np.diff(old_seq, axis=0)\n        tol = np.maximum(np.abs(old_seq[:-1]), np.abs(old_seq[1:])) * fact\n        err = np.abs(delta)\n        converged = err <= tol\n        abserr = err[-new_seq_len:] + np.where(\n            converged[-new_seq_len:],\n            tol[-new_seq_len:] * 10,\n            abs(new_seq - old_seq[-new_seq_len:]) * fact,\n        )\n    else:\n        err = np.abs(np.diff(new_seq, axis=0)) * fact\n        tol = np.maximum(np.abs(new_seq[1:]), np.abs(new_seq[:-1])) * eps * fact\n        converged = err <= tol\n        abserr = err + np.where(\n            converged,\n            tol * 10,\n            abs(new_seq[:-1] - old_seq[-new_seq_len + 1 :]) * fact,\n        )\n\n    return abserr\n\n\ndef _get_order_and_exponentiation_step(method):\n    \"\"\"Return order and exponentiation step given ``method``.\n\n    Given ``method`` we return the initial order of the approximation error of the\n    sequence under consideration (order) as well as the step size representing the\n    growth of the exponent in the series 
expansion of the limit (exponentiation_step).\n    See function ``richardson_extrapolation`` for more details.\n\n    For different methods, different values of order and exponentiation step apply.\n    Consider the following examples, where we continue the notation from function\n    ``richardson_extrapolation`` and use O() to denote the Big O Laundau symbol.\n\n    Central Differences.\n        Derivative approximation via central difference is given by\n            g(h) := [f(x + h) - f(x - h)] / 2h = f'(x) + r(x, h),\n        where r(x, h) denotes the remainder term.\n\n        If we expand the remainder term r(x, h) we get\n            r(x, h) = a0*(h**2) + a1*(h**4) + a2*(h**6) + ...\n        with a0 = f''(x) / 2!, a1 = f'''(x) / 3! etc.\n\n        Rearanging terms we can write L := f'(x) = g(h) - r(x, h) = g(h) + O(h**2) and\n        we notice that order = 2 and exponentiation_step = 2.\n\n    Forward Differences.\n        Derivative approximation via forward difference is given by\n            g(h) := [f(x + h) - f(x)] / h = f'(x) + r(x, h),\n            where again r(x, h) denotes the remainder term.\n\n        If we expand the remainder term r(x, h) we get\n            r(x, h) = a0*(h**1) + a1*(h**2) + a2*(h**3) + ...\n        with a0 = f''(x) / 2!, a1 = f'''(x) / 3! 
etc.\n\n        Rearanging terms we can write L := f'(x) = g(h) - r(x, h) = g(h) + O(h) and\n        we notice that order = 1 and exponentiation_step = 1.\n\n    Backward Differences.\n        Analogous to forward differences.\n\n\n    Args:\n        method (str): One of [\"central\", \"forward\", \"backward\"], default \"central\".\n\n    Returns:\n        order (int): Initial order of the approximation error of sequence elements.\n\n        exponentiation_step (int): Step representing the growth of the exponent in the\n            series expansions of the limit.\n\n    Example:\n    >>> _get_order_and_exponentiation_step('central')\n    (2, 2)\n\n    \"\"\"\n    lookup = {\n        \"central\": (2, 2),\n        \"forward\": (1, 1),\n        \"backward\": (1, 1),\n    }\n\n    order, exponentiation_step = lookup[method]\n    return order, exponentiation_step\n\n\ndef _compute_step_ratio(steps):\n    \"\"\"Compute the step ratio used in producing ``steps``.\n\n    Args:\n        steps (np.ndarray): Array of shape (n_steps, len(x)) with the steps in the\n            corresponding direction.\n\n    Returns:\n        step_ratio (float): The step ratio used in producing ``steps``.\n\n    Example:\n    >>> import numpy as np\n    >>> steps = np.array([[2., np.nan, 2], [4, 4, 4], [8, 8, np.nan]])\n    >>> _compute_step_ratio(steps)\n    2.0\n\n    \"\"\"\n    ratios = steps[1:, :] / steps[:-1, :]\n    finite_ratios = ratios[np.isfinite(ratios)]\n\n    step_ratio = finite_ratios.item(0)\n    return step_ratio\n"
  },
  {
    "path": "src/optimagic/examples/__init__.py",
    "content": ""
  },
  {
    "path": "src/optimagic/examples/criterion_functions.py",
    "content": "\"\"\"Import common objective functions in several optimagic compatible versions.\n\nAll implemented functions accept arbitrary pytrees as parameters. If possible they are\nimplemented as scalar and least-squares versions.\n\n\"\"\"\n\nfrom __future__ import annotations\n\nimport numpy as np\nimport pandas as pd\nfrom numpy.typing import NDArray\nfrom pybaum import tree_just_flatten, tree_unflatten\n\nfrom optimagic import mark\nfrom optimagic.optimization.fun_value import (\n    FunctionValue,\n)\nfrom optimagic.parameters.block_trees import matrix_to_block_tree\nfrom optimagic.parameters.tree_registry import get_registry\nfrom optimagic.typing import PyTree\n\nREGISTRY = get_registry(extended=True)\n\n\n@mark.scalar\ndef trid_scalar(params: PyTree) -> float:\n    \"\"\"Implement Trid function: https://www.sfu.ca/~ssurjano/trid.html.\"\"\"\n    x = _get_x(params)\n    return ((x - 1) ** 2).sum() - (x[1:] * x[:-1]).sum()\n\n\n@mark.scalar\ndef trid_gradient(params: PyTree) -> PyTree:\n    \"\"\"Calculate gradient of trid function.\"\"\"\n    x = _get_x(params)\n    l1 = np.insert(x, 0, 0)\n    l1 = np.delete(l1, [-1])\n    l2 = np.append(x, 0)\n    l2 = np.delete(l2, [0])\n    flat = 2 * (x - 1) - l1 - l2\n    return _unflatten_gradient(flat, params)\n\n\n@mark.scalar\ndef trid_fun_and_gradient(params: PyTree) -> tuple[float, PyTree]:\n    \"\"\"Implement Trid function and calculate gradient.\"\"\"\n    val = trid_scalar(params)\n    grad = trid_gradient(params)\n    return val, grad\n\n\n@mark.scalar\ndef rhe_scalar(params: PyTree) -> float:\n    \"\"\"Implement Rotated Hyper Ellipsoid function.\n\n    Function description: https://www.sfu.ca/~ssurjano/rothyp.html.\n\n    \"\"\"\n    return (rhe_ls(params) ** 2).sum()\n\n\n@mark.scalar\ndef rhe_gradient(params: PyTree) -> PyTree:\n    \"\"\"Calculate gradient of rotated_hyper_ellipsoid function.\"\"\"\n    x = _get_x(params)\n    flat = np.arange(2 * len(x), 0, -2) * x\n    return 
_unflatten_gradient(flat, params)\n\n\n@mark.scalar\ndef rhe_fun_and_gradient(params: PyTree) -> tuple[float, PyTree]:\n    \"\"\"Implement Rotated Hyper Ellipsoid function and calculate gradient.\"\"\"\n    val = rhe_scalar(params)\n    grad = rhe_gradient(params)\n    return val, grad\n\n\n@mark.least_squares\ndef rhe_ls(params: PyTree) -> NDArray[np.float64]:\n    \"\"\"Compute least-squares version of the Rotated Hyper Ellipsoid function.\"\"\"\n    x = _get_x(params)\n    dim = len(params)\n    out = np.zeros(dim)\n    for i in range(dim):\n        out[i] = np.sqrt((x[: i + 1] ** 2).sum())\n    return out\n\n\n@mark.least_squares\ndef rhe_function_value(params: PyTree) -> FunctionValue:\n    \"\"\"FunctionValue version of Rotated Hyper Ellipsoid function.\"\"\"\n    contribs = rhe_ls(params)\n    out = FunctionValue(contribs)\n    return out\n\n\n@mark.scalar\ndef rosenbrock_scalar(params: PyTree) -> float:\n    \"\"\"Rosenbrock function: https://www.sfu.ca/~ssurjano/rosen.html.\"\"\"\n    return (rosenbrock_ls(params) ** 2).sum()\n\n\n@mark.scalar\ndef rosenbrock_gradient(params: PyTree) -> PyTree:\n    \"\"\"Calculate gradient of rosenbrock function.\"\"\"\n    x = _get_x(params)\n    l1 = np.append(np.delete(x, [-1]), 0)\n    l2 = np.delete(np.insert(x, 0, 0), [1])\n    l3 = np.delete(np.insert(x, 0, 0), [-1])\n    l4 = np.append(np.delete(x, [0]), 0)\n    l5 = np.append(np.full((len(x) - 1), 2), 0)\n    flat = 100 * (4 * (l1**3) + 2 * l2 - 2 * (l3**2) - 4 * (l4 * x)) + 2 * l1 - l5\n    return _unflatten_gradient(flat, params)\n\n\n@mark.scalar\ndef rosenbrock_fun_and_gradient(params: PyTree) -> tuple[float, PyTree]:\n    \"\"\"Implement rosenbrock function and calculate gradient.\"\"\"\n    return rosenbrock_scalar(params), rosenbrock_gradient(params)\n\n\n@mark.least_squares\ndef rosenbrock_ls(params: PyTree) -> NDArray[np.float64]:\n    \"\"\"Least-squares version of the rosenbrock function.\"\"\"\n    x = _get_x(params)\n    dim = len(params)\n    out = 
np.zeros(dim)\n    for i in range(dim - 1):\n        out[i] = np.sqrt(((x[i + 1] - x[i] ** 2) ** 2) * 100 + ((x[i] - 1) ** 2))\n    return out\n\n\n@mark.least_squares\ndef rosenbrock_function_value(params: PyTree) -> FunctionValue:\n    \"\"\"FunctionValue version of the rosenbrock function.\"\"\"\n    return FunctionValue(rosenbrock_ls(params))\n\n\n@mark.least_squares\ndef sos_ls(params: PyTree) -> NDArray[np.float64]:\n    \"\"\"Least-squares version of the sum of squares or sphere function.\"\"\"\n    return _get_x(params)\n\n\n@mark.least_squares\ndef sos_ls_with_pd_objects(params: PyTree) -> pd.Series[float]:\n    \"\"\"Least-squares version of the sphere function returning pandas objects.\"\"\"\n    return pd.Series(sos_ls(params))\n\n\n@mark.scalar\ndef sos_scalar(params: PyTree) -> float:\n    \"\"\"Sum of squares or sphere function.\"\"\"\n    return (_get_x(params) ** 2).sum()\n\n\n@mark.scalar\ndef sos_gradient(params: PyTree) -> PyTree:\n    \"\"\"Calculate the gradient of the sum of squares function.\"\"\"\n    flat = 2 * _get_x(params)\n    return _unflatten_gradient(flat, params)\n\n\n@mark.likelihood\ndef sos_likelihood(params: PyTree) -> NDArray[np.float64]:\n    return _get_x(params) ** 2\n\n\n@mark.likelihood\ndef sos_likelihood_jacobian(params: PyTree) -> PyTree:\n    \"\"\"Calculate the likelihood Jacobian of the sum of squares function.\"\"\"\n    x = _get_x(params)\n    out_mat = np.diag(2 * x)\n    out_tree = matrix_to_block_tree(out_mat, x, params)\n    return out_tree\n\n\n@mark.least_squares\ndef sos_ls_jacobian(params: PyTree) -> PyTree:\n    \"\"\"Calculate the least-squares Jacobian of the sum of squares function.\"\"\"\n    x = _get_x(params)\n    out_mat = np.eye(len(x))\n    out_tree = matrix_to_block_tree(out_mat, x, params)\n    return out_tree\n\n\n@mark.scalar\ndef sos_fun_and_gradient(params: PyTree) -> tuple[float, PyTree]:\n    \"\"\"Calculate sum of squares criterion value and gradient.\"\"\"\n    return 
sos_scalar(params), sos_gradient(params)\n\n\n@mark.likelihood\ndef sos_likelihood_fun_and_jac(\n    params: PyTree,\n) -> tuple[NDArray[np.float64], PyTree]:\n    \"\"\"Calculate sum of squares criterion value and Jacobian.\"\"\"\n    return sos_likelihood(params), sos_likelihood_jacobian(params)\n\n\n@mark.least_squares\ndef sos_ls_fun_and_jac(\n    params: PyTree,\n) -> tuple[NDArray[np.float64], PyTree]:\n    \"\"\"Calculate sum of squares criterion value and Jacobian.\"\"\"\n    return sos_ls(params), sos_ls_jacobian(params)\n\n\nsos_derivatives = [sos_gradient, sos_likelihood_jacobian, sos_ls_jacobian]\n\n\ndef _get_x(params: PyTree) -> NDArray[np.float64]:\n    if isinstance(params, np.ndarray) and params.ndim == 1:\n        x = params.astype(float)\n    else:\n        registry = get_registry(extended=True)\n        x = np.array(tree_just_flatten(params, registry=registry), dtype=np.float64)\n    return x\n\n\ndef _unflatten_gradient(flat: NDArray[np.float64], params: PyTree) -> PyTree:\n    out = tree_unflatten(params, flat.tolist(), registry=REGISTRY)\n    return out\n"
  },
  {
    "path": "src/optimagic/examples/numdiff_functions.py",
    "content": "\"\"\"Functions with known gradients, jacobians or hessians.\n\nAll functions take a numpy array with parameters as their first argument.\n\nExample inputs for the binary choice functions are in binary_choice_inputs.pickle. They\ncome from the statsmodels documentation:\nhttps://tinyurl.com/y4x67vwl\nWe pickled them so we don't need statsmodels as a dependency.\n\n\"\"\"\n\nimport numpy as np\nfrom scipy.stats import norm\n\nFLOAT_EPS = np.finfo(float).eps\n\n# ======================================================================================\n# Logit\n# ======================================================================================\n\n\ndef logit_loglike(params, y, x):\n    return logit_loglikeobs(params, y, x).sum()\n\n\ndef logit_loglikeobs(params, y, x):\n    q = 2 * y - 1\n    return np.log(1 / (1 + np.exp(-(q * np.dot(x, params)))))\n\n\ndef logit_loglike_gradient(params, y, x):\n    c = 1 / (1 + np.exp(-(np.dot(x, params))))\n    return np.dot(y - c, x)\n\n\ndef logit_loglikeobs_jacobian(params, y, x):\n    c = 1 / (1 + np.exp(-(np.dot(x, params))))\n    return (y - c).reshape(-1, 1) * x\n\n\ndef logit_loglike_hessian(params, y, x):  # noqa: ARG001\n    c = 1 / (1 + np.exp(-(np.dot(x, params))))\n    return -np.dot(c * (1 - c) * x.T, x)\n\n\n# ======================================================================================\n# Probit\n# ======================================================================================\n\n\ndef probit_loglike(params, y, x):\n    return probit_loglikeobs(params, y, x).sum()\n\n\ndef probit_loglikeobs(params, y, x):\n    q = 2 * y - 1\n    return np.log(np.clip(norm.cdf(q * np.dot(x, params)), FLOAT_EPS, 1))\n\n\ndef probit_loglike_gradient(params, y, x):\n    xb = np.dot(x, params)\n    q = 2 * y - 1\n    c = q * norm.pdf(q * xb) / np.clip(norm.cdf(q * xb), FLOAT_EPS, 1 - FLOAT_EPS)\n    return np.dot(c, x)\n\n\ndef probit_loglikeobs_jacobian(params, y, x):\n    xb = np.dot(x, params)\n    q 
= 2 * y - 1\n    c = q * norm.pdf(q * xb) / np.clip(norm.cdf(q * xb), FLOAT_EPS, 1 - FLOAT_EPS)\n    return c.reshape(-1, 1) * x\n\n\ndef probit_loglike_hessian(params, y, x):\n    xb = np.dot(x, params)\n    q = 2 * y - 1\n    c = q * norm.pdf(q * xb) / norm.cdf(q * xb)\n    return np.dot(-c * (c + xb) * x.T, x)\n"
  },
  {
    "path": "src/optimagic/exceptions.py",
    "content": "import sys\nfrom traceback import format_exception\n\n\nclass OptimagicError(Exception):\n    \"\"\"Base exception for optimagic which should be inherited by all exceptions.\"\"\"\n\n\nclass TableExistsError(OptimagicError):\n    \"\"\"Exception for database tables that should not exist but do.\"\"\"\n\n\nclass InvalidFunctionError(OptimagicError):\n    \"\"\"Exception for invalid user provided functions.\n\n    This includes user functions that do not comply with interfaces, raise errors or\n    produce NaNs.\n\n    \"\"\"\n\n\nclass UserFunctionRuntimeError(OptimagicError):\n    \"\"\"Exception that is raised when user provided functions raise errors.\"\"\"\n\n\nclass MissingInputError(OptimagicError):\n    \"\"\"Exception for missing user provided input.\"\"\"\n\n\nclass AliasError(OptimagicError):\n    \"\"\"Exception for aliasing errors.\"\"\"\n\n\nclass InvalidKwargsError(OptimagicError):\n    \"\"\"Exception for invalid user provided keyword arguments.\"\"\"\n\n\nclass InvalidParamsError(OptimagicError):\n    \"\"\"Exception for invalid user provided parameters.\"\"\"\n\n\nclass InvalidConstraintError(OptimagicError):\n    \"\"\"Exception for invalid user provided constraints.\"\"\"\n\n\nclass InvalidBoundsError(OptimagicError):\n    \"\"\"Exception for invalid user provided bounds.\"\"\"\n\n\nclass IncompleteBoundsError(OptimagicError):\n    \"\"\"Exception when user provided bounds are incomplete.\"\"\"\n\n\nclass InvalidScalingError(OptimagicError):\n    \"\"\"Exception for invalid user provided scaling.\"\"\"\n\n\nclass InvalidMultistartError(OptimagicError):\n    \"\"\"Exception for invalid user provided multistart options.\"\"\"\n\n\nclass InvalidNumdiffOptionsError(OptimagicError):\n    \"\"\"Exception for invalid user provided numdiff options.\"\"\"\n\n\nclass NotInstalledError(OptimagicError):\n    \"\"\"Exception when optional dependencies are needed but not installed.\"\"\"\n\n\nclass NotAvailableError(OptimagicError):\n    
\"\"\"Exception when something is not available, e.g. because a calculation failed.\"\"\"\n\n\nclass InvalidAlgoOptionError(OptimagicError):\n    \"\"\"Exception for invalid user provided algorithm options.\"\"\"\n\n\nclass InvalidAlgoInfoError(OptimagicError):\n    \"\"\"Exception for invalid user provided algorithm information.\"\"\"\n\n\nclass InvalidPlottingBackendError(OptimagicError):\n    \"\"\"Exception for invalid user provided plotting backend.\"\"\"\n\n\nclass StopOptimizationError(OptimagicError):\n    def __init__(self, message, current_status):\n        super().__init__(message)\n        self.message = message\n        self.current_status = current_status\n\n    def __reduce__(self):\n        \"\"\"Taken from here: https://tinyurl.com/y6eeys2f.\"\"\"\n        return (StopOptimizationError, (self.message, self.current_status))\n\n\ndef get_traceback():\n    tb = format_exception(*sys.exc_info())\n    if isinstance(tb, list):\n        tb = \"\".join(tb)\n    return tb\n\n\nINVALID_INFERENCE_MSG = (\n    \"Taking the inverse of the information matrix failed. Only ever use this \"\n    \"covariance matrix or standard errors based on it for diagnostic purposes, not for \"\n    \"drawing conclusions.\"\n)\n\n\nINVALID_SENSITIVITY_MSG = (\n    \"Taking inverse failed during the calculation of sensitivity measures. Interpret \"\n    \"them with caution.\"\n)\n"
  },
  {
    "path": "src/optimagic/logging/__init__.py",
    "content": "from .logger import (\n    SQLiteLogOptions as SQLiteLogOptions,\n)\nfrom .logger import (\n    SQLiteLogReader as SQLiteLogReader,\n)\nfrom .types import ExistenceStrategy as ExistenceStrategy\n"
  },
  {
    "path": "src/optimagic/logging/base.py",
    "content": "import io\nimport warnings\nfrom abc import ABC, abstractmethod\nfrom dataclasses import asdict, fields, is_dataclass\nfrom typing import Any, Generic, Type, TypeVar\n\nimport cloudpickle\nimport pandas as pd\n\nfrom optimagic.exceptions import get_traceback\nfrom optimagic.typing import DictLikeAccess\n\nInputType = TypeVar(\"InputType\", bound=DictLikeAccess)\nOutputType = TypeVar(\"OutputType\", bound=DictLikeAccess)\n\n\nclass _KeyValueStore(Generic[InputType, OutputType], ABC):\n    \"\"\"Generic abstract base class for a key-value store.\n\n    This class defines the basic interface for key-value stores that support\n    insertion and selection of items based on a primary key.\n\n    Args:\n        input_type: The type of input data that can be stored.\n        output_type: The type of output data that can be retrieved.\n        primary_key: The primary key used to uniquely identify items in the store.\n\n    Raises:\n        ValueError: If input_type or output_type is not a dataclass, or if\n                the primary key is not found in output_type fields.\n\n    \"\"\"\n\n    def __init__(\n        self,\n        input_type: Type[InputType],\n        output_type: Type[OutputType],\n        primary_key: str,\n    ):\n        if not (is_dataclass(input_type) and is_dataclass(output_type)):\n            raise ValueError(\"Arguments input_type and output_type must be dataclasses\")\n\n        output_fields = {f.name for f in fields(output_type)}\n        if primary_key not in output_fields:\n            raise ValueError(\n                f\"Primary key {primary_key} not found in output_type fields \"\n                f\"{fields(output_type)}\"\n            )\n\n        self._output_type = output_type\n        self._input_type = input_type\n        self._primary_key = primary_key\n        self._supported_fields = {f.name for f in fields(input_type)}\n\n    @property\n    def primary_key(self) -> str:\n        \"\"\"Get the primary key of the 
store.\n\n        Returns:\n            The primary key field name.\n\n        \"\"\"\n        return self._primary_key\n\n    @abstractmethod\n    def insert(self, value: InputType) -> None:\n        \"\"\"Implement this method to insert a new value into the key-value store.\n\n        Make sure an auto-increment logic is implemented for the insertion.\n\n        \"\"\"\n\n    @abstractmethod\n    def _select_by_key(self, key: int) -> list[OutputType]:\n        \"\"\"Implement this method to select a value from the store by its primary key.\"\"\"\n\n    @abstractmethod\n    def _select_all(self) -> list[OutputType]:\n        \"\"\"Implement this method to select all values from the store.\"\"\"\n\n    def select(self, key: int | None = None) -> list[OutputType]:\n        \"\"\"Select items from the store.\n\n        Args:\n            key: Optional; the primary key of the item to be selected. If not provided,\n                 all items will be selected.\n\n        Returns:\n            A list of output items.\n\n        \"\"\"\n        if key is None:\n            return self._select_all()\n\n        return self._select_by_key(key)\n\n    @abstractmethod\n    def select_last_rows(self, n_rows: int) -> list[OutputType]:\n        \"\"\"Implement this to select the last `n_rows` from the store.\n\n        Args:\n            n_rows: The number of rows to select.\n\n        Returns:\n            A list of the last `n_rows` output items.\n\n        \"\"\"\n\n    def to_df(self) -> pd.DataFrame:\n        \"\"\"Convert the store's data to a Pandas DataFrame.\n\n        Returns:\n            A DataFrame containing all items in the store.\n\n        \"\"\"\n        items = self._select_all()\n        return pd.DataFrame([asdict(item) for item in items])\n\n\nclass UpdatableKeyValueStore(_KeyValueStore[InputType, OutputType], ABC):\n    \"\"\"Generic abstract base class for an updatable key-value store.\n\n    This class extends `KeyValueStore` to add support for updating 
existing\n    items in the store.\n\n    \"\"\"\n\n    def update(self, key: int, value: InputType | dict[str, Any]) -> None:\n        \"\"\"Update an existing item in the store.\n\n        Args:\n            key: The primary key of the item to be updated.\n            value: The updated item, or a dictionary of fields to update.\n\n        Raises:\n            ValueError: If any fields in `value` are not supported by the store.\n\n        \"\"\"\n        self._check_fields(value)\n        self._update(key, value)\n\n    @abstractmethod\n    def _update(self, key: int, value: InputType | dict[str, Any]) -> None:\n        \"\"\"Implement the internal method to update an existing item in the store.\"\"\"\n\n    def _check_fields(self, value: InputType | dict[str, Any]) -> None:\n        if isinstance(value, dict):\n            not_supported_fields = set(value.keys()).difference(self._supported_fields)\n            if not_supported_fields:\n                raise ValueError(\n                    f\"Not supported fields {not_supported_fields}. 
\"\n                    f\"Only supports fields {self._supported_fields}\"\n                )\n\n\nclass NonUpdatableKeyValueStore(_KeyValueStore[InputType, OutputType], ABC):\n    def __getattr__(self, name: str) -> Any:\n        if name == \"update\":\n            msg = (\n                f\"'{self.__class__.__name__}' object does not allow to update items in \"\n                f\"the store\"\n            )\n        else:\n            msg = f\"'{self.__class__.__name__}' object has no attribute '{name}'\"\n        raise AttributeError(msg)\n\n\nclass RobustPickler:\n    @staticmethod\n    def loads(\n        data: Any,\n        fix_imports: bool = True,  # noqa: ARG004\n        encoding: str = \"ASCII\",  # noqa: ARG004\n        errors: str = \"strict\",  # noqa: ARG004\n        buffers: Any = None,  # noqa: ARG004\n    ) -> Any:\n        \"\"\"Robust pickle loading.\n\n        We first try to unpickle the object with pd.read_pickle. This makes no\n        difference for non-pandas objects but makes the de-serialization\n        of pandas objects more robust across pandas versions. If that fails, we use\n        cloudpickle. 
If that fails, we return None but do not raise an error.\n\n        See: https://github.com/pandas-dev/pandas/issues/16474\n\n        \"\"\"\n        try:\n            res = pd.read_pickle(io.BytesIO(data), compression=None)\n        except (KeyboardInterrupt, SystemExit):\n            raise\n        except Exception:\n            try:\n                res = cloudpickle.loads(data)\n            except (KeyboardInterrupt, SystemExit):\n                raise\n            except Exception:\n                res = None\n                tb = get_traceback()\n                warnings.warn(\n                    f\"Unable to read PickleType column from database:\\n{tb}\\n \"\n                    \"The entry was replaced by None.\"\n                )\n\n        return res\n\n    @staticmethod\n    def dumps(\n        obj: Any,\n        protocol: str | None = None,\n        *,\n        fix_imports: bool = True,  # noqa: ARG001\n        buffer_callback: Any = None,  # noqa: ARG004\n    ) -> Any:\n        return cloudpickle.dumps(obj, protocol=protocol)\n"
  },
  {
    "path": "src/optimagic/logging/logger.py",
    "content": "from __future__ import annotations\n\nimport os\nfrom abc import ABC, abstractmethod\nfrom pathlib import Path\nfrom typing import Any, Generic, Type, TypeVar, cast\n\nimport numpy as np\nimport pandas as pd\nimport sqlalchemy as sql\nfrom sqlalchemy.engine import Engine\n\nfrom optimagic.logging.base import (\n    NonUpdatableKeyValueStore,\n    UpdatableKeyValueStore,\n)\nfrom optimagic.logging.sqlalchemy import (\n    IterationStore,\n    ProblemStore,\n    SQLAlchemyConfig,\n    StepStore,\n)\nfrom optimagic.logging.types import (\n    ExistenceStrategy,\n    ExistenceStrategyLiteral,\n    IterationState,\n    IterationStateWithId,\n    ProblemInitialization,\n    ProblemInitializationWithId,\n    StepResult,\n    StepResultWithId,\n    StepType,\n)\nfrom optimagic.typing import (\n    Direction,\n    DirectionLiteral,\n    IterationHistory,\n    MultiStartIterationHistory,\n    PyTree,\n)\n\n\nclass LogOptions:\n    \"\"\"Base class for defining different log options.\n\n    Serves as a registry for implemented option classes for better discoverability.\n\n    \"\"\"\n\n    _subclass_registry: list[Type[LogOptions]] = []\n\n    def __init_subclass__(\n        cls: Type[LogOptions], abstract: bool = False, **kwargs: dict[Any, Any]\n    ):\n        if not abstract:\n            LogOptions._subclass_registry.append(cls)\n        super().__init_subclass__(**kwargs)\n\n    @classmethod\n    def available_option_types(cls) -> list[Type[LogOptions]]:\n        return cls._subclass_registry\n\n\n_LogOptionsType = TypeVar(\"_LogOptionsType\", bound=LogOptions)\n\n\nclass LogReader(Generic[_LogOptionsType], ABC):\n    \"\"\"A class that manages the retrieving of optimization and exploration data.\n\n    This class exposes methods to retrieve optimization logging data from stores.\n\n    \"\"\"\n\n    _step_store: UpdatableKeyValueStore[StepResult, StepResultWithId]\n    _iteration_store: NonUpdatableKeyValueStore[IterationState, IterationStateWithId]\n    
_problem_store: UpdatableKeyValueStore[\n        ProblemInitialization, ProblemInitializationWithId\n    ]\n\n    @property\n    def problem_df(self) -> pd.DataFrame:\n        return self._problem_store.to_df()\n\n    @classmethod\n    def from_options(cls, log_options: LogOptions) -> LogReader[_LogOptionsType]:\n        log_reader_class = _LOG_OPTION_LOG_READER_REGISTRY.get(type(log_options), None)\n\n        if log_reader_class is None:\n            raise ValueError(\n                f\"No LogReader implementation found for type \"\n                f\"{type(log_options)}. Available option types: \"\n                f\"\\n {list(_LOG_OPTION_LOG_READER_REGISTRY.keys())}\"\n            )\n\n        return log_reader_class._create(log_options)\n\n    @classmethod\n    @abstractmethod\n    def _create(cls, log_options: _LogOptionsType) -> LogReader[_LogOptionsType]:\n        pass\n\n    def read_iteration(self, iteration: int) -> IterationStateWithId:\n        \"\"\"Read a specific iteration from the iteration store.\n\n        Args:\n            iteration: The iteration number to read. 
Negative values read from the end.\n\n        Returns:\n            A `CriterionEvaluationWithId` object containing the iteration data.\n\n        Raises:\n            IndexError: If the iteration is invalid or the store is empty.\n\n        \"\"\"\n        if iteration >= 0:\n            rowid = iteration + 1\n        else:\n            try:\n                last_row = self._iteration_store.select_last_rows(1)\n                highest_rowid = last_row[0].rowid\n            except IndexError as e:\n                raise IndexError(\n                    \"Invalid iteration request, iteration store is empty\"\n                ) from e\n\n            # iteration is negative here!\n            assert highest_rowid is not None\n            rowid = highest_rowid + iteration + 1\n\n        row_list = self._iteration_store.select(rowid)\n\n        if len(row_list) == 0:\n            raise IndexError(f\"Invalid iteration requested: {iteration}\")\n        else:\n            data = row_list[0]\n\n        return data\n\n    def read_history(self) -> IterationHistory:\n        \"\"\"Read the entire iteration history from the iteration store.\n\n        Returns:\n            An `IterationHistory` object containing the parameters,\n                criterion values, and runtimes.\n\n        \"\"\"\n        raw_res = self._iteration_store.select()\n        params_list = []\n        criterion_list = []\n        runtime_list = []\n        for data in raw_res:\n            if data.scalar_fun is not None:\n                params_list.append(data.params)\n                criterion_list.append(data.scalar_fun)\n                runtime_list.append(data.timestamp)\n\n        times = np.array(runtime_list)\n        times -= times[0]\n\n        return IterationHistory(params_list, criterion_list, times)\n\n    @staticmethod\n    def _normalize_direction(\n        direction: Direction | DirectionLiteral,\n    ) -> Direction:\n        if isinstance(direction, str):\n            direction = 
Direction(direction)\n        return direction\n\n    def _build_history_dataframe(self) -> pd.DataFrame:\n        steps = self._step_store.to_df()\n        raw_res = self._iteration_store.select()\n\n        history: dict[str, list[Any]] = {\n            \"params\": [],\n            \"fun\": [],\n            \"time\": [],\n            \"step\": [],\n        }\n\n        for data in raw_res:\n            if data.scalar_fun is not None:\n                history[\"params\"].append(data.params)\n                history[\"fun\"].append(data.scalar_fun)\n                history[\"time\"].append(data.timestamp)\n                history[\"step\"].append(data.step)\n\n        times = np.array(history[\"time\"])\n        times -= times[0]\n        # For numpy arrays with ndim = 0, tolist() returns a scalar, which violates the\n        # type hinting list[Any] from above. As history[\"time\"] is always a list, this\n        # case is safe to ignore.\n        history[\"time\"] = times.tolist()\n\n        df = pd.DataFrame(history)\n        df = df.merge(\n            steps[[f\"{self._step_store.primary_key}\", \"type\"]],\n            left_on=\"step\",\n            right_on=f\"{self._step_store.primary_key}\",\n        )\n        return df.drop(columns=f\"{self._step_store.primary_key}\")\n\n    @staticmethod\n    def _split_exploration_and_optimization(\n        df: pd.DataFrame,\n    ) -> tuple[pd.DataFrame | None, pd.DataFrame]:\n        exploration = df.query(f\"type == '{StepType.EXPLORATION.value}'\").drop(\n            columns=[\"step\", \"type\"]\n        )\n        histories = df.query(f\"type == '{StepType.OPTIMIZATION.value}'\")\n        histories = histories.drop(columns=\"type\").set_index(\"step\", append=True)\n\n        return None if exploration.empty else exploration, histories\n\n    @staticmethod\n    def _sort_exploration(\n        exploration: pd.DataFrame | None, optimization_type: Direction\n    ) -> IterationHistory | None:\n        if exploration is 
not None:\n            is_minimization = optimization_type is Direction.MINIMIZE\n            exploration = exploration.sort_values(by=\"fun\", ascending=is_minimization)\n            exploration_dict = cast(dict[str, Any], exploration.to_dict(orient=\"list\"))\n            return IterationHistory(**exploration_dict)\n        return exploration\n\n    @staticmethod\n    def _extract_best_history(\n        histories: pd.DataFrame, optimization_type: Direction\n    ) -> tuple[IterationHistory, list[IterationHistory] | None]:\n        groupby_step_criterion = histories[\"fun\"].groupby(level=\"step\")\n\n        if optimization_type is Direction.MINIMIZE:\n            best_idx = groupby_step_criterion.min().idxmin()\n        else:\n            best_idx = groupby_step_criterion.max().idxmax()\n\n        remaining_indices = (\n            histories.index.get_level_values(\"step\").unique().difference([best_idx])\n        )\n\n        best_history: pd.DataFrame | pd.Series[Any] = histories.xs(\n            best_idx, level=\"step\"\n        )\n\n        def _to_dict(pandas_obj: pd.DataFrame | pd.Series) -> dict[str, Any]:\n            if isinstance(pandas_obj, pd.DataFrame):\n                result = pandas_obj.to_dict(orient=\"list\")\n            else:\n                result = best_history.to_dict()\n            return cast(dict[str, Any], result)\n\n        best_history_dict = _to_dict(best_history)\n        local_histories = [\n            _to_dict(histories.xs(idx, level=\"step\")) for idx in remaining_indices\n        ]\n        if len(local_histories) == 0:\n            remaining_histories = None\n        else:\n            remaining_histories = [\n                IterationHistory(**history_dict) for history_dict in local_histories\n            ]\n\n        return IterationHistory(**best_history_dict), remaining_histories\n\n    def read_multistart_history(\n        self, direction: Direction | DirectionLiteral\n    ) -> MultiStartIterationHistory:\n        
\"\"\"Read the multistart optimization history.\n\n        Args:\n            direction: The optimization direction, either as an enum or string.\n\n        Returns:\n            A `MultiStartIterationHistory` object containing the best history,\n                local histories, and exploration history.\n\n        \"\"\"\n        optimization_type = self._normalize_direction(direction)\n        history_df = self._build_history_dataframe()\n        exploration, optimization_history = self._split_exploration_and_optimization(\n            history_df\n        )\n        exploration_history = self._sort_exploration(exploration, optimization_type)\n        best_history, remaining_histories = self._extract_best_history(\n            optimization_history, optimization_type\n        )\n\n        return MultiStartIterationHistory(\n            best_history,\n            local_histories=remaining_histories,\n            exploration=exploration_history,\n        )\n\n    def read_start_params(self) -> PyTree:\n        \"\"\"Read the start parameters from the problem store.\n\n        Returns:\n            A pytree object representing the start parameter.\n\n        \"\"\"\n        return self._problem_store.select(1)[0].params\n\n\n_LogReaderType = TypeVar(\"_LogReaderType\", bound=LogReader[Any])\n\n\nclass LogStore(Generic[_LogOptionsType, _LogReaderType], ABC):\n    \"\"\"A class that manages the logging of optimization and exploration data.\n\n    This class handles storing iterations, steps, and problem\n    initialization data using various stores.\n\n    Args:\n        iteration_store: A non-updatable store for iteration data.\n        step_store: An updatable store for step data.\n        problem_store: An updatable store for problem initialization data.\n\n    \"\"\"\n\n    def __init__(\n        self,\n        iteration_store: NonUpdatableKeyValueStore[\n            IterationState, IterationStateWithId\n        ],\n        step_store: 
UpdatableKeyValueStore[StepResult, StepResultWithId],\n        problem_store: UpdatableKeyValueStore[\n            ProblemInitialization, ProblemInitializationWithId\n        ],\n    ):\n        self.step_store = step_store\n        self.iteration_store = iteration_store\n        self.problem_store = problem_store\n\n    @classmethod\n    def from_options(\n        cls, log_options: LogOptions\n    ) -> LogStore[_LogOptionsType, _LogReaderType]:\n        logger_class = _LOG_OPTION_LOGGER_REGISTRY.get(type(log_options), None)\n\n        if logger_class is None:\n            raise ValueError(\n                f\"No Logger implementation found for type \"\n                f\"{type(log_options)}. Available option types: \"\n                f\"\\n {list(_LOG_OPTION_LOGGER_REGISTRY.keys())}\"\n            )\n\n        return logger_class.create(log_options)\n\n    @classmethod\n    @abstractmethod\n    def create(\n        cls, log_options: _LogOptionsType\n    ) -> LogStore[_LogOptionsType, _LogReaderType]:\n        pass\n\n\nclass SQLiteLogOptions(SQLAlchemyConfig, LogOptions):\n    \"\"\"Configuration class for setting up an SQLite database with SQLAlchemy.\n\n    This class extends the `SQLAlchemyConfig` class to configure an SQLite database.\n    It handles the creation of the database engine, manages database files,\n    and applies various optimizations for logging performance.\n\n    Args:\n        path (str | Path): The file path to the SQLite database.\n        fast_logging (bool): A boolean that determines if “unsafe” settings are used to\n            speed up write processes to the database. 
This should only be used for very\n            short running criterion functions where the main purpose of the log\n            is a real-time dashboard, and it would not be catastrophic to get\n            a corrupted database in case of a sudden system shutdown.\n            If one evaluation of the criterion function (and gradient if applicable)\n            takes more than 100 ms, the logging overhead is negligible.\n        if_database_exists (ExistenceStrategy): Strategy for handling an existing\n            database file. One of “extend”, “replace”, “raise”.\n\n    \"\"\"\n\n    def __init__(\n        self,\n        path: str | Path,\n        fast_logging: bool = True,\n        if_database_exists: ExistenceStrategy\n        | ExistenceStrategyLiteral = ExistenceStrategy.RAISE,\n    ):\n        url = f\"sqlite:///{path}\"\n        self._fast_logging = fast_logging\n        self._path = path\n        if isinstance(if_database_exists, str):\n            if_database_exists = ExistenceStrategy(if_database_exists)\n        self.if_database_exists = if_database_exists\n        super().__init__(url)\n\n    @property\n    def path(self) -> str | Path:\n        return self._path\n\n    def create_engine(self) -> Engine:\n        engine = sql.create_engine(self.url)\n        self._configure_engine(engine)\n        return engine\n\n    def _configure_engine(self, engine: Engine) -> None:\n        \"\"\"Configure the sqlite engine.\n\n        The two functions that configure the emission of the `begin` statement are taken\n        from the sqlalchemy documentation the documentation:\n        https://tinyurl.com/u9xea5z\n        and are\n        the recommended way of working around a bug in the pysqlite driver.\n\n        The other function speeds up the write process. If fast_logging is False, it\n        does so using only completely safe optimizations. 
Of fast_logging is True,\n        it also uses unsafe optimizations.\n\n        \"\"\"\n\n        @sql.event.listens_for(engine, \"connect\")\n        def do_connect(dbapi_connection: Any, connection_record: Any) -> None:  # noqa: ARG001\n            # disable pysqlite's emitting of the BEGIN statement entirely.\n            # also stops it from emitting COMMIT before absolutely necessary.\n            dbapi_connection.isolation_level = None\n\n        @sql.event.listens_for(engine, \"begin\")\n        def do_begin(conn: Any) -> None:\n            # emit our own BEGIN\n            conn.exec_driver_sql(\"BEGIN DEFERRED\")\n\n        @sql.event.listens_for(engine, \"connect\")\n        def set_sqlite_pragma(dbapi_connection: Any, connection_record: Any) -> None:  # noqa: ARG001\n            cursor = dbapi_connection.cursor()\n            cursor.execute(\"PRAGMA journal_mode = WAL\")\n            if self._fast_logging:\n                cursor.execute(\"PRAGMA synchronous = OFF\")\n            else:\n                cursor.execute(\"PRAGMA synchronous = NORMAL\")\n            cursor.close()\n\n\nclass SQLiteLogReader(LogReader[SQLiteLogOptions]):\n    \"\"\"A class that manages the retrieving of optimization and exploration data from a\n\n    SQLite database.\n\n    This class exposes methods to retrieve optimization logging data from stores.\n\n    Args:\n            path (str | Path): The path to the SQLite database file.\n\n    \"\"\"\n\n    def __init__(self, path: str | Path):\n        if not os.path.exists(path):\n            raise FileNotFoundError(f\"No file found at {path=}\")\n\n        log_options = SQLiteLogOptions(\n            path, fast_logging=True, if_database_exists=ExistenceStrategy.EXTEND\n        )\n        self._iteration_store = IterationStore(log_options)\n        self._step_store = StepStore(log_options)\n        self._problem_store = ProblemStore(log_options)\n\n    @classmethod\n    def _create(cls, log_options: SQLiteLogOptions) -> 
SQLiteLogReader:\n        \"\"\"Create an instance of SQLiteLogReader using the provided log options.\n\n        Args:\n            log_options (SQLiteLogOptions): Configuration options for the SQLite log.\n\n        Returns:\n            SQLiteLogReader: An instance of SQLiteLogReader initialized with the\n            provided log options.\n\n        \"\"\"\n        return cls(log_options.path)\n\n\nclass _SQLiteLogStore(LogStore[SQLiteLogOptions, SQLiteLogReader]):\n    \"\"\"A logger class that stores and manages optimization and exploration data using\n\n    SQLite.\n\n    It supports different strategies for handling existing databases, such as extending,\n    replacing, or raising an error.\n\n    \"\"\"\n\n    @staticmethod\n    def _handle_existing_database(\n        path: str | Path,\n        if_database_exists: ExistenceStrategy | ExistenceStrategyLiteral,\n    ) -> None:\n        if isinstance(if_database_exists, str):\n            if_database_exists = ExistenceStrategy(if_database_exists)\n        database_exists = os.path.exists(path)\n        if database_exists:\n            if if_database_exists is ExistenceStrategy.RAISE:\n                raise FileExistsError(\n                    f\"The database at {path} already exists. To reuse and extend \"\n                    f\"the existing database, set if_database_exists to \"\n                    f\"ExistenceStrategy.EXTEND.\"\n                )\n            elif if_database_exists is ExistenceStrategy.REPLACE:\n                try:\n                    os.remove(path)\n                except PermissionError as e:\n                    msg = (\n                        f\"Failed to remove file {path}. \"\n                        f\"In particular, this can happen on Windows \"\n                        f\"machines, when a different process is accessing the file, \"\n                        f\"which results in a PermissionError. 
In this case, delete\"\n                        f\"the file manually.\"\n                    )\n                    raise RuntimeError(msg) from e\n\n    @classmethod\n    def create(cls, log_options: SQLiteLogOptions) -> _SQLiteLogStore:\n        cls._handle_existing_database(log_options.path, log_options.if_database_exists)\n\n        iteration_store = IterationStore(log_options)\n        step_store = StepStore(log_options)\n        problem_store = ProblemStore(log_options)\n        return cls(iteration_store, step_store, problem_store)\n\n\n_LOG_OPTION_LOGGER_REGISTRY: dict[Type[LogOptions], Type[LogStore[Any, Any]]] = {\n    SQLiteLogOptions: _SQLiteLogStore\n}\n_LOG_OPTION_LOG_READER_REGISTRY: dict[Type[LogOptions], Type[LogReader[Any]]] = {\n    SQLiteLogOptions: SQLiteLogReader\n}\n"
  },
  {
    "path": "src/optimagic/logging/read_log.py",
    "content": "\"\"\"Deprecated module:\n\nFunctions to read data from the database used for logging.\n\nThe functions in the module are meant for end users of optimagic. They do not require\nany knowledge of databases.\n\nWhen using them internally, make sure to supply a database to path_or_database.\nOtherwise, the functions may be very slow.\n\n\"\"\"\n\nfrom __future__ import annotations\n\nimport warnings\nfrom dataclasses import dataclass\n\nfrom optimagic.logging.logger import SQLiteLogOptions, SQLiteLogReader\n\n\n@dataclass\nclass OptimizeLogReader:\n    def __new__(cls, *args, **kwargs):  # type: ignore\n        warnings.warn(\n            \"OptimizeLogReader is deprecated and will be removed in a future \"\n            \"version. Please use optimagic.logging.SQLiteLogReader instead.\",\n            FutureWarning,\n        )\n        sqlite_options = SQLiteLogOptions(*args, **kwargs)\n        return SQLiteLogReader.from_options(sqlite_options)\n"
  },
  {
    "path": "src/optimagic/logging/sqlalchemy.py",
    "content": "from __future__ import annotations\n\nimport traceback\nimport warnings\nfrom dataclasses import asdict, dataclass\nfrom functools import cached_property\nfrom typing import Any, Sequence, Type, cast\n\nimport sqlalchemy as sql\nfrom sqlalchemy import Column, Integer, PickleType, String\nfrom sqlalchemy.engine.base import Engine\nfrom sqlalchemy.sql.base import Executable\nfrom sqlalchemy.sql.schema import MetaData\n\nfrom optimagic.logging.base import (\n    InputType,\n    NonUpdatableKeyValueStore,\n    OutputType,\n    RobustPickler,\n    UpdatableKeyValueStore,\n)\nfrom optimagic.logging.types import (\n    IterationState,\n    IterationStateWithId,\n    ProblemInitialization,\n    ProblemInitializationWithId,\n    StepResult,\n    StepResultWithId,\n)\n\n\nclass SQLAlchemyConfig:\n    \"\"\"Configuration class for setting up an SQLAlchemy engine and metadata.\n\n    This class manages the connection URL, engine creation, and metadata reflection\n    for an SQLAlchemy database connection.\n\n    Args:\n        url: The database URL to connect to.\n\n    \"\"\"\n\n    def __init__(\n        self,\n        url: str,\n    ):\n        self.url = url\n\n    @cached_property\n    def metadata(self) -> MetaData:\n        \"\"\"Get the metadata object.\n\n        Returns:\n            The SQLAlchemy MetaData object reflecting the database schema.\n\n        \"\"\"\n        engine = self.create_engine()\n        metadata = MetaData()\n        self._configure_reflect()\n        metadata.reflect(engine)\n        return metadata\n\n    def create_engine(self) -> Engine:\n        \"\"\"Create and return an SQLAlchemy engine.\n\n        Returns:\n            An SQLAlchemy Engine object.\n\n        \"\"\"\n        return sql.create_engine(self.url)\n\n    @staticmethod\n    def _configure_reflect() -> None:\n        \"\"\"Mark all BLOB dtypes as PickleType with our custom pickle reader.\n\n        Code ist taken from the documentation: 
https://tinyurl.com/y7q287jr\n\n        \"\"\"\n\n        @sql.event.listens_for(sql.Table, \"column_reflect\")\n        def _setup_pickletype(\n            inspector: Any, table: sql.Table, column_info: dict[str, Any]\n        ) -> None:  # noqa: ARG001\n            if isinstance(column_info[\"type\"], sql.BLOB):\n                column_info[\"type\"] = sql.PickleType(pickler=RobustPickler)  # type:ignore\n\n\n@dataclass\nclass TableConfig:\n    \"\"\"Configuration for creating and managing SQLAlchemy tables.\n\n    This class defines the schema for an SQLAlchemy table, including its name,\n    columns, primary key, and strategy for handling existing tables.\n\n    Args:\n        table_name: The name of the table.\n        columns: A list of SQLAlchemy Column objects defining the table schema.\n        primary_key: The name of the primary key column.\n\n    \"\"\"\n\n    table_name: str\n    columns: list[sql.Column[Any]]\n    primary_key: str\n\n    @property\n    def column_names(self) -> list[str]:\n        return [c.name for c in self.columns]\n\n    def create_table(self, metadata: MetaData, engine: Engine) -> sql.Table:\n        \"\"\"Create or reflect the table in the database.\n\n        Args:\n            metadata: The SQLAlchemy MetaData object.\n            engine: The SQLAlchemy Engine object.\n\n        Returns:\n            The SQLAlchemy Table object representing the created or reflected table.\n\n        \"\"\"\n        metadata.reflect(engine)\n        table = sql.Table(\n            self.table_name, metadata, *self.columns, extend_existing=True\n        )\n        metadata.create_all(engine)\n        return table\n\n\nclass _SQLAlchemyStoreMixin:\n    \"\"\"Mixin class for common SQLAlchemy store operations.\n\n    This class provides common methods for selecting, inserting, and executing\n    SQL statements in an SQLAlchemy-based key-value store.\n\n    Args:\n        db_config: The SQLAlchemyConfig object for database configuration.\n        
table_config: The TableConfig object for table configuration.\n\n    \"\"\"\n\n    def __init__(self, db_config: SQLAlchemyConfig, table_config: TableConfig):\n        self._db_config = db_config\n        self._engine = db_config.create_engine()\n        self._table_config = table_config\n        self._table = table_config.create_table(db_config.metadata, self._engine)\n\n    @property\n    def column_names(self) -> list[str]:\n        return self._table_config.column_names\n\n    @property\n    def table_name(self) -> str:\n        return self._table_config.table_name\n\n    @property\n    def table(self) -> sql.Table:\n        return self._table\n\n    @property\n    def engine(self) -> Engine:\n        return self._engine\n\n    def _select_row_by_key(self, key: int) -> list[Any]:\n        stmt = self._table.select().where(\n            getattr(self._table.c, self._table_config.primary_key) == key\n        )\n        return self._execute_read_statement(stmt)\n\n    def _select_all_rows(self) -> list[Any]:\n        stmt = self._table.select()\n        return self._execute_read_statement(stmt)\n\n    def _select_last_rows(self, n_rows: int) -> list[Any]:\n        stmt = (\n            self._table.select()\n            .order_by(getattr(self._table.c, self._table_config.primary_key).desc())\n            .limit(n_rows)\n        )\n        result = self._execute_read_statement(stmt)\n        return result[::-1]\n\n    def _insert(self, insert_values: dict[str, Any]) -> None:\n        stmt = self._table.insert().values(**insert_values)\n        self._execute_write_statement(stmt)\n\n    def _execute_read_statement(self, statement: Executable) -> list[Any]:\n        with self._engine.connect() as connection:\n            return connection.execute(statement).fetchall()\n\n    def _execute_write_statement(self, statement: Executable) -> None:\n        try:\n            with self._engine.begin() as connection:\n                connection.execute(statement)\n        except 
(KeyboardInterrupt, SystemExit):\n            raise\n        except Exception:\n            exception_info = traceback.format_exc()\n            warnings.warn(\n                f\"Unable to write to database. The traceback was:\\n\\n{exception_info}\"\n            )\n\n\nclass SQLAlchemySimpleStore(\n    NonUpdatableKeyValueStore[InputType, OutputType],\n    _SQLAlchemyStoreMixin,\n):\n    \"\"\"A simple SQLAlchemy-based key-value store that does not support updates.\n\n    This class provides basic key-value storage functionality using SQLAlchemy,\n    where values are serialized and stored as BLOBs. The store does not support\n    updating existing entries.\n\n    Args:\n            table_name: The name of the table.\n            primary_key: The primary key column name.\n            db_config: The SQLAlchemyConfig object for database configuration.\n\n    \"\"\"\n\n    _value_column: str = \"serialized_value\"\n\n    def __init__(\n        self,\n        table_name: str,\n        primary_key: str,\n        db_config: SQLAlchemyConfig,\n        input_type: Type[InputType],\n        output_type: Type[OutputType],\n    ):\n        super().__init__(input_type, output_type, primary_key)\n        columns = [\n            sql.Column(primary_key, sql.Integer, primary_key=True, autoincrement=True),\n            sql.Column(self._value_column, sql.PickleType(pickler=RobustPickler)),  # type:ignore\n        ]\n        table_config = TableConfig(table_name, columns, self.primary_key)\n\n        _SQLAlchemyStoreMixin.__init__(self, db_config, table_config)\n\n    def __reduce__(\n        self,\n    ) -> tuple[\n        Type[SQLAlchemySimpleStore[Any, Any]],\n        tuple[str, str, SQLAlchemyConfig, Type[Any], Type[Any]],\n    ]:\n        return SQLAlchemySimpleStore, (\n            self.table_name,\n            self.primary_key,\n            self._db_config,\n            self._input_type,\n            self._output_type,\n        )\n\n    def insert(self, value: InputType) -> 
None:\n        \"\"\"Insert a new value into the store.\n\n        Args:\n            value: The value to insert into the store.\n\n        \"\"\"\n        self._insert({self._value_column: value})\n\n    def _select_by_key(self, key: int) -> list[OutputType]:\n        result = self._select_row_by_key(key)\n        return self._post_process(result)\n\n    def _select_all(self) -> list[OutputType]:\n        result = self._select_all_rows()\n        return self._post_process(result)\n\n    def select_last_rows(self, n_rows: int) -> list[OutputType]:\n        \"\"\"Select the last `n_rows` values from the store.\n\n        Args:\n            n_rows: The number of rows to select.\n\n        Returns:\n            A list of the last `n_rows` output values.\n\n        \"\"\"\n        result = self._select_last_rows(n_rows)\n        return self._post_process(result)\n\n    def _post_process(self, results: Sequence[sql.Row]) -> list[OutputType]:  # type:ignore\n        output_list = []\n        for row in results:\n            row_dict = {self.primary_key: row[0]}\n            row_dict.update(asdict(row[-1]))\n            output_list.append(self._output_type(**row_dict))\n        return output_list\n\n\nclass SQLAlchemyTableStore(\n    UpdatableKeyValueStore[InputType, OutputType], _SQLAlchemyStoreMixin\n):\n    \"\"\"An SQLAlchemy-based key-value store that supports updates.\n\n    This class provides key-value storage functionality using SQLAlchemy,\n    allowing for insertion, updating, and selection of data.\n\n    Args:\n        table_config: The TableConfig object defining the table schema.\n        db_config: The SQLAlchemyConfig object for database configuration.\n        input_type: The type of input data.\n        output_type: The type of output data.\n\n    \"\"\"\n\n    def __init__(\n        self,\n        table_config: TableConfig,\n        db_config: SQLAlchemyConfig,\n        input_type: Type[InputType],\n        output_type: Type[OutputType],\n    ):\n      
  _SQLAlchemyStoreMixin.__init__(self, db_config, table_config)\n        super().__init__(input_type, output_type, self._table_config.primary_key)\n\n    def __reduce__(\n        self,\n    ) -> tuple[\n        Type[SQLAlchemyTableStore[Any, Any]],\n        tuple[TableConfig, SQLAlchemyConfig, Type[Any], Type[Any]],\n    ]:\n        return SQLAlchemyTableStore, (\n            self._table_config,\n            self._db_config,\n            self._input_type,\n            self._output_type,\n        )\n\n    def insert(self, value: InputType) -> None:\n        \"\"\"Insert a new value into the store.\n\n        Args:\n            value: The value to insert into the store.\n\n        \"\"\"\n        self._insert(asdict(value))\n\n    def _update(self, key: int, value: InputType | dict[str, Any]) -> None:\n        if not isinstance(value, dict):\n            update_values = asdict(value)\n        else:\n            update_values = value\n        stmt = (\n            self._table.update()\n            .where(getattr(self._table.c, self.primary_key) == key)\n            .values(**update_values)\n        )\n        self._execute_write_statement(stmt)\n\n    def _select_by_key(self, key: int) -> list[OutputType]:\n        result = self._select_row_by_key(key)\n        return self._post_process(result)\n\n    def _select_all(self) -> list[OutputType]:\n        result = self._select_all_rows()\n        return self._post_process(result)\n\n    def select_last_rows(self, n_rows: int) -> list[OutputType]:\n        \"\"\"Select the last `n_rows` values from the store.\n\n        Args:\n            n_rows: The number of rows to select.\n\n        Returns:\n            A list of the last `n_rows` output values.\n\n        \"\"\"\n        result = self._select_last_rows(n_rows)\n        return self._post_process(result)\n\n    def _post_process(self, results: Sequence[sql.Row]) -> list[OutputType]:  # type:ignore\n        return [\n            
self._output_type(**dict(zip(self.column_names, row, strict=False)))\n            for row in results\n        ]\n\n\nclass IterationStore(SQLAlchemySimpleStore[IterationState, IterationStateWithId]):\n    \"\"\"Store for managing iteration data in an SQLite database.\n\n    Args:\n        db_config (SQLiteConfig): The SQLiteConfig object for database configuration.\n\n    \"\"\"\n\n    _TABLE_NAME = \"optimization_iterations\"\n    _PRIMARY_KEY = \"rowid\"\n\n    def __init__(\n        self,\n        db_config: SQLAlchemyConfig,\n    ):\n        super().__init__(\n            self._TABLE_NAME,\n            self._PRIMARY_KEY,\n            db_config,\n            IterationState,\n            IterationStateWithId,\n        )\n\n\nclass StepStore(SQLAlchemyTableStore[StepResult, StepResultWithId]):\n    \"\"\"Store for managing step data in an SQLite database.\n\n    Args:\n        db_config (SQLiteConfig): The SQLiteConfig object for database configuration.\n\n    \"\"\"\n\n    _TABLE_NAME = \"steps\"\n    _PRIMARY_KEY = \"rowid\"\n\n    def __init__(\n        self,\n        db_config: SQLAlchemyConfig,\n    ):\n        columns = [\n            Column(self._PRIMARY_KEY, Integer, primary_key=True, autoincrement=True),\n            Column(\"type\", String),  # e.g. optimization\n            Column(\"status\", String),  # e.g. running\n            Column(\"n_iterations\", Integer),  # optional\n            Column(\"name\", String),  # e.g. 
\"optimization-1\", \"exploration\", not unique\n        ]\n\n        table_config = TableConfig(\n            self._TABLE_NAME,\n            cast(list[Column[Any]], columns),\n            self._PRIMARY_KEY,\n        )\n\n        super().__init__(\n            table_config,\n            db_config,\n            StepResult,\n            StepResultWithId,\n        )\n\n\nclass ProblemStore(\n    SQLAlchemyTableStore[ProblemInitialization, ProblemInitializationWithId]\n):\n    \"\"\"Store for managing optimization problem initialization data in an SQLite\n\n    database.\n\n    Args:\n        db_config (SQLiteConfig): The SQLiteConfig object for database configuration.\n\n    \"\"\"\n\n    _TABLE_NAME = \"optimization_problem\"\n    _PRIMARY_KEY = \"rowid\"\n\n    def __init__(\n        self,\n        db_config: SQLAlchemyConfig,\n    ):\n        columns = [\n            Column(self._PRIMARY_KEY, Integer, primary_key=True, autoincrement=True),\n            Column(\"direction\", String),\n            Column(\"params\", PickleType(pickler=RobustPickler)),  # type:ignore\n        ]\n\n        table_config = TableConfig(\n            self._TABLE_NAME,\n            cast(list[Column[Any]], columns),\n            self._PRIMARY_KEY,\n        )\n\n        super().__init__(\n            table_config,\n            db_config,\n            ProblemInitialization,\n            ProblemInitializationWithId,\n        )\n"
  },
  {
    "path": "src/optimagic/logging/types.py",
    "content": "from dataclasses import dataclass\nfrom enum import Enum\nfrom typing import Literal\n\nfrom optimagic.optimization.fun_value import SpecificFunctionValue\nfrom optimagic.typing import (\n    DictLikeAccess,\n    Direction,\n    DirectionLiteral,\n    PyTree,\n)\n\n\nclass StepStatus(str, Enum):\n    \"\"\"Status of a step in a process.\n\n    Attributes:\n        SCHEDULED: Indicates that the step is scheduled but not yet started.\n        RUNNING: Indicates that the step is currently in progress.\n        COMPLETE: Indicates that the step has completed successfully.\n        SKIPPED: Indicates that the step was skipped.\n\n    \"\"\"\n\n    SCHEDULED = \"scheduled\"\n    RUNNING = \"running\"\n    COMPLETE = \"complete\"\n    SKIPPED = \"skipped\"\n\n\nStepStatusLiteral = Literal[\"scheduled\", \"running\", \"complete\", \"skipped\"]\n\n\nclass StepType(str, Enum):\n    \"\"\"Type of step in a process.\n\n    Attributes:\n        OPTIMIZATION: Represents an optimization step.\n        EXPLORATION: Represents an exploration step.\n\n    \"\"\"\n\n    OPTIMIZATION = \"optimization\"\n    EXPLORATION = \"exploration\"\n\n\nStepTypeLiteral = Literal[\"optimization\", \"exploration\"]\n\n\nclass ExistenceStrategy(str, Enum):\n    \"\"\"Strategies to handle the existence of a database or table.\n\n    Attributes:\n        RAISE: Raise an error if the database resp. 
table exists.\n        EXTEND: Extend the existing database or table.\n        REPLACE: Replace the existing database or table.\n\n    \"\"\"\n\n    RAISE = \"raise\"\n    EXTEND = \"extend\"\n    REPLACE = \"replace\"\n\n\nExistenceStrategyLiteral = Literal[\"raise\", \"extend\", \"replace\"]\n\n\n@dataclass(frozen=True)\nclass IterationState(DictLikeAccess):\n    \"\"\"Result of a criterion evaluation.\n\n    Attributes:\n        params: The parameters used in the evaluation.\n        timestamp: The time at which the evaluation was performed.\n        value: The result value of the evaluation.\n        valid: Indicates if the evaluation is valid.\n        criterion_eval: Optional, additional evaluation information.\n        internal_derivative: Optional, derivative information used internally.\n        step: Optional, step number associated with the evaluation.\n        exceptions: Optional, exceptions encountered during evaluation.\n        hash: Optional, hash of the evaluation for identification purposes.\n\n    \"\"\"\n\n    params: PyTree\n    timestamp: float\n    scalar_fun: float | None\n    valid: bool\n    raw_fun: SpecificFunctionValue | None\n    step: int | None\n    exceptions: str | None\n\n    def combine(self, other: \"IterationState\") -> \"IterationState\":\n        \"\"\"Combine two iteration states.\n\n        Args:\n            other (IterationState): The second iteration state.\n\n        Returns:\n            IterationState: The combined iteration state.\n\n        \"\"\"\n        raw = [e for e in [self.exceptions, other.exceptions] if e is not None]\n        exceptions: str | None = None\n        if raw:\n            exceptions = \"\\n\\n\".join(raw)\n\n        new = IterationState(\n            # one of the values must be None\n            params=self.params,\n            timestamp=min(self.timestamp, other.timestamp),\n            scalar_fun=self.scalar_fun or other.scalar_fun,\n            valid=self.valid and other.valid,\n           
 # one of the values must be None\n            raw_fun=self.raw_fun or other.raw_fun,\n            step=self.step,\n            exceptions=exceptions,\n        )\n        return new\n\n\n@dataclass(frozen=True)\nclass IterationStateWithId(IterationState):\n    \"\"\"Criterion evaluation result with an ID.\n\n    Attributes:\n        rowid: The unique ID associated with the evaluation result.\n\n    Raises:\n        ValueError: If `rowid` is None.\n\n    \"\"\"\n\n    rowid: int | None = None\n\n    def __post_init__(self) -> None:\n        if self.rowid is None:\n            raise ValueError(\"rowid must not be None\")\n\n\n@dataclass(frozen=True)\nclass StepResult(DictLikeAccess):\n    \"\"\"Result of a process step.\n\n    Attributes:\n        name: The name of the step.\n        type: The type of the step, either as `StepType` or string.\n        status: The status of the step, either as `StepStatus` or string.\n        n_iterations: Optional, the number of iterations performed in the step.\n\n    \"\"\"\n\n    name: str\n    type: StepType | StepTypeLiteral\n    status: StepStatus | StepStatusLiteral\n    n_iterations: int | None = None\n\n    def __post_init__(self) -> None:\n        if isinstance(self.type, str):\n            object.__setattr__(self, \"type\", StepType(self.type))\n        if isinstance(self.status, str):\n            object.__setattr__(self, \"status\", StepStatus(self.status))\n\n\n@dataclass(frozen=True)\nclass StepResultWithId(StepResult):\n    \"\"\"Step result with an ID.\n\n    Attributes:\n        rowid: The unique ID associated with the step result.\n\n    Raises:\n        ValueError: If `rowid` is None.\n\n    \"\"\"\n\n    rowid: int | None = None\n\n    def __post_init__(self) -> None:\n        if self.rowid is None:\n            raise ValueError(\"rowid must not be None\")\n        super().__post_init__()\n\n\n@dataclass(frozen=True)\nclass ProblemInitialization(DictLikeAccess):\n    \"\"\"Start characteristics of an optimization 
problem.\n\n    Attributes:\n        direction: The direction of optimization,\n            either as `Direction` or string literal.\n        params: The parameters for the initialization.\n\n    \"\"\"\n\n    direction: Direction | DirectionLiteral\n    params: PyTree\n\n\n@dataclass(frozen=True)\nclass ProblemInitializationWithId(ProblemInitialization):\n    \"\"\"Problem initialization with an ID.\n\n    Attributes:\n        rowid: The unique ID associated with the problem initialization.\n\n    Raises:\n        ValueError: If `rowid` is None.\n\n    \"\"\"\n\n    rowid: int | None = None\n\n    def __post_init__(self) -> None:\n        if self.rowid is None:\n            raise ValueError(\"rowid must not be None\")\n"
  },
  {
    "path": "src/optimagic/mark.py",
    "content": "from functools import wraps\nfrom typing import Any, Callable, ParamSpec, TypeVar\n\nfrom optimagic.optimization.algorithm import AlgoInfo\nfrom optimagic.typing import AggregationLevel\n\nP = ParamSpec(\"P\")\n\n\nScalarFuncT = TypeVar(\"ScalarFuncT\", bound=Callable[..., Any])\nVectorFuncT = TypeVar(\"VectorFuncT\", bound=Callable[..., Any])\n\n\ndef scalar(func: ScalarFuncT) -> ScalarFuncT:\n    \"\"\"Mark a function as a scalar function.\"\"\"\n    wrapper = func\n    try:\n        wrapper._problem_type = AggregationLevel.SCALAR  # type: ignore\n    except (KeyboardInterrupt, SystemExit):\n        raise\n    except Exception:\n\n        @wraps(func)\n        def wrapper(*args, **kwargs):  # type: ignore\n            return func(*args, **kwargs)\n\n        wrapper._problem_type = AggregationLevel.SCALAR  # type: ignore\n    return wrapper\n\n\ndef least_squares(func: VectorFuncT) -> VectorFuncT:\n    \"\"\"Mark a function as a least squares function.\"\"\"\n    wrapper = func\n    try:\n        wrapper._problem_type = AggregationLevel.LEAST_SQUARES  # type: ignore\n    except (KeyboardInterrupt, SystemExit):\n        raise\n    except Exception:\n\n        @wraps(func)\n        def wrapper(*args, **kwargs):  # type: ignore\n            return func(*args, **kwargs)\n\n        wrapper._problem_type = AggregationLevel.LEAST_SQUARES  # type: ignore\n    return wrapper\n\n\ndef likelihood(func: VectorFuncT) -> VectorFuncT:\n    \"\"\"Mark a function as a likelihood function.\"\"\"\n    wrapper = func\n    try:\n        wrapper._problem_type = AggregationLevel.LIKELIHOOD  # type: ignore\n    except (KeyboardInterrupt, SystemExit):\n        raise\n    except Exception:\n\n        @wraps(func)\n        def wrapper(*args, **kwargs):  # type: ignore\n            return func(*args, **kwargs)\n\n        wrapper._problem_type = AggregationLevel.LIKELIHOOD  # type: ignore\n    return wrapper\n\n\n# TODO: I get an error when adding bound=Algorithm to 
AlgorithmSubclass. Why?\nAlgorithmSubclass = TypeVar(\"AlgorithmSubclass\")\n\n\ndef minimizer(\n    name: str,\n    solver_type: AggregationLevel,\n    is_available: bool,\n    is_global: bool,\n    needs_jac: bool,\n    needs_hess: bool,\n    needs_bounds: bool,\n    supports_parallelism: bool,\n    supports_bounds: bool,\n    supports_infinite_bounds: bool,\n    supports_linear_constraints: bool,\n    supports_nonlinear_constraints: bool,\n    disable_history: bool = False,\n    experimental: bool = False,\n) -> Callable[[AlgorithmSubclass], AlgorithmSubclass]:\n    \"\"\"Mark an algorithm as a optimagic minimizer and add AlgoInfo.\n\n    Args:\n        name: The name of the algorithm as a string. Used in error messages, warnings\n            and the OptimizeResult.\n        solver_type: The type of optimization problem the algorithm solves. Used to\n            distinguish between scalar, least-squares and likelihood optimizers. Can\n            take the values AggregationLevel.SCALAR, AggregationLevel.LEAST_SQUARES and\n            AggregationLevel.LIKELIHOOD.\n        is_available: Whether the algorithm is installed.\n        is_global: Whether the algorithm is a global optimizer.\n        needs_jac: Whether the algorithm needs some kind of first derivative. This needs\n            to be True if the algorithm uses `jac` or `fun_and_jac`.\n        needs_hess: Whether the algorithm needs some kind of second derivative. This\n            is not yet implemented and will be False for all currently wrapped\n            algorithms.\n        needs_bounds: Whether the algorithm needs bounds to run. This is different from\n            supports_bounds in that algorithms that support bounds can run without\n            requiring them.\n        supports_parallelism: Whether the algorithm supports parallelism. 
This needs to\n            be True if the algorithm previously took `n_cores` and/or `batch_evaluator`\n            as arguments.\n        supports_bounds: Whether the algorithm supports bounds. This needs to be True\n            if the algorithm previously took `lower_bounds` and/or `upper_bounds` as\n            arguments.\n        supports_infinite_bounds: Whether the algorithm supports infinite values in\n            bounds.\n        supports_linear_constraints: Whether the algorithm supports linear constraints.\n            This is not yet implemented and will be False for all currently wrapped\n            algorithms.\n        supports_nonlinear_constraints: Whether the algorithm supports nonlinear\n            constraints. This needs to be True if the algorithm previously took\n            `nonlinear_constraints` as an argument.\n        disable_history: Whether the algorithm should disable history collection.\n        experimental: Whether the algorithm is experimental and should skip tests.\n\n    \"\"\"\n\n    def decorator(cls: AlgorithmSubclass) -> AlgorithmSubclass:\n        algo_info = AlgoInfo(\n            name=name,\n            solver_type=solver_type,\n            is_available=is_available,\n            is_global=is_global,\n            needs_jac=needs_jac,\n            needs_hess=needs_hess,\n            needs_bounds=needs_bounds,\n            supports_parallelism=supports_parallelism,\n            supports_bounds=supports_bounds,\n            supports_infinite_bounds=supports_infinite_bounds,\n            supports_linear_constraints=supports_linear_constraints,\n            supports_nonlinear_constraints=supports_nonlinear_constraints,\n            disable_history=disable_history,\n            experimental=experimental,\n        )\n        cls.__algo_info__ = algo_info  # type: ignore\n        return cls\n\n    return decorator\n"
  },
  {
    "path": "src/optimagic/optimization/__init__.py",
    "content": ""
  },
  {
    "path": "src/optimagic/optimization/algo_options.py",
    "content": "import numpy as np\n\nCONVERGENCE_FTOL_REL = 2e-9\n\"\"\"float: Stop when the relative improvement between two iterations is below this.\n\n    The exact definition of relative improvement depends on the optimizer and should\n    be documented there. To disable it, set it to 0.\n\n    The default value is inspired by scipy L-BFGS-B defaults, but rounded.\n\n\"\"\"\n\nCONVERGENCE_FTOL_ABS = 0\n\"\"\"float: Stop when the absolute improvement between two iterations is below this.\n\n    Disabled by default because it is very problem specific.\n\n\"\"\"\n\nCONVERGENCE_GTOL_ABS = 1e-5\n\"\"\"float: Stop when the gradient are smaller than this.\n\n    For some algorithms this criterion refers to all entries, for others to some norm.\n\n    For bound constrained optimizers this typically refers to a projected gradient.\n    The exact definition should be documented for each optimizer.\n\n    The default is the same as scipy. To disable it, set it to zero.\n\n\"\"\"\n\nCONVERGENCE_GTOL_REL = 1e-8\n\"\"\"float: Stop when the gradient, divided by the absolute value of the criterion\n    function is smaller than this. For some algorithms this criterion refers to\n    all entries, for others to some norm.For bound constrained optimizers this\n    typically refers to a projected gradient. The exact definition should be documented\n    for each optimizer. 
To disable it, set it to zero.\n\n\"\"\"\n\nCONVERGENCE_GTOL_SCALED = 1e-8\n\"\"\"float: Stop when all entries (or for some algorithms the norm) of the gradient,\n    divided by the norm of the gradient at start parameters is smaller than this.\n    For bound constrained optimizers this typically refers to a projected gradient.\n    The exact definition should be documented for each optimizer.\n    To disable it, set it to zero.\n\n\"\"\"\n\nCONVERGENCE_XTOL_REL = 1e-5\n\"\"\"float: Stop when the relative change in parameters is smaller than this.\n    The exact definition of relative change and whether this refers to the maximum\n    change or the average change depends on the algorithm and should be documented\n    there. To disable it, set it to zero. The default is the same as in scipy.\n\n\"\"\"\n\nCONVERGENCE_XTOL_ABS = 0\n\"\"\"float: Stop when the absolute change in parameters between two iterations is smaller\n    than this. Whether this refers to the maximum change or the average change depends\n    on the algorithm and should be documented there.\n\n    Disabled by default because it is very problem specific. To enable it, set it to a\n    value larger than zero.\n\n\"\"\"\n\n\nSTOPPING_MAXFUN = 1_000_000\n\"\"\"int:\n    If the maximum number of function evaluation is reached, the optimization stops\n    but we do not count this as successful convergence. The function evaluations used\n    to evaluate a numerical gradient do not count for this.\n\n\"\"\"\n\n\nSTOPPING_MAXFUN_GLOBAL = 1_000\n\"\"\"int:\n    If the maximum number of function evaluation is reached, the optimization stops\n    but we do not count this as successful convergence. The function evaluations used\n    to evaluate a numerical gradient do not count for this. 
Set to a lower number than\n    STOPPING_MAX_CRITERION_EVALUATIONS for global optimizers.\n\n\"\"\"\n\n\nSTOPPING_MAXITER = 1_000_000\n\"\"\"int:\n    If the maximum number of iterations is reached, the\n    optimization stops, but we do not count this as successful convergence.\n    The difference to ``max_criterion_evaluations`` is that one iteration might\n    need several criterion evaluations, for example in a line search or to determine\n    if the trust region radius has to be shrunk.\n\n\"\"\"\n\n\nCONVERGENCE_SECOND_BEST_FTOL_ABS = 1e-08\n\"\"\"float: absolute criterion tolerance optimagic requires if no other stopping\ncriterion apart from max iterations etc. is available\nthis is taken from scipy (SLSQP's value, smaller than Nelder-Mead).\n\n\"\"\"\n\nCONVERGENCE_SECOND_BEST_XTOL_ABS = 1e-08\n\"\"\"float: The absolute parameter tolerance optimagic requires if no other stopping\ncriterion apart from max iterations etc. is available. This is taken from pybobyqa.\n\n\"\"\"\n\nCONVERGENCE_TARGET_VALUE = None\n\"\"\"float or None: Stop when the criterion value is better than or equal to\n    this target. 
The definition of \"better\" depends on the optimization direction.\n\n    - Minimization: criterion <= target\n    - Maximization: criterion >= target\n\n    Used in population-based algorithms like genetic algorithms.\n    To disable, set to None.\n\n\"\"\"\n\nCONVERGENCE_GENERATIONS_NOIMPROVE = None\n\"\"\"int or None: Stop when the best criterion value has not improved for this\n    many consecutive generations.\n\n    Used in population-based algorithms like genetic algorithms.\n    To disable, set to None.\n\n\"\"\"\n\n\nMAX_LINE_SEARCH_STEPS = 20\n\"\"\"int: Inspired by scipy L-BFGS-B.\"\"\"\n\nLIMITED_MEMORY_STORAGE_LENGTH = 10\n\"\"\"int: Taken from scipy L-BFGS-B.\"\"\"\n\n\nCONSTRAINTS_ABSOLUTE_TOLERANCE = 1e-5\n\"\"\"float: Allowed tolerance of the equality and inequality constraints for values to be\nconsidered 'feasible'.\n\n\"\"\"\n\nN_RESTARTS = 1\n\"\"\"int: Number of times to restart the optimizer if convergence is not reached.\n    This parameter controls how many times the optimization process is restarted\n    in an attempt to achieve convergence.\n\n    - A value of 1 (the default) indicates that the optimizer will only run once,\n      disabling the restart feature.\n    - Values greater than 1 specify the maximum number of restart attempts.\n\n    Note: This is distinct from `STOPPING_MAXITER`, which limits the number of\n    iterations within a single optimizer run, not the number of restarts.\n\"\"\"\n\n\ndef get_population_size(population_size, x, lower_bound=10):\n    \"\"\"Default population size for genetic algorithms.\"\"\"\n    if population_size is None:\n        population_size = int(np.clip(10 * (len(x) + 1), lower_bound, np.inf))\n    else:\n        population_size = int(population_size)\n    return population_size\n"
  },
  {
    "path": "src/optimagic/optimization/algorithm.py",
    "content": "import typing\nimport warnings\nfrom abc import ABC, ABCMeta, abstractmethod\nfrom dataclasses import dataclass, replace\nfrom typing import Any\n\nimport numpy as np\nfrom numpy.typing import NDArray\nfrom typing_extensions import Self\n\nfrom optimagic.exceptions import InvalidAlgoInfoError, InvalidAlgoOptionError\nfrom optimagic.logging.types import StepStatus\nfrom optimagic.optimization.history import History\nfrom optimagic.optimization.internal_optimization_problem import (\n    InternalOptimizationProblem,\n)\nfrom optimagic.type_conversion import TYPE_CONVERTERS\nfrom optimagic.typing import AggregationLevel\n\n\n@dataclass(frozen=True)\nclass AlgoInfo:\n    name: str\n    solver_type: AggregationLevel\n    is_available: bool\n    is_global: bool\n    needs_jac: bool\n    needs_hess: bool\n    needs_bounds: bool\n    supports_parallelism: bool\n    supports_bounds: bool\n    supports_infinite_bounds: bool\n    supports_linear_constraints: bool\n    supports_nonlinear_constraints: bool\n    disable_history: bool = False\n    experimental: bool = False\n\n    def __post_init__(self) -> None:\n        report: list[str] = []\n        if not isinstance(self.name, str):\n            report.append(\"name must be a string\")\n        if not isinstance(self.solver_type, AggregationLevel):\n            report.append(\"problem_type must be an AggregationLevel\")\n        if not isinstance(self.is_available, bool):\n            report.append(\"is_available must be a bool\")\n        if not isinstance(self.is_global, bool):\n            report.append(\"is_global must be a bool\")\n        if not isinstance(self.needs_jac, bool):\n            report.append(\"needs_jac must be a bool\")\n        if not isinstance(self.needs_hess, bool):\n            report.append(\"needs_hess must be a bool\")\n        if not isinstance(self.needs_bounds, bool):\n            report.append(\"needs_bounds must be a bool\")\n        if not 
isinstance(self.supports_parallelism, bool):\n            report.append(\"supports_parallelism must be a bool\")\n        if not isinstance(self.supports_bounds, bool):\n            report.append(\"supports_bounds must be a bool\")\n        if not isinstance(self.supports_infinite_bounds, bool):\n            report.append(\"supports_infinite_bounds must be a bool\")\n        if not isinstance(self.supports_linear_constraints, bool):\n            report.append(\"supports_linear_constraints must be a bool\")\n        if not isinstance(self.supports_nonlinear_constraints, bool):\n            report.append(\"supports_nonlinear_constraints must be a bool\")\n        if not isinstance(self.disable_history, bool):\n            report.append(\"disable_history must be a bool\")\n\n        if report:\n            msg = (\n                \"The following arguments to AlgoInfo or `mark.minimizer` are \"\n                \"invalid:\\n\" + \"\\n\".join(report)\n            )\n            raise InvalidAlgoInfoError(msg)\n\n\n@dataclass(frozen=True)\nclass InternalOptimizeResult:\n    \"\"\"Internal representation of the result of an optimization problem.\n\n    Args:\n        x: The optimal parameters.\n        fun: The function value at the optimal parameters.\n        success: Whether the optimization was successful.\n        message: A message from the optimizer.\n        status: The status of the optimization.\n        n_fun_evals: The number of function evaluations.\n        n_jac_evals: The number of gradient or jacobian evaluations.\n        n_hess_evals: The number of Hessian evaluations.\n        n_iterations: The number of iterations.\n        jac: The Jacobian of the objective function at the optimal parameters.\n        hess: The Hessian of the objective function at the optimal parameters.\n        hess_inv: The inverse of the Hessian of the objective function at the optimal\n            parameters.\n        max_constraint_violation: The maximum constraint 
violation.\n        info: Additional information from the optimizer.\n\n    \"\"\"\n\n    x: NDArray[np.float64]\n    fun: float | NDArray[np.float64]\n    success: bool | None = None\n    message: str | None = None\n    status: int | None = None\n    n_fun_evals: int | None = None\n    n_jac_evals: int | None = None\n    n_hess_evals: int | None = None\n    n_iterations: int | None = None\n    jac: NDArray[np.float64] | None = None\n    hess: NDArray[np.float64] | None = None\n    hess_inv: NDArray[np.float64] | None = None\n    max_constraint_violation: float | None = None\n    info: dict[str, typing.Any] | None = None\n    history: History | None = None\n    multistart_info: dict[str, typing.Any] | None = None\n\n    def __post_init__(self) -> None:\n        report: list[str] = []\n        if not isinstance(self.x, np.ndarray):\n            report.append(\"x must be a numpy array\")\n\n        if not (isinstance(self.fun, np.ndarray) or np.isscalar(self.fun)):\n            report.append(\"fun must be a numpy array or scalar\")\n\n        if self.success is not None and not isinstance(self.success, bool):\n            report.append(\"success must be a bool or None\")\n\n        if self.message is not None and not isinstance(self.message, str):\n            report.append(\"message must be a string or None\")\n\n        if self.n_fun_evals is not None and not isinstance(self.n_fun_evals, int):\n            report.append(\"n_fun_evals must be an int or None\")\n\n        if self.n_jac_evals is not None and not isinstance(self.n_jac_evals, int):\n            report.append(\"n_jac_evals must be an int or None\")\n\n        if self.n_hess_evals is not None and not isinstance(self.n_hess_evals, int):\n            report.append(\"n_hess_evals must be an int or None\")\n\n        if self.n_iterations is not None and not isinstance(self.n_iterations, int):\n            report.append(\"n_iterations must be an int or None\")\n\n        if self.jac is not None and not 
isinstance(self.jac, np.ndarray):\n            report.append(\"jac must be a numpy array or None\")\n\n        if self.hess is not None and not isinstance(self.hess, np.ndarray):\n            report.append(\"hess must be a numpy array or None\")\n\n        if self.hess_inv is not None and not isinstance(self.hess_inv, np.ndarray):\n            report.append(\"hess_inv must be a numpy array or None\")\n\n        if self.max_constraint_violation is not None and not np.isscalar(\n            self.max_constraint_violation\n        ):\n            report.append(\"max_constraint_violation must be a scalar or None\")\n\n        if self.info is not None and not isinstance(self.info, dict):\n            report.append(\"info must be a dictionary or None\")\n\n        if self.status is not None and not isinstance(self.status, int):\n            report.append(\"status must be an int or None\")\n\n        if self.max_constraint_violation and not isinstance(\n            self.max_constraint_violation, float\n        ):\n            report.append(\"max_constraint_violation must be a float or None\")\n\n        if report:\n            msg = (\n                \"The following arguments to InternalOptimizeResult are invalid:\\n\"\n                + \"\\n\".join(report)\n            )\n            raise TypeError(msg)\n\n\nclass AlgorithmMeta(ABCMeta):\n    \"\"\"Metaclass to get repr, algo_info and name for classes, not just instances.\"\"\"\n\n    def __repr__(self) -> str:\n        if hasattr(self, \"__algo_info__\") and self.__algo_info__ is not None:\n            out = f\"om.algos.{self.__algo_info__.name}\"\n        else:\n            out = self.__class__.__name__\n        return out\n\n    @property\n    def name(self) -> str:\n        if hasattr(self, \"__algo_info__\") and self.__algo_info__ is not None:\n            out = self.__algo_info__.name\n        else:\n            out = self.__class__.__name__\n        return out\n\n    @property\n    def algo_info(self) -> 
AlgoInfo:\n        if not hasattr(self, \"__algo_info__\") or self.__algo_info__ is None:\n            msg = (\n                f\"The algorithm {self.name} does not have the __algo_info__ \"\n                \"attribute. Use the `mark.minimizer` decorator to add this attribute.\"\n            )\n            raise AttributeError(msg)\n\n        return self.__algo_info__\n\n\n@dataclass(frozen=True)\nclass Algorithm(ABC, metaclass=AlgorithmMeta):\n    \"\"\"Base class for all optimization algorithms in optimagic.\n\n    To add an optimizer to optimagic you need to subclass Algorithm and override the\n    ``_solve_internal_problem`` method.\n\n    \"\"\"\n\n    @abstractmethod\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        pass\n\n    def __post_init__(self) -> None:\n        for field in self.__dataclass_fields__:\n            raw_value = getattr(self, field)\n            target_type = typing.cast(type, self.__dataclass_fields__[field].type)\n            if target_type in TYPE_CONVERTERS:\n                try:\n                    value = TYPE_CONVERTERS[target_type](raw_value)\n                except (KeyboardInterrupt, SystemExit):\n                    raise\n                except Exception as e:\n                    msg = (\n                        f\"Could not convert the value of the field {field} to the \"\n                        f\"expected type {target_type}.\"\n                    )\n                    raise InvalidAlgoOptionError(msg) from e\n\n                object.__setattr__(self, field, value)\n\n    def with_option(self, **kwargs: Any) -> Self:\n        \"\"\"Create a modified copy with the given options.\"\"\"\n        valid_keys = set(self.__dataclass_fields__) - {\"__algo_info__\"}\n        invalid = set(kwargs) - valid_keys\n        if invalid:\n            raise InvalidAlgoOptionError(\n                f\"The keyword arguments 
{invalid} are not valid options for \"\n                f\"the algorithm {self.name}\"\n            )\n        return replace(self, **kwargs)\n\n    def with_stopping(self, **kwargs: Any) -> Self:\n        \"\"\"Create a modified copy with the given stopping options.\"\"\"\n        options = {}\n        for k, v in kwargs.items():\n            if k.startswith(\"stopping_\"):\n                options[k] = v\n            else:\n                options[f\"stopping_{k}\"] = v\n\n        return self.with_option(**options)\n\n    def with_convergence(self, **kwargs: Any) -> Self:\n        \"\"\"Create a modified copy with the given convergence options.\"\"\"\n        options = {}\n        for k, v in kwargs.items():\n            if k.startswith(\"convergence_\"):\n                options[k] = v\n            else:\n                options[f\"convergence_{k}\"] = v\n\n        return self.with_option(**options)\n\n    def solve_internal_problem(\n        self,\n        problem: InternalOptimizationProblem,\n        x0: NDArray[np.float64],\n        step_id: int,\n    ) -> InternalOptimizeResult:\n        \"\"\"Solve the internal optimization problem.\n\n        This method is called internally by `minimize` or `maximize` to solve the\n        internal optimization problem and process the results.\n\n        \"\"\"\n        problem = problem.with_new_history().with_step_id(step_id)\n\n        if problem.logger:\n            problem.logger.step_store.update(\n                step_id, {\"status\": str(StepStatus.RUNNING.value)}\n            )\n\n        result = self._solve_internal_problem(problem, x0)\n\n        if (not self.algo_info.disable_history) and (result.history is None):\n            result = replace(result, history=problem.history)\n\n        if problem.logger:\n            problem.logger.step_store.update(\n                step_id, {\"status\": str(StepStatus.COMPLETE.value)}\n            )\n\n        return result\n\n    def with_option_if_applicable(self, 
**kwargs: Any) -> Self:\n        \"\"\"Call with_option only with applicable keyword arguments.\"\"\"\n        valid_keys = set(self.__dataclass_fields__) - {\"__algo_info__\"}\n        invalid = set(kwargs) - valid_keys\n        if invalid:\n            msg = (\n                \"The following algo_options were ignored because they are not \"\n                f\"compatible with {self.name}:\\n\\n {invalid}\"\n            )\n            warnings.warn(msg)\n\n        kwargs = {k: v for k, v in kwargs.items() if k in valid_keys}\n        return self.with_option(**kwargs)\n\n    @property\n    def name(self) -> str:\n        \"\"\"The name of the algorithm.\"\"\"\n        # cannot call algo_info here because it would be an infinite recursion\n        if hasattr(self, \"__algo_info__\") and self.__algo_info__ is not None:\n            return self.__algo_info__.name\n        return self.__class__.__name__\n\n    @property\n    def algo_info(self) -> AlgoInfo:\n        \"\"\"Information about the algorithm.\"\"\"\n        if not hasattr(self, \"__algo_info__\") or self.__algo_info__ is None:\n            msg = (\n                f\"The algorithm {self.name} does not have the __algo_info__ \"\n                \"attribute. Use the `mark.minimizer` decorator to add this attribute.\"\n            )\n            raise AttributeError(msg)\n\n        return self.__algo_info__\n"
  },
  {
    "path": "src/optimagic/optimization/convergence_report.py",
    "content": "import numpy as np\nfrom numpy.typing import NDArray\n\nfrom optimagic.optimization.history import History\n\n\ndef get_convergence_report(history: History) -> dict[str, dict[str, float]] | None:\n    is_accepted = history.is_accepted\n\n    critvals = np.array(history.fun, dtype=np.float64)[is_accepted]\n    params = np.array(history.flat_params, dtype=np.float64)[is_accepted]\n\n    if len(critvals) < 2:\n        out = None\n    else:\n        out = {}\n        for name, n_entries in [(\"one_step\", 2), (\"five_steps\", min(6, len(critvals)))]:\n            relevant_critvals = critvals[-n_entries:]\n            relevant_params = params[-n_entries:]\n\n            max_f_rel, max_f_abs = _get_max_f_changes(relevant_critvals)\n            max_x_rel, max_x_abs = _get_max_x_changes(relevant_params)\n\n            col_dict = {\n                \"relative_criterion_change\": max_f_rel,\n                \"relative_params_change\": max_x_rel,\n                \"absolute_criterion_change\": max_f_abs,\n                \"absolute_params_change\": max_x_abs,\n            }\n\n            out[name] = col_dict\n\n    return out\n\n\ndef _get_max_f_changes(critvals: NDArray[np.float64]) -> tuple[float, float]:\n    best_val = critvals[-1]\n    worst_val = critvals[0]\n\n    max_change_abs = np.abs(best_val - worst_val)\n    denom = max(np.abs(best_val), 0.1)\n\n    max_change_rel = max_change_abs / denom\n\n    return max_change_rel, max_change_abs\n\n\ndef _get_max_x_changes(params: NDArray[np.float64]) -> tuple[float, float]:\n    best_x = params[-1]\n    diffs = params - best_x\n    denom = np.clip(np.abs(best_x), 0.1, np.inf)\n\n    distances_abs = np.linalg.norm(diffs, axis=1)\n    max_change_abs = distances_abs.max()\n\n    scaled = diffs / denom\n\n    distances_rel = np.linalg.norm(scaled, axis=1)\n    max_change_rel = distances_rel.max()\n    return max_change_rel, max_change_abs\n"
  },
  {
    "path": "src/optimagic/optimization/create_optimization_problem.py",
    "content": "import warnings\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Any, Callable, Type\n\nfrom optimagic import deprecations\nfrom optimagic.algorithms import ALL_ALGORITHMS\nfrom optimagic.deprecations import (\n    handle_log_options_throw_deprecated_warning,\n    replace_and_warn_about_deprecated_algo_options,\n    replace_and_warn_about_deprecated_bounds,\n)\nfrom optimagic.differentiation.numdiff_options import (\n    NumdiffOptions,\n    NumdiffPurpose,\n    get_default_numdiff_options,\n    pre_process_numdiff_options,\n)\nfrom optimagic.exceptions import (\n    AliasError,\n    InvalidFunctionError,\n    MissingInputError,\n)\nfrom optimagic.logging.logger import LogOptions, SQLiteLogOptions\nfrom optimagic.optimization.algorithm import AlgoInfo, Algorithm\nfrom optimagic.optimization.fun_value import (\n    SpecificFunctionValue,\n    convert_fun_output_to_function_value,\n    enforce_return_type,\n    enforce_return_type_with_jac,\n)\nfrom optimagic.optimization.multistart_options import (\n    MultistartOptions,\n    pre_process_multistart,\n)\nfrom optimagic.optimization.scipy_aliases import (\n    map_method_to_algorithm,\n    split_fun_and_jac,\n)\nfrom optimagic.parameters.bounds import Bounds, pre_process_bounds\nfrom optimagic.parameters.scaling import ScalingOptions, pre_process_scaling\nfrom optimagic.shared.process_user_function import (\n    get_kwargs_from_args,\n    infer_aggregation_level,\n    partial_func_of_params,\n)\nfrom optimagic.typing import AggregationLevel, Direction, ErrorHandling, PyTree\nfrom optimagic.utilities import propose_alternatives\n\n\n@dataclass(frozen=True)\nclass OptimizationProblem:\n    \"\"\"Collect everything that defines the optimization problem.\n\n    The attributes are very close to the arguments of `maximize` and `minimize` but they\n    are converted to stricter types. 
For example, the bounds argument that can be a\n    sequence of tuples, a scipy.optimize.Bounds object or an optimagic.Bounds when\n    calling `maximize` or `minimize` is converted to an optimagic.Bounds object.\n\n    All deprecated arguments are removed and all scipy aliases are replaced by their\n    optimagic counterparts.\n\n    All user provided functions are partialled if corresponding `kwargs` dictionaries\n    were provided.\n\n    # TODO: Document attributes after other todos are resolved.\n\n    \"\"\"\n\n    fun: Callable[[PyTree], SpecificFunctionValue]\n    params: PyTree\n    algorithm: Algorithm\n    bounds: Bounds | None\n    # TODO: Only allow list[Constraint] or Constraint\n    constraints: list[dict[str, Any]]\n    jac: Callable[[PyTree], PyTree] | None\n    fun_and_jac: Callable[[PyTree], tuple[SpecificFunctionValue, PyTree]] | None\n    numdiff_options: NumdiffOptions\n    # TODO: logging will become None | Logger and log_options will be removed\n    error_handling: ErrorHandling\n    logging: LogOptions | None\n    error_penalty: dict[str, Any] | None\n    scaling: ScalingOptions | None\n    multistart: MultistartOptions | None\n    collect_history: bool\n    skip_checks: bool\n    direction: Direction\n    fun_eval: SpecificFunctionValue\n\n\ndef create_optimization_problem(\n    direction,\n    fun,\n    params,\n    algorithm,\n    *,\n    bounds,\n    fun_kwargs,\n    constraints,\n    algo_options,\n    jac,\n    jac_kwargs,\n    fun_and_jac,\n    fun_and_jac_kwargs,\n    numdiff_options,\n    logging,\n    error_handling,\n    error_penalty,\n    scaling,\n    multistart,\n    collect_history,\n    skip_checks,\n    # scipy aliases\n    x0,\n    method,\n    args,\n    # scipy arguments that are not yet supported\n    hess,\n    hessp,\n    callback,\n    # scipy arguments that will never be supported\n    options,\n    tol,\n    # deprecated arguments\n    criterion,\n    criterion_kwargs,\n    derivative,\n    derivative_kwargs,\n    
criterion_and_derivative,\n    criterion_and_derivative_kwargs,\n    lower_bounds,\n    log_options,\n    upper_bounds,\n    soft_lower_bounds,\n    soft_upper_bounds,\n    scaling_options,\n    multistart_options,\n):\n    # ==================================================================================\n    # error handling needed as long as fun is an optional argument\n    # ==================================================================================\n\n    if fun_and_jac is None and fun is None and criterion is None:\n        msg = (\n            \"Missing objective function. Please provide an objective function as the \"\n            \"first positional argument or as the keyword argument `fun` or \"\n            \" with `fun_and_jac`.\"\n        )\n        raise MissingInputError(msg)\n\n    if params is None and x0 is None:\n        msg = (\n            \"Missing start parameters. Please provide start parameters as the second \"\n            \"positional argument or as the keyword argument `params`.\"\n        )\n        raise MissingInputError(msg)\n\n    if algorithm is None and method is None:\n        msg = (\n            \"Missing algorithm. Please provide an algorithm as the third positional \"\n            \"argument or as the keyword argument `algorithm`.\"\n        )\n        raise MissingInputError(msg)\n\n    if fun_and_jac is not None and fun is None and criterion is None:\n        if isinstance(fun_and_jac, list):\n            raise NotImplementedError(\n                \"If `fun_and_jac` is a list of callables, `fun` is not optional. 
\"\n            )\n        fun = split_fun_and_jac(fun_and_jac, target=\"fun\")\n\n    # ==================================================================================\n    # deprecations\n    # ==================================================================================\n\n    if log_options is not None:\n        logging = handle_log_options_throw_deprecated_warning(log_options, logging)\n\n    if criterion is not None:\n        deprecations.throw_criterion_future_warning()\n        fun = criterion if fun is None else fun\n\n    if criterion_kwargs is not None:\n        deprecations.throw_criterion_kwargs_future_warning()\n        fun_kwargs = criterion_kwargs if fun_kwargs is None else fun_kwargs\n\n    if derivative is not None:\n        deprecations.throw_derivative_future_warning()\n        jac = derivative if jac is None else jac\n\n    if derivative_kwargs is not None:\n        deprecations.throw_derivative_kwargs_future_warning()\n        jac_kwargs = derivative_kwargs if jac_kwargs is None else jac_kwargs\n\n    if criterion_and_derivative is not None:\n        deprecations.throw_criterion_and_derivative_future_warning()\n        fun_and_jac = criterion_and_derivative if fun_and_jac is None else fun_and_jac\n\n    if criterion_and_derivative_kwargs is not None:\n        deprecations.throw_criterion_and_derivative_kwargs_future_warning()\n        fun_and_jac_kwargs = (\n            criterion_and_derivative_kwargs\n            if fun_and_jac_kwargs is None\n            else fun_and_jac_kwargs\n        )\n\n    if scaling_options is not None:\n        deprecations.throw_scaling_options_future_warning()\n        if scaling is True and scaling_options is not None:\n            scaling = scaling_options\n\n    if multistart_options is not None:\n        deprecations.throw_multistart_options_future_warning()\n        if multistart is True and multistart_options is not None:\n            multistart = multistart_options\n\n    
deprecations.throw_dict_constraints_future_warning_if_required(constraints)\n\n    algo_options = replace_and_warn_about_deprecated_algo_options(algo_options)\n\n    bounds = replace_and_warn_about_deprecated_bounds(\n        lower_bounds=lower_bounds,\n        upper_bounds=upper_bounds,\n        bounds=bounds,\n        soft_lower_bounds=soft_lower_bounds,\n        soft_upper_bounds=soft_upper_bounds,\n    )\n\n    if isinstance(jac, dict):\n        jac = deprecations.replace_and_warn_about_deprecated_derivatives(jac, \"jac\")\n\n    if isinstance(fun_and_jac, dict):\n        fun_and_jac = deprecations.replace_and_warn_about_deprecated_derivatives(\n            fun_and_jac, \"fun_and_jac\"\n        )\n    # ==================================================================================\n    # handle scipy aliases\n    # ==================================================================================\n\n    if x0 is not None:\n        if params is not None:\n            msg = (\n                \"x0 is an alias for params (for better compatibility with scipy). \"\n                \"Do not use both x0 and params.\"\n            )\n            raise AliasError(msg)\n        else:\n            params = x0\n\n    if method is not None:\n        if algorithm is not None:\n            msg = (\n                \"method is an alias for algorithm to select the scipy optimizers under \"\n                \"their original name. Do not use both method and algorithm.\"\n            )\n            raise AliasError(msg)\n        else:\n            algorithm = map_method_to_algorithm(method)\n\n    if args is not None:\n        if (\n            fun_kwargs is not None\n            or jac_kwargs is not None\n            or fun_and_jac_kwargs is not None\n        ):\n            msg = (\n                \"args is an alternative to fun_kwargs, jac_kwargs and \"\n                \"fun_and_jac_kwargs that optimagic supports for compatibility \"\n                \"with scipy. 
Do not use args in conjunction with any of the other \"\n                \"arguments.\"\n            )\n            raise AliasError(msg)\n        else:\n            kwargs = get_kwargs_from_args(args, fun, offset=1)\n            fun_kwargs, jac_kwargs, fun_and_jac_kwargs = kwargs, kwargs, kwargs\n\n    # jac is not an alias but we need to handle the case where `jac=True`, i.e. fun is\n    # actually fun_and_jac. This is not recommended in optimagic because then optimizers\n    # cannot evaluate fun in isolation but we can easily support it for compatibility.\n    if jac is True:\n        jac = None\n        if fun_and_jac is None:\n            fun_and_jac = fun\n            fun = split_fun_and_jac(fun_and_jac, target=\"fun\")\n\n    # ==================================================================================\n    # Handle scipy arguments that are not yet implemented\n    # ==================================================================================\n\n    if hess is not None:\n        msg = (\n            \"The hess argument is not yet supported in optimagic. Create an issue on \"\n            \"https://github.com/optimagic-dev/optimagic/ if you have urgent need \"\n            \"for this feature.\"\n        )\n        raise NotImplementedError(msg)\n\n    if hessp is not None:\n        msg = (\n            \"The hessp argument is not yet supported in optimagic. Create an issue on \"\n            \"https://github.com/optimagic-dev/optimagic/ if you have urgent need \"\n            \"for this feature.\"\n        )\n        raise NotImplementedError(msg)\n\n    if callback is not None:\n        msg = (\n            \"The callback argument is not yet supported in optimagic. 
Create an issue \"\n            \"on https://github.com/optimagic-dev/optimagic/ if you have urgent \"\n            \"need for this feature.\"\n        )\n        raise NotImplementedError(msg)\n\n    # ==================================================================================\n    # Handle scipy arguments that will never be supported\n    # ==================================================================================\n\n    if options is not None:\n        # TODO: Add link to a how-to guide or tutorial for this\n        msg = (\n            \"The options argument is not supported in optimagic. Please use the \"\n            \"algo_options argument instead.\"\n        )\n        raise NotImplementedError(msg)\n\n    if tol is not None:\n        # TODO: Add link to a how-to guide or tutorial for this\n        msg = (\n            \"The tol argument is not supported in optimagic. Please use \"\n            \"algo_options or configured algorithms instead to set convergence criteria \"\n            \"for your optimizer.\"\n        )\n        raise NotImplementedError(msg)\n\n    # ==================================================================================\n    # Convert literals to enums\n    # ==================================================================================\n    error_handling = ErrorHandling(error_handling)\n\n    # ==================================================================================\n    # Set default values and check options\n    # ==================================================================================\n    bounds = pre_process_bounds(bounds)\n    scaling = pre_process_scaling(scaling)\n    multistart = pre_process_multistart(multistart)\n    numdiff_options = pre_process_numdiff_options(numdiff_options)\n    constraints = deprecations.pre_process_constraints(constraints)\n\n    if numdiff_options is None:\n        numdiff_options = get_default_numdiff_options(purpose=NumdiffPurpose.OPTIMIZE)\n\n    
fun_kwargs = {} if fun_kwargs is None else fun_kwargs\n    constraints = [] if constraints is None else constraints\n    algo_options = {} if algo_options is None else algo_options\n    jac_kwargs = {} if jac_kwargs is None else jac_kwargs\n    fun_and_jac_kwargs = {} if fun_and_jac_kwargs is None else fun_and_jac_kwargs\n    error_penalty = {} if error_penalty is None else error_penalty\n\n    if isinstance(logging, str) or isinstance(logging, Path):\n        log_path = Path(logging)\n        logging = SQLiteLogOptions(log_path)\n\n    # ==================================================================================\n    # evaluate fun for the first time\n    # ==================================================================================\n    fun = partial_func_of_params(\n        func=fun,\n        kwargs=fun_kwargs,\n        name=\"criterion\",\n        skip_checks=skip_checks,\n    )\n\n    # This should be done as late as possible; It has to be done here to infer the\n    # problem type until the decorator approach becomes mandatory.\n    # TODO: Move this into `_optimize` as soon as we reach 0.6.0\n    try:\n        fun_eval = fun(params)\n    except (KeyboardInterrupt, SystemExit):\n        raise\n    except Exception as e:\n        msg = \"Error while evaluating fun at start params.\"\n        raise InvalidFunctionError(msg) from e\n\n    if deprecations.is_dict_output(fun_eval):\n        deprecations.throw_dict_output_warning()\n\n    # ==================================================================================\n    # infer the problem type\n    # ==================================================================================\n\n    if deprecations.is_dict_output(fun_eval):\n        problem_type = deprecations.infer_problem_type_from_dict_output(fun_eval)\n    else:\n        problem_type = infer_aggregation_level(fun)\n\n    if (\n        problem_type == AggregationLevel.LEAST_SQUARES\n        and direction == Direction.MAXIMIZE\n    ):\n 
       raise InvalidFunctionError(\"Least-squares problems cannot be maximized.\")\n\n    # ==================================================================================\n    # process the fun_eval; Can be removed once the first evaluation gets moved to\n    # a later point where the `enforce` decorator has already been applied.\n    # ==================================================================================\n    if deprecations.is_dict_output(fun_eval):\n        fun_eval = deprecations.convert_dict_to_function_value(fun_eval)\n        fun = deprecations.replace_dict_output(fun)\n    else:\n        fun_eval = convert_fun_output_to_function_value(fun_eval, problem_type)\n\n    fun = enforce_return_type(problem_type)(fun)\n\n    # ==================================================================================\n    # Process the user provided algorithm\n    # ==================================================================================\n\n    algorithm = pre_process_user_algorithm(algorithm)\n    algorithm = algorithm.with_option_if_applicable(**algo_options)\n\n    if algorithm.algo_info.solver_type == AggregationLevel.LIKELIHOOD:\n        if problem_type not in [\n            AggregationLevel.LIKELIHOOD,\n            AggregationLevel.LEAST_SQUARES,\n        ]:\n            raise InvalidFunctionError(\n                \"Likelihood solvers can only be used with likelihood or least-squares \"\n                \"problems.\"\n            )\n    elif algorithm.algo_info.solver_type == AggregationLevel.LEAST_SQUARES:\n        if problem_type != AggregationLevel.LEAST_SQUARES:\n            raise InvalidFunctionError(\n                \"Least-squares solvers can only be used with least-squares problems.\"\n            )\n\n    # ==================================================================================\n    # select the correct derivative functions\n    # ==================================================================================\n\n    
if jac is not None:\n        jac = pre_process_derivatives(\n            candidate=jac, name=\"jac\", solver_type=algorithm.algo_info.solver_type\n        )\n\n    if fun_and_jac is not None:\n        fun_and_jac = pre_process_derivatives(\n            candidate=fun_and_jac,\n            name=\"fun_and_jac\",\n            solver_type=algorithm.algo_info.solver_type,\n        )\n\n    # ==================================================================================\n    # partial the kwargs into corresponding functions\n    # ==================================================================================\n\n    if jac is not None:\n        jac = partial_func_of_params(\n            func=jac,\n            kwargs=jac_kwargs,\n            name=\"derivative\",\n            skip_checks=skip_checks,\n        )\n\n    if fun_and_jac is not None:\n        fun_and_jac = partial_func_of_params(\n            func=fun_and_jac,\n            kwargs=fun_and_jac_kwargs,\n            name=\"criterion_and_derivative\",\n            skip_checks=skip_checks,\n        )\n        fun_and_jac = deprecations.replace_dict_output(fun_and_jac)\n\n        fun_and_jac = enforce_return_type_with_jac(algorithm.algo_info.solver_type)(\n            fun_and_jac\n        )\n\n    # ==================================================================================\n    # Check types of arguments\n    # ==================================================================================\n\n    if not skip_checks:\n        if params is None:\n            raise ValueError(\"params cannot be None\")\n\n        if not isinstance(fun, Callable):\n            raise ValueError(\"fun must be a callable\")\n\n        if not isinstance(algorithm, Algorithm):\n            raise ValueError(\"algorithm must be an Algorithm object.\")\n\n        if not isinstance(algo_options, dict | None):\n            raise ValueError(\"algo_options must be a dictionary or None\")\n\n        if not 
isinstance(algorithm.algo_info, AlgoInfo):\n            raise ValueError(\"algo_info must be an AlgoInfo object\")\n\n        if not isinstance(bounds, Bounds | None):\n            raise ValueError(\"bounds must be a Bounds object or None\")\n\n        if not all(isinstance(c, dict) for c in constraints):\n            # TODO: Only allow list[Constraint]\n            raise ValueError(\"constraints must be a list of dictionaries\")\n\n        if not isinstance(jac, Callable | None):\n            raise ValueError(\"jac must be a callable or None\")\n\n        if not isinstance(fun_and_jac, Callable | None):\n            raise ValueError(\"fun_and_jac must be a callable or None\")\n\n        if not isinstance(numdiff_options, NumdiffOptions):\n            raise ValueError(\"numdiff_options must be a NumdiffOptions object\")\n\n        if not isinstance(logging, bool | Path | LogOptions | None):\n            raise ValueError(\n                \"logging must be a boolean, a path, a LogOptions instance or None\"\n            )\n\n        if not isinstance(log_options, dict | None):\n            raise ValueError(\"log_options must be a dictionary or None\")\n\n        if not isinstance(error_penalty, dict | None):\n            raise ValueError(\"error_penalty must be a dictionary or None\")\n\n        if not isinstance(scaling, ScalingOptions | None):\n            raise ValueError(\"scaling must be a ScalingOptions object or None\")\n\n        if not isinstance(multistart, MultistartOptions | None):\n            raise ValueError(\"multistart must be a MultistartOptions object or None\")\n\n        if not isinstance(collect_history, bool):\n            raise ValueError(\"collect_history must be a boolean\")\n\n    # ==================================================================================\n    # create the problem object\n    # ==================================================================================\n\n    problem = OptimizationProblem(\n        
fun=fun,\n        params=params,\n        algorithm=algorithm,\n        bounds=bounds,\n        constraints=constraints,\n        jac=jac,\n        fun_and_jac=fun_and_jac,\n        numdiff_options=numdiff_options,\n        logging=logging,\n        error_handling=error_handling,\n        error_penalty=error_penalty,\n        scaling=scaling,\n        multistart=multistart,\n        collect_history=collect_history,\n        skip_checks=skip_checks,\n        direction=direction,\n        fun_eval=fun_eval,\n    )\n\n    return problem\n\n\ndef pre_process_derivatives(candidate, name, solver_type):\n    if callable(candidate):\n        candidate = [candidate]\n\n    out = None\n    for func in candidate:\n        if not callable(func):\n            raise ValueError(f\"{name} must be a callable or sequence of callables.\")\n\n        problem_type = infer_aggregation_level(func)\n        if problem_type == solver_type:\n            out = func\n\n    if out is None:\n        msg = (\n            f\"You used the `{name}` argument but none of the callables you provided \"\n            \"has the correct aggregation level for your selected optimization \"\n            \"algorithm. Falling back to numerical derivatives.\"\n        )\n        warnings.warn(msg)\n\n    return out\n\n\ndef pre_process_user_algorithm(\n    algorithm: str | Algorithm | Type[Algorithm],\n) -> Algorithm:\n    \"\"\"Process the user specfied algorithm.\"\"\"\n    if isinstance(algorithm, str):\n        try:\n            # Use ALL_ALGORITHMS and not just AVAILABLE_ALGORITHMS such that the\n            # algorithm specific error message with installation instruction will be\n            # reached if an optional dependency is not installed.\n            algorithm = ALL_ALGORITHMS[algorithm]()\n        except KeyError:\n            proposed = propose_alternatives(algorithm, list(ALL_ALGORITHMS))\n            raise ValueError(\n                f\"Invalid algorithm: {algorithm}. 
Did you mean {proposed}?\"\n            ) from None\n    elif isinstance(algorithm, type) and issubclass(algorithm, Algorithm):\n        algorithm = algorithm()\n\n    return algorithm\n"
  },
  {
    "path": "src/optimagic/optimization/error_penalty.py",
    "content": "from typing import Callable\n\nimport numpy as np\nfrom numpy.typing import NDArray\n\nfrom optimagic.config import CRITERION_PENALTY_CONSTANT, CRITERION_PENALTY_SLOPE\nfrom optimagic.optimization.fun_value import (\n    LeastSquaresFunctionValue,\n    LikelihoodFunctionValue,\n    ScalarFunctionValue,\n    SpecificFunctionValue,\n)\nfrom optimagic.typing import AggregationLevel, Direction\n\n\ndef _scalar_penalty(\n    x: NDArray[np.float64],\n    constant: float | NDArray[np.float64],\n    slope: float | NDArray[np.float64],\n    x0: NDArray[np.float64],\n    dim_out: int | None = None,\n) -> tuple[ScalarFunctionValue, NDArray[np.float64]]:  # noqa: ARG001\n    value = constant + slope * np.linalg.norm(x - x0)\n    jac = slope * (x - x0) / np.linalg.norm(x - x0)\n    return ScalarFunctionValue(value=value), jac\n\n\ndef _likelihood_penalty(\n    x: NDArray[np.float64],\n    constant: float | NDArray[np.float64],\n    slope: float | NDArray[np.float64],\n    x0: NDArray[np.float64],\n    dim_out: int,\n) -> tuple[LikelihoodFunctionValue, NDArray[np.float64]]:  # noqa: ARG001\n    factor = (constant + slope * np.linalg.norm(x - x0)) / dim_out\n    contrib = np.ones(dim_out) * factor\n    row = slope * (x - x0) / (dim_out * np.linalg.norm(x - x0))\n    jac = np.full((dim_out, len(x)), row)\n    return LikelihoodFunctionValue(value=contrib), jac\n\n\ndef _penalty_residuals(\n    x: NDArray[np.float64],\n    constant: float | NDArray[np.float64],\n    slope: float | NDArray[np.float64],\n    x0: NDArray[np.float64],\n    dim_out: int,\n) -> tuple[LeastSquaresFunctionValue, NDArray[np.float64]]:\n    factor = np.sqrt((constant + slope * np.linalg.norm(x - x0)) / dim_out)\n    contrib = np.ones(dim_out) * factor\n\n    scalar_penalty, _ = _scalar_penalty(x, constant, slope, x0)\n    inner_deriv = slope * (x - x0) / np.linalg.norm(x - x0)\n    outer_deriv = 0.5 / np.sqrt(scalar_penalty.value * dim_out)\n    row = outer_deriv * inner_deriv\n    jac = 
np.full((dim_out, len(x)), row)\n\n    return LeastSquaresFunctionValue(value=contrib), jac\n\n\ndef get_error_penalty_function(\n    start_x: NDArray[np.float64],\n    start_criterion: SpecificFunctionValue,\n    error_penalty: dict[str, float] | None,\n    solver_type: AggregationLevel,\n    direction: Direction,\n) -> Callable[[NDArray[np.float64]], tuple[SpecificFunctionValue, NDArray[np.float64]]]:\n    error_penalty = {} if error_penalty is None else error_penalty\n\n    first_value = start_criterion.internal_value(solver_type)\n\n    constant, slope = _process_error_penalty(\n        error_penalty=error_penalty,\n        first_value=first_value,\n        direction=direction,\n    )\n\n    dim_out = (\n        1\n        if solver_type == AggregationLevel.SCALAR\n        else len(start_criterion.internal_value(solver_type))  # type: ignore\n    )\n\n    _penalty: Callable[\n        [\n            NDArray[np.float64],\n            float | NDArray[np.float64],\n            float | NDArray[np.float64],\n            NDArray[np.float64],\n            int,\n        ],\n        tuple[SpecificFunctionValue, NDArray[np.float64]],\n    ]\n    if solver_type == AggregationLevel.SCALAR:\n        _penalty = _scalar_penalty\n    elif solver_type == AggregationLevel.LIKELIHOOD:\n        _penalty = _likelihood_penalty\n    elif solver_type == AggregationLevel.LEAST_SQUARES:\n        _penalty = _penalty_residuals\n\n    def penalty(\n        x: NDArray[np.float64],\n    ) -> tuple[SpecificFunctionValue, NDArray[np.float64]]:\n        out = _penalty(\n            x=x,\n            constant=constant,\n            slope=slope,\n            x0=start_x,\n            dim_out=dim_out,\n        )\n        return out\n\n    return penalty\n\n\ndef _process_error_penalty(\n    error_penalty: dict[str, float] | None,\n    first_value: float | NDArray[np.float64],\n    direction: Direction,\n) -> tuple[float | NDArray[np.float64], float | NDArray[np.float64]]:\n    \"\"\"Add default 
options to error_penalty options.\"\"\"\n    if error_penalty is not None:\n        error_penalty = error_penalty.copy()\n    else:\n        error_penalty = {}\n\n    if direction == Direction.MINIMIZE:\n        default_constant = (\n            first_value + np.abs(first_value) + CRITERION_PENALTY_CONSTANT\n        )\n        default_slope = CRITERION_PENALTY_SLOPE\n    elif direction == Direction.MAXIMIZE:\n        default_constant = (\n            first_value - np.abs(first_value) - CRITERION_PENALTY_CONSTANT\n        )\n        default_slope = -CRITERION_PENALTY_SLOPE\n    else:\n        raise ValueError()\n\n    constant = error_penalty.get(\"constant\", default_constant)\n    slope = error_penalty.get(\"slope\", default_slope)\n\n    return constant, slope\n"
  },
  {
    "path": "src/optimagic/optimization/fun_value.py",
    "content": "import functools\nfrom abc import ABC, abstractmethod\nfrom dataclasses import dataclass\nfrom typing import Any, Callable, ParamSpec\n\nimport numpy as np\nfrom numpy.typing import NDArray\nfrom pybaum import tree_just_flatten\n\nfrom optimagic.exceptions import InvalidFunctionError\nfrom optimagic.parameters.tree_registry import get_registry\nfrom optimagic.typing import AggregationLevel, PyTree, Scalar\nfrom optimagic.utilities import isscalar\n\n\n@dataclass(frozen=True)\nclass FunctionValue:\n    value: float | PyTree\n    info: dict[str, Any] | None = None\n\n\nclass SpecificFunctionValue(FunctionValue, ABC):\n    @abstractmethod\n    def internal_value(\n        self, solver_type: AggregationLevel\n    ) -> float | NDArray[np.float64]:\n        pass\n\n\n@dataclass(frozen=True)\nclass ScalarFunctionValue(SpecificFunctionValue):\n    value: Scalar\n    info: dict[str, Any] | None = None\n\n    def __post_init__(self) -> None:\n        if not isscalar(self.value):\n            raise InvalidFunctionError(\n                f\"Scalar objective values need to be scalars, not: {type(self.value)}. \"\n                \"If you meant to provide a scalar objective function, make sure it \"\n                \"returns a scalar value. 
If you meant to provide a least_squares or \"\n                \"likelihood function, use the mark.least_squares or mark.likelihood \"\n                \"decorators.\"\n            )\n\n    def internal_value(self, solver_type: AggregationLevel) -> float:\n        if solver_type == AggregationLevel.SCALAR:\n            val = float(self.value)\n        else:\n            raise InvalidFunctionError(\n                f\"You are using a {solver_type.value} optimizer but provided a \"\n                \"scalar objective function.\"\n            )\n        return val\n\n\n@dataclass(frozen=True)\nclass LeastSquaresFunctionValue(SpecificFunctionValue):\n    value: PyTree\n    info: dict[str, Any] | None = None\n\n    def __post_init__(self) -> None:\n        if isscalar(self.value):\n            raise InvalidFunctionError(\n                \"Least squares objective values cannot be scalars. Your value has \"\n                f\"scalar type: {type(self.value)}. If you meant to provide a least \"\n                \"squares objective function, make sure it does not have a scalar value.\"\n                \" If you meant to provide a scalar function, use the mark.scalar \"\n                \"decorator.\"\n            )\n\n    def internal_value(\n        self, solver_type: AggregationLevel\n    ) -> float | NDArray[np.float64]:\n        resid = _get_flat_value(self.value)\n\n        val: float | NDArray[np.float64]\n\n        if solver_type == AggregationLevel.LEAST_SQUARES:\n            val = resid\n        elif solver_type == AggregationLevel.LIKELIHOOD:\n            val = resid**2\n        else:\n            val = float(resid @ resid)\n        return val\n\n\n@dataclass(frozen=True)\nclass LikelihoodFunctionValue(SpecificFunctionValue):\n    value: PyTree\n    info: dict[str, Any] | None = None\n\n    def __post_init__(self) -> None:\n        if isscalar(self.value):\n            raise InvalidFunctionError(\n                \"Likelihood objective values cannot be scalars. 
Your value has scalar \"\n                f\"type: {type(self.value)}. If you meant to provide a likelihood \"\n                \"objective function, make sure it does not have a scalar value. If you \"\n                \"meant to provide a scalar function, use the mark.scalar decorator.\"\n            )\n\n    def internal_value(\n        self, solver_type: AggregationLevel\n    ) -> float | NDArray[np.float64]:\n        loglikes = _get_flat_value(self.value)\n\n        val: float | NDArray[np.float64]\n\n        if solver_type == AggregationLevel.LIKELIHOOD:\n            val = loglikes\n        elif solver_type == AggregationLevel.SCALAR:\n            val = float(np.sum(loglikes))\n        else:\n            raise InvalidFunctionError(\n                \"You are using a least_squares optimizer but provided a \"\n                \"likelihood objective function.\"\n            )\n        return val\n\n\ndef _get_flat_value(value: PyTree) -> NDArray[np.float64]:\n    \"\"\"Flatten a PyTree value to a 1d numpy array with multiple fast paths.\"\"\"\n    if isinstance(value, np.ndarray) and value.ndim == 1:\n        flat = value\n    elif isinstance(value, np.ndarray):\n        flat = value.flatten()\n    else:\n        registry = get_registry(extended=True)\n        flat = tree_just_flatten(value, registry=registry)\n\n    flat_arr = np.asarray(flat, dtype=np.float64)\n    return flat_arr\n\n\ndef convert_fun_output_to_function_value(\n    raw: Scalar | PyTree | FunctionValue, problem_type: AggregationLevel\n) -> SpecificFunctionValue:\n    out: FunctionValue\n    if problem_type == AggregationLevel.SCALAR:\n        out = _convert_output_to_scalar_function_value(raw)\n    elif problem_type == AggregationLevel.LEAST_SQUARES:\n        out = _convert_output_to_least_squares_function_value(raw)\n    elif problem_type == AggregationLevel.LIKELIHOOD:\n        out = _convert_output_to_likelihood_function_value(raw)\n    return out\n\n\ndef 
_convert_output_to_scalar_function_value(\n    raw: Scalar | FunctionValue,\n) -> ScalarFunctionValue:\n    if isinstance(raw, ScalarFunctionValue):\n        out = raw\n    elif isinstance(raw, FunctionValue):\n        out = ScalarFunctionValue(value=raw.value, info=raw.info)\n    else:\n        out = ScalarFunctionValue(value=raw)\n    return out\n\n\ndef _convert_output_to_least_squares_function_value(\n    raw: PyTree | FunctionValue,\n) -> LeastSquaresFunctionValue:\n    if isinstance(raw, LeastSquaresFunctionValue):\n        out = raw\n    elif isinstance(raw, FunctionValue):\n        out = LeastSquaresFunctionValue(value=raw.value, info=raw.info)\n    else:\n        out = LeastSquaresFunctionValue(value=raw)\n    return out\n\n\ndef _convert_output_to_likelihood_function_value(\n    raw: PyTree | FunctionValue,\n) -> LikelihoodFunctionValue:\n    if isinstance(raw, LikelihoodFunctionValue):\n        out = raw\n    elif isinstance(raw, FunctionValue):\n        out = LikelihoodFunctionValue(value=raw.value, info=raw.info)\n    else:\n        out = LikelihoodFunctionValue(value=raw)\n    return out\n\n\nP = ParamSpec(\"P\")\n\n\ndef enforce_return_type(\n    problem_type: AggregationLevel,\n) -> Callable[\n    [Callable[P, Scalar | PyTree | FunctionValue]], Callable[P, SpecificFunctionValue]\n]:\n    \"\"\"Enforce a strict return type for objective functions based on problem_type.\n\n    This has no effect if the function already returns the strictest possible type for\n    the problem_type but converts everything else to that type.\n\n    \"\"\"\n\n    def decorator_enforce(\n        func: Callable[P, Scalar | PyTree | FunctionValue],\n    ) -> Callable[P, SpecificFunctionValue]:\n        if problem_type == AggregationLevel.SCALAR:\n\n            @functools.wraps(func)\n            def wrapper_enforce(\n                *args: P.args, **kwargs: P.kwargs\n            ) -> ScalarFunctionValue:\n                raw = func(*args, **kwargs)\n                return 
_convert_output_to_scalar_function_value(raw)\n        elif problem_type == AggregationLevel.LEAST_SQUARES:\n\n            @functools.wraps(func)\n            def wrapper_enforce(\n                *args: P.args, **kwargs: P.kwargs\n            ) -> LeastSquaresFunctionValue:\n                raw = func(*args, **kwargs)\n                return _convert_output_to_least_squares_function_value(raw)\n        elif problem_type == AggregationLevel.LIKELIHOOD:\n\n            @functools.wraps(func)\n            def wrapper_enforce(\n                *args: P.args, **kwargs: P.kwargs\n            ) -> LikelihoodFunctionValue:\n                raw = func(*args, **kwargs)\n                return _convert_output_to_likelihood_function_value(raw)\n\n        return wrapper_enforce\n\n    return decorator_enforce\n\n\ndef enforce_return_type_with_jac(\n    problem_type: AggregationLevel,\n) -> Callable[\n    [Callable[P, tuple[Scalar | PyTree | FunctionValue, PyTree]]],\n    Callable[P, tuple[SpecificFunctionValue, PyTree]],\n]:\n    \"\"\"Enforce a strict return type for fun_and_jac based on problem_type.\n\n    This has no effect if the first return value of the function already has the\n    strictest possible type for the problem_type but converts everything else to that\n    type. 
The second return value stays unchanged.\n\n    \"\"\"\n\n    def decorator_enforce(\n        func: Callable[P, tuple[Scalar | PyTree | FunctionValue, PyTree]],\n    ) -> Callable[P, tuple[SpecificFunctionValue, PyTree]]:\n        if problem_type == AggregationLevel.SCALAR:\n\n            @functools.wraps(func)\n            def wrapper_enforce(\n                *args: P.args, **kwargs: P.kwargs\n            ) -> tuple[ScalarFunctionValue, PyTree]:\n                raw = func(*args, **kwargs)\n                return (_convert_output_to_scalar_function_value(raw[0]), raw[1])\n        elif problem_type == AggregationLevel.LEAST_SQUARES:\n\n            @functools.wraps(func)\n            def wrapper_enforce(\n                *args: P.args, **kwargs: P.kwargs\n            ) -> tuple[LeastSquaresFunctionValue, PyTree]:\n                raw = func(*args, **kwargs)\n                return (_convert_output_to_least_squares_function_value(raw[0]), raw[1])\n        elif problem_type == AggregationLevel.LIKELIHOOD:\n\n            @functools.wraps(func)\n            def wrapper_enforce(\n                *args: P.args, **kwargs: P.kwargs\n            ) -> tuple[LikelihoodFunctionValue, PyTree]:\n                raw = func(*args, **kwargs)\n                return (_convert_output_to_likelihood_function_value(raw[0]), raw[1])\n\n        return wrapper_enforce\n\n    return decorator_enforce\n"
  },
  {
    "path": "src/optimagic/optimization/history.py",
    "content": "import warnings\nfrom dataclasses import dataclass\nfrom functools import partial\nfrom typing import Any, Callable, Iterable, Literal\n\nimport numpy as np\nimport pandas as pd\nfrom numpy.typing import NDArray\nfrom pybaum import leaf_names, tree_just_flatten\n\nfrom optimagic.parameters.tree_registry import get_registry\nfrom optimagic.timing import CostModel\nfrom optimagic.typing import Direction, EvalTask, PyTree\n\n\n@dataclass(frozen=True)\nclass HistoryEntry:\n    params: PyTree\n    fun: float | None\n    start_time: float\n    stop_time: float\n    task: EvalTask\n\n\nclass History:\n    # TODO: add counters for the relevant evaluations\n    def __init__(\n        self,\n        direction: Direction,\n        params: list[PyTree] | None = None,\n        fun: list[float | None] | None = None,\n        start_time: list[float] | None = None,\n        stop_time: list[float] | None = None,\n        batches: list[int] | None = None,\n        task: list[EvalTask] | None = None,\n    ) -> None:\n        \"\"\"Initialize a history.\n\n        The history must know the direction of the optimization problem in order to\n        correctly return monotone sequences. 
The history can be initialized empty, for\n        example for usage during an optimization process, or with data, for example to\n        recover a history from a log.\n\n        \"\"\"\n        _validate_args_are_all_none_or_lists_of_same_length(\n            params, fun, start_time, stop_time, batches, task\n        )\n\n        self.direction = direction\n        self._params = params if params is not None else []\n        self._fun = fun if fun is not None else []\n        self._start_time = start_time if start_time is not None else []\n        self._stop_time = stop_time if stop_time is not None else []\n        self._batches = batches if batches is not None else []\n        self._task = task if task is not None else []\n\n    # ==================================================================================\n    # Methods to add entries to the history\n    # ==================================================================================\n\n    def add_entry(self, entry: HistoryEntry, batch_id: int | None = None) -> None:\n        if batch_id is None:\n            batch_id = self._get_next_batch_id()\n        self._params.append(entry.params)\n        self._fun.append(entry.fun)\n        self._start_time.append(entry.start_time)\n        self._stop_time.append(entry.stop_time)\n        self._batches.append(batch_id)\n        self._task.append(entry.task)\n\n    def add_batch(\n        self, batch: list[HistoryEntry], batch_size: int | None = None\n    ) -> None:\n        # The naming is complicated here:\n        # batch refers to the entries to be added to the history in one go\n        # batch_size is a property of a parallelizing algorithm that influences how\n        # the batch_ids are assigned. 
It is not the same as the length of the batch.\n        if batch_size is None:\n            batch_size = len(batch)\n\n        start = self._get_next_batch_id()\n        n_batches = int(np.ceil(len(batch) / batch_size))\n        ids = np.repeat(np.arange(start, start + n_batches), batch_size)[: len(batch)]\n\n        for entry, id in zip(batch, ids, strict=False):\n            self.add_entry(entry, id)\n\n    def _get_next_batch_id(self) -> int:\n        if not self._batches:\n            batch = 0\n        else:\n            batch = self._batches[-1] + 1\n        return batch\n\n    # ==================================================================================\n    # Properties and methods to access the history\n    # ==================================================================================\n\n    # Function data, function value, and monotone function value\n    # ----------------------------------------------------------------------------------\n\n    def fun_data(self, cost_model: CostModel, monotone: bool = False) -> pd.DataFrame:\n        \"\"\"Return the function value data.\n\n        Args:\n            cost_model: The cost model that is used to calculate the time measure.\n            monotone: Whether to return the monotone function values. Defaults to False.\n\n        Returns:\n            pd.DataFrame: The function value data. The columns are: 'fun', 'time' and\n                'task'. If monotone is False, value is the fun value, otherwise the\n                monotone function value. 
If dropna is True, rows with missing values\n                are dropped.\n\n        \"\"\"\n        if monotone:\n            fun = self.monotone_fun\n        else:\n            fun = np.array(self.fun, dtype=np.float64)  # converts None to nan\n\n        timings = self._get_total_timings(cost_model)\n        task = _task_to_categorical(self.task)\n\n        if not self._is_serial():\n            # In the non-serial case, we take the batching into account and reduce\n            # timings and fun to one value per batch.\n            timings = _apply_reduction_to_batches(\n                data=timings,\n                batch_ids=self.batches,\n                reduction_function=cost_model.aggregate_batch_time,\n            )\n\n            min_or_max = (\n                np.nanmin if self.direction == Direction.MINIMIZE else np.nanmax\n            )\n            fun = _apply_reduction_to_batches(\n                data=fun,\n                batch_ids=self.batches,\n                reduction_function=min_or_max,  # type: ignore[arg-type]\n            )\n\n            # Verify that tasks are homogeneous in each batch, and select first if true.\n            tasks_and_batches = pd.DataFrame({\"task\": task, \"batches\": self.batches})\n            grouped_tasks = tasks_and_batches.groupby(\"batches\")[\"task\"]\n            if not grouped_tasks.nunique().eq(1).all():\n                raise ValueError(\"Tasks are not homogeneous in each batch.\")\n\n            task = grouped_tasks.first().reset_index(drop=True)\n\n        time = np.cumsum(timings)\n        return pd.DataFrame({\"fun\": fun, \"time\": time, \"task\": task})\n\n    @property\n    def fun(self) -> list[float | None]:\n        return self._fun\n\n    @property\n    def monotone_fun(self) -> NDArray[np.float64]:\n        \"\"\"The monotone function value of the history.\n\n        If the value is None, the output at that position is nan.\n\n        \"\"\"\n        return 
_calculate_monotone_sequence(self.fun, direction=self.direction)\n\n    # Acceptance\n    # ----------------------------------------------------------------------------------\n\n    @property\n    def is_accepted(self) -> NDArray[np.bool_]:\n        \"\"\"Boolean indicator whether a function value is accepted.\n\n        A function value is accepted if it is smaller (or equal) than the monotone\n        function value counterpart in the case of minimization, or larger (or equal) in\n        the case of maximization. If the value is None, the output at that position is\n        False.\n\n        \"\"\"\n        fun_arr = np.array(self.fun, dtype=np.float64)\n        if self.direction == Direction.MINIMIZE:\n            return fun_arr <= self.monotone_fun\n        elif self.direction == Direction.MAXIMIZE:\n            return fun_arr >= self.monotone_fun\n\n    # Parameter data, params, flat params, and flat params names\n    # ----------------------------------------------------------------------------------\n\n    def params_data(\n        self, dropna: bool = False, collapse_batches: bool = False\n    ) -> pd.DataFrame:\n        \"\"\"Return the parameter data.\n\n        Args:\n            dropna: Whether to drop rows with missing function values. These correspond\n                to parameters that were used to calculate pure jacobians. Defaults to\n                False.\n            collapse_batches: Whether to collapse the batches and only keep the\n                parameters that led to the minimal (or maximal) function value in each\n                batch. Defaults to False.\n\n        Returns:\n            pd.DataFrame: The parameter data. 
The columns are: 'name' (the parameter\n                names), 'value' (the parameter values), 'task' (the task for which the\n                parameter was used), and 'counter' (a counter that is unique for each\n                row).\n\n        \"\"\"\n        wide = pd.DataFrame(self.flat_params, columns=self.flat_param_names)\n        wide[\"task\"] = _task_to_categorical(self.task)\n        wide[\"fun\"] = self.fun\n\n        # If requested, we collapse the batches and only keep the parameters that led to\n        # the minimal (or maximal) function value in each batch.\n        if collapse_batches and not self._is_serial():\n            wide[\"batches\"] = self.batches\n\n            # Verify that tasks are homogeneous in each batch\n            if not wide.groupby(\"batches\")[\"task\"].nunique().eq(1).all():\n                raise ValueError(\"Tasks are not homogeneous in each batch.\")\n\n            # We fill nans with inf or -inf to make sure that the idxmin/idxmax is\n            # well-defined, since there is the possibility that all fun values are nans\n            # in a batch.\n            if self.direction == Direction.MINIMIZE:\n                loc = (\n                    wide.assign(fun_without_nan=wide[\"fun\"].fillna(np.inf))\n                    .groupby(\"batches\")[\"fun_without_nan\"]\n                    .idxmin()\n                )\n            elif self.direction == Direction.MAXIMIZE:\n                loc = (\n                    wide.assign(fun_without_nan=wide[\"fun\"].fillna(-np.inf))\n                    .groupby(\"batches\")[\"fun_without_nan\"]\n                    .idxmax()\n                )\n\n            wide = wide.loc[loc].drop(columns=\"batches\")\n\n        # We drop rows with missing values if requested. These correspond to parameters\n        # that were used to calculate pure jacobians. 
This step must be done before\n        # dropping the fun column and before setting the counter.\n        if dropna:\n            wide = wide.dropna(subset=\"fun\")\n\n        wide[\"counter\"] = np.arange(len(wide))\n\n        long = pd.melt(\n            wide,\n            var_name=\"name\",\n            value_name=\"value\",\n            id_vars=[\"task\", \"counter\", \"fun\"],\n        )\n\n        data = long.reindex(columns=[\"counter\", \"name\", \"value\", \"task\", \"fun\"])\n\n        return data.set_index([\"counter\", \"name\"]).sort_index()\n\n    @property\n    def params(self) -> list[PyTree]:\n        return self._params\n\n    @property\n    def flat_params(self) -> list[list[float]]:\n        return _get_flat_params(self._params)\n\n    @property\n    def flat_param_names(self) -> list[str]:\n        return _get_flat_param_names(param=self._params[0])\n\n    # Time\n    # ----------------------------------------------------------------------------------\n\n    def _get_total_timings(\n        self, cost_model: CostModel | Literal[\"wall_time\"]\n    ) -> NDArray[np.float64]:\n        \"\"\"Return the total timings across all tasks.\n\n        Args:\n            cost_model: The cost model that is used to calculate the time measure. 
If\n                \"wall_time\", the wall time is returned.\n\n        Returns:\n            np.ndarray: The sum of the timings across all tasks.\n\n        \"\"\"\n        if not isinstance(cost_model, CostModel) and cost_model != \"wall_time\":\n            raise TypeError(\"cost_model must be a CostModel or 'wall_time'.\")\n\n        if cost_model == \"wall_time\":\n            return np.array(self.stop_time, dtype=np.float64) - self.start_time[0]\n\n        fun_time = self._get_timings_per_task(\n            task=EvalTask.FUN, cost_factor=cost_model.fun\n        )\n        jac_time = self._get_timings_per_task(\n            task=EvalTask.JAC, cost_factor=cost_model.jac\n        )\n        fun_and_jac_time = self._get_timings_per_task(\n            task=EvalTask.FUN_AND_JAC, cost_factor=cost_model.fun_and_jac\n        )\n\n        return fun_time + jac_time + fun_and_jac_time\n\n    def _get_timings_per_task(\n        self, task: EvalTask, cost_factor: float | None\n    ) -> NDArray[np.float64]:\n        \"\"\"Return the time measure per task.\n\n        Args:\n            task: The task for which the time is calculated.\n            cost_factor: The cost factor used to calculate the time. If None, the time\n                is the difference between the start and stop time, otherwise the time\n                is given by the cost factor.\n\n        Returns:\n            np.ndarray: The time per task. 
For entries where the task is not the\n                requested task, the time is 0.\n\n        \"\"\"\n        task_mask = np.array([1 if t == task else 0 for t in self.task])\n        factor: float | NDArray[np.float64]\n        if cost_factor is None:\n            factor = np.array(self.stop_time, dtype=np.float64) - np.array(\n                self.start_time, dtype=np.float64\n            )\n        else:\n            factor = cost_factor\n\n        return factor * task_mask\n\n    @property\n    def start_time(self) -> list[float]:\n        return self._start_time\n\n    @property\n    def stop_time(self) -> list[float]:\n        return self._stop_time\n\n    # Batches and fast_path\n    # ----------------------------------------------------------------------------------\n\n    @property\n    def batches(self) -> list[int]:\n        return self._batches\n\n    def _is_serial(self) -> bool:\n        return np.array_equal(self.batches, np.arange(len(self.batches)))\n\n    # Tasks\n    # ----------------------------------------------------------------------------------\n\n    @property\n    def task(self) -> list[EvalTask]:\n        return self._task\n\n    # ==================================================================================\n    # Add deprecated dict access\n    # ==================================================================================\n\n    @property\n    def time(self) -> list[float]:\n        msg = (\n            \"The attribute `time` of History will be deprecated soon. Use the \"\n            \"`start_time` method instead.\"\n        )\n        warnings.warn(msg, FutureWarning)\n        arr = np.array(self._start_time)\n        return (arr - arr[0]).tolist()\n\n    @property\n    def criterion(self) -> list[float | None]:\n        msg = \"The attribute `criterion` of History is deprecated. 
Use `fun` instead.\"\n        warnings.warn(msg, FutureWarning)\n        return self.fun\n\n    @property\n    def runtime(self) -> list[float]:\n        msg = (\n            \"The attribute `runtime` of History will be deprecated soon. Use the \"\n            \"`start_time` method instead.\"\n        )\n        warnings.warn(msg, FutureWarning)\n        return self.time\n\n    def __getitem__(self, key: str) -> Any:\n        msg = \"dict-like access to History is deprecated. Use attribute access instead.\"\n        warnings.warn(msg, FutureWarning)\n        return getattr(self, key)\n\n\n# ======================================================================================\n# Functions directly used in History methods\n# ======================================================================================\n\n\ndef _get_flat_params(params: list[PyTree]) -> list[list[float]]:\n    fast_path = len(params) > 0 and _is_1d_array(params[0])\n    if fast_path:\n        flatten = lambda x: x.tolist()\n    else:\n        registry = get_registry(extended=True)\n        flatten = partial(tree_just_flatten, registry=registry)\n\n    return [flatten(p) for p in params]\n\n\ndef _get_flat_param_names(param: PyTree) -> list[str]:\n    fast_path = _is_1d_array(param)\n    if fast_path:\n        # Mypy raises an error here because .tolist() returns a str for zero-dimensional\n        # arrays, but the fast path is only taken for 1d arrays, so it can be ignored.\n        return np.arange(param.size).astype(str).tolist()\n\n    registry = get_registry(extended=True)\n    return leaf_names(param, registry=registry)\n\n\ndef _is_1d_array(param: PyTree) -> bool:\n    return isinstance(param, np.ndarray) and param.ndim == 1\n\n\ndef _calculate_monotone_sequence(\n    sequence: list[float | None], direction: Direction\n) -> NDArray[np.float64]:\n    sequence_arr = np.array(sequence, dtype=np.float64)  # converts None to nan\n    nan_mask = np.isnan(sequence_arr)\n\n    if direction == 
Direction.MINIMIZE:\n        sequence_arr[nan_mask] = np.inf\n        out = np.minimum.accumulate(sequence_arr)\n    elif direction == Direction.MAXIMIZE:\n        sequence_arr[nan_mask] = -np.inf\n        out = np.maximum.accumulate(sequence_arr)\n\n    out[nan_mask] = np.nan\n    return out\n\n\n# ======================================================================================\n# Misc\n# ======================================================================================\n\n\ndef _validate_args_are_all_none_or_lists_of_same_length(\n    *args: list[Any] | None,\n) -> None:\n    all_none = all(arg is None for arg in args)\n    all_list = all(isinstance(arg, list) for arg in args)\n\n    if not all_none:\n        if all_list:\n            unique_list_lengths = set(map(len, args))  # type: ignore[arg-type]\n\n            if len(unique_list_lengths) != 1:\n                raise ValueError(\"All list arguments must have the same length.\")\n\n        else:\n            raise ValueError(\"All arguments must be lists of the same length or None.\")\n\n\ndef _task_to_categorical(task: list[EvalTask]) -> \"pd.Series[str]\":\n    EvalTaskDtype = pd.CategoricalDtype(categories=[t.value for t in EvalTask])\n    return pd.Series([t.value for t in task], dtype=EvalTaskDtype)\n\n\ndef _apply_reduction_to_batches(\n    data: NDArray[np.float64],\n    batch_ids: list[int],\n    reduction_function: Callable[[Iterable[float]], float],\n) -> NDArray[np.float64]:\n    \"\"\"Apply a reduction operator on batches of data.\n\n    This function assumes that batch_ids are non-empty and sorted.\n\n    Args:\n        data: 1d array with data.\n        batch_ids: A list with batch ids whose length is equal to the size of data.\n            Values need to be sorted and can be repeated.\n        reduction_function: A reduction function that takes an iterable of floats as\n            input (e.g., a numpy.ndarray or list of floats) and returns a scalar. 
The\n            function must be able to handle NaN's.\n\n    Returns:\n        The transformed data. Has one entry per unique batch id, equal to the result of\n        applying the reduction function to the data of that batch.\n\n    \"\"\"\n    batch_starts, batch_stops = _get_batch_starts_and_stops(batch_ids)\n\n    batch_results: list[float] = []\n\n    for start, stop in zip(batch_starts, batch_stops, strict=True):\n        batch_data = data[start:stop]\n        batch_id = batch_ids[start]\n\n        try:\n            if np.isnan(batch_data).all():\n                reduced = np.nan\n            else:\n                reduced = reduction_function(batch_data)\n        except Exception as e:\n            msg = (\n                f\"Calling function {reduction_function.__name__} on batch {batch_id} \"\n                \"of the History raised an Exception. Please verify that \"\n                f\"{reduction_function.__name__} is well-defined, takes an iterable of \"\n                \"floats as input and returns a scalar. The function must be able to \"\n                \"handle NaN's.\"\n            )\n            raise ValueError(msg) from e\n\n        if not np.isscalar(reduced):\n            msg = (\n                f\"Function {reduction_function.__name__} did not return a scalar for \"\n                f\"batch {batch_id}. Please verify that {reduction_function.__name__} \"\n                \"returns a scalar when called on an iterable of floats. 
The function \"\n                \"must be able to handle NaN's.\"\n            )\n            raise ValueError(msg)\n\n        batch_results.append(float(reduced))  # type: ignore[arg-type,unused-ignore]\n\n    return np.array(batch_results, dtype=np.float64)\n\n\ndef _get_batch_starts_and_stops(batch_ids: list[int]) -> tuple[list[int], list[int]]:\n    \"\"\"Get start and stop indices of batches.\n\n    This function assumes that batch_ids are non-empty and sorted.\n\n    \"\"\"\n    ids_arr = np.array(batch_ids, dtype=np.int64)\n    indices = np.where(ids_arr[:-1] != ids_arr[1:])[0] + 1\n    list_indices: list[int] = indices.tolist()\n    starts = [0, *list_indices]\n    stops = [*starts[1:], len(batch_ids)]\n    return starts, stops\n"
  },
  {
    "path": "src/optimagic/optimization/internal_optimization_problem.py",
    "content": "import time\nimport warnings\nfrom copy import copy\nfrom dataclasses import asdict, dataclass, replace\nfrom typing import Any, Callable, Literal, cast\n\nimport numpy as np\nfrom numpy.typing import NDArray\nfrom typing_extensions import Self\n\nfrom optimagic.batch_evaluators import process_batch_evaluator\nfrom optimagic.differentiation.derivatives import first_derivative\nfrom optimagic.differentiation.numdiff_options import NumdiffOptions\nfrom optimagic.exceptions import UserFunctionRuntimeError, get_traceback\nfrom optimagic.logging.logger import LogStore\nfrom optimagic.logging.types import IterationState\nfrom optimagic.optimization.fun_value import (\n    LeastSquaresFunctionValue,\n    LikelihoodFunctionValue,\n    ScalarFunctionValue,\n    SpecificFunctionValue,\n)\nfrom optimagic.optimization.history import History, HistoryEntry\nfrom optimagic.parameters.bounds import Bounds\nfrom optimagic.parameters.conversion import Converter\nfrom optimagic.typing import (\n    AggregationLevel,\n    BatchEvaluator,\n    Direction,\n    ErrorHandling,\n    EvalTask,\n    PyTree,\n)\n\n\n@dataclass(frozen=True)\nclass InternalBounds(Bounds):\n    lower: NDArray[np.float64] | None\n    upper: NDArray[np.float64] | None\n    soft_lower: None = None\n    soft_upper: None = None\n\n\nclass InternalOptimizationProblem:\n    def __init__(\n        self,\n        fun: Callable[[PyTree], SpecificFunctionValue],\n        jac: Callable[[PyTree], PyTree] | None,\n        fun_and_jac: Callable[[PyTree], tuple[SpecificFunctionValue, PyTree]] | None,\n        converter: Converter,\n        solver_type: AggregationLevel,\n        direction: Direction,\n        bounds: InternalBounds,\n        numdiff_options: NumdiffOptions,\n        error_handling: ErrorHandling,\n        error_penalty_func: Callable[\n            [NDArray[np.float64]],\n            tuple[SpecificFunctionValue, NDArray[np.float64]],\n        ],\n        batch_evaluator: BatchEvaluator,\n        
linear_constraints: list[dict[str, Any]] | None,\n        nonlinear_constraints: list[dict[str, Any]] | None,\n        logger: LogStore[Any, Any] | None,\n        # TODO: add hess and hessp\n    ):\n        self._fun = fun\n        self._jac = jac\n        self._fun_and_jac = fun_and_jac\n        self._converter = converter\n        self._solver_type = solver_type\n        self._direction = direction\n        self._bounds = bounds\n        self._numdiff_options = numdiff_options\n        self._error_handling = error_handling\n        self._error_penalty_func = error_penalty_func\n        self._batch_evaluator = batch_evaluator\n        self._history = History(direction)\n        self._linear_constraints = linear_constraints\n        self._nonlinear_constraints = nonlinear_constraints\n        self._logger = logger\n        self._step_id: int | None = None\n\n    # ==================================================================================\n    # Public methods used by optimizers\n    # ==================================================================================\n\n    def fun(self, x: NDArray[np.float64]) -> float | NDArray[np.float64]:\n        \"\"\"Evaluate the objective function at x.\n\n        Args:\n            x: The parameter vector at which to evaluate the objective function.\n\n        Returns:\n            The function value at x. This is a scalar for scalar problems and an array\n                for least squares  or likelihood problems.\n\n        \"\"\"\n        fun_value, hist_entry = self._evaluate_fun(x)\n        self._history.add_entry(hist_entry)\n        return fun_value\n\n    def jac(self, x: NDArray[np.float64]) -> NDArray[np.float64]:\n        \"\"\"Evaluate the first derivative at x.\n\n        Args:\n            x: The parameter vector at which to evaluate the first derivative.\n\n        Returns:\n            The first derivative at x. 
This is a 1d array for scalar problems (the\n                gradient) and a 2d array for least squares or likelihood problems (the\n                Jacobian).\n\n        \"\"\"\n        jac_value, hist_entry = self._evaluate_jac(x)\n        self._history.add_entry(hist_entry)\n        return jac_value\n\n    def fun_and_jac(\n        self, x: NDArray[np.float64]\n    ) -> tuple[float | NDArray[np.float64], NDArray[np.float64]]:\n        \"\"\"Simultaneously evaluate the objective function and its first derivative.\n\n        See .fun and .jac for details.\n\n        \"\"\"\n        fun_and_jac_value, hist_entry = self._evaluate_fun_and_jac(x)\n        self._history.add_entry(hist_entry)\n        return fun_and_jac_value\n\n    def batch_fun(\n        self,\n        x_list: list[NDArray[np.float64]],\n        n_cores: int,\n        batch_size: int | None = None,\n    ) -> list[float | NDArray[np.float64]]:\n        \"\"\"Parallelized batch version of .fun.\n\n        Args:\n            x_list: A list of parameter vectors at which to evaluate the objective\n                function.\n            n_cores: The number of cores to use for the parallel evaluation.\n            batch_size: Batch size that can be used by some algorithms to simulate\n                the behavior under parallelization on more cores than are actually\n                available. Only used by `criterion_plots` and benchmark plots.\n\n        Returns:\n            A list of function values at the points in x_list. 
See .fun for details.\n\n        \"\"\"\n        batch_size = n_cores if batch_size is None else batch_size\n        batch_result = self._batch_evaluator(\n            func=self._evaluate_fun,\n            arguments=x_list,\n            n_cores=n_cores,\n            # This should always be raise because errors are already handled\n            error_handling=\"raise\",\n        )\n        fun_values = [result[0] for result in batch_result]\n        hist_entries = [result[1] for result in batch_result]\n        self._history.add_batch(hist_entries, batch_size)\n\n        return fun_values\n\n    def batch_jac(\n        self,\n        x_list: list[NDArray[np.float64]],\n        n_cores: int,\n        batch_size: int | None = None,\n    ) -> list[NDArray[np.float64]]:\n        \"\"\"Parallelized batch version of .jac.\n\n        Args:\n            x_list: A list of parameter vectors at which to evaluate the first\n                derivative.\n            n_cores: The number of cores to use for the parallel evaluation.\n            batch_size: Batch size that can be used by some algorithms to simulate\n                the behavior under parallelization on more cores than are actually\n                available. Only used by `criterion_plots` and benchmark plots.\n\n        Returns:\n            A list of first derivatives at the points in x_list. 
See .jac for details.\n\n        \"\"\"\n        batch_size = n_cores if batch_size is None else batch_size\n\n        batch_result = self._batch_evaluator(\n            func=self._evaluate_jac,\n            arguments=x_list,\n            n_cores=n_cores,\n            # This should always be raise because errors are already handled\n            error_handling=\"raise\",\n        )\n        jac_values = [result[0] for result in batch_result]\n        hist_entries = [result[1] for result in batch_result]\n        self._history.add_batch(hist_entries, batch_size)\n        return jac_values\n\n    def batch_fun_and_jac(\n        self,\n        x_list: list[NDArray[np.float64]],\n        n_cores: int,\n        batch_size: int | None = None,\n    ) -> list[tuple[float | NDArray[np.float64], NDArray[np.float64]]]:\n        \"\"\"Parallelized batch version of .fun_and_jac.\n\n        Args:\n            x_list: A list of parameter vectors at which to evaluate the objective\n                function and its first derivative.\n            n_cores: The number of cores to use for the parallel evaluation.\n            batch_size: Batch size that can be used by some algorithms to simulate\n                the behavior under parallelization on more cores than are actually\n                available. Only used by `criterion_plots` and benchmark plots.\n\n        Returns:\n            A list of tuples containing the function value and the first derivative\n                at the points in x_list. 
See .fun_and_jac for details.\n\n        \"\"\"\n        batch_size = n_cores if batch_size is None else batch_size\n        batch_result = self._batch_evaluator(\n            func=self._evaluate_fun_and_jac,\n            arguments=x_list,\n            n_cores=n_cores,\n            # This should always be raise because errors are already handled\n            error_handling=\"raise\",\n        )\n        fun_and_jac_values = [result[0] for result in batch_result]\n        hist_entries = [result[1] for result in batch_result]\n        self._history.add_batch(hist_entries, batch_size)\n\n        return fun_and_jac_values\n\n    def exploration_fun(\n        self,\n        x_list: list[NDArray[np.float64]],\n        n_cores: int,\n        batch_size: int | None = None,\n    ) -> list[float]:\n        batch_size = n_cores if batch_size is None else batch_size\n        batch_result = self._batch_evaluator(\n            func=self._evaluate_exploration_fun,\n            arguments=x_list,\n            n_cores=n_cores,\n            # This should always be raise because errors are already handled\n            error_handling=\"raise\",\n        )\n        fun_values = [result[0] for result in batch_result]\n        hist_entries = [result[1] for result in batch_result]\n        self._history.add_batch(hist_entries, batch_size)\n\n        return fun_values\n\n    def with_new_history(self) -> Self:\n        new = copy(self)\n        new._history = History(self.direction)\n        return new\n\n    def with_error_handling(self, error_handling: ErrorHandling) -> Self:\n        new = copy(self)\n        new._error_handling = error_handling\n        return new\n\n    def with_step_id(self, step_id: int) -> Self:\n        new = copy(self)\n        new._step_id = step_id\n        return new\n\n    # ==================================================================================\n    # Public attributes\n    # 
==================================================================================\n\n    @property\n    def bounds(self) -> InternalBounds:\n        \"\"\"Bounds of the optimization problem.\"\"\"\n        return self._bounds\n\n    @property\n    def converter(self) -> Converter:\n        \"\"\"Converter between external and internal parameter representation.\n\n        The converter transforms parameters between their user-provided\n        representation (the external representation) and the flat numpy array used\n        by the optimizer (the internal representation).\n\n        This transformation includes:\n        - Flattening and unflattening of pytree structures.\n        - Applying parameter constraints via reparametrizations.\n        - Scaling and unscaling of parameter values.\n\n        The Converter object provides the following main attributes:\n\n        - ``params_to_internal``: Callable that converts a pytree of external\n          parameters to a flat numpy array of internal parameters.\n        - ``params_from_internal``: Callable that converts a flat numpy array of\n          internal parameters to a pytree of external parameters.\n        - ``derivative_to_internal``: Callable that converts the derivative\n          from the external parameter space to the internal space.\n        - ``has_transforming_constraints``: Boolean that is True if the conversion\n          involves constraints that are handled by reparametrization.\n\n        Examples:\n            The converter is particularly useful for algorithms that require initial\n            values in the internal (flat) parameter space, while allowing the user\n            to specify these values in the more convenient external (pytree) format.\n\n            Here's how an optimization algorithm might use the converter internally\n            to prepare parameters for the optimizer:\n\n                >>> from optimagic.optimization.internal_optimization_problem import (\n                
...     SphereExampleInternalOptimizationProblem\n                ... )\n                >>> import numpy as np\n                >>>\n                >>> # Optimization problem instance.\n                >>> problem = SphereExampleInternalOptimizationProblem()\n                >>>\n                >>> # User provided parameters in external format.\n                >>> user_params = np.array([1.0, 2.0, 3.0])\n                >>>\n                >>> # Convert to internal format for optimization algorithms.\n                >>> internal_params = problem.converter.params_to_internal(user_params)\n                >>> internal_params\n                array([1., 2., 3.])\n\n        \"\"\"\n        return self._converter\n\n    @property\n    def linear_constraints(self) -> list[dict[str, Any]] | None:\n        # TODO: write a docstring as soon as we actually use this\n        return self._linear_constraints\n\n    @property\n    def nonlinear_constraints(self) -> list[dict[str, Any]] | None:\n        \"\"\"Internal representation of nonlinear constraints.\n\n        Compared to the user provided constraints, we have done the following\n        transformations:\n\n        1. The constraint a <= g(x) <= b is transformed to h(x) >= 0, where h(x) is\n        - h(x) = g(x), if a == 0 and b == inf\n        - h(x) = g(x) - a, if a != 0 and b == inf\n        - h(x) = (g(x) - a, -g(x) + b) >= 0, if a != 0 and b != inf.\n\n        2. The equality constraint g(x) = v is transformed to h(x) >= 0, where\n        h(x) = (g(x) - v, -g(x) + v).\n\n        3. Vector constraints are transformed to a list of scalar constraints.\n        g(x) = (g1(x), g2(x), ...) >= 0 is transformed to (g1(x) >= 0, g2(x) >= 0, ...).\n\n        4. 
The constraint function (defined on a selection of user-facing parameters) is\n        transformed to be evaluated on the internal parameters.\n\n        \"\"\"\n        return self._nonlinear_constraints\n\n    @property\n    def direction(self) -> Direction:\n        \"\"\"Direction of the optimization problem.\"\"\"\n        return self._direction\n\n    @property\n    def history(self) -> History:\n        \"\"\"History container for the optimization problem.\"\"\"\n        return self._history\n\n    @property\n    def logger(self) -> LogStore[Any, Any] | None:\n        \"\"\"Logger for the optimization problem.\"\"\"\n        return self._logger\n\n    # ==================================================================================\n    # Implementation of the public functions; The main difference is that the lower-\n    # level implementations return a history entry instead of adding it to the history\n    # directly so they can be called in parallel!\n    # ==================================================================================\n\n    def _evaluate_fun(\n        self, x: NDArray[np.float64]\n    ) -> tuple[float | NDArray[np.float64], HistoryEntry]:\n        fun_value, hist_entry, log_entry = self._pure_evaluate_fun(x)\n\n        if self._logger:\n            self._logger.iteration_store.insert(log_entry)\n\n        return fun_value, hist_entry\n\n    def _evaluate_jac(\n        self, x: NDArray[np.float64]\n    ) -> tuple[NDArray[np.float64], HistoryEntry]:\n        if self._jac is not None:\n            jac_value, hist_entry, log_entry = self._pure_evaluate_jac(x)\n        else:\n            if self._fun_and_jac is not None:\n                (_, jac_value), hist_entry, log_entry = self._pure_evaluate_fun_and_jac(\n                    x\n                )\n            else:\n                (_, jac_value), hist_entry, log_entry = (\n                    self._pure_evaluate_numerical_fun_and_jac(x)\n                )\n\n            hist_entry 
= replace(hist_entry, task=EvalTask.JAC)\n\n        if self._logger:\n            self._logger.iteration_store.insert(log_entry)\n\n        return jac_value, hist_entry\n\n    def _evaluate_exploration_fun(\n        self, x: NDArray[np.float64]\n    ) -> tuple[float, HistoryEntry]:\n        fun_value, hist_entry, log_entry = self._pure_exploration_fun(x)\n\n        if self._logger:\n            self._logger.iteration_store.insert(log_entry)\n\n        return fun_value, hist_entry\n\n    def _evaluate_fun_and_jac(\n        self, x: NDArray[np.float64]\n    ) -> tuple[tuple[float | NDArray[np.float64], NDArray[np.float64]], HistoryEntry]:\n        if self._fun_and_jac is not None:\n            (fun_value, jac_value), hist_entry, log_entry = (\n                self._pure_evaluate_fun_and_jac(x)\n            )\n        elif self._jac is not None:\n            fun_value, hist_entry, log_entry_fun = self._pure_evaluate_fun(x)\n            jac_value, _, log_entry_jac = self._pure_evaluate_jac(x)\n            hist_entry = replace(hist_entry, task=EvalTask.FUN_AND_JAC)\n            log_entry = log_entry_fun.combine(log_entry_jac)\n        else:\n            (fun_value, jac_value), hist_entry, log_entry = (\n                self._pure_evaluate_numerical_fun_and_jac(x)\n            )\n\n        if self._logger:\n            self._logger.iteration_store.insert(log_entry)\n\n        return (fun_value, jac_value), hist_entry\n\n    # ==================================================================================\n    # Atomic evaluations of user provided functions or numerical derivatives\n    # ==================================================================================\n\n    def _pure_evaluate_fun(\n        self, x: NDArray[np.float64]\n    ) -> tuple[float | NDArray[np.float64], HistoryEntry, IterationState]:\n        \"\"\"Evaluate fun and handle exceptions.\n\n        This function does all the conversions from x to params and from\n        SpecificFunctionValue 
to the internal value, including a sign flip for\n        maximization.\n\n        If any exception occurs during the evaluation of fun and error handling is set\n        to CONTINUE, the fun value is replaced by a penalty value and a warning is\n        issued.\n\n        \"\"\"\n        start_time = time.perf_counter()\n        params = self._converter.params_from_internal(x)\n        traceback: None | str = None\n        try:\n            fun_value = self._fun(params)\n        except (KeyboardInterrupt, SystemExit):\n            raise\n        except Exception as e:\n            if self._error_handling in (\n                ErrorHandling.RAISE,\n                ErrorHandling.RAISE_STRICT,\n            ):\n                msg = \"An error occurred when evaluating fun during optimization.\"\n                raise UserFunctionRuntimeError(msg) from e\n            else:\n                traceback = get_traceback()\n                msg = (\n                    \"The following exception was caught when evaluating fun during \"\n                    \"optimization. 
The fun value was replaced by a penalty value to \"\n                    f\"continue with the optimization.:\\n\\n{traceback}\"\n                )\n                warnings.warn(msg)\n                fun_value, _ = self._error_penalty_func(x)\n\n        algo_fun_value, hist_fun_value = _process_fun_value(\n            value=fun_value, solver_type=self._solver_type, direction=self._direction\n        )\n        stop_time = time.perf_counter()\n\n        hist_entry = HistoryEntry(\n            params=params,\n            fun=hist_fun_value,\n            start_time=start_time,\n            stop_time=stop_time,\n            task=EvalTask.FUN,\n        )\n\n        log_entry = IterationState(\n            params=params,\n            timestamp=start_time,\n            scalar_fun=hist_fun_value,\n            valid=not bool(traceback),\n            raw_fun=fun_value,\n            step=self._step_id,\n            exceptions=traceback,\n        )\n\n        return algo_fun_value, hist_entry, log_entry\n\n    def _pure_evaluate_jac(\n        self, x: NDArray[np.float64]\n    ) -> tuple[NDArray[np.float64], HistoryEntry, IterationState]:\n        if self._jac is None:\n            raise ValueError(\"The jac function is not defined.\")\n\n        start_time = time.perf_counter()\n        traceback: None | str = None\n\n        params = self._converter.params_from_internal(x)\n        try:\n            jac_value = self._jac(params)\n        except (KeyboardInterrupt, SystemExit):\n            raise\n        except Exception as e:\n            if self._error_handling in (\n                ErrorHandling.RAISE,\n                ErrorHandling.RAISE_STRICT,\n            ):\n                msg = \"An error occurred when evaluating jac during optimization.\"\n                raise UserFunctionRuntimeError(msg) from e\n            else:\n                traceback = get_traceback()\n\n                msg = (\n                    \"The following exception was caught when evaluating jac 
during \"\n                    \"optimization. The jac value was replaced by a penalty value to \"\n                    f\"continue with the optimization.:\\n\\n{traceback}\"\n                )\n                warnings.warn(msg)\n                _, jac_value = self._error_penalty_func(x)\n\n        out_jac = _process_jac_value(\n            value=jac_value, direction=self._direction, converter=self._converter, x=x\n        )\n        _assert_finite_jac(\n            out_jac=out_jac, jac_value=jac_value, params=params, origin=\"jac\"\n        )\n\n        stop_time = time.perf_counter()\n\n        hist_entry = HistoryEntry(\n            params=params,\n            fun=None,\n            start_time=start_time,\n            stop_time=stop_time,\n            task=EvalTask.JAC,\n        )\n\n        log_entry = IterationState(\n            params=params,\n            timestamp=start_time,\n            scalar_fun=None,\n            valid=not bool(traceback),\n            raw_fun=None,\n            step=self._step_id,\n            exceptions=traceback,\n        )\n\n        return out_jac, hist_entry, log_entry\n\n    def _pure_evaluate_numerical_fun_and_jac(\n        self, x: NDArray[np.float64]\n    ) -> tuple[\n        tuple[float | NDArray[np.float64], NDArray[np.float64]],\n        HistoryEntry,\n        IterationState,\n    ]:\n        start_time = time.perf_counter()\n        traceback: None | str = None\n\n        def func(x: NDArray[np.float64]) -> SpecificFunctionValue:\n            p = self._converter.params_from_internal(x)\n            return self._fun(p)\n\n        try:\n            numdiff_res = first_derivative(\n                func,\n                x,\n                bounds=self._bounds,\n                **asdict(self._numdiff_options),\n                unpacker=lambda x: x.internal_value(self._solver_type),\n                error_handling=\"raise_strict\",\n            )\n            fun_value = numdiff_res.func_value\n            jac_value = 
numdiff_res.derivative\n        except (KeyboardInterrupt, SystemExit):\n            raise\n        except Exception as e:\n            if self._error_handling in (\n                ErrorHandling.RAISE,\n                ErrorHandling.RAISE_STRICT,\n            ):\n                msg = (\n                    \"An error occurred when evaluating a numerical derivative \"\n                    \"during optimization.\"\n                )\n                raise UserFunctionRuntimeError(msg) from e\n            else:\n                traceback = get_traceback()\n\n                msg = (\n                    \"The following exception was caught when calculating a \"\n                    \"numerical derivative during optimization. The jac value was \"\n                    \"replaced by a penalty value to continue with the optimization.\"\n                    f\":\\n\\n{traceback}\"\n                )\n                warnings.warn(msg)\n                fun_value, jac_value = self._error_penalty_func(x)\n\n        _assert_finite_jac(\n            out_jac=jac_value,\n            jac_value=jac_value,\n            params=self._converter.params_from_internal(x),\n            origin=\"numerical\",\n        )\n\n        algo_fun_value, hist_fun_value = _process_fun_value(\n            value=fun_value,  # type: ignore\n            solver_type=self._solver_type,\n            direction=self._direction,\n        )\n\n        if self._direction == Direction.MAXIMIZE:\n            jac_value = -jac_value\n\n        stop_time = time.perf_counter()\n\n        hist_entry = HistoryEntry(\n            params=self._converter.params_from_internal(x),\n            fun=hist_fun_value,\n            start_time=start_time,\n            stop_time=stop_time,\n            task=EvalTask.FUN_AND_JAC,\n        )\n\n        log_entry = IterationState(\n            params=self._converter.params_from_internal(x),\n            timestamp=start_time,\n            scalar_fun=hist_fun_value,\n            
valid=not bool(traceback),\n            raw_fun=fun_value,\n            step=self._step_id,\n            exceptions=traceback,\n        )\n\n        return (algo_fun_value, jac_value), hist_entry, log_entry\n\n    def _pure_exploration_fun(\n        self, x: NDArray[np.float64]\n    ) -> tuple[float, HistoryEntry, IterationState]:\n        start_time = time.perf_counter()\n        params = self._converter.params_from_internal(x)\n        traceback: None | str = None\n\n        try:\n            fun_value = self._fun(params)\n        except (KeyboardInterrupt, SystemExit):\n            raise\n        except Exception:\n            traceback = get_traceback()\n\n            msg = (\n                \"The following exception was caught when evaluating fun during the \"\n                \"exploration phase of a multistart optimization. The fun value was \"\n                \"replaced by a penalty value to continue with the \"\n                f\"optimization.:\\n\\n{traceback}\"\n            )\n            warnings.warn(msg)\n            fun_value, _ = self._error_penalty_func(x)\n\n        if not traceback:\n            algo_fun_value, hist_fun_value = _process_fun_value(\n                value=fun_value,\n                # For exploration we always need a scalar value\n                solver_type=AggregationLevel.SCALAR,\n                direction=self._direction,\n            )\n        else:\n            algo_fun_value = -np.inf\n            hist_fun_value = -np.inf\n            if self._direction == Direction.MAXIMIZE:\n                hist_fun_value = np.inf\n\n        stop_time = time.perf_counter()\n\n        hist_entry = HistoryEntry(\n            params=params,\n            fun=hist_fun_value,\n            start_time=start_time,\n            stop_time=stop_time,\n            task=EvalTask.EXPLORATION,\n        )\n\n        log_entry = IterationState(\n            params=params,\n            timestamp=start_time,\n            scalar_fun=hist_fun_value,\n       
     valid=not bool(traceback),\n            raw_fun=fun_value,\n            step=self._step_id,\n            exceptions=traceback,\n        )\n\n        return cast(float, algo_fun_value), hist_entry, log_entry\n\n    def _pure_evaluate_fun_and_jac(\n        self, x: NDArray[np.float64]\n    ) -> tuple[\n        tuple[float | NDArray[np.float64], NDArray[np.float64]],\n        HistoryEntry,\n        IterationState,\n    ]:\n        if self._fun_and_jac is None:\n            raise ValueError(\"The fun_and_jac function is not defined.\")\n\n        start_time = time.perf_counter()\n        traceback: None | str = None\n        params = self._converter.params_from_internal(x)\n\n        try:\n            fun_value, jac_value = self._fun_and_jac(params)\n        except (KeyboardInterrupt, SystemExit):\n            raise\n        except Exception as e:\n            if self._error_handling in (\n                ErrorHandling.RAISE,\n                ErrorHandling.RAISE_STRICT,\n            ):\n                msg = (\n                    \"An error occurred when evaluating fun_and_jac during optimization.\"\n                )\n                raise UserFunctionRuntimeError(msg) from e\n            else:\n                traceback = get_traceback()\n                msg = (\n                    \"The following exception was caught when evaluating fun_and_jac \"\n                    \"during optimization. 
The fun and jac values were replaced by \"\n                    f\"penalty values to continue with the optimization.:\\n\\n{traceback}\"\n                )\n                warnings.warn(msg)\n\n                fun_value, jac_value = self._error_penalty_func(x)\n\n        algo_fun_value, hist_fun_value = _process_fun_value(\n            value=fun_value, solver_type=self._solver_type, direction=self._direction\n        )\n\n        if traceback:\n            out_jac = jac_value\n        else:\n            out_jac = self._converter.derivative_to_internal(jac_value, x)\n\n        if self._direction == Direction.MAXIMIZE:\n            out_jac = -out_jac\n\n        _assert_finite_jac(\n            out_jac=out_jac, jac_value=jac_value, params=params, origin=\"fun_and_jac\"\n        )\n\n        stop_time = time.perf_counter()\n\n        hist_entry = HistoryEntry(\n            params=params,\n            fun=hist_fun_value,\n            start_time=start_time,\n            stop_time=stop_time,\n            task=EvalTask.FUN_AND_JAC,\n        )\n\n        log_entry = IterationState(\n            params=params,\n            timestamp=start_time,\n            scalar_fun=hist_fun_value,\n            valid=not bool(traceback),\n            raw_fun=fun_value,\n            step=self._step_id,\n            exceptions=traceback,\n        )\n\n        return (algo_fun_value, out_jac), hist_entry, log_entry\n\n\ndef _assert_finite_jac(\n    out_jac: NDArray[np.float64],\n    jac_value: PyTree,\n    params: PyTree,\n    origin: Literal[\"numerical\", \"jac\", \"fun_and_jac\"],\n) -> None:\n    \"\"\"Check for infinite and NaN values in the Jacobian and raise an error if found.\n\n    Args:\n        out_jac: internal processed Jacobian to check for finiteness.\n        jac_value: original Jacobian value as returned by the user function,\n        params: user-facing parameter representation at evaluation point.\n        origin: Source of Jacobian calculation, for the error message.\n\n  
  Raises:\n        UserFunctionRuntimeError:\n            If any infinite or NaN values are found in the Jacobian.\n\n    \"\"\"\n    if not np.all(np.isfinite(out_jac)):\n        if origin in (\"jac\", \"fun_and_jac\"):\n            msg = (\n                \"The optimization failed because the derivative provided via \"\n                f\"{origin} contains infinite or NaN values.\"\n                \"\\nPlease validate the derivative function.\"\n            )\n        elif origin == \"numerical\":\n            msg = (\n                \"The optimization failed because the numerical derivative \"\n                \"(computed using fun) contains infinite or NaN values.\"\n                \"\\nPlease validate the criterion function or try a different optimizer.\"\n            )\n        msg += (\n            f\"\\nParameters at evaluation point: {params}\\nJacobian values: {jac_value}\"\n        )\n        raise UserFunctionRuntimeError(msg)\n\n\ndef _process_fun_value(\n    value: SpecificFunctionValue,\n    solver_type: AggregationLevel,\n    direction: Direction,\n) -> tuple[float | NDArray[np.float64], float]:\n    \"\"\"Post-process a function value for use by the algorithm and as history entry.\n\n    The sign flip for maximization is only applied to the value that will be passed to\n    the algorithm.\n\n    Args:\n        value: The function value.\n        solver_type: The aggregation level of the solver.\n        direction: The direction of optimization.\n\n    Returns:\n        A tuple of the function value for the algorithm and the function value for the\n        history entry.\n\n    \"\"\"\n    algo_value = value.internal_value(solver_type)\n    history_value = cast(float, value.internal_value(AggregationLevel.SCALAR))\n    if direction == Direction.MAXIMIZE:\n        algo_value = -algo_value\n\n    return algo_value, history_value\n\n\ndef _process_jac_value(\n    value: SpecificFunctionValue,\n    direction: Direction,\n    converter: Converter,\n  
  x: NDArray[np.float64],\n) -> NDArray[np.float64]:\n    \"\"\"Post-process a Jacobian value for use by the algorithm.\n\n    Args:\n        value: The Jacobian value.\n        direction: The direction of optimization.\n        converter: The converter object.\n\n    Returns:\n        The Jacobian value for the algorithm.\n\n    \"\"\"\n    out_value = converter.derivative_to_internal(value, x)\n    if direction == Direction.MAXIMIZE:\n        out_value = -out_value\n\n    return out_value\n\n\nclass SphereExampleInternalOptimizationProblem(InternalOptimizationProblem):\n    \"\"\"Super simple example of an internal optimization problem.\n\n    This can be used to test algorithm wrappers or to familiarize yourself with the\n    internal optimization problem interface.\n\n    Args:\n\n    \"\"\"\n\n    def __init__(\n        self,\n        solver_type: AggregationLevel = AggregationLevel.SCALAR,\n        binding_bounds: bool = False,\n    ) -> None:\n        _fun_dict = {\n            AggregationLevel.SCALAR: lambda x: ScalarFunctionValue(x @ x),\n            AggregationLevel.LIKELIHOOD: lambda x: LikelihoodFunctionValue(x**2),\n            AggregationLevel.LEAST_SQUARES: lambda x: LeastSquaresFunctionValue(x),  # noqa: PLW0108\n        }\n\n        _jac_dict = {\n            AggregationLevel.SCALAR: lambda x: 2 * x,\n            AggregationLevel.LIKELIHOOD: lambda x: 2 * x,\n            AggregationLevel.LEAST_SQUARES: lambda x: np.eye(len(x)),\n        }\n\n        fun = _fun_dict[solver_type]\n        jac = _jac_dict[solver_type]\n        fun_and_jac = lambda x: (fun(x), jac(x))\n\n        converter = Converter(\n            params_to_internal=lambda x: x,\n            params_from_internal=lambda x: x,\n            derivative_to_internal=lambda x, x0: x,\n            has_transforming_constraints=False,\n        )\n\n        direction = Direction.MINIMIZE\n\n        if binding_bounds:\n            lb = np.arange(10, dtype=np.float64) - 7.0\n            ub = np.arange(10, 
dtype=np.float64) - 3.0\n            self._x_opt = np.array([-3, -2, -1, 0, 0, 0, 0, 0, 1, 2.0])\n        else:\n            lb = np.full(10, -10, dtype=np.float64)\n            ub = np.full(10, 10, dtype=np.float64)\n            self._x_opt = np.zeros(10)\n\n        bounds = InternalBounds(lb, ub)\n\n        numdiff_options = NumdiffOptions()\n\n        error_handling = ErrorHandling.RAISE\n\n        error_penalty_func = fun_and_jac\n\n        batch_evaluator = process_batch_evaluator(\"joblib\")\n\n        linear_constraints = None\n        nonlinear_constraints = None\n\n        logger = None\n\n        super().__init__(\n            fun=fun,\n            jac=jac,\n            fun_and_jac=fun_and_jac,\n            converter=converter,\n            solver_type=solver_type,\n            direction=direction,\n            bounds=bounds,\n            numdiff_options=numdiff_options,\n            error_handling=error_handling,\n            error_penalty_func=error_penalty_func,\n            batch_evaluator=batch_evaluator,\n            linear_constraints=linear_constraints,\n            nonlinear_constraints=nonlinear_constraints,\n            logger=logger,\n        )\n\n\nclass SphereExampleInternalOptimizationProblemWithConverter(\n    InternalOptimizationProblem\n):\n    \"\"\"Super simple example of an internal optimization problem with PyTree Converter.\n\n    Note: params should be a dict with key-value pairs `\"x{i}\" : val .\n    eg. 
`{'x0': 1, 'x1': 2, ...}`.\n\n    The converter.params_to_internal method converts tree like\n    `{'x0': 1, 'x1': 2, 'x2': 3 ...}` to flat array `[1,2,3 ...]` .\n\n    The converter.params_from_internal method converts flat array `[1,2,3 ...]`\n    to tree like `{'x0': 1, 'x1': 2, 'x2': 3 ...}`.\n\n    The converter.derivative_to_internal converts derivative trees\n    {'x0': 2,'x1': 4, } to flat arrays [2,4] and jacobian tree\n    `{  \"x0\": {\"x0\": 1, \"x1\": 0, },\n        \"x1\": {\"x0\": 0, \"x1\": 1, }`\n    to NDArray [[1, 0,], [0, 1, ],]. }.\n    This can be used to test algorithm wrappers or to familiarize yourself\n    with the internal optimization problem interface.\n\n    Args:\n\n    \"\"\"\n\n    def __init__(\n        self,\n        solver_type: AggregationLevel = AggregationLevel.SCALAR,\n        binding_bounds: bool = False,\n    ) -> None:\n        def sphere(params: PyTree) -> SpecificFunctionValue:\n            out = sum([params[f\"x{i}\"] ** 2 for i in range(len(params))])\n            return ScalarFunctionValue(out)\n\n        def ls_sphere(params: PyTree) -> SpecificFunctionValue:\n            out = [params[f\"x{i}\"] for i in range(len(params))]\n            return LeastSquaresFunctionValue(out)\n\n        def likelihood_sphere(params: PyTree) -> SpecificFunctionValue:\n            out = [params[f\"x{i}\"] ** 2 for i in range(len(params))]\n            return LikelihoodFunctionValue(out)\n\n        _fun_dict = {\n            AggregationLevel.SCALAR: sphere,\n            AggregationLevel.LIKELIHOOD: likelihood_sphere,\n            AggregationLevel.LEAST_SQUARES: ls_sphere,\n        }\n\n        def sphere_gradient(params: PyTree) -> PyTree:\n            return {f\"x{i}\": 2 * v for i, v in enumerate(params.values())}\n\n        def likelihood_sphere_gradient(params: PyTree) -> PyTree:\n            return {f\"x{i}\": 2 * v for i, v in enumerate(params.values())}\n\n        def ls_sphere_jac(params: PyTree) -> PyTree:\n            return 
{\n                f\"x{i}\": {f\"x{j}\": 1 if i == j else 0 for j in range(len(params))}\n                for i in range(len(params))\n            }\n\n        _jac_dict = {\n            AggregationLevel.SCALAR: sphere_gradient,\n            AggregationLevel.LIKELIHOOD: likelihood_sphere_gradient,\n            AggregationLevel.LEAST_SQUARES: ls_sphere_jac,\n        }\n\n        fun = _fun_dict[solver_type]\n        jac = _jac_dict[solver_type]\n        fun_and_jac = lambda x: (fun(x), jac(x))\n\n        def params_flatten(params: PyTree) -> NDArray[np.float64]:\n            return np.array([v for v in params.values()]).astype(float)\n\n        def params_unflatten(x: NDArray[np.float64]) -> PyTree:\n            return {f\"x{i}\": v for i, v in enumerate(x)}\n\n        def derivative_flatten(tree: PyTree, x: NDArray[np.float64]) -> Any:\n            if solver_type == AggregationLevel.LEAST_SQUARES:\n                out = [list(row.values()) for row in tree.values()]\n                return np.array(out)\n            else:\n                return params_flatten(tree)\n\n        converter = Converter(\n            params_to_internal=params_flatten,\n            params_from_internal=params_unflatten,\n            derivative_to_internal=derivative_flatten,\n            has_transforming_constraints=False,\n        )\n\n        direction = Direction.MINIMIZE\n\n        if binding_bounds:\n            lb = np.arange(10, dtype=np.float64) - 7.0\n            ub = np.arange(10, dtype=np.float64) - 3.0\n            self._x_opt = {\n                f\"x{i}\": x\n                for i, x in enumerate(np.array([-3, -2, -1, 0, 0, 0, 0, 0, 1, 2.0]))\n            }\n        else:\n            lb = np.full(10, -10, dtype=np.float64)\n            ub = np.full(10, 10, dtype=np.float64)\n            self._x_opt = {f\"x{i}\": x for i, x in enumerate(np.zeros(10))}\n\n        bounds = InternalBounds(lb, ub)\n\n        numdiff_options = NumdiffOptions()\n\n        error_handling = 
ErrorHandling.RAISE\n\n        error_penalty_func = fun_and_jac\n\n        batch_evaluator = process_batch_evaluator(\"joblib\")\n\n        linear_constraints = None\n        nonlinear_constraints = None\n\n        logger = None\n\n        super().__init__(\n            fun=fun,\n            jac=jac,\n            fun_and_jac=fun_and_jac,\n            converter=converter,\n            solver_type=solver_type,\n            direction=direction,\n            bounds=bounds,\n            numdiff_options=numdiff_options,\n            error_handling=error_handling,\n            error_penalty_func=error_penalty_func,\n            batch_evaluator=batch_evaluator,\n            linear_constraints=linear_constraints,\n            nonlinear_constraints=nonlinear_constraints,\n            logger=logger,\n        )\n"
  },
  {
    "path": "src/optimagic/optimization/multistart.py",
    "content": "\"\"\"Functions for multi start optimization a la TikTak.\n\nTikTak (`Arnoud, Guvenen, and Kleineberg\n<https://www.nber.org/system/files/working_papers/w26340/w26340.pdf>`_)\n\n is an algorithm for solving global optimization problems. It performs local searches\nfrom a set of carefully-selected points in the parameter space.\n\nFirst implemented in Python by Alisdair McKay (\n`GitHub Repository <https://github.com/amckay/TikTak>`_)\n\n\"\"\"\n\nimport warnings\nfrom dataclasses import dataclass, replace\nfrom typing import Literal\n\nimport numpy as np\nfrom numpy.typing import NDArray\nfrom scipy.stats import qmc, triang\n\nfrom optimagic.logging.logger import LogStore\nfrom optimagic.logging.types import StepStatus\nfrom optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult\nfrom optimagic.optimization.internal_optimization_problem import (\n    InternalBounds,\n    InternalOptimizationProblem,\n)\nfrom optimagic.optimization.multistart_options import InternalMultistartOptions\nfrom optimagic.optimization.optimization_logging import (\n    log_scheduled_steps_and_get_ids,\n)\nfrom optimagic.typing import AggregationLevel, ErrorHandling\nfrom optimagic.utilities import get_rng\n\n\ndef run_multistart_optimization(\n    local_algorithm: Algorithm,\n    internal_problem: InternalOptimizationProblem,\n    x: NDArray[np.float64],\n    sampling_bounds: InternalBounds,\n    options: InternalMultistartOptions,\n    logger: LogStore | None,\n    error_handling: ErrorHandling,\n) -> InternalOptimizeResult:\n    steps = determine_steps(options.n_samples, stopping_maxopt=options.stopping_maxopt)\n\n    scheduled_steps = log_scheduled_steps_and_get_ids(\n        steps=steps,\n        logger=logger,\n    )\n\n    if options.sample is not None:\n        sample = options.sample\n    else:\n        sample = _draw_exploration_sample(\n            x=x,\n            lower=sampling_bounds.lower,\n            upper=sampling_bounds.upper,\n         
   # -1 because we add start parameters\n            n_samples=options.n_samples - 1,\n            distribution=options.sampling_distribution,\n            method=options.sampling_method,\n            seed=options.seed,\n        )\n\n        sample = np.vstack([x.reshape(1, -1), sample])\n\n    if logger:\n        logger.step_store.update(\n            scheduled_steps[0], {\"status\": StepStatus.RUNNING.value}\n        )\n\n    exploration_res = run_explorations(\n        internal_problem=internal_problem,\n        sample=sample,\n        n_cores=options.n_cores,\n        step_id=scheduled_steps[0],\n    )\n\n    if logger:\n        logger.step_store.update(\n            scheduled_steps[0], {\"status\": StepStatus.COMPLETE.value}\n        )\n\n    scheduled_steps = scheduled_steps[1:]\n\n    sorted_sample = exploration_res.sorted_sample\n    sorted_values = exploration_res.sorted_values\n\n    stopping_maxopt = options.stopping_maxopt\n    if stopping_maxopt > len(sorted_sample):\n        n_skipped_steps = stopping_maxopt - len(sorted_sample)\n        stopping_maxopt = len(sorted_sample)\n        warnings.warn(\n            \"There are less valid starting points than requested optimizations. 
\"\n            \"The number of optimizations has been reduced from \"\n            f\"{options.stopping_maxopt} to {len(sorted_sample)}.\"\n        )\n        skipped_steps = scheduled_steps[-n_skipped_steps:]\n        scheduled_steps = scheduled_steps[:-n_skipped_steps]\n\n        if logger:\n            for step in skipped_steps:\n                new_status = StepStatus.SKIPPED.value\n                logger.step_store.update(step, {\"status\": new_status})\n\n    batched_sample = get_batched_optimization_sample(\n        sorted_sample=sorted_sample,\n        stopping_maxopt=stopping_maxopt,\n        batch_size=options.batch_size,\n    )\n\n    state = {\n        \"best_x\": sorted_sample[0],\n        \"best_y\": sorted_values[0],\n        \"best_res\": None,\n        \"x_history\": [],\n        \"y_history\": [],\n        \"result_history\": [],\n        \"start_history\": [],\n    }\n\n    convergence_criteria = {\n        \"xtol\": options.convergence_xtol_rel,\n        \"max_discoveries\": options.convergence_max_discoveries,\n    }\n\n    batch_evaluator = options.batch_evaluator\n\n    def single_optimization(x0, step_id):\n        \"\"\"Closure for running a single optimization, given a starting point.\"\"\"\n        problem = internal_problem.with_error_handling(error_handling)\n        res = local_algorithm.solve_internal_problem(problem, x0, step_id)\n        return res\n\n    opt_counter = 0\n    for batch in batched_sample:\n        weight = options.weight_func(opt_counter, stopping_maxopt)\n        starts = [weight * state[\"best_x\"] + (1 - weight) * x for x in batch]\n\n        arguments = [\n            {\"x0\": x, \"step_id\": id_}\n            for x, id_ in zip(starts, scheduled_steps[: len(batch)], strict=False)\n        ]\n        scheduled_steps = scheduled_steps[len(batch) :]\n\n        batch_results = batch_evaluator(\n            func=single_optimization,\n            arguments=arguments,\n            unpack_symbol=\"**\",\n            
n_cores=options.n_cores,\n            error_handling=options.error_handling,\n        )\n\n        state, is_converged = update_convergence_state(\n            current_state=state,\n            starts=starts,\n            results=batch_results,\n            convergence_criteria=convergence_criteria,\n            solver_type=local_algorithm.algo_info.solver_type,\n        )\n        opt_counter += len(batch)\n        if is_converged:\n            if logger:\n                for step in scheduled_steps:\n                    new_status = StepStatus.SKIPPED.value\n                    logger.step_store.update(step, {\"status\": new_status})\n            break\n\n    multistart_info = {\n        \"start_parameters\": state[\"start_history\"],\n        \"local_optima\": state[\"result_history\"],\n        \"exploration_sample\": sorted_sample,\n        \"exploration_results\": sorted_values,\n    }\n\n    raw_res = state[\"best_res\"]\n    res = replace(raw_res, multistart_info=multistart_info)\n\n    return res\n\n\ndef determine_steps(n_samples, stopping_maxopt):\n    \"\"\"Determine the number and type of steps for the multistart optimization.\n\n    This is mainly used to write them to the log. 
The number of steps is also\n    used if logging is False.\n\n    Args:\n        n_samples (int): Number of exploration points for the multistart optimization.\n        stopping_maxopt (int): Number of local optimizations.\n\n\n    Returns:\n        list: List of dictionaries with information on each step.\n\n    \"\"\"\n    exploration_step = {\n        \"type\": \"exploration\",\n        \"status\": \"running\",\n        \"name\": \"exploration\",\n        \"n_iterations\": n_samples,\n    }\n\n    steps = [exploration_step]\n    for i in range(stopping_maxopt):\n        optimization_step = {\n            \"type\": \"optimization\",\n            \"status\": \"scheduled\",\n            \"name\": f\"optimization_{i}\",\n        }\n        steps.append(optimization_step)\n    return steps\n\n\ndef _draw_exploration_sample(\n    x: NDArray[np.float64],\n    lower: NDArray[np.float64] | None,\n    upper: NDArray[np.float64] | None,\n    n_samples: int,\n    distribution: Literal[\"uniform\", \"triangular\"],\n    method: Literal[\"sobol\", \"random\", \"halton\", \"latin_hypercube\"],\n    seed: int | np.random.Generator | None,\n) -> NDArray[np.float64]:\n    \"\"\"Get a sample of parameter values for the first stage of the tiktak algorithm.\n\n    The sample is created randomly or using a low discrepancy sequence. Different\n    distributions are available.\n\n    Args:\n        x: Internal parameter vector of shape (n_params,).\n        lower: Vector of internal lower bounds of shape (n_params,).\n        upper: Vector of internal upper bounds of shape (n_params,).\n        n_samples: Number of sample points.\n        distribution: The distribution from which the exploration sample is\n            drawn. Allowed are \"uniform\" and \"triangular\". Defaults to \"uniform\".\n        method: The method used to draw the exploration sample. Allowed are\n            \"sobol\", \"random\", \"halton\", and \"latin_hypercube\". 
Defaults to \"sobol\".\n        seed: Random number seed or generator.\n\n    Returns:\n        Array of shape (n_samples, n_params). Each row represents a vector of parameter\n            values.\n\n    \"\"\"\n    if lower is None or upper is None:\n        raise ValueError(\"lower and upper bounds must be provided for multistart.\")\n\n    for name, bound in zip([\"lower\", \"upper\"], [lower, upper], strict=False):\n        if not np.isfinite(bound).all():\n            raise ValueError(\n                f\"multistart optimization requires finite {name}_bounds or \"\n                f\"soft_{name}_bounds for all parameters.\"\n            )\n\n    if method == \"sobol\":\n        # Draw `n` points from the open interval (lower, upper)^d.\n        # Note that scipy uses the half-open interval [lower, upper)^d internally.\n        # We apply a burn-in phase of 1, i.e. we skip the first point in the sequence\n        # and thus exclude the lower bound.\n        sampler = qmc.Sobol(d=len(lower), scramble=False, seed=seed)\n        _ = sampler.fast_forward(1)\n        sample_unscaled = sampler.random(n=n_samples)\n\n    elif method == \"halton\":\n        sampler = qmc.Halton(d=len(lower), scramble=False, seed=seed)\n        sample_unscaled = sampler.random(n=n_samples)\n\n    elif method == \"latin_hypercube\":\n        sampler = qmc.LatinHypercube(d=len(lower), strength=1, seed=seed)\n        sample_unscaled = sampler.random(n=n_samples)\n\n    elif method == \"random\":\n        rng = get_rng(seed)\n        sample_unscaled = rng.uniform(size=(n_samples, len(lower)))\n\n    if distribution == \"uniform\":\n        sample_scaled = qmc.scale(sample_unscaled, lower, upper)\n    elif distribution == \"triangular\":\n        sample_scaled = triang.ppf(\n            sample_unscaled,\n            c=(x - lower) / (upper - lower),\n            loc=lower,\n            scale=upper - lower,\n        )\n\n    return sample_scaled\n\n\n@dataclass(frozen=True)\nclass 
_InternalExplorationResult:\n    \"\"\"Exploration result of the multistart optimization.\n\n    Attributes:\n        sorted_values: List of sorted function values.\n        sorted_sample: 2d numpy array where each row is the internal parameter\n            vector corresponding to the sorted function values.\n\n    \"\"\"\n\n    sorted_values: list[float]\n    sorted_sample: NDArray[np.float64]\n\n\ndef run_explorations(\n    internal_problem: InternalOptimizationProblem,\n    sample: NDArray[np.float64],\n    n_cores: int,\n    step_id: int,\n) -> _InternalExplorationResult:\n    \"\"\"Do the function evaluations for the exploration phase.\n\n    Args:\n        internal_problem: The internal optimization problem.\n        sample: 2d numpy array where each row is a sampled internal\n            parameter vector.\n        batch_evaluator: See :ref:`batch_evaluators`.\n        n_cores: Number of cores.\n        step_id: The identifier of the exploration step.\n\n    Returns:\n        A data object containing\n            - sorted_values: List of sorted function values. Invalid function values are\n                excluded.\n            - sorted_sample: 2d numpy array where each row is the internal parameter\n                vector corresponding to the sorted function values.\n\n    \"\"\"\n    internal_problem = internal_problem.with_step_id(step_id)\n    x_list: list[NDArray[np.float64]] = list(sample)\n\n    raw_values = np.asarray(\n        internal_problem.exploration_fun(x_list, n_cores=n_cores), dtype=np.float64\n    )\n\n    is_valid = np.isfinite(raw_values)\n\n    if not is_valid.any():\n        raise RuntimeError(\n            \"All function evaluations of the exploration phase in a multistart \"\n            \"optimization are invalid. 
Check your code or the sampling bounds.\"\n        )\n\n    valid_values = raw_values[is_valid]\n    valid_sample = sample[is_valid]\n\n    # this sorts from low to high values; internal criterion and derivative took care\n    # of the sign switch.\n    sorting_indices = np.argsort(valid_values)\n\n    out = _InternalExplorationResult(\n        sorted_values=valid_values[sorting_indices].tolist(),\n        sorted_sample=valid_sample[sorting_indices],\n    )\n\n    return out\n\n\ndef get_batched_optimization_sample(sorted_sample, stopping_maxopt, batch_size):\n    \"\"\"Create a batched sample of internal parameters for the optimization phase.\n\n    Note that in the end the optimizations will not be started from those parameter\n    vectors but from a convex combination of that parameter vector and the\n    best parameter vector at the time when the optimization is started.\n\n    Args:\n        sorted_sample (np.ndarray): 2d numpy array with containing sorted internal\n            parameter vectors.\n        stopping_maxopt (int): Number of optimizations to run. 
If sample is shorter\n            than that, optimizations are run on all entries of the sample.\n        batch_size (int): Batch size.\n\n    Returns:\n        list: Nested list of parameter vectors from which an optimization is run.\n            The inner lists have length ``batch_size`` or shorter.\n\n    \"\"\"\n    n_batches = int(np.ceil(stopping_maxopt / batch_size))\n\n    start = 0\n    batched = []\n    for _ in range(n_batches):\n        stop = min(start + batch_size, len(sorted_sample), stopping_maxopt)\n        batched.append(list(sorted_sample[start:stop]))\n        start = stop\n    return batched\n\n\ndef update_convergence_state(\n    current_state, starts, results, convergence_criteria, solver_type\n):\n    \"\"\"Update the state of all quantities related to convergence.\n\n    Args:\n        current_state (dict): Dictionary with the entries:\n            - \"best_x\": The currently best parameter vector\n            - \"best_y\": The currently best function value\n            - \"best_res\": The currently best optimization result\n            - \"x_history\": The history of locally optimal parameters\n            - \"y_history\": The history of locally optimal function values.\n            - \"result_history\": The history of local optimization results\n            - \"start_history\": The history of start parameters\n        starts (list): List of starting points for local optimizations.\n        results (list): List of results from local optimizations.\n        convergence_criteria (dict): Dict with the entries \"xtol\" and \"max_discoveries\"\n        solver_type: The aggregation level of the local optimizer. 
Needed to\n            interpret the output of the internal criterion function.\n\n\n    Returns:\n        dict: The updated state, same entries as current_state.\n        bool: A bool that indicates if the optimizer has converged.\n\n    \"\"\"\n    # ==================================================================================\n    # unpack some variables\n    # ==================================================================================\n    xtol = convergence_criteria[\"xtol\"]\n    max_discoveries = convergence_criteria[\"max_discoveries\"]\n\n    best_x = current_state[\"best_x\"]\n    best_y = current_state[\"best_y\"]\n    best_res = current_state[\"best_res\"]\n\n    # ==================================================================================\n    # filter out optimizations that raised errors\n    # ==================================================================================\n    # get indices of local optimizations that did not fail\n    valid_indices = [i for i, res in enumerate(results) if not isinstance(res, str)]\n\n    # If all local optimizations failed, return early so we don't have to worry about\n    # index errors later.\n    if not valid_indices:\n        return current_state, False\n    # ==================================================================================\n    # reduce eveything to valid optimizations\n    # ==================================================================================\n    valid_results = [results[i] for i in valid_indices]\n    valid_starts = [starts[i] for i in valid_indices]\n    valid_new_x = [res.x for res in valid_results]\n    valid_new_y = []\n\n    # make the criterion output scalar if a least squares optimizer returns an\n    # array as solution_criterion.\n    for res in valid_results:\n        if np.isscalar(res.fun):\n            fun = float(res.fun)\n        elif solver_type == AggregationLevel.LIKELIHOOD:\n            fun = float(np.sum(res.fun))\n        elif 
solver_type == AggregationLevel.LEAST_SQUARES:\n            fun = np.dot(res.fun, res.fun)\n\n        valid_new_y.append(fun)\n\n    # ==================================================================================\n    # accept new best point if we find a new lowest function value\n    # ==================================================================================\n    best_index = np.argmin(valid_new_y)\n    if valid_new_y[best_index] <= best_y:\n        best_x = valid_new_x[best_index]\n        best_y = valid_new_y[best_index]\n        best_res = valid_results[best_index]\n    # handle the case that the global optimum was found in the exploration sample and\n    # due to floating point imprecisions the result of the optimization that started at\n    # the global optimum is slightly worse\n    elif best_res is None:\n        best_res = valid_results[best_index]\n\n    # ==================================================================================\n    # update history and state\n    # ==================================================================================\n    new_x_history = current_state[\"x_history\"] + valid_new_x\n    all_x = np.array(new_x_history)\n    relative_diffs = (all_x - best_x) / np.clip(best_x, 0.1, np.inf)\n    distances = np.linalg.norm(relative_diffs, axis=1)\n    n_close = (distances <= xtol).sum()\n\n    is_converged = n_close >= max_discoveries\n\n    new_state = {\n        \"best_x\": best_x,\n        \"best_y\": best_y,\n        \"best_res\": best_res,\n        \"x_history\": new_x_history,\n        \"y_history\": current_state[\"y_history\"] + valid_new_y,\n        \"result_history\": current_state[\"result_history\"] + valid_results,\n        \"start_history\": current_state[\"start_history\"] + valid_starts,\n    }\n\n    return new_state, is_converged\n"
  },
  {
    "path": "src/optimagic/optimization/multistart_options.py",
    "content": "from dataclasses import dataclass\nfrom functools import partial\nfrom typing import Callable, Literal, Sequence, TypedDict, cast\n\nimport numpy as np\nfrom numpy.typing import NDArray\nfrom typing_extensions import NotRequired\n\nfrom optimagic.batch_evaluators import process_batch_evaluator\nfrom optimagic.deprecations import replace_and_warn_about_deprecated_multistart_options\nfrom optimagic.exceptions import InvalidMultistartError\nfrom optimagic.typing import BatchEvaluator, BatchEvaluatorLiteral, PyTree\n\n# ======================================================================================\n# Public Options\n# ======================================================================================\n\n\n@dataclass(frozen=True)\nclass MultistartOptions:\n    \"\"\"Multistart options in optimization problems.\n\n    Attributes:\n        n_samples: The number of points at which the objective function is evaluated\n            during the exploration phase. If None, n_samples is set to 100 times the\n            number of parameters.\n        stopping_maxopt: The maximum number of local optimizations to run. Defaults to\n            10% of n_samples. This number may not be reached if multistart converges\n            earlier.\n        sampling_distribution: The distribution from which the exploration sample is\n            drawn. Allowed are \"uniform\" and \"triangular\". Defaults to \"uniform\".\n        sampling_method: The method used to draw the exploration sample. Allowed are\n            \"sobol\", \"random\", \"halton\", and \"latin_hypercube\". Defaults to \"random\".\n        sample: A sequence of PyTrees or None. If None, a sample is drawn from the\n            sampling distribution.\n        mixing_weight_method: The method used to determine the mixing weight, i,e, how\n            start parameters for local optimizations are calculated. Allowed are\n            \"tiktak\" and \"linear\", or a custom callable. 
Defaults to \"tiktak\".\n        mixing_weight_bounds: The lower and upper bounds for the mixing weight.\n            Defaults to (0.1, 0.995).\n        convergence_max_discoveries: The maximum number of discoveries for convergence.\n            Determines after how many re-discoveries of the currently best local\n            optima the multistart algorithm stops. Defaults to 2.\n        convergence_xtol_rel: The relative tolerance in parameters\n            for convergence. Determines the maximum relative distance two parameter\n            vectors can have to be considered equal. Defaults to 0.01.\n        n_cores: The number of cores to use for parallelization. Defaults to 1.\n        batch_evaluator: The evaluator to use for batch evaluation. Allowed are\n            \"joblib\", \"pathos\", and \"threading\", or a custom callable.\n        batch_size: The batch size for batch evaluation. Must be larger than n_cores\n            or None.\n        seed: The seed for the random number generator.\n        error_handling: The error handling for exploration and optimization errors.\n            Allowed are \"raise\" and \"continue\".\n\n    Raises:\n        InvalidMultistartError: If the multistart options cannot be processed, e.g.\n            because they do not have the correct type.\n\n    \"\"\"\n\n    n_samples: int | None = None\n    stopping_maxopt: int | None = None\n    sampling_distribution: Literal[\"uniform\", \"triangular\"] = \"uniform\"\n    sampling_method: Literal[\"sobol\", \"random\", \"halton\", \"latin_hypercube\"] = \"random\"\n    sample: Sequence[PyTree] | None = None\n    mixing_weight_method: (\n        Literal[\"tiktak\", \"linear\"] | Callable[[int, int, float, float], float]\n    ) = \"tiktak\"\n    mixing_weight_bounds: tuple[float, float] = (0.1, 0.995)\n    convergence_xtol_rel: float | None = None\n    convergence_max_discoveries: int = 2\n    n_cores: int = 1\n    batch_evaluator: BatchEvaluatorLiteral | BatchEvaluator = 
\"joblib\"\n    batch_size: int | None = None\n    seed: int | np.random.Generator | None = None\n    error_handling: Literal[\"raise\", \"continue\"] | None = None\n    # Deprecated attributes\n    share_optimization: float | None = None\n    convergence_relative_params_tolerance: float | None = None\n    optimization_error_handling: Literal[\"raise\", \"continue\"] | None = None\n    exploration_error_handling: Literal[\"raise\", \"continue\"] | None = None\n\n    def __post_init__(self) -> None:\n        _validate_attribute_types_and_values(self)\n\n\nclass MultistartOptionsDict(TypedDict):\n    n_samples: NotRequired[int | None]\n    stopping_maxopt: NotRequired[int | None]\n    sampling_distribution: NotRequired[Literal[\"uniform\", \"triangular\"]]\n    sampling_method: NotRequired[\n        Literal[\"sobol\", \"random\", \"halton\", \"latin_hypercube\"]\n    ]\n    sample: NotRequired[Sequence[PyTree] | None]\n    mixing_weight_method: NotRequired[\n        Literal[\"tiktak\", \"linear\"] | Callable[[int, int, float, float], float]\n    ]\n    mixing_weight_bounds: NotRequired[tuple[float, float]]\n    convergence_xtol_rel: NotRequired[float | None]\n    convergence_max_discoveries: NotRequired[int]\n    n_cores: NotRequired[int]\n    batch_evaluator: NotRequired[BatchEvaluatorLiteral | BatchEvaluator]\n    batch_size: NotRequired[int | None]\n    seed: NotRequired[int | np.random.Generator | None]\n    error_handling: NotRequired[Literal[\"raise\", \"continue\"] | None]\n    # Deprecated attributes\n    share_optimization: NotRequired[float | None]\n    convergence_relative_params_tolerance: NotRequired[float | None]\n    optimization_error_handling: NotRequired[Literal[\"raise\", \"continue\"] | None]\n    exploration_error_handling: NotRequired[Literal[\"raise\", \"continue\"] | None]\n\n\ndef pre_process_multistart(\n    multistart: bool | MultistartOptions | MultistartOptionsDict | None,\n) -> MultistartOptions | None:\n    \"\"\"Convert all valid types 
of multistart to an optimagic.MultistartOptions.\n\n    This just harmonizes multiple ways of specifying multistart options into a single\n    format. It performs runtime type checks, but it does not check whether multistart\n    options are consistent with other option choices.\n\n    Args:\n        multistart: The user provided multistart options.\n        n_params: The number of parameters in the optimization problem.\n\n    Returns:\n        The multistart options in the optimagic format.\n\n    Raises:\n        InvalidMultistartError: If the multistart options cannot be processed, e.g.\n            because they do not have the correct type.\n\n    \"\"\"\n    if isinstance(multistart, bool):\n        multistart = MultistartOptions() if multistart else None\n    elif isinstance(multistart, MultistartOptions) or multistart is None:\n        pass\n    else:\n        try:\n            multistart = MultistartOptions(**multistart)\n        except (KeyboardInterrupt, SystemExit):\n            raise\n        except Exception as e:\n            if isinstance(e, InvalidMultistartError):\n                raise e\n            raise InvalidMultistartError(\n                f\"Invalid multistart options of type: {type(multistart)}. 
Multistart \"\n                \"options must be of type optimagic.MultistartOptions, a dictionary \"\n                \"with valid keys, None, or a boolean.\"\n            ) from e\n\n    if multistart is not None:\n        multistart = replace_and_warn_about_deprecated_multistart_options(multistart)\n        # The replace and warn function cannot be typed due to circular imports, but\n        # we know that the return type is MultistartOptions\n        multistart = cast(MultistartOptions, multistart)\n\n    return multistart\n\n\ndef _validate_attribute_types_and_values(options: MultistartOptions) -> None:\n    if options.n_samples is not None and (\n        not isinstance(options.n_samples, int) or options.n_samples < 1\n    ):\n        raise InvalidMultistartError(\n            f\"Invalid number of samples: {options.n_samples}. Number of samples \"\n            \"must be a positive integer or None.\"\n        )\n\n    if options.stopping_maxopt is not None and (\n        not isinstance(options.stopping_maxopt, int) or options.stopping_maxopt < 0\n    ):\n        raise InvalidMultistartError(\n            f\"Invalid number of optimizations: {options.stopping_maxopt}. Number of \"\n            \"optimizations must be a positive integer or None.\"\n        )\n\n    if (\n        options.n_samples is not None\n        and options.stopping_maxopt is not None\n        and options.n_samples < options.stopping_maxopt\n    ):\n        raise InvalidMultistartError(\n            f\"Invalid number of samples: {options.n_samples}. Number of samples \"\n            \"must be at least as large as the number of optimizations.\"\n        )\n\n    if options.sampling_distribution not in (\"uniform\", \"triangular\"):\n        raise InvalidMultistartError(\n            f\"Invalid sampling distribution: {options.sampling_distribution}. 
Sampling \"\n            f\"distribution must be one of ('uniform', 'triangular').\"\n        )\n\n    if options.sampling_method not in (\"sobol\", \"random\", \"halton\", \"latin_hypercube\"):\n        raise InvalidMultistartError(\n            f\"Invalid sampling method: {options.sampling_method}. Sampling method \"\n            f\"must be one of ('sobol', 'random', 'halton', 'latin_hypercube').\"\n        )\n\n    if not isinstance(options.sample, Sequence | None):\n        raise InvalidMultistartError(\n            f\"Invalid sample: {options.sample}. Sample must be a sequence of \"\n            \"parameters.\"\n        )\n\n    if not callable(\n        options.mixing_weight_method\n    ) and options.mixing_weight_method not in (\"tiktak\", \"linear\"):\n        raise InvalidMultistartError(\n            f\"Invalid mixing weight method: {options.mixing_weight_method}. Mixing \"\n            \"weight method must be Callable or one of ('tiktak', 'linear').\"\n        )\n\n    if (\n        not isinstance(options.mixing_weight_bounds, tuple)\n        or len(options.mixing_weight_bounds) != 2\n        or not set(type(x) for x in options.mixing_weight_bounds) <= {int, float}\n    ):\n        raise InvalidMultistartError(\n            f\"Invalid mixing weight bounds: {options.mixing_weight_bounds}. Mixing \"\n            \"weight bounds must be a tuple of two numbers.\"\n        )\n\n    if options.convergence_xtol_rel is not None and (\n        not isinstance(options.convergence_xtol_rel, int | float)\n        or options.convergence_xtol_rel < 0\n    ):\n        raise InvalidMultistartError(\n            \"Invalid relative params tolerance:\"\n            f\"{options.convergence_xtol_rel}. 
Relative params \"\n            \"tolerance must be a number.\"\n        )\n\n    if (\n        not isinstance(options.convergence_max_discoveries, int | float)\n        or options.convergence_max_discoveries < 1\n    ):\n        raise InvalidMultistartError(\n            f\"Invalid max discoveries: {options.convergence_max_discoveries}. Max \"\n            \"discoveries must be a positive integer or infinity.\"\n        )\n\n    if not isinstance(options.n_cores, int) or options.n_cores < 1:\n        raise InvalidMultistartError(\n            f\"Invalid number of cores: {options.n_cores}. Number of cores \"\n            \"must be a positive integer.\"\n        )\n\n    try:\n        process_batch_evaluator(options.batch_evaluator)\n    except Exception as e:\n        raise InvalidMultistartError(\n            f\"Invalid batch evaluator: {options.batch_evaluator}.\"\n        ) from e\n\n    if options.batch_size is not None and (\n        not isinstance(options.batch_size, int) or options.batch_size < options.n_cores\n    ):\n        raise InvalidMultistartError(\n            f\"Invalid batch size: {options.batch_size}. Batch size \"\n            \"must be a positive integer larger than n_cores, or None.\"\n        )\n\n    if not isinstance(options.seed, int | np.random.Generator | None):\n        raise InvalidMultistartError(\n            f\"Invalid seed: {options.seed}. Seed \"\n            \"must be an integer, a numpy random generator, or None.\"\n        )\n\n    if options.error_handling is not None and options.error_handling not in (\n        \"raise\",\n        \"continue\",\n    ):\n        raise InvalidMultistartError(\n            f\"Invalid error handling: {options.error_handling}. 
Error handling must be \"\n            \"'raise' or 'continue'.\"\n        )\n\n\n# ======================================================================================\n# Internal Options\n# ======================================================================================\n\n\ndef _tiktak_weights(\n    iteration: int, n_iterations: int, min_weight: float, max_weight: float\n) -> float:\n    return np.clip(np.sqrt(iteration / n_iterations), min_weight, max_weight)\n\n\ndef _linear_weights(\n    iteration: int, n_iterations: int, min_weight: float, max_weight: float\n) -> float:\n    unscaled = iteration / n_iterations\n    span = max_weight - min_weight\n    return min_weight + unscaled * span\n\n\nWEIGHT_FUNCTIONS = {\n    \"tiktak\": _tiktak_weights,\n    \"linear\": _linear_weights,\n}\n\n\n@dataclass(frozen=True)\nclass InternalMultistartOptions:\n    \"\"\"Multistart options used internally in optimagic.\n\n    Compared to `MultistartOptions`, this data class has stricter types and combines\n    some of the attributes. 
It is generated at runtime using a `MultistartOptions`\n    instance and the function `get_internal_multistart_options_from_public`.\n\n    \"\"\"\n\n    n_samples: int\n    weight_func: Callable[[int, int], float]\n    convergence_xtol_rel: float\n    convergence_max_discoveries: int\n    sampling_distribution: Literal[\"uniform\", \"triangular\"]\n    sampling_method: Literal[\"sobol\", \"random\", \"halton\", \"latin_hypercube\"]\n    sample: NDArray[np.float64] | None\n    seed: int | np.random.Generator | None\n    n_cores: int\n    batch_evaluator: BatchEvaluator\n    batch_size: int\n    error_handling: Literal[\"raise\", \"continue\"]\n    stopping_maxopt: int\n\n    def __post_init__(self) -> None:\n        must_be_at_least_1 = [\n            \"n_samples\",\n            \"stopping_maxopt\",\n            \"n_cores\",\n            \"batch_size\",\n            \"convergence_max_discoveries\",\n        ]\n\n        for attr in must_be_at_least_1:\n            if getattr(self, attr) < 1:\n                raise InvalidMultistartError(f\"{attr} must be at least 1.\")\n\n        if self.batch_size < self.n_cores:\n            raise InvalidMultistartError(\"batch_size must be at least n_cores.\")\n\n        if self.convergence_xtol_rel < 0:\n            raise InvalidMultistartError(\"convergence_xtol_rel must be at least 0.\")\n\n\ndef get_internal_multistart_options_from_public(\n    options: MultistartOptions,\n    params: PyTree,\n    params_to_internal: Callable[[PyTree], NDArray[np.float64]],\n) -> InternalMultistartOptions:\n    \"\"\"Get internal multistart options from public multistart options.\n\n    Args:\n        options: The pre-processed multistart options.\n        params: The parameters of the optimization problem.\n        params_to_internal: A function that converts parameters to internal parameters.\n\n    Returns:\n        InternalMultistartOptions: The updated options with runtime defaults.\n\n    \"\"\"\n    x = params_to_internal(params)\n\n  
  if options.sample is not None:\n        sample = np.array([params_to_internal(x) for x in list(options.sample)])\n        n_samples = len(options.sample)\n    else:\n        sample = None\n        n_samples = options.n_samples  # type: ignore\n\n    batch_size = options.n_cores if options.batch_size is None else options.batch_size\n    batch_evaluator = process_batch_evaluator(options.batch_evaluator)\n\n    if callable(options.mixing_weight_method):\n        weight_func = options.mixing_weight_method\n    else:\n        _weight_method = WEIGHT_FUNCTIONS[options.mixing_weight_method]\n\n    weight_func = partial(\n        _weight_method,\n        min_weight=options.mixing_weight_bounds[0],\n        max_weight=options.mixing_weight_bounds[1],\n    )\n\n    if n_samples is None:\n        if options.stopping_maxopt is None:\n            n_samples = 100 * len(x)\n        else:\n            n_samples = 10 * options.stopping_maxopt\n\n    if options.share_optimization is None:\n        share_optimization = 0.1\n    else:\n        share_optimization = options.share_optimization\n\n    if options.stopping_maxopt is None:\n        stopping_maxopt = max(1, int(share_optimization * n_samples))\n    else:\n        stopping_maxopt = options.stopping_maxopt\n\n    # Set defaults resulting from deprecated attributes\n    if options.error_handling is not None:\n        error_handling = options.error_handling\n    else:\n        error_handling = \"continue\"\n\n    if options.convergence_xtol_rel is not None:\n        convergence_xtol_rel = options.convergence_xtol_rel\n    else:\n        convergence_xtol_rel = 0.01\n\n    return InternalMultistartOptions(\n        # Attributes taken directly from MultistartOptions\n        convergence_max_discoveries=options.convergence_max_discoveries,\n        n_cores=options.n_cores,\n        sampling_distribution=options.sampling_distribution,\n        sampling_method=options.sampling_method,\n        seed=options.seed,\n        # Updated 
attributes\n        sample=sample,\n        n_samples=n_samples,\n        weight_func=weight_func,\n        error_handling=error_handling,\n        convergence_xtol_rel=convergence_xtol_rel,\n        stopping_maxopt=stopping_maxopt,\n        batch_evaluator=batch_evaluator,\n        batch_size=batch_size,\n    )\n"
  },
  {
    "path": "src/optimagic/optimization/optimization_logging.py",
    "content": "from typing import Any, cast\n\nfrom optimagic.logging.logger import LogStore\nfrom optimagic.logging.types import StepResult, StepStatus\n\n\ndef log_scheduled_steps_and_get_ids(\n    steps: list[dict[str, Any]], logger: LogStore | None\n) -> list[int]:\n    \"\"\"Add scheduled steps to the steps table of the database and get their ids.\n\n    The ids are only determined once the steps are written to the database and the\n    ids of all previously existing steps are known.\n\n    Args:\n        steps (list): List of dicts with entries for the steps table.\n        logging (bool): Whether to actually write to the database.\n\n    Returns:\n        list: List of integers with the step ids.\n\n    \"\"\"\n    default_row = {\"status\": StepStatus.SCHEDULED.value}\n    if logger:\n        for row in steps:\n            data = StepResult(**{**default_row, **row})\n            logger.step_store.insert(data)\n\n        last_steps = logger.step_store.select_last_rows(len(steps))\n        step_ids = cast(list[int], [row.rowid for row in last_steps])\n    else:\n        step_ids = list(range(len(steps)))\n\n    return step_ids\n"
  },
  {
    "path": "src/optimagic/optimization/optimize.py",
    "content": "\"\"\"Public functions for optimization.\n\nThis module defines the public functions `maximize` and `minimize` that will be called\nby users.\n\nInternally, `maximize` and `minimize` just call `create_optimization_problem` with\nall arguments and add the `direction`. In `create_optimization_problem`, the user input\nis consolidated and converted to stricter types.  The resulting `OptimizationProblem`\nis then passed to `_optimize` which handles the optimization logic.\n\n`_optimize` processes the optimization problem and performs the actual optimization.\n\n\"\"\"\n\nfrom __future__ import annotations\n\nfrom pathlib import Path\nfrom typing import Any, Callable, Sequence, Type, cast\n\nimport numpy as np\nfrom scipy.optimize import Bounds as ScipyBounds\n\nfrom optimagic.batch_evaluators import process_batch_evaluator\nfrom optimagic.constraints import Constraint\nfrom optimagic.differentiation.numdiff_options import NumdiffOptions, NumdiffOptionsDict\nfrom optimagic.exceptions import (\n    IncompleteBoundsError,\n    InvalidFunctionError,\n)\nfrom optimagic.logging.logger import LogReader, LogStore\nfrom optimagic.logging.types import ProblemInitialization\nfrom optimagic.optimization.algorithm import Algorithm\nfrom optimagic.optimization.create_optimization_problem import (\n    OptimizationProblem,\n    create_optimization_problem,\n)\nfrom optimagic.optimization.error_penalty import get_error_penalty_function\nfrom optimagic.optimization.fun_value import FunctionValue\nfrom optimagic.optimization.internal_optimization_problem import (\n    InternalBounds,\n    InternalOptimizationProblem,\n)\nfrom optimagic.optimization.multistart import (\n    run_multistart_optimization,\n)\nfrom optimagic.optimization.multistart_options import (\n    MultistartOptions,\n    MultistartOptionsDict,\n    get_internal_multistart_options_from_public,\n)\nfrom optimagic.optimization.optimization_logging import log_scheduled_steps_and_get_ids\nfrom 
optimagic.optimization.optimize_result import OptimizeResult\nfrom optimagic.optimization.process_results import (\n    ExtraResultFields,\n    process_multistart_result,\n    process_single_result,\n)\nfrom optimagic.parameters.bounds import Bounds\nfrom optimagic.parameters.conversion import (\n    get_converter,\n)\nfrom optimagic.parameters.nonlinear_constraints import process_nonlinear_constraints\nfrom optimagic.parameters.scaling import ScalingOptions, ScalingOptionsDict\nfrom optimagic.typing import (\n    AggregationLevel,\n    Direction,\n    ErrorHandling,\n    ErrorHandlingLiteral,\n    NonNegativeFloat,\n    PyTree,\n)\n\nFunType = Callable[..., float | PyTree | FunctionValue]\nAlgorithmType = str | Algorithm | Type[Algorithm]\nConstraintsType = Constraint | list[Constraint] | dict[str, Any] | list[dict[str, Any]]\nJacType = Callable[..., PyTree]\nFunAndJacType = Callable[..., tuple[float | PyTree | FunctionValue, PyTree]]\nHessType = Callable[..., PyTree]\n# TODO: refine this type\nCallbackType = Callable[..., Any]\n\nCriterionType = Callable[..., float | dict[str, Any]]\nCriterionAndDerivativeType = Callable[..., tuple[float | dict[str, Any], PyTree]]\n\n\nfrom optimagic.logging.logger import LogOptions\n\n\ndef maximize(\n    fun: FunType | CriterionType | None = None,\n    params: PyTree | None = None,\n    algorithm: AlgorithmType | None = None,\n    *,\n    bounds: Bounds | ScipyBounds | Sequence[tuple[float, float]] | None = None,\n    constraints: ConstraintsType | None = None,\n    fun_kwargs: dict[str, Any] | None = None,\n    algo_options: dict[str, Any] | None = None,\n    jac: JacType | list[JacType] | None = None,\n    jac_kwargs: dict[str, Any] | None = None,\n    fun_and_jac: FunAndJacType | CriterionAndDerivativeType | None = None,\n    fun_and_jac_kwargs: dict[str, Any] | None = None,\n    numdiff_options: NumdiffOptions | NumdiffOptionsDict | None = None,\n    # TODO: add typed-dict support?\n    logging: bool | str | Path | 
LogOptions | dict[str, Any] | None = None,\n    error_handling: ErrorHandling | ErrorHandlingLiteral = ErrorHandling.RAISE,\n    error_penalty: dict[str, float] | None = None,\n    scaling: bool | ScalingOptions | ScalingOptionsDict = False,\n    multistart: bool | MultistartOptions | MultistartOptionsDict = False,\n    collect_history: bool = True,\n    skip_checks: bool = False,\n    # scipy aliases\n    x0: PyTree | None = None,\n    method: str | None = None,\n    args: tuple[Any] | None = None,\n    # scipy arguments that are not yet supported\n    hess: HessType | None = None,\n    hessp: HessType | None = None,\n    callback: CallbackType | None = None,\n    # scipy arguments that will never be supported\n    options: dict[str, Any] | None = None,\n    tol: NonNegativeFloat | None = None,\n    # deprecated arguments\n    criterion: CriterionType | None = None,\n    criterion_kwargs: dict[str, Any] | None = None,\n    derivative: JacType | None = None,\n    derivative_kwargs: dict[str, Any] | None = None,\n    criterion_and_derivative: CriterionAndDerivativeType | None = None,\n    criterion_and_derivative_kwargs: dict[str, Any] | None = None,\n    log_options: dict[str, Any] | None = None,\n    lower_bounds: PyTree | None = None,\n    upper_bounds: PyTree | None = None,\n    soft_lower_bounds: PyTree | None = None,\n    soft_upper_bounds: PyTree | None = None,\n    scaling_options: dict[str, Any] | None = None,\n    multistart_options: dict[str, Any] | None = None,\n) -> OptimizeResult:\n    \"\"\"Maximize fun using algorithm subject to constraints.\n\n    Args:\n        fun: The objective function of a scalar, least-squares or likelihood\n            optimization problem. Non-scalar objective functions have to be marked\n            with the `mark.likelihood` or `mark.least_squares` decorators. `fun` maps\n            params and fun_kwargs to an objective value. 
See :ref:`how-to-fun` for\n            details and examples.\n        params: The start parameters for the optimization. Params can be numpy arrays,\n            dictionaries, pandas.Series, pandas.DataFrames, NamedTuples, floats, lists,\n            and any nested combination thereof. See :ref:`params` for details and\n            examples.\n        algorithm: The optimization algorithm to use. Can be a string, subclass of\n            :class:`optimagic.Algorithm` or an instance of a subclass of\n            :class:`optimagic.Algorithm`. For guidelines on how to choose an algorithm\n            see :ref:`how-to-select-algorithms`. For examples of specifying and\n            configuring algorithms see :ref:`specify-algorithm`.\n        bounds: Lower and upper bounds on the parameters. The most general and preferred\n            way to specify bounds is an :class:`optimagic.Bounds` object that collects\n            lower, upper, soft_lower and soft_upper bounds. The soft bounds are used for\n            sampling based optimizers but are not enforced during optimization. Each\n            bound type mirrors the structure of params. See :ref:`how-to-bounds` for\n            details and examples. If params is a flat numpy array, you can also provide\n            bounds via any format that is supported by scipy.optimize.minimize.\n        constraints: Constraints for the optimization problem. Constraints can be\n            specified as a single :class:`optimagic.Constraint` object, a list of\n            Constraint objects. For details and examples check :ref:`constraints`.\n        fun_kwargs: Additional keyword arguments for the objective function.\n        algo_options: Additional options for the optimization algorithm. `algo_options`\n            is an alternative to configuring algorithm objects directly. See\n            :ref:`list_of_algorithms` for supported options of each algorithm.\n        jac: The first derivative of `fun`. 
Providing a closed form derivative can be\n            a great way to speed up your optimization. The easiest way to get\n            a derivative for your objective function are autodiff frameworks like\n            JAX. For details and examples see :ref:`how-to-jac`.\n        jac_kwargs: Additional keyword arguments for `jac`.\n        fun_and_jac: A function that returns both the objective value and the\n            derivative. This can be used do exploit synergies in the calculation of the\n            function value and its derivative. For details and examples see\n            :ref:`how-to-jac`.\n        fun_and_jac_kwargs: Additional keyword arguments for `fun_and_jac`.\n        numdiff_options: Options for numerical differentiation. Can be a dictionary\n            or an instance of :class:`optimagic.NumdiffOptions`.\n        logging: If None, no logging is used. If a str or pathlib.Path is provided,\n            it is interpreted as path to an sqlite3 file (which typically has\n            the file extension ``.db``. If the file does not exist, it will be created.\n            and the optimization history will be stored in that database. For more\n            customization, provide LogOptions. For details and examples see\n            :ref:`how-to-logging`.\n        error_handling: If \"raise\" or ErrorHandling.RAISE, exceptions that occur during\n            the optimization are raised and the optimization is stopped. If \"continue\"\n            or ErrorHandling.CONTINUE, exceptions are caught and the function value and\n            its derivative are replaced by penalty values. The penalty values are\n            constructed such that the optimizer is guided back towards the start\n            parameters until a feasible region is reached and then continues the\n            optimization from there. 
For details see  :ref:`how-to-errors`.\n        error_penalty: A dictionary with the keys \"slope\" and \"constant\" that\n            influences the magnitude of the penalty values. For maximization problems\n            both should be negative. For details see :ref:`how-to-errors`.\n        scaling: If None or False, the parameter space is not rescaled. If True,\n            a heuristic is used to improve the conditioning of the optimization problem.\n            To choose which heuristic is used and to customize the scaling, provide\n            a dictionary or an instance of :class:`optimagic.ScalingOptions`.\n            For details and examples see :ref:`scaling`.\n        multistart: If None or False, no multistart approach is used. If True, the\n            optimization is restarted from multiple starting points. Note that this\n            requires finite bounds or soft bounds for all parameters. To customize the\n            multistart approach, provide a dictionary or an instance of\n            :class:`optimagic.MultistartOptions`. For details and examples see\n            :ref:`how-to-multistart`.\n        collect_history: If True, the optimization history is collected and returned\n            in the OptimizeResult. This is required to create `criterion_plot` or\n            `params_plot` from an OptimizeResult.\n        skip_checks: If True, some checks are skipped to speed up the optimization.\n            This is only relevant if your objective function is very fast, i.e. runs in\n            a few microseconds.\n        x0: Alias for params for scipy compatibility.\n        method: Alternative to algorithm for scipy compatibility. 
With `method` you can\n            select scipy optimizers via their original scipy name.\n        args: Alternative to fun_kwargs for scipy compatibility.\n        hess: Not yet supported.\n        hessp: Not yet supported.\n        callback: Not yet supported.\n        options: Not yet supported.\n        tol: Not yet supported.\n        criterion: Deprecated. Use fun instead.\n        criterion_kwargs: Deprecated. Use fun_kwargs instead.\n        derivative: Deprecated. Use jac instead.\n        derivative_kwargs: Deprecated. Use jac_kwargs instead.\n        criterion_and_derivative: Deprecated. Use fun_and_jac instead.\n        criterion_and_derivative_kwargs: Deprecated. Use fun_and_jac_kwargs instead.\n        lower_bounds: Deprecated. Use bounds instead.\n        upper_bounds: Deprecated. Use bounds instead.\n        soft_lower_bounds: Deprecated. Use bounds instead.\n        soft_upper_bounds: Deprecated. Use bounds instead.\n        scaling_options: Deprecated. Use scaling instead.\n        multistart_options: Deprecated. 
Use multistart instead.\n\n    \"\"\"\n    problem = create_optimization_problem(\n        direction=Direction.MAXIMIZE,\n        fun=fun,\n        params=params,\n        bounds=bounds,\n        algorithm=algorithm,\n        fun_kwargs=fun_kwargs,\n        constraints=constraints,\n        algo_options=algo_options,\n        jac=jac,\n        jac_kwargs=jac_kwargs,\n        fun_and_jac=fun_and_jac,\n        fun_and_jac_kwargs=fun_and_jac_kwargs,\n        numdiff_options=numdiff_options,\n        logging=logging,\n        log_options=log_options,\n        error_handling=error_handling,\n        error_penalty=error_penalty,\n        scaling=scaling,\n        multistart=multistart,\n        collect_history=collect_history,\n        skip_checks=skip_checks,\n        # scipy aliases\n        x0=x0,\n        method=method,\n        args=args,\n        # scipy arguments that are not yet supported\n        hess=hess,\n        hessp=hessp,\n        callback=callback,\n        # scipy arguments that will never be supported\n        options=options,\n        tol=tol,\n        # deprecated arguments\n        criterion=criterion,\n        criterion_kwargs=criterion_kwargs,\n        derivative=derivative,\n        derivative_kwargs=derivative_kwargs,\n        criterion_and_derivative=criterion_and_derivative,\n        criterion_and_derivative_kwargs=criterion_and_derivative_kwargs,\n        lower_bounds=lower_bounds,\n        upper_bounds=upper_bounds,\n        soft_lower_bounds=soft_lower_bounds,\n        soft_upper_bounds=soft_upper_bounds,\n        scaling_options=scaling_options,\n        multistart_options=multistart_options,\n    )\n    return _optimize(problem)\n\n\ndef minimize(\n    fun: FunType | CriterionType | None = None,\n    params: PyTree | None = None,\n    algorithm: AlgorithmType | None = None,\n    *,\n    bounds: Bounds | ScipyBounds | Sequence[tuple[float, float]] | None = None,\n    constraints: ConstraintsType | None = None,\n    fun_kwargs: dict[str, 
Any] | None = None,\n    algo_options: dict[str, Any] | None = None,\n    jac: JacType | list[JacType] | None = None,\n    jac_kwargs: dict[str, Any] | None = None,\n    fun_and_jac: FunAndJacType | CriterionAndDerivativeType | None = None,\n    fun_and_jac_kwargs: dict[str, Any] | None = None,\n    numdiff_options: NumdiffOptions | NumdiffOptionsDict | None = None,\n    # TODO: add typed-dict support?\n    logging: bool | str | Path | LogOptions | dict[str, Any] | None = None,\n    error_handling: ErrorHandling | ErrorHandlingLiteral = ErrorHandling.RAISE,\n    error_penalty: dict[str, float] | None = None,\n    scaling: bool | ScalingOptions | ScalingOptionsDict = False,\n    multistart: bool | MultistartOptions | MultistartOptionsDict = False,\n    collect_history: bool = True,\n    skip_checks: bool = False,\n    # scipy aliases\n    x0: PyTree | None = None,\n    method: str | None = None,\n    args: tuple[Any] | None = None,\n    # scipy arguments that are not yet supported\n    hess: HessType | None = None,\n    hessp: HessType | None = None,\n    callback: CallbackType | None = None,\n    # scipy arguments that will never be supported\n    options: dict[str, Any] | None = None,\n    tol: NonNegativeFloat | None = None,\n    # deprecated arguments\n    criterion: CriterionType | None = None,\n    criterion_kwargs: dict[str, Any] | None = None,\n    derivative: JacType | None = None,\n    derivative_kwargs: dict[str, Any] | None = None,\n    criterion_and_derivative: CriterionAndDerivativeType | None = None,\n    criterion_and_derivative_kwargs: dict[str, Any] | None = None,\n    log_options: dict[str, Any] | None = None,\n    lower_bounds: PyTree | None = None,\n    upper_bounds: PyTree | None = None,\n    soft_lower_bounds: PyTree | None = None,\n    soft_upper_bounds: PyTree | None = None,\n    scaling_options: dict[str, Any] | None = None,\n    multistart_options: dict[str, Any] | None = None,\n) -> OptimizeResult:\n    \"\"\"Minimize criterion using 
algorithm subject to constraints.\n\n    Args:\n        fun: The objective function of a scalar, least-squares or likelihood\n            optimization problem. Non-scalar objective functions have to be marked\n            with the `mark.likelihood` or `mark.least_squares` decorators. `fun` maps\n            params and fun_kwargs to an objective value. See :ref:`how-to-fun` for\n            details and examples.\n        params: The start parameters for the optimization. Params can be numpy arrays,\n            dictionaries, pandas.Series, pandas.DataFrames, NamedTuples, floats, lists,\n            and any nested combination thereof. See :ref:`params` for details and\n            examples.\n        algorithm: The optimization algorithm to use. Can be a string, subclass of\n            :class:`optimagic.Algorithm` or an instance of a subclass of\n            :class:`optimagic.Algorithm`. For guidelines on how to choose an algorithm\n            see :ref:`how-to-select-algorithms`. For examples of specifying and\n            configuring algorithms see :ref:`specify-algorithm`.\n        bounds: Lower and upper bounds on the parameters. The most general and preferred\n            way to specify bounds is an :class:`optimagic.Bounds` object that collects\n            lower, upper, soft_lower and soft_upper bounds. The soft bounds are used for\n            sampling based optimizers but are not enforced during optimization. Each\n            bound type mirrors the structure of params. See :ref:`how-to-bounds` for\n            details and examples. If params is a flat numpy array, you can also provide\n            bounds via any format that is supported by scipy.optimize.minimize.\n        constraints: Constraints for the optimization problem. Constraints can be\n            specified as a single :class:`optimagic.Constraint` object, a list of\n            Constraint objects. 
For details and examples check :ref:`constraints`.\n        fun_kwargs: Additional keyword arguments for the objective function.\n        algo_options: Additional options for the optimization algorithm. `algo_options`\n            is an alternative to configuring algorithm objects directly. See\n            :ref:`list_of_algorithms` for supported options of each algorithm.\n        jac: The first derivative of `fun`. Providing a closed form derivative can be\n            a great way to speed up your optimization. The easiest way to get\n            a derivative for your objective function are autodiff frameworks like\n            JAX. For details and examples see :ref:`how-to-jac`.\n        jac_kwargs: Additional keyword arguments for `jac`.\n        fun_and_jac: A function that returns both the objective value and the\n            derivative. This can be used do exploit synergies in the calculation of the\n            function value and its derivative. For details and examples see\n            :ref:`how-to-jac`.\n        fun_and_jac_kwargs: Additional keyword arguments for `fun_and_jac`.\n        numdiff_options: Options for numerical differentiation. Can be a dictionary\n            or an instance of :class:`optimagic.NumdiffOptions`.\n        logging: If None, no logging is used. If a str or pathlib.Path is provided,\n            it is interpreted as path to an sqlite3 file (which typically has\n            the file extension ``.db``. If the file does not exist, it will be created.\n            and the optimization history will be stored in that database. For more\n            customization, provide LogOptions. For details and examples see\n            :ref:`how-to-logging`.\n        error_handling: If \"raise\" or ErrorHandling.RAISE, exceptions that occur during\n            the optimization are raised and the optimization is stopped. 
If \"continue\"\n            or ErrorHandling.CONTINUE, exceptions are caught and the function value and\n            its derivative are replaced by penalty values. The penalty values are\n            constructed such that the optimizer is guided back towards the start\n            parameters until a feasible region is reached and then continues the\n            optimization from there. For details see  :ref:`how-to-errors`.\n        error_penalty: A dictionary with the keys \"slope\" and \"constant\" that\n            influences the magnitude of the penalty values. For minimization problems\n            both should be positive. For details see :ref:`how-to-errors`.\n        scaling: If None or False, the parameter space is not rescaled. If True,\n            a heuristic is used to improve the conditioning of the optimization problem.\n            To choose which heuristic is used and to customize the scaling, provide\n            a dictionary or an instance of :class:`optimagic.ScalingOptions`.\n            For details and examples see :ref:`scaling`.\n        multistart: If None or False, no multistart approach is used. If True, the\n            optimization is restarted from multiple starting points. Note that this\n            requires finite bounds or soft bounds for all parameters. To customize the\n            multistart approach, provide a dictionary or an instance of\n            :class:`optimagic.MultistartOptions`. For details and examples see\n            :ref:`how-to-multistart`.\n        collect_history: If True, the optimization history is collected and returned\n            in the OptimizeResult. This is required to create `criterion_plot` or\n            `params_plot` from an OptimizeResult.\n        skip_checks: If True, some checks are skipped to speed up the optimization.\n            This is only relevant if your objective function is very fast, i.e. 
runs in\n            a few microseconds.\n        x0: Alias for params for scipy compatibility.\n        method: Alternative to algorithm for scipy compatibility. With `method` you can\n            select scipy optimizers via their original scipy name.\n        args: Alternative to fun_kwargs for scipy compatibility.\n        hess: Not yet supported.\n        hessp: Not yet supported.\n        callback: Not yet supported.\n        options: Not yet supported.\n        tol: Not yet supported.\n        criterion: Deprecated. Use fun instead.\n        criterion_kwargs: Deprecated. Use fun_kwargs instead.\n        derivative: Deprecated. Use jac instead.\n        derivative_kwargs: Deprecated. Use jac_kwargs instead.\n        criterion_and_derivative: Deprecated. Use fun_and_jac instead.\n        criterion_and_derivative_kwargs: Deprecated. Use fun_and_jac_kwargs instead.\n        lower_bounds: Deprecated. Use bounds instead.\n        upper_bounds: Deprecated. Use bounds instead.\n        soft_lower_bounds: Deprecated. Use bounds instead.\n        soft_upper_bounds: Deprecated. Use bounds instead.\n        scaling_options: Deprecated. Use scaling instead.\n        multistart_options: Deprecated. 
Use multistart instead.\n\n    \"\"\"\n    problem = create_optimization_problem(\n        direction=Direction.MINIMIZE,\n        fun=fun,\n        params=params,\n        algorithm=algorithm,\n        bounds=bounds,\n        fun_kwargs=fun_kwargs,\n        constraints=constraints,\n        algo_options=algo_options,\n        jac=jac,\n        jac_kwargs=jac_kwargs,\n        fun_and_jac=fun_and_jac,\n        fun_and_jac_kwargs=fun_and_jac_kwargs,\n        numdiff_options=numdiff_options,\n        logging=logging,\n        error_handling=error_handling,\n        error_penalty=error_penalty,\n        scaling=scaling,\n        multistart=multistart,\n        collect_history=collect_history,\n        skip_checks=skip_checks,\n        # scipy aliases\n        x0=x0,\n        method=method,\n        args=args,\n        # scipy arguments that are not yet supported\n        hess=hess,\n        hessp=hessp,\n        callback=callback,\n        # scipy arguments that will never be supported\n        options=options,\n        tol=tol,\n        # deprecated arguments\n        criterion=criterion,\n        criterion_kwargs=criterion_kwargs,\n        derivative=derivative,\n        derivative_kwargs=derivative_kwargs,\n        criterion_and_derivative=criterion_and_derivative,\n        criterion_and_derivative_kwargs=criterion_and_derivative_kwargs,\n        lower_bounds=lower_bounds,\n        log_options=log_options,\n        upper_bounds=upper_bounds,\n        soft_lower_bounds=soft_lower_bounds,\n        soft_upper_bounds=soft_upper_bounds,\n        scaling_options=scaling_options,\n        multistart_options=multistart_options,\n    )\n    return _optimize(problem)\n\n\ndef _optimize(problem: OptimizationProblem) -> OptimizeResult:\n    \"\"\"Solve an optimization problem.\"\"\"\n    # ==================================================================================\n    # Split constraints into nonlinear and reparametrization parts\n    # 
==================================================================================\n    constraints = problem.constraints\n\n    nonlinear_constraints = [c for c in constraints if c[\"type\"] == \"nonlinear\"]\n\n    if nonlinear_constraints:\n        if not problem.algorithm.algo_info.supports_nonlinear_constraints:\n            raise ValueError(\n                f\"Algorithm {problem.algorithm.name} does not support \"\n                \"nonlinear constraints.\"\n            )\n\n    # the following constraints will be handled via reparametrization\n    constraints = [c for c in constraints if c[\"type\"] != \"nonlinear\"]\n\n    # ==================================================================================\n    # Do first evaluation of user provided functions\n    # ==================================================================================\n    first_crit_eval = problem.fun_eval\n\n    # do first derivative evaluation (if given)\n    if problem.jac is not None:\n        try:\n            first_deriv_eval = problem.jac(problem.params)\n        except (KeyboardInterrupt, SystemExit):\n            raise\n        except Exception as e:\n            msg = \"Error while evaluating derivative at start params.\"\n            raise InvalidFunctionError(msg) from e\n\n    if problem.fun_and_jac is not None:\n        try:\n            first_crit_and_deriv_eval = problem.fun_and_jac(problem.params)\n        except (KeyboardInterrupt, SystemExit):\n            raise\n        except Exception as e:\n            msg = \"Error while evaluating criterion_and_derivative at start params.\"\n            raise InvalidFunctionError(msg) from e\n\n    if problem.jac is not None:\n        used_deriv = first_deriv_eval\n    elif problem.fun_and_jac is not None:\n        used_deriv = first_crit_and_deriv_eval[1]\n    else:\n        used_deriv = None\n\n    # ==================================================================================\n    # Get the converter (for tree 
flattening, constraints and scaling)\n    # ==================================================================================\n    converter, internal_params = get_converter(\n        params=problem.params,\n        constraints=constraints,\n        bounds=problem.bounds,\n        func_eval=first_crit_eval.value,\n        solver_type=problem.algorithm.algo_info.solver_type,\n        scaling=problem.scaling,\n        derivative_eval=used_deriv,\n        add_soft_bounds=problem.multistart is not None,\n    )\n\n    # ==================================================================================\n    # initialize the log database\n    # ==================================================================================\n    logger: LogStore[Any, Any] | None\n\n    if problem.logging:\n        logger = LogStore.from_options(problem.logging)\n        problem_data = ProblemInitialization(problem.direction, problem.params)\n        logger.problem_store.insert(problem_data)\n    else:\n        logger = None\n\n    # ==================================================================================\n    # Strict checking if bounds are required and infinite values in bounds\n    # ==================================================================================\n    if problem.algorithm.algo_info.supports_bounds:\n        bounds_missing = (\n            internal_params.lower_bounds is None or internal_params.upper_bounds is None\n        )\n\n        # Check for infinite values in bounds arrays (only possible in mixed cases now)\n        infinite_values_in_bounds = False\n        if internal_params.lower_bounds is not None:\n            infinite_values_in_bounds |= np.isinf(internal_params.lower_bounds).any()\n        if internal_params.upper_bounds is not None:\n            infinite_values_in_bounds |= np.isinf(internal_params.upper_bounds).any()\n\n        # Case 1: Algorithm needs bounds but none provided\n        if problem.algorithm.algo_info.needs_bounds and 
bounds_missing:\n            raise IncompleteBoundsError(\n                f\"Algorithm {problem.algorithm.name} requires bounds for all \"\n                \"parameters. Please provide finite lower and upper bounds.\"\n            )\n\n        # Case 2: Algorithm doesn't support infinite bounds but they are present\n        if (\n            not problem.algorithm.algo_info.supports_infinite_bounds\n            and infinite_values_in_bounds\n        ):\n            raise IncompleteBoundsError(\n                f\"Algorithm {problem.algorithm.name} does not support infinite bounds. \"\n                \"Please provide finite bounds for all parameters.\"\n            )\n\n    # ==================================================================================\n    # Do some things that require internal parameters or bounds\n    # ==================================================================================\n\n    if converter.has_transforming_constraints and problem.multistart is not None:\n        raise NotImplementedError(\n            \"multistart optimizations are not yet compatible with transforming \"\n            \"constraints.\"\n        )\n\n    # get error penalty function\n    error_penalty_func = get_error_penalty_function(\n        start_x=internal_params.values,\n        start_criterion=first_crit_eval,\n        error_penalty=problem.error_penalty,\n        solver_type=problem.algorithm.algo_info.solver_type,\n        direction=problem.direction,\n    )\n\n    # process nonlinear constraints:\n    internal_nonlinear_constraints = process_nonlinear_constraints(\n        nonlinear_constraints=nonlinear_constraints,\n        params=problem.params,\n        bounds=problem.bounds,\n        converter=converter,\n        numdiff_options=problem.numdiff_options,\n        skip_checks=problem.skip_checks,\n    )\n\n    x = internal_params.values\n    internal_bounds = InternalBounds(\n        lower=internal_params.lower_bounds,\n        
upper=internal_params.upper_bounds,\n    )\n\n    # ==================================================================================\n    # Create a batch evaluator\n    # ==================================================================================\n    # TODO: Make batch evaluator an argument of maximize and minimize and move this\n    # to create_optimization_problem\n    batch_evaluator = process_batch_evaluator(\"joblib\")\n\n    # ==================================================================================\n    # Create the InternalOptimizationProblem\n    # ==================================================================================\n\n    internal_problem = InternalOptimizationProblem(\n        fun=problem.fun,\n        jac=problem.jac,\n        fun_and_jac=problem.fun_and_jac,\n        converter=converter,\n        solver_type=problem.algorithm.algo_info.solver_type,\n        direction=problem.direction,\n        bounds=internal_bounds,\n        numdiff_options=problem.numdiff_options,\n        error_handling=problem.error_handling,\n        error_penalty_func=error_penalty_func,\n        batch_evaluator=batch_evaluator,\n        # TODO: Actually pass through linear constraints if possible\n        linear_constraints=None,\n        nonlinear_constraints=internal_nonlinear_constraints,\n        logger=logger,\n    )\n\n    # ==================================================================================\n    # Do actual optimization\n    # ==================================================================================\n    if problem.multistart is None:\n        steps = [{\"type\": \"optimization\", \"name\": \"optimization\"}]\n\n        # TODO: Actually use the step ids\n        step_id = log_scheduled_steps_and_get_ids(  # noqa: F841\n            steps=steps,\n            logger=logger,\n        )[0]\n\n        raw_res = problem.algorithm.solve_internal_problem(internal_problem, x, step_id)\n\n    else:\n        
multistart_options = get_internal_multistart_options_from_public(\n            options=problem.multistart,\n            params=problem.params,\n            params_to_internal=converter.params_to_internal,\n        )\n\n        sampling_bounds = InternalBounds(\n            lower=internal_params.soft_lower_bounds,\n            upper=internal_params.soft_upper_bounds,\n        )\n\n        raw_res = run_multistart_optimization(\n            local_algorithm=problem.algorithm,\n            internal_problem=internal_problem,\n            x=x,\n            sampling_bounds=sampling_bounds,\n            options=multistart_options,\n            logger=logger,\n            error_handling=problem.error_handling,\n        )\n\n    # ==================================================================================\n    # Process the result\n    # ==================================================================================\n\n    _scalar_start_criterion = cast(\n        float, first_crit_eval.internal_value(AggregationLevel.SCALAR)\n    )\n    log_reader: LogReader[Any] | None\n\n    extra_fields = ExtraResultFields(\n        start_fun=_scalar_start_criterion,\n        start_params=problem.params,\n        algorithm=problem.algorithm.algo_info.name,\n        direction=problem.direction,\n        n_free=internal_params.free_mask.sum(),\n    )\n\n    if problem.multistart is None:\n        res = process_single_result(\n            raw_res=raw_res,\n            converter=converter,\n            solver_type=problem.algorithm.algo_info.solver_type,\n            extra_fields=extra_fields,\n        )\n    else:\n        res = process_multistart_result(\n            raw_res=raw_res,\n            converter=converter,\n            solver_type=problem.algorithm.algo_info.solver_type,\n            extra_fields=extra_fields,\n        )\n\n    if logger is not None:\n        assert problem.logging is not None\n        log_reader = LogReader.from_options(problem.logging)\n    else:\n    
    log_reader = None\n\n    res.logger = log_reader\n\n    return res\n"
  },
  {
    "path": "src/optimagic/optimization/optimize_result.py",
    "content": "import warnings\nfrom dataclasses import dataclass\nfrom typing import Any, Dict, Optional\n\nimport numpy as np\nimport pandas as pd\n\nfrom optimagic import deprecations\nfrom optimagic.logging.logger import LogReader\nfrom optimagic.optimization.history import History\nfrom optimagic.shared.compat import pd_df_map\nfrom optimagic.typing import PyTree\nfrom optimagic.utilities import to_pickle\n\n\n@dataclass\nclass OptimizeResult:\n    \"\"\"Optimization result object.\n\n    **Attributes**\n\n    Attributes:\n        params: The optimal parameters.\n        fun: The optimal criterion value.\n        start_fun: The criterion value at the start parameters.\n        start_params: The start parameters.\n        algorithm: The algorithm used for the optimization.\n        direction: Maximize or minimize.\n        n_free: Number of free parameters.\n        message: Message returned by the underlying algorithm.\n        success: Whether the optimization was successful.\n        n_fun_evals: Number of criterion evaluations.\n        n_jac_evals: Number of derivative evaluations.\n        n_iterations: Number of iterations until termination.\n        history: Optimization history.\n        convergence_report: The convergence report.\n        multistart_info: Multistart information.\n        algorithm_output: Additional algorithm specific information.\n\n    \"\"\"\n\n    params: Any\n    fun: float\n    start_fun: float\n    start_params: Any\n    algorithm: str\n    direction: str\n    n_free: int\n\n    message: str | None = None\n    success: bool | None = None\n    n_fun_evals: int | None = None\n    n_jac_evals: int | None = None\n    n_hess_evals: int | None = None\n    n_iterations: int | None = None\n    status: int | None = None\n    jac: PyTree | None = None\n    hess: PyTree | None = None\n    hess_inv: PyTree | None = None\n    max_constraint_violation: float | None = None\n\n    history: History | None = None\n\n    convergence_report: Dict 
| None = None\n\n    multistart_info: Optional[\"MultistartInfo\"] = None\n    algorithm_output: Dict[str, Any] | None = None\n    logger: LogReader | None = None\n\n    # ==================================================================================\n    # Deprecations\n    # ==================================================================================\n\n    @property\n    def criterion(self) -> float:\n        msg = \"The criterion attribute is deprecated. Use the fun attribute instead.\"\n        warnings.warn(msg, FutureWarning)\n        return self.fun\n\n    @property\n    def start_criterion(self) -> float:\n        msg = (\n            \"The start_criterion attribute is deprecated. Use the start_fun attribute \"\n            \"instead.\"\n        )\n        warnings.warn(msg, FutureWarning)\n        return self.start_fun\n\n    @property\n    def n_criterion_evaluations(self) -> int | None:\n        msg = (\n            \"The n_criterion_evaluations attribute is deprecated. Use the n_fun_evals \"\n            \"attribute instead.\"\n        )\n        warnings.warn(msg, FutureWarning)\n        return self.n_fun_evals\n\n    @property\n    def n_derivative_evaluations(self) -> int | None:\n        msg = (\n            \"The n_derivative_evaluations attribute is deprecated. 
Use the n_jac_evals \"\n            \"attribute instead.\"\n        )\n        warnings.warn(msg, FutureWarning)\n        return self.n_jac_evals\n\n    # ==================================================================================\n    # Scipy aliases\n    # ==================================================================================\n\n    @property\n    def x(self) -> PyTree:\n        return self.params\n\n    @property\n    def x0(self) -> PyTree:\n        return self.start_params\n\n    @property\n    def nfev(self) -> int | None:\n        return self.n_fun_evals\n\n    @property\n    def nit(self) -> int | None:\n        return self.n_iterations\n\n    @property\n    def njev(self) -> int | None:\n        return self.n_jac_evals\n\n    @property\n    def nhev(self) -> int | None:\n        return self.n_hess_evals\n\n    # Enable attribute access using dictionary-style notation for scipy compatibility\n    def __getitem__(self, key):\n        return getattr(self, key)\n\n    def __repr__(self) -> str:\n        first_line = (\n            f\"{self.direction.title()} with {self.n_free} free parameters terminated\"\n        )\n\n        if self.success is not None:\n            snippet = \"successfully\" if self.success else \"unsuccessfully\"\n            first_line += f\" {snippet}\"\n\n        counters = [\n            (\"criterion evaluations\", self.n_fun_evals),\n            (\"derivative evaluations\", self.n_jac_evals),\n            (\"iterations\", self.n_iterations),\n        ]\n\n        counters = [(n, v) for n, v in counters if v is not None]\n\n        if counters:\n            name, val = counters[0]\n            counter_msg = f\"after {val} {name}\"\n            if len(counters) >= 2:\n                for name, val in counters[1:-1]:\n                    counter_msg += f\", {val} {name}\"\n\n                name, val = counters[-1]\n                counter_msg += f\" and {val} {name}\"\n            first_line += f\" {counter_msg}\"\n\n 
       first_line += \".\"\n\n        if self.message:\n            message = f\"The {self.algorithm} algorithm reported: {self.message}\"\n        else:\n            message = None\n\n        if self.start_fun is not None and self.fun is not None:\n            improvement = (\n                f\"The value of criterion improved from {self.start_fun} to {self.fun}.\"\n            )\n        else:\n            improvement = None\n\n        if self.convergence_report is not None:\n            convergence = _format_convergence_report(\n                self.convergence_report, self.algorithm\n            )\n        else:\n            convergence = None\n\n        sections = [first_line, improvement, message, convergence]\n        sections = [sec for sec in sections if sec is not None]\n\n        msg = \"\\n\\n\".join(sections)\n\n        return msg\n\n    def to_pickle(self, path):\n        \"\"\"Save the OptimizeResult object to pickle.\n\n        Args:\n            path (str, pathlib.Path): A str or pathlib.path ending in .pkl or .pickle.\n\n        \"\"\"\n        to_pickle(self, path=path)\n\n\n@dataclass(frozen=True)\nclass MultistartInfo:\n    \"\"\"Information about the multistart optimization.\n\n    Attributes:\n        start_parameters: List of start parameters for each optimization.\n        local_optima: List of optimization results.\n        exploration_sample: List of parameters used for exploration.\n        exploration_results: List of function values corresponding to exploration.\n        n_optimizations: Number of local optimizations that were run.\n\n    \"\"\"\n\n    start_parameters: list[PyTree]\n    local_optima: list[OptimizeResult]\n    exploration_sample: list[PyTree]\n    exploration_results: list[float]\n\n    def __getitem__(self, key):\n        deprecations.throw_dict_access_future_warning(key, obj_name=type(self).__name__)\n        return getattr(self, key)\n\n    @property\n    def n_optimizations(self) -> int:\n        return 
len(self.local_optima)\n\n\ndef _format_convergence_report(report, algorithm):\n    report = pd.DataFrame.from_dict(report)\n    columns = [\"one_step\", \"five_steps\"]\n\n    table = pd_df_map(report[columns], _format_float).astype(str)\n\n    for col in \"one_step\", \"five_steps\":\n        table[col] = table[col] + _create_stars(report[col])\n\n    table = table.to_string(justify=\"center\")\n\n    introduction = (\n        f\"Independent of the convergence criteria used by {algorithm}, \"\n        \"the strength of convergence can be assessed by the following criteria:\"\n    )\n\n    explanation = (\n        \"(***: change <= 1e-10, **: change <= 1e-8, *: change <= 1e-5. \"\n        \"Change refers to a change between accepted steps. The first column only \"\n        \"considers the last step. The second column considers the last five steps.)\"\n    )\n\n    out = \"\\n\\n\".join([introduction, table, explanation])\n\n    return out\n\n\ndef _create_stars(sr):\n    stars = pd.cut(\n        sr,\n        bins=[-np.inf, 1e-10, 1e-8, 1e-5, np.inf],\n        labels=[\"***\", \"** \", \"*  \", \"   \"],\n    ).astype(\"str\")\n\n    return stars\n\n\ndef _format_float(number):\n    \"\"\"Round to four significant digits.\"\"\"\n    return f\"{number:.4g}\"\n"
  },
  {
    "path": "src/optimagic/optimization/process_results.py",
    "content": "from dataclasses import dataclass, replace\nfrom typing import Any\n\nimport numpy as np\n\nfrom optimagic.optimization.algorithm import InternalOptimizeResult\nfrom optimagic.optimization.convergence_report import get_convergence_report\nfrom optimagic.optimization.history import History\nfrom optimagic.optimization.optimize_result import MultistartInfo, OptimizeResult\nfrom optimagic.parameters.conversion import Converter\nfrom optimagic.typing import AggregationLevel, Direction, EvalTask, PyTree\nfrom optimagic.utilities import isscalar\n\n\n@dataclass(frozen=True)\nclass ExtraResultFields:\n    \"\"\"Fields for OptimizeResult that are not part of InternalOptimizeResult.\"\"\"\n\n    start_fun: float\n    start_params: PyTree\n    algorithm: str\n    direction: Direction\n    n_free: int\n\n\ndef process_single_result(\n    raw_res: InternalOptimizeResult,\n    converter: Converter,\n    solver_type: AggregationLevel,\n    extra_fields: ExtraResultFields,\n) -> OptimizeResult:\n    \"\"\"Process an internal optimizer result.\"\"\"\n    params = converter.params_from_internal(raw_res.x)\n    if isscalar(raw_res.fun):\n        fun = float(raw_res.fun)\n    elif solver_type == AggregationLevel.LIKELIHOOD:\n        fun = float(np.sum(raw_res.fun))\n    elif solver_type == AggregationLevel.LEAST_SQUARES:\n        fun = np.dot(raw_res.fun, raw_res.fun)\n\n    if extra_fields.direction == Direction.MAXIMIZE:\n        fun = -fun\n\n    if raw_res.history is not None:\n        conv_report = get_convergence_report(raw_res.history)\n    else:\n        conv_report = None\n\n    out = OptimizeResult(\n        params=params,\n        fun=fun,\n        start_fun=extra_fields.start_fun,\n        start_params=extra_fields.start_params,\n        algorithm=extra_fields.algorithm,\n        direction=extra_fields.direction.value,\n        n_free=extra_fields.n_free,\n        message=raw_res.message,\n        success=raw_res.success,\n        
n_fun_evals=raw_res.n_fun_evals,\n        n_jac_evals=raw_res.n_jac_evals,\n        n_hess_evals=raw_res.n_hess_evals,\n        n_iterations=raw_res.n_iterations,\n        status=raw_res.status,\n        jac=raw_res.jac,\n        hess=raw_res.hess,\n        hess_inv=raw_res.hess_inv,\n        max_constraint_violation=raw_res.max_constraint_violation,\n        history=raw_res.history,\n        algorithm_output=raw_res.info,\n        convergence_report=conv_report,\n    )\n    return out\n\n\ndef process_multistart_result(\n    raw_res: InternalOptimizeResult,\n    converter: Converter,\n    solver_type: AggregationLevel,\n    extra_fields: ExtraResultFields,\n) -> OptimizeResult:\n    \"\"\"Process results of internal optimizers.\n\n    Args:\n        res (dict): Results dictionary of an internal optimizer or multistart optimizer.\n\n    \"\"\"\n    if raw_res.multistart_info is None:\n        raise ValueError(\"Multistart info is missing.\")\n\n    if isinstance(raw_res, str):\n        res = _dummy_result_from_traceback(raw_res, extra_fields)\n    else:\n        res = process_single_result(\n            raw_res=raw_res,\n            converter=converter,\n            solver_type=solver_type,\n            extra_fields=extra_fields,\n        )\n\n        info = _process_multistart_info(\n            raw_res.multistart_info,\n            converter=converter,\n            solver_type=solver_type,\n            extra_fields=extra_fields,\n        )\n\n        # ==============================================================================\n        # create a convergence report for the multistart optimization; This is not\n        # the same as the convergence report for the individual local optimizations.\n        # ==============================================================================\n        report_history = History(\n            direction=extra_fields.direction,\n            fun=[opt.fun for opt in info.local_optima],\n            params=[opt.params for opt in 
info.local_optima],\n            start_time=len(info.local_optima) * [np.nan],\n            stop_time=len(info.local_optima) * [np.nan],\n            batches=list(range(len(info.local_optima))),\n            task=len(info.local_optima) * [EvalTask.FUN],\n        )\n        conv_report = get_convergence_report(report_history)\n\n        res.convergence_report = conv_report\n\n        res.algorithm = f\"multistart_{res.algorithm}\"\n        res.n_iterations = _sum_or_none([opt.n_iterations for opt in info.local_optima])\n\n        res.n_fun_evals = _sum_or_none([opt.n_fun_evals for opt in info.local_optima])\n        res.n_jac_evals = _sum_or_none([opt.n_jac_evals for opt in info.local_optima])\n\n        res.multistart_info = info\n    return res\n\n\ndef _process_multistart_info(\n    info: dict[str, Any],\n    converter: Converter,\n    solver_type: AggregationLevel,\n    extra_fields: ExtraResultFields,\n) -> MultistartInfo:\n    # The `info` dictionary is obtained from the `multistart_info` field of the\n    # InternalOptimizeResult returned by `run_multistart_optimization` function.\n\n    starts = [converter.params_from_internal(x) for x in info[\"start_parameters\"]]\n\n    optima = []\n    for res, start in zip(info[\"local_optima\"], starts, strict=False):\n        replacements = {\n            \"start_params\": start,\n            \"start_fun\": None,\n        }\n\n        processed = process_single_result(\n            res,\n            converter=converter,\n            solver_type=solver_type,\n            extra_fields=replace(extra_fields, **replacements),\n        )\n        optima.append(processed)\n\n    sample = [converter.params_from_internal(x) for x in info[\"exploration_sample\"]]\n\n    if extra_fields.direction == Direction.MINIMIZE:\n        exploration_res = info[\"exploration_results\"]\n    else:\n        exploration_res = [-res for res in info[\"exploration_results\"]]\n\n    return MultistartInfo(\n        start_parameters=starts,\n      
  local_optima=optima,\n        exploration_sample=sample,\n        exploration_results=exploration_res,\n    )\n\n\ndef _dummy_result_from_traceback(\n    candidate: str, extra_fields: ExtraResultFields\n) -> OptimizeResult:\n    out = OptimizeResult(\n        params=extra_fields.start_params,\n        fun=extra_fields.start_fun,\n        start_fun=extra_fields.start_fun,\n        start_params=extra_fields.start_params,\n        algorithm=extra_fields.algorithm,\n        direction=extra_fields.direction.value,\n        n_free=extra_fields.n_free,\n        message=candidate,\n    )\n    return out\n\n\ndef _sum_or_none(summands: list[int | None | float]) -> int | None:\n    if any(s is None for s in summands):\n        out = None\n    else:\n        out = int(np.array(summands).sum())\n    return out\n"
  },
  {
    "path": "src/optimagic/optimization/scipy_aliases.py",
    "content": "import functools\n\nfrom optimagic.exceptions import InvalidFunctionError\nfrom optimagic.utilities import propose_alternatives\n\n\ndef map_method_to_algorithm(method):\n    implemented = {\n        \"Nelder-Mead\": \"scipy_neldermead\",\n        \"Powell\": \"scipy_powell\",\n        \"CG\": \"scipy_conjugate_gradient\",\n        \"BFGS\": \"scipy_bfgs\",\n        \"Newton-CG\": \"scipy_newton_cg\",\n        \"L-BFGS-B\": \"scipy_lbfgsb\",\n        \"TNC\": \"scipy_truncated_newton\",\n        \"COBYLA\": \"scipy_cobyla\",\n        \"SLSQP\": \"scipy_slsqp\",\n        \"trust-constr\": \"scipy_trust_constr\",\n    }\n\n    not_implemented = {\n        \"dogleg\": \"scipy_dogleg\",\n        \"trust-ncg\": \"scipy_trust_ncg\",\n        \"trust-exact\": \"scipy_trust_exact\",\n        \"trust-krylov\": \"scipy_trust_krylov\",\n        \"COBYQA\": \"scipy_cobyqa\",\n    }\n\n    if method in implemented:\n        algo = implemented[method]\n    elif method in not_implemented:\n        msg = (\n            f\"The method {method} is not yet wrapped in optimagic. Create an issue on \"\n            \"https://github.com/optimagic-dev/optimagic/ if you have urgent need \"\n            \"for this method.\"\n        )\n        raise NotImplementedError(msg)\n    else:\n        alt = propose_alternatives(method, list(implemented) + list(not_implemented))\n        msg = (\n            \"method is an alias for algorithm to select the scipy optimizers under \"\n            f\"their original name. {method} is not a valid scipy algorithm name. 
\"\n            f\"Did you mean {alt}?\"\n        )\n        raise ValueError(msg)\n    return algo\n\n\ndef split_fun_and_jac(fun_and_jac, target=\"fun\"):\n    index = 0 if target == \"fun\" else 1\n\n    @functools.wraps(fun_and_jac)\n    def fun(*args, **kwargs):\n        raw = fun_and_jac(*args, **kwargs)\n        try:\n            out = raw[index]\n        except TypeError as e:\n            msg = (\n                \"If you set `jac=True`, `fun` needs to return a tuple where the first \"\n                \"entry is the value of your objective function and the second entry \"\n                \"is its derivative.\"\n            )\n            raise InvalidFunctionError(msg) from e\n        return out\n\n    return fun\n"
  },
  {
    "path": "src/optimagic/optimizers/__init__.py",
    "content": ""
  },
  {
    "path": "src/optimagic/optimizers/_pounders/__init__.py",
    "content": ""
  },
  {
    "path": "src/optimagic/optimizers/_pounders/_conjugate_gradient.py",
    "content": "\"\"\"Implementation of the Conjugate Gradient algorithm.\"\"\"\n\nimport numpy as np\n\n\ndef minimize_trust_cg(\n    model_gradient, model_hessian, trustregion_radius, *, gtol_abs=1e-8, gtol_rel=1e-6\n):\n    \"\"\"Minimize the quadratic subproblem via (standard) conjugate gradient.\n\n    Solve the trust-region quadratic subproblem:\n      min_x   g.T @ x + 0.5 * x.T @ H @ x\n        s.t.   ||x|| <= trustregion_radius\n\n    approximately, where g denotes the gradient and H the hessian of the quadratic\n    model (i.e. the linear terms and square_terms), respectively.\n\n    Args:\n        model_gradient (np.ndarray): 1d array of shape (n,) containing the\n            gradient (i.e. linear terms) of the quadratic model.\n        model_hessian (np.ndarray): 2d array of shape (n, n) containing the\n            hessian (i.e .square terms) of the quadratic model.\n        trustregion_radius (float): Radius of the trust-region.\n        gtol_abs (float): Convergence tolerance for the absolute gradient norm.\n        gtol_rel (float): Convergence tolerance for the relative gradient norm.\n\n    Returns:\n        np.ndarray: Solution vector of shape (n,).\n\n    \"\"\"\n    n = len(model_gradient)\n    max_iter = n * 2\n    x_candidate = np.zeros(n)\n\n    residual = model_gradient\n    direction = -model_gradient\n\n    gradient_norm = np.linalg.norm(residual)\n    stop_tol = max(gtol_abs, gtol_rel * gradient_norm)\n\n    for _ in range(max_iter):\n        if gradient_norm <= stop_tol:\n            break\n\n        square_terms = direction.T @ model_hessian @ direction\n\n        distance_to_boundary = _get_distance_to_trustregion_boundary(\n            x_candidate, direction, trustregion_radius\n        )\n\n        # avoid divide by zero warning\n        if square_terms > 0:\n            step_size = (residual @ residual) / square_terms\n        else:\n            step_size = np.inf\n\n        if square_terms <= 0 or step_size > 
distance_to_boundary:\n            x_candidate = x_candidate + distance_to_boundary * direction\n            break\n\n        x_candidate, residual, direction = _update_vectors_for_next_iteration(\n            x_candidate, residual, direction, model_hessian, step_size\n        )\n        gradient_norm = np.linalg.norm(residual)\n\n    return x_candidate\n\n\ndef _update_vectors_for_next_iteration(\n    x_candidate, residual, direction, hessian, alpha\n):\n    \"\"\"Update candidate, residual, and direction vectors for the next iteration.\n\n    Args:\n        x_candidate (np.ndarray): Candidate vector of shape (n,).\n        residual (np.ndarray): Array of residuals of shape (n,). The residual vector\n            is defined as `r = Ax - b`, where `A` denotes the hessian matrix and `b` the\n            gradient vector of the quadratic trust-region subproblem.\n            `r` is equivalent to the first derivative of the quadratic subproblem.\n        direction (np.ndarray): Direction vector of shape (n,).\n\n    Returns:\n        (tuple) Tuple containing:\n            - x_candidate (np.ndarray): Updated candidate vector of shape (n,).\n            - residual (np.ndarray): Updated array of residuals of shape (n,).\n            - direction (np.ndarray): Updated direction vector of shape (n,).\n\n    \"\"\"\n    residual_old = residual\n\n    x_candidate = x_candidate + alpha * direction\n    residual = residual_old + alpha * (hessian @ direction)\n\n    beta = (residual @ residual) / (residual_old @ residual_old)\n    direction = -residual + beta * direction\n\n    return x_candidate, residual, direction\n\n\ndef _get_distance_to_trustregion_boundary(candidate, direction, radius):\n    \"\"\"Compute the distance of the candidate vector to trustregion boundary.\n\n    The positive distance sigma is defined in Euclidean norm, as follows:\n\n        || x + sigma * d || = radius\n\n    where x denotes the candidate vector, and d the direction vector.\n\n    Args:\n        
candidate (np.ndarray): Candidate vector of shape (n,).\n        direction (np.ndarray): Direction vector of shape (n,).\n        radius (float): Radius of the trust-region.\n\n    Returns:\n        float: The candidate vector's distance to the trustregion\n            boundary.\n\n    \"\"\"\n    cc = candidate @ candidate\n    cd = candidate @ direction\n    dd = direction @ direction\n\n    sigma = -cd + np.sqrt(cd * cd + dd * (radius**2 - cc))\n    sigma /= dd\n\n    return sigma\n"
  },
  {
    "path": "src/optimagic/optimizers/_pounders/_steihaug_toint.py",
    "content": "\"\"\"Implementation of the Steihaug-Toint Conjugate Gradient algorithm.\"\"\"\n\nimport numpy as np\n\n\ndef minimize_trust_stcg(model_gradient, model_hessian, trustregion_radius):\n    \"\"\"Minimize the quadratic subproblem via Steihaug-Toint conjugate gradient.\n\n    Solve the quadratic trust-region subproblem:\n\n      min_x   g.T @ x + 0.5 * x.T @ hess @ x\n        s.t.   ||x|| <= trustregion_radius\n\n    approximately, where g denotes the gradient and hess the hessian of the quadratic\n    model (i.e. the linear terms and square_terms), respectively.\n\n    The Steihaug-Toint conjugate gradient method is based on Steihaug\n    (:cite:`Steihaug1983`) and Toint (:cite:`Toint1981`).\n\n    Args:\n        model_gradient (np.ndarray): 1d array of shape (n,) containing the\n            gradient (i.e. linear terms) of the quadratic model.\n        model_hessian (np.ndarray): 2d array of shape (n, n) containing the\n            hessian (i.e .square terms) of the quadratic model.\n        trustregion_radius (float): Radius of the trust-region.\n\n    Returns:\n        np.ndarray: Solution vector of shape (n,).\n\n    \"\"\"\n    abstol = 1e-50\n    rtol = 1e-5\n    divtol = 10_000\n\n    n = len(model_gradient)\n    radius_sq = trustregion_radius**2\n\n    residual = -model_gradient\n    rr = residual.T @ residual\n\n    x_candidate = np.zeros(n)\n\n    max_iter = min(n, 10_000)\n\n    z = np.linalg.pinv(model_hessian) @ residual\n    rz = residual @ residual\n\n    n_iter = 0\n    diverged = False\n    converged = False\n\n    norm_r = np.sqrt(rr)\n    norm_r0 = norm_r\n    ttol = max(rtol * norm_r0, abstol)\n\n    converged, diverged = _check_convergence(\n        norm_r, norm_r0, abstol, ttol, divtol, converged, diverged\n    )\n\n    p = model_hessian @ z\n    z = model_hessian @ p\n    n_iter += 1\n\n    kappa = p @ z\n\n    dp = 0\n    norm_d = 0\n    norm_p = p @ p\n\n    if kappa <= 0:\n        converged = True\n\n        x_candidate, z, 
n_iter = _update_candidate_vector_and_iteration_number(\n            x_candidate,\n            residual,\n            p,\n            z,\n            model_gradient,\n            model_hessian,\n            rr,\n            trustregion_radius,\n            norm_p,\n            n_iter,\n        )\n\n    for _ in range(max_iter):\n        alpha = rz / kappa\n        norm_dp1 = norm_d + alpha * (2 * dp + alpha * norm_p)\n\n        if trustregion_radius != 0 and norm_dp1 >= radius_sq:\n            converged = True\n\n            if norm_p > 0:\n                x_candidate = _take_step_to_trustregion_boundary(\n                    x_candidate, p, dp, radius_sq, norm_d, norm_p\n                )\n\n            break\n\n        x_candidate = x_candidate + alpha * p\n        residual = residual - alpha * (model_hessian @ p)\n\n        norm_d = x_candidate @ x_candidate\n\n        rzm1 = rz\n        rz = residual @ residual\n\n        norm_r = np.linalg.norm(residual)\n\n        converged, diverged = _check_convergence(\n            norm_r, norm_r0, abstol, ttol, divtol, converged, diverged\n        )\n\n        if converged or diverged:\n            break\n\n        beta = rz / rzm1\n\n        if abs(beta) <= 0:\n            diverged = True\n            break\n\n        if n_iter >= max_iter:\n            diverged = True\n            break\n\n        p = residual + beta * p\n\n        dp = x_candidate @ p\n        norm_p = p @ p\n\n        z = model_hessian @ p\n        kappa = p @ z\n        n_iter += 1\n\n        if kappa <= 0:\n            converged = True\n\n            if trustregion_radius != 0 and norm_p > 0:\n                x_candidate = _take_step_to_trustregion_boundary(\n                    x_candidate, p, dp, radius_sq, norm_d, norm_p\n                )\n\n            break\n\n    return x_candidate\n\n\ndef _update_candidate_vector_and_iteration_number(\n    x_candidate,\n    residual,\n    p,\n    z,\n    model_gradient,\n    model_hessian,\n    rr,\n    
radius,\n    norm_p,\n    n_iter,\n):\n    \"\"\"Update candidate, z vector, and iteration number.\"\"\"\n    radius_sq = radius**2\n\n    if radius != 0 and norm_p > 0:\n        # Take step to boundary\n        step = np.sqrt(radius_sq / norm_p)\n        x_candidate = x_candidate + step * p\n\n    elif radius != 0:\n        if radius_sq >= rr:\n            alpha = 1.0\n        else:\n            alpha = np.sqrt(radius_sq / rr)\n\n        x_candidate = x_candidate + alpha * residual\n        z = model_gradient - 0.5 * (model_hessian @ x_candidate)\n\n        n_iter += 1\n\n    return x_candidate, z, n_iter\n\n\ndef _take_step_to_trustregion_boundary(x_candidate, p, dp, radius_sq, norm_d, norm_p):\n    \"\"\"Take step to trust-region boundary.\"\"\"\n    step = (np.sqrt(dp * dp + norm_p * (radius_sq - norm_d)) - dp) / norm_p\n    x_candidate = x_candidate + step * p\n\n    return x_candidate\n\n\ndef _check_convergence(\n    rnorm,\n    rnorm0,\n    abstol,  # noqa: ARG001\n    ttol,\n    divtol,\n    converged,\n    diverged,\n):\n    \"\"\"Check for convergence.\"\"\"\n    if rnorm <= ttol:\n        converged = True\n    elif rnorm >= divtol * rnorm0:\n        diverged = True\n\n    return converged, diverged\n"
  },
  {
    "path": "src/optimagic/optimizers/_pounders/_trsbox.py",
    "content": "\"\"\"Implementation of the quadratic trustregion solver TRSBOX.\"\"\"\n\nimport numpy as np\n\n\ndef minimize_trust_trsbox(\n    model_gradient,\n    model_hessian,\n    trustregion_radius,\n    *,\n    lower_bounds,\n    upper_bounds,\n):\n    \"\"\"Minimize a qaudratic trust-region subproblem using the trsbox algorithm.\n\n    Solve the quadratic trust-region subproblem:\n      min_x   g.T @ x + 0.5 * x.T @ hess @ x\n        s.t.   ||x|| <= trustregion_radius\n               lower_bounds <= x <= upper_bounds\n\n    approximately, using an active-set approach, where g denotes the gradient\n    and hess the hessian of the quadratic model (i.e. the linear terms and\n    square_terms), respectively.\n\n    The subproblem is assumed to be centered, i.e. ``x_center`` is the zero vector.\n    The trsbox algorithm applies a conjugate gradient step in its main loop.\n\n    This implementation of the quadratic trsbox algorithm is based on\n    M. J. D. Powell (2009) \"The BOBYQA algorithm for bound constrained\n    optimization without derivatives.\" (cite:`Powell2009`).\n\n    Some modifications to the termination conditions are taken from the\n    DFBOLS method by Zhang et al. (:cite:`Zhang2010`).\n\n    Args:\n        model_gradient (np.ndarray): 1d array of shape (n,) containing the\n            gradient (i.e. 
linear terms) of the quadratic model.\n        model_hessian (np.ndarray): 2d array of shape (n, n) containing the\n            hessian (i.e .square terms) of the quadratic model.\n        lower_bounds (np.ndarray): 1d array of shape (n,) with lower bounds\n            for the parameter vector x.\n        upper_bounds (np.ndarray): 1d array of shape (n,) with upper bounds\n            for the parameter vector x.\n        trustregion_radius (float): Radius of the trust-region.\n\n    Returns:\n        np.ndarray: Solution vector for the quadratic trust-region subproblem\n            of shape (n,).\n\n    \"\"\"\n    n = len(model_gradient)\n    x_center = np.zeros(n)\n\n    n_iter = 0\n    n_fixed_variables = 0\n\n    x_bounded = np.zeros(n)\n    x_bounded[(x_center <= lower_bounds) & (model_gradient >= 0.0)] = -1\n    x_bounded[(x_center >= upper_bounds) & (model_gradient <= 0.0)] = 1\n\n    x_candidate = np.zeros(n)\n    gradient_projected = np.zeros(n)\n    gradient_candidate = model_gradient\n\n    total_reduction = 0\n    delta_sq = trustregion_radius**2\n    curve_min = -1.0\n    beta = 0\n\n    need_alt_trust_step = False\n    max_iter = 100 * n**2\n\n    # Main Conjugate Gradient loop\n    for _ in range(max_iter):\n        gradient_projected[x_bounded != 0] = 0\n        if beta == 0:\n            gradient_projected[x_bounded == 0] = -gradient_candidate[x_bounded == 0]\n        else:\n            gradient_projected[x_bounded == 0] = (\n                beta * gradient_projected[x_bounded == 0]\n                - gradient_candidate[x_bounded == 0]\n            )\n        gradient_projected_sumsq = gradient_projected @ gradient_projected\n\n        if gradient_projected_sumsq == 0:\n            need_alt_trust_step = False\n            break\n\n        if beta == 0:\n            gradient_sumsq = gradient_projected_sumsq\n            max_iter = n_iter + n - n_fixed_variables\n\n        if n_iter == 0:\n            gradient_sumsq_initial = gradient_sumsq\n\n       
 if gradient_sumsq <= min(\n            1.0e-6 * gradient_sumsq_initial, 1.0e-18\n        ) or gradient_sumsq * delta_sq <= min(1.0e-6 * total_reduction**2, 1.0e-18):\n            need_alt_trust_step = False\n            break\n\n        hess_g = model_hessian @ gradient_projected\n        g_x = gradient_projected[x_bounded == 0] @ x_candidate[x_bounded == 0]\n        g_hess_g = gradient_projected[x_bounded == 0] @ hess_g[x_bounded == 0]\n        raw_distance = (\n            delta_sq - x_candidate[x_bounded == 0] @ x_candidate[x_bounded == 0]\n        )\n\n        if raw_distance <= 0:\n            need_alt_trust_step = True\n            break\n\n        step_len, distance_to_boundary = _take_unconstrained_step_up_to_boundary(\n            raw_distance, gradient_sumsq, gradient_projected_sumsq, g_x, g_hess_g\n        )\n\n        if step_len <= 1.0e-30:\n            need_alt_trust_step = False\n            break\n\n        step_len, index_bound_active = _take_constrained_step_up_to_boundary(\n            x_candidate, gradient_projected, step_len, lower_bounds, upper_bounds\n        )\n\n        current_reduction = 0\n        if step_len > 0:\n            n_iter += 1\n            (\n                x_candidate,\n                gradient_candidate,\n                current_reduction,\n                total_reduction,\n                curve_min,\n                gradient_sumsq,\n                gradient_sumsq_old,\n            ) = _update_candidate_vectors_and_reduction(\n                x_candidate,\n                x_bounded,\n                gradient_candidate,\n                gradient_projected,\n                step_len,\n                total_reduction,\n                curve_min,\n                index_bound_active,\n                gradient_projected_sumsq,\n                gradient_sumsq,\n                g_hess_g,\n                hess_g,\n            )\n\n        if index_bound_active is not None:\n            n_fixed_variables += 1\n            if 
gradient_projected[index_bound_active] >= 0:\n                x_bounded[index_bound_active] = 1\n            else:\n                x_bounded[index_bound_active] = -1\n\n            delta_sq = delta_sq - x_candidate[index_bound_active] ** 2\n            if delta_sq <= 0:\n                need_alt_trust_step = True\n                break\n\n            beta = 0\n            continue\n\n        if step_len >= distance_to_boundary:\n            need_alt_trust_step = True\n            break\n\n        if n_iter == max_iter or current_reduction <= 1.0e-6 * total_reduction:\n            need_alt_trust_step = False\n            break\n\n        beta = gradient_sumsq / gradient_sumsq_old\n        continue\n\n    if need_alt_trust_step:\n        curve_min = 0\n        x_candidate = _perform_alternative_trustregion_step(\n            x_candidate=x_candidate,\n            x_bounded=x_bounded,\n            gradient_candidate=gradient_candidate,\n            model_hessian=model_hessian,\n            lower_bounds=lower_bounds,\n            upper_bounds=upper_bounds,\n            n_fixed_variables=n_fixed_variables,\n            total_reduction=total_reduction,\n        )\n    else:\n        x_candidate = _apply_bounds_to_candidate_vector(\n            x_candidate, x_bounded, lower_bounds, upper_bounds\n        )\n\n    return x_candidate\n\n\ndef _perform_alternative_trustregion_step(\n    x_candidate,\n    x_bounded,\n    gradient_candidate,\n    model_hessian,\n    lower_bounds,\n    upper_bounds,\n    n_fixed_variables,\n    total_reduction,\n):\n    \"\"\"Perform the alternative trust-region step.\"\"\"\n    n = len(x_candidate)\n    max_iter = 100 * n**2\n\n    for _ in range(max_iter):\n        if n_fixed_variables >= n - 1:\n            x_candidate = _apply_bounds_to_candidate_vector(\n                x_candidate, x_bounded, lower_bounds, upper_bounds\n            )\n            break\n\n        search_direction = np.zeros(n)\n        search_direction[x_bounded == 0] = 
x_candidate[x_bounded == 0]\n\n        x_reduced = x_candidate[x_bounded == 0] @ x_candidate[x_bounded == 0]\n        x_grad = x_candidate[x_bounded == 0] @ gradient_candidate[x_bounded == 0]\n        gradient_reduced = (\n            gradient_candidate[x_bounded == 0] @ gradient_candidate[x_bounded == 0]\n        )\n        hess_s = model_hessian @ search_direction\n        hessian_reduced = hess_s\n\n        restart_alt_loop = False\n\n        for _ in range(max_iter):\n            raw_reduction = gradient_reduced * x_reduced - x_grad**2\n            if raw_reduction <= 1.0e-4 * total_reduction**2:\n                restart_alt_loop = False\n                break\n\n            search_direction, s_norm = _compute_new_search_direction_and_norm(\n                x_candidate,\n                x_bounded,\n                x_reduced,\n                gradient_candidate,\n                x_grad,\n                raw_reduction,\n            )\n\n            (\n                x_bounded,\n                index_active_bound,\n                n_fixed_variables,\n                active_bound,\n                bound_on_tangent,\n                free_variable_reached_bound,\n            ) = _calc_upper_bound_on_tangent(\n                x_candidate,\n                search_direction,\n                x_bounded,\n                lower_bounds,\n                upper_bounds,\n                n_fixed_variables,\n            )\n\n            if free_variable_reached_bound:\n                restart_alt_loop = True\n                break\n\n            hess_s = model_hessian @ search_direction\n\n            s_hess_s = np.sum(search_direction[x_bounded == 0] * hess_s[x_bounded == 0])\n            x_hess_s = np.sum(x_candidate[x_bounded == 0] * hess_s[x_bounded == 0])\n            x_hess_x = np.sum(\n                x_candidate[x_bounded == 0] * hessian_reduced[x_bounded == 0]\n            )\n\n            (\n                previous_reduction,\n                next_reduction,\n        
        max_reduction,\n                tangent,\n                index_angle_greatest_reduction,\n                n_angles,\n            ) = _calc_greatest_criterion_reduction(\n                bound_on_tangent, s_hess_s, x_hess_s, x_hess_x, x_grad, s_norm\n            )\n\n            if index_angle_greatest_reduction == -1:\n                restart_alt_loop = False\n                break\n\n            if index_angle_greatest_reduction < n_angles - 1:\n                tangent = _update_tangent(\n                    index_angle_greatest_reduction,\n                    bound_on_tangent,\n                    n_angles,\n                    next_reduction,\n                    previous_reduction,\n                    max_reduction,\n                )\n\n            cosine = (1.0 - tangent**2) / (1.0 + tangent**2)\n            sine = 2.0 * tangent / (1.0 + tangent**2)\n            current_reduction = _calc_new_reduction(\n                tangent, sine, s_hess_s, x_hess_x, x_hess_s, x_grad, s_norm\n            )\n\n            if current_reduction <= 0.0:\n                restart_alt_loop = False\n                break\n\n            (\n                x_candidate,\n                gradient_candidate,\n                x_grad,\n                gradient_reduced,\n                hessian_reduced,\n            ) = _update_candidate_vectors_and_reduction_alt_step(\n                x_candidate,\n                search_direction,\n                x_bounded,\n                gradient_candidate,\n                cosine,\n                sine,\n                hess_s,\n                hessian_reduced,\n            )\n\n            total_reduction = total_reduction + current_reduction\n            if (\n                index_active_bound is not None\n                and index_angle_greatest_reduction == n_angles - 1\n            ):\n                n_fixed_variables += 1\n                x_bounded[index_active_bound] = active_bound\n                restart_alt_loop = True\n       
         break\n\n            if current_reduction <= 0.01 * total_reduction:\n                restart_alt_loop = False\n                break\n\n            continue\n\n        if restart_alt_loop:\n            continue\n        else:\n            break\n\n    x_candidate = _apply_bounds_to_candidate_vector(\n        x_candidate, x_bounded, lower_bounds, upper_bounds\n    )\n\n    return x_candidate\n\n\ndef _apply_bounds_to_candidate_vector(\n    x_candidate,\n    x_bounded,\n    lower_bounds,\n    upper_bounds,\n):\n    \"\"\"Force candidate vector to lie within bounds.\"\"\"\n    x_candidate_new = np.clip(lower_bounds, x_candidate, upper_bounds)\n    x_candidate_new[x_bounded == -1] = lower_bounds[x_bounded == -1]\n    x_candidate_new[x_bounded == 1] = upper_bounds[x_bounded == 1]\n\n    return x_candidate_new\n\n\ndef _take_unconstrained_step_up_to_boundary(\n    raw_distance, gradient_sumsq, gradient_projected_sumsq, g_x, g_hess_g\n):\n    \"\"\"Take unconstrained step, ignoring bounds, up to boundary.\"\"\"\n    temp = np.sqrt(gradient_projected_sumsq * raw_distance + g_x**2)\n\n    if g_x >= 0:\n        distance_to_boundary = raw_distance / (temp + g_x)\n    else:\n        distance_to_boundary = (temp - g_x) / gradient_projected_sumsq\n\n    if g_hess_g <= 0:\n        step_len = distance_to_boundary\n    else:\n        step_len = min(distance_to_boundary, gradient_sumsq / g_hess_g)\n\n    return step_len, distance_to_boundary\n\n\ndef _update_candidate_vectors_and_reduction(\n    x_candidate,\n    x_bounded,\n    gradient_candidate,\n    gradient_projected,\n    step_len,\n    total_reduction,\n    curve_min,\n    index_bound_active,\n    gradient_projected_sumsq,\n    gradient_sumsq,\n    g_hess_g,\n    hess_g,\n):\n    \"\"\"Update candidate vectors and the associated criterion reduction.\"\"\"\n    current_min = g_hess_g / gradient_projected_sumsq\n\n    if index_bound_active is None and current_min > 0:\n        if curve_min != -1.0:\n            
curve_min = min(curve_min, current_min)\n        else:\n            curve_min = current_min\n\n    gradient_sumsq_old = gradient_sumsq\n\n    gradient_candidate = gradient_candidate + step_len * hess_g\n    x_candidate = x_candidate + step_len * gradient_projected\n\n    gradient_sumsq = (\n        gradient_candidate[x_bounded == 0] @ gradient_candidate[x_bounded == 0]\n    )\n\n    current_reduction = max(\n        step_len * (gradient_sumsq_old - 0.5 * step_len * g_hess_g), 0\n    )\n    total_reduction = total_reduction + current_reduction\n\n    return (\n        x_candidate,\n        gradient_candidate,\n        current_reduction,\n        total_reduction,\n        curve_min,\n        gradient_sumsq,\n        gradient_sumsq_old,\n    )\n\n\ndef _take_constrained_step_up_to_boundary(\n    x_candidate, gradient_projected, step_len, lower_bounds, upper_bounds\n):\n    \"\"\"Reduce step length, where boundary is hit, to preserve simple bounds.\"\"\"\n    index_bound_active = None\n\n    for i in range(len(x_candidate)):\n        if gradient_projected[i] != 0:\n            if gradient_projected[i] > 0:\n                step_len_constr = (\n                    upper_bounds[i] - x_candidate[i]\n                ) / gradient_projected[i]\n            else:\n                step_len_constr = (\n                    lower_bounds[i] - x_candidate[i]\n                ) / gradient_projected[i]\n\n            if step_len_constr < step_len:\n                step_len = step_len_constr\n                index_bound_active = i\n\n    return step_len, index_bound_active\n\n\ndef _calc_upper_bound_on_tangent(\n    x_candidate,\n    search_direction,\n    x_bounded,\n    lower_bounds,\n    upper_bounds,\n    n_fixed_variables,\n):\n    \"\"\"Calculate upper bound on tangent of half the angle to the boundary.\"\"\"\n    bound_on_tangent = 1\n    free_variable_reached_bound = False\n    index_active_bound = None\n    active_bound = None\n\n    for i in range(len(x_candidate)):\n        
if x_bounded[i] == 0:\n            lower_bound_centered = x_candidate[i] - lower_bounds[i]\n            upper_bound_centered = upper_bounds[i] - x_candidate[i]\n\n            if lower_bound_centered <= 0.0:\n                n_fixed_variables += 1\n                x_bounded[i] = -1\n                free_variable_reached_bound = True\n                break\n\n            elif upper_bound_centered <= 0.0:\n                n_fixed_variables += 1\n                x_bounded[i] = 1\n                free_variable_reached_bound = True\n                break\n\n            ssq = x_candidate[i] ** 2 + search_direction[i] ** 2\n\n            ssq_lower = ssq - lower_bounds[i] ** 2\n            if ssq_lower > 0.0:\n                ssq_lower = np.sqrt(ssq_lower) - search_direction[i]\n                if bound_on_tangent * ssq_lower > lower_bound_centered:\n                    bound_on_tangent = lower_bound_centered / ssq_lower\n                    index_active_bound = i\n                    active_bound = -1\n\n            ssq_upper = ssq - upper_bounds[i] ** 2\n            if ssq_upper > 0.0:\n                ssq_upper = np.sqrt(ssq_upper) + search_direction[i]\n                if bound_on_tangent * ssq_upper > upper_bound_centered:\n                    bound_on_tangent = upper_bound_centered / ssq_upper\n                    index_active_bound = i\n                    active_bound = 1\n\n    return (\n        x_bounded,\n        index_active_bound,\n        n_fixed_variables,\n        active_bound,\n        bound_on_tangent,\n        free_variable_reached_bound,\n    )\n\n\ndef _calc_greatest_criterion_reduction(\n    bound_on_tangent, s_hess_s, x_hess_s, x_hess_x, x_grad, s_norm\n):\n    \"\"\"Calculate the greatest feasible reduction in the criterion function.\n\n    The largest reduction is found by looking at a range of equally spaced values of\n    ``tangent`` in the interval [0, ``bound_on_tangent``], where ``tangent`` is the\n    tangent of half the angle to the 
trust-region boundary.\n\n    \"\"\"\n    previous_reduction = None\n    next_reduction = None\n\n    max_reduction = 0\n    index_angle_greatest_reduction = -1\n    old_reduction = 0\n    n_angles = int(17 * bound_on_tangent + 3.1)\n\n    for i in range(n_angles):\n        tangent = bound_on_tangent * (i + 1) / n_angles\n        sine = 2.0 * tangent / (1.0 + tangent**2)\n\n        new_reduction = _calc_new_reduction(\n            tangent, sine, s_hess_s, x_hess_x, x_hess_s, x_grad, s_norm\n        )\n\n        if new_reduction > max_reduction:\n            max_reduction = new_reduction\n            index_angle_greatest_reduction = i\n            previous_reduction = old_reduction\n        elif i == index_angle_greatest_reduction + 1:\n            next_reduction = new_reduction\n        old_reduction = new_reduction\n\n    return (\n        previous_reduction,\n        next_reduction,\n        max_reduction,\n        tangent,\n        index_angle_greatest_reduction,\n        n_angles,\n    )\n\n\ndef _update_candidate_vectors_and_reduction_alt_step(\n    x_candidate,\n    search_direction,\n    x_bounded,\n    gradient_candidate,\n    cosine,\n    sine,\n    hess_s,\n    hessian_reduced,\n):\n    \"\"\"Update candidate vectors and the associated criterion reduction.\n\n    If the angle of the alternative iteration is restricted by a bound on a free\n    variable, that variable is fixed at the bound.\n\n    \"\"\"\n    gradient_candidate_new = (\n        gradient_candidate + (cosine - 1.0) * hessian_reduced + sine * hess_s\n    )\n    x_candidate_new = np.copy(x_candidate)\n    x_candidate_new[x_bounded == 0] = (\n        cosine * x_candidate[x_bounded == 0] + sine * search_direction[x_bounded == 0]\n    )\n    x_grad = x_candidate_new[x_bounded == 0] @ gradient_candidate_new[x_bounded == 0]\n    gradient_reduced = (\n        gradient_candidate_new[x_bounded == 0] @ gradient_candidate_new[x_bounded == 0]\n    )\n    hessian_reduced = cosine * hessian_reduced + sine 
* hess_s\n\n    return (\n        x_candidate_new,\n        gradient_candidate_new,\n        x_grad,\n        gradient_reduced,\n        hessian_reduced,\n    )\n\n\ndef _compute_new_search_direction_and_norm(\n    x_candidate, x_bounded, x_reduced, gradient_candidate, x_grad, raw_reduction\n):\n    \"\"\"Compute the new search direction and its norm.\"\"\"\n    raw_reduction = np.sqrt(raw_reduction)\n    search_direction = np.zeros_like(x_candidate)\n\n    search_direction[x_bounded == 0] = (\n        x_grad * x_candidate[x_bounded == 0]\n        - x_reduced * gradient_candidate[x_bounded == 0]\n    ) / raw_reduction\n    s_norm = -raw_reduction\n\n    return search_direction, s_norm\n\n\ndef _calc_new_reduction(tangent, sine, s_hess_s, x_hess_x, x_hess_s, x_grad, s_norm):\n    \"\"\"Calculate the new reduction in the criterion function.\"\"\"\n    raw_reduction = s_hess_s + tangent * (tangent * x_hess_x - 2.0 * x_hess_s)\n    current_reduction = sine * (tangent * x_grad - s_norm - 0.5 * sine * raw_reduction)\n\n    return current_reduction\n\n\ndef _update_tangent(\n    index_angle_greatest_reduction,\n    bound_on_tangent,\n    n_angles,\n    next_reduction,\n    previous_reduction,\n    max_reduction,\n):\n    \"\"\"Update the tangent of half the angle to the trust-region boundary.\"\"\"\n    raw_reduction = (next_reduction - previous_reduction) / (\n        2.0 * max_reduction - previous_reduction - next_reduction\n    )\n    tangent = (\n        bound_on_tangent\n        * ((index_angle_greatest_reduction + 1) + 0.5 * raw_reduction)\n        / n_angles\n    )\n    return tangent\n"
  },
  {
    "path": "src/optimagic/optimizers/_pounders/bntr.py",
    "content": "\"\"\"Auxiliary functions for the quadratic BNTR trust-region subsolver.\"\"\"\n\nfrom functools import reduce\nfrom typing import NamedTuple\n\nimport numpy as np\n\nfrom optimagic.optimizers._pounders._conjugate_gradient import (\n    minimize_trust_cg,\n)\nfrom optimagic.optimizers._pounders._steihaug_toint import (\n    minimize_trust_stcg,\n)\nfrom optimagic.optimizers._pounders._trsbox import minimize_trust_trsbox\n\nEPSILON = np.finfo(float).eps ** (2 / 3)\n\n\nclass ActiveBounds(NamedTuple):\n    lower: np.ndarray | None = None\n    upper: np.ndarray | None = None\n    fixed: np.ndarray | None = None\n    active: np.ndarray | None = None\n    inactive: np.ndarray | None = None\n\n\ndef bntr(\n    model,\n    lower_bounds,\n    upper_bounds,\n    x_candidate,\n    *,\n    conjugate_gradient_method,\n    maxiter,\n    maxiter_gradient_descent,\n    gtol_abs,\n    gtol_rel,\n    gtol_scaled,\n    gtol_abs_conjugate_gradient,\n    gtol_rel_conjugate_gradient,\n):\n    \"\"\"Minimize a bounded trust-region subproblem via Newton Conjugate Gradient method.\n\n    The BNTR (Bounded Newton Trust Region) algorithm uses an active-set approach\n    to solve the symmetric system of equations:\n\n        hessian @ x = - gradient\n\n    only for the inactive parameters of x that lie within the bounds. 
The active-set\n    estimation employed here is based on Bertsekas (:cite:`Bertsekas1982`).\n\n    In the main loop, BNTR globalizes the Newton step using a trust-region method\n    based on the predicted versus actual reduction in the criterion function.\n    The trust-region radius is increased only if the accepted step is at the\n    trust-region boundary.\n\n\n    Args:\n        model (NamedTuple): NamedTuple containing the parameters of the\n            main model, i.e.:\n            - ``linear_terms`` (np.ndarray): 1d array of shape (n,)\n            - ``square_terms`` (np.ndarray): 2d array of shape (n,n).\n        lower_bounds (np.ndarray): 1d array of shape (n,) with lower bounds\n            for the parameter vector x.\n        upper_bounds (np.ndarray): 1d array of shape (n,) with upper bounds\n            for the parameter vector x.\n        x_candidate (np.ndarray): Initial guess for the solution of the subproblem.\n        conjugate_gradient_method (str): Method for computing the conjugate gradient\n            step. Available conjugate gradient methods are:\n            - \"cg\"\n            - \"steihaug_toint\"\n            - \"trsbox\" (default)\n        maxiter (int): Maximum number of iterations. 
If reached, terminate.\n        maxiter_gradient_descent (int): Maximum number of steepest descent iterations\n            to perform when the trust-region subsolver BNTR is used.\n        gtol_abs (float): Convergence tolerance for the absolute gradient norm.\n        gtol_rel (float): Convergence tolerance for the relative gradient norm.\n        gtol_scaled (float): Convergence tolerance for the scaled gradient norm.\n        gtol_abs_conjugate_gradient (float): Convergence tolerance for the absolute\n            gradient norm in the conjugate gradient step of the trust-region\n            subproblem (\"BNTR\").\n        gtol_rel_conjugate_gradient (float): Convergence tolerance for the relative\n            gradient norm in the conjugate gradient step of the trust-region\n            subproblem (\"BNTR\").\n\n    Returns:\n        (dict): Result dictionary containing the following keys:\n            - ``x`` (np.ndarray): Solution vector of the subproblem of shape (n,)\n            - ``criterion`` (float): Minimum function value associated with the\n                solution.\n            - ``n_iterations`` (int): Number of iterations the algorithm ran before\n                termination.\n            - ``success`` (bool): Boolean indicating whether a solution has been found\n                before reaching maxiter.\n\n    \"\"\"\n    options_update_radius = {\n        \"eta1\": 1.0e-4,\n        \"eta2\": 0.25,\n        \"eta3\": 0.50,\n        \"eta4\": 0.90,\n        \"alpha1\": 0.25,\n        \"alpha2\": 0.50,\n        \"alpha3\": 1.00,\n        \"alpha4\": 2.00,\n        \"alpha5\": 4.00,\n        \"min_radius\": 1e-10,\n        \"max_radius\": 1e10,\n        \"default_radius\": 100.00,\n    }\n\n    (\n        x_candidate,\n        f_candidate,\n        gradient_unprojected,\n        hessian_bounds_inactive,\n        trustregion_radius,\n        active_bounds_info,\n        converged,\n        convergence_reason,\n    ) = 
_take_preliminary_gradient_descent_step_and_check_for_solution(\n        x_candidate,\n        model,\n        lower_bounds,\n        upper_bounds,\n        maxiter_gradient_descent,\n        gtol_abs,\n        gtol_rel,\n        gtol_scaled,\n    )\n\n    for niter in range(maxiter + 1):\n        if converged:\n            break\n\n        x_old = x_candidate\n        f_old = f_candidate\n        accept_step = False\n\n        while not accept_step and not converged:\n            gradient_bounds_inactive = gradient_unprojected[active_bounds_info.inactive]\n            hessian_bounds_inactive = _find_hessian_submatrix_where_bounds_inactive(\n                model, active_bounds_info\n            )\n            (\n                conjugate_gradient_step,\n                conjugate_gradient_step_inactive_bounds,\n                cg_step_norm,\n            ) = _compute_conjugate_gradient_step(\n                x_candidate,\n                gradient_bounds_inactive,\n                hessian_bounds_inactive,\n                lower_bounds,\n                upper_bounds,\n                active_bounds_info,\n                trustregion_radius,\n                conjugate_gradient_method=conjugate_gradient_method,\n                gtol_abs_conjugate_gradient=gtol_abs_conjugate_gradient,\n                gtol_rel_conjugate_gradient=gtol_rel_conjugate_gradient,\n                options_update_radius=options_update_radius,\n            )\n\n            x_unbounded = x_candidate + conjugate_gradient_step\n            x_candidate = _apply_bounds_to_x_candidate(\n                x_unbounded, lower_bounds, upper_bounds\n            )\n\n            predicted_reduction = (\n                _compute_predicted_reduction_from_conjugate_gradient_step(\n                    conjugate_gradient_step,\n                    conjugate_gradient_step_inactive_bounds,\n                    gradient_unprojected,\n                    gradient_bounds_inactive,\n                    
hessian_bounds_inactive,\n                    active_bounds_info,\n                )\n            )\n\n            f_candidate = _evaluate_model_criterion(\n                x_candidate, model.linear_terms, model.square_terms\n            )\n            actual_reduction = f_old - f_candidate\n\n            trustregion_radius_old = trustregion_radius\n            (\n                trustregion_radius,\n                accept_step,\n            ) = _update_trustregion_radius_conjugate_gradient(\n                f_candidate,\n                predicted_reduction,\n                actual_reduction,\n                cg_step_norm,\n                trustregion_radius,\n                options_update_radius,\n            )\n\n            if accept_step:\n                gradient_unprojected = (\n                    model.linear_terms + model.square_terms @ x_candidate\n                )\n\n                active_bounds_info = _get_information_on_active_bounds(\n                    x_candidate,\n                    gradient_unprojected,\n                    lower_bounds,\n                    upper_bounds,\n                )\n            else:\n                x_candidate = x_old\n                f_candidate = f_old\n\n                if trustregion_radius == trustregion_radius_old:\n                    converged = True\n                    break\n\n            converged, convergence_reason = _check_for_convergence(\n                x_candidate,\n                f_candidate,\n                gradient_unprojected,\n                model,\n                lower_bounds,\n                upper_bounds,\n                converged,\n                convergence_reason,\n                niter,\n                maxiter=maxiter,\n                gtol_abs=gtol_abs,\n                gtol_rel=gtol_rel,\n                gtol_scaled=gtol_scaled,\n            )\n\n    result = {\n        \"x\": x_candidate,\n        \"criterion\": f_candidate,\n        \"n_iterations\": niter,\n        
\"success\": converged,\n        \"message\": convergence_reason,\n    }\n\n    return result\n\n\ndef _take_preliminary_gradient_descent_step_and_check_for_solution(\n    x_candidate,\n    model,\n    lower_bounds,\n    upper_bounds,\n    maxiter_gradient_descent,\n    gtol_abs,\n    gtol_rel,\n    gtol_scaled,\n):\n    \"\"\"Take a preliminary gradient descent step and check if we found a solution.\"\"\"\n    options_update_radius = {\n        \"mu1\": 0.35,\n        \"mu2\": 0.50,\n        \"gamma1\": 0.0625,\n        \"gamma2\": 0.5,\n        \"gamma3\": 2.0,\n        \"gamma4\": 5.0,\n        \"theta\": 0.25,\n        \"min_radius\": 1e-10,\n        \"max_radius\": 1e10,\n        \"default_radius\": 100.0,\n    }\n\n    converged = False\n    convergence_reason = \"Continue iterating.\"\n\n    criterion_candidate = _evaluate_model_criterion(\n        x_candidate, model.linear_terms, model.square_terms\n    )\n\n    active_bounds_info = _get_information_on_active_bounds(\n        x_candidate,\n        model.linear_terms,\n        lower_bounds,\n        upper_bounds,\n    )\n\n    gradient_unprojected = model.linear_terms + model.square_terms @ x_candidate\n    gradient_projected = _project_gradient_onto_feasible_set(\n        gradient_unprojected, active_bounds_info\n    )\n\n    converged, convergence_reason = _check_for_convergence(\n        x_candidate,\n        criterion_candidate,\n        gradient_unprojected,\n        model,\n        lower_bounds,\n        upper_bounds,\n        converged,\n        convergence_reason,\n        niter=None,\n        maxiter=None,\n        gtol_abs=gtol_abs,\n        gtol_rel=gtol_rel,\n        gtol_scaled=gtol_scaled,\n    )\n\n    if converged:\n        hessian_inactive = model.square_terms\n        trustregion_radius = options_update_radius[\"default_radius\"]\n    else:\n        hessian_inactive = _find_hessian_submatrix_where_bounds_inactive(\n            model, active_bounds_info\n        )\n\n        (\n            
x_candidate_gradient_descent,\n            f_min_gradient_descent,\n            step_size_gradient_descent,\n            trustregion_radius,\n            radius_lower_bound,\n        ) = _perform_gradient_descent_step(\n            x_candidate,\n            criterion_candidate,\n            gradient_projected,\n            hessian_inactive,\n            model,\n            lower_bounds,\n            upper_bounds,\n            active_bounds_info,\n            maxiter_gradient_descent,\n            options_update_radius,\n        )\n\n        if f_min_gradient_descent < criterion_candidate:\n            criterion_candidate = f_min_gradient_descent\n\n            x_unbounded = (\n                x_candidate_gradient_descent\n                - step_size_gradient_descent * gradient_projected\n            )\n            x_candidate = _apply_bounds_to_x_candidate(\n                x_unbounded, lower_bounds, upper_bounds\n            )\n\n            gradient_unprojected = model.linear_terms + model.square_terms @ x_candidate\n            active_bounds_info = _get_information_on_active_bounds(\n                x_candidate,\n                gradient_unprojected,\n                lower_bounds,\n                upper_bounds,\n            )\n\n            gradient_projected = _project_gradient_onto_feasible_set(\n                gradient_unprojected, active_bounds_info\n            )\n            hessian_inactive = _find_hessian_submatrix_where_bounds_inactive(\n                model, active_bounds_info\n            )\n\n            converged, convergence_reason = _check_for_convergence(\n                x_candidate,\n                criterion_candidate,\n                gradient_projected,\n                model,\n                lower_bounds,\n                upper_bounds,\n                converged,\n                convergence_reason,\n                niter=None,\n                maxiter=None,\n                gtol_abs=gtol_abs,\n                gtol_rel=gtol_rel,\n        
        gtol_scaled=gtol_scaled,\n            )\n\n        if not converged:\n            trustregion_radius = np.clip(\n                max(trustregion_radius, radius_lower_bound),\n                options_update_radius[\"min_radius\"],\n                options_update_radius[\"max_radius\"],\n            )\n\n    return (\n        x_candidate,\n        criterion_candidate,\n        gradient_unprojected,\n        hessian_inactive,\n        trustregion_radius,\n        active_bounds_info,\n        converged,\n        convergence_reason,\n    )\n\n\ndef _compute_conjugate_gradient_step(\n    x_candidate,\n    gradient_inactive,\n    hessian_inactive,\n    lower_bounds,\n    upper_bounds,\n    active_bounds_info,\n    trustregion_radius,\n    *,\n    conjugate_gradient_method,\n    gtol_abs_conjugate_gradient,\n    gtol_rel_conjugate_gradient,\n    options_update_radius,\n):\n    \"\"\"Compute the bounded Conjugate Gradient trust-region step.\"\"\"\n    conjugate_gradient_step = np.zeros_like(x_candidate)\n\n    if active_bounds_info.inactive.size == 0:\n        # Save some computation and return an adjusted zero step\n        step_inactive = _apply_bounds_to_x_candidate(\n            x_candidate, lower_bounds, upper_bounds\n        )\n        step_norm = np.linalg.norm(step_inactive)\n\n        conjugate_gradient_step = _apply_bounds_to_conjugate_gradient_step(\n            step_inactive,\n            x_candidate,\n            lower_bounds,\n            upper_bounds,\n            active_bounds_info,\n        )\n\n    else:\n        if conjugate_gradient_method == \"cg\":\n            step_inactive = minimize_trust_cg(\n                gradient_inactive,\n                hessian_inactive,\n                trustregion_radius,\n                gtol_abs=gtol_abs_conjugate_gradient,\n                gtol_rel=gtol_rel_conjugate_gradient,\n            )\n            step_norm = np.linalg.norm(step_inactive)\n        elif conjugate_gradient_method == \"steihaug_toint\":\n    
        step_inactive = minimize_trust_stcg(\n                gradient_inactive,\n                hessian_inactive,\n                trustregion_radius,\n            )\n            step_norm = np.linalg.norm(step_inactive)\n        elif conjugate_gradient_method == \"trsbox\":\n            step_inactive = minimize_trust_trsbox(\n                gradient_inactive,\n                hessian_inactive,\n                trustregion_radius,\n                lower_bounds=lower_bounds[active_bounds_info.inactive],\n                upper_bounds=upper_bounds[active_bounds_info.inactive],\n            )\n            step_norm = np.linalg.norm(step_inactive)\n        else:\n            raise ValueError(\n                \"Invalid method: {conjugate_gradient_method}. \"\n                \"Must be one of cg, steihaug_toint, trsbox.\"\n            )\n\n        if trustregion_radius == 0:\n            if step_norm > 0:\n                # Accept\n                trustregion_radius = np.clip(\n                    step_norm,\n                    options_update_radius[\"min_radius\"],\n                    options_update_radius[\"max_radius\"],\n                )\n\n            else:\n                # Re-solve\n                trustregion_radius = np.clip(\n                    options_update_radius[\"default_radius\"],\n                    options_update_radius[\"min_radius\"],\n                    options_update_radius[\"max_radius\"],\n                )\n\n                if conjugate_gradient_method == \"cg\":\n                    step_inactive = minimize_trust_cg(\n                        gradient_inactive,\n                        hessian_inactive,\n                        trustregion_radius,\n                        gtol_abs=gtol_abs_conjugate_gradient,\n                        gtol_rel=gtol_rel_conjugate_gradient,\n                    )\n                    step_norm = np.linalg.norm(step_inactive)\n                elif conjugate_gradient_method == \"steihaug_toint\":\n          
          step_inactive = minimize_trust_stcg(\n                        gradient_inactive,\n                        hessian_inactive,\n                        trustregion_radius,\n                    )\n                    step_norm = np.linalg.norm(step_inactive)\n                elif conjugate_gradient_method == \"trsbox\":\n                    step_inactive = minimize_trust_trsbox(\n                        gradient_inactive,\n                        hessian_inactive,\n                        trustregion_radius,\n                        lower_bounds=lower_bounds[active_bounds_info.inactive],\n                        upper_bounds=upper_bounds[active_bounds_info.inactive],\n                    )\n                    step_norm = np.linalg.norm(step_inactive)\n\n                if step_norm == 0:\n                    raise ValueError(\"Initial direction is zero.\")\n\n        conjugate_gradient_step = _apply_bounds_to_conjugate_gradient_step(\n            step_inactive,\n            x_candidate,\n            lower_bounds,\n            upper_bounds,\n            active_bounds_info,\n        )\n\n    return (\n        conjugate_gradient_step,\n        step_inactive,\n        step_norm,\n    )\n\n\ndef _compute_predicted_reduction_from_conjugate_gradient_step(\n    conjugate_gradient_step,\n    conjugate_gradient_step_inactive,\n    gradient_unprojected,\n    gradient_inactive,\n    hessian_inactive,\n    active_bounds_info,\n):\n    \"\"\"Compute predicted reduction induced by the Conjugate Gradient step.\"\"\"\n    if active_bounds_info.active.size > 0:\n        # Projection changed the step, so we have to recompute the step\n        # and the predicted reduction. 
Leave the trust radius unchanged.\n        cg_step_recomp = conjugate_gradient_step[active_bounds_info.inactive]\n        gradient_inactive_recomp = gradient_unprojected[active_bounds_info.inactive]\n\n        predicted_reduction = _evaluate_model_criterion(\n            cg_step_recomp, gradient_inactive_recomp, hessian_inactive\n        )\n    else:\n        # Step did not change, so we can just recover the\n        # pre-computed prediction\n        predicted_reduction = _evaluate_model_criterion(\n            conjugate_gradient_step_inactive,\n            gradient_inactive,\n            hessian_inactive,\n        )\n\n    return -predicted_reduction\n\n\ndef _perform_gradient_descent_step(\n    x_candidate,\n    f_candidate_initial,\n    gradient_projected,\n    hessian_inactive,\n    model,\n    lower_bounds,\n    upper_bounds,\n    active_bounds_info,\n    maxiter_steepest_descent,\n    options_update_radius,\n):\n    \"\"\"Perform gradient descent step and update trust-region radius.\"\"\"\n    f_min = f_candidate_initial\n    gradient_norm = np.linalg.norm(gradient_projected)\n\n    trustregion_radius = options_update_radius[\"default_radius\"]\n    radius_lower_bound = 0\n    step_size_accepted = 0\n\n    for _ in range(maxiter_steepest_descent):\n        x_old = x_candidate\n\n        step_size_candidate = trustregion_radius / gradient_norm\n        x_candidate = x_old - step_size_candidate * gradient_projected\n\n        x_candidate = _apply_bounds_to_x_candidate(\n            x_candidate, lower_bounds, upper_bounds\n        )\n        f_candidate = _evaluate_model_criterion(\n            x_candidate, model.linear_terms, model.square_terms\n        )\n\n        x_diff = x_candidate - x_old\n\n        if f_candidate < f_min:\n            f_min = f_candidate\n            step_size_accepted = step_size_candidate\n\n        x_inactive = x_diff[active_bounds_info.inactive]\n        square_terms = x_inactive.T @ hessian_inactive @ x_inactive\n\n        
predicted_reduction = trustregion_radius * (\n            gradient_norm - 0.5 * trustregion_radius * square_terms / (gradient_norm**2)\n        )\n        actual_reduction = f_candidate_initial - f_candidate\n\n        (\n            trustregion_radius,\n            radius_lower_bound,\n        ) = _update_trustregion_radius_and_gradient_descent(\n            trustregion_radius,\n            radius_lower_bound,\n            predicted_reduction,\n            actual_reduction,\n            gradient_norm,\n            options_update_radius,\n        )\n\n    return (\n        x_candidate,\n        f_min,\n        step_size_accepted,\n        trustregion_radius,\n        radius_lower_bound,\n    )\n\n\ndef _update_trustregion_radius_conjugate_gradient(\n    f_candidate,\n    predicted_reduction,\n    actual_reduction,\n    x_norm_cg,\n    trustregion_radius,\n    options,\n):\n    \"\"\"Update the trust-region radius based on predicted and actual reduction.\"\"\"\n    accept_step = False\n\n    if predicted_reduction < 0 or ~np.isfinite(predicted_reduction):\n        # Reject and start over\n        trustregion_radius = options[\"alpha1\"] * min(trustregion_radius, x_norm_cg)\n\n    else:\n        if ~np.isfinite(actual_reduction):\n            trustregion_radius = options[\"alpha1\"] * min(trustregion_radius, x_norm_cg)\n        else:\n            if abs(actual_reduction) <= max(1, abs(f_candidate) * EPSILON) and abs(\n                predicted_reduction\n            ) <= max(1, abs(f_candidate) * EPSILON):\n                kappa = 1\n            else:\n                kappa = actual_reduction / predicted_reduction\n\n            if kappa < options[\"eta1\"]:\n                # Reject the step\n                trustregion_radius = options[\"alpha1\"] * min(\n                    trustregion_radius, x_norm_cg\n                )\n            else:\n                accept_step = True\n\n                # Update the trust-region radius only if the computed step is at the\n 
               # trust-radius boundary\n                if x_norm_cg == trustregion_radius:\n                    if kappa < options[\"eta2\"]:\n                        # Marginal bad step\n                        trustregion_radius = options[\"alpha2\"] * trustregion_radius\n                    elif kappa < options[\"eta3\"]:\n                        # Reasonable step\n                        trustregion_radius = options[\"alpha3\"] * trustregion_radius\n                    elif kappa < options[\"eta4\"]:\n                        trustregion_radius = options[\"alpha4\"] * trustregion_radius\n                    else:\n                        # Very good step\n                        trustregion_radius = options[\"alpha5\"] * trustregion_radius\n\n    trustregion_radius = np.clip(\n        trustregion_radius, options[\"min_radius\"], options[\"max_radius\"]\n    )\n\n    return trustregion_radius, accept_step\n\n\ndef _get_information_on_active_bounds(\n    x,\n    gradient_unprojected,\n    lower_bounds,\n    upper_bounds,\n):\n    \"\"\"Return the index set of active bounds.\"\"\"\n    active_lower = np.where((x <= lower_bounds) & (gradient_unprojected > 0))[0]\n    active_upper = np.where((x >= upper_bounds) & (gradient_unprojected < 0))[0]\n    active_fixed = np.where(lower_bounds == upper_bounds)[0]\n    active_all = reduce(np.union1d, (active_fixed, active_lower, active_upper))\n    inactive = np.setdiff1d(np.arange(len(x)), active_all)\n\n    active_bounds_info = ActiveBounds(\n        lower=active_lower,\n        upper=active_upper,\n        fixed=active_fixed,\n        active=active_all,\n        inactive=inactive,\n    )\n\n    return active_bounds_info\n\n\ndef _find_hessian_submatrix_where_bounds_inactive(model, active_bounds_info):\n    \"\"\"Find the submatrix of the initial hessian where bounds are inactive.\"\"\"\n    hessian_inactive = model.square_terms[\n        active_bounds_info.inactive[:, np.newaxis], active_bounds_info.inactive\n    ]\n\n    
return hessian_inactive\n\n\ndef _check_for_convergence(\n    x_candidate,\n    f_candidate,\n    gradient_candidate,\n    model,\n    lower_bounds,\n    upper_bounds,\n    converged,\n    reason,\n    niter,\n    *,\n    maxiter,\n    gtol_abs,\n    gtol_rel,\n    gtol_scaled,\n):\n    \"\"\"Check if we have found a solution.\"\"\"\n    direction_fischer_burmeister = _get_fischer_burmeister_direction_vector(\n        x_candidate, gradient_candidate, lower_bounds, upper_bounds\n    )\n    gradient_norm = np.linalg.norm(direction_fischer_burmeister)\n    gradient_norm_initial = np.linalg.norm(model.linear_terms)\n\n    if gradient_norm < gtol_abs:\n        converged = True\n        reason = \"Norm of the gradient is less than absolute_gradient_tolerance.\"\n    elif f_candidate != 0 and abs(gradient_norm / f_candidate) < gtol_rel:\n        converged = True\n        reason = (\n            \"Norm of the gradient relative to the criterion value is less than \"\n            \"relative_gradient_tolerance.\"\n        )\n    elif (\n        gradient_norm_initial != 0\n        and gradient_norm / gradient_norm_initial < gtol_scaled\n    ):\n        converged = True\n        reason = (\n            \"Norm of the gradient divided by norm of the gradient at the \"\n            \"initial parameters is less than scaled_gradient_tolerance.\"\n        )\n    elif gradient_norm_initial != 0 and gradient_norm == 0 and gtol_scaled == 0:\n        converged = True\n        reason = (\n            \"Norm of the gradient divided by norm of the gradient at the \"\n            \"initial parameters is less than scaled_gradient_tolerance.\"\n        )\n    elif f_candidate <= -np.inf:\n        converged = True\n        reason = \"Criterion value is negative infinity.\"\n    elif niter is not None and niter == maxiter:\n        reason = \"Maximum number of iterations reached.\"\n\n    return converged, reason\n\n\ndef _apply_bounds_to_x_candidate(x, lower_bounds, upper_bounds, 
bound_tol=0):\n    \"\"\"Apply upper and lower bounds to the candidate vector.\"\"\"\n    x = np.where(x <= lower_bounds + bound_tol, lower_bounds, x)\n    x = np.where(x >= upper_bounds - bound_tol, upper_bounds, x)\n\n    return x\n\n\ndef _project_gradient_onto_feasible_set(gradient_unprojected, active_bounds_info):\n    \"\"\"Project gradient onto feasible set, where search directions unconstrained.\"\"\"\n    gradient_projected = np.zeros_like(gradient_unprojected)\n    gradient_projected[active_bounds_info.inactive] = gradient_unprojected[\n        active_bounds_info.inactive\n    ]\n\n    return gradient_projected\n\n\ndef _apply_bounds_to_conjugate_gradient_step(\n    step_inactive,\n    x_candidate,\n    lower_bounds,\n    upper_bounds,\n    active_bounds_info,\n):\n    \"\"\"Apply lower and upper bounds to the Conjugate Gradient step.\"\"\"\n    cg_step = np.zeros_like(x_candidate)\n    cg_step[active_bounds_info.inactive] = step_inactive\n\n    if active_bounds_info.lower.size > 0:\n        x_active_lower = x_candidate[active_bounds_info.lower]\n        lower_bound_active = lower_bounds[active_bounds_info.lower]\n\n        cg_step[active_bounds_info.lower] = lower_bound_active - x_active_lower\n\n    if active_bounds_info.upper.size > 0:\n        x_active_upper = x_candidate[active_bounds_info.upper]\n        upper_bound_active = upper_bounds[active_bounds_info.upper]\n\n        cg_step[active_bounds_info.upper] = upper_bound_active - x_active_upper\n\n    if active_bounds_info.fixed.size > 0:\n        cg_step[active_bounds_info.fixed] = 0\n\n    return cg_step\n\n\ndef _update_trustregion_radius_and_gradient_descent(\n    trustregion_radius,\n    radius_lower_bound,\n    predicted_reduction,\n    actual_reduction,\n    gradient_norm,\n    options,\n):\n    \"\"\"Update the trust-region radius and its upper bound.\"\"\"\n    if abs(actual_reduction) <= EPSILON and abs(predicted_reduction) <= EPSILON:\n        kappa = 1\n    else:\n        kappa = 
actual_reduction / predicted_reduction\n\n    tau_1 = (\n        options[\"theta\"]\n        * gradient_norm\n        * trustregion_radius\n        / (\n            options[\"theta\"] * gradient_norm * trustregion_radius\n            + (1 - options[\"theta\"]) * predicted_reduction\n            - actual_reduction\n        )\n    )\n    tau_2 = (\n        options[\"theta\"]\n        * gradient_norm\n        * trustregion_radius\n        / (\n            options[\"theta\"] * gradient_norm * trustregion_radius\n            - (1 + options[\"theta\"]) * predicted_reduction\n            + actual_reduction\n        )\n    )\n\n    tau_min = min(tau_1, tau_2)\n    tau_max = max(tau_1, tau_2)\n\n    if abs(kappa - 1) <= options[\"mu1\"]:\n        # Great agreement\n        radius_lower_bound = max(radius_lower_bound, trustregion_radius)\n\n        if tau_max < 1:\n            tau = options[\"gamma3\"]\n        elif tau_max > options[\"gamma4\"]:\n            tau = options[\"gamma4\"]\n        else:\n            tau = tau_max\n\n    elif abs(kappa - 1) <= options[\"mu2\"]:\n        # Good agreement\n        radius_lower_bound = max(radius_lower_bound, trustregion_radius)\n\n        if tau_max < options[\"gamma2\"]:\n            tau = options[\"gamma2\"]\n        elif tau_max > options[\"gamma3\"]:\n            tau = options[\"gamma3\"]\n        else:\n            tau = tau_max\n\n    else:\n        # Not good agreement\n        if tau_min > 1:\n            tau = options[\"gamma2\"]\n        elif tau_max < options[\"gamma1\"]:\n            tau = options[\"gamma1\"]\n        elif (tau_min < options[\"gamma1\"]) and (tau_max >= 1):\n            tau = options[\"gamma1\"]\n        elif (\n            (tau_1 >= options[\"gamma1\"])\n            and (tau_1 < 1.0)\n            and ((tau_2 < options[\"gamma1\"]) or (tau_2 >= 1.0))\n        ):\n            tau = tau_1\n        elif (\n            (tau_2 >= options[\"gamma1\"])\n            and (tau_2 < 1.0)\n            and ((tau_1 < 
options[\"gamma1\"]) or (tau_2 >= 1.0))\n        ):\n            tau = tau_2\n        else:\n            tau = tau_max\n\n    trustregion_radius = trustregion_radius * tau\n\n    return trustregion_radius, radius_lower_bound\n\n\ndef _get_fischer_burmeister_direction_vector(x, gradient, lower_bounds, upper_bounds):\n    \"\"\"Compute the constrained direction vector via the Fischer-Burmeister function.\"\"\"\n    fischer_vec = np.vectorize(_get_fischer_burmeister_scalar)\n\n    fischer_burmeister = reduce(\n        fischer_vec, (upper_bounds - x, -gradient, x - lower_bounds)\n    )\n    direction = np.where(\n        lower_bounds == upper_bounds, lower_bounds - x, fischer_burmeister\n    )\n\n    return direction\n\n\ndef _get_fischer_burmeister_scalar(a, b):\n    \"\"\"Get the value of the Fischer-Burmeister function for two scalar inputs.\n\n    This method was suggested by Bob Vanderbei. Since the Fischer-Burmeister\n    is symmetric, the order of the scalar inputs does not matter.\n\n    Args:\n        a (float): First input.\n        b (float): Second input.\n\n    Returns:\n        float: Value of the Fischer-Burmeister function for inputs a and b.\n\n    \"\"\"\n    if a + b <= 0:\n        fischer_burmeister = np.sqrt(a**2 + b**2) - (a + b)\n    else:\n        fischer_burmeister = -2 * a * b / (np.sqrt(a**2 + b**2) + (a + b))\n\n    return fischer_burmeister\n\n\ndef _evaluate_model_criterion(\n    x,\n    gradient,\n    hessian,\n):\n    \"\"\"Evaluate the criterion function value of the main model.\n\n    Args:\n        x (np.ndarray): Parameter vector of shape (n,).\n        gradient (np.ndarray): Gradient of shape (n,) for which the main model\n            shall be evaluated.\n        hessian (np.ndarray): Hessian of shape (n, n) for which the main model\n            shall be evaulated.\n\n    Returns:\n        float: Criterion value of the main model.\n\n    \"\"\"\n    return gradient.T @ x + 0.5 * x.T @ hessian @ x\n"
  },
  {
    "path": "src/optimagic/optimizers/_pounders/gqtpar.py",
    "content": "\"\"\"Auxiliary functions for the quadratic GQTPAR trust-region subsolver.\"\"\"\n\nfrom typing import NamedTuple\n\nimport numpy as np\nfrom scipy.linalg import cho_solve, solve_triangular\nfrom scipy.linalg.lapack import dpotrf as compute_cholesky_factorization\nfrom scipy.optimize._trustregion_exact import estimate_smallest_singular_value\n\n\nclass HessianInfo(NamedTuple):\n    hessian_plus_lambda: np.ndarray | None = None  # shape (n_params, n_params)\n    upper_triangular: np.ndarray | None = None  # shape (n_params, n_params)\n    already_factorized: bool = False\n\n\nclass DampingFactors(NamedTuple):\n    candidate: float | None = None\n    lower_bound: float | None = None\n    upper_bound: float | None = None\n\n\ndef gqtpar(model, x_candidate, *, k_easy=0.1, k_hard=0.2, maxiter=200):\n    \"\"\"Solve the quadratic trust-region subproblem via nearly exact iterative method.\n\n    This subproblem solver is mainly based on Conn et al. (2000) \"Trust region methods\"\n    (:cite:`Conn2000`), pp. 169-200.\n\n    But ideas from Nocedal and Wright (2006) \"Numerical optimization\"\n    (:cite:`Nocedal2006`), pp. 83-91, who implement a similar algorithm,\n    were also used.\n\n    The original algorithm was developed by More and Sorensen (1983) (:cite:`More1983`)\n    and is known as \"GQTPAR\".\n\n    The vector x* is a global solution to the quadratic subproblem:\n\n        min_x f + g @ x + 0.5 * x.T @ H @ x,\n\n        if and only if ||x|| <= trustregion_radius\n        and if there is a scalar lambda >= 0, such that:\n\n    1) (H + lambda * I(n)) x* = -g\n    2) lambda (trustregion_radius - ||x*||) = 0\n    3) H + lambda * I is positive definite\n\n    where g denotes the gradient and H the hessian of the quadratic model,\n    respectively.\n\n    k_easy and k_hard are stopping criteria for the iterative subproblem solver.\n    See pp. 
194-197 in :cite:`Conn2000` for a more detailed description.\n\n    Args:\n        model (NamedTuple): NamedTuple containing the parameters of the main model, i.e.\n            - ``linear_terms``, a np.ndarray of shape (n,) and\n            - ``square_terms``, a np.ndarray of shape (n,n).\n        x_candidate (np.ndarray): Initial guess for the solution of the subproblem.\n        k_easy (float): Stopping criterion for the \"easy\" case.\n        k_hard (float): Stopping criterion for the \"hard\" case.\n        maxiter (int): Maximum number of iterations to perform. If reached,\n            terminate.\n\n    Returns:\n        (dict): Result dictionary containing the following keys:\n            - ``x`` (np.ndarray): Solution vector of the subproblem of shape (n,)\n            - ``criterion`` (float): Minimum function value associated with the\n                solution.\n\n    \"\"\"\n    hessian_info = HessianInfo()\n\n    # Small floating point number signaling that for vectors smaller\n    # than that backward substituition is not reliable.\n    # See Golub, G. H., Van Loan, C. F. 
(2013), \"Matrix computations\", p.165.\n    zero_threshold = (\n        model.square_terms.shape[0]\n        * np.finfo(float).eps\n        * np.linalg.norm(model.square_terms, np.inf)\n    )\n    stopping_criteria = {\n        \"k_easy\": k_easy,\n        \"k_hard\": k_hard,\n    }\n\n    gradient_norm = np.linalg.norm(model.linear_terms)\n    lambdas = _get_initial_guess_for_lambdas(model)\n\n    converged = False\n\n    for _niter in range(maxiter):\n        if hessian_info.already_factorized:\n            hessian_info = hessian_info._replace(already_factorized=False)\n        else:\n            hessian_info, factorization_info = add_lambda_and_factorize_hessian(\n                model, hessian_info, lambdas\n            )\n\n        if factorization_info == 0 and gradient_norm > zero_threshold:\n            (\n                x_candidate,\n                hessian_info,\n                lambdas,\n                converged,\n            ) = _find_new_candidate_and_update_parameters(\n                model,\n                hessian_info,\n                lambdas,\n                stopping_criteria,\n                converged,\n            )\n\n        elif factorization_info == 0 and gradient_norm <= zero_threshold:\n            (\n                x_candidate,\n                lambdas,\n                converged,\n            ) = _check_for_interior_convergence_and_update(\n                x_candidate,\n                hessian_info,\n                lambdas,\n                stopping_criteria,\n                converged,\n            )\n\n        else:\n            lambdas = _update_lambdas_when_factorization_unsuccessful(\n                hessian_info,\n                lambdas,\n                factorization_info,\n            )\n\n        if converged:\n            break\n\n    f_min = (\n        model.linear_terms.T @ x_candidate\n        + 0.5 * x_candidate.T @ model.square_terms @ x_candidate\n    )\n    result = {\n        \"x\": x_candidate,\n        
\"criterion\": f_min,\n        \"n_iterations\": _niter,\n        \"success\": converged,\n    }\n\n    return result\n\n\ndef _get_initial_guess_for_lambdas(\n    main_model,\n):\n    \"\"\"Return good initial guesses for lambda, its lower and upper bound.\n\n    Given a trust-region radius, good initial guesses for the damping factor lambda,\n    along with its lower bound and upper bound, are computed.\n\n    The values are chosen accordingly to the guidelines on\n    section 7.3.8 (p. 192) from :cite:`Conn2000`.\n\n    Args:\n        main_model (NamedTuple): Named tuple containing the parameters of the\n            main model, i.e.:\n            - ``linear_terms``, a np.ndarray of shape (n,) and\n            - ``square_terms``, a np.ndarray of shape (n,n).\n\n    Returns:\n        (dict): Dictionary containing the initial guess for the damping\n            factor lambda, along with its lower and upper bound.\n            The respective keys are:\n            - \"candidate\"\n            - \"upper_bound\"\n            - \"lower_bound\"\n\n    \"\"\"\n    gradient_norm = np.linalg.norm(main_model.linear_terms)\n    model_hessian = main_model.square_terms\n\n    hessian_infinity_norm = np.linalg.norm(model_hessian, np.inf)\n    hessian_frobenius_norm = np.linalg.norm(model_hessian, \"fro\")\n\n    hessian_gershgorin_lower, hessian_gershgorin_upper = _compute_gershgorin_bounds(\n        main_model\n    )\n\n    lambda_lower_bound = max(\n        0,\n        -min(model_hessian.diagonal()),\n        gradient_norm\n        - min(hessian_gershgorin_upper, hessian_frobenius_norm, hessian_infinity_norm),\n    )\n    lambda_upper_bound = max(\n        0,\n        gradient_norm\n        + min(-hessian_gershgorin_lower, hessian_frobenius_norm, hessian_infinity_norm),\n    )\n\n    if lambda_lower_bound == 0:\n        lambda_candidate = 0\n    else:\n        lambda_candidate = _get_new_lambda_candidate(\n            lower_bound=lambda_lower_bound, 
upper_bound=lambda_upper_bound\n        )\n\n    lambdas = DampingFactors(\n        candidate=lambda_candidate,\n        lower_bound=lambda_lower_bound,\n        upper_bound=lambda_upper_bound,\n    )\n\n    return lambdas\n\n\ndef add_lambda_and_factorize_hessian(main_model, hessian_info, lambdas):\n    \"\"\"Add lambda to hessian and factorize it into its upper triangular matrix.\n\n    Args:\n        main_model (NamedTuple): Named tuple containing the parameters of the\n            main model, i.e.:\n            - ``linear_terms``, a np.ndarray of shape (n,) and\n            - ``square_terms``, a np.ndarray of shape (n,n).\n        hessian_info (NamedTuple): Named tuple containing transformations\n            of the hessian, i.e. square_terms, from the main model. The keys are:\n\n            - ``hessian_plus_lambda`` (np.ndarray): The square terms of the main model\n                plus the identity matrix times lambda. 2d array of shape (n, n).\n            - ``upper_triangular`` (np.ndarray): Factorization of the hessian from the\n                main model into its upper triangular matrix. The diagonal is filled\n                and the lower lower triangular contains zeros.\n                2d array of shape (n, n).\n            - ``info_already_factorized`` (bool): Boolean indicating whether the hessian\n                has already been factorized for the current iteration.\n\n    Returns:\n        Tuple:\n        - hessian_info (dict): Named tuple containing the updated transformations\n            of the hessian, i.e. square_terms, from the main model. 
See above.\n        - factorization_info (int): Non-negative integer k indicating whether the\n            factorization of the hessian into its upper triangular matrix has been\n            successful.\n            If k = 0, the factorization has been successful.\n            A value k > 0 means that the leading k by k submatrix constitues the\n            first non-positive definite leading submatrix of the hessian.\n\n    \"\"\"\n    n = main_model.square_terms.shape[0]\n\n    hessian_plus_lambda = main_model.square_terms + lambdas.candidate * np.eye(n)\n    hessian_upper_triangular, factorization_info = compute_cholesky_factorization(\n        hessian_plus_lambda,\n        lower=False,\n        overwrite_a=False,\n        clean=True,\n    )\n\n    hessian_info_new = hessian_info._replace(\n        hessian_plus_lambda=hessian_plus_lambda,\n        upper_triangular=hessian_upper_triangular,\n    )\n\n    return hessian_info_new, factorization_info\n\n\ndef _find_new_candidate_and_update_parameters(\n    main_model,\n    hessian_info,\n    lambdas,\n    stopping_criteria,\n    converged,\n):\n    \"\"\"Find new candidate vector and update transformed hessian and lambdas.\"\"\"\n    x_candidate = cho_solve(\n        (hessian_info.upper_triangular, False), -main_model.linear_terms\n    )\n    x_norm = np.linalg.norm(x_candidate)\n\n    if x_norm <= 1 and lambdas.candidate == 0:\n        converged = True\n\n    w = solve_triangular(hessian_info.upper_triangular, x_candidate, trans=\"T\")\n    w_norm = np.linalg.norm(w)\n\n    newton_step = _compute_newton_step(lambdas, x_norm, w_norm)\n\n    if x_norm < 1:\n        (\n            x_candidate,\n            hessian_info,\n            lambdas_new,\n            converged,\n        ) = _update_candidate_and_parameters_when_candidate_within_trustregion(\n            x_candidate,\n            main_model,\n            hessian_info,\n            lambdas,\n            newton_step,\n            stopping_criteria,\n            
converged,\n        )\n\n    else:\n        lambdas_new, converged = _update_lambdas_when_candidate_outside_trustregion(\n            lambdas,\n            newton_step,\n            x_norm,\n            stopping_criteria,\n            converged,\n        )\n\n    return (\n        x_candidate,\n        hessian_info,\n        lambdas_new,\n        converged,\n    )\n\n\ndef _check_for_interior_convergence_and_update(\n    x_candidate,\n    hessian_info,\n    lambdas,\n    stopping_criteria,\n    converged,\n):\n    \"\"\"Check for interior convergence, update candidate vector and lambdas.\"\"\"\n    if lambdas.candidate == 0:\n        x_candidate = np.zeros_like(x_candidate)\n        converged = True\n\n    s_min, z_min = estimate_smallest_singular_value(hessian_info.upper_triangular)\n    step_len = 2\n\n    if step_len**2 * s_min**2 <= stopping_criteria[\"k_hard\"] * lambdas.candidate:\n        x_candidate = step_len * z_min\n        converged = True\n\n    lambda_lower_bound = max(lambdas.lower_bound, lambdas.upper_bound - s_min**2)\n    lambda_new_candidate = _get_new_lambda_candidate(\n        lower_bound=lambda_lower_bound, upper_bound=lambdas.candidate\n    )\n\n    lambdas_new = lambdas._replace(\n        candidate=lambda_new_candidate,\n        lower_bound=lambda_lower_bound,\n        upper_bound=lambdas.candidate,\n    )\n\n    return x_candidate, lambdas_new, converged\n\n\ndef _update_lambdas_when_factorization_unsuccessful(\n    hessian_info, lambdas, factorization_info\n):\n    \"\"\"Update lambdas in the case that factorization of hessian not successful.\"\"\"\n    delta, v = _compute_terms_to_make_leading_submatrix_singular(\n        hessian_info,\n        factorization_info,\n    )\n    v_norm = np.linalg.norm(v)\n\n    lambda_lower_bound = max(lambdas.lower_bound, lambdas.candidate + delta / v_norm**2)\n    lambda_new_candidate = _get_new_lambda_candidate(\n        lower_bound=lambda_lower_bound, upper_bound=lambdas.upper_bound\n    )\n\n    
lambdas_new = lambdas._replace(\n        candidate=lambda_new_candidate,\n        lower_bound=lambda_lower_bound,\n    )\n\n    return lambdas_new\n\n\ndef _get_new_lambda_candidate(lower_bound, upper_bound):\n    \"\"\"Update current lambda so that it lies within its bounds.\n\n    Args:\n        lower_boud (float): lower bound of the current candidate dumping factor.\n        upper_bound(float): upper bound of the current candidate dumping factor.\n\n    Returns:\n        float: New candidate for the damping factor lambda.\n\n    \"\"\"\n    lambda_new_candidate = max(\n        np.sqrt(np.clip(lower_bound * upper_bound, 0, np.inf)),\n        lower_bound + 0.01 * (upper_bound - lower_bound),\n    )\n\n    return lambda_new_candidate\n\n\ndef _compute_gershgorin_bounds(main_model):\n    \"\"\"Compute upper and lower Gregoshgorin bounds for a square matrix.\n\n    The Gregoshgorin bounds are the upper and lower bounds for the\n    eigenvalues of the square hessian matrix (i.e. the square terms of\n    the main model). 
See :cite:`Conn2000`.\n\n    Args:\n        main_model (NamedTuple): Named tuple containing the parameters of the\n            main model, i.e.:\n            - ``linear_terms``, a np.ndarray of shape (n,) and\n            - ``square_terms``, a np.ndarray of shape (n,n).\n\n    Returns:\n        Tuple:\n        - lower_bound (float): Lower Gregoshgorin bound.\n        - upper_bound (float): Upper Gregoshgorin bound.\n\n    \"\"\"\n    model_hessian = main_model.square_terms\n\n    hessian_diag = np.diag(model_hessian)\n    hessian_diag_abs = np.abs(hessian_diag)\n    hessian_row_sums = np.sum(np.abs(model_hessian), axis=1)\n\n    lower_gershgorin = np.min(hessian_diag + hessian_diag_abs - hessian_row_sums)\n    upper_gershgorin = np.max(hessian_diag - hessian_diag_abs + hessian_row_sums)\n\n    return lower_gershgorin, upper_gershgorin\n\n\ndef _compute_newton_step(lambdas, p_norm, w_norm):\n    \"\"\"Compute the Newton step.\n\n    Args:\n        lambdas (NamedTuple): Named tuple containing the current candidate\n            value for the damping factor lambda, its lower bound and upper bound.\n        p_norm (float): Frobenius (i.e. L2-norm) of the candidate vector.\n        w_norm (float): Frobenius (i.e. 
L2-norm) of vector w, which is the solution\n            to the following triangular system: U.T w = p.\n\n    Returns:\n        float: Newton step computed according to formula (4.44) p.87\n            from Nocedal and Wright (2006).\n\n    \"\"\"\n    return lambdas.candidate + (p_norm / w_norm) ** 2 * (p_norm - 1)\n\n\ndef _update_candidate_and_parameters_when_candidate_within_trustregion(\n    x_candidate,\n    main_model,\n    hessian_info,\n    lambdas,\n    newton_step,\n    stopping_criteria,\n    converged,\n):\n    \"\"\"Update candidate vector, hessian, and lambdas when x outside trust-region.\"\"\"\n    n = len(x_candidate)\n\n    s_min, z_min = estimate_smallest_singular_value(hessian_info.upper_triangular)\n    step_len = _compute_smallest_step_len_for_candidate_vector(x_candidate, z_min)\n\n    quadratic_term = x_candidate.T @ hessian_info.hessian_plus_lambda @ x_candidate\n\n    relative_error = (step_len**2 * s_min**2) / (quadratic_term + lambdas.candidate)\n    if relative_error <= stopping_criteria[\"k_hard\"]:\n        x_candidate = x_candidate + step_len * z_min\n        converged = True\n\n    lambda_new_lower_bound = max(lambdas.lower_bound, lambdas.candidate - s_min**2)\n\n    hessian_plus_lambda = main_model.square_terms + newton_step * np.eye(n)\n    _, factorization_unsuccessful = compute_cholesky_factorization(\n        hessian_plus_lambda,\n        lower=False,\n        overwrite_a=False,\n        clean=True,\n    )\n\n    if factorization_unsuccessful == 0:\n        hessian_already_factorized = True\n        lambda_new_candidate = newton_step\n    else:\n        hessian_already_factorized = hessian_info.already_factorized\n        lambda_new_lower_bound = max(lambda_new_lower_bound, newton_step)\n        lambda_new_candidate = _get_new_lambda_candidate(\n            lower_bound=lambda_new_lower_bound, upper_bound=lambdas.candidate\n        )\n\n    hessian_info_new = hessian_info._replace(\n        
hessian_plus_lambda=hessian_plus_lambda,\n        already_factorized=hessian_already_factorized,\n    )\n\n    lambdas_new = lambdas._replace(\n        candidate=lambda_new_candidate,\n        lower_bound=lambda_new_lower_bound,\n        upper_bound=lambdas.candidate,\n    )\n\n    return x_candidate, hessian_info_new, lambdas_new, converged\n\n\ndef _update_lambdas_when_candidate_outside_trustregion(\n    lambdas, newton_step, p_norm, stopping_criteria, converged\n):\n    \"\"\"Update lambas in the case that candidate vector lies outside trust-region.\"\"\"\n    relative_error = abs(p_norm - 1)\n\n    if relative_error <= stopping_criteria[\"k_easy\"]:\n        converged = True\n\n    lambdas_new = lambdas._replace(candidate=newton_step, lower_bound=lambdas.candidate)\n\n    return lambdas_new, converged\n\n\ndef _compute_smallest_step_len_for_candidate_vector(x_candidate, z_min):\n    \"\"\"Compute the smallest step length for the candidate vector.\n\n    Choose step_length with the smallest magnitude.\n    The reason for this choice is explained at p. 
6 in :cite:`More1983`,\n    just before the formula for tau.\n\n    Args:\n        x_candidate (np.ndarray): Candidate vector of shape (n,).\n        z_min (float): Smallest singular value of the hessian matrix.\n\n    Returns:\n        float: Step length with the smallest magnitude.\n\n    \"\"\"\n    ta, tb = _solve_scalar_quadratic_equation(x_candidate, z_min)\n    step_len = min([ta, tb], key=abs)\n\n    return step_len\n\n\ndef _solve_scalar_quadratic_equation(z, d):\n    \"\"\"Return the sorted values that solve the scalar quadratic equation.\n\n    Solve the scalar quadratic equation ||z + t d|| == trustregion_radius.\n    This is like a line-sphere intersection.\n\n\n    Computation of the ``aux`` step, ``ta`` and ``tb`` is mathematically equivalent\n    to equivalent the following calculation:\n\n    ta = (-b - sqrt_discriminant) / (2*a)\n    tb = (-b + sqrt_discriminant) / (2*a)\n\n    but produces smaller round-off errors.\n    For more details, look at \"Matrix Computation\" p.97.\n\n    Args:\n        z (np.ndarray): Eigenvector of the upper triangular hessian matrix.\n        d (float): Smallest singular value of the upper triangular of the\n            hessian matrix.\n\n    Returns:\n        Tuple: The two values of t, sorted from low to high.\n        - (float) Lower value of t.\n        - (float) Higher value of t.\n\n    \"\"\"\n    a = d.T @ d\n    b = 2 * z.T @ d\n    c = z.T @ z - 1\n    sqrt_discriminant = np.sqrt(b * b - 4 * a * c)\n\n    aux = b + np.copysign(sqrt_discriminant, b)\n    ta = -aux / (2 * a)\n    tb = -2 * c / aux\n\n    return sorted([ta, tb])\n\n\ndef _compute_terms_to_make_leading_submatrix_singular(hessian_info, k):\n    \"\"\"Compute terms that make the leading submatrix of the hessian singular.\n\n    The \"hessian\" here refers to the matrix\n\n        H + lambda * I(n),\n\n    where H is the initial hessian, lambda is the current damping factor,\n    I the identity matrix, and m the number of rows/columns of the 
symmetric\n    hessian matrix.\n\n    Args:\n        hessian (np.ndarray): Symmetric k by k hessian matrix, which is not\n            positive definite.\n        upper_triangular (np.ndarray) Upper triangular matrix resulting of an\n            incomplete Cholesky decomposition of the hessian matrix.\n        k (int): Positive integer such that the leading k by k submatrix from\n            hessian is the first non-positive definite leading submatrix.\n\n    Returns:\n        Tuple:\n        - delta(float): Amount that should be added to the element (k, k) of\n            the leading k by k submatrix of the hessian to make it singular.\n        - v (np.ndarray): A vector such that ``v.T B v = 0``. Where B is the\n            hessian after ``delta`` is added to its element (k, k).\n\n    \"\"\"\n    hessian_plus_lambda = hessian_info.hessian_plus_lambda\n    upper_triangular = hessian_info.upper_triangular\n    n = len(hessian_plus_lambda)\n\n    delta = (\n        np.sum(upper_triangular[: k - 1, k - 1] ** 2)\n        - hessian_plus_lambda[k - 1, k - 1]\n    )\n\n    v = np.zeros(n)\n    v[k - 1] = 1\n\n    if k != 1:\n        v[: k - 1] = solve_triangular(\n            upper_triangular[: k - 1, : k - 1], -upper_triangular[: k - 1, k - 1]\n        )\n\n    return delta, v\n"
  },
  {
    "path": "src/optimagic/optimizers/_pounders/linear_subsolvers.py",
    "content": "\"\"\"Collection of linear trust-region subsolvers.\"\"\"\n\nfrom typing import NamedTuple\n\nimport numpy as np\n\n\nclass LinearModel(NamedTuple):\n    intercept: float | None = None\n    linear_terms: np.ndarray | None = None  # shape (n_params, n_params)\n\n\ndef minimize_trsbox_linear(\n    linear_model, lower_bounds, upper_bounds, trustregion_radius, *, zero_treshold=1e-14\n):\n    \"\"\"Minimize a linear trust-region subproblem using the trsbox algorithm.\n\n    Solve the linear subproblem:\n\n      min_x   g.T @ x\n        s.t.   lower_bound <= x <= upper_bound\n              ||x||**2 <= trustregion_radius**2\n\n    using an active-set approach.\n\n    This algorithm is an implementation of the TRSBOX routine from\n    M. J. D. Powell (2009) \"The BOBYQA algorithm for bound constrained\n    optimization without derivatives.\" (cite:`Powell2009`).\n\n    Args:\n        linear_model (NamedTuple): Named tuple containing the parameters of the\n            linear model, i.e.:\n            - ``intercept`` (float): Intercept of the linear model.\n            - ``linear_terms`` (np.ndarray): 1d array of shape (n,) with the linear\n            terms of the mdoel.\n        lower_bounds (np.ndarray): 1d array of shape (n,) with lower bounds\n            for the parameter vector x.\n        upper_bounds (np.ndarray): 1d array of shape (n,) with upper bounds\n            for the parameter vector x.\n        trustregion_radius (float): Radius of the trust-region.\n        zero_treshold (float): Treshold for treating numerical values as zero.\n            Numbers smaller than this are considered zero up to machine precision.\n\n    Returns:\n        (np.ndarray): Solution vector for the linear trust-region subproblem.\n            Array of shape (n,).\n\n    \"\"\"\n    lower_bounds_internal = np.minimum(lower_bounds, -zero_treshold)\n    upper_bounds_internal = np.maximum(upper_bounds, zero_treshold)\n\n    model_gradient = linear_model.linear_terms\n    
n = len(model_gradient)\n    x_candidate = np.zeros(n)\n\n    direction = -model_gradient\n\n    indices_inactive_directions = np.where(np.abs(direction) < zero_treshold)[0]\n    direction[indices_inactive_directions] = 0\n\n    active_directions = np.setdiff1d(np.arange(n), indices_inactive_directions)\n    set_active_directions = iter(active_directions)\n\n    for _ in range(n):\n        if np.linalg.norm(direction) < zero_treshold:\n            break\n\n        x_candidate_unconstr = _take_unconstrained_step_up_to_boundary(\n            x_candidate, direction, trustregion_radius, zero_treshold=zero_treshold\n        )\n\n        active_bound, index_active_bound = _find_next_active_bound(\n            x_candidate_unconstr,\n            lower_bounds_internal,\n            upper_bounds_internal,\n            set_active_directions,\n        )\n\n        if active_bound is None:\n            x_candidate = x_candidate_unconstr\n            break\n\n        else:\n            x_candidate, direction = _take_constrained_step_up_to_boundary(\n                x_candidate,\n                direction,\n                active_bound,\n                index_active_bound,\n            )\n\n    return x_candidate\n\n\ndef improve_geomtery_trsbox_linear(\n    x_center,\n    linear_model,\n    lower_bounds,\n    upper_bounds,\n    trustregion_radius,\n    *,\n    zero_treshold=1e-14,\n):\n    \"\"\"Maximize a Lagrange polynomial of degree one to improve geometry of the model.\n\n    Let a Lagrange polynomial of degree one be defined by:\n        L(x) = c + g.T @ (x - x_center),\n\n    where c and g denote the constant term and the linear terms (gradient)\n    of the linear model, respectively.\n\n    In order to maximize L(x), we maximize the absolute value of L(x) in a\n    trust-region setting. I.e. we solve:\n\n        max_x  abs(c + g.T @ (x - x_center))\n            s.t. 
lower_bound <= x <= upper_bound\n                 ||x - x_center|| <= trustregion_radius\n\n    In order to find the solution x*, we first minimize and then maximize\n    g.T @ (x - center).\n    The resulting candidate vectors are then plugged into the objective function L(x)\n    to check which one yields the largest absolute value of the Lagrange polynomial.\n\n    Args:\n        x_center (np.ndarray): 1d array of shape (n,) containing the center of the\n            parameter vector.\n        linear_model (NamedTuple): Named tuple containing the parameters of the\n            linear model that form the Lagrange polynomial, including:\n            - ``intercept`` (float): Intercept of the linear model.\n            - ``linear_terms`` (np.ndarray): 1d array of shape (n,) with the linear\n            terms of the mdoel.\n        lower_bounds (np.ndarray): 1d array of shape (n,) with lower bounds\n            for the parameter vector x.\n        upper_bounds (np.ndarray): 1d array of shape (n,) with upper bounds\n            for the parameter vector x.\n        trustregion_radius (float): Radius of the trust-region.\n        zero_treshold (float): Treshold for treating numerical values as zero.\n            Numbers smaller than this are considered zero up to machine precision.\n\n    Returns:\n        np.ndarray: Solution vector of shape (n,) that maximizes the Lagrange\n            polynomial.\n\n    \"\"\"\n    if np.any(lower_bounds > x_center + zero_treshold):\n        raise ValueError(\"x_center violates lower bound.\")\n    if np.any(x_center - zero_treshold > upper_bounds):\n        raise ValueError(\"x_center violates upper bound.\")\n\n    # Minimize and maximize g.T @ (x - x_center), respectively\n    linear_model_to_minimize = linear_model\n    linear_model_to_maximize = linear_model._replace(\n        linear_terms=-linear_model.linear_terms\n    )\n\n    x_candidate_min = minimize_trsbox_linear(\n        linear_model_to_minimize,\n        lower_bounds - 
x_center,\n        upper_bounds - x_center,\n        trustregion_radius,\n        zero_treshold=zero_treshold,\n    )\n    x_candidate_max = minimize_trsbox_linear(\n        linear_model_to_maximize,\n        lower_bounds - x_center,\n        upper_bounds - x_center,\n        trustregion_radius,\n        zero_treshold=zero_treshold,\n    )\n\n    lagrange_polynomial = lambda x: abs(\n        linear_model.intercept + linear_model.linear_terms.T @ x\n    )\n\n    if lagrange_polynomial(x_candidate_min) >= lagrange_polynomial(x_candidate_max):\n        x_lagrange = x_candidate_min + x_center\n    else:\n        x_lagrange = x_candidate_max + x_center\n\n    return x_lagrange\n\n\ndef _find_next_active_bound(\n    x_candidate_unconstr,\n    lower_bounds,\n    upper_bounds,\n    set_active_directions,\n):\n    \"\"\"Find the next active bound and return its index.\n\n    A (lower or upper) bound is considered active if\n        x_candidate <= lower_bounds\n        x_candidate >= upper_bounds\n\n    Args:\n        x_candidate_unconstr (np.ndarray): Unconstrained candidate vector of shape (n,),\n            which ignores bound constraints.\n        lower_bounds (np.ndarray): 1d array of shape (n,) with lower bounds\n            for the parameter vector x.\n        upper_bounds (np.ndarray): 1d array of shape (n,) with upper bounds\n            for the parameter vector x.\n        set_active_directions (iterator): Iterator over the indices of active search\n            directions, i.e. directions that are not zero.\n\n    Returns:\n        Tuple:\n            - active_bound (float or None): The next active bound. It can be a lower\n                or active bound. If None, there are no more active bounds left in the\n                set of active search directions.\n            - index_bound_active (int or None): Index where an active lower or\n                upper bound has been found. 
None, if no active bound has been detected.\n\n    \"\"\"\n    index_active = next(set_active_directions)\n\n    while True:\n        if x_candidate_unconstr[index_active] >= upper_bounds[index_active]:\n            active_bound = upper_bounds[index_active]\n            break\n\n        elif x_candidate_unconstr[index_active] <= lower_bounds[index_active]:\n            active_bound = lower_bounds[index_active]\n            break\n\n        else:\n            try:\n                index_active = next(set_active_directions)\n            except StopIteration:\n                active_bound = None\n                break\n\n    return active_bound, index_active\n\n\ndef _take_constrained_step_up_to_boundary(\n    x_candidate, direction, active_bound, index_bound_active\n):\n    \"\"\"Take largest constrained step possible until trust-region boundary is hit.\n\n    Args:\n        x_candidate (np.ndarray): Current candidate vector of shape (n,).\n        direction (np.ndarray): Direction vector of shape (n,).\n        active_bound (float): The active (lower or upper) bound.\n        index_bound_active (int): Index where an active lower or upper bound\n            has been found.\n\n    Returns:\n        Tuple:\n        - x_candidate (np.ndarray): New candidate vector of shape (n,).\n        - direction (np.ndarray): New direction vector of shape (n,), where the\n            search direction of the active_bound has been set to zero.\n\n    \"\"\"\n    step_size_constr = (active_bound - x_candidate[index_bound_active]) / direction[\n        index_bound_active\n    ]\n\n    x_candidate = x_candidate + step_size_constr * direction\n    x_candidate[index_bound_active] = active_bound\n\n    # Do not search in this direction anymore\n    direction[index_bound_active] = 0\n\n    return x_candidate, direction\n\n\ndef _take_unconstrained_step_up_to_boundary(\n    x_candidate, direction, trustregion_radius, zero_treshold\n):\n    \"\"\"Take largest unconstrained step possible until 
trust-region boundary is hit.\n\n    Args:\n        x_candidate (np.ndarray): Current candidate vector of shape (n,).\n        direction (np.ndarray): Direction vector of shape (n,).\n        trustregion_radius (float): Radius of the trust-region.\n        zero_treshold (float): Treshold for treating numerical values as zero.\n            Numbers smaller than this are considered zero up to machine precision.\n\n    Returns:\n        np.ndarray: Updated, unconstrained candidate vector of shape (n,).\n\n    \"\"\"\n    step_size_unconstr = _get_distance_to_trustregion_boundary(\n        x_candidate, direction, trustregion_radius, zero_treshold\n    )\n    x_candidate_unconstr = x_candidate + step_size_unconstr * direction\n\n    return x_candidate_unconstr\n\n\ndef _get_distance_to_trustregion_boundary(\n    x, direction, trustregion_radius, zero_treshold\n):\n    \"\"\"Compute the candidate vector's distance to the trustregion boundary.\n\n    Given the candidate vector, find the largest step alpha in direction g\n    that satisfies ||x|| <= trustregion_radius,\n\n    where g denotes the direction vector.\n\n    To find alpha, i.e. the candidate's distance to the trust-region boundary, solve\n      ||x + alpha * g||**2 = trustregion_radius**2\n         s.t. 
alpha >= 0\n\n    Using this method, the solution exists whenever ||x|| <= trustregion_radius**2.\n\n    Choose alpha = 0, if the direction vector is zero everywhere.\n\n    Args:\n        x (np.ndarray): Candidate vector of shape (n,).\n        direction (np.ndarray): Direction vector of shape (n,).\n        trustregion_radius (float): Radius of the trust-region.\n        zero_treshold (float): Treshold for treating numerical values as zero.\n            Numbers smaller than this are considered zero up to machine precision.\n\n    Returns:\n        float: Distance between the candidate vector and the trust-region boundary.\n\n    \"\"\"\n    g_dot_x = direction.T @ x\n    g_sumsq = direction @ direction\n    x_sumsq = x @ x\n\n    l2_norm = np.sqrt(g_sumsq)\n\n    if l2_norm < zero_treshold:\n        distance_to_boundary = 0\n    else:\n        distance_to_boundary = (\n            np.sqrt(\n                np.maximum(0, g_dot_x**2 + g_sumsq * (trustregion_radius**2 - x_sumsq))\n            )\n            - g_dot_x\n        ) / g_sumsq\n\n    return distance_to_boundary\n"
  },
  {
    "path": "src/optimagic/optimizers/_pounders/pounders_auxiliary.py",
    "content": "\"\"\"Auxiliary functions for the pounders algorithm.\"\"\"\n\nfrom typing import NamedTuple\n\nimport numpy as np\nfrom scipy.linalg import qr_multiply\n\nfrom optimagic.optimizers._pounders.bntr import (\n    bntr,\n)\nfrom optimagic.optimizers._pounders.gqtpar import (\n    gqtpar,\n)\n\n\nclass ResidualModel(NamedTuple):\n    intercepts: np.ndarray | None = None  # shape (n_residuals,)\n    linear_terms: np.ndarray | None = None  # shape (n_residuals, n_params)\n    square_terms: np.ndarray | None = None  # shape (n_residuals, n_params, n_params)\n\n\nclass MainModel(NamedTuple):\n    linear_terms: np.ndarray | None = None  # shape (n_params,)\n    square_terms: np.ndarray | None = None  # shape (n_params, n_params)\n\n\ndef create_initial_residual_model(history, accepted_index, delta):\n    \"\"\"Update linear and square terms of the initial residual model.\n\n    Args:\n        history (LeastSquaresHistory): Class storing history of xs, residuals, and\n            critvals.\n        accepted_index (int): Index in history pointing to the currently\n            accepted candidate vector.\n        delta (float): Trust-region radius.\n\n    Returns:\n        ResidualModel: Residual model containing the initial parameters for\n            ``linear_terms`` and ``square_terms``.\n\n    \"\"\"\n    center_info = {\n        \"x\": history.get_best_x(),\n        \"residuals\": history.get_best_residuals(),\n        \"radius\": delta,\n    }\n    n_params = len(center_info[\"x\"])\n    n_residuals = center_info[\"residuals\"].shape[0]\n\n    indices_not_min = [i for i in range(n_params + 1) if i != accepted_index]\n\n    x_candidate, residuals_candidate, _ = history.get_centered_entries(\n        center_info=center_info,\n        index=indices_not_min,\n    )\n\n    linear_terms = np.linalg.solve(x_candidate, residuals_candidate)\n    square_terms = np.zeros((n_residuals, n_params, n_params))\n\n    residual_model = ResidualModel(\n        
intercepts=history.get_best_residuals(),\n        linear_terms=linear_terms,\n        square_terms=square_terms,\n    )\n\n    return residual_model\n\n\ndef update_residual_model(residual_model, coefficients_to_add, delta, delta_old):\n    \"\"\"Update linear and square terms of the residual model.\n\n    Args:\n        residual_model (ResidualModel): Residual model with the following parameters:\n            ``intercepts``, ``linear_terms``, and ``square terms``.\n        coefficients_to_add (dict): Coefficients used for updating the\n            parameters of the residual model.\n        delta (float): Trust region radius of the current iteration.\n        delta_old (float): Trust region radius of the previous iteration.\n\n    Returns:\n        ResidualModel: Residual model containing the updated parameters\n            ``linear_terms`` and ``square_terms``.\n\n    \"\"\"\n    linear_terms_new = (\n        coefficients_to_add[\"linear_terms\"]\n        + (delta / delta_old) * residual_model.linear_terms\n    )\n\n    square_terms_new = (\n        coefficients_to_add[\"square_terms\"]\n        + (delta / delta_old) ** 2 * residual_model.square_terms\n    )\n\n    residual_model_updated = residual_model._replace(\n        linear_terms=linear_terms_new, square_terms=square_terms_new\n    )\n\n    return residual_model_updated\n\n\ndef create_main_from_residual_model(\n    residual_model, multiply_square_terms_with_intercepts=True\n):\n    \"\"\"Update linear and square terms of the main model via the residual model.\n\n    Args:\n        residual_model (ResidualModel): Residual model with the following parameters:\n            ``intercepts``, ``linear_terms``, and ``square terms``.\n        multiply_square_terms_with_residuals (bool): Indicator whether we\n            multiply the main model's ``square terms`` with the\n            intercepts of the residual model.\n\n    Returns:\n        MainModel: Main model containing the updated parameters\n            
``linear_terms`` and ``square terms``.\n\n    \"\"\"\n    linear_terms_main_model = residual_model.linear_terms @ residual_model.intercepts\n    square_terms_main_model = (\n        residual_model.linear_terms @ residual_model.linear_terms.T\n    )\n\n    if multiply_square_terms_with_intercepts is True:\n        square_terms_main_model = (\n            square_terms_main_model\n            + residual_model.square_terms.T @ residual_model.intercepts\n        )\n\n    main_model = MainModel(\n        linear_terms=linear_terms_main_model, square_terms=square_terms_main_model\n    )\n\n    return main_model\n\n\ndef update_main_model_with_new_accepted_x(main_model, x_candidate):\n    \"\"\"Use accepted candidate to update the linear terms of the residual model.\n\n    Args:\n         main_model (MainModel): Main model with the following parameters:\n             ``linear_terms`` and ``square terms``.\n        x_candidate (np.ndarray): Vector of centered x candidates of shape (n_params,).\n\n    Returns:\n        MainModel: Main model containing the updated ``linear_terms``.\n\n    \"\"\"\n    linear_terms_new = main_model.linear_terms + main_model.square_terms @ x_candidate\n    main_model_updated = main_model._replace(linear_terms=linear_terms_new)\n\n    return main_model_updated\n\n\ndef update_residual_model_with_new_accepted_x(residual_model, x_candidate):\n    \"\"\"Use accepted candidate to update residual model.\n\n    Args:\n        residual_model (ResidualModel): Residual model containing the parameters of\n            the residual model, i.e. 
``intercepts``, ``linear_terms``, and\n            ``square terms``.\n        x_candidate (np.ndarray): Vector of centered x candidates of shape (n_params,).\n\n    Returns:\n        ResidualModel: Residual model containing the updated parameters\n            `intercepts`` and ``linear_terms``.\n\n    \"\"\"\n    intercepts_new = (\n        residual_model.intercepts\n        + x_candidate @ residual_model.linear_terms\n        + 0.5 * (x_candidate.T @ residual_model.square_terms @ x_candidate)\n    )\n\n    linear_terms_new = (\n        residual_model.linear_terms + (residual_model.square_terms @ x_candidate).T\n    )\n\n    residual_model_updated = residual_model._replace(\n        intercepts=intercepts_new, linear_terms=linear_terms_new\n    )\n\n    return residual_model_updated\n\n\ndef solve_subproblem(\n    x_accepted,\n    main_model,\n    lower_bounds,\n    upper_bounds,\n    delta,\n    solver,\n    *,\n    conjugate_gradient_method,\n    maxiter,\n    maxiter_gradient_descent,\n    gtol_abs,\n    gtol_rel,\n    gtol_scaled,\n    gtol_abs_conjugate_gradient,\n    gtol_rel_conjugate_gradient,\n    k_easy,\n    k_hard,\n):\n    \"\"\"Solve the quadratic subproblem.\n\n    Args:\n        x_accepted (np.ndarray): Currently accepted candidate vector of shape\n            (n_params,).\n        delta (float): Current trust region radius.\n        main_model (MainModel): Main model with the following parameters:\n             ``linear_terms`` and ``square terms``.\n        lower_bounds (np.ndarray): 1d array of shape (n_params,) with lower bounds\n            for the parameter vector x.\n        upper_bounds (np.ndarray): 1d array of shape (n_params,) with upper bounds\n            for the parameter vector x.\n        delta (float) Current trust-region radius\n        solver (str): Trust-region subsolver to use. 
Currently, two internal solvers\n            are supported:\n            - \"bntr\" (default, supports bound constraints)\n            - \"gqtpar\" (does not support bound constraints)\n        conjugate_gradient_method (str): Method for computing the conjugate gradient\n            step. Available conjugate gradient methods are:\n            - \"cg\"\n            - \"steihaug_toint\"\n            - \"trsbox\" (default)\n        maxiter (int): Maximum number of iterations to perform when solving the\n            trust-region subproblem.\n        maxiter_gradient_descent (int): Maximum number of gradient descent iterations\n            to perform when the trust-region subsolver \"bntr\" is used.\n        gtol_abs (float): Convergence tolerance for the absolute gradient norm\n            in the trust-region subproblem (\"bntr\").\n        gtol_rel (float): Convergence tolerance for the relative gradient norm\n            in the trust-region subproblem (\"bntr\").\n        gtol_scaled (float): Convergence tolerance for the scaled gradient norm\n            in the trust-region subproblem (\"bntr\").\n        gtol_abs_conjugate_gradient (float): Convergence tolerance for the absolute\n            gradient norm in the conjugate gradient step of the trust-region\n            subproblem (\"bntr\").\n        gtol_rel_conjugate_gradient (float): Convergence tolerance for the relative\n            gradient norm in the conjugate gradient step of the trust-region\n            subproblem (\"bntr\").\n        k_easy (float): Stopping criterion for the \"easy\" case in the trust-region\n            subproblem (\"gqtpar\").\n        k_hard (float): Stopping criterion for the \"hard\" case in the trust-region\n            subproblem (\"gqtpar\").\n\n    Returns:\n        (dict): Result dictionary containing the followng keys:\n            - \"x\" (np.ndarray): The solution vector of shape (n_params,)\n            - \"criterion\" (float): The value of the criterion functions 
associated\n                with the solution\n            - \"n_iterations\" (int): Number of iterations performed before termination.\n            - \"success\" (bool): Boolean indicating whether a solution has been found\n                before reaching maxiter.\n\n    \"\"\"\n    x0 = np.zeros_like(x_accepted)\n\n    # Normalize bounds. If none provided, use unit cube [-1, 1]\n    if lower_bounds is not None:\n        lower_bounds = (lower_bounds - x_accepted) / delta\n        lower_bounds[lower_bounds < -1] = -1\n    else:\n        lower_bounds = -np.ones_like(x_accepted)\n\n    if upper_bounds is not None:\n        upper_bounds = (upper_bounds - x_accepted) / delta\n        upper_bounds[upper_bounds > 1] = 1\n    else:\n        upper_bounds = np.ones_like(x_accepted)\n\n    # Check if bounds are valid\n    if np.max(lower_bounds - upper_bounds) > 1e-10:\n        raise ValueError(\"Upper bounds < lower bounds in subproblem.\")\n    if np.max(lower_bounds - x0) > 1e-10:\n        raise ValueError(\"Initial guess < lower bounds in subproblem.\")\n    if np.max(x0 - upper_bounds) > 1e-10:\n        raise ValueError(\"Initial guess > upper bounds in subproblem.\")\n\n    if solver == \"bntr\":\n        options = {\n            \"conjugate_gradient_method\": conjugate_gradient_method,\n            \"maxiter\": maxiter,\n            \"maxiter_gradient_descent\": maxiter_gradient_descent,\n            \"gtol_abs\": gtol_abs,\n            \"gtol_rel\": gtol_rel,\n            \"gtol_scaled\": gtol_scaled,\n            \"gtol_abs_conjugate_gradient\": gtol_abs_conjugate_gradient,\n            \"gtol_rel_conjugate_gradient\": gtol_rel_conjugate_gradient,\n        }\n        result = bntr(main_model, lower_bounds, upper_bounds, x_candidate=x0, **options)\n    elif solver == \"gqtpar\":\n        result = gqtpar(\n            main_model,\n            x_candidate=x0,\n            k_easy=k_easy,\n            k_hard=k_hard,\n            maxiter=maxiter,\n        )\n    else:\n   
     raise ValueError(\n            \"Invalid subproblem solver: {solver}. Must be one of bntr, gqtpar.\"\n        )\n\n    # Test bounds post-solution\n    if np.max(lower_bounds - result[\"x\"]) > 1e-5:\n        raise ValueError(\"Subproblem solution < lower bounds.\")\n    if np.max(result[\"x\"] - upper_bounds) > 1e-5:\n        raise ValueError(\"Subproblem solution > upper bounds.\")\n\n    return result\n\n\ndef find_affine_points(\n    history,\n    x_accepted,\n    model_improving_points,\n    project_x_onto_null,\n    delta,\n    theta1,\n    c,\n    model_indices,\n    n_modelpoints,\n):\n    \"\"\"Find affine points.\n\n    Args:\n        history (LeastSquaresHistory): Class storing history of xs, residuals,\n            and critvals.\n        x_accepted (np.ndarray): Accepted solution vector of the subproblem.\n            Shape (n_params,).\n        model_improving_points (np.ndarray): Array of shape (n_params, n_params)\n            including points to improve the main model, i.e. make the main model\n            fully linear, i.e. just-identified.\n            If *project_x_onto_null* is False, it is an array filled with zeros.\n        project_x_onto_null (int): Indicator whether to calculate the QR\n            decomposition of *model_improving_points* and multiply it\n            with vector *x_projected*.\n        delta (float): Delta, current trust-region radius.\n        theta1 (float): Threshold for adding the current x candidate to the model.\n        c (float): Threshold for acceptance of the norm of our current x candidate.\n        model_indices (np.ndarray): Indices related to the candidates of x\n            that are currently in the main model. Shape (2 * n_params + 1,).\n        n_modelpoints (int): Current number of model points.\n\n    Returns:\n        Tuple:\n        - model_improving_points (np.ndarray):  Array of shape (n_params, n_params)\n            including points to improve the main model, i.e. 
make the main model\n            fully linear, i.e. just-identified.\n        - model_indices (np.ndarray): Indices related to the candidates of x\n            that are currently in the main model. Shape (2 *n_params* + 1,).\n        - n_modelpoints (int): Current number of model points.\n        - project_x_onto_null (int): Indicator whether to calculate the QR\n            decomposition of *model_improving_points* and multiply it\n            with vector *x_projected*.\n            Relevant for next call of *find_affine_points()*.\n\n    \"\"\"\n    n_params = len(x_accepted)\n\n    for i in range(history.get_n_fun() - 1, -1, -1):\n        center_info = {\"x\": x_accepted, \"radius\": delta}\n        x_candidate = history.get_centered_xs(center_info, index=i)\n        candidate_norm = np.linalg.norm(x_candidate)\n\n        x_projected = x_candidate\n\n        if candidate_norm <= c:\n            if project_x_onto_null is True:\n                x_projected, _ = qr_multiply(model_improving_points, x_projected)\n\n            proj = np.linalg.norm(x_projected[n_modelpoints:])\n\n            # Add this index to the model\n            if proj >= theta1:\n                model_indices[n_modelpoints] = i\n                model_improving_points[:, n_modelpoints] = x_candidate\n                project_x_onto_null = True\n                n_modelpoints += 1\n\n            if n_modelpoints == n_params:\n                break\n\n    return model_improving_points, model_indices, n_modelpoints, project_x_onto_null\n\n\ndef add_geomtery_points_to_make_main_model_fully_linear(\n    history,\n    main_model,\n    model_improving_points,\n    model_indices,\n    x_accepted,\n    n_modelpoints,\n    delta,\n    criterion,\n    lower_bounds,\n    upper_bounds,\n    batch_fun,\n    n_cores,\n):\n    \"\"\"Add points until main model is fully linear.\n\n    Args:\n        history (LeastSquaresHistory): Class storing history of xs, residuals, and\n            critvals.\n        
main_model (MainModel): Main model with the following parameters:\n             ``linear_terms`` and ``square terms``.\n        model_improving_points (np.ndarray): Array of shape (n_params, n_params)\n            including points to improve the main model.\n        model_indices (np.ndarray): Indices of the candidates of x that are\n            currently in the main model. Shape (2 * n_params + 1,).\n        x_accepted (np.ndarray): Accepted solution vector of the subproblem.\n            Shape (n_params,).\n        n_modelpoints (int): Current number of model points.\n        delta (float): Delta, current trust-region radius.\n        criterion (callable): Criterion function.\n        lower_bounds (np.ndarray): Lower bounds.\n            Must have same length as the initial guess of the\n            parameter vector. Equal to -1 if not provided by the user.\n        upper_bounds (np.ndarray): Upper bounds.\n            Must have same length as the initial guess of the\n            parameter vector. Equal to 1 if not provided by the user.\n        batch_fun (str or callable): Function that takes a list of parameter vectors\n            and evaluates the objective function on each of them.\n        n_cores (int): Number of processes used to parallelize the function\n            evaluations.\n\n    Returns:\n        Tuple:\n        - history (class): Class storing history of xs, residuals, and critvals.\n        - model_indices (np.ndarray): Indices of the candidates of x that are\n            currently in the main model. 
Shape (2 * n_params + 1,).\n\n    \"\"\"\n    n_params = len(x_accepted)\n\n    current_history = history.get_n_fun()\n\n    x_candidate = np.zeros_like(x_accepted)\n    x_candidates_list = []\n    criterion_candidates_list = []\n\n    model_improving_points, _ = qr_multiply(model_improving_points, np.eye(n_params))\n\n    for i in range(n_modelpoints, n_params):\n        change_direction = model_improving_points[:, i] @ main_model.linear_terms\n\n        if change_direction > 0:\n            model_improving_points[:, i] *= -1\n\n        x_candidate = delta * model_improving_points[:, i] + x_accepted\n\n        # Project into feasible region\n        if lower_bounds is not None and upper_bounds is not None:\n            x_candidate = np.median(\n                np.stack([lower_bounds, x_candidate, upper_bounds]), axis=0\n            )\n        x_candidates_list.append(x_candidate)\n        model_indices[i] = current_history + i - n_modelpoints\n\n    criterion_candidates_list = batch_fun(x_list=x_candidates_list, n_cores=n_cores)\n\n    history.add_entries(x_candidates_list, criterion_candidates_list)\n\n    return history, model_indices\n\n\ndef evaluate_residual_model(\n    centered_xs,\n    centered_residuals,\n    residual_model,\n):\n    \"\"\"Compute the difference between observed and predicted model evaluations.\n\n    We use a quadratic model of the form:\n\n        f(x) = a + x.T @ b + 0.5 x.T @ C @ x ,\n\n    where C is lower triangular. Note the connection of b and C to the gradient:\n    f'(x) = b + (C + C.T) @ x, and the Hessian: f''(x) = C + C.T.\n\n    Args:\n        residual_model (ResidualModel): The residual model. Has entries:\n            - ``intercept``: corresponds to 'a' in the above equation\n            - ``linear_terms``: corresponds to 'b' in the above equation\n            - ``square_terms``: corresponds to 'C' in the above equation\n        centered_xs (np.ndarray): Centered x sample. 
Shape (n_modelpoints, n_params).\n        centered_residuals (np.ndarray): Centered residuals, i.e. the observed model\n            evaluations. Shape (n_maxinterp, n_residuals).\n\n    Returns:\n        np.ndarray: Observed minus predicted model evaluations,\n            has shape (n_modelpoints, n_residuals).\n\n    \"\"\"\n    n_residuals = centered_residuals.shape[1]\n    n_modelpoints = centered_xs.shape[0]\n    y_residuals = np.empty((n_modelpoints, n_residuals), dtype=np.float64)\n\n    for j in range(n_residuals):\n        x_dot_square_terms = centered_xs @ residual_model.square_terms[j, :, :]\n\n        for i in range(n_modelpoints):\n            y_residuals[i, j] = (\n                centered_residuals[i, j]\n                - residual_model.linear_terms[:, j] @ centered_xs[i, :]\n                - 0.5 * (x_dot_square_terms[i, :] @ centered_xs[i, :])\n            )\n\n    return y_residuals\n\n\ndef get_feature_matrices_residual_model(\n    history, x_accepted, model_indices, delta, c2, theta2, n_maxinterp\n):\n    \"\"\"Obtain the feature matrices for fitting the residual model.\n\n    Pounders uses underdetermined sample sets, with at most n_maxinterp\n    points in the model. Hence, the fitting method is interpolation,\n    where the solution represents the quadratic whose Hessian matrix is of\n    minimum Frobenius norm.\n\n    For a mathematical exposition see :cite:`Wild2008`, p. 3-5.\n\n    Args:\n        history (LeastSquaresHistory): Class storing history of xs, residuals, and\n            critvals.\n        x_accepted (np.ndarray): Accepted solution vector of the subproblem.\n            Shape (n_params,).\n        model_indices (np.ndarray): Indices of the candidates of x that are\n            currently in the model. 
Shape (2 * n_params + 1,).\n        delta (float): Delta, current trust-region radius.\n        c2 (int): Threshold for acceptance of the norm of our current x candidate.\n            Equal to 10 by default.\n        theta2 (float): Threshold for adding the current x candidate to the model.\n        n_maxinterp (int): Maximum number of interpolation points. By default,\n            2 * n_params + 1 points.\n\n    Returns:\n        Tuple:\n        - m_mat (np.ndarray): Polynomial feature matrix of the linear terms.\n            Shape(n_params + 1, n_params + 1).\n        - n_mat (np.ndarray): Polynomial feature matrix of the square terms.\n            Shape(n_modelpoints, n_poly_features).\n        - z_mat (np.ndarray): Basis for the null space of m_mat.\n            Shape(n_modelpoints, n_modelpoints - n_params - 1).\n        - n_z_mat (np.ndarray): Lower triangular matrix of xs that form\n            the monomial basis. Shape(n_poly_features, n_modelpoints - n_params - 1).\n        - n_modelpoints (int): Current number of model points.\n\n    \"\"\"\n    n_params = len(x_accepted)\n    n_poly_features = n_params * (n_params + 1) // 2\n\n    m_mat = np.zeros((n_maxinterp, n_params + 1))\n    m_mat[:, 0] = 1\n    m_mat_pad = np.zeros((n_maxinterp, n_maxinterp))\n    m_mat_pad[:n_maxinterp, : n_params + 1] = m_mat\n\n    n_mat = np.zeros((n_maxinterp, n_poly_features))\n\n    center_info = {\"x\": x_accepted, \"radius\": delta}\n    for i in range(n_params + 1):\n        m_mat[i, 1:] = history.get_centered_xs(center_info, index=model_indices[i])\n        n_mat[i, :] = _get_monomial_basis(m_mat[i, 1:])\n\n    point = history.get_n_fun() - 1\n    n_modelpoints = n_params + 1\n\n    while (n_modelpoints < n_maxinterp) and (point >= 0):\n        reject = False\n\n        # Reject any points already in the model\n        for i in range(n_params + 1):\n            if point == model_indices[i]:\n                reject = True\n                break\n\n        if reject is 
False:\n            candidate_x = history.get_centered_xs(center_info, index=point)\n            candidate_norm = np.linalg.norm(candidate_x)\n\n            if candidate_norm > c2:\n                reject = True\n\n        if reject is True:\n            point -= 1\n            continue\n\n        m_mat[n_modelpoints, 1:] = history.get_centered_xs(center_info, index=point)\n        n_mat[n_modelpoints, :] = _get_monomial_basis(m_mat[n_modelpoints, 1:])\n\n        m_mat_pad = np.zeros((n_maxinterp, n_maxinterp))\n        m_mat_pad[:n_maxinterp, : n_params + 1] = m_mat\n\n        _n_z_mat, _ = qr_multiply(\n            m_mat_pad[: n_modelpoints + 1, :],\n            n_mat.T[:n_poly_features, : n_modelpoints + 1],\n        )\n        beta = np.linalg.svd(_n_z_mat.T[n_params + 1 :], compute_uv=False)\n\n        if beta[min(n_modelpoints - n_params, n_poly_features) - 1] > theta2:\n            # Accept point\n            model_indices[n_modelpoints] = point\n            n_z_mat = _n_z_mat\n\n            n_modelpoints += 1\n\n        point -= 1\n\n    z_mat, _ = qr_multiply(\n        m_mat_pad[:n_modelpoints, :],\n        np.eye(n_maxinterp)[:, :n_modelpoints],\n    )\n\n    # Just-identified case\n    if n_modelpoints == (n_params + 1):\n        n_z_mat = np.zeros((n_maxinterp, n_poly_features))\n        n_z_mat[:n_params, :n_params] = np.eye(n_params)\n\n    return (\n        m_mat[: n_params + 1, : n_params + 1],\n        n_mat[:n_modelpoints],\n        z_mat[:n_modelpoints, n_params + 1 : n_modelpoints],\n        n_z_mat[:, n_params + 1 : n_modelpoints],\n        n_modelpoints,\n    )\n\n\ndef fit_residual_model(\n    m_mat,\n    n_mat,\n    z_mat,\n    n_z_mat,\n    y_residuals,\n    n_modelpoints,\n):\n    \"\"\"Fit a linear model using the pounders fitting method.\n\n    Pounders uses underdetermined sample sets, with at most 2 * n_params + 1\n    points in the model. 
Hence, the fitting method is interpolation, where\n    the solution represents the quadratic whose Hessian matrix is of\n    minimum Frobenius norm.\n\n    For a mathematical exposition, see :cite:`Wild2008`, p. 3-5.\n\n    Args:\n        m_mat (np.ndarray): Polynomial feature matrix of the linear terms.\n            Shape(n_params + 1, n_params + 1).\n        n_mat (np.ndarray): Polynomial feature matrix of the square terms.\n            Shape(n_modelpoints, n_poly_features).\n        z_mat (np.ndarray): Basis for the null space of m_mat.\n            Shape(n_modelpoints, n_modelpoints - n_params - 1).\n        n_z_mat (np.ndarray): Lower triangular matrix of xs that form\n            the monomial basis. Shape(n_poly_features, n_modelpoints - n_params - 1).\n        n_modelpoints (int): Current number of model points.\n        y_residuals (np.ndarray): The dependent variable. Observed minus predicted\n            evaluations of the residual model. Shape (n_modelpoints, n_residuals).\n        n_maxinterp (int): Maximum number of interpolation points. 
By default,\n            2 * n_params + 1 points.\n\n    Returns:\n        dict: The coefficients of the residual model.\n\n    \"\"\"\n    n_params = m_mat.shape[1] - 1\n    n_residuals = y_residuals.shape[1]\n    n_poly_terms = n_params * (n_params + 1) // 2\n    _is_just_identified = n_modelpoints == (n_params + 1)\n\n    coeffs_linear = np.empty((n_residuals, n_params))\n    coeffs_square = np.empty((n_residuals, n_params, n_params))\n\n    if _is_just_identified:\n        coeffs_first_stage = np.zeros(n_params)\n        beta = np.zeros(n_poly_terms)\n    else:\n        n_z_mat_square = n_z_mat.T @ n_z_mat\n\n    for k in range(n_residuals):\n        if not _is_just_identified:\n            z_y_vec = np.dot(z_mat.T, y_residuals[:, k])\n            coeffs_first_stage = np.linalg.solve(\n                np.atleast_2d(n_z_mat_square),\n                np.atleast_1d(z_y_vec),\n            )\n\n            beta = np.atleast_2d(n_z_mat) @ coeffs_first_stage\n\n        rhs = y_residuals[:, k] - n_mat @ beta\n\n        alpha = np.linalg.solve(m_mat, rhs[: n_params + 1])\n        coeffs_linear[k, :] = alpha[1 : (n_params + 1)]\n\n        num = 0\n        for i in range(n_params):\n            coeffs_square[k, i, i] = beta[num]\n            num += 1\n            for j in range(i + 1, n_params):\n                coeffs_square[k, j, i] = beta[num] / np.sqrt(2)\n                coeffs_square[k, i, j] = beta[num] / np.sqrt(2)\n                num += 1\n\n    coef = {\n        \"linear_terms\": coeffs_linear.T,\n        \"square_terms\": coeffs_square,\n    }\n\n    return coef\n\n\ndef update_trustregion_radius(\n    result_subproblem,\n    rho,\n    model_is_valid,\n    delta,\n    delta_min,\n    delta_max,\n    eta1,\n    gamma0,\n    gamma1,\n):\n    \"\"\"Update the trust-region radius.\"\"\"\n    norm_x_sub = np.sqrt(np.sum(result_subproblem[\"x\"] ** 2))\n\n    if rho >= eta1 and norm_x_sub > 0.5 * delta:\n        delta = min(delta * gamma1, delta_max)\n    elif 
model_is_valid is True:\n        delta = max(delta * gamma0, delta_min)\n\n    return delta\n\n\ndef get_last_model_indices_and_check_for_repeated_model(\n    model_indices, last_model_indices, n_modelpoints, n_last_modelpoints\n):\n    \"\"\"Get the last model_indices and check if we have reused the same model.\"\"\"\n    if n_modelpoints == n_last_modelpoints:\n        same_model_used = True\n    else:\n        same_model_used = False\n\n    for i in range(n_modelpoints):\n        if same_model_used:\n            if model_indices[i] == last_model_indices[i]:\n                same_model_used = True\n            else:\n                same_model_used = False\n        last_model_indices[i] = model_indices[i]\n\n    n_last_modelpoints = n_modelpoints\n\n    return last_model_indices, n_last_modelpoints, same_model_used\n\n\ndef add_accepted_point_to_residual_model(model_indices, accepted_index, n_modelpoints):\n    \"\"\"Add accepted point to the residual model.\"\"\"\n    model_indices[1 : n_modelpoints + 1] = model_indices[:n_modelpoints]\n    model_indices[0] = accepted_index\n\n    return model_indices\n\n\ndef _get_monomial_basis(x):\n    \"\"\"Get the monomial basis (basis for quadratic functions) of x.\n\n    Monomial basis = .5*[x(1)^2  sqrt(2)*x(1)*x(2) ... sqrt(2)*x(1)*x(n_params) ...\n        ... x(2)^2 sqrt(2)*x(2)*x(3) .. x(n_params)^2]\n\n    Args:\n        x (np.ndarray): Parameter vector of shape (n_params,).\n\n    Returns:\n        np.ndarray: Monomial basis of x of shape (n_params * (n_params + 1) / 2,).\n\n    \"\"\"\n    n_params = len(x)\n    monomial_basis = np.zeros(int(n_params * (n_params + 1) / 2))\n\n    j = 0\n    for i in range(n_params):\n        monomial_basis[j] = 0.5 * x[i] ** 2\n        j += 1\n\n        for k in range(i + 1, n_params):\n            monomial_basis[j] = x[i] * x[k] / np.sqrt(2)\n            j += 1\n\n    return monomial_basis\n"
  },
  {
    "path": "src/optimagic/optimizers/_pounders/pounders_history.py",
    "content": "\"\"\"History class for pounders and similar optimizers.\"\"\"\n\nimport numpy as np\n\n\nclass LeastSquaresHistory:\n    \"\"\"Container to save and retrieve history entries for a least-square optimizer.\n\n    These entries are:\n    - xs\n    - residuals\n    - critvals\n\n    The class automatically determines the 'best' entries, i.e. entries related to\n    the x that yield the smallest critval - given all xs stored so far.\n\n    Xs and residuals can be both saved and accessed in their centered\n    and uncentered form. 'Centered' meaning that they are scaled by their\n    corresponding 'best' entry. 'Uncentered' simply being the raw entries.\n\n    Critvals don't need to be added explicitly, as they are computed internally\n    as the sum of squares of the residuals whenever new entries are added.\n\n    \"\"\"\n\n    def __init__(self):\n        self.xs = None\n        self.best_x = None\n        self.residuals = None\n        self.best_residuals = None\n        self.critvals = None\n        self.n_fun = 0\n        self.best_index = 0\n        self.best_critval = np.inf\n\n    def add_entries(self, xs, residuals):\n        \"\"\"Add new parameter vectors and residuals to the history.\n\n        Args:\n            xs (np.ndarray or list): 1d or 2d array or list of 1d arrays with\n                parameter vectors.\n            residuals (np.ndarray or list): 1d or 2d array or list of 1d arrays with\n                least square residuals.\n\n        \"\"\"\n        xs = np.atleast_2d(xs)\n        residuals = np.atleast_2d(residuals)\n        critvals = np.atleast_1d((residuals**2).sum(axis=-1))\n\n        argmin_candidate = critvals.argmin()\n        min_candidate = critvals[argmin_candidate]\n\n        if min_candidate < self.best_critval:\n            self.best_index = argmin_candidate + self.n_fun\n            self.best_x = xs[argmin_candidate]\n            self.best_residuals = residuals[argmin_candidate]\n\n        if len(xs) != 
len(residuals):\n            raise ValueError()\n\n        self.xs = _add_entries_to_array(self.xs, xs, self.n_fun)\n        self.residuals = _add_entries_to_array(self.residuals, residuals, self.n_fun)\n        self.critvals = _add_entries_to_array(self.critvals, critvals, self.n_fun)\n\n        self.n_fun += len(xs)\n\n    def add_centered_entries(self, xs, residuals, center_info):\n        \"\"\"Add new parameter vectors and residuals to the history.\n\n        Args:\n            xs (np.ndarray or list): 1d or 2d array or list of 1d arrays with\n                parameter vectors.\n            residuals (np.ndarray or list): 1d or 2d array or list of 1d arrays with\n                least square residuals.\n            center_info (dict): Dictionary with the entries \"x\", \"residuals\" and\n                \"radius\". The information is used to uncenter parameters and residuals\n                before adding them to the history.\n\n        \"\"\"\n        xs = np.atleast_2d(xs)\n        residuals = np.atleast_2d(residuals)\n        xs_uncentered = xs * center_info[\"radius\"] + center_info[\"x\"]\n        residuals_uncentered = residuals + center_info[\"residuals\"]\n        self.add_entries(xs_uncentered, residuals_uncentered)\n\n    def get_entries(self, index=None):\n        \"\"\"Retrieve xs, residuals and critvals from the history.\n\n        Args:\n            index (None, int or np.ndarray): Specifies the subset of rows that will\n                be returned.\n\n        Returns:\n            np.ndarray: 1d or 2d array with parameter vectors.\n            np.ndarray: 1d or 2d array with residuals.\n            np.ndarray: Float or 1d array with criterion values.\n\n        \"\"\"\n        names = [\"xs\", \"residuals\", \"critvals\"]\n\n        out = (getattr(self, name)[: self.n_fun] for name in names)\n\n        # Reducing arrays to length n_fun ensures that invalid indices raise IndexError\n        if index is not None:\n            out = [arr[index] for 
arr in out]\n\n        return tuple(out)\n\n    def get_xs(self, index=None):\n        \"\"\"Retrieve xs from history.\n\n        Args:\n            index (None, int or np.ndarray): Specifies the subset of rows that will\n                be returned.\n\n        Returns:\n            np.ndarray: 1d or 2d array with parameter vectors\n\n        \"\"\"\n        out = self.xs[: self.n_fun]\n        out = out[index] if index is not None else out\n\n        return out\n\n    def get_residuals(self, index=None):\n        \"\"\"Retrieve residuals from history.\n\n        Args:\n            index (None, int or np.ndarray): Specifies the subset of rows that will\n                be returned.\n\n        Returns:\n            np.ndarray: 1d or 2d array with residuals.\n\n        \"\"\"\n        out = self.residuals[: self.n_fun]\n        out = out[index] if index is not None else out\n\n        return out\n\n    def get_critvals(self, index=None):\n        \"\"\"Retrieve critvals from history.\n\n        Args:\n            index (None, int or np.ndarray): Specifies the subset of rows that will\n                be returned.\n\n        Returns:\n            np.ndarray: Float or 1d array with criterion values.\n\n        \"\"\"\n        out = self.critvals[: self.n_fun]\n        out = out[index] if index is not None else out\n\n        return out\n\n    def get_centered_entries(self, center_info, index=None):\n        \"\"\"Retrieve xs, residuals and critvals from the history.\n\n        Args:\n            center_info (dict): Dictionary with the entries \"x\", \"residuals\" and\n                \"radius\". 
The information is used to center parameters, residuals\n                and critvals.\n            index (None, int or np.ndarray): Specifies the subset of rows that will\n                be returned.\n\n        Returns:\n            np.ndarray: 1d or 2d array with centered parameter vectors\n            np.ndarray: 1d or 2d array with centered residuals\n            np.ndarray: Float or 1d array with centered criterion values.\n\n        \"\"\"\n        xs_unc, residuals_unc, _ = self.get_entries(index=index)\n        xs = (xs_unc - center_info[\"x\"]) / center_info[\"radius\"]\n        residuals = residuals_unc - center_info[\"residuals\"]\n        critvals = (residuals**2).sum(axis=-1)\n\n        return xs, residuals, critvals\n\n    def get_centered_xs(self, center_info, index=None):\n        \"\"\"Retrieve centered xs from the history.\n\n        Args:\n            center_info (dict): Dictionary with the entries \"x\" and\n                \"radius\". The information is used to center parameters.\n            index (None, int or np.ndarray): Specifies the subset of rows that will\n                be returned.\n\n        Returns:\n            np.ndarray: 1d or 2d array with centered parameter vectors.\n\n        \"\"\"\n        xs_unc = self.get_xs(index=index)\n        xs = (xs_unc - center_info[\"x\"]) / center_info[\"radius\"]\n\n        return xs\n\n    def get_centered_residuals(self, center_info, index=None):\n        \"\"\"Retrieve centered residuals from the history.\n\n        Args:\n            center_info (dict): Dictionary with the entry \"residuals\".\n                The information is used to center residuals.\n            index (None, int or np.ndarray): Specifies the subset of rows that will\n                be returned.\n\n        Returns:\n            np.ndarray: 1d or 2d array with centered residuals.\n\n        \"\"\"\n        residuals_unc = self.get_residuals(index=index)\n        residuals = residuals_unc - center_info[\"residuals\"]\n\n 
       return residuals\n\n    def get_centered_critvals(self, center_info, index=None):\n        \"\"\"Retrieve centered critvals from the history.\n\n        Args:\n            center_info (dict): Dictionary with the entry \"residuals\".\n                The information is used to center critvals.\n            index (None, int or np.ndarray): Specifies the subset of rows that will\n                be returned.\n\n        Returns:\n            np.ndarray: Float or 1d array with centered criterion values.\n\n        \"\"\"\n        residuals_unc = self.get_residuals(index=index)\n        residuals = residuals_unc - center_info[\"residuals\"]\n        critvals = (residuals**2).sum(axis=-1)\n\n        return critvals\n\n    def get_n_fun(self):\n        return self.n_fun\n\n    def get_best_index(self):\n        return self.best_index\n\n    def get_best_entries(self):\n        return self.get_entries(index=self.best_index)\n\n    def get_best_x(self):\n        return self.get_xs(index=self.best_index)\n\n    def get_best_residuals(self):\n        return self.get_residuals(index=self.best_index)\n\n    def get_best_critval(self):\n        return self.get_critvals(index=self.best_index)\n\n    def get_best_centered_entries(self, center_info):\n        return self.get_centered_entries(center_info, index=self.best_index)\n\n\ndef _add_entries_to_array(arr, new, position):\n    if arr is None:\n        shape = 100_000 if new.ndim == 1 else (100_000, new.shape[1])\n        arr = np.full(shape, np.nan)\n\n    if len(arr) - position - len(new) < 0:\n        n_extend = max(len(arr), len(new))\n        if arr.ndim == 2:\n            extension_shape = (n_extend, arr.shape[1])\n            arr = np.vstack([arr, np.full(extension_shape, np.nan)])\n        else:\n            arr = np.hstack([arr, np.full(n_extend, np.nan)])\n\n    arr[position : position + len(new)] = new\n\n    return arr\n"
  },
  {
    "path": "src/optimagic/optimizers/bayesian_optimizer.py",
    "content": "\"\"\"Implement Bayesian optimization using bayes_opt.\"\"\"\n\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom typing import TYPE_CHECKING, Any, Literal, Type\n\nimport numpy as np\nfrom numpy.typing import NDArray\nfrom scipy.optimize import NonlinearConstraint\n\nfrom optimagic import mark\nfrom optimagic.config import IS_BAYESOPT_INSTALLED_AND_VERSION_NEWER_THAN_2\nfrom optimagic.exceptions import NotInstalledError\nfrom optimagic.optimization.algo_options import N_RESTARTS\nfrom optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult\nfrom optimagic.optimization.internal_optimization_problem import (\n    InternalBounds,\n    InternalOptimizationProblem,\n)\nfrom optimagic.typing import (\n    AggregationLevel,\n    NonNegativeFloat,\n    NonNegativeInt,\n    PositiveFloat,\n    PositiveInt,\n    UnitIntervalFloat,\n)\n\nif TYPE_CHECKING:\n    from bayes_opt import BayesianOptimization\n    from bayes_opt.acquisition import AcquisitionFunction\n\n\n@mark.minimizer(\n    name=\"bayes_opt\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_BAYESOPT_INSTALLED_AND_VERSION_NEWER_THAN_2,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=True,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,  # temp\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass BayesOpt(Algorithm):\n    \"\"\"Minimize a scalar function using Bayesian Optimization with Gaussian Process.\n\n    This optimizer wraps the BayesianOptimization package :cite:`Nogueira2014`,\n    which implements a surrogate model-based global optimization algorithm.\n    It works by constructing a posterior distribution over the objective function\n    via a Gaussian process that best approximates it. 
Instead of directly optimizing\n    the expensive original function, it uses a proxy optimization problem by finding\n    the maximum of an acquisition function, which is computationally cheaper than\n    evaluating the original function.\n\n    The algorithm starts by sampling a few initial points (init_points) to gather\n    observations of the objective function. These observations are used to fit a\n    Gaussian process surrogate model that learns about the function's behavior. The\n    optimizer then uses an acquisition function to iteratively select promising new\n    points to evaluate, updates its model, and this continues for stopping_maxiter\n    iterations.\n\n    This optimizer is well-suited for expensive functions where each evaluation is\n    costly (simulations, experiments, model training), black-box optimization where\n    gradients are unavailable, and problems with a limited evaluation budget.\n\n    Default parameter values match those of the underlying BayesianOptimization package\n    where appropriate. Nonlinear constraints are currently not supported.\n\n    \"\"\"\n\n    init_points: PositiveInt = 5\n    \"\"\"Number of random points sampled before optimization.\n\n    More points improve initial GP fit but increase evaluation cost. Default = 5.\n\n    \"\"\"\n\n    stopping_maxiter: PositiveInt = 25\n    \"\"\"Number of Bayesian optimization iterations to perform after initial\n    exploration.\"\"\"\n\n    verbose: Literal[0, 1, 2] = 0\n    \"\"\"Verbosity level (0 for silent, 1 for brief, 2 for detailed output).\"\"\"\n\n    kappa: NonNegativeFloat = 2.576\n    \"\"\"Exploration-exploitation trade-off parameter for Upper Confidence Bound\n    acquisition.\n\n    Controls the balance between exploration and exploitation when using the Upper\n    Confidence Bound (UCB) acquisition function. Higher values favor exploration over\n    exploitation . 
This parameter is only used when the acquisition function is \"ucb\" or\n    \"upper_confidence_bound\". The default value of 2.576 corresponds to a 99% confidence\n    interval.\n\n    \"\"\"\n\n    xi: PositiveFloat = 0.01\n    \"\"\"Exploration-exploitation trade-off parameter for Expected/Probability of\n    Improvement.\n\n    Controls the balance between exploration and exploitation for Expected Improvement\n    (EI) and Probability of Improvement (POI) acquisition functions. Higher values favor\n    exploration over exploitation . This parameter is only used when the acquisition\n    function is \"ei\", \"expected_improvement\", \"poi\", or \"probability_of_improvement\".\n    The default value is 0.01.\n\n    \"\"\"\n\n    exploration_decay: UnitIntervalFloat | None = None\n    \"\"\"Rate at which exploration decays over time during optimization.\n\n    If specified, the exploration parameters (kappa or xi) are multiplied by this factor\n    after each iteration, gradually shifting from exploration to exploitation. Must be\n    between 0 and 1 (range: (0, 1]) If None, no decay is applied and exploration remains\n    constant.\n\n    \"\"\"\n\n    exploration_decay_delay: NonNegativeInt | None = None\n    \"\"\"Number of iterations to delay before applying exploration decay.\n\n    If specified, exploration decay only begins after this many iterations have\n    completed. If None, decay is applied from the first iteration.\n\n    \"\"\"\n\n    seed: int | None = None\n    \"\"\"Random seed for reproducible results.\"\"\"\n\n    acquisition_function: (\n        str | AcquisitionFunction | Type[AcquisitionFunction] | None\n    ) = None\n    \"\"\"Strategy for selecting the next evaluation point during optimization.\n\n    The acquisition function determines how to balance exploration and exploitation when\n    selecting the next point to evaluate. 
Supported options:\n\n    - String: \"ucb\"/\"upper_confidence_bound\", \"ei\"/\"expected_improvement\",\n        \"poi\"/\"probability_of_improvement\"\n    - AcquisitionFunction instance: Pre-configured acquisition function object\n    - AcquisitionFunction class: Class that will be instantiated with default parameters\n    - None: Uses package default (UCB for unconstrained, EI for constrained problems)\n\n    \"\"\"\n\n    allow_duplicate_points: bool = False\n    \"\"\"Whether to allow repeated evaluation of the same point.\"\"\"\n\n    enable_sdr: bool = False\n    \"\"\"Enable Sequential Domain Reduction (SDR).\n\n    When True, the search domain is iteratively shrunk around promising regions\n    using SDR parameters (`sdr_gamma_osc`, `sdr_gamma_pan`, `sdr_eta`,\n    `sdr_minimum_window`).\n\n    \"\"\"\n\n    sdr_gamma_osc: float = 0.7\n    \"\"\"Oscillation shrinkage parameter for SDR.\n\n    Controls how aggressively the search space shrinks in oscillating fashion. Only used\n    when enable_sdr is True. Typical range: [0.5, 0.7]. Default = 0.7.\n\n    \"\"\"\n\n    sdr_gamma_pan: float = 1.0\n    \"\"\"Panning parameter for SDR.\n\n    Controls the panning behavior during domain reduction. Only used when enable_sdr is\n    True. Typical value: 1.0. Default = 1.0.\n\n    \"\"\"\n\n    sdr_eta: float = 0.9\n    \"\"\"Zoom parameter for SDR.\n\n    Only used when enable_sdr is True. Default = 0.9.\n\n    \"\"\"\n\n    sdr_minimum_window: NonNegativeFloat = 0.0\n    \"\"\"Minimum window size for Sequential Domain Reduction.\n\n    Only used when enable_sdr is True. Default = 0.0.\n\n    \"\"\"\n\n    alpha: float = 1e-6\n    \"\"\"Noise parameter for the Gaussian Process model.\n\n    Controls the amount of noise assumed in the objective function observations. 
Default\n    is 1e-6.\n\n    \"\"\"\n\n    n_restarts: int = N_RESTARTS\n    \"\"\"Number of times to restart the optimization.\"\"\"\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        if not IS_BAYESOPT_INSTALLED_AND_VERSION_NEWER_THAN_2:\n            raise NotInstalledError(\n                \"To use the 'bayes_opt' optimizer you need to install bayes_opt. \"\n                \"Use 'pip install bayesian-optimization'. \"\n                \"Check the documentation for more details: \"\n                \"https://bayesian-optimization.github.io/BayesianOptimization/index.html\"\n            )\n\n        from bayes_opt import BayesianOptimization\n\n        pbounds = _process_bounds(problem.bounds)\n\n        acq = _process_acquisition_function(\n            acquisition_function=self.acquisition_function,\n            kappa=self.kappa,\n            xi=self.xi,\n            exploration_decay=self.exploration_decay,\n            exploration_decay_delay=self.exploration_decay_delay,\n            random_seed=self.seed,\n        )\n\n        constraint = None\n        constraint = self._process_constraints(problem.nonlinear_constraints)\n\n        def objective(**kwargs: dict[str, float]) -> float:\n            x = _extract_params_from_kwargs(kwargs)\n            return -float(\n                problem.fun(x)\n            )  # Negate to convert minimization to maximization\n\n        bounds_transformer = None\n        if self.enable_sdr:\n            from bayes_opt import SequentialDomainReductionTransformer\n\n            bounds_transformer = SequentialDomainReductionTransformer(\n                gamma_osc=self.sdr_gamma_osc,\n                gamma_pan=self.sdr_gamma_pan,\n                eta=self.sdr_eta,\n                minimum_window=self.sdr_minimum_window,\n            )\n\n        optimizer = BayesianOptimization(\n            f=objective,\n            
pbounds=pbounds,\n            acquisition_function=acq,\n            constraint=constraint,\n            random_state=self.seed,\n            verbose=self.verbose,\n            bounds_transformer=bounds_transformer,\n            allow_duplicate_points=self.allow_duplicate_points,\n        )\n\n        # Set Gaussian Process parameters\n        optimizer.set_gp_params(alpha=self.alpha, n_restarts_optimizer=self.n_restarts)\n\n        # Use initial point as first probe\n        probe_params = {f\"param{i}\": float(val) for i, val in enumerate(x0)}\n        optimizer.probe(\n            params=probe_params,\n            lazy=True,\n        )\n        optimizer.maximize(\n            init_points=self.init_points,\n            n_iter=self.stopping_maxiter,\n        )\n\n        res = _process_bayes_opt_result(optimizer=optimizer, x0=x0, problem=problem)\n        return res\n\n    def _process_constraints(\n        self, constraints: list[dict[str, Any]] | None\n    ) -> NonlinearConstraint | None:\n        \"\"\"Temporarily skip processing of nonlinear constraints.\n\n        Args:\n            constraints: List of constraint dictionaries from the problem\n\n        Returns:\n            None. 
Nonlinear constraint processing is deferred.\n\n        \"\"\"\n        # TODO: Implement proper handling of nonlinear constraints in future.\n        return None\n\n\ndef _process_bounds(bounds: InternalBounds) -> dict[str, tuple[float, float]]:\n    \"\"\"Process bounds for bayesian optimization.\n\n    Args:\n        bounds: Internal bounds object.\n\n    Returns:\n        Dictionary mapping parameter names to (lower, upper) bound tuples.\n\n    Raises:\n        ValueError: If bounds are None or infinite.\n\n    \"\"\"\n    if not (\n        bounds.lower is not None\n        and bounds.upper is not None\n        and np.all(np.isfinite(bounds.lower))\n        and np.all(np.isfinite(bounds.upper))\n    ):\n        raise ValueError(\n            \"Bayesian optimization requires finite bounds for all parameters. \"\n            \"Bounds cannot be None or infinite.\"\n        )\n\n    return {\n        f\"param{i}\": (lower, upper)\n        for i, (lower, upper) in enumerate(zip(bounds.lower, bounds.upper, strict=True))\n    }\n\n\ndef _extract_params_from_kwargs(params_dict: dict[str, Any]) -> NDArray[np.float64]:\n    \"\"\"Extract parameters from kwargs dictionary.\n\n    Args:\n        params_dict: Dictionary with parameter values.\n\n    Returns:\n        Array of parameter values.\n\n    \"\"\"\n    return np.array(list(params_dict.values()))\n\n\ndef _process_acquisition_function(\n    acquisition_function: (\n        str | AcquisitionFunction | Type[AcquisitionFunction] | None\n    ),\n    kappa: NonNegativeFloat,\n    xi: PositiveFloat,\n    exploration_decay: float | None,\n    exploration_decay_delay: NonNegativeInt | None,\n    random_seed: int | None,\n) -> AcquisitionFunction | None:\n    \"\"\"Create and return the appropriate acquisition function.\n\n    Args:\n        acquisition_function: The acquisition function specification.\n            Can be one of the following:\n            - A string: \"upper_confidence_bound\" (or \"ucb\"), 
\"expected_improvement\"\n              (or \"ei\"), \"probability_of_improvement\" (or \"poi\")\n            - An instance of `AcquisitionFunction`\n            - A class inheriting from `AcquisitionFunction`\n            - None (uses the default acquisition function from the bayes_opt package)\n        kappa: Exploration-exploitation trade-off parameter for Upper Confidence Bound\n            acquisition function. Higher values favor exploration over exploitation.\n        xi: Exploration-exploitation trade-off parameter for Expected Improvement and\n            Probability of Improvement acquisition functions. Higher values favor\n            exploration over exploitation.\n        exploration_decay: Rate at which exploration parameters (kappa or xi) decay\n            over time. None means no decay is applied.\n        exploration_decay_delay: Number of iterations before starting the decay.\n            None means decay is applied from the start.\n        random_seed: Random seed for reproducibility.\n\n    Returns:\n        The configured acquisition function instance or None for default.\n\n    Raises:\n        ValueError: If acquisition_function is an invalid string.\n        TypeError: If acquisition_function is not a string, an AcquisitionFunction\n            instance, a class inheriting from AcquisitionFunction, or None.\n\n    \"\"\"\n    from bayes_opt import acquisition\n\n    acquisition_function_aliases = {\n        \"ucb\": \"ucb\",\n        \"upper_confidence_bound\": \"ucb\",\n        \"ei\": \"ei\",\n        \"expected_improvement\": \"ei\",\n        \"poi\": \"poi\",\n        \"probability_of_improvement\": \"poi\",\n    }\n\n    if acquisition_function is None:\n        return None\n\n    elif isinstance(acquisition_function, str):\n        acq_name = acquisition_function.lower()\n\n        if acq_name not in acquisition_function_aliases:\n            raise ValueError(\n                f\"Invalid acquisition_function string: 
'{acquisition_function}'. \"\n                f\"Must be one of: {', '.join(acquisition_function_aliases.keys())}\"\n            )\n\n        canonical_name = acquisition_function_aliases[acq_name]\n\n        if canonical_name == \"ucb\":\n            return acquisition.UpperConfidenceBound(\n                kappa=kappa,\n                exploration_decay=exploration_decay,\n                exploration_decay_delay=exploration_decay_delay,\n                random_state=random_seed,\n            )\n        elif canonical_name == \"ei\":\n            return acquisition.ExpectedImprovement(\n                xi=xi,\n                exploration_decay=exploration_decay,\n                exploration_decay_delay=exploration_decay_delay,\n                random_state=random_seed,\n            )\n        elif canonical_name == \"poi\":\n            return acquisition.ProbabilityOfImprovement(\n                xi=xi,\n                exploration_decay=exploration_decay,\n                exploration_decay_delay=exploration_decay_delay,\n                random_state=random_seed,\n            )\n        else:\n            raise ValueError(f\"Unhandled canonical name: {canonical_name}\")\n\n    # If acquisition_function is an instance of AcquisitionFunction class\n    elif isinstance(acquisition_function, acquisition.AcquisitionFunction):\n        return acquisition_function\n\n    # If acquisition_function is a class inheriting from AcquisitionFunction\n    elif isinstance(acquisition_function, type) and issubclass(\n        acquisition_function, acquisition.AcquisitionFunction\n    ):\n        if issubclass(\n            acquisition_function, acquisition.ExpectedImprovement\n        ) or issubclass(acquisition_function, acquisition.ProbabilityOfImprovement):\n            return acquisition_function(\n                xi=xi,\n                exploration_decay=exploration_decay,\n                exploration_decay_delay=exploration_decay_delay,\n                
random_state=random_seed,\n            )\n        elif issubclass(acquisition_function, acquisition.UpperConfidenceBound):\n            return acquisition_function(\n                kappa=kappa,\n                exploration_decay=exploration_decay,\n                exploration_decay_delay=exploration_decay_delay,\n                random_state=random_seed,\n            )\n        else:\n            return acquisition_function()\n\n    else:\n        raise TypeError(\n            \"acquisition_function must be None, a string, \"\n            \"an AcquisitionFunction instance, or a class inheriting from \"\n            \"AcquisitionFunction. \"\n            f\"Got type: {type(acquisition_function).__name__}\"\n        )\n\n\ndef _process_bayes_opt_result(\n    optimizer: BayesianOptimization,\n    x0: NDArray[np.float64],\n    problem: InternalOptimizationProblem,\n) -> InternalOptimizeResult:\n    \"\"\"Convert BayesianOptimization result to InternalOptimizeResult format.\n\n    Args:\n        optimizer: The BayesianOptimization instance after optimization\n        x0: Initial parameter values\n        problem: The internal optimization problem\n\n    Returns:\n        InternalOptimizeResult with processed results\n\n    \"\"\"\n    n_evals = len(optimizer.space)\n\n    if optimizer.max is not None:\n        best_params = optimizer.max[\"params\"]\n        best_x = _extract_params_from_kwargs(best_params)\n        best_y = -optimizer.max[\"target\"]  # Un-negate the result\n        success = True\n        message = \"Optimization succeeded\"\n    else:\n        best_x = x0\n        best_y = float(problem.fun(x0))\n        success = False\n        message = (\n            \"Optimization did not succeed \"\n            \"returning the initial point as the best available result.\"\n        )\n\n    return InternalOptimizeResult(\n        x=best_x,\n        fun=best_y,\n        success=success,\n        message=message,\n        n_iterations=n_evals,\n        
n_fun_evals=n_evals,\n        n_jac_evals=0,\n    )\n"
  },
  {
    "path": "src/optimagic/optimizers/bhhh.py",
    "content": "\"\"\"Implement Berndt-Hall-Hall-Hausman (BHHH) algorithm.\"\"\"\n\nfrom dataclasses import dataclass\nfrom typing import Callable, cast\n\nimport numpy as np\nfrom numpy.typing import NDArray\n\nfrom optimagic import mark\nfrom optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult\nfrom optimagic.optimization.internal_optimization_problem import (\n    InternalOptimizationProblem,\n)\nfrom optimagic.typing import AggregationLevel, NonNegativeFloat, PositiveInt\n\n\n@mark.minimizer(\n    name=\"bhhh\",\n    solver_type=AggregationLevel.LIKELIHOOD,\n    is_available=True,\n    is_global=False,\n    needs_jac=True,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=False,\n    supports_bounds=False,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass BHHH(Algorithm):\n    converence_gtol_abs: NonNegativeFloat = 1e-8\n    # TODO: Why is this 200?\n    stopping_maxiter: PositiveInt = 200\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        res = bhhh_internal(\n            fun_and_jac=cast(\n                Callable[[NDArray[np.float64]], NDArray[np.float64]],\n                problem.fun_and_jac,\n            ),\n            x=x0,\n            gtol_abs=self.converence_gtol_abs,\n            maxiter=self.stopping_maxiter,\n        )\n\n        return res\n\n\ndef bhhh_internal(\n    fun_and_jac: Callable[[NDArray[np.float64]], NDArray[np.float64]],\n    x: NDArray[np.float64],\n    gtol_abs: NonNegativeFloat,\n    maxiter: PositiveInt,\n) -> InternalOptimizeResult:\n    \"\"\"Minimize a likelihood function using the BHHH algorithm.\n\n    Args:\n        criterion_and_derivative: The objective function to be minimized.\n        x: Initial guess of the parameter vector (starting 
points).\n        gtol_abs: Stopping criterion for the\n            gradient tolerance.\n        maxiter: Maximum number of iterations. If reached,\n            terminate.\n\n    Returns:\n        InternalOptimizeResult: The result of the optimization.\n\n    \"\"\"\n    criterion_accepted, gradient = fun_and_jac(x)\n    x_accepted = x\n\n    hessian_approx = np.dot(gradient.T, gradient)\n    gradient_sum = np.sum(gradient, axis=0)\n    direction = np.linalg.solve(hessian_approx, gradient_sum)\n    gtol = np.dot(gradient_sum, direction)\n\n    initial_step_size = 1.0\n    step_size = initial_step_size\n\n    niter = 1\n    while niter < maxiter:\n        niter += 1\n\n        x_candidate = x_accepted + step_size * direction\n        criterion_candidate, gradient = fun_and_jac(x_candidate)\n\n        # If previous step was accepted\n        if step_size == initial_step_size:\n            hessian_approx = np.dot(gradient.T, gradient)\n\n        else:\n            criterion_candidate, gradient = fun_and_jac(x_candidate)\n\n        # Line search\n        if np.sum(criterion_candidate) > np.sum(criterion_accepted):\n            step_size /= 2\n\n            if step_size <= 0.01:\n                # Accept step\n                x_accepted = x_candidate\n                criterion_accepted = criterion_candidate\n\n                # Reset step size\n                step_size = initial_step_size\n\n        # If decrease in likelihood, calculate new direction vector\n        else:\n            # Accept step\n            x_accepted = x_candidate\n            criterion_accepted = criterion_candidate\n\n            gradient_sum = np.sum(gradient, axis=0)\n            direction = np.linalg.solve(hessian_approx, gradient_sum)\n            gtol = np.dot(gradient_sum, direction)\n\n            if gtol < 0:\n                hessian_approx = np.dot(gradient.T, gradient)\n                direction = np.linalg.solve(hessian_approx, 
gradient_sum)\n\n            # Reset stepsize\n            step_size = initial_step_size\n\n        if gtol < gtol_abs:\n            break\n\n    res = InternalOptimizeResult(\n        x=x_accepted,\n        fun=criterion_accepted,\n        message=\"Under development\",\n        n_iterations=niter,\n    )\n\n    return res\n"
  },
  {
    "path": "src/optimagic/optimizers/fides.py",
    "content": "\"\"\"Implement the fides optimizer.\"\"\"\n\nimport logging\nfrom dataclasses import dataclass\nfrom typing import Callable, Literal, cast\n\nimport numpy as np\nfrom numpy.typing import NDArray\n\nfrom optimagic import mark\nfrom optimagic.config import IS_FIDES_INSTALLED\nfrom optimagic.exceptions import NotInstalledError\nfrom optimagic.optimization.algo_options import (\n    CONVERGENCE_FTOL_ABS,\n    CONVERGENCE_FTOL_REL,\n    CONVERGENCE_GTOL_ABS,\n    CONVERGENCE_GTOL_REL,\n    CONVERGENCE_XTOL_ABS,\n    STOPPING_MAXITER,\n)\nfrom optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult\nfrom optimagic.optimization.internal_optimization_problem import (\n    InternalOptimizationProblem,\n)\nfrom optimagic.typing import (\n    AggregationLevel,\n    NonNegativeFloat,\n    PositiveFloat,\n    PositiveInt,\n)\n\n\n@mark.minimizer(\n    name=\"fides\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_FIDES_INSTALLED,\n    is_global=False,\n    needs_jac=True,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=True,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass Fides(Algorithm):\n    hessian_update_strategy: Literal[\n        \"bfgs\",\n        \"bb\",\n        \"bg\",\n        \"dfp\",\n        \"sr1\",\n    ] = \"bfgs\"\n    convergence_ftol_abs: NonNegativeFloat = CONVERGENCE_FTOL_ABS\n    convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL\n    convergence_xtol_abs: NonNegativeFloat = CONVERGENCE_XTOL_ABS\n    convergence_gtol_abs: NonNegativeFloat = CONVERGENCE_GTOL_ABS\n    convergence_gtol_rel: NonNegativeFloat = CONVERGENCE_GTOL_REL\n    stopping_maxiter: PositiveInt = STOPPING_MAXITER\n    stopping_max_seconds: float = np.inf\n    trustregion_initial_radius: PositiveFloat = 1.0\n    trustregion_stepback_strategy: 
Literal[\n        \"truncate\",\n        \"reflect\",\n        \"reflect_single\",\n        \"mixed\",\n    ] = \"truncate\"\n    trustregion_subspace_dimension: Literal[\n        \"full\",\n        \"2D\",\n        \"scg\",\n    ] = \"full\"\n    trustregion_max_stepback_fraction: float = 0.95\n    trustregion_decrease_threshold: float = 0.25\n    trustregion_increase_threshold: float = 0.75\n    trustregion_decrease_factor: float = 0.25\n    trustregion_increase_factor: float = 2.0\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        res = fides_internal(\n            fun_and_jac=cast(\n                Callable[[NDArray[np.float64]], NDArray[np.float64]],\n                problem.fun_and_jac,\n            ),\n            x=x0,\n            lower_bounds=problem.bounds.lower,\n            upper_bounds=problem.bounds.upper,\n            hessian_update_strategy=self.hessian_update_strategy,\n            convergence_ftol_abs=self.convergence_ftol_abs,\n            convergence_ftol_rel=self.convergence_ftol_rel,\n            convergence_xtol_abs=self.convergence_xtol_abs,\n            convergence_gtol_abs=self.convergence_gtol_abs,\n            convergence_gtol_rel=self.convergence_gtol_rel,\n            stopping_maxiter=self.stopping_maxiter,\n            stopping_max_seconds=self.stopping_max_seconds,\n            trustregion_initial_radius=self.trustregion_initial_radius,\n            trustregion_stepback_strategy=self.trustregion_stepback_strategy,\n            trustregion_subspace_dimension=self.trustregion_subspace_dimension,\n            trustregion_max_stepback_fraction=self.trustregion_max_stepback_fraction,\n            trustregion_decrease_threshold=self.trustregion_decrease_threshold,\n            trustregion_increase_threshold=self.trustregion_increase_threshold,\n            trustregion_decrease_factor=self.trustregion_decrease_factor,\n            
trustregion_increase_factor=self.trustregion_increase_factor,\n        )\n\n        return res\n\n\ndef fides_internal(\n    fun_and_jac: Callable[[NDArray[np.float64]], NDArray[np.float64]],\n    x: NDArray[np.float64],\n    lower_bounds: NDArray[np.float64] | None,\n    upper_bounds: NDArray[np.float64] | None,\n    hessian_update_strategy: Literal[\n        \"bfgs\",\n        \"bb\",\n        \"bg\",\n        \"dfp\",\n        \"sr1\",\n    ],\n    convergence_ftol_abs: NonNegativeFloat,\n    convergence_ftol_rel: NonNegativeFloat,\n    convergence_xtol_abs: NonNegativeFloat,\n    convergence_gtol_abs: NonNegativeFloat,\n    convergence_gtol_rel: NonNegativeFloat,\n    stopping_maxiter: PositiveInt,\n    stopping_max_seconds: float,\n    trustregion_initial_radius: PositiveFloat,\n    trustregion_stepback_strategy: Literal[\n        \"truncate\",\n        \"reflect\",\n        \"reflect_single\",\n        \"mixed\",\n    ],\n    trustregion_subspace_dimension: Literal[\n        \"full\",\n        \"2D\",\n        \"scg\",\n    ],\n    trustregion_max_stepback_fraction: float,\n    trustregion_decrease_threshold: float,\n    trustregion_increase_threshold: float,\n    trustregion_decrease_factor: float,\n    trustregion_increase_factor: float,\n) -> InternalOptimizeResult:\n    \"\"\"Minimize a scalar function using the Fides Optimizer.\n\n    For details see\n    :ref: `fides_algorithm`.\n\n    \"\"\"\n    if not IS_FIDES_INSTALLED:\n        raise NotInstalledError(\n            \"The 'fides' algorithm requires the fides package to be installed. 
\"\n            \"You can install it with `pip install fides>=0.7.4`.\"\n        )\n\n    from fides import Optimizer\n\n    fides_options = {\n        \"delta_init\": trustregion_initial_radius,\n        \"eta\": trustregion_increase_threshold,\n        \"fatol\": convergence_ftol_abs,\n        \"frtol\": convergence_ftol_rel,\n        \"gamma1\": trustregion_decrease_factor,\n        \"gamma2\": trustregion_increase_factor,\n        \"gatol\": convergence_gtol_abs,\n        \"grtol\": convergence_gtol_rel,\n        \"maxiter\": stopping_maxiter,\n        \"maxtime\": stopping_max_seconds,\n        \"mu\": trustregion_decrease_threshold,\n        \"stepback_strategy\": trustregion_stepback_strategy,\n        \"subspace_solver\": trustregion_subspace_dimension,\n        \"theta_max\": trustregion_max_stepback_fraction,\n        \"xtol\": convergence_xtol_abs,\n    }\n\n    hessian_instance = _create_hessian_updater_from_user_input(hessian_update_strategy)\n\n    lower_bounds = np.full(len(x), -np.inf) if lower_bounds is None else lower_bounds\n    upper_bounds = np.full(len(x), np.inf) if upper_bounds is None else upper_bounds\n\n    opt = Optimizer(\n        fun=fun_and_jac,\n        lb=lower_bounds,\n        ub=upper_bounds,\n        verbose=logging.ERROR,\n        options=fides_options,\n        funargs=None,\n        hessian_update=hessian_instance,\n        resfun=False,\n    )\n    raw_res = opt.minimize(x)\n    res = _process_fides_res(raw_res, opt)\n    out = InternalOptimizeResult(\n        x=res[\"solution_x\"],\n        fun=res[\"solution_criterion\"],\n        jac=res[\"solution_derivative\"],\n        hess=res[\"solution_hessian\"],\n        success=res[\"success\"],\n        message=res[\"message\"],\n        n_iterations=res[\"n_iterations\"],\n    )\n\n    return out\n\n\ndef _process_fides_res(raw_res, opt):\n    \"\"\"Create an optimagic results dictionary from the Fides output.\n\n    Args:\n        raw_res (tuple): Tuple containing the Fides 
result\n        opt (fides.Optimizer): Fides Optimizer after minimize has been called on it.\n\n    \"\"\"\n    fval, x, grad, hess = raw_res\n    res = {\n        \"solution_criterion\": fval,\n        \"solution_x\": x,\n        \"solution_derivative\": grad,\n        \"solution_hessian\": hess,\n        \"success\": opt.converged,\n        \"n_iterations\": opt.iteration,\n        \"message\": _process_exitflag(opt.exitflag),\n    }\n    return res\n\n\ndef _process_exitflag(exitflag):\n    messages = {\n        \"DID_NOT_RUN\": \"The optimizer did not run\",\n        \"MAXITER\": \"Reached maximum number of allowed iterations\",\n        \"MAXTIME\": \"Expected to reach maximum allowed time in next iteration\",\n        \"NOT_FINITE\": \"Encountered non-finite fval/grad/hess\",\n        \"EXCEEDED_BOUNDARY\": \"Exceeded specified boundaries\",\n        \"DELTA_TOO_SMALL\": \"Trust Region Radius too small to proceed\",\n        \"FTOL\": \"Converged according to fval difference\",\n        \"XTOL\": \"Converged according to x difference\",\n        \"GTOL\": \"Converged according to gradient norm\",\n    }\n\n    out = messages.get(exitflag.name)\n\n    return out\n\n\ndef _create_hessian_updater_from_user_input(hessian_update_strategy):\n    from fides import hessian_approximation\n\n    hessians_needing_residuals = (\n        hessian_approximation.FX,\n        hessian_approximation.SSM,\n        hessian_approximation.TSSM,\n        hessian_approximation.GNSBFGS,\n    )\n    unsupported_hess_msg = (\n        f\"{hessian_update_strategy} not supported because it requires \"\n        \"residuals. 
Choose one of 'BB', 'BFGS', 'BG', 'DFP' or 'SR1' or pass \"\n        \"an instance of the fides.hessian_approximation.HessianApproximation \"\n        \"class.\"\n    )\n\n    if hessian_update_strategy in (\"broyden\", \"Broyden\", \"BROYDEN\"):\n        raise ValueError(\n            \"You cannot use the Broyden update strategy without specifying the \"\n            \"interpolation parameter phi. Import the Broyden class from \"\n            \"`fides.hessian_approximation`, create an instance of it with your \"\n            \"desired value of phi and pass this instance instead.\"\n        )\n    elif isinstance(hessian_update_strategy, str):\n        if hessian_update_strategy.lower() in [\"fx\", \"ssm\", \"tssm\", \"gnsbfgs\"]:\n            raise NotImplementedError(unsupported_hess_msg)\n        else:\n            hessian_name = hessian_update_strategy.upper()\n            hessian_class = getattr(hessian_approximation, hessian_name)\n            hessian_instance = hessian_class()\n    elif isinstance(\n        hessian_update_strategy, hessian_approximation.HessianApproximation\n    ):\n        hessian_instance = hessian_update_strategy\n        if isinstance(hessian_instance, hessians_needing_residuals):\n            raise NotImplementedError(unsupported_hess_msg)\n    else:\n        raise TypeError(\n            \"You must provide a hessian_update_strategy that is either a string or an \"\n            \"instance of the fides.hessian_approximation.HessianApproximation class.\"\n        )\n    return hessian_instance\n"
  },
  {
    "path": "src/optimagic/optimizers/gfo_optimizers.py",
    "content": "from __future__ import annotations\n\nimport math\nfrom dataclasses import dataclass\nfrom functools import partial\nfrom typing import TYPE_CHECKING, Any, Literal\n\nimport numpy as np\nfrom numpy.typing import NDArray\n\nfrom optimagic import mark\nfrom optimagic.config import IS_GRADIENT_FREE_OPTIMIZERS_INSTALLED\nfrom optimagic.optimization.algo_options import (\n    CONVERGENCE_FTOL_ABS,\n    STOPPING_MAXFUN_GLOBAL,\n    STOPPING_MAXITER,\n    get_population_size,\n)\nfrom optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult\nfrom optimagic.optimization.internal_optimization_problem import (\n    InternalBounds,\n    InternalOptimizationProblem,\n)\nfrom optimagic.parameters.conversion import Converter\nfrom optimagic.typing import (\n    AggregationLevel,\n    NonNegativeFloat,\n    PositiveFloat,\n    PositiveInt,\n    PyTree,\n)\nfrom optimagic.typing import UnitIntervalFloat as ProbabilityFloat\n\nif TYPE_CHECKING:\n    import pandas as pd\n    from gradient_free_optimizers.optimizers.base_optimizer import BaseOptimizer\n\n\n@dataclass(frozen=True)\nclass GFOCommonOptions:\n    \"\"\"Common options for all optimizers from GFO.\"\"\"\n\n    n_grid_points: PositiveInt | PyTree = 201\n    \"\"\"Number of grid points per dimension.\n\n    If an integer is provided, it will be used for all dimensions.\n\n    \"\"\"\n\n    n_init: PositiveInt = 20\n    \"\"\"Number of initialization steps to run.\n\n    Accordingly, N//2 positions will be initialized in a grid like pattern and remaining\n    initialized at the vertices and randomly in the search space.\n\n    \"\"\"\n\n    stopping_maxiter: PositiveInt = STOPPING_MAXITER\n    \"\"\"Maximum number of iterations.\"\"\"\n\n    stopping_maxtime: NonNegativeFloat | None = None\n    \"\"\"Maximum time in seconds before termination.\"\"\"\n\n    convergence_target_value: float | None = None\n    \"\"\"\"Stop the optimization if the objective function is less than this 
value.\"\"\"\n\n    convergence_iter_noimprove: PositiveInt = 1000000  # do not want to trigger this\n    \"\"\"Number of iterations without improvement before termination.\"\"\"\n\n    convergence_ftol_abs: NonNegativeFloat | None = (\n        CONVERGENCE_FTOL_ABS  # set to zero, so disabled\n    )\n    \"\"\"Converge if the absolute change in the objective function is less than this\n    value.\"\"\"\n\n    convergence_ftol_rel: NonNegativeFloat | None = None\n    \"\"\"Converge if the relative change in the objective function is less than this\n    value.\"\"\"\n\n    caching: bool = True\n    \"\"\"Whether to cache evaluated param and function values in a dictionary for\n    lookup.\"\"\"\n\n    extra_start_params: list[PyTree] | None = None\n    \"\"\"List of additional start points for the optimization run.\n\n    In case of population based optimizers, the initial_population can be provided\n    via `extra_start_params`\n\n    \"\"\"\n\n    warm_start: pd.DataFrame | None = None\n    \"\"\"Pandas dataframe that contains score and paramter information that will be\n    automatically loaded into the memory.\n\n    example:\n\n    score       x1      x2      x...\n    0.756       0.1     0.2     ...\n    0.823       0.3     0.1     ...\n    ... ...     ...     ...\n    ... ...     ...     
...\n\n    \"\"\"\n\n    verbosity: Literal[\"progress_bar\", \"print_results\", \"print_times\"] | bool = False\n    \"\"\"Determines what part of the optimization information will be printed.\"\"\"\n\n    seed: int | None = None\n    \"\"\"Random seed for reproducibility.\"\"\"\n\n    rand_rest_p: ProbabilityFloat = 0\n    \"\"\"Probability for the optimization algorithm to jump to a random position in an\n    iteration step.\"\"\"\n\n\n# ==================================================================================\n# Local optimizers\n# ==================================================================================\n\n\n@mark.minimizer(\n    name=\"gfo_hillclimbing\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_GRADIENT_FREE_OPTIMIZERS_INSTALLED,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=True,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass GFOHillClimbing(GFOCommonOptions, Algorithm):\n    \"\"\"Minimize a scalar function using the HillClimbing algorithm.\n\n    This algorithm is a Python implementation of the HillClimbing algorithm through the\n    gradient_free_optimizers package.\n\n    Hill climbing is a local search algorithm suited for exploring combinatorial search\n    spaces.\n\n    “It starts at an initial point, which is the best point chosen from `n_init`\n    initialization runs, and continues to move to positions within its\n    neighbourhood with a better solution. It has no method against getting stuck in\n    local optima.\n\n    \"\"\"\n\n    epsilon: PositiveFloat = 0.03\n    \"\"\"The step-size of the hill climbing algorithm. 
If step_size is too large the newly\n    selected positions will be at the edge of the search space.\n\n    If its value is very low it might not find new positions.\n\n    \"\"\"\n\n    distribution: Literal[\"normal\", \"laplace\", \"logistic\", \"gumbel\"] = \"normal\"\n    \"\"\"The mathematical distribution the algorithm draws samples from.\n\n    All available distributions are taken from the numpy-package.\n\n    \"\"\"\n\n    n_neighbours: PositiveInt = 3\n    \"\"\"The number of positions the algorithm explores from its current postion before\n    setting its current position to the best of those neighbour positions.\n\n    If the value of n_neighbours is large the hill-climbing-based algorithm will take a\n    lot of time to choose the next position to move to, but the choice will probably be\n    a good one. It might be a prudent approach to increase n_neighbours of the search-\n    space has a lot of dimensions, because there are more possible directions to move\n    to.\n\n    \"\"\"\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        import gradient_free_optimizers as gfo\n\n        opt = gfo.HillClimbingOptimizer\n        optimizer = partial(\n            opt,\n            epsilon=self.epsilon,\n            distribution=self.distribution,\n            n_neighbours=self.n_neighbours,\n        )\n        res = _gfo_internal(\n            common_options=self,\n            problem=problem,\n            x0=x0,\n            optimizer=optimizer,\n        )\n\n        return res\n\n\n@mark.minimizer(\n    name=\"gfo_stochastichillclimbing\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_GRADIENT_FREE_OPTIMIZERS_INSTALLED,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=True,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    
supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass GFOStochasticHillClimbing(Algorithm, GFOCommonOptions):\n    \"\"\"Minimize a scalar function using the Stochastic Hill Climbing algorithm.\n\n    This algorithm is a Python implementation of the StochasticHillClimbing algorithm\n    through the gradient_free_optimizers package.\n\n    Stochastic hill climbing extends the normal hill climbing by accepting worse\n    positions with a probability `p_accept` as a next position helping against getting\n    stuck in local optima.\n\n    \"\"\"\n\n    epsilon: PositiveFloat = 0.03\n    \"\"\"The step-size of the hill climbing algorithm. If step_size is too large the newly\n    selected positions will be at the edge of the search space.\n\n    If its value is very low it might not find new positions.\n\n    \"\"\"\n\n    distribution: Literal[\"normal\", \"laplace\", \"logistic\", \"gumbel\"] = \"normal\"\n    \"\"\"The mathematical distribution the algorithm draws samples from.\n\n    All available distributions are taken from the numpy-package.\n\n    \"\"\"\n\n    n_neighbours: PositiveInt = 3\n    \"\"\"The number of positions the algorithm explores from its current postion before\n    setting its current position to the best of those neighbour positions.\n\n    If the value of n_neighbours is large the hill-climbing-based algorithm will take a\n    lot of time to choose the next position to move to, but the choice will probably be\n    a good one. 
It might be a prudent approach to increase n_neighbours of the search-\n    space has a lot of dimensions, because there are more possible directions to move\n    to.\n\n    \"\"\"\n\n    p_accept: ProbabilityFloat = 0.5\n    \"\"\"The probability factor used in the equation to calculate if a worse position is\n    accepted as the new position.\n\n    If the new score is not better than the previous one the algorithm accepts worse\n    positions with probability p_accept.\n\n    .. math::\n        score_{normalized} = norm * \\\\frac{score_{current} - score_{new}}\n        {score_{current} + score_{new}}\n    .. math::\n        p = \\\\exp^{-score_{normalized}}\n\n    If p is less than p_accept the new position gets accepted anyways.\n\n    \"\"\"\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        import gradient_free_optimizers as gfo\n\n        opt = gfo.StochasticHillClimbingOptimizer\n        optimizer = partial(\n            opt,\n            epsilon=self.epsilon,\n            distribution=self.distribution,\n            n_neighbours=self.n_neighbours,\n            p_accept=self.p_accept,\n        )\n        res = _gfo_internal(\n            common_options=self,\n            problem=problem,\n            x0=x0,\n            optimizer=optimizer,\n        )\n\n        return res\n\n\n@mark.minimizer(\n    name=\"gfo_repulsinghillclimbing\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_GRADIENT_FREE_OPTIMIZERS_INSTALLED,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=True,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass GFORepulsingHillClimbing(Algorithm, GFOCommonOptions):\n    \"\"\"Minimize a scalar function 
using the Repulsing Hill Climbing algorithm.\n\n    This algorithm is a Python implementation of the Repulsing Hill Climbing algorithm\n    through the gradient_free_optimizers package.\n\n    The algorithm inherits from the Hill climbing which is a local search algorithm but\n    always activates its methods to escape local optima.\n\n    \"\"\"\n\n    epsilon: PositiveFloat = 0.03\n    \"\"\"The step-size of the hill climbing algorithm. If step_size is too large the newly\n    selected positions will be at the edge of the search space.\n\n    If its value is very low it might not find new positions.\n\n    \"\"\"\n\n    distribution: Literal[\"normal\", \"laplace\", \"logistic\", \"gumbel\"] = \"normal\"\n    \"\"\"The mathematical distribution the algorithm draws samples from.\n\n    All available distributions are taken from the numpy-package.\n\n    \"\"\"\n\n    n_neighbours: PositiveInt = 3\n    \"\"\"The number of positions the algorithm explores from its current position before\n    setting its current position to the best of those neighbour positions.\"\"\"\n\n    repulsion_factor: PositiveFloat = 5\n    \"\"\"The algorithm increases the step size by multiplying it with the repulsion_factor\n    for the next iteration. This way the algorithm escapes the region that does not\n    offer better positions.\n\n    .. 
math::\n        \\\\epsilon = \\\\epsilon * {repulsion factor}\n\n    \"\"\"\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        import gradient_free_optimizers as gfo\n\n        opt = gfo.RepulsingHillClimbingOptimizer\n        optimizer = partial(\n            opt,\n            epsilon=self.epsilon,\n            distribution=self.distribution,\n            n_neighbours=self.n_neighbours,\n            repulsion_factor=self.repulsion_factor,\n        )\n        res = _gfo_internal(\n            common_options=self,\n            problem=problem,\n            x0=x0,\n            optimizer=optimizer,\n        )\n\n        return res\n\n\n@mark.minimizer(\n    name=\"gfo_simulatedannealing\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_GRADIENT_FREE_OPTIMIZERS_INSTALLED,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=True,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n    experimental=True,\n)\n@dataclass(frozen=True)\nclass GFOSimulatedAnnealing(Algorithm, GFOCommonOptions):\n    \"\"\"Minimize a scalar function using the Simulated Annealing algorithm.\n\n    This algorithm is a Python implementation of Simulated Annealing through the\n    gradient_free_optimizers package.\n\n    Simulated annealing chooses its next possible position similar to hill climbing, but\n    it accepts worse results with a probability that decreases with time. It simulates a\n    temperature that decreases with each iteration, similar to a material cooling down.\n\n    \"\"\"\n\n    epsilon: PositiveFloat = 0.03\n    \"\"\"The step-size of the algorithm.\n\n    If step_size is too large the newly selected positions will be at the edge of the\n    search space. 
If its value is very low it might not find new positions.\n\n    \"\"\"\n\n    distribution: Literal[\"normal\", \"laplace\", \"logistic\", \"gumbel\"] = \"normal\"\n    \"\"\"The mathematical distribution the algorithm draws samples from.\n\n    All available distributions are taken from the numpy-package.\n\n    \"\"\"\n\n    n_neighbours: PositiveInt = 3\n    \"\"\"The number of positions the algorithm explores from its current position before\n    setting its current position to the best of those neighbour positions.\"\"\"\n\n    start_temp: PositiveFloat = 1\n    \"\"\"The start_temp is a factor for the probability p of accepting a worse position.\n\n    .. math::\n        p = \\\\exp^{-\\\\frac{score_{normalized}}{temp}}\n\n    \"\"\"\n\n    annealing_rate: PositiveFloat = 0.97\n    \"\"\"Rate at which the temperatur-value of the algorithm decreases. An annealing rate\n    above 1 increases the temperature over time.\n\n    .. math::\n        start\\\\_temp \\\\leftarrow start\\\\_temp * annealing\\\\_rate\n\n    \"\"\"\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        import gradient_free_optimizers as gfo\n\n        opt = gfo.SimulatedAnnealingOptimizer\n        optimizer = partial(\n            opt,\n            epsilon=self.epsilon,\n            distribution=self.distribution,\n            n_neighbours=self.n_neighbours,\n            start_temp=self.start_temp,\n            annealing_rate=self.annealing_rate,\n        )\n        res = _gfo_internal(\n            common_options=self,\n            problem=problem,\n            x0=x0,\n            optimizer=optimizer,\n        )\n        return res\n\n\n@mark.minimizer(\n    name=\"gfo_downhillsimplex\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_GRADIENT_FREE_OPTIMIZERS_INSTALLED,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=True,\n    
supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n    experimental=True,\n)\n@dataclass(frozen=True)\nclass GFODownhillSimplex(Algorithm, GFOCommonOptions):\n    \"\"\"Minimize a scalar function using the Downhill Simplex algorithm.\n\n    This algorithm is a Python implementation of the Downhill Simplex algorithm through\n    the gradient_free_optimizers package.\n\n    The Downhill simplex or Nelder mead algorithm works by grouping `number of\n    dimensions + 1` positions into a simplex, which can explore the search-space by\n    changing shape. The simplex changes shape by reflecting, expanding, contracting or\n    shrinking via the alpha, gamma, beta or sigma parameters. It needs at least `number\n    of dimensions + 1` initial positions to form a simplex in the search-space and the\n    movement of the positions in the simplex are affected by each other.\n\n    \"\"\"\n\n    simplex_reflection: PositiveFloat = 1\n    \"\"\"The reflection parameter of the simplex algorithm.\"\"\"\n\n    simplex_expansion: PositiveFloat = 2\n    \"\"\"The expansion parameter of the simplex algorithm.\"\"\"\n\n    simplex_contraction: PositiveFloat = 0.5\n    \"\"\"The contraction parameter of the simplex algorithm.\"\"\"\n\n    simplex_shrinking: PositiveFloat = 0.5\n    \"\"\"The shrinking parameter of the simplex algorithm.\"\"\"\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        import gradient_free_optimizers as gfo\n\n        opt = gfo.DownhillSimplexOptimizer\n        optimizer = partial(\n            opt,\n            alpha=self.simplex_reflection,\n            gamma=self.simplex_expansion,\n            beta=self.simplex_contraction,\n            sigma=self.simplex_shrinking,\n        )\n        res = 
_gfo_internal(\n            common_options=self,\n            problem=problem,\n            x0=x0,\n            optimizer=optimizer,\n        )\n        return res\n\n\n@mark.minimizer(\n    name=\"gfo_powells_method\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_GRADIENT_FREE_OPTIMIZERS_INSTALLED,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=True,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass GFOPowellsMethod(Algorithm, GFOCommonOptions):\n    \"\"\"Minimize a scalar function using Powell's Method.\n\n    This algorithm is a Python implementation of the Powell's Method algorithm through\n    the gradient_free_optimizers package.\n\n    This powell's method implementation works by optimizing each search space dimension\n    at a time with the hill climbing algorithm. It works by setting the search space\n    range for all dimensions except one to a single value. The hill climbing algorithms\n    searches the best position within this dimension. 
After `iters_p_dim` iterations the\n    next dimension is searched, while the search space range from the\n    previously searched dimension is set to the best position,\n    This way the algorithm finds new best positions one dimension at a time.\n\n    \"\"\"\n\n    iters_p_dim: PositiveInt = 10\n    \"\"\"Number of iterations the algorithm will let the hill-climbing algorithm search to\n    find the best position before it changes to the next dimension of the search space.\n\n    Typical range: 5 to 15.\n\n    \"\"\"\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        import gradient_free_optimizers as gfo\n\n        opt = gfo.PowellsMethod\n        optimizer = partial(\n            opt,\n            iters_p_dim=self.iters_p_dim,\n        )\n\n        res = _gfo_internal(\n            common_options=self,\n            problem=problem,\n            x0=x0,\n            optimizer=optimizer,\n        )\n        return res\n\n\n# ==================================================================================\n# Population Based\n# ==================================================================================\n\n\n@mark.minimizer(\n    name=\"gfo_pso\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_GRADIENT_FREE_OPTIMIZERS_INSTALLED,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=True,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass GFOParticleSwarmOptimization(Algorithm, GFOCommonOptions):\n    r\"\"\"Minimize a scalar function using the Particle Swarm Optimization algorithm.\n\n    This algorithm is a Python implementation of the Particle Swarm Optimization\n    algorithm through the gradient_free_optimizers 
package.\n\n    Particle Swarm Optimization is a global population based algorithm.\n\n    The algorithm simulates a swarm of particles which move according to their own\n    inertia across the search space.\n    Each particle adjusts its position based on its own experience (cognitive weight)\n    and the experiences of its neighbors or the swarm (social weight), using\n    velocity updates.\n    The algorithm iteratively guides the swarm toward promising regions of the\n    search space.\n\n    The velocity of a particle is calculated by the following\n    equation:\n\n    .. math::\n        v_{n+1} = \\\\omega \\\\cdot v_n + c_k \\\\cdot r_1 \\\\cdot (p_{best}-p_n)\n        + c_s \\\\cdot r_2 \\\\cdot (g_{best} - p_n)\n\n    \"\"\"\n\n    population_size: PositiveInt | None = None\n    \"\"\"Size of the population.\"\"\"\n\n    inertia: NonNegativeFloat = 0.5 / math.log(2.0)\n    \"\"\"The inertia of the movement of the individual particles in the population.\"\"\"\n\n    cognitive_weight: NonNegativeFloat = 0.5 + math.log(2.0)\n    \"\"\"A factor of the movement towards the personal best position of the individual\n    particles in the population.\"\"\"\n\n    social_weight: NonNegativeFloat = 0.5 + math.log(2.0)\n    \"\"\"A factor of the movement towards the global best position of the individual\n    particles in the population.\"\"\"\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        import gradient_free_optimizers as gfo\n\n        population_size = get_population_size(\n            population_size=self.population_size, x=x0, lower_bound=10\n        )\n\n        opt = gfo.ParticleSwarmOptimizer\n        optimizer = partial(\n            opt,\n            population=population_size,\n            inertia=self.inertia,\n            cognitive_weight=self.cognitive_weight,\n            social_weight=self.social_weight,\n            
rand_rest_p=self.rand_rest_p,\n        )\n\n        res = _gfo_internal(\n            common_options=self,\n            problem=problem,\n            x0=x0,\n            optimizer=optimizer,\n        )\n\n        return res\n\n\n@mark.minimizer(\n    name=\"gfo_parallel_tempering\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_GRADIENT_FREE_OPTIMIZERS_INSTALLED,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=True,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass GFOParallelTempering(Algorithm, GFOCommonOptions):\n    r\"\"\"Minimize a scalar function using the Parallel Tempering algorithm.\n\n    This algorithm is a Python implementation of the Parallel Tempering\n    algorithm through the gradient_free_optimizers package.\n\n    Parallel Tempering is a global optimization algorithm that is inspired by\n    metallurgical annealing.\n    It runs multiple optimizer instances at different\n    \"starting temperatures\" in parallel. Periodically, swaps between these runs are\n    attempted. Swaps between optimization runs at different temperatures allow the\n    optimizer to overcome local optima.\n\n    The probability of swapping temperatures for any combination of optimizer instances\n    is given by.\n\n    .. 
math::\n\n        p = \\\\min \\\\left( 1, \\\\exp\\\\left[{(\\\\text{score}_i-\n        \\\\text{score}_j)\\\\left(\\\\frac{1}{T_i}-\\\\frac{1}{T_j}\\\\right)}\\\\right] \\\\right)\n\n    \"\"\"\n\n    population_size: PositiveInt | None = None\n    \"\"\"Size of the population.\"\"\"\n\n    n_iter_swap: PositiveInt = 10\n    \"\"\"The number of iterations the algorithm performs before switching temperatures of\n    the individual optimizers in the population.\"\"\"\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        import gradient_free_optimizers as gfo\n\n        population_size = get_population_size(\n            population_size=self.population_size, x=x0, lower_bound=10\n        )\n\n        opt = gfo.ParallelTemperingOptimizer\n        optimizer = partial(\n            opt,\n            population=population_size,\n            n_iter_swap=self.n_iter_swap,\n            rand_rest_p=self.rand_rest_p,\n        )\n\n        res = _gfo_internal(\n            common_options=self,\n            problem=problem,\n            x0=x0,\n            optimizer=optimizer,\n        )\n\n        return res\n\n\n@mark.minimizer(\n    name=\"gfo_spiral_optimization\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_GRADIENT_FREE_OPTIMIZERS_INSTALLED,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=True,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass GFOSpiralOptimization(Algorithm, GFOCommonOptions):\n    r\"\"\"Minimize a scalar function using the Spiral Optimization algorithm.\n\n    This algorithm is a Python implementation of the Spiral Optimization\n    algorithm through the gradient_free_optimizers package.\n\n    Spiral 
Optimization is a population-based algorithm, in which a number of particles\n    move in a spiral-like pattern to explore the search space and converge to the\n    best known position as the spiral decays.\n\n    The position of each particle is updated according to the following equation:\n\n    .. math::\n\n        x_i (k+1) = x^* (k) + r(k) \\\\cdot R(\\\\theta) \\\\cdot (x_i(k)- x^*(k))\n\n    where:\n        - `k` = k-th iteration\n        - `x_i(k)` = current position.\n        - `x*(k)` = center position (known best position of all particles)\n        - `r(k)` = decay rate ,\n        - `R` = rotation matrix.\n\n    and rotation matrix R is given by\n\n    .. math::\n\n        R(\\\\theta) = \\\\begin{bmatrix}\n            0^{\\\\top}_{n-1} & -1 \\\\\\\\\n            I_{n-1} & 0_{n-1}\n        \\\\end{bmatrix}\n\n    \"\"\"\n\n    population_size: PositiveInt | None = None\n    \"\"\"Size of the population.\"\"\"\n\n    decay_rate: NonNegativeFloat = 0.99\n    \"\"\"The decay rate `r` is a factor, by which the radius of the spiral movement of the\n    particles decays during their spiral movement.\n\n    Lower values accelerate the convergence of the particles to the best known position,\n    while values above 1 eventually lead to a movement where the particles spiral away\n    from each other. 
Typical range: 0.85 to 1.15.\n\n    \"\"\"\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        import gradient_free_optimizers as gfo\n\n        population_size = get_population_size(\n            population_size=self.population_size, x=x0, lower_bound=10\n        )\n\n        opt = gfo.SpiralOptimization\n        optimizer = partial(\n            opt,\n            population=population_size,\n            decay_rate=self.decay_rate,\n        )\n\n        res = _gfo_internal(\n            common_options=self,\n            problem=problem,\n            x0=x0,\n            optimizer=optimizer,\n        )\n\n        return res\n\n\n@mark.minimizer(\n    name=\"gfo_genetic_algorithm\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_GRADIENT_FREE_OPTIMIZERS_INSTALLED,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=True,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass GFOGeneticAlgorithm(Algorithm, GFOCommonOptions):\n    \"\"\"Minimize a scalar function using the Genetic Algorithm.\n\n    This algorithm is a Python implementation of the Genetic Algorithm through the\n    gradient_free_optimizers package.\n\n    The Genetic Algorithm is an evolutionary algorithm inspired by the process of\n    natural selection. 
It evolves a population of candidate solutions over generations\n    using mechanisms like selection, crossover, and mutation of genes(bits) to find the\n    best solution.\n\n    \"\"\"\n\n    population_size: PositiveInt | None = None\n    \"\"\"Size of the population.\"\"\"\n\n    mutation_rate: ProbabilityFloat = 0.5\n    \"\"\"Probability of a mutation event occurring in an individual of the population.\n    Mutation helps in maintaining genetic diversity within the population and prevents\n    the algorithm from getting stuck in local optima. Bits are randomly altered with.\n\n    .. math::\n\n        x'_i =\n        \\\\begin{cases}\n            x_i & \\\\text{if } \\\\text{rand} > p_m \\\\\\\\\n            1 - x_i & \\\\text{if } \\\\text{rand} \\\\leq p_m\n        \\\\end{cases}\n\n    where p_m is mutation_rate.\n\n    \"\"\"\n\n    crossover_rate: ProbabilityFloat = 0.5\n    \"\"\"Probability of a crossover event occurring between two parents. A higher\n    crossover rate increases the diversity of the offspring, which can help in exploring\n    the search space more effectively. Crossover happens with.\n\n    .. 
math::\n\n        u_{i,j}^{(g)} =\n        \\\\begin{cases}\n            v_{i,j}^{(g)} & \\\\text{if } \\\\text{rand}_j \\\\leq C_r \\\\text{ or } j =\n            j_{\\\\text{rand}} \\\\\\\\\n            x_{i,j}^{(g)} & \\\\text{otherwise}\n        \\\\end{cases}\n\n    where C_r is crossover_rate .\n\n    \"\"\"\n\n    n_parents: PositiveInt = 2\n    \"\"\"The number of parents selected from the current population to participate in the\n    crossover process to produce offspring.\n\n    By default, pairs of parents are selected to generate new offspring.\n\n    \"\"\"\n\n    n_offsprings: PositiveInt = 10\n    \"\"\"The number of offsprings generated in each generation through the processes of\n    crossover and mutation.\n\n    Typically, the number of offspring is equal to the population size, ensuring that\n    the population size remains constant over generations.\n\n    \"\"\"\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        import gradient_free_optimizers as gfo\n\n        population_size = get_population_size(\n            population_size=self.population_size, x=x0, lower_bound=10\n        )\n\n        opt = gfo.GeneticAlgorithmOptimizer\n        optimizer = partial(\n            opt,\n            population=population_size,\n            mutation_rate=self.mutation_rate,\n            crossover_rate=self.crossover_rate,\n            n_parents=self.n_parents,\n            offspring=self.n_offsprings,\n        )\n\n        res = _gfo_internal(\n            common_options=self,\n            problem=problem,\n            x0=x0,\n            optimizer=optimizer,\n        )\n\n        return res\n\n\n@mark.minimizer(\n    name=\"gfo_evolution_strategy\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_GRADIENT_FREE_OPTIMIZERS_INSTALLED,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=True,\n    
supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass GFOEvolutionStrategy(Algorithm, GFOCommonOptions):\n    r\"\"\"Minimize a scalar function using the Evolution Strategy algorithm.\n\n    This algorithm is a Python implementation of the Evolution Strategy algorithm\n    through the gradient_free_optimizers package.\n\n    Evolution Strategy is an evolutionary algorithm inspired by natural evolution and\n    works by iteratively improving a population of candidate solutions through mutation,\n    crossover, and selection.\n    A population of parents generates offspring, and only the fittest individuals\n    from both parents and offspring are selected to form the next generation.\n\n    The algorithm uses both mutation and crossover to create new candidate solutions.\n    The choice between mutation and crossover is determined probabilistically based on\n    their respective rates in the following way.\n\n    .. math::\n\n        \\\\text{total_rate} = \\\\text{mutation_rate} + \\\\text{crossover_rate}\n    .. math::\n\n        R = \\\\text{random_float} (0 ... \\\\text{total_rate})\n\n    .. 
code-block::\n\n        if R <= mutation-rate:\n            do mutation\n        else:\n            do crossover\n\n    \"\"\"\n\n    population_size: PositiveInt | None = None\n    \"\"\"Size of the population.\"\"\"\n\n    stopping_maxiter: PositiveInt = STOPPING_MAXFUN_GLOBAL\n    \"\"\"Maximum number of iterations.\"\"\"\n\n    mutation_rate: ProbabilityFloat = 0.7\n    \"\"\"Probability of a mutation event occurring in an individual.\"\"\"\n\n    crossover_rate: ProbabilityFloat = 0.3\n    \"\"\"Probability of an individual to perform a crossover with the best individual in\n    the population.\"\"\"\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        import gradient_free_optimizers as gfo\n\n        population_size = get_population_size(\n            population_size=self.population_size, x=x0, lower_bound=10\n        )\n\n        opt = gfo.EvolutionStrategyOptimizer\n        optimizer = partial(\n            opt,\n            population=population_size,\n            mutation_rate=self.mutation_rate,\n            crossover_rate=self.crossover_rate,\n            rand_rest_p=self.rand_rest_p,\n        )\n\n        res = _gfo_internal(\n            common_options=self,\n            problem=problem,\n            x0=x0,\n            optimizer=optimizer,\n        )\n\n        return res\n\n\n@mark.minimizer(\n    name=\"gfo_differential_evolution\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_GRADIENT_FREE_OPTIMIZERS_INSTALLED,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=True,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass GFODifferentialEvolution(Algorithm, GFOCommonOptions):\n    r\"\"\"Minimize a scalar 
function using the Differential Evolution algorithm.\n\n    This algorithm is a Python implementation of the Differential Evolution\n    algorithm through the gradient_free_optimizers package.\n\n    Differential Evolution is a population-based optimization algorithm that\n    creates iteratively improves a population of candidate solutions by combining and\n    perturbing them based on their differences.\n    It creates new\n    positions in the search space by adding the weighted difference between two\n    individuals in the population  to a third individual creating trial solutions that\n    are evaluated for their fitness and if a trial solution is better than the target\n    it replaces, ensures continual improvement.\n\n    A new trial solution is generated according to:\n\n    .. math::\n        x_{trial} = x_{r1} + F \\\\cdot (x_{r2} - x_{r3})\n\n    where :math:`r1, r2, r3` are random individuals from the population, and\n    :math:`F` is the differential weight or mutation_rate.\n\n    \"\"\"\n\n    population_size: PositiveInt | None = None\n    \"\"\"Size of the population.\"\"\"\n\n    mutation_rate: ProbabilityFloat = 0.9\n    r\"\"\"Probability of a mutation event occurring in an individual.\n\n    The mutation rate influences the algorithm's ability to explore the search space.\n    A higher value of mutation_rate also called the differential weight `F` increases\n    the diversity of the mutant individuals, leading to broader exploration,\n    while a lower value encourages convergence by making smaller adjustments.\n\n    .. math::\n\n        \\mathbf{v}_{i,G+1} = \\mathbf{x}_{r1,G} + F \\cdot (\\mathbf{x}_{r2,G} -\n        \\mathbf{x}_{r3,G})\n\n    \"\"\"\n\n    crossover_rate: ProbabilityFloat = 0.9\n    \"\"\"Probability of a crossover event occurring between two parents. It determines how\n    much of the trial vector inherits its components from the mutant individual versus\n    the target individual. 
A high crossover rate means that more components will come\n    from the mutant individual, promoting exploration of new solutions. Conversely, a\n    low crossover rate results in more components being taken from the target\n    individual, which can help maintain existing solutions and refine them.\n\n    .. math::\n\n        u_{i,j,G+1} =\n        \\\\begin{cases}\n            v_{i,j,G+1} & \\\\text{if } \\\\text{rand}_j(0,1) \\\\leq CR \\\\text{ or } j =\n              j_{\\\\text{rand}} \\\\\\\\\n            x_{i,j,G} & \\\\text{otherwise}\n        \\\\end{cases}\n\n    \"\"\"\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        import gradient_free_optimizers as gfo\n\n        population_size = get_population_size(\n            population_size=self.population_size, x=x0, lower_bound=10\n        )\n\n        opt = gfo.DifferentialEvolutionOptimizer\n        optimizer = partial(\n            opt,\n            population=population_size,\n            mutation_rate=self.mutation_rate,\n            crossover_rate=self.crossover_rate,\n        )\n\n        res = _gfo_internal(\n            common_options=self,\n            problem=problem,\n            x0=x0,\n            optimizer=optimizer,\n        )\n\n        return res\n\n\n# ==================================================================================\n# Helper functions\n# ==================================================================================\n\n\ndef _gfo_internal(\n    common_options: GFOCommonOptions,\n    problem: InternalOptimizationProblem,\n    x0: NDArray[np.float64],\n    optimizer: BaseOptimizer,\n) -> InternalOptimizeResult:\n    \"\"\"Internal helper function.\n\n    Define the search space and inital params, define the objective function and run\n    optimization.\n\n    \"\"\"\n    # Use common options from GFOCommonOptions\n    common = common_options\n\n    # set early 
stopping criterion\n    early_stopping = {\n        \"n_iter_no_change\": common.convergence_iter_noimprove,\n        \"tol_abs\": common.convergence_ftol_abs,\n        \"tol_rel\": common.convergence_ftol_rel,\n    }\n\n    # define search space, initial params, initial_population and constraints\n    opt = optimizer(\n        search_space=_get_search_space_gfo(\n            problem.bounds,\n            common.n_grid_points,\n            problem.converter,\n        ),\n        initialize=_get_initialize_gfo(\n            x0, common.n_init, common.extra_start_params, problem.converter\n        ),\n        constraints=_get_gfo_constraints(),\n        random_state=common.seed,\n    )\n\n    # define objective function, negate to perform minimize\n    def objective_function(para: dict[str, float]) -> float | NDArray[np.float64]:\n        x = np.array(opt.conv.para2value(para))\n        return -problem.fun(x)\n\n    # negate in case of minimize\n    convergence_target_value = (\n        -1 * common.convergence_target_value\n        if common.convergence_target_value is not None\n        else None\n    )\n\n    # run optimization\n    opt.search(\n        objective_function=objective_function,\n        n_iter=common.stopping_maxiter,\n        max_time=common.stopping_maxtime,\n        max_score=convergence_target_value,\n        early_stopping=early_stopping,\n        memory=common.caching,\n        memory_warm_start=common.warm_start,\n        verbosity=common.verbosity,\n    )\n\n    return _process_result_gfo(opt)\n\n\ndef _get_search_space_gfo(\n    bounds: InternalBounds, n_grid_points: PositiveInt | PyTree, converter: Converter\n) -> dict[str, NDArray[np.float64]]:\n    \"\"\"Create search space.\n\n    Args:\n        bounds: Internal Bounds\n        n_grid_points: number of grid points in each dimension\n    Returns:\n    dict: search_space dictionary\n\n    \"\"\"\n    search_space = {}\n    if bounds.lower is not None and bounds.upper is not None:\n        dim 
= len(bounds.lower)\n        upper = bounds.upper\n        lower = bounds.lower\n\n    if isinstance(n_grid_points, int):\n        n_grid_points = [n_grid_points] * dim\n    else:\n        n_grid_points = list(map(int, converter.params_to_internal(n_grid_points)))\n\n    for i in range(dim):\n        search_space[f\"x{i}\"] = np.linspace(lower[i], upper[i], n_grid_points[i])\n\n    return search_space\n\n\ndef _get_gfo_constraints() -> list[Any]:\n    \"\"\"Process constraints.\"\"\"\n    return []\n\n\ndef _get_initialize_gfo(\n    x0: NDArray[np.float64],\n    n_init: PositiveInt,\n    extra_start_points: list[PyTree] | None,\n    converter: Converter,\n) -> dict[str, Any]:\n    \"\"\"Set initial params x0, additional start params for the optimization run or the\n    initial_population. Here, warm_start is actually extra_start_params.\n\n    Args:\n    x0: initial param\n\n    Returns:\n    dict: initialize dictionary with initial parameters set\n\n    \"\"\"\n    init = _value2para(x0)\n    x_list = [init]\n    if extra_start_points is not None:\n        internal_values = [converter.params_to_internal(x) for x in extra_start_points]\n        extra_start_points = [_value2para(x) for x in internal_values]\n        x_list += extra_start_points\n\n    initialize = {\n        \"warm_start\": x_list,\n        \"vertices\": n_init // 2,\n        \"grid\": n_init // 2,\n    }\n    return initialize\n\n\ndef _process_result_gfo(opt: \"BaseOptimizer\") -> InternalOptimizeResult:\n    \"\"\"Process result.\n\n    Args:\n        opt: Optimizer instance after optimization run is complete\n\n    Returns:\n        InternalOptimizeResult: Internal optimization result.\n\n    \"\"\"\n    res = InternalOptimizeResult(\n        x=np.array(opt.best_value),\n        fun=-opt.best_score,  # negate once again\n        success=True,\n        n_fun_evals=len(opt.eval_times),\n        n_jac_evals=0,\n        n_hess_evals=0,\n        n_iterations=opt.n_iter_search,\n    )\n\n    return 
res\n\n\ndef _value2para(x: NDArray[np.float64]) -> dict[str, float]:\n    \"\"\"Convert values to dict.\n\n    Args:\n        x: Array of parameter values\n\n    Returns:\n        dict: Dictionary of parameter values with key-value pair as { x{i} : x[i]}\n\n    \"\"\"\n    para = {}\n    for i in range(len(x)):\n        para[f\"x{i}\"] = x[i]\n    return para\n"
  },
  {
    "path": "src/optimagic/optimizers/iminuit_migrad.py",
    "content": "\"\"\"Implement the MIGRAD algorithm from iminuit.\"\"\"\n\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom typing import TYPE_CHECKING, Optional\n\nimport numpy as np\nfrom numpy.typing import NDArray\n\nfrom optimagic import mark\nfrom optimagic.config import IS_IMINUIT_INSTALLED\nfrom optimagic.exceptions import NotInstalledError\nfrom optimagic.optimization.algo_options import (\n    N_RESTARTS,\n    STOPPING_MAXFUN,\n)\nfrom optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult\nfrom optimagic.optimization.internal_optimization_problem import (\n    InternalOptimizationProblem,\n)\nfrom optimagic.typing import AggregationLevel\n\nif TYPE_CHECKING:\n    from iminuit import Minuit\n\n\n@mark.minimizer(\n    name=\"iminuit_migrad\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_IMINUIT_INSTALLED,\n    is_global=False,\n    needs_jac=True,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=True,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass IminuitMigrad(Algorithm):\n    r\"\"\"Minimize a scalar differentiable function using the MIGRAD algorithm from\n    iminuit.\n\n    This optimizer wraps the MIGRAD algorithm from the iminuit package, which provides a\n    Python interface to the Minuit2 C++ library developed and maintained by CERN.\n\n    MIGRAD is a local optimization method in the quasi-Newton family. It iteratively\n    builds an approximation of the inverse Hessian matrix using the DFP variable-metric\n    method to efficiently navigate optimization landscapes.\n\n    At each iteration, the algorithm attempts a Newton step, using gradient and Hessian\n    approximations to move toward the function’s minimum. 
If this step fails to reduce\n    the objective function, MIGRAD conducts a line search along the gradient direction\n    to maintain progress. This continues until the convergence criteria, such as the\n    Estimated Distance to Minimum (EDM) are met, that is, they fall below preset\n    thresholds.\n\n    MIGRAD is designed for statistical optimization problems where accurate parameter\n    uncertainty estimates are essential. It excels at maximum-likelihood and least-\n    squares fits common in scientific computing, and is best suited for smooth,\n    differentiable cost functions.\n\n    For best performance, supply analytical gradients. Convergence and solution will\n    depend on your starting values. Bound constraints (limits) supported.\n\n    \"\"\"\n\n    stopping_maxfun: int = STOPPING_MAXFUN\n    \"\"\"Maximum number of function evaluations.\"\"\"\n\n    n_restarts: int = N_RESTARTS\n    \"\"\"Number of times to restart the optimizer if convergence is not reached.\n\n    A value of 1 (the default) indicates that the optimizer will only run once,\n    disabling the restart feature. Values greater than 1 specify the maximum number of\n    restart attempts.\n\n    \"\"\"\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, params: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        if not IS_IMINUIT_INSTALLED:\n            raise NotInstalledError(  # pragma: no cover\n                \"To use the 'iminuit_migrad` optimizer you need to install iminuit. \"\n                \"Use 'pip install iminuit' or 'conda install -c conda-forge iminuit'. 
\"\n                \"Check the iminuit documentation for more details: \"\n                \"https://scikit-hep.org/iminuit/install.html\"\n            )\n        from iminuit import Minuit\n\n        def wrapped_objective(x: NDArray[np.float64]) -> float:\n            return float(problem.fun(x))\n\n        m = Minuit(wrapped_objective, params, grad=problem.jac)\n\n        bounds = _convert_bounds_to_minuit_limits(\n            problem.bounds.lower, problem.bounds.upper\n        )\n\n        for i, (lower, upper) in enumerate(bounds):\n            if lower is not None or upper is not None:\n                m.limits[i] = (lower, upper)\n\n        m.migrad(\n            ncall=self.stopping_maxfun,\n            iterate=self.n_restarts,\n        )\n\n        res = _process_minuit_result(m)\n        return res\n\n\ndef _process_minuit_result(minuit_result: Minuit) -> InternalOptimizeResult:\n    \"\"\"Convert iminuit result to optimagic's internal result format.\"\"\"\n    res = InternalOptimizeResult(\n        x=np.array(minuit_result.values),\n        fun=minuit_result.fval,\n        success=minuit_result.valid,\n        message=repr(minuit_result.fmin),\n        n_fun_evals=minuit_result.nfcn,\n        n_jac_evals=minuit_result.ngrad,\n        n_hess_evals=None,\n        n_iterations=minuit_result.nfcn,\n        status=None,\n        jac=None,\n        hess=None,\n        hess_inv=np.array(minuit_result.covariance),\n        max_constraint_violation=None,\n        info=None,\n        history=None,\n    )\n    return res\n\n\ndef _convert_bounds_to_minuit_limits(\n    lower_bounds: Optional[NDArray[np.float64]],\n    upper_bounds: Optional[NDArray[np.float64]],\n) -> list[tuple[Optional[float], Optional[float]]]:\n    \"\"\"Convert optimization bounds to Minuit-compatible limit format.\n\n    Transforms numpy arrays of bounds into List of tuples as expected by iminuit.\n    Handles special values like np.inf, -np.inf, and np.nan by converting\n    them to None where 
appropriate, as required by Minuit's limits API.\n\n    Parameters\n    ----------\n    lower_bounds : Optional[NDArray[np.float64]]\n        Array of lower bounds for parameters.\n    upper_bounds : Optional[NDArray[np.float64]]\n        Array of upper bounds for parameters.\n\n    Returns:\n    -------\n    list[tuple[Optional[float], Optional[float]]]\n        List of (lower, upper) limit tuples in Minuit format, where:\n        - None indicates unbounded (equivalent to infinity)\n        - Float values represent actual bounds\n\n    Notes:\n    -----\n    Minuit expects bounds as tuples of (lower, upper) where:\n    - `None` indicates no bound (equivalent to -inf or +inf)\n    - A finite float value indicates a specific bound\n    - Bounds can be asymmetric (e.g., one side bounded, one side not)\n\n    \"\"\"\n    if lower_bounds is None or upper_bounds is None:\n        return []\n\n    return [\n        (\n            None if np.isneginf(lower) or np.isnan(lower) else float(lower),\n            None if np.isposinf(upper) or np.isnan(upper) else float(upper),\n        )\n        for lower, upper in zip(lower_bounds, upper_bounds, strict=True)\n    ]\n"
  },
  {
    "path": "src/optimagic/optimizers/ipopt.py",
    "content": "\"\"\"Implement cyipopt's Interior Point Optimizer.\"\"\"\n\nfrom dataclasses import dataclass\nfrom typing import Any, Literal\n\nimport numpy as np\nfrom numpy.typing import NDArray\nfrom scipy.optimize import Bounds as ScipyBounds\n\nfrom optimagic import mark\nfrom optimagic.config import IS_CYIPOPT_INSTALLED\nfrom optimagic.exceptions import NotInstalledError\nfrom optimagic.optimization.algo_options import (\n    CONVERGENCE_FTOL_REL,\n    STOPPING_MAXITER,\n)\nfrom optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult\nfrom optimagic.optimization.internal_optimization_problem import (\n    InternalBounds,\n    InternalOptimizationProblem,\n)\nfrom optimagic.optimizers.scipy_optimizers import process_scipy_result\nfrom optimagic.typing import (\n    AggregationLevel,\n    GtOneFloat,\n    NonNegativeFloat,\n    NonNegativeInt,\n    PositiveFloat,\n    PositiveInt,\n    YesNoBool,\n)\n\n\n@mark.minimizer(\n    name=\"ipopt\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_CYIPOPT_INSTALLED,\n    is_global=False,\n    needs_jac=True,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=True,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=True,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass Ipopt(Algorithm):\n    # convergence criteria\n    convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL\n    dual_inf_tol: PositiveFloat = 1.0\n    constr_viol_tol: PositiveFloat = 0.0001\n    compl_inf_tol: PositiveFloat = 0.0001\n    s_max: float = 100\n    mu_target: NonNegativeFloat = 0.0\n    # stopping criteria\n    stopping_maxiter: PositiveInt = STOPPING_MAXITER\n    stopping_max_wall_time_seconds: PositiveFloat = 1e20\n    stopping_max_cpu_time: PositiveFloat = 1e20\n    # acceptable criteria\n    acceptable_iter: NonNegativeInt = 15\n    acceptable_tol: PositiveFloat = 1e-6\n    
acceptable_dual_inf_tol: PositiveFloat = 1e-10\n    acceptable_constr_viol_tol: PositiveFloat = 0.01\n    acceptable_compl_inf_tol: PositiveFloat = 0.01\n    acceptable_obj_change_tol: PositiveFloat = 1e20\n    diverging_iterates_tol: PositiveFloat = 1e20\n    nlp_lower_bound_inf: float = -1e19\n    nlp_upper_bound_inf: float = 1e19\n    fixed_variable_treatment: Literal[\n        \"make_parameter\",\n        \"make_parameter_nodual\",\n        \"relax_bounds\",\n        \"make_constraint\",\n    ] = \"make_parameter\"\n    dependency_detector: Literal[\"none\", \"mumps\", \"wsmp\", \"ma28\"] | None = None\n    dependency_detection_with_rhs: YesNoBool = False\n    # bounds\n    kappa_d: NonNegativeFloat = 1e-5\n    bound_relax_factor: NonNegativeFloat = 1e-8\n    honor_original_bounds: YesNoBool = False\n    # derivatives\n    check_derivatives_for_naninf: YesNoBool = False\n    # not sure if we should support the following:\n    jac_c_constant: YesNoBool = False\n    jac_d_constant: YesNoBool = False\n    hessian_constant: YesNoBool = False\n    # scaling\n    nlp_scaling_method: (\n        Literal[\n            \"none\",\n            \"user-scaling\",\n            \"gradient-based\",\n            \"equilibration-based\",\n        ]\n        | None\n    ) = \"gradient-based\"\n    obj_scaling_factor: float = 1\n    nlp_scaling_max_gradient: PositiveFloat = 100\n    nlp_scaling_obj_target_gradient: NonNegativeFloat = 0.0\n    nlp_scaling_constr_target_gradient: NonNegativeFloat = 0.0\n    nlp_scaling_min_value: NonNegativeFloat = 1e-8\n    # initialization\n    bound_push: PositiveFloat = 0.01\n    # TODO: refine type to fix the range (0,0.5]\n    bound_frac: PositiveFloat = 0.01\n    slack_bound_push: PositiveFloat = 0.01\n    # TODO: refine type to fix the range (0,0.5]\n    slack_bound_frac: PositiveFloat = 0.01\n    constr_mult_init_max: NonNegativeFloat = 1000\n    bound_mult_init_val: PositiveFloat = 1\n    bound_mult_init_method: Literal[\n        
\"constant\",\n        \"mu-based\",\n    ] = \"constant\"\n    least_square_init_primal: YesNoBool = False\n    least_square_init_duals: YesNoBool = False\n    # warm start\n    warm_start_init_point: YesNoBool = False\n    warm_start_same_structure: YesNoBool = False\n    warm_start_bound_push: PositiveFloat = 0.001\n    warm_start_bound_frac: PositiveFloat = 0.001\n    warm_start_slack_bound_push: PositiveFloat = 0.001\n    # TODO: refine type to fix the range (0,0.5])\n    warm_start_slack_bound_frac: PositiveFloat = 0.001\n    warm_start_mult_bound_push: PositiveFloat = 0.001\n    warm_start_mult_init_max: float = 1e6\n    warm_start_entire_iterate: YesNoBool = False\n    warm_start_target_mu: float = 0.0\n    # miscellaneous\n    option_file_name: str = \"\"\n    replace_bounds: YesNoBool = False\n    skip_finalize_solution_call: YesNoBool = False\n    timing_statistics: YesNoBool = False\n    # barrier parameter update\n    mu_max_fact: PositiveFloat = 1000\n    mu_max: PositiveFloat = 100_000\n    mu_min: PositiveFloat = 1e-11\n    adaptive_mu_globalization: Literal[\n        \"obj-constr-filter\",\n        \"kkt-error\",\n        \"never-monotone-mode\",\n    ] = \"obj-constr-filter\"\n    adaptive_mu_kkterror_red_iters: NonNegativeInt = 4\n    # TODO: refine type to fix the range (0,1)\n    adaptive_mu_kkterror_red_fact: PositiveFloat = 0.9999\n    # TODO: refine type to fix the range (0,1)\n    filter_margin_fact: PositiveFloat = 1e-5\n    filter_max_margin: PositiveFloat = 1\n    adaptive_mu_restore_previous_iterate: YesNoBool = False\n    adaptive_mu_monotone_init_factor: PositiveFloat = 0.8\n    adaptive_mu_kkt_norm_type: Literal[\n        \"max-norm\",\n        \"2-norm-squared\",\n        \"1-norm\",\n        \"2-norm\",\n    ] = \"2-norm-squared\"\n    mu_strategy: Literal[\"monotone\", \"adaptive\"] = \"monotone\"\n    mu_oracle: Literal[\n        \"probing\",\n        \"quality-function\",\n        \"loqo\",\n    ] = \"quality-function\"\n    
fixed_mu_oracle: Literal[\n        \"probing\",\n        \"loqo\",\n        \"quality-function\",\n        \"average_compl\",\n    ] = \"average_compl\"\n    mu_init: PositiveFloat = 0.1\n    barrier_tol_factor: PositiveFloat = 10\n    # TODO: refine type to fix the range (0,1)\n    mu_linear_decrease_factor: PositiveFloat = 0.2\n    # TODO: refine type to fix the range (1,2)\n    mu_superlinear_decrease_power: GtOneFloat = 1.5\n    mu_allow_fast_monotone_decrease: YesNoBool = True\n    # TODO: refine type to fix the range (0,1)\n    tau_min: PositiveFloat = 0.99\n    sigma_max: PositiveFloat = 100\n    sigma_min: NonNegativeFloat = 1e-6\n    quality_function_norm_type: Literal[\n        \"max-norm\",\n        \"2-norm-squared\",\n        \"1-norm\",\n        \"2-norm\",\n    ] = \"2-norm-squared\"\n    quality_function_centrality: (\n        Literal[\n            \"none\",\n            \"reciprocal\",\n            \"log\",\n            \"cubed-reciprocal\",\n        ]\n        | None\n    ) = None\n    quality_function_balancing_term: Literal[\"none\", \"cubic\"] | None = None\n    quality_function_max_section_steps: NonNegativeInt = 8\n    # TODO: refine type to fix the range [0,1)\n    quality_function_section_sigma_tol: NonNegativeFloat = 0.01\n    # TODO: refine type to fix the range [0,1)\n    quality_function_section_qf_tol: NonNegativeFloat = 0.0\n    # line search\n    line_search_method: Literal[\n        \"filter\",\n        \"penalty\",\n        \"cg-penalty\",\n    ] = \"filter\"\n    # TODO: refine type to fix the range (0,1)\n    alpha_red_factor: PositiveFloat = 0.5\n    accept_every_trial_step: YesNoBool = False\n    accept_after_max_steps: Literal[-1] | NonNegativeInt = -1\n    alpha_for_y: Literal[\n        \"primal\",\n        \"bound-mult\",\n        \"min\",\n        \"max\",\n        \"full\",\n        \"min-dual-infeas\",\n        \"safer-min-dual-infeas\",\n        \"primal-and-full\",\n        \"dual-and-full\",\n        \"acceptor\",\n    
] = \"primal\"\n    alpha_for_y_tol: NonNegativeFloat = 10\n    tiny_step_tol: NonNegativeFloat = 2.22045 * 1e-15\n    tiny_step_y_tol: NonNegativeFloat = 0.01\n    watchdog_shortened_iter_trigger: NonNegativeInt = 10\n    watchdog_trial_iter_max: PositiveInt = 3\n    theta_max_fact: PositiveFloat = 10_000\n    theta_min_fact: PositiveFloat = 0.0001\n    # TODO: refine type to fix the range (0,0.5)\n    eta_phi: PositiveFloat = 1e-8\n    delta: PositiveFloat = 1\n    s_phi: GtOneFloat = 2.3\n    s_theta: GtOneFloat = 1.1\n    # TODO: refine type to fix the range (0,1)\n    gamma_phi: PositiveFloat = 1e-8\n    # TODO: refine type to fix the range (0,1)\n    gamma_theta: PositiveFloat = 1e-5\n    # TODO: refine type to fix the range (0,1)\n    alpha_min_frac: PositiveFloat = 0.05\n    max_soc: NonNegativeInt = 4\n    kappa_soc: PositiveFloat = 0.99\n    obj_max_inc: float = 5.0\n    max_filter_resets: NonNegativeInt = 5\n    filter_reset_trigger: PositiveInt = 5\n    corrector_type: (\n        Literal[\n            \"none\",\n            \"affine\",\n            \"primal-dual\",\n        ]\n        | None\n    ) = None\n    skip_corr_if_neg_curv: YesNoBool = True\n    skip_corr_in_monotone_mode: YesNoBool = True\n    corrector_compl_avrg_red_fact: PositiveFloat = 1\n    soc_method: Literal[0, 1] = 0\n    nu_init: PositiveFloat = 1e-6\n    nu_inc: PositiveFloat = 0.0001\n    # TODO: refine type to fix the range (0,1)\n    rho: PositiveFloat = 0.1\n    kappa_sigma: PositiveFloat = 1e10\n    recalc_y: YesNoBool = False\n    recalc_y_feas_tol: PositiveFloat = 1e-6\n    slack_move: NonNegativeFloat = 1.81899 * 1e-12\n    constraint_violation_norm_type: Literal[\n        \"1-norm\",\n        \"2-norm\",\n        \"max-norm\",\n    ] = \"1-norm\"\n    # step calculation\n    mehrotra_algorithm: YesNoBool = False\n    fast_step_computation: YesNoBool = False\n    min_refinement_steps: NonNegativeInt = 1\n    max_refinement_steps: NonNegativeInt = 10\n    residual_ratio_max: 
PositiveFloat = 1e-10\n    residual_ratio_singular: PositiveFloat = 1e-5\n    residual_improvement_factor: PositiveFloat = 1\n    neg_curv_test_tol: NonNegativeFloat = 0\n    neg_curv_test_reg: YesNoBool = True\n    max_hessian_perturbation: PositiveFloat = 1e20\n    min_hessian_perturbation: NonNegativeFloat = 1e-20\n    perturb_inc_fact_first: GtOneFloat = 100\n    perturb_inc_fact: GtOneFloat = 8\n    # TODO: refine type to fix the range (0,1)\n    perturb_dec_fact: PositiveFloat = 0.333333\n    first_hessian_perturbation: PositiveFloat = 0.0001\n    jacobian_regularization_value: NonNegativeFloat = 1e-8\n    jacobian_regularization_exponent: NonNegativeFloat = 0.25\n    perturb_always_cd: YesNoBool = False\n    # restoration phase\n    expect_infeasible_problem: YesNoBool = False\n    expect_infeasible_problem_ctol: NonNegativeFloat = 0.001\n    expect_infeasible_problem_ytol: PositiveFloat = 1e8\n    start_with_resto: YesNoBool = False\n    soft_resto_pderror_reduction_factor: NonNegativeFloat = 0.9999\n    max_soft_resto_iters: NonNegativeInt = 10\n    # TODO: refine type to fix the range [0,1)\n    required_infeasibility_reduction: NonNegativeFloat = 0.9\n    max_resto_iter: NonNegativeInt = 3_000_000\n    evaluate_orig_obj_at_resto_trial: YesNoBool = True\n    resto_penalty_parameter: PositiveFloat = 1000\n    resto_proximity_weight: NonNegativeFloat = 1\n    bound_mult_reset_threshold: NonNegativeFloat = 1000\n    constr_mult_reset_threshold: NonNegativeFloat = 0\n    resto_failure_feasibility_threshold: NonNegativeFloat | None = None\n    # hessian approximation\n    limited_memory_aug_solver: Literal[\n        \"sherman-morrison\",\n        \"extended\",\n    ] = \"sherman-morrison\"\n    limited_memory_max_history: NonNegativeInt = 6\n    limited_memory_update_type: Literal[\n        \"bfgs\",\n        \"sr1\",\n    ] = \"bfgs\"\n    limited_memory_initialization: Literal[\n        \"scalar1\",\n        \"scalar2\",\n        \"scalar3\",\n        
\"scalar4\",\n        \"constant\",\n    ] = \"scalar1\"\n    limited_memory_init_val: PositiveFloat = 1\n    limited_memory_init_val_max: PositiveFloat = 1e8\n    limited_memory_init_val_min: PositiveFloat = 1e-8\n    limited_memory_max_skipping: PositiveInt = 2\n    limited_memory_special_for_resto: YesNoBool = False\n    hessian_approximation: Literal[\n        \"limited-memory\",\n        \"exact\",\n    ] = \"limited-memory\"\n    hessian_approximation_space: Literal[\n        \"nonlinear-variables\",\n        \"all-variables\",\n    ] = \"nonlinear-variables\"\n    # linear solver\n    linear_solver: Literal[\n        \"mumps\", \"ma27\", \"ma57\", \"ma77\", \"ma86\", \"ma97\", \"pardiso\", \"custom\"\n    ] = \"mumps\"\n    linear_solver_options: dict[str, Any] | None = None\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        if not IS_CYIPOPT_INSTALLED:\n            raise NotInstalledError(\n                \"The 'ipopt' algorithm requires the cyipopt package to be installed.\\n\"\n                \"You can install it with: `conda install -c conda-forge cyipopt`.\"\n            )\n\n        import cyipopt\n\n        if self.acceptable_tol <= self.convergence_ftol_rel:\n            raise ValueError(\n                \"The acceptable tolerance must be larger than the desired tolerance.\"\n            )\n        if self.mu_strategy not in [\"monotone\", \"adaptive\"]:\n            raise ValueError(\n                f\"Unknown barrier strategy: {self.mu_strategy}.\"\n                \" It must be 'monotone' or 'adaptive'.\"\n            )\n        if self.nlp_upper_bound_inf < 0:\n            raise ValueError(\"nlp_upper_bound_inf should be > 0.\")\n        if self.nlp_lower_bound_inf > 0:\n            raise ValueError(\"nlp_lower_bound_inf should be < 0.\")\n        linear_solver_options = (\n            {} if self.linear_solver_options is None else 
self.linear_solver_options\n        )\n        if self.resto_failure_feasibility_threshold is None:\n            resto_failure_feasibility_threshold = 1e2 * self.convergence_ftol_rel\n        else:\n            resto_failure_feasibility_threshold = (\n                self.resto_failure_feasibility_threshold\n            )\n\n        # convert None to str none section\n        linear_solver_options_with_none = [\n            \"ma86_scaling\",\n            \"ma97_scaling\",\n            \"ma97_scaling1\",\n            \"ma97_scaling2\",\n            \"ma97_scaling3\",\n            \"spral_scaling\",\n            \"spral_scaling_1\",\n            \"spral_scaling_2\",\n            \"spral_scaling_3\",\n            \"linear_system_scaling\",\n        ]\n        for key, val in linear_solver_options.items():\n            if key in linear_solver_options_with_none:\n                linear_solver_options[key] = _convert_none_to_str(val)\n        boolean_linear_solver_options = [\n            \"linear_scaling_on_demand\"\n            \"ma27_skip_inertia_check\"\n            \"ma27_ignore_singularity\"\n            \"ma57_automatic_scaling\"\n            \"ma97_solve_blas3\"\n            \"pardiso_redo_symbolic_fact_only_if_inertia_wrong\"\n            \"pardiso_repeated_perturbation_means_singular\"\n            \"pardiso_skip_inertia_check\"\n            \"pardiso_iterative\"\n            \"pardisomkl_redo_symbolic_fact_only_if_inertia_wrong\"\n            \"pardisomkl_repeated_perturbation_means_singular\"\n            \"pardisomkl_skip_inertia_check\"\n            \"spral_ignore_numa\"\n            \"spral_use_gpu\"\n            \"wsmp_skip_inertia_check\"\n            \"wsmp_no_pivoting\"\n        ]\n        for key, val in linear_solver_options.items():\n            if key in boolean_linear_solver_options:\n                linear_solver_options[key] = _convert_bool_to_str(val, key)\n\n        convert_bool_to_str_options = {\n            
\"dependency_detection_with_rhs\": self.dependency_detection_with_rhs,\n            \"check_derivatives_for_naninf\": self.check_derivatives_for_naninf,\n            \"jac_c_constant\": self.jac_c_constant,\n            \"jac_d_constant\": self.jac_d_constant,\n            \"hessian_constant\": self.hessian_constant,\n            \"least_square_init_primal\": self.least_square_init_primal,\n            \"least_square_init_duals\": self.least_square_init_duals,\n            \"warm_start_init_point\": self.warm_start_init_point,\n            \"warm_start_same_structure\": self.warm_start_same_structure,\n            \"warm_start_entire_iterate\": self.warm_start_entire_iterate,\n            \"replace_bounds\": self.replace_bounds,\n            \"skip_finalize_solution_call\": self.skip_finalize_solution_call,\n            \"timing_statistics\": self.timing_statistics,\n            \"adaptive_mu_restore_previous_iterate\": (\n                self.adaptive_mu_restore_previous_iterate\n            ),\n            \"mu_allow_fast_monotone_decrease\": self.mu_allow_fast_monotone_decrease,\n            \"accept_every_trial_step\": self.accept_every_trial_step,\n            \"skip_corr_if_neg_curv\": self.skip_corr_if_neg_curv,\n            \"skip_corr_in_monotone_mode\": self.skip_corr_in_monotone_mode,\n            \"recalc_y\": self.recalc_y,\n            \"mehrotra_algorithm\": self.mehrotra_algorithm,\n            \"fast_step_computation\": self.fast_step_computation,\n            \"neg_curv_test_reg\": self.neg_curv_test_reg,\n            \"perturb_always_cd\": self.perturb_always_cd,\n            \"expect_infeasible_problem\": self.expect_infeasible_problem,\n            \"start_with_resto\": self.start_with_resto,\n            \"evaluate_orig_obj_at_resto_trial\": self.evaluate_orig_obj_at_resto_trial,\n            \"limited_memory_special_for_resto\": self.limited_memory_special_for_resto,\n            \"honor_original_bounds\": self.honor_original_bounds,\n        
}\n        converted_bool_to_str_options = {\n            key: _convert_bool_to_str(val, key)\n            for key, val in convert_bool_to_str_options.items()\n        }\n\n        options = {\n            # disable verbosity\n            \"print_level\": 0,\n            \"ma77_print_level\": -1,\n            \"ma86_print_level\": -1,\n            \"ma97_print_level\": -1,\n            \"pardiso_msglvl\": 0,\n            # disable derivative checker\n            \"derivative_test\": \"none\",\n            \"s_max\": float(self.s_max),\n            \"max_iter\": self.stopping_maxiter,\n            \"max_wall_time\": float(self.stopping_max_wall_time_seconds),\n            \"max_cpu_time\": self.stopping_max_cpu_time,\n            \"dual_inf_tol\": self.dual_inf_tol,\n            \"constr_viol_tol\": self.constr_viol_tol,\n            \"compl_inf_tol\": self.compl_inf_tol,\n            # acceptable heuristic\n            \"acceptable_iter\": self.acceptable_iter,\n            \"acceptable_tol\": self.acceptable_tol,\n            \"acceptable_dual_inf_tol\": self.acceptable_dual_inf_tol,\n            \"acceptable_constr_viol_tol\": self.acceptable_constr_viol_tol,\n            \"acceptable_compl_inf_tol\": self.acceptable_compl_inf_tol,\n            \"acceptable_obj_change_tol\": self.acceptable_obj_change_tol,\n            # bounds and more\n            \"diverging_iterates_tol\": self.diverging_iterates_tol,\n            \"nlp_lower_bound_inf\": self.nlp_lower_bound_inf,\n            \"nlp_upper_bound_inf\": self.nlp_upper_bound_inf,\n            \"fixed_variable_treatment\": self.fixed_variable_treatment,\n            \"dependency_detector\": _convert_none_to_str(self.dependency_detector),\n            \"kappa_d\": self.kappa_d,\n            \"bound_relax_factor\": self.bound_relax_factor,\n            \"honor_original_bounds\": self.honor_original_bounds,\n            # scaling\n            \"nlp_scaling_method\": _convert_none_to_str(self.nlp_scaling_method),\n   
         \"obj_scaling_factor\": float(self.obj_scaling_factor),\n            \"nlp_scaling_max_gradient\": float(self.nlp_scaling_max_gradient),\n            \"nlp_scaling_obj_target_gradient\": float(\n                self.nlp_scaling_obj_target_gradient\n            ),\n            \"nlp_scaling_constr_target_gradient\": float(\n                self.nlp_scaling_constr_target_gradient\n            ),\n            \"nlp_scaling_min_value\": float(self.nlp_scaling_min_value),\n            # initialization\n            \"bound_push\": self.bound_push,\n            \"bound_frac\": self.bound_frac,\n            \"slack_bound_push\": self.slack_bound_push,\n            \"slack_bound_frac\": self.slack_bound_frac,\n            \"constr_mult_init_max\": float(self.constr_mult_init_max),\n            \"bound_mult_init_val\": float(self.bound_mult_init_val),\n            \"bound_mult_init_method\": self.bound_mult_init_method,\n            # warm start\n            \"warm_start_bound_push\": self.warm_start_bound_push,\n            \"warm_start_bound_frac\": self.warm_start_bound_frac,\n            \"warm_start_slack_bound_push\": self.warm_start_slack_bound_push,\n            \"warm_start_slack_bound_frac\": self.warm_start_slack_bound_frac,\n            \"warm_start_mult_bound_push\": self.warm_start_mult_bound_push,\n            \"warm_start_mult_init_max\": self.warm_start_mult_init_max,\n            \"warm_start_target_mu\": self.warm_start_target_mu,\n            # more miscellaneous\n            \"option_file_name\": self.option_file_name,\n            # barrier parameter update\n            \"mu_target\": float(self.mu_target),\n            \"mu_max_fact\": float(self.mu_max_fact),\n            \"mu_max\": float(self.mu_max),\n            \"mu_min\": float(self.mu_min),\n            \"adaptive_mu_globalization\": self.adaptive_mu_globalization,\n            \"adaptive_mu_kkterror_red_iters\": self.adaptive_mu_kkterror_red_iters,\n            
\"adaptive_mu_kkterror_red_fact\": self.adaptive_mu_kkterror_red_fact,\n            \"filter_margin_fact\": float(self.filter_margin_fact),\n            \"filter_max_margin\": float(self.filter_max_margin),\n            \"adaptive_mu_monotone_init_factor\": self.adaptive_mu_monotone_init_factor,\n            \"adaptive_mu_kkt_norm_type\": self.adaptive_mu_kkt_norm_type,\n            \"mu_strategy\": self.mu_strategy,\n            \"mu_oracle\": self.mu_oracle,\n            \"fixed_mu_oracle\": self.fixed_mu_oracle,\n            \"mu_init\": self.mu_init,\n            \"barrier_tol_factor\": float(self.barrier_tol_factor),\n            \"mu_linear_decrease_factor\": self.mu_linear_decrease_factor,\n            \"mu_superlinear_decrease_power\": self.mu_superlinear_decrease_power,\n            \"tau_min\": self.tau_min,\n            \"sigma_max\": float(self.sigma_max),\n            \"sigma_min\": float(self.sigma_min),\n            \"quality_function_norm_type\": self.quality_function_norm_type,\n            \"quality_function_centrality\": _convert_none_to_str(\n                self.quality_function_centrality\n            ),\n            \"quality_function_balancing_term\": _convert_none_to_str(\n                self.quality_function_balancing_term\n            ),\n            \"quality_function_max_section_steps\": (\n                self.quality_function_max_section_steps\n            ),\n            \"quality_function_section_sigma_tol\": (\n                self.quality_function_section_sigma_tol\n            ),\n            \"quality_function_section_qf_tol\": self.quality_function_section_qf_tol,\n            # linear search\n            \"line_search_method\": self.line_search_method,\n            \"alpha_red_factor\": self.alpha_red_factor,\n            \"accept_after_max_steps\": self.accept_after_max_steps,\n            \"alpha_for_y\": self.alpha_for_y,\n            \"alpha_for_y_tol\": float(self.alpha_for_y_tol),\n            \"tiny_step_tol\": 
self.tiny_step_tol,\n            \"tiny_step_y_tol\": self.tiny_step_y_tol,\n            \"watchdog_shortened_iter_trigger\": self.watchdog_shortened_iter_trigger,\n            \"watchdog_trial_iter_max\": self.watchdog_trial_iter_max,\n            \"theta_max_fact\": float(self.theta_max_fact),\n            \"theta_min_fact\": self.theta_min_fact,\n            \"eta_phi\": self.eta_phi,\n            \"delta\": float(self.delta),\n            \"s_phi\": self.s_phi,\n            \"s_theta\": self.s_theta,\n            \"gamma_phi\": self.gamma_phi,\n            \"gamma_theta\": self.gamma_theta,\n            \"alpha_min_frac\": self.alpha_min_frac,\n            \"max_soc\": self.max_soc,\n            \"kappa_soc\": self.kappa_soc,\n            \"obj_max_inc\": float(self.obj_max_inc),\n            \"max_filter_resets\": self.max_filter_resets,\n            \"filter_reset_trigger\": self.filter_reset_trigger,\n            \"corrector_type\": _convert_none_to_str(self.corrector_type),\n            \"corrector_compl_avrg_red_fact\": float(self.corrector_compl_avrg_red_fact),\n            \"soc_method\": self.soc_method,\n            \"nu_init\": self.nu_init,\n            \"nu_inc\": self.nu_inc,\n            \"rho\": self.rho,\n            \"kappa_sigma\": self.kappa_sigma,\n            \"recalc_y_feas_tol\": self.recalc_y_feas_tol,\n            \"slack_move\": self.slack_move,\n            \"constraint_violation_norm_type\": self.constraint_violation_norm_type,\n            # step calculation\n            \"min_refinement_steps\": self.min_refinement_steps,\n            \"max_refinement_steps\": self.max_refinement_steps,\n            \"residual_ratio_max\": self.residual_ratio_max,\n            \"residual_ratio_singular\": self.residual_ratio_singular,\n            \"residual_improvement_factor\": float(self.residual_improvement_factor),\n            \"neg_curv_test_tol\": float(self.neg_curv_test_tol),\n            \"max_hessian_perturbation\": 
self.max_hessian_perturbation,\n            \"min_hessian_perturbation\": self.min_hessian_perturbation,\n            \"perturb_inc_fact_first\": float(self.perturb_inc_fact_first),\n            \"perturb_inc_fact\": float(self.perturb_inc_fact),\n            \"perturb_dec_fact\": float(self.perturb_dec_fact),\n            \"first_hessian_perturbation\": float(self.first_hessian_perturbation),\n            \"jacobian_regularization_value\": float(self.jacobian_regularization_value),\n            \"jacobian_regularization_exponent\": float(\n                self.jacobian_regularization_exponent\n            ),\n            # restoration phase\n            \"expect_infeasible_problem_ctol\": self.expect_infeasible_problem_ctol,\n            \"expect_infeasible_problem_ytol\": self.expect_infeasible_problem_ytol,\n            \"soft_resto_pderror_reduction_factor\": (\n                self.soft_resto_pderror_reduction_factor\n            ),\n            \"max_soft_resto_iters\": self.max_soft_resto_iters,\n            \"required_infeasibility_reduction\": float(\n                self.required_infeasibility_reduction\n            ),\n            \"max_resto_iter\": self.max_resto_iter,\n            \"resto_penalty_parameter\": float(self.resto_penalty_parameter),\n            \"resto_proximity_weight\": float(self.resto_proximity_weight),\n            \"bound_mult_reset_threshold\": float(self.bound_mult_reset_threshold),\n            \"constr_mult_reset_threshold\": float(self.constr_mult_reset_threshold),\n            \"resto_failure_feasibility_threshold\": float(\n                resto_failure_feasibility_threshold\n            ),\n            # hessian approximation\n            \"limited_memory_aug_solver\": self.limited_memory_aug_solver,\n            \"limited_memory_max_history\": self.limited_memory_max_history,\n            \"limited_memory_update_type\": self.limited_memory_update_type,\n            \"limited_memory_initialization\": 
self.limited_memory_initialization,\n            \"limited_memory_init_val\": float(self.limited_memory_init_val),\n            \"limited_memory_init_val_max\": self.limited_memory_init_val_max,\n            \"limited_memory_init_val_min\": self.limited_memory_init_val_min,\n            \"limited_memory_max_skipping\": self.limited_memory_max_skipping,\n            \"hessian_approximation\": self.hessian_approximation,\n            \"hessian_approximation_space\": self.hessian_approximation_space,\n            # linear solver\n            \"linear_solver\": self.linear_solver,\n            **linear_solver_options,\n            **converted_bool_to_str_options,\n        }\n\n        raw_res = cyipopt.minimize_ipopt(\n            fun=problem.fun,\n            x0=x0,\n            bounds=_get_scipy_bounds(problem.bounds),\n            jac=problem.jac,\n            constraints=problem.nonlinear_constraints,\n            tol=self.convergence_ftol_rel,\n            options=options,\n        )\n\n        res = process_scipy_result(raw_res)\n\n        return res\n\n\ndef _get_scipy_bounds(bounds: InternalBounds) -> ScipyBounds:\n    return ScipyBounds(lb=bounds.lower, ub=bounds.upper)\n\n\ndef _convert_bool_to_str(var, name):\n    \"\"\"Convert input to either 'yes' or 'no' and check the output is yes or no.\n\n    Args:\n        var (str or bool): user input\n        name (str): name of the variable.\n\n    Returns:\n        out (str): \"yes\" or \"no\".\n\n    \"\"\"\n    if var is True:\n        out = \"yes\"\n    elif var is False:\n        out = \"no\"\n    else:\n        out = var\n    if out not in {\"yes\", \"no\"}:\n        raise ValueError(\n            f\"{name} must be 'yes', 'no', True or False. You specified {var}.\"\n        )\n    return out\n\n\ndef _convert_none_to_str(var):\n    out = \"none\" if var is None else var\n    return out\n"
  },
  {
    "path": "src/optimagic/optimizers/nag_optimizers.py",
    "content": "\"\"\"Implement algorithms by the (Numerical Algorithms Group)[https://www.nag.com/].\n\nThe following arguments are not supported as ``algo_options``:\n\n- ``scaling_within_bounds``\n- ``init.run_in_parallel``\n- ``do_logging``, ``print_progress`` and all their advanced options.\n\n\"\"\"\n\nimport warnings\nfrom dataclasses import dataclass\nfrom typing import Any, Callable, Literal, cast\n\nimport numpy as np\nfrom numpy.typing import NDArray\n\nfrom optimagic import mark\nfrom optimagic.config import IS_DFOLS_INSTALLED, IS_PYBOBYQA_INSTALLED\nfrom optimagic.exceptions import NotInstalledError\nfrom optimagic.optimization.algo_options import STOPPING_MAXFUN\nfrom optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult\nfrom optimagic.optimization.internal_optimization_problem import (\n    InternalOptimizationProblem,\n)\nfrom optimagic.typing import (\n    AggregationLevel,\n    NonNegativeFloat,\n    NonNegativeInt,\n    PositiveInt,\n)\nfrom optimagic.utilities import calculate_trustregion_initial_radius\n\nCONVERGENCE_MINIMAL_TRUSTREGION_RADIUS_TOLERANCE = 1e-8\n\"\"\"float: Stop when the lower trust region radius falls below this value.\"\"\"\n\nCONVERGENCE_SLOW_PROGRESS = {\n    \"threshold_to_characterize_as_slow\": 1e-8,\n    \"max_insufficient_improvements\": None,\n    \"comparison_period\": 5,\n}\n\"\"\"dict: Specification of when to terminate or reset the optimization because of only\n    slow improvements. This is similar to an absolute criterion tolerance only that\n    instead of a single improvement the average over several iterations must be small.\n\n    Possible entries are:\n        threshold_to_characterize_as_slow (float): Threshold whether an improvement\n            is insufficient. 
Note: the improvement is divided by the\n            ``comparison_period``.\n            So this is the required average improvement per iteration over the\n            comparison period.\n        max_insufficient_improvements (int): Number of consecutive\n            insufficient improvements before termination (or reset). Default is\n            ``20 * len(x)``.\n        comparison_period (int):\n            How many iterations to go back to calculate the improvement.\n            For example 5 would mean that each criterion evaluation is compared to the\n            criterion value from 5 iterations before.\n\n\"\"\"\n\nTHRESHOLD_FOR_SAFETY_STEP = 0.5\nr\"\"\"float: Threshold for when to call the safety step (:math:`\\gamma_s`).\n\n    :math:`\\text{proposed step} \\leq \\text{threshold_for_safety_step} \\cdot\n    \\text{current_lower_trustregion_radius}`.\n\n\"\"\"\n\nCONVERGENCE_NOISE_CORRECTED_FTOL = 1.0\n\"\"\"float: Stop when the evaluations on the set of interpolation points all fall within\n    this factor of the noise level. The default is 1, i.e. when all evaluations are\n    within the noise level. If you want to not use this criterion but still flag your\n    criterion function as noisy, set this tolerance to 0.0.\n\n    .. warning::\n        Very small values, as in most other tolerances don't make sense here.\n\n\"\"\"\n\n\nTRUSTREGION_THRESHOLD_SUCCESSFUL = 0.1\n\"\"\"float: Share of the predicted improvement that has to be achieved for a trust\n    region iteration to count as successful.\n\n\"\"\"\n\nTRUSTREGION_THRESHOLD_VERY_SUCCESSFUL = 0.7\n\"\"\"float: Share of predicted improvement that has to be achieved for a trust region\n    iteration to count as very successful.``criterion_noisy``\n\n\"\"\"\n\nTRUSTREGION_SHRINKING_FACTOR_NOT_SUCCESSFUL = None\n\"\"\"float: Ratio by which to shrink the upper trust region radius when realized\n    improvement does not match the ``threshold_successful``. 
The default is 0.98\n    if the criterion is noisy and 0.5 else.\n\n\"\"\"\n\nTRUSTREGION_EXPANSION_FACTOR_SUCCESSFUL = 2.0\nr\"\"\"float: Ratio by which to expand the upper trust region radius :math:`\\Delta_k`\n    in very successful iterations (:math:`\\gamma_{inc}` in the notation of the paper).\n\n\"\"\"\n\nTRUSTREGION_EXPANSION_FACTOR_VERY_SUCCESSFUL = 4.0\nr\"\"\"float: Ratio of the proposed step ($\\|s_k\\|$) by which to expand the upper trust\n    region radius (:math:`\\Delta_k`) in very successful iterations\n    (:math:`\\overline{\\gamma}_{inc}` in the notation of the paper).\n\n\"\"\"\n\nTRUSTREGION_SHRINKING_FACTOR_LOWER_RADIUS = None\nr\"\"\"float: Ratio by which to shrink the lower trust region radius (:math:`\\rho_k`)\n    (:math:`\\alpha_1` in the notation of the paper). Default is 0.9 if\n    the criterion is noisy and 0.1 else.\n\n\"\"\"\n\nTRUSTREGION_SHRINKING_FACTOR_UPPER_RADIUS = None\nr\"\"\"float: Ratio of the current lower trust region (:math:`\\rho_k`) by which to shrink\n    the upper trust region radius (:math:`\\Delta_k`) when the lower one is shrunk\n    (:math:`\\alpha_2` in the notation of the paper). Default is 0.95 if the\n    criterion is noisy and 0.5 else.\"\"\"\n\nRANDOM_DIRECTIONS_ORTHOGONAL = True\n\"\"\"bool: Whether to make randomly drawn initial directions orthogonal.\"\"\"\n\n\nINTERPOLATION_ROUNDING_ERROR = 0.1\nr\"\"\"float: Internally, all the NAG algorithms store interpolation points with respect\n    to a base point :math:`x_b`; that is, we store :math:`\\{y_t-x_b\\}`,\n    which reduces the risk of roundoff errors. 
We shift :math:`x_b` to :math:`x_k` when\n    :math:`\\text{proposed step} \\leq \\text{interpolation_rounding_error} \\cdot\n    \\|x_k-x_b\\|`.\n\n\"\"\"\n\nCLIP_CRITERION_IF_OVERFLOWING = True\n\"\"\"bool: Whether to clip the criterion to avoid ``OverflowError``.\"\"\"\n\n\nTRUSTREGION_PRECONDITION_INTERPOLATION = True\n\"\"\"bool: whether to scale the interpolation linear system to improve conditioning.\"\"\"\n\n\nRESET_OPTIONS = {\n    \"use_resets\": None,\n    \"minimal_trustregion_radius_tolerance_scaling_at_reset\": 1.0,\n    \"reset_type\": \"soft\",\n    \"move_center_at_soft_reset\": True,\n    \"reuse_criterion_value_at_hard_reset\": True,\n    \"max_iterations_without_new_best_after_soft_reset\": None,\n    \"auto_detect\": True,\n    \"auto_detect_history\": 30,\n    \"auto_detect_min_jacobian_increase\": 0.015,\n    \"auto_detect_min_correlations\": 0.1,\n    \"points_to_replace_at_soft_reset\": 3,\n    \"max_consecutive_unsuccessful_resets\": 10,\n    # just bobyqa\n    \"max_unsuccessful_resets\": None,\n    \"trust_region_scaling_at_unsuccessful_reset\": None,\n    # just dfols\n    \"max_interpolation_points\": None,\n    \"n_extra_interpolation_points_per_soft_reset\": 0,\n    \"n_extra_interpolation_points_per_hard_reset\": 0,\n    \"n_additional_extra_points_to_replace_per_reset\": 0,\n}\nr\"\"\"dict: Options for reseting the optimization.\n\n    Possible entries are:\n\n        use_resets (bool): Whether to do resets when the lower trust\n            region radius (:math:`\\rho_k`) reaches the stopping criterion\n            (:math:`\\rho_{end}`), or (optionally) when all interpolation points are\n            within noise level. Default is ``True`` if the criterion is noisy.\n        minimal_trustregion_radius_tolerance_scaling_at_reset (float): Factor with\n            which the trust region stopping criterion is multiplied at each reset.\n\n        reset_type (str): Whether to use \"soft\" or \"hard\" resets. 
Default is \"soft\".\n\n        move_center_at_soft_reset (bool): Whether to move the trust region center\n            ($x_k$) to the best new point evaluated in stead of keeping it constant.\n        points_to_replace_at_soft_reset (int): Number of interpolation points to move\n            at each soft reset.\n        reuse_criterion_value_at_hard_reset (bool): Whether or not to recycle the\n            criterion value at the best iterate found when performing a hard reset.\n            This saves one criterion evaluation.\n        max_iterations_without_new_best_after_soft_reset (int):\n            The maximum number of successful steps in a given run where the new\n            criterion value is worse than the best value found in previous runs before\n            terminating. Default is ``max_criterion_evaluations``.\n        auto_detect (bool): Whether or not to\n            automatically determine when to reset. This is an additional condition\n            and resets can still be triggered by small upper trust region radius, etc.\n            There are two criteria used: upper trust region radius shrinkage\n            (no increases over the history, more decreases than no changes) and\n            changes in the model Jacobian (consistently increasing trend as measured\n            by slope and correlation coefficient of the line of best fit).\n        auto_detect_history (int):\n            How many iterations of model changes and trust region radii to store.\n        auto_detect_min_jacobian_increase (float):\n            Minimum rate of increase of the Jacobian over past iterations to cause a\n            reset.\n        auto_detect_min_correlations (float):\n            Minimum correlation of the Jacobian data set required to cause a reset.\n        max_consecutive_unsuccessful_resets (int): maximum number of consecutive\n            unsuccessful resets allowed (i.e. 
resets which did not outperform the\n            best known value from earlier runs).\n\n    Only used when using nag_bobyqa:\n\n        max_unsuccessful_resets (int):\n            number of total unsuccessful resets allowed.\n            Default is 20 if ``seek_global_optimum`` and else unrestricted.\n        trust_region_scaling_at_unsuccessful_reset (float): Factor by which to\n            expand the initial lower trust region radius (:math:`\\rho_{beg}`) after\n            unsuccessful resets. Default is 1.1 if ``seek_global_optimum`` else 1.\n\n    Only used when using nag_dfols:\n\n        max_interpolation_points (int): Maximum allowed value of the number of\n            interpolation points. This is useful if the number of interpolation points\n            increases with each reset, e.g. when\n            ``n_extra_interpolation_points_per_soft_reset > 0``. The default is\n            ``n_interpolation_points``.\n        n_extra_interpolation_points_per_soft_reset (int): Number of points to add to\n            the interpolation set with each soft reset.\n        n_extra_interpolation_points_per_hard_reset (int): Number of points to add to\n            the interpolation set with each hard reset.\n        n_additional_extra_points_to_replace_per_reset (int): This parameter modifies\n            ``n_extra_points_to_replace_successful``. 
With each reset\n            ``n_extra_points_to_replace_successful`` is increased by this number.\n\n\"\"\"\n\n\nTRUSTREGION_FAST_START_OPTIONS = {\n    \"min_inital_points\": None,\n    \"method\": \"auto\",\n    \"scale_of_trustregion_step_perturbation\": None,\n    \"scale_of_jacobian_components_perturbation\": 1e-2,\n    # the following will be growing.full_rank.min_sing_val\n    # but it not supported yet by DF-OLS.\n    \"floor_of_jacobian_singular_values\": 1,\n    \"jacobian_max_condition_number\": 1e8,\n    \"geometry_improving_steps\": False,\n    \"safety_steps\": True,\n    \"shrink_upper_radius_in_safety_steps\": False,\n    \"full_geometry_improving_step\": False,\n    \"reset_trustregion_radius_after_fast_start\": False,\n    \"reset_min_trustregion_radius_after_fast_start\": False,\n    \"shrinking_factor_not_successful\": None,\n    \"n_extra_search_directions_per_iteration\": 0,\n}\nr\"\"\"dict: Options to start the optimization while building the full trust region model.\n\n    To activate this, set the number of interpolation points at which to evaluate the\n    criterion before doing the first step, `min_initial_points`, to something smaller\n    than the number of parameters.\n\n    The following options can be specified:\n\n        min_initial_points (int): Number of initial interpolation\n            points in addition to the start point. 
This should only be changed to\n            a value less than ``len(x)``, and only if the default setup cost\n            of ``len(x) + 1`` evaluations of the criterion is impractical.\n            If this is set to be less than the default, the input value of\n            ``n_interpolation_points`` should be set to ``len(x)``.\n            If the default is used, all the other parameters have no effect.\n            Default is ``n_interpolation_points - 1``.\n            If the default setup costs of the evaluations are very large, DF-OLS\n            can start with less than ``len(x)`` interpolation points and add points\n            to the trust region model with every iteration.\n        method (\"jacobian\", \"trustregion\" or \"auto\"):\n            When there are less interpolation points than ``len(x)`` the model is\n            underdetermined. This can be fixed in two ways:\n            If \"jacobian\", the interpolated Jacobian is perturbed to have full\n            rank, allowing the trust region step to include components in the full\n            search space. This is the default if\n            ``len(x) \\geq number of root contributions``.\n            If \"trustregion_step\", the trust region step is perturbed by an\n            orthogonal direction not yet searched. It is the default if\n            ``len(x) < number of root contributions``.\n        scale_of_trustregion_step_perturbation (float):\n            When adding new search directions, the length of the step is the trust\n            region radius multiplied by this value. The default is 0.1 if\n            ``method == \"trustregion\"`` else 1.\n        scale_of_jacobian_components_perturbation (float): Magnitude of extra\n            components added to the Jacobian. 
Default is 1e-2.\n        floor_of_jacobian_singular_values (float): Floor singular\n            values of the Jacobian at this factor of the last non zero value.\n            As of version 1.2.1 this option is not yet supported by DF-OLS!\n        scale_of_jacobian_singular_value_floor (float):\n            Floor singular values of the Jacobian at this factor of the last nonzero\n            value.\n        jacobian_max_condition_number (float): Cap on the condition number\n            of Jacobian after applying floors to singular values\n            (effectively another floor on the smallest singular value, since the\n            largest singular value is fixed).\n        geometry_improving_steps (bool): Whether to do geometry-improving steps in the\n            trust region algorithm, as per the usual algorithm during the fast start.\n        safety_steps (bool):\n            Whether to perform safety steps.\n        shrink_upper_radius_in_safety_steps (bool): During the fast start whether to\n            shrink the upper trust region radius in safety steps.\n        full_geometry_improving_step (bool): During the fast start whether to do a\n            full geometry-improving step within safety steps (the same as the post fast\n            start phase of the algorithm). 
Since this involves reducing the upper trust\n            region radius, this can only be `True` if\n            `shrink_upper_radius_in_safety_steps == False`.\n        reset_trustregion_radius_after_fast_start (bool):\n            Whether or not to reset the upper trust region radius to its initial value\n            at the end of the fast start phase.\n        reset_min_trustregion_radius_after_fast_start (bool):\n            Whether or not to reset the minimum trust region radius\n            (:math:`\\rho_k`) to its initial value at the end of the fast start phase.\n        shrinking_factor_not_successful (float):\n            Ratio by which to shrink the trust region radius when realized\n            improvement does not match the ``threshold_for_successful_iteration``\n            during the fast start phase.  By default it is the same as\n            ``reduction_when_not_successful``.\n        n_extra_search_directions_per_iteration (int): Number of new search\n            directions to add with each iteration where we do not have a full set\n            of search directions. This approach is not recommended! 
Default is 0.\n\n\"\"\"\n\n\n@mark.minimizer(\n    name=\"nag_dfols\",\n    solver_type=AggregationLevel.LEAST_SQUARES,\n    is_available=IS_DFOLS_INSTALLED,\n    is_global=False,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=True,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass NagDFOLS(Algorithm):\n    clip_criterion_if_overflowing: bool = CLIP_CRITERION_IF_OVERFLOWING\n    convergence_minimal_trustregion_radius_tolerance: NonNegativeFloat = (\n        CONVERGENCE_MINIMAL_TRUSTREGION_RADIUS_TOLERANCE  # noqa: E501\n    )\n    convergence_noise_corrected_criterion_tolerance: NonNegativeFloat = (\n        CONVERGENCE_NOISE_CORRECTED_FTOL  # noqa: E501\n    )\n    convergence_ftol_scaled: NonNegativeFloat = 0.0\n    convergence_slow_progress: dict[str, Any] | None = None\n    initial_directions: Literal[\n        \"coordinate\",\n        \"random\",\n    ] = \"coordinate\"\n    interpolation_rounding_error: float = INTERPOLATION_ROUNDING_ERROR\n    noise_additive_level: float | None = None\n    noise_multiplicative_level: float | None = None\n    noise_n_evals_per_point: NonNegativeInt | None = None\n    random_directions_orthogonal: bool = RANDOM_DIRECTIONS_ORTHOGONAL\n    stopping_maxfun: PositiveInt = STOPPING_MAXFUN\n    threshold_for_safety_step: NonNegativeFloat = THRESHOLD_FOR_SAFETY_STEP\n    trustregion_expansion_factor_successful: NonNegativeFloat = (\n        TRUSTREGION_EXPANSION_FACTOR_SUCCESSFUL\n    )\n    trustregion_expansion_factor_very_successful: NonNegativeFloat = (\n        TRUSTREGION_EXPANSION_FACTOR_VERY_SUCCESSFUL  # noqa: E501\n    )\n    trustregion_fast_start_options: dict[str, Any] | None = None\n    trustregion_initial_radius: NonNegativeFloat | None = None\n    trustregion_method_to_replace_extra_points: (\n        
Literal[\"geometry_improving\", \"momentum\"] | None\n    ) = \"geometry_improving\"\n    trustregion_n_extra_points_to_replace_successful: NonNegativeInt = 0\n    trustregion_n_interpolation_points: NonNegativeInt | None = None\n    trustregion_precondition_interpolation: bool = (\n        TRUSTREGION_PRECONDITION_INTERPOLATION\n    )\n    trustregion_reset_options: dict[str, Any] | None = None\n    trustregion_shrinking_factor_not_successful: NonNegativeFloat | None = (\n        TRUSTREGION_SHRINKING_FACTOR_NOT_SUCCESSFUL  # noqa: E501\n    )\n    trustregion_shrinking_factor_lower_radius: NonNegativeFloat | None = (\n        TRUSTREGION_SHRINKING_FACTOR_LOWER_RADIUS\n    )\n    trustregion_shrinking_factor_upper_radius: NonNegativeFloat | None = (\n        TRUSTREGION_SHRINKING_FACTOR_UPPER_RADIUS\n    )\n    trustregion_threshold_successful: float = TRUSTREGION_THRESHOLD_SUCCESSFUL\n    trustregion_threshold_very_successful: float = TRUSTREGION_THRESHOLD_VERY_SUCCESSFUL\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        res = nag_dfols_internal(\n            criterion=problem.fun,\n            x=x0,\n            lower_bounds=problem.bounds.lower,\n            upper_bounds=problem.bounds.upper,\n            clip_criterion_if_overflowing=self.clip_criterion_if_overflowing,\n            convergence_minimal_trustregion_radius_tolerance=self.convergence_minimal_trustregion_radius_tolerance,  # noqa: E501\n            convergence_noise_corrected_criterion_tolerance=self.convergence_noise_corrected_criterion_tolerance,  # noqa: E501\n            convergence_ftol_scaled=self.convergence_ftol_scaled,\n            convergence_slow_progress=self.convergence_slow_progress,\n            initial_directions=self.initial_directions,\n            interpolation_rounding_error=self.interpolation_rounding_error,\n            noise_additive_level=self.noise_additive_level,\n     
       noise_multiplicative_level=self.noise_multiplicative_level,\n            noise_n_evals_per_point=self.noise_n_evals_per_point,\n            random_directions_orthogonal=self.random_directions_orthogonal,\n            stopping_maxfun=self.stopping_maxfun,\n            threshold_for_safety_step=self.threshold_for_safety_step,\n            trustregion_expansion_factor_successful=self.trustregion_expansion_factor_successful,\n            trustregion_expansion_factor_very_successful=self.trustregion_expansion_factor_very_successful,  # noqa: E501\n            trustregion_fast_start_options=self.trustregion_fast_start_options,\n            trustregion_initial_radius=self.trustregion_initial_radius,\n            trustregion_method_to_replace_extra_points=self.trustregion_method_to_replace_extra_points,\n            trustregion_n_extra_points_to_replace_successful=self.trustregion_n_extra_points_to_replace_successful,\n            trustregion_n_interpolation_points=self.trustregion_n_interpolation_points,\n            trustregion_precondition_interpolation=self.trustregion_precondition_interpolation,\n            trustregion_reset_options=self.trustregion_reset_options,\n            trustregion_shrinking_factor_not_successful=self.trustregion_shrinking_factor_not_successful,\n            trustregion_shrinking_factor_lower_radius=self.trustregion_shrinking_factor_lower_radius,\n            trustregion_shrinking_factor_upper_radius=self.trustregion_shrinking_factor_upper_radius,\n            trustregion_threshold_successful=self.trustregion_threshold_successful,\n            trustregion_threshold_very_successful=self.trustregion_threshold_very_successful,\n        )\n        return res\n\n\ndef nag_dfols_internal(\n    criterion,\n    x,\n    lower_bounds,\n    upper_bounds,\n    clip_criterion_if_overflowing,\n    convergence_minimal_trustregion_radius_tolerance,  # noqa: E501\n    convergence_noise_corrected_criterion_tolerance,  # noqa: E501\n    
convergence_ftol_scaled,\n    convergence_slow_progress,\n    initial_directions,\n    interpolation_rounding_error,\n    noise_additive_level,\n    noise_multiplicative_level,\n    noise_n_evals_per_point,\n    random_directions_orthogonal,\n    stopping_maxfun,\n    threshold_for_safety_step,\n    trustregion_expansion_factor_successful,\n    trustregion_expansion_factor_very_successful,  # noqa: E501\n    trustregion_fast_start_options,\n    trustregion_initial_radius,\n    trustregion_method_to_replace_extra_points,\n    trustregion_n_extra_points_to_replace_successful,\n    trustregion_n_interpolation_points,\n    trustregion_precondition_interpolation,\n    trustregion_reset_options,\n    trustregion_shrinking_factor_not_successful,  # noqa: E501\n    trustregion_shrinking_factor_lower_radius,\n    trustregion_shrinking_factor_upper_radius,\n    trustregion_threshold_successful,\n    trustregion_threshold_very_successful,\n):\n    r\"\"\"Minimize a function with least squares structure using DFO-LS.\n\n    For details see\n    :ref: `list_of_nag_algorithms`.\n\n    \"\"\"\n    if not IS_DFOLS_INSTALLED:\n        raise NotInstalledError(\n            \"The 'nag_dfols' algorithm requires the DFO-LS package to be installed.\"\n            \"You can install it with 'pip install DFO-LS'. 
\"\n            \"For additional installation instructions visit: \",\n            r\"https://numericalalgorithmsgroup.github.io/dfols/build/html/install.html\",\n        )\n    import dfols\n\n    if trustregion_method_to_replace_extra_points == \"momentum\":\n        trustregion_use_momentum = True\n    elif trustregion_method_to_replace_extra_points in [\"geometry_improving\", None]:\n        trustregion_use_momentum = False\n    else:\n        raise ValueError(\n            \"trustregion_method_to_replace_extra_points must be \"\n            \"'geometry_improving', 'momentum' or None.\"\n        )\n\n    advanced_options, trustregion_reset_options = _create_nag_advanced_options(\n        x=x,\n        noise_multiplicative_level=noise_multiplicative_level,\n        noise_additive_level=noise_additive_level,\n        noise_n_evals_per_point=noise_n_evals_per_point,\n        convergence_noise_corrected_criterion_tolerance=convergence_noise_corrected_criterion_tolerance,  # noqa: E501\n        trustregion_initial_radius=trustregion_initial_radius,\n        trustregion_reset_options=trustregion_reset_options,\n        convergence_slow_progress=convergence_slow_progress,\n        interpolation_rounding_error=interpolation_rounding_error,\n        threshold_for_safety_step=threshold_for_safety_step,\n        clip_criterion_if_overflowing=clip_criterion_if_overflowing,\n        initial_directions=initial_directions,\n        random_directions_orthogonal=random_directions_orthogonal,\n        trustregion_precondition_interpolation=trustregion_precondition_interpolation,\n        trustregion_threshold_successful=trustregion_threshold_successful,\n        trustregion_threshold_very_successful=trustregion_threshold_very_successful,\n        trustregion_shrinking_factor_not_successful=trustregion_shrinking_factor_not_successful,  # noqa: E501\n        trustregion_expansion_factor_successful=trustregion_expansion_factor_successful,\n        
trustregion_expansion_factor_very_successful=trustregion_expansion_factor_very_successful,  # noqa: E501\n        trustregion_shrinking_factor_lower_radius=trustregion_shrinking_factor_lower_radius,  # noqa: E501\n        trustregion_shrinking_factor_upper_radius=trustregion_shrinking_factor_upper_radius,  # noqa: E501\n    )\n\n    fast_start = _build_options_dict(\n        user_input=trustregion_fast_start_options,\n        default_options=TRUSTREGION_FAST_START_OPTIONS,\n    )\n    if fast_start[\"floor_of_jacobian_singular_values\"] != 1:\n        warnings.warn(\n            \"Setting the `floor_of_jacobian_singular_values` is not supported by \"\n            \"DF-OLS as of version 1.2.1.\"\n        )\n    if (\n        fast_start[\"shrink_upper_radius_in_safety_steps\"]\n        and fast_start[\"full_geometry_improving_step\"]\n    ):\n        raise ValueError(\n            \"full_geometry_improving_step of the trustregion_fast_start_options can \"\n            \"only be True if shrink_upper_radius_in_safety_steps is False.\"\n        )\n\n    (\n        faststart_jac,\n        faststart_step,\n    ) = _get_fast_start_method(fast_start[\"method\"])\n\n    if (\n        trustregion_reset_options[\"n_extra_interpolation_points_per_soft_reset\"]\n        < trustregion_reset_options[\"n_extra_interpolation_points_per_soft_reset\"]\n    ):\n        raise ValueError(\n            \"In the trustregion_reset_options \"\n            \"'n_extra_interpolation_points_per_soft_reset' must \"\n            \"be larger or the same as 'n_extra_interpolation_points_per_hard_reset'.\"\n        )\n\n    dfols_options = {\n        \"growing.full_rank.use_full_rank_interp\": faststart_jac,\n        \"growing.perturb_trust_region_step\": faststart_step,\n        \"restarts.hard.use_old_rk\": trustregion_reset_options[\n            \"reuse_criterion_value_at_hard_reset\"\n        ],\n        \"restarts.auto_detect.min_chgJ_slope\": trustregion_reset_options[\n            
\"auto_detect_min_jacobian_increase\"\n        ],\n        \"restarts.max_npt\": trustregion_reset_options[\"max_interpolation_points\"],\n        \"restarts.increase_npt\": trustregion_reset_options[\n            \"n_extra_interpolation_points_per_soft_reset\"\n        ]\n        > 0,\n        \"restarts.increase_npt_amt\": trustregion_reset_options[\n            \"n_extra_interpolation_points_per_soft_reset\"\n        ],\n        \"restarts.hard.increase_ndirs_initial_amt\": trustregion_reset_options[\n            \"n_extra_interpolation_points_per_hard_reset\"\n        ]\n        - trustregion_reset_options[\"n_extra_interpolation_points_per_soft_reset\"],\n        \"model.rel_tol\": convergence_ftol_scaled,\n        \"regression.num_extra_steps\": trustregion_n_extra_points_to_replace_successful,\n        \"regression.momentum_extra_steps\": trustregion_use_momentum,\n        \"regression.increase_num_extra_steps_with_restart\": trustregion_reset_options[\n            \"n_additional_extra_points_to_replace_per_reset\"\n        ],\n        \"growing.ndirs_initial\": fast_start[\"min_inital_points\"],\n        \"growing.delta_scale_new_dirns\": fast_start[\n            \"scale_of_trustregion_step_perturbation\"\n        ],\n        \"growing.full_rank.scale_factor\": fast_start[\n            \"scale_of_jacobian_components_perturbation\"\n        ],\n        \"growing.full_rank.svd_max_jac_cond\": fast_start[\n            \"jacobian_max_condition_number\"\n        ],\n        \"growing.do_geom_steps\": fast_start[\"geometry_improving_steps\"],\n        \"growing.safety.do_safety_step\": fast_start[\"safety_steps\"],\n        \"growing.safety.reduce_delta\": fast_start[\n            \"shrink_upper_radius_in_safety_steps\"\n        ],\n        \"growing.safety.full_geom_step\": fast_start[\"full_geometry_improving_step\"],\n        \"growing.reset_delta\": fast_start[\"reset_trustregion_radius_after_fast_start\"],\n        \"growing.reset_rho\": fast_start[\n        
    \"reset_min_trustregion_radius_after_fast_start\"\n        ],\n        \"growing.gamma_dec\": fast_start[\"shrinking_factor_not_successful\"],\n        \"growing.num_new_dirns_each_iter\": fast_start[\n            \"n_extra_search_directions_per_iteration\"\n        ],\n        \"logging.save_diagnostic_info\": True,\n        \"logging.save_xk\": True,\n    }\n\n    advanced_options.update(dfols_options)\n\n    raw_res = dfols.solve(\n        criterion,\n        x0=x,\n        bounds=(lower_bounds, upper_bounds),\n        maxfun=stopping_maxfun,\n        rhobeg=trustregion_initial_radius,\n        npt=trustregion_n_interpolation_points,\n        rhoend=convergence_minimal_trustregion_radius_tolerance,\n        nsamples=noise_n_evals_per_point,\n        objfun_has_noise=noise_additive_level or noise_multiplicative_level,\n        scaling_within_bounds=False,\n        do_logging=False,\n        print_progress=False,\n        user_params=advanced_options,\n    )\n\n    res = _process_nag_result(raw_res, len(x))\n    out = InternalOptimizeResult(\n        x=res[\"solution_x\"],\n        fun=res[\"solution_criterion\"],\n        success=res[\"success\"],\n        message=res[\"message\"],\n        n_iterations=res[\"n_iterations\"],\n        n_fun_evals=res[\"n_fun_evals\"],\n    )\n    return out\n\n\n@mark.minimizer(\n    name=\"nag_pybobyqa\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_PYBOBYQA_INSTALLED,\n    is_global=False,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=True,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass NagPyBOBYQA(Algorithm):\n    clip_criterion_if_overflowing: bool = CLIP_CRITERION_IF_OVERFLOWING\n    convergence_minimal_trustregion_radius_tolerance: NonNegativeFloat = (\n        
CONVERGENCE_MINIMAL_TRUSTREGION_RADIUS_TOLERANCE  # noqa: E501\n    )\n    convergence_noise_corrected_criterion_tolerance: NonNegativeFloat = (\n        CONVERGENCE_NOISE_CORRECTED_FTOL  # noqa: E501\n    )\n    convergence_criterion_value: float | None = None\n    convergence_slow_progress: dict[str, Any] | None = None\n    initial_directions: Literal[\n        \"coordinate\",\n        \"random\",\n    ] = \"coordinate\"\n    interpolation_rounding_error: float = INTERPOLATION_ROUNDING_ERROR\n    noise_additive_level: float | None = None\n    noise_multiplicative_level: float | None = None\n    noise_n_evals_per_point: NonNegativeInt | None = None\n    random_directions_orthogonal: bool = RANDOM_DIRECTIONS_ORTHOGONAL\n    seek_global_optimum: bool = False\n    stopping_max_criterion_evaluations: PositiveInt = STOPPING_MAXFUN\n    threshold_for_safety_step: NonNegativeFloat = THRESHOLD_FOR_SAFETY_STEP\n    trustregion_expansion_factor_successful: NonNegativeFloat = (\n        TRUSTREGION_EXPANSION_FACTOR_SUCCESSFUL\n    )\n    trustregion_expansion_factor_very_successful: NonNegativeFloat = (\n        TRUSTREGION_EXPANSION_FACTOR_VERY_SUCCESSFUL  # noqa: E501\n    )\n    trustregion_initial_radius: NonNegativeFloat | None = None\n    trustregion_minimum_change_hession_for_underdetermined_interpolation: bool = True\n    trustregion_n_interpolation_points: NonNegativeInt | None = None\n    trustregion_precondition_interpolation: bool = (\n        TRUSTREGION_PRECONDITION_INTERPOLATION\n    )\n    trustregion_reset_options: dict[str, Any] | None = None\n    trustregion_shrinking_factor_not_successful: NonNegativeFloat | None = (\n        TRUSTREGION_SHRINKING_FACTOR_NOT_SUCCESSFUL  # noqa: E501\n    )\n    trustregion_shrinking_factor_lower_radius: NonNegativeFloat | None = (\n        TRUSTREGION_SHRINKING_FACTOR_LOWER_RADIUS\n    )\n    trustregion_shrinking_factor_upper_radius: NonNegativeFloat | None = (\n        TRUSTREGION_SHRINKING_FACTOR_UPPER_RADIUS\n    )\n  
  trustregion_threshold_successful: float = TRUSTREGION_THRESHOLD_SUCCESSFUL\n    trustregion_threshold_very_successful: float = TRUSTREGION_THRESHOLD_VERY_SUCCESSFUL\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        res = nag_pybobyqa_internal(\n            criterion=cast(\n                Callable[[NDArray[np.float64]], NDArray[np.float64]],\n                problem.fun,\n            ),\n            x=x0,\n            lower_bounds=problem.bounds.lower,\n            upper_bounds=problem.bounds.upper,\n            clip_criterion_if_overflowing=self.clip_criterion_if_overflowing,\n            convergence_minimal_trustregion_radius_tolerance=self.convergence_minimal_trustregion_radius_tolerance,  # noqa: E501\n            convergence_noise_corrected_criterion_tolerance=self.convergence_noise_corrected_criterion_tolerance,  # noqa: E501\n            convergence_slow_progress=self.convergence_slow_progress,\n            convergence_criterion_value=self.convergence_criterion_value,\n            initial_directions=self.initial_directions,\n            interpolation_rounding_error=self.interpolation_rounding_error,\n            noise_additive_level=self.noise_additive_level,\n            noise_multiplicative_level=self.noise_multiplicative_level,\n            noise_n_evals_per_point=self.noise_n_evals_per_point,\n            random_directions_orthogonal=self.random_directions_orthogonal,\n            seek_global_optimum=self.seek_global_optimum,\n            stopping_max_criterion_evaluations=self.stopping_max_criterion_evaluations,\n            threshold_for_safety_step=self.threshold_for_safety_step,\n            trustregion_expansion_factor_successful=self.trustregion_expansion_factor_successful,\n            trustregion_expansion_factor_very_successful=self.trustregion_expansion_factor_very_successful,  # noqa: E501\n            
trustregion_initial_radius=self.trustregion_initial_radius,\n            trustregion_minimum_change_hession_for_underdetermined_interpolation=self.trustregion_minimum_change_hession_for_underdetermined_interpolation,  # noqa: E501\n            trustregion_n_interpolation_points=self.trustregion_n_interpolation_points,\n            trustregion_precondition_interpolation=self.trustregion_precondition_interpolation,\n            trustregion_reset_options=self.trustregion_reset_options,\n            trustregion_shrinking_factor_not_successful=self.trustregion_shrinking_factor_not_successful,\n            trustregion_shrinking_factor_lower_radius=self.trustregion_shrinking_factor_lower_radius,\n            trustregion_shrinking_factor_upper_radius=self.trustregion_shrinking_factor_upper_radius,\n            trustregion_threshold_successful=self.trustregion_threshold_successful,\n            trustregion_threshold_very_successful=self.trustregion_threshold_very_successful,\n        )\n        return res\n\n\ndef nag_pybobyqa_internal(\n    criterion,\n    x,\n    lower_bounds,\n    upper_bounds,\n    clip_criterion_if_overflowing,\n    convergence_criterion_value,\n    convergence_minimal_trustregion_radius_tolerance,  # noqa: E501\n    convergence_noise_corrected_criterion_tolerance,  # noqa: E501\n    convergence_slow_progress,\n    initial_directions,\n    interpolation_rounding_error,\n    noise_additive_level,\n    noise_multiplicative_level,\n    noise_n_evals_per_point,\n    random_directions_orthogonal,\n    seek_global_optimum,\n    stopping_max_criterion_evaluations,\n    threshold_for_safety_step,\n    trustregion_expansion_factor_successful,\n    trustregion_expansion_factor_very_successful,  # noqa: E501\n    trustregion_initial_radius,\n    trustregion_minimum_change_hession_for_underdetermined_interpolation,\n    trustregion_n_interpolation_points,\n    trustregion_precondition_interpolation,\n    trustregion_reset_options,\n    
trustregion_shrinking_factor_not_successful,  # noqa: E501\n    trustregion_shrinking_factor_lower_radius,\n    trustregion_shrinking_factor_upper_radius,\n    trustregion_threshold_successful,\n    trustregion_threshold_very_successful,\n):\n    r\"\"\"Minimize a function using the BOBYQA algorithm.\n\n    For details see\n    :ref: `list_of_nag_algorithms`.\n\n    \"\"\"\n    if not IS_PYBOBYQA_INSTALLED:\n        raise NotInstalledError(\n            \"The 'nag_pybobyqa' algorithm requires the Py-BOBYQA package to be \"\n            \"installed. You can install it with 'pip install Py-BOBYQA'. \"\n            \"For additional installation instructions visit: \",\n            r\"https://numericalalgorithmsgroup.github.io/pybobyqa/build/html/\"\n            \"install.html\",\n        )\n    import pybobyqa\n\n    if convergence_criterion_value is None:\n        convergence_criterion_value = -np.inf\n\n    advanced_options, trustregion_reset_options = _create_nag_advanced_options(\n        x=x,\n        noise_multiplicative_level=noise_multiplicative_level,\n        noise_additive_level=noise_additive_level,\n        trustregion_initial_radius=trustregion_initial_radius,\n        noise_n_evals_per_point=noise_n_evals_per_point,\n        convergence_noise_corrected_criterion_tolerance=convergence_noise_corrected_criterion_tolerance,  # noqa: E501\n        trustregion_reset_options=trustregion_reset_options,\n        convergence_slow_progress=convergence_slow_progress,\n        interpolation_rounding_error=interpolation_rounding_error,\n        threshold_for_safety_step=threshold_for_safety_step,\n        clip_criterion_if_overflowing=clip_criterion_if_overflowing,\n        initial_directions=initial_directions,\n        random_directions_orthogonal=random_directions_orthogonal,\n        trustregion_precondition_interpolation=trustregion_precondition_interpolation,\n        trustregion_threshold_successful=trustregion_threshold_successful,\n        
trustregion_threshold_very_successful=trustregion_threshold_very_successful,\n        trustregion_shrinking_factor_not_successful=trustregion_shrinking_factor_not_successful,  # noqa: E501\n        trustregion_expansion_factor_successful=trustregion_expansion_factor_successful,\n        trustregion_expansion_factor_very_successful=trustregion_expansion_factor_very_successful,  # noqa: E501\n        trustregion_shrinking_factor_lower_radius=trustregion_shrinking_factor_lower_radius,  # noqa: E501\n        trustregion_shrinking_factor_upper_radius=trustregion_shrinking_factor_upper_radius,  # noqa: E501\n    )\n\n    pybobyqa_options = {\n        \"model.abs_tol\": convergence_criterion_value,\n        \"interpolation.minimum_change_hessian\": trustregion_minimum_change_hession_for_underdetermined_interpolation,  # noqa: E501\n        \"restarts.max_unsuccessful_restarts_total\": trustregion_reset_options[\n            \"max_unsuccessful_resets\"\n        ],\n        \"restarts.rhobeg_scale_after_unsuccessful_restart\": trustregion_reset_options[\n            \"trust_region_scaling_at_unsuccessful_reset\"\n        ],\n        \"restarts.hard.use_old_fk\": trustregion_reset_options[\n            \"reuse_criterion_value_at_hard_reset\"\n        ],\n        \"restarts.auto_detect.min_chg_model_slope\": trustregion_reset_options[\n            \"auto_detect_min_jacobian_increase\"\n        ],\n        \"logging.save_diagnostic_info\": True,\n        \"logging.save_xk\": True,\n    }\n\n    advanced_options.update(pybobyqa_options)\n\n    raw_res = pybobyqa.solve(\n        criterion,\n        x0=x,\n        bounds=(lower_bounds, upper_bounds),\n        maxfun=stopping_max_criterion_evaluations,\n        rhobeg=trustregion_initial_radius,\n        user_params=advanced_options,\n        scaling_within_bounds=False,\n        do_logging=False,\n        print_progress=False,\n        objfun_has_noise=noise_additive_level or noise_multiplicative_level,\n        
nsamples=noise_n_evals_per_point,\n        npt=trustregion_n_interpolation_points,\n        rhoend=convergence_minimal_trustregion_radius_tolerance,\n        seek_global_minimum=seek_global_optimum,\n    )\n\n    res = _process_nag_result(raw_res, len(x))\n\n    out = InternalOptimizeResult(\n        x=res[\"solution_x\"],\n        fun=res[\"solution_criterion\"],\n        success=res[\"success\"],\n        message=res[\"message\"],\n        n_iterations=res[\"n_iterations\"],\n    )\n\n    return out\n\n\ndef _process_nag_result(nag_result_obj, len_x):\n    \"\"\"Convert the NAG result object to our result dictionary.\n\n    Args:\n        nag_result_obj: NAG result object\n        len_x (int): length of the supplied parameters, i.e. the dimensionality of the\n            problem.\n\n\n    Returns:\n        results (dict): See :ref:`internal_optimizer_output` for details.\n\n    \"\"\"\n    if hasattr(nag_result_obj, \"f\"):\n        solution_fun = nag_result_obj.f\n    else:\n        solution_fun = nag_result_obj.obj\n\n    processed = {\n        \"solution_criterion\": solution_fun,\n        \"n_fun_evals\": nag_result_obj.nx,\n        \"message\": nag_result_obj.msg,\n        \"success\": nag_result_obj.flag == nag_result_obj.EXIT_SUCCESS,\n        \"reached_convergence_criterion\": None,\n        \"diagnostic_info\": nag_result_obj.diagnostic_info,\n    }\n    try:\n        n_iterations = int(nag_result_obj.diagnostic_info[\"iters_total\"].iloc[-1])\n        processed[\"n_iterations\"] = n_iterations\n    except (KeyboardInterrupt, SystemExit):\n        raise\n    except Exception:\n        processed[\"n_iterations\"] = None\n\n    if hasattr(nag_result_obj, \"states\"):\n        processed.update({\"states\": nag_result_obj.states})\n    if hasattr(nag_result_obj, \"history_params\"):\n        processed.update({\"history_params\": nag_result_obj.history_params})\n    if nag_result_obj.x is not None:\n        processed[\"solution_x\"] = nag_result_obj.x\n    
else:\n        processed[\"solution_x\"] = np.array([np.nan] * len_x)\n    return processed\n\n\ndef _create_nag_advanced_options(\n    x,\n    noise_multiplicative_level,\n    noise_additive_level,\n    trustregion_initial_radius,\n    noise_n_evals_per_point,\n    convergence_noise_corrected_criterion_tolerance,\n    trustregion_reset_options,\n    convergence_slow_progress,\n    interpolation_rounding_error,\n    threshold_for_safety_step,\n    clip_criterion_if_overflowing,\n    initial_directions,\n    random_directions_orthogonal,\n    trustregion_precondition_interpolation,\n    trustregion_threshold_successful,\n    trustregion_threshold_very_successful,\n    trustregion_shrinking_factor_not_successful,\n    trustregion_expansion_factor_successful,\n    trustregion_expansion_factor_very_successful,\n    trustregion_shrinking_factor_lower_radius,\n    trustregion_shrinking_factor_upper_radius,\n):\n    if noise_multiplicative_level is not None and noise_additive_level is not None:\n        raise ValueError(\"You cannot specify both multiplicative and additive noise.\")\n    if trustregion_initial_radius is None:\n        trustregion_initial_radius = calculate_trustregion_initial_radius(x)\n    # -np.inf as a default leads to errors when building the documentation with sphinx.\n    noise_n_evals_per_point = _change_evals_per_point_interface(noise_n_evals_per_point)\n    trustregion_reset_options = _build_options_dict(\n        user_input=trustregion_reset_options,\n        default_options=RESET_OPTIONS,\n    )\n    if trustregion_reset_options[\"reset_type\"] not in [\"soft\", \"hard\"]:\n        raise ValueError(\n            \"reset_type in the trustregion_reset_options must be soft or hard.\"\n        )\n    if initial_directions not in [\"coordinate\", \"random\"]:\n        raise ValueError(\"inital_directions must be either 'coordinate' or 'random'.\")\n    convergence_slow_progress = _build_options_dict(\n        user_input=convergence_slow_progress,\n  
      default_options=CONVERGENCE_SLOW_PROGRESS,\n    )\n\n    is_noisy = bool(noise_additive_level or noise_multiplicative_level)\n\n    advanced_options = {\n        \"general.rounding_error_constant\": interpolation_rounding_error,\n        \"general.safety_step_thresh\": threshold_for_safety_step,\n        \"general.check_objfun_for_overflow\": clip_criterion_if_overflowing,\n        \"tr_radius.eta1\": trustregion_threshold_successful,\n        \"tr_radius.eta2\": trustregion_threshold_very_successful,\n        \"tr_radius.gamma_dec\": trustregion_shrinking_factor_not_successful,\n        \"tr_radius.gamma_inc\": trustregion_expansion_factor_successful,\n        \"tr_radius.gamma_inc_overline\": trustregion_expansion_factor_very_successful,\n        \"tr_radius.alpha1\": trustregion_shrinking_factor_lower_radius,\n        \"tr_radius.alpha2\": trustregion_shrinking_factor_upper_radius,\n        \"init.random_initial_directions\": initial_directions == \"random\",\n        \"init.random_directions_make_orthogonal\": random_directions_orthogonal,\n        \"slow.thresh_for_slow\": convergence_slow_progress[\n            \"threshold_to_characterize_as_slow\"\n        ],\n        \"slow.max_slow_iters\": convergence_slow_progress[\n            \"max_insufficient_improvements\"\n        ],\n        \"slow.history_for_slow\": convergence_slow_progress[\"comparison_period\"],\n        \"noise.multiplicative_noise_level\": noise_multiplicative_level,\n        \"noise.additive_noise_level\": noise_additive_level,\n        \"noise.quit_on_noise_level\": (\n            convergence_noise_corrected_criterion_tolerance > 0\n        )\n        and is_noisy,\n        \"noise.scale_factor_for_quit\": convergence_noise_corrected_criterion_tolerance,\n        \"interpolation.precondition\": trustregion_precondition_interpolation,\n        \"restarts.use_restarts\": trustregion_reset_options[\"use_resets\"],\n        \"restarts.max_unsuccessful_restarts\": 
trustregion_reset_options[\n            \"max_consecutive_unsuccessful_resets\"\n        ],\n        \"restarts.rhoend_scale\": trustregion_reset_options[\n            \"minimal_trustregion_radius_tolerance_scaling_at_reset\"\n        ],\n        \"restarts.use_soft_restarts\": trustregion_reset_options[\"reset_type\"] == \"soft\",\n        \"restarts.soft.move_xk\": trustregion_reset_options[\"move_center_at_soft_reset\"],\n        \"restarts.soft.max_fake_successful_steps\": trustregion_reset_options[\n            \"max_iterations_without_new_best_after_soft_reset\"\n        ],\n        \"restarts.auto_detect\": trustregion_reset_options[\"auto_detect\"],\n        \"restarts.auto_detect.history\": trustregion_reset_options[\n            \"auto_detect_history\"\n        ],\n        \"restarts.auto_detect.min_correl\": trustregion_reset_options[\n            \"auto_detect_min_correlations\"\n        ],\n        \"restarts.soft.num_geom_steps\": trustregion_reset_options[\n            \"points_to_replace_at_soft_reset\"\n        ],\n    }\n\n    return advanced_options, trustregion_reset_options\n\n\ndef _change_evals_per_point_interface(func):\n    \"\"\"Change the interface of the user supplied function to the one expected by NAG.\n\n    Args:\n        func (callable or None): function mapping from our names to\n            noise_n_evals_per_point.\n\n    Returns:\n        adjusted_noise_n_evals_per_point (callable): function mapping from the\n            argument names expected by pybobyqa and df-ols to noise_n_evals_per_point.\n\n    \"\"\"\n    if func is not None:\n\n        def adjusted_noise_n_evals_per_point(delta, rho, iter, nrestarts):  # noqa: A002\n            return func(\n                upper_trustregion_radius=delta,\n                lower_trustregion_radius=rho,\n                n_iterations=iter,\n                n_resets=nrestarts,\n            )\n\n        return adjusted_noise_n_evals_per_point\n\n\ndef _build_options_dict(user_input, 
default_options):\n    \"\"\"Create the full dictionary of trust region fast start options from user input.\n\n    Args:\n        user_input (dict or None): dictionary to update the default options with.\n            May only contain keys present in the default options.\n        default_options (dict): the default values.\n\n    Returns:\n        full_options (dict)\n\n    \"\"\"\n    full_options = default_options.copy()\n    user_input = {} if user_input is None else user_input\n    invalid = [x for x in user_input if x not in full_options]\n    if len(invalid) > 0:\n        raise ValueError(\n            f\"You specified illegal options {', '.join(invalid)}. Allowed are: , \".join(\n                full_options.keys()\n            )\n        )\n    full_options.update(user_input)\n    return full_options\n\n\ndef _get_fast_start_method(user_value):\n    \"\"\"Get fast start method arguments from user value.\"\"\"\n    allowed_values = [\"auto\", \"jacobian\", \"trustregion\"]\n    if user_value not in allowed_values:\n        raise ValueError(\n            \"`perturb_jacobian_or_trustregion_step` must be one of \"\n            f\"{allowed_values}. You provided {user_value}.\"\n        )\n    if user_value == \"auto\":\n        faststart_jac = None\n        faststart_step = None\n    else:\n        faststart_jac = user_value == \"jacobian\"\n        faststart_step = not faststart_jac\n\n    return faststart_jac, faststart_step\n"
  },
  {
    "path": "src/optimagic/optimizers/neldermead.py",
    "content": "\"\"\"Implementation of parallelosation of Nelder-Mead algorithm.\"\"\"\n\nfrom dataclasses import dataclass\nfrom typing import Callable, Literal, cast\n\nimport numpy as np\nfrom numpy.typing import NDArray\n\nfrom optimagic import mark\nfrom optimagic.batch_evaluators import process_batch_evaluator\nfrom optimagic.optimization.algo_options import (\n    CONVERGENCE_SECOND_BEST_FTOL_ABS,\n    CONVERGENCE_SECOND_BEST_XTOL_ABS,\n    STOPPING_MAXITER,\n)\nfrom optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult\nfrom optimagic.optimization.internal_optimization_problem import (\n    InternalOptimizationProblem,\n)\nfrom optimagic.typing import AggregationLevel, NonNegativeFloat, PositiveInt\n\nInitSimplexLiteral = Literal[\"pfeffer\", \"nash\", \"gao_han\", \"varadhan_borchers\"]\nInitSimplexCallable = Callable[[NDArray[np.float64]], NDArray[np.float64]]\nfrom optimagic.typing import BatchEvaluator, BatchEvaluatorLiteral\n\n\n@mark.minimizer(\n    name=\"neldermead_parallel\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=True,\n    is_global=False,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=True,\n    supports_bounds=False,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=True,\n)\n@dataclass(frozen=True)\nclass NelderMeadParallel(Algorithm):\n    r\"\"\"Parallel Nelder-Mead algorithm following Lee D., Wiswall M., A parallel\n    implementation of the simplex function minimization routine, Computational\n    Economics, 2007.\n\n    Parameters\n    ----------\n    criterion (callable): A function that takes a Numpy array_like as\n        an argument and return scalar floating point.\n\n    x (array_like): 1-D array of initial value of parameters\n\n    init_simplex_method (string or callable): Name of the method to create initial\n        simplex or callable which takes as 
an argument initial value of parameters\n        and returns initial simplex as j+1 x j array, where j is length of x.\n        The default is \"gao_han\".\n\n    n_cores (int): Degrees of parallization. The default is 1 (no parallelization).\n\n    adaptive (bool): Adjust parameters of Nelder-Mead algorithm to accounf\n        for simplex size.\n        The default is True.\n\n    stopping_maxiter (int): Maximum number of algorithm iterations.\n        The default is STOPPING_MAX_ITERATIONS.\n\n    convergence_ftol_abs (float): maximal difference between\n        function value evaluated on simplex points.\n\n    convergence_xtol_abs (float): maximal distance between points in\n        the simplex.\n\n    batch_evaluator (string or callable): See :ref:`batch_evaluators` for\n        details. Default \"joblib\".\n\n    Returns:\n    -------\n    TYPE\n        DESCRIPTION.\n\n    \"\"\"\n\n    init_simplex_method: InitSimplexLiteral | InitSimplexCallable = \"gao_han\"\n    n_cores: PositiveInt = 1\n    adaptive: bool = True\n    stopping_maxiter: PositiveInt = STOPPING_MAXITER\n    convergence_ftol_abs: NonNegativeFloat = CONVERGENCE_SECOND_BEST_FTOL_ABS\n    convergence_xtol_abs: NonNegativeFloat = CONVERGENCE_SECOND_BEST_XTOL_ABS\n    batch_evaluator: BatchEvaluator | BatchEvaluatorLiteral = \"joblib\"\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        raw = neldermead_parallel(\n            criterion=cast(\n                Callable[[NDArray[np.float64]], float],\n                problem.fun,\n            ),\n            x=x0,\n            init_simplex_method=self.init_simplex_method,\n            n_cores=self.n_cores,\n            adaptive=self.adaptive,\n            stopping_maxiter=self.stopping_maxiter,\n            convergence_ftol_abs=self.convergence_ftol_abs,\n            convergence_xtol_abs=self.convergence_xtol_abs,\n            
batch_evaluator=self.batch_evaluator,\n        )\n\n        res = InternalOptimizeResult(\n            x=raw[\"solution_x\"],\n            fun=raw[\"solution_criterion\"],\n            n_iterations=raw[\"n_iterations\"],\n            success=raw[\"success\"],\n            message=raw[\"reached_convergence_criterion\"],\n        )\n\n        return res\n\n\ndef neldermead_parallel(\n    criterion,\n    x,\n    *,\n    init_simplex_method=\"gao_han\",\n    n_cores=1,\n    adaptive=True,\n    stopping_maxiter=STOPPING_MAXITER,\n    convergence_ftol_abs=CONVERGENCE_SECOND_BEST_FTOL_ABS,\n    convergence_xtol_abs=CONVERGENCE_SECOND_BEST_XTOL_ABS,\n    batch_evaluator=\"joblib\",\n):\n    if x.ndim >= 1:\n        x = x.ravel()  # check if the vector of initial values is one-dimensional\n\n    j = len(x)  # size of the parameter vector\n\n    if n_cores <= 1:\n        p = 1  # if number of cores is nonpositive, set it to 1\n    else:\n        if n_cores >= j:  # number of parallelisation cannot be bigger than\n            # the number of parameters minus 1\n            p = int(j - 1)\n        else:\n            p = int(n_cores)\n\n    # set parameters of Nelder-Mead algorithm\n    # for a discussion about Nlder-Mead parameters see Gao F., Han L., Implementing the\n    # Nelder-Mead siplex algorithm with adaptive parameters, Computational Optimization\n    # and Applications, 2012\n    alpha, gamma, beta, tau = _init_algo_params(adaptive, j)\n\n    # construct initial simplex using one of feasible methods\n    # see Wssing, Simon, Proper initialization is crucial for\n    # the Nelder-Mead simplex search, Optimization Letters, 2019\n    # for a discussion about the choice of initialization\n\n    if not callable(init_simplex_method):\n        s = globals()[\"_\" + init_simplex_method](x)\n    else:\n        s = init_simplex_method(x)\n\n    batch_evaluator = process_batch_evaluator(batch_evaluator)\n\n    # calculate criterion values for the initial simplex\n    f_s = 
np.array(batch_evaluator(func=criterion, arguments=s, n_cores=n_cores))[\n        :, None\n    ]\n\n    # parallelized function\n    def func_parallel(args):\n        criterion, s_j, s_j_r, f_s_0, f_s_j, f_s_j_1, m = args  # read arguments\n\n        f_s_j_r = criterion(\n            s_j_r\n        )  # calculate value of the criterion at the reflection point\n\n        if f_s_j_r < f_s_0:  # if the reflection point is better than the best point\n            s_j_e = m + gamma * (s_j_r - m)  # calculate expansion point\n            f_s_j_e = criterion(\n                s_j_e\n            )  # calculate value of the criterion at the expansion point\n\n            if f_s_j_e < f_s_0:  # if the expansion point is better than the best point\n                return np.hstack(\n                    [s_j_e, f_s_j_e, 0]\n                )  # return the expansion point as a new point\n\n            else:  # if the expansion point is worse than the best point\n                return np.hstack(\n                    [s_j_r, f_s_j_r, 0]\n                )  # return the reflection point as a new point\n\n        elif (\n            f_s_j_r < f_s_j_1\n        ):  # if reflection point is better than the next worst point\n            return np.hstack(\n                [s_j_r, f_s_j_r, 0]\n            )  # return reflection point as a new point\n\n        else:  # if the reflection point is worse than the next worst point\n            if (\n                f_s_j_r < f_s_j\n            ):  # if value of the criterion at reflection point is better than\n                # value of the criterion at initial point\n                s_j_c = m + beta * (s_j_r - m)  # calculate outside contraction point\n            else:\n                s_j_c = m - beta * (s_j_r - m)  # calculate inside contraction point\n\n            f_s_j_c = criterion(\n                s_j_c\n            )  # calculate a value of the criterion at contraction point\n\n            if f_s_j_c < np.minimum(\n                
f_s_j, f_s_j_r\n            ):  # if ta value of the criterion at contraction point is better\n                # than original and refrelction point\n\n                return np.hstack(\n                    [s_j_c, f_s_j_c, 0]\n                )  # return contraction point as as new point\n\n            else:\n                if f_s_j_r < f_s_j:\n                    return np.hstack(\n                        [s_j_r, f_s_j_r, 1]\n                    )  # return reflection point as a new point\n\n                else:  # if value of the criterion at contraction point is worse\n                    # than the value uf the criterion at the reflection\n                    # and the initial points\n                    return np.hstack(\n                        [s_j, f_s_j, 1]\n                    )  # return the old point as a new point\n\n    optimal = False  # optmisation condition, if True stop the algorithem\n    iterations = 0  # number of criterion evaluations\n\n    while not optimal:\n        iterations += 1  # new iteration\n\n        # sort points and arguments increasing\n        row = np.argsort(f_s.ravel())\n        s = np.take(s, row, axis=0)\n        f_s = np.take(f_s, row, axis=0)\n\n        # calculate centroid\n        m = (s[:-p, :].sum(axis=0)) / (j - p + 1)\n\n        # calculate reflaction points\n        s_j_r = m + alpha * (m - s[-p:, :])\n\n        # calculate new points of simplex\n        s[-p:, :], f_s[-p:, :], shrink_count = np.split(\n            np.vstack(\n                batch_evaluator(\n                    func=func_parallel,\n                    arguments=tuple(\n                        (\n                            criterion,\n                            s[j + 1 - p + i, :],\n                            s_j_r[i, :],\n                            f_s[0, :],\n                            f_s[j + 1 - p + i, :],\n                            f_s[j - p + i, :],\n                            m,\n                        )\n                       
 for i in range(p)\n                    ),\n                    n_cores=p,\n                )\n            ),\n            [-2, -1],\n            axis=1,\n        )\n\n        # shrink simplex if there is no improvement in every process\n        if shrink_count.sum() == p:\n            s = (\n                tau * s[0:1, :] + (1 - tau) * s\n            )  # new simplex is a linear combination of the best point\n            # and remaining points\n            # evaluate function at new simplex\n            f_s = np.array(\n                batch_evaluator(\n                    func=criterion,\n                    arguments=s,\n                    n_cores=n_cores,\n                )\n            )[:, None]\n\n        # termination criteria\n        if (\n            np.max(np.abs(f_s[0, :] - f_s[1:, :])) <= convergence_ftol_abs\n            and np.max(np.abs(s[0, :] - s[1:,])) <= convergence_xtol_abs\n        ):\n            optimal = True\n            converge = True\n            reason_to_stop = \"Termination codition satisfied\"\n        elif (\n            iterations >= stopping_maxiter\n        ):  # if maximum amount of iteration is exceeded\n            optimal = True\n            converge = False\n            reason_to_stop = \"Maximum number of interation exceeded\"\n\n    # save results\n    result = {\n        \"solution_x\": s[np.nonzero(f_s == f_s.min())[0][0], :],\n        \"solution_criterion\": f_s.min(),\n        \"n_iterations\": iterations,\n        \"success\": converge,\n        \"reached_convergence_criterion\": reason_to_stop,\n    }\n    return result\n\n\n# set parameters of Nelder-Mead algorithm\n# for a discussion about Nlder-Mead parameters see Gao F., Han L., Implementing the\n# Nelder-Mead siplex algorithm with adaptive parameters, Computational Optimization\n# and Applications, 2012\ndef _init_algo_params(adaptive, j):\n    if adaptive:\n        # algorithem parameters alla Gao-Han (adaptive)\n        return (\n            1,\n           
 1 + 2 / j,\n            0.75 - 1 / (2 * j),\n            1 - 1 / j,\n        )\n    else:\n        # standard setting of Nelder-Mead\n        return (\n            1,\n            2,\n            0.5,\n            0.5,\n        )\n\n\n# initial structure of the simplex\ndef _init_simplex(x):\n    s = np.vstack(\n        [\n            x,\n        ]\n        * (len(x) + 1)\n    ).astype(np.float64)\n\n    return s\n\n\n# initilize due to L. Pfeffer at Standford (Matlab fminsearch and SciPy default option)\ndef _pfeffer(x):\n    s = _init_simplex(x)\n\n    # method parameters\n    c_p = 1.05\n\n    # initial simplex\n    np.fill_diagonal(s[1:, :], x * c_p * (x != 0) + 0.00025 * (x == 0))\n\n    return s\n\n\n# adopted from Nash (R default option)\n# see Nash, J.C.: Compact numerical methods for computers: linear algebra and\n# function minimisation, 2nd edn. Adam Hilger Ltd., Bristol (1990) for details\ndef _nash(x):\n    s = _init_simplex(x)\n\n    # method parameters\n    c_n = 0.1\n\n    # initial simplex\n    np.fill_diagonal(s[1:, :], (x != 0) * (np.max(x) * c_n + x) + c_n * (x == 0))\n    return s\n\n\n# adopted from Gao F., Han L., Implementing the\n# Nelder-Mead siplex algorithm with adaptive parameters, Computational Optimizatio\ndef _gao_han(x):\n    s = _init_simplex(x)\n\n    # method parameters\n    c_h = np.minimum(np.maximum(np.max(x), 1), 10)\n    j = len(x)\n\n    # initial simplex\n    s = (\n        s\n        + np.vstack(\n            [\n                np.array([[(1 - (j + 1) ** 0.5) / j]]) * np.ones([1, j]),\n                np.eye(j),\n            ]\n        )\n        * c_h\n    )\n\n    return s\n\n\n# adopted by Varadhan and Borchers for the R package dfoptim\n# see Varadhan, R., Borchers, H.W.: Dfoptim: derivative-free optimization (2016).\n# https://CRAN.R-project. org/package=dfoptim. 
R package version 2016.7-1 for details\ndef _varadhan_borchers(x):\n    s = _init_simplex(x)\n\n    # method parameters\n    j = len(x)\n    c_s = np.maximum(1, ((x**2).sum()) ** 0.5)\n    beta1 = c_s / (j * 2**0.5) * ((j + 1) ** 0.5 + j - 1)\n    beta2 = c_s / (j * 2**0.5) * ((j + 1) ** 0.5 - 1)\n\n    # initial simplex\n    s[1:, :] = s[1:, :] + np.full([j, j], beta2) + np.eye(j) * (beta1 - beta2)\n\n    return s\n"
  },
  {
    "path": "src/optimagic/optimizers/nevergrad_optimizers.py",
    "content": "\"\"\"Implement optimizers from the nevergrad package.\"\"\"\n\nfrom __future__ import annotations\n\nimport math\nfrom dataclasses import dataclass\nfrom typing import TYPE_CHECKING, Any, Literal\n\nimport numpy as np\nfrom numpy.typing import NDArray\n\nfrom optimagic import mark\nfrom optimagic.config import IS_BAYESOPTIM_INSTALLED, IS_NEVERGRAD_INSTALLED\nfrom optimagic.exceptions import NotInstalledError\nfrom optimagic.optimization.algo_options import (\n    CONVERGENCE_FTOL_ABS,\n    CONVERGENCE_FTOL_REL,\n    CONVERGENCE_XTOL_ABS,\n    STOPPING_MAXFUN_GLOBAL,\n    STOPPING_MAXITER,\n)\nfrom optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult\nfrom optimagic.optimization.internal_optimization_problem import (\n    InternalOptimizationProblem,\n)\nfrom optimagic.typing import (\n    AggregationLevel,\n    NonNegativeFloat,\n    NonNegativeInt,\n    PositiveFloat,\n    PositiveInt,\n)\n\nif TYPE_CHECKING:\n    from nevergrad.optimization.base import ConfiguredOptimizer\n\n\nNEVERGRAD_NOT_INSTALLED_ERROR = (\n    \"This optimizer requires the 'nevergrad' package to be installed. \"\n    \"You can install it with `pip install nevergrad`. 
\"\n    \"Visit https://facebookresearch.github.io/nevergrad/getting_started.html \"\n    \"for more detailed installation instructions.\"\n)\n\n\n@mark.minimizer(\n    name=\"nevergrad_pso\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_NEVERGRAD_INSTALLED,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=True,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass NevergradPSO(Algorithm):\n    \"\"\"Minimize a scalar function using the Particle Swarm Optimization algorithm.\n\n    The Particle Swarm Optimization algorithm was originally proposed by\n    :cite:`Kennedy1995`.The implementation in Nevergrad is based on\n    :cite:`Zambrano2013`.\n\n    PSO solves an optimization problem by evolving a swarm of particles\n    (candidate solutions) across the search space. Each particle adjusts its position\n    based on its own experience (cognitive component) and the experiences\n    of its neighbors or the swarm (social component), using velocity updates. 
The\n    algorithm iteratively guides the swarm toward promising regions of the search\n    space.\n\n    \"\"\"\n\n    transform: Literal[\"arctan\", \"gaussian\", \"identity\"] = \"arctan\"\n    \"\"\"The transform used to map from PSO optimization space to real space.\"\"\"\n\n    population_size: int | None = None\n    \"\"\"The number of particles in the swarm.\"\"\"\n\n    n_cores: int = 1\n    \"\"\"The number of CPU cores to use for parallel computation.\"\"\"\n\n    seed: int | None = None\n    \"\"\"Random seed for reproducibility.\"\"\"\n\n    stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL\n    \"\"\"Maximum number of function evaluations.\"\"\"\n\n    inertia: float = 0.5 / math.log(2.0)\n    r\"\"\"Inertia weight ω.\n\n    Controls the influence of a particle's previous velocity. Must be less than 1 to\n    avoid divergence.\n\n    \"\"\"\n\n    cognitive: float = 0.5 + math.log(2.0)\n    r\"\"\"Cognitive coefficient :math:`\\phi_p`.\n\n    Controls the influence of a particle's own best known position. Typical values: 1.0\n    to 3.0.\n\n    \"\"\"\n\n    social: float = 0.5 + math.log(2.0)\n    r\"\"\"Social coefficient.\n\n    Denoted by :math:`\\phi_g`. Controls the influence of the swarm's best known\n    position. 
Typical values: 1.0 to 3.0.\n\n    \"\"\"\n\n    quasi_opp_init: bool = False\n    \"\"\"Whether to use quasi-opposition initialization.\n\n    Default is False.\n\n    \"\"\"\n\n    speed_quasi_opp_init: bool = False\n    \"\"\"Whether to apply quasi-opposition initialization to speed.\n\n    Default is False.\n\n    \"\"\"\n\n    special_speed_quasi_opp_init: bool = False\n    \"\"\"Whether to use special quasi-opposition initialization for speed.\n\n    Default is False.\n\n    \"\"\"\n\n    sigma: float | None = None\n    \"\"\"Standard deviation for sampling initial population from N(0, σ²) in case bounds\n    are not provided.\"\"\"\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        if not IS_NEVERGRAD_INSTALLED:\n            raise NotInstalledError(NEVERGRAD_NOT_INSTALLED_ERROR)\n\n        import nevergrad as ng\n\n        configured_optimizer = ng.optimizers.ConfPSO(\n            transform=self.transform,\n            popsize=self.population_size,\n            omega=self.inertia,\n            phip=self.cognitive,\n            phig=self.social,\n            qo=self.quasi_opp_init,\n            sqo=self.speed_quasi_opp_init,\n            so=self.special_speed_quasi_opp_init,\n        )\n\n        res = _nevergrad_internal(\n            problem=problem,\n            x0=x0,\n            configured_optimizer=configured_optimizer,\n            stopping_maxfun=self.stopping_maxfun,\n            n_cores=self.n_cores,\n            seed=self.seed,\n            sigma=self.sigma,\n            nonlinear_constraints=problem.nonlinear_constraints,\n        )\n\n        return res\n\n\n@mark.minimizer(\n    name=\"nevergrad_cmaes\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_NEVERGRAD_INSTALLED,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=True,\n    supports_bounds=True,\n    
supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass NevergradCMAES(Algorithm):\n    \"\"\"Minimize a scalar function using the Covariance Matrix Adaptation Evolution\n    Strategy (CMA-ES) algorithm.\n\n    The CMA-ES is a state-of-the-art evolutionary algorithm for difficult non-linear,\n    non-convex, black-box optimization problems in continuous domains. It is typically\n    applied to unconstrained or bounded problems with dimensionality between 3 and 100.\n    CMA-ES adapts a multivariate normal distribution to approximate the objective\n    function's shape by estimating a positive-definite covariance matrix, akin to the\n    inverse Hessian in convex-quadratic problems, but without requiring derivatives.\n\n    This implementation is a python wrapper over the original code.\n\n    Original paper can be accessed at `cma-es\n    <https://cma-es.github.io/>`_.\n\n    \"\"\"\n\n    scale: NonNegativeFloat = 1.0\n    \"\"\"Scale of the search.\"\"\"\n\n    elitist: bool = False\n    \"\"\"Whether to switch to elitist mode (also known as (μ,λ)-CMA-ES).\n\n    In elitist mode, the best point in the population is always retained.\n\n    \"\"\"\n\n    population_size: int | None = None\n    \"\"\"Population size.\"\"\"\n\n    diagonal: bool = False\n    \"\"\"Use the diagonal version of CMA, which is more efficient for high-dimensional\n    problems.\"\"\"\n\n    high_speed: bool = False\n    \"\"\"Use a metamodel for recommendation to speed up optimization.\"\"\"\n\n    fast_cmaes: bool = False\n    \"\"\"Use the fast CMA-ES implementation.\n\n    Cannot be used with diagonal=True. 
Produces equivalent results and is preferable for\n    high dimensions or when objective function evaluations are fast.\n\n    \"\"\"\n\n    random_init: bool = False\n    \"\"\"If True, initialize the optimizer with random parameters.\"\"\"\n\n    n_cores: PositiveInt = 1\n    \"\"\"Number of cores to use for parallel function evaluation.\"\"\"\n\n    step_size_adaptive: bool | str = True\n    \"\"\"Whether to adapt the step size.\n\n    Can be a boolean or a string specifying the adaptation strategy.\n\n    \"\"\"\n\n    CSA_dampfac: PositiveFloat = 1.0\n    \"\"\"Damping factor for step size adaptation.\"\"\"\n\n    CMA_dampsvec_fade: PositiveFloat = 0.1\n    \"\"\"Damping rate for step size adaptation.\"\"\"\n\n    CSA_squared: bool = False\n    \"\"\"Whether to use squared step sizes in updates.\"\"\"\n\n    CMA_on: float = 1.0\n    \"\"\"Learning rate for the covariance matrix update.\"\"\"\n\n    CMA_rankone: float = 1.0\n    \"\"\"Multiplier for the rank-one update learning rate of the covariance matrix.\"\"\"\n\n    CMA_rankmu: float = 1.0\n    \"\"\"Multiplier for the rank-mu update learning rate of the covariance matrix.\"\"\"\n\n    CMA_cmean: float = 1.0\n    \"\"\"Learning rate for the mean update.\"\"\"\n\n    CMA_diagonal_decoding: float = 0.0\n    \"\"\"Learning rate for the diagonal update.\"\"\"\n\n    num_parents: int | None = None\n    \"\"\"Number of parents (μ) for recombination.\"\"\"\n\n    CMA_active: bool = True\n    \"\"\"Whether to use negative updates for the covariance matrix.\"\"\"\n\n    CMA_mirrormethod: Literal[0, 1, 2] = 2\n    \"\"\"Strategy for mirror sampling.\n\n    0: Unconditional, 1: Selective, 2: Selective\n    with delay.\n\n    \"\"\"\n\n    CMA_const_trace: bool | Literal[\"arithm\", \"geom\", \"aeig\", \"geig\"] = False\n    \"\"\"How to normalize the trace of the covariance matrix.\n\n    False: No normalization,\n    True: Normalize to 1. 
Other options: 'arithm', 'geom', 'aeig', 'geig'.\n\n    \"\"\"\n\n    CMA_diagonal: int | bool = False\n    \"\"\"Number of iterations to use diagonal covariance matrix before switching to full\n    matrix.\n\n    If False, always use full matrix.\n\n    \"\"\"\n\n    stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL\n    \"\"\"Maximum number of function evaluations before termination.\"\"\"\n\n    stopping_maxiter: PositiveInt = STOPPING_MAXITER\n    \"\"\"Maximum number of iterations before termination.\"\"\"\n\n    stopping_maxtime: PositiveFloat = float(\"inf\")\n    \"\"\"Maximum time in seconds before termination.\"\"\"\n\n    stopping_cov_mat_cond: NonNegativeFloat = 1e14\n    \"\"\"Maximum condition number of the covariance matrix before termination.\"\"\"\n\n    convergence_ftol_abs: NonNegativeFloat = CONVERGENCE_FTOL_ABS\n    \"\"\"Absolute tolerance on function value changes for convergence.\"\"\"\n\n    convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL\n    \"\"\"Relative tolerance on function value changes for convergence.\"\"\"\n\n    convergence_xtol_abs: NonNegativeFloat = CONVERGENCE_XTOL_ABS\n    \"\"\"Absolute tolerance on parameter changes for convergence.\"\"\"\n\n    convergence_iter_noimprove: PositiveInt | None = None\n    \"\"\"Number of iterations without improvement before termination.\"\"\"\n\n    invariant_path: bool = False\n    \"\"\"Whether evolution path (pc) should be invariant to transformations.\"\"\"\n\n    eval_final_mean: bool = True\n    \"\"\"Whether to evaluate the final mean solution.\"\"\"\n\n    seed: int | None = None\n    \"\"\"Seed used by the internal random number generator for reproducibility.\"\"\"\n\n    sigma: float | None = None\n    \"\"\"Standard deviation for sampling initial population from N(0, σ²)in case bounds\n    are not provided.\"\"\"\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n    
    if not IS_NEVERGRAD_INSTALLED:\n            raise NotInstalledError(NEVERGRAD_NOT_INSTALLED_ERROR)\n\n        import nevergrad as ng\n\n        cma_options = {\n            \"AdaptSigma\": self.step_size_adaptive,\n            \"CSA_dampfac\": self.CSA_dampfac,\n            \"CMA_dampsvec_fade\": self.CMA_dampsvec_fade,\n            \"CSA_squared\": self.CSA_squared,\n            \"CSA_invariant_path\": self.invariant_path,\n            \"CMA_on\": self.CMA_on,\n            \"CMA_rankone\": self.CMA_rankone,\n            \"CMA_rankmu\": self.CMA_rankmu,\n            \"CMA_cmean\": self.CMA_cmean,\n            \"CMA_diagonal_decoding\": self.CMA_diagonal_decoding,\n            \"CMA_mu\": self.num_parents,\n            \"CMA_active\": self.CMA_active,\n            \"CMA_mirrormethod\": self.CMA_mirrormethod,\n            \"CMA_const_trace\": self.CMA_const_trace,\n            \"CMA_diagonal\": self.CMA_diagonal,\n            \"maxfevals\": self.stopping_maxfun,\n            \"maxiter\": self.stopping_maxiter,\n            \"timeout\": self.stopping_maxtime,\n            \"tolconditioncov\": self.stopping_cov_mat_cond,\n            \"tolfun\": self.convergence_ftol_abs,\n            \"tolfunrel\": self.convergence_ftol_rel,\n            \"tolx\": self.convergence_xtol_abs,\n            \"tolstagnation\": self.convergence_iter_noimprove,\n            \"eval_final_mean\": self.eval_final_mean,\n        }\n\n        configured_optimizer = ng.optimizers.ParametrizedCMA(\n            scale=self.scale,\n            popsize=self.population_size,\n            elitist=self.elitist,\n            diagonal=self.diagonal,\n            high_speed=self.high_speed,\n            fcmaes=self.fast_cmaes,\n            inopts=cma_options,\n        )\n\n        res = _nevergrad_internal(\n            problem=problem,\n            x0=x0,\n            configured_optimizer=configured_optimizer,\n            stopping_maxfun=self.stopping_maxfun,\n            n_cores=self.n_cores,\n        
    seed=self.seed,\n            sigma=self.sigma,\n            nonlinear_constraints=problem.nonlinear_constraints,\n        )\n        return res\n\n\n@mark.minimizer(\n    name=\"nevergrad_oneplusone\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_NEVERGRAD_INSTALLED,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=True,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass NevergradOnePlusOne(Algorithm):\n    \"\"\"Minimize a scalar function using the One-Plus-One Evolutionary algorithm.\n\n    The One-Plus-One evolutionary algorithm iterates to find a set of parameters\n    that minimizes the loss function. It does this by perturbing, or mutating,\n    the parameters from the last iteration (the parent). If the new (child)\n    parameters yield a better result, the child becomes the new parent whose\n    parameters are perturbed, perhaps more aggressively. If the parent yields a\n    better result, it remains the parent and the next perturbation is less\n    aggressive.\n\n    Originally proposed by :cite:`Rechenberg1973`. The implementation in\n    Nevergrad is based on the one-fifth adaptation rule from :cite:`Schumer1968`.\n\n    \"\"\"\n\n    noise_handling: (\n        Literal[\"random\", \"optimistic\"]\n        | tuple[Literal[\"random\", \"optimistic\"], float]\n        | None\n    ) = None\n    \"\"\"Method for handling noise.\n\n    'random' reevaluates a random point, while 'optimistic' reevaluates the best\n    optimistic point. 
A float coefficient can be provided to tune the regularity of\n    these reevaluations.\n\n    \"\"\"\n\n    mutation: Literal[\n        \"gaussian\",\n        \"cauchy\",\n        \"discrete\",\n        \"fastga\",\n        \"rls\",\n        \"doublefastga\",\n        \"adaptive\",\n        \"coordinatewise_adaptive\",\n        \"portfolio\",\n        \"discreteBSO\",\n        \"lengler\",\n        \"lengler2\",\n        \"lengler3\",\n        \"lenglerhalf\",\n        \"lenglerfourth\",\n        \"doerr\",\n        \"lognormal\",\n        \"xlognormal\",\n        \"xsmalllognormal\",\n        \"tinylognormal\",\n        \"smalllognormal\",\n        \"biglognormal\",\n        \"hugelognormal\",\n    ] = \"gaussian\"\n    \"\"\"Type of mutation to apply.\n\n    'gaussian' is the default. Other options include 'cauchy', 'discrete', 'fastga',\n    'rls', and 'portfolio'.\n\n    \"\"\"\n\n    annealing: (\n        Literal[\n            \"none\", \"Exp0.9\", \"Exp0.99\", \"Exp0.9Auto\", \"Lin100.0\", \"Lin1.0\", \"LinAuto\"\n        ]\n        | None\n    ) = None\n    \"\"\"Annealing schedule for mutation amplitude.\n\n    Can be 'none', exponential (e.g., 'Exp0.9'), or linear (e.g., 'Lin100.0').\n\n    \"\"\"\n\n    sparse: bool = False\n    \"\"\"Whether to apply random mutations that set variables to zero.\"\"\"\n\n    super_radii: bool = False\n    \"\"\"Whether to apply extended radii beyond standard bounds for candidate generation,\n    enabling broader exploration.\"\"\"\n\n    smoother: bool = False\n    \"\"\"Whether to suggest smooth mutations.\"\"\"\n\n    roulette_size: PositiveInt = 64\n    \"\"\"Size of the roulette wheel used for selection, affecting sampling diversity from\n    past candidates.\"\"\"\n\n    antismooth: NonNegativeInt = 4\n    \"\"\"Degree of anti-smoothing to prevent premature convergence by penalizing overly\n    smooth improvements.\"\"\"\n\n    crossover: bool = False\n    \"\"\"Whether to include a genetic crossover step every 
other iteration.\"\"\"\n\n    crossover_type: (\n        Literal[\"none\", \"rand\", \"max\", \"min\", \"onepoint\", \"twopoint\"] | None\n    ) = None\n    \"\"\"Method for genetic crossover.\n\n    Options include 'rand', 'onepoint', and 'twopoint'.\n\n    \"\"\"\n\n    tabu_length: NonNegativeInt = 1000\n    \"\"\"Length of the tabu list to prevent revisiting recent candidates and help escape\n    local minima.\"\"\"\n\n    rotation: bool = False\n    \"\"\"Whether to apply rotational transformations to the search space to enhance search\n    performance.\"\"\"\n\n    seed: int | None = None\n    \"\"\"Seed for the random number generator for reproducibility.\"\"\"\n\n    stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL\n    \"\"\"Maximum number of function evaluations.\"\"\"\n\n    n_cores: PositiveInt = 1\n    \"\"\"Number of cores to use for parallel computation.\"\"\"\n\n    sigma: float | None = None\n    \"\"\"Standard deviation for sampling initial population from N(0, σ²)if bounds are not\n    provided.\"\"\"\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        if not IS_NEVERGRAD_INSTALLED:\n            raise NotInstalledError(NEVERGRAD_NOT_INSTALLED_ERROR)\n\n        import nevergrad as ng\n\n        configured_optimizer = ng.optimizers.ParametrizedOnePlusOne(\n            noise_handling=self.noise_handling,\n            mutation=self.mutation,\n            crossover=self.crossover,\n            rotation=self.rotation,\n            annealing=self.annealing or \"none\",\n            sparse=self.sparse,\n            smoother=self.smoother,\n            super_radii=self.super_radii,\n            roulette_size=self.roulette_size,\n            antismooth=self.antismooth,\n            crossover_type=self.crossover_type or \"none\",\n        )\n\n        res = _nevergrad_internal(\n            problem=problem,\n            x0=x0,\n            
configured_optimizer=configured_optimizer,\n            stopping_maxfun=self.stopping_maxfun,\n            n_cores=self.n_cores,\n            seed=self.seed,\n            sigma=self.sigma,\n            nonlinear_constraints=problem.nonlinear_constraints,\n        )\n\n        return res\n\n\n@mark.minimizer(\n    name=\"nevergrad_de\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_NEVERGRAD_INSTALLED,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=True,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass NevergradDifferentialEvolution(Algorithm):\n    \"\"\"Minimize a scalar function using the Differential Evolution optimizer.\n\n    Differential Evolution is typically used for continuous optimization. It uses\n    differences between points in the population for performing mutations in fruitful\n    directions. 
It is a kind of covariance adaptation without any explicit covariance,\n    making it very fast in high dimensions.\n\n    \"\"\"\n\n    initialization: Literal[\"parametrization\", \"LHS\", \"QR\", \"QO\", \"SO\"] = (\n        \"parametrization\"\n    )\n    \"\"\"Algorithm for initialization.\n\n    'LHS' is Latin Hypercube Sampling, 'QR' is Quasi-Random.\n\n    \"\"\"\n\n    scale: float | str = 1.0\n    \"\"\"Scale of random component of updates.\"\"\"\n\n    recommendation: Literal[\"pessimistic\", \"optimistic\", \"mean\", \"noisy\"] = (\n        \"pessimistic\"\n    )\n    \"\"\"Criterion for selecting the best point to recommend.\"\"\"\n\n    crossover: (\n        float\n        | Literal[\n            \"dimension\",\n            \"random\",\n            \"onepoint\",\n            \"twopoints\",\n            \"rotated_twopoints\",\n            \"parametrization\",\n        ]\n    ) = 0.5\n    \"\"\"Crossover rate or strategy.\n\n    Can be a float, 'dimension' (1/dim), 'random', 'onepoint', or 'twopoints'.\n\n    \"\"\"\n\n    F1: PositiveFloat = 0.8\n    \"\"\"Differential weight #1 (scaling factor).\"\"\"\n\n    F2: PositiveFloat = 0.8\n    \"\"\"Differential weight #2 (scaling factor).\"\"\"\n\n    population_size: int | Literal[\"standard\", \"dimension\", \"large\"] = \"standard\"\n    \"\"\"Population size.\n\n    Can be an integer or a string like 'standard', 'dimension', or 'large' to set it\n    automatically.\n\n    \"\"\"\n\n    high_speed: bool = False\n    \"\"\"If True, uses a metamodel for recommendations to speed up optimization.\"\"\"\n\n    stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL\n    \"\"\"Maximum number of function evaluations before termination.\"\"\"\n\n    n_cores: PositiveInt = 1\n    \"\"\"Number of cores to use for parallel function evaluation.\"\"\"\n\n    seed: int | None = None\n    \"\"\"Seed for the random number generator for reproducibility.\"\"\"\n\n    sigma: float | None = None\n    \"\"\"Standard deviation 
for sampling initial population from N(0, σ²)if bounds are not\n    provided.\"\"\"\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        if not IS_NEVERGRAD_INSTALLED:\n            raise NotInstalledError(NEVERGRAD_NOT_INSTALLED_ERROR)\n\n        import nevergrad as ng\n\n        configured_optimizer = ng.optimizers.DifferentialEvolution(\n            initialization=self.initialization,\n            scale=self.scale,\n            recommendation=self.recommendation,\n            crossover=self.crossover,\n            F1=self.F1,\n            F2=self.F2,\n            popsize=self.population_size,\n            high_speed=self.high_speed,\n        )\n\n        res = _nevergrad_internal(\n            problem=problem,\n            x0=x0,\n            configured_optimizer=configured_optimizer,\n            stopping_maxfun=self.stopping_maxfun,\n            n_cores=self.n_cores,\n            seed=self.seed,\n            sigma=self.sigma,\n            nonlinear_constraints=problem.nonlinear_constraints,\n        )\n        return res\n\n\n@mark.minimizer(\n    name=\"nevergrad_bo\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_NEVERGRAD_INSTALLED and IS_BAYESOPTIM_INSTALLED,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=True,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass NevergradBayesOptim(Algorithm):\n    \"\"\"Minimize a scalar function using the Bayesian Optimization (BO) algorithm.\n\n    This wrapper uses the BO and PCA-BO algorithms from the `bayes_optim` package\n    :cite:`bayesoptimimpl`. 
PCA-BO (Principal Component Analysis for Bayesian\n    Optimization) is a dimensionality reduction technique for black-box\n    optimization. It applies PCA to the input space before performing Bayesian\n    optimization, improving efficiency in high dimensions by focusing on\n    directions of greatest variance.\n\n    \"\"\"\n\n    init_budget: int | None = None\n    \"\"\"Number of initialization algorithm steps.\"\"\"\n\n    pca: bool = False\n    \"\"\"Whether to use the PCA transformation, defining PCA-BO rather than standard\n    BO.\"\"\"\n\n    n_components: NonNegativeFloat = 0.95\n    \"\"\"Number of principal axes, representing the percentage of explained variance\n    (e.g., 0.95 means 95% variance retained).\"\"\"\n\n    prop_doe_factor: NonNegativeFloat | None = 1\n    \"\"\"Percentage of the initial budget used for Design of Experiments (DoE).\"\"\"\n\n    stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL\n    \"\"\"Maximum number of function evaluations before termination.\"\"\"\n\n    n_cores: PositiveInt = 1\n    \"\"\"Number of cores to use for parallel function evaluation.\"\"\"\n\n    seed: int | None = None\n    \"\"\"Seed for the random number generator for reproducibility.\"\"\"\n\n    sigma: int | None = None\n    \"\"\"Standard deviation for sampling initial population from N(0, σ²)in case bounds\n    are not provided.\"\"\"\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        if not IS_NEVERGRAD_INSTALLED:\n            raise NotInstalledError(NEVERGRAD_NOT_INSTALLED_ERROR)\n\n        import nevergrad as ng\n\n        configured_optimizer = ng.optimizers.BayesOptim(\n            init_budget=self.init_budget,\n            pca=self.pca,\n            n_components=self.n_components,\n            prop_doe_factor=self.prop_doe_factor,\n        )\n\n        res = _nevergrad_internal(\n            problem=problem,\n            x0=x0,\n       
     configured_optimizer=configured_optimizer,\n            stopping_maxfun=self.stopping_maxfun,\n            n_cores=self.n_cores,\n            seed=self.seed,\n            sigma=self.sigma,\n            nonlinear_constraints=problem.nonlinear_constraints,\n        )\n        return res\n\n\n@mark.minimizer(\n    name=\"nevergrad_emna\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_NEVERGRAD_INSTALLED,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=True,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass NevergradEMNA(Algorithm):\n    \"\"\"Minimize a scalar function using the Estimation of Multivariate Normal Algorithm.\n\n    EMNA is a distribution-based evolutionary algorithm that models the search\n    space using a multivariate Gaussian. It learns the full covariance matrix,\n    resulting in a cubic time complexity with respect to each sampling. 
It is\n    efficient in parallel settings but other methods should be considered first.\n    See :cite:`emnaimpl`.\n\n    \"\"\"\n\n    isotropic: bool = True\n    \"\"\"If True, uses an isotropic (identity covariance) Gaussian.\n\n    If False, uses a separable (diagonal covariance) Gaussian.\n\n    \"\"\"\n\n    noise_handling: bool = True\n    \"\"\"If True, returns the best individual found.\n\n    If False (recommended for noisy problems), returns the average of the final\n    population.\n\n    \"\"\"\n\n    population_size_adaptation: bool = False\n    \"\"\"If True, the population size is adjusted automatically based on the optimization\n    landscape and noise level.\"\"\"\n\n    initial_popsize: int | None = None\n    \"\"\"Initial population size.\n\n    Defaults to 4 times the problem dimension.\n\n    \"\"\"\n\n    stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL\n    \"\"\"Maximum number of function evaluations before termination.\"\"\"\n\n    n_cores: PositiveInt = 1\n    \"\"\"Number of cores to use for parallel function evaluation.\"\"\"\n\n    seed: int | None = None\n    \"\"\"Seed for the random number generator for reproducibility.\"\"\"\n\n    sigma: float | None = None\n    \"\"\"Standard deviation for sampling initial population from N(0, σ²)in case bounds\n    are not provided.\"\"\"\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        if not IS_NEVERGRAD_INSTALLED:\n            raise NotInstalledError(NEVERGRAD_NOT_INSTALLED_ERROR)\n\n        import nevergrad as ng\n\n        configured_optimizer = ng.optimizers.EMNA(\n            isotropic=self.isotropic,\n            naive=self.noise_handling,\n            population_size_adaptation=self.population_size_adaptation,\n            initial_popsize=self.initial_popsize,\n        )\n\n        res = _nevergrad_internal(\n            problem=problem,\n            x0=x0,\n            
configured_optimizer=configured_optimizer,\n            stopping_maxfun=self.stopping_maxfun,\n            n_cores=self.n_cores,\n            seed=self.seed,\n            sigma=self.sigma,\n            nonlinear_constraints=problem.nonlinear_constraints,\n        )\n        return res\n\n\n@mark.minimizer(\n    name=\"nevergrad_cga\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_NEVERGRAD_INSTALLED,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=True,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass NevergradCGA(Algorithm):\n    \"\"\"Minimize a scalar function using the Compact Genetic Algorithm.\n\n    The Compact Genetic Algorithm (cGA) is a memory-efficient genetic algorithm\n    that represents the population as a probability vector over gene values. It\n    simulates the behavior of a simple GA with uniform crossover by updating\n    probabilities instead of maintaining an explicit population. 
See :cite:`cgaimpl`.\n\n    \"\"\"\n\n    stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL\n    \"\"\"Maximum number of function evaluations before termination.\"\"\"\n\n    n_cores: PositiveInt = 1\n    \"\"\"Number of cores to use for parallel function evaluation.\"\"\"\n\n    seed: int | None = None\n    \"\"\"Seed for the random number generator for reproducibility.\"\"\"\n\n    sigma: float | None = None\n    \"\"\"Standard deviation for sampling initial population from N(0, σ²)in case bounds\n    are not provided.\"\"\"\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        if not IS_NEVERGRAD_INSTALLED:\n            raise NotInstalledError(NEVERGRAD_NOT_INSTALLED_ERROR)\n\n        import nevergrad as ng\n\n        configured_optimizer = ng.optimizers.cGA\n\n        res = _nevergrad_internal(\n            problem=problem,\n            x0=x0,\n            configured_optimizer=configured_optimizer,\n            stopping_maxfun=self.stopping_maxfun,\n            n_cores=self.n_cores,\n            seed=self.seed,\n            sigma=self.sigma,\n            nonlinear_constraints=problem.nonlinear_constraints,\n        )\n        return res\n\n\n@mark.minimizer(\n    name=\"nevergrad_eda\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_NEVERGRAD_INSTALLED,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=True,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass NevergradEDA(Algorithm):\n    \"\"\"Minimize a scalar function using the Estimation of Distribution Algorithm.\n\n    Estimation of Distribution Algorithms (EDAs) optimize by building and sampling\n    a probabilistic model of promising solutions. 
Instead of using traditional\n    variation operators like crossover or mutation, EDAs update a distribution\n    based on selected individuals and sample new candidates from it.\n    Refer to :cite:`edaimpl`.\n\n    \"\"\"\n\n    stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL\n    \"\"\"Maximum number of function evaluations before termination.\"\"\"\n\n    n_cores: PositiveInt = 1\n    \"\"\"Number of cores to use for parallel function evaluation.\"\"\"\n\n    seed: int | None = None\n    \"\"\"Seed for the random number generator for reproducibility.\"\"\"\n\n    sigma: float | None = None\n    \"\"\"Standard deviation for sampling initial population from N(0, σ²)in case bounds\n    are not provided.\"\"\"\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        if not IS_NEVERGRAD_INSTALLED:\n            raise NotInstalledError(NEVERGRAD_NOT_INSTALLED_ERROR)\n\n        import nevergrad as ng\n\n        configured_optimizer = ng.optimizers.EDA\n\n        res = _nevergrad_internal(\n            problem=problem,\n            x0=x0,\n            configured_optimizer=configured_optimizer,\n            stopping_maxfun=self.stopping_maxfun,\n            n_cores=self.n_cores,\n            seed=self.seed,\n            sigma=self.sigma,\n            nonlinear_constraints=problem.nonlinear_constraints,\n        )\n        return res\n\n\n@mark.minimizer(\n    name=\"nevergrad_tbpsa\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_NEVERGRAD_INSTALLED,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=True,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass NevergradTBPSA(Algorithm):\n    r\"\"\"Minimize a scalar function using the 
Test-based Population Size Adaptation\n    algorithm.\n\n    TBPSA adapts population size based on fitness trend detection using linear\n    regression. If no significant improvement is found (via hypothesis testing),\n    the population size is increased to improve robustness, making it effective\n    for noisy optimization problems. For more details, refer to :cite:`tbpsaimpl`.\n\n    \"\"\"\n\n    noise_handling: bool = True\n    \"\"\"If True, returns the best individual.\n\n    If False (recommended for noisy problems), returns the average of the final\n    population to reduce noise.\n\n    \"\"\"\n\n    initial_popsize: int | None = None\n    \"\"\"Initial population size.\n\n    If not specified, defaults to 4 times the problem dimension.\n\n    \"\"\"\n\n    stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL\n    \"\"\"Maximum number of function evaluations before termination.\"\"\"\n\n    n_cores: PositiveInt = 1\n    \"\"\"Number of cores to use for parallel function evaluation.\"\"\"\n\n    seed: int | None = None\n    \"\"\"Seed for the random number generator for reproducibility.\"\"\"\n\n    sigma: float | None = None\n    \"\"\"Standard deviation for sampling initial population from N(0, σ²)in case bounds\n    are not provided.\"\"\"\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        if not IS_NEVERGRAD_INSTALLED:\n            raise NotInstalledError(NEVERGRAD_NOT_INSTALLED_ERROR)\n\n        import nevergrad as ng\n\n        configured_optimizer = ng.optimizers.ParametrizedTBPSA(\n            naive=self.noise_handling,\n            initial_popsize=self.initial_popsize,\n        )\n\n        res = _nevergrad_internal(\n            problem=problem,\n            x0=x0,\n            configured_optimizer=configured_optimizer,\n            stopping_maxfun=self.stopping_maxfun,\n            n_cores=self.n_cores,\n            seed=self.seed,\n         
   sigma=self.sigma,\n            nonlinear_constraints=problem.nonlinear_constraints,\n        )\n        return res\n\n\n@mark.minimizer(\n    name=\"nevergrad_randomsearch\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_NEVERGRAD_INSTALLED,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=True,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass NevergradRandomSearch(Algorithm):\n    \"\"\"Minimize a scalar function using the Random Search algorithm.\n\n    This is a one-shot optimization method that provides random suggestions and serves\n    as a simple baseline for other optimizers.\n\n    \"\"\"\n\n    middle_point: bool = False\n    \"\"\"Enforces that the first suggested point is the zero vector.\"\"\"\n\n    opposition_mode: Literal[\"opposite\", \"quasi\"] | None = None\n    \"\"\"Symmetrizes exploration with respect to the center.\n\n    'opposite' enables full symmetry, while 'quasi' applies randomized symmetry.\n\n    \"\"\"\n\n    sampler: Literal[\"parametrization\", \"gaussian\", \"cauchy\"] = \"parametrization\"\n    \"\"\"The probability distribution for sampling points.\n\n    'gaussian' and 'cauchy' are available alternatives.\n\n    \"\"\"\n\n    scale: PositiveFloat | Literal[\"random\", \"auto\", \"autotune\"] = \"auto\"\n    \"\"\"Scalar used to multiply suggested point values.\n\n    Can be a float or a string for auto-scaling ('random', 'auto', 'autotune').\n\n    \"\"\"\n\n    recommendation_rule: Literal[\n        \"average_of_best\", \"pessimistic\", \"average_of_exp_best\"\n    ] = \"pessimistic\"\n    \"\"\"Specifies how the final recommendation is chosen, e.g., 'pessimistic' (default)\n    or 'average_of_best'.\"\"\"\n\n    stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL\n    
\"\"\"Maximum number of function evaluations before termination.\"\"\"\n\n    n_cores: PositiveInt = 1\n    \"\"\"Number of cores to use for parallel function evaluation.\"\"\"\n\n    sigma: float | None = None\n    \"\"\"Standard deviation for sampling initial population from N(0, σ²)in case bounds\n    are not provided.\"\"\"\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        if not IS_NEVERGRAD_INSTALLED:\n            raise NotInstalledError(NEVERGRAD_NOT_INSTALLED_ERROR)\n\n        import nevergrad as ng\n\n        configured_optimizer = ng.optimizers.RandomSearchMaker(\n            stupid=False,\n            middle_point=self.middle_point,\n            opposition_mode=self.opposition_mode,\n            sampler=self.sampler,\n            scale=self.scale,\n            recommendation_rule=self.recommendation_rule,\n        )\n\n        res = _nevergrad_internal(\n            problem=problem,\n            x0=x0,\n            configured_optimizer=configured_optimizer,\n            stopping_maxfun=self.stopping_maxfun,\n            n_cores=self.n_cores,\n            seed=None,\n            sigma=self.sigma,\n            nonlinear_constraints=problem.nonlinear_constraints,\n        )\n        return res\n\n\n@mark.minimizer(\n    name=\"nevergrad_samplingsearch\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_NEVERGRAD_INSTALLED,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=True,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass NevergradSamplingSearch(Algorithm):\n    \"\"\"Minimize a scalar function using SamplingSearch.\n\n    This is a one-shot optimization method that is better than random search because it\n    
uses low-discrepancy sequences to ensure more uniform coverage of the search space.\n    It is recommended to use \"Hammersley\" as the sampler if the budget is known, and to\n    set `scrambled=True` in high dimensions.\n\n    \"\"\"\n\n    sampler: Literal[\"Halton\", \"LHS\", \"Hammersley\"] = \"Halton\"\n    \"\"\"Choice of the low-discrepancy sampler used for generating points.\n\n    'LHS' is Latin Hypercube Sampling.\n\n    \"\"\"\n\n    scrambled: bool = False\n    \"\"\"If True, adds scrambling to the search sequence, which is highly recommended for\n    high-dimensional problems.\"\"\"\n\n    middle_point: bool = False\n    \"\"\"If True, the first suggested point is the zero vector, useful for initializing at\n    the center of the search space.\"\"\"\n\n    cauchy: bool = False\n    \"\"\"If True, uses the inverse Cauchy distribution instead of Gaussian when projecting\n    samples to a real-valued space.\"\"\"\n\n    scale: bool | NonNegativeFloat = 1.0\n    \"\"\"A float multiplier to scale all generated points.\"\"\"\n\n    rescaled: bool = False\n    \"\"\"If True, rescales the sampling pattern to ensure better coverage of the\n    boundaries.\"\"\"\n\n    recommendation_rule: Literal[\"average_of_best\", \"pessimistic\"] = \"pessimistic\"\n    \"\"\"How the final recommendation is chosen.\n\n    'pessimistic' is the default.\n\n    \"\"\"\n\n    stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL\n    \"\"\"Maximum number of function evaluations before termination.\"\"\"\n\n    n_cores: PositiveInt = 1\n    \"\"\"Number of cores to use for parallel function evaluation.\"\"\"\n\n    seed: int | None = None\n    \"\"\"Seed for the random number generator for reproducibility.\"\"\"\n\n    sigma: float | None = None\n    \"\"\"Standard deviation for sampling initial population from N(0, σ²)in case bounds\n    are not provided.\"\"\"\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) 
-> InternalOptimizeResult:\n        if not IS_NEVERGRAD_INSTALLED:\n            raise NotInstalledError(NEVERGRAD_NOT_INSTALLED_ERROR)\n\n        import nevergrad as ng\n\n        configured_optimizer = ng.optimizers.SamplingSearch(\n            sampler=self.sampler,\n            scrambled=self.scrambled,\n            middle_point=self.middle_point,\n            cauchy=self.cauchy,\n            scale=self.scale,\n            rescaled=self.rescaled,\n            recommendation_rule=self.recommendation_rule,\n        )\n\n        res = _nevergrad_internal(\n            problem=problem,\n            x0=x0,\n            configured_optimizer=configured_optimizer,\n            stopping_maxfun=self.stopping_maxfun,\n            n_cores=self.n_cores,\n            seed=self.seed,\n            sigma=self.sigma,\n            nonlinear_constraints=problem.nonlinear_constraints,\n        )\n        return res\n\n\n@mark.minimizer(\n    name=\"nevergrad_ngopt\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_NEVERGRAD_INSTALLED,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=True,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass NevergradNGOpt(Algorithm):\n    \"\"\"Minimize a scalar function using a Meta Optimizer from Nevergrad.\n\n    These are meta-optimizers that intelligently combine multiple different\n    optimization algorithms to solve a problem. 
The specific portfolio of\n    optimizers can be selected via the `optimizer` parameter.\n\n    \"\"\"\n\n    optimizer: Literal[\n        \"NGOpt\",\n        \"NGOpt4\",\n        \"NGOpt8\",\n        \"NGOpt10\",\n        \"NGOpt12\",\n        \"NGOpt13\",\n        \"NGOpt14\",\n        \"NGOpt15\",\n        \"NGOpt16\",\n        \"NGOpt21\",\n        \"NGOpt36\",\n        \"NGOpt38\",\n        \"NGOpt39\",\n        \"NGOptRW\",\n        \"NGOptF\",\n        \"NGOptF2\",\n        \"NGOptF3\",\n        \"NGOptF5\",\n        \"NgIoh2\",\n        \"NgIoh3\",\n        \"NgIoh4\",\n        \"NgIoh5\",\n        \"NgIoh6\",\n        \"NgIoh7\",\n        \"NgIoh11\",\n        \"NgIoh14\",\n        \"NgIoh13\",\n        \"NgIoh15\",\n        \"NgIoh12\",\n        \"NgIoh16\",\n        \"NgIoh17\",\n        \"NgIoh21\",\n        \"NgIoh20\",\n        \"NgIoh19\",\n        \"NgIoh18\",\n        \"NgIoh10\",\n        \"NgIoh9\",\n        \"NgIoh8\",\n        \"NgIoh12b\",\n        \"NgIoh13b\",\n        \"NgIoh14b\",\n        \"NgIoh15b\",\n        \"NgDS\",\n        \"NgDS2\",\n        \"NGDSRW\",\n        \"NGO\",\n        \"NgIohRW2\",\n        \"NgIohTuned\",\n        \"CSEC\",\n        \"CSEC10\",\n        \"CSEC11\",\n        \"Wiz\",\n    ] = \"NGOpt\"\n    \"\"\"The specific Nevergrad meta-optimizer to use.\n\n    Each option is a portfolio of different algorithms.\n\n    \"\"\"\n\n    stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL\n    \"\"\"Maximum number of function evaluations before termination.\"\"\"\n\n    n_cores: PositiveInt = 1\n    \"\"\"Number of cores to use for parallel function evaluation.\"\"\"\n\n    seed: int | None = None\n    \"\"\"Seed for the random number generator for reproducibility.\"\"\"\n\n    sigma: float | None = None\n    \"\"\"Standard deviation for sampling initial population from N(0, σ²)in case bounds\n    are not provided.\"\"\"\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: 
NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        if not IS_NEVERGRAD_INSTALLED:\n            raise NotInstalledError(NEVERGRAD_NOT_INSTALLED_ERROR)\n\n        import nevergrad as ng\n\n        configured_optimizer = getattr(ng.optimizers, self.optimizer)\n\n        res = _nevergrad_internal(\n            problem=problem,\n            x0=x0,\n            configured_optimizer=configured_optimizer,\n            stopping_maxfun=self.stopping_maxfun,\n            n_cores=self.n_cores,\n            seed=self.seed,\n            sigma=self.sigma,\n            nonlinear_constraints=problem.nonlinear_constraints,\n        )\n\n        return res\n\n\n@mark.minimizer(\n    name=\"nevergrad_meta\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_NEVERGRAD_INSTALLED,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=True,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass NevergradMeta(Algorithm):\n    \"\"\"Minimize a scalar function using a Meta Optimizer from Nevergrad.\n\n    This algorithm utilizes a combination of local and global optimizers to find\n    the best solution. 
The specific portfolio of optimizers can be selected via\n    the `optimizer` parameter.\n\n    \"\"\"\n\n    optimizer: Literal[\n        \"MultiBFGSPlus\",\n        \"LogMultiBFGSPlus\",\n        \"SqrtMultiBFGSPlus\",\n        \"MultiCobylaPlus\",\n        \"MultiSQPPlus\",\n        \"BFGSCMAPlus\",\n        \"LogBFGSCMAPlus\",\n        \"SqrtBFGSCMAPlus\",\n        \"SQPCMAPlus\",\n        \"LogSQPCMAPlus\",\n        \"SqrtSQPCMAPlus\",\n        \"MultiBFGS\",\n        \"LogMultiBFGS\",\n        \"SqrtMultiBFGS\",\n        \"MultiCobyla\",\n        \"ForceMultiCobyla\",\n        \"MultiSQP\",\n        \"BFGSCMA\",\n        \"LogBFGSCMA\",\n        \"SqrtBFGSCMA\",\n        \"SQPCMA\",\n        \"LogSQPCMA\",\n        \"SqrtSQPCMA\",\n        \"FSQPCMA\",\n        \"F2SQPCMA\",\n        \"F3SQPCMA\",\n        \"MultiDiscrete\",\n        \"CMandAS2\",\n        \"CMandAS3\",\n        \"MetaCMA\",\n        \"CMA\",\n        \"PCEDA\",\n        \"MPCEDA\",\n        \"MEDA\",\n        \"NoisyBandit\",\n        \"Shiwa\",\n        \"Carola3\",\n    ] = \"Shiwa\"\n    \"\"\"The specific Nevergrad meta-optimizer to use.\n\n    Each option is a portfolio of different local and global algorithms.\n\n    \"\"\"\n\n    stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL\n    \"\"\"Maximum number of function evaluations before termination.\"\"\"\n\n    n_cores: PositiveInt = 1\n    \"\"\"Number of cores to use for parallel function evaluation.\"\"\"\n\n    seed: int | None = None\n    \"\"\"Seed for the random number generator for reproducibility.\"\"\"\n\n    sigma: float | None = None\n    \"\"\"Standard deviation for sampling initial population from N(0, σ²) in case bounds\n    are not provided.\"\"\"\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        if not IS_NEVERGRAD_INSTALLED:\n            raise NotInstalledError(NEVERGRAD_NOT_INSTALLED_ERROR)\n\n        import 
nevergrad as ng\n\n        configured_optimizer = getattr(ng.optimizers, self.optimizer)\n\n        res = _nevergrad_internal(\n            problem=problem,\n            x0=x0,\n            configured_optimizer=configured_optimizer,\n            stopping_maxfun=self.stopping_maxfun,\n            n_cores=self.n_cores,\n            seed=self.seed,\n            sigma=self.sigma,\n            nonlinear_constraints=problem.nonlinear_constraints,\n        )\n\n        return res\n\n\ndef _nevergrad_internal(\n    problem: InternalOptimizationProblem,\n    x0: NDArray[np.float64],\n    n_cores: int,\n    configured_optimizer: ConfiguredOptimizer,\n    stopping_maxfun: int,\n    seed: int | None,\n    sigma: float | None,\n    nonlinear_constraints: list[dict[str, Any]] | None,\n) -> InternalOptimizeResult:\n    \"\"\"Internal helper function for nevergrad.\n\n    Handle the optimization loop.\n\n    Args:\n        problem (InternalOptimizationProblem): Internal optimization problem to solve.\n        x0 (np.ndarray): Initial parameter vector of shape (n_params,).\n        n_cores (int):  Number of processes used to parallelize the function\n            evaluations.\n        configured_optimizer (ConfiguredOptimizer): Nevergrad optimizer instance\n            configured with options.\n        stopping_maxfun (int): Maximum number of function evaluations.\n        seed (int): Random seed for reproducibility. 
Defaults to None.\n\n    Returns:\n        InternalOptimizeResult: Internal optimization result\n\n    \"\"\"\n    import nevergrad as ng\n\n    param = ng.p.Array(\n        init=x0,\n        lower=problem.bounds.lower,\n        upper=problem.bounds.upper,\n    )\n\n    instrum = ng.p.Instrumentation(param)\n\n    # In case bounds are not provided, the initial population is sampled\n    # from a gaussian with mean = 0 and sd = 1,\n    # which can be set through this method.\n    param.set_mutation(sigma=sigma)\n\n    if seed is not None:\n        instrum.random_state.seed(seed)\n\n    optimizer = configured_optimizer(\n        parametrization=instrum, budget=stopping_maxfun, num_workers=n_cores\n    )\n\n    ### Skip handling of non_linear constraints until improve constraint handling.\n    # if nonlinear_constraints:\n    #     constraints = _process_nonlinear_constraints(nonlinear_constraints)\n    ###\n\n    # optimization loop using the ask-and-tell interface\n    while optimizer.num_ask < stopping_maxfun:\n        x_list = [\n            optimizer.ask()\n            for _ in range(min(n_cores, stopping_maxfun - optimizer.num_ask))\n        ]\n\n        losses = problem.batch_fun([x.value[0][0] for x in x_list], n_cores=n_cores)\n\n        if not nonlinear_constraints:\n            for x, loss in zip(x_list, losses, strict=True):\n                optimizer.tell(x, loss)\n\n    ### Skip handling of non_linear constraints until improve constraint handling.\n    # else:\n    # constraint_violations = _batch_constraint_evaluations(\n    #     constraints, [x.value[0][0] for x in x_list], n_cores\n    # )\n    # for x, loss, cv in zip(x_list, losses, constraint_violations, strict=True):\n    #     optimizer.tell(x, loss, cv)\n    ###\n\n    recommendation = optimizer.provide_recommendation()\n    best_x = recommendation.value[0][0]\n    loss = recommendation.loss\n\n    # In case of CMA, loss is not provided by the optimizer, in that case,\n    # evaluate it 
manually using problem.fun\n    if loss is None:\n        loss = problem.fun(best_x)\n\n    result = InternalOptimizeResult(\n        x=best_x,\n        fun=loss,\n        success=True,\n        n_fun_evals=optimizer.num_ask,\n        n_jac_evals=0,\n        n_hess_evals=0,\n    )\n\n    return result\n\n\n### Skip handling of non_linear constraints until improve constraint handling.\n\n# def _process_nonlinear_constraints(\n#     constraints: list[dict[str, Any]],\n# ) -> list[dict[str, Any]]:\n#     \"\"\"Process stacked inequality constraints as single constraints.\n\n#     Returns a list of single constraints.\n\n#     \"\"\"\n#     processed_constraints = []\n#     for c in constraints:\n#         new = _vector_to_list_of_scalar(c)\n#         processed_constraints.extend(new)\n#     return processed_constraints\n\n\n# def _get_constraint_evaluations(\n#     constraints: list[dict[str, Any]], x: NDArray[np.float64]\n# ) -> list[NDArray[np.float64]]:\n#     \"\"\"In optimagic, inequality constraints are internally defined as g(x) >= 0.\n#    Nevergrad uses h(x) <= 0 hence a sign flip is required. Passed equality\n#    constraints are treated as inequality constraints with lower bound equal to\n#    value. Return a list of constraint evaluations at x.\n\n#     \"\"\"\n#     results = [-c[\"fun\"](x) for c in constraints]\n#     results = [np.atleast_1d(i) for i in results]\n#     return results\n\n\n# def _batch_constraint_evaluations(\n#     constraints: list[dict[str, Any]], x_list: list[Any], n_cores: int\n# ) -> list[list[NDArray[np.float64]]]:\n#     \"\"\"Batch version of _get_constraint_evaluations.\"\"\"\n#     batch = process_batch_evaluator(\"joblib\")\n#     func = partial(_get_constraint_evaluations, constraints)\n#     results = batch(func=func, arguments=[x for x in x_list], n_cores=n_cores)\n#     return results\n###\n"
  },
  {
    "path": "src/optimagic/optimizers/nlopt_optimizers.py",
    "content": "\"\"\"Implement `nlopt` algorithms.\n\nThe documentation is heavily based on (nlopt documentation)[nlopt.readthedocs.io].\n\n\"\"\"\n\nfrom dataclasses import dataclass\n\nimport numpy as np\nfrom numpy.typing import NDArray\n\nfrom optimagic import mark\nfrom optimagic.config import IS_NLOPT_INSTALLED\nfrom optimagic.optimization.algo_options import (\n    CONVERGENCE_FTOL_ABS,\n    CONVERGENCE_FTOL_REL,\n    CONVERGENCE_XTOL_ABS,\n    CONVERGENCE_XTOL_REL,\n    STOPPING_MAXFUN,\n    STOPPING_MAXFUN_GLOBAL,\n)\nfrom optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult\nfrom optimagic.optimization.internal_optimization_problem import (\n    InternalOptimizationProblem,\n)\nfrom optimagic.parameters.nonlinear_constraints import (\n    equality_as_inequality_constraints,\n)\nfrom optimagic.typing import (\n    AggregationLevel,\n    NonNegativeFloat,\n    PositiveInt,\n)\n\nif IS_NLOPT_INSTALLED:\n    import nlopt\n\n\n@mark.minimizer(\n    name=\"nlopt_bobyqa\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_NLOPT_INSTALLED,\n    is_global=False,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=True,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass NloptBOBYQA(Algorithm):\n    convergence_xtol_rel: NonNegativeFloat = CONVERGENCE_XTOL_REL\n    convergence_xtol_abs: NonNegativeFloat = CONVERGENCE_XTOL_ABS\n    convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL\n    convergence_ftol_abs: NonNegativeFloat = CONVERGENCE_FTOL_ABS\n    stopping_maxfun: PositiveInt = STOPPING_MAXFUN\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        res = _minimize_nlopt(\n            problem=problem,\n            
x0=x0,\n            is_global=self.algo_info.is_global,\n            convergence_xtol_rel=self.convergence_xtol_rel,\n            convergence_xtol_abs=self.convergence_xtol_abs,\n            convergence_ftol_rel=self.convergence_ftol_rel,\n            convergence_ftol_abs=self.convergence_ftol_abs,\n            stopping_max_eval=self.stopping_maxfun,\n            algorithm=nlopt.LN_BOBYQA,\n        )\n\n        return res\n\n\n@mark.minimizer(\n    name=\"nlopt_neldermead\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_NLOPT_INSTALLED,\n    is_global=False,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=True,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass NloptNelderMead(Algorithm):\n    convergence_xtol_rel: NonNegativeFloat = CONVERGENCE_XTOL_REL\n    convergence_xtol_abs: NonNegativeFloat = CONVERGENCE_XTOL_ABS\n    convergence_ftol_abs: NonNegativeFloat = CONVERGENCE_FTOL_ABS\n    stopping_maxfun: PositiveInt = STOPPING_MAXFUN\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        res = _minimize_nlopt(\n            problem=problem,\n            x0=x0,\n            is_global=self.algo_info.is_global,\n            convergence_xtol_rel=self.convergence_xtol_rel,\n            convergence_xtol_abs=self.convergence_xtol_abs,\n            convergence_ftol_abs=self.convergence_ftol_abs,\n            stopping_max_eval=self.stopping_maxfun,\n            algorithm=nlopt.LN_NELDERMEAD,\n        )\n\n        return res\n\n\n@mark.minimizer(\n    name=\"nlopt_praxis\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_NLOPT_INSTALLED,\n    is_global=False,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=False,\n    
supports_parallelism=False,\n    supports_bounds=False,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass NloptPRAXIS(Algorithm):\n    convergence_xtol_rel: NonNegativeFloat = CONVERGENCE_XTOL_REL\n    convergence_xtol_abs: NonNegativeFloat = CONVERGENCE_XTOL_ABS\n    convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL\n    convergence_ftol_abs: NonNegativeFloat = CONVERGENCE_FTOL_ABS\n    stopping_maxfun: PositiveInt = STOPPING_MAXFUN\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        res = _minimize_nlopt(\n            problem=problem,\n            x0=x0,\n            is_global=self.algo_info.is_global,\n            convergence_xtol_rel=self.convergence_xtol_rel,\n            convergence_xtol_abs=self.convergence_xtol_abs,\n            convergence_ftol_rel=self.convergence_ftol_rel,\n            convergence_ftol_abs=self.convergence_ftol_abs,\n            stopping_max_eval=self.stopping_maxfun,\n            algorithm=nlopt.LN_PRAXIS,\n        )\n\n        return res\n\n\n@mark.minimizer(\n    name=\"nlopt_cobyla\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_NLOPT_INSTALLED,\n    is_global=False,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=True,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=True,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass NloptCOBYLA(Algorithm):\n    convergence_xtol_rel: NonNegativeFloat = CONVERGENCE_XTOL_REL\n    convergence_xtol_abs: NonNegativeFloat = CONVERGENCE_XTOL_ABS\n    convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL\n    convergence_ftol_abs: NonNegativeFloat = CONVERGENCE_FTOL_ABS\n    
stopping_maxfun: PositiveInt = STOPPING_MAXFUN\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        res = _minimize_nlopt(\n            problem=problem,\n            x0=x0,\n            is_global=self.algo_info.is_global,\n            convergence_xtol_rel=self.convergence_xtol_rel,\n            convergence_xtol_abs=self.convergence_xtol_abs,\n            convergence_ftol_rel=self.convergence_ftol_rel,\n            convergence_ftol_abs=self.convergence_ftol_abs,\n            stopping_max_eval=self.stopping_maxfun,\n            algorithm=nlopt.LN_COBYLA,\n        )\n\n        return res\n\n\n@mark.minimizer(\n    name=\"nlopt_sbplx\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_NLOPT_INSTALLED,\n    is_global=False,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=True,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass NloptSbplx(Algorithm):\n    convergence_xtol_rel: NonNegativeFloat = CONVERGENCE_XTOL_REL\n    convergence_xtol_abs: NonNegativeFloat = CONVERGENCE_XTOL_ABS\n    convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL\n    convergence_ftol_abs: NonNegativeFloat = CONVERGENCE_FTOL_ABS\n    stopping_maxfun: PositiveInt = STOPPING_MAXFUN\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        res = _minimize_nlopt(\n            problem=problem,\n            x0=x0,\n            is_global=self.algo_info.is_global,\n            convergence_xtol_rel=self.convergence_xtol_rel,\n            convergence_xtol_abs=self.convergence_xtol_abs,\n            convergence_ftol_rel=self.convergence_ftol_rel,\n            
convergence_ftol_abs=self.convergence_ftol_abs,\n            stopping_max_eval=self.stopping_maxfun,\n            algorithm=nlopt.LN_SBPLX,\n        )\n\n        return res\n\n\n@mark.minimizer(\n    name=\"nlopt_newuoa\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_NLOPT_INSTALLED,\n    is_global=False,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=True,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass NloptNEWUOA(Algorithm):\n    convergence_xtol_rel: NonNegativeFloat = CONVERGENCE_XTOL_REL\n    convergence_xtol_abs: NonNegativeFloat = CONVERGENCE_XTOL_ABS\n    convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL\n    convergence_ftol_abs: NonNegativeFloat = CONVERGENCE_FTOL_ABS\n    stopping_maxfun: PositiveInt = STOPPING_MAXFUN\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        if problem.bounds.lower is None or problem.bounds.upper is None:\n            algo = nlopt.LN_NEWUOA\n        elif np.any(np.isfinite(problem.bounds.lower)) or np.any(\n            np.isfinite(problem.bounds.upper)\n        ):\n            algo = nlopt.LN_NEWUOA_BOUND\n        else:\n            algo = nlopt.LN_NEWUOA\n        res = _minimize_nlopt(\n            problem=problem,\n            x0=x0,\n            is_global=self.algo_info.is_global,\n            convergence_xtol_rel=self.convergence_xtol_rel,\n            convergence_xtol_abs=self.convergence_xtol_abs,\n            convergence_ftol_rel=self.convergence_ftol_rel,\n            convergence_ftol_abs=self.convergence_ftol_abs,\n            stopping_max_eval=self.stopping_maxfun,\n            algorithm=algo,\n        )\n\n        return res\n\n\n@mark.minimizer(\n    name=\"nlopt_tnewton\",\n 
   solver_type=AggregationLevel.SCALAR,\n    is_available=IS_NLOPT_INSTALLED,\n    is_global=False,\n    needs_jac=True,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=True,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass NloptTNewton(Algorithm):\n    convergence_xtol_rel: NonNegativeFloat = CONVERGENCE_XTOL_REL\n    convergence_xtol_abs: NonNegativeFloat = CONVERGENCE_XTOL_ABS\n    convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL\n    convergence_ftol_abs: NonNegativeFloat = CONVERGENCE_FTOL_ABS\n    stopping_maxfun: PositiveInt = STOPPING_MAXFUN\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        res = _minimize_nlopt(\n            problem=problem,\n            x0=x0,\n            is_global=self.algo_info.is_global,\n            convergence_xtol_rel=self.convergence_xtol_rel,\n            convergence_xtol_abs=self.convergence_xtol_abs,\n            convergence_ftol_rel=self.convergence_ftol_rel,\n            convergence_ftol_abs=self.convergence_ftol_abs,\n            stopping_max_eval=self.stopping_maxfun,\n            algorithm=nlopt.LD_TNEWTON,\n        )\n\n        return res\n\n\n@mark.minimizer(\n    name=\"nlopt_lbfgsb\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_NLOPT_INSTALLED,\n    is_global=False,\n    needs_jac=True,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=True,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass NloptLBFGSB(Algorithm):\n    convergence_xtol_rel: NonNegativeFloat = CONVERGENCE_XTOL_REL\n    convergence_xtol_abs: 
NonNegativeFloat = CONVERGENCE_XTOL_ABS\n    convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL\n    convergence_ftol_abs: NonNegativeFloat = CONVERGENCE_FTOL_ABS\n    stopping_maxfun: PositiveInt = STOPPING_MAXFUN\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        res = _minimize_nlopt(\n            problem=problem,\n            x0=x0,\n            is_global=self.algo_info.is_global,\n            convergence_xtol_rel=self.convergence_xtol_rel,\n            convergence_xtol_abs=self.convergence_xtol_abs,\n            convergence_ftol_rel=self.convergence_ftol_rel,\n            convergence_ftol_abs=self.convergence_ftol_abs,\n            stopping_max_eval=self.stopping_maxfun,\n            algorithm=nlopt.LD_LBFGS,\n        )\n\n        return res\n\n\n@mark.minimizer(\n    name=\"nlopt_ccsaq\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_NLOPT_INSTALLED,\n    is_global=False,\n    needs_jac=True,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=True,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass NloptCCSAQ(Algorithm):\n    convergence_xtol_rel: NonNegativeFloat = CONVERGENCE_XTOL_REL\n    convergence_xtol_abs: NonNegativeFloat = CONVERGENCE_XTOL_ABS\n    convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL\n    convergence_ftol_abs: NonNegativeFloat = CONVERGENCE_FTOL_ABS\n    stopping_maxfun: PositiveInt = STOPPING_MAXFUN\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        res = _minimize_nlopt(\n            problem=problem,\n            x0=x0,\n            is_global=self.algo_info.is_global,\n            
convergence_xtol_rel=self.convergence_xtol_rel,\n            convergence_xtol_abs=self.convergence_xtol_abs,\n            convergence_ftol_rel=self.convergence_ftol_rel,\n            convergence_ftol_abs=self.convergence_ftol_abs,\n            stopping_max_eval=self.stopping_maxfun,\n            algorithm=nlopt.LD_CCSAQ,\n        )\n\n        return res\n\n\n@mark.minimizer(\n    name=\"nlopt_mma\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_NLOPT_INSTALLED,\n    is_global=False,\n    needs_jac=True,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=True,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=True,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass NloptMMA(Algorithm):\n    convergence_xtol_rel: NonNegativeFloat = CONVERGENCE_XTOL_REL\n    convergence_xtol_abs: NonNegativeFloat = CONVERGENCE_XTOL_ABS\n    convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL\n    convergence_ftol_abs: NonNegativeFloat = CONVERGENCE_FTOL_ABS\n    stopping_maxfun: PositiveInt = STOPPING_MAXFUN\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        nonlinear_constraints = equality_as_inequality_constraints(\n            problem.nonlinear_constraints\n        )\n\n        res = _minimize_nlopt(\n            problem=problem,\n            x0=x0,\n            is_global=self.algo_info.is_global,\n            convergence_xtol_rel=self.convergence_xtol_rel,\n            convergence_xtol_abs=self.convergence_xtol_abs,\n            convergence_ftol_rel=self.convergence_ftol_rel,\n            convergence_ftol_abs=self.convergence_ftol_abs,\n            stopping_max_eval=self.stopping_maxfun,\n            algorithm=nlopt.LD_MMA,\n            nonlinear_constraints=nonlinear_constraints,\n        )\n\n        return 
res\n\n\n@mark.minimizer(\n    name=\"nlopt_var\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_NLOPT_INSTALLED,\n    is_global=False,\n    needs_jac=True,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=True,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass NloptVAR(Algorithm):\n    convergence_xtol_rel: NonNegativeFloat = CONVERGENCE_XTOL_REL\n    convergence_xtol_abs: NonNegativeFloat = CONVERGENCE_XTOL_ABS\n    convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL\n    convergence_ftol_abs: NonNegativeFloat = CONVERGENCE_FTOL_ABS\n    stopping_maxfun: PositiveInt = STOPPING_MAXFUN\n    rank_1_update: bool = True\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        if self.rank_1_update:\n            algo = nlopt.LD_VAR1\n        else:\n            algo = nlopt.LD_VAR2\n        res = _minimize_nlopt(\n            problem=problem,\n            x0=x0,\n            is_global=self.algo_info.is_global,\n            convergence_xtol_rel=self.convergence_xtol_rel,\n            convergence_xtol_abs=self.convergence_xtol_abs,\n            convergence_ftol_rel=self.convergence_ftol_rel,\n            convergence_ftol_abs=self.convergence_ftol_abs,\n            stopping_max_eval=self.stopping_maxfun,\n            algorithm=algo,\n        )\n\n        return res\n\n\n@mark.minimizer(\n    name=\"nlopt_slsqp\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_NLOPT_INSTALLED,\n    is_global=False,\n    needs_jac=True,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=True,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=True,\n    
disable_history=False,\n)\n@dataclass(frozen=True)\nclass NloptSLSQP(Algorithm):\n    convergence_xtol_rel: NonNegativeFloat = CONVERGENCE_XTOL_REL\n    convergence_xtol_abs: NonNegativeFloat = CONVERGENCE_XTOL_ABS\n    convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL\n    convergence_ftol_abs: NonNegativeFloat = CONVERGENCE_FTOL_ABS\n    stopping_maxfun: PositiveInt = STOPPING_MAXFUN\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        res = _minimize_nlopt(\n            problem=problem,\n            x0=x0,\n            is_global=self.algo_info.is_global,\n            convergence_xtol_rel=self.convergence_xtol_rel,\n            convergence_xtol_abs=self.convergence_xtol_abs,\n            convergence_ftol_rel=self.convergence_ftol_rel,\n            convergence_ftol_abs=self.convergence_ftol_abs,\n            stopping_max_eval=self.stopping_maxfun,\n            algorithm=nlopt.LD_SLSQP,\n        )\n\n        return res\n\n\n@mark.minimizer(\n    name=\"nlopt_direct\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_NLOPT_INSTALLED,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=True,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass NloptDirect(Algorithm):\n    convergence_xtol_rel: NonNegativeFloat = CONVERGENCE_XTOL_REL\n    convergence_xtol_abs: NonNegativeFloat = CONVERGENCE_XTOL_ABS\n    convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL\n    convergence_ftol_abs: NonNegativeFloat = CONVERGENCE_FTOL_ABS\n    stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL\n    locally_biased: bool = False\n    random_search: bool = False\n    unscaled_bounds: bool = False\n\n    def 
_solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        if (\n            not self.locally_biased\n            and not self.random_search\n            and not self.unscaled_bounds\n        ):\n            algo = nlopt.GN_DIRECT\n        elif (\n            self.locally_biased and not self.random_search and not self.unscaled_bounds\n        ):\n            algo = nlopt.GN_DIRECT_L\n        elif self.locally_biased and not self.random_search and self.unscaled_bounds:\n            algo = nlopt.GN_DIRECT_L_NOSCAL\n        elif self.locally_biased and self.random_search and not self.unscaled_bounds:\n            algo = nlopt.GN_DIRECT_L_RAND\n        elif self.locally_biased and self.random_search and self.unscaled_bounds:\n            algo = nlopt.GN_DIRECT_L_RAND_NOSCAL\n        elif (\n            not self.locally_biased and not self.random_search and self.unscaled_bounds\n        ):\n            algo = nlopt.GN_DIRECT_NOSCAL\n        res = _minimize_nlopt(\n            problem=problem,\n            x0=x0,\n            is_global=self.algo_info.is_global,\n            convergence_xtol_rel=self.convergence_xtol_rel,\n            convergence_xtol_abs=self.convergence_xtol_abs,\n            convergence_ftol_rel=self.convergence_ftol_rel,\n            convergence_ftol_abs=self.convergence_ftol_abs,\n            stopping_max_eval=self.stopping_maxfun,\n            algorithm=algo,\n        )\n        return res\n\n\n@mark.minimizer(\n    name=\"nlopt_esch\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_NLOPT_INSTALLED,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=True,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass NloptESCH(Algorithm):\n 
   convergence_xtol_rel: NonNegativeFloat = CONVERGENCE_XTOL_REL\n    convergence_xtol_abs: NonNegativeFloat = CONVERGENCE_XTOL_ABS\n    convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL\n    convergence_ftol_abs: NonNegativeFloat = CONVERGENCE_FTOL_ABS\n    stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        res = _minimize_nlopt(\n            problem=problem,\n            x0=x0,\n            is_global=self.algo_info.is_global,\n            convergence_xtol_rel=self.convergence_xtol_rel,\n            convergence_xtol_abs=self.convergence_xtol_abs,\n            convergence_ftol_rel=self.convergence_ftol_rel,\n            convergence_ftol_abs=self.convergence_ftol_abs,\n            stopping_max_eval=self.stopping_maxfun,\n            algorithm=nlopt.GN_ESCH,\n        )\n\n        return res\n\n\n@mark.minimizer(\n    name=\"nlopt_isres\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_NLOPT_INSTALLED,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=True,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=True,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass NloptISRES(Algorithm):\n    convergence_xtol_rel: NonNegativeFloat = CONVERGENCE_XTOL_REL\n    convergence_xtol_abs: NonNegativeFloat = CONVERGENCE_XTOL_ABS\n    convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL\n    convergence_ftol_abs: NonNegativeFloat = CONVERGENCE_FTOL_ABS\n    stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        res = _minimize_nlopt(\n            problem=problem,\n        
    x0=x0,\n            is_global=self.algo_info.is_global,\n            convergence_xtol_rel=self.convergence_xtol_rel,\n            convergence_xtol_abs=self.convergence_xtol_abs,\n            convergence_ftol_rel=self.convergence_ftol_rel,\n            convergence_ftol_abs=self.convergence_ftol_abs,\n            stopping_max_eval=self.stopping_maxfun,\n            algorithm=nlopt.GN_ISRES,\n        )\n\n        return res\n\n\n@mark.minimizer(\n    name=\"nlopt_crs2_lm\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_NLOPT_INSTALLED,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=True,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass NloptCRS2LM(Algorithm):\n    convergence_xtol_rel: NonNegativeFloat = CONVERGENCE_XTOL_REL\n    convergence_xtol_abs: NonNegativeFloat = CONVERGENCE_XTOL_ABS\n    convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL\n    convergence_ftol_abs: NonNegativeFloat = CONVERGENCE_FTOL_ABS\n    stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL\n    population_size: PositiveInt | None = None\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        if self.population_size is None:\n            population_size = 10 * (len(x0) + 1)\n        else:\n            population_size = self.population_size\n        res = _minimize_nlopt(\n            problem=problem,\n            x0=x0,\n            is_global=self.algo_info.is_global,\n            convergence_xtol_rel=self.convergence_xtol_rel,\n            convergence_xtol_abs=self.convergence_xtol_abs,\n            convergence_ftol_rel=self.convergence_ftol_rel,\n            convergence_ftol_abs=self.convergence_ftol_abs,\n            
stopping_max_eval=self.stopping_maxfun,\n            algorithm=nlopt.GN_CRS2_LM,\n            population_size=population_size,\n        )\n\n        return res\n\n\ndef _minimize_nlopt(\n    problem,\n    x0,\n    algorithm,\n    is_global,\n    *,\n    convergence_xtol_rel=None,\n    convergence_xtol_abs=None,\n    convergence_ftol_rel=None,\n    convergence_ftol_abs=None,\n    stopping_max_eval=None,\n    population_size=None,\n    nonlinear_constraints=None,\n):\n    \"\"\"Run actual nlopt optimization argument, set relevant attributes.\"\"\"\n\n    def func(x, grad):\n        if grad.size > 0:\n            fun, jac = problem.fun_and_jac(x)\n            grad[:] = jac\n        else:\n            fun = problem.fun(x)\n        return fun\n\n    if nonlinear_constraints is None:\n        nonlinear_constraints = problem.nonlinear_constraints\n    opt = nlopt.opt(algorithm, x0.shape[0])\n    if convergence_ftol_rel is not None:\n        opt.set_ftol_rel(convergence_ftol_rel)\n    if convergence_ftol_abs is not None:\n        opt.set_ftol_abs(convergence_ftol_abs)\n    if convergence_xtol_rel is not None:\n        opt.set_xtol_rel(convergence_xtol_rel)\n    if convergence_xtol_abs is not None:\n        opt.set_xtol_abs(convergence_xtol_abs)\n    if problem.bounds.lower is not None:\n        opt.set_lower_bounds(problem.bounds.lower)\n    if problem.bounds.upper is not None:\n        opt.set_upper_bounds(problem.bounds.upper)\n    if stopping_max_eval is not None:\n        opt.set_maxeval(stopping_max_eval)\n    if population_size is not None:\n        opt.set_population(population_size)\n    if nonlinear_constraints:\n        for constr in _get_nlopt_constraints(nonlinear_constraints, filter_type=\"eq\"):\n            opt.add_equality_mconstraint(constr[\"fun\"], constr[\"tol\"])\n        for constr in _get_nlopt_constraints(nonlinear_constraints, filter_type=\"ineq\"):\n            opt.add_inequality_mconstraint(constr[\"fun\"], constr[\"tol\"])\n    
opt.set_min_objective(func)\n    solution_x = opt.optimize(x0)\n    return _process_nlopt_results(opt, solution_x, is_global)\n\n\ndef _process_nlopt_results(nlopt_obj, solution_x, is_global):\n    messages = {\n        1: \"Convergence achieved \",\n        2: (\n            \"Optimizer stopped because maximum value of criterion function was reached\"\n        ),\n        3: (\n            \"Optimizer stopped because convergence_ftol_rel or \"\n            \"convergence_ftol_abs was reached\"\n        ),\n        4: (\n            \"Optimizer stopped because convergence_xtol_rel or \"\n            \"convergence_xtol_abs was reached\"\n        ),\n        5: \"Optimizer stopped because max_criterion_evaluations was reached\",\n        6: \"Optimizer stopped because max running time was reached\",\n        -1: \"Optimizer failed\",\n        -2: \"Invalid arguments were passed\",\n        -3: \"Memory error\",\n        -4: \"Halted because roundoff errors limited progress\",\n        -5: \"Halted because of user specified forced stop\",\n    }\n    success = nlopt_obj.last_optimize_result() in [1, 2, 3, 4]\n    if is_global and not success:\n        success = None\n    processed = InternalOptimizeResult(\n        x=solution_x,\n        fun=nlopt_obj.last_optimum_value(),\n        n_fun_evals=nlopt_obj.get_numevals(),\n        success=success,\n        message=messages[nlopt_obj.last_optimize_result()],\n    )\n\n    return processed\n\n\ndef _get_nlopt_constraints(constraints, filter_type):\n    \"\"\"Transform internal nonlinear constraints to NLOPT readable format.\"\"\"\n    filtered = [c for c in constraints if c[\"type\"] == filter_type]\n    nlopt_constraints = [_internal_to_nlopt_constaint(c) for c in filtered]\n    return nlopt_constraints\n\n\ndef _internal_to_nlopt_constaint(c):\n    \"\"\"Sign flip description:\n\n    In optimagic, inequality constraints are internally defined as g(x) >= 0. 
NLOPT uses\n    h(x) <= 0, which is why we need to flip the sign.\n\n    \"\"\"\n    tol = c[\"tol\"]\n    if np.isscalar(tol):\n        tol = np.tile(tol, c[\"n_constr\"])\n\n    def _constraint(result, x, grad):\n        result[:] = -c[\"fun\"](x)  # see docstring for sign flip\n        if grad.size > 0:\n            grad[:] = -c[\"jac\"](x)  # see docstring for sign flip\n\n    new_constr = {\n        \"fun\": _constraint,\n        \"tol\": tol,\n    }\n    return new_constr\n"
  },
  {
    "path": "src/optimagic/optimizers/pounders.py",
    "content": "\"\"\"Implement the POUNDERS algorithm.\"\"\"\n\nimport warnings\nfrom dataclasses import dataclass\nfrom typing import Any, Literal\n\nimport numpy as np\nfrom numpy.typing import NDArray\n\nfrom optimagic import mark\nfrom optimagic.config import DEFAULT_N_CORES\nfrom optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult\nfrom optimagic.optimization.internal_optimization_problem import (\n    InternalOptimizationProblem,\n)\nfrom optimagic.optimizers._pounders.pounders_auxiliary import (\n    add_accepted_point_to_residual_model,\n    add_geomtery_points_to_make_main_model_fully_linear,\n    create_initial_residual_model,\n    create_main_from_residual_model,\n    evaluate_residual_model,\n    find_affine_points,\n    fit_residual_model,\n    get_feature_matrices_residual_model,\n    get_last_model_indices_and_check_for_repeated_model,\n    solve_subproblem,\n    update_main_model_with_new_accepted_x,\n    update_residual_model,\n    update_residual_model_with_new_accepted_x,\n    update_trustregion_radius,\n)\nfrom optimagic.optimizers._pounders.pounders_history import LeastSquaresHistory\nfrom optimagic.typing import (\n    AggregationLevel,\n    NonNegativeFloat,\n    PositiveFloat,\n    PositiveInt,\n)\n\n\n@mark.minimizer(\n    name=\"pounders\",\n    solver_type=AggregationLevel.LEAST_SQUARES,\n    is_available=True,\n    is_global=False,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=True,\n    supports_bounds=True,\n    supports_infinite_bounds=True,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass Pounders(Algorithm):\n    convergence_gtol_abs: NonNegativeFloat = 1e-8\n    convergence_gtol_rel: NonNegativeFloat = 1e-8\n    # TODO: Why can this a bool\n    convergence_gtol_scaled: NonNegativeFloat | bool = False\n    max_interpolation_points: PositiveInt | None = None\n    # 
TODO: Why is this not higher?\n    stopping_maxiter: PositiveInt = 2_000\n    trustregion_initial_radius: PositiveFloat = 0.1\n    trustregion_minimal_radius: PositiveFloat = 1e-6\n    trustregion_maximal_radius: PositiveFloat = 1e6\n    trustregion_shrinking_factor_not_successful: PositiveFloat = 0.5\n    trustregion_expansion_factor_successful: PositiveFloat = 2\n    theta1: PositiveFloat = 1e-5\n    theta2: PositiveFloat = 1e-4\n    trustregion_threshold_acceptance: NonNegativeFloat = 0\n    trustregion_threshold_successful: NonNegativeFloat = 0.1\n    c1: NonNegativeFloat | None = None\n    c2: NonNegativeFloat = 10\n    trustregion_subproblem_solver: Literal[\n        \"bntr\",\n        \"gqtpar\",\n    ] = \"bntr\"\n    trustregion_subsolver_options: dict[str, Any] | None = None\n    n_cores: PositiveInt = DEFAULT_N_CORES\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        if self.max_interpolation_points is None:\n            max_interpolation_points = 2 * len(x0) + 1\n        else:\n            max_interpolation_points = self.max_interpolation_points\n\n        if self.c1 is None:\n            c1 = np.sqrt(x0.shape[0])\n        else:\n            c1 = self.c1\n\n        if self.trustregion_subsolver_options is None:\n            trustregion_subsolver_options = {}\n        else:\n            trustregion_subsolver_options = self.trustregion_subsolver_options\n\n        default_options = {\n            \"conjugate_gradient_method\": \"trsbox\",\n            \"maxiter\": 50,\n            \"maxiter_gradient_descent\": 5,\n            \"gtol_abs\": 1e-8,\n            \"gtol_rel\": 1e-8,\n            \"gtol_scaled\": 0,\n            \"gtol_abs_cg\": 1e-8,\n            \"gtol_rel_cg\": 1e-6,\n            \"k_easy\": 0.1,\n            \"k_hard\": 0.2,\n        }\n        trustregion_subsolver_options = {\n            **default_options,\n            
**trustregion_subsolver_options,\n        }\n\n        result = internal_solve_pounders(\n            criterion=problem.fun,\n            x0=x0,\n            lower_bounds=problem.bounds.lower,\n            upper_bounds=problem.bounds.upper,\n            gtol_abs=self.convergence_gtol_abs,\n            gtol_rel=self.convergence_gtol_rel,\n            gtol_scaled=self.convergence_gtol_scaled,\n            maxinterp=max_interpolation_points,\n            maxiter=self.stopping_maxiter,\n            delta=self.trustregion_initial_radius,\n            delta_min=self.trustregion_minimal_radius,\n            delta_max=self.trustregion_maximal_radius,\n            gamma0=self.trustregion_shrinking_factor_not_successful,\n            gamma1=self.trustregion_expansion_factor_successful,\n            theta1=self.theta1,\n            theta2=self.theta2,\n            eta0=self.trustregion_threshold_acceptance,\n            eta1=self.trustregion_threshold_successful,\n            c1=c1,\n            c2=self.c2,\n            solver_sub=self.trustregion_subproblem_solver,\n            conjugate_gradient_method_sub=trustregion_subsolver_options[\n                \"conjugate_gradient_method\"\n            ],\n            maxiter_sub=trustregion_subsolver_options[\"maxiter\"],\n            maxiter_gradient_descent_sub=trustregion_subsolver_options[\n                \"maxiter_gradient_descent\"\n            ],\n            gtol_abs_sub=trustregion_subsolver_options[\"gtol_abs\"],\n            gtol_rel_sub=trustregion_subsolver_options[\"gtol_rel\"],\n            gtol_scaled_sub=trustregion_subsolver_options[\"gtol_scaled\"],\n            gtol_abs_conjugate_gradient_sub=trustregion_subsolver_options[\n                \"gtol_abs_cg\"\n            ],\n            gtol_rel_conjugate_gradient_sub=trustregion_subsolver_options[\n                \"gtol_rel_cg\"\n            ],\n            k_easy_sub=trustregion_subsolver_options[\"k_easy\"],\n            
k_hard_sub=trustregion_subsolver_options[\"k_hard\"],\n            batch_fun=problem.batch_fun,\n            n_cores=self.n_cores,\n        )\n\n        return result\n\n\ndef internal_solve_pounders(\n    criterion,\n    x0,\n    lower_bounds,\n    upper_bounds,\n    gtol_abs,\n    gtol_rel,\n    gtol_scaled,\n    maxinterp,\n    maxiter,\n    delta,\n    delta_min,\n    delta_max,\n    gamma0,\n    gamma1,\n    theta1,\n    theta2,\n    eta0,\n    eta1,\n    c1,\n    c2,\n    solver_sub,\n    conjugate_gradient_method_sub,\n    maxiter_sub,\n    maxiter_gradient_descent_sub,\n    gtol_abs_sub,\n    gtol_rel_sub,\n    gtol_scaled_sub,\n    gtol_abs_conjugate_gradient_sub,\n    gtol_rel_conjugate_gradient_sub,\n    k_easy_sub,\n    k_hard_sub,\n    batch_fun,\n    n_cores,\n):\n    \"\"\"Find the local minimum to a non-linear least-squares problem using POUNDERS.\n\n    Args:\n        criterion (callable): Function that returns criterion.\n        x0 (np.ndarray): Initial guess for the parameter vector (starting points).\n        lower_bounds (np.ndarray): 1d array of shape (n,) with lower bounds\n            for the parameter vector x.\n        upper_bounds (np.ndarray): 1d array of shape (n,) with upper bounds\n            for the parameter vector x.\n        gtol_abs (float): Convergence tolerance for the absolute gradient norm.\n            Stop if norm of the gradient is less than this.\n        gtol_rel (float): Convergence tolerance for the relative gradient norm.\n            Stop if norm of the gradient relative to the criterion value is less\n            than this.\n        gtol_scaled (float): Convergence tolerance for the scaled gradient norm.\n            Stop if norm of the gradient divided by norm of the gradient at the\n            initial parameters is less than this.\n        maxinterp (int): Maximum number of interpolation points.\n            Default is `2 * n + 1`, where `n` is the length of the parameter vector.\n        maxiter (int): Maximum 
number of iterations. If reached, terminate.\n        delta (float): Delta, initial trust-region radius.\n        delta_min (float): Minimal trust-region radius.\n        delta_max (float): Maximal trust-region radius.\n        gamma0 (float): Shrinking factor of the trust-region radius in case the\n            solution vector of the suproblem is not accepted, but the model is fully\n            linar (i.e. \"valid\").\n        gamma1 (float): Expansion factor of the trust-region radius in case the\n            solution vector of the suproblem is accepted.\n        theta1 (float): Threshold for adding the current candidate vector\n            to the model. Function argument to find_affine_points().\n        theta2 (float): Threshold for adding the current candidate vector\n            to the model. Argument to get_interpolation_matrices_residual_model().\n        eta0 (float): Threshold for accepting the solution vector of the trust-region\n            subproblem as the best candidate.\n        eta1 (float): Threshold for successfully accepting the solution vector of the\n            trust-region subproblem as the best candidate.\n        c1 (float): Treshold for accepting the norm of our current x candidate.\n            Equal to sqrt(n) by default. Argument to find_affine_points() in case\n            the input array *model_improving_points* is zero.\n        c2 (int)): Treshold for accepting the norm of our current candidate vector.\n            Equal to 10 by default. 
Argument to find_affine_points() in case\n            the input array *model_improving_points* is not zero.\n        solver_sub (str): Solver to use for the trust-region subproblem.\n            Two internal solvers are supported:\n            - \"bntr\": Bounded Newton Trust-Region (default, supports bound constraints)\n            - \"gqtpar\": (does not support bound constraints)\n        conjugate_gradient_method_sub (str): Method for computing the conjugate\n            gradient step (\"bntr\").\n            Available conjugate gradient methods are:\n            - \"cg\"\n            - \"steihaug_toint\"\n            - \"trsbox\" (default)\n        maxiter_sub (int): Maximum number of iterations in the trust-region subproblem.\n        maxiter_gradient_descent_sub (int): Maximum number of gradient descent\n            iterations to perform (\"bntr\").\n        gtol_abs_sub (float): Convergence tolerance for the absolute gradient norm\n            in the trust-region subproblem (\"bntr\").\n        gtol_rel_sub (float): Convergence tolerance for the relative gradient norm\n            in the trust-region subproblem (\"bntr\").\n        gtol_scaled_sub (float): Convergence tolerance for the scaled gradient norm\n            in the trust-region subproblem (\"bntr\").\n        gtol_abs_conjugate_gradient_sub (float): Convergence tolerance for the\n            absolute gradient norm in the conjugate gradient step of the trust-region\n            subproblem if \"cg\" is used as ``conjugate_gradient_method_sub`` (\"bntr\").\n        gtol_rel_conjugate_gradient_sub (float): Convergence tolerance for the\n            relative gradient norm in the conjugate gradient step of the trust-region\n            subproblem if \"cg\" is used as ``conjugate_gradient_method_sub`` (\"bntr\").\n        k_easy_sub (float): Stopping criterion for the \"easy\" case in the trust-region\n            subproblem (\"gqtpar\").\n        k_hard_sub (float): Stopping criterion for the \"hard\" 
case in the trust-region\n            subproblem (\"gqtpar\").\n        batch_evaluator (str or callable): Name of a pre-implemented batch evaluator\n            (currently 'joblib' and 'pathos_mp') or callable with the same interface\n            as the optimagic batch_evaluators.\n        n_cores (int): Number of processes used to parallelize the function\n            evaluations. Default is 1.\n\n    Returns:\n        (dict) Result dictionary containing:\n        - solution_x (np.ndarray): Solution vector of shape (n,).\n        - solution_criterion (np.ndarray): Values of the criterion function at the\n            solution vector. Shape (n_obs,).\n        - history_x (np.ndarray): Entire history of x. Shape (history.get_n_fun(), n).\n        - history_criterion (np.ndarray): Entire history of the criterion function\n            evaluations. Shape (history.get_n_fun(), n_obs)\n        - n_iterations (int): Number of iterations the algorithm ran before finding a\n            solution vector or reaching maxiter.\n        - success (bool): Boolean indicating whether a solution has been found\n            before reaching maxiter.\n\n    \"\"\"\n    history = LeastSquaresHistory()\n\n    n = len(x0)\n    model_indices = np.zeros(maxinterp, dtype=int)\n\n    n_last_modelpoints = 0\n\n    if lower_bounds is not None and upper_bounds is not None:\n        if np.max(x0 + delta - upper_bounds) > 1e-10:\n            raise ValueError(\"Starting points + delta > upper bounds.\")\n\n    xs = [x0]\n    for i in range(n):\n        x1 = x0.copy()\n        x1[i] += delta\n        xs.append(x1)\n\n    residuals = batch_fun(x_list=xs, n_cores=n_cores)\n\n    history.add_entries(xs, residuals)\n    accepted_index = history.get_best_index()\n\n    residual_model = create_initial_residual_model(\n        history=history, accepted_index=accepted_index, delta=delta\n    )\n    main_model = create_main_from_residual_model(\n        residual_model=residual_model, 
multiply_square_terms_with_intercepts=False\n    )\n\n    x_accepted = history.get_best_x()\n    gradient_norm_initial = np.linalg.norm(main_model.linear_terms)\n    gradient_norm_initial *= delta\n\n    valid = True\n    n_modelpoints = n + 1\n    last_model_indices = np.zeros(maxinterp, dtype=int)\n\n    converged = False\n    convergence_reason = \"Continue iterating.\"\n\n    for niter in range(maxiter + 1):\n        result_sub = solve_subproblem(\n            x_accepted=x_accepted,\n            main_model=main_model,\n            lower_bounds=lower_bounds,\n            upper_bounds=upper_bounds,\n            delta=delta,\n            solver=solver_sub,\n            conjugate_gradient_method=conjugate_gradient_method_sub,\n            maxiter=maxiter_sub,\n            maxiter_gradient_descent=maxiter_gradient_descent_sub,\n            gtol_abs=gtol_abs_sub,\n            gtol_rel=gtol_rel_sub,\n            gtol_scaled=gtol_scaled_sub,\n            gtol_abs_conjugate_gradient=gtol_abs_conjugate_gradient_sub,\n            gtol_rel_conjugate_gradient=gtol_rel_conjugate_gradient_sub,\n            k_easy=k_easy_sub,\n            k_hard=k_hard_sub,\n        )\n\n        x_candidate = x_accepted + result_sub[\"x\"] * delta\n        residuals_candidate = criterion(x_candidate)\n        history.add_entries(x_candidate, residuals_candidate)\n\n        predicted_reduction = history.get_critvals(\n            accepted_index\n        ) - history.get_critvals(-1)\n        actual_reduction = -result_sub[\"criterion\"]\n\n        with warnings.catch_warnings():\n            warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n            rho = np.divide(predicted_reduction, actual_reduction)\n\n        if (rho >= eta1) or (rho > eta0 and valid):\n            residual_model = residual_model._replace(\n                intercepts=history.get_residuals(index=accepted_index)\n            )\n            center_info = {\"x\": history.get_best_x(), \"radius\": delta}\n           
 x_candidate = history.get_centered_xs(center_info, index=-1)\n\n            residual_model = update_residual_model_with_new_accepted_x(\n                residual_model=residual_model, x_candidate=x_candidate\n            )\n            main_model = update_main_model_with_new_accepted_x(\n                main_model=main_model, x_candidate=x_candidate\n            )\n            x_accepted = history.get_best_x()\n            accepted_index = history.get_best_index()\n\n        critval_accepted = history.get_critvals(index=accepted_index)\n\n        # The model is deemend \"not valid\" if it has less than n model points.\n        # Otherwise, if the model has n points, it is considered \"valid\" or\n        # \"fully linear\" or \"just identified\".\n        # Note: valid is True in the first iteration\n        if not valid:\n            (\n                model_improving_points,\n                model_indices,\n                n_modelpoints,\n                project_x_onto_null,\n            ) = find_affine_points(\n                history=history,\n                x_accepted=x_accepted,\n                model_improving_points=np.zeros((n, n)),\n                project_x_onto_null=False,\n                delta=delta,\n                theta1=theta1,\n                c=c1,\n                model_indices=model_indices,\n                n_modelpoints=0,\n            )\n\n            if n_modelpoints < n:\n                (\n                    history,\n                    model_indices,\n                ) = add_geomtery_points_to_make_main_model_fully_linear(\n                    history=history,\n                    main_model=main_model,\n                    model_improving_points=model_improving_points,\n                    model_indices=model_indices,\n                    x_accepted=x_accepted,\n                    n_modelpoints=n_modelpoints,\n                    delta=delta,\n                    criterion=criterion,\n                    
lower_bounds=lower_bounds,\n                    upper_bounds=upper_bounds,\n                    batch_fun=batch_fun,\n                    n_cores=n_cores,\n                )\n                n_modelpoints = n\n\n        delta_old = delta\n        delta = update_trustregion_radius(\n            result_subproblem=result_sub,\n            rho=rho,\n            model_is_valid=valid,\n            delta=delta,\n            delta_min=delta_min,\n            delta_max=delta_max,\n            eta1=eta1,\n            gamma0=gamma0,\n            gamma1=gamma1,\n        )\n\n        (\n            model_improving_points,\n            model_indices,\n            n_modelpoints,\n            project_x_onto_null,\n        ) = find_affine_points(\n            history=history,\n            x_accepted=x_accepted,\n            model_improving_points=np.zeros((n, n)),\n            project_x_onto_null=False,\n            delta=delta,\n            theta1=theta1,\n            c=c1,\n            model_indices=model_indices,\n            n_modelpoints=0,\n        )\n\n        if n_modelpoints == n:\n            valid = True\n        else:\n            valid = False\n            (\n                model_improving_points,\n                model_indices,\n                n_modelpoints,\n                project_x_onto_null,\n            ) = find_affine_points(\n                history=history,\n                x_accepted=x_accepted,\n                model_improving_points=model_improving_points,\n                project_x_onto_null=project_x_onto_null,\n                delta=delta,\n                theta1=theta1,\n                c=c2,\n                model_indices=model_indices,\n                n_modelpoints=n_modelpoints,\n            )\n\n            if n_modelpoints < n:\n                (\n                    history,\n                    model_indices,\n                ) = add_geomtery_points_to_make_main_model_fully_linear(\n                    history=history,\n                    
main_model=main_model,\n                    model_improving_points=model_improving_points,\n                    model_indices=model_indices,\n                    x_accepted=x_accepted,\n                    n_modelpoints=n_modelpoints,\n                    delta=delta,\n                    criterion=criterion,\n                    lower_bounds=lower_bounds,\n                    upper_bounds=upper_bounds,\n                    batch_fun=batch_fun,\n                    n_cores=n_cores,\n                )\n\n        model_indices = add_accepted_point_to_residual_model(\n            model_indices, accepted_index, n_modelpoints\n        )\n\n        (\n            x_sample_monomial_basis,\n            monomial_basis,\n            basis_null_space,\n            lower_triangular,\n            n_modelpoints,\n        ) = get_feature_matrices_residual_model(\n            history=history,\n            x_accepted=x_accepted,\n            model_indices=model_indices,\n            delta=delta,\n            c2=c2,\n            theta2=theta2,\n            n_maxinterp=maxinterp,\n        )\n\n        center_info = {\"x\": x_accepted, \"radius\": delta_old}\n        centered_xs = history.get_centered_xs(\n            center_info, index=model_indices[:n_modelpoints]\n        )\n\n        center_info = {\"residuals\": residual_model.intercepts}\n        centered_residuals = history.get_centered_residuals(\n            center_info, index=model_indices\n        )\n\n        y_residuals = evaluate_residual_model(\n            centered_xs=centered_xs,\n            centered_residuals=centered_residuals,\n            residual_model=residual_model,\n        )\n\n        coefficients_residual_model = fit_residual_model(\n            m_mat=x_sample_monomial_basis,\n            n_mat=monomial_basis,\n            z_mat=basis_null_space,\n            n_z_mat=lower_triangular,\n            y_residuals=y_residuals,\n            n_modelpoints=n_modelpoints,\n        )\n\n        residual_model = 
residual_model._replace(\n            intercepts=history.get_residuals(index=accepted_index)\n        )\n        residual_model = update_residual_model(\n            residual_model=residual_model,\n            coefficients_to_add=coefficients_residual_model,\n            delta=delta,\n            delta_old=delta_old,\n        )\n\n        main_model = create_main_from_residual_model(residual_model)\n\n        gradient_norm = np.linalg.norm(main_model.linear_terms)\n        gradient_norm *= delta\n\n        (\n            last_model_indices,\n            n_last_modelpoints,\n            same_model_used,\n        ) = get_last_model_indices_and_check_for_repeated_model(\n            model_indices=model_indices,\n            last_model_indices=last_model_indices,\n            n_modelpoints=n_modelpoints,\n            n_last_modelpoints=n_last_modelpoints,\n        )\n\n        converged, convergence_reason = _check_for_convergence(\n            gradient_norm=gradient_norm,\n            gradient_norm_initial=gradient_norm_initial,\n            critval=critval_accepted,\n            delta=delta,\n            delta_old=delta_old,\n            same_model_used=same_model_used,\n            converged=converged,\n            reason=convergence_reason,\n            niter=niter,\n            gtol_abs=gtol_abs,\n            gtol_rel=gtol_rel,\n            gtol_scaled=gtol_scaled,\n            maxiter=maxiter,\n        )\n\n        if converged:\n            break\n\n    result = InternalOptimizeResult(\n        x=history.get_xs(index=accepted_index),\n        fun=history.get_best_residuals(),\n        n_iterations=niter,\n        success=converged,\n        message=convergence_reason,\n    )\n\n    return result\n\n\ndef _check_for_convergence(\n    gradient_norm,\n    gradient_norm_initial,\n    critval,\n    delta,\n    delta_old,\n    same_model_used,\n    converged,\n    reason,\n    niter,\n    *,\n    gtol_abs,\n    gtol_rel,\n    gtol_scaled,\n    maxiter,\n):\n    
\"\"\"Check for convergence.\"\"\"\n    if same_model_used and delta == delta_old:\n        converged = True\n        reason = \"Identical model used in successive iterations.\"\n    elif gradient_norm < gtol_abs:\n        converged = True\n        reason = \"Norm of the gradient is less than absolute_gradient_tolerance.\"\n    elif critval != 0 and abs(gradient_norm / critval) < gtol_rel:\n        converged = True\n        reason = (\n            \"Norm of the gradient relative to the criterion value is less than \"\n            \"relative_gradient_tolerance.\"\n        )\n    elif (\n        gradient_norm_initial != 0\n        and gradient_norm / gradient_norm_initial < gtol_scaled\n    ):\n        converged = True\n        reason = (\n            \"Norm of the gradient divided by norm of the gradient at the \"\n            \"initial parameters is less than scaled_gradient_tolerance.\"\n        )\n    elif gradient_norm_initial != 0 and gradient_norm == 0 and gtol_scaled == 0:\n        converged = True\n        reason = (\n            \"Norm of the gradient divided by norm of the gradient at the \"\n            \"initial parameters is less than scaled_gradient_tolerance.\"\n        )\n    elif critval <= -np.inf:\n        converged = True\n        reason = \"Criterion value is negative infinity.\"\n    elif niter == maxiter:\n        reason = \"Maximum number of iterations reached.\"\n\n    return converged, reason\n"
  },
  {
    "path": "src/optimagic/optimizers/pygad/__init__.py",
    "content": "\"\"\"PyGAD optimizer configuration classes and utilities.\n\nThis module provides easy access to PyGAD mutation classes and Protocols.\n\nExample:\n    # >>> import optimagic as om\n    # >>> mutation = om.optimizers.pygad.RandomMutation(\n    # ...     probability=0.15,\n    # ...     by_replacement=True,\n    # ... )\n    # >>> result = om.minimize(\n    # ...     ...,\n    # ...     algorithm=om.algos.pygad(mutation=mutation),\n    # ... )\n\n\"\"\"\n\nfrom optimagic.optimizers.pygad_optimizer import (\n    AdaptiveMutation as _AdaptiveMutation,\n)\nfrom optimagic.optimizers.pygad_optimizer import (\n    CrossoverFunction,\n    GeneConstraintFunction,\n    MutationFunction,\n    ParentSelectionFunction,\n)\nfrom optimagic.optimizers.pygad_optimizer import (\n    InversionMutation as _InversionMutation,\n)\nfrom optimagic.optimizers.pygad_optimizer import (\n    RandomMutation as _RandomMutation,\n)\nfrom optimagic.optimizers.pygad_optimizer import (\n    ScrambleMutation as _ScrambleMutation,\n)\nfrom optimagic.optimizers.pygad_optimizer import (\n    SwapMutation as _SwapMutation,\n)\n\nRandomMutation = _RandomMutation\nAdaptiveMutation = _AdaptiveMutation\nSwapMutation = _SwapMutation\nInversionMutation = _InversionMutation\nScrambleMutation = _ScrambleMutation\n\n__all__ = [\n    \"RandomMutation\",\n    \"AdaptiveMutation\",\n    \"SwapMutation\",\n    \"InversionMutation\",\n    \"ScrambleMutation\",\n    \"MutationFunction\",\n    \"CrossoverFunction\",\n    \"ParentSelectionFunction\",\n    \"GeneConstraintFunction\",\n]\n"
  },
  {
    "path": "src/optimagic/optimizers/pygad_optimizer.py",
    "content": "\"\"\"Implement PyGAD genetic algorithm optimizer.\"\"\"\n\nfrom __future__ import annotations\n\nimport warnings\nfrom dataclasses import dataclass\nfrom typing import (\n    Any,\n    Callable,\n    ClassVar,\n    Literal,\n    Protocol,\n    runtime_checkable,\n)\n\nimport numpy as np\nfrom numpy.typing import NDArray\n\nfrom optimagic import mark\nfrom optimagic.config import IS_PYGAD_INSTALLED\nfrom optimagic.exceptions import NotInstalledError\nfrom optimagic.optimization.algo_options import (\n    CONVERGENCE_GENERATIONS_NOIMPROVE,\n    CONVERGENCE_TARGET_VALUE,\n    STOPPING_MAXITER,\n    get_population_size,\n)\nfrom optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult\nfrom optimagic.optimization.internal_optimization_problem import (\n    InternalOptimizationProblem,\n)\nfrom optimagic.typing import (\n    AggregationLevel,\n    Direction,\n    PositiveFloat,\n    PositiveInt,\n    ProbabilityFloat,\n    PyTree,\n)\n\n\n@runtime_checkable\nclass ParentSelectionFunction(Protocol):\n    \"\"\"Protocol for user-defined parent selection functions.\n\n    Args:\n        fitness: Array of fitness values for all solutions in the population.\n        num_parents: Number of parents to select.\n        ga_instance: The PyGAD GA instance.\n\n    Returns:\n        Tuple of (selected_parents, parent_indices) where:\n        - selected_parents: 2D array of selected parent solutions\n        - parent_indices: 1D array of indices of selected parents\n\n    \"\"\"\n\n    def __call__(\n        self, fitness: NDArray[np.float64], num_parents: int, ga_instance: Any\n    ) -> tuple[NDArray[np.float64], NDArray[np.int_]]: ...\n\n\n@runtime_checkable\nclass CrossoverFunction(Protocol):\n    \"\"\"Protocol for user-defined crossover functions.\n\n    Args:\n        parents: 2D array of parent solutions selected for mating.\n        offspring_size: Tuple (num_offspring, num_genes) specifying the shape\n            of the offspring population 
to be generated.\n        ga_instance: The PyGAD GA instance.\n\n    Returns:\n        2D array of offspring solutions generated from the parents.\n\n    \"\"\"\n\n    def __call__(\n        self,\n        parents: NDArray[np.float64],\n        offspring_size: tuple[int, int],\n        ga_instance: Any,\n    ) -> NDArray[np.float64]: ...\n\n\n@runtime_checkable\nclass MutationFunction(Protocol):\n    \"\"\"Protocol for user-defined mutation functions.\n\n    Args:\n        offspring: 2D array of offspring solutions to be mutated.\n        ga_instance: The PyGAD GA instance.\n\n    Returns:\n        2D array of mutated offspring solutions.\n\n    \"\"\"\n\n    def __call__(\n        self, offspring: NDArray[np.float64], ga_instance: Any\n    ) -> NDArray[np.float64]: ...\n\n\n@runtime_checkable\nclass GeneConstraintFunction(Protocol):\n    \"\"\"Protocol for user-defined gene constraint functions.\n\n    Gene constraint functions are applied to individual genes to enforce\n    specific constraints on their values. Each function receives the current\n    solution and a list of candidate values, then returns the constrained\n    values.\n\n    Args:\n        solution: Current solution array containing all gene values.\n        values: List or array of candidate values for the gene being\n            constrained.\n\n    Returns:\n        Constrained values as a list or array, ensuring they satisfy the\n        gene's specific constraints.\n\n    \"\"\"\n\n    def __call__(\n        self,\n        solution: NDArray[np.float64],\n        values: list[float] | NDArray[np.float64],\n    ) -> list[float] | NDArray[np.float64]: ...\n\n\n@dataclass(frozen=True)\nclass _BuiltinMutation:\n    \"\"\"Base class for all built-in PyGAD mutation configurations.\n\n    Note:\n        This is an internal base class. Users should not inherit from it\n        directly. To configure a built-in mutation, use one of its subclasses\n        (e.g., `RandomMutation`, `AdaptiveMutation`). 
To define a custom\n        mutation, provide a function that conforms to the `MutationFunction`\n        protocol.\n\n    \"\"\"\n\n    mutation_type: ClassVar[str] = \"random\"\n\n    def to_pygad_params(self) -> dict[str, Any]:\n        \"\"\"Convert mutation configuration to PyGAD parameters.\n\n        Default implementation that works for simple mutations. Complex\n        mutations (RandomMutation, AdaptiveMutation) should override this.\n\n        Returns:\n            Dictionary of PyGAD mutation parameters.\n\n        \"\"\"\n        return {\n            \"mutation_type\": self.mutation_type,\n            \"mutation_probability\": None,\n            \"mutation_percent_genes\": \"default\",\n            \"mutation_num_genes\": None,\n            \"mutation_by_replacement\": False,\n        }\n\n\n@dataclass(frozen=True)\nclass RandomMutation(_BuiltinMutation):\n    \"\"\"Configuration for the random mutation in PyGAD.\n\n    The random mutation selects a subset of genes in each solution and either\n    replaces each selected gene with a new random value or adds a random value\n    to it.\n\n    The exact behavior depends on the `by_replacement` parameter: If\n    `by_replacement` is True, the selected genes are replaced with new values;\n    if False, random values are added to the existing gene values.\n\n    The mutation rate is determined by the mutation probability, the number of\n    genes, or the percentage of genes (with priority: probability > num_genes\n    > percent_genes).\n\n    \"\"\"\n\n    mutation_type: ClassVar[str] = \"random\"\n\n    probability: ProbabilityFloat | None = None\n    \"\"\"Probability of mutating each gene.\n\n    If specified, takes precedence over num_genes and percent_genes. 
Range [0, 1].\n\n    \"\"\"\n\n    num_genes: PositiveInt | None = None\n    \"\"\"Number of genes to mutate per solution.\n\n    Takes precedence over percent_genes but is ignored if probability is specified.\n\n    \"\"\"\n\n    percent_genes: PositiveFloat | str = \"default\"\n    \"\"\"Percentage of genes to mutate in each solution.\n\n    - \"default\": Uses 10% of genes (PyGAD default)\n    - Numeric value: Percentage (0-100)\n\n    Ignored if probability or num_genes are specified.\n\n    \"\"\"\n\n    by_replacement: bool = False\n    \"\"\"If True, replace gene values with random values.\n\n    If False, add random values to existing gene values.\n\n    \"\"\"\n\n    def to_pygad_params(self) -> dict[str, Any]:\n        \"\"\"Convert RandomMutation configuration to PyGAD parameters.\"\"\"\n        return {\n            \"mutation_type\": self.mutation_type,\n            \"mutation_probability\": self.probability,\n            \"mutation_percent_genes\": self.percent_genes,\n            \"mutation_num_genes\": self.num_genes,\n            \"mutation_by_replacement\": self.by_replacement,\n        }\n\n\n@dataclass(frozen=True)\nclass SwapMutation(_BuiltinMutation):\n    \"\"\"Configuration for the swap mutation in PyGAD.\n\n    The swap mutation selects two random genes and exchanges their values. This\n    operation maintains all gene values, altering only their positions within the\n    chromosome.\n\n    No additional parameters are required for this mutation type.\n\n    \"\"\"\n\n    mutation_type: ClassVar[str] = \"swap\"\n\n\n@dataclass(frozen=True)\nclass InversionMutation(_BuiltinMutation):\n    \"\"\"Configuration for the inversion mutation in PyGAD.\n\n    The inversion mutation selects a contiguous segment of genes and reverses their\n    order. 
All gene values remain unchanged; only the ordering within the selected\n    segment is altered.\n\n    No additional parameters are required for this mutation type.\n\n    \"\"\"\n\n    mutation_type: ClassVar[str] = \"inversion\"\n\n\n@dataclass(frozen=True)\nclass ScrambleMutation(_BuiltinMutation):\n    \"\"\"Configuration for the scramble mutation in PyGAD.\n\n    The scramble mutation randomly shuffles the genes within a contiguous segment. This\n    preserves gene values but changes their order within the chosen segment.\n\n    No additional parameters are required for this mutation type.\n\n    \"\"\"\n\n    mutation_type: ClassVar[str] = \"scramble\"\n\n\n@dataclass(frozen=True)\nclass AdaptiveMutation(_BuiltinMutation):\n    \"\"\"Configuration for the adaptive mutation in PyGAD.\n\n    The adaptive mutation dynamically adjusts the mutation rate based on\n    solution quality. Solutions whose objective value is worse than the\n    current population median receive a higher mutation rate to encourage\n    exploration, while better-than-median solutions receive a lower rate\n    to preserve promising traits.\n\n    If no mutation rate parameters are specified, this mutation defaults to using\n    probabilities, with a 10% rate for bad solutions (`probability_bad=0.1`)\n    and a 5% rate for good solutions (`probability_good=0.05`).\n\n    **Parameter Precedence:**\n    The mutation rate is determined by the first set of parameters found, in the\n    following order of priority:\n    1. `probability_bad` and `probability_good`\n    2. `num_genes_bad` and `num_genes_good`\n    3. `percent_genes_bad` and `percent_genes_good`\n\n    \"\"\"\n\n    mutation_type: ClassVar[str] = \"adaptive\"\n\n    probability_bad: ProbabilityFloat | None = None\n    \"\"\"Probability of mutating each gene for below-average fitness solutions.\n\n    If specified, takes precedence over num_genes_bad and percent_genes_bad. Range [0,\n    1]. 
If no mutation rate parameters are provided at all, this defaults to\n    0.1 (10% mutation rate for bad fitness solutions).\n\n    \"\"\"\n\n    probability_good: ProbabilityFloat | None = None\n    \"\"\"Probability of mutating each gene for above-average fitness solutions.\n\n    If specified, takes precedence over num_genes_good and percent_genes_good. Range [0,\n    1]. If no mutation rate parameters are provided at all, this defaults to\n    0.05 (5% mutation rate for good fitness solutions).\n\n    \"\"\"\n\n    num_genes_bad: PositiveInt | None = None\n    \"\"\"Number of genes to mutate for below-average fitness solutions.\n\n    Takes precedence over percent_genes_bad but is ignored if probability_bad is\n    specified.\n\n    \"\"\"\n\n    num_genes_good: PositiveInt | None = None\n    \"\"\"Number of genes to mutate for above-average fitness solutions.\n\n    Takes precedence over percent_genes_good but is ignored if probability_good is\n    specified.\n\n    \"\"\"\n\n    percent_genes_bad: PositiveFloat | None = None\n    \"\"\"Percentage of genes to mutate for below-average fitness solutions.\n\n    Ignored if probability_bad or num_genes_bad are specified.\n\n    \"\"\"\n\n    percent_genes_good: PositiveFloat | None = None\n    \"\"\"Percentage of genes to mutate for above-average fitness solutions.\n\n    Ignored if probability_good or num_genes_good are specified.\n\n    \"\"\"\n\n    by_replacement: bool = False\n    \"\"\"If True, replace gene values with random values.\n\n    If False, add random values to existing gene values.\n\n    \"\"\"\n\n    def to_pygad_params(self) -> dict[str, Any]:\n        \"\"\"Convert AdaptiveMutation configuration to PyGAD parameters.\"\"\"\n        mutation_probability: list[float] | None = None\n        mutation_num_genes: list[int] | None = None\n        mutation_percent_genes: list[float] | str | None = None\n\n        if self.probability_bad is not None and self.probability_good is not None:\n            
mutation_probability = [self.probability_bad, self.probability_good]\n        elif self.num_genes_bad is not None and self.num_genes_good is not None:\n            mutation_num_genes = [self.num_genes_bad, self.num_genes_good]\n        elif self.percent_genes_bad is not None and self.percent_genes_good is not None:\n            mutation_percent_genes = [self.percent_genes_bad, self.percent_genes_good]\n        else:\n            mutation_probability = [\n                self.probability_bad or 0.1,\n                self.probability_good or 0.05,\n            ]\n\n        return {\n            \"mutation_type\": self.mutation_type,\n            \"mutation_probability\": mutation_probability,\n            \"mutation_percent_genes\": mutation_percent_genes,\n            \"mutation_num_genes\": mutation_num_genes,\n            \"mutation_by_replacement\": self.by_replacement,\n        }\n\n\n@mark.minimizer(\n    name=\"pygad\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_PYGAD_INSTALLED,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=True,\n    supports_parallelism=True,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass Pygad(Algorithm):\n    \"\"\"Minimize a scalar function using the PyGAD genetic algorithm.\n\n    This optimizer wraps the PyGAD genetic algorithm package :cite:`gad2023pygad`,\n    a population-based evolutionary method for global optimization. 
It maintains a\n    population of candidate solutions and evolves them over generations using\n    biologically inspired operations: selection (choosing parents based on fitness),\n    crossover (combining genes from parents), and mutation (introducing random\n    variations).\n\n    The algorithm is well-suited for global optimization problems with multiple local\n    optima, black-box optimization where gradients are unavailable or difficult to\n    compute.\n\n    All variables must have finite bounds. Parallel fitness evaluation is supported via\n    batch processing.\n\n    For more details, see the\n    `PyGAD documentation <https://pygad.readthedocs.io/en/latest/>`_.\n\n    \"\"\"\n\n    population_size: PositiveInt | None = None\n    \"\"\"Number of solutions in each generation.\n\n    Larger populations explore the search space more thoroughly but require\n    more fitness evaluations per generation. If None, optimagic sets this to\n    ``max(10, 10 * (problem_dimension + 1))``.\n\n    \"\"\"\n\n    num_parents_mating: PositiveInt | None = 10\n    \"\"\"Number of parents selected for mating in each generation.\n\n    Higher values can speed up convergence but may risk premature convergence.\n    If None, defaults to ``max(2, population_size // 2)``.\n\n    \"\"\"\n\n    num_generations: PositiveInt | None = 50\n    \"\"\"Number of generations to evolve the population.\"\"\"\n\n    stopping_maxiter: PositiveInt = STOPPING_MAXITER\n    \"\"\"Maximum number of iterations (generations) to run.\n\n    This corresponds to PyGAD's num_generations parameter.\n\n    \"\"\"\n\n    initial_population: list[PyTree] | None = None\n    \"\"\"Optional initial population as a list of parameter PyTrees.\n\n    If None, the population is initialized randomly within parameter bounds.\n\n    \"\"\"\n\n    parent_selection_type: (\n        Literal[\"sss\", \"rws\", \"sus\", \"rank\", \"random\", \"tournament\"]\n        | ParentSelectionFunction\n    ) = \"sss\"\n    
\"\"\"Parent selection strategy used to choose parents for crossover.\n\n    Available methods:\n\n    * ``\"sss\"``: Steady-State Selection (selects the best individuals to continue)\n    * ``\"rws\"``: Roulette Wheel Selection (probabilistic, fitness-proportional)\n    * ``\"sus\"``: Stochastic Universal Sampling (even sampling across population)\n    * ``\"rank\"``: Rank Selection (selects based on rank order)\n    * ``\"random\"``: Random Selection\n    * ``\"tournament\"``: Tournament Selection (best from K randomly chosen individuals)\n\n    Alternatively, provide a custom function with signature\n    ``(fitness, num_parents, ga_instance) -> tuple[NDArray, NDArray]``.\n\n    \"\"\"\n\n    keep_parents: int = -1\n    \"\"\"Number of best parents to keep in the next generation.\n\n    Only used if ``keep_elitism = 0``. Values:\n\n    * ``-1``: Keep all parents in the next generation (default)\n    * ``0``: Keep no parents in the next generation\n    * Positive integer: Keep the specified number of best parents\n\n    \"\"\"\n\n    keep_elitism: int = 1\n    \"\"\"Number of elite (best) solutions preserved each generation.\n\n    Range: 0 to population_size. If greater than 0, takes precedence over\n    ``keep_parents``. 
When 0, elitism is disabled and ``keep_parents``\n    controls parent retention.\n\n    \"\"\"\n\n    K_tournament: PositiveInt = 3\n    \"\"\"Tournament size for parent selection when\n    ``parent_selection_type=\"tournament\"``.\"\"\"\n\n    crossover_type: (\n        Literal[\"single_point\", \"two_points\", \"uniform\", \"scattered\"]\n        | CrossoverFunction\n        | None\n    ) = \"single_point\"\n    \"\"\"Crossover operator for generating offspring.\n\n    Available methods:\n\n    * ``\"single_point\"``: Single-point crossover\n    * ``\"two_points\"``: Two-point crossover\n    * ``\"uniform\"``: Uniform crossover (randomly mixes genes)\n    * ``\"scattered\"``: Scattered crossover (random mask)\n\n    Or provide a custom function with signature\n    ``(parents, offspring_size, ga_instance) -> NDArray``.\n\n    \"\"\"\n\n    crossover_probability: ProbabilityFloat | None = None\n    \"\"\"Probability of applying crossover to selected parents.\n\n    Range [0, 1]. If None, uses PyGAD's default.\n\n    \"\"\"\n\n    mutation: (\n        Literal[\"random\", \"swap\", \"inversion\", \"scramble\", \"adaptive\"]\n        | type[_BuiltinMutation]\n        | _BuiltinMutation\n        | MutationFunction\n        | None\n    ) = \"random\"\n    \"\"\"Mutation operator for introducing genetic diversity.\n\n    Available options:\n\n    **String values for default configurations:**\n\n    * ``\"random\"``: Random mutation with default parameters\n    * ``\"swap\"``: Swap mutation with default parameters\n    * ``\"inversion\"``: Inversion mutation with default parameters\n    * ``\"scramble\"``: Scramble mutation with default parameters\n    * ``\"adaptive\"``: Adaptive random mutation with default parameters\n\n    **Mutation classes for default configurations:**\n\n    * Any mutation class (e.g., ``RandomMutation``, ``SwapMutation``,\n      ``AdaptiveMutation``, etc.)\n    * All classes can be used without parameters for default behavior\n\n    **Configured 
mutation instances:**\n\n    * Any mutation instance (e.g., ``RandomMutation(...)``,\n      ``SwapMutation()``, etc.)\n    * All mutation classes inherit from ``_BuiltinMutation``\n\n    **Custom function:**\n\n    * Custom function with signature ``(offspring, ga_instance) -> NDArray``\n\n    **Disable mutation:**\n\n    * ``None`` to disable mutation\n\n    \"\"\"\n\n    allow_duplicate_genes: bool = True\n    \"\"\"If True, duplicate gene values are allowed within a solution.\"\"\"\n\n    gene_constraint: list[GeneConstraintFunction | None] | None = None\n    \"\"\"Optional list of per-gene constraint functions.\n\n    Each with signature ``(solution, values) -> list[float] | NDArray``.\n\n    \"\"\"\n\n    sample_size: PositiveInt = 100\n    \"\"\"Number of values to sample when enforcing uniqueness or gene constraints.\"\"\"\n\n    batch_size: PositiveInt | None = None\n    \"\"\"Number of solutions to evaluate in parallel batches.\n\n    If None and ``n_cores > 1``, automatically set to ``n_cores``.\n\n    \"\"\"\n\n    convergence_target_value: PositiveFloat | None = CONVERGENCE_TARGET_VALUE\n    \"\"\"Target value for early stopping.\n\n    Default: None.\n\n    \"\"\"\n\n    convergence_generations_noimprove: PositiveInt | None = (\n        CONVERGENCE_GENERATIONS_NOIMPROVE\n    )\n    \"\"\"Maximum generations without fitness improvement before stopping.\n\n    Default: None.\n\n    \"\"\"\n\n    n_cores: PositiveInt = 1\n    \"\"\"Number of CPU cores for parallel fitness evaluation.\"\"\"\n\n    seed: int | None = None\n    \"\"\"Random seed for reproducibility.\"\"\"\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        if not IS_PYGAD_INSTALLED:\n            raise NotInstalledError(\n                \"The 'pygad' algorithm requires the pygad package to be \"\n                \"installed. 
You can install it with 'pip install pygad'.\"\n            )\n\n        _validate_user_defined_functions(\n            parent_selection_type=self.parent_selection_type,\n            crossover_type=self.crossover_type,\n            gene_constraint=self.gene_constraint,\n        )\n        import pygad\n\n        if (\n            problem.bounds.lower is None\n            or problem.bounds.upper is None\n            or not np.isfinite(problem.bounds.lower).all()\n            or not np.isfinite(problem.bounds.upper).all()\n        ):\n            raise ValueError(\"pygad requires finite bounds for all parameters.\")\n\n        # Determine effective batch_size for parallel processing\n        effective_batch_size = _determine_effective_batch_size(\n            self.batch_size, self.n_cores\n        )\n\n        if (\n            effective_batch_size is not None\n            and effective_batch_size > 1\n            and self.n_cores > 1\n        ):\n\n            def _fitness_func_batch(\n                _ga_instance: Any,\n                batch_solutions: NDArray[np.float64],\n                _batch_indices: list[int] | NDArray[np.int_],\n            ) -> list[float]:\n                solutions_list: list[NDArray[np.float64]] = [\n                    np.asarray(batch_solutions[i])\n                    for i in range(batch_solutions.shape[0])\n                ]\n                batch_results = problem.batch_fun(\n                    solutions_list,\n                    n_cores=self.n_cores,\n                    batch_size=effective_batch_size,\n                )\n\n                return [-float(result) for result in batch_results]\n\n            fitness_function: Any = _fitness_func_batch\n        else:\n\n            def _fitness_func_single(\n                _ga_instance: Any, solution: NDArray[np.float64], _solution_idx: int\n            ) -> float:\n                return -float(problem.fun(solution))\n\n            fitness_function = _fitness_func_single\n\n      
  population_size = get_population_size(\n            population_size=self.population_size, x=x0, lower_bound=10\n        )\n\n        num_parents_mating = (\n            self.num_parents_mating\n            if self.num_parents_mating is not None\n            else max(2, population_size // 2)\n        )\n\n        if self.initial_population is not None:\n            initial_population = np.array(\n                [\n                    problem.converter.params_to_internal(params)\n                    for params in self.initial_population\n                ]\n            )\n        else:\n            num_genes = len(x0)\n\n            initial_population = np.random.uniform(\n                problem.bounds.lower,\n                problem.bounds.upper,\n                size=(population_size, num_genes),\n            )\n\n            initial_population[0] = x0\n\n        gene_space = [\n            {\"low\": problem.bounds.lower[i], \"high\": problem.bounds.upper[i]}\n            for i in range(len(x0))\n        ]\n\n        # Convert mutation parameter to PyGAD parameters\n        mutation_params = _convert_mutation_to_pygad_params(self.mutation)\n\n        # Build stop criteria from convergence parameters\n        stop_criteria = _build_stop_criteria(\n            self.convergence_target_value,\n            self.convergence_generations_noimprove,\n            direction=problem.direction,\n        )\n\n        ga_instance = pygad.GA(\n            num_generations=self.num_generations,\n            num_parents_mating=num_parents_mating,\n            fitness_func=fitness_function,\n            fitness_batch_size=effective_batch_size,\n            initial_population=initial_population,\n            gene_space=gene_space,\n            parent_selection_type=self.parent_selection_type,\n            keep_parents=self.keep_parents,\n            keep_elitism=self.keep_elitism,\n            K_tournament=self.K_tournament,\n            crossover_type=self.crossover_type,\n         
   crossover_probability=self.crossover_probability,\n            mutation_type=mutation_params[\"mutation_type\"],\n            mutation_probability=mutation_params[\"mutation_probability\"],\n            mutation_by_replacement=mutation_params[\"mutation_by_replacement\"],\n            mutation_percent_genes=mutation_params[\"mutation_percent_genes\"],\n            mutation_num_genes=mutation_params[\"mutation_num_genes\"],\n            allow_duplicate_genes=self.allow_duplicate_genes,\n            gene_constraint=self.gene_constraint,\n            sample_size=self.sample_size,\n            stop_criteria=stop_criteria,\n            parallel_processing=None,\n            random_seed=self.seed,\n        )\n\n        ga_instance.run()\n\n        result = _process_pygad_result(ga_instance)\n\n        return result\n\n\ndef _convert_mutation_to_pygad_params(mutation: Any) -> dict[str, Any]:\n    \"\"\"Convert the mutation parameter to PyGAD mutation parameters.\n\n    Handles strings, classes, instances, and custom functions using the\n    new mutation dataclass system with built-in conversion methods.\n\n    Returns:\n        Dictionary of PyGAD mutation parameters.\n\n    \"\"\"\n    params: dict[str, Any]\n\n    if mutation is None:\n        params = _get_default_mutation_params(mutation_type=None)\n\n    elif isinstance(mutation, str):\n        mutation_instance = _create_mutation_from_string(mutation)\n        params = mutation_instance.to_pygad_params()\n\n    elif isinstance(mutation, type) and issubclass(mutation, _BuiltinMutation):\n        mutation_instance = mutation()\n        params = mutation_instance.to_pygad_params()\n\n    elif isinstance(mutation, _BuiltinMutation):\n        params = mutation.to_pygad_params()\n\n    elif isinstance(mutation, MutationFunction):\n        params = _get_default_mutation_params(mutation_type=mutation)\n\n    else:\n        raise ValueError(f\"Unsupported mutation type: {type(mutation)}\")\n\n    return params\n\n\ndef 
_get_default_mutation_params(mutation_type: Any = \"random\") -> dict[str, Any]:\n    \"\"\"Get default PyGAD mutation parameters.\"\"\"\n    return {\n        \"mutation_type\": mutation_type,\n        \"mutation_probability\": None,\n        \"mutation_percent_genes\": None if mutation_type is None else \"default\",\n        \"mutation_num_genes\": None,\n        \"mutation_by_replacement\": None if mutation_type is None else False,\n    }\n\n\ndef _create_mutation_from_string(mutation_type: str) -> _BuiltinMutation:\n    \"\"\"Create a mutation instance from a string type.\n\n    Args:\n        mutation_type: String mutation type (e.g., \"random\", \"swap\", etc.)\n\n    Returns:\n        Appropriate mutation instance.\n\n    Raises:\n        ValueError: If mutation_type is not supported.\n\n    \"\"\"\n    mutation_map = {\n        \"random\": RandomMutation,\n        \"swap\": SwapMutation,\n        \"inversion\": InversionMutation,\n        \"scramble\": ScrambleMutation,\n        \"adaptive\": AdaptiveMutation,\n    }\n\n    if mutation_type not in mutation_map:\n        raise ValueError(f\"Unsupported mutation type: {mutation_type}\")\n\n    return mutation_map[mutation_type]()\n\n\ndef _determine_effective_batch_size(batch_size: int | None, n_cores: int) -> int | None:\n    \"\"\"Determine the effective batch_size for parallel processing.\n\n    Behavior:\n    - If `batch_size` is explicitly provided:\n      - The value is returned unchanged.\n      - A warning is issued if it is less than `n_cores`, as this may\n        underutilize available cores.\n    - If `batch_size` is `None`:\n      - If `n_cores` > 1, defaults to `n_cores`.\n      - Otherwise, returns None (i.e., single-threaded evaluation).\n\n    Args:\n        batch_size: User-specified batch size or None\n        n_cores: Number of cores for parallel processing\n\n    Returns:\n        Effective batch size for PyGAD, or None for single-threaded\n        processing\n\n    \"\"\"\n    result = 
None\n\n    if batch_size is not None:\n        if batch_size < n_cores:\n            warnings.warn(\n                f\"batch_size ({batch_size}) is smaller than \"\n                f\"n_cores ({n_cores}). This may reduce parallel efficiency. \"\n                f\"Consider setting batch_size >= n_cores.\"\n            )\n        result = batch_size\n    elif n_cores > 1:\n        result = n_cores\n\n    return result\n\n\ndef _build_stop_criteria(\n    target_criterion: float | None,\n    saturate_generations: int | None,\n    direction: Direction,\n) -> str | list[str] | None:\n    \"\"\"Build PyGAD stop criteria from optimagic convergence parameters.\n\n    Args:\n        target_criterion: Target value that the objective function should reach.\n        saturate_generations: Max generations without improvement before stopping.\n        direction: Direction of optimization (Direction.MINIMIZE or Direction.MAXIMIZE).\n\n    Returns:\n        PyGAD stop criteria string, list of strings, or None.\n\n    \"\"\"\n    criteria = []\n\n    if target_criterion is not None:\n        pygad_target_fitness = (\n            -target_criterion if direction is Direction.MINIMIZE else target_criterion\n        )\n        criteria.append(f\"reach_{pygad_target_fitness}\")\n\n    if saturate_generations is not None:\n        criteria.append(f\"saturate_{saturate_generations}\")\n\n    return criteria[0] if len(criteria) == 1 else (criteria or None)\n\n\ndef _validate_user_defined_functions(\n    parent_selection_type: str | Callable[..., object] | None,\n    crossover_type: str | Callable[..., object] | None,\n    gene_constraint: list[GeneConstraintFunction | None] | None,\n) -> None:\n    \"\"\"Validate user-provided functions for selection, crossover, and constraints.\"\"\"\n    if parent_selection_type is None:\n        pass\n    elif isinstance(parent_selection_type, str):\n        _validate_string_choice(\n            parent_selection_type,\n            [\"sss\", \"rws\", 
\"sus\", \"rank\", \"random\", \"tournament\"],\n            \"parent_selection_type\",\n        )\n    elif callable(parent_selection_type):\n        _validate_protocol_function(\n            parent_selection_type,\n            ParentSelectionFunction,\n            \"parent_selection_type\",\n        )\n    else:\n        raise ValueError(\n            \"parent_selection_type must be a string, callable, or None, \"\n            f\"got {type(parent_selection_type)}\"\n        )\n\n    if crossover_type is None:\n        pass\n    elif isinstance(crossover_type, str):\n        _validate_string_choice(\n            crossover_type,\n            [\"single_point\", \"two_points\", \"uniform\", \"scattered\"],\n            \"crossover_type\",\n        )\n    elif callable(crossover_type):\n        _validate_protocol_function(\n            crossover_type,\n            CrossoverFunction,\n            \"crossover_type\",\n        )\n    else:\n        raise ValueError(\n            \"crossover_type must be a string, callable, or None, \"\n            f\"got {type(crossover_type)}\"\n        )\n\n    if gene_constraint is not None:\n        if not isinstance(gene_constraint, list):\n            raise ValueError(\n                f\"gene_constraint must be a list or None, got {type(gene_constraint)}\"\n            )\n        for i, constraint_func in enumerate(gene_constraint):\n            if constraint_func is not None:\n                if not callable(constraint_func):\n                    raise TypeError(\n                        f\"gene_constraint[{i}] must be callable, or None, \"\n                        f\"got {type(constraint_func)}\"\n                    )\n                _validate_protocol_function(\n                    constraint_func,\n                    GeneConstraintFunction,\n                    f\"gene_constraint[{i}]\",\n                )\n\n\ndef _validate_string_choice(value: str, valid_choices: list[str], name: str) -> None:\n    \"\"\"Ensure a string 
parameter is one of the allowed choices.\"\"\"\n    if value not in valid_choices:\n        raise ValueError(f\"{name} must be one of {valid_choices}, got '{value}'.\")\n\n\ndef _validate_protocol_function(\n    func: Callable[..., Any], protocol: Any, name: str\n) -> None:\n    \"\"\"Ensure a callable satisfies the expected protocol interface.\"\"\"\n    if not isinstance(func, protocol):\n        raise TypeError(f\"{name} must implement {protocol.__name__}.\")\n\n\ndef _process_pygad_result(ga_instance: Any) -> InternalOptimizeResult:\n    \"\"\"Process PyGAD result into InternalOptimizeResult.\n\n    Args:\n        ga_instance: The PyGAD instance after running the optimization\n\n    Returns:\n        InternalOptimizeResult: Processed optimization results\n\n    \"\"\"\n    best_solution, best_fitness, _ = ga_instance.best_solution()\n\n    best_criterion = -best_fitness\n\n    completed_generations = ga_instance.generations_completed\n    success = ga_instance.run_completed\n    if success:\n        message = (\n            \"Optimization terminated successfully.\\n\"\n            f\"Generations completed: {completed_generations}\"\n        )\n    else:\n        message = (\n            \"Optimization failed to complete.\\n\"\n            f\"Generations completed: {completed_generations}\"\n        )\n\n    return InternalOptimizeResult(\n        x=best_solution,\n        fun=best_criterion,\n        success=success,\n        message=message,\n        n_fun_evals=ga_instance.generations_completed * ga_instance.pop_size[0],\n    )\n"
  },
  {
    "path": "src/optimagic/optimizers/pygmo_optimizers.py",
    "content": "\"\"\"Implement pygmo optimizers.\n\nNotes for converting to the new algorithm interface:\n\n- `create_algo_options` is not needed anymore because the only thing it did was mixing\n  options that are supported by all optimizers (e.g. population_size, seed) with\n  specific options. Then later they had to be taken appart again. Instead you need\n  to pass  a few more arguments directly to `_minimize_pygmo`.\n- Calling `_check_that_every_param_is_bounded` is not needed anymore. I do that check\n  once in `_minimize_pygmo`.\n- The documentation often just says float where I suspect PositiveFloats; Leave it at\n  float for now and add todos where needed. Don't spend a lot of time on it.\n- There are some specific type checks and type conversions that should not be needed\n  anymore after switching to the new interface.\n- Whenever we had batch_evaluator as `algo_option` we don't need it anymore but we\n  should have `n_cores` in those algorithms.\n\n\"\"\"\n\nfrom __future__ import annotations\n\nimport warnings\nfrom dataclasses import dataclass\nfrom typing import TYPE_CHECKING, Any, List, Literal\n\nimport numpy as np\nfrom numpy.typing import NDArray\n\nfrom optimagic import mark\nfrom optimagic.config import IS_PYGMO_INSTALLED\nfrom optimagic.exceptions import NotInstalledError\nfrom optimagic.optimization.algo_options import (\n    CONVERGENCE_XTOL_REL,\n    STOPPING_MAXFUN_GLOBAL,\n    get_population_size,\n)\nfrom optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult\nfrom optimagic.optimization.internal_optimization_problem import (\n    InternalOptimizationProblem,\n)\nfrom optimagic.typing import (\n    AggregationLevel,\n    NonNegativeFloat,\n    PositiveFloat,\n    PositiveInt,\n)\n\nSTOPPING_MAX_ITERATIONS_GENETIC = 250\n\nif TYPE_CHECKING:\n    import pygmo as pg\n\n\n@mark.minimizer(\n    name=\"pygmo_gaco\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_PYGMO_INSTALLED,\n    is_global=True,\n    
needs_jac=False,\n    needs_hess=False,\n    needs_bounds=True,\n    supports_parallelism=True,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass PygmoGaco(Algorithm):\n    population_size: int | None = None\n    n_cores: int = 1\n    seed: int | None = None\n    discard_start_params: bool = False\n    stopping_maxiter: PositiveInt = STOPPING_MAX_ITERATIONS_GENETIC\n    kernel_size: PositiveInt = 63\n    speed_parameter_q: PositiveFloat = 1.0\n    oracle: float = 0.0\n    accuracy: PositiveFloat = 0.01\n    threshold: PositiveInt = 1\n    speed_of_std_values_convergence: int = 7\n    stopping_max_n_without_improvements: PositiveInt = 100000\n    stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL\n    focus: NonNegativeFloat = 0.0\n    cache: bool = False\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        population_size = get_population_size(\n            population_size=self.population_size, x=x0, lower_bound=64\n        )\n\n        algo_specific_options = {\n            \"gen\": self.stopping_maxiter,\n            \"ker\": self.kernel_size,\n            \"q\": self.speed_parameter_q,\n            \"oracle\": self.oracle,\n            \"acc\": self.accuracy,\n            \"threshold\": self.threshold,\n            \"n_gen_mark\": self.speed_of_std_values_convergence,\n            \"impstop\": self.stopping_max_n_without_improvements,\n            \"evalstop\": self.stopping_maxfun,\n            \"focus\": self.focus,\n            \"memory\": self.cache,\n        }\n\n        res = _minimize_pygmo(\n            problem=problem,\n            x0=x0,\n            method=\"gaco\",\n            specific_options=algo_specific_options,\n            population_size=population_size,\n            
n_cores=self.n_cores,\n            seed=self.seed,\n            discard_start_params=self.discard_start_params,\n        )\n\n        return res\n\n\n@mark.minimizer(\n    name=\"pygmo_bee_colony\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_PYGMO_INSTALLED,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=True,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass PygmoBeeColony(Algorithm):\n    stopping_maxiter: PositiveInt = STOPPING_MAX_ITERATIONS_GENETIC\n    seed: int | None = None\n    discard_start_params: bool = False\n    max_n_trials: PositiveInt = 1\n    population_size: int | None = None\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        population_size = get_population_size(\n            population_size=self.population_size, x=x0, lower_bound=20\n        )\n\n        algo_specific_options = {\n            \"limit\": self.max_n_trials,\n            \"gen\": self.stopping_maxiter,\n        }\n\n        res = _minimize_pygmo(\n            problem=problem,\n            x0=x0,\n            method=\"bee_colony\",\n            specific_options=algo_specific_options,\n            population_size=population_size,\n            n_cores=1,\n            seed=self.seed,\n            discard_start_params=self.discard_start_params,\n        )\n        return res\n\n\n@mark.minimizer(\n    name=\"pygmo_de\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_PYGMO_INSTALLED,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=True,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    
supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass PygmoDe(Algorithm):\n    population_size: int | None = None\n    seed: int | None = None\n    discard_start_params: bool = False\n    stopping_maxiter: PositiveInt = STOPPING_MAX_ITERATIONS_GENETIC\n    # TODO: Refine type to fix range [0,2]\n    weight_coefficient: NonNegativeFloat = 0.8\n    # TODO: Probably refine type to fix range [0,1]\n    crossover_probability: NonNegativeFloat = 0.9\n    mutation_variant: Literal[\n        \"best/1/exp\",\n        \"rand/1/exp\",\n        \"rand-to-best/1/exp\",\n        \"best/2/exp\",\n        \"rand/2/exp\",\n        \"best/1/bin\",\n        \"rand/1/bin\",\n        \"rand-to-best/1/bin\",\n        \"best/2/bin\",\n        \"rand/2/bin\",\n    ] = \"rand/1/exp\"\n    convergence_criterion_tolerance: NonNegativeFloat = 1e-6\n    convergence_relative_params_tolerance: NonNegativeFloat = CONVERGENCE_XTOL_REL\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        population_size = get_population_size(\n            population_size=self.population_size, x=x0, lower_bound=10\n        )\n        # support both integer and string specification of the mutation variant\n        mutation_variant_str_to_int = {\n            \"best/1/exp\": 1,\n            \"rand/1/exp\": 2,\n            \"rand-to-best/1/exp\": 3,\n            \"best/2/exp\": 4,\n            \"rand/2/exp\": 5,\n            \"best/1/bin\": 6,\n            \"rand/1/bin\": 7,\n            \"rand-to-best/1/bin\": 8,\n            \"best/2/bin\": 9,\n            \"rand/2/bin\": 10,\n        }\n        mutation_variant = _convert_str_to_int(\n            str_to_int=mutation_variant_str_to_int, value=self.mutation_variant\n        )\n\n        algo_specific_options = {\n            \"gen\": self.stopping_maxiter,\n            \"F\": self.weight_coefficient,\n            \"CR\": 
self.crossover_probability,\n            \"variant\": mutation_variant,\n            \"ftol\": self.convergence_criterion_tolerance,\n            \"xtol\": self.convergence_relative_params_tolerance,\n        }\n\n        res = _minimize_pygmo(\n            problem=problem,\n            x0=x0,\n            method=\"de\",\n            specific_options=algo_specific_options,\n            population_size=population_size,\n            n_cores=1,\n            seed=self.seed,\n            discard_start_params=self.discard_start_params,\n        )\n        return res\n\n\n@mark.minimizer(\n    name=\"pygmo_sea\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_PYGMO_INSTALLED,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=True,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass PygmoSea(Algorithm):\n    population_size: int | None = None\n    seed: int | None = None\n    discard_start_params: bool = False\n    stopping_maxiter: PositiveInt = (\n        10_000  # Each generation will compute the objective once\n    )\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        population_size = get_population_size(\n            population_size=self.population_size, x=x0, lower_bound=10\n        )\n\n        algo_specific_options = {\n            \"gen\": self.stopping_maxiter,\n        }\n\n        res = _minimize_pygmo(\n            problem=problem,\n            x0=x0,\n            method=\"sea\",\n            specific_options=algo_specific_options,\n            population_size=population_size,\n            n_cores=1,\n            seed=self.seed,\n            discard_start_params=self.discard_start_params,\n        )\n        return 
res\n\n\n@mark.minimizer(\n    name=\"pygmo_sga\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_PYGMO_INSTALLED,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=True,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass PygmoSga(Algorithm):\n    population_size: int | None = None\n    seed: int | None = None\n    discard_start_params: bool = False\n    stopping_maxiter: PositiveInt = STOPPING_MAX_ITERATIONS_GENETIC\n    # TODO: Refine type to fix range [0,1]\n    crossover_probability: NonNegativeFloat = 0.9\n    crossover_strategy: Literal[\n        \"exponential\",\n        \"sbx\",\n        \"single\",\n        \"binomial\",\n    ] = \"exponential\"\n    # TODO: Refine type to fix range [1,100]\n    eta_c: PositiveFloat | None = None\n    # TODO: Refine type to fix range [0,1]\n    mutation_probability: NonNegativeFloat = 0.02\n    mutation_strategy: Literal[\"gaussian\", \"polynomial\"] = \"polynomial\"\n    # TODO: Refine type to fix range [0,1]\n    mutation_polynomial_distribution_index: NonNegativeFloat | None = None\n    # TODO: Refine type to fix range [0,1]\n    mutation_gaussian_width: NonNegativeFloat | None = None\n    selection_strategy: Literal[\"tournament\", \"truncated\"] = \"tournament\"\n    # TODO: Check if should be NonNegativeInt\n    selection_truncated_n_best: int | None = None\n    # TODO Check if should be NonNegativeInt\n    selection_tournament_size: int | None = None\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        population_size = get_population_size(\n            population_size=self.population_size, x=x0, lower_bound=64\n        )\n\n        if self.eta_c is not None and 
self.crossover_strategy != \"sbx\":\n            warnings.warn(\n                f\"You specified crossover strategy {self.crossover_strategy} \"\n                \"and eta_c. However, eta_c is ignored because it is only used when \"\n                \"the crossover_strategy is set to sbx.\"\n            )\n        eta_c = 1.0 if self.eta_c is None else self.eta_c\n\n        if (\n            self.mutation_polynomial_distribution_index is not None\n        ) and self.mutation_strategy != \"polynomial\":\n            warnings.warn(\n                \"You specified a mutation_polynomial_distribution_index but \"\n                \"did not choose polynomial as your mutation_strategy. Thus, \"\n                \"mutation_polynomial_distribution_index will be ignored.\"\n            )\n        if (\n            self.mutation_gaussian_width is not None\n            and self.mutation_strategy != \"gaussian\"\n        ):\n            warnings.warn(\n                \"You specified a mutation_gaussian_width but \"\n                \"did not choose gaussian as your mutation_strategy. \"\n                \"Thus, mutation_gaussian_width will be ignored.\"\n            )\n        if (\n            self.selection_strategy != \"truncated\"\n            and self.selection_truncated_n_best is not None\n        ):\n            warnings.warn(\n                \"You specified selection_truncated_n_best but \"\n                \"did not specify truncated as your selection strategy. \"\n                \"Therefore, selection_truncated_n_best is ignored.\"\n            )\n        if (\n            self.selection_strategy != \"tournament\"\n            and self.selection_tournament_size is not None\n        ):\n            warnings.warn(\n                \"You specified selection_tournament_size but \"\n                \"did not specify tournament as your selection strategy. 
\"\n                \"Therefore, selection_tournament_size is ignored.\"\n            )\n\n        if (\n            self.mutation_strategy == \"gaussian\"\n            and self.mutation_gaussian_width is not None\n        ):\n            param_m = self.mutation_gaussian_width\n        elif (\n            self.mutation_strategy == \"polynomial\"\n            and self.mutation_polynomial_distribution_index is not None\n        ):\n            param_m = self.mutation_polynomial_distribution_index\n        else:\n            param_m = 1.0\n\n        if (\n            self.selection_strategy == \"truncated\"\n            and self.selection_truncated_n_best is not None\n        ):\n            param_s = self.selection_truncated_n_best\n        elif (\n            self.selection_strategy == \"tournament\"\n            and self.selection_tournament_size is not None\n        ):\n            param_s = self.selection_tournament_size\n        else:\n            param_s = 2\n\n        algo_specific_options = {\n            \"gen\": self.stopping_maxiter,\n            \"cr\": self.crossover_probability,\n            \"eta_c\": eta_c,\n            \"m\": self.mutation_probability,\n            \"param_m\": param_m,\n            \"crossover\": self.crossover_strategy,\n            \"mutation\": self.mutation_strategy,\n            \"selection\": self.selection_strategy,\n            \"param_s\": param_s,\n        }\n\n        res = _minimize_pygmo(\n            problem=problem,\n            x0=x0,\n            method=\"sga\",\n            specific_options=algo_specific_options,\n            population_size=population_size,\n            n_cores=1,\n            seed=self.seed,\n            discard_start_params=self.discard_start_params,\n        )\n        return res\n\n\n@mark.minimizer(\n    name=\"pygmo_sade\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_PYGMO_INSTALLED,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    
needs_bounds=True,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass PygmoSade(Algorithm):\n    population_size: int | None = None\n    seed: int | None = None\n    discard_start_params: bool = False\n    jde: bool = True\n    stopping_maxiter: PositiveInt = STOPPING_MAX_ITERATIONS_GENETIC\n    mutation_variant: Literal[\n        \"best/1/exp\",\n        \"rand/1/exp\",\n        \"rand-to-best/1/exp\",\n        \"best/2/exp\",\n        \"rand/2/exp\",\n        \"best/1/bin\",\n        \"rand/1/bin\",\n        \"rand-to-best/1/bin\",\n        \"best/2/bin\",\n        \"rand/2/bin\",\n        \"rand/3/exp\",\n        \"rand/3/bin\",\n        \"best/3/exp\",\n        \"best/3/bin\",\n        \"rand-to-current/2/exp\",\n        \"rand-to-current/2/bin\",\n        \"rand-to-best-and-current/2/exp\",\n        \"rand-to-best-and-current/2/bin\",\n    ] = \"rand/1/exp\"\n    keep_adapted_params: bool = False\n    ftol: NonNegativeFloat = 1e-6\n    xtol: NonNegativeFloat = 1e-6\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        population_size = get_population_size(\n            population_size=self.population_size, x=x0, lower_bound=64\n        )\n        mutation_variant_str_to_int = {\n            \"best/1/exp\": 1,\n            \"rand/1/exp\": 2,\n            \"rand-to-best/1/exp\": 3,\n            \"best/2/exp\": 4,\n            \"rand/2/exp\": 5,\n            \"best/1/bin\": 6,\n            \"rand/1/bin\": 7,\n            \"rand-to-best/1/bin\": 8,\n            \"best/2/bin\": 9,\n            \"rand/2/bin\": 10,\n            \"rand/3/exp\": 11,\n            \"rand/3/bin\": 12,\n            \"best/3/exp\": 13,\n            \"best/3/bin\": 14,\n            
\"rand-to-current/2/exp\": 15,\n            \"rand-to-current/2/bin\": 16,\n            \"rand-to-best-and-current/2/exp\": 17,\n            \"rand-to-best-and-current/2/bin\": 18,\n        }\n        mutation_variant = _convert_str_to_int(\n            str_to_int=mutation_variant_str_to_int, value=self.mutation_variant\n        )\n\n        algo_specific_options = {\n            \"gen\": self.stopping_maxiter,\n            \"variant\": mutation_variant,\n            \"variant_adptv\": 1 if self.jde else 2,\n            \"ftol\": self.ftol,\n            \"xtol\": self.xtol,\n            \"memory\": self.keep_adapted_params,\n        }\n\n        res = _minimize_pygmo(\n            problem=problem,\n            x0=x0,\n            method=\"sade\",\n            specific_options=algo_specific_options,\n            population_size=population_size,\n            n_cores=1,\n            seed=self.seed,\n            discard_start_params=self.discard_start_params,\n        )\n        return res\n\n\n@mark.minimizer(\n    name=\"pygmo_cmaes\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_PYGMO_INSTALLED,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=True,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass PygmoCmaes(Algorithm):\n    population_size: int | None = None\n    seed: int | None = None\n    discard_start_params: bool = False\n    stopping_maxiter: PositiveInt = STOPPING_MAX_ITERATIONS_GENETIC\n    # TODO: Refine type to fix range [0,1]\n    backward_horizon: NonNegativeFloat | None = None\n    # TODO: Refine type to fix range [0,1]\n    variance_loss_compensation: NonNegativeFloat | None = None\n    # TODO: Refine type to fix range [0,1]\n    learning_rate_rank_one_update: NonNegativeFloat | None = None\n    # TODO: 
Refine type to fix range [0,1]\n    learning_rate_rank_mu_update: NonNegativeFloat | None = None\n    # TODO: Check if should be NonNegativeFloat\n    initial_step_size: float = 0.5\n    ftol: NonNegativeFloat = 1e-6\n    xtol: NonNegativeFloat = 1e-6\n    keep_adapted_params: bool = False\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        population_size = get_population_size(\n            population_size=self.population_size, x=x0, lower_bound=64\n        )\n\n        algo_specific_options = {\n            \"gen\": self.stopping_maxiter,\n            \"cc\": self.backward_horizon if self.backward_horizon is not None else -1.0,\n            \"cs\": self.variance_loss_compensation\n            if self.variance_loss_compensation is not None\n            else -1.0,\n            \"c1\": self.learning_rate_rank_one_update\n            if self.learning_rate_rank_one_update is not None\n            else -1.0,\n            \"cmu\": self.learning_rate_rank_mu_update\n            if self.learning_rate_rank_mu_update is not None\n            else -1.0,\n            \"sigma0\": self.initial_step_size,\n            \"ftol\": self.ftol,\n            \"xtol\": self.xtol,\n            \"memory\": self.keep_adapted_params,\n            \"force_bounds\": True,\n        }\n\n        res = _minimize_pygmo(\n            problem=problem,\n            x0=x0,\n            method=\"cmaes\",\n            specific_options=algo_specific_options,\n            population_size=population_size,\n            n_cores=1,\n            seed=self.seed,\n            discard_start_params=self.discard_start_params,\n        )\n        return res\n\n\n@mark.minimizer(\n    name=\"pygmo_simulated_annealing\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_PYGMO_INSTALLED,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=True,\n    
supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass PygmoSimulatedAnnealing(Algorithm):\n    population_size: int | None = None\n    seed: int | None = None\n    discard_start_params: bool = False\n    start_temperature: PositiveFloat = 10.0\n    # TODO: Check if type should be same as start_temperature\n    end_temperature: float = 0.01\n    # TODO: Check if type should be NonNegativeInt\n    n_temp_adjustments: int = 10\n    # TODO: Check if type should be NonNegativeInt\n    n_range_adjustments: int = 10\n    # TODO: Check if type should be NonNegativeInt\n    bin_size: int = 10\n    # TODO: Refine type to fix range [0,1]\n    start_range: NonNegativeFloat = 1.0\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        population_size = get_population_size(\n            population_size=self.population_size, x=x0, lower_bound=64\n        )\n\n        algo_specific_options = {\n            \"Ts\": self.start_temperature,\n            \"Tf\": self.end_temperature,\n            \"n_T_adj\": self.n_temp_adjustments,\n            \"n_range_adj\": self.n_range_adjustments,\n            \"bin_size\": self.bin_size,\n            \"start_range\": self.start_range,\n        }\n\n        res = _minimize_pygmo(\n            problem=problem,\n            x0=x0,\n            method=\"simulated_annealing\",\n            specific_options=algo_specific_options,\n            population_size=population_size,\n            n_cores=1,\n            seed=self.seed,\n            discard_start_params=self.discard_start_params,\n        )\n        return res\n\n\n@mark.minimizer(\n    name=\"pygmo_pso\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_PYGMO_INSTALLED,\n    is_global=True,\n    
needs_jac=False,\n    needs_hess=False,\n    needs_bounds=True,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass PygmoPso(Algorithm):\n    population_size: int | None = None\n    seed: int | None = None\n    discard_start_params: bool = False\n    stopping_maxiter: PositiveInt = STOPPING_MAX_ITERATIONS_GENETIC\n    # TODO: Refine type to fix range [0,1]\n    omega: NonNegativeFloat = 0.7298\n    # TODO: Refine type to fix range [0,4]\n    force_of_previous_best: NonNegativeFloat = 2.05\n    # TODO: Refine type to fix range [0,4]\n    force_of_best_in_neighborhood: NonNegativeFloat = 2.05\n    # TODO: Refine type to fix range [0,1]\n    max_velocity: NonNegativeFloat = 0.5\n    algo_variant: Literal[\n        \"canonical_inertia\",\n        \"social_and_cog_rand\",\n        \"all_components_rand\",\n        \"one_rand\",\n        \"canonical_constriction\",\n        \"fips\",\n    ] = \"canonical_constriction\"\n    neighbor_definition: Literal[\n        \"gbest\",\n        \"lbest\",\n        \"Von Neumann\",\n        \"Adaptive random\",\n    ] = \"lbest\"\n    neighbor_param: int | None = None\n    keep_velocities: bool = False\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        if (\n            self.neighbor_definition in [\"gbest\", \"Von Neumann\"]\n            and self.neighbor_param is not None\n        ):\n            warnings.warn(\n                \"You gave a neighbor parameter but selected a neighbor_definition \"\n                \"that ignores this parameter.\"\n            )\n\n        neighbor_param = 4 if self.neighbor_param is None else self.neighbor_param\n\n        population_size = get_population_size(\n            
population_size=self.population_size, x=x0, lower_bound=10\n        )\n\n        neighbor_definition_str_to_int = {\n            \"gbest\": 1,\n            \"lbest\": 2,\n            \"Von Neumann\": 3,\n            \"Adaptive random\": 4,\n        }\n        algo_variant_str_to_int = {\n            \"canonical_inertia\": 1,\n            \"social_and_cog_rand\": 2,\n            \"all_components_rand\": 3,\n            \"one_rand\": 4,\n            \"canonical_constriction\": 5,\n            \"fips\": 6,\n        }\n\n        algo_specific_options = {\n            \"gen\": self.stopping_maxiter,\n            \"omega\": self.omega,\n            \"eta1\": self.force_of_previous_best,\n            \"eta2\": self.force_of_best_in_neighborhood,\n            \"max_vel\": self.max_velocity,\n            \"variant\": _convert_str_to_int(algo_variant_str_to_int, self.algo_variant),\n            \"neighb_type\": _convert_str_to_int(\n                neighbor_definition_str_to_int, self.neighbor_definition\n            ),\n            \"neighb_param\": neighbor_param,\n            \"memory\": self.keep_velocities,\n        }\n\n        res = _minimize_pygmo(\n            problem=problem,\n            x0=x0,\n            method=\"pso\",\n            specific_options=algo_specific_options,\n            population_size=population_size,\n            n_cores=1,\n            seed=self.seed,\n            discard_start_params=self.discard_start_params,\n        )\n\n        return res\n\n\n@mark.minimizer(\n    name=\"pygmo_pso_gen\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_PYGMO_INSTALLED,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=True,\n    supports_parallelism=True,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass PygmoPsoGen(Algorithm):\n    population_size: 
int | None = None\n    n_cores: PositiveInt = 1\n    seed: int | None = None\n    discard_start_params: bool = False\n    stopping_maxiter: PositiveInt = STOPPING_MAX_ITERATIONS_GENETIC\n    # TODO: Refine type to fix range [0,1]\n    omega: NonNegativeFloat = 0.7298\n    # TODO: Refine type to fix range [0,4]\n    force_of_previous_best: NonNegativeFloat = 2.05\n    # TODO: Refine type to fix range [0,4]\n    force_of_best_in_neighborhood: NonNegativeFloat = 2.05\n    # TODO: Refine type to fix range [0,1]\n    max_velocity: NonNegativeFloat = 0.5\n    algo_variant: Literal[\n        \"canonical_inertia\",\n        \"social_and_cog_rand\",\n        \"all_components_rand\",\n        \"one_rand\",\n        \"canonical_constriction\",\n        \"fips\",\n    ] = \"canonical_constriction\"\n    neighbor_definition: Literal[\n        \"gbest\",\n        \"lbest\",\n        \"Von Neumann\",\n        \"Adaptive random\",\n    ] = \"lbest\"\n    neighbor_param: int | None = None\n    keep_velocities: bool = False\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        if (\n            self.neighbor_definition in [\"gbest\", \"Von Neumann\"]\n            and self.neighbor_param is not None\n        ):\n            warnings.warn(\n                \"You gave a neighbor parameter but selected a neighbor_definition \"\n                \"that ignores this parameter.\"\n            )\n        neighbor_param = 4 if self.neighbor_param is None else self.neighbor_param\n        neighbor_str_to_int = {\n            \"gbest\": 1,\n            \"lbest\": 2,\n            \"Von Neumann\": 3,\n            \"Adaptive random\": 4,\n        }\n        neighbor_type = _convert_str_to_int(\n            neighbor_str_to_int, self.neighbor_definition\n        )\n        algo_variant_str_to_int = {\n            \"canonical_inertia\": 1,\n            \"social_and_cog_rand\": 2,\n            
\"all_components_rand\": 3,\n            \"one_rand\": 4,\n            \"canonical_constriction\": 5,\n            \"fips\": 6,\n        }\n        algo_variant = _convert_str_to_int(algo_variant_str_to_int, self.algo_variant)\n\n        population_size = get_population_size(\n            population_size=self.population_size, x=x0, lower_bound=10\n        )\n\n        algo_specific_options = {\n            \"gen\": self.stopping_maxiter,\n            \"omega\": self.omega,\n            \"eta1\": self.force_of_previous_best,\n            \"eta2\": self.force_of_best_in_neighborhood,\n            \"max_vel\": self.max_velocity,\n            \"variant\": algo_variant,\n            \"neighb_type\": neighbor_type,\n            \"neighb_param\": neighbor_param,\n            \"memory\": self.keep_velocities,\n        }\n\n        res = _minimize_pygmo(\n            problem=problem,\n            x0=x0,\n            method=\"pso_gen\",\n            specific_options=algo_specific_options,\n            population_size=population_size,\n            n_cores=self.n_cores,\n            seed=self.seed,\n            discard_start_params=self.discard_start_params,\n        )\n        return res\n\n\n@mark.minimizer(\n    name=\"pygmo_mbh\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_PYGMO_INSTALLED,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=True,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass PygmoMbh(Algorithm):\n    population_size: int | None = None\n    seed: int | None = None\n    discard_start_params: bool = False\n    inner_algorithm: pg.algorithm | None = None\n    # this is 30 instead of 5 in pygmo for our sum of squares test to pass\n    stopping_max_inner_runs_without_improvement: PositiveInt = 30\n    perturbation: 
float = 0.01\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        # the min default population size is this large to pass our sum of\n        # squares tests.\n        population_size = get_population_size(\n            population_size=self.population_size, x=x0, lower_bound=250\n        )\n\n        algo_specific_options = {\n            \"algo\": self.inner_algorithm,\n            \"stop\": self.stopping_max_inner_runs_without_improvement,\n            \"perturb\": self.perturbation,\n        }\n\n        res = _minimize_pygmo(\n            problem=problem,\n            x0=x0,\n            method=\"mbh\",\n            specific_options=algo_specific_options,\n            population_size=population_size,\n            seed=self.seed,\n            discard_start_params=self.discard_start_params,\n            n_cores=1,\n        )\n        return res\n\n\n@mark.minimizer(\n    name=\"pygmo_xnes\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_PYGMO_INSTALLED,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=True,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass PygmoXnes(Algorithm):\n    population_size: float | None = None\n    seed: int | None = None\n    discard_start_params: bool = False\n    stopping_maxiter: PositiveInt = STOPPING_MAX_ITERATIONS_GENETIC\n    # TODO: Refine type to fix range [0,1]\n    learning_rate_mean_update: NonNegativeFloat | None = 1.0\n    # TODO: Refine type to fix range [0,1]\n    learning_rate_step_size_update: NonNegativeFloat | None = None\n    # TODO: Refine type to fix range [0,1]\n    learning_rate_cov_matrix_update: NonNegativeFloat | None = None\n    # TODO: Refine type to fix range 
[0,1]\n    initial_search_share: NonNegativeFloat | None = 1.0\n    ftol: NonNegativeFloat = 1e-6\n    xtol: NonNegativeFloat = 1e-6\n    keep_adapted_params: bool = False\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        population_size = get_population_size(\n            population_size=self.population_size, x=x0, lower_bound=64\n        )\n        eta_mu = (\n            -1\n            if self.learning_rate_mean_update is None\n            else self.learning_rate_mean_update\n        )\n        eta_sigma = (\n            -1\n            if self.learning_rate_step_size_update is None\n            else self.learning_rate_step_size_update\n        )\n        eta_b = (\n            -1\n            if self.learning_rate_cov_matrix_update is None\n            else self.learning_rate_cov_matrix_update\n        )\n        algo_specific_options = {\n            \"gen\": self.stopping_maxiter,\n            \"eta_mu\": eta_mu,\n            \"eta_sigma\": eta_sigma,\n            \"eta_b\": eta_b,\n            \"sigma0\": self.initial_search_share,\n            \"ftol\": self.ftol,\n            \"xtol\": self.xtol,\n            \"memory\": self.keep_adapted_params,\n            \"force_bounds\": True,\n        }\n\n        res = _minimize_pygmo(\n            problem=problem,\n            x0=x0,\n            method=\"xnes\",\n            specific_options=algo_specific_options,\n            population_size=population_size,\n            n_cores=1,\n            seed=self.seed,\n            discard_start_params=self.discard_start_params,\n        )\n        return res\n\n\n@mark.minimizer(\n    name=\"pygmo_gwo\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_PYGMO_INSTALLED,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=True,\n    supports_parallelism=False,\n    supports_bounds=True,\n    
supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass PygmoGwo(Algorithm):\n    population_size: int | None = None\n    seed: int | None = None\n    discard_start_params: bool = False\n    stopping_maxiter: PositiveInt = STOPPING_MAX_ITERATIONS_GENETIC\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        population_size = get_population_size(\n            population_size=self.population_size, x=x0, lower_bound=64\n        )\n        algo_specific_options = {\n            \"gen\": self.stopping_maxiter,\n        }\n        res = _minimize_pygmo(\n            problem=problem,\n            x0=x0,\n            method=\"gwo\",\n            specific_options=algo_specific_options,\n            population_size=population_size,\n            n_cores=1,\n            seed=self.seed,\n            discard_start_params=self.discard_start_params,\n        )\n        return res\n\n\n@mark.minimizer(\n    name=\"pygmo_compass_search\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_PYGMO_INSTALLED,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=True,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass PygmoCompassSearch(Algorithm):\n    population_size: int | None = None\n    seed: int | None = None\n    discard_start_params: bool = False\n    stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL\n    # TODO: Refine type to fix range (0,1]\n    start_range: PositiveFloat = 0.1\n    # TODO?: must be in (0,start_range]\n    stop_range: PositiveFloat = 0.01\n    # TODO: Refine type to fix range (0,1)\n    reduction_coeff: 
PositiveFloat = 0.5\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        if self.population_size is not None:\n            warnings.warn(\n                f\"You specified population size {self.population_size}. \"\n                \"compass_search does not have a population so this argument is ignored.\"\n            )\n            population_size = self.population_size\n        else:\n            # if discard_start_params is False population_size - 1\n            # must still be positive\n            population_size = 100\n\n        algo_specific_options = {\n            \"max_fevals\": self.stopping_maxfun,\n            \"start_range\": self.start_range,\n            \"stop_range\": self.stop_range,\n            \"reduction_coeff\": self.reduction_coeff,\n        }\n\n        res = _minimize_pygmo(\n            problem=problem,\n            x0=x0,\n            method=\"compass_search\",\n            specific_options=algo_specific_options,\n            population_size=population_size,\n            n_cores=1,\n            seed=self.seed,\n            discard_start_params=self.discard_start_params,\n        )\n        return res\n\n\n@mark.minimizer(\n    name=\"pygmo_ihs\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_PYGMO_INSTALLED,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=True,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass PygmoIhs(Algorithm):\n    population_size: int | None = None\n    seed: int | None = None\n    discard_start_params: bool = False\n    stopping_maxiter: PositiveInt = STOPPING_MAX_ITERATIONS_GENETIC\n    # TODO: Probably refine type to fix range [0,1]\n    choose_from_memory_probability: 
NonNegativeFloat = 0.85\n    # TODO: Refine type to fix range [0,1]\n    min_pitch_adjustment_rate: NonNegativeFloat = 0.35\n    # TODO: Refine type to fix range [0,1]\n    max_pitch_adjustment_rate: NonNegativeFloat = 0.99\n    min_distance_bandwidth: PositiveFloat = 1e-5\n    max_distance_bandwidth: PositiveFloat = 1.0\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        if self.population_size is not None:\n            warnings.warn(\"The population size has no effect on IHS' performance.\")\n\n        population_size = get_population_size(\n            population_size=self.population_size, x=x0, lower_bound=1\n        )\n\n        algo_specific_options = {\n            \"gen\": self.stopping_maxiter,\n            \"phmcr\": self.choose_from_memory_probability,\n            \"ppar_min\": self.min_pitch_adjustment_rate,\n            \"ppar_max\": self.max_pitch_adjustment_rate,\n            \"bw_min\": self.min_distance_bandwidth,\n            \"bw_max\": self.max_distance_bandwidth,\n        }\n        res = _minimize_pygmo(\n            problem=problem,\n            x0=x0,\n            method=\"ihs\",\n            specific_options=algo_specific_options,\n            population_size=population_size,\n            n_cores=1,\n            seed=self.seed,\n            discard_start_params=self.discard_start_params,\n        )\n        return res\n\n\n@mark.minimizer(\n    name=\"pygmo_de1220\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_PYGMO_INSTALLED,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=True,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass PygmoDe1220(Algorithm):\n    population_size: int | None = None\n  
  seed: int | None = None\n    discard_start_params: bool = False\n    jde: bool = True\n    stopping_maxiter: PositiveInt = STOPPING_MAX_ITERATIONS_GENETIC\n    allowed_variants: List[str] | None = None\n    keep_adapted_params: bool = False\n    ftol: NonNegativeFloat = 1e-6\n    xtol: NonNegativeFloat = 1e-6\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        variant_str_to_int = {\n            \"best/1/exp\": 1,\n            \"rand/1/exp\": 2,\n            \"rand-to-best/1/exp\": 3,\n            \"best/2/exp\": 4,\n            \"rand/2/exp\": 5,\n            \"best/1/bin\": 6,\n            \"rand/1/bin\": 7,\n            \"rand-to-best/1/bin\": 8,\n            \"best/2/bin\": 9,\n            \"rand/2/bin\": 10,\n            \"rand/3/exp\": 11,\n            \"rand/3/bin\": 12,\n            \"best/3/exp\": 13,\n            \"best/3/bin\": 14,\n            \"rand-to-current/2/exp\": 15,\n            \"rand-to-current/2/bin\": 16,\n            \"rand-to-best-and-current/2/exp\": 17,\n            \"rand-to-best-and-current/2/bin\": 18,\n        }\n        if self.allowed_variants is None:\n            allowed_variant_codes = [2, 3, 7, 10, 13, 14, 15, 16]\n        else:\n            allowed_variant_codes = [\n                _convert_str_to_int(variant_str_to_int, variant)\n                for variant in self.allowed_variants\n            ]\n\n        population_size = get_population_size(\n            population_size=self.population_size, x=x0, lower_bound=64\n        )\n\n        algo_specific_options = {\n            \"gen\": self.stopping_maxiter,\n            \"variant_adptv\": 1 if self.jde else 2,\n            \"ftol\": self.ftol,\n            \"xtol\": self.xtol,\n            \"memory\": self.keep_adapted_params,\n            \"allowed_variants\": allowed_variant_codes,\n        }\n\n        res = _minimize_pygmo(\n            problem=problem,\n            
x0=x0,\n            method=\"de1220\",\n            specific_options=algo_specific_options,\n            population_size=population_size,\n            n_cores=1,\n            seed=self.seed,\n            discard_start_params=self.discard_start_params,\n        )\n        return res\n\n\n# ====================================================================================\n\n\ndef _minimize_pygmo(\n    problem: InternalOptimizationProblem,\n    x0: NDArray[np.float64],\n    method: str,\n    specific_options: dict[str, Any],\n    population_size: PositiveInt,\n    n_cores: int,\n    seed: int | None,\n    discard_start_params: bool,\n) -> InternalOptimizeResult:\n    if not IS_PYGMO_INSTALLED:\n        raise NotInstalledError(\n            f\"The {method} algorithm requires the pygmo package to be installed. \"\n            \"You can install it with 'conda install -c conda-forge pygmo'. Visit \"\n            \"https://esa.github.io/pygmo2/install.html for more detailed installation \"\n            \"instructions.\"\n        )\n\n    bounds = problem.bounds\n    if bounds is None or bounds.lower is None or bounds.upper is None:\n        raise ValueError(f\"{method} requires finite bounds for all parameters.\")\n    elif not np.isfinite(bounds.lower).all() or not np.isfinite(bounds.upper).all():\n        raise ValueError(f\"{method} requires finite bounds for all parameters.\")\n\n    pygmo_problem = _create_pygmo_problem(problem, len(x0), n_cores)\n    algo = _create_algorithm(method, specific_options, n_cores)\n    pop = _create_population(\n        problem=pygmo_problem,\n        population_size=population_size,\n        x=x0,\n        seed=seed,\n        discard_start_params=discard_start_params,\n    )\n    evolved = algo.evolve(pop)\n    result = _process_pygmo_result(evolved)\n    return result\n\n\ndef _create_pygmo_problem(\n    problem: InternalOptimizationProblem, dim: int, n_cores: int\n) -> pg.problem:\n    import pygmo as pg\n\n    class Problem:\n     
   def fitness(self, x):\n            return [problem.fun(x)]\n\n        def get_bounds(self):\n            return (problem.bounds.lower, problem.bounds.upper)\n\n        def gradient(self, dv):  # noqa: ARG002\n            raise ValueError(\"No pygmo optimizer should use a gradient.\")\n\n        def batch_fitness(self, dvs):\n            x_list = list(dvs.reshape(-1, dim))\n            eval_list = problem.batch_fun(x_list, n_cores=n_cores)\n            evals = np.array(eval_list)\n            return evals\n\n    pygmo_problem = pg.problem(Problem())\n    return pygmo_problem\n\n\ndef _create_algorithm(\n    method: str, algo_options: dict[str, Any], n_cores: int\n) -> pg.algorithm:\n    \"\"\"Create a pygmo algorithm.\"\"\"\n    import pygmo as pg\n\n    pygmo_uda = getattr(pg, method)\n    algo = pygmo_uda(**algo_options)\n    try:\n        algo.set_bfe(pg.bfe())\n    except AttributeError:\n        if n_cores >= 2:\n            warnings.warn(\n                f\"Your specified algorithm {method} does not support parallelization. 
\"\n                \"Choose another algorithm such as pygmo_gaco to parallelize.\"\n            )\n    out = pg.algorithm(algo)\n    return out\n\n\ndef _create_population(\n    problem: InternalOptimizationProblem,\n    population_size: int,\n    x: NDArray[np.float64],\n    seed: int | None,\n    discard_start_params: bool,\n) -> pg.population:\n    import pygmo as pg\n\n    if not discard_start_params:\n        population_size = population_size - 1\n\n    pop = pg.population(\n        problem,\n        size=population_size,\n        seed=seed,\n        b=pg.bfe(),\n    )\n    if not discard_start_params:\n        pop.push_back(x)\n    return pop\n\n\ndef _process_pygmo_result(evolved: pg.population) -> InternalOptimizeResult:\n    result = InternalOptimizeResult(\n        x=evolved.champion_x,\n        fun=evolved.champion_f[0],\n        success=True,\n        message=\"Number of generations reached.\",\n        n_fun_evals=evolved.problem.get_fevals(),\n        n_jac_evals=evolved.problem.get_gevals(),\n    )\n\n    return result\n\n\ndef _convert_str_to_int(str_to_int, value):\n    if value in str_to_int:\n        out = str_to_int[value]\n    elif value not in str_to_int.values():\n        raise ValueError(\n            f\"You specified {value} as value. \"\n            f\"It must be one of {', '.join(str_to_int.keys())}\"\n        )\n    else:\n        out = value\n    return out\n"
  },
  {
    "path": "src/optimagic/optimizers/pyswarms_optimizers.py",
    "content": "\"\"\"Implement PySwarms particle swarm optimization algorithms.\n\nThis module provides optimagic-compatible wrappers for PySwarms particle swarm\noptimization algorithms including global best, local best, and general PSO variants with\nsupport for different topologies.\n\n\"\"\"\n\nfrom __future__ import annotations\n\nimport warnings\nfrom dataclasses import dataclass\nfrom typing import Any, Callable, Literal\n\nimport numpy as np\nfrom numpy.typing import NDArray\n\nfrom optimagic import mark\nfrom optimagic.config import IS_PYSWARMS_INSTALLED\nfrom optimagic.exceptions import NotInstalledError\nfrom optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult\nfrom optimagic.optimization.internal_optimization_problem import (\n    InternalBounds,\n    InternalOptimizationProblem,\n)\nfrom optimagic.typing import (\n    AggregationLevel,\n    NonNegativeFloat,\n    PositiveFloat,\n    PositiveInt,\n    PyTree,\n)\n\nPYSWARMS_NOT_INSTALLED_ERROR = (\n    \"This optimizer requires the 'pyswarms' package to be installed. \"\n    \"You can install it with `pip install pyswarms`. \"\n    \"Visit https://pyswarms.readthedocs.io/en/latest/installation.html \"\n    \"for more detailed installation instructions.\"\n)\n\n\n# ======================================================================================\n# 1. 
Topology Dataclasses\n# ======================================================================================\n\n\n@dataclass(frozen=True)\nclass Topology:\n    \"\"\"Base class for all topology configurations.\"\"\"\n\n\n@dataclass(frozen=True)\nclass StarTopology(Topology):\n    \"\"\"Star topology configuration.\n\n    All particles are connected to the global best.\n\n    \"\"\"\n\n\n@dataclass(frozen=True)\nclass RingTopology(Topology):\n    \"\"\"Ring topology configuration.\n\n    Particles are connected in a ring structure.\n\n    \"\"\"\n\n    k_neighbors: PositiveInt = 3\n    \"\"\"Number of neighbors for each particle.\"\"\"\n\n    p_norm: Literal[1, 2] = 2\n    \"\"\"Distance metric for neighbor selection: 1 (Manhattan), 2 (Euclidean).\"\"\"\n\n    static: bool = False\n    \"\"\"Whether to use a static or dynamic ring topology.\n\n    When True, the neighborhood structure is fixed throughout optimization. When False,\n    neighbors are recomputed at each iteration based on current particle positions.\n\n    \"\"\"\n\n\n@dataclass(frozen=True)\nclass VonNeumannTopology(Topology):\n    \"\"\"Von Neumann topology configuration.\n\n    Particles are arranged on a 2D grid.\n\n    \"\"\"\n\n    p_norm: Literal[1, 2] = 2\n    \"\"\"Distance metric for neighbor selection: 1 (Manhattan), 2 (Euclidean).\"\"\"\n\n    range_param: PositiveInt = 1\n    r\"\"\"Range parameter :math:`r` for neighborhood size.\"\"\"\n\n\n@dataclass(frozen=True)\nclass PyramidTopology(Topology):\n    \"\"\"Pyramid topology configuration.\"\"\"\n\n    static: bool = False\n    \"\"\"Whether to use a static or dynamic pyramid topology.\n\n    When True, the neighborhood structure is fixed throughout optimization. 
When False,\n    neighbors are recomputed at each iteration based on current particle positions.\n\n    \"\"\"\n\n\n@dataclass(frozen=True)\nclass RandomTopology(Topology):\n    \"\"\"Random topology configuration.\n\n    Particles are connected to random neighbors.\n\n    \"\"\"\n\n    k_neighbors: PositiveInt = 3\n    \"\"\"Number of neighbors for each particle.\"\"\"\n\n    static: bool = False\n    \"\"\"Whether to use a static or dynamic random topology.\n\n    When True, the neighborhood structure is fixed throughout optimization. When False,\n    neighbors are recomputed at each iteration based on current particle positions.\n\n    \"\"\"\n\n\n# ======================================================================================\n# Common PSO Options\n# ======================================================================================\n\n\n@dataclass(frozen=True)\nclass PSOCommonOptions:\n    \"\"\"Common options for PySwarms optimizers.\"\"\"\n\n    n_particles: PositiveInt = 10\n    \"\"\"Number of particles in the swarm.\"\"\"\n\n    cognitive_parameter: PositiveFloat = 0.5\n    \"\"\"Cognitive parameter (c1) - attraction to personal best.\"\"\"\n\n    social_parameter: PositiveFloat = 0.3\n    \"\"\"Social parameter (c2) - attraction to neighborhood/global best.\"\"\"\n\n    inertia_weight: PositiveFloat = 0.9\n    \"\"\"Inertia weight (w) - momentum control.\"\"\"\n\n    stopping_maxiter: PositiveInt = 1000\n    \"\"\"Maximum number of iterations.\"\"\"\n\n    initial_positions: list[PyTree] | None = None\n    \"\"\"Option to set the initial particle positions.\n\n    If None, positions are generated randomly within the given bounds, or within [0, 1]\n    if bounds are not specified.\n\n    \"\"\"\n\n    oh_strategy: dict[str, str] | None = None\n    \"\"\"Dictionary of strategies for time-varying options.\"\"\"\n\n    boundary_strategy: Literal[\n        \"periodic\", \"reflective\", \"shrink\", \"random\", \"intermediate\"\n    ] = 
\"periodic\"\n    \"\"\"Strategy for handling out-of-bounds particles.\n\n    Available options: periodic (default),\n    reflective, shrink, random, intermediate.\n\n    \"\"\"\n\n    velocity_strategy: Literal[\"unmodified\", \"adjust\", \"invert\", \"zero\"] = \"unmodified\"\n    \"\"\"Strategy for handling out-of-bounds velocities.\n\n    Available options: unmodified (default),\n    adjust, invert, zero.\n\n    \"\"\"\n\n    velocity_clamp_min: float | None = None\n    \"\"\"Minimum velocity limit for particles.\"\"\"\n\n    velocity_clamp_max: float | None = None\n    \"\"\"Maximum velocity limit for particles.\"\"\"\n\n    convergence_ftol_rel: NonNegativeFloat = 0\n    \"\"\"Stop when relative change in objective function is less than this value.\"\"\"\n\n    convergence_ftol_iter: PositiveInt = 1\n    \"\"\"Number of iterations to check for convergence.\"\"\"\n\n    n_cores: PositiveInt = 1\n    \"\"\"Number of cores for parallel evaluation.\"\"\"\n\n    center_init: PositiveFloat = 1.0\n    \"\"\"Scaling factor for initial particle positions.\"\"\"\n\n    verbose: bool = False\n    \"\"\"Enable or disable the logs and progress bar.\"\"\"\n\n    seed: int | None = None\n    \"\"\"Random seed for initial positions.\n\n    For full reproducibility, set a global seed with `np.random.seed()`.\n\n    \"\"\"\n\n\n# ======================================================================================\n# Algorithm Classes\n# ======================================================================================\n\n\n@mark.minimizer(\n    name=\"pyswarms_global_best\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_PYSWARMS_INSTALLED,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=True,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    
disable_history=False,\n)\n@dataclass(frozen=True)\nclass PySwarmsGlobalBestPSO(Algorithm, PSOCommonOptions):\n    r\"\"\"Minimize a scalar function using Global Best Particle Swarm Optimization.\n\n    A population-based stochastic, global optimization algorithm that\n    simulates the social behavior of bird flocking or fish schooling. Particles\n    (candidate solutions) move through the search space, adjusting their positions\n    based on their own experience (cognitive component) and the experience of their\n    neighbors or the entire swarm (social component).\n\n    This implementation uses a star topology where all particles are connected to\n    each other, making each particle aware of the global best solution found by the\n    entire swarm.\n\n    The position update follows:\n\n    .. math::\n\n        x_{i}(t+1) = x_{i}(t) + v_{i}(t+1)\n\n    The velocity update follows:\n\n    .. math::\n\n        v_{ij}(t+1) = w \\cdot v_{ij}(t) + c_1 r_{1j}(t)[y_{ij}(t) - x_{ij}(t)]\n                      + c_2 r_{2j}(t)[\\hat{y}_j(t) - x_{ij}(t)]\n\n    Where:\n        - :math:`w`: inertia weight controlling momentum\n        - :math:`c_1`: cognitive parameter for attraction to personal best\n        - :math:`c_2`: social parameter for attraction to global best\n        - :math:`r_{1j}, r_{2j}`: random numbers in [0,1]\n        - :math:`y_{ij}(t)`: personal best position of particle i\n        - :math:`\\hat{y}_j(t)`: global best position\n\n    This algorithm is an adaptation of the original Particle Swarm Optimization method\n    by :cite:`Kennedy1995`\n\n    \"\"\"\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        if not IS_PYSWARMS_INSTALLED:\n            raise NotInstalledError(PYSWARMS_NOT_INSTALLED_ERROR)\n\n        import pyswarms as ps\n\n        pso_options_dict = {\n            \"c1\": self.cognitive_parameter,\n            \"c2\": 
self.social_parameter,\n            \"w\": self.inertia_weight,\n        }\n        optimizer_kwargs = {\"options\": pso_options_dict}\n\n        res = _pyswarms_internal(\n            problem=problem,\n            x0=x0,\n            optimizer_class=ps.single.GlobalBestPSO,\n            optimizer_kwargs=optimizer_kwargs,\n            algo_options=self,\n        )\n\n        return res\n\n\n@mark.minimizer(\n    name=\"pyswarms_local_best\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_PYSWARMS_INSTALLED,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=True,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass PySwarmsLocalBestPSO(Algorithm, PSOCommonOptions):\n    r\"\"\"Minimize a scalar function using Local Best Particle Swarm Optimization.\n\n    A variant of PSO that uses local neighborhoods instead of a single global best.\n    Each particle is influenced only by the best position found within its local\n    neighborhood, which is determined by the k-nearest neighbors using distance metrics.\n\n    This approach uses a ring topology where particles are connected to their local\n    neighbors, making each particle aware of only the best solution found within its\n    neighborhood.\n\n    The position update follows:\n\n    .. math::\n\n        x_{i}(t+1) = x_{i}(t) + v_{i}(t+1)\n\n    The velocity update follows:\n\n    .. 
math::\n\n        v_{ij}(t+1) = w \\cdot v_{ij}(t) + c_1 r_{1j}(t)[y_{ij}(t) - x_{ij}(t)]\n                      + c_2 r_{2j}(t)[\\hat{y}_{lj}(t) - x_{ij}(t)]\n\n    Where:\n        - :math:`w`: inertia weight controlling momentum\n        - :math:`c_1`: cognitive parameter for attraction to personal best\n        - :math:`c_2`: social parameter for attraction to local best\n        - :math:`r_{1j}, r_{2j}`: random numbers in [0,1]\n        - :math:`y_{ij}(t)`: personal best position of particle i\n        - :math:`\\hat{y}_{lj}(t)`: local best position in particle i's neighborhood\n\n    The algorithm is based on the original Particle Swarm Optimization method by\n    :cite:`Kennedy1995` and the local best concept introduced in\n    :cite:`EberhartKennedy1995`.\n\n    \"\"\"\n\n    topology: RingTopology = RingTopology()\n    \"\"\"Configuration for the Ring topology.\n\n    This algorithm uses a fixed ring topology where particles are connected to their\n    local neighbors. This parameter allows customization of the number of neighbors,\n    distance metric, and whether the topology remains static throughout optimization.\n\n    \"\"\"\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        if not IS_PYSWARMS_INSTALLED:\n            raise NotInstalledError(PYSWARMS_NOT_INSTALLED_ERROR)\n\n        import pyswarms as ps\n\n        pso_options_dict = {\n            \"c1\": self.cognitive_parameter,\n            \"c2\": self.social_parameter,\n            \"w\": self.inertia_weight,\n            \"k\": self.topology.k_neighbors,\n            \"p\": self.topology.p_norm,\n        }\n\n        optimizer_kwargs = {\n            \"options\": pso_options_dict,\n            \"static\": self.topology.static,\n        }\n\n        res = _pyswarms_internal(\n            problem=problem,\n            x0=x0,\n            optimizer_class=ps.single.LocalBestPSO,\n            
optimizer_kwargs=optimizer_kwargs,\n            algo_options=self,\n        )\n\n        return res\n\n\n@mark.minimizer(\n    name=\"pyswarms_general\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_PYSWARMS_INSTALLED,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=True,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass PySwarmsGeneralPSO(Algorithm, PSOCommonOptions):\n    r\"\"\"Minimize a scalar function using General Particle Swarm Optimization with custom\n    topologies.\n\n    A flexible PSO implementation that allows selection of different neighborhood\n    topologies, providing control over the balance between exploration and exploitation.\n    The topology determines how particles communicate and share information, directly\n    affecting the algorithm's search behavior.\n\n    The position update follows:\n\n    .. math::\n\n        x_{i}(t+1) = x_{i}(t) + v_{i}(t+1)\n\n    The velocity update follows:\n\n    .. math::\n\n        v_{ij}(t+1) = w \\cdot v_{ij}(t) + c_1 r_{1j}(t)[y_{ij}(t) - x_{ij}(t)]\n                      + c_2 r_{2j}(t)[\\hat{y}_{nj}(t) - x_{ij}(t)]\n\n    Where:\n        - :math:`w`: inertia weight controlling momentum\n        - :math:`c_1`: cognitive parameter for attraction to personal best\n        - :math:`c_2`: social parameter for attraction to neighborhood best\n        - :math:`r_{1j}, r_{2j}`: random numbers in [0,1]\n        - :math:`y_{ij}(t)`: personal best position of particle i\n        - :math:`\\hat{y}_{nj}(t)`: neighborhood best position\n\n    This algorithm is based on the original Particle Swarm Optimization method by\n    :cite:`Kennedy1995` with configurable topology structures. 
For topology references,\n    see :cite:`Lane2008SpatialPSO, Ni2013`.\n\n    \"\"\"\n\n    topology: Literal[\"star\", \"ring\", \"vonneumann\", \"random\", \"pyramid\"] | Topology = (\n        \"star\"\n    )\n    \"\"\"Topology structure for particle communication.\n\n    The `topology` can be specified in two ways:\n\n    1.  **By name (string):** e.g., ``\"star\"``, ``\"ring\"``. This uses the default\n        parameter values for that topology.\n    2.  **By dataclass instance:** e.g., ``RingTopology(k_neighbors=5, p_norm=1)``.\n        This allows for detailed configuration of topology-specific parameters.\n\n    Available topologies: ``StarTopology``, ``RingTopology``, ``VonNeumannTopology``,\n    ``RandomTopology``, ``PyramidTopology``.\n\n    \"\"\"\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        if not IS_PYSWARMS_INSTALLED:\n            raise NotInstalledError(PYSWARMS_NOT_INSTALLED_ERROR)\n\n        import pyswarms as ps\n\n        pyswarms_topology, topology_options = _resolve_topology_config(self.topology)\n        base_options = {\n            \"c1\": self.cognitive_parameter,\n            \"c2\": self.social_parameter,\n            \"w\": self.inertia_weight,\n        }\n        pso_options_dict = {**base_options, **topology_options}\n\n        optimizer_kwargs = {\n            \"options\": pso_options_dict,\n            \"topology\": pyswarms_topology,\n        }\n\n        res = _pyswarms_internal(\n            problem=problem,\n            x0=x0,\n            optimizer_class=ps.single.GeneralOptimizerPSO,\n            optimizer_kwargs=optimizer_kwargs,\n            algo_options=self,\n        )\n\n        return res\n\n\ndef _pyswarms_internal(\n    problem: InternalOptimizationProblem,\n    x0: NDArray[np.float64],\n    optimizer_class: Any,\n    optimizer_kwargs: dict[str, Any],\n    algo_options: PSOCommonOptions,\n) -> 
InternalOptimizeResult:\n    \"\"\"Internal function for PySwarms optimization.\n\n    Args:\n        problem: Internal optimization problem.\n        x0: Initial parameter vector.\n        optimizer_class: PySwarms optimizer class to use.\n        optimizer_kwargs: Arguments for optimizer class.\n        algo_options: The PySwarms common options.\n\n    Returns:\n        InternalOptimizeResult: Internal optimization result.\n\n    \"\"\"\n    if algo_options.seed is not None:\n        warnings.warn(\n            \"The 'seed' parameter only makes initial particle positions reproducible. \"\n            \"PySwarms still uses NumPy's global random functions for generating \"\n            \"velocities, updating coefficients, and handling other stochastic \"\n            \"operations. For fully deterministic results, set a global seed with \"\n            \"'np.random.seed()' before running the optimizer.\",\n            UserWarning,\n        )\n\n    rng = np.random.default_rng(algo_options.seed)\n\n    velocity_clamp = _build_velocity_clamp(\n        algo_options.velocity_clamp_min, algo_options.velocity_clamp_max\n    )\n    bounds = _get_pyswarms_bounds(problem.bounds)\n\n    if algo_options.initial_positions is not None:\n        init_pos = np.array(\n            [\n                problem.converter.params_to_internal(position)\n                for position in algo_options.initial_positions\n            ]\n        )\n    else:\n        init_pos = _create_initial_positions(\n            x0=x0,\n            n_particles=algo_options.n_particles,\n            bounds=bounds,\n            center=algo_options.center_init,\n            rng=rng,\n        )\n\n    optimizer = optimizer_class(\n        n_particles=algo_options.n_particles,\n        dimensions=len(x0),\n        bounds=bounds,\n        init_pos=init_pos,\n        velocity_clamp=velocity_clamp,\n        oh_strategy=algo_options.oh_strategy,\n        bh_strategy=algo_options.boundary_strategy,\n        
vh_strategy=algo_options.velocity_strategy,\n        ftol=algo_options.convergence_ftol_rel,\n        ftol_iter=algo_options.convergence_ftol_iter,\n        **optimizer_kwargs,\n    )\n\n    objective_wrapper = _create_batch_objective(problem, algo_options.n_cores)\n\n    result = optimizer.optimize(\n        objective_func=objective_wrapper,\n        iters=algo_options.stopping_maxiter,\n        verbose=algo_options.verbose,\n    )\n\n    res = _process_pyswarms_result(result=result, optimizer=optimizer)\n\n    return res\n\n\ndef _resolve_topology_config(\n    config: Literal[\"star\", \"ring\", \"vonneumann\", \"random\", \"pyramid\"] | Topology,\n) -> tuple[Any, dict[str, float | int]]:\n    \"\"\"Resolves the topology config into a pyswarms topology instance and options\n    dict.\n    \"\"\"\n    from pyswarms.backend.topology import Pyramid, Random, Ring, Star, VonNeumann\n\n    if isinstance(config, str):\n        default_topologies = {\n            \"star\": StarTopology(),\n            \"ring\": RingTopology(),\n            \"vonneumann\": VonNeumannTopology(),\n            \"random\": RandomTopology(),\n            \"pyramid\": PyramidTopology(),\n        }\n        if config not in default_topologies:\n            raise ValueError(f\"Unknown topology string: '{config}'\")\n        config = default_topologies[config]\n\n    topology_instance: Any\n    options: dict[str, float | int] = {}\n\n    if isinstance(config, StarTopology):\n        topology_instance = Star()\n    elif isinstance(config, RingTopology):\n        topology_instance = Ring(static=config.static)\n        options = {\"k\": config.k_neighbors, \"p\": config.p_norm}\n    elif isinstance(config, VonNeumannTopology):\n        topology_instance = VonNeumann()\n        options = {\"p\": config.p_norm, \"r\": config.range_param}\n    elif isinstance(config, RandomTopology):\n        topology_instance = Random(static=config.static)\n        options = {\"k\": config.k_neighbors}\n    elif 
isinstance(config, PyramidTopology):\n        topology_instance = Pyramid(static=config.static)\n    else:\n        raise TypeError(f\"Unsupported topology configuration type: {type(config)}\")\n\n    return topology_instance, options\n\n\ndef _build_velocity_clamp(\n    velocity_clamp_min: float | None, velocity_clamp_max: float | None\n) -> tuple[float, float] | None:\n    \"\"\"Build velocity clamp tuple.\"\"\"\n    clamp = None\n    if velocity_clamp_min is not None and velocity_clamp_max is not None:\n        clamp = (velocity_clamp_min, velocity_clamp_max)\n    return clamp\n\n\ndef _get_pyswarms_bounds(\n    bounds: InternalBounds,\n) -> tuple[NDArray[np.float64], NDArray[np.float64]] | None:\n    \"\"\"Convert optimagic bounds to PySwarms format.\"\"\"\n    pyswarms_bounds = None\n\n    if bounds.lower is not None and bounds.upper is not None:\n        if not np.all(np.isfinite(bounds.lower)) or not np.all(\n            np.isfinite(bounds.upper)\n        ):\n            raise ValueError(\"PySwarms does not support infinite bounds.\")\n\n        pyswarms_bounds = (bounds.lower, bounds.upper)\n\n    return pyswarms_bounds\n\n\ndef _create_initial_positions(\n    x0: NDArray[np.float64],\n    n_particles: int,\n    bounds: tuple[NDArray[np.float64], NDArray[np.float64]] | None,\n    center: float,\n    rng: np.random.Generator,\n) -> NDArray[np.float64]:\n    \"\"\"Create an initial swarm positions.\n\n    Args:\n        x0: Initial parameter vector.\n        n_particles: Number of particles in the swarm.\n        bounds: Tuple of (lower_bounds, upper_bounds) arrays or None.\n        center: Scaling factor for initial particle positions around bounds.\n        rng: NumPy random number generator instance.\n\n    Returns:\n        Initial positions array of shape (n_particles, n_dimensions)\n        where each row represents one particle's starting position.\n\n    \"\"\"\n    n_dimensions = len(x0)\n    if bounds is None:\n        lower_bounds: 
NDArray[np.float64] = np.zeros(n_dimensions, dtype=np.float64)\n        upper_bounds: NDArray[np.float64] = np.ones(n_dimensions, dtype=np.float64)\n    else:\n        lower_bounds, upper_bounds = bounds\n\n    # Generate random initial positions within the bounds, scaled by center\n    init_pos = center * rng.uniform(\n        low=lower_bounds, high=upper_bounds, size=(n_particles, n_dimensions)\n    )\n\n    init_pos[0] = x0\n    init_pos = np.clip(init_pos, lower_bounds, upper_bounds)\n\n    return init_pos\n\n\ndef _create_batch_objective(\n    problem: InternalOptimizationProblem,\n    n_cores: int,\n) -> Callable[[NDArray[np.float64]], NDArray[np.float64]]:\n    \"\"\"Return an batch objective function.\"\"\"\n\n    def batch_objective(positions: NDArray[np.float64]) -> NDArray[np.float64]:\n        \"\"\"Compute objective values for all particles in positions.\n\n        Args:\n            positions: 2D array of shape (n_particles, n_dimensions) with\n            particle positions.\n\n        Returns:\n            1D array of shape (n_particles,) with objective values.\n\n        \"\"\"\n        arguments = [position for position in positions]\n        results = problem.batch_fun(arguments, n_cores=n_cores)\n\n        return np.array(results)\n\n    return batch_objective\n\n\ndef _process_pyswarms_result(\n    result: tuple[float, NDArray[np.float64]], optimizer: Any\n) -> InternalOptimizeResult:\n    \"\"\"Convert PySwarms result to optimagic format.\"\"\"\n    best_cost, best_position = result\n    n_iterations = len(optimizer.cost_history)\n    n_particles = optimizer.n_particles\n\n    return InternalOptimizeResult(\n        x=best_position,\n        fun=best_cost,\n        success=True,\n        message=\"PySwarms optimization completed\",\n        n_fun_evals=n_particles * n_iterations,\n        n_jac_evals=0,\n        n_hess_evals=0,\n        n_iterations=n_iterations,\n    )\n"
  },
  {
    "path": "src/optimagic/optimizers/scipy_optimizers.py",
    "content": "\"\"\"Implement scipy algorithms.\n\nThe following ``scipy`` algorithms are not supported because they\nrequire the specification of the Hessian:\n\n- dogleg\n- trust-ncg\n- trust-exact\n- trust-krylov\n\nThe following arguments are not supported as part of ``algo_options``:\n\n- ``disp``\n    If set to True would print a convergence message.\n    In optimagic it's always set to its default False.\n    Refer to optimagic's result dictionary's \"success\" entry for the convergence\n    message.\n- ``return_all``\n    If set to True, a list of the best solution at each iteration is returned.\n    In optimagic it's always set to its default False.\n- ``tol``\n    This argument of minimize (not an options key) is passed as different types of\n    tolerance (gradient, parameter or criterion, as well as relative or absolute)\n    depending on the selected algorithm. We require the user to explicitely input\n    the tolerance criteria or use our defaults instead.\n- ``args``\n    This argument of minimize (not an options key) is partialed into the function\n    for the user. 
Specify ``criterion_kwargs`` in ``maximize`` or ``minimize`` to\n    achieve the same behavior.\n- ``callback``\n    This argument would be called after each iteration and the algorithm would\n    terminate if it returned True.\n\n\"\"\"\n\nfrom __future__ import annotations\n\nimport functools\nfrom dataclasses import dataclass\nfrom typing import Any, Callable, List, Literal, SupportsInt, Tuple\n\nimport numpy as np\nimport scipy\nimport scipy.optimize\nfrom numpy.typing import NDArray\nfrom scipy.optimize import Bounds as ScipyBounds\nfrom scipy.optimize import NonlinearConstraint\nfrom scipy.optimize import OptimizeResult as ScipyOptimizeResult\n\nfrom optimagic import mark\nfrom optimagic.batch_evaluators import process_batch_evaluator\nfrom optimagic.optimization.algo_options import (\n    CONVERGENCE_FTOL_ABS,\n    CONVERGENCE_FTOL_REL,\n    CONVERGENCE_GTOL_ABS,\n    CONVERGENCE_GTOL_REL,\n    CONVERGENCE_SECOND_BEST_FTOL_ABS,\n    CONVERGENCE_SECOND_BEST_XTOL_ABS,\n    CONVERGENCE_XTOL_ABS,\n    CONVERGENCE_XTOL_REL,\n    LIMITED_MEMORY_STORAGE_LENGTH,\n    MAX_LINE_SEARCH_STEPS,\n    STOPPING_MAXFUN,\n    STOPPING_MAXFUN_GLOBAL,\n    STOPPING_MAXITER,\n)\nfrom optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult\nfrom optimagic.optimization.internal_optimization_problem import (\n    InternalBounds,\n    InternalOptimizationProblem,\n)\nfrom optimagic.parameters.nonlinear_constraints import (\n    equality_as_inequality_constraints,\n    vector_as_list_of_scalar_constraints,\n)\nfrom optimagic.typing import (\n    AggregationLevel,\n    BatchEvaluator,\n    BatchEvaluatorLiteral,\n    NegativeFloat,\n    NonNegativeFloat,\n    NonNegativeInt,\n    PositiveFloat,\n    PositiveInt,\n)\nfrom optimagic.utilities import calculate_trustregion_initial_radius\n\n\n@mark.minimizer(\n    name=\"scipy_lbfgsb\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=True,\n    is_global=False,\n    needs_jac=True,\n    needs_hess=False,\n    
needs_bounds=False,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=True,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass ScipyLBFGSB(Algorithm):\n    \"\"\"Minimize a scalar differentiable function using the L-BFGS-B algorithm.\n\n    The optimizer is taken from scipy, which calls the Fortran code written by the\n    original authors of the algorithm. The Fortran code includes the corrections\n    and improvements that were introduced in a follow up paper.\n\n    lbfgsb is a limited memory version of the original bfgs algorithm, that deals with\n    lower and upper bounds via an active set approach.\n\n    The lbfgsb algorithm is well suited for differentiable scalar optimization problems\n    with up to several hundred parameters.\n\n    It is a quasi-newton line search algorithm. At each trial point it evaluates the\n    criterion function and its gradient to find a search direction. It then approximates\n    the hessian using the stored history of gradients and uses the hessian to calculate\n    a candidate step size. Then it uses a gradient based line search algorithm to\n    determine the actual step length. Since the algorithm always evaluates the gradient\n    and criterion function jointly, the user should provide a ``fun_and_jac`` function\n    that exploits the synergies in the calculation of criterion and gradient.\n\n    The lbfgsb algorithm is almost perfectly scale invariant. Thus, it is not necessary\n    to scale the parameters.\n\n    \"\"\"\n\n    convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL\n    r\"\"\"Converge if the relative change in the objective function is less than this\n    value. More formally, this is expressed as.\n\n    .. 
math::\n\n        \\frac{f^k - f^{k+1}}{\\max\\{{|f^k|, |f^{k+1}|, 1}\\}} \\leq\n        \\textsf{convergence_ftol_rel}.\n\n    \"\"\"\n\n    convergence_gtol_abs: NonNegativeFloat = CONVERGENCE_GTOL_ABS\n    \"\"\"Converge if the absolute values in the gradient of the objective function are\n    less than this value.\"\"\"\n\n    stopping_maxfun: PositiveInt = STOPPING_MAXFUN\n    \"\"\"Maximum number of function evaluations.\"\"\"\n\n    stopping_maxiter: PositiveInt = STOPPING_MAXITER\n    \"\"\"Maximum number of iterations.\"\"\"\n\n    limited_memory_storage_length: PositiveInt = LIMITED_MEMORY_STORAGE_LENGTH\n    \"\"\"The maximum number of variable metric corrections used to define the limited\n    memory matrix. This is the 'maxcor' parameter in the SciPy documentation.\n\n    The default value is taken from SciPy's L-BFGS-B implementation. Larger values use\n    more memory but may converge faster for some problems.\n\n    \"\"\"\n\n    max_line_search_steps: PositiveInt = MAX_LINE_SEARCH_STEPS\n    \"\"\"The maximum number of line search steps. 
This is the 'maxls' parameter in the\n    SciPy documentation.\n\n    The default value is taken from SciPy's L-BFGS-B implementation.\n\n    \"\"\"\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        options = {\n            \"maxcor\": self.limited_memory_storage_length,\n            \"ftol\": self.convergence_ftol_rel,\n            \"gtol\": self.convergence_gtol_abs,\n            \"maxfun\": self.stopping_maxfun,\n            \"maxiter\": self.stopping_maxiter,\n            \"maxls\": self.max_line_search_steps,\n        }\n        raw_res = scipy.optimize.minimize(\n            fun=problem.fun_and_jac,\n            x0=x0,\n            method=\"L-BFGS-B\",\n            jac=True,\n            bounds=_get_scipy_bounds(problem.bounds),\n            options=options,\n        )\n        res = process_scipy_result(raw_res)\n        return res\n\n\n@mark.minimizer(\n    name=\"scipy_slsqp\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=True,\n    is_global=False,\n    needs_jac=True,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=True,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=True,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass ScipySLSQP(Algorithm):\n    convergence_ftol_abs: NonNegativeFloat = CONVERGENCE_SECOND_BEST_FTOL_ABS\n    stopping_maxiter: PositiveInt = STOPPING_MAXITER\n    display: bool = False\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        options = {\n            \"maxiter\": self.stopping_maxiter,\n            \"ftol\": self.convergence_ftol_abs,\n            \"disp\": self.display,\n        }\n        raw_res = scipy.optimize.minimize(\n            fun=problem.fun_and_jac,\n            
x0=x0,\n            method=\"SLSQP\",\n            jac=True,\n            bounds=_get_scipy_bounds(problem.bounds),\n            constraints=problem.nonlinear_constraints,\n            options=options,\n        )\n        res = process_scipy_result(raw_res)\n        return res\n\n\n@mark.minimizer(\n    name=\"scipy_neldermead\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=True,\n    is_global=False,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=True,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass ScipyNelderMead(Algorithm):\n    stopping_maxiter: PositiveInt = STOPPING_MAXITER\n    stopping_maxfun: PositiveInt = STOPPING_MAXFUN\n    convergence_ftol_abs: NonNegativeFloat = CONVERGENCE_SECOND_BEST_FTOL_ABS\n    convergence_xtol_abs: NonNegativeFloat = CONVERGENCE_SECOND_BEST_XTOL_ABS\n    adaptive: bool = False\n    display: bool = False\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        options = {\n            \"maxiter\": self.stopping_maxiter,\n            \"maxfev\": self.stopping_maxfun,\n            \"xatol\": self.convergence_xtol_abs,\n            \"fatol\": self.convergence_ftol_abs,\n            # TODO: Benchmark if adaptive = True works better\n            \"adaptive\": self.adaptive,\n            \"disp\": self.display,\n        }\n        raw_res = scipy.optimize.minimize(\n            fun=problem.fun,\n            x0=x0,\n            bounds=_get_scipy_bounds(problem.bounds),\n            method=\"Nelder-Mead\",\n            options=options,\n        )\n        res = process_scipy_result(raw_res)\n        return res\n\n\n@mark.minimizer(\n    name=\"scipy_powell\",\n    solver_type=AggregationLevel.SCALAR,\n    
is_available=True,\n    is_global=False,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=True,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass ScipyPowell(Algorithm):\n    convergence_xtol_rel: NonNegativeFloat = CONVERGENCE_XTOL_REL\n    convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL\n    stopping_maxfun: PositiveInt = STOPPING_MAXFUN\n    stopping_maxiter: PositiveInt = STOPPING_MAXITER\n    display: bool = False\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        options = {\n            \"xtol\": self.convergence_xtol_rel,\n            \"ftol\": self.convergence_ftol_rel,\n            \"maxfev\": self.stopping_maxfun,\n            \"maxiter\": self.stopping_maxiter,\n            \"disp\": self.display,\n        }\n        raw_res = scipy.optimize.minimize(\n            fun=problem.fun,\n            x0=x0,\n            method=\"Powell\",\n            bounds=_get_scipy_bounds(problem.bounds),\n            options=options,\n        )\n        res = process_scipy_result(raw_res)\n        return res\n\n\n@mark.minimizer(\n    name=\"scipy_bfgs\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=True,\n    is_global=False,\n    needs_jac=True,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=False,\n    supports_bounds=False,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass ScipyBFGS(Algorithm):\n    convergence_gtol_abs: NonNegativeFloat = CONVERGENCE_GTOL_ABS\n    stopping_maxiter: PositiveInt = STOPPING_MAXITER\n    norm: NonNegativeFloat = np.inf\n    convergence_xtol_rel: 
NonNegativeFloat = CONVERGENCE_XTOL_REL\n    display: bool = False\n    armijo_condition: NonNegativeFloat = 1e-4\n    curvature_condition: NonNegativeFloat = 0.9\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        options = {\n            \"gtol\": self.convergence_gtol_abs,\n            \"maxiter\": self.stopping_maxiter,\n            \"norm\": self.norm,\n            \"xrtol\": self.convergence_xtol_rel,\n            \"disp\": self.display,\n            \"c1\": self.armijo_condition,\n            \"c2\": self.curvature_condition,\n        }\n        raw_res = scipy.optimize.minimize(\n            fun=problem.fun_and_jac, x0=x0, method=\"BFGS\", jac=True, options=options\n        )\n        res = process_scipy_result(raw_res)\n        return res\n\n\n@mark.minimizer(\n    name=\"scipy_conjugate_gradient\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=True,\n    is_global=False,\n    needs_jac=True,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=False,\n    supports_bounds=False,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass ScipyConjugateGradient(Algorithm):\n    convergence_gtol_abs: NonNegativeFloat = CONVERGENCE_GTOL_ABS\n    stopping_maxiter: PositiveInt = STOPPING_MAXITER\n    norm: NonNegativeFloat = np.inf\n    display: bool = False\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        options = {\n            \"gtol\": self.convergence_gtol_abs,\n            \"maxiter\": self.stopping_maxiter,\n            \"norm\": self.norm,\n            \"disp\": self.display,\n        }\n        raw_res = scipy.optimize.minimize(\n            fun=problem.fun_and_jac, x0=x0, method=\"CG\", 
jac=True, options=options\n        )\n        res = process_scipy_result(raw_res)\n        return res\n\n\n@mark.minimizer(\n    name=\"scipy_newton_cg\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=True,\n    is_global=False,\n    needs_jac=True,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=False,\n    supports_bounds=False,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass ScipyNewtonCG(Algorithm):\n    convergence_xtol_rel: NonNegativeFloat = CONVERGENCE_XTOL_REL\n    stopping_maxiter: PositiveInt = STOPPING_MAXITER\n    display: bool = False\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        options = {\n            \"xtol\": self.convergence_xtol_rel,\n            \"maxiter\": self.stopping_maxiter,\n            \"disp\": self.display,\n        }\n        raw_res = scipy.optimize.minimize(\n            fun=problem.fun_and_jac,\n            x0=x0,\n            method=\"Newton-CG\",\n            jac=True,\n            options=options,\n        )\n        res = process_scipy_result(raw_res)\n        return res\n\n\n@mark.minimizer(\n    name=\"scipy_cobyla\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=True,\n    is_global=False,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=False,\n    supports_bounds=False,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=True,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass ScipyCOBYLA(Algorithm):\n    convergence_xtol_rel: NonNegativeFloat = CONVERGENCE_XTOL_REL\n    stopping_maxiter: PositiveInt = STOPPING_MAXITER\n    trustregion_initial_radius: PositiveFloat | None = None\n    display: bool = False\n\n  
  def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        # TODO: Maybe we should leave the radius at their default\n        if self.trustregion_initial_radius is None:\n            radius = calculate_trustregion_initial_radius(x0)\n        else:\n            radius = self.trustregion_initial_radius\n\n        options = {\n            \"maxiter\": self.stopping_maxiter,\n            \"rhobeg\": radius,\n            \"disp\": self.display,\n        }\n\n        # cannot handle equality constraints\n        nonlinear_constraints = equality_as_inequality_constraints(\n            problem.nonlinear_constraints\n        )\n\n        raw_res = scipy.optimize.minimize(\n            fun=problem.fun,\n            x0=x0,\n            method=\"COBYLA\",\n            constraints=nonlinear_constraints,\n            options=options,\n            tol=self.convergence_xtol_rel,\n        )\n        res = process_scipy_result(raw_res)\n        return res\n\n\n@mark.minimizer(\n    name=\"scipy_ls_trf\",\n    solver_type=AggregationLevel.LEAST_SQUARES,\n    is_available=True,\n    is_global=False,\n    needs_jac=True,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=True,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass ScipyLSTRF(Algorithm):\n    convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL\n    convergence_gtol_rel: NonNegativeFloat = CONVERGENCE_GTOL_REL\n    stopping_maxfun: PositiveInt = STOPPING_MAXFUN\n    relative_step_size_diff_approx: NonNegativeFloat | None = None\n    tr_solver: Literal[\"exact\", \"lsmr\"] | None = None\n    tr_solver_options: dict[str, Any] | None = None\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: 
NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        if self.tr_solver_options is None:\n            tr_solver_options = {}\n        else:\n            tr_solver_options = self.tr_solver_options\n\n        lower_bounds = -np.inf if problem.bounds.lower is None else problem.bounds.lower\n        upper_bounds = np.inf if problem.bounds.upper is None else problem.bounds.upper\n\n        raw_res = scipy.optimize.least_squares(\n            fun=problem.fun,\n            x0=x0,\n            # This optimizer does not work with fun_and_jac\n            jac=problem.jac,\n            bounds=(lower_bounds, upper_bounds),\n            method=\"trf\",\n            max_nfev=self.stopping_maxfun,\n            ftol=self.convergence_ftol_rel,\n            gtol=self.convergence_gtol_rel,\n            diff_step=self.relative_step_size_diff_approx,\n            tr_solver=self.tr_solver,\n            tr_options=tr_solver_options,\n        )\n        res = process_scipy_result(raw_res)\n        return res\n\n\n@mark.minimizer(\n    name=\"scipy_ls_dogbox\",\n    solver_type=AggregationLevel.LEAST_SQUARES,\n    is_available=True,\n    is_global=False,\n    needs_jac=True,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=True,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass ScipyLSDogbox(Algorithm):\n    convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL\n    convergence_gtol_rel: NonNegativeFloat = CONVERGENCE_GTOL_REL\n    stopping_maxfun: PositiveInt = STOPPING_MAXFUN\n    relative_step_size_diff_approx: NonNegativeFloat | None = None\n    tr_solver: Literal[\"exact\", \"lsmr\"] | None = None\n    tr_solver_options: dict[str, Any] | None = None\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> 
InternalOptimizeResult:\n        if self.tr_solver_options is None:\n            tr_solver_options = {}\n        else:\n            tr_solver_options = self.tr_solver_options\n\n        lower_bounds = -np.inf if problem.bounds.lower is None else problem.bounds.lower\n        upper_bounds = np.inf if problem.bounds.upper is None else problem.bounds.upper\n\n        raw_res = scipy.optimize.least_squares(\n            fun=problem.fun,\n            x0=x0,\n            # This optimizer does not work with fun_and_jac\n            jac=problem.jac,\n            bounds=(lower_bounds, upper_bounds),\n            method=\"dogbox\",\n            max_nfev=self.stopping_maxfun,\n            ftol=self.convergence_ftol_rel,\n            gtol=self.convergence_gtol_rel,\n            diff_step=self.relative_step_size_diff_approx,\n            tr_solver=self.tr_solver,\n            tr_options=tr_solver_options,\n        )\n        res = process_scipy_result(raw_res)\n        return res\n\n\n@mark.minimizer(\n    name=\"scipy_ls_lm\",\n    solver_type=AggregationLevel.LEAST_SQUARES,\n    is_available=True,\n    is_global=False,\n    needs_jac=True,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=False,\n    supports_bounds=False,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass ScipyLSLM(Algorithm):\n    convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL\n    convergence_gtol_rel: NonNegativeFloat = CONVERGENCE_GTOL_REL\n    stopping_maxfun: PositiveInt = STOPPING_MAXFUN\n    relative_step_size_diff_approx: NonNegativeFloat | None = None\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        raw_res = scipy.optimize.least_squares(\n            fun=problem.fun,\n            x0=x0,\n            # This optimizer does not 
work with fun_and_jac\n            jac=problem.jac,\n            method=\"lm\",\n            max_nfev=self.stopping_maxfun,\n            ftol=self.convergence_ftol_rel,\n            gtol=self.convergence_gtol_rel,\n            diff_step=self.relative_step_size_diff_approx,\n        )\n        res = process_scipy_result(raw_res)\n        return res\n\n\n@mark.minimizer(\n    name=\"scipy_truncated_newton\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=True,\n    is_global=False,\n    needs_jac=True,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=True,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass ScipyTruncatedNewton(Algorithm):\n    convergence_ftol_abs: NonNegativeFloat = CONVERGENCE_FTOL_ABS\n    convergence_xtol_abs: NonNegativeFloat = CONVERGENCE_XTOL_ABS\n    convergence_gtol_abs: NonNegativeFloat = CONVERGENCE_GTOL_ABS\n    stopping_maxfun: PositiveInt = STOPPING_MAXFUN\n    max_hess_evaluations_per_iteration: int = -1\n    max_step_for_line_search: NonNegativeFloat = 0\n    line_search_severity: float = -1\n    finite_difference_precision: NonNegativeFloat = 0\n    criterion_rescale_factor: float = -1\n    # TODO: Check type hint for `func_min_estimate`\n    func_min_estimate: float = 0\n    display: bool = False\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        options = {\n            \"ftol\": self.convergence_ftol_abs,\n            \"xtol\": self.convergence_xtol_abs,\n            \"gtol\": self.convergence_gtol_abs,\n            \"maxfun\": self.stopping_maxfun,\n            \"maxCGit\": self.max_hess_evaluations_per_iteration,\n            \"stepmx\": self.max_step_for_line_search,\n            \"minfev\": self.func_min_estimate,\n            
\"eta\": self.line_search_severity,\n            \"accuracy\": self.finite_difference_precision,\n            \"rescale\": self.criterion_rescale_factor,\n            \"disp\": self.display,\n        }\n\n        raw_res = scipy.optimize.minimize(\n            fun=problem.fun_and_jac,\n            x0=x0,\n            method=\"TNC\",\n            jac=True,\n            bounds=_get_scipy_bounds(problem.bounds),\n            options=options,\n        )\n        res = process_scipy_result(raw_res)\n        return res\n\n\n@mark.minimizer(\n    name=\"scipy_trust_constr\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=True,\n    is_global=False,\n    needs_jac=True,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=True,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=True,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass ScipyTrustConstr(Algorithm):\n    # TODO: Check if can be set to CONVERGENCE_GTOL_ABS\n    convergence_gtol_abs: NonNegativeFloat = 1e-08\n    convergence_xtol_rel: NonNegativeFloat = CONVERGENCE_XTOL_REL\n    stopping_maxiter: PositiveInt = STOPPING_MAXITER\n    trustregion_initial_radius: PositiveFloat | None = None\n    display: bool = False\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        if self.trustregion_initial_radius is None:\n            trustregion_initial_radius = calculate_trustregion_initial_radius(x0)\n        else:\n            trustregion_initial_radius = self.trustregion_initial_radius\n\n        options = {\n            \"gtol\": self.convergence_gtol_abs,\n            \"maxiter\": self.stopping_maxiter,\n            \"xtol\": self.convergence_xtol_rel,\n            \"initial_tr_radius\": trustregion_initial_radius,\n            \"disp\": self.display,\n        }\n\n        # cannot 
handle equality constraints\n        nonlinear_constraints = equality_as_inequality_constraints(\n            problem.nonlinear_constraints\n        )\n\n        raw_res = scipy.optimize.minimize(\n            fun=problem.fun_and_jac,\n            jac=True,\n            x0=x0,\n            method=\"trust-constr\",\n            bounds=_get_scipy_bounds(problem.bounds),\n            constraints=_get_scipy_constraints(nonlinear_constraints),\n            options=options,\n        )\n        res = process_scipy_result(raw_res)\n        return res\n\n\ndef process_scipy_result(scipy_res: ScipyOptimizeResult) -> InternalOptimizeResult:\n    res = InternalOptimizeResult(\n        x=scipy_res.x,\n        fun=scipy_res.fun,\n        success=bool(scipy_res.success),\n        message=str(scipy_res.message),\n        n_fun_evals=_int_if_not_none(scipy_res.get(\"nfev\")),\n        n_jac_evals=_int_if_not_none(scipy_res.get(\"njev\")),\n        n_hess_evals=_int_if_not_none(scipy_res.get(\"nhev\")),\n        n_iterations=_int_if_not_none(scipy_res.get(\"nit\")),\n        # TODO: Pass on more things once we can convert them to external\n        status=None,\n        jac=None,\n        hess=None,\n        hess_inv=None,\n        max_constraint_violation=None,\n        info=None,\n        history=None,\n    )\n    return res\n\n\ndef _int_if_not_none(value: SupportsInt | None) -> int | None:\n    if value is None:\n        return None\n    return int(value)\n\n\ndef _get_scipy_constraints(constraints):\n    \"\"\"Transform internal nonlinear constraints to scipy readable format.\n\n    This format is currently only used by scipy_trust_constr.\n\n    \"\"\"\n    scipy_constraints = [_internal_to_scipy_constraint(c) for c in constraints]\n    return scipy_constraints\n\n\ndef _internal_to_scipy_constraint(c):\n    new_constr = NonlinearConstraint(\n        fun=c[\"fun\"],\n        lb=np.zeros(c[\"n_constr\"]),\n        ub=np.tile(np.inf, c[\"n_constr\"]),\n        jac=c[\"jac\"],\n   
 )\n    return new_constr\n\n\n@mark.minimizer(\n    name=\"scipy_basinhopping\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=True,\n    is_global=True,\n    needs_jac=True,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=True,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass ScipyBasinhopping(Algorithm):\n    local_algorithm: (\n        Literal[\n            \"Nelder-Mead\",\n            \"Powell\",\n            \"CG\",\n            \"BFGS\",\n            \"Newton-CG\",\n            \"L-BFGS-B\",\n            \"TNC\",\n            \"COBYLA\",\n            \"SLSQP\",\n            \"trust-constr\",\n            \"dogleg\",\n            \"trust-ncg\",\n            \"trust-exact\",\n            \"trust-krylov\",\n        ]\n        | Callable\n    ) = \"L-BFGS-B\"\n    n_local_optimizations: PositiveInt = 100\n    temperature: NonNegativeFloat = 1.0\n    stepsize: NonNegativeFloat = 0.5\n    local_algo_options: dict[str, Any] | None = None\n    take_step: Callable | None = None\n    accept_test: Callable | None = None\n    interval: PositiveInt = 50\n    convergence_n_unchanged_iterations: PositiveInt | None = None\n    seed: int | np.random.Generator | np.random.RandomState | None = None\n    target_accept_rate: NonNegativeFloat = 0.5\n    stepwise_factor: NonNegativeFloat = 0.9\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        n_local_optimizations = max(1, self.n_local_optimizations - 1)\n        if self.local_algo_options is None:\n            local_algo_options = {}\n        else:\n            local_algo_options = self.local_algo_options\n        minimizer_kwargs = {\n            \"method\": self.local_algorithm,\n            \"bounds\": 
_get_scipy_bounds(problem.bounds),\n            \"jac\": problem.jac,\n        }\n        minimizer_kwargs = {**minimizer_kwargs, **local_algo_options}\n\n        res = scipy.optimize.basinhopping(\n            func=problem.fun,\n            x0=x0,\n            minimizer_kwargs=minimizer_kwargs,\n            niter=n_local_optimizations,\n            T=self.temperature,\n            stepsize=self.stepsize,\n            take_step=self.take_step,\n            accept_test=self.accept_test,\n            interval=self.interval,\n            niter_success=self.convergence_n_unchanged_iterations,\n            seed=self.seed,\n            target_accept_rate=self.target_accept_rate,\n            stepwise_factor=self.stepwise_factor,\n        )\n\n        return process_scipy_result(res)\n\n\n@mark.minimizer(\n    name=\"scipy_brute\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=True,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=True,\n    supports_parallelism=True,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=True,\n)\n@dataclass(frozen=True)\nclass ScipyBrute(Algorithm):\n    n_grid_points: PositiveInt = 20\n    polishing_function: Callable | None = None\n    n_cores: PositiveInt = 1\n    batch_evaluator: BatchEvaluatorLiteral | BatchEvaluator = \"joblib\"\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        workers = _get_workers(self.n_cores, self.batch_evaluator)\n        if problem.bounds.lower is None or problem.bounds.upper is None:\n            raise ValueError(\n                \"\"\"Global algorithms like scipy_brute need finite bounds\n                 for all parameters\"\"\"\n            )\n        raw_res = scipy.optimize.brute(\n            func=problem.fun,\n            
ranges=tuple(zip(problem.bounds.lower, problem.bounds.upper, strict=True)),\n            Ns=self.n_grid_points,\n            full_output=True,\n            finish=self.polishing_function,\n            workers=workers,\n        )\n        res = InternalOptimizeResult(\n            x=raw_res[0],\n            fun=raw_res[1],\n            n_fun_evals=raw_res[2].size,\n            n_iterations=raw_res[2].size,\n            success=True,\n            message=\"brute force optimization terminated successfully\",\n        )\n\n        return res\n\n\n@mark.minimizer(\n    name=\"scipy_differential_evolution\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=True,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=True,\n    supports_parallelism=True,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=True,\n    disable_history=True,\n)\n@dataclass(frozen=True)\nclass ScipyDifferentialEvolution(Algorithm):\n    strategy: (\n        Literal[\n            \"best1bin\",\n            \"best1exp\",\n            \"rand1exp\",\n            \"randtobest1exp\",\n            \"currenttobest1exp\",\n            \"best2exp\",\n            \"rand2exp\",\n            \"randtobest1bin\",\n            \"currenttobest1bin\",\n            \"best2bin\",\n            \"rand2bin\",\n            \"rand1bin\",\n        ]\n        | Callable\n    ) = \"best1bin\"\n    stopping_maxiter: PositiveInt = STOPPING_MAXFUN_GLOBAL\n    population_size_multiplier: NonNegativeInt = 15\n    convergence_ftol_rel: NonNegativeFloat = 0.01\n    # TODO: Refine type to add ranges [0,2] if float.\n    mutation_constant: NonNegativeFloat | Tuple[NonNegativeFloat, NonNegativeFloat] = (\n        0.5,\n        1,\n    )\n    # TODO: Refine type to add ranges [0,1].\n    recombination_constant: NonNegativeFloat = 0.7\n    seed: int | np.random.Generator | np.random.RandomState | None = 
None\n    polish: bool = True\n    sampling_method: (\n        Literal[\"latinhypercube\", \"random\", \"sobol\", \"halton\"] | NDArray[np.float64]\n    ) = \"latinhypercube\"\n    convergence_ftol_abs: NonNegativeFloat = CONVERGENCE_SECOND_BEST_FTOL_ABS\n    n_cores: PositiveInt = 1\n    batch_evaluator: BatchEvaluatorLiteral | BatchEvaluator = \"joblib\"\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        workers = _get_workers(self.n_cores, self.batch_evaluator)\n        res = scipy.optimize.differential_evolution(\n            func=problem.fun,\n            bounds=_get_scipy_bounds(problem.bounds),\n            strategy=self.strategy,\n            maxiter=self.stopping_maxiter,\n            popsize=self.population_size_multiplier,\n            tol=self.convergence_ftol_rel,\n            mutation=self.mutation_constant,\n            recombination=self.recombination_constant,\n            seed=self.seed,\n            polish=self.polish,\n            init=self.sampling_method,\n            atol=self.convergence_ftol_abs,\n            updating=\"deferred\",\n            workers=workers,\n            constraints=_get_scipy_constraints(problem.nonlinear_constraints),\n        )\n\n        return process_scipy_result(res)\n\n\n@mark.minimizer(\n    name=\"scipy_shgo\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=True,\n    is_global=True,\n    needs_jac=True,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=True,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=True,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass ScipySHGO(Algorithm):\n    local_algorithm: (\n        Literal[\n            \"Nelder-Mead\",\n            \"Powell\",\n            \"CG\",\n            \"BFGS\",\n            \"Newton-CG\",\n            
\"L-BFGS-B\",\n            \"TNC\",\n            \"COBYLA\",\n            \"SLSQP\",\n            \"trust-constr\",\n            \"dogleg\",\n            \"trust-ncg\",\n            \"trust-exact\",\n            \"trust-krylov\",\n        ]\n        | Callable\n    ) = \"L-BFGS-B\"\n    local_algo_options: dict[str, Any] | None = None\n    n_sampling_points: PositiveInt = 128\n    n_simplex_iterations: PositiveInt = 1\n    sampling_method: Literal[\"simplicial\", \"halton\", \"sobol\"] | Callable = \"simplicial\"\n    max_sampling_evaluations: PositiveInt | None = None\n    convergence_minimum_criterion_value: float | None = None\n    convergence_minimum_criterion_tolerance: NonNegativeFloat = 1e-4\n    stopping_maxiter: PositiveInt | None = None\n    stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL\n    stopping_max_processing_time: PositiveFloat | None = None\n    minimum_homology_group_rank_differential: PositiveInt | None = None\n    symmetry: List | bool = False\n    minimize_every_iteration: bool = True\n    max_local_minimizations_per_iteration: PositiveInt | bool = False\n    infinity_constraints: bool = True\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        if self.local_algorithm == \"COBYLA\":\n            nonlinear_constraints = equality_as_inequality_constraints(\n                problem.nonlinear_constraints\n            )\n\n        nonlinear_constraints = vector_as_list_of_scalar_constraints(\n            problem.nonlinear_constraints\n        )\n\n        local_algo_options = (\n            {} if self.local_algo_options is None else self.local_algo_options\n        )\n        default_minimizer_kwargs = {\n            \"method\": self.local_algorithm,\n            \"bounds\": _get_scipy_bounds(problem.bounds),\n            \"jac\": problem.jac,\n        }\n\n        minimizer_kwargs = {**default_minimizer_kwargs, **local_algo_options}\n     
   options = {\n            \"maxfev\": self.max_sampling_evaluations,\n            \"f_min\": self.convergence_minimum_criterion_value,\n            \"f_tol\": self.convergence_minimum_criterion_tolerance,\n            \"maxiter\": self.stopping_maxiter,\n            \"maxev\": self.stopping_maxfun,\n            \"maxtime\": self.stopping_max_processing_time,\n            \"minhgrd\": self.minimum_homology_group_rank_differential,\n            \"symmetry\": self.symmetry,\n            \"jac\": problem.jac,\n            \"minimize_every_iter\": self.minimize_every_iteration,\n            \"local_iter\": self.max_local_minimizations_per_iteration,\n            \"infty_constraints\": self.infinity_constraints,\n        }\n\n        if any(options.values()) is False:\n            options_used = None\n        else:\n            options_used = options\n\n        res = scipy.optimize.shgo(\n            func=problem.fun,\n            bounds=_get_scipy_bounds(problem.bounds),\n            constraints=nonlinear_constraints,\n            minimizer_kwargs=minimizer_kwargs,\n            n=self.n_sampling_points,\n            iters=self.n_simplex_iterations,\n            sampling_method=self.sampling_method,\n            options=options_used,\n        )\n\n        return process_scipy_result(res)\n\n\n@mark.minimizer(\n    name=\"scipy_dual_annealing\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=True,\n    is_global=True,\n    needs_jac=True,\n    needs_hess=False,\n    needs_bounds=True,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass ScipyDualAnnealing(Algorithm):\n    stopping_maxiter: PositiveInt = STOPPING_MAXFUN_GLOBAL\n    local_algorithm: (\n        Literal[\n            \"Nelder-Mead\",\n            \"Powell\",\n            \"CG\",\n            \"BFGS\",\n      
      \"Newton-CG\",\n            \"L-BFGS-B\",\n            \"TNC\",\n            \"COBYLA\",\n            \"SLSQP\",\n            \"trust-constr\",\n            \"dogleg\",\n            \"trust-ncg\",\n            \"trust-exact\",\n            \"trust-krylov\",\n        ]\n        | Callable\n    ) = \"L-BFGS-B\"\n    local_algo_options: dict[str, Any] | None = None\n    # TODO: Refine type to add ranges (0.01, 5e4]\n    initial_temperature: PositiveFloat = 5230.0\n    # TODO: Refine type to add ranges (0,1)\n    restart_temperature_ratio: PositiveFloat = 2e-05\n    # TODO: Refine type to add ranges (1, 3]\n    visit: PositiveFloat = 2.62\n    # TODO: Refine type to add ranges (-1e4, -5]\n    accept: NegativeFloat = -5.0\n    stopping_maxfun: PositiveInt = STOPPING_MAXFUN\n    seed: int | np.random.Generator | np.random.RandomState | None = None\n    no_local_search: bool = False\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        local_algo_options = (\n            {} if self.local_algo_options is None else self.local_algo_options\n        )\n        default_minimizer_kwargs = {\n            \"method\": self.local_algorithm,\n            \"bounds\": _get_scipy_bounds(problem.bounds),\n            \"jac\": problem.jac,\n        }\n\n        minimizer_kwargs = {**default_minimizer_kwargs, **local_algo_options}\n\n        res = scipy.optimize.dual_annealing(\n            func=problem.fun,\n            bounds=_get_scipy_bounds(problem.bounds),\n            maxiter=self.stopping_maxiter,\n            minimizer_kwargs=minimizer_kwargs,\n            initial_temp=self.initial_temperature,\n            restart_temp_ratio=self.restart_temperature_ratio,\n            visit=self.visit,\n            accept=self.accept,\n            maxfun=self.stopping_maxfun,\n            seed=self.seed,\n            no_local_search=self.no_local_search,\n            x0=x0,\n        )\n\n 
       return process_scipy_result(res)\n\n\n@mark.minimizer(\n    name=\"scipy_direct\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=True,\n    is_global=True,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=True,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass ScipyDirect(Algorithm):\n    convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL\n    stopping_maxfun: PositiveInt = STOPPING_MAXFUN\n    stopping_maxiter: PositiveInt = STOPPING_MAXFUN_GLOBAL\n    locally_biased: bool = True\n    convergence_minimum_criterion_value: float = -np.inf\n    # TODO: must be between 0 and 1\n    convergence_minimum_criterion_tolerance: NonNegativeFloat = 1e-4\n    # TODO: must be between 0 and 1\n    volume_hyperrectangle_tolerance: NonNegativeFloat = 1e-16\n    # TODO: must be between 0 and 1\n    length_hyperrectangle_tolerance: NonNegativeFloat = 1e-6\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        res = scipy.optimize.direct(\n            func=problem.fun,\n            bounds=_get_scipy_bounds(problem.bounds),\n            eps=self.convergence_ftol_rel,\n            maxfun=self.stopping_maxfun,\n            maxiter=self.stopping_maxiter,\n            locally_biased=self.locally_biased,\n            f_min=self.convergence_minimum_criterion_value,\n            f_min_rtol=self.convergence_minimum_criterion_tolerance,\n            vol_tol=self.volume_hyperrectangle_tolerance,\n            len_tol=self.length_hyperrectangle_tolerance,\n        )\n\n        return process_scipy_result(res)\n\n\ndef _get_workers(n_cores, batch_evaluator):\n    batch_evaluator = process_batch_evaluator(batch_evaluator)\n    out = functools.partial(\n        
batch_evaluator,\n        n_cores=n_cores,\n        error_handling=\"raise\",\n    )\n    return out\n\n\ndef _get_scipy_bounds(bounds: InternalBounds) -> ScipyBounds | None:\n    if bounds.lower is None and bounds.upper is None:\n        return None\n\n    lower = bounds.lower if bounds.lower is not None else -np.inf\n    upper = bounds.upper if bounds.upper is not None else np.inf\n    return ScipyBounds(lb=lower, ub=upper)\n\n\ndef process_scipy_result_old(scipy_results_obj):\n    # using get with defaults to access dict elements is just a safety measure\n    raw_res = {**scipy_results_obj}\n    processed = {\n        \"solution_x\": raw_res.get(\"x\"),\n        \"solution_criterion\": raw_res.get(\"fun\"),\n        \"solution_derivative\": raw_res.get(\"jac\"),\n        \"solution_hessian\": raw_res.get(\"hess\"),\n        \"n_fun_evals\": raw_res.get(\"nfev\"),\n        \"n_jac_evals\": raw_res.get(\"njac\") or raw_res.get(\"njev\"),\n        \"n_iterations\": raw_res.get(\"nit\"),\n        \"success\": raw_res.get(\"success\"),\n        \"reached_convergence_criterion\": None,\n        \"message\": raw_res.get(\"message\"),\n    }\n    return processed\n"
  },
  {
    "path": "src/optimagic/optimizers/tao_optimizers.py",
    "content": "\"\"\"This module implements the POUNDERs algorithm.\"\"\"\n\nimport functools\nfrom dataclasses import dataclass\n\nimport numpy as np\nfrom numpy.typing import NDArray\n\nfrom optimagic import mark\nfrom optimagic.config import IS_PETSC4PY_INSTALLED\nfrom optimagic.exceptions import NotInstalledError\nfrom optimagic.optimization.algo_options import (\n    CONVERGENCE_GTOL_ABS,\n    CONVERGENCE_GTOL_REL,\n    CONVERGENCE_GTOL_SCALED,\n    STOPPING_MAXITER,\n)\nfrom optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult\nfrom optimagic.optimization.internal_optimization_problem import (\n    InternalOptimizationProblem,\n)\nfrom optimagic.typing import AggregationLevel, NonNegativeFloat, PositiveInt\nfrom optimagic.utilities import calculate_trustregion_initial_radius\n\n\n@mark.minimizer(\n    name=\"tao_pounders\",\n    solver_type=AggregationLevel.LEAST_SQUARES,\n    is_available=IS_PETSC4PY_INSTALLED,\n    is_global=False,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=False,\n    supports_bounds=True,\n    supports_infinite_bounds=True,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass TAOPounders(Algorithm):\n    \"\"\"Implement the POUNDERs algorithm.\"\"\"\n\n    convergence_gtol_abs: NonNegativeFloat = CONVERGENCE_GTOL_ABS\n    convergence_gtol_rel: NonNegativeFloat = CONVERGENCE_GTOL_REL\n    convergence_gtol_scaled: NonNegativeFloat = CONVERGENCE_GTOL_SCALED\n    trustregion_initial_radius: NonNegativeFloat | None = None\n    stopping_maxiter: PositiveInt = STOPPING_MAXITER\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        raw = tao_pounders(\n            criterion=problem.fun,\n            x=x0,\n            lower_bounds=problem.bounds.lower,\n            
upper_bounds=problem.bounds.upper,\n            convergence_gtol_abs=self.convergence_gtol_abs,\n            convergence_gtol_rel=self.convergence_gtol_rel,\n            convergence_gtol_scaled=self.convergence_gtol_scaled,\n            trustregion_initial_radius=self.trustregion_initial_radius,\n            stopping_maxiter=self.stopping_maxiter,\n        )\n\n        res = InternalOptimizeResult(\n            x=raw[\"solution_x\"],\n            fun=raw[\"solution_criterion\"],\n            success=raw[\"success\"],\n            message=raw[\"message\"],\n            n_fun_evals=raw[\"n_fun_evals\"],\n            n_jac_evals=0,\n            n_hess_evals=0,\n            n_iterations=raw[\"n_iterations\"],\n            info={\n                \"gradient_norm\": raw[\"gradient_norm\"],\n                \"criterion_norm\": raw[\"criterion_norm\"],\n                \"convergence_code\": raw[\"convergence_code\"],\n                \"convergence_reason\": raw[\"reached_convergence_criterion\"],\n            },\n        )\n\n        return res\n\n\ndef tao_pounders(\n    criterion,\n    x,\n    lower_bounds,\n    upper_bounds,\n    *,\n    convergence_gtol_abs=CONVERGENCE_GTOL_ABS,\n    convergence_gtol_rel=CONVERGENCE_GTOL_REL,\n    convergence_gtol_scaled=CONVERGENCE_GTOL_SCALED,\n    trustregion_initial_radius=None,\n    stopping_maxiter=STOPPING_MAXITER,\n):\n    r\"\"\"Minimize a function using the POUNDERs algorithm.\n\n    For details see\n    :ref: `tao_algorithm`.\n\n    \"\"\"\n    if not IS_PETSC4PY_INSTALLED:\n        raise NotInstalledError(\n            \"The 'tao_pounders' algorithm requires the petsc4py package to be \"\n            \"installed. If you are using Linux or MacOS, install the package with \"\n            \"'conda install -c conda-forge petsc4py'. The package is not available on \"\n            \"Windows. 
Windows users can use optimagics 'pounders' algorithm instead.\"\n        )\n    from petsc4py import PETSc\n\n    first_eval = criterion(x)\n    n_errors = len(first_eval)\n    _x = _initialise_petsc_array(x)\n    # We need to know the number of contributions of the criterion value to allocate the\n    # array.\n    residuals_out = _initialise_petsc_array(n_errors)\n\n    # Create the solver object.\n    tao = PETSc.TAO().create(PETSc.COMM_WORLD)\n\n    # Set the solver type.\n    tao.setType(\"pounders\")\n\n    tao.setFromOptions()\n\n    def func_tao(tao, x, resid_out):  # noqa: ARG001\n        \"\"\"Evaluate objective and attach result to an petsc object f.\n\n        This is required to use the pounders solver from tao.\n\n        Args:\n             tao: The tao object we created for the optimization task.\n             x (PETSc.array): Current parameter values.\n             f: Petsc object in which we save the current function value.\n\n        \"\"\"\n        resid_out.array = criterion(x.array)\n\n    # Set the procedure for calculating the objective. 
This part has to be changed if we\n    # want more than pounders.\n    tao.setResidual(func_tao, residuals_out)\n\n    if trustregion_initial_radius is None:\n        trustregion_initial_radius = calculate_trustregion_initial_radius(_x)\n    elif trustregion_initial_radius <= 0:\n        raise ValueError(\"The initial trust region radius must be > 0.\")\n    tao.setInitialTrustRegionRadius(trustregion_initial_radius)\n\n    # Add bounds if provided.\n    if lower_bounds is not None or upper_bounds is not None:\n        if lower_bounds is None:\n            lower_bounds = np.full(len(x), -np.inf)\n        if upper_bounds is None:\n            upper_bounds = np.full(len(x), np.inf)\n        lower_bounds = _initialise_petsc_array(lower_bounds)\n        upper_bounds = _initialise_petsc_array(upper_bounds)\n        tao.setVariableBounds(lower_bounds, upper_bounds)\n\n    # Put the starting values into the container and pass them to the optimizer.\n    tao.setInitial(_x)\n\n    # Obtain tolerances for the convergence criteria. Since we can not create\n    # scaled_gradient_tolerance manually we manually set absolute_gradient_tolerance and\n    # or relative_gradient_tolerance to zero once a subset of these two is turned off\n    # and scaled_gradient_tolerance is still turned on.\n    default_gatol = convergence_gtol_abs if convergence_gtol_abs else -1\n    default_gttol = convergence_gtol_scaled if convergence_gtol_scaled else -1\n    default_grtol = convergence_gtol_rel if convergence_gtol_rel else -1\n    # Set tolerances for default convergence tests.\n    tao.setTolerances(\n        gatol=default_gatol,\n        grtol=default_grtol,\n        gttol=default_gttol,\n    )\n\n    # Set user defined convergence tests. 
Beware that specifying multiple tests could\n    # overwrite others or lead to unclear behavior.\n    if stopping_maxiter is not None:\n        tao.setConvergenceTest(functools.partial(_max_iters, stopping_maxiter))\n    elif convergence_gtol_scaled is False and convergence_gtol_abs is False:\n        tao.setConvergenceTest(functools.partial(_grtol_conv, convergence_gtol_rel))\n    elif convergence_gtol_rel is False and convergence_gtol_scaled is False:\n        tao.setConvergenceTest(functools.partial(_gatol_conv, convergence_gtol_abs))\n    elif convergence_gtol_scaled is False:\n        tao.setConvergenceTest(\n            functools.partial(\n                _grtol_gatol_conv,\n                convergence_gtol_rel,\n                convergence_gtol_abs,\n            )\n        )\n\n    # Run the problem.\n    tao.solve()\n\n    results = _process_pounders_results(residuals_out, tao)\n\n    # Destroy petsc objects for memory reasons.\n    petsc_bounds = [b for b in (lower_bounds, upper_bounds) if b is not None]\n    for obj in [tao, _x, residuals_out, *petsc_bounds]:\n        obj.destroy()\n\n    return results\n\n\ndef _initialise_petsc_array(len_or_array):\n    \"\"\"Initialize an empty array or fill in provided values.\n\n    Args:\n        len_or_array (int or numpy.ndarray): If the value is an integer, allocate an\n            empty array with the given length. 
If the value is an array, allocate an\n            array of equal length and fill in the values.\n\n    \"\"\"\n    from petsc4py import PETSc\n\n    length = len_or_array if isinstance(len_or_array, int) else len(len_or_array)\n\n    array = PETSc.Vec().create(PETSc.COMM_WORLD)\n    array.setSizes(length)\n    array.setFromOptions()\n\n    if isinstance(len_or_array, np.ndarray):\n        array.array = len_or_array\n\n    return array\n\n\ndef _max_iters(max_iterations, tao):\n    if tao.getSolutionStatus()[0] < max_iterations:\n        return 0\n    elif tao.getSolutionStatus()[0] >= max_iterations:\n        tao.setConvergedReason(8)\n\n\ndef _gatol_conv(absolute_gradient_tolerance, tao):\n    if tao.getSolutionStatus()[2] >= absolute_gradient_tolerance:\n        return 0\n    elif tao.getSolutionStatus()[2] < absolute_gradient_tolerance:\n        tao.setConvergedReason(3)\n\n\ndef _grtol_conv(relative_gradient_tolerance, tao):\n    if (\n        tao.getSolutionStatus()[2] / tao.getSolutionStatus()[1]\n        >= relative_gradient_tolerance\n    ):\n        return 0\n    elif (\n        tao.getSolutionStatus()[2] / tao.getSolutionStatus()[1]\n        < relative_gradient_tolerance\n    ):\n        tao.setConvergedReason(4)\n\n\ndef _grtol_gatol_conv(relative_gradient_tolerance, absolute_gradient_tolerance, tao):\n    if (\n        tao.getSolutionStatus()[2] / tao.getSolutionStatus()[1]\n        >= relative_gradient_tolerance\n    ):\n        return 0\n    elif (\n        tao.getSolutionStatus()[2] / tao.getSolutionStatus()[1]\n        < relative_gradient_tolerance\n    ):\n        tao.setConvergedReason(4)\n\n    elif tao.getSolutionStatus()[2] < absolute_gradient_tolerance:\n        tao.setConvergedReason(3)\n\n\ndef _translate_tao_convergence_reason(tao_resaon):\n    mapping = {\n        3: \"absolute_gradient_tolerance below critical value\",\n        4: \"relative_gradient_tolerance below critical value\",\n        5: \"scaled_gradient_tolerance below critical 
value\",\n        6: \"step size small\",\n        7: \"objective below min value\",\n        8: \"user defined\",\n        -2: \"maxits reached\",\n        -4: \"numerical problems\",\n        -5: \"max funcevals reached\",\n        -6: \"line search failure\",\n        -7: \"trust region failure\",\n        -8: \"user defined\",\n    }\n    return mapping[tao_resaon]\n\n\ndef _process_pounders_results(residuals_out, tao):\n    convergence_code = tao.getConvergedReason()\n    convergence_reason = _translate_tao_convergence_reason(convergence_code)\n\n    results = {\n        \"solution_x\": tao.solution.array,\n        \"solution_criterion\": tao.function,\n        \"solution_derivative\": None,\n        \"solution_hessian\": None,\n        \"n_fun_evals\": tao.getIterationNumber(),\n        \"n_jac_evals\": None,\n        \"n_iterations\": None,\n        \"success\": bool(convergence_code >= 0),\n        \"reached_convergence_criterion\": (\n            convergence_reason if convergence_code >= 0 else None\n        ),\n        \"message\": convergence_reason,\n        # Further results.\n        \"solution_criterion_values\": residuals_out.array,\n        \"gradient_norm\": tao.gnorm,\n        \"criterion_norm\": tao.cnorm,\n        \"convergence_code\": convergence_code,\n    }\n\n    return results\n"
  },
  {
    "path": "src/optimagic/optimizers/tranquilo.py",
    "content": "from __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom typing import TYPE_CHECKING, Callable, Literal\n\nimport numpy as np\nfrom numpy.typing import NDArray\nfrom packaging import version\n\nfrom optimagic import mark\nfrom optimagic.config import IS_TRANQUILO_INSTALLED\nfrom optimagic.exceptions import NotInstalledError\nfrom optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult\nfrom optimagic.optimization.internal_optimization_problem import (\n    InternalOptimizationProblem,\n)\nfrom optimagic.typing import (\n    AggregationLevel,\n    NonNegativeFloat,\n    NonNegativeInt,\n    PositiveFloat,\n    PositiveInt,\n)\n\nif TYPE_CHECKING:\n    from tranquilo.options import (\n        AcceptanceOptions,\n        FilterOptions,\n        FitterOptions,\n        NoiseAdaptationOptions,\n        RadiusOptions,\n        SamplerOptions,\n        StagnationOptions,\n        SubsolverOptions,\n        VarianceEstimatorOptions,\n    )\n\nif IS_TRANQUILO_INSTALLED:\n    import tranquilo\n\n    IS_TRANQUILO_VERSION_NEWER_OR_EQUAL_TO_0_1_0 = version.parse(\n        tranquilo.__version__\n    ) >= version.parse(\"0.1.0\")\nelse:\n    IS_TRANQUILO_VERSION_NEWER_OR_EQUAL_TO_0_1_0 = False\n\nTRANQUILO_INSTALLATION_INSTRUCTIONS = (\n    \"The 'tranquilo' algorithm requires the tranquilo package version 0.1.0 or newer \"\n    \"to be installed. 
Install it with 'conda -c conda-forge install tranquilo>=0.1.0'.\"\n)\n\n\n@mark.minimizer(\n    name=\"tranquilo\",\n    solver_type=AggregationLevel.SCALAR,\n    is_available=IS_TRANQUILO_INSTALLED,\n    is_global=False,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=True,\n    supports_bounds=True,\n    supports_infinite_bounds=True,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass Tranquilo(Algorithm):\n    # function type\n    functype: Literal[\"scalar\"] = \"scalar\"\n    # basic options\n    noisy: bool = False\n    # convergence options\n    disable_convergence: bool = False\n    convergence_ftol_abs: NonNegativeFloat = 0.0\n    convergence_gtol_abs: NonNegativeFloat = 0.0\n    convergence_xtol_abs: NonNegativeFloat = 0.0\n    convergence_ftol_rel: NonNegativeFloat = 2e-9\n    convergence_gtol_rel: NonNegativeFloat = 1e-8\n    convergence_xtol_rel: NonNegativeFloat = 1e-8\n    convergence_min_trust_region_radius: NonNegativeFloat = 0.0\n    # stopping options\n    stopping_maxfun: PositiveInt = 2_000\n    stopping_maxiter: PositiveInt = 200\n    stopping_maxtime: NonNegativeFloat = np.inf\n    # single advanced options\n    batch_evaluator: Literal[\n        \"joblib\",\n        \"pathos\",\n    ] = \"joblib\"\n    n_cores: PositiveInt = 1\n    batch_size: PositiveInt | None = None\n    sample_size: PositiveInt | None = None\n    model_type: (\n        Literal[\n            \"quadratic\",\n            \"linear\",\n        ]\n        | None\n    ) = None\n    search_radius_factor: PositiveFloat | None = None\n    n_evals_per_point: NonNegativeInt | None = None\n    n_evals_at_start: NonNegativeInt | None = None\n    seed: int | None = 925408\n    # bundled advanced options\n    radius_options: RadiusOptions | None = None\n    stagnation_options: StagnationOptions | None = None\n    noise_adaptation_options: 
NoiseAdaptationOptions | None = None\n    # component names and related options\n    sampler: (\n        Literal[\n            \"optimal_hull\",\n            \"random_hull\",\n            \"random_interior\",\n        ]\n        | Callable\n    ) = \"optimal_hull\"\n    sampler_options: SamplerOptions | None = None\n    sample_filter: (\n        Literal[\n            \"discard_all\",\n            \"keep_all\",\n            \"clustering\",\n            \"drop_excess\",\n        ]\n        | Callable\n        | None\n    ) = None\n    sample_filter_options: FilterOptions | None = None\n    model_fitter: (\n        Literal[\n            \"ols\",\n            \"ridge\",\n            \"powell\",\n            \"tranquilo\",\n        ]\n        | Callable\n        | None\n    ) = None\n    model_fitter_options: FitterOptions | None = None\n    cube_subsolver: (\n        Literal[\n            \"bntr\",\n            \"bntr_fast\",\n            \"fallback_cube\",\n            \"fallback_multistart\",\n        ]\n        | Callable\n    ) = \"bntr_fast\"\n    sphere_subsolver: (\n        Literal[\n            \"gqtpar\",\n            \"gqtpar_fast\",\n            \"fallback_reparametrized\",\n            \"fallback_inscribed_cube\",\n            \"fallback_norm_constraint\",\n        ]\n        | Callable\n    ) = \"gqtpar_fast\"\n    retry_subproblem_with_fallback: bool = True\n    subsolver_options: SubsolverOptions | None = None\n    acceptance_decider: (\n        Literal[\n            \"classic\",\n            \"naive_noisy\",\n            \"classic_line_search\",\n            \"noisy\",\n        ]\n        | Callable\n        | None\n    ) = None\n    acceptance_decider_options: AcceptanceOptions | None = None\n    variance_estimator: Literal[\"classic\"] | Callable = \"classic\"\n    variance_estimator_options: VarianceEstimatorOptions | None = None\n    infinity_handler: Literal[\"relative\"] | Callable = \"relative\"\n    residualize: bool | None = None\n\n    def 
_solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        if not IS_TRANQUILO_VERSION_NEWER_OR_EQUAL_TO_0_1_0:\n            raise NotInstalledError(TRANQUILO_INSTALLATION_INSTRUCTIONS)\n        from tranquilo.tranquilo import _tranquilo\n\n        raw_res = _tranquilo(\n            functype=\"scalar\",\n            batch_fun=problem.batch_fun,\n            x=x0,\n            lower_bounds=problem.bounds.lower,\n            upper_bounds=problem.bounds.upper,\n            noisy=self.noisy,\n            disable_convergence=self.disable_convergence,\n            convergence_absolute_criterion_tolerance=self.convergence_ftol_abs,\n            convergence_absolute_gradient_tolerance=self.convergence_gtol_abs,\n            convergence_absolute_params_tolerance=self.convergence_xtol_abs,\n            convergence_relative_criterion_tolerance=self.convergence_ftol_rel,\n            convergence_relative_gradient_tolerance=self.convergence_gtol_rel,\n            convergence_relative_params_tolerance=self.convergence_xtol_rel,\n            convergence_min_trust_region_radius=self.convergence_min_trust_region_radius,\n            stopping_max_criterion_evaluations=self.stopping_maxfun,\n            stopping_max_iterations=self.stopping_maxiter,\n            stopping_max_time=self.stopping_maxtime,\n            n_cores=self.n_cores,\n            batch_size=self.batch_size,\n            sample_size=self.sample_size,\n            model_type=self.model_type,\n            search_radius_factor=self.search_radius_factor,\n            n_evals_per_point=self.n_evals_per_point,\n            n_evals_at_start=self.n_evals_at_start,\n            seed=self.seed,\n            radius_options=self.radius_options,\n            stagnation_options=self.stagnation_options,\n            noise_adaptation_options=self.noise_adaptation_options,\n            sampler=self.sampler,\n            
sampler_options=self.sampler_options,\n            sample_filter=self.sample_filter,\n            sample_filter_options=self.sample_filter_options,\n            model_fitter=self.model_fitter,\n            model_fitter_options=self.model_fitter_options,\n            cube_subsolver=self.cube_subsolver,\n            sphere_subsolver=self.sphere_subsolver,\n            retry_subproblem_with_fallback=self.retry_subproblem_with_fallback,\n            subsolver_options=self.subsolver_options,\n            acceptance_decider=self.acceptance_decider,\n            acceptance_decider_options=self.acceptance_decider_options,\n            variance_estimator=self.variance_estimator,\n            variance_estimator_options=self.variance_estimator_options,\n            infinity_handler=self.infinity_handler,\n            residualize=self.residualize,\n        )\n\n        res = InternalOptimizeResult(\n            x=raw_res[\"solution_x\"],\n            fun=raw_res[\"solution_criterion\"],\n            message=raw_res[\"message\"],\n            info={\"states\": raw_res[\"states\"]},\n        )\n        return res\n\n\n@mark.minimizer(\n    name=\"tranquilo_ls\",\n    solver_type=AggregationLevel.LEAST_SQUARES,\n    is_available=IS_TRANQUILO_INSTALLED,\n    is_global=False,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=True,\n    supports_bounds=True,\n    supports_infinite_bounds=True,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass TranquiloLS(Algorithm):\n    # basic options\n    noisy: bool = False\n    # convergence options\n    disable_convergence: bool = False\n    convergence_ftol_abs: NonNegativeFloat = 0.0\n    convergence_gtol_abs: NonNegativeFloat = 0.0\n    convergence_xtol_abs: NonNegativeFloat = 0.0\n    convergence_ftol_rel: NonNegativeFloat = 2e-9\n    convergence_gtol_rel: NonNegativeFloat = 1e-8\n    
convergence_xtol_rel: NonNegativeFloat = 1e-8\n    convergence_min_trust_region_radius: NonNegativeFloat = 0.0\n    # stopping options\n    stopping_maxfun: PositiveInt = 2_000\n    stopping_maxiter: PositiveInt = 200\n    stopping_maxtime: NonNegativeFloat = np.inf\n    # single advanced options\n    batch_evaluator: Literal[\n        \"joblib\",\n        \"pathos\",\n    ] = \"joblib\"\n    n_cores: PositiveInt = 1\n    batch_size: PositiveInt | None = None\n    sample_size: PositiveInt | None = None\n    model_type: (\n        Literal[\n            \"quadratic\",\n            \"linear\",\n        ]\n        | None\n    ) = None\n    search_radius_factor: PositiveFloat | None = None\n    n_evals_per_point: NonNegativeInt | None = None\n    n_evals_at_start: NonNegativeInt | None = None\n    seed: int | None = 925408\n    # bundled advanced options\n    radius_options: RadiusOptions | None = None\n    stagnation_options: StagnationOptions | None = None\n    noise_adaptation_options: NoiseAdaptationOptions | None = None\n    # component names and related options\n    sampler: (\n        Literal[\n            \"optimal_hull\",\n            \"random_hull\",\n            \"random_interior\",\n        ]\n        | Callable\n    ) = \"optimal_hull\"\n    sampler_options: SamplerOptions | None = None\n    sample_filter: (\n        Literal[\n            \"discard_all\",\n            \"keep_all\",\n            \"clustering\",\n            \"drop_excess\",\n        ]\n        | Callable\n        | None\n    ) = None\n    sample_filter_options: FilterOptions | None = None\n    model_fitter: (\n        Literal[\n            \"ols\",\n            \"ridge\",\n            \"powell\",\n            \"tranquilo\",\n        ]\n        | Callable\n        | None\n    ) = None\n    model_fitter_options: FitterOptions | None = None\n    cube_subsolver: (\n        Literal[\n            \"bntr\",\n            \"bntr_fast\",\n            \"fallback_cube\",\n            
\"fallback_multistart\",\n        ]\n        | Callable\n    ) = \"bntr_fast\"\n    sphere_subsolver: (\n        Literal[\n            \"gqtpar\",\n            \"gqtpar_fast\",\n            \"fallback_reparametrized\",\n            \"fallback_inscribed_cube\",\n            \"fallback_norm_constraint\",\n        ]\n        | Callable\n    ) = \"gqtpar_fast\"\n    retry_subproblem_with_fallback: bool = True\n    subsolver_options: SubsolverOptions | None = None\n    acceptance_decider: (\n        Literal[\n            \"classic\",\n            \"naive_noisy\",\n            \"classic_line_search\",\n            \"noisy\",\n        ]\n        | Callable\n        | None\n    ) = None\n    acceptance_decider_options: AcceptanceOptions | None = None\n    variance_estimator: Literal[\"classic\"] | Callable = \"classic\"\n    variance_estimator_options: VarianceEstimatorOptions | None = None\n    infinity_handler: Literal[\"relative\"] | Callable = \"relative\"\n    residualize: bool | None = None\n\n    def _solve_internal_problem(\n        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]\n    ) -> InternalOptimizeResult:\n        if not IS_TRANQUILO_VERSION_NEWER_OR_EQUAL_TO_0_1_0:\n            raise NotInstalledError(TRANQUILO_INSTALLATION_INSTRUCTIONS)\n        from tranquilo.tranquilo import _tranquilo\n\n        raw_res = _tranquilo(\n            functype=\"least_squares\",\n            batch_fun=problem.batch_fun,\n            x=x0,\n            lower_bounds=problem.bounds.lower,\n            upper_bounds=problem.bounds.upper,\n            noisy=self.noisy,\n            disable_convergence=self.disable_convergence,\n            convergence_absolute_criterion_tolerance=self.convergence_ftol_abs,\n            convergence_absolute_gradient_tolerance=self.convergence_gtol_abs,\n            convergence_absolute_params_tolerance=self.convergence_xtol_abs,\n            convergence_relative_criterion_tolerance=self.convergence_ftol_rel,\n            
convergence_relative_gradient_tolerance=self.convergence_gtol_rel,\n            convergence_relative_params_tolerance=self.convergence_xtol_rel,\n            convergence_min_trust_region_radius=self.convergence_min_trust_region_radius,\n            stopping_max_criterion_evaluations=self.stopping_maxfun,\n            stopping_max_iterations=self.stopping_maxiter,\n            stopping_max_time=self.stopping_maxtime,\n            n_cores=self.n_cores,\n            batch_size=self.batch_size,\n            sample_size=self.sample_size,\n            model_type=self.model_type,\n            search_radius_factor=self.search_radius_factor,\n            n_evals_per_point=self.n_evals_per_point,\n            n_evals_at_start=self.n_evals_at_start,\n            seed=self.seed,\n            radius_options=self.radius_options,\n            stagnation_options=self.stagnation_options,\n            noise_adaptation_options=self.noise_adaptation_options,\n            sampler=self.sampler,\n            sampler_options=self.sampler_options,\n            sample_filter=self.sample_filter,\n            sample_filter_options=self.sample_filter_options,\n            model_fitter=self.model_fitter,\n            model_fitter_options=self.model_fitter_options,\n            cube_subsolver=self.cube_subsolver,\n            sphere_subsolver=self.sphere_subsolver,\n            retry_subproblem_with_fallback=self.retry_subproblem_with_fallback,\n            subsolver_options=self.subsolver_options,\n            acceptance_decider=self.acceptance_decider,\n            acceptance_decider_options=self.acceptance_decider_options,\n            variance_estimator=self.variance_estimator,\n            variance_estimator_options=self.variance_estimator_options,\n            infinity_handler=self.infinity_handler,\n            residualize=self.residualize,\n        )\n        res = InternalOptimizeResult(\n            x=raw_res[\"solution_x\"],\n            fun=raw_res[\"solution_criterion\"],\n          
  message=raw_res[\"message\"],\n            info={\"states\": raw_res[\"states\"]},\n        )\n        return res\n"
  },
  {
    "path": "src/optimagic/parameters/__init__.py",
    "content": ""
  },
  {
    "path": "src/optimagic/parameters/block_trees.py",
    "content": "\"\"\"Functions to convert between array and block-tree representations of a matrix.\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom pybaum import tree_flatten, tree_unflatten\nfrom pybaum import tree_just_flatten as tree_leaves\n\nfrom optimagic.parameters.tree_registry import get_registry\n\n\ndef matrix_to_block_tree(matrix, outer_tree, inner_tree):\n    \"\"\"Convert a matrix (2-dimensional array) to block-tree.\n\n    A block tree most often arises when one applies an operation to a function that maps\n    between two trees. For certain functions this results in a 2-dimensional data array.\n    Two main examples are the Jacobian of the function f : inner_tree -> outer_tree,\n    which results in a block tree structure, or the covariance matrix of a tree, in\n    which case outer_tree = inner_tree.\n\n    Args:\n        matrix (numpy.ndarray): 2d representation of the block tree. Has shape (m, n).\n        outer_tree: A pytree. If flattened to scalars has length m.\n        inner_tree: A pytree. 
If flattened to scalars has length n.\n\n    Returns:\n        block_tree: A (block) pytree.\n\n    \"\"\"\n    _check_dimensions_matrix(matrix, outer_tree, inner_tree)\n\n    flat_outer, treedef_outer = tree_flatten(outer_tree)\n    flat_inner, treedef_inner = tree_flatten(inner_tree)\n\n    flat_outer_np = [_convert_to_numpy(leaf, only_pandas=True) for leaf in flat_outer]\n    flat_inner_np = [_convert_to_numpy(leaf, only_pandas=True) for leaf in flat_inner]\n\n    shapes_outer = [np.shape(a) for a in flat_outer_np]\n    shapes_inner = [np.shape(a) for a in flat_inner_np]\n\n    block_bounds_outer = np.cumsum([int(np.prod(s)) for s in shapes_outer[:-1]])\n    block_bounds_inner = np.cumsum([int(np.prod(s)) for s in shapes_inner[:-1]])\n\n    blocks = []\n    for leaf_outer, s1, submat in zip(\n        flat_outer,\n        shapes_outer,\n        np.split(matrix, block_bounds_outer, axis=0),\n        strict=False,\n    ):\n        row = []\n        for leaf_inner, s2, block_values in zip(\n            flat_inner,\n            shapes_inner,\n            np.split(submat, block_bounds_inner, axis=1),\n            strict=False,\n        ):\n            raw_block = block_values.reshape((*s1, *s2))\n            block = _convert_raw_block_to_pandas(raw_block, leaf_outer, leaf_inner)\n            row.append(block)\n\n        blocks.append(row)\n\n    block_tree = tree_unflatten(\n        treedef_outer, [tree_unflatten(treedef_inner, row) for row in blocks]\n    )\n\n    return block_tree\n\n\ndef hessian_to_block_tree(hessian, f_tree, params_tree):\n    \"\"\"Convert a Hessian array to block-tree format.\n\n    Remark: In comparison to Jax we need this formatting function because we calculate\n    the second derivative using second-order finite differences. Jax computes the\n    second derivative by applying their jacobian function twice, which produces the\n    desired block-tree shape of the Hessian automatically. 
If we apply our first\n    derivative function twice we get the same block-tree shape.\n\n    Args:\n        hessian (np.ndarray): The Hessian, 2- or 3-dimensional array representation of\n            the resulting block-tree.\n        f_tree (pytree): The function evaluated at params_tree.\n        params_tree (pytree): The params_tree.\n\n    Returns:\n        hessian_block_tree (pytree): The pytree\n\n    \"\"\"\n    _check_dimensions_hessian(hessian, f_tree, params_tree)\n\n    if hessian.ndim == 2:\n        hessian = hessian[np.newaxis]\n\n    flat_f, treedef_f = tree_flatten(f_tree)\n    flat_p, treedef_p = tree_flatten(params_tree)\n\n    flat_f_np = [_convert_to_numpy(leaf, only_pandas=True) for leaf in flat_f]\n    flat_p_np = [_convert_to_numpy(leaf, only_pandas=True) for leaf in flat_p]\n\n    shapes_f = [np.shape(a) for a in flat_f_np]\n    shapes_p = [np.shape(a) for a in flat_p_np]\n\n    block_bounds_f = np.cumsum([int(np.prod(s)) for s in shapes_f[:-1]])\n    block_bounds_p = np.cumsum([int(np.prod(s)) for s in shapes_p[:-1]])\n\n    sub_block_trees = []\n    for s0, subarr in zip(\n        shapes_f, np.split(hessian, block_bounds_f, axis=0), strict=False\n    ):\n        blocks = []\n        for leaf_outer, s1, submat in zip(\n            flat_p, shapes_p, np.split(subarr, block_bounds_p, axis=1), strict=False\n        ):\n            row = []\n            for leaf_inner, s2, block_values in zip(\n                flat_p, shapes_p, np.split(submat, block_bounds_p, axis=2), strict=False\n            ):\n                _shape = [k for k in (*s0, *s1, *s2) if k != 1]\n                raw_block = block_values.reshape(_shape)\n                block = _convert_raw_block_to_pandas(raw_block, leaf_outer, leaf_inner)\n                row.append(block)\n            blocks.append(row)\n        block_tree = tree_unflatten(\n            treedef_p, [tree_unflatten(treedef_p, row) for row in blocks]\n        )\n        sub_block_trees.append(block_tree)\n\n    
hessian_block_tree = tree_unflatten(treedef_f, sub_block_trees)\n    return hessian_block_tree\n\n\ndef block_tree_to_matrix(block_tree, outer_tree, inner_tree):\n    \"\"\"Convert a block tree to a matrix.\n\n    A block tree most often arises when one applies an operation to a function that maps\n    between two trees. Two main examples are the Jacobian of the function f : inner_tree\n    -> outer_tree, which results in a block tree structure, or the covariance matrix of\n    a tree, in which case outer_tree = inner_tree.\n\n    Args:\n        block_tree: A (block) pytree, must match dimensions of outer_tree and inner_tree\n        outer_tree: A pytree.\n        inner_tree: A pytree.\n\n    Returns:\n        matrix (np.ndarray): 2d array containing information stored in block_tree.\n\n    \"\"\"\n    flat_outer = tree_leaves(outer_tree)\n    flat_inner = tree_leaves(inner_tree)\n    flat_block_tree = tree_leaves(block_tree)\n\n    flat_outer_np = [_convert_to_numpy(leaf, only_pandas=True) for leaf in flat_outer]\n    flat_inner_np = [_convert_to_numpy(leaf, only_pandas=True) for leaf in flat_inner]\n\n    size_outer = [np.size(a) for a in flat_outer_np]\n    size_inner = [np.size(a) for a in flat_inner_np]\n\n    n_blocks_outer = len(size_outer)\n    n_blocks_inner = len(size_inner)\n\n    block_rows_raw = [\n        flat_block_tree[n_blocks_inner * i : n_blocks_inner * (i + 1)]\n        for i in range(n_blocks_outer)\n    ]\n\n    block_rows = []\n    for s1, row in zip(size_outer, block_rows_raw, strict=False):\n        shapes = [(s1, s2) for s2 in size_inner]\n        row_np = [_convert_to_numpy(leaf, only_pandas=False) for leaf in row]\n        row_reshaped = _reshape_list(row_np, shapes)\n        row_concatenated = np.concatenate(row_reshaped, axis=1)\n        block_rows.append(row_concatenated)\n\n    matrix = np.concatenate(block_rows, axis=0)\n\n    _check_dimensions_matrix(matrix, flat_outer, flat_inner)\n    return matrix\n\n\ndef 
block_tree_to_hessian(block_hessian, f_tree, params_tree):\n    \"\"\"Convert a block tree to a Hessian array.\n\n    Remark: In comparison to Jax we need this formatting function because we calculate\n    the second derivative using second-order finite differences. Jax computes the\n    second derivative by applying their jacobian function twice, which produces the\n    desired block-tree shape of the Hessian automatically. If we apply our first\n    derivative function twice we get the same block-tree shape.\n\n    Args:\n        block_hessian: A (block) pytree, must match dimensions of f_tree and params_tree\n        f_tree (pytree): The function evaluated at params_tree.\n        params_tree (pytree): The params_tree.\n\n    Returns:\n        matrix (np.ndarray): 2d array containing information stored in block_tree.\n\n    \"\"\"\n    flat_f = tree_leaves(f_tree)\n    flat_p = tree_leaves(params_tree)\n    flat_block_tree = tree_leaves(block_hessian)\n\n    flat_f_np = [_convert_to_numpy(leaf, only_pandas=True) for leaf in flat_f]\n    flat_p_np = [_convert_to_numpy(leaf, only_pandas=True) for leaf in flat_p]\n\n    size_f = [np.size(a) for a in flat_f_np]\n    size_p = [np.size(a) for a in flat_p_np]\n\n    n_blocks_f = len(size_f)\n    n_blocks_p = len(size_p)\n\n    outer_blocks = [\n        flat_block_tree[(n_blocks_p**2) * i : (n_blocks_p**2) * (i + 1)]\n        for i in range(n_blocks_f)\n    ]\n\n    inner_matrices = []\n    for outer_block_dim, list_inner_blocks in zip(size_f, outer_blocks, strict=False):\n        block_rows_raw = [\n            list_inner_blocks[n_blocks_p * i : n_blocks_p * (i + 1)]\n            for i in range(n_blocks_p)\n        ]\n        block_rows = []\n        for s1, row in zip(size_p, block_rows_raw, strict=False):\n            shapes = [(outer_block_dim, s1, s2) for s2 in size_p]\n            row_np = [_convert_to_numpy(leaf, only_pandas=False) for leaf in row]\n            row_np_3d = [leaf[np.newaxis] if leaf.ndim < 3 else 
leaf for leaf in row_np]\n            row_reshaped = _reshape_list(row_np_3d, shapes)\n            row_concatenated = np.concatenate(row_reshaped, axis=2)\n            block_rows.append(row_concatenated)\n\n        inner_matrix = np.concatenate(block_rows, axis=1)\n        inner_matrices.append(inner_matrix)\n\n    hessian = np.concatenate(inner_matrices, axis=0)\n    _check_dimensions_hessian(hessian, f_tree, params_tree)\n    return hessian\n\n\ndef _convert_to_numpy(obj, only_pandas=True):\n    if only_pandas:\n        out = _convert_pandas_objects_to_numpy(obj)\n    else:\n        out = np.asarray(obj)\n    return out\n\n\ndef _convert_pandas_objects_to_numpy(obj):\n    if not isinstance(obj, (pd.Series, pd.DataFrame)):\n        return obj\n    elif isinstance(obj, pd.Series):\n        out = obj.to_numpy()\n    elif \"value\" in obj.columns:\n        out = obj[\"value\"].to_numpy()\n    else:\n        out = obj.to_numpy()\n    return out\n\n\ndef _convert_raw_block_to_pandas(raw_block, leaf_outer, leaf_inner):\n    if np.ndim(raw_block) not in (1, 2):\n        return raw_block\n\n    if not _is_pd_object(leaf_outer) and not _is_pd_object(leaf_inner):\n        return raw_block\n\n    index1 = None if not _is_pd_object(leaf_outer) else leaf_outer.index\n    index2 = None if not _is_pd_object(leaf_inner) else leaf_inner.index\n\n    # can only happen if one leaf is a scalar and the other a pandas\n    # object that is interpreted as one-dimensional. We want to convert\n    # the block to a series with the index of the pandas object\n    if np.ndim(raw_block) == 1:\n        out = pd.Series(raw_block, index=_select_non_none(index1, index2))\n\n    # can happen in two cases\n    elif np.ndim(raw_block) == 2:\n        # case 1: one leaf is scalar and the other is a DataFrame\n        # without value column. 
We want to convert the block to a DataFrame\n        # with same index and columns as original DataFrame\n        if np.isscalar(leaf_outer) or np.isscalar(leaf_inner):\n            if np.isscalar(leaf_outer):\n                index, columns = leaf_inner.index, leaf_inner.columns\n            elif np.isscalar(leaf_inner):\n                index, columns = leaf_outer.index, leaf_outer.columns\n            out = pd.DataFrame(raw_block, index=index, columns=columns)\n        # case 2: both 1d Data structures and at least one of them is\n        # a pandas object. We want to convert the result to a DataFrame\n        # with index=index1 and columns=index2\n        else:\n            out = pd.DataFrame(raw_block, index=index1, columns=index2)\n\n    return out\n\n\ndef _select_non_none(first, second):\n    if first is None and second is None:\n        raise ValueError()\n    elif first is not None and second is not None:\n        raise ValueError()\n    elif first is None:\n        out = second\n    elif second is None:\n        out = first\n    return out\n\n\ndef _reshape_list(list_to_reshape, shapes):\n    \"\"\"Reshape list of numpy.ndarray according to list of shapes.\n\n    Args:\n        list_to_reshape (list): List containing numpy.ndarray's.\n        shapes (list): List of shape tuples.\n\n    Returns:\n        reshaped (list): List containing the reshaped numpy.ndarray's.\n\n    \"\"\"\n    if len(list_to_reshape) != len(shapes):\n        raise ValueError(\"Arguments must have the same number of elements.\")\n    reshaped = [\n        a.reshape(shape) for a, shape in zip(list_to_reshape, shapes, strict=False)\n    ]\n    return reshaped\n\n\ndef _is_pd_object(obj):\n    return isinstance(obj, (pd.Series, pd.DataFrame))\n\n\ndef _check_dimensions_matrix(matrix, outer_tree, inner_tree):\n    extended_registry = get_registry(extended=True)\n    flat_outer = tree_leaves(outer_tree, registry=extended_registry)\n    flat_inner = tree_leaves(inner_tree, 
registry=extended_registry)\n\n    if matrix.shape[0] != len(flat_outer):\n        raise ValueError(\"First dimension of matrix does not match that of outer_tree.\")\n    if matrix.shape[1] != len(flat_inner):\n        raise ValueError(\n            \"Second dimension of matrix does not match that of inner_tree.\"\n        )\n\n\ndef _check_dimensions_hessian(hessian, f_tree, params_tree):\n    extended_registry = get_registry(extended=True)\n    flat_f = tree_leaves(f_tree, registry=extended_registry)\n    flat_p = tree_leaves(params_tree, registry=extended_registry)\n\n    if len(flat_f) == 1:\n        # consider only dimensions with non trivial size (larger than 1)\n        relevant_hessian_shape = tuple(k for k in hessian.shape if k != 1)\n\n        if len(relevant_hessian_shape) == 0 and len(flat_p) != 1:\n            # scalar f and scalar params -> scalar hessian\n            raise ValueError(\"Hessian dimension does not match those of params.\")\n\n        if len(relevant_hessian_shape) == 2:\n            # scalar f and vector params -> matrix hessian\n            if relevant_hessian_shape != (len(flat_p), len(flat_p)):\n                raise ValueError(\"Hessian dimension does not match those of params.\")\n\n        if len(relevant_hessian_shape) > 2:\n            raise ValueError(\"Hessian must be 0- or 2-d if f is scalar-valued.\")\n    else:\n        if hessian.ndim != 3:\n            raise ValueError(\"Hessian must be 3d if f is multidimensional.\")\n        if hessian.shape[0] != len(flat_f):\n            raise ValueError(\"First Hessian dimension does not match that of f.\")\n        if hessian.shape[1:] != (len(flat_p), len(flat_p)):\n            raise ValueError(\n                \"Last two Hessian dimensions do not match those of params.\"\n            )\n"
  },
  {
    "path": "src/optimagic/parameters/bounds.py",
    "content": "from __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom typing import Any, Literal, Sequence\n\nimport numpy as np\nfrom numpy.typing import NDArray\nfrom pybaum import leaf_names, tree_map\nfrom pybaum import tree_just_flatten as tree_leaves\nfrom scipy.optimize import Bounds as ScipyBounds\n\nfrom optimagic.exceptions import InvalidBoundsError\nfrom optimagic.parameters.tree_registry import get_registry\nfrom optimagic.typing import PyTree, PyTreeRegistry\nfrom optimagic.utilities import fast_numpy_full\n\n\n@dataclass(frozen=True)\nclass Bounds:\n    lower: PyTree | None = None\n    upper: PyTree | None = None\n    soft_lower: PyTree | None = None\n    soft_upper: PyTree | None = None\n\n\ndef pre_process_bounds(\n    bounds: None | Bounds | ScipyBounds | Sequence[tuple[float, float]],\n) -> Bounds | None:\n    \"\"\"Convert all valid types of specifying bounds to optimagic.Bounds.\n\n    This just harmonizes multiple ways of specifying bounds into a single format.\n    It does not check that bounds are valid or compatible with params.\n\n    Args:\n        bounds: The user provided bounds.\n\n    Returns:\n        The bounds in the optimagic format.\n\n    Raises:\n        InvalidBoundsError: If bounds cannot be processed, e.g. because they do not have\n            the correct type.\n\n    \"\"\"\n    if isinstance(bounds, ScipyBounds):\n        bounds = Bounds(lower=bounds.lb, upper=bounds.ub)\n    elif isinstance(bounds, Bounds) or bounds is None:\n        pass\n    else:\n        try:\n            bounds = _process_bounds_sequence(bounds)\n        except (KeyboardInterrupt, SystemExit):\n            raise\n        except Exception as e:\n            raise InvalidBoundsError(\n                f\"Invalid bounds of type: {type(bounds)}. 
Bounds must be \"\n                \"optimagic.Bounds, scipy.optimize.Bounds or a Sequence of tuples with \"\n                \"lower and upper bounds.\"\n            ) from e\n    return bounds\n\n\ndef _process_bounds_sequence(bounds: Sequence[tuple[float, float]]) -> Bounds:\n    lower = fast_numpy_full(len(bounds), fill_value=-np.inf)\n    upper = fast_numpy_full(len(bounds), fill_value=np.inf)\n\n    for i, (lb, ub) in enumerate(bounds):\n        if lb is not None:\n            lower[i] = lb\n        if ub is not None:\n            upper[i] = ub\n    return Bounds(lower=lower, upper=upper)\n\n\ndef get_internal_bounds(\n    params: PyTree,\n    bounds: Bounds | None = None,\n    registry: PyTreeRegistry | None = None,\n    add_soft_bounds: bool = False,\n) -> tuple[NDArray[np.float64] | None, NDArray[np.float64] | None]:\n    \"\"\"Create consolidated and flattened bounds for params.\n\n    If params is a DataFrame with value column, the user provided bounds are\n    extended with bounds from the params DataFrame.\n\n    If no bounds are provided, we return None. If some bounds are available the missing\n    entries are set to -np.inf for the lower bound and np.inf for the upper bound.\n\n    The bounds provided in `bounds` override bounds provided in params if both are\n    specified (in the case where params is a DataFrame with bounds as a column).\n\n    Args:\n        params: The parameter pytree.\n        bounds: The lower and upper bounds.\n        registry: pybaum registry.\n        add_soft_bounds: If True, the element-wise maximum (minimum) of the lower and\n            soft_lower (upper and soft_upper) bounds are taken. 
If False, the lower\n            (upper) bounds are returned.\n\n    Returns:\n        Consolidated and flattened lower_bounds.\n        Consolidated and flattened upper_bounds.\n\n    \"\"\"\n    bounds = Bounds() if bounds is None else bounds\n\n    fast_path = _is_fast_path(\n        params=params,\n        bounds=bounds,\n        add_soft_bounds=add_soft_bounds,\n    )\n    if fast_path:\n        return _get_fast_path_bounds(bounds)\n\n    # Handling of None-valued bounds in the slow path needs to be improved. Currently,\n    # None-valued bounds are replaced with arrays of np.inf and -np.inf, and then\n    # translated back to None if all entries are non-finite.\n\n    registry = get_registry(extended=True) if registry is None else registry\n    n_params = len(tree_leaves(params, registry=registry))\n\n    # Fill leaves with np.nan. If params contains a data frame with bounds as a column,\n    # that column is NOT overwritten (as long as an extended registry is used).\n    nan_tree = tree_map(lambda leaf: np.nan, params, registry=registry)  # noqa: ARG005\n\n    lower_flat = _update_bounds_and_flatten(nan_tree, bounds.lower, kind=\"lower_bound\")\n    upper_flat = _update_bounds_and_flatten(nan_tree, bounds.upper, kind=\"upper_bound\")\n\n    if len(lower_flat) != n_params:\n        raise InvalidBoundsError(\"lower_bounds do not match dimension of params.\")\n    if len(upper_flat) != n_params:\n        raise InvalidBoundsError(\"upper_bounds do not match dimension of params.\")\n\n    lower_flat[np.isnan(lower_flat)] = -np.inf\n    upper_flat[np.isnan(upper_flat)] = np.inf\n\n    if add_soft_bounds:\n        lower_flat_soft = _update_bounds_and_flatten(\n            nan_tree, bounds.soft_lower, kind=\"soft_lower_bound\"\n        )\n        lower_flat_soft[np.isnan(lower_flat_soft)] = -np.inf\n        lower_flat = np.maximum(lower_flat, lower_flat_soft)\n\n        upper_flat_soft = _update_bounds_and_flatten(\n            nan_tree, bounds.soft_upper, 
kind=\"soft_upper_bound\"\n        )\n        upper_flat_soft[np.isnan(upper_flat_soft)] = np.inf\n        upper_flat = np.minimum(upper_flat, upper_flat_soft)\n\n    if (lower_flat > upper_flat).any():\n        msg = \"Invalid bounds. Some lower bounds are larger than upper bounds.\"\n        raise InvalidBoundsError(msg)\n\n    if np.isinf(lower_flat).all():\n        lower_flat = None  # type: ignore[assignment]\n    if np.isinf(upper_flat).all():\n        upper_flat = None  # type: ignore[assignment]\n\n    return lower_flat, upper_flat\n\n\ndef _update_bounds_and_flatten(\n    nan_tree: PyTree,\n    bounds: PyTree,\n    kind: Literal[\"lower_bound\", \"upper_bound\", \"soft_lower_bound\", \"soft_upper_bound\"],\n) -> NDArray[np.float64]:\n    \"\"\"Flatten bounds array and update it with bounds from params.\n\n    Args:\n        nan_tree: Pytree with the same structure as params, filled with nans.\n        bounds: The candidate bounds to be updated and flattened.\n        kind: One of \"lower_bound\", \"upper_bound\", \"soft_lower_bound\",\n            \"soft_upper_bound\".\n\n    Returns:\n        np.ndarray: The updated and flattened bounds.\n\n    \"\"\"\n    registry = get_registry(extended=True, data_col=kind)\n    flat_nan_tree = tree_leaves(nan_tree, registry=registry)\n\n    if bounds is not None:\n        registry = get_registry(extended=True)\n        flat_bounds = tree_leaves(bounds, registry=registry)\n\n        seperator = 10 * \"$\"\n        params_names = leaf_names(nan_tree, registry=registry, separator=seperator)\n        bounds_names = leaf_names(bounds, registry=registry, separator=seperator)\n\n        flat_nan_dict = dict(zip(params_names, flat_nan_tree, strict=False))\n\n        invalid = {\"names\": [], \"bounds\": []}  # type: ignore\n        for bounds_name, bounds_leaf in zip(bounds_names, flat_bounds, strict=False):\n            # if a bounds leaf is None we treat it as saying the the corresponding\n            # subtree of params has 
no bounds.\n            if bounds_leaf is not None:\n                if bounds_name in flat_nan_dict:\n                    flat_nan_dict[bounds_name] = bounds_leaf\n                else:\n                    invalid[\"names\"].append(bounds_name)\n                    invalid[\"bounds\"].append(bounds_leaf)\n\n        if invalid[\"bounds\"]:\n            msg = (\n                f\"{kind} could not be matched to params pytree. The bounds \"\n                f\"{invalid['bounds']} with names {invalid['names']} are not part of \"\n                \"params.\"\n            )\n            raise InvalidBoundsError(msg)\n\n        flat_nan_tree = list(flat_nan_dict.values())\n\n    updated = np.array(flat_nan_tree, dtype=np.float64)\n    return updated\n\n\ndef _is_fast_path(params: PyTree, bounds: Bounds, add_soft_bounds: bool) -> bool:\n    out = True\n    if add_soft_bounds:\n        out = False\n\n    if not _is_1d_array(params):\n        out = False\n\n    for bound in (bounds.lower, bounds.upper):\n        if not (_is_1d_array(bound) or bound is None):\n            out = False\n    return out\n\n\ndef _is_1d_array(candidate: Any) -> bool:\n    return isinstance(candidate, np.ndarray) and candidate.ndim == 1\n\n\ndef _get_fast_path_bounds(\n    bounds: Bounds,\n) -> tuple[NDArray[np.float64] | None, NDArray[np.float64] | None]:\n    if bounds.lower is None:\n        lower_bounds = None\n    else:\n        lower_bounds = bounds.lower.astype(float)\n        if np.isinf(lower_bounds).all():\n            lower_bounds = None\n\n    if bounds.upper is None:\n        upper_bounds = None\n    else:\n        upper_bounds = bounds.upper.astype(float)\n        if np.isinf(upper_bounds).all():\n            upper_bounds = None\n\n    if (\n        lower_bounds is not None\n        and upper_bounds is not None\n        and (lower_bounds > upper_bounds).any()\n    ):\n        msg = \"Invalid bounds. 
Some lower bounds are larger than upper bounds.\"\n        raise InvalidBoundsError(msg)\n\n    return lower_bounds, upper_bounds\n"
  },
  {
    "path": "src/optimagic/parameters/check_constraints.py",
    "content": "\"\"\"Check compatibility of pc with each other and with bounds and fixes.\n\nSee the module docstring of process_constraints for naming conventions.\n\n\"\"\"\n\nfrom functools import partial\n\nimport numpy as np\nimport pandas as pd\n\nfrom optimagic.exceptions import InvalidConstraintError, InvalidParamsError\nfrom optimagic.utilities import cov_params_to_matrix, sdcorr_params_to_matrix\n\n\ndef check_constraints_are_satisfied(flat_constraints, param_values, param_names):\n    \"\"\"Check that params satisfies all constraints.\n\n    This should be called before the more specialized constraints are rewritten to\n    linear constraints in order to get better error messages!\n\n    We let the checks pass if all \"values\" are np.nan. This way `process_constraints`\n    can be used on empty params DataFrames which is useful to construct templates for\n    start parameters that can be filled out by the user.\n\n    Args:\n        pc (list): List of constraints with processed selectors.\n        params (pd.DataFrame): See :ref:`params`\n\n    Raises:\n        ValueError if constraints are not satisfied.\n\n    \"\"\"\n    # skip check if all parameters are NaN\n    if not np.isfinite(param_values).any():\n        return\n\n    for constr in flat_constraints:\n        typ = constr[\"type\"]\n        subset = param_values[constr[\"index\"]]\n\n        report = []\n\n        _msg = partial(_get_message, constr, param_names)\n\n        if typ == \"covariance\":\n            cov = cov_params_to_matrix(subset)\n            e, _ = np.linalg.eigh(cov)\n            if not np.all(e > -1e-8):\n                report.append(_msg())\n        elif typ == \"sdcorr\":\n            cov = sdcorr_params_to_matrix(subset)\n            e, _ = np.linalg.eigh(cov)\n            if not np.all(e > -1e-8):\n                report.append(_msg())\n        elif typ == \"probability\":\n            if not np.isclose(subset.sum(), 1, rtol=0.01):\n                explanation = 
\"Probabilities do not sum to 1.\"\n                report.append(_msg(explanation))\n            if np.any(subset < 0):\n                explanation = \"There are negative Probabilities.\"\n                report.append(_msg(explanation))\n            if np.any(subset > 1):\n                explanation = \"There are probabilities larger than 1.\"\n                report.append(_msg(explanation))\n        elif typ == \"fixed\":\n            if \"value\" in constr and not np.allclose(subset, constr[\"value\"]):\n                explanation = (\n                    \"Fixing parameters to different values than their start values \"\n                    \"was allowed in earlier versions of optimagic but is \"\n                    \"forbidden now. \"\n                )\n                report.append(_msg(explanation))\n        elif typ == \"increasing\":\n            if np.any(np.diff(subset) < 0):\n                report.append(_msg())\n        elif typ == \"decreasing\":\n            if np.any(np.diff(subset) > 0):\n                report.append(_msg())\n        elif typ == \"linear\":\n            wsum = subset.dot(constr[\"weights\"])\n            if \"lower_bound\" in constr and wsum < constr[\"lower_bound\"]:\n                explanation = \"Lower bound of linear constraint is violated.\"\n                report.append(_msg(explanation))\n            elif \"upper_bound\" in constr and wsum > constr[\"upper_bound\"]:\n                explanation = \"Upper bound of linear constraint violated\"\n                report.append(_msg(explanation))\n            elif \"value\" in constr and not np.isclose(wsum, constr[\"value\"]):\n                explanation = \"Equality condition of linear constraint violated\"\n                report.append(_msg(explanation))\n        elif typ == \"equality\":\n            if len(set(subset.tolist())) > 1:\n                report.append(_msg())\n\n        report = \"\\n\".join(report)\n        if report != \"\":\n            raise 
InvalidParamsError(f\"Violated constraint at start params:\\n{report}\")\n\n\ndef _get_message(constraint, param_names, explanation=\"\"):\n    start = (\n        f\"A constraint of type '{constraint['type']}' is not fulfilled in params, \"\n        \"please make sure that it holds for the starting values. The problem arose \"\n        \"because:\"\n    )\n\n    if explanation:\n        explanation = f\" {explanation.rstrip('. ')}. \"\n\n    names = [param_names[i] for i in constraint[\"index\"]]\n\n    end = (\n        f\"The names of the involved parameters are:\\n{names}\\n\"\n        \"The relevant constraint is:\\n\"\n        f\"{constraint}.\"\n    )\n\n    msg = start + explanation + end\n    return msg\n\n\ndef check_types(constraints):\n    \"\"\"Check that no invalid constraint types are requested.\n\n    Args:\n        constraints (list): List of constraints.\n\n    Raises:\n        TypeError if invalid constraint types are encountered\n\n    \"\"\"\n    valid_types = {\n        \"covariance\",\n        \"sdcorr\",\n        \"linear\",\n        \"probability\",\n        \"increasing\",\n        \"decreasing\",\n        \"equality\",\n        \"pairwise_equality\",\n        \"fixed\",\n    }\n    for constr in constraints:\n        if constr[\"type\"] not in valid_types:\n            raise InvalidConstraintError(\n                \"Invalid constraint_type: {}\".format(constr[\"type\"]),\n            )\n\n\ndef check_for_incompatible_overlaps(transformations, parnames):\n    \"\"\"Check that there are no overlaps between constraints that transform parameters.\n\n    Since the constraints are already consolidated such that only those that transform\n    a parameter are left and all equality constraints are already plugged in, this\n    boils down to checking that no parameter appears more than once.\n\n    Args:\n        constr_info (dict): Dict of 1d numpy arrays with info about constraints.\n        transformations (list): Processed transforming 
constraints.\n        parnames (list): List of parameter names.\n\n    \"\"\"\n    all_indices = []\n    for constr in transformations:\n        all_indices += constr[\"index\"]\n\n    msg = (\n        \"Transforming constraints such as 'covariance', 'sdcorr', 'probability' \"\n        \"and 'linear' cannot overlap. This includes overlaps induced by equality \"\n        \"constraints. This was violated for the following parameters:\\n{}\"\n    )\n\n    if len(set(all_indices)) < len(all_indices):\n        unique, counts = np.unique(all_indices, return_counts=True)\n        invalid_indices = unique[counts >= 2]\n        invalid_names = [parnames[i] for i in invalid_indices]\n\n        raise InvalidConstraintError(msg.format(invalid_names))\n\n\ndef check_fixes_and_bounds(constr_info, transformations, parnames):\n    \"\"\"Check fixes.\n\n    Warn the user if he fixes a parameter to a value even though that parameter has\n    a different non-nan value in params\n\n    check that fixes are compatible with other constraints.\n\n    Args:\n        constr_info (dict): Dict of 1d numpy arrays with info about constraints.\n        transformations (list): Processed transforming constraints.\n        parnames (list): List of parameter names.\n\n    \"\"\"\n    constr_info = constr_info.copy()\n    constr_info[\"index\"] = parnames\n\n    prob_msg = (\n        \"{} constraints are incompatible with fixes or bounds. \"\n        \"This is violated for:\\n{}\"\n    )\n\n    cov_msg = (\n        \"{} constraints are incompatible with fixes or bounds except for the first \"\n        \"parameter. 
This is violated for:\\n{}\"\n    )\n\n    for constr in transformations:\n        if constr[\"type\"] in [\"covariance\", \"sdcorr\"]:\n            subset = _iloc(dictionary=constr_info, positions=constr[\"index\"][1:])\n            if subset[\"is_fixed_to_value\"].any():\n                problematic = subset[\"index\"][subset[\"is_fixed_to_value\"]]\n                raise InvalidConstraintError(\n                    cov_msg.format(constr[\"type\"], problematic)\n                )\n            finite_bounds = np.isfinite(subset[\"lower_bounds\"]) | np.isfinite(\n                subset[\"upper_bounds\"]\n            )\n            if finite_bounds.any():\n                problematic = subset[\"index\"][finite_bounds]\n                raise InvalidConstraintError(\n                    prob_msg.format(constr[\"type\"], problematic)\n                )\n        elif constr[\"type\"] == \"probability\":\n            subset = _iloc(dictionary=constr_info, positions=constr[\"index\"])\n            if subset[\"is_fixed_to_value\"].any():\n                problematic = subset[\"index\"][subset[\"is_fixed_to_value\"]]\n                raise InvalidConstraintError(\n                    prob_msg.format(constr[\"type\"], problematic)\n                )\n            finite_bounds = np.isfinite(subset[\"lower_bounds\"]) | np.isfinite(\n                subset[\"upper_bounds\"]\n            )\n            if finite_bounds.any():\n                problematic = subset[\"index\"][finite_bounds]\n                raise InvalidConstraintError(\n                    prob_msg.format(constr[\"type\"], problematic)\n                )\n\n    is_invalid = constr_info[\"lower_bounds\"] >= constr_info[\"upper_bounds\"]\n    if is_invalid.any():\n        info = pd.DataFrame(\n            {\n                \"names\": np.array(parnames)[is_invalid],\n                \"lower_bounds\": constr_info[\"lower_bounds\"][is_invalid],\n                \"upper_bounds\": 
constr_info[\"upper_bounds\"][is_invalid],\n            }\n        )\n\n        msg = (\n            \"lower_bound must be strictly smaller than upper_bound. \"\n            f\"This is violated for:\\n{info}\"\n        )\n\n        raise InvalidConstraintError(msg)\n\n\ndef _iloc(dictionary, positions):\n    \"\"\"Substitute function for DataFrame.iloc. that works for a dictionary of arrays.\n\n    It creates a subset of the input dictionary based on the\n    index values in the info list, and returns this subset as\n    a dictionary with numpy arrays.\n\n    Args:\n        dictionary (dict): Dictionary of arrays.\n        position (list): List, slice or array of indices.\n\n    \"\"\"\n    subset = {}\n    for key, value in dictionary.items():\n        if isinstance(value, list) and not isinstance(positions, slice):\n            subset[key] = [value[i] for i in positions]\n        else:\n            subset[key] = value[positions]\n\n    return subset\n"
  },
  {
    "path": "src/optimagic/parameters/consolidate_constraints.py",
    "content": "\"\"\"Functions to consolidate user provided constraints.\n\nConsolidation means that redundant constraints are dropped and other constraints are\ncollected in meaningful bundles.\n\nCheck the module docstring of process_constraints for naming conventions.\n\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\nfrom optimagic.exceptions import InvalidConstraintError\nfrom optimagic.utilities import (\n    fast_numpy_full,\n    number_of_triangular_elements_to_dimension,\n)\n\n\ndef consolidate_constraints(\n    constraints, parvec, lower_bounds, upper_bounds, param_names\n):\n    \"\"\"Consolidate constraints with each other and remove redundant ones.\n\n    Args:\n        constraints (list): List with constraint dictionaries. It is assumed that\n            the selectors are already processed, increasing and decreasing\n            constraints have been rewritten as linear constraints and\n            pairwise_equality constraints have been rewritten as equality constraints.\n        parvec (np.ndarray): 1d numpy array with parameters.\n        lower_bounds (np.ndarray | None): 1d numpy array with lower_bounds\n        upper_bounds (np.ndarray | None): 1d numpy array with upper_bounds\n        param_names (list): Names of parameters. Used for error messages.\n\n    Returns:\n        list: This contains processed version of all\n            constraints that require an actual kernel transformation. The information\n            on all other constraints is subsumed in pp.\n        dict: Dict of 1d numpy arrays with information about non-transforming\n            constraints.\n\n    \"\"\"\n    # None-valued bounds are handled by instantiating them as an -inf and inf array. 
In\n    # the future, this should be handled more gracefully.\n    if lower_bounds is None:\n        lower_bounds = fast_numpy_full(len(parvec), fill_value=-np.inf)\n    if upper_bounds is None:\n        upper_bounds = fast_numpy_full(len(parvec), fill_value=np.inf)\n\n    raw_eq, other_constraints = _split_constraints(constraints, \"equality\")\n    equality_constraints = _consolidate_equality_constraints(raw_eq)\n\n    fixed_constraints, other_constraints = _split_constraints(\n        other_constraints, \"fixed\"\n    )\n    fixed_value = _consolidate_fixes_with_equality_constraints(\n        fixed_constraints, equality_constraints, parvec\n    )\n\n    constr_info = {\n        \"fixed_values\": fixed_value,\n        \"is_fixed_to_value\": np.isfinite(fixed_value),\n    }\n\n    other_constraints = [\n        c\n        for c in other_constraints\n        if not constr_info[\"is_fixed_to_value\"][c[\"index\"]].all()\n    ]\n\n    (\n        other_constraints,\n        lower_bounds,\n        upper_bounds,\n    ) = simplify_covariance_and_sdcorr_constraints(\n        constraints=other_constraints,\n        lower_bounds=lower_bounds,\n        upper_bounds=upper_bounds,\n        is_fixed_to_value=constr_info[\"is_fixed_to_value\"],\n        fixed_value=constr_info[\"fixed_values\"],\n    )\n\n    lower_bounds, upper_bounds = _consolidate_bounds_with_equality_constraints(\n        equality_constraints,\n        lower_bounds=lower_bounds,\n        upper_bounds=upper_bounds,\n    )\n\n    constr_info[\"lower_bounds\"] = lower_bounds\n    constr_info[\"upper_bounds\"] = upper_bounds\n\n    (\n        other_constraints,\n        post_replacements,\n        is_fixed_to_other,\n    ) = _plug_equality_constraints_into_selectors(\n        equality_constraints, other_constraints, n_params=len(parvec)\n    )\n\n    constr_info[\"post_replacements\"] = post_replacements\n    constr_info[\"is_fixed_to_other\"] = is_fixed_to_other\n\n    linear_constraints, other_constraints = 
_split_constraints(\n        other_constraints, \"linear\"\n    )\n\n    if len(linear_constraints) > 0:\n        linear_constraints = _consolidate_linear_constraints(\n            params_vec=parvec,\n            linear_constraints=linear_constraints,\n            constr_info=constr_info,\n            param_names=param_names,\n        )\n\n    constraints = other_constraints + linear_constraints\n\n    return constraints, constr_info\n\n\ndef _consolidate_equality_constraints(equality_constraints):\n    \"\"\"Consolidate equality constraints as far as possible.\n\n    Since equality is a transitive conditions we can consolidate any two equality\n    constraints that have at least one parameter in common into one condition. Besides\n    being faster, this also ensures that the result remains unchanged if equality\n    constraints are are split into several different constraints or if they are\n    specified in a different order.\n\n    The index in the consolidated equality constraints is sorted in the same order\n    as the index of params. This is no problem because order is irrelevant for\n    equality constraints.\n\n    Args:\n        equality_constraints (list): List of dictionaries where each dictionary is a\n            constraint. 
It is assumed that the selectors were already processed.\n\n    Returns:\n        list: List of consolidated equality constraints.\n\n    \"\"\"\n    candidates = [constr[\"index\"] for constr in equality_constraints]\n    # drop constraints that just restrict one parameter to be equal to itself\n    candidates = [c for c in candidates if len(c) >= 2]\n    merged = _join_overlapping_lists(candidates)\n    consolidated = [{\"index\": sorted(index), \"type\": \"equality\"} for index in merged]\n\n    return consolidated\n\n\ndef _join_overlapping_lists(candidates):\n    \"\"\"Bundle all candidates with with non-empty intersection.\n\n    Args:\n        candidates (list): List of potentially overlapping lists.\n\n    Returns:\n        bundles (list): List of lists where all overlapping lists have been joined\n            and sorted.\n\n    \"\"\"\n    bundles = []\n\n    while len(candidates) > 0:\n        new_candidates = _unite_first_with_all_intersecting_elements(candidates)\n        if len(candidates) == len(new_candidates):\n            bundles.append(sorted(new_candidates[0]))\n            candidates = candidates[1:]\n        else:\n            candidates = new_candidates\n\n    return bundles\n\n\ndef _unite_first_with_all_intersecting_elements(indices):\n    \"\"\"Helper function to bundle overlapping indices.\n\n    Args:\n        indices (list): A list lists with indices.\n\n    \"\"\"\n    first = set(indices[0])\n    new_first = first\n    new_others = []\n    for idx in indices[1:]:\n        if len(first.intersection(idx)) > 0:\n            new_first = new_first.union(idx)\n        else:\n            new_others.append(idx)\n\n    return [new_first, *new_others]\n\n\ndef _consolidate_fixes_with_equality_constraints(\n    fixed_constraints, equality_constraints, parvec\n):\n    \"\"\"Consolidate fixes with equality constraints.\n\n    If any equality constrained parameter is fixed, all of the parameters that are\n    equal to it have to be fixed to the same 
value.\n\n    Args:\n        fixed_constraints (list): List of constrains of type \"fixed\".\n        equality_constraints (list): List of constraints of type \"equality\".\n        parvec (np.ndarray): 1d numpy array with parameters.\n\n    Returns:\n        fixed_value (pd.Series): Series with the fixed value for all parameters that\n            are fixed and np.nan everywhere else. Has the same index as params.\n\n    \"\"\"\n    fixed_value = np.full(len(parvec), np.nan)\n    for fix in fixed_constraints:\n        fixed_value[fix[\"index\"]] = fix.get(\"value\", parvec[fix[\"index\"]])\n\n    for eq in equality_constraints:\n        if np.isfinite(fixed_value[eq[\"index\"]]).any():\n            valcounts = _unique_values(fixed_value[eq[\"index\"]])\n            assert len(valcounts) == 1, (\n                \"Equality constrained parameters cannot be fixed to different values.\"\n            )\n            fixed_value[eq[\"index\"]] = valcounts[0]\n\n    return fixed_value\n\n\ndef _consolidate_bounds_with_equality_constraints(\n    equality_constraints, lower_bounds, upper_bounds\n):\n    \"\"\"Consolidate bounds with equality constraints.\n\n    Check that there are no incompatible bounds on equality constrained parameters and\n    set the bounds for equal parameters to the strictest bound encountered on any of\n    them.\n\n    Args:\n        equality_constraints (list): List of constraints of type \"equality\".\n        lower_bounds (np.ndarray): Lower bounds for parameters.\n        upper_bounds (np.ndarray): Upper bounds for parameters.\n\n    Returns:\n        np.ndarray: 1d array with lower bounds\n        np.ndarray: 1d array with upper bounds\n\n    \"\"\"\n    lower = lower_bounds.copy()\n    upper = upper_bounds.copy()\n    for eq in equality_constraints:\n        lower[eq[\"index\"]] = lower[eq[\"index\"]].max()\n        upper[eq[\"index\"]] = upper[eq[\"index\"]].min()\n\n    return lower, upper\n\n\ndef _split_constraints(constraints, type_):\n   
 \"\"\"Split list of constraints in two list.\n\n    The first list contains all constraints of type and the second the rest.\n\n    \"\"\"\n    filtered = [c for c in constraints if c[\"type\"] == type_]\n    rest = [c for c in constraints if c[\"type\"] != type_]\n    return filtered, rest\n\n\ndef simplify_covariance_and_sdcorr_constraints(\n    constraints,\n    lower_bounds,\n    upper_bounds,\n    is_fixed_to_value,\n    fixed_value,\n):\n    \"\"\"Enforce covariance and sdcorr constraints by bounds if possible.\n\n    This is possible if the dimension is <= 2 or all covariances are fexd to 0.\n\n    \"\"\"\n    cov_constraints, others = _split_constraints(constraints, \"covariance\")\n    sdcorr_constraints, others = _split_constraints(others, \"sdcorr\")\n    to_simplify = cov_constraints + sdcorr_constraints\n    lower = lower_bounds.copy()\n    upper = upper_bounds.copy()\n\n    not_simplifyable = []\n    for constr in to_simplify:\n        dim = number_of_triangular_elements_to_dimension(len(constr[\"index\"]))\n        if constr[\"type\"] == \"covariance\":\n            diag_positions = [0, *np.cumsum(range(2, dim + 1)).tolist()]\n            diag_indices = np.array(constr[\"index\"])[diag_positions].tolist()\n            off_indices = [i for i in constr[\"index\"] if i not in diag_positions]\n        if constr[\"type\"] == \"sdcorr\":\n            diag_indices = constr[\"index\"][:dim]\n            off_indices = constr[\"index\"][dim:]\n\n        uncorrelated = False\n        if is_fixed_to_value[off_indices].all():\n            if (fixed_value[off_indices] == 0).all():\n                uncorrelated = True\n\n        if uncorrelated:\n            lower[diag_indices] = np.maximum(0, lower[diag_indices])\n        elif dim <= 2 and constr[\"type\"] == \"sdcorr\":\n            lower[diag_indices] = np.maximum(0, lower[diag_indices])\n            lower[off_indices] = -1\n            upper[off_indices] = 1\n        else:\n            
not_simplifyable.append(constr)\n\n    return others + not_simplifyable, lower, upper\n\n\ndef _plug_equality_constraints_into_selectors(\n    equality_constraints, other_constraints, n_params\n):\n    \"\"\"Rewrite all constraint in terms of free parameters.\n\n    Only one parameter from a set of equality constrained parameters will actually\n    be free. Which one is not important. We take the one with the lowest iloc.\n\n    Then all other constraints have to be rewritten in terms of the free parameters.\n    Once that is done, redundant constraints can be filtered out.\n\n    Args:\n        equality_constraints (list): List of constraints of type \"equality\".\n        other_constraints (list): All other constraints.\n        n_params (int): Number of parameters.\n\n    Returns:\n        list: List of processed non-equality constraints.\n        np.ndarray: post_replacements\n        np.ndarray: is_fixed_to_other\n\n    \"\"\"\n    is_equal_to = np.full(n_params, -1)\n    for eq in equality_constraints:\n        is_equal_to[sorted(eq[\"index\"])[1:]] = sorted(eq[\"index\"])[0]\n    post_replacements = is_equal_to.astype(int)\n    is_fixed_to_other = is_equal_to >= 0\n    helper = pd.Series(post_replacements)\n    replace_dict = helper[helper >= 0].to_dict()\n\n    plugged_in = []\n    for constr in other_constraints:\n        new = constr.copy()\n        new[\"index\"] = pd.Series(constr[\"index\"]).replace(replace_dict).tolist()\n        plugged_in.append(new)\n\n    linear_constraints, others = _split_constraints(plugged_in, \"linear\")\n\n    pc = []\n    for constr in others:\n        if not _is_redundant(constr, pc):\n            pc.append(constr)\n\n    pc += linear_constraints\n\n    return pc, post_replacements, is_fixed_to_other\n\n\ndef _consolidate_linear_constraints(\n    params_vec, linear_constraints, constr_info, param_names\n):\n    \"\"\"Consolidate linear constraints.\n\n    Consolidation entails the following steps:\n    - Plugging fixes and 
equality constraints into the linear constraints\n    - Collect weights of those constraints that overlap into weight DataFrames\n    - Collect corresponding right hand sides (bounds or values) in DataFrames\n    - Express box constraints of parameters involved in linear constraints as\n      additional linear constraints.\n    - Rescale the weights for easier detection of linear dependence\n    - Drop redundant constraints\n    - Check compatibility of constraints\n    - Construct a list of consolidated constraint dictionaries that contain\n        all matrices needed for the kernel transformations.\n\n    Args:\n        params_vec (np.ndarray): 1d numpy array wtih parameters\n        linear_constraints (list): Linear constraints that already have processed\n            weights and selector fields.\n        constr_info (dict): Dict with information about constraints.\n        param_names (list): Parameter names. Used for error messages.\n\n    Returns:\n        list: Processed and consolidated linear constraints.\n\n    \"\"\"\n    weights, right_hand_side = _transform_linear_constraints_to_pandas_objects(\n        linear_constraints, n_params=len(params_vec)\n    )\n\n    weights = _plug_equality_constraints_into_linear_weights(\n        weights, constr_info[\"post_replacements\"]\n    )\n    weights, right_hand_side = _plug_fixes_into_linear_weights_and_rhs(\n        weights,\n        right_hand_side,\n        constr_info[\"is_fixed_to_value\"],\n        constr_info[\"fixed_values\"],\n    )\n\n    involved_parameters = [set(w[w != 0].index) for _, w in weights.iterrows()]\n\n    bundled_indices = _join_overlapping_lists(involved_parameters)\n\n    pc = []\n    for involved_parameters in bundled_indices:\n        w = weights[involved_parameters][\n            (weights[involved_parameters] != 0).any(axis=1)\n        ].copy(deep=True)\n        rhs = right_hand_side.loc[w.index].copy(deep=True)\n        w, rhs = _express_bounds_as_linear_constraints(\n            
w, rhs, constr_info[\"lower_bounds\"], constr_info[\"upper_bounds\"]\n        )\n        w, rhs = _rescale_linear_constraints(w, rhs)\n        w, rhs = _drop_redundant_linear_constraints(w, rhs)\n        _check_consolidated_weights(w, param_names=param_names)\n        to_internal, from_internal = _get_kernel_transformation_matrices(w)\n        constr = {\n            \"index\": list(w.columns),\n            \"type\": \"linear\",\n            \"to_internal\": to_internal,\n            \"from_internal\": from_internal,\n            \"right_hand_side\": rhs,\n        }\n        pc.append(constr)\n\n    return pc\n\n\ndef _transform_linear_constraints_to_pandas_objects(linear_constranits, n_params):\n    \"\"\"Collect information from the linear constraint dictionaries into pandas objects.\n\n    Args:\n        linear_constraints (list): List of constraint of type \"linear\".\n        n_params (int): number of parameters.\n\n    Returns:\n        weights (pd.DataFrame): DataFrame with one row per constraint and one column\n            per parameter. 
Columns names are the ilocs of the parameters in params.\n        rhs (pd.DataFrame): DataFrame with the columns \"value\", \"lower_bound\" and\n            \"upper_bound\" that collects the right hand sides of the constraints.\n\n    \"\"\"\n    all_weights, all_values, all_lbs, all_ubs = [], [], [], []\n    for constr in linear_constranits:\n        all_weights.append(constr[\"weights\"])\n        all_values.append(constr.get(\"value\", np.nan))\n        all_lbs.append(constr.get(\"lower_bound\", -np.inf))\n        all_ubs.append(constr.get(\"upper_bound\", np.inf))\n\n    weights = pd.concat(all_weights, axis=1).T.reset_index()\n    weights = weights.reindex(columns=np.arange(n_params)).fillna(0)\n    values = pd.Series(all_values, name=\"value\")\n    lbs = pd.Series(all_lbs, name=\"lower_bound\")\n    ubs = pd.Series(all_ubs, name=\"upper_bound\")\n    rhs = pd.concat([values, lbs, ubs], axis=1)\n\n    return weights, rhs\n\n\ndef _plug_equality_constraints_into_linear_weights(weights, post_replacements):\n    \"\"\"Sum the weights of equality constrained parameters.\n\n    The sum of the weights is then the new weight of the equality constrained parameter\n    that is actually free. 
The weights of the other parameters are set to zero.\n\n    Args:\n        weights (pd.DataFrame): Weight matrices for linear constraints.\n        post_replacements (pd.Series): The _post_replacements column of pp.\n\n    Returns:\n        plugged_weights (pd.DataFrame)\n\n    \"\"\"\n    w = weights.T\n    plugged_iloc = pd.Series(post_replacements)\n    plugged_iloc = plugged_iloc.where(plugged_iloc >= 0, np.arange(len(plugged_iloc)))\n    w[\"plugged_iloc\"] = plugged_iloc\n\n    plugged_weights = w.groupby(\"plugged_iloc\").sum()\n    plugged_weights = plugged_weights.reindex(w.index).fillna(0).T\n\n    return plugged_weights\n\n\ndef _plug_fixes_into_linear_weights_and_rhs(\n    weights, rhs, is_fixed_to_value, fixed_value\n):\n    \"\"\"Set weights of fixed parameters to 0 and adjust right hand sides accordingly.\n\n    Args:\n        weights (pd.DataFrame): Weight matrix for linear constraint.\n        rhs (pd.DataFrame): Right hand side of the linear constraint.\n        is_fixed_to_value (pd.Series): The _is_fixed_to_value column of pp.\n        fixed_value (pd.Series): The _fixed_value column of pp.\n\n    Returns:\n        new_weights (pd.DataFrame)\n        new_rhs (pd.DataFrame)\n\n    \"\"\"\n    ilocs = np.arange(len(fixed_value))\n    fixed_ilocs = ilocs[is_fixed_to_value].tolist()\n    new_rhs = rhs.copy()\n    new_weights = weights.copy()\n\n    if len(fixed_ilocs) > 0:\n        fixed_values = fixed_value[fixed_ilocs]\n        fixed_contribution = weights[fixed_ilocs] @ fixed_values\n        for column in [\"lower_bound\", \"upper_bound\", \"value\"]:\n            new_rhs[column] = new_rhs[column] - fixed_contribution\n        for i in fixed_ilocs:\n            new_weights[i] = 0\n\n    return new_weights, new_rhs\n\n\ndef _express_bounds_as_linear_constraints(weights, rhs, lower, upper):\n    \"\"\"Express bounds of linearly constrained params as linear constraint.\n\n    In general it is easier to keep bounds separately from the constraints\n   
 but in the case of linearly constrained parameters we need to express them as\n    additional linear constraints to check compatibility and to choose the correct\n    reparametrization.\n\n    Args:\n        weights (pd.DataFrame): The weight matrix of the linear constraint.\n        rhs (pd.DataFrame): The right hand side of the linear constraint.\n        lower (np.ndarray): Lower bounds.\n        upper (np.ndarray): Upper bounds.\n\n    Returns:\n        extended_weights (pd.DataFrame)\n        extended_rhs (pd.DataFrame)\n\n    \"\"\"\n    additional_pc = []\n    for i in weights.columns:\n        new = {}\n        if np.isfinite(lower[i]):\n            new[\"lower_bound\"] = lower[i]\n        if np.isfinite(upper[i]):\n            new[\"upper_bound\"] = upper[i]\n        if new != {}:\n            new[\"weights\"] = pd.Series([1], name=\"w\", index=[i])\n            additional_pc.append(new)\n\n    if len(additional_pc) > 0:\n        new_weights, new_rhs = _transform_linear_constraints_to_pandas_objects(\n            additional_pc, len(lower)\n        )\n        new_weights = new_weights[weights.columns]\n\n        extended_weights = pd.concat([weights, new_weights]).reset_index(drop=True)\n        extended_rhs = pd.concat([rhs, new_rhs]).reset_index(drop=True)\n    else:\n        extended_weights, extended_rhs = weights, rhs\n\n    return extended_weights, extended_rhs\n\n\ndef _rescale_linear_constraints(weights, rhs):\n    \"\"\"Rescale rows in weights such that the first nonzero element equals one.\n\n    This will make it easier to detect redundant rows.\n\n    Args:\n        weights (pd.DataFrame): The weight matrix of the linear constraint.\n        rhs (pd.DataFrame): The right hand side of the linear constraint.\n\n    Returns:\n        new_weights (pd.DataFrame)\n        new_rhs (pd.DataFrame)\n\n    \"\"\"\n    first_nonzero = weights.replace(0, np.nan).bfill(axis=1).iloc[:, 0]\n    scaling_factor = 1 / first_nonzero.to_numpy().reshape(-1, 1)\n    
new_weights = scaling_factor * weights\n    scaled_rhs = scaling_factor * rhs\n    new_rhs = scaled_rhs.copy()\n    new_rhs[\"lower_bound\"] = scaled_rhs[\"lower_bound\"].where(\n        scaling_factor.flatten() > 0, scaled_rhs[\"upper_bound\"]\n    )\n    new_rhs[\"upper_bound\"] = scaled_rhs[\"upper_bound\"].where(\n        scaling_factor.flatten() > 0, scaled_rhs[\"lower_bound\"]\n    )\n\n    return new_weights, new_rhs\n\n\ndef _drop_redundant_linear_constraints(weights, rhs):\n    \"\"\"Drop linear constraints that are implied by other linear constraints.\n\n    This is not yet very smart. We just check for linearly dependent weights.\n\n    Args:\n        weights (pd.DataFrame): The weight matrix of the linear constraint.\n        rhs (pd.DataFrame): The right hand side of the linear constraint.\n\n    Returns:\n        new_weights (pd.DataFrame)\n        new_rhs (pd.DataFrame)\n\n    \"\"\"\n    weights[\"dupl_group\"] = weights.groupby(list(weights.columns)).ngroup()\n    rhs[\"dupl_group\"] = weights[\"dupl_group\"]\n    weights.set_index(\"dupl_group\", inplace=True)\n\n    new_weights = weights.drop_duplicates()\n\n    def _consolidate_fix(x):\n        vc = x.value_counts(dropna=True)\n        if len(vc) == 0:\n            return np.nan\n        elif len(vc) == 1:\n            return vc.index[0]\n        else:\n            raise ValueError\n\n    ub = rhs.groupby(\"dupl_group\")[\"upper_bound\"].min()\n    lb = rhs.groupby(\"dupl_group\")[\"lower_bound\"].max()\n    fix = rhs.groupby(\"dupl_group\")[\"value\"].apply(_consolidate_fix)\n\n    # remove the bounds for fixed parameters\n    ub = ub.where(fix.isnull(), np.inf)\n    lb = lb.where(fix.isnull(), -np.inf)\n\n    new_rhs = pd.concat(\n        [lb, ub, fix], axis=1, names=[\"lower_bound\", \"upper_bound\", \"value\"]\n    )\n    new_rhs = new_rhs.reindex(new_weights.index)\n\n    return new_weights, new_rhs\n\n\ndef _check_consolidated_weights(weights, param_names):\n    \"\"\"Check the rank 
condition on the linear weights.\"\"\"\n    n_constraints, n_params = weights.shape\n\n    msg_too_many = (\n        \"Too many linear constraints. There can be at most as many linear constraints\"\n        \"as involved parameters with non-zero weights.\\n\"\n    )\n\n    msg_rank = \"The weights for linear constraints must be linearly independent.\\n\"\n\n    msg_general = (\n        \"The error occurred for constraints on the following parameters:\\n{}\\n with \"\n        \"weighting matrix:\\n{}\\nIt is possible that you did not specify those \"\n        \"constraints as linear constraints but as bounds, fixes, increasing or \"\n        \"decreasing constraints.\"\n    )\n    relevant_names = [param_names[i] for i in weights.columns]\n\n    if n_constraints > n_params:\n        raise InvalidConstraintError(\n            msg_too_many + msg_general.format(relevant_names, weights)\n        )\n\n    if np.linalg.matrix_rank(weights) < n_constraints:\n        raise InvalidConstraintError(\n            msg_rank + msg_general.format(relevant_names, weights)\n        )\n\n\ndef _get_kernel_transformation_matrices(weights):\n    \"\"\"Construct the m matrix for the kernel transformations.\n\n    See :ref:`linear_constraint_implementation` for details.\n\n    Args:\n        weights (pd.DataFrame): Weight matrix of a linear constraint.\n\n    \"\"\"\n    n_constraints, n_params = weights.shape\n\n    identity = np.eye(n_params)\n\n    i = 0\n    filled_weights = weights\n    while len(filled_weights) < n_params:\n        candidate = np.vstack([identity[i], filled_weights])\n        if np.linalg.matrix_rank(candidate) == len(candidate):\n            filled_weights = candidate\n        i += 1\n\n    k = n_params - n_constraints\n\n    filled_weights[:k] = filled_weights[:k][::-1]\n\n    to_internal = filled_weights\n    from_internal = np.linalg.inv(to_internal)\n\n    return to_internal, from_internal\n\n\ndef _is_redundant(candidate, others):\n    \"\"\"Check if a 
constraint is redundant given other constraints.\n\n    Applicable to all but linear constraints.\n\n    \"\"\"\n    assert candidate[\"type\"] != \"linear\"\n    if len(others) == 0:\n        is_redundant = False\n    else:\n        same_type, _ = _split_constraints(others, candidate[\"type\"])\n        duplicates = [c for c in same_type if c[\"index\"] == candidate[\"index\"]]\n        is_redundant = len(duplicates) > 0\n\n    return is_redundant\n\n\ndef _unique_values(arr, dropna=True):\n    if dropna:\n        arr = arr[np.isfinite(arr)]\n    return list(set(arr.tolist()))\n"
  },
  {
    "path": "src/optimagic/parameters/constraint_tools.py",
    "content": "from optimagic import deprecations\nfrom optimagic.parameters.bounds import pre_process_bounds\nfrom optimagic.parameters.conversion import get_converter\n\n\ndef count_free_params(\n    params,\n    constraints=None,\n    bounds=None,\n    # deprecated\n    lower_bounds=None,\n    upper_bounds=None,\n):\n    \"\"\"Count the (free) parameters of an optimization problem.\n\n    Args:\n        params (pytree): The parameters.\n        constraints (list): The constraints for the optimization problem. If constraints\n            are provided, only the free parameters are counted.\n        bounds: Lower and upper bounds on the parameters. The most general and preferred\n            way to specify bounds is an `optimagic.Bounds` object that collects lower,\n            upper, soft_lower and soft_upper bounds. The soft bounds are used for\n            sampling based optimizers but are not enforced during optimization. Each\n            bound type mirrors the structure of params. Check our how-to guide on bounds\n            for examples. 
If params is a flat numpy array, you can also provide bounds\n            via any format that is supported by scipy.optimize.minimize.\n\n    Returns:\n        int: Number of (free) parameters\n\n    \"\"\"\n    bounds = deprecations.replace_and_warn_about_deprecated_bounds(\n        bounds=bounds,\n        lower_bounds=lower_bounds,\n        upper_bounds=upper_bounds,\n    )\n\n    deprecations.throw_dict_constraints_future_warning_if_required(constraints)\n\n    bounds = pre_process_bounds(bounds)\n    constraints = deprecations.pre_process_constraints(constraints)\n\n    _, internal_params = get_converter(\n        params=params,\n        constraints=constraints,\n        bounds=bounds,\n        func_eval=3,\n        solver_type=\"value\",\n    )\n\n    return int(internal_params.free_mask.sum())\n\n\ndef check_constraints(\n    params,\n    constraints,\n    bounds=None,\n    # deprecated\n    lower_bounds=None,\n    upper_bounds=None,\n):\n    \"\"\"Raise an error if constraints are invalid or not satisfied in params.\n\n    Args:\n        params (pytree): The parameters.\n        constraints (list): The constraints for the optimization problem.\n        bounds: Lower and upper bounds on the parameters. The most general and preferred\n            way to specify bounds is an `optimagic.Bounds` object that collects lower,\n            upper, soft_lower and soft_upper bounds. The soft bounds are used for\n            sampling based optimizers but are not enforced during optimization. Each\n            bound type mirrors the structure of params. Check our how-to guide on bounds\n            for examples. 
If params is a flat numpy array, you can also provide bounds\n            via any format that is supported by scipy.optimize.minimize.\n\n    Raises:\n        InvalidParamsError: If constraints are valid but not satisfied.\n        InvalidConstraintError: If constraints are invalid.\n\n    \"\"\"\n    bounds = deprecations.replace_and_warn_about_deprecated_bounds(\n        bounds=bounds,\n        lower_bounds=lower_bounds,\n        upper_bounds=upper_bounds,\n    )\n\n    deprecations.throw_dict_constraints_future_warning_if_required(constraints)\n\n    bounds = pre_process_bounds(bounds)\n    constraints = deprecations.pre_process_constraints(constraints)\n\n    get_converter(\n        params=params,\n        constraints=constraints,\n        bounds=bounds,\n        func_eval=3,\n        solver_type=\"value\",\n    )\n"
  },
  {
    "path": "src/optimagic/parameters/conversion.py",
    "content": "\"\"\"Aggregate the multiple parameter and function output conversions into on.\"\"\"\n\nfrom dataclasses import dataclass, replace\nfrom typing import Callable\n\nimport numpy as np\n\nfrom optimagic.parameters.process_selectors import process_selectors\nfrom optimagic.parameters.scale_conversion import get_scale_converter\nfrom optimagic.parameters.space_conversion import InternalParams, get_space_converter\nfrom optimagic.parameters.tree_conversion import get_tree_converter\nfrom optimagic.typing import AggregationLevel\n\n\ndef get_converter(\n    params,\n    constraints,\n    bounds,\n    func_eval,\n    solver_type,\n    scaling=None,\n    derivative_eval=None,\n    add_soft_bounds=False,\n):\n    \"\"\"Get a converter between external and internal params and internal params.\n\n    This combines the following conversions:\n    - Flattening parameters provided as pytrees (tree_conversion)\n    - Enforcing constraints via reparametrizations (space_conversion)\n    - Scaling of the parameter space (scale_conversion)\n\n    The resulting converter can transform parameters, function outputs and derivatives.\n\n    If possible, fast paths for some or all transformations are chosen.\n\n    Args:\n        params (pytree): The user provided parameters.\n        constraints (list): The user provided constraints.\n        bounds (Bounds): The user provided bounds.\n        func_eval (float or pytree): An evaluation of ``func`` at ``params``.\n            Used to flatten the derivative output.\n        solver_type: Used to determine how the derivative output has to be\n            transformed for the optimzer.\n        scaling (ScalingOptions | None): Scaling options. If None, no scaling is\n            performed.\n        derivative_eval (dict, pytree or None): Evaluation of the derivative of\n            func at params. 
Used for consistency checks.\n        add_soft_bounds (bool): Whether soft bounds should be added to the\n            internal_params\n\n    Returns:\n        Converter: NamedTuple with methods to convert between internal and external\n            parameters, derivatives and function outputs.\n        InternalParams: NamedTuple with internal parameter values, lower_bounds and\n            upper_bounds.\n\n    \"\"\"\n    fast_path = _is_fast_path(\n        params=params,\n        constraints=constraints,\n        solver_type=solver_type,\n        scaling=scaling,\n        derivative_eval=derivative_eval,\n        add_soft_bounds=add_soft_bounds,\n    )\n    if fast_path:\n        return _get_fast_path_converter(\n            params=params,\n            bounds=bounds,\n            solver_type=solver_type,\n        )\n\n    tree_converter, internal_params = get_tree_converter(\n        params=params,\n        bounds=bounds,\n        func_eval=func_eval,\n        derivative_eval=derivative_eval,\n        solver_type=solver_type,\n        add_soft_bounds=add_soft_bounds,\n    )\n\n    flat_constraints = process_selectors(\n        constraints=constraints,\n        params=params,\n        tree_converter=tree_converter,\n        param_names=internal_params.names,\n    )\n\n    space_converter, internal_params = get_space_converter(\n        internal_params=internal_params, internal_constraints=flat_constraints\n    )\n\n    scale_converter, scaled_params = get_scale_converter(\n        internal_params=internal_params,\n        scaling=scaling,\n    )\n\n    def _params_to_internal(params):\n        x_flat = tree_converter.params_flatten(params)\n        x_internal = space_converter.params_to_internal(x_flat)\n        x_scaled = scale_converter.params_to_internal(x_internal)\n        return x_scaled\n\n    def _params_from_internal(x, return_type=\"tree\"):\n        x_unscaled = scale_converter.params_from_internal(x)\n        x_external = 
space_converter.params_from_internal(x_unscaled)\n\n        x_tree = tree_converter.params_unflatten(x_external)\n        if return_type == \"tree\":\n            out = x_tree\n        elif return_type == \"tree_and_flat\":\n            out = x_tree, x_external\n        elif return_type == \"flat\":\n            out = x_external\n        else:\n            msg = (\n                f\"Invalid return type: {return_type}. Must be one of 'tree', 'flat', \"\n                \"'tree_and_flat'\"\n            )\n            raise ValueError(msg)\n        return out\n\n    def _derivative_to_internal(derivative_eval, x, jac_is_flat=False):\n        if jac_is_flat:\n            jacobian = derivative_eval\n        else:\n            jacobian = tree_converter.derivative_flatten(derivative_eval)\n        x_unscaled = scale_converter.params_from_internal(x)\n        jac_with_space_conversion = space_converter.derivative_to_internal(\n            jacobian, x_unscaled\n        )\n        jac_with_unscaling = scale_converter.derivative_to_internal(\n            jac_with_space_conversion\n        )\n        return jac_with_unscaling\n\n    internal_params = replace(scaled_params, free_mask=internal_params.free_mask)\n\n    converter = Converter(\n        params_to_internal=_params_to_internal,\n        params_from_internal=_params_from_internal,\n        derivative_to_internal=_derivative_to_internal,\n        has_transforming_constraints=space_converter.has_transforming_constraints,\n    )\n\n    return converter, internal_params\n\n\n@dataclass(frozen=True)\nclass Converter:\n    params_to_internal: Callable\n    params_from_internal: Callable\n    derivative_to_internal: Callable\n    has_transforming_constraints: bool\n\n\ndef _fast_params_from_internal(x, return_type=\"tree\"):\n    x = x.astype(float)\n    if return_type == \"tree_and_flat\":\n        return x, x\n    else:\n        return x\n\n\ndef _get_fast_path_converter(params, bounds, solver_type):\n    def 
_fast_derivative_to_internal(\n        derivative_eval,\n        x,  # noqa: ARG001\n        jac_is_flat=True,  # noqa: ARG001\n    ):\n        # make signature compatible with non-fast path\n        return derivative_eval\n\n    converter = Converter(\n        params_to_internal=lambda params: params.astype(float),\n        params_from_internal=_fast_params_from_internal,\n        derivative_to_internal=_fast_derivative_to_internal,\n        has_transforming_constraints=False,\n    )\n\n    if bounds is None or bounds.lower is None:\n        lower_bounds = None\n    else:\n        lower_bounds = bounds.lower.astype(float)\n\n    if bounds is None or bounds.upper is None:\n        upper_bounds = None\n    else:\n        upper_bounds = bounds.upper.astype(float)\n\n    internal_params = InternalParams(\n        values=params.astype(float),\n        lower_bounds=lower_bounds,\n        upper_bounds=upper_bounds,\n        free_mask=np.full(len(params), True),\n        names=[str(i) for i in range(len(params))],\n    )\n    return converter, internal_params\n\n\ndef _is_fast_path(\n    params,\n    constraints,\n    solver_type,\n    scaling,\n    derivative_eval,\n    add_soft_bounds,\n):\n    if not _is_1d_arr(params):\n        return False\n    if constraints:\n        return False\n\n    if scaling is not None:\n        return False\n\n    if not _is_fast_deriv_eval(derivative_eval, solver_type):\n        return False\n\n    if add_soft_bounds:\n        return False\n\n    return True\n\n\ndef _is_fast_deriv_eval(d, solver_type):\n    # this is the case if no or closed form derivatives are used\n    if d is None:\n        return True\n\n    if solver_type == AggregationLevel.SCALAR:\n        if not _is_1d_arr(d):\n            return False\n    else:\n        if not _is_2d_arr(d):\n            return False\n\n    return True\n\n\ndef _is_1d_arr(candidate):\n    return isinstance(candidate, np.ndarray) and candidate.ndim == 1\n\n\ndef _is_2d_arr(candidate):\n    
return isinstance(candidate, np.ndarray) and candidate.ndim == 2\n"
  },
  {
    "path": "src/optimagic/parameters/kernel_transformations.py",
    "content": "r\"\"\"Functions and derivatives thereof to transform external and internal params.\n\nRemarks on the mathematical notation:\n-------------------------------------\n\nWe let :math:`X` denote the Cholesky factor of some covariance matrix :math:`S`.\nThat is :math:`X X^\\top = S`. We write :math:`\\text{vec}(A)` for the column-wise\nvectorization of the matrix :math:`A` and we write :math:`\\text{vech}(A)` for\nthe row-wise half vectorization of :math:`A`. We denote the elimination\nmatrix by :math:`L`, which fulfills :math:`L \\text{vec}(A) = \\text{vech}(A)`.\nFor lower-triangular matrices :math:`A` we define the \"lower-triangular\"\nduplication matrix :math:`D`, which is not to be confused with the standard\nduplication matrix, and fulfills :math:`D \\text{vech}(A) = \\text{vec}(A)`. At\nlast we define the so called commutation matrix :math:`K` which is given by the\nproperty that :math:`K \\text{vec}(A) = \\text{vec}(A^\\top)`.\n\nRemarks on reference literature:\n--------------------------------\n\nThe solutions on how to compute the jacobians implemented here can be found\nusing matrix calculus. See for example 'Matrix Differential Calculus with\nApplications in Statistics and Econometrics' by Magnus and Neudecker. In\nspecific cases we refer to posts on math.stackexchange.com.\n\n.. rubric:: References\n\n.. 
_post_mathoverflow:\n   https://google.github.io/styleguide/pyguide.html\n\n\"\"\"\n\nimport numpy as np\n\nfrom optimagic.utilities import (\n    chol_params_to_lower_triangular_matrix,\n    cov_matrix_to_sdcorr_params,\n    cov_params_to_matrix,\n    dimension_to_number_of_triangular_elements,\n    robust_cholesky,\n    sdcorr_params_to_matrix,\n)\n\n\ndef covariance_to_internal(external_values, constr):\n    \"\"\"Do a cholesky reparametrization.\"\"\"\n    cov = cov_params_to_matrix(external_values)\n    chol = robust_cholesky(cov)\n    return chol[np.tril_indices(len(cov))]\n\n\ndef covariance_to_internal_jacobian(external_values, constr):\n    r\"\"\"Jacobian of ``covariance_to_internal``.\n\n    For reference see docstring of ``jacobian_covariance_from_internal``. In\n    comparison to that function, however, here we want to differentiate the\n    reverse graph\n                external --> cov --> cholesky --> internal\n\n    Again use the vectors :math:`c` and :math:`x` to denote the external and\n    internal values, respectively. To solve for the jacobian we make use of the\n    identity\n\n    .. 
math::\n        \\frac{\\mathrm{d}x}{\\mathrm{d}c} = (\\frac{\\mathrm{d}c}{\\mathrm{d}x})^{-1}\n\n    Args:\n        external_values (np.ndarray): Row-wise half-vectorized covariance matrix\n\n    Returns:\n        deriv: The Jacobian matrix.\n\n    \"\"\"\n    cov = cov_params_to_matrix(external_values)\n    chol = robust_cholesky(cov)\n\n    internal = chol[np.tril_indices(len(chol))]\n\n    deriv = covariance_from_internal_jacobian(internal, constr=None)\n    deriv = np.linalg.pinv(deriv)\n    return deriv\n\n\ndef covariance_from_internal(internal_values, constr):\n    \"\"\"Undo a cholesky reparametrization.\"\"\"\n    chol = chol_params_to_lower_triangular_matrix(internal_values)\n    cov = chol @ chol.T\n    return cov[np.tril_indices(len(chol))]\n\n\ndef covariance_from_internal_jacobian(internal_values, constr):\n    r\"\"\"Jacobian of ``covariance_from_internal``.\n\n    The following result is motivated by https://tinyurl.com/y4pbfxst, which is\n    shortly presented again here. For notation see the explaination at the\n    beginning of the module.\n\n\n    Explaination of the result\n    --------------------------\n\n    We want to differentiate the graph\n                internal --> cholesky --> cov --> external\n\n    Define :math:`x' := \\text{vec}(X)` and :math:`c' := \\text{vec}(S)`, where\n    :math:`X` denotes the Cholesky factor of the covariance matrix :math:`S`.\n    We then first differentiate the part \"cholesky --> cov\" using the result\n    stated in the tinyurl above to get\n\n    .. math::\n        J' := \\frac{\\mathrm{d}c'}{\\mathrm{d}x'} = (I + K)(X \\otimes I) \\,,\n\n    where :math:`K` denotes the commutation matrix. Using this intermediate\n    result we can compute the jacobian as\n\n    .. 
math:: \\frac{\\mathrm{d}c}{\\mathrm{d}x} = L J' D \\,,\n\n    where :math:`c := \\text{external}` and :math:`x := \\text{internal}`.\n\n    Args:\n        internal_values (np.ndarray): Cholesky factors stored in an \"internal\"\n            format.\n\n    Returns:\n        deriv: The Jacobian matrix.\n\n    \"\"\"\n    chol = chol_params_to_lower_triangular_matrix(internal_values)\n    dim = len(chol)\n\n    K = _commutation_matrix(dim)\n    L = _elimination_matrix(dim)\n\n    left = np.eye(dim**2) + K\n    right = np.kron(chol, np.eye(dim))\n\n    intermediate = left @ right\n\n    deriv = L @ intermediate @ L.T\n    return deriv\n\n\ndef sdcorr_to_internal(external_values, constr):\n    \"\"\"Convert sdcorr to cov and do a cholesky reparametrization.\"\"\"\n    cov = sdcorr_params_to_matrix(external_values)\n    chol = robust_cholesky(cov)\n    return chol[np.tril_indices(len(cov))]\n\n\ndef sdcorr_to_internal_jacobian(external_values, constr):\n    r\"\"\"Derivative of ``sdcorr_to_internal``.\n\n    For reference see docstring of ``jacobian_sdcorr_from_internal``. In\n    comparison to that function, however, here we want to differentiate the\n    reverse graph\n\n     external --> mod. corr-mat --> corr-mat --> cov --> cholesky --> internal\n\n    Again use the vectors :math:`p` and :math:`x` to denote the external and\n    internal values, respectively. To solve for the jacobian we make use of the\n    identity\n\n    .. 
math::\n        \\frac{\\mathrm{d}x}{\\mathrm{d}p} = (\\frac{\\mathrm{d}p}{\\mathrm{d}x})^{-1}\n\n    Args:\n        external_values (np.ndarray): Row-wise half-vectorized modified correlation\n            matrix.\n\n    Returns:\n        deriv: The Jacobian matrix.\n\n    \"\"\"\n    cov = sdcorr_params_to_matrix(external_values)\n    chol = robust_cholesky(cov)\n\n    internal = chol[np.tril_indices(len(chol))]\n\n    deriv = sdcorr_from_internal_jacobian(internal, constr=None)\n    deriv = np.linalg.pinv(deriv)\n    return deriv\n\n\ndef sdcorr_from_internal(internal_values, constr):\n    \"\"\"Undo a cholesky reparametrization.\"\"\"\n    chol = chol_params_to_lower_triangular_matrix(internal_values)\n    cov = chol @ chol.T\n    return cov_matrix_to_sdcorr_params(cov)\n\n\ndef sdcorr_from_internal_jacobian(internal_values, constr):\n    r\"\"\"Derivative of ``sdcorr_from_internal``.\n\n    The following result is motivated by https://tinyurl.com/y6ytlyd9; however\n    since the question was formulated with an error the result here is adjusted\n    slightly. In particular, in the answer by user 'greg', the matrix :math:`A`\n    should have been defined as :math:`A = \\text{diag}(||x_1||, \\dots, ||x_n||)`\n    , where :math:`||x_i||` denotes the euclidian norm of the the i-th row of\n    :math:`X` (the Cholesky factor). For notation see the explaination at the\n    beginning of the module or the question on the tinyurl. The variable names\n    in this function are chosen to be consistent with the tinyurl link.\n\n    Explaination on the result\n    --------------------------\n\n    We want to differentiate the graph\n\n     internal --> cholesky --> cov --> corr-mat --> mod. corr-mat --> external\n\n    where mod. corr-mat denotes the modified correlation matrix which has the\n    standard deviations stored on its diagonal. Let :math:`x := \\text{internal}`\n    and :math:`p := \\text{external}`. Then we want to compute the quantity\n\n    .. 
math:: \\frac{\\mathrm{d} p}{\\mathrm{d} x} .\n\n    As before we consider an intermediate result first. Namely we define\n    :math:`A` as above, :math:`V := A^{-1}` and :math:`P := V S V + A - I`. The\n    attentive reader might now notice that :math:`P` is the modified correlation\n    matrix. At last we write :math:`x' := \\text{vec}(X)` and\n    :math:`p' := \\text{vec}(P)`. Using the result stated in the tinyurl above,\n    adjusted for the different matrix :math:`A`, we can compute the quantity\n    :math:`(\\mathrm{d} p'/ \\mathrm{d} x')`.\n\n    Finally, since we can define transformation matrices :math:`T` and :math:`L`\n    to get :math:`p = T p'` and :math:`x = L x'` (where :math:`L` denotes the\n    elimination matrix with corresponding duplication matrix :math:`D`), we can\n    get our final result as\n\n    .. math::\n        \\frac{\\mathrm{d}p}{\\mathrm{d}x} = T \\frac{\\mathrm{d}p'}{\\mathrm{d}x'} D\n\n    Args:\n        internal_values (np.ndarray): Cholesky factors stored in an \"internal\"\n            format.\n\n    Returns:\n        deriv: The Jacobian matrix.\n\n    \"\"\"\n    X = chol_params_to_lower_triangular_matrix(internal_values)\n    dim = len(X)\n\n    identity = np.eye(dim)\n    S = X @ X.T\n\n    #  the wrong formulation in the tinyurl stated: A = np.multiply(I, X)\n    A = np.sqrt(np.multiply(identity, S))\n\n    V = np.linalg.inv(A)\n\n    K = _commutation_matrix(dim)\n    Y = np.diag(identity.ravel(\"F\"))\n\n    #  with the wrong formulation in the tinyurl we would have had U = Y\n    norms = np.sqrt((X**2).sum(axis=1).reshape(-1, 1))\n    XX = X / norms\n    U = Y @ np.kron(identity, XX) @ K\n\n    N = np.kron(identity, X) @ K + np.kron(X, identity)\n\n    VS = V @ S\n    B = np.kron(V, V)\n    H = np.kron(VS, identity)\n    J = np.kron(identity, VS)\n\n    intermediate = U + B @ N - (H + J) @ B @ U\n\n    T = _transformation_matrix(dim)\n    D = _duplication_matrix(dim)\n\n    deriv = T @ intermediate @ D\n    return 
deriv\n\n\ndef probability_to_internal(external_values, constr):\n    \"\"\"Reparametrize probability constrained parameters to internal.\"\"\"\n    return external_values / external_values[-1]\n\n\ndef probability_to_internal_jacobian(external_values, constr):\n    r\"\"\"Jacobian of ``probability_to_internal``.\n\n    Let :math:`x = \\text{external}`. The function ``probability_to_internal``\n    has the following structure\n\n    .. math::  f: \\mathbb{R}^m \\to \\mathbb{R}^m, x \\mapsto \\frac{1}{x_m} x\n\n    where :math:`e_k` denotes the m-dimensional k-th standard basis vector. The\n    jacobian can then be computed as\n\n    .. math::\n        J(f)(x) =\n        \\frac{1}{x_m} \\sum_{k=1}^{m-1} e_k e_k^\\top -\n        \\frac{1}{x_m^2}  [0, \\dots, 0,\n            \\left ( \\begin{matrix} x_{1:m-1} \\\\ 0 \\end{matrix} \\right )\n        ]\n\n    Args:\n        external_values (np.ndarray): Array of probabilities; sums to one.\n\n    Returns:\n        deriv: The Jacobian matrix.\n\n    \"\"\"\n    dim = len(external_values)\n\n    deriv = np.eye(dim) / external_values[-1]\n    deriv[:, -1] -= external_values / (external_values[-1] ** 2)\n    deriv[-1, -1] = 0\n\n    return deriv\n\n\ndef probability_from_internal(internal_values, constr):\n    \"\"\"Reparametrize probability constrained parameters from internal.\"\"\"\n    return internal_values / internal_values.sum()\n\n\ndef probability_from_internal_jacobian(internal_values, constr):\n    r\"\"\"Jacobian of ``probability_from_internal``.\n\n    Let :math:`x := \\text{internal}`. The function ``probability_from_internal``\n    has the following structure\n\n    .. math::`f: \\mathbb{R}^m \\to \\mathbb{R}^m, x \\mapsto \\frac{1}{x^\\top 1} x`\n\n    where :math:`1` denotes a vector of all ones and :math:`I_m` the identity\n    matrix. The jacobian can be computed as\n\n    .. 
math::  J(f)(x) = \\frac{1}{\\sigma} I_m - \\frac{1}{\\sigma^2} 1 x^\\top\n\n    Args:\n        internal_values (np.ndarray): Internal (positive) values.\n\n    Returns:\n        deriv: The Jacobian matrix.\n\n    \"\"\"\n    dim = len(internal_values)\n\n    sigma = np.sum(internal_values)\n    left = np.eye(dim)\n    right = (np.ones((dim, dim)) * (internal_values / sigma)).T\n\n    deriv = (left - right) / sigma\n    return deriv\n\n\ndef linear_to_internal(external_values, constr):\n    \"\"\"Reparametrize linear constraint to internal.\"\"\"\n    return constr[\"to_internal\"] @ external_values\n\n\ndef linear_to_internal_jacobian(external_values, constr):\n    return constr[\"to_internal\"]\n\n\ndef linear_from_internal(internal_values, constr):\n    \"\"\"Reparametrize linear constraint from internal.\"\"\"\n    return constr[\"from_internal\"] @ internal_values\n\n\ndef linear_from_internal_jacobian(internal_values, constr):\n    return constr[\"from_internal\"]\n\n\ndef _elimination_matrix(dim):\n    r\"\"\"Construct (row-wise) elimination matrix.\n\n    Let :math:`A` be a quadratic matrix. Let :math:`\\text{vec}(A)` be the\n    column-wise vectorization of :math:`A`. Let :math:`\\text{vech}(A)` be the\n    row-wise half-vectorization of :math:`A`. Then the corresponding elimination\n    matrix :math:`L` has the property\n\n    .. 
math::  L \\text{vec}(A) = \\text{vech}(A)\n\n    See the wiki entry https://tinyurl.com/yy4sdr43 for further information, but\n    note that here we are using :math:`\\text{vech}` as the row-wise and not\n    column-wise half-vectorization.\n\n    Args:\n        dim (int): The dimension.\n\n    Returns:\n        eliminator (np.ndarray): The elimination matrix.\n\n    Examples:\n    >>> import numpy as np\n    >>> from numpy.testing import assert_array_almost_equal\n    >>> dim = 10\n    >>> A = np.random.default_rng().normal(size=(dim, dim))\n    >>> vectorized = A.ravel('F')\n    >>> half_vectorized = A[np.tril_indices(dim)]\n    >>> L = _elimination_matrix(dim)\n    >>> assert_array_almost_equal(L @ vectorized, half_vectorized)\n\n    \"\"\"\n    n = dimension_to_number_of_triangular_elements(dim)\n\n    counter = np.zeros((dim, dim), int) - 1\n    counter[np.tril_indices(dim)] = np.arange(n, dtype=int)\n\n    columns = [_unit_vector_or_zeros(i, n) for i in counter.ravel(\"F\")]\n\n    eliminator = np.column_stack(columns)\n    return eliminator\n\n\ndef _duplication_matrix(dim):\n    r\"\"\"Return duplication matrix.\n\n    Let :math:`A` be a lower-triangular quadratic matrix. Let\n    :math:`\\text{vec}(A)` be the column-wise vectorization of :math:`A`. Let\n    :math:`\\text{vech}(A)` be the row-wise half-vectorization of :math:`A`.\n    Then the corresponding elimination matrix :math:`D` has the property\n\n    .. 
math::  D \\text{vech}(A) = \\text{vec}(A)\n\n    In particular note that here :math:`D = L^\\top`.\n\n    See the wiki entry https://tinyurl.com/yy4sdr43 for further information, but\n    note that here we are using :math:`\\text{vech}` as the row-wise and not\n    column-wise half-vectorization, and that we are using this operator on a\n    lower-triangular matrix and not a symmetric matrix, which allows for the\n    identity :math:`D = L^\\top`.\n\n    Args:\n        dim (int): The dimension.\n\n    Returns:\n        duplicator (np.ndarray): The duplication matrix.\n\n    Example:\n    >>> import numpy as np\n    >>> from numpy.testing import assert_array_almost_equal\n    >>> dim = 10\n    >>> A = np.tril(np.random.default_rng().normal(size=(dim, dim)))\n    >>> vectorized = A.ravel('F')\n    >>> half_vectorized = A[np.tril_indices(dim)]\n    >>> D = _duplication_matrix(dim)\n    >>> assert_array_almost_equal(D @ half_vectorized, vectorized)\n\n    \"\"\"\n    duplicator = _elimination_matrix(dim).T\n    return duplicator\n\n\ndef _transformation_matrix(dim):\n    r\"\"\"Return transformation matrix.\n\n    Let :math:`A` be a quadratic matrix of dimension :math:`m \\times m`. Define\n    the :math:`m-1 \\times m-1` matrix :math:`B` as the lower-triangular matrix\n    with entries given by the lower-triangular part of :math:`A` without the\n    diagonal. Set :math:`a := \\text{diag}(A)`. We define the special\n    vectorization operator :math:`\\bar{\\text{vec}}` as the operator that maps\n    the diagonal of a matrix to the first entries of the vector and then\n    proceeds to map the remaining lower part of the matrix using a row-wise\n    half-vectorization scheme. That is, we would have\n\n    .. math:: \\bar{\\text{vec}}(A) = (a^\\top, \\text{vech}(A)^\\top)^\\top\n\n    Then the transformation matrix :math:`T` is defined by the property that\n\n    .. 
math:: T \\text{vec}(A) = \\bar{\\text{vec}}(A)\n\n    We use this transformation when we map the vectorization of the modified\n    correlation matrix to the externally stored ``sdcorr_params``.\n\n    Args:\n        dim (int): The dimension.\n\n    Returns:\n        transformer (np.ndarray): The transformation matrix.\n\n    Example:\n    >>> import numpy as np\n    >>> from numpy.testing import assert_array_almost_equal\n    >>> from optimagic.utilities import cov_matrix_to_sdcorr_params\n    >>> from optimagic.utilities import cov_to_sds_and_corr\n    >>> cov = np.cov(np.random.default_rng().normal(size=(10, 4)))\n    >>> sds, corr = cov_to_sds_and_corr(cov)\n    >>> corr[np.diag_indices(len(cov))] = sds\n    >>> vectorized = corr.ravel('F')\n    >>> sdcorr_params = cov_matrix_to_sdcorr_params(cov)\n    >>> T = _transformation_matrix(len(cov))\n    >>> assert_array_almost_equal(T @ vectorized, sdcorr_params)\n\n    \"\"\"\n    n = dimension_to_number_of_triangular_elements(dim)\n    counter = np.zeros((dim, dim)) + np.nan\n    counter[np.diag_indices(dim)] = np.arange(dim, dtype=int)\n    counter[np.tril_indices(dim, k=-1)] = np.arange(dim, n, dtype=int)\n\n    m = counter.ravel(\"F\")\n    num_na = np.count_nonzero(np.isnan(m))\n    indices = m.argsort()[:-num_na]\n\n    rows = [_unit_vector_or_zeros(i, dim**2) for i in indices]\n\n    transformer = np.vstack(rows)\n    return transformer\n\n\ndef _commutation_matrix(dim):\n    r\"\"\"Return commutation matrix.\n\n    Let :math:`A` be a quadratic matrix. Let :math:`\\text{vec}(A)` be the\n    column-wise vectorization of :math:`A`. Then the corresponding commutation\n    matrix :math:`K` has the property\n\n    .. 
math::  K \\text{vec}(A) = \\text{vec}(A^\\top)\n\n    See the wiki entry https://tinyurl.com/yydgq2z4 for further information.\n\n    Args:\n        dim (int): The dimension.\n\n    Returns:\n        cummuter (np.ndarrary): The cummutation matrix.\n\n    Example:\n    >>> import numpy as np\n    >>> from numpy.testing import assert_array_almost_equal\n    >>> dim = 10\n    >>> A = np.random.default_rng().normal(size=(dim, dim))\n    >>> vectorized = A.ravel('F')\n    >>> vectorized_transposed = A.T.ravel('F')\n    >>> K = _commutation_matrix(dim)\n    >>> assert_array_almost_equal(K @ vectorized, vectorized_transposed)\n\n    \"\"\"\n    row = np.arange(dim**2)\n    col = row.reshape((dim, dim), order=\"F\").ravel()\n    commuter = np.zeros((dim**2, dim**2), dtype=np.int8)\n    commuter[row, col] = 1\n    return commuter\n\n\ndef _unit_vector_or_zeros(index, size):\n    \"\"\"Return unit vector or vector of all zeroes.\n\n    Args:\n        index (int): On which index to set a 1. If it is set to -1 a vector of\n            all zeros will be returned.\n        size (int): Dimension of the resulting vector.\n\n    Returns:\n        u (np.ndarray): The unit or zero vector.\n\n    Example:\n    >>> import numpy as np\n    >>> _unit_vector_or_zeros(1, 2)\n    array([0, 1])\n    >>> _unit_vector_or_zeros(-1, 2)\n    array([0, 0])\n\n    \"\"\"\n    u = np.zeros(size, int)\n    if index != -1:\n        u[index] = 1\n    return u\n"
  },
  {
    "path": "src/optimagic/parameters/nonlinear_constraints.py",
    "content": "import itertools\nfrom dataclasses import asdict\nfrom functools import partial\n\nimport numpy as np\nimport pandas as pd\nfrom pybaum import tree_flatten, tree_just_flatten, tree_unflatten\n\nfrom optimagic.differentiation.derivatives import first_derivative\nfrom optimagic.exceptions import InvalidConstraintError, InvalidFunctionError\nfrom optimagic.optimization.algo_options import CONSTRAINTS_ABSOLUTE_TOLERANCE\nfrom optimagic.parameters.block_trees import block_tree_to_matrix\nfrom optimagic.parameters.tree_registry import get_registry\n\n\ndef process_nonlinear_constraints(\n    nonlinear_constraints,\n    params,\n    bounds,\n    converter,\n    numdiff_options,\n    skip_checks,\n):\n    \"\"\"Process and prepare nonlinear constraints for internal use.\n\n    A user-provided nonlinear constraint consists of a function that is evaluated on a\n    selection of parameters returning a scalar or vector that must either be equal to\n    a fixed value (equality constraint) or smaller and larger than or equal to a lower\n    and upper bound (inequality constraint).\n\n    This function processes the nonlinear constraints in the following way:\n\n    1. The constraint a <= g(x) <= b is transformed to h(x) >= 0, where h(x) is\n       - h(x) = g(x), if a == 0 and b == inf\n       - h(x) = g(x) - a, if a != 0 and b == inf\n       - h(x) = (g(x) - a, -g(x) + b) >= 0, if a != 0 and b != inf.\n\n    2. The equality constraint g(x) = v is transformed to h(x) >= 0, where\n       h(x) = (g(x) - v, -g(x) + v).\n\n    3. Vector constraints are transformed to a list of scalar constraints.\n       g(x) = (g1(x), g2(x), ...) >= 0 is transformed to (g1(x) >= 0, g2(x) >= 0, ...).\n\n    4. 
The constraint function (defined on a selection of user-facing parameters) is\n       transformed to be evaluated on the internal parameters.\n\n\n    Args:\n        nonlinear_constraints (list[dict]): List of dictionaries, each representing a\n            nonlinear constraint.\n        params (pandas): A pytree containing the parameters with respect to which the\n            criterion is optimized. Examples are a numpy array, a pandas Series,\n            a DataFrame with \"value\" column, a float and any kind of (nested) dictionary\n            or list containing these elements. See :ref:`params` for examples.\n        bounds (Bounds): Bounds object containing information on the bounds of the\n            parameters. See :ref:`bounds` for details.\n        converter (Converter): NamedTuple with methods to convert between internal and\n            external parameters, derivatives and function outputs.\n        numdiff_options (NumdiffOptions): Options for numerical derivatives. See\n            :ref:`first_derivative` for details. Note that the default method is changed\n            to \"forward\" for speed reasons.\n        skip_checks (bool): Whether checks on the inputs are skipped. This makes the\n            optimization faster, especially for very fast constraint functions. 
Default\n            False.\n\n    Returns:\n        list[dict]: List of processed constraints.\n\n    \"\"\"\n    # do checks first to fail fast\n    constraint_evals = []\n    for _constraint in nonlinear_constraints:\n        _eval = _check_validity_and_return_evaluation(_constraint, params, skip_checks)\n        constraint_evals.append(_eval)\n\n    processed = []\n    for _constraint, _eval in zip(\n        nonlinear_constraints, constraint_evals, strict=False\n    ):\n        _processed_constraint = _process_nonlinear_constraint(\n            _constraint,\n            constraint_eval=_eval,\n            params=params,\n            bounds=bounds,\n            converter=converter,\n            numdiff_options=numdiff_options,\n        )\n        processed.append(_processed_constraint)\n\n    return processed\n\n\ndef _process_nonlinear_constraint(\n    c, constraint_eval, params, bounds, converter, numdiff_options\n):\n    \"\"\"Process a single nonlinear constraint.\"\"\"\n    # ==================================================================================\n    # Process selector and evaluate functions if necessary\n    # ==================================================================================\n\n    external_selector = _process_selector(c)  # functional selector\n\n    constraint_func = c[\"func\"]\n\n    if constraint_eval is None:\n        selected = external_selector(params)\n        constraint_eval = constraint_func(selected)\n\n    if bounds is not None:\n        # TODO: use bounds for numerical derivative; For this to work we need to\n        # extend bounds to the full params pytree before passing them to\n        # process_nonlinear_constraints.\n\n        # constraint_bounds = replace(\n        #     bounds,\n        #     lower=external_selector(bounds.lower),\n        #     upper=external_selector(bounds.upper),\n        # )\n        constraint_bounds = None\n    else:\n        constraint_bounds = None\n\n    _n_constr = 
len(np.atleast_1d(constraint_eval))\n\n    # ==================================================================================\n    # Consolidate and transform jacobian\n    # ==================================================================================\n\n    # process numdiff_options for numerical derivative\n\n    if \"derivative\" in c:\n        if not callable(c[\"derivative\"]):\n            msg = \"Jacobian of constraints needs to be callable.\"\n            raise ValueError(msg)\n        jacobian = c[\"derivative\"]\n    else:\n        # use finite-differences if no closed-form jacobian is defined\n        def jacobian(p):\n            return first_derivative(\n                constraint_func,\n                p,\n                bounds=constraint_bounds,\n                error_handling=\"raise_strict\",\n                **asdict(numdiff_options),\n            ).derivative\n\n    # To define the internal Jacobian we need to know which parameters enter the\n    # contraint function.\n    selection_indices, n_params = _get_selection_indices(params, external_selector)\n\n    def _internal_jacobian(x):\n        \"\"\"Return Jacobian of constraint at internal parameters.\n\n        The constraint function is written to be evaluated on a selection of the\n        external parameters. The optimizer, however, only works on internal parameters.\n        These can be significantly different from the external parameters, due to\n        optimagic's reparametrization features. 
In this function we compute the Jacobian\n        of the constraint at the internal parameters using information on the Jacobian\n        of the constraint at the selected external parameters.\n\n        \"\"\"\n        params = converter.params_from_internal(x)\n        selected = external_selector(params)\n        jac = jacobian(selected)\n        jac_matrix = block_tree_to_matrix(jac, constraint_eval, selected)\n        jac_extended = _extend_jacobian(jac_matrix, selection_indices, n_params)\n        jac_internal = converter.derivative_to_internal(\n            jac_extended, x, jac_is_flat=True\n        )\n        return np.atleast_2d(jac_internal)\n\n    # ==================================================================================\n    # Transform constraint function and derive bounds\n    # ==================================================================================\n    _type = \"eq\" if \"value\" in c else \"ineq\"\n\n    if _type == \"eq\":\n        # ==============================================================================\n        # Equality constraints\n        #\n        # We define the internal constraint function to be satisfied if it is equal\n        # to zero, by subtracting the fixed value.\n\n        _value = np.atleast_1d(np.array(c[\"value\"], dtype=float))\n\n        def internal_constraint_func(x):\n            params = converter.params_from_internal(x)\n            select = external_selector(params)\n            out = np.atleast_1d(constraint_func(select)) - _value\n            return out\n\n        jacobian_from_internal = _internal_jacobian\n        n_constr = _n_constr\n\n    else:\n        # ==============================================================================\n        # Inequality constraints\n        #\n        # We define the internal constraint function to be satisfied if it is\n        # greater than or equal to zero (positivity constraint). 
If the bounds already\n        # satify this condition we do not change anything, otherwise we need to perform\n        # a transformation.\n\n        def _internal_constraint_func(x):\n            params = converter.params_from_internal(x)\n            select = external_selector(params)\n            return np.atleast_1d(constraint_func(select))\n\n        lower_bounds = c.get(\"lower_bounds\", 0)\n        upper_bounds = c.get(\"upper_bounds\", np.inf)\n\n        transformation = _get_transformation(lower_bounds, upper_bounds)\n\n        internal_constraint_func = _compose_funcs(\n            _internal_constraint_func, transformation[\"func\"]\n        )\n\n        jacobian_from_internal = _compose_funcs(\n            _internal_jacobian, transformation[\"derivative\"]\n        )\n\n        n_constr = 2 * _n_constr if transformation[\"name\"] == \"stack\" else _n_constr\n\n    internal_constr = {\n        \"n_constr\": n_constr,\n        \"type\": _type,\n        \"fun\": internal_constraint_func,  # internal name for 'func'\n        \"jac\": jacobian_from_internal,  # internal name for 'derivative'\n        \"tol\": c.get(\"tol\", CONSTRAINTS_ABSOLUTE_TOLERANCE),\n    }\n\n    return internal_constr\n\n\ndef equality_as_inequality_constraints(nonlinear_constraints):\n    \"\"\"Return constraints where equality constraints are converted to inequality.\"\"\"\n    constraints = [_equality_to_inequality(c) for c in nonlinear_constraints]\n    return constraints\n\n\ndef _equality_to_inequality(c):\n    \"\"\"Transform a single constraint.\n\n    An equality constaint g(x) = 0 can be transformed to two inequality constraints\n    using (g(x), -g(x)) >= 0. 
Hence, the number of constraints doubles, and the\n    constraint functions itself as well as the derivative need to be updated.\n\n    \"\"\"\n    if c[\"type\"] == \"eq\":\n\n        def transform(x, func):\n            value = func(x)\n            return np.concatenate((value, -value), axis=0)\n\n        out = {\n            \"fun\": partial(transform, func=c[\"fun\"]),\n            \"jac\": partial(transform, func=c[\"jac\"]),\n            \"n_constr\": 2 * c[\"n_constr\"],\n            \"tol\": c[\"tol\"],\n            \"type\": \"ineq\",\n        }\n    else:\n        out = c\n    return out\n\n\ndef vector_as_list_of_scalar_constraints(nonlinear_constraints):\n    \"\"\"Return constraints where vector constraints are converted to scalar constraints.\n\n    This is necessary for internal optimizers that only support scalar constraints.\n\n    \"\"\"\n    list_of_constraints_lists = [\n        _vector_to_list_of_scalar(c) for c in nonlinear_constraints\n    ]\n    constraints = list(itertools.chain.from_iterable(list_of_constraints_lists))\n    return constraints\n\n\ndef _vector_to_list_of_scalar(constraint):\n    if constraint[\"n_constr\"] > 1:\n        out = []\n        for k in range(constraint[\"n_constr\"]):\n            c = constraint.copy()\n            fun, jac = _get_components(constraint[\"fun\"], constraint[\"jac\"], idx=k)\n            c[\"fun\"] = fun\n            c[\"jac\"] = jac\n            c[\"n_constr\"] = 1\n            out.append(c)\n    else:\n        out = [constraint]\n    return out\n\n\ndef _get_components(fun, jac, idx):\n    \"\"\"Return function and derivative for a single component of a vector function.\n\n    Args:\n        fun (callable): Function that returns a vector.\n        jac (callable): Derivative of the function that returns a matrix.\n        idx (int): Index of the component.\n\n    Returns:\n        callable: Component function at index idx.\n        callable: Jacobian of the component function.\n\n    \"\"\"\n    
fun_component = lambda x: fun(x)[idx]\n    jac_component = lambda x: jac(x)[idx]\n    return fun_component, jac_component\n\n\n# ======================================================================================\n# Helper Functions\n# ======================================================================================\n\n\ndef _process_selector(c):\n    if \"selector\" in c:\n        selector = c[\"selector\"]\n    elif \"loc\" in c:\n\n        def selector(params):\n            return params.loc[c[\"loc\"]]\n\n    elif \"query\" in c:\n\n        def selector(params):\n            return params.query(c[\"query\"])\n\n    else:\n        selector = _identity\n    return selector\n\n\ndef _compose_funcs(f, g):\n    return lambda x: g(f(x))\n\n\ndef _identity(x):\n    return x\n\n\n# ======================================================================================\n# Jacobian helper functions\n# ======================================================================================\n\n\ndef _extend_jacobian(jac_mat, selection_indices, n_params):\n    \"\"\"Extend Jacobian on selected parameters to full params.\n\n    Jacobian of constraints is defined on a selection of the parameters, however, we\n    need the Jacobian on the full params. 
Since the Jacobian is trivially zero at the\n    non-selected params we can simply fill a zero matrix.\n\n    \"\"\"\n    jac_extended = np.zeros((jac_mat.shape[0], n_params))\n    jac_extended[:, selection_indices] = jac_mat\n    return jac_extended\n\n\ndef _get_selection_indices(params, selector):\n    \"\"\"Get index of selected flat params and number of flat params.\"\"\"\n    registry = get_registry(extended=True)\n    flat_params, params_treedef = tree_flatten(params, registry=registry)\n    n_params = len(flat_params)\n    indices = np.arange(n_params, dtype=int)\n    params_indices = tree_unflatten(params_treedef, indices, registry=registry)\n    selected = selector(params_indices)\n    selection_indices = np.array(\n        tree_just_flatten(selected, registry=registry), dtype=int\n    )\n    return selection_indices, n_params\n\n\n# ======================================================================================\n# Transformation helper functions\n# ======================================================================================\n\n\ndef _get_transformation(lower_bounds, upper_bounds):\n    \"\"\"Get transformation given bounds.\n\n    The internal inequality constraint is defined as h(x) >= 0. However, the user can\n    specify: a <= g(x) <= b. 
To get the internal represenation we need to transform the\n    constraint.\n\n    \"\"\"\n    transformation_type = _get_transformation_type(lower_bounds, upper_bounds)\n\n    if transformation_type == \"identity\":\n        transformer = {\"func\": _identity, \"derivative\": _identity}\n    elif transformation_type == \"subtract_lb\":\n        transformer = {\n            \"func\": lambda v: v - lower_bounds,\n            \"derivative\": _identity,\n        }\n    elif transformation_type == \"stack\":\n        transformer = {\n            \"func\": lambda v: np.concatenate(\n                (v - lower_bounds, upper_bounds - v), axis=0\n            ),\n            \"derivative\": lambda v: np.concatenate((v, -v), axis=0),\n        }\n    transformer[\"name\"] = transformation_type\n    return transformer\n\n\ndef _get_transformation_type(lower_bounds, upper_bounds):\n    lb_is_zero = not np.count_nonzero(lower_bounds)\n    ub_is_inf = np.all(np.isposinf(upper_bounds))\n\n    if lb_is_zero and ub_is_inf:\n        # the external constraint is already in the correct format\n        _transformation_type = \"identity\"\n    elif ub_is_inf:\n        # the external constraint can be transformed by subtraction\n        _transformation_type = \"subtract_lb\"\n    else:\n        # the external constraint can only be transformed by duplication (stacking)\n        _transformation_type = \"stack\"\n    return _transformation_type\n\n\n# ======================================================================================\n# Checks\n# ======================================================================================\n\n\ndef _check_validity_and_return_evaluation(c, params, skip_checks):\n    \"\"\"Check that nonlinear constraints are valid.\n\n    Returns:\n        constaint_eval: Evaluation of constraint at params, if skip_checks if False,\n            else None.\n\n    \"\"\"\n    # ==================================================================================\n    
# check functions\n    # ==================================================================================\n\n    if \"func\" not in c:\n        raise InvalidConstraintError(\n            \"Constraint needs to have entry 'fun', representing the constraint \"\n            \"function.\"\n        )\n    if not callable(c[\"func\"]):\n        raise InvalidConstraintError(\n            \"Entry 'fun' in nonlinear constraints has be callable.\"\n        )\n\n    if \"derivative\" in c and not callable(c[\"derivative\"]):\n        raise InvalidConstraintError(\n            \"Entry 'jac' in nonlinear constraints has be callable.\"\n        )\n\n    # ==================================================================================\n    # check bounds\n    # ==================================================================================\n\n    is_equality_constraint = \"value\" in c\n\n    if is_equality_constraint:\n        if \"lower_bounds\" in c or \"upper_bounds\" in c:\n            raise InvalidConstraintError(\n                \"Only one of 'value' or ('lower_bounds', 'upper_bounds') can be \"\n                \"passed to a nonlinear constraint.\"\n            )\n\n    if not is_equality_constraint:\n        if \"lower_bounds\" not in c and \"upper_bounds\" not in c:\n            raise InvalidConstraintError(\n                \"For inequality constraint at least one of ('lower_bounds', \"\n                \"'upper_bounds') has to be passed to the nonlinear constraint.\"\n            )\n\n    if \"lower_bounds\" in c and \"upper_bounds\" in c:\n        if not np.all(np.array(c[\"lower_bounds\"]) <= np.array(c[\"upper_bounds\"])):\n            raise InvalidConstraintError(\n                \"If lower bounds need to less than or equal to upper bounds.\"\n            )\n\n    # ==================================================================================\n    # check selector\n    # 
==================================================================================\n\n    if \"selector\" in c:\n        if not callable(c[\"selector\"]):\n            raise InvalidConstraintError(\n                f\"'selector' entry needs to be callable in constraint {c}.\"\n            )\n        else:\n            try:\n                c[\"selector\"](params)\n            except Exception as e:\n                raise InvalidFunctionError(\n                    \"Error when calling 'selector' function on params in constraint \"\n                    f\" {c}\"\n                ) from e\n\n    elif \"loc\" in c:\n        if not isinstance(params, (pd.Series, pd.DataFrame)):\n            raise InvalidConstraintError(\n                \"params needs to be pd.Series or pd.DataFrame to use 'loc' selector in \"\n                f\"in consrtaint {c}.\"\n            )\n        try:\n            params.loc[c[\"loc\"]]\n        except (KeyError, IndexError) as e:\n            raise InvalidConstraintError(\"'loc' string is invalid.\") from e\n\n    elif \"query\" in c:\n        if not isinstance(params, pd.DataFrame):\n            raise InvalidConstraintError(\n                \"params needs to be pd.DataFrame to use 'query' selector in \"\n                f\"constraints {c}.\"\n            )\n        try:\n            params.query(c[\"query\"])\n        except Exception as e:\n            raise InvalidConstraintError(\n                f\"'query' string is invalid in constraint {c}.\"\n            ) from e\n\n    # ==================================================================================\n    # check that constraints can be evaluated\n    # ==================================================================================\n\n    constraint_eval = None\n\n    if not skip_checks:\n        selector = _process_selector(c)\n\n        try:\n            constraint_eval = c[\"func\"](selector(params))\n        except Exception as e:\n            raise InvalidFunctionError(\n 
               f\"Error when evaluating function of constraint {c}.\"\n            ) from e\n\n    return constraint_eval\n"
  },
  {
    "path": "src/optimagic/parameters/process_constraints.py",
    "content": "\"\"\"Process the user provided pc for use during the optimization.\n\nThe main purpose of this module is to convert the user provided constraints into inputs\nfor fast reparametrization functions. In the process, the constraints are checked and\nconsolidated. Consolidation means that redundant constraints are dropped and other\nconstraints are collected in meaningful bundles.\n\nTo improve readability, the actual code for checking and consolidation are in separate\nmodules.\n\nCalls to functions doing checking are scattered across the module. This is in order to\nperform each check as soon as it becomes possible, which allows errors to be raised at a\npoint where constraints still look similar to what users wrote. However, some checks can\nonly be done after consolidation.\n\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\nfrom optimagic.parameters.check_constraints import (\n    check_constraints_are_satisfied,\n    check_fixes_and_bounds,\n    check_for_incompatible_overlaps,\n    check_types,\n)\nfrom optimagic.parameters.consolidate_constraints import consolidate_constraints\nfrom optimagic.utilities import number_of_triangular_elements_to_dimension\n\n\ndef process_constraints(\n    constraints,\n    params_vec,\n    lower_bounds,\n    upper_bounds,\n    param_names,\n):\n    \"\"\"Process, consolidate and check constraints.\n\n    Args:\n        constraints (list): List of constraints where the fields that select parameters\n            have already been consolidated into an ``\"index\"`` field that selects\n            the same parameters from the flattened_parameter vector.\n        params_vec (np.ndarray): Flattened version of params.\n        lower_bounds (np.ndarray | None): Lower bounds for params_vec.\n        upper_bounds (np.ndarray | None): Upper bounds for params_vec.\n        param_names (list): Names of the flattened parameters. 
Only used to produce\n            good error messages.\n\n    Returns:\n        transformations (list): A processed version of those constraints\n            that entail actual transformations and not just fixing parameters.\n        constr_info (dict): Dict of 1d numpy arrays of length n_params (or None) with\n            information that is needed for the reparametrizations.\n            - lower_bounds: Lower bounds for the internal parameter vector. Those are\n              derived from the original lower bounds and additional bounds implied by\n              other constraints.\n            - upper_bounds: As lower_bounds but for upper bounds.\n            - internal_free: Boolean column that is true for those parameters over\n              which the optimizer will actually optimize.\n            - pre_replacements: The j_th element indicates the position of the internal\n              parameter that has to be copied into the j_th position of the external\n              parameter vector when reparametrizing from_internal, before any\n              transformations are applied. 
Negative if no element has to be copied.\n            - post_replacements: As pre_replacements, but applied after the\n              transformations are done.\n            - internal_fixed_values: Contains transformed versions of the fixed values\n              that will become equal to the external fixed values after the\n              kernel transformations are applied.\n              parameter\n\n    \"\"\"\n    params_vec = params_vec.copy()\n    check_types(constraints)\n\n    constraints = _replace_pairwise_equality_by_equality(constraints)\n    constraints = _process_linear_weights(constraints)\n    check_constraints_are_satisfied(constraints, params_vec, param_names)\n    constraints = _replace_increasing_and_decreasing_by_linear(constraints)\n    # process newly generated linear constraints\n    constraints = _process_linear_weights(constraints)\n\n    transformations, constr_info = consolidate_constraints(\n        constraints=constraints,\n        parvec=params_vec,\n        lower_bounds=lower_bounds,\n        upper_bounds=upper_bounds,\n        param_names=param_names,\n    )\n\n    check_for_incompatible_overlaps(transformations, param_names)\n    check_fixes_and_bounds(constr_info, transformations, param_names)\n\n    is_fixed_to_value = constr_info.pop(\"is_fixed_to_value\")\n    is_fixed_to_other = constr_info.pop(\"is_fixed_to_other\")\n    int_lower, int_upper = _create_internal_bounds(\n        constr_info[\"lower_bounds\"], constr_info[\"upper_bounds\"], transformations\n    )\n    constr_info[\"internal_free\"] = _create_internal_free(\n        is_fixed_to_value=is_fixed_to_value,\n        is_fixed_to_other=is_fixed_to_other,\n        constraints=transformations,\n    )\n    constr_info[\"lower_bounds\"] = int_lower[constr_info[\"internal_free\"]]\n    constr_info[\"upper_bounds\"] = int_upper[constr_info[\"internal_free\"]]\n\n    constr_info[\"pre_replacements\"] = _create_pre_replacements(\n        constr_info[\"internal_free\"]\n    )\n\n   
 constr_info[\"internal_fixed_values\"] = _create_internal_fixed_value(\n        constr_info[\"fixed_values\"], transformations\n    )\n\n    del constr_info[\"fixed_values\"]\n\n    return transformations, constr_info\n\n\ndef _replace_pairwise_equality_by_equality(constraints):\n    \"\"\"Rewrite pairwise equality constraints to equality constraints.\n\n    Args:\n        constraints (list): List of dictionaries where each dictionary is a constraint.\n            It is assumed that the selectors in constraints were already processed.\n\n    Returns:\n        list: List of processed constraints.\n\n    \"\"\"\n    pairwise_constraints = [c for c in constraints if c[\"type\"] == \"pairwise_equality\"]\n    constraints = [c for c in constraints if c[\"type\"] != \"pairwise_equality\"]\n    for constr in pairwise_constraints:\n        equality_constraints = [\n            {\"index\": list(elements), \"type\": \"equality\"}\n            for elements in zip(*constr[\"indices\"], strict=False)\n        ]\n        constraints += equality_constraints\n\n    return constraints\n\n\ndef _process_linear_weights(constraints):\n    \"\"\"Harmonize the weights of linear constraints.\n\n    Args:\n        constraints (list): Constraints where the selectors have already been processed.\n\n    Returns:\n        list: Constraints where all weights are Series.\n\n    \"\"\"\n    processed = []\n    for constr in constraints:\n        if constr[\"type\"] == \"linear\":\n            raw_weights = constr[\"weights\"]\n\n            if isinstance(raw_weights, (np.ndarray, list, tuple, pd.Series)):\n                if len(raw_weights) != len(constr[\"index\"]):\n                    msg = (\n                        f\"weights of length {len(raw_weights)} could not be aligned \"\n                        f\"with selected parameters of length {len(constr['index'])}.\"\n                    )\n                    raise ValueError(msg)\n                weights = np.asarray(raw_weights)\n        
    elif isinstance(raw_weights, (float, int)):\n                weights = np.full(len(constr[\"index\"]), float(raw_weights))\n            else:\n                raise TypeError(f\"Invalid type for linear weights {type(raw_weights)}.\")\n\n            new_constr = constr.copy()\n            weights_sr = pd.Series(weights, index=constr[\"index\"])\n            new_constr[\"weights\"] = weights_sr\n            processed.append(new_constr)\n        else:\n            processed.append(constr)\n\n    return processed\n\n\ndef _replace_increasing_and_decreasing_by_linear(constraints):\n    \"\"\"Write increasing and decreasing constraints as linear constraints.\n\n    Args:\n        constraints (list): Constraints where the selectors have already been processed.\n\n    Returns:\n        list: Processed constraints.\n\n    \"\"\"\n    increasing_ilocs, other_constraints = [], []\n\n    for constr in constraints:\n        if constr[\"type\"] == \"increasing\":\n            increasing_ilocs.append(constr[\"index\"])\n        elif constr[\"type\"] == \"decreasing\":\n            increasing_ilocs.append(constr[\"index\"][::-1])\n        else:\n            other_constraints.append(constr)\n\n    linear_constraints = []\n    for iloc in increasing_ilocs:\n        for smaller, larger in zip(iloc, iloc[1:], strict=False):\n            linear_constr = {\n                \"index\": [smaller, larger],\n                \"type\": \"linear\",\n                \"weights\": np.array([-1, 1]),\n                \"lower_bound\": 0,\n            }\n            linear_constraints.append(linear_constr)\n\n    processed = linear_constraints + other_constraints\n    return processed\n\n\ndef _create_internal_bounds(lower, upper, constraints):\n    \"\"\"Create bounds for the internal parameter vector.\n\n    The resulting arrays have the length of the flat external params and will be reduced\n    later.\n\n    Args:\n        lower (np.ndarray): Processed and consolidated external lower 
bounds.\n        upper (np.ndarray): Processed and consolidated external upper bounds.\n        constraints (pd.DataFrame): Processed and consolidated constraints.\n\n    Returns:\n        int_lower (np.ndarray): Lower bound of internal parameters.\n        int_upper (np.ndarray): Upper bound of internal parameters.\n\n    \"\"\"\n    int_lower, int_upper = lower.copy(), upper.copy()\n\n    for constr in constraints:\n        if constr[\"type\"] in [\"covariance\", \"sdcorr\"]:\n            # Note that the diagonal positions are the same for covariance and sdcorr\n            # because the internal params contains the Cholesky factor of the implied\n            # covariance matrix in both cases.\n            dim = number_of_triangular_elements_to_dimension(len(constr[\"index\"]))\n            diag_positions = [0, *np.cumsum(range(2, dim + 1)).tolist()]\n            diag_indices = np.array(constr[\"index\"])[diag_positions].tolist()\n            bd = constr.get(\"regularization\", 0)\n            bd = np.sqrt(bd) if constr[\"type\"] == \"covariance\" else bd\n            int_lower[diag_indices] = np.maximum(int_lower[diag_indices], bd)\n        elif constr[\"type\"] == \"probability\":\n            int_lower[constr[\"index\"]] = 0\n        elif constr[\"type\"] == \"linear\":\n            int_lower[constr[\"index\"]] = -np.inf\n            int_upper[constr[\"index\"]] = np.inf\n            relevant_index = constr[\"index\"][-len(constr[\"right_hand_side\"]) :]\n            int_lower[relevant_index] = constr[\"right_hand_side\"][\"lower_bound\"]\n            int_upper[relevant_index] = constr[\"right_hand_side\"][\"upper_bound\"]\n        else:\n            raise TypeError(\"Invalid constraint type {}\".format(constr[\"type\"]))\n\n    return int_lower, int_upper\n\n\ndef _create_internal_free(is_fixed_to_value, is_fixed_to_other, constraints):\n    \"\"\"Boolean array that is True for parameters over which the optimizer optimizes.\n\n    Args:\n        
is_fixed_to_value (np.ndarray): boolean array\n        is_fixed_to_other (np.ndarray): boolean array\n\n    Returns:\n        np.ndarray\n\n    \"\"\"\n    int_fixed = is_fixed_to_value | is_fixed_to_other\n\n    for constr in constraints:\n        if constr[\"type\"] == \"probability\":\n            int_fixed[constr[\"index\"][-1]] = True\n        elif constr[\"type\"] == \"linear\":\n            int_fixed[constr[\"index\"]] = False\n            relevant_index = constr[\"index\"][-len(constr[\"right_hand_side\"]) :]\n            int_fixed[relevant_index] = np.isfinite(constr[\"right_hand_side\"][\"value\"])\n\n    int_free = ~int_fixed\n\n    return int_free\n\n\ndef _create_pre_replacements(internal_free):\n    \"\"\"Create an array with internal position of parameters.\n\n    The j_th element indicates the position of the internal parameter that has to be\n    copied into the j_th position of the external parameter vector when reparametrizing\n    from_internal, before any transformations are applied. 
Negative if no element has\n    to be copied.\n\n    This will be used to copy the free internal parameters into a parameter vector\n    that has the same length as all params.\n\n    Args:\n        internal_free (np.ndarray): The _internal_free column of the processed params.\n\n    \"\"\"\n    pre_replacements = np.full(len(internal_free), -1)\n    pre_replacements[internal_free] = np.arange(internal_free.sum())\n\n    return pre_replacements\n\n\ndef _create_internal_fixed_value(fixed_values, constraints):\n    \"\"\"Create an array with the values to which internal parameters are fixed.\n\n    This contains additional fixes used to enforce other constraints and (potentially\n    transformed) user specified fixed values.\n\n    Args:\n        fixed_values (np.ndarray): The (external) _fixed_value column of pp.\n        constraints (list): Processed and consolidated constraints.\n\n    \"\"\"\n    int_fix = fixed_values.copy()\n    for constr in constraints:\n        if constr[\"type\"] == \"probability\":\n            int_fix[constr[\"index\"][-1]] = 1\n        elif constr[\"type\"] in [\"covariance\", \"sdcorr\"]:\n            int_fix[constr[\"index\"][0]] = np.sqrt(int_fix[constr[\"index\"][0]])\n        elif constr[\"type\"] == \"linear\":\n            int_fix[constr[\"index\"]] = np.nan\n            relevant_index = constr[\"index\"][-len(constr[\"right_hand_side\"]) :]\n            int_fix[relevant_index] = constr[\"right_hand_side\"][\"value\"].to_numpy()\n\n    return int_fix\n"
  },
  {
    "path": "src/optimagic/parameters/process_selectors.py",
    "content": "import warnings\nfrom collections import Counter\n\nimport numpy as np\nimport pandas as pd\nfrom pybaum import tree_just_flatten\n\nfrom optimagic.constraints import Constraint\nfrom optimagic.exceptions import InvalidConstraintError\nfrom optimagic.parameters.tree_registry import get_registry\n\n\ndef process_selectors(constraints, params, tree_converter, param_names):\n    \"\"\"Process and harmonize the selector fields of constraints.\n\n    By selector fields we mean loc, locs, query, queries, selector and selectors\n    entries in constraints.\n\n    The processed selector fields are called \"index\" and are integer numpy arrays with\n    positions of parameters in a flattened parameter vector.\n\n    Args:\n        constraints (list): User provided constraints.\n        params (pytree): User provided params.\n        tree_converter (TreeConverter): NamedTuple with methods to convert between\n            flattend and unflattend parameters.\n        param_names (list): Names of flattened parameters. 
Used for error messages.\n\n    Returns:\n        list: List of constraints with additional \"index\" entry.\n\n    \"\"\"\n    # fast path\n    if constraints in (None, []):\n        return []\n\n    if isinstance(constraints, dict):\n        constraints = [constraints]\n\n    registry = get_registry(extended=True)\n    n_params = len(tree_converter.params_flatten(params))\n    helper = tree_converter.params_unflatten(np.arange(n_params))\n    params_case = _get_params_case(params)\n    flat_constraints = []\n    for constr in constraints:\n        selector_case = _get_selector_case(constr)\n        field = _get_selection_field(\n            constraint=constr,\n            selector_case=selector_case,\n            params_case=params_case,\n        )\n        evaluator = _get_selection_evaluator(\n            field=field,\n            constraint=constr,\n            params_case=params_case,\n            registry=registry,\n        )\n        try:\n            with warnings.catch_warnings():\n                warnings.simplefilter(\"ignore\", category=pd.errors.PerformanceWarning)\n                selected = evaluator(helper)\n        except (KeyboardInterrupt, SystemExit):\n            raise\n        except Exception as e:\n            msg = (\n                \"An error occurred when trying to select parameters for the following \"\n                f\"constraint:\\n{constr}\"\n            )\n            raise InvalidConstraintError(msg) from e\n\n        if selector_case == \"one selector\":\n            if np.isscalar(selected):\n                selected = [selected]\n            selected = np.array(selected).astype(int)\n            _fail_if_duplicates(selected, constr, param_names)\n        else:\n            selected = [[sel] if np.isscalar(sel) else sel for sel in selected]\n            _fail_if_selections_are_incompatible(selected, constr)\n            selected = [np.array(sel).astype(int) for sel in selected]\n            for sel in selected:\n               
 _fail_if_duplicates(sel, constr, param_names)\n\n        new_constr = constr.copy()\n        if selector_case == \"one selector\":\n            new_constr[\"index\"] = selected\n        else:\n            new_constr[\"indices\"] = selected\n\n        if selector_case == \"one selector\":\n            if len(new_constr[\"index\"]) > 0:\n                flat_constraints.append(new_constr)\n        else:\n            if len(new_constr[\"indices\"][0]) > 0:\n                flat_constraints.append(new_constr)\n    return flat_constraints\n\n\ndef _get_selection_field(constraint, selector_case, params_case):\n    \"\"\"Get the relevant selection field of a constraint.\"\"\"\n    selector_case = _get_selector_case(constraint)\n\n    valid = {\n        \"multiple selectors\": {\n            \"dataframe\": {\"locs\", \"queries\", \"selectors\"},\n            \"numpy array\": {\"locs\", \"selectors\"},\n            \"pytree\": {\"selectors\"},\n            \"series\": {\"locs\", \"selectors\"},\n        },\n        \"one selector\": {\n            \"dataframe\": {\"loc\", \"query\", \"selector\"},\n            \"numpy array\": {\"loc\", \"selector\"},\n            \"pytree\": {\"selector\"},\n            \"series\": {\"loc\", \"selector\"},\n        },\n    }\n\n    valid = valid[selector_case][params_case]\n\n    present = set(constraint).intersection(valid)\n\n    if not present:\n        msg = (\n            \"No valid parameter selection field in constraint. Valid selection fields \"\n            f\"are {valid}. The constraint is:\\n{constraint}\"\n        )\n        raise InvalidConstraintError(msg)\n    elif len(present) > 1:\n        msg = (\n            f\"Too many parameter selection fields in constraint: {present}. \"\n            \"Constraints must have exactly one parameter selection field. 
The \"\n            f\"constraint was:\\n{constraint}\"\n        )\n        raise InvalidConstraintError(msg)\n\n    field = list(present)[0]\n    return field\n\n\ndef _get_selection_evaluator(field, constraint, params_case, registry):\n    if field == \"selector\":\n\n        def evaluator(params):\n            raw = constraint[\"selector\"](params)\n            flat = tree_just_flatten(raw, registry=registry)\n            return flat\n\n    elif field == \"selectors\":\n\n        def evaluator(params):\n            raw = [sel(params) for sel in constraint[\"selectors\"]]\n            flat = [tree_just_flatten(r, registry=registry) for r in raw]\n            return flat\n\n    elif field == \"loc\":\n        if params_case == \"dataframe\":\n\n            def evaluator(params):\n                return params.loc[constraint[\"loc\"], \"value\"].tolist()\n\n        else:\n\n            def evaluator(params):\n                return params[constraint[\"loc\"]].tolist()\n\n    elif field == \"locs\":\n        if params_case == \"dataframe\":\n\n            def evaluator(params):\n                return [params.loc[lo, \"value\"].tolist() for lo in constraint[\"locs\"]]\n\n        else:\n\n            def evaluator(params):\n                return [params[lo].tolist() for lo in constraint[\"locs\"]]\n\n    elif field == \"query\":\n\n        def evaluator(params):\n            return params.query(constraint[\"query\"])[\"value\"].tolist()\n\n    elif field == \"queries\":\n\n        def evaluator(params):\n            return [params.query(q)[\"value\"].tolist() for q in constraint[\"queries\"]]\n\n    else:\n        raise ValueError(f\"Invalid parameter selection field: {field}\")\n\n    return evaluator\n\n\ndef _get_params_case(params):\n    if isinstance(params, pd.DataFrame) and \"value\" in params:\n        params_case = \"dataframe\"\n    elif isinstance(params, pd.Series):\n        params_case = \"series\"\n    elif isinstance(params, np.ndarray):\n        
params_case = \"numpy array\"\n    else:\n        params_case = \"pytree\"\n    return params_case\n\n\ndef _get_selector_case(constraint):\n    if constraint[\"type\"] == \"pairwise_equality\":\n        selector_case = \"multiple selectors\"\n    else:\n        selector_case = \"one selector\"\n    return selector_case\n\n\ndef _fail_if_duplicates(\n    selected: list[int], constraint: Constraint, param_names: list[str]\n) -> None:\n    duplicates = _find_duplicates(selected)\n    if duplicates:\n        names = [param_names[i] for i in duplicates]\n        msg = (\n            \"Error while processing constraints. There are duplicates in selected \"\n            \"parameters. The parameters that were selected more than once are \"\n            f\"{names}. The problematic constraint is:\\n{constraint}\"\n        )\n        raise InvalidConstraintError(msg)\n\n\ndef _fail_if_selections_are_incompatible(selected, constraint):\n    if len(selected) <= 1:\n        msg = (\n            \"pairwise equality constraints require multiple sets of selected \"\n            \"parameters but there is just one in the following constraint:\\n\"\n            f\"{constraint}\"\n        )\n        raise InvalidConstraintError(msg)\n    lengths = [len(sel) for sel in selected]\n    if len(set(lengths)) != 1:\n        msg = (\n            \"All sets of selected parameters for pairwise equality constraints need \"\n            f\"to have the same length. You have lengths {lengths} in constraint:\\n\"\n            f\"{constraint}\"\n        )\n        raise InvalidConstraintError(msg)\n\n\ndef _find_duplicates(list_):\n    return [item for item, count in Counter(list_).items() if count > 1]\n"
  },
  {
    "path": "src/optimagic/parameters/scale_conversion.py",
    "content": "from dataclasses import dataclass\n\nimport numpy as np\nfrom numpy.typing import NDArray\n\nfrom optimagic.parameters.scaling import ScalingOptions\nfrom optimagic.parameters.space_conversion import InternalParams\n\n\n@dataclass(frozen=True)\nclass ScaleConverter:\n    factor: NDArray[np.float64] | None\n    offset: NDArray[np.float64] | None\n\n    def params_to_internal(self, vec: NDArray[np.float64]) -> NDArray[np.float64]:\n        \"\"\"Scale a parameter vector from external scale to internal one.\"\"\"\n        if self.offset is not None:\n            vec = vec - self.offset\n        if self.factor is not None:\n            vec = vec / self.factor\n        return vec\n\n    def params_from_internal(self, vec: NDArray[np.float64]) -> NDArray[np.float64]:\n        \"\"\"Scale a parameter vector from internal scale to external one.\"\"\"\n        if self.factor is not None:\n            vec = vec * self.factor\n        if self.offset is not None:\n            vec = vec + self.offset\n        return vec\n\n    def derivative_to_internal(\n        self, derivative: NDArray[np.float64]\n    ) -> NDArray[np.float64]:\n        \"\"\"Scale a derivative vector from external scale to internal one.\"\"\"\n        if self.factor is not None:\n            derivative = derivative * self.factor\n        return derivative\n\n    def derivative_from_internal(\n        self, derivative: NDArray[np.float64]\n    ) -> NDArray[np.float64]:\n        \"\"\"Scale a derivative vector from internal scale to external one.\"\"\"\n        if self.factor is not None:\n            derivative = derivative / self.factor\n        return derivative\n\n\ndef get_scale_converter(\n    internal_params: InternalParams,\n    scaling: ScalingOptions | None,\n) -> tuple[ScaleConverter, InternalParams]:\n    \"\"\"Get a converter between scaled and unscaled parameters.\n\n    Args:\n        internal_params: NamedTuple of internal and possibly reparametrized but not yet\n            
scaled parameter values and bounds.\n        scaling: Scaling options. If None, no scaling is performed.\n\n    Returns:\n        ScaleConverter: Dataclass with methods to convert between scaled and unscaled\n            internal parameters and derivatives.\n        InternalParams: Dataclass with internal parameter values and bounds.\n\n    \"\"\"\n    # fast path\n    if scaling is None:\n        return ScaleConverter(factor=None, offset=None), internal_params\n\n    factor, offset = calculate_scaling_factor_and_offset(\n        internal_params=internal_params,\n        options=scaling,\n    )\n\n    converter = ScaleConverter(factor=factor, offset=offset)\n\n    if internal_params.soft_lower_bounds is not None:\n        _soft_lower = converter.params_to_internal(internal_params.soft_lower_bounds)\n    else:\n        _soft_lower = None\n\n    if internal_params.soft_upper_bounds is not None:\n        _soft_upper = converter.params_to_internal(internal_params.soft_upper_bounds)\n    else:\n        _soft_upper = None\n\n    if internal_params.lower_bounds is not None:\n        _lower_bounds = converter.params_to_internal(internal_params.lower_bounds)\n    else:\n        _lower_bounds = None\n\n    if internal_params.upper_bounds is not None:\n        _upper_bounds = converter.params_to_internal(internal_params.upper_bounds)\n    else:\n        _upper_bounds = None\n\n    params = InternalParams(\n        values=converter.params_to_internal(internal_params.values),\n        lower_bounds=_lower_bounds,\n        upper_bounds=_upper_bounds,\n        names=internal_params.names,\n        soft_lower_bounds=_soft_lower,\n        soft_upper_bounds=_soft_upper,\n    )\n\n    return converter, params\n\n\ndef calculate_scaling_factor_and_offset(\n    internal_params: InternalParams,\n    options: ScalingOptions,\n) -> tuple[NDArray[np.float64], NDArray[np.float64] | None]:\n    x = internal_params.values\n    lower_bounds = internal_params.lower_bounds\n    upper_bounds = 
internal_params.upper_bounds\n\n    if options.method == \"start_values\":\n        raw_factor = np.clip(np.abs(x), options.clipping_value, np.inf)\n        scaling_offset = None\n    elif options.method == \"bounds\":\n        if (\n            lower_bounds is None\n            or np.isinf(lower_bounds).any()\n            or upper_bounds is None\n            or np.isinf(upper_bounds).any()\n        ):\n            raise ValueError(\n                \"To use the 'bounds' scaling method, all bounds must be finite.\"\n            )\n        raw_factor = upper_bounds - lower_bounds\n        scaling_offset = lower_bounds\n    else:\n        raise ValueError(f\"Invalid scaling method: {options.method}\")\n\n    scaling_factor = raw_factor / options.magnitude\n\n    return scaling_factor, scaling_offset\n"
  },
  {
    "path": "src/optimagic/parameters/scaling.py",
    "content": "from dataclasses import dataclass\nfrom typing import Literal, TypedDict\n\nfrom typing_extensions import NotRequired\n\nfrom optimagic.exceptions import InvalidScalingError\n\n\n@dataclass(frozen=True)\nclass ScalingOptions:\n    \"\"\"Scaling options in optimization problems.\n\n    Attributes:\n        method: The method used for scaling. Can be \"start_values\" or \"bounds\". Default\n            is \"start_values\".\n        clipping_value: The minimum value to which elements are clipped to avoid\n            division by zero. Must be a positive number. Default is 0.1.\n        magnitude: A factor by which the scaled parameters are multiplied to adjust\n            their magnitude. Must be a positive number. Default is 1.0.\n\n    Raises:\n        InvalidScalingError: If scaling options cannot be processed, e.g. because they\n            do not have the correct type.\n\n    \"\"\"\n\n    method: Literal[\"start_values\", \"bounds\"] = \"start_values\"\n    clipping_value: float = 0.1\n    magnitude: float = 1.0\n\n    def __post_init__(self) -> None:\n        _validate_attribute_types_and_values(self)\n\n\nclass ScalingOptionsDict(TypedDict):\n    method: NotRequired[Literal[\"start_values\", \"bounds\"]]\n    clipping_value: NotRequired[float]\n    magnitude: NotRequired[float]\n\n\ndef pre_process_scaling(\n    scaling: bool | ScalingOptions | ScalingOptionsDict | None,\n) -> ScalingOptions | None:\n    \"\"\"Convert all valid types of scaling options to optimagic.ScalingOptions.\n\n    This just harmonizes multiple ways of specifying scaling options into a single\n    format. It performs runtime type checks, but it does not check whether scaling\n    options are consistent with other option choices.\n\n    Args:\n        scaling: The user provided scaling options.\n\n    Returns:\n        The scaling options in the optimagic format.\n\n    Raises:\n        InvalidScalingError: If scaling options cannot be processed, e.g. 
because they\n            do not have the correct type.\n\n    \"\"\"\n    if isinstance(scaling, bool):\n        scaling = ScalingOptions() if scaling else None\n    elif isinstance(scaling, ScalingOptions) or scaling is None:\n        pass\n    else:\n        try:\n            scaling = ScalingOptions(**scaling)\n        except (KeyboardInterrupt, SystemExit):\n            raise\n        except Exception as e:\n            if isinstance(e, InvalidScalingError):\n                raise e\n            raise InvalidScalingError(\n                f\"Invalid scaling options of type: {type(scaling)}. Scaling options \"\n                \"must be of type optimagic.ScalingOptions, a dictionary with a subset \"\n                \"of the keys {'method', 'clipping_value', 'magnitude'}, None, or a \"\n                \"boolean.\"\n            ) from e\n\n    return scaling\n\n\ndef _validate_attribute_types_and_values(options: ScalingOptions) -> None:\n    if options.method not in (\"start_values\", \"bounds\"):\n        raise InvalidScalingError(\n            f\"Invalid scaling method: {options.method}. Valid methods are \"\n            \"'start_values' and 'bounds'.\"\n        )\n\n    if (\n        not isinstance(options.clipping_value, int | float)\n        or options.clipping_value <= 0\n    ):\n        raise InvalidScalingError(\n            f\"Invalid clipping value: {options.clipping_value}. Clipping value \"\n            \"must be a positive number.\"\n        )\n\n    if not isinstance(options.magnitude, int | float) or options.magnitude <= 0:\n        raise InvalidScalingError(\n            f\"Invalid scaling magnitude: {options.magnitude}. Scaling magnitude \"\n            \"must be a positive number.\"\n        )\n"
  },
  {
    "path": "src/optimagic/parameters/space_conversion.py",
    "content": "\"\"\"Handle constraints by reparametrizations.\n\nThe functions in this module allow converting between internal and external parameter\nvectors.\n\nAn external parameter vector is a possibly flattened version of the parameter vector as\nit was specified by the user. This external parameter vector might be subject to\nconstraints, such as the condition that the first two parameters are equal.\n\nAn internal parameter vector is an internal representation of the parameters in a\ndifferent space. The internal parameters are meaningless and have no direct\ninterpretation. However, the internal parameter vector has two important properties:\n1. It is only subject to box constraints\n2. `reparametrize_from_internal(internal_parameter)` always produces a valid external\nparameter vector (i.e. one that fulfills all constraints).\n\nFor more background see :ref:`implementation_of_constraints`.\n\nThe reparametrization from internal can be broken down into three separate steps:\n\n- Writing values from the internal parameter vector into an array that is as long as the\n  external parameters and contains NaNs or values to which parameters have been fixed.\n  We call this step `pre_replace`.\n- Transforming slices of the resulting vector with kernel transformations. Note that\n  this step does not change the length. All kernel transformations have as many input\n  as output parameters and are invertible. We call this step `transformation`. The\n  resulting vector might still contain NaNs.\n- Fill the NaNs by duplicating values of the transformed parameter vector. 
We call this\n  step `post_replace`.\n\nIn the following, let n_external be the length of the external parameter vector and\nn_internal the length of the internal parameter vector.\n\n\"\"\"\n\nfrom dataclasses import dataclass\nfrom functools import partial\nfrom typing import Callable\n\nimport numpy as np\n\nimport optimagic.parameters.kernel_transformations as kt\nfrom optimagic.parameters.process_constraints import process_constraints\n\n\ndef get_space_converter(\n    internal_params,\n    internal_constraints,\n):\n    \"\"\"Get functions to convert between in-/external space of params and derivatives.\n\n    In the internal parameter space the optimization problem is unconstrained except\n    for bounds.\n\n    Args:\n        internal_params (InternalParams): Dataclass with internal parameter values and\n            bounds.\n        internal_constraints (list): List of constraints with processed selector fields.\n\n    Returns:\n        SpaceConverter: The space converter.\n        InternalParams: Dataclass with entries:\n            - value (np.ndarray): Internal parameter values.\n            - lower_bounds (np.ndarray | None): Lower bounds on the internal params.\n            - upper_bounds (np.ndarray | None): Upper bounds on the internal params.\n            - soft_lower_bounds (np.ndarray | None): Soft lower bounds on the internal\n              params.\n            - soft_upper_bounds (np.ndarray | None): Soft upper bounds on the internal\n              params.\n            - name (list): List of names of the external parameters.\n            - free_mask (np.ndarray): Boolean mask representing which external parameter\n              is free.\n\n    \"\"\"\n    transformations, constr_info = process_constraints(\n        constraints=internal_constraints,\n        params_vec=internal_params.values,\n        lower_bounds=internal_params.lower_bounds,\n        upper_bounds=internal_params.upper_bounds,\n        param_names=internal_params.names,\n    )\n   
 _params_to_internal = partial(\n        reparametrize_to_internal,\n        internal_free=constr_info[\"internal_free\"],\n        transformations=transformations,\n    )\n\n    _params_from_internal = partial(\n        reparametrize_from_internal,\n        fixed_values=constr_info[\"internal_fixed_values\"],\n        pre_replacements=constr_info[\"pre_replacements\"],\n        transformations=transformations,\n        post_replacements=constr_info[\"post_replacements\"],\n    )\n\n    _dim_internal = int(constr_info[\"internal_free\"].sum())\n\n    _pre_replace_jac = pre_replace_jacobian(\n        pre_replacements=constr_info[\"pre_replacements\"], dim_in=_dim_internal\n    )\n\n    _post_replace_jac = post_replace_jacobian(\n        post_replacements=constr_info[\"post_replacements\"]\n    )\n\n    _derivative_to_internal = partial(\n        convert_external_derivative_to_internal,\n        fixed_values=constr_info[\"internal_fixed_values\"],\n        pre_replacements=constr_info[\"pre_replacements\"],\n        transformations=transformations,\n        pre_replace_jac=_pre_replace_jac,\n        post_replace_jac=_post_replace_jac,\n    )\n\n    _has_transforming_constraints = bool(transformations)\n\n    converter = SpaceConverter(\n        params_to_internal=_params_to_internal,\n        params_from_internal=_params_from_internal,\n        derivative_to_internal=_derivative_to_internal,\n        has_transforming_constraints=_has_transforming_constraints,\n    )\n\n    free_mask = constr_info[\"internal_free\"]\n    if (\n        internal_params.soft_lower_bounds is not None\n        and not _has_transforming_constraints\n    ):\n        _soft_lower = internal_params.soft_lower_bounds[free_mask]\n    else:\n        _soft_lower = None\n\n    if (\n        internal_params.soft_upper_bounds is not None\n        and not _has_transforming_constraints\n    ):\n        _soft_upper = internal_params.soft_upper_bounds[free_mask]\n    else:\n        _soft_upper = None\n\n  
  params = InternalParams(\n        values=converter.params_to_internal(internal_params.values),\n        lower_bounds=constr_info[\"lower_bounds\"],\n        upper_bounds=constr_info[\"upper_bounds\"],\n        names=internal_params.names,\n        free_mask=free_mask,\n        soft_lower_bounds=_soft_lower,\n        soft_upper_bounds=_soft_upper,\n    )\n    return converter, params\n\n\n@dataclass(frozen=True)\nclass SpaceConverter:\n    params_to_internal: Callable\n    params_from_internal: Callable\n    derivative_to_internal: Callable\n    has_transforming_constraints: bool\n\n\ndef reparametrize_to_internal(\n    external,\n    internal_free,\n    transformations,\n):\n    \"\"\"Convert a params DataFrame into a numpy array of internal parameters.\n\n    Args:\n        external (np.ndarray or pandas.DataFrame): 1d array of external parameter\n            values or params DataFrame.\n        internal_free (np.ndarray): 1d array of length n_external that determines\n            which parameters are free.\n        transformations (list): Processed transforming constraints.\n\n    Returns:\n        internal_params (numpy.ndarray): 1d numpy array of free reparametrized\n            parameters.\n\n    \"\"\"\n    with_internal_values = external.copy()\n\n    for constr in transformations:\n        func = getattr(kt, f\"{constr['type']}_to_internal\")\n\n        with_internal_values[constr[\"index\"]] = func(external[constr[\"index\"]], constr)\n\n    internal = with_internal_values[internal_free]\n\n    return internal\n\n\ndef reparametrize_from_internal(\n    internal,\n    fixed_values,\n    pre_replacements,\n    transformations,\n    post_replacements,\n):\n    \"\"\"Convert a numpy array of internal parameters to a params DataFrame.\n\n    Args:\n        internal (numpy.ndarray): 1d numpy array with internal parameters\n        fixed_values (numpy.ndarray): 1d numpy array of length n_external. 
It contains\n            NaN for parameters that are not fixed and an internal representation of the\n            value to which a parameter has been fixed for all others.\n        pre_replacements (numpy.ndarray): 1d numpy of length n_external. The i_th\n            element in array contains the position of the internal parameter that has to\n            be copied to the i_th position of the external parameter vector or -1 if no\n            value has to be copied.\n        transformations (list): Processed transforming constraints.\n        post_replacements (numpy.ndarray): 1d numpy array of lenth n_external. The i_th\n            element contains the position a parameter in the transformed parameter\n            vector that has to be copied to duplicated and copied to the i_th position\n            of the external parameter vector.\n\n    Returns:\n        numpy.ndarray: Array with external parameters\n\n    \"\"\"\n    # do pre-replacements\n    external_values = pre_replace(internal, fixed_values, pre_replacements)\n\n    # do transformations\n    for constr in transformations:\n        func = getattr(kt, f\"{constr['type']}_from_internal\")\n        external_values[constr[\"index\"]] = func(\n            external_values[constr[\"index\"]], constr\n        )\n\n    # do post-replacements\n    external_values = post_replace(external_values, post_replacements)\n\n    return external_values\n\n\ndef convert_external_derivative_to_internal(\n    external_derivative,\n    internal_values,\n    fixed_values,\n    pre_replacements,\n    transformations,\n    post_replacements=None,\n    pre_replace_jac=None,\n    post_replace_jac=None,\n):\n    r\"\"\"Compute the derivative of the criterion utilizing an external derivative.\n\n    Denote by :math:`c` the criterion function which is evaluated on the full\n    parameter set. 
Denote by :math:`g` the paramater transform which maps an\n    internal to an external paramter, i.e :math:`g: x \\mapsto g(x)`, with\n    :math:`x` denoting the internal paramter vector and :math:`g(x)` the\n    respective external parameter frame. We are interested in the derivative of\n    the composition :math:`f := c \\circ g` which maps an internal vector to the\n    criterion value. The derivative can be computed using the chain rule, as\n\n    .. math::\n        \\frac{\\mathrm{d}f}{\\mathrm{d}x}(x) =\n            \\frac{\\mathrm{d}c}{\\mathrm{d}g}(g(x)) \\times\n            \\frac{\\mathrm{d}g}{\\mathrm{d}x}(x)\n\n    We assume that the user provides the first part of the above product. The\n    second part denotes the derivative of the parameter transform from inner\n    to external.\n\n    Args:\n        external_derivative (numpy.ndarray): The external derivative evaluated at\n            external values mapped from ``internal_values``.\n        internal_values (numpy.ndarray): 1d numpy array with internal parameters\n        fixed_values (numpy.ndarray): 1d numpy array of length n_external. It contains\n            NaN for parameters that are not fixed and an internal representation of the\n            value to which a parameter has been fixed for all others.\n        pre_replacements (numpy.ndarray): 1d numpy of length n_external. The i_th\n            element in array contains the position of the internal parameter that has to\n            be copied to the i_th position of the external parameter vector or -1 if no\n            value has to be copied.\n        transformations (list): Processed transforming constraints.\n        post_replacements (numpy.ndarray): 1d numpy array of lenth n_external. 
The i_th\n            element contains the position a parameter in the transformed parameter\n            vector that has to be copied to duplicated and copied to the i_th position\n            of the external parameter vector.\n        pre_replace_jac (np.ndarray): 2d Array with the jacobian of pre_replace\n        post_replacment_jacobian (np.ndarray): 2d Array with the jacobian post_replace\n\n    Returns:\n        deriv (numpy.ndarray): The gradient or Jacobian.\n\n    \"\"\"\n    dim_in = len(internal_values)\n\n    pre_replaced = pre_replace(internal_values, fixed_values, pre_replacements)\n\n    if post_replacements is None and post_replace_jac is None:\n        raise ValueError(\n            \"either post_replacements or post_replace_jac must be specified.\"\n        )\n\n    if pre_replace_jac is None:\n        pre_replace_jac = pre_replace_jacobian(pre_replacements, dim_in)\n\n    if post_replace_jac is None:\n        post_replace_jac = post_replace_jacobian(post_replacements)\n\n    transform_jac = transformation_jacobian(transformations, pre_replaced)\n\n    external_derivative = np.atleast_2d(external_derivative)\n    tall_external = external_derivative.shape[0] > external_derivative.shape[1]\n\n    mat_list = [\n        external_derivative,\n        post_replace_jac,\n        transform_jac,\n        pre_replace_jac,\n    ]\n\n    if tall_external:\n        deriv = _multiply_from_right(mat_list)\n    else:\n        deriv = _multiply_from_left(mat_list)\n\n    # return gradient with shape (len(params),)\n    if deriv.shape[0] == 1:\n        deriv = deriv.flatten()\n    return deriv\n\n\ndef _multiply_from_left(mat_list):\n    \"\"\"Multiply all matrices in the list, starting from the left.\n\n    Note that this only affects the order in which the pairwise multiplications happen,\n    not the actual result.\n\n    \"\"\"\n    out = mat_list[0]\n    for mat in mat_list[1:]:\n        out = out @ mat\n    return out\n\n\ndef 
_multiply_from_right(mat_list):\n    \"\"\"Multiply all matrices in the list, starting from the right.\n\n    Note that this only affects the order in which the pairwise multiplications happen,\n    not the actual result.\n\n    \"\"\"\n    out = mat_list[-1]\n    for mat in reversed(mat_list[:-1]):\n        out = mat @ out\n    return out\n\n\ndef pre_replace(internal_values, fixed_values, pre_replacements):\n    \"\"\"Return pre-replaced parameters.\n\n    Args:\n        internal (numpy.ndarray): 1d numpy array with internal parameter.\n        fixed_values (numpy.ndarray): 1d numpy array of length n_external. It contains\n            NaN for parameters that are not fixed and an internal representation of the\n            value to which a parameter has been fixed for all others.\n        pre_replacements (numpy.ndarray): 1d numpy of length n_external. The i_th\n            element in array contains the position of the internal parameter that has to\n            be copied to the i_th position of the external parameter vector or -1 if no\n            value has to be copied.\n\n    Returns:\n        pre_replaced (numpy.ndarray): 1d numpy array with pre-replaced params.\n\n\n    Examples:\n        >>> internal_values = np.array([1., 2.])\n        >>> fixed_values = np.array([np.nan, 0, np.nan])\n        >>> pre_replacements = np.array([1, -1, 0])\n        >>> pre_replace(internal_values, fixed_values, pre_replacements)\n        array([2., 0., 1.])\n\n    \"\"\"\n    pre_replaced = fixed_values.copy()\n\n    mask = pre_replacements >= 0\n    positions = pre_replacements[mask]\n    pre_replaced[mask] = internal_values[positions]\n    return pre_replaced\n\n\ndef pre_replace_jacobian(pre_replacements, dim_in):\n    \"\"\"Return Jacobian of pre-replacement step.\n\n    Remark. The function ``pre_replace`` can have ``np.nan`` in its output. 
In\n    this case we know from the underlying structure that the derivative of this\n    output with respect to any of the inputs is zero. Here we use this additional\n    knowledge; however, when the derivative is computed using a numerical\n    differentiation technique this will not be the case. Thus the numerical\n    derivative can differ from the derivative here in these cases.\n\n    Args:\n        pre_replacements (numpy.ndarray): 1d numpy of length n_external. The i_th\n            element in array contains the position of the internal parameter that has to\n            be copied to the i_th position of the external parameter vector or -1 if no\n            value has to be copied.\n        dim_in (int): Dimension of the internal parameters.\n\n    Returns:\n        jacobian (np.ndarray): The jacobian.\n\n    Examples:\n        >>> # Note: The example is the same as in the doctest of pre_replace\n        >>> pre_replacements = np.array([1, -1, 0])\n        >>> pre_replace_jacobian(pre_replacements, 2)\n        array([[0., 1.],\n               [0., 0.],\n               [1., 0.]])\n\n    \"\"\"\n    dim_out = len(pre_replacements)\n    mask = pre_replacements >= 0\n    position_in = pre_replacements[mask]\n    position_out = np.arange(dim_out)[mask]\n\n    jacobian = np.zeros((dim_out, dim_in))\n    jacobian[position_out, position_in] = 1\n    return jacobian\n\n\ndef transformation_jacobian(transformations, pre_replaced):\n    \"\"\"Return Jacobian of constraint transformation step.\n\n    The Jacobian of the constraint transformation step is build as a block matrix\n    of either identity matrices, in the case when the external parameter equals\n    the internal parameter, or, of the Jacobians of the specific kernel transforms,\n    in case the external paramater is a transformed version of the internal.\n\n    Args:\n        transformations (list): Processed transforming constraints.\n        pre_replaced (numpy.ndarray): 1d numpy array with pre-replaced 
params.\n        dim (int): The dimension of the external parameters.\n\n    Returns:\n        jacobian (numpy.ndarray): The Jacobian.\n\n    \"\"\"\n    dim = len(pre_replaced)\n    jacobian = np.eye(dim)\n\n    for constr in transformations:\n        block_indices = constr[\"index\"]\n        jacobian_func = getattr(kt, f\"{constr['type']}_from_internal_jacobian\")\n        jac = jacobian_func(pre_replaced[block_indices], constr)\n        jacobian[np.ix_(block_indices, block_indices)] = jac\n\n    return jacobian\n\n\ndef post_replace(external_values, post_replacements):\n    \"\"\"Return post-replaed parameters.\n\n    Args:\n        external_values (numpy.ndarray): 1d numpy array of external params.\n        post_replacements (numpy.ndarray): 1d numpy array of lenth n_external. The i_th\n            element contains the position a parameter in the transformed parameter\n            vector that has to be copied to duplicated and copied to the i_th position\n            of the external parameter vector.\n\n    Returns:\n        post_replaced (numpy.ndarray): 1d numpy array with post-replaced params.\n\n    Examples:\n        >>> external_values = np.array([3., 4., np.nan])\n        >>> post_replacements = np.array([-1, -1, 1])\n        >>> post_replace(external_values, post_replacements)\n        array([3., 4., 4.])\n\n    \"\"\"\n    post_replaced = external_values.copy()\n\n    mask = post_replacements >= 0\n    positions = post_replacements[mask]\n    post_replaced[mask] = post_replaced[positions]\n    return post_replaced\n\n\ndef post_replace_jacobian(post_replacements):\n    \"\"\"Return Jacobian of post-replacement step.\n\n    Args:\n        post_replacements (numpy.ndarray): 1d numpy array of lenth n_external. 
The i_th\n            element contains the position a parameter in the transformed parameter\n            vector that has to be copied to duplicated and copied to the i_th position\n            of the external parameter vector.\n        dim (int): The dimension of the external parameters.\n\n    Returns:\n        jacobian (np.ndarray): The Jacobian.\n\n    Examples:\n        >>> # Note: the example is the same as in the doctest of post_replace\n        >>> post_replacements = np.array([-1, -1, 1])\n        >>> post_replace_jacobian(post_replacements)\n        array([[1., 0., 0.],\n               [0., 1., 0.],\n               [0., 1., 0.]])\n\n    \"\"\"\n    dim = len(post_replacements)\n    mask = post_replacements >= 0\n    positions_in = post_replacements[mask]\n    positions_out = np.arange(dim)[mask]\n\n    jacobian = np.eye(dim)\n    jacobian[positions_out, :] *= 0\n    jacobian[positions_out, positions_in] = 1\n    return jacobian\n\n\n@dataclass(frozen=True)\nclass InternalParams:\n    values: np.ndarray\n    lower_bounds: np.ndarray | None\n    upper_bounds: np.ndarray | None\n    soft_lower_bounds: np.ndarray | None = None\n    soft_upper_bounds: np.ndarray | None = None\n    names: list | None = None\n    free_mask: np.ndarray | None = None\n"
  },
  {
    "path": "src/optimagic/parameters/tree_conversion.py",
    "content": "from typing import Callable, NamedTuple\n\nimport numpy as np\nfrom pybaum import leaf_names, tree_flatten, tree_just_flatten, tree_unflatten\n\nfrom optimagic.exceptions import InvalidFunctionError\nfrom optimagic.parameters.block_trees import block_tree_to_matrix\nfrom optimagic.parameters.bounds import get_internal_bounds\nfrom optimagic.parameters.tree_registry import get_registry\nfrom optimagic.typing import AggregationLevel\n\n\ndef get_tree_converter(\n    params,\n    bounds,\n    func_eval,\n    solver_type,\n    derivative_eval=None,\n    add_soft_bounds=False,\n):\n    \"\"\"Get flatten and unflatten functions for criterion and its derivative.\n\n    The function creates a converter with methods to convert parameters, derivatives\n    and the output of the criterion function between the user provided pytree structure\n    and flat representations.\n\n    The main motivation for bundling all of this together (as opposed to handling\n    parameters, derivatives and function outputs separately) is that the derivative\n    conversion needs to know about the structure of params and the criterion output.\n\n    Args:\n        params (pytree): The user provided parameters.\n        lower_bounds (pytree): The user provided lower_bounds\n        upper_bounds (pytree): The user provided upper bounds\n        solver_type: Used to determine how derivative output has to be\n            transformed for the optimizer.\n        derivative_eval (dict, pytree or None): Evaluation of the derivative of\n            func at params. 
Used for consistency checks.\n        soft_lower_bounds (pytree): As lower_bounds\n        soft_upper_bounds (pytree): As upper_bounds\n        add_soft_bounds (bool): Whether soft bounds should be added to the flat_params\n\n    Returns:\n        TreeConverter: NamedTuple with flatten and unflatten methods.\n        FlatParams: NamedTuple of 1d arrays with flattened bounds and param names.\n\n    \"\"\"\n    _registry = get_registry(extended=True)\n    _params_vec, _params_treedef = tree_flatten(params, registry=_registry)\n    _params_vec = np.array(_params_vec).astype(float)\n    _lower, _upper = get_internal_bounds(\n        params=params,\n        bounds=bounds,\n        registry=_registry,\n    )\n\n    if add_soft_bounds:\n        _soft_lower, _soft_upper = get_internal_bounds(\n            params=params,\n            bounds=bounds,\n            registry=_registry,\n            add_soft_bounds=add_soft_bounds,\n        )\n    else:\n        _soft_lower, _soft_upper = None, None\n\n    _param_names = leaf_names(params, registry=_registry)\n\n    flat_params = FlatParams(\n        values=_params_vec,\n        lower_bounds=_lower,\n        upper_bounds=_upper,\n        names=_param_names,\n        soft_lower_bounds=_soft_lower,\n        soft_upper_bounds=_soft_upper,\n    )\n\n    _params_flatten = _get_params_flatten(registry=_registry)\n    _params_unflatten = _get_params_unflatten(\n        registry=_registry, treedef=_params_treedef\n    )\n\n    _derivative_flatten = _get_derivative_flatten(\n        registry=_registry,\n        solver_type=solver_type,\n        params=params,\n        func_eval=func_eval,\n        derivative_eval=derivative_eval,\n    )\n\n    converter = TreeConverter(\n        params_flatten=_params_flatten,\n        params_unflatten=_params_unflatten,\n        derivative_flatten=_derivative_flatten,\n    )\n\n    return converter, flat_params\n\n\ndef _get_params_flatten(registry):\n    def params_flatten(params):\n        return 
np.array(tree_just_flatten(params, registry=registry)).astype(float)\n\n    return params_flatten\n\n\ndef _get_params_unflatten(registry, treedef):\n    def params_unflatten(x):\n        return tree_unflatten(treedef=treedef, leaves=list(x), registry=registry)\n\n    return params_unflatten\n\n\ndef _get_best_key_and_aggregator(needed_key, available_keys):\n    if needed_key in available_keys:\n        key = needed_key\n        if needed_key == \"value\":\n            aggregate = lambda x: float(x[0])\n        else:\n            aggregate = lambda x: np.array(x).astype(float)\n    elif needed_key == \"contributions\" and \"root_contributions\" in available_keys:\n        key = \"root_contributions\"\n        aggregate = lambda x: np.array(x).astype(float) ** 2\n    elif needed_key == \"value\" and \"contributions\" in available_keys:\n        key = \"contributions\"\n        aggregate = lambda x: float(np.sum(x))\n    elif needed_key == \"value\" and \"root_contributions\" in available_keys:\n        key = \"root_contributions\"\n        aggregate = lambda x: float((np.array(x) ** 2).sum())\n    else:\n        msg = (\n            \"The optimizer you requested requires a criterion function that returns \"\n            f\"a dictionary with the entry '{needed_key}'. 
Your function returns a \"\n            f\"dictionary that only contains the entries {available_keys}.\"\n        )\n        raise InvalidFunctionError(msg)\n\n    return key, aggregate\n\n\ndef _get_derivative_flatten(registry, solver_type, params, func_eval, derivative_eval):\n    # gradient case\n    if solver_type == AggregationLevel.SCALAR:\n\n        def derivative_flatten(derivative_eval):\n            flat = np.array(\n                tree_just_flatten(derivative_eval, registry=registry)\n            ).astype(float)\n            return flat\n\n    # jacobian case\n    else:\n\n        def derivative_flatten(derivative_eval):\n            flat = block_tree_to_matrix(\n                derivative_eval,\n                outer_tree=func_eval,\n                inner_tree=params,\n            )\n            return flat\n\n    if derivative_eval is not None:\n        try:\n            derivative_flatten(derivative_eval)\n        except (KeyboardInterrupt, SystemExit):\n            raise\n        except Exception as e:\n            msg = \"The output of derivative and criterion cannot be aligned.\"\n            raise InvalidFunctionError(msg) from e\n\n    return derivative_flatten\n\n\nclass TreeConverter(NamedTuple):\n    params_flatten: Callable\n    params_unflatten: Callable\n    derivative_flatten: Callable\n\n\nclass FlatParams(NamedTuple):\n    values: np.ndarray\n    lower_bounds: np.ndarray | None\n    upper_bounds: np.ndarray | None\n    soft_lower_bounds: np.ndarray | None = None\n    soft_upper_bounds: np.ndarray | None = None\n    names: list | None = None\n    free_mask: np.ndarray | None = None\n"
  },
  {
    "path": "src/optimagic/parameters/tree_registry.py",
    "content": "\"\"\"Wrapper around pybaum get_registry to tailor it to optimagic.\"\"\"\n\nfrom functools import partial\nfrom itertools import product\n\nimport numpy as np\nimport pandas as pd\nfrom pybaum import get_registry as get_pybaum_registry\n\n\ndef get_registry(extended=False, data_col=\"value\"):\n    \"\"\"Return pytree registry.\n\n    Special Rules\n    -------------\n    If extended is True the registry contains pd.DataFrame. In optimagic a data frame\n    can represent a 1d object with extra information, instead of a 2d object. This is\n    only allowed for params data frames, in which case they contain a 'value' column.\n    The extra information of such an object can be accessed using the data_col argument.\n    By default the 'value' column is extracted. If data_col is not 'value' but the data\n    frame contains a 'value' column, a list of np.nan is returned.\n\n    Args:\n        extended (bool): If True appends types 'numpy.ndarray', 'pandas.Series' and\n            'pandas.DataFrame' to the registry.\n        data_col (str): This column is used as the data source in a data frame when\n            flattening and unflattening a pytree. 
Defaults to 'value'; see special rules\n            above for behavior with non-default values.\n\n    Returns:\n        dict: The pytree registry.\n\n    \"\"\"\n    types = (\n        [\"numpy.ndarray\", \"pandas.Series\", \"jax.numpy.ndarray\"] if extended else None\n    )\n    registry = get_pybaum_registry(types=types)\n    if extended:\n        registry[pd.DataFrame] = {\n            \"flatten\": partial(_flatten_df, data_col=data_col),\n            \"unflatten\": partial(_unflatten_df, data_col=data_col),\n            \"names\": _get_df_names,\n        }\n    return registry\n\n\ndef _flatten_df(df, data_col):\n    is_value_df = \"value\" in df\n    if is_value_df:\n        flat = df.get(data_col, default=np.full(len(df), np.nan)).tolist()\n    else:\n        flat = df.to_numpy().flatten().tolist()\n\n    aux_data = {\n        \"is_value_df\": is_value_df,\n        \"df\": df,\n    }\n    return flat, aux_data\n\n\ndef _unflatten_df(aux_data, leaves, data_col):\n    if aux_data[\"is_value_df\"]:\n        out = aux_data[\"df\"].assign(**{data_col: leaves})\n    else:\n        out = pd.DataFrame(\n            data=np.array(leaves).reshape(aux_data[\"df\"].shape),\n            columns=aux_data[\"df\"].columns,\n            index=aux_data[\"df\"].index,\n        )\n    return out\n\n\ndef _get_df_names(df):\n    index_strings = list(df.index.map(_index_element_to_string))\n    if \"value\" in df:\n        out = index_strings\n    else:\n        out = [\"_\".join([loc, col]) for loc, col in product(index_strings, df.columns)]\n\n    return out\n\n\ndef _index_element_to_string(element):\n    if isinstance(element, (tuple, list)):\n        as_strings = [str(entry) for entry in element]\n        res_string = \"_\".join(as_strings)\n    else:\n        res_string = str(element)\n\n    return res_string\n"
  },
  {
    "path": "src/optimagic/py.typed",
    "content": ""
  },
  {
    "path": "src/optimagic/sandbox.py",
    "content": "from optimagic.visualization.slice_plot_3d import slice_plot_3d\n\n__all__ = [\"slice_plot_3d\"]\n"
  },
  {
    "path": "src/optimagic/shared/__init__.py",
    "content": ""
  },
  {
    "path": "src/optimagic/shared/check_option_dicts.py",
    "content": "\"\"\"Check option dictionaries for minimize, maximize.\"\"\"\n\n\ndef check_optimization_options(options, usage, algorithm_mandatory=True):\n    \"\"\"Check optimize_options or maximize_options for usage in estimation functions.\"\"\"\n    options = {} if options is None else options\n\n    if algorithm_mandatory:\n        if not isinstance(options, dict) or \"algorithm\" not in options:\n            raise ValueError(\n                \"optimize_options or maximize_options must be a dict containing at \"\n                \"least the entry 'algorithm'\"\n            )\n    else:\n        if not isinstance(options, dict):\n            raise ValueError(\n                \"optimize_options or maximize_options must be a dict or None.\"\n            )\n\n    criterion_options = {\n        \"criterion\",\n        \"criterion_kwargs\",\n        \"derivative\",\n        \"derivative_kwargs\",\n    }\n\n    invalid_criterion = criterion_options.intersection(options)\n    if invalid_criterion:\n        msg = (\n            \"Entries related to the criterion function, its derivatives or keyword \"\n            \"arguments of those functions are not valid entries of optimize_options \"\n            f\"or maximize_options for {usage}. Remove: {invalid_criterion}\"\n        )\n        raise ValueError(msg)\n\n    general_options = {\"logging\", \"log_options\", \"constraints\"}\n\n    invalid_general = general_options.intersection(options)\n\n    if invalid_general:\n        msg = (\n            \"The following are not valid entries of optimize_options because they are \"\n            \"not only relevant for minimization but also for inference: \"\n            f\"{invalid_general}\"\n        )\n        raise ValueError(msg)\n"
  },
  {
    "path": "src/optimagic/shared/compat.py",
    "content": "\"\"\"Compatibility module.\n\nContains wrapper functions to handle compatibility issues between different versions of\nexternal libraries.\n\n\"\"\"\n\n\ndef pd_df_map(df, func, na_action=None, **kwargs):\n    \"\"\"Apply a function to a Dataframe elementwise.\n\n    pandas has depricated the .applymap() function with version 2.1.0. This function\n    calls either .map() (if pandas version is greater or equal to 2.1.0) or .applymap()\n    (if pandas version is smaller than 2.1.0).\n\n    Args:\n        df (pd.DataFrame): A pandas DataFrame.\n        func (callable): Python function, returns a single value from a single value.\n        na_action (str): If 'ignore', propagate NaN values, without passing them to\n            func. If None, pass NaN values to func. Default is None.\n        **kwargs: Additional keyword arguments to pass as keywords arguments to func.\n\n    Returns:\n        pd.DataFrame: Transformed DataFrame.\n\n    \"\"\"\n    return df.map(func, na_action=na_action, **kwargs)\n"
  },
  {
    "path": "src/optimagic/shared/process_user_function.py",
    "content": "\"\"\"Process user provided functions.\"\"\"\n\nimport inspect\nfrom functools import partial, update_wrapper\n\nfrom optimagic.exceptions import InvalidFunctionError, InvalidKwargsError\nfrom optimagic.optimization.fun_value import (\n    LeastSquaresFunctionValue,\n    LikelihoodFunctionValue,\n    ScalarFunctionValue,\n)\nfrom optimagic.typing import AggregationLevel\nfrom optimagic.utilities import propose_alternatives\n\n\ndef partial_func_of_params(func, kwargs, name=\"your function\", skip_checks=False):\n    # fast path\n    if skip_checks and kwargs in (None, {}):\n        return func\n\n    kept, ignored = filter_kwargs(func, kwargs)\n\n    if ignored:\n        possibilities = [p for p in inspect.signature(func).parameters if p != \"params\"]\n        proposals = [propose_alternatives(arg, possibilities, 1)[0] for arg in ignored]\n\n        msg = (\n            \"The following user provided keyword arguments are not compatible with \"\n            f\"{name}:\\n\\n\"\n        )\n        for arg, prop in zip(ignored, proposals, strict=False):\n            msg += f\"{arg}: Did you mean {prop}?\"\n\n        raise InvalidKwargsError(msg)\n\n    # update_wrapper preserves static fields that might have been added to the function\n    # via mark decorators.\n    out = update_wrapper(partial(func, **kept), func)\n\n    if not skip_checks:\n        unpartialled_args = get_unpartialled_arguments(out)\n        no_default_args = get_arguments_without_default(out)\n\n        no_free_argument_left = len(unpartialled_args) < 1\n\n        if no_free_argument_left and kept:\n            raise InvalidKwargsError(\n                f\"Too many keyword arguments for {name}. 
After applying all keyword \"\n                \"arguments there must be at least one free argument (the params) left.\"\n            )\n        elif no_free_argument_left:\n            raise InvalidFunctionError(f\"{name} must have at least one free argument.\")\n\n        required_args = unpartialled_args.intersection(no_default_args)\n        too_many_required_arguments = len(required_args) > 1\n\n        # Try to discover if we have a jax calculated jacobian that has a weird\n        # signature that would not pass this test:\n        skip_because_of_jax = required_args == {\"args\", \"kwargs\"}\n\n        if too_many_required_arguments and not skip_because_of_jax:\n            raise InvalidKwargsError(\n                f\"Too few keyword arguments for {name}. After applying all keyword \"\n                \"arguments at most one required argument (the params) should remain. \"\n                \"in your case the following required arguments remain: \"\n                f\"{required_args}.\"\n            )\n\n    return out\n\n\ndef filter_kwargs(func, kwargs):\n    valid = get_unpartialled_arguments(func)\n\n    kept = {key: val for key, val in kwargs.items() if key in valid}\n\n    ignored = {key: val for key, val in kwargs.items() if key not in valid}\n\n    return kept, ignored\n\n\ndef get_unpartialled_arguments(func):\n    unpartialled = set(inspect.signature(func).parameters)\n\n    if isinstance(func, partial):\n        partialed_in = set(func.keywords)\n        unpartialled = unpartialled - partialed_in\n\n    return unpartialled\n\n\ndef get_arguments_without_default(func):\n    args = dict(inspect.signature(func).parameters)\n    no_default = []\n    for name, arg in args.items():\n        if not hasattr(arg.default, \"__len__\"):\n            if arg.default == inspect.Parameter.empty:\n                no_default.append(name)\n\n    no_default = set(no_default)\n    return no_default\n\n\ndef get_kwargs_from_args(args, func, offset=0):\n    
\"\"\"Convert positional arguments to a dict of keyword arguments.\n\n    Args:\n        args (list, tuple): Positional arguments.\n        func (callable): Function to be called.\n        offset (int, optional): Number of arguments to skip. Defaults to 0.\n\n    Returns:\n        dict: Keyword arguments.\n\n    \"\"\"\n    names = list(inspect.signature(func).parameters)[offset:]\n    kwargs = {name: arg for name, arg in zip(names, args, strict=False)}\n    return kwargs\n\n\ndef infer_aggregation_level(func):\n    \"\"\"Infer the problem type from type hints or attributes left by mark decorators.\n\n    The problem type is either inferred from a `._problem_type` attribute or from type\n    hints. If neither is present, we assume the problem type is scalar. This assumption\n    is motivated by compatibility with the `scipy.optimize` interface.\n\n    \"\"\"\n    return_type = inspect.signature(func).return_annotation\n    if hasattr(func, \"_problem_type\"):\n        out = func._problem_type\n    elif return_type in (ScalarFunctionValue, float):\n        out = AggregationLevel.SCALAR\n    elif return_type == LeastSquaresFunctionValue:\n        out = AggregationLevel.LEAST_SQUARES\n    elif return_type == LikelihoodFunctionValue:\n        out = AggregationLevel.LIKELIHOOD\n    else:\n        out = AggregationLevel.SCALAR\n    return out\n"
  },
  {
    "path": "src/optimagic/timing.py",
    "content": "from dataclasses import dataclass\nfrom typing import Callable, Iterable\n\n\n@dataclass(frozen=True)\nclass CostModel:\n    fun: float | None\n    jac: float | None\n    fun_and_jac: float | None\n    label: str\n    aggregate_batch_time: Callable[[Iterable[float]], float]\n\n    def __post_init__(self) -> None:\n        if not callable(self.aggregate_batch_time):\n            raise ValueError(\n                \"aggregate_batch_time must be a callable, got \"\n                f\"{self.aggregate_batch_time}\"\n            )\n\n\nevaluation_time = CostModel(\n    fun=None,\n    jac=None,\n    fun_and_jac=None,\n    label=\"Function time (seconds)\",\n    aggregate_batch_time=sum,\n)\n\nfun_evaluations = CostModel(\n    fun=1,\n    jac=0,\n    fun_and_jac=1,\n    label=\"Number of criterion evaluations\",\n    aggregate_batch_time=sum,\n)\n\nfun_batches = CostModel(\n    fun=1, jac=0, fun_and_jac=1, label=\"Number of batches\", aggregate_batch_time=max\n)\n\nwall_time = \"wall_time\"\n\n\nTIMING_REGISTRY = {\n    \"evaluation_time\": evaluation_time,\n    \"fun_evaluations\": fun_evaluations,\n    \"fun_batches\": fun_batches,\n    \"wall_time\": wall_time,\n}\n"
  },
  {
    "path": "src/optimagic/type_conversion.py",
    "content": "from typing import Any\n\nfrom optimagic.typing import (\n    GtOneFloat,\n    NonNegativeFloat,\n    NonNegativeInt,\n    PositiveFloat,\n    PositiveInt,\n)\n\n\ndef _process_float_like(value: Any) -> float:\n    \"\"\"Process a value that should be converted to a float.\"\"\"\n    return float(value)\n\n\ndef _process_int_like(value: Any) -> int:\n    \"\"\"Process a value that should be converted to an int.\"\"\"\n    if isinstance(value, int):\n        return value\n    elif isinstance(value, str):\n        return int(float(value))\n    else:\n        return int(value)\n\n\ndef _process_positive_int_like(value: Any) -> PositiveInt:\n    \"\"\"Process a value that should be converted to a positive int.\"\"\"\n    out = _process_int_like(value)\n    if out <= 0:\n        raise ValueError(f\"Value must be positive, got {out}\")\n    return out\n\n\ndef _process_non_negative_int_like(value: Any) -> NonNegativeInt:\n    \"\"\"Process a value that should be converted to a non-negative int.\"\"\"\n    out = _process_int_like(value)\n    if out < 0:\n        raise ValueError(f\"Value must be non-negative, got {out}\")\n    return out\n\n\ndef _process_positive_float_like(value: Any) -> PositiveFloat:\n    \"\"\"Process a value that should be converted to a positive float.\"\"\"\n    out = _process_float_like(value)\n    if out <= 0:\n        raise ValueError(f\"Value must be positive, got {out}\")\n    return out\n\n\ndef _process_non_negative_float_like(value: Any) -> NonNegativeFloat:\n    \"\"\"Process a value that should be converted to a non-negative float.\"\"\"\n    out = _process_float_like(value)\n    if out < 0:\n        raise ValueError(f\"Value must be non-negative, got {out}\")\n    return out\n\n\ndef _process_gt_one_float_like(value: Any) -> GtOneFloat:\n    \"\"\"Process a value that should be converted to a float greater than one.\"\"\"\n    out = _process_float_like(value)\n    if out <= 1:\n        raise ValueError(f\"Value must be 
greater than one, got {out}\")\n    return out\n\n\ndef _process_bool_like(value: Any) -> bool:\n    \"\"\"Process a value that should be converted to a bool.\"\"\"\n    if isinstance(value, bool):\n        return value\n    elif isinstance(value, str):\n        if value.lower() in {\"true\", \"1\", \"yes\"}:\n            return True\n        elif value.lower() in {\"false\", \"0\", \"no\"}:\n            return False\n\n    return bool(value)\n\n\nTYPE_CONVERTERS = {\n    float: _process_float_like,\n    int: _process_int_like,\n    bool: _process_bool_like,\n    PositiveInt: _process_positive_int_like,\n    NonNegativeInt: _process_non_negative_int_like,\n    PositiveFloat: _process_positive_float_like,\n    NonNegativeFloat: _process_non_negative_float_like,\n    GtOneFloat: _process_gt_one_float_like,\n}\n"
  },
  {
    "path": "src/optimagic/typing.py",
    "content": "from dataclasses import dataclass, fields\nfrom enum import Enum\nfrom typing import (\n    Annotated,\n    Any,\n    Callable,\n    ItemsView,\n    Iterator,\n    KeysView,\n    Literal,\n    Protocol,\n    TypeVar,\n    ValuesView,\n)\n\nimport numpy as np\nfrom annotated_types import Ge, Gt, Le, Lt\nfrom numpy._typing import NDArray\n\nPyTree = Any\nPyTreeRegistry = dict[type | str, dict[str, Callable[[Any], Any]]]\nScalar = Any\n\nT = TypeVar(\"T\")\n\n\nclass AggregationLevel(Enum):\n    \"\"\"Enum to specify the aggregation level of objective functions and solvers.\"\"\"\n\n    SCALAR = \"scalar\"\n    LEAST_SQUARES = \"least_squares\"\n    LIKELIHOOD = \"likelihood\"\n\n\nclass Direction(str, Enum):\n    \"\"\"Enum to specify the direction of optimization.\"\"\"\n\n    MINIMIZE = \"minimize\"\n    MAXIMIZE = \"maximize\"\n\n\n@dataclass(frozen=True)\nclass DictLikeAccess:\n    r\"\"\"Useful base class for replacing string-based dictionaries with dataclass\n    instances and keeping backward compatability regarding read access to the data\n    structure.\n    \"\"\"\n\n    def __getitem__(self, key: str) -> Any:\n        if key in self.__dict__:\n            return getattr(self, key)\n        else:\n            raise KeyError(f\"{key} not found in {self.__class__.__name__}\")\n\n    def __iter__(self) -> Iterator[str]:\n        return iter(self._dict_repr())\n\n    def _dict_repr(self) -> dict[str, Any]:\n        return {field.name: getattr(self, field.name) for field in fields(self)}\n\n    def keys(self) -> KeysView[str]:\n        return self._dict_repr().keys()\n\n    def items(self) -> ItemsView[str, Any]:\n        return self._dict_repr().items()\n\n    def values(self) -> ValuesView[str]:\n        return self._dict_repr().values()\n\n\n@dataclass(frozen=True)\nclass TupleLikeAccess:\n    r\"\"\"Useful base class for replacing tuples with dataclass instances and keeping\n    backward compatability regarding read access to the data 
structure.\n    \"\"\"\n\n    def __getitem__(self, index: int | slice) -> Any:\n        field_values = [getattr(self, field.name) for field in fields(self)]\n        return field_values[index]\n\n    def __len__(self) -> int:\n        return len(fields(self))\n\n    def __iter__(self) -> Iterator[str]:\n        for field in fields(self):\n            yield getattr(self, field.name)\n\n\nclass ErrorHandling(Enum):\n    \"\"\"Enum to specify the error handling strategy of the optimization algorithm.\"\"\"\n\n    RAISE = \"raise\"\n    RAISE_STRICT = \"raise_strict\"\n    CONTINUE = \"continue\"\n\n\nclass EvalTask(Enum):\n    \"\"\"Enum to specify the task of the evaluation function.\"\"\"\n\n    FUN = \"fun\"\n    JAC = \"jac\"\n    FUN_AND_JAC = \"fun_and_jac\"\n    EXPLORATION = \"exploration\"\n\n\nclass BatchEvaluator(Protocol):\n    def __call__(\n        self,\n        func: Callable[..., T],\n        arguments: list[Any],\n        n_cores: int = 1,\n        error_handling: ErrorHandling\n        | Literal[\"raise\", \"continue\"] = ErrorHandling.CONTINUE,\n        unpack_symbol: Literal[\"*\", \"**\"] | None = None,\n    ) -> list[T]:\n        pass\n\n\nPositiveInt = Annotated[int, Gt(0)]\n\"\"\"Type alias for positive integers (greater than 0).\"\"\"\nNonNegativeInt = Annotated[int, Ge(0)]\n\"\"\"Type alias for non-negative integers (greater than or equal to 0).\"\"\"\nPositiveFloat = Annotated[float, Gt(0)]\n\"\"\"Type alias for positive floats (greater than 0).\"\"\"\nNonNegativeFloat = Annotated[float, Ge(0)]\n\"\"\"Type alias for non-negative floats (greater than or equal to 0).\"\"\"\nProbabilityFloat = Annotated[float, Ge(0), Le(1)]\n\"\"\"Type alias for probability floats (between 0 and 1, inclusive).\"\"\"\nNegativeFloat = Annotated[float, Lt(0)]\n\"\"\"Type alias for negative floats (less than 0).\"\"\"\nGtOneFloat = Annotated[float, Gt(1)]\n\"\"\"Type alias for floats greater than 1.\"\"\"\nUnitIntervalFloat = Annotated[float, Gt(0), 
Le(1)]\n\"\"\"Type alias for floats in (0, 1].\"\"\"\nYesNoBool = Literal[\"yes\", \"no\"] | bool\n\"\"\"Type alias for boolean values represented as 'yes' or 'no' strings or as boolean\nvalues.\"\"\"\nDirectionLiteral = Literal[\"minimize\", \"maximize\"]\n\"\"\"Type alias for optimization direction, either 'minimize' or 'maximize'.\"\"\"\nBatchEvaluatorLiteral = Literal[\"joblib\", \"pathos\", \"threading\"]\n\"\"\"Type alias for batch evaluator types, can be 'joblib', 'pathos', or 'threading'.\"\"\"\nErrorHandlingLiteral = Literal[\"raise\", \"continue\"]\n\"\"\"Type alias for error handling strategies, can be 'raise' or 'continue'.\"\"\"\n\n\n@dataclass(frozen=True)\nclass IterationHistory(DictLikeAccess):\n    \"\"\"History of iterations in a process.\n\n    Attributes:\n        params: A list of parameters used in each iteration.\n        fun: A list of criterion values obtained in each iteration.\n        time: A list or array of runtimes associated with each iteration.\n\n    \"\"\"\n\n    params: list[PyTree]\n    fun: list[float]\n    time: list[float] | NDArray[np.float64]\n\n\n@dataclass(frozen=True)\nclass MultiStartIterationHistory(TupleLikeAccess):\n    \"\"\"History of multiple start iterations.\n\n    Attributes:\n        history: The main iteration history, representing the best end value.\n        local_histories: Optional, a list of local iteration histories.\n        exploration: Optional, iteration history for exploration steps.\n\n    \"\"\"\n\n    history: IterationHistory\n    local_histories: list[IterationHistory] | None = None\n    exploration: IterationHistory | None = None\n"
  },
  {
    "path": "src/optimagic/utilities.py",
    "content": "import difflib\nimport warnings\nfrom hashlib import sha1\n\nimport cloudpickle\nimport numpy as np\nimport pandas as pd\nfrom numpy.typing import NDArray\nfrom scipy.linalg import ldl, qr\n\nwith warnings.catch_warnings():\n    warnings.simplefilter(\"ignore\", category=UserWarning)\n\n\ndef fast_numpy_full(length: int, fill_value: float) -> NDArray[np.float64]:\n    \"\"\"Return a new array of given length, filled with fill_value.\n\n    Empirically, this is faster than using np.full for small arrays.\n\n    \"\"\"\n    if length < 18:\n        return np.array([fill_value] * length, dtype=np.float64)\n    else:\n        return np.full(length, fill_value=fill_value, dtype=np.float64)\n\n\ndef chol_params_to_lower_triangular_matrix(params):\n    dim = number_of_triangular_elements_to_dimension(len(params))\n    mat = np.zeros((dim, dim))\n    mat[np.tril_indices(dim)] = params\n    return mat\n\n\ndef cov_params_to_matrix(cov_params):\n    \"\"\"Build covariance matrix from 1d array with its lower triangular elements.\n\n    Args:\n        cov_params (np.array): 1d array with the lower triangular elements of a\n            covariance matrix (in C-order)\n\n    Returns:\n        cov (np.array): a covariance matrix\n\n    \"\"\"\n    lower = chol_params_to_lower_triangular_matrix(cov_params)\n    cov = lower + np.tril(lower, k=-1).T\n    return cov\n\n\ndef cov_matrix_to_params(cov):\n    return cov[np.tril_indices(len(cov))]\n\n\ndef sdcorr_params_to_sds_and_corr(sdcorr_params):\n    dim = number_of_triangular_elements_to_dimension(len(sdcorr_params))\n    sds = np.array(sdcorr_params[:dim])\n    corr = np.eye(dim)\n    corr[np.tril_indices(dim, k=-1)] = sdcorr_params[dim:]\n    corr += np.tril(corr, k=-1).T\n    return sds, corr\n\n\ndef sds_and_corr_to_cov(sds, corr):\n    diag = np.diag(sds)\n    return diag @ corr @ diag\n\n\ndef cov_to_sds_and_corr(cov):\n    sds = np.sqrt(np.diagonal(cov))\n    diag = np.diag(1 / sds)\n    corr = diag @ cov @ 
diag\n    return sds, corr\n\n\ndef sdcorr_params_to_matrix(sdcorr_params):\n    \"\"\"Build covariance matrix out of standard deviations and correlations.\n\n    Args:\n        sdcorr_params (np.array): 1d array with parameters. The dimensions of the\n            covariance matrix are inferred automatically. The first dim parameters\n            are assumed to be the standard deviations. The remainder are the lower\n            triangular elements (excluding the diagonal) of a correlation matrix.\n\n    Returns:\n        cov (np.array): a covariance matrix\n\n    \"\"\"\n    return sds_and_corr_to_cov(*sdcorr_params_to_sds_and_corr(sdcorr_params))\n\n\ndef cov_matrix_to_sdcorr_params(cov):\n    dim = len(cov)\n    sds, corr = cov_to_sds_and_corr(cov)\n    correlations = corr[np.tril_indices(dim, k=-1)]\n    return np.hstack([sds, correlations])\n\n\ndef number_of_triangular_elements_to_dimension(num):\n    \"\"\"Calculate the dimension of a square matrix from number of triangular elements.\n\n    Args:\n        num (int): The number of upper or lower triangular elements in the matrix.\n\n    Examples:\n        >>> number_of_triangular_elements_to_dimension(6)\n        3\n        >>> number_of_triangular_elements_to_dimension(10)\n        4\n\n    \"\"\"\n    return int(np.sqrt(8 * num + 1) / 2 - 0.5)\n\n\ndef dimension_to_number_of_triangular_elements(dim):\n    \"\"\"Calculate number of triangular elements from the dimension of a square matrix.\n\n    Args:\n        dim (int): Dimension of a square matrix.\n\n    \"\"\"\n    return int(dim * (dim + 1) / 2)\n\n\ndef propose_alternatives(requested, possibilities, number=3):\n    \"\"\"Propose possible alternatives based on similarity to requested.\n\n    Args:\n        requested (str): The algorithm requested by the user.\n        possibilities (list(str)): List of available algorithms.\n        number (int): Number of proposals.\n\n    Returns:\n        proposals (list(str)): List of proposed 
algorithms.\n\n    Example:\n        >>> possibilities = [\"scipy_lbfgsb\", \"scipy_slsqp\", \"nlopt_lbfgsb\"]\n        >>> propose_alternatives(\"scipy_L-BFGS-B\", possibilities, number=1)\n        ['scipy_slsqp']\n        >>> propose_alternatives(\"L-BFGS-B\", possibilities, number=2)\n        ['scipy_slsqp', 'scipy_lbfgsb']\n\n    \"\"\"\n    number = min(number, len(possibilities))\n    with warnings.catch_warnings():\n        warnings.simplefilter(\"ignore\", category=UserWarning)\n        proposals = difflib.get_close_matches(\n            requested, possibilities, n=number, cutoff=0\n        )\n\n    return proposals\n\n\ndef robust_cholesky(matrix, threshold=None, return_info=False):\n    \"\"\"Lower triangular cholesky factor of *matrix*.\n\n    Args:\n        matrix (np.array): Square, symmetric and (almost) positive semi-definite matrix\n        threshold (float): Small negative number. Diagonal elements of D from the LDL\n            decomposition between threshold and zero are set to zero. Default is\n            minus machine accuracy.\n        return_info (bool): If True, also return a dictionary with 'method'. 
Method can\n            take the values 'np.linalg.cholesky' and 'LDL cholesky'.\n\n    Returns:\n        chol (np.array): Cholesky factor of matrix\n        info (dict, optional): see return_info.\n\n    Raises:\n        np.linalg.LinAlgError if an eigenvalue of *matrix* is below *threshold*.\n\n    In contrast to a regular cholesky decomposition, this function will also\n    work for matrices that are only positive semi-definite or even indefinite.\n    For speed and precision reasons we first try a regular cholesky decomposition.\n    If it fails we switch to more robust methods.\n\n    \"\"\"\n    try:\n        chol = np.linalg.cholesky(matrix)\n        method = \"np.linalg.cholesky\"\n    except np.linalg.LinAlgError:\n        method = \"LDL cholesky\"\n        threshold = threshold if threshold is not None else -np.finfo(float).eps\n        chol = _internal_robust_cholesky(matrix, threshold)\n\n    chol_unique = _make_cholesky_unique(chol)\n    info = {\"method\": method}\n\n    out = (chol_unique, info) if return_info else chol_unique\n    return out\n\n\ndef robust_inverse(matrix, msg=\"\"):\n    \"\"\"Calculate the inverse or pseudo-inverse of a matrix.\n\n    The difference to calling a pseudo inverse directly is that this function will\n    emit a warning if the matrix is singular.\n\n    Args:\n        matrix (np.ndarray)\n\n    \"\"\"\n    header = (\n        \"Standard matrix inversion failed due to LinAlgError described below. \"\n        \"A pseudo inverse was calculated instead. 
\"\n    )\n    if len(matrix.shape) != 2 or matrix.shape[0] != matrix.shape[1]:\n        raise ValueError(\"Matrix must be square.\")\n    try:\n        out = np.linalg.inv(matrix)\n    except np.linalg.LinAlgError:\n        out = np.linalg.pinv(matrix)\n        warnings.warn(header + msg)\n    except Exception:\n        raise\n\n    return out\n\n\ndef _internal_robust_cholesky(matrix, threshold):\n    \"\"\"Lower triangular cholesky factor of *matrix* using an LDL decomposition and QR\n    factorization.\n\n    Args:\n        matrix (np.array): Square, symmetric and (almost) positive semi-definite matrix\n        threshold (float): Small negative number. Diagonal elements of D from the LDL\n            decomposition between threshold and zero are set to zero. Default is\n            minus machine accuracy.\n\n    Returns:\n        chol (np.array): Cholesky factor of matrix.\n\n    Raises:\n        np.linalg.LinalgError if diagonal entry in D from LDL decomposition is below\n        *threshold*.\n\n    \"\"\"\n    lu, d, _ = ldl(matrix)\n\n    diags = np.diagonal(d).copy()\n\n    for i in range(len(diags)):\n        if diags[i] >= 0:\n            diags[i] = np.sqrt(diags[i])\n        elif diags[i] > threshold:\n            diags[i] = 0\n        else:\n            raise np.linalg.LinAlgError(\n                \"Diagonal entry below threshold in D from LDL decomposition.\"\n            )\n\n    candidate = lu * diags.reshape(1, len(diags))\n\n    is_triangular = (candidate[np.triu_indices(len(matrix), k=1)] == 0).all()\n\n    if is_triangular:\n        chol = candidate\n    else:\n        _, r = qr(candidate.T)\n        chol = r.T\n\n    return chol\n\n\ndef _make_cholesky_unique(chol):\n    \"\"\"Make a lower triangular cholesky factor unique.\n\n    Cholesky factors are only unique with the additional requirement that all diagonal\n    elements are positive. 
This is done automatically by np.linalg.cholesky.\n    Since we calculate cholesky factors by QR decompositions we have to do it manually.\n    It is obvious that this is admissible because:\n    chol sign_switcher sign_switcher.T chol.T = chol chol.T\n\n    \"\"\"\n    sign_switcher = np.sign(np.diagonal(chol))\n    return chol * sign_switcher\n\n\ndef hash_array(arr):\n    \"\"\"Create a hashsum for fast comparison of numpy arrays.\"\"\"\n    # make sure array can be represented exactly in floating point numbers\n    arr = 1 + arr - 1\n    return sha1(arr.tobytes()).hexdigest()\n\n\ndef calculate_trustregion_initial_radius(x):\n    r\"\"\"Calculate the initial trust region radius.\n\n    It is calculated as :math:`0.1\\\\max(|x|_{\\\\infty}, 1)`.\n\n    Args:\n        x (np.ndarray): the start parameter values.\n\n    Returns:\n        trust_radius (float): initial trust radius\n\n    \"\"\"\n    x_norm = np.linalg.norm(x, ord=np.inf)\n    return 0.1 * max(x_norm, 1)\n\n\ndef to_pickle(obj, path):\n    with open(path, \"wb\") as buffer:\n        cloudpickle.dump(obj, buffer)\n\n\ndef read_pickle(path):\n    return pd.read_pickle(path)\n\n\ndef isscalar(element):\n    \"\"\"Jax aware replacement for np.isscalar.\"\"\"\n    if np.isscalar(element):\n        return True\n    # call anything a scalar that says it has 0 dimensions\n    return getattr(element, \"ndim\", -1) == 0\n\n\ndef get_rng(seed):\n    \"\"\"Construct a random number generator.\n\n    seed (Union[None, int, numpy.random.Generator]): If seed is None or int the\n        numpy.random.default_rng is used seeded with seed. 
If seed is already a\n        Generator instance then that instance is used.\n\n    Returns:\n        numpy.random.Generator: The random number generator.\n\n    \"\"\"\n    if isinstance(seed, np.random.Generator):\n        rng = seed\n    elif seed is None or isinstance(seed, int):\n        rng = np.random.default_rng(seed)\n    else:\n        raise TypeError(\"seed type must be in {None, int, numpy.random.Generator}.\")\n    return rng\n\n\ndef list_of_dicts_to_dict_of_lists(list_of_dicts):\n    \"\"\"Convert a list of dicts to a dict of lists.\n\n    Args:\n        list_of_dicts (list): List of dictionaries. All dictionaries have the same keys.\n\n    Returns:\n        dict\n\n    Examples:\n        >>> list_of_dicts_to_dict_of_lists([{\"a\": 1, \"b\": 2}, {\"a\": 3, \"b\": 4}])\n        {'a': [1, 3], 'b': [2, 4]}\n\n    \"\"\"\n    return {k: [dic[k] for dic in list_of_dicts] for k in list_of_dicts[0]}\n\n\ndef dict_of_lists_to_list_of_dicts(dict_of_lists):\n    \"\"\"Convert a dict of lists to a list of dicts.\n\n    Args:\n        dict_of_lists (dict): Dictionary of lists where all lists have the same length.\n\n    Returns:\n        list\n\n    Examples:\n        >>> dict_of_lists_to_list_of_dicts({'a': [1, 3], 'b': [2, 4]})\n        [{'a': 1, 'b': 2}, {'a': 3, 'b': 4}]\n\n    \"\"\"\n    return [\n        dict(zip(dict_of_lists, t, strict=False))\n        for t in zip(*dict_of_lists.values(), strict=False)\n    ]\n"
  },
  {
    "path": "src/optimagic/visualization/__init__.py",
    "content": ""
  },
  {
    "path": "src/optimagic/visualization/backends.py",
    "content": "import itertools\nfrom typing import TYPE_CHECKING, Any, Literal, Protocol, overload, runtime_checkable\n\nimport numpy as np\nimport plotly.graph_objects as go\n\nfrom optimagic.config import (\n    IS_ALTAIR_INSTALLED,\n    IS_BOKEH_INSTALLED,\n    IS_MATPLOTLIB_INSTALLED,\n)\nfrom optimagic.exceptions import InvalidPlottingBackendError, NotInstalledError\nfrom optimagic.visualization.plotting_utilities import LineData, MarkerData\n\nif TYPE_CHECKING:\n    import altair as alt\n    import bokeh\n    import matplotlib.pyplot as plt\n\n\n@runtime_checkable\nclass LinePlotFunction(Protocol):\n    def __call__(\n        self,\n        lines: list[LineData],\n        *,\n        title: str | None,\n        xlabel: str | None,\n        xrange: tuple[float, float] | None,\n        ylabel: str | None,\n        yrange: tuple[float, float] | None,\n        template: str | None,\n        height: int | None,\n        width: int | None,\n        legend_properties: dict[str, Any] | None,\n        margin_properties: dict[str, Any] | None,\n        horizontal_line: float | None,\n        marker: MarkerData | None,\n        subplot: Any | None = None,\n    ) -> Any:\n        \"\"\"Protocol of the line_plot function used for type checking.\n\n        Args:\n            ...: All other argument descriptions can be found in the docstring of the\n                `line_plot` function.\n            subplot: The subplot to which the lines should be plotted. The type of this\n                argument depends on the backend used. 
If not provided, a new figure is\n                created.\n\n        \"\"\"\n        ...\n\n\n@runtime_checkable\nclass GridLinePlotFunction(Protocol):\n    def __call__(\n        self,\n        lines_list: list[list[LineData]],\n        *,\n        n_rows: int,\n        n_cols: int,\n        titles: list[str] | None,\n        xlabels: list[str] | None,\n        xrange: tuple[float, float] | None,\n        share_x: bool,\n        ylabels: list[str] | None,\n        yrange: tuple[float, float] | None,\n        share_y: bool,\n        template: str | None,\n        height: int | None,\n        width: int | None,\n        legend_properties: dict[str, Any] | None,\n        margin_properties: dict[str, Any] | None,\n        plot_title: str | None,\n        marker_list: list[MarkerData] | None,\n        make_subplot_kwargs: dict[str, Any] | None = None,\n    ) -> Any:\n        \"\"\"Protocol of the grid_line_plot function used for type checking.\n\n        Args:\n            ...: All other argument descriptions can be found in the docstring of the\n                `grid_line_plot` function.\n\n        \"\"\"\n        ...\n\n\ndef _line_plot_plotly(\n    lines: list[LineData],\n    *,\n    title: str | None,\n    xlabel: str | None,\n    xrange: tuple[float, float] | None,\n    ylabel: str | None,\n    yrange: tuple[float, float] | None,\n    template: str | None,\n    height: int | None,\n    width: int | None,\n    legend_properties: dict[str, Any] | None,\n    margin_properties: dict[str, Any] | None,\n    horizontal_line: float | None,\n    marker: MarkerData | None,\n    subplot: tuple[go.Figure, int, int] | None = None,\n) -> go.Figure:\n    \"\"\"Create a line plot using Plotly.\n\n    Args:\n        ...: All other argument descriptions can be found in the docstring of the\n            `line_plot` function.\n        subplot: A tuple specifying the subplot to which the lines should be plotted.\n            The tuple contains the Plotly `Figure` object, the row 
index, and the column\n            index of the subplot. If not provided, a new `Figure` object is created.\n\n    Returns:\n        A Plotly Figure object.\n\n    \"\"\"\n    if template is None:\n        template = \"simple_white\"\n\n    if subplot is None:\n        fig = go.Figure()\n        row, col = None, None\n    else:\n        fig, row, col = subplot\n\n    fig.update_layout(\n        title=title,\n        template=template,\n        height=height,\n        width=width,\n        legend=legend_properties,\n        margin=margin_properties,\n    )\n    fig.update_xaxes(\n        title=xlabel.format(linebreak=\"<br>\") if xlabel else None,\n        range=xrange,\n        row=row,\n        col=col,\n    )\n    fig.update_yaxes(\n        title=ylabel.format(linebreak=\"<br>\") if ylabel else None,\n        range=yrange,\n        row=row,\n        col=col,\n    )\n\n    if horizontal_line is not None:\n        fig.add_hline(\n            y=horizontal_line,\n            line_width=fig.layout.yaxis.linewidth or 1,\n            opacity=1.0,\n            row=row,\n            col=col,\n        )\n\n    for line in lines:\n        trace = go.Scatter(\n            x=line.x,\n            y=line.y,\n            name=line.name,\n            line_color=line.color,\n            mode=\"lines\",\n            showlegend=line.show_in_legend,\n            legendgroup=line.name,\n        )\n        fig.add_trace(trace, row=row, col=col)\n\n    if marker is not None:\n        trace = go.Scatter(\n            x=[marker.x],\n            y=[marker.y],\n            name=marker.name,\n            marker_color=marker.color,\n            showlegend=False,\n        )\n        fig.add_trace(trace, row=row, col=col)\n\n    return fig\n\n\ndef _grid_line_plot_plotly(\n    lines_list: list[list[LineData]],\n    *,\n    n_rows: int,\n    n_cols: int,\n    titles: list[str] | None,\n    xlabels: list[str] | None,\n    xrange: tuple[float, float] | None,\n    share_x: bool,\n    ylabels: 
list[str] | None,\n    yrange: tuple[float, float] | None,\n    share_y: bool,\n    template: str | None,\n    height: int | None,\n    width: int | None,\n    legend_properties: dict[str, Any] | None,\n    margin_properties: dict[str, Any] | None,\n    plot_title: str | None,\n    marker_list: list[MarkerData] | None,\n    make_subplot_kwargs: dict[str, Any] | None = None,\n) -> go.Figure:\n    \"\"\"Create a grid of line plots using Plotly.\n\n    Args:\n        ...: All other argument descriptions can be found in the docstring of the\n            `grid_line_plot` function.\n\n    Returns:\n        A Plotly Figure object.\n\n    \"\"\"\n    from plotly.subplots import make_subplots\n\n    subplot_kwargs = dict(\n        rows=n_rows,\n        cols=n_cols,\n        subplot_titles=titles,\n        shared_yaxes=share_y,\n        shared_xaxes=share_x,\n        horizontal_spacing=0.3 / n_cols,\n    )\n    subplot_kwargs.update(make_subplot_kwargs or {})\n    fig = make_subplots(**subplot_kwargs)\n\n    for i, (row, col) in enumerate(\n        itertools.product(range(1, n_rows + 1), range(1, n_cols + 1))\n    ):\n        if i >= len(lines_list):\n            break\n\n        _line_plot_plotly(\n            lines_list[i],\n            title=None,\n            xlabel=xlabels[i] if xlabels else None,\n            xrange=xrange,\n            ylabel=ylabels[i] if ylabels else None,\n            yrange=yrange,\n            template=template,\n            height=height,\n            width=width,\n            legend_properties=legend_properties,\n            margin_properties=margin_properties,\n            horizontal_line=None,\n            marker=marker_list[i] if marker_list else None,\n            subplot=(fig, row, col),\n        )\n\n    if plot_title is not None:\n        fig.update_layout(title=plot_title)\n\n    return fig\n\n\ndef _line_plot_matplotlib(\n    lines: list[LineData],\n    *,\n    title: str | None,\n    xlabel: str | None,\n    xrange: tuple[float, 
float] | None,\n    ylabel: str | None,\n    yrange: tuple[float, float] | None,\n    template: str | None,\n    height: int | None,\n    width: int | None,\n    legend_properties: dict[str, Any] | None,\n    margin_properties: dict[str, Any] | None,\n    horizontal_line: float | None,\n    marker: MarkerData | None,\n    subplot: \"plt.Axes | None\" = None,\n) -> \"plt.Axes\":\n    \"\"\"Create a line plot using Matplotlib.\n\n    Args:\n        ...: All other argument descriptions can be found in the docstring of the\n            `line_plot` function.\n        subplot: A Matplotlib `Axes` object to which the lines should be plotted.\n            If provided, the plot is drawn on the given `Axes`. If not provided,\n            a new `Figure` and `Axes` are created.\n\n    Returns:\n        A Matplotlib Axes object.\n\n    \"\"\"\n    import matplotlib.pyplot as plt\n\n    # In interactive environments (like Jupyter), explicitly enable matplotlib's\n    # interactive mode. If it is not enabled, matplotlib's context manager will\n    # revert to non-interactive mode after creating the first figure, causing\n    # subsequent figures to not display inline.\n    # See: https://github.com/matplotlib/matplotlib/issues/26716\n    if plt.get_backend() == \"module://matplotlib_inline.backend_inline\":\n        plt.ion()\n\n    if template is None:\n        template = \"default\"\n\n    with plt.style.context(template):\n        if subplot is None:\n            px = 1 / plt.rcParams[\"figure.dpi\"]  # pixel in inches\n            fig, ax = plt.subplots(\n                figsize=(width * px, height * px) if width and height else None,\n                layout=\"constrained\",\n            )\n        else:\n            ax = subplot\n\n        for line in lines:\n            ax.plot(\n                line.x,\n                line.y,\n                label=line.name if line.show_in_legend else None,\n                color=line.color,\n            )\n\n        if horizontal_line 
is not None:\n            ax.axhline(\n                y=horizontal_line,\n                color=ax.spines[\"left\"].get_edgecolor() or \"gray\",\n                linewidth=ax.spines[\"left\"].get_linewidth() or 1.0,\n            )\n\n        if marker is not None:\n            ax.scatter(\n                [marker.x],\n                [marker.y],\n                color=marker.color,\n                label=None,\n            )\n\n        ax.set(\n            title=title,\n            xlabel=xlabel.format(linebreak=\"\\n\") if xlabel else None,\n            xlim=xrange,\n            ylabel=ylabel.format(linebreak=\"\\n\") if ylabel else None,\n            ylim=yrange,\n        )\n\n        if subplot is None and legend_properties is not None:\n            fig.legend(**legend_properties)\n\n    return ax\n\n\ndef _grid_line_plot_matplotlib(\n    lines_list: list[list[LineData]],\n    *,\n    n_rows: int,\n    n_cols: int,\n    titles: list[str] | None,\n    xlabels: list[str] | None,\n    xrange: tuple[float, float] | None,\n    share_x: bool,\n    ylabels: list[str] | None,\n    yrange: tuple[float, float] | None,\n    share_y: bool,\n    template: str | None,\n    height: int | None,\n    width: int | None,\n    legend_properties: dict[str, Any] | None,\n    margin_properties: dict[str, Any] | None,\n    plot_title: str | None,\n    marker_list: list[MarkerData] | None,\n    make_subplot_kwargs: dict[str, Any] | None = None,\n) -> np.ndarray:\n    \"\"\"Create a grid of line plots using Matplotlib.\n\n    Args:\n        ...: All other argument descriptions can be found in the docstring of the\n            `grid_line_plot` function.\n\n    Returns:\n        A 2D numpy array of Matplotlib Axes objects.\n\n    \"\"\"\n    import matplotlib.pyplot as plt\n\n    px = 1 / plt.rcParams[\"figure.dpi\"]  # pixel in inches\n    fig, axes = plt.subplots(\n        nrows=n_rows,\n        ncols=n_cols,\n        squeeze=False,  # always return a 2D array of axes\n        
figsize=(width * px, height * px) if width and height else None,\n        layout=\"constrained\",\n    )\n\n    for i, (row, col) in enumerate(itertools.product(range(n_rows), range(n_cols))):\n        if i >= len(lines_list):\n            axes[row, col].set_visible(False)\n            continue\n\n        if share_x and row < n_rows - 1:\n            # Share x-axis with bottom subplot in the same column\n            axes[row, col].sharex(axes[-1, col])\n            axes[row, col].xaxis.set_tick_params(labelbottom=False)\n        if share_y and col > 0:\n            # Share y-axis with left subplot in the same row\n            axes[row, col].sharey(axes[row, 0])\n            axes[row, col].yaxis.set_tick_params(labelleft=False)\n\n        _line_plot_matplotlib(\n            lines_list[i],\n            title=titles[i] if titles else None,\n            xlabel=xlabels[i] if xlabels else None,\n            xrange=xrange,\n            ylabel=ylabels[i] if ylabels else None,\n            yrange=yrange,\n            template=template,\n            height=None,\n            width=None,\n            legend_properties=None,\n            margin_properties=None,\n            horizontal_line=None,\n            marker=marker_list[i] if marker_list else None,\n            subplot=axes[row, col],\n        )\n\n    if legend_properties is not None:\n        fig.legend(**legend_properties)\n    if plot_title is not None:\n        fig.suptitle(plot_title)\n\n    return axes\n\n\ndef _line_plot_bokeh(\n    lines: list[LineData],\n    *,\n    title: str | None,\n    xlabel: str | None,\n    xrange: tuple[float, float] | None,\n    ylabel: str | None,\n    yrange: tuple[float, float] | None,\n    template: str | None,\n    height: int | None,\n    width: int | None,\n    legend_properties: dict[str, Any] | None,\n    margin_properties: dict[str, Any] | None,\n    horizontal_line: float | None,\n    marker: MarkerData | None,\n    subplot: \"bokeh.plotting.figure | None\" = None,\n) -> 
\"bokeh.plotting.figure\":\n    \"\"\"Create a line plot using Bokeh.\n\n    Args:\n        ...: All other argument descriptions can be found in the docstring of the\n            `line_plot` function.\n        subplot: A Bokeh `Figure` object to which the lines should be plotted.\n            If provided, the plot is drawn on the given `Figure`. If not provided,\n            a new `Figure` is created.\n\n    Returns:\n        A Bokeh Figure object.\n\n    \"\"\"\n    from bokeh import themes\n    from bokeh.io import curdoc\n    from bokeh.models import Range1d\n    from bokeh.models.annotations import Legend, LegendItem, Span, Title\n    from bokeh.plotting import figure\n\n    if template is None:\n        template = \"light_minimal\"\n    curdoc().theme = themes.built_in_themes[template]\n\n    if subplot is not None:\n        p = subplot\n    else:\n        p = figure()\n\n    if title is not None:\n        p.title = Title(text=title)\n    if xlabel is not None:\n        p.xaxis.axis_label = xlabel.format(linebreak=\"\\n\")\n    if xrange is not None:\n        p.x_range = Range1d(*xrange)\n    if ylabel is not None:\n        p.yaxis.axis_label = ylabel.format(linebreak=\"\\n\")\n    if yrange is not None:\n        p.y_range = Range1d(*yrange)\n    if height is not None:\n        p.height = height\n    if width is not None:\n        p.width = width\n\n    _legend_items = []\n    for line in lines:\n        glyph = p.line(\n            line.x,\n            line.y,\n            line_color=line.color,\n            line_width=2,\n        )\n\n        if line.show_in_legend:\n            _legend_items.append(LegendItem(label=line.name, renderers=[glyph]))  # type: ignore[list-item]\n\n    if horizontal_line is not None:\n        span = Span(\n            location=horizontal_line,\n            dimension=\"width\",\n            line_color=p.yaxis.axis_line_color or \"gray\",\n            line_width=p.yaxis.axis_line_width or 2,\n        )\n        
p.add_layout(span)\n\n    if marker is not None:\n        p.scatter(\n            x=[marker.x],\n            y=[marker.y],\n            marker=\"circle\",\n            fill_color=marker.color,\n            line_color=marker.color,\n            size=10,\n        )\n\n    if _legend_items:\n        legend_kwargs = legend_properties.copy() if legend_properties else {}\n        place = legend_kwargs.pop(\"place\", \"center\")\n        text = legend_kwargs.pop(\"title\", None)\n\n        legend = Legend(items=_legend_items, **(legend_kwargs))\n        p.add_layout(legend, place=place)\n        p.legend.title = text\n\n    return p\n\n\ndef _grid_line_plot_bokeh(\n    lines_list: list[list[LineData]],\n    *,\n    n_rows: int,\n    n_cols: int,\n    titles: list[str] | None,\n    xlabels: list[str] | None,\n    xrange: tuple[float, float] | None,\n    share_x: bool,\n    ylabels: list[str] | None,\n    yrange: tuple[float, float] | None,\n    share_y: bool,\n    template: str | None,\n    height: int | None,\n    width: int | None,\n    legend_properties: dict[str, Any] | None,\n    margin_properties: dict[str, Any] | None,\n    plot_title: str | None,\n    marker_list: list[MarkerData] | None,\n    make_subplot_kwargs: dict[str, Any] | None = None,\n) -> \"bokeh.models.GridPlot\":\n    \"\"\"Create a grid of line plots using Bokeh.\n\n    Args:\n        ...: All other argument descriptions can be found in the docstring of the\n            `grid_line_plot` function.\n\n    Returns:\n        A Bokeh gridplot object.\n\n    \"\"\"\n    from bokeh.layouts import gridplot\n    from bokeh.plotting import figure\n\n    plots: list[list[figure]] = []\n\n    for row in range(n_rows):\n        subplot_row: list[Any] = []\n        for col in range(n_cols):\n            idx = row * n_cols + col\n            if idx >= len(lines_list):\n                break\n\n            p = figure()\n\n            _line_plot_bokeh(\n                lines_list[idx],\n                
title=titles[idx] if titles else None,\n                xlabel=xlabels[idx] if xlabels else None,\n                xrange=xrange,\n                ylabel=ylabels[idx] if ylabels else None,\n                yrange=yrange,\n                template=template,\n                height=None,\n                width=None,\n                legend_properties=legend_properties,\n                margin_properties=None,\n                horizontal_line=None,\n                marker=marker_list[idx] if marker_list else None,\n                subplot=p,\n            )\n\n            if share_x:\n                if row > 0:\n                    # Share x-range with the top-most subplot in the same column\n                    p.x_range = plots[0][col].x_range\n                if row < n_rows - 1:\n                    # Hide tick labels except for subplots in the last row\n                    p.xaxis.major_label_text_font_size = \"0pt\"\n            if share_y:\n                if col > 0:\n                    # Share y-range with the left-most subplot in the same row\n                    p.y_range = subplot_row[0].y_range\n\n                    # Hide tick labels except for subplots in the first column\n                    p.yaxis.major_label_text_font_size = \"0pt\"\n\n            subplot_row.append(p)\n        plots.append(subplot_row)\n\n    grid = gridplot(  # type: ignore[call-overload]\n        plots,\n        height=height // n_rows if height else None,\n        width=width // n_cols if width else None,\n        toolbar_location=\"right\",\n    )\n\n    return grid\n\n\ndef _line_plot_altair(\n    lines: list[LineData],\n    *,\n    title: str | None,\n    xlabel: str | None,\n    xrange: tuple[float, float] | None,\n    ylabel: str | None,\n    yrange: tuple[float, float] | None,\n    template: str | None,\n    height: int | None,\n    width: int | None,\n    legend_properties: dict[str, Any] | None,\n    margin_properties: dict[str, Any] | None,\n    horizontal_line: float 
| None,\n    marker: MarkerData | None,\n    subplot: None = None,\n) -> \"alt.Chart\":\n    \"\"\"Create a line plot using Altair.\n\n    Args:\n        ...: All other argument descriptions can be found in the docstring of the\n            `line_plot` function.\n        subplot: Unused by Altair.\n\n    Returns:\n        An Altair Chart object.\n\n    \"\"\"\n    import altair as alt\n    import pandas as pd\n\n    alt.data_transformers.disable_max_rows()\n\n    if template is None:\n        template = \"default\"\n    alt.theme.enable(template)\n\n    dfs = []\n    for line in lines:\n        df = pd.DataFrame(\n            {\"x\": line.x, \"y\": line.y, \"name\": line.name, \"color\": line.color}\n        )\n        dfs.append(df)\n    source = pd.concat(dfs)\n\n    figure_properties: dict[str, str | int] = {}\n    if title is not None:\n        figure_properties[\"title\"] = title\n    if width is not None:\n        figure_properties[\"width\"] = width\n    if height is not None:\n        figure_properties[\"height\"] = height\n\n    chart = (\n        alt.Chart(source)\n        .mark_line()\n        .encode(\n            x=alt.X(\n                \"x\",\n                title=xlabel.split(\"{linebreak}\") if xlabel else None,\n                scale=alt.Scale(domain=list(xrange)) if xrange else alt.Undefined,\n            ),\n            y=alt.Y(\n                \"y\",\n                title=ylabel.split(\"{linebreak}\") if ylabel else None,\n                scale=alt.Scale(domain=list(yrange)) if yrange else alt.Undefined,\n            ),\n            color=alt.Color(\"color:N\", scale=None),\n            detail=\"name:N\",\n        )\n        .properties(**figure_properties)\n    )\n\n    if any(line.show_in_legend for line in lines):\n        legend = (\n            alt.Chart(source)\n            .mark_line()\n            .encode(\n                color=alt.Color(\n                    \"name:N\",\n                    title=None,\n                    
legend=alt.Legend(**(legend_properties or {})),\n                    scale=alt.Scale(\n                        domain=[line.name for line in lines if line.show_in_legend],\n                        range=[\n                            line.color or \"\" for line in lines if line.show_in_legend\n                        ],\n                    ),\n                )\n            )\n        )\n        chart = chart + legend\n\n    if horizontal_line is not None:\n        hline = (\n            alt.Chart(pd.DataFrame({\"y\": [horizontal_line]})).mark_rule().encode(y=\"y\")\n        )\n        chart = chart + hline\n\n    if marker is not None:\n        marker_chart = (\n            alt.Chart(pd.DataFrame({\"x\": [marker.x], \"y\": [marker.y]}))\n            .mark_point(size=100, shape=\"circle\", color=marker.color, filled=True)\n            .encode(x=\"x\", y=\"y\")\n        )\n        chart = chart + marker_chart\n\n    return chart.interactive()\n\n\ndef _grid_line_plot_altair(\n    lines_list: list[list[LineData]],\n    *,\n    n_rows: int,\n    n_cols: int,\n    titles: list[str] | None,\n    xlabels: list[str] | None,\n    xrange: tuple[float, float] | None,\n    share_x: bool,\n    ylabels: list[str] | None,\n    yrange: tuple[float, float] | None,\n    share_y: bool,\n    template: str | None,\n    height: int | None,\n    width: int | None,\n    legend_properties: dict[str, Any] | None,\n    margin_properties: dict[str, Any] | None,\n    plot_title: str | None,\n    marker_list: list[MarkerData] | None,\n    make_subplot_kwargs: dict[str, Any] | None = None,\n) -> \"alt.Chart | alt.HConcatChart | alt.VConcatChart\":\n    \"\"\"Create a grid of line plots using Altair.\n\n    Args:\n        ...: All other argument descriptions can be found in the docstring of the\n            `grid_line_plot` function.\n\n    Returns:\n        An Altair Chart if the grid contains only one subplot, an Altair HConcatChart\n            if 'n_rows' is 1, otherwise an Altair 
VConcatChart.\n\n    \"\"\"\n    import altair as alt\n\n    subplot_height = height // n_rows if height else None\n    subplot_width = width // n_cols if width else None\n\n    charts = []\n    for row_idx in range(n_rows):\n        chart_row = []\n        for col_idx in range(n_cols):\n            i = row_idx * n_cols + col_idx\n            if i >= len(lines_list):\n                break\n\n            chart = _line_plot_altair(\n                lines_list[i],\n                title=titles[i] if titles else None,\n                xlabel=xlabels[i] if xlabels else None,\n                xrange=xrange,\n                ylabel=ylabels[i] if ylabels else None,\n                yrange=yrange,\n                template=template,\n                height=subplot_height,\n                width=subplot_width,\n                legend_properties=legend_properties,\n                margin_properties=None,\n                horizontal_line=None,\n                marker=marker_list[i] if marker_list else None,\n                subplot=None,\n            )\n\n            chart_row.append(chart)\n        charts.append(chart_row)\n\n    row_selections = [\n        alt.selection_interval(\n            bind=\"scales\", encodings=[\"y\"], name=f\"share_y_row{row_idx}\"\n        )\n        for row_idx in range(n_rows)\n    ]\n    col_selections = [\n        alt.selection_interval(\n            bind=\"scales\", encodings=[\"x\"], name=f\"share_x_col{col_idx}\"\n        )\n        for col_idx in range(n_cols)\n    ]\n\n    for row_idx, row in enumerate(charts):\n        for col_idx in range(len(row)):\n            chart = row[col_idx]\n\n            params = []\n            if share_y:\n                # Share y-axis for all subplots in the same row\n                params.append(row_selections[row_idx])\n            else:\n                # Use independent y-axes for each subplot\n                params.append(\n                    alt.selection_interval(\n                        
bind=\"scales\",\n                        encodings=[\"y\"],\n                        name=f\"ind_y_row{row_idx}_col{col_idx}\",\n                    )\n                )\n            if share_x:\n                # Share x-axis for all subplots in the same column\n                params.append(col_selections[col_idx])\n            else:\n                # Use independent x-axes for each subplot\n                params.append(\n                    alt.selection_interval(\n                        bind=\"scales\",\n                        encodings=[\"x\"],\n                        name=f\"ind_x_row{row_idx}_col{col_idx}\",\n                    )\n                )\n            chart = chart.add_params(*params)\n\n            if share_y and col_idx > 0:\n                # Hide y-axis ticklabels for all subplots except the leftmost column\n                chart = chart.encode(y=alt.Y(axis=alt.Axis(labels=False)))\n            if share_x and row_idx < n_rows - 1:\n                # Hide x-axis ticklabels for all subplots except the bottom row\n                chart = chart.encode(x=alt.X(axis=alt.Axis(labels=False)))\n\n            charts[row_idx][col_idx] = chart\n\n    row_charts = []\n    for row in charts:\n        row_chart: alt.Chart | alt.HConcatChart\n        if len(row) == 1:\n            row_chart = row[0]\n        else:\n            row_chart = alt.hconcat(*row)\n        row_charts.append(row_chart)\n\n    grid_chart: alt.Chart | alt.HConcatChart | alt.VConcatChart\n    if len(row_charts) == 1:\n        grid_chart = row_charts[0]\n    else:\n        grid_chart = alt.vconcat(*row_charts)\n\n    if plot_title is not None:\n        grid_chart = grid_chart.properties(title=plot_title)\n\n    return grid_chart\n\n\ndef line_plot(\n    lines: list[LineData],\n    backend: Literal[\"plotly\", \"matplotlib\", \"bokeh\", \"altair\"] = \"plotly\",\n    *,\n    title: str | None = None,\n    xlabel: str | None = None,\n    xrange: tuple[float, float] | None = None,\n    
ylabel: str | None = None,\n    yrange: tuple[float, float] | None = None,\n    template: str | None = None,\n    height: int | None = None,\n    width: int | None = None,\n    legend_properties: dict[str, Any] | None = None,\n    margin_properties: dict[str, Any] | None = None,\n    horizontal_line: float | None = None,\n    marker: MarkerData | None = None,\n) -> Any:\n    \"\"\"Create a line plot corresponding to the specified backend.\n\n    Args:\n        lines: List of objects each containing data for a line in the plot.\n            The order of lines in the list determines the order in which they are\n            plotted, with later lines being rendered on top of earlier ones.\n        backend: The backend to use for plotting.\n        title: Title of the plot.\n        xlabel: Label for the x-axis.\n        xrange: View limits for the x-axis.\n        ylabel: Label for the y-axis.\n        yrange: View limits for the y-axis.\n        template: Backend-specific template for styling the plot.\n        height: Height of the plot (in pixels).\n        width: Width of the plot (in pixels).\n        legend_properties: Backend-specific properties for the legend.\n        margin_properties: Backend-specific properties for the plot margins.\n        horizontal_line: If provided, a horizontal line is drawn at the specified\n            y-value.\n        marker: An object containing data for a marker in the plot.\n\n    Returns:\n        A figure object corresponding to the specified backend.\n\n    \"\"\"\n    _line_plot_backend_function = _get_plot_function(backend, grid_plot=False)\n\n    fig = _line_plot_backend_function(\n        lines,\n        title=title,\n        xlabel=xlabel,\n        xrange=xrange,\n        ylabel=ylabel,\n        yrange=yrange,\n        template=template,\n        height=height,\n        width=width,\n        legend_properties=legend_properties,\n        margin_properties=margin_properties,\n        horizontal_line=horizontal_line,\n     
   marker=marker,\n    )\n\n    return fig\n\n\ndef grid_line_plot(\n    lines_list: list[list[LineData]],\n    backend: Literal[\"plotly\", \"matplotlib\", \"bokeh\", \"altair\"] = \"plotly\",\n    *,\n    n_rows: int,\n    n_cols: int,\n    titles: list[str] | None = None,\n    xlabels: list[str] | None = None,\n    xrange: tuple[float, float] | None = None,\n    share_x: bool = False,\n    ylabels: list[str] | None = None,\n    yrange: tuple[float, float] | None = None,\n    share_y: bool = False,\n    template: str | None = None,\n    height: int | None = None,\n    width: int | None = None,\n    legend_properties: dict[str, Any] | None = None,\n    margin_properties: dict[str, Any] | None = None,\n    plot_title: str | None = None,\n    marker_list: list[MarkerData] | None = None,\n    make_subplot_kwargs: dict[str, Any] | None = None,\n) -> Any:\n    \"\"\"Create a grid of line plots corresponding to the specified backend.\n\n    Args:\n        lines_list: A list where each element is a list of objects containing data\n            for the lines in a subplot. 
The order of sublists determines the order\n            of subplots in the grid (row-wise), and the order of lines within each\n            sublist determines the order of lines in that subplot.\n        backend: The backend to use for plotting.\n        n_rows: Number of rows in the grid.\n        n_cols: Number of columns in the grid.\n        titles: Titles for each subplot in the grid.\n        xlabels: Labels for the x-axis of each subplot.\n        xrange: View limits for the x-axis of each subplot.\n        share_x: If True, all subplots share the same x-axis limits and each subplot in\n            a column actually share the x-axis.\n        ylabels: Labels for the y-axis of each subplot.\n        yrange: View limits for the y-axis of each subplot.\n        share_y: If True, all subplots share the same y-axis limits and each subplot in\n            a row actually share the y-axis.\n        template: Backend-specific template for styling the plots.\n        height: Height of the entire grid plot (in pixels).\n        width: Width of the entire grid plot (in pixels).\n        legend_properties: Backend-specific properties for the legend.\n        margin_properties: Backend-specific properties for the plot margins.\n        plot_title: Title for the entire grid plot.\n        marker_list: A list where where each element is an object containing data\n            for a marker in a subplot. 
The order of objects in the list determines\n            the subplot on which the marker is plotted.\n\n    Returns:\n        A figure object corresponding to the specified backend.\n\n    \"\"\"\n    _grid_line_plot_backend_function = _get_plot_function(backend, grid_plot=True)\n\n    fig = _grid_line_plot_backend_function(\n        lines_list,\n        n_rows=n_rows,\n        n_cols=n_cols,\n        titles=titles,\n        xlabels=xlabels,\n        xrange=xrange,\n        share_x=share_x,\n        ylabels=ylabels,\n        yrange=yrange,\n        share_y=share_y,\n        template=template,\n        height=height,\n        width=width,\n        legend_properties=legend_properties,\n        margin_properties=margin_properties,\n        plot_title=plot_title,\n        marker_list=marker_list,\n        make_subplot_kwargs=make_subplot_kwargs,\n    )\n\n    return fig\n\n\nBACKEND_AVAILABILITY_AND_LINE_PLOT_FUNCTION: dict[\n    str, tuple[bool, LinePlotFunction, GridLinePlotFunction]\n] = {\n    \"plotly\": (True, _line_plot_plotly, _grid_line_plot_plotly),\n    \"matplotlib\": (\n        IS_MATPLOTLIB_INSTALLED,\n        _line_plot_matplotlib,\n        _grid_line_plot_matplotlib,\n    ),\n    \"bokeh\": (\n        IS_BOKEH_INSTALLED,\n        _line_plot_bokeh,\n        _grid_line_plot_bokeh,\n    ),\n    \"altair\": (\n        IS_ALTAIR_INSTALLED,\n        _line_plot_altair,\n        _grid_line_plot_altair,\n    ),\n}\n\n\n@overload\ndef _get_plot_function(\n    backend: Literal[\"plotly\", \"matplotlib\", \"bokeh\", \"altair\"],\n    grid_plot: Literal[False],\n) -> LinePlotFunction: ...\n\n\n@overload\ndef _get_plot_function(\n    backend: Literal[\"plotly\", \"matplotlib\", \"bokeh\", \"altair\"],\n    grid_plot: Literal[True],\n) -> GridLinePlotFunction: ...\n\n\ndef _get_plot_function(\n    backend: str, grid_plot: bool\n) -> LinePlotFunction | GridLinePlotFunction:\n    if backend not in BACKEND_AVAILABILITY_AND_LINE_PLOT_FUNCTION:\n        msg = (\n           
 f\"Invalid plotting backend '{backend}'. \"\n            f\"Available backends: \"\n            f\"{', '.join(BACKEND_AVAILABILITY_AND_LINE_PLOT_FUNCTION.keys())}\"\n        )\n        raise InvalidPlottingBackendError(msg)\n\n    (\n        _is_backend_available,\n        _line_plot_backend_function,\n        _grid_line_plot_backend_function,\n    ) = BACKEND_AVAILABILITY_AND_LINE_PLOT_FUNCTION[backend]\n\n    if not _is_backend_available:\n        msg = (\n            f\"The {backend} backend is not installed. \"\n            f\"Install the package using either 'pip install {backend}' or \"\n            f\"'conda install -c conda-forge {backend}'\"\n        )\n        raise NotInstalledError(msg)\n\n    if grid_plot:\n        return _grid_line_plot_backend_function\n    else:\n        return _line_plot_backend_function\n"
  },
  {
    "path": "src/optimagic/visualization/convergence_plot.py",
    "content": "from typing import Any, Literal\n\nimport numpy as np\nimport pandas as pd\n\nfrom optimagic.benchmarking.process_benchmark_results import (\n    process_benchmark_results,\n)\nfrom optimagic.config import DEFAULT_PALETTE\nfrom optimagic.utilities import propose_alternatives\nfrom optimagic.visualization.backends import grid_line_plot, line_plot\nfrom optimagic.visualization.plotting_utilities import LineData, get_palette_cycle\n\nBACKEND_TO_CONVERGENCE_PLOT_LEGEND_PROPERTIES: dict[str, dict[str, Any]] = {\n    \"plotly\": {},\n    \"matplotlib\": {\"loc\": \"outside right upper\", \"fontsize\": \"x-small\"},\n    \"bokeh\": {\n        \"location\": \"top_right\",\n        \"place\": \"right\",\n        \"label_text_font_size\": \"8pt\",\n    },\n    \"altair\": {\"orient\": \"right\"},\n}\n\nBACKEND_TO_CONVERGENCE_PLOT_MARGIN_PROPERTIES: dict[str, dict[str, int]] = {\n    \"plotly\": {\"l\": 10, \"r\": 10, \"t\": 30, \"b\": 10},\n    # \"matplotlib\": handles margins automatically via constrained layout\n}\n\nOUTCOME_TO_CONVERGENCE_PLOT_YLABEL: dict[str, str] = {\n    \"criterion\": \"Current Function Value\",\n    \"monotone_criterion\": \"Best Function Value Found So Far\",\n    \"criterion_normalized\": (\n        \"Share of Function Distance to Optimum{linebreak}\"\n        \"Missing From Current Criterion Value\"\n    ),\n    \"monotone_criterion_normalized\": (\n        \"Share of Function Distance to Optimum{linebreak}Missing From Best So Far\"\n    ),\n    \"parameter_distance\": \"Distance Between Current and{linebreak}Optimal Parameters\",\n    \"parameter_distance_normalized\": (\n        \"Share of Parameter Distance to Optimum{linebreak}\"\n        \"Missing From Current Parameters\"\n    ),\n    \"monotone_parameter_distance_normalized\": (\n        \"Share of Parameter Distance to Optimum{linebreak}\"\n        \"Missing From the Best Parameters So Far\"\n    ),\n    \"monotone_parameter_distance\": (\n        \"Distance Between the 
Best Parameters{linebreak}\"\n        \"So Far and the Optimal Parameters\"\n    ),\n}\n\nRUNTIME_MEASURE_TO_CONVERGENCE_PLOT_XLABEL: dict[str, str] = {\n    \"n_evaluations\": \"Number of Function Evaluations\",\n    \"walltime\": \"Elapsed Time\",\n    \"n_batches\": \"Number of Batches\",\n}\n\n\ndef convergence_plot(\n    problems: dict[str, dict[str, Any]],\n    results: dict[tuple[str, str], dict[str, Any]],\n    *,\n    problem_subset: list[str] | None = None,\n    algorithm_subset: list[str] | None = None,\n    n_cols: int = 2,\n    distance_measure: Literal[\"criterion\", \"parameter_distance\"] = \"criterion\",\n    monotone: bool = True,\n    normalize_distance: bool = True,\n    runtime_measure: Literal[\n        \"n_evaluations\", \"walltime\", \"n_batches\"\n    ] = \"n_evaluations\",\n    stopping_criterion: Literal[\"x\", \"y\", \"x_and_y\", \"x_or_y\"] = \"y\",\n    x_precision: float = 1e-4,\n    y_precision: float = 1e-4,\n    combine_plots_in_grid: bool = True,\n    backend: Literal[\"plotly\", \"matplotlib\", \"bokeh\", \"altair\"] = \"plotly\",\n    template: str | None = None,\n    palette: list[str] | str = DEFAULT_PALETTE,\n) -> Any:\n    \"\"\"Plot convergence of optimizers for a set of problems.\n\n    This creates a grid of plots, showing the convergence of the different\n    algorithms on each problem. The faster a line falls, the faster the algorithm\n    improved on the problem. The algorithm converged where its line reaches 0\n    (if normalize_distance is True) or the horizontal line labeled \"true solution\".\n\n    Each plot shows on the x axis the runtime_measure, which can be walltime, number\n    of evaluations or number of batches. Each algorithm's convergence is a line in the\n    plot. Convergence can be measured by the criterion value of the particular\n    time/evaluation. The convergence can be made monotone (i.e. 
always taking the bast\n    value so far) or normalized such that the distance from the start to the true\n    solution is one.\n\n    Args:\n        problems: optimagic benchmarking problems dictionary. Keys are the problem\n            names. Values contain information on the problem, including the solution\n            value.\n        results: optimagic benchmarking results dictionary. Keys are tuples of the form\n            (problem, algorithm), values are dictionaries of the collected information\n            on the benchmark run, including 'criterion_history' and 'time_history'.\n        problem_subset: List of problem names. These must be a subset of the keys of the\n            problems dictionary. If provided the convergence plot is only created for\n            the problems specified in this list.\n        algorithm_subset: List of algorithm names. These must be a subset of the keys of\n            the optimizer_options passed to run_benchmark. If provided only the\n            convergence of the given algorithms are shown.\n        n_cols: number of columns in the plot of grids. The number of rows is determined\n            automatically.\n        distance_measure: One of \"criterion\", \"parameter_distance\".\n        monotone: If True the best found criterion value so far is plotted.\n            If False the particular criterion evaluation of that time is used.\n        normalize_distance: If True the progress is scaled by the total distance between\n            the start value and the optimal value, i.e. 1 means the algorithm is as far\n            from the solution as the start value and 0 means the algorithm has reached\n            the solution value.\n        runtime_measure: This is the runtime until the desired convergence was reached\n            by an algorithm.\n        stopping_criterion: Determines how convergence is determined from the two\n            precisions. 
To effectively disable convergence, set `x_precision` and/or\n            `y_precision` to very small values (or 0).\n        x_precision: how close an algorithm must have gotten to the true parameter\n            values (as percent of the Euclidean distance between start and solution\n            parameters) before the criterion for clipping and convergence is fulfilled.\n        y_precision: how close an algorithm must have gotten to the true criterion\n            values (as percent of the distance between start and solution criterion\n            value) before the criterion for clipping and convergence is fulfilled.\n        combine_plots_in_grid: Whether to return a single figure containing subplots\n            for each factor pair or a dictionary of individual plots. Default is True.\n        backend: The backend to use for plotting. Default is \"plotly\".\n        template: The template for the figure. If not specified, the default template of\n            the backend is used. For the 'bokeh' and 'altair' backends, this changes the\n            global theme, which affects all plots from that backend in the session.\n        palette: The coloring palette for traces. Default is the D3 qualitative palette.\n\n    Returns:\n        The figure object containing the convergence plot if `combine_plots_in_grid` is\n            True. 
Otherwise, a dictionary mapping problem names to their respective\n            figure objects is returned.\n\n    \"\"\"\n    # ==================================================================================\n    # Process inputs\n\n    df, _ = process_benchmark_results(\n        problems=problems,\n        results=results,\n        stopping_criterion=stopping_criterion,\n        x_precision=x_precision,\n        y_precision=y_precision,\n    )\n\n    if isinstance(problem_subset, str):\n        problem_subset = [problem_subset]\n    if isinstance(algorithm_subset, str):\n        algorithm_subset = [algorithm_subset]\n\n    _check_only_allowed_subset_provided(problem_subset, df[\"problem\"], \"problem\")\n    _check_only_allowed_subset_provided(algorithm_subset, df[\"algorithm\"], \"algorithm\")\n\n    if problem_subset is not None:\n        df = df[df[\"problem\"].isin(problem_subset)]\n    if algorithm_subset is not None:\n        df = df[df[\"algorithm\"].isin(algorithm_subset)]\n\n    # ==================================================================================\n    # Extract backend-agnostic plotting data\n\n    outcome = (\n        f\"{'monotone_' if monotone else ''}\"\n        + distance_measure\n        + f\"{'_normalized' if normalize_distance else ''}\"\n    )\n\n    lines_list, titles = _extract_convergence_plot_lines(\n        df=df,\n        problems=problems,\n        runtime_measure=runtime_measure,\n        outcome=outcome,\n        palette=palette,\n        combine_plots_in_grid=combine_plots_in_grid,\n        backend=backend,\n    )\n\n    n_rows = int(np.ceil(len(lines_list) / n_cols))\n\n    # ==================================================================================\n    # Generate the figure\n\n    if combine_plots_in_grid:\n        fig = grid_line_plot(\n            lines_list,\n            backend=backend,\n            n_rows=n_rows,\n            n_cols=n_cols,\n            titles=titles,\n            xlabels=(\n           
     [RUNTIME_MEASURE_TO_CONVERGENCE_PLOT_XLABEL[runtime_measure]]\n                * len(lines_list)\n            ),\n            ylabels=[OUTCOME_TO_CONVERGENCE_PLOT_YLABEL[outcome]] * len(lines_list),\n            template=template,\n            height=320 * n_rows,\n            width=500 * n_cols,\n            legend_properties=BACKEND_TO_CONVERGENCE_PLOT_LEGEND_PROPERTIES.get(\n                backend, None\n            ),\n            margin_properties=BACKEND_TO_CONVERGENCE_PLOT_MARGIN_PROPERTIES.get(\n                backend, None\n            ),\n        )\n\n        return fig\n\n    else:\n        fig_dict = {}\n\n        for i, subplot_lines in enumerate(lines_list):\n            fig = line_plot(\n                subplot_lines,\n                backend=backend,\n                title=titles[i],\n                xlabel=RUNTIME_MEASURE_TO_CONVERGENCE_PLOT_XLABEL[runtime_measure],\n                ylabel=OUTCOME_TO_CONVERGENCE_PLOT_YLABEL[outcome],\n                template=template,\n                height=320,\n                width=500,\n                legend_properties=BACKEND_TO_CONVERGENCE_PLOT_LEGEND_PROPERTIES.get(\n                    backend, None\n                ),\n                margin_properties=BACKEND_TO_CONVERGENCE_PLOT_MARGIN_PROPERTIES.get(\n                    backend, None\n                ),\n            )\n\n            key = titles[i].replace(\" \", \"_\").lower()\n            fig_dict[key] = fig\n\n        return fig_dict\n\n\ndef _extract_convergence_plot_lines(\n    df: pd.DataFrame,\n    problems: dict[str, dict[str, Any]],\n    runtime_measure: str,\n    outcome: str,\n    palette: list[str] | str,\n    combine_plots_in_grid: bool,\n    backend: str,\n) -> tuple[list[list[LineData]], list[str]]:\n    lines_list = []  # container for all subplots\n    titles = []\n\n    for i, (_prob_name, _prob_data) in enumerate(df.groupby(\"problem\", sort=False)):\n        prob_name = str(_prob_name)\n        subplot_lines = []  # 
container for data of traces in individual subplot\n        palette_cycle = get_palette_cycle(palette)\n\n        if runtime_measure == \"n_batches\":\n            to_plot = (\n                _prob_data.groupby([\"algorithm\", runtime_measure]).min().reset_index()\n            )\n        else:\n            to_plot = _prob_data\n\n        show_in_legend = True\n        if combine_plots_in_grid:\n            # If combining plots, only show in legend of first subplot\n            # For 'bokeh' backend, show in legend for all subplots\n            # as it does not support single legend on grid plots.\n            # See: https://github.com/bokeh/bokeh/issues/7607\n            show_in_legend = (i == 0) or (backend == \"bokeh\")\n\n        for alg, group in to_plot.groupby(\"algorithm\", sort=False):\n            line_data = LineData(\n                x=group[runtime_measure].to_numpy(),\n                y=group[outcome].to_numpy(),\n                name=str(alg),\n                color=next(palette_cycle),\n                # if combining plots, only show legend in first subplot\n                show_in_legend=show_in_legend,\n            )\n            subplot_lines.append(line_data)\n\n        if outcome in (\"criterion\", \"monotone_criterion\"):\n            f_opt = problems[prob_name][\"solution\"][\"value\"]\n            line_data = LineData(\n                x=to_plot[runtime_measure].to_numpy(),\n                y=np.full(to_plot[runtime_measure].shape, f_opt),\n                name=\"true solution\",\n                color=next(palette_cycle),\n                # if combining plots, only show legend in first subplot\n                show_in_legend=show_in_legend,\n            )\n            subplot_lines.append(line_data)\n\n        lines_list.append(subplot_lines)\n        titles.append(prob_name.replace(\"_\", \" \").title())\n\n    return lines_list, titles\n\n\ndef _check_only_allowed_subset_provided(\n    subset: list[str] | None, allowed: pd.Series | 
list[str], name: str\n) -> None:\n    \"\"\"Check if all entries of a proposed subset are in a Series.\n\n    Args:\n        subset: If None, no checks are performed. Else a ValueError is raised listing\n            all entries that are not in the provided Series.\n        allowed: allowed entries.\n        name: name of the provided entries to use for the ValueError.\n\n    Raises:\n        ValueError\n\n    \"\"\"\n    allowed_set = set(allowed)\n    if subset is not None:\n        missing = [entry for entry in subset if entry not in allowed_set]\n        if missing:\n            missing_msg = \"\"\n            for entry in missing:\n                proposed = propose_alternatives(entry, allowed_set)\n                missing_msg += f\"Invalid {name}: {entry}. Did you mean {proposed}?\\n\"\n            raise ValueError(missing_msg)\n"
  },
  {
    "path": "src/optimagic/visualization/deviation_plot.py",
    "content": "import pandas as pd\nimport plotly.express as px\n\nfrom optimagic.benchmarking.process_benchmark_results import (\n    process_benchmark_results,\n)\nfrom optimagic.config import PLOTLY_TEMPLATE\n\n\ndef deviation_plot(\n    problems,\n    results,\n    *,\n    runtime_measure=\"n_evaluations\",\n    distance_measure=\"criterion\",\n    monotone=True,\n    template=PLOTLY_TEMPLATE,\n):\n    \"\"\"Plot average convergence of optimizers for a set of problems.\n\n    Returns aggregated version convergence plot, showing the convergence of the\n    different algorithms, averaged over a problem set. The faster a line falls, the\n    faster the algorithm improved on average.\n\n    The x axis is the runtime_measure, which can be walltime or number of evaluations.\n    The y axis is the average over the convergence measures of the problems in the set.\n    Convergence can be measured by the criterion value of the particular\n    time/evaluation. The convergence can be made monotone by always taking the\n    best  value.\n\n    Args:\n        problems (dict): optimagic benchmarking problems dictionary. Keys are the\n            problem names. Values contain information on the problem, including the\n            solution value.\n        results (dict): optimagic benchmarking results dictionary. Keys are\n            tuples of the form (problem, algorithm), values are dictionaries of the\n            collected information on the benchmark run, including 'criterion_history'\n            and 'time_history'.\n        runtime_measure (str): One of \"n_evaluations\", \"n_batches\".\n        distance_measure (str): One of \"criterion\", \"parameter_distance\".\n        monotone (bool): If True the best found criterion value so far is plotted.\n            If False the particular criterion evaluation of that time is used.\n        template (str): The template for the figure. 
Default is \"plotly_white\".\n\n    Returns:\n        plotly.Figure\n\n    \"\"\"\n    df, _ = process_benchmark_results(\n        problems=problems,\n        results=results,\n        stopping_criterion=\"y\",\n        x_precision=1e-6,\n        y_precision=1e-6,\n    )\n\n    outcome = f\"{'monotone_' if monotone else ''}\" + distance_measure + \"_normalized\"\n    deviations = (\n        df.groupby([\"problem\", \"algorithm\", runtime_measure])\n        .min()[outcome]\n        .reindex(\n            pd.MultiIndex.from_product(\n                [\n                    df[\"problem\"].unique(),\n                    df[\"algorithm\"].unique(),\n                    range(df[runtime_measure].min(), df[runtime_measure].max() + 1),\n                ],\n                names=[\"problem\", \"algorithm\", runtime_measure],\n            )\n        )\n        .ffill()\n        .reset_index()\n    )\n    average_deviations = (\n        deviations.groupby([\"algorithm\", runtime_measure])\n        .mean(numeric_only=True)[outcome]\n        .reset_index()\n    )\n    fig = px.line(average_deviations, x=runtime_measure, y=outcome, color=\"algorithm\")\n\n    y_labels = {\n        \"criterion_normalized\": \"Share of Function Distance to Optimum<br>\"\n        \"Missing From Current Criterion Value\",\n        \"monotone_criterion_normalized\": \"Share of Function Distance to Optimum<br>\"\n        \"Missing From Best So Far\",\n        \"parameter_distance_normalized\": \"Share of Parameter Distance to Optimum<br>\"\n        \"Missing From Current Parameters\",\n        \"monotone_parameter_distance_normalized\": \"Share of the Parameter Distance \"\n        \"to Optimum<br> Missing From the Best Parameters So Far\",\n    }\n    x_labels = {\n        \"n_evaluations\": \"Number of Function Evaluations\",\n        \"n_batches\": \"Number of Batches\",\n    }\n    fig.update_layout(\n        xaxis_title=x_labels[runtime_measure],\n        yaxis_title=y_labels[outcome],\n        
title=None,\n        height=300,\n        width=500,\n        margin={\"l\": 10, \"r\": 10, \"t\": 30, \"b\": 10},\n        template=template,\n    )\n\n    return fig\n"
  },
  {
    "path": "src/optimagic/visualization/history_plots.py",
    "content": "import inspect\nimport itertools\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Any, Callable, Literal\n\nimport numpy as np\nfrom pybaum import leaf_names, tree_flatten, tree_just_flatten, tree_unflatten\n\nfrom optimagic.config import DEFAULT_PALETTE\nfrom optimagic.logging.logger import LogReader, SQLiteLogOptions\nfrom optimagic.optimization.algorithm import Algorithm\nfrom optimagic.optimization.history import History\nfrom optimagic.optimization.optimize_result import OptimizeResult\nfrom optimagic.parameters.tree_registry import get_registry\nfrom optimagic.typing import IterationHistory, PyTree\nfrom optimagic.visualization.backends import line_plot\nfrom optimagic.visualization.plotting_utilities import LineData, get_palette_cycle\n\nBACKEND_TO_HISTORY_PLOT_LEGEND_PROPERTIES: dict[str, dict[str, Any]] = {\n    \"plotly\": {\n        \"yanchor\": \"top\",\n        \"xanchor\": \"right\",\n        \"y\": 0.95,\n        \"x\": 0.95,\n    },\n    \"matplotlib\": {\n        \"loc\": \"upper right\",\n    },\n    \"bokeh\": {\n        \"location\": \"top_right\",\n    },\n    \"altair\": {\n        \"orient\": \"top-right\",\n    },\n}\n\n\nResultOrPath = OptimizeResult | str | Path\n\n\ndef criterion_plot(\n    results: ResultOrPath | list[ResultOrPath] | dict[str, ResultOrPath],\n    names: list[str] | str | None = None,\n    max_evaluations: int | None = None,\n    backend: Literal[\"plotly\", \"matplotlib\", \"bokeh\", \"altair\"] = \"plotly\",\n    template: str | None = None,\n    palette: list[str] | str = DEFAULT_PALETTE,\n    stack_multistart: bool = False,\n    monotone: bool = False,\n    show_exploration: bool = False,\n) -> Any:\n    \"\"\"Plot the criterion history of an optimization.\n\n    Args:\n        results: An optimization result (or list of, or dict of results) with collected\n            history, or path(s) to it. 
If dict, then the key is used as the name in the\n            legend.\n        max_evaluations: Clip the criterion history after that many entries.\n        backend: The backend to use for plotting. Default is \"plotly\".\n        template: The template for the figure. If not specified, the default template of\n            the backend is used. For the 'bokeh' and 'altair' backends, this changes the\n            global theme, which affects all plots from that backend in the session.\n        palette: The coloring palette for traces. Default is the D3 qualitative palette.\n        stack_multistart: Whether to combine multistart histories into a single history.\n            Default is False.\n        monotone: If True, the criterion plot becomes monotone in the sense that at each\n            iteration the current best criterion value is displayed. Default is False.\n        show_exploration: If True, exploration samples of a multistart optimization are\n            visualized. Default is False.\n\n    Returns:\n        The figure object containing the criterion plot.\n\n    \"\"\"\n    # ==================================================================================\n    # Process inputs\n\n    palette_cycle = get_palette_cycle(palette)\n\n    dict_of_optimize_results_or_paths = _harmonize_inputs_to_dict(results, names)\n\n    # ==================================================================================\n    # Extract backend-agnostic plotting data from results\n\n    list_of_optimize_data = _retrieve_optimization_data_from_results(\n        results=dict_of_optimize_results_or_paths,\n        stack_multistart=stack_multistart,\n        show_exploration=show_exploration,\n        plot_name=\"criterion_plot\",\n    )\n\n    lines, multistart_lines = _extract_criterion_plot_lines(\n        data=list_of_optimize_data,\n        max_evaluations=max_evaluations,\n        palette_cycle=palette_cycle,\n        stack_multistart=stack_multistart,\n        
monotone=monotone,\n    )\n\n    # ==================================================================================\n    # Generate the figure\n\n    fig = line_plot(\n        lines=multistart_lines + lines,\n        backend=backend,\n        xlabel=\"No. of criterion evaluations\",\n        ylabel=\"Criterion value\",\n        template=template,\n        legend_properties=BACKEND_TO_HISTORY_PLOT_LEGEND_PROPERTIES.get(backend, None),\n    )\n\n    return fig\n\n\ndef _harmonize_inputs_to_dict(\n    results: ResultOrPath | list[ResultOrPath] | dict[str, ResultOrPath],\n    names: list[str] | str | None,\n) -> dict[str, ResultOrPath]:\n    \"\"\"Convert all valid inputs for results and names to dict[str, OptimizeResult].\"\"\"\n    # convert scalar case to list case\n    if not isinstance(names, list) and names is not None:\n        names = [names]\n\n    if isinstance(results, (OptimizeResult, str, Path)):\n        results = [results]\n\n    if names is not None and len(names) != len(results):\n        raise ValueError(\"len(results) needs to be equal to len(names).\")\n\n    # handle dict case\n    if isinstance(results, dict):\n        if names is not None:\n            results_dict = dict(zip(names, list(results.values()), strict=False))\n        else:\n            results_dict = results\n\n    # unlabeled iterable of results\n    else:\n        if names is None:\n            names = [str(i) for i in range(len(results))]\n        results_dict = dict(zip(names, results, strict=False))\n\n    # convert keys to strings\n    results_dict = {_convert_key_to_str(k): v for k, v in results_dict.items()}\n\n    return results_dict\n\n\ndef _convert_key_to_str(key: Any) -> str:\n    if inspect.isclass(key) and issubclass(key, Algorithm):\n        out = str(key.name)\n    elif isinstance(key, Algorithm):\n        out = str(key.name)\n    else:\n        out = str(key)\n    return out\n\n\ndef params_plot(\n    result: ResultOrPath,\n    selector: Callable[[PyTree], PyTree] 
| None = None,\n    max_evaluations: int | None = None,\n    backend: Literal[\"plotly\", \"matplotlib\", \"bokeh\", \"altair\"] = \"plotly\",\n    template: str | None = None,\n    palette: list[str] | str = DEFAULT_PALETTE,\n    show_exploration: bool = False,\n) -> Any:\n    \"\"\"Plot the params history of an optimization.\n\n    Args:\n        result: An optimization result with collected history, or path to it.\n            If dict, then the key is used as the name in the legend.\n        selector: A callable that takes params and returns a subset of params.\n            If provided, only the selected subset of params is plotted.\n        max_evaluations: Clip the criterion history after that many entries.\n        backend: The backend to use for plotting. Default is \"plotly\".\n        template: The template for the figure. If not specified, the default template of\n            the backend is used. For the 'bokeh' and 'altair' backends, this changes the\n            global theme, which affects all plots from that backend in the session.\n        palette: The coloring palette for traces. Default is the D3 qualitative palette.\n        show_exploration: If True, exploration samples of a multistart optimization are\n            visualized. 
Default is False.\n\n    Returns:\n        The figure object containing the params plot.\n\n    \"\"\"\n    # ==================================================================================\n    # Process inputs\n\n    palette_cycle = get_palette_cycle(palette)\n\n    # ==================================================================================\n    # Extract backend-agnostic plotting data from results\n\n    optimize_data = _retrieve_optimization_data_from_single_result(\n        result=result,\n        stack_multistart=True,\n        show_exploration=show_exploration,\n        plot_name=\"params_plot\",\n    )\n\n    lines = _extract_params_plot_lines(\n        data=optimize_data,\n        selector=selector,\n        max_evaluations=max_evaluations,\n        palette_cycle=palette_cycle,\n    )\n\n    # ==================================================================================\n    # Generate the figure\n\n    fig = line_plot(\n        lines=lines,\n        backend=backend,\n        xlabel=\"No. of criterion evaluations\",\n        ylabel=\"Parameter value\",\n        template=template,\n        legend_properties=BACKEND_TO_HISTORY_PLOT_LEGEND_PROPERTIES.get(backend, None),\n    )\n\n    return fig\n\n\n@dataclass(frozen=True)\nclass _PlottingMultistartHistory:\n    \"\"\"Data container for an optimization history and metadata. 
Contains local histories\n    in case of multistart optimization.\n\n    This dataclass is only used internally.\n\n    \"\"\"\n\n    history: History\n    name: str | None\n    start_params: PyTree\n    is_multistart: bool\n    local_histories: list[History] | list[IterationHistory] | None\n    stacked_local_histories: History | None\n\n\ndef _retrieve_optimization_data_from_results(\n    results: dict[str, ResultOrPath],\n    stack_multistart: bool,\n    show_exploration: bool,\n    plot_name: str,\n) -> list[_PlottingMultistartHistory]:\n    # Retrieves data from multiple results by iterating over the results dictionary\n    # and calling the single result retrieval function.\n\n    data = []\n    for name, res in results.items():\n        _data = _retrieve_optimization_data_from_single_result(\n            result=res,\n            stack_multistart=stack_multistart,\n            show_exploration=show_exploration,\n            plot_name=plot_name,\n            res_name=name,\n        )\n\n        data.append(_data)\n\n    return data\n\n\ndef _retrieve_optimization_data_from_single_result(\n    result: ResultOrPath,\n    stack_multistart: bool,\n    show_exploration: bool,\n    plot_name: str,\n    res_name: str | None = None,\n) -> _PlottingMultistartHistory:\n    \"\"\"Retrieve data from a single result (OptimizeResult or database).\n\n    Args:\n        result: An optimization result with collected history, or path to it.\n        stack_multistart: Whether to combine multistart histories into a single history.\n            Default is False.\n        show_exploration: If True, exploration samples of a multistart optimization are\n            visualized. Default is False.\n        plot_name: Name of the plotting function that calls this function. 
Used for\n            raising errors.\n        res_name: Name of the result.\n\n    Returns:\n        A data object containing the history, metadata, and local histories of the\n            optimization result.\n\n    \"\"\"\n    if isinstance(result, OptimizeResult):\n        data = _retrieve_optimization_data_from_result_object(\n            res=result,\n            stack_multistart=stack_multistart,\n            show_exploration=show_exploration,\n            plot_name=plot_name,\n            res_name=res_name,\n        )\n    elif isinstance(result, (str, Path)):\n        data = _retrieve_optimization_data_from_database(\n            res=result,\n            stack_multistart=stack_multistart,\n            show_exploration=show_exploration,\n            res_name=res_name,\n        )\n    else:\n        msg = (\n            \"result must be an OptimizeResult or a path to a log file, \"\n            f\"but is type {type(result)}.\"\n        )\n        raise TypeError(msg)\n\n    return data\n\n\ndef _retrieve_optimization_data_from_result_object(\n    res: OptimizeResult,\n    stack_multistart: bool,\n    show_exploration: bool,\n    plot_name: str,\n    res_name: str | None = None,\n) -> _PlottingMultistartHistory:\n    \"\"\"Retrieve optimization data from result object.\n\n    Args:\n        res: An optimization result object.\n        stack_multistart: Whether to combine multistart histories into a single history.\n            Default is False.\n        show_exploration: If True, exploration samples of a multistart optimization are\n            visualized. Default is False.\n        plot_name: Name of the plotting function that calls this function. 
Used for\n            raising errors.\n        res_name: Name of the result.\n\n    Returns:\n        A data object containing the history, metadata, and local histories of the\n            optimization result.\n\n    \"\"\"\n    if res.history is None:\n        msg = f\"{plot_name} requires an optimize result with history. Enable history \"\n        \"collection by setting collect_history=True when calling maximize or minimize.\"\n        raise ValueError(msg)\n\n    if res.multistart_info:\n        local_histories = [\n            opt.history\n            for opt in res.multistart_info.local_optima\n            if opt.history is not None\n        ]\n\n        if stack_multistart:\n            stacked = _get_stacked_local_histories(local_histories, res.direction)\n            if show_exploration:\n                fun = res.multistart_info.exploration_results[::-1] + stacked.fun\n                params = res.multistart_info.exploration_sample[::-1] + stacked.params\n\n                stacked = History(\n                    direction=stacked.direction,\n                    fun=fun,\n                    params=params,\n                    # TODO: This needs to be fixed\n                    start_time=len(fun) * [None],  # type: ignore\n                    stop_time=len(fun) * [None],  # type: ignore\n                    batches=len(fun) * [None],  # type: ignore\n                    task=len(fun) * [None],  # type: ignore\n                )\n        else:\n            stacked = None\n    else:\n        local_histories = None\n        stacked = None\n\n    data = _PlottingMultistartHistory(\n        history=res.history,\n        name=res_name,\n        start_params=res.start_params,\n        is_multistart=res.multistart_info is not None,\n        local_histories=local_histories,\n        stacked_local_histories=stacked,\n    )\n    return data\n\n\ndef _retrieve_optimization_data_from_database(\n    res: str | Path,\n    stack_multistart: bool,\n    show_exploration: 
bool,\n    res_name: str | None = None,\n) -> _PlottingMultistartHistory:\n    \"\"\"Retrieve optimization data from a database.\n\n    Args:\n        res: A path to an optimization database.\n        stack_multistart: Whether to combine multistart histories into a single history.\n            Default is False.\n        show_exploration: If True, exploration samples of a multistart optimization are\n            visualized. Default is False.\n        res_name: Name of the result.\n\n    Returns:\n        A data object containing the history, metadata, and local histories of the\n            optimization result.\n\n    \"\"\"\n    reader: LogReader = LogReader.from_options(SQLiteLogOptions(res))\n    _problem_table = reader.problem_df\n\n    direction = _problem_table[\"direction\"].tolist()[-1]\n\n    multistart_history = reader.read_multistart_history(direction)\n    _history = multistart_history.history\n    local_histories = multistart_history.local_histories\n    exploration = multistart_history.exploration\n\n    if stack_multistart and local_histories is not None:\n        stacked = _get_stacked_local_histories(local_histories, direction, _history)\n        if show_exploration:\n            stacked[\"params\"] = exploration[\"params\"][::-1] + stacked[\"params\"]  # type: ignore\n            stacked[\"criterion\"] = exploration[\"criterion\"][::-1] + stacked[\"criterion\"]  # type: ignore\n    else:\n        stacked = None\n\n    history = History(\n        direction=direction,\n        fun=_history[\"fun\"],\n        params=_history[\"params\"],\n        start_time=_history[\"time\"],\n        # TODO (@janosg): Retrieve `stop_time` from `hist` once it is available.\n        # https://github.com/optimagic-dev/optimagic/pull/553\n        stop_time=len(_history[\"fun\"]) * [None],  # type: ignore\n        task=len(_history[\"fun\"]) * [None],  # type: ignore\n        batches=list(range(len(_history[\"fun\"]))),\n    )\n\n    data = _PlottingMultistartHistory(\n  
      history=history,\n        name=res_name,\n        start_params=reader.read_start_params(),\n        is_multistart=local_histories is not None,\n        local_histories=local_histories,\n        stacked_local_histories=stacked,\n    )\n    return data\n\n\ndef _get_stacked_local_histories(\n    local_histories: list[History] | list[IterationHistory],\n    direction: Any,\n    history: History | IterationHistory | None = None,\n) -> History:\n    \"\"\"Stack local histories.\n\n    Local histories is a list of dictionaries, each of the same structure. We transform\n    this to a dictionary of lists. Finally, when the data is read from the database we\n    append the best history at the end.\n\n    \"\"\"\n    stacked: dict[str, list[Any]] = {\"criterion\": [], \"params\": [], \"runtime\": []}\n    for hist in local_histories:\n        stacked[\"criterion\"].extend(hist.fun)\n        stacked[\"params\"].extend(hist.params)\n        stacked[\"runtime\"].extend(hist.time)\n\n    # append additional history is necessary\n    if history is not None:\n        stacked[\"criterion\"].extend(history.fun)\n        stacked[\"params\"].extend(history.params)\n        stacked[\"runtime\"].extend(history.time)\n\n    return History(\n        direction=direction,\n        fun=stacked[\"criterion\"],\n        params=stacked[\"params\"],\n        start_time=stacked[\"runtime\"],\n        # TODO (@janosg): Retrieve `stop_time` from `hist` once it is available for the\n        # IterationHistory.\n        # https://github.com/optimagic-dev/optimagic/pull/553\n        stop_time=len(stacked[\"criterion\"]) * [None],  # type: ignore\n        task=len(stacked[\"criterion\"]) * [None],  # type: ignore\n        batches=list(range(len(stacked[\"criterion\"]))),\n    )\n\n\ndef _extract_criterion_plot_lines(\n    data: list[_PlottingMultistartHistory],\n    max_evaluations: int | None,\n    palette_cycle: \"itertools.cycle[str]\",\n    stack_multistart: bool,\n    monotone: bool,\n) -> 
tuple[list[LineData], list[LineData]]:\n    \"\"\"Extract lines for criterion plot from data.\n\n    Args:\n        data: Data retrieved from results or database.\n        max_evaluations: Clip the criterion history after that many entries.\n        palette_cycle: Cycle of colors for plotting.\n        stack_multistart: Whether to combine multistart histories into a single\n            history. Default is False.\n        monotone: If True, the criterion plot becomes monotone in the sense that at each\n            iteration the current best criterion value is displayed.\n\n    Returns:\n        Tuple containing\n            - lines: Main optimization paths.\n            - multistart_lines: Multistart optimization paths.\n\n    \"\"\"\n    fun_or_monotone_fun = \"monotone_fun\" if monotone else \"fun\"\n\n    # Collect multistart optimization paths\n    multistart_lines: list[LineData] = []\n\n    plot_multistart = len(data) == 1 and data[0].is_multistart and not stack_multistart\n\n    if plot_multistart and data[0].local_histories:\n        for i, local_history in enumerate(data[0].local_histories):\n            history = getattr(local_history, fun_or_monotone_fun)\n\n            if max_evaluations is not None and len(history) > max_evaluations:\n                history = history[:max_evaluations]\n\n            line_data = LineData(\n                x=np.arange(len(history)),\n                y=history,\n                color=\"#bab0ac\",\n                name=str(i),\n                show_in_legend=False,\n            )\n            multistart_lines.append(line_data)\n\n    # Collect main optimization paths\n    lines: list[LineData] = []\n\n    for _data in data:\n        if stack_multistart and _data.stacked_local_histories is not None:\n            _history = _data.stacked_local_histories\n        else:\n            _history = _data.history\n\n        history = getattr(_history, fun_or_monotone_fun)\n\n        if max_evaluations is not None and len(history) > 
max_evaluations:\n            history = history[:max_evaluations]\n\n        line_data = LineData(\n            x=np.arange(len(history)),\n            y=history,\n            color=next(palette_cycle),\n            name=\"best result\" if plot_multistart else _data.name,\n            show_in_legend=not plot_multistart,\n        )\n        lines.append(line_data)\n\n    return lines, multistart_lines\n\n\ndef _extract_params_plot_lines(\n    data: _PlottingMultistartHistory,\n    selector: Callable[[PyTree], PyTree] | None,\n    max_evaluations: int | None,\n    palette_cycle: \"itertools.cycle[str]\",\n) -> list[LineData]:\n    \"\"\"Extract lines for params plot from data.\n\n    Args:\n        data: Data retrieved from results or database.\n        selector: A callable that takes params and returns a subset of params.\n            If provided, only the selected subset of params is plotted.\n        max_evaluations: Clip the criterion history after that many entries.\n        palette_cycle: Cycle of colors for plotting.\n\n    Returns:\n        lines: Parameter histories.\n\n    \"\"\"\n    if data.stacked_local_histories is not None:\n        history = data.stacked_local_histories.params\n    else:\n        history = data.history.params\n    start_params = data.start_params\n\n    registry = get_registry(extended=True)\n\n    hist_arr = np.array([tree_just_flatten(p, registry=registry) for p in history]).T\n    names = leaf_names(start_params, registry=registry)\n\n    if selector is not None:\n        flat, treedef = tree_flatten(start_params, registry=registry)\n        helper = tree_unflatten(treedef, list(range(len(flat))), registry=registry)\n        selected = np.array(tree_just_flatten(selector(helper), registry=registry))\n        names = [names[i] for i in selected]\n        hist_arr = hist_arr[selected]\n\n    lines: list[LineData] = []\n\n    for name, _data in zip(names, hist_arr, strict=False):\n        if max_evaluations is not None and len(_data) 
> max_evaluations:\n            plot_data = _data[:max_evaluations]\n        else:\n            plot_data = _data\n\n        line_data = LineData(\n            x=np.arange(len(plot_data)),\n            y=plot_data,\n            color=next(palette_cycle),\n            name=name,\n            show_in_legend=True,\n        )\n        lines.append(line_data)\n\n    return lines\n"
  },
  {
    "path": "src/optimagic/visualization/plotting_utilities.py",
    "content": "import base64\nimport collections.abc\nimport itertools\nfrom copy import deepcopy\nfrom dataclasses import dataclass\nfrom typing import Any\n\nimport numpy as np\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nfrom optimagic.config import PLOTLY_TEMPLATE\n\n\n@dataclass(frozen=True)\nclass LineData:\n    \"\"\"Data of a single line.\n\n    Attributes:\n        x: The x-coordinates of the points.\n        y: The y-coordinates of the points.\n        color: The color of the line. Default is None.\n        name: The name of the line. Default is None.\n        show_in_legend: Whether to show the line in the legend. Default is True.\n\n    \"\"\"\n\n    x: np.ndarray\n    y: np.ndarray\n    color: str | None = None\n    name: str | None = None\n    show_in_legend: bool = True\n\n\n@dataclass(frozen=True)\nclass MarkerData:\n    \"\"\"Data of a single marker.\n\n    Attributes:\n        x: The x-coordinate of the marker.\n        y: The y-coordinate of the marker.\n        color: The color of the marker. Default is None.\n        name: The name of the marker. Default is None.\n\n    \"\"\"\n\n    x: float\n    y: float\n    color: str | None = None\n    name: str | None = None\n\n\ndef combine_plots(\n    plots,\n    plots_per_row=2,\n    sharex=False,\n    sharey=True,\n    share_yrange_all=True,\n    expand_yrange=0.02,\n    share_xrange_all=False,\n    make_subplot_kwargs=None,\n    showlegend=True,\n    template=PLOTLY_TEMPLATE,\n    clean_legend=True,\n    layout_kwargs=None,\n    legend_kwargs=None,\n    title_kwargs=None,\n):\n    \"\"\"Combine individual plots into figure with subplots. 
Uses list of plotly Figures\n    to build plotly Figure with subplots.\n\n    Args:\n        plots (list): List with individual plots.\n        plots_per_row (int): Number of plots per row.\n        make_subplot_kwargs (dict or NoneType): Dictionary of keyword arguments used\n            to instantiate plotly Figure with multiple subplots. Is used to define\n            properties such as, for example, the spacing between subplots. If None,\n            default arguments defined in the function are used.\n        sharex (bool): Whether to share the properties of x-axis across subplots in\n            the same column.\n        sharey (bool): If True, share the properties of y-axis across subplots in the\n            same row.\n        share_yrange_all (bool): If True, set the same range of y axis for all plots.\n        expand_yrange (float): The ratio by which to expand the range of the (shared) y\n            axis, such that the axis is not cropped at exactly max of y variable.\n        share_xrange_all (bool): If True, set the same range of x axis for all plots.\n        showlegend (bool): If True, show legend.\n        template (str): Plotly layout template. Must be one of plotly.io.templates.\n        clean_legend (bool): If True, then cleans the legend from duplicates.\n        layout_kwargs (dict or NoneType): Dictionary of key word arguments used to\n            update layout of plotly Figure object. If None, the default kwargs defined\n            in the function will be used.\n        legend_kwargs (dict or NoneType): Dictionary of key word arguments used to\n            update position, orientation and title of figure legend. If None, default\n            position and orientation will be used with no title.\n        title_kwargs (dict or NoneType): Dictionary of key word arguments used to\n            update properties of the figure title. 
Use {'text': '<desired title>'}\n            to set figure title.\n\n    Returns:\n        fig (plotly.Figure): Plotly figure with subplots that combines individual\n            slice plots.\n\n    \"\"\"\n    plots = deepcopy(plots)\n\n    make_subplot_kwargs, nrows = get_make_subplot_kwargs(\n        sharex, sharey, make_subplot_kwargs, plots_per_row, plots\n    )\n    fig = make_subplots(**make_subplot_kwargs)\n    layout_kwargs = get_layout_kwargs(\n        layout_kwargs, legend_kwargs, title_kwargs, template, showlegend\n    )\n    for i, (row, col) in enumerate(\n        itertools.product(np.arange(nrows), np.arange(plots_per_row))\n    ):\n        try:\n            subfig = plots[i]\n            fig.update_xaxes(\n                title_text=subfig.layout.xaxis.title.text, col=col + 1, row=row + 1\n            )\n            if sharey:\n                if col == 0:\n                    fig.update_yaxes(\n                        title_text=subfig.layout.yaxis.title.text,\n                        col=col + 1,\n                        row=row + 1,\n                    )\n            else:\n                fig.update_yaxes(\n                    title_text=subfig.layout.yaxis.title.text, col=col + 1, row=row + 1\n                )\n        except IndexError:\n            subfig = go.Figure()\n        for d in subfig.data:\n            fig.add_trace(\n                d,\n                col=col + 1,\n                row=row + 1,\n            )\n\n    fig.update_layout(**layout_kwargs, width=400 * plots_per_row, height=300 * nrows)\n    if share_yrange_all:\n        lb = []\n        ub = []\n        for f in plots:\n            for d in f.data:\n                y = _ensure_array_from_plotly_data(d[\"y\"])\n                lb.append(np.min(y))\n                ub.append(np.max(y))\n        ub = np.max(ub)\n        lb = np.min(lb)\n        y_range = ub - lb\n        y_lower = lb - y_range * expand_yrange\n        y_upper = ub + y_range * expand_yrange\n        
fig.update_yaxes(range=[y_lower, y_upper])\n    if share_xrange_all:\n        lb = []\n        ub = []\n        for f in plots:\n            for d in f.data:\n                x = _ensure_array_from_plotly_data(d[\"x\"])\n                lb.append(np.min(x))\n                ub.append(np.max(x))\n        x_upper = np.max(ub)\n        x_lower = np.min(lb)\n        fig.update_xaxes(range=[x_lower, x_upper])\n    if clean_legend:\n        fig = _clean_legend_duplicates(fig)\n    return fig\n\n\ndef create_grid_plot(\n    rows,\n    cols,\n    ind_list,\n    names,\n    kws,\n    x_title=None,\n    y_title=None,\n    clean_legend=False,\n    scientific_notation=False,\n    share_xax=False,\n    x_min=None,\n    x_max=None,\n):\n    \"\"\"Create a dictionary for a grid plot from a list of traces.\n\n    Args:\n        rows (int): Number of rows in a plot.\n        cols (int): Number of cols in a plot.\n        ind_list (iterable): The list of traces for each individual plot.\n        names (iterable): The list of titles for the each plot.\n        kws (dict): The dictionary for the layout.update, unified for each\n        individual plot.\n        x_title (iterable or None): The list of x-axis labels for each plot. If None,\n        then no labels are added.\n        y_title (iterable or None): The list of y-axis labels for each plot. If None,\n        then no labels are added.\n        clean_legend (bool): If True, then cleans the legend from duplicates.\n        Default False.\n        sci_notation (bool): If True then updates the ticks on x- and y-axis to\n        be displayed in a scientific notation. 
Default False.\n        share_xax (bool): If True, then the x-axis domain is the same\n        for each individual plot.\n        x_min (int or None): The lower bound for share_xax.\n        x_max (int or None): The upped bound for share_xax.\n\n    Returns:\n        plotly.Figure: The plot with subplots.\n\n    \"\"\"\n    if x_title is None:\n        x_title = [\"\" for ind in range(len(ind_list))]\n    if y_title is None:\n        y_title = [\"\" for ind in range(len(ind_list))]\n\n    fig = make_subplots(rows=rows, cols=cols, subplot_titles=names)\n    for ind, (facet_row, facet_col) in enumerate(\n        itertools.product(range(1, rows + 1), range(1, cols + 1))\n    ):\n        if ind + 1 > len(ind_list):\n            break  # if there are empty individual plots\n\n        traces = ind_list[ind]\n        for trace in range(len(traces)):\n            fig.add_trace(traces[trace], row=facet_row, col=facet_col)\n            # style axis labels\n            fig.update_xaxes(row=facet_row, col=facet_col, title=x_title[ind])\n            fig.update_yaxes(row=facet_row, col=facet_col, title=y_title[ind])\n\n    # deleting duplicates in legend\n    if clean_legend:\n        fig = _clean_legend_duplicates(fig)\n\n    # scientific notations for axis ticks\n    if scientific_notation:\n        fig.update_yaxes(tickformat=\".2e\")\n        fig.update_xaxes(tickformat=\".2e\")\n\n    if share_xax:\n        fig.update_xaxes(range=[x_min, x_max])\n\n    # setting template theme and size\n    fig.update_layout(**kws)\n\n    return fig\n\n\ndef create_ind_dict(\n    ind_list,\n    names,\n    kws,\n    x_title=None,\n    y_title=None,\n    clean_legend=False,\n    scientific_notation=False,\n    share_xax=False,\n    x_min=None,\n    x_max=None,\n):\n    \"\"\"Create a dictionary for individual plots from a list of traces.\n\n    Args:\n        ind_list (iterable): The list of traces for each individual plot.\n        names (iterable): The list of titles for the each plot.\n   
     kws (dict): The dictionary for the layout.update, unified for each\n        individual plot.\n        x_title (iterable or None): The list of x-axis labels for each plot. If None,\n        then no labels are added.\n        y_title (iterable or None): The list of y-axis labels for each plot. If None,\n        then no labels are added.\n        clean_legend (bool): If True, then cleans the legend from duplicates.\n        Default False.\n        sci_notation (bool): If True then updates the ticks on x- and y-axis to\n        be displayed in a scientific notation. Default False.\n        share_xax (bool): If True, then the x-axis domain is the same\n        for each individual plot.\n        x_min (int or None): The lower bound for share_xax.\n        x_max (int or None): The upped bound for share_xax.\n\n    Returns:\n        Dictionary of individual plots.\n\n    \"\"\"\n    fig_dict = {}\n    if x_title is None:\n        x_title = [\"\" for ind in range(len(ind_list))]\n    if y_title is None:\n        y_title = [\"\" for ind in range(len(ind_list))]\n\n    for ind in range(len(ind_list)):\n        fig = go.Figure()\n        traces = ind_list[ind]\n        for trace in range(len(traces)):\n            fig.add_trace(traces[trace])\n        # adding title and styling axes and theme\n        fig.update_layout(\n            title=names[ind], xaxis_title=x_title[ind], yaxis_title=y_title[ind], **kws\n        )\n        # scientific notations for axis ticks\n        if scientific_notation:\n            fig.update_yaxes(tickformat=\".2e\")\n            fig.update_xaxes(tickformat=\".2e\")\n        # deleting duplicates in legend\n        if clean_legend:\n            fig = _clean_legend_duplicates(fig)\n        if share_xax:\n            fig.update_xaxes(range=[x_min, x_max])\n        # adding to dictionary\n        key = names[ind].replace(\" \", \"_\").lower()\n        fig_dict[key] = fig\n\n    return fig_dict\n\n\ndef _clean_legend_duplicates(fig):\n    
trace_names = set()\n\n    def disable_legend_if_duplicate(trace):\n        if trace.name in trace_names:\n            # in this case the legend is a duplicate\n            trace.update(showlegend=False)\n        else:\n            trace_names.add(trace.name)\n\n    fig.for_each_trace(disable_legend_if_duplicate)\n    return fig\n\n\ndef get_make_subplot_kwargs(sharex, sharey, kwrgs, plots_per_row, plots):\n    \"\"\"Define and update keywargs for instantiating figure with subplots.\"\"\"\n    nrows = int(np.ceil(len(plots) / plots_per_row))\n    default_kwargs = {\n        \"rows\": nrows,\n        \"cols\": plots_per_row,\n        \"start_cell\": \"top-left\",\n        \"print_grid\": False,\n        \"shared_yaxes\": sharey,\n        \"shared_xaxes\": sharex,\n        \"horizontal_spacing\": 1 / (plots_per_row * 4),\n    }\n\n    if nrows > 1:\n        default_kwargs[\"vertical_spacing\"] = (1 / (nrows - 1)) / 3\n\n    if not sharey:\n        default_kwargs[\"horizontal_spacing\"] = 2 * default_kwargs[\"horizontal_spacing\"]\n    if kwrgs:\n        default_kwargs.update(kwrgs)\n    return default_kwargs, nrows\n\n\ndef get_layout_kwargs(layout_kwargs, legend_kwargs, title_kwargs, template, showlegend):\n    \"\"\"Define and update default kwargs for update_layout.\n\n    Defines some default keyword arguments to update figure layout, such as title and\n    legend.\n\n    \"\"\"\n    default_kwargs = {\n        \"template\": template,\n        \"xaxis_showgrid\": False,\n        \"yaxis_showgrid\": False,\n        \"showlegend\": showlegend,\n        \"legend\": {},\n        \"title\": {},\n    }\n    if title_kwargs:\n        default_kwargs[\"title\"] = title_kwargs\n    if legend_kwargs:\n        default_kwargs[\"legend\"].update(legend_kwargs)\n    if layout_kwargs:\n        default_kwargs.update(layout_kwargs)\n    return default_kwargs\n\n\ndef _ensure_array_from_plotly_data(data: Any) -> np.ndarray:\n    \"\"\"Ensure that data is a numpy array, including 
decoding Plotly v6+ base64 format.\n\n    Args:\n        data: Can be a numpy array, (nested) sequence (e.g., list of lists), or a\n              dict with 'bdata' and 'dtype' keys (Plotly v6+ format).\n\n    Returns:\n        Data as a numpy array.\n\n    Raises:\n        ValueError: If input cannot be interpreted as an array.\n\n    \"\"\"\n    if isinstance(data, np.ndarray):\n        return data\n    elif isinstance(data, dict) and \"bdata\" in data and \"dtype\" in data:\n        return _decode_base64_data(data[\"bdata\"], dtype=data[\"dtype\"])\n    elif isinstance(data, collections.abc.Sequence):\n        try:\n            return np.array(data, dtype=np.float64)\n        except Exception:\n            pass\n    raise ValueError(\"Failed to convert input to numpy array.\")\n\n\ndef _decode_base64_data(b64data: str, dtype: str) -> np.ndarray:\n    decoded = base64.b64decode(b64data)\n    return np.frombuffer(decoded, dtype=np.dtype(dtype))\n\n\ndef get_palette_cycle(palette: list[str] | str) -> \"itertools.cycle[str]\":\n    if not isinstance(palette, list):\n        palette = [palette]\n    return itertools.cycle(palette)\n"
  },
  {
    "path": "src/optimagic/visualization/profile_plot.py",
    "content": "import itertools\nfrom typing import Any, Literal\n\nimport numpy as np\nimport pandas as pd\nfrom numpy.typing import NDArray\n\nfrom optimagic.benchmarking.process_benchmark_results import (\n    process_benchmark_results,\n)\nfrom optimagic.config import DEFAULT_PALETTE\nfrom optimagic.visualization.backends import line_plot\nfrom optimagic.visualization.plotting_utilities import LineData, get_palette_cycle\n\nBACKEND_TO_PROFILE_PLOT_LEGEND_PROPERTIES: dict[str, dict[str, Any]] = {\n    \"plotly\": {\"title\": {\"text\": \"algorithm\"}},\n    \"matplotlib\": {\n        \"loc\": \"outside right upper\",\n        \"fontsize\": \"x-small\",\n        \"title\": \"algorithm\",\n    },\n    \"bokeh\": {\n        \"location\": \"top_right\",\n        \"place\": \"right\",\n        \"label_text_font_size\": \"8pt\",\n        \"title\": \"algorithm\",\n    },\n    \"altair\": {\"orient\": \"right\", \"title\": \"algorithm\"},\n}\n\nBACKEND_TO_PROFILE_PLOT_MARGIN_PROPERTIES: dict[str, dict[str, Any]] = {\n    \"plotly\": {\"l\": 10, \"r\": 10, \"t\": 30, \"b\": 30},\n    # \"matplotlib\": handles margins automatically via constrained layout\n}\n\n\ndef profile_plot(\n    problems: dict[str, dict[str, Any]],\n    results: dict[tuple[str, str], dict[str, Any]],\n    *,\n    runtime_measure: Literal[\n        \"walltime\", \"n_evaluations\", \"n_batches\"\n    ] = \"n_evaluations\",\n    normalize_runtime: bool = False,\n    stopping_criterion: Literal[\"x\", \"y\", \"x_and_y\", \"x_or_y\"] = \"y\",\n    x_precision: float = 1e-4,\n    y_precision: float = 1e-4,\n    backend: Literal[\"plotly\", \"matplotlib\", \"bokeh\", \"altair\"] = \"plotly\",\n    template: str | None = None,\n    palette: list[str] | str = DEFAULT_PALETTE,\n) -> Any:\n    \"\"\"Compare optimizers over a problem set.\n\n    This plot answers the question: What percentage of problems can each algorithm\n    solve within a certain runtime budget?\n\n    The runtime budget is plotted on the 
x axis and the share of problems each\n    algorithm solved on the y axis.\n\n    Thus, algorithms that are very specialized and perform well on some share of\n    problems but are not able to solve more problems with a larger computational budget\n    will have steep increases and then flat lines. Algorithms that are robust but slow,\n    will have low shares in the beginning but reach very high.\n\n    Note that failing to converge according to the given stopping_criterion and\n    precisions is scored as needing an infinite computational budget.\n\n    For details, see the description of performance and data profiles by\n    Moré and Wild (2009).\n\n    Args:\n        problems: A dictionary where keys are the problem names. Values contain\n            information on the problem, including the solution value.\n        results: A dictionary where keys are tuples of the form (problem, algorithm),\n            values are dictionaries of the collected information on the benchmark\n            run, including 'criterion_history' and 'time_history'.\n        runtime_measure: This is the runtime until the desired convergence was reached\n            by an algorithm. This is called performance measure by Moré and Wild (2009).\n        normalize_runtime: If True the runtime each algorithm needed for each problem is\n            scaled by the time the fastest algorithm needed. 
If True, the resulting plot\n            is what Moré and Wild (2009) called data profiles.\n        stopping_criterion: Determines how convergence is determined from the two\n            precisions.\n        x_precision: How close an algorithm must have gotten to the true parameter\n            values (as percent of the Euclidean distance between start and solution\n            parameters) before the criterion for clipping and convergence is fulfilled.\n        y_precision: How close an algorithm must have gotten to the true criterion\n            values (as percent of the distance between start and solution criterion\n            value) before the criterion for clipping and convergence is fulfilled.\n        backend: The backend to use for plotting. Default is \"plotly\".\n        template: The template for the figure. If not specified, the default template of\n            the backend is used. For the 'bokeh' and 'altair' backends, this changes the\n            global theme, which affects all plots from that backend in the session.\n        palette: The coloring palette for traces. Default is the D3 qualitative palette.\n\n    Returns:\n        The figure object containing the profile plot.\n\n    \"\"\"\n    # ==================================================================================\n    # Process inputs\n\n    palette_cycle = get_palette_cycle(palette)\n\n    if stopping_criterion is None:\n        raise ValueError(\n            \"You must specify a stopping criterion for the performance plot. \"\n        )\n    if runtime_measure not in [\"walltime\", \"n_evaluations\", \"n_batches\"]:\n        raise ValueError(\n            \"Only 'walltime', 'n_evaluations' or 'n_batches' are allowed as \"\n            f\"runtime_measure. 
You specified '{runtime_measure}'.\"\n        )\n\n    # ==================================================================================\n    # Extract backend-agnostic plotting data from benchmark results\n\n    df, converged_info = process_benchmark_results(\n        problems=problems,\n        results=results,\n        stopping_criterion=stopping_criterion,\n        x_precision=x_precision,\n        y_precision=y_precision,\n    )\n\n    solution_times = create_solution_times(\n        df,\n        runtime_measure=runtime_measure,\n        converged_info=converged_info,\n    )\n\n    lines = _extract_profile_plot_lines(\n        solution_times=solution_times,\n        normalize_runtime=normalize_runtime,\n        converged_info=converged_info,\n        palette_cycle=palette_cycle,\n    )\n\n    # ==================================================================================\n    # Generate the figure\n\n    fig = line_plot(\n        lines,\n        backend=backend,\n        xlabel=_get_profile_plot_xlabel(runtime_measure, normalize_runtime),\n        ylabel=\"Share of Problems Solved\",\n        template=template,\n        height=300,\n        width=500,\n        legend_properties=BACKEND_TO_PROFILE_PLOT_LEGEND_PROPERTIES.get(backend, None),\n        margin_properties=BACKEND_TO_PROFILE_PLOT_MARGIN_PROPERTIES.get(backend, None),\n        horizontal_line=1.0,\n    )\n\n    return fig\n\n\ndef _extract_profile_plot_lines(\n    solution_times: pd.DataFrame,\n    normalize_runtime: bool,\n    converged_info: pd.DataFrame,\n    palette_cycle: \"itertools.cycle[str]\",\n) -> list[LineData]:\n    \"\"\"Extract lines for profile plot from data.\n\n    Args:\n        solution_times: A DataFrame where columns are the names of the algorithms,\n            indexes are the problems. 
Values are performance measures.\n        normalize_runtime: If True the runtime each algorithm needed for each problem is\n            scaled by the time the fastest algorithm needed.\n        converged_info: A DataFrame where columns are the names of the algorithms,\n            indexes are the problems. The values are boolean and True when the algorithm\n            arrived at the solution with the desired precision.\n        palette_cycle: Cycle of colors for plotting.\n\n    Returns:\n        A list of data objects containing data for each line of the profile plot.\n\n    \"\"\"\n    if normalize_runtime:\n        solution_times = solution_times.divide(solution_times.min(axis=1), axis=0)\n        solution_times[~converged_info] = np.inf\n\n    alphas = _determine_alpha_grid(solution_times)\n    for_each_alpha = pd.concat(\n        {alpha: solution_times <= alpha for alpha in alphas},\n        names=[\"alpha\"],\n    )\n    performance_profiles = for_each_alpha.groupby(\"alpha\").mean().stack().reset_index()\n\n    lines: list[LineData] = []\n\n    for algorithm, data in performance_profiles.groupby(\"algorithm\"):\n        line_data = LineData(\n            x=data[\"alpha\"].to_numpy(),\n            y=data[0].to_numpy(),\n            name=str(algorithm),\n            color=next(palette_cycle),\n        )\n        lines.append(line_data)\n\n    return lines\n\n\ndef create_solution_times(\n    df: pd.DataFrame,\n    runtime_measure: Literal[\"walltime\", \"n_evaluations\", \"n_batches\"],\n    converged_info: pd.DataFrame,\n    return_tidy: bool = True,\n) -> pd.DataFrame:\n    \"\"\"Find the solution time for each algorithm and problem.\n\n    Args:\n        df: A DataFrame which contains 'problem', 'algorithm' and 'runtime_measure'\n            as columns.\n        runtime_measure: This is the runtime until the desired convergence was reached\n            by an algorithm. 
This is called performance measure by Moré and Wild (2009).\n        converged_info: A DataFrame where columns are the names of the algorithms,\n            indexes are the problems. The values are boolean and True when the algorithm\n            arrived at the solution with the desired precision.\n        return_tidy: If True, the resulting DataFrame will be a tidy DataFrame\n            with problem and algorithm as indexes and runtime_measure as column.\n            If False, the resulting DataFrame will have problem, algorithm and\n            runtime_measure as columns.\n\n    Returns:\n        A DataFrame. If return_tidy is True, indexes are the problems, columns are the\n            algorithms. If return_tidy is False, columns are problem, algorithm and\n            runtime_measure. The values are either the number of evaluations or the\n            walltime each algorithm needed to achieve the desired precision. If the\n            desired precision was not achieved the value is set to np.inf.\n\n    \"\"\"\n    solution_times = (\n        df.groupby([\"problem\", \"algorithm\"])[runtime_measure].max().unstack()\n    )\n    # We convert the dtype to float to support the use of np.inf\n    solution_times = solution_times.astype(float).where(converged_info, other=np.inf)\n\n    if not return_tidy:\n        solution_times = solution_times.stack().reset_index()\n        solution_times = solution_times.rename(\n            columns={solution_times.columns[2]: runtime_measure}\n        )\n\n    return solution_times\n\n\ndef _determine_alpha_grid(solution_times: pd.DataFrame) -> list[np.float64]:\n    switch_points = _find_switch_points(solution_times=solution_times)\n\n    point_to_right = switch_points[-1] * 1.05\n    extended_switch_points = np.append(switch_points, point_to_right)\n    mid_points = (extended_switch_points[:-1] + extended_switch_points[1:]) / 2\n    alphas = sorted(np.append(extended_switch_points, mid_points))\n    return alphas\n\n\ndef 
_find_switch_points(solution_times: pd.DataFrame) -> NDArray[np.float64]:\n    \"\"\"Determine the switch points of the performance profiles.\n\n    Args:\n        solution_times: A DataFrame where columns are the names of the algorithms,\n            indexes are the problems. Values are performance measures. They can be\n            either float, when normalize_runtime was True or int when the\n            runtime_measure are not normalized function evaluations or datetime when the\n            not normalized walltime is used.\n\n    Returns:\n        A sorted array of switching points.\n\n    \"\"\"\n    switch_points = np.unique(solution_times.values)\n    if pd.api.types.is_float_dtype(switch_points):\n        switch_points += 1e-10\n    switch_points = switch_points[np.isfinite(switch_points)]\n    return switch_points\n\n\ndef _get_profile_plot_xlabel(runtime_measure: str, normalize_runtime: bool) -> str:\n    # The '{linebreak}' placeholder is replaced with the backend-specific line break\n    # in the corresponding plotting function.\n\n    if normalize_runtime:\n        runtime_measure_to_xlabel = {\n            \"walltime\": (\n                \"Multiple of Minimal Wall Time{linebreak}Needed to Solve the Problem\"\n            ),\n            \"n_evaluations\": (\n                \"Multiple of Minimal Number of Function Evaluations\"\n                \"{linebreak}Needed to Solve the Problem\"\n            ),\n            \"n_batches\": (\n                \"Multiple of Minimal Number of Batches\"\n                \"{linebreak}Needed to Solve the Problem\"\n            ),\n        }\n    else:\n        runtime_measure_to_xlabel = {\n            \"walltime\": \"Wall Time Needed to Solve the Problem\",\n            \"n_evaluations\": \"Number of Function Evaluations\",\n            \"n_batches\": \"Number of Batches\",\n        }\n\n    return runtime_measure_to_xlabel[runtime_measure]\n"
  },
  {
    "path": "src/optimagic/visualization/slice_plot.py",
    "content": "import warnings\nfrom functools import partial\nfrom typing import Any, Callable, Literal\n\nimport numpy as np\nimport pandas as pd\nfrom numpy.typing import NDArray\nfrom pybaum import tree_just_flatten\n\nimport optimagic as om\nfrom optimagic import deprecations\nfrom optimagic.batch_evaluators import (\n    BatchEvaluator,\n    BatchEvaluatorLiteral,\n    process_batch_evaluator,\n)\nfrom optimagic.config import DEFAULT_N_CORES, DEFAULT_PALETTE\nfrom optimagic.deprecations import replace_and_warn_about_deprecated_bounds\nfrom optimagic.optimization.fun_value import (\n    SpecificFunctionValue,\n    convert_fun_output_to_function_value,\n    enforce_return_type,\n)\nfrom optimagic.parameters.bounds import pre_process_bounds\nfrom optimagic.parameters.conversion import get_converter\nfrom optimagic.parameters.space_conversion import InternalParams\nfrom optimagic.parameters.tree_registry import get_registry\nfrom optimagic.shared.process_user_function import infer_aggregation_level\nfrom optimagic.typing import AggregationLevel, PyTree\nfrom optimagic.visualization.backends import grid_line_plot, line_plot\nfrom optimagic.visualization.plotting_utilities import LineData, MarkerData\n\n\ndef slice_plot(\n    func: Callable,\n    params: PyTree,\n    bounds: om.Bounds | None = None,\n    func_kwargs: dict | None = None,\n    selector: Callable[[PyTree], PyTree] | None = None,\n    n_cores: int = DEFAULT_N_CORES,\n    n_gridpoints: int = 20,\n    plots_per_row: int = 2,\n    param_names: dict[str, str] | None = None,\n    share_y: bool = True,\n    expand_yrange: float = 0.02,\n    share_x: bool = False,\n    backend: Literal[\"plotly\", \"matplotlib\", \"bokeh\", \"altair\"] = \"plotly\",\n    template: str | None = None,\n    color: str | None = DEFAULT_PALETTE[0],\n    title: str | None = None,\n    return_dict: bool = False,\n    batch_evaluator: BatchEvaluatorLiteral | BatchEvaluator = \"joblib\",\n    # deprecated\n    make_subplot_kwargs: 
dict | None = None,\n    lower_bounds: None = None,\n    upper_bounds: None = None,\n) -> Any:\n    \"\"\"Plot criterion along coordinates at given and random values.\n\n    Generates plots for each parameter and optionally combines them into a figure\n    with subplots.\n\n    # TODO: Use soft bounds to create the grid (if available).\n    # TODO: Don't do a function evaluation outside the batch evaluator.\n\n    Args:\n        func: criterion function that takes params and returns scalar, PyTree or\n            FunctionValue object.\n        params: A pytree with parameters.\n        bounds: Lower and upper bounds on the parameters. The bounds are used to create\n            a grid over which slice plots are drawn. The most general and preferred\n            way to specify bounds is an `optimagic.Bounds` object that collects lower,\n            upper, soft_lower and soft_upper bounds. The soft bounds are not used for\n            slice_plots. Each bound type mirrors the structure of params. Check our\n            how-to guide on bounds for examples. 
If params is a flat numpy array, you\n            can also provide bounds via any format that is supported by\n            scipy.optimize.minimize.\n        func_kwargs: Additional keyword arguments passed to func.\n        selector: Function that takes params and returns a subset of params for which we\n            actually want to generate the plot.\n        n_cores: Number of cores.\n        n_gridpoints: Number of gridpoints on which the criterion function is evaluated.\n            This is the number per plotted line.\n        plots_per_row: Number of plots per row.\n        param_names: Dictionary mapping old parameter names to new ones.\n        share_y: If True, the individual plots share the scale on the yaxis and plots in\n            one row actually share the y axis.\n        expand_yrange: The ratio by which to expand the range of the (shared) y axis,\n            such that the axis is not cropped at exactly max of Criterion Value.\n        share_x: If True, set the same range of x axis for all plots and share the\n            x axis for all plots in one column.\n        backend: The backend to use for plotting. Default is \"plotly\".\n        template: The template for the figure. If not specified, the default template of\n            the backend is used. For the 'bokeh' and 'altair' backends, this changes the\n            global theme, which affects all plots from that backend in the session.\n        color: The line color.\n        title: The figure title. 
This is not used for the `bokeh` backend, as it does\n            not support title for grid plot.\n        return_dict: If True, return dictionary with individual plots of each parameter,\n            else, combine individual plots into a figure with subplots.\n        batch_evaluator: See :ref:`batch_evaluators`.\n\n    Returns:\n        The figure object containing the slice plot if `return_dict` is False.\n            Otherwise, a dictionary with individual slice plots for each parameter.\n\n    \"\"\"\n    # ==================================================================================\n    # Process inputs\n\n    bounds = replace_and_warn_about_deprecated_bounds(\n        lower_bounds=lower_bounds,\n        upper_bounds=upper_bounds,\n        bounds=bounds,\n    )\n    bounds = pre_process_bounds(bounds)\n\n    func, func_eval = _get_processed_func_and_func_eval(func, func_kwargs, params)\n\n    if make_subplot_kwargs is not None:\n        deprecations.throw_make_subplot_kwargs_in_slice_plot_future_warning()\n\n    # ==================================================================================\n    # Extract backend-agnostic plotting data from results\n\n    plot_data, internal_params = _get_plot_data(\n        func=func,\n        params=params,\n        bounds=bounds,\n        func_eval=func_eval,\n        selector=selector,\n        n_gridpoints=n_gridpoints,\n        batch_evaluator=batch_evaluator,\n        n_cores=n_cores,\n    )\n\n    lines_list, marker_list, xlabels, ylabels = _extract_slice_plot_lines_and_labels(\n        plot_data=plot_data,\n        internal_params=internal_params,\n        func_eval=func_eval,\n        param_names=param_names,\n        color=color,\n    )\n\n    # ==================================================================================\n    # Generate the figure\n\n    xrange, yrange = _get_axis_limits(\n        plot_data, share_y=share_y, share_x=share_x, expand_yrange=expand_yrange\n    )\n\n    if 
return_dict:\n        fig_dict = {}\n\n        for i in range(len(lines_list)):\n            fig = line_plot(\n                lines=lines_list[i],\n                marker=marker_list[i],\n                backend=backend,\n                xlabel=xlabels[i],\n                ylabel=ylabels[i],\n                template=template,\n            )\n\n            fig_dict[xlabels[i]] = fig\n\n        return fig_dict\n    else:\n        n_rows = int(np.ceil(len(lines_list) / plots_per_row))\n\n        if share_y:\n            ylabels = [\n                ylabel if i % plots_per_row == 0 else \"\"\n                for i, ylabel in enumerate(ylabels)\n            ]\n\n        fig = grid_line_plot(\n            lines_list=lines_list,\n            marker_list=marker_list,\n            backend=backend,\n            n_rows=n_rows,\n            n_cols=plots_per_row,\n            xlabels=xlabels,\n            xrange=xrange,\n            share_x=share_x,\n            ylabels=ylabels,\n            yrange=yrange,\n            share_y=share_y,\n            template=template,\n            height=300 * n_rows,\n            width=400 * plots_per_row,\n            plot_title=title,\n            make_subplot_kwargs=make_subplot_kwargs,\n        )\n        return fig\n\n\ndef _get_processed_func_and_func_eval(\n    func: Callable, func_kwargs: dict | None, params: PyTree\n) -> tuple[Callable, SpecificFunctionValue]:\n    if func_kwargs is not None:\n        func = partial(func, **func_kwargs)\n    func_eval = func(params)\n\n    # handle deprecated function output\n    if deprecations.is_dict_output(func_eval):\n        msg = (\n            \"Functions that return dictionaries are deprecated in slice_plot and will \"\n            \"raise an error in version 0.6.0. 
Please pass a function that returns a \"\n            \"FunctionValue object instead and use the `mark` decorators to specify \"\n            \"whether it is a scalar, least-squares or likelihood function.\"\n        )\n        warnings.warn(msg, FutureWarning)\n        func_eval = deprecations.convert_dict_to_function_value(func_eval)\n        func = deprecations.replace_dict_output(func)\n\n    # Infer the function type and enforce the return type\n    if deprecations.is_dict_output(func_eval):\n        problem_type = deprecations.infer_problem_type_from_dict_output(func_eval)\n    else:\n        problem_type = infer_aggregation_level(func)\n\n    func_eval = convert_fun_output_to_function_value(func_eval, problem_type)\n    func = enforce_return_type(problem_type)(func)\n\n    return func, func_eval\n\n\ndef _get_plot_data(\n    func: Callable,\n    params: PyTree,\n    bounds: om.Bounds | None,\n    func_eval: SpecificFunctionValue,\n    selector: Callable[[PyTree], PyTree] | None,\n    n_gridpoints: int,\n    batch_evaluator: BatchEvaluatorLiteral | BatchEvaluator,\n    n_cores: int,\n) -> tuple[pd.DataFrame, InternalParams]:\n    converter, internal_params = get_converter(\n        params=params,\n        constraints=None,\n        bounds=bounds,\n        func_eval=func_eval,\n        solver_type=\"value\",\n    )\n\n    n_params = len(internal_params.values)\n\n    selected = np.arange(n_params, dtype=int)\n    if selector is not None:\n        helper = converter.params_from_internal(selected)\n        registry = get_registry(extended=True)\n        selected = np.array(\n            tree_just_flatten(selector(helper), registry=registry), dtype=int\n        ).ravel()  # Ensure the result is a 1D array\n\n    if not np.isfinite(internal_params.lower_bounds[selected]).all():\n        raise ValueError(\"All selected parameters must have finite lower bounds.\")\n\n    if not np.isfinite(internal_params.upper_bounds[selected]).all():\n        raise 
ValueError(\"All selected parameters must have finite upper bounds.\")\n\n    evaluation_points, metadata = [], []\n    for pos in selected:\n        lb = internal_params.lower_bounds[pos]\n        ub = internal_params.upper_bounds[pos]\n        grid = np.linspace(lb, ub, n_gridpoints)\n        name = internal_params.names[pos]\n        for param_value in grid:\n            if param_value != internal_params.values[pos]:\n                meta = {\n                    \"name\": name,\n                    \"Parameter Value\": param_value,\n                }\n\n                x = internal_params.values.copy()\n                x[pos] = param_value\n                point = converter.params_from_internal(x)\n                evaluation_points.append(point)\n                metadata.append(meta)\n\n    func_values = _retrieve_func_values(\n        func, evaluation_points, batch_evaluator, n_cores\n    )\n    func_values += [func_eval.internal_value(AggregationLevel.SCALAR)] * len(selected)\n\n    for pos in selected:\n        meta = {\n            \"name\": internal_params.names[pos],\n            \"Parameter Value\": internal_params.values[pos],\n        }\n        metadata.append(meta)\n\n    plot_data = pd.DataFrame(metadata)\n    plot_data[\"Function Value\"] = func_values  # type: ignore[assignment]\n\n    return plot_data, internal_params\n\n\ndef _retrieve_func_values(\n    func: Callable,\n    evaluation_points: list[PyTree],\n    batch_evaluator: BatchEvaluatorLiteral | BatchEvaluator,\n    n_cores: int,\n) -> list[float | NDArray[np.float64]]:\n    \"\"\"Retrieve function values at given evaluation points using batch evaluator.\"\"\"\n    batch_evaluator = process_batch_evaluator(batch_evaluator)\n\n    func_values = batch_evaluator(\n        func=func,\n        arguments=evaluation_points,\n        error_handling=\"continue\",\n        n_cores=n_cores,\n    )\n\n    # add NaNs where an evaluation failed\n    func_values = [\n        np.nan if isinstance(val, 
str) else val.internal_value(AggregationLevel.SCALAR)\n        for val in func_values\n    ]\n\n    return func_values\n\n\ndef _extract_slice_plot_lines_and_labels(\n    plot_data: pd.DataFrame,\n    internal_params: InternalParams,\n    func_eval: SpecificFunctionValue,\n    param_names: dict[str, str] | None,\n    color: str | None,\n) -> tuple[list[list[LineData]], list[MarkerData], list[str], list[str]]:\n    \"\"\"Extract lines, markers and labels for slice plots.\"\"\"\n    lines_list = []\n    marker_list = []\n    xlabels = []\n    ylabels = []\n\n    for _par_name, _data in plot_data.groupby(\"name\", sort=False):\n        df = _data.sort_values(\"Parameter Value\")\n\n        par_name = str(_par_name)\n        if param_names is not None and par_name in param_names:\n            par_name = param_names[par_name]\n\n        subplot_line = LineData(\n            x=df[\"Parameter Value\"].to_numpy(),\n            y=df[\"Function Value\"].to_numpy(),\n            color=color,\n            name=par_name,\n            show_in_legend=False,\n        )\n        lines_list.append([subplot_line])\n\n        if internal_params.names is not None:\n            pos = internal_params.names.index(_par_name)\n            marker_data = MarkerData(\n                x=float(internal_params.values[pos]),\n                y=float(func_eval.internal_value(AggregationLevel.SCALAR)),\n                color=color,\n            )\n            marker_list.append(marker_data)\n\n        xlabels.append(par_name)\n        ylabels.append(\"Function Value\")\n\n    return lines_list, marker_list, xlabels, ylabels\n\n\ndef _get_axis_limits(\n    plot_data: pd.DataFrame, share_y: bool, share_x: bool, expand_yrange: float\n) -> tuple[tuple[float, float] | None, tuple[float, float] | None]:\n    if share_y:\n        lb = plot_data[\"Function Value\"].min()\n        ub = plot_data[\"Function Value\"].max()\n        y_range = ub - lb\n        ub += y_range * expand_yrange\n        lb -= y_range 
* expand_yrange\n        yrange = (lb, ub)\n    else:\n        yrange = None\n\n    if share_x:\n        lb = plot_data[\"Parameter Value\"].min()\n        ub = plot_data[\"Parameter Value\"].max()\n        xrange = (lb, ub)\n    else:\n        xrange = None\n\n    return xrange, yrange\n"
  },
  {
    "path": "src/optimagic/visualization/slice_plot_3d.py",
    "content": "import warnings\nfrom copy import deepcopy\nfrom enum import Enum\nfrom functools import partial\n\nimport numpy as np\nimport plotly.express as px\nimport plotly.graph_objects as go\nfrom numpy.typing import NDArray\nfrom plotly.subplots import make_subplots\nfrom pybaum import tree_just_flatten\n\nfrom optimagic import deprecations\nfrom optimagic.batch_evaluators import process_batch_evaluator\nfrom optimagic.config import DEFAULT_N_CORES, PLOTLY_TEMPLATE\nfrom optimagic.deprecations import replace_and_warn_about_deprecated_bounds\nfrom optimagic.optimization.fun_value import (\n    convert_fun_output_to_function_value,\n    enforce_return_type,\n)\nfrom optimagic.parameters.bounds import pre_process_bounds\nfrom optimagic.parameters.conversion import get_converter\nfrom optimagic.parameters.tree_registry import get_registry\nfrom optimagic.shared.process_user_function import infer_aggregation_level\nfrom optimagic.typing import AggregationLevel\n\n\ndef slice_plot_3d(  # type: ignore[no-untyped-def]\n    func,\n    params,\n    bounds=None,\n    func_kwargs=None,\n    selector=None,\n    n_gridpoints: int = 20,\n    projection=\"univariate\",\n    make_subplot_kwargs=None,\n    layout_kwargs=None,\n    plot_kwargs=None,\n    param_names: dict[str, str] | None = None,\n    expand_yrange: float = 0.02,\n    batch_evaluator=\"joblib\",\n    n_cores: int = DEFAULT_N_CORES,\n    return_dict: bool = False,\n    lower_bounds=None,\n    upper_bounds=None,\n) -> go.Figure | dict[tuple[int, int], go.Figure]:\n    \"\"\"Generate interactive slice, contour or surface plots of a function.\n\n    This function produces plots of a user-supplied criterion function evaluated on a\n    grid of its parameters. It can generate:\n    - 2D univariate slice plots (each parameter vs. function value).\n    - 2D contour plots (two parameters vs. function value).\n    - 3D surface plots (two parameters vs. 
function value).\n\n    Plots can be returned as a dictionary of individual figures or combined into a\n    single Plotly figure with subplots.\n\n    Args:\n        func (callable): The criterion function. It takes `params` and returns a\n            scalar, PyTree, or `FunctionValue` object.\n        params (pytree): A pytree of parameters.\n        bounds (optimagic.Bounds or sequence or None): An `optimagic.Bounds` object\n            or other supported format specifying the lower and upper bounds for\n            parameters. These bounds define the grid for the plots.\n        func_kwargs (dict or None): Additional keyword arguments for `func`.\n        selector (callable): A function that takes `params` and returns a subset\n            of them to be plotted. If None, all parameters are plotted.\n        n_gridpoints (int): The number of points per parameter used to create the\n            evaluation grid. For a 2D plot, this means `n_gridpoints`**2\n            evaluations.\n        projection (str or dict): The type of plot. Can be `\"univariate\"`,\n            `\"contour\"`, `\"surface\"`, or a dictionary like `{\"lower\": \"contour\",\n            \"upper\": \"surface\"}` to create a grid of mixed plot types.\n        make_subplot_kwargs (dict or None): Keyword arguments for\n            `plotly.subplots.make_subplots`.\n        layout_kwargs (dict or None): Keyword arguments for the figure's\n            `update_layout` method.\n        plot_kwargs (dict or None): A nested dictionary of keyword arguments to\n            customize traces, e.g., `{\"line_plot\": {\"color\": \"blue\"}}`.\n        param_names (dict or NoneType): A dictionary mapping internal parameter\n            names to display names.\n        expand_yrange (float): The factor by which to expand the function value\n            axis range. This only applies to the z-axis of **surface plots** to\n            prevent the plot from feeling cramped. 
It does not affect line or\n            contour plots.\n        batch_evaluator (str or callable): The batch evaluator to parallelize\n            function evaluations. See :ref:`batch_evaluators`.\n        n_cores (int): The number of cores to use for parallelization.\n        return_dict (bool): If `True`, returns a dictionary of `go.Figure`\n            objects keyed by `(row, col)`. If `False`, returns a single combined\n            `go.Figure`.\n        lower_bounds (sequence or None): Deprecated. Use `bounds` instead.\n        upper_bounds (sequence or None): Deprecated. Use `bounds` instead.\n\n    Returns:\n        plotly.Figure | dict: A single combined Plotly figure or a dictionary of\n        individual figures.\n\n    \"\"\"\n    bounds = replace_and_warn_about_deprecated_bounds(\n        lower_bounds=lower_bounds,\n        upper_bounds=upper_bounds,\n        bounds=bounds,\n    )\n    bounds = pre_process_bounds(bounds)\n\n    if func_kwargs is not None:\n        func = partial(func, **func_kwargs)\n\n    func_eval = func(params)\n\n    # ==================================================================================\n    # handle deprecated function output\n    # ==================================================================================\n    if deprecations.is_dict_output(func_eval):\n        msg = (\n            \"Functions that return dictionaries are deprecated in slice_plot and will \"\n            \"raise an error in version 0.6.0. 
Please pass a function that returns a \"\n            \"FunctionValue object instead and use the `mark` decorators to specify \"\n            \"whether it is a scalar, least-squares or likelihood function.\"\n        )\n        warnings.warn(msg, FutureWarning)\n        func_eval = deprecations.convert_dict_to_function_value(func_eval)\n        func = deprecations.replace_dict_output(func)\n\n    # ==================================================================================\n    # Infer the function type and enforce the return type\n    # ==================================================================================\n\n    if deprecations.is_dict_output(func_eval):\n        problem_type = deprecations.infer_problem_type_from_dict_output(func_eval)\n    else:\n        problem_type = infer_aggregation_level(func)\n\n    func_eval = convert_fun_output_to_function_value(func_eval, problem_type)\n\n    func = enforce_return_type(problem_type)(func)\n\n    # ==================================================================================\n\n    converter, internal_params = get_converter(\n        params=params,\n        constraints=None,\n        bounds=bounds,\n        func_eval=func_eval,\n        solver_type=\"value\",\n    )\n\n    n_params = len(internal_params.values)\n    selected = np.arange(n_params, dtype=int)\n    if selector is not None:\n        helper = converter.params_from_internal(selected)\n        registry = get_registry(extended=True)\n        selected = np.array(\n            tree_just_flatten(selector(helper), registry=registry), dtype=int\n        ).reshape(-1)\n    n_params = len(selected)\n    if not np.isfinite(internal_params.lower_bounds[selected]).all():\n        raise ValueError(\"All selected parameters must have finite lower bounds.\")\n\n    if not np.isfinite(internal_params.upper_bounds[selected]).all():\n        raise ValueError(\"All selected parameters must have finite upper bounds.\")\n\n    # Projection configuration\n  
  projection = Projection(projection)\n    if not projection.is_univariate and n_params < 2:\n        raise ValueError(\n            f\"{projection!r} requires at least two parameters. Got {n_params} params.\"\n        )\n\n    params_data, display_names = {}, {}\n\n    for pos in selected:\n        name = internal_params.names[pos]\n        params_data[name] = np.linspace(\n            internal_params.lower_bounds[pos],\n            internal_params.upper_bounds[pos],\n            n_gridpoints,\n        )\n        display_names[name] = param_names.get(name, name) if param_names else name\n\n    # This is where\n    evaluation_points = generate_evaluation_points(\n        projection, selected, internal_params, params_data, converter\n    )\n\n    evaluator = process_batch_evaluator(batch_evaluator)\n\n    raw_func_values = evaluator(\n        func=func,\n        arguments=evaluation_points,\n        error_handling=\"continue\",\n        n_cores=n_cores,\n    )\n\n    # add NaNs where an evaluation failed\n    func_values = np.array(\n        [\n            np.nan\n            if isinstance(val, str)\n            else val.internal_value(AggregationLevel.SCALAR)\n            for val in raw_func_values\n        ]\n    )\n\n    plot_data = plot_data_cache(\n        projection, selected, internal_params, func_values, n_gridpoints\n    )\n\n    # Kwargs evaluation\n    plot_kwargs = evaluate_plot_kwargs(plot_kwargs)\n    make_subplot_kwargs = evaluate_make_subplot_kwargs(\n        make_subplot_kwargs, n_params, projection, display_names\n    )\n    layout_kwargs = evaluate_layout_kwargs(\n        layout_kwargs, projection, make_subplot_kwargs\n    )\n\n    plots = {}\n    if projection.is_univariate:\n        cols = make_subplot_kwargs.get(\"cols\")\n        for idx, param_pos in enumerate(selected):\n            row, col = divmod(idx, cols)\n\n            param_name = internal_params.names[param_pos]\n            display_name = display_names[param_name]\n\n            x 
= params_data[param_name].tolist()\n            y = plot_data.get(\n                tuple(\n                    sorted(\n                        [\n                            param_name,\n                        ]\n                    )\n                ),\n                [],\n            )\n\n            y_range = compute_yaxis_range(\n                y[~np.isnan(y)] if np.any(~np.isnan(y)) else [0, 1], expand_yrange\n            )\n            grid_univariate = False\n\n            # Scatter plot point\n            scatter_point = {\n                \"x\": [internal_params.values[param_pos]],\n                \"y\": [func_eval.internal_value(AggregationLevel.SCALAR)],\n            }\n\n            fig = plot_line(\n                x,\n                y,\n                display_name,\n                y_range,\n                scatter_point,\n                plot_kwargs,\n                layout_kwargs,\n                grid_univariate,\n            )\n            plots[(row, col)] = fig\n    else:\n        single_plot = True if n_params == 2 else False\n        projection_config = projection.get_config()\n        lower_projection = projection_config.get(\"lower\")\n        upper_projection = projection_config.get(\"upper\")\n\n        for i, x_selected in enumerate(selected):\n            for j, y_selected in enumerate(selected):\n                if x_selected == y_selected and single_plot:\n                    x_pos, y_pos = selected\n                else:\n                    x_pos = x_selected\n                    y_pos = y_selected\n\n                # Diagonal plot are slice plots\n                if i == j and not single_plot:\n                    grid_univariate = True\n                    param_name = internal_params.names[x_pos]\n                    display_name = display_names[param_name]\n\n                    x = params_data[param_name].tolist()\n                    y = plot_data.get(\n                        tuple(\n                            
sorted(\n                                [\n                                    param_name,\n                                ]\n                            )\n                        ),\n                        [],\n                    )\n                    y_range = compute_yaxis_range(y, expand_yrange)\n\n                    # Scatter plot point\n                    scatter_point = {\n                        \"x\": [internal_params.values[x_pos]],\n                        \"y\": [func_eval.internal_value(AggregationLevel.SCALAR)],\n                    }\n\n                    fig = plot_line(\n                        x,\n                        y,\n                        display_name,\n                        y_range,\n                        scatter_point,\n                        plot_kwargs,\n                        layout_kwargs,\n                        grid_univariate,\n                    )\n\n                else:\n                    subplot_projection = None\n                    if i < j and upper_projection is not None:\n                        subplot_projection = upper_projection\n                    elif i > j and lower_projection is not None:\n                        subplot_projection = lower_projection\n                    elif i == j and single_plot:\n                        subplot_projection = lower_projection\n\n                    if subplot_projection is not None:\n                        x_name = internal_params.names[x_pos]\n                        y_name = internal_params.names[y_pos]\n                        current_param_names = [x_name, y_name]\n\n                        x, y = np.meshgrid(params_data[x_name], params_data[y_name])\n                        z = plot_data.get(tuple(sorted(current_param_names)), [])\n                        z = np.reshape(z, (n_gridpoints, n_gridpoints))\n\n                        # Scatter plot point\n                        scatter_point = {\n                            \"x\": 
[internal_params.values[x_pos]],\n                            \"y\": [internal_params.values[y_pos]],\n                            \"z\": [func_eval.internal_value(AggregationLevel.SCALAR)],\n                        }\n\n                        if subplot_projection.is_surface:\n                            fig = plot_surface(\n                                x, y, z, scatter_point, plot_kwargs, layout_kwargs\n                            )\n                        else:\n                            fig = plot_contour(\n                                x,\n                                y,\n                                z,  # type: ignore[arg-type]\n                                scatter_point,\n                                plot_kwargs,\n                                layout_kwargs,\n                            )\n                    else:\n                        fig = go.Figure()\n                plots[(i, j)] = fig\n                if single_plot:\n                    break\n            if single_plot:\n                break\n\n    if return_dict:\n        return plots\n    return combine_plots(plots, make_subplot_kwargs, layout_kwargs, expand_yrange)\n\n\ndef generate_evaluation_points(  # type: ignore[no-untyped-def]\n    projection, selected, internal_params, params_data, converter\n):\n    \"\"\"Create the list of parameter sets for function evaluation.\n\n    This function generates all the points (parameter sets) that need to be\n    evaluated by the criterion function to create the plots. 
It generates points\n    for both univariate slices and, if applicable, bivariate grids.\n\n    Args:\n        projection (Projection): The processed projection configuration object.\n        selected (NDArray[int]): Array of integer positions for the selected\n            parameters.\n        internal_params (InternalParams): An object holding the internal parameter\n            representation (values, names, bounds).\n        params_data (dict): A dictionary mapping parameter names to their grid\n            values (np.linspace array).\n        converter (Converter): The parameter converter object.\n\n    Returns:\n        list: A list of parameter pytrees. Each element is a full parameter set\n        ready to be passed to the user's criterion function.\n\n    \"\"\"\n    evaluation_points = []\n    default_point = dict(\n        zip(internal_params.names, internal_params.values, strict=False)\n    )\n    for pos in selected:\n        name = internal_params.names[pos]\n        for value in params_data[name]:\n            point = default_point.copy()\n            point[name] = value\n            values = np.array(list(point.values()))\n            evaluation_points.append(converter.params_from_internal(values))\n    if projection.is_dict:\n        for x_pos in selected:\n            for y_pos in selected:\n                if x_pos == y_pos:\n                    continue\n                x_name = internal_params.names[x_pos]\n                y_name = internal_params.names[y_pos]\n\n                x_mesh, y_mesh = np.meshgrid(params_data[x_name], params_data[y_name])\n                for x_val, y_val in zip(x_mesh.ravel(), y_mesh.ravel(), strict=False):\n                    point = default_point.copy()\n                    point[x_name] = x_val\n                    point[y_name] = y_val\n                    values = np.array(list(point.values()))\n                    evaluation_points.append(converter.params_from_internal(values))\n    return 
evaluation_points\n\n\ndef plot_data_cache(  # type: ignore[no-untyped-def]\n    projection, selected, internal_params, func_values, n_gridpoints\n):\n    \"\"\"Caches and maps evaluated function values to their parameters.\n\n    This function takes the flat array of criterion function outputs and maps\n    them back to the parameters that generated them. The result is a dictionary\n    where keys are tuples of parameter names and values are the corresponding\n    function values.\n\n    Args:\n        projection (Projection): The processed projection configuration object.\n        selected (NDArray[int]): Array of integer positions for the selected\n            parameters.\n        internal_params (InternalParams): An object holding the internal parameter\n            representation.\n        func_values (NDArray[float]): A flat numpy array containing the results\n            from the batch evaluator.\n        n_gridpoints (int): The number of grid points per parameter.\n\n    Returns:\n        dict: A dictionary mapping parameter name tuples to numpy arrays of\n        function values.\n        - For univariate plots: `{(param_name,): array([...])}`\n        - For bivariate plots: `{(param_a, param_b): array([...])}`\n\n    \"\"\"\n    plot_data = {}\n    func_values_idx = 0\n\n    for pos in selected:\n        key = tuple(\n            sorted(\n                [\n                    internal_params.names[pos],\n                ]\n            )\n        )\n        y = func_values[func_values_idx : func_values_idx + n_gridpoints]\n        plot_data[key] = y\n        func_values_idx += n_gridpoints\n\n    if projection.is_dict:\n        for x_pos in selected:\n            for y_pos in selected:\n                if x_pos == y_pos:\n                    continue\n                key = tuple(\n                    sorted([internal_params.names[x_pos], internal_params.names[y_pos]])\n                )\n                plot_data[key] = func_values[\n                    
func_values_idx : func_values_idx + (n_gridpoints**2)\n                ]\n                func_values_idx += n_gridpoints**2\n\n    return plot_data\n\n\ndef plot_line(  # type: ignore[no-untyped-def]\n    x: list[float],\n    y: list[float],\n    display_name: str,\n    y_range: list[float],\n    scatter_point,\n    plot_kwargs,\n    layout_kwargs,\n    grid_univariate: bool,\n) -> go.Figure:\n    \"\"\"Generate a 2D line plot with an overlayed scatter point.\n\n    This function constructs a line plot for a univariate parameter slice and\n    highlights the initial parameter's function value with a scatter marker.\n\n    Args:\n        x (list[float]): The parameter values for the x-axis.\n        y (list[float]): The function values for the y-axis.\n        display_name (str): The name of the parameter to be used as the x-axis\n            title.\n        y_range (list[float]): A list `[min, max]` defining the y-axis range.\n        scatter_point (dict): A dictionary with \"x\" and \"y\" keys for the\n            overlayed scatter marker.\n        plot_kwargs (dict): A dictionary of trace-level customizations.\n        layout_kwargs (dict): A dictionary of layout customizations.\n        grid_univariate (bool): If `True`, this is a diagonal plot in a grid,\n            and axis titles are omitted.\n\n    Returns:\n        go.Figure: A Plotly figure object containing the line plot.\n\n    \"\"\"\n    fig = px.line(x=x, y=y, **plot_kwargs[\"line_plot\"])\n    if plot_kwargs[\"scatter_plot\"] is not None:\n        fig.add_trace(\n            go.Scatter(\n                x=scatter_point[\"x\"],\n                y=scatter_point[\"y\"],\n                **plot_kwargs[\"scatter_plot\"],\n            )\n        )\n\n    if layout_kwargs:\n        fig.update_layout(**layout_kwargs)\n\n    if not grid_univariate:\n        fig.update_xaxes(title={\"text\": display_name})\n        fig.update_yaxes(title={\"text\": \"Function Value\"}, range=y_range)\n    else:\n        
fig.update_xaxes(title=None)\n        fig.update_yaxes(title=None, range=y_range)\n    return fig\n\n\ndef plot_surface(  # type: ignore[no-untyped-def]\n    x: NDArray[np.float64],\n    y: NDArray[np.float64],\n    z,\n    scatter_point,\n    plot_kwargs,\n    layout_kwargs,\n):\n    \"\"\"Create a 3D surface plot of the function over two parameters.\n\n    This function constructs a 3D surface plot and highlights the initial\n    parameter's function value with a 3D scatter marker.\n\n    Args:\n        x (NDArray[np.float64]): A meshgrid of x-axis parameter values.\n        y (NDArray[np.float64]): A meshgrid of y-axis parameter values.\n        z (NDArray[np.float64]): A 2D array of function values corresponding\n            to the x-y grid.\n        scatter_point (dict): A dictionary with \"x\", \"y\", and \"z\" keys for the\n            overlayed 3D scatter marker.\n        plot_kwargs (dict): A dictionary of trace-level customizations.\n        layout_kwargs (dict): A dictionary of layout customizations.\n\n    Returns:\n        go.Figure: A Plotly figure object containing the surface plot.\n\n    \"\"\"\n    trace = go.Surface(z=z, x=x, y=y, **plot_kwargs[\"surface_plot\"])\n\n    fig = go.Figure(data=[trace], layout=layout_kwargs)\n    if plot_kwargs[\"scatter_plot\"] is not None:\n        fig.add_trace(\n            go.Scatter3d(\n                x=scatter_point[\"x\"],\n                y=scatter_point[\"y\"],\n                z=scatter_point[\"z\"],\n                **plot_kwargs[\"scatter_plot\"],\n            )\n        )\n    return fig\n\n\ndef plot_contour(  # type: ignore[no-untyped-def]\n    x: NDArray[np.float64],\n    y: NDArray[np.float64],\n    z: list[float],\n    scatter_point,\n    plot_kwargs,\n    layout_kwargs,\n):\n    \"\"\"Create a 2D contour plot for function values over a parameter grid.\n\n    This function constructs a 2D contour plot and highlights the initial\n    parameter's function value with a scatter marker.\n\n    Args:\n  
      x (NDArray[np.float64]): A meshgrid of x-axis parameter values.\n        y (NDArray[np.float64]): A meshgrid of y-axis parameter values.\n        z (list[float]): A list of function values corresponding to the grid.\n        scatter_point (dict): A dictionary with \"x\" and \"y\" keys for the\n            overlayed scatter marker.\n        plot_kwargs (dict): A dictionary of trace-level customizations.\n        layout_kwargs (dict): A dictionary of layout customizations.\n\n    Returns:\n        go.Figure: A Plotly figure object containing the contour plot.\n\n    \"\"\"\n    trace = go.Contour(\n        z=z, x=x[0], y=y[:, 0], coloraxis=\"coloraxis\", **plot_kwargs[\"contour_plot\"]\n    )\n    fig = go.Figure(data=[trace], layout=layout_kwargs)\n\n    if plot_kwargs[\"scatter_plot\"] is not None:\n        fig.add_trace(\n            go.Scatter(\n                x=scatter_point[\"x\"],\n                y=scatter_point[\"y\"],\n                **plot_kwargs[\"scatter_plot\"],\n            )\n        )\n    return fig\n\n\nclass ProjectionConfig(str, Enum):\n    \"\"\"An Enum to validate and represent supported projection types.\"\"\"\n\n    UNIVARIATE = \"univariate\"\n    CONTOUR = \"contour\"\n    SURFACE = \"surface\"\n\n    @classmethod\n    def validate(cls, value):  # type: ignore[no-untyped-def]\n        if value is None:\n            return None\n        if isinstance(value, str):\n            value = value.lower()\n            if value in cls._value2member_map_:\n                return cls(value)\n            raise ValueError(f\"Invalid projection: '{value}'\")\n        raise TypeError(f\"Expected str or None, got {type(value)}\")\n\n    @property\n    def is_univariate(self) -> bool:\n        return self == ProjectionConfig.UNIVARIATE\n\n    @property\n    def is_surface(self) -> bool:\n        return self == ProjectionConfig.SURFACE\n\n    @property\n    def is_contour(self) -> bool:\n        return self == ProjectionConfig.CONTOUR\n\n\nclass 
Projection:\n    \"\"\"A helper class to parse the `projection` argument.\n\n    This class handles parsing the `projection` argument, which can be a simple\n    string (e.g., \"univariate\") or a dictionary (e.g., `{\"lower\": \"contour\",\n    \"upper\": \"surface\"}`) for creating mixed-grid plots.\n\n    \"\"\"\n\n    def __init__(self, value):  # type: ignore[no-untyped-def]\n        self._univariate = False\n        self.lower = None\n        self.upper = None\n\n        self._parse(value)\n\n    def _parse(self, value):  # type: ignore[no-untyped-def]\n        if isinstance(value, str):\n            value = value.lower()\n            if value == ProjectionConfig.UNIVARIATE:\n                self._univariate = True\n            elif value in (ProjectionConfig.SURFACE, ProjectionConfig.CONTOUR):\n                self.lower = ProjectionConfig.validate(value)\n                self.upper = None\n            else:\n                raise ValueError(f\"Invalid projection: '{value}'\")\n        elif isinstance(value, dict):\n            self.lower = ProjectionConfig.validate(value.get(\"lower\"))\n            self.upper = ProjectionConfig.validate(value.get(\"upper\"))\n        else:\n            raise TypeError(\n                f\"Invalid type for projection: {type(value)}. 
\"\n                \"Must be a string or dict with 'lower' and 'upper' keys.\"\n            )\n\n    @property\n    def is_univariate(self) -> bool:\n        return self._univariate\n\n    @property\n    def is_dict(self) -> bool:\n        return not self._univariate\n\n    def get_config(self):  # type: ignore[no-untyped-def]\n        if self._univariate:\n            return ProjectionConfig.UNIVARIATE\n        return {\"lower\": self.lower, \"upper\": self.upper}\n\n\ndef compute_yaxis_range(y: list[float], expand_yrange: float) -> list[float]:\n    # Calculate expanded y-axis limits based on data range\n    y_min, y_max = np.min(y), np.max(y)\n    y_range = y_max - y_min\n    return [y_min - expand_yrange * y_range, y_max + expand_yrange * y_range]\n\n\ndef combine_plots(  # type: ignore[no-untyped-def]\n    plots: dict[tuple[int, int], go.Figure],\n    make_subplot_kwargs,\n    layout_kwargs,\n    expand_yrange: float,\n) -> go.Figure:\n    \"\"\"Combine individual Plotly figures into a single subplot layout.\n\n    This function merges traces from a dictionary of individual plots into a\n    single `go.Figure` with a subplot grid. 
It handles axis sharing, range\n    adjustments, and overall layout formatting.\n\n    Args:\n        plots (dict): A dictionary mapping `(row, col)` tuples to `go.Figure`\n            objects.\n        make_subplot_kwargs (dict): Keyword arguments for `make_subplots`.\n        layout_kwargs (dict): Keyword arguments for the final layout update.\n        expand_yrange (float): The expansion factor to apply to any shared\n            y-axes.\n\n    Returns:\n        go.Figure: A single, combined Plotly Figure object.\n\n    \"\"\"\n    plots = deepcopy(plots)\n\n    # --- NEW, SIMPLIFIED LOGIC FOR SINGLE PLOTS ---\n    # If the plot grid is just 1x1, do not rebuild the figure.\n    # Return the already correctly-scaled plot directly.\n    if make_subplot_kwargs.get(\"rows\") == 1 and make_subplot_kwargs.get(\"cols\") == 1:\n        # Extract the single figure from the plots dictionary.\n        (row, col), fig = plots.popitem()\n\n        # Apply final layout customizations like width and height.\n        fig.update_layout(**layout_kwargs)\n\n        # Get the correct titles for the x and y axes.\n        # Note: A bug in title assignment is also fixed here.\n        all_titles = make_subplot_kwargs.get(\"column_titles\", [\"\", \"\"])\n        x_title = all_titles[0]\n        y_title = all_titles[1]\n\n        # Assign titles correctly depending on whether it's a 3D or 2D plot.\n        if hasattr(fig.layout, \"scene\") and fig.layout.scene:\n            scene_key = next(key for key in fig.layout if key.startswith(\"scene\"))\n            fig.layout[scene_key].xaxis.title = x_title\n            fig.layout[scene_key].yaxis.title = y_title\n            fig.layout[scene_key].zaxis.title = \"Function Value\"\n        else:\n            fig.update_xaxes(title_text=x_title)\n            fig.update_yaxes(title_text=y_title)\n\n        return fig\n    # --- END OF NEW LOGIC ---\n\n    # --- Original logic for creating a grid of subplots (for len(plots) > 1) ---\n    fig = 
make_subplots(**make_subplot_kwargs)\n    fig.update_layout(**layout_kwargs)\n\n    for ann in fig.layout.annotations:\n        if abs(ann.y - 1) < 1e-3:\n            ann.update(y=-0.18 / make_subplot_kwargs[\"cols\"])\n        elif abs(ann.x - 0.98) < 1e-3:\n            ann.update(x=-0.18 / make_subplot_kwargs[\"rows\"], textangle=270)\n\n    shared_y = make_subplot_kwargs.get(\"shared_yaxes\", False)\n    shared_x = make_subplot_kwargs.get(\"shared_xaxes\", False)\n    all_y, all_x = [], []\n\n    for (row_idx, col_idx), subfig in plots.items():\n        for trace in subfig.data:\n            fig.add_trace(trace, row=row_idx + 1, col=col_idx + 1)\n            if shared_y and hasattr(trace, \"y\"):\n                arr = np.array(trace.y)\n                if arr.ndim > 0:\n                    all_y.append(arr)\n            if shared_x and hasattr(trace, \"x\"):\n                arr = np.array(trace.x)\n                if arr.ndim > 0:\n                    all_x.append(arr)\n\n        if hasattr(subfig.layout, \"xaxis\") and hasattr(subfig.layout.xaxis, \"title\"):\n            fig.update_xaxes(\n                title_text=subfig.layout.xaxis.title.text,\n                row=row_idx + 1,\n                col=col_idx + 1,\n            )\n        if hasattr(subfig.layout, \"yaxis\") and hasattr(subfig.layout.yaxis, \"title\"):\n            if shared_y:\n                if col_idx == 0:\n                    fig.update_yaxes(\n                        title_text=subfig.layout.yaxis.title.text,\n                        row=row_idx + 1,\n                        col=col_idx + 1,\n                    )\n            else:\n                fig.update_yaxes(\n                    title_text=subfig.layout.yaxis.title.text,\n                    row=row_idx + 1,\n                    col=col_idx + 1,\n                )\n\n    if shared_y and all_y:\n        y_range = compute_yaxis_range(np.concatenate(all_y).tolist(), expand_yrange)\n        fig.update_yaxes(range=y_range)\n    if 
shared_x and all_x:\n        x_all = np.concatenate(all_x)\n        fig.update_xaxes(range=[np.min(x_all), np.max(x_all)])\n\n    return fig\n\n\ndef _get_subplot_spec(  # type: ignore[no-untyped-def]\n    i: int, j: int, projection, n_selected: int\n) -> dict[str | None, str | None]:\n    # Determine subplot spec type (xy, scene, contour) for a given subplot position.\n    if i == j and n_selected != 2:\n        return {\"type\": \"xy\"}\n\n    projection_config = projection.get_config()\n    if n_selected == 2:\n        sub_projection = projection_config[\"lower\"]\n    else:\n        sub_projection = (\n            projection_config[\"lower\"] if i > j else projection_config[\"upper\"]\n        )\n\n    if sub_projection:\n        if sub_projection.is_surface:\n            return {\"type\": \"scene\"}\n        elif sub_projection.is_contour:\n            return {\"type\": \"contour\"}\n\n    return {}\n\n\ndef evaluate_plot_kwargs(plot_kwargs):  # type: ignore[no-untyped-def]\n    # Set default styling for plots if not provided by the user.\n    if plot_kwargs is None:\n        plot_kwargs = {}\n\n    plot_kwargs_defaults = {\n        \"line_plot\": {\n            \"color_discrete_sequence\": [\"#497ea7\"],\n            \"markers\": False,\n            \"template\": PLOTLY_TEMPLATE,\n        },\n        \"scatter_plot\": {\n            \"marker\": {\"color\": \"red\", \"size\": 5},\n        },\n        \"surface_plot\": {\n            \"colorscale\": \"Aggrnyl\",\n            \"showscale\": False,\n            \"opacity\": 0.8,\n        },\n        \"contour_plot\": {\n            \"colorscale\": \"Aggrnyl\",\n            \"showscale\": True,\n            # \"line_smoothing\": 0.85,\n        },\n    }\n\n    plot_kwargs_defaults.update(plot_kwargs)\n    return plot_kwargs_defaults\n\n\ndef evaluate_make_subplot_kwargs(  # type: ignore[no-untyped-def]\n    make_subplot_kwargs,\n    n_selected: int,\n    projection,\n    titles: dict[str, str],\n):\n    # Set 
default parameters for make_subplots() if not provided by user.\n    if make_subplot_kwargs is None:\n        make_subplot_kwargs = {}\n\n    if projection.is_dict and any(k in make_subplot_kwargs for k in [\"rows\", \"cols\"]):\n        raise ValueError(\n            f\"`rows` and `cols` cannot be manually specified when projection is \"\n            f\"{projection} is of grid type.\"\n        )\n\n    if projection.is_univariate:\n        cols = make_subplot_kwargs.get(\"cols\", 1 if n_selected == 1 else 2)\n        rows = (n_selected + cols - 1) // cols\n        make_subplot_defaults = {\n            \"rows\": rows,\n            \"cols\": cols,\n            \"shared_xaxes\": True,\n            \"shared_yaxes\": True,\n        }\n    else:\n        rows = cols = n_selected if n_selected > 2 else 1\n\n        specs = []\n        for i in range(rows):\n            specs_row = []\n            for j in range(cols):\n                specs_row.append(_get_subplot_spec(i, j, projection, n_selected))\n            specs.append(specs_row)\n\n        make_subplot_defaults = {\n            \"rows\": rows,\n            \"cols\": cols,\n            \"specs\": specs,\n            \"row_titles\": list(titles.values()),\n            \"column_titles\": list(titles.values()),\n        }\n\n    make_subplot_defaults.update(\n        {\n            \"horizontal_spacing\": 1 / (make_subplot_defaults[\"cols\"] * 5),\n            \"vertical_spacing\": (1 / max(make_subplot_defaults[\"rows\"] - 1, 1)) / 5,\n        }\n    )\n    make_subplot_defaults.update(make_subplot_kwargs)\n    return make_subplot_defaults\n\n\n# mypy: disable-error-code=\"dict-item\"\ndef evaluate_layout_kwargs(  # type: ignore[no-untyped-def]\n    layout_kwargs,\n    projection,\n    subplot_config,\n):\n    # Set default parameters for update_layout() if not provided by user.\n\n    # Default camera view\n    default_scene_camera_view = dict(x=2, y=2, z=0.5)\n\n    if layout_kwargs is None:\n        layout_kwargs 
= {}\n    layout_defaults = {}\n\n    if subplot_config.get(\"rows\", 0) > 1 or subplot_config.get(\"cols\", 0) > 1:\n        width = 300 * subplot_config.get(\"cols\", 0)\n        height = 300 * subplot_config.get(\"rows\", 0)\n    else:\n        width = 450\n        height = 450\n\n    if projection.is_dict:\n        scene_layout = {}\n        scene_counter = 0\n\n        template = \"plotly\"\n\n        rows = subplot_config.get(\"rows\", 0)\n        cols = subplot_config.get(\"cols\", 0)\n\n        scene_layout[\"coloraxis\"] = {\"colorscale\": \"aggrnyl\"}\n\n        if \"specs\" in subplot_config:\n            specs = subplot_config[\"specs\"]\n            for i in range(rows):\n                for j in range(cols):\n                    if \"type\" in specs[i][j] and specs[i][j][\"type\"] == \"scene\":\n                        scene_counter += 1\n                        scene_id = f\"scene{scene_counter}\"\n                        scene_layout[f\"{scene_id}\"] = {\n                            \"camera\": {\"eye\": default_scene_camera_view},\n                            \"xaxis\": dict(title=\"\", nticks=5),\n                            \"yaxis\": dict(title=\"\", nticks=5),\n                            \"zaxis\": dict(title=\"\", nticks=5),\n                        }\n\n            layout_defaults.update(scene_layout)\n    else:\n        template = PLOTLY_TEMPLATE\n\n    layout_defaults.update(\n        {\n            \"width\": width,\n            \"height\": height,\n            \"template\": template,\n            \"showlegend\": False,\n        }\n    )\n\n    layout_defaults.update(layout_kwargs)\n    return layout_defaults\n"
  },
  {
    "path": "tests/__init__.py",
    "content": ""
  },
  {
    "path": "tests/conftest.py",
    "content": "import os\n\nimport pandas as pd\nimport pytest\nimport statsmodels.api as sm\n\nfrom optimagic.config import IS_MATPLOTLIB_INSTALLED\n\n\n@pytest.fixture(autouse=True)\ndef fresh_directory(tmp_path):  # noqa: PT004\n    \"\"\"Each test is executed in a fresh directory.\"\"\"\n    os.chdir(tmp_path)\n\n\n@pytest.fixture()\ndef logit_inputs():\n    spector_data = sm.datasets.spector.load_pandas()\n    spector_data.exog = sm.add_constant(spector_data.exog)\n    x_df = sm.add_constant(spector_data.exog)\n    out = {\n        \"y\": spector_data.endog,\n        \"x\": x_df.to_numpy(),\n        \"params\": pd.DataFrame([-10, 2, 0.2, 2], index=x_df.columns, columns=[\"value\"]),\n    }\n    return out\n\n\n@pytest.fixture()\ndef logit_object():\n    spector_data = sm.datasets.spector.load_pandas()\n    spector_data.exog = sm.add_constant(spector_data.exog)\n    logit_mod = sm.Logit(spector_data.endog, spector_data.exog)\n    return logit_mod\n\n\n@pytest.fixture()\ndef close_mpl_figures():\n    \"\"\"Close all matplotlib figures after test execution.\"\"\"\n    yield\n    if IS_MATPLOTLIB_INSTALLED:\n        import matplotlib.pyplot as plt\n\n        plt.close(\"all\")\n"
  },
  {
    "path": "tests/estimagic/__init__.py",
    "content": ""
  },
  {
    "path": "tests/estimagic/examples/test_logit.py",
    "content": "\"\"\"Tests for the logit example.\"\"\"\n\nfrom numpy.testing import assert_array_almost_equal as aaae\n\nfrom estimagic.examples.logit import logit_grad, logit_hess, logit_jac, logit_loglike\n\n\ndef test_logit_loglikes(logit_inputs, logit_object):\n    x = logit_inputs[\"params\"][\"value\"].to_numpy()\n    expected = logit_object.loglikeobs(x)\n    got = logit_loglike(**logit_inputs)\n\n    aaae(got, expected)\n\n\ndef test_logit_jac(logit_inputs, logit_object):\n    x = logit_inputs[\"params\"][\"value\"].to_numpy()\n    expected = logit_object.score_obs(x)\n\n    got = logit_jac(**logit_inputs)\n\n    aaae(got, expected)\n\n\ndef test_logit_grad(logit_inputs, logit_object):\n    x = logit_inputs[\"params\"][\"value\"].to_numpy()\n    expected = logit_object.score(x)\n    calculated = logit_grad(**logit_inputs)\n    aaae(calculated, expected)\n\n\ndef test_logit_hessian(logit_inputs, logit_object):\n    x = logit_inputs[\"params\"][\"value\"].to_numpy()\n    expected = logit_object.hessian(x)\n    got = logit_hess(**logit_inputs)\n    aaae(got, expected)\n"
  },
  {
    "path": "tests/estimagic/test_bootstrap.py",
    "content": "import numpy as np\nimport pandas as pd\nimport pytest\nimport seaborn as sns\nimport statsmodels.api as sm\n\nfrom estimagic import bootstrap\n\n\ndef aaae(obj1, obj2, decimal=6):\n    arr1 = np.asarray(obj1)\n    arr2 = np.asarray(obj2)\n    np.testing.assert_array_almost_equal(arr1, arr2, decimal=decimal)\n\n\n@pytest.fixture()\ndef setup():\n    out = {}\n\n    out[\"df\"] = pd.DataFrame(\n        np.array([[1, 10], [2, 7], [3, 6], [4, 5]]), columns=[\"x1\", \"x2\"]\n    )\n\n    y = np.array([[2.0, 8.0], [2.0, 8.0], [2.5, 7.0], [3.0, 6.0], [3.25, 5.75]])\n    out[\"estimates_arr\"] = y\n    out[\"estimates_df\"] = pd.DataFrame(y, columns=[\"x1\", \"x2\"])\n    out[\"estimates_dict\"] = {\"x1\": [2, 2, 2.5, 3, 3.25], \"x2\": [8, 8, 7, 6, 5.75]}\n\n    return out\n\n\n@pytest.fixture()\ndef expected():\n    out = {}\n\n    summary = np.array(\n        [\n            [2.5, 0.576222, 1.5, 3.5, np.nan, np.nan],\n            [7.0, 0.956896, 5.5, 9.0, np.nan, np.nan],\n        ]\n    )\n\n    cov = np.array([[0.332032, -0.528158], [-0.528158, 0.915651]])\n    p_values = np.array([0.0, 0.0])\n    ci_lower = np.array([1.5, 5.5])\n    ci_upper = np.array([3.5, 9.0])\n\n    out[\"summary\"] = pd.DataFrame(\n        summary,\n        columns=[\"value\", \"standard_error\", \"ci_lower\", \"ci_upper\", \"p_value\", \"stars\"],\n        index=[\"x1\", \"x2\"],\n    )\n    out[\"ci_lower\"] = pd.Series(ci_lower, index=[\"x1\", \"x2\"])\n    out[\"ci_upper\"] = pd.Series(ci_upper, index=[\"x1\", \"x2\"])\n    out[\"ci_lower_x1\"] = pd.Series(ci_lower[0], index=[\"x1\"])\n    out[\"ci_upper_x1\"] = pd.Series(ci_upper[0], index=[\"x1\"])\n    out[\"cov\"] = pd.DataFrame(cov, columns=[\"x1\", \"x2\"], index=[\"x1\", \"x2\"])\n    out[\"se\"] = pd.Series(np.sqrt(np.diagonal(cov)), index=[\"x1\", \"x2\"])\n    out[\"p_values\"] = pd.Series(p_values, index=[\"x1\", \"x2\"])\n    out[\"p_value_x1\"] = pd.Series(p_values[0], index=[\"x1\"])\n\n    return 
out\n\n\n@pytest.fixture()\ndef seaborn_example():\n    out = {}\n\n    raw = sns.load_dataset(\"exercise\", index_col=0)\n    replacements = {\"1 min\": 1, \"15 min\": 15, \"30 min\": 30}\n    df = raw.assign(time=raw.time.cat.rename_categories(replacements).astype(int))\n    df[\"constant\"] = 1\n\n    lower_ci = pd.Series([90.709236, 0.151193], index=[\"constant\", \"time\"])\n    upper_ci = pd.Series([96.827145, 0.627507], index=[\"constant\", \"time\"])\n    expected = {\"lower_ci\": lower_ci, \"upper_ci\": upper_ci}\n\n    out[\"df\"] = df\n    out[\"expected\"] = expected\n\n    return out\n\n\ndef _outcome_func(data, shift=0):\n    \"\"\"Compute column means.\n\n    Args:\n        data (pd.Series or pd.DataFrame): The data set.\n        shift (float): Scalar that is added to the column means.\n\n    Returns:\n        pd.Series: Series where the k-th row corresponds to the mean\n            of the k-th column of the input data.\n\n    \"\"\"\n    # Return pd.Series when .mean() is applied to a Series\n    # Only applying .mean() to a pd.Series would yield a float\n    return pd.DataFrame(data).mean(axis=0) + shift\n\n\ndef _outcome_ols(data):\n    y = data[\"pulse\"]\n    x = data[[\"constant\", \"time\"]]\n    params = sm.OLS(y, x).fit().params\n\n    return params\n\n\n@pytest.mark.parametrize(\"shift\", [0, 10, -10])\ndef test_bootstrap_with_outcome_kwargs(shift, setup):\n    result = bootstrap(\n        outcome=_outcome_func,\n        data=setup[\"df\"],\n        seed=123,\n        outcome_kwargs={\"shift\": shift},\n    )\n\n    expected = pd.Series([2.5, 7.0], index=[\"x1\", \"x2\"])\n    aaae(result.base_outcome, expected + shift)\n\n\ndef test_bootstrap_existing_outcomes(setup):\n    result = bootstrap(\n        data=setup[\"df\"],\n        outcome=_outcome_func,\n        n_draws=3,\n    )\n    assert len(result.outcomes) == 3\n    result = bootstrap(\n        outcome=_outcome_func,\n        data=setup[\"df\"],\n        existing_result=result,\n      
  n_draws=2,\n    )\n    assert len(result.outcomes) == 2\n\n\ndef test_bootstrap_from_outcomes(setup, expected):\n    result = bootstrap(outcome=_outcome_func, data=setup[\"df\"], seed=1234)\n\n    _ = result.outcomes\n    summary = result.summary()\n    ci_lower, ci_upper = result.ci()\n    covariance = result.cov()\n    standard_errors = result.se()\n\n    with pytest.raises(NotImplementedError):\n        assert result._p_values\n\n    aaae(ci_lower, expected[\"ci_lower\"])\n    aaae(ci_upper, expected[\"ci_upper\"])\n    aaae(covariance, expected[\"cov\"])\n    aaae(standard_errors, expected[\"se\"])\n\n    aaae(summary[\"value\"], expected[\"summary\"][\"value\"])\n    aaae(summary[\"standard_error\"], expected[\"summary\"][\"standard_error\"])\n    aaae(summary[\"ci_lower\"], expected[\"summary\"][\"ci_lower\"])\n    aaae(summary[\"ci_upper\"], expected[\"summary\"][\"ci_upper\"])\n\n\ndef test_bootstrap_from_outcomes_private_methods(setup, expected):\n    result = bootstrap(outcome=_outcome_func, data=setup[\"df\"], seed=1234)\n\n    _ = result.outcomes\n    ci_lower, ci_upper = result._ci\n    covariance = result._cov\n    standard_errors = result._se\n\n    with pytest.raises(NotImplementedError):\n        assert result._p_values\n\n    aaae(ci_lower, expected[\"ci_lower\"])\n    aaae(ci_upper, expected[\"ci_upper\"])\n    aaae(covariance, expected[\"cov\"])\n    aaae(standard_errors, expected[\"se\"])\n\n\ndef test_bootstrap_from_outcomes_single_outcome(setup, expected):\n    result = bootstrap(outcome=_outcome_func, data=setup[\"df\"][\"x1\"], seed=1234)\n\n    _ = result.outcomes\n    ci_lower, ci_upper = result.ci()\n\n    aaae(ci_lower, expected[\"ci_lower_x1\"])\n    aaae(ci_upper, expected[\"ci_upper_x1\"])\n\n\ndef test_outcome_not_callable(setup):\n    expected_msg = \"outcome must be a callable.\"\n\n    with pytest.raises(TypeError) as error:\n        assert bootstrap(data=setup[\"df\"], outcome=setup[\"estimates_df\"])\n\n    assert 
str(error.value) == expected_msg\n\n\n@pytest.mark.parametrize(\"input_type\", [\"arr\", \"df\", \"dict\"])\ndef test_existing_result_wrong_input_type(input_type, setup):\n    expected_msg = \"existing_result must be None or a BootstrapResult.\"\n\n    with pytest.raises(ValueError) as error:\n        assert bootstrap(\n            outcome=_outcome_func,\n            data=setup[\"df\"],\n            existing_result=setup[\"estimates_\" + input_type],\n        )\n\n    assert str(error.value) == expected_msg\n\n\n@pytest.mark.parametrize(\"return_type\", [\"array\", \"dataframe\", \"pytree\"])\ndef test_cov_correct_return_type(return_type, setup):\n    result = bootstrap(\n        outcome=_outcome_func,\n        data=setup[\"df\"],\n    )\n    _ = result.cov(return_type=return_type)\n\n\ndef test_cov_wrong_return_type(setup):\n    result = bootstrap(\n        outcome=_outcome_func,\n        data=setup[\"df\"],\n    )\n\n    expected_msg = \"return_type must be one of pytree, array, or dataframe, not dict.\"\n\n    with pytest.raises(ValueError) as error:\n        assert result.cov(return_type=\"dict\")\n\n    assert str(error.value) == expected_msg\n\n\ndef test_existing_result(seaborn_example):\n    first_result = bootstrap(\n        data=seaborn_example[\"df\"], outcome=_outcome_ols, seed=1234\n    )\n\n    expected_msg = \"existing_result must be None or a BootstrapResult.\"\n    with pytest.raises(ValueError) as error:\n        assert bootstrap(\n            data=seaborn_example[\"df\"],\n            outcome=_outcome_ols,\n            existing_result=first_result.outcomes,\n        )\n    assert str(error.value) == expected_msg\n\n    my_result = bootstrap(\n        data=seaborn_example[\"df\"],\n        outcome=_outcome_ols,\n        existing_result=first_result,\n        seed=2,\n    )\n    lower_ci, upper_ci = my_result.ci(ci_method=\"t\")\n\n    aaae(lower_ci, seaborn_example[\"expected\"][\"lower_ci\"])\n    aaae(upper_ci, 
seaborn_example[\"expected\"][\"upper_ci\"])\n"
  },
  {
    "path": "tests/estimagic/test_bootstrap_ci.py",
    "content": "import itertools\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom pybaum import tree_just_flatten\n\nfrom estimagic.bootstrap_ci import calculate_ci, check_inputs\nfrom estimagic.bootstrap_samples import get_bootstrap_indices\nfrom optimagic.parameters.tree_registry import get_registry\nfrom optimagic.utilities import get_rng\n\n\ndef aaae(obj1, obj2, decimal=6):\n    arr1 = np.asarray(obj1)\n    arr2 = np.asarray(obj2)\n    np.testing.assert_array_almost_equal(arr1, arr2, decimal=decimal)\n\n\n@pytest.fixture()\ndef setup():\n    out = {}\n\n    out[\"df\"] = pd.DataFrame(\n        np.array([[1, 10], [2, 7], [3, 6], [4, 5]]), columns=[\"x1\", \"x2\"]\n    )\n    out[\"estimates\"] = np.array(\n        [[2.0, 8.0], [2.0, 8.0], [2.5, 7.0], [3.0, 6.0], [3.25, 5.75]]\n    )\n\n    return out\n\n\n@pytest.fixture()\ndef expected():\n    out = {}\n\n    out[\"percentile_ci\"] = np.array([[2, 3.225], [5.775, 8.0]])\n    out[\"normal_ci\"] = np.array(\n        [\n            [1.5006105396891194, 3.499389460310881],\n            [5.130313521781885, 8.869686478218114],\n        ]\n    )\n    out[\"basic_ci\"] = np.array([[1.775, 3.0], [6.0, 8.225]])\n    out[\"bc_ci\"] = np.array([[2, 3.2342835077057543], [5.877526959881923, 8]])\n    out[\"t_ci\"] = np.array([[1.775, 3], [6.0, 8.225]])\n\n    return out\n\n\ndef _outcome_fun_series(data):\n    return data.mean(axis=0)\n\n\ndef _outcome_func_dict(data):\n    return data.mean(axis=0).to_dict()\n\n\ndef _outcome_func_arr(data):\n    return np.array(data.mean(axis=0))\n\n\nTEST_CASES = itertools.product(\n    [_outcome_fun_series, _outcome_func_dict, _outcome_func_arr],\n    [\"percentile\", \"normal\", \"basic\", \"bc\", \"t\"],\n)\n\n\n@pytest.mark.parametrize(\"outcome, method\", TEST_CASES)\ndef test_ci(outcome, method, setup, expected):\n    registry = get_registry(extended=True)\n\n    def outcome_flat(data):\n        return tree_just_flatten(outcome(data), registry=registry)\n\n    
base_outcome = outcome_flat(setup[\"df\"])\n    lower, upper = calculate_ci(base_outcome, setup[\"estimates\"], ci_method=method)\n\n    aaae(lower, expected[method + \"_ci\"][:, 0])\n    aaae(upper, expected[method + \"_ci\"][:, 1])\n\n\ndef test_check_inputs_data():\n    data = \"this is not a data frame\"\n    expected_msg = \"Data must be a pandas.DataFrame or pandas.Series.\"\n\n    with pytest.raises(TypeError) as error:\n        check_inputs(data=data)\n    assert str(error.value) == expected_msg\n\n\ndef test_check_inputs_weight_by(setup):\n    expected_error_msg = \"Input 'weight_by' must be None or a column name of 'data'.\"\n    with pytest.raises(ValueError, match=expected_error_msg):\n        check_inputs(data=setup[\"df\"], weight_by=\"this is not a column name of df\")\n\n\ndef test_get_bootstrap_indices_heterogeneous_weights():\n    data = pd.DataFrame(\n        {\"id\": [0, 1], \"w_homogenous\": [0.5, 0.5], \"w_heterogenous\": [0.1, 0.9]}\n    )\n\n    res_homogenous = get_bootstrap_indices(\n        data, weight_by=\"w_homogenous\", n_draws=1_000, rng=get_rng(seed=0)\n    )\n    res_heterogenous = get_bootstrap_indices(\n        data, weight_by=\"w_heterogenous\", n_draws=1_000, rng=get_rng(seed=0)\n    )\n\n    # Given the weights, the first sample mean should be close to 0.5,\n    # while the second one should be close to 0.9\n    assert np.mean(res_homogenous) < 0.75 < np.mean(res_heterogenous)\n\n\ndef test_check_inputs_cluster_by(setup):\n    cluster_by = \"this is not a column name of df\"\n    expected_msg = \"Input 'cluster_by' must be None or a column name of 'data'.\"\n\n    with pytest.raises(ValueError) as error:\n        check_inputs(data=setup[\"df\"], cluster_by=cluster_by)\n    assert str(error.value) == expected_msg\n\n\ndef test_check_inputs_ci_method(setup):\n    ci_method = 4\n    expected_msg = (\n        \"ci_method must be 'percentile', 'bc',\"\n        f\" 't', 'basic' or 'normal', '{ci_method}'\"\n        f\" was 
supplied\"\n    )\n\n    with pytest.raises(ValueError) as error:\n        check_inputs(data=setup[\"df\"], ci_method=ci_method)\n    assert str(error.value) == expected_msg\n\n\ndef test_check_inputs_ci_level(setup):\n    ci_level = 666\n    expected_msg = \"Input 'ci_level' must be in [0,1].\"\n\n    with pytest.raises(ValueError) as error:\n        check_inputs(data=setup[\"df\"], ci_level=ci_level)\n    assert str(error.value) == expected_msg\n"
  },
  {
    "path": "tests/estimagic/test_bootstrap_outcomes.py",
    "content": "import functools\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom numpy.testing import assert_array_almost_equal as aaae\n\nfrom estimagic.bootstrap_outcomes import (\n    _get_bootstrap_outcomes_from_indices,\n    get_bootstrap_outcomes,\n)\nfrom optimagic.batch_evaluators import joblib_batch_evaluator\nfrom optimagic.utilities import get_rng\n\n\n@pytest.fixture()\ndef data():\n    df = pd.DataFrame([[1, 10], [2, 7], [3, 6], [4, 5]], columns=[\"x1\", \"x2\"])\n    return df\n\n\ndef _mean_return_series(data):\n    out = np.mean(data, axis=0)\n    return out\n\n\ndef _mean_return_dict(data):\n    out = np.mean(data, axis=0)\n    return out.to_dict()\n\n\ndef _mean_return_array(data):\n    out = np.mean(data, axis=0).to_numpy()\n    return out\n\n\n@pytest.mark.parametrize(\n    \"outcome\",\n    [\n        (functools.partial(np.mean, axis=0)),\n        (_mean_return_series),\n        (_mean_return_dict),\n        (_mean_return_array),\n    ],\n)\ndef test_get_bootstrap_estimates_runs(outcome, data):\n    rng = get_rng(seed=1234)\n    get_bootstrap_outcomes(\n        data=data,\n        outcome=outcome,\n        rng=rng,\n        n_draws=5,\n    )\n\n\ndef test_bootstrap_estimates_from_indices_without_errors(data):\n    calculated = _get_bootstrap_outcomes_from_indices(\n        indices=[np.array([1, 3]), np.array([0, 2])],\n        data=data,\n        outcome=functools.partial(np.mean, axis=0),\n        n_cores=1,\n        error_handling=\"raise\",\n        batch_evaluator=joblib_batch_evaluator,\n    )\n\n    expected = [[3.0, 6.0], [2, 8]]\n    aaae(calculated, expected)\n\n\ndef test_get_bootstrap_estimates_with_error_and_raise(data):\n    rng = get_rng(seed=1234)\n\n    def _raise_assertion_error(data):  # noqa: ARG001\n        raise AssertionError()\n\n    with pytest.raises(AssertionError):\n        get_bootstrap_outcomes(\n            data=data,\n            outcome=_raise_assertion_error,\n            rng=rng,\n            
n_draws=2,\n            error_handling=\"raise\",\n        )\n\n\ndef test_get_bootstrap_estimates_with_all_errors_and_continue(data):\n    rng = get_rng(seed=1234)\n\n    def _raise_assertion_error(data):  # noqa: ARG001\n        raise AssertionError()\n\n    with pytest.warns(UserWarning):\n        with pytest.raises(RuntimeError):\n            get_bootstrap_outcomes(\n                data=data,\n                outcome=_raise_assertion_error,\n                rng=rng,\n                n_draws=2,\n                error_handling=\"continue\",\n            )\n\n\ndef test_get_bootstrap_estimates_with_some_errors_and_continue(data):\n    rng = get_rng(seed=1234)\n\n    def _raise_assertion_error_sometimes(data):\n        assert rng.uniform() > 0.5\n        return data.mean()\n\n    with pytest.warns(UserWarning):\n        res_flat = get_bootstrap_outcomes(\n            data=data,\n            outcome=_raise_assertion_error_sometimes,\n            rng=rng,\n            n_draws=100,\n            error_handling=\"continue\",\n        )\n\n    assert 30 <= len(res_flat) <= 70\n"
  },
  {
    "path": "tests/estimagic/test_bootstrap_samples.py",
    "content": "import numpy as np\nimport pandas as pd\nimport pytest\nfrom numpy.testing import assert_array_equal as aae\nfrom pandas.testing import assert_frame_equal as afe\nfrom pandas.testing import assert_series_equal as ase\n\nfrom estimagic.bootstrap_samples import (\n    _calculate_bootstrap_indices_weights,\n    _convert_cluster_ids_to_indices,\n    _get_bootstrap_samples_from_indices,\n    get_bootstrap_indices,\n    get_bootstrap_samples,\n)\nfrom optimagic.utilities import get_rng\n\n\n@pytest.fixture()\ndef data():\n    df = pd.DataFrame()\n    df[\"id\"] = np.arange(900)\n    df[\"hh\"] = [3, 1, 2, 0, 0, 2, 5, 4, 5] * 100\n    df[\"weights\"] = np.ones(900)\n    return df\n\n\ndef test_get_bootstrap_indices_randomization_works_without_clustering(data):\n    rng = get_rng(seed=12345)\n    res = get_bootstrap_indices(data, n_draws=2, rng=rng)\n    assert set(res[0]) != set(res[1])\n\n\ndef test_get_bootstrap_indices_radomization_works_with_clustering(data):\n    rng = get_rng(seed=12345)\n    res = get_bootstrap_indices(data, cluster_by=\"hh\", n_draws=2, rng=rng)\n    assert set(res[0]) != set(res[1])\n\n\ndef test_get_bootstrap_indices_randomization_works_with_weights(data):\n    rng = get_rng(seed=12345)\n    res = get_bootstrap_indices(data, weight_by=\"weights\", n_draws=2, rng=rng)\n    assert set(res[0]) != set(res[1])\n\n\ndef test_get_bootstrap_indices_randomization_works_with_weights_and_clustering(data):\n    rng = get_rng(seed=12345)\n    res = get_bootstrap_indices(\n        data, weight_by=\"weights\", cluster_by=\"hh\", n_draws=2, rng=rng\n    )\n    assert set(res[0]) != set(res[1])\n\n\ndef test_get_bootstrap_indices_randomization_works_with_and_without_weights(data):\n    rng1 = get_rng(seed=12345)\n    rng2 = get_rng(seed=12345)\n    res1 = get_bootstrap_indices(data, n_draws=1, rng=rng1)\n    res2 = get_bootstrap_indices(data, weight_by=\"weights\", n_draws=1, rng=rng2)\n    assert not np.array_equal(res1, res2)\n\n\ndef 
test_get_boostrap_indices_randomization_works_with_extreme_case(data):\n    rng = get_rng(seed=12345)\n    weights = np.zeros(900)\n    weights[0] = 1.0\n    data[\"weights\"] = weights\n    res = get_bootstrap_indices(data, weight_by=\"weights\", n_draws=1, rng=rng)\n    assert len(np.unique(res)) == 1\n\n\ndef test_clustering_leaves_households_intact(data):\n    rng = get_rng(seed=12345)\n    indices = get_bootstrap_indices(data, cluster_by=\"hh\", n_draws=1, rng=rng)[0]\n    sampled = data.iloc[indices]\n    sampled_households = sampled[\"hh\"].unique()\n    for household in sampled_households:\n        expected_ids = set(data[data[\"hh\"] == household][\"id\"].unique())\n        actual_ids = set(sampled[sampled[\"hh\"] == household][\"id\"].unique())\n        assert expected_ids == actual_ids\n\n\ndef test_convert_cluster_ids_to_indices():\n    cluster_col = pd.Series([2, 2, 0, 1, 0, 1])\n    drawn_clusters = np.array([[1, 0]])\n    expected = np.array([3, 5, 2, 4])\n    calculated = _convert_cluster_ids_to_indices(cluster_col, drawn_clusters)[0]\n    aae(calculated, expected)\n\n\ndef test_get_bootstrap_samples_from_indices():\n    indices = [np.array([0, 1])]\n    data = pd.DataFrame(np.arange(6).reshape(3, 2))\n    expected = pd.DataFrame(np.arange(4).reshape(2, 2))\n    calculated = _get_bootstrap_samples_from_indices(data, indices)[0]\n    afe(calculated, expected)\n\n\ndef test_get_bootstrap_samples_runs(data):\n    rng = get_rng(seed=12345)\n    get_bootstrap_samples(data, n_draws=2, rng=rng)\n\n\n@pytest.fixture\ndef sample_data():\n    return pd.DataFrame({\"weight\": [1, 2, 3, 4], \"cluster\": [\"A\", \"A\", \"B\", \"B\"]})\n\n\ndef test_no_weights_no_clusters(sample_data):\n    result = _calculate_bootstrap_indices_weights(sample_data, None, None)\n    assert result is None\n\n\ndef test_weights_no_clusters(sample_data):\n    result = _calculate_bootstrap_indices_weights(sample_data, \"weight\", None)\n    expected = pd.Series([0.1, 0.2, 0.3, 0.4], 
index=sample_data.index, name=\"weight\")\n    pd.testing.assert_series_equal(result, expected)\n\n\ndef test_weights_and_clusters(sample_data):\n    result = _calculate_bootstrap_indices_weights(sample_data, \"weight\", \"cluster\")\n    expected = pd.Series(\n        [0.3, 0.7], index=pd.Index([\"A\", \"B\"], name=\"cluster\"), name=\"weight\"\n    )\n    ase(result, expected)\n\n\ndef test_invalid_weight_column():\n    data = pd.DataFrame({\"x\": [1, 2, 3]})\n    with pytest.raises(KeyError):\n        _calculate_bootstrap_indices_weights(data, \"weight\", None)\n\n\ndef test_invalid_cluster_column(sample_data):\n    with pytest.raises(KeyError):\n        _calculate_bootstrap_indices_weights(sample_data, \"weight\", \"invalid_cluster\")\n\n\ndef test_empty_dataframe():\n    empty_df = pd.DataFrame()\n    result = _calculate_bootstrap_indices_weights(empty_df, None, None)\n    assert result is None\n\n\ndef test_some_zero_weights_with_clusters():\n    data = pd.DataFrame({\"weight\": [0, 1, 0, 2], \"cluster\": [\"A\", \"A\", \"B\", \"B\"]})\n    result = _calculate_bootstrap_indices_weights(data, \"weight\", \"cluster\")\n    expected = pd.Series(\n        [1 / 3, 2 / 3], index=pd.Index([\"A\", \"B\"], name=\"cluster\"), name=\"weight\"\n    )\n    ase(result, expected)\n"
  },
  {
    "path": "tests/estimagic/test_estimate_ml.py",
    "content": "import itertools\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nimport scipy as sp\nimport statsmodels.api as sm\nfrom numpy.testing import assert_array_equal\nfrom scipy.stats import multivariate_normal\nfrom statsmodels.base.model import GenericLikelihoodModel\n\nimport optimagic as om\nfrom estimagic.estimate_ml import estimate_ml\nfrom estimagic.examples.logit import (\n    logit_hess,\n    logit_jac,\n    logit_loglike,\n    scalar_logit_fun_and_jac,\n)\nfrom optimagic import mark\nfrom optimagic.optimizers import scipy_optimizers\nfrom optimagic.parameters.bounds import Bounds\n\n\ndef aaae(obj1, obj2, decimal=3):\n    arr1 = np.asarray(obj1)\n    arr2 = np.asarray(obj2)\n    np.testing.assert_array_almost_equal(arr1, arr2, decimal=decimal)\n\n\n# ==================================================================================\n# Test case with constraints using multivariate Normal model\n# ==================================================================================\n\n\n@mark.likelihood\ndef multivariate_normal_loglike(params, data):\n    mean = params[\"mean\"]\n    cov = params[\"cov\"]\n    mn = multivariate_normal(mean=mean, cov=cov)\n    return mn.logpdf(data)\n\n\n@pytest.fixture()\ndef multivariate_normal_example():\n    # true parameters\n    true_mean = np.arange(1, 4)\n    true_cov = np.diag(np.arange(1, 4))\n\n    # simulate 10.000 random samples\n    mn = multivariate_normal(mean=true_mean, cov=true_cov)\n    data = mn.rvs(size=10_000)\n\n    loglike_kwargs = {\"data\": data}\n\n    params = {\"mean\": np.ones(3), \"cov\": np.diag(np.ones(3))}\n    true_params = {\"mean\": true_mean, \"cov\": true_cov}\n    return params, true_params, loglike_kwargs\n\n\ndef test_estimate_ml_with_constraints(multivariate_normal_example):\n    params, true_params, loglike_kwargs = multivariate_normal_example\n\n    constraints = [\n        om.FixedConstraint(selector=lambda p: p[\"mean\"][0]),\n        
om.FlatCovConstraint(selector=lambda p: p[\"cov\"][np.tril_indices(3)]),\n    ]\n\n    results = estimate_ml(\n        loglike=multivariate_normal_loglike,\n        params=params,\n        loglike_kwargs=loglike_kwargs,\n        optimize_options=\"scipy_lbfgsb\",\n        constraints=constraints,\n    )\n\n    aaae(results.params[\"mean\"], true_params[\"mean\"], decimal=1)\n    aaae(results.params[\"cov\"], true_params[\"cov\"], decimal=1)\n\n    # test free_mask of summary\n    expected_msg = (\n        \"seed is set to None and constraints are transforming. \"\n        \"This leads to randomness in the result. To avoid random behavior, \"\n        \"choose a non-None seed.\"\n    )\n    with pytest.warns(UserWarning, match=expected_msg):\n        summary = results.summary()\n\n    assert np.all(summary[\"mean\"][\"free\"].values == np.array([False, True, True]))\n    assert np.all(summary[\"cov\"][\"free\"].values)\n\n\n# ======================================================================================\n# Test case using Logit model\n# ======================================================================================\n\n\n@pytest.fixture()\ndef logit_np_inputs():\n    spector_data = sm.datasets.spector.load_pandas()\n    spector_data.exog = sm.add_constant(spector_data.exog)\n    x_df = sm.add_constant(spector_data.exog)\n\n    out = {\n        \"y\": spector_data.endog,\n        \"x\": x_df.to_numpy(),\n        \"params\": np.array([-10, 2, 0.2, 2]),\n    }\n    return out\n\n\n@pytest.fixture()\ndef fitted_logit_model(logit_object):\n    \"\"\"We need to use a generic model class to access all standard errors etc.\"\"\"\n\n    class GenericLogit(GenericLikelihoodModel):\n        def nloglikeobs(self, params, *args, **kwargs):\n            return -logit_object.loglikeobs(params, *args, **kwargs)\n\n    generic_logit = GenericLogit(logit_object.endog, logit_object.exog)\n    return generic_logit.fit()\n\n\ntest_cases = list(\n    itertools.product(\n    
    [\n            {\"algorithm\": \"scipy_lbfgsb\"},\n            \"scipy_lbfgsb\",\n            {\n                \"algorithm\": \"scipy_lbfgsb\",\n                \"fun_and_jac\": scalar_logit_fun_and_jac,\n            },\n        ],\n        [None, logit_jac, False],\n        [None, logit_hess, False],\n    )\n)\n\ntest_cases = [\n    case for case in test_cases if not (case[1] is False and case[2] is False)\n]\n\n\n@pytest.mark.parametrize(\"optimize_options, jacobian, hessian\", test_cases)\ndef test_estimate_ml_with_logit_no_constraints(\n    fitted_logit_model,\n    logit_np_inputs,\n    optimize_options,\n    jacobian,\n    hessian,\n):\n    \"\"\"Test that estimate_ml computes correct params and covariances under different\n    scenarios.\n    \"\"\"\n    # ==================================================================================\n    # estimate\n    # ==================================================================================\n\n    kwargs = {\"y\": logit_np_inputs[\"y\"], \"x\": logit_np_inputs[\"x\"]}\n\n    if \"fun_and_jac\" in optimize_options:\n        optimize_options[\"fun_and_jac_kwargs\"] = kwargs\n\n    got = estimate_ml(\n        loglike=logit_loglike,\n        params=logit_np_inputs[\"params\"],\n        loglike_kwargs=kwargs,\n        optimize_options=optimize_options,\n        jacobian=jacobian,\n        jacobian_kwargs=kwargs,\n        hessian=hessian,\n        hessian_kwargs=kwargs,\n    )\n\n    # ==================================================================================\n    # test\n    # ==================================================================================\n\n    exp = fitted_logit_model\n\n    if jacobian is not False and hessian is not False:\n        methods = [\"jacobian\", \"hessian\", \"robust\"]\n    elif jacobian is not False:\n        methods = [\"jacobian\"]\n    elif hessian is not False:\n        methods = [\"hessian\"]\n\n    statsmodels_suffix_map = {\n        \"jacobian\": 
\"jac\",\n        \"hessian\": \"\",\n        \"robust\": \"jhj\",\n    }\n\n    # compare estimated parameters\n    aaae(got.params, exp.params, decimal=4)\n\n    for method in methods:\n        # compare estimated standard errors\n        exp_se = getattr(exp, f\"bse{statsmodels_suffix_map[method]}\")\n        got_se = got.se(method=method)\n        aaae(got_se, exp_se, decimal=3)\n\n        # compare estimated confidence interval\n        if method == \"hessian\":\n            lower, upper = got.ci(method=method)\n            exp_lower = exp.conf_int().T[0]\n            exp_upper = exp.conf_int().T[1]\n            aaae(lower, exp_lower, decimal=3)\n            aaae(upper, exp_upper, decimal=3)\n\n        # compare covariance\n        if method == \"hessian\":\n            aaae(got.cov(method=method), exp.cov_params(), decimal=3)\n        elif method == \"robust\":\n            aaae(got.cov(method=method), exp.covjhj, decimal=2)\n        elif method == \"jacobian\":\n            aaae(got.cov(method=method), exp.covjac, decimal=4)\n\n        summary = got.summary(method=method)\n\n        aaae(summary[\"value\"], exp.params, decimal=4)\n        aaae(summary[\"standard_error\"], got.se(method=method))\n        lower, upper = got.ci(method=method)\n        aaae(summary[\"ci_lower\"], lower)\n        aaae(summary[\"ci_upper\"], upper)\n        aaae(summary[\"p_value\"], got.p_values(method=method))\n\n    if \"jacobian\" in methods:\n        aaae(got._se, got.se())\n        aaae(got._ci[0], got.ci()[0])\n        aaae(got._ci[1], got.ci()[1])\n        aaae(got._p_values, got.p_values())\n\n\ntest_cases_constr = list(\n    itertools.product(\n        [None, logit_jac],  # jacobian\n        [\n            om.FlatCovConstraint(selector=lambda x: x[[1, 2, 3]]),\n            om.LinearConstraint(\n                selector=lambda x: x[[0, 1]], lower_bound=-20, weights=1\n            ),\n            om.IncreasingConstraint(selector=lambda x: x[[0, 1]]),\n        ],\n    
)\n)\n\n\n@pytest.mark.parametrize(\"jacobian, constraints\", test_cases_constr)\ndef test_estimate_ml_with_logit_constraints(\n    fitted_logit_model,\n    logit_np_inputs,\n    jacobian,\n    constraints,\n):\n    \"\"\"Test that estimate_ml computes correct params and standard errors under different\n    scenarios with constraints.\n    \"\"\"\n    seed = 1234\n\n    # ==================================================================================\n    # estimate\n    # ==================================================================================\n\n    kwargs = {\"y\": logit_np_inputs[\"y\"], \"x\": logit_np_inputs[\"x\"]}\n\n    optimize_options = {\n        \"algorithm\": \"scipy_lbfgsb\",\n        \"algo_options\": {\"convergence.ftol_rel\": 1e-12},\n    }\n\n    if \"fun_and_jac\" in optimize_options:\n        optimize_options[\"fun_and_jac_kwargs\"] = kwargs\n\n    got = estimate_ml(\n        loglike=logit_loglike,\n        params=logit_np_inputs[\"params\"],\n        loglike_kwargs=kwargs,\n        optimize_options=optimize_options,\n        jacobian=jacobian,\n        jacobian_kwargs=kwargs,\n        constraints=constraints,\n    )\n\n    # ==================================================================================\n    # test\n    # ==================================================================================\n\n    exp = fitted_logit_model\n\n    methods = [\"jacobian\", \"hessian\", \"robust\"]\n\n    statsmodels_suffix_map = {\n        \"jacobian\": \"jac\",\n        \"hessian\": \"\",\n        \"robust\": \"jhj\",\n    }\n\n    # compare estimated parameters\n    aaae(got.params, exp.params, decimal=3)\n\n    for method in methods:\n        # compare estimated standard errors\n        exp_se = getattr(exp, f\"bse{statsmodels_suffix_map[method]}\")\n        got_se = got.se(method=method, seed=seed)\n        corr = np.corrcoef(got_se, exp_se)\n        aaae(corr, np.ones_like(corr), decimal=4)\n\n        # compare estimated 
confidence interval\n        if method == \"hessian\":\n            lower, upper = got.ci(method=method, seed=seed)\n            exp_lower = exp.conf_int().T[0]\n            exp_upper = exp.conf_int().T[1]\n            corr_lower = np.corrcoef(lower, exp_lower)\n            corr_upper = np.corrcoef(upper, exp_upper)\n            aaae(corr_lower, np.ones_like(corr), decimal=4)\n            aaae(corr_upper, np.ones_like(corr), decimal=4)\n\n        summary = got.summary(method=method, seed=seed)\n\n        aaae(summary[\"value\"], exp.params, decimal=3)\n        aaae(summary[\"standard_error\"], got.se(method=method, seed=seed))\n        lower, upper = got.ci(method=method, seed=seed)\n        aaae(summary[\"ci_lower\"], lower)\n        aaae(summary[\"ci_upper\"], upper)\n        aaae(summary[\"p_value\"], got.p_values(method=method, seed=seed))\n\n\ndef test_estimate_ml_optimize_options_false(fitted_logit_model, logit_np_inputs):\n    \"\"\"Test that estimate_ml computes correct covariances given correct params.\"\"\"\n    kwargs = {\"y\": logit_np_inputs[\"y\"], \"x\": logit_np_inputs[\"x\"]}\n\n    params = pd.DataFrame({\"value\": fitted_logit_model.params})\n\n    got = estimate_ml(\n        loglike=logit_loglike,\n        params=params,\n        loglike_kwargs=kwargs,\n        optimize_options=False,\n    )\n\n    summary = got.summary()\n\n    # compare estimated parameters\n    aaae(summary[\"value\"], fitted_logit_model.params, decimal=4)\n\n    # compare estimated standard errors\n    aaae(summary[\"standard_error\"], fitted_logit_model.bsejac, decimal=3)\n\n    # compare covariance (if not robust case)\n    aaae(got.cov(method=\"jacobian\"), fitted_logit_model.covjac, decimal=4)\n\n\ndef test_estimate_ml_algorithm_type(logit_np_inputs):\n    \"\"\"Test that estimate_ml computes correct covariances given correct params.\"\"\"\n    kwargs = {\"y\": logit_np_inputs[\"y\"], \"x\": logit_np_inputs[\"x\"]}\n\n    params = pd.DataFrame({\"value\": 
logit_np_inputs[\"params\"]})\n\n    estimate_ml(\n        loglike=logit_loglike,\n        params=params,\n        loglike_kwargs=kwargs,\n        optimize_options=scipy_optimizers.ScipyLBFGSB,\n    )\n\n\ndef test_estimate_ml_algorithm(logit_np_inputs):\n    \"\"\"Test that estimate_ml computes correct covariances given correct params.\"\"\"\n    kwargs = {\"y\": logit_np_inputs[\"y\"], \"x\": logit_np_inputs[\"x\"]}\n\n    params = pd.DataFrame({\"value\": logit_np_inputs[\"params\"]})\n\n    estimate_ml(\n        loglike=logit_loglike,\n        params=params,\n        loglike_kwargs=kwargs,\n        optimize_options=scipy_optimizers.ScipyLBFGSB(stopping_maxfun=10),\n    )\n\n\n# ======================================================================================\n# Univariate normal case using dict params\n# ======================================================================================\n\n\n@mark.likelihood\ndef normal_loglike(params, y):\n    return sp.stats.norm.logpdf(y, loc=params[\"mean\"], scale=params[\"sd\"])\n\n\n@pytest.fixture()\ndef normal_inputs():\n    true = {\n        \"mean\": 1.0,\n        \"sd\": 1.0,\n    }\n    rng = np.random.default_rng(12345)\n    y = rng.normal(loc=true[\"mean\"], scale=true[\"sd\"], size=10_000)\n    return {\"true\": true, \"y\": y}\n\n\ndef test_estimate_ml_general_pytree(normal_inputs):\n    # ==================================================================================\n    # estimate\n    # ==================================================================================\n\n    kwargs = {\"y\": normal_inputs[\"y\"]}\n\n    start_params = {\"mean\": 5, \"sd\": 3}\n\n    got = estimate_ml(\n        loglike=normal_loglike,\n        params=start_params,\n        loglike_kwargs=kwargs,\n        optimize_options=\"scipy_lbfgsb\",\n        bounds=Bounds(lower={\"sd\": 0.0001}),\n        jacobian_kwargs=kwargs,\n        constraints=om.FlatSDCorrConstraint(selector=lambda p: p[\"sd\"]),\n    )\n\n    # 
==================================================================================\n    # test\n    # ==================================================================================\n\n    true = normal_inputs[\"true\"]\n\n    assert (\n        np.abs(true[\"mean\"] - got.summary(method=\"jacobian\")[\"mean\"][\"value\"][0]) < 1e-1\n    )\n    assert np.abs(true[\"sd\"] - got.summary(method=\"jacobian\")[\"sd\"][\"value\"][0]) < 1e-1\n\n\ndef test_to_pickle(normal_inputs, tmp_path):\n    kwargs = {\"y\": normal_inputs[\"y\"]}\n\n    start_params = {\"mean\": 5, \"sd\": 3}\n\n    got = estimate_ml(\n        loglike=normal_loglike,\n        params=start_params,\n        loglike_kwargs=kwargs,\n        optimize_options=\"scipy_lbfgsb\",\n        bounds=Bounds(lower={\"sd\": 0.0001}),\n        jacobian_kwargs=kwargs,\n        constraints=om.FlatSDCorrConstraint(selector=lambda p: p[\"sd\"]),\n    )\n\n    got.to_pickle(tmp_path / \"bla.pkl\")\n\n\ndef test_caching(normal_inputs):\n    kwargs = {\"y\": normal_inputs[\"y\"]}\n\n    start_params = {\"mean\": 5, \"sd\": 3}\n\n    got = estimate_ml(\n        loglike=normal_loglike,\n        params=start_params,\n        loglike_kwargs=kwargs,\n        optimize_options=\"scipy_lbfgsb\",\n        bounds=Bounds(lower={\"sd\": 0.0001}),\n        jacobian_kwargs=kwargs,\n        constraints=om.FlatSDCorrConstraint(selector=lambda p: p[\"sd\"]),\n    )\n\n    assert got._cache == {}\n\n    cov = got.cov(method=\"robust\", return_type=\"array\")\n    assert got._cache == {}\n\n    cov = got.cov(method=\"robust\", return_type=\"array\", seed=0)\n    assert_array_equal(list(got._cache.values())[0], cov)\n"
  },
  {
    "path": "tests/estimagic/test_estimate_msm.py",
    "content": "\"\"\"Most test exploit the special case where simulate_moments just returns parameters.\"\"\"\n\nimport itertools\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom numpy.testing import assert_array_almost_equal as aaae\nfrom numpy.testing import assert_array_equal\n\nfrom estimagic.estimate_msm import estimate_msm\nfrom optimagic.optimization.optimize_result import OptimizeResult\nfrom optimagic.optimizers import scipy_optimizers\nfrom optimagic.shared.check_option_dicts import (\n    check_optimization_options,\n)\n\n\ndef _sim_pd(params):\n    return pd.Series(params)\n\n\ndef _sim_np(params):\n    return params\n\n\ndef _sim_dict_pd(params):\n    return {\"simulated_moments\": pd.Series(params), \"other\": \"bla\"}\n\n\ndef _sim_dict_np(params):\n    return {\"simulated_moments\": params, \"other\": \"bla\"}\n\n\ncov_np = np.diag([1, 2, 3.0])\ncov_pd = pd.DataFrame(cov_np)\n\ntest_cases = list(\n    itertools.product(\n        [_sim_pd, _sim_np, _sim_dict_pd, _sim_dict_np],  # simulate_moments\n        [cov_np, cov_pd],  # moments_cov\n        [{\"algorithm\": \"scipy_lbfgsb\"}, \"scipy_lbfgsb\"],  # optimize_options\n    )\n)\n\n\n@pytest.mark.parametrize(\"simulate_moments, moments_cov, optimize_options\", test_cases)\ndef test_estimate_msm(simulate_moments, moments_cov, optimize_options):\n    start_params = np.array([3, 2, 1])\n\n    expected_params = np.zeros(3)\n\n    # abuse simulate_moments to get empirical moments in correct format\n    empirical_moments = simulate_moments(expected_params)\n    if isinstance(empirical_moments, dict):\n        empirical_moments = empirical_moments[\"simulated_moments\"]\n\n    calculated = estimate_msm(\n        simulate_moments=simulate_moments,\n        empirical_moments=empirical_moments,\n        moments_cov=moments_cov,\n        params=start_params,\n        optimize_options=optimize_options,\n    )\n\n    # check that minimization works\n    aaae(calculated.params, 
expected_params)\n\n    # assert that optimization result exists and is of correct type\n    assert isinstance(calculated.optimize_result, OptimizeResult)\n\n    # check that cov works\n    calculated_cov = calculated.cov()\n    if isinstance(calculated_cov, pd.DataFrame):\n        calculated_cov = calculated_cov.to_numpy()\n\n    # this works only in the very special case with diagonal moments cov and\n    # jac = identity matrix\n    expected_cov = np.diag([1, 2, 3])\n    aaae(calculated_cov, expected_cov)\n    aaae(calculated.se(), np.sqrt([1, 2, 3]))\n\n    # works only because parameter point estimates are exactly zero\n    aaae(calculated.p_values(), np.ones(3))\n\n    expected_ci_upper = np.array([1.95996398, 2.77180765, 3.3947572])\n    expected_ci_lower = -expected_ci_upper\n\n    lower, upper = calculated.ci()\n    aaae(lower, expected_ci_lower)\n    aaae(upper, expected_ci_upper)\n\n    aaae(calculated.ci(), calculated._ci)\n    aaae(calculated.p_values(), calculated._p_values)\n    aaae(calculated.se(), calculated._se)\n    aaae(calculated.cov(), calculated._cov)\n\n    summary = calculated.summary()\n    aaae(summary[\"value\"], np.zeros(3))\n    aaae(summary[\"p_value\"], np.ones(3))\n    assert summary[\"stars\"].tolist() == [\"\"] * 3\n\n\ndef test_check_and_process_optimize_options_with_invalid_entries():\n    with pytest.raises(ValueError):\n        check_optimization_options({\"criterion\": lambda x: x}, \"estimate_msm\")\n\n\nls_test_cases = list(\n    itertools.product(\n        [_sim_pd, _sim_np, _sim_dict_pd, _sim_dict_np],  # simulate_moments\n        [cov_np, cov_pd],  # moments_cov\n        [{\"algorithm\": \"pounders\"}, \"pounders\"],  # optimize_options\n    )\n)\n\n\n@pytest.mark.parametrize(\n    \"simulate_moments, moments_cov, optimize_options\", ls_test_cases\n)\ndef test_estimate_msm_ls(simulate_moments, moments_cov, optimize_options):\n    start_params = np.array([3, 2, 1])\n\n    expected_params = np.zeros(3)\n\n    # abuse 
simulate_moments to get empirical moments in correct format\n    empirical_moments = simulate_moments(expected_params)\n    if isinstance(empirical_moments, dict):\n        empirical_moments = empirical_moments[\"simulated_moments\"]\n\n    calculated = estimate_msm(\n        simulate_moments=simulate_moments,\n        empirical_moments=empirical_moments,\n        moments_cov=moments_cov,\n        params=start_params,\n        optimize_options=optimize_options,\n    )\n\n    aaae(calculated.params, expected_params)\n\n\ndef test_estimate_msm_with_jacobian():\n    start_params = np.array([3, 2, 1])\n\n    expected_params = np.zeros(3)\n\n    # abuse simulate_moments to get empirical moments in correct format\n    empirical_moments = _sim_np(expected_params)\n    if isinstance(empirical_moments, dict):\n        empirical_moments = empirical_moments[\"simulated_moments\"]\n\n    calculated = estimate_msm(\n        simulate_moments=_sim_np,\n        empirical_moments=empirical_moments,\n        moments_cov=cov_np,\n        params=start_params,\n        optimize_options=\"scipy_lbfgsb\",\n        jacobian=lambda x: np.eye(len(x)),\n    )\n\n    aaae(calculated.params, expected_params)\n    aaae(calculated.cov(), cov_np)\n\n\ndef test_estimate_msm_with_algorithm_type():\n    start_params = np.array([3, 2, 1])\n    expected_params = np.zeros(3)\n    empirical_moments = _sim_np(expected_params)\n    if isinstance(empirical_moments, dict):\n        empirical_moments = empirical_moments[\"simulated_moments\"]\n\n    estimate_msm(\n        simulate_moments=_sim_np,\n        empirical_moments=empirical_moments,\n        moments_cov=cov_np,\n        params=start_params,\n        optimize_options=scipy_optimizers.ScipyLBFGSB,\n        jacobian=lambda x: np.eye(len(x)),\n    )\n\n\ndef test_estimate_msm_with_algorithm():\n    start_params = np.array([3, 2, 1])\n    expected_params = np.zeros(3)\n    empirical_moments = _sim_np(expected_params)\n    if 
isinstance(empirical_moments, dict):\n        empirical_moments = empirical_moments[\"simulated_moments\"]\n\n    estimate_msm(\n        simulate_moments=_sim_np,\n        empirical_moments=empirical_moments,\n        moments_cov=cov_np,\n        params=start_params,\n        optimize_options=scipy_optimizers.ScipyLBFGSB(stopping_maxfun=10),\n        jacobian=lambda x: np.eye(len(x)),\n    )\n\n\ndef test_to_pickle(tmp_path):\n    start_params = np.array([3, 2, 1])\n\n    # abuse simulate_moments to get empirical moments in correct format\n    empirical_moments = _sim_np(np.zeros(3))\n    if isinstance(empirical_moments, dict):\n        empirical_moments = empirical_moments[\"simulated_moments\"]\n\n    calculated = estimate_msm(\n        simulate_moments=_sim_np,\n        empirical_moments=empirical_moments,\n        moments_cov=cov_np,\n        params=start_params,\n        optimize_options=\"scipy_lbfgsb\",\n    )\n\n    calculated.to_pickle(tmp_path / \"bla.pkl\")\n\n\ndef test_caching():\n    start_params = np.array([3, 2, 1])\n\n    # abuse simulate_moments to get empirical moments in correct format\n    empirical_moments = _sim_np(np.zeros(3))\n    if isinstance(empirical_moments, dict):\n        empirical_moments = empirical_moments[\"simulated_moments\"]\n\n    got = estimate_msm(\n        simulate_moments=_sim_np,\n        empirical_moments=empirical_moments,\n        moments_cov=cov_np,\n        params=start_params,\n        optimize_options=\"scipy_lbfgsb\",\n    )\n\n    assert got._cache == {}\n    cov = got.cov(method=\"robust\", return_type=\"array\")\n    assert got._cache == {}\n    cov = got.cov(method=\"robust\", return_type=\"array\", seed=0)\n    assert_array_equal(list(got._cache.values())[0], cov)\n"
  },
  {
    "path": "tests/estimagic/test_estimate_msm_dict_params_and_moments.py",
    "content": "\"\"\"Most test exploit the special case where simulate_moments just returns parameters.\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom numpy.testing import assert_array_almost_equal as aaae\nfrom pybaum import tree_just_flatten\n\nfrom estimagic.estimate_msm import estimate_msm\nfrom optimagic.parameters.tree_registry import get_registry\n\n\ndef test_estimate_msm_dict_params_and_moments():\n    def simulate_moments(params):\n        return {k * 2: v for k, v in params.items()}\n\n    start_params = {\"a\": 3, \"b\": 2, \"c\": 1}\n\n    expected_params = {\"a\": 0, \"b\": 0, \"c\": 0}\n\n    empirical_moments = {\"aa\": 0, \"bb\": 0, \"cc\": 0}\n\n    moments_cov = {\n        \"aa\": {\"aa\": 1, \"bb\": 0, \"cc\": 0},\n        \"bb\": {\"aa\": 0, \"bb\": 2, \"cc\": 0},\n        \"cc\": {\"aa\": 0, \"bb\": 0, \"cc\": 3},\n    }\n\n    calculated = estimate_msm(\n        simulate_moments=simulate_moments,\n        empirical_moments=empirical_moments,\n        moments_cov=moments_cov,\n        params=start_params,\n        optimize_options=\"scipy_lbfgsb\",\n    )\n\n    # check that minimization works\n    assert_almost_equal(calculated.params, expected_params)\n\n    # this works only in the very special case with diagonal moments cov and\n    # jac = identity matrix\n    assert_almost_equal(calculated.cov(), moments_cov)\n\n    assert_almost_equal(calculated.se(), {\"a\": 1, \"b\": np.sqrt(2), \"c\": np.sqrt(3)})\n\n    # works only because parameter point estimates are exactly zero\n    assert_almost_equal(calculated.p_values(), {\"a\": 1, \"b\": 1, \"c\": 1})\n\n    expected_ci_upper = {\"a\": 1.95996398, \"b\": 2.77180765, \"c\": 3.3947572}\n    expected_ci_lower = {k: -v for k, v in expected_ci_upper.items()}\n\n    lower, upper = calculated.ci()\n    assert_almost_equal(lower, expected_ci_lower)\n    assert_almost_equal(upper, expected_ci_upper)\n\n    assert_almost_equal(calculated.ci(), calculated._ci)\n    
assert_almost_equal(calculated.p_values(), calculated._p_values)\n    assert_almost_equal(calculated.se(), calculated._se)\n    assert_almost_equal(calculated.cov(), calculated._cov)\n\n    summary = calculated.summary()\n    summary_df = pd.concat(list(summary.values()))\n    aaae(summary_df[\"value\"], np.zeros(3))\n    aaae(summary_df[\"p_value\"], np.ones(3))\n    assert summary_df[\"stars\"].tolist() == [\"\"] * 3\n\n    expected_sensitivity_to_bias_dict = {\n        \"a\": {\"aa\": -1.0, \"bb\": 0.0, \"cc\": 0.0},\n        \"b\": {\"aa\": 0.0, \"bb\": -1.0, \"cc\": 0.0},\n        \"c\": {\"aa\": 0.0, \"bb\": 0.0, \"cc\": -1.0},\n    }\n\n    assert_almost_equal(\n        calculated.sensitivity(\"bias\"), expected_sensitivity_to_bias_dict\n    )\n\n    expected_sensitivity_to_bias_arr = -np.eye(3)\n\n    aaae(\n        calculated.sensitivity(\"bias\", return_type=\"array\"),\n        expected_sensitivity_to_bias_arr,\n    )\n    aaae(\n        calculated.sensitivity(\"bias\", return_type=\"dataframe\").to_numpy(),\n        expected_sensitivity_to_bias_arr,\n    )\n\n    expected_jacobian = {\n        \"a\": {\"aa\": 1.0, \"bb\": 0.0, \"cc\": 0.0},\n        \"b\": {\"aa\": 0.0, \"bb\": 1.0, \"cc\": 0.0},\n        \"c\": {\"aa\": 0.0, \"bb\": 0.0, \"cc\": 1.0},\n    }\n\n    assert_almost_equal(calculated.jacobian, expected_jacobian)\n\n\ndef assert_almost_equal(x, y, decimal=6):\n    if isinstance(x, np.ndarray):\n        x_flat = x\n        y_flat = y\n    else:\n        registry = get_registry(extended=True)\n        x_flat = np.array(tree_just_flatten(x, registry=registry))\n        y_flat = np.array(tree_just_flatten(y, registry=registry))\n\n    aaae(x_flat, y_flat, decimal=decimal)\n"
  },
  {
    "path": "tests/estimagic/test_estimation_table.py",
    "content": "import io\nimport textwrap\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nimport statsmodels.api as sm\nfrom pandas.testing import assert_frame_equal as afe\nfrom pandas.testing import assert_series_equal as ase\n\nfrom estimagic.config import EXAMPLE_DIR\nfrom estimagic.estimation_table import (\n    _apply_number_format,\n    _center_align_integers_and_non_numeric_strings,\n    _check_order_of_model_names,\n    _convert_frame_to_string_series,\n    _create_group_to_col_position,\n    _create_statistics_sr,\n    _customize_col_groups,\n    _customize_col_names,\n    _get_default_column_names_and_groups,\n    _get_digits_after_decimal,\n    _get_model_names,\n    _get_params_frames_with_common_index,\n    _process_frame_indices,\n    _process_model,\n    estimation_table,\n    render_html,\n    render_latex,\n)\n\n\n# ======================================================================================\n# Helper functions\n# ======================================================================================\ndef _get_models_multiindex():\n    df = pd.DataFrame(\n        data=np.ones((3, 4)), columns=[\"value\", \"ci_lower\", \"ci_upper\", \"p_value\"]\n    )\n    df.index = pd.MultiIndex.from_tuples(\n        [(\"p_1\", \"v_1\"), (\"p_1\", \"v_2\"), (\"p_2\", \"v_2\")]\n    )\n    info = {\"n_obs\": 400}\n    mod1 = {\"params\": df, \"info\": info, \"name\": \"m1\"}\n    mod2 = {\"params\": df, \"info\": info, \"name\": \"m2\"}\n    models = [mod1, mod2]\n    return models\n\n\ndef _get_models_single_index():\n    df = pd.DataFrame(\n        data=np.ones((3, 4)), columns=[\"value\", \"ci_lower\", \"ci_upper\", \"p_value\"]\n    )\n    df.index = [f\"p{i}\" for i in [1, 2, 3]]\n    info = {\"n_obs\": 400}\n    mod1 = {\"params\": df, \"info\": info, \"name\": \"m1\"}\n    mod2 = {\"params\": df, \"info\": info, \"name\": \"m2\"}\n    models = [mod1, mod2]\n    return models\n\n\ndef _get_models_multiindex_multi_column():\n    df = 
pd.DataFrame(\n        data=np.ones((3, 4)), columns=[\"value\", \"ci_lower\", \"ci_upper\", \"p_value\"]\n    )\n    df.index = pd.MultiIndex.from_tuples(\n        [(\"p_1\", \"v_1\"), (\"p_1\", \"v_2\"), (\"p_2\", \"v_2\")]\n    )\n    info = {\"n_obs\": 400}\n    mod1 = {\"params\": df.iloc[1:], \"info\": info, \"name\": \"m1\"}\n    mod2 = {\"params\": df, \"info\": info, \"name\": \"m2\"}\n    mod3 = {\"params\": df, \"info\": info, \"name\": \"m2\"}\n    models = [mod1, mod2, mod3]\n    return models\n\n\ndef _read_csv_string(string, index_cols=None):\n    string = textwrap.dedent(string)\n    return pd.read_csv(io.StringIO(string), index_col=index_cols)\n\n\n# ======================================================================================\n# Tests\n# ======================================================================================\n\n# test process_model for different model types\n\nfix_path = EXAMPLE_DIR / \"diabetes.csv\"\n\ndf_ = pd.read_csv(fix_path, index_col=0)\nest = sm.OLS(endog=df_[\"target\"], exog=sm.add_constant(df_[df_.columns[0:4]])).fit()\nest1 = sm.OLS(endog=df_[\"target\"], exog=sm.add_constant(df_[df_.columns[0:5]])).fit()\n\n\ndef test_estimation_table():\n    models = [est]\n    res = estimation_table(models, return_type=\"render_inputs\", append_notes=False)\n    exp = {}\n    body = \"\"\"\n        index,target\n        const,152.00$^{*** }$\n        ,(2.85)\n        Age,37.20$^{ }$\n        ,(64.10)\n        Sex,-107.00$^{* }$\n        ,(62.10)\n        BMI,787.00$^{*** }$\n        ,(65.40)\n        ABP,417.00$^{*** }$\n        ,(69.50)\n    \"\"\"\n    exp[\"body\"] = _read_csv_string(body).fillna(\"\")\n    exp[\"body\"].set_index(\"index\", inplace=True)\n    footer_str = \"\"\"\n         ,target\n        R$^2$,0.40\n        Adj. R$^2$,0.40\n        Residual Std. 
Error,60\n        F Statistic,72.90$^{***}$\n        Observations,442\n\n    \"\"\"\n    exp[\"footer\"] = _read_csv_string(footer_str).fillna(\"\")\n    exp[\"footer\"].set_index(\" \", inplace=True)\n    exp[\"footer\"].index.names = [None]\n    exp[\"footer\"].index = pd.MultiIndex.from_arrays([exp[\"footer\"].index])\n    afe(exp[\"footer\"].sort_index(), res[\"footer\"].sort_index())\n    afe(exp[\"body\"], res[\"body\"], check_index_type=False)\n\n\nMODELS = [\n    _get_models_multiindex(),\n    _get_models_single_index(),\n    _get_models_multiindex_multi_column(),\n]\nPARAMETRIZATION = [(\"latex\", render_latex, models) for models in MODELS]\nPARAMETRIZATION += [(\"html\", render_html, models) for models in MODELS]\n\n\n@pytest.mark.parametrize(\"return_type, render_func,models\", PARAMETRIZATION)\ndef test_one_and_stage_rendering_are_equal(return_type, render_func, models):\n    first_stage = estimation_table(\n        models, return_type=\"render_inputs\", confidence_intervals=True\n    )\n    second_stage = render_func(siunitx_warning=False, **first_stage)\n    one_stage = estimation_table(\n        models,\n        return_type=return_type,\n        siunitx_warning=False,\n        confidence_intervals=True,\n    )\n    assert one_stage == second_stage\n\n\ndef test_process_model_stats_model():\n    params = pd.DataFrame(\n        columns=[\"value\", \"p_value\", \"standard_error\", \"ci_lower\", \"ci_upper\"],\n        index=[\"const\", \"Age\", \"Sex\", \"BMI\", \"ABP\"],\n    )\n    params[\"value\"] = [152.133484, 37.241211, -106.577520, 787.179313, 416.673772]\n    params[\"p_value\"] = [\n        2.048808e-193,\n        5.616557e-01,\n        8.695658e-02,\n        5.345260e-29,\n        4.245663e-09,\n    ]\n    params[\"standard_error\"] = [2.852749, 64.117433, 62.125062, 65.424126, 69.494666]\n    params[\"ci_lower\"] = [146.526671, -88.775663, -228.678572, 658.594255, 280.088446]\n    params[\"ci_upper\"] = [157.740298, 163.258084, 15.523532, 
915.764371, 553.259097]\n    info = {}\n    info[\"rsquared\"] = 0.40026108237714\n    info[\"rsquared_adj\"] = 0.39477148130050055\n    info[\"fvalue\"] = 72.91259907398705\n    info[\"f_pvalue\"] = 2.700722880950139e-47\n    info[\"df_model\"] = 4.0\n    info[\"df_resid\"] = 437.0\n    info[\"resid_std_err\"] = 59.97560860753488\n    info[\"n_obs\"] = 442.0\n    res = _process_model(est)\n    afe(res[\"params\"], params)\n    ase(pd.Series(res[\"info\"]), pd.Series(info))\n    assert res[\"name\"] == \"target\"\n\n\n# test convert_model_to_series for different arguments\ndef test_convert_model_to_series_with_ci():\n    df = pd.DataFrame(\n        np.array(\n            [[0.6, 2.3, 3.3], [0.11, 0.049, 0.009], [0.6, 2.3, 3.3], [1.2, 3.3, 4.33]]\n        ).T,\n        columns=[\"value\", \"p_value\", \"ci_lower\", \"ci_upper\"],\n        index=[\"a\", \"b\", \"c\"],\n    ).astype(\"str\")\n    df[\"p_value\"] = df[\"p_value\"].astype(\"float\")\n    significance_levels = [0.1, 0.05, 0.01]\n    show_stars = True\n    res = _convert_frame_to_string_series(df, significance_levels, show_stars)\n    exp = pd.Series(\n        [\n            \"0.6$^{ }$\",\n            r\"(0.6;1.2)\",\n            \"2.3$^{** }$\",\n            r\"(2.3;3.3)\",\n            \"3.3$^{*** }$\",\n            r\"(3.3;4.33)\",\n        ],\n        index=[\"a\", \"\", \"b\", \"\", \"c\", \"\"],\n        name=\"\",\n    )\n    exp.index.name = \"index\"\n    ase(exp, res)\n\n\ndef test_convert_model_to_series_with_se():\n    df = pd.DataFrame(\n        np.array([[0.6, 2.3, 3.3], [0.11, 0.049, 0.009], [0.6, 2.3, 3.3]]).T,\n        columns=[\"value\", \"p_value\", \"standard_error\"],\n        index=[\"a\", \"b\", \"c\"],\n    ).astype(\"str\")\n    df[\"p_value\"] = df[\"p_value\"].astype(\"float\")\n    significance_levels = [0.1, 0.05, 0.01]\n    show_stars = True\n    res = _convert_frame_to_string_series(df, significance_levels, show_stars)\n    exp = pd.Series(\n        [\"0.6$^{ }$\", 
\"(0.6)\", \"2.3$^{** }$\", \"(2.3)\", \"3.3$^{*** }$\", \"(3.3)\"],\n        index=[\"a\", \"\", \"b\", \"\", \"c\", \"\"],\n        name=\"\",\n    )\n    exp.index.name = \"index\"\n    ase(exp, res)\n\n\ndef test_convert_model_to_series_without_inference():\n    df = pd.DataFrame(\n        np.array([[0.6, 2.3, 3.3], [0.11, 0.049, 0.009]]).T,\n        columns=[\"value\", \"p_value\"],\n        index=[\"a\", \"b\", \"c\"],\n    ).astype(\"str\")\n    df[\"p_value\"] = df[\"p_value\"].astype(\"float\")\n    significance_levels = [0.1, 0.05, 0.01]\n    show_stars = True\n    res = _convert_frame_to_string_series(df, significance_levels, show_stars)\n    exp = pd.Series(\n        [\"0.6$^{ }$\", \"2.3$^{** }$\", \"3.3$^{*** }$\"], index=[\"a\", \"b\", \"c\"], name=\"\"\n    )\n    ase(exp, res)\n\n\n# test create stat series\ndef test_create_statistics_sr():\n    df = pd.DataFrame(np.empty((10, 3)), columns=[\"a\", \"b\", \"c\"])\n    df.index = pd.MultiIndex.from_arrays(np.array([np.arange(10), np.arange(10)]))\n    info = {\"rsquared\": 0.45, \"n_obs\": 400, \"rsquared_adj\": 0.0002}\n    number_format = (\"{0:.3g}\", \"{0:.5f}\", \"{0:.4g}\")\n    add_trailing_zeros = True\n    sig_levels = [0.1, 0.2]\n    show_stars = False\n    model = {\"params\": df, \"info\": info, \"name\": \"target\"}\n    stats_options = {\n        \"n_obs\": \"Observations\",\n        \"rsquared\": \"R2\",\n        \"rsquared_adj\": \"R2 Adj.\",\n    }\n    res = _create_statistics_sr(\n        model,\n        stats_options,\n        sig_levels,\n        show_stars,\n        number_format,\n        add_trailing_zeros,\n        max_trail=4,\n    )\n    exp = pd.Series([\"0.4500\", \"0.0002\", \"400\"])\n    exp.index = pd.MultiIndex.from_arrays(\n        np.array([np.array([\"R2\", \"R2 Adj.\", \"Observations\"]), np.array([\"\", \"\", \"\"])])\n    )\n    ase(exp.sort_index(), res.sort_index())\n\n\n# test _process_frame_axes for different arguments\ndef 
test_process_frame_indices_index():\n    df = pd.DataFrame(np.ones((3, 3)), columns=[\"\", \"\", \"\"])\n    df.index = pd.MultiIndex.from_arrays(\n        np.array([[\"today\", \"today\", \"today\"], [\"var1\", \"var2\", \"var3\"]])\n    )\n    df.index.names = [\"l1\", \"l2\"]\n    par_name_map = {\"today\": \"tomorrow\", \"var1\": \"1stvar\"}\n    index_name_map = [\"period\", \"variable\"]\n    column_names = list(\"abc\")\n    res = _process_frame_indices(\n        df,\n        custom_param_names=par_name_map,\n        custom_index_names=index_name_map,\n        column_names=column_names,\n        show_col_names=True,\n        show_col_groups=False,\n        column_groups=None,\n    )\n    # expected:\n    params = \"\"\"\n        period,variable,a,b,c\n        tomorrow,1stvar,1,1,1\n        tomorrow,var2,1,1,1\n        tomorrow,var3,1,1,1\n    \"\"\"\n    exp = _read_csv_string(params).fillna(\"\")\n    exp.set_index([\"period\", \"variable\"], inplace=True)\n    afe(res, exp, check_dtype=False)\n\n\ndef test_process_frame_indices_columns():\n    df = pd.DataFrame(np.ones((3, 3)), columns=[\"\", \"\", \"\"])\n    col_names = list(\"abc\")\n    col_groups = [\"first\", \"first\", \"second\"]\n    res = _process_frame_indices(\n        df=df,\n        custom_index_names=None,\n        custom_param_names=None,\n        show_col_groups=True,\n        show_col_names=True,\n        column_names=col_names,\n        column_groups=col_groups,\n    )\n    arrays = [np.array(col_groups), np.array(col_names)]\n    exp = pd.DataFrame(data=np.ones((3, 3)), columns=arrays)\n    afe(res, exp, check_dtype=False)\n\n\ndef test_apply_number_format_tuple():\n    number_format = (\"{0:.2g}\", \"{0:.2f}\", \"{0:.2g}\")\n    raw = pd.DataFrame(data=[1234.2332, 0.0001])\n    exp = pd.DataFrame(data=[\"1.2e+03\", \"0\"])\n    res = _apply_number_format(\n        df_raw=raw, number_format=number_format, format_integers=False\n    )\n    afe(exp, res)\n\n\ndef 
test_apply_number_format_int():\n    number_format = 3\n    raw = pd.DataFrame(data=[\"1234.2332\", \"1.2e+03\"])\n    exp = pd.DataFrame(data=[\"1234.233\", \"1200\"])\n    res = _apply_number_format(\n        df_raw=raw, number_format=number_format, format_integers=False\n    )\n    afe(exp, res)\n\n\ndef test_apply_number_format_callable():\n    def nsf(num, n=3):\n        \"\"\"N-Significant Figures.\"\"\"\n        numstr = (\"{0:.%ie}\" % (n - 1)).format(num)\n        return numstr\n\n    raw = pd.DataFrame(data=[1234.2332, 0.0001])\n    exp = pd.DataFrame(data=[\"1.23e+03\", \"1.00e-04\"])\n    res = _apply_number_format(df_raw=raw, number_format=nsf, format_integers=False)\n    afe(exp, res)\n\n\ndef test_get_digits_after_decimal():\n    df = pd.DataFrame(\n        data=[[\"12.456\", \"0.00003\", \"1.23e+05\"], [\"16\", \"0.03\", \"1.2e+05\"]]\n    ).T\n    exp = 5\n    res = _get_digits_after_decimal(df)\n    assert exp == res\n\n\ndef test_create_group_to_col_position():\n    col_groups = [\n        \"a_name\",\n        \"a_name\",\n        \"a_name\",\n        \"second_name\",\n        \"second_name\",\n        \"third_name\",\n    ]\n    exp = {\"a_name\": [0, 1, 2], \"second_name\": [3, 4], \"third_name\": [5]}\n    res = _create_group_to_col_position(col_groups)\n    assert exp == res\n\n\ndef test_get_model_names():\n    m1 = {\"params\": None, \"info\": None, \"name\": \"a_name\"}\n    m3 = {\"params\": None, \"info\": None, \"name\": None}\n    m5 = {\"params\": None, \"info\": None, \"name\": \"third_name\"}\n    models = [m1, m3, m5]\n    res = _get_model_names(models)\n    exp = [\"a_name\", \"(2)\", \"third_name\"]\n    assert res == exp\n\n\ndef test_get_default_column_names_and_groups():\n    model_names = [\"a_name\", \"a_name\", \"(3)\", \"(4)\", \"third_name\"]\n    res_names, res_groups = _get_default_column_names_and_groups(model_names)\n    exp_names = [f\"({i + 1})\" for i in range(len(model_names))]\n    exp_groups = [\"a_name\", 
\"a_name\", \"(3)\", \"(4)\", \"third_name\"]\n    assert res_names == exp_names\n    assert res_groups == exp_groups\n\n\ndef test_get_default_column_names_and_groups_undefined_groups():\n    model_names = [\"a_name\", \"second_name\", \"(3)\", \"(4)\", \"third_name\"]\n    res_names, res_groups = _get_default_column_names_and_groups(model_names)\n    exp_names = model_names\n    assert res_names == exp_names\n    assert pd.isna(res_groups)\n\n\ndef test_customize_col_groups():\n    default = [\"a_name\", \"a_name\", \"(3)\", \"(4)\", \"third_name\"]\n    mapping = {\"a_name\": \"first_name\", \"third_name\": \"fifth_name\"}\n    exp = [\"first_name\", \"first_name\", \"(3)\", \"(4)\", \"fifth_name\"]\n    res = _customize_col_groups(default, mapping)\n    assert exp == res\n\n\ndef test_customize_col_names_dict():\n    default = list(\"abcde\")\n    custom = {\"a\": \"1\", \"c\": \"3\", \"e\": \"5\"}\n    res = _customize_col_names(default_col_names=default, custom_col_names=custom)\n    exp = [\"1\", \"b\", \"3\", \"d\", \"5\"]\n    assert exp == res\n\n\ndef test_customize_col_names_list():\n    default = list(\"abcde\")\n    custom = list(\"12345\")\n    res = _customize_col_names(default_col_names=default, custom_col_names=custom)\n    exp = [\"1\", \"2\", \"3\", \"4\", \"5\"]\n    assert exp == res\n\n\ndef test_get_params_frames_with_common_index():\n    m1 = {\n        \"params\": pd.DataFrame(np.ones(5), index=list(\"abcde\")),\n        \"info\": None,\n        \"name\": None,\n    }\n    m2 = {\n        \"params\": pd.DataFrame(np.ones(3), index=list(\"abc\")),\n        \"info\": None,\n        \"name\": None,\n    }\n    res = _get_params_frames_with_common_index([m1, m2])\n    exp = [\n        pd.DataFrame(np.ones(5), index=list(\"abcde\")),\n        pd.DataFrame(\n            np.concatenate([np.ones(3), np.ones(2) * np.nan]), index=list(\"abcde\")\n        ),\n    ]\n    afe(res[0], exp[0])\n    afe(res[1], exp[1])\n\n\ndef 
test_get_params_frames_with_common_index_multiindex():\n    mi = pd.MultiIndex.from_tuples([(\"a\", 1), (\"a\", 2), (\"b\", 1), (\"b\", 2), (\"b\", 3)])\n    m1 = {\"params\": pd.DataFrame(np.ones(5), index=mi), \"info\": None, \"name\": None}\n    m2 = {\"params\": pd.DataFrame(np.ones(3), index=mi[:3]), \"info\": None, \"name\": None}\n    res = _get_params_frames_with_common_index([m1, m2])\n    exp = [\n        pd.DataFrame(np.ones(5), index=mi),\n        pd.DataFrame(np.concatenate([np.ones(3), np.ones(2) * np.nan]), index=mi),\n    ]\n    afe(res[0], exp[0])\n    afe(res[1], exp[1])\n\n\ndef test_check_order_of_model_names_raises_error():\n    model_names = [\"a\", \"b\", \"a\"]\n    with pytest.raises(ValueError):\n        _check_order_of_model_names(model_names)\n\n\ndef test_manual_extra_info():\n    footer_str = \"\"\"\n         ,target\n        R$^2$,0.40\n        Adj. R$^2$,0.40\n        Residual Std. Error,60.5\n        F Statistic,72.90$^{***}$\n        Observations,442\n        Controls,Yes\n\n    \"\"\"\n    footer = _read_csv_string(footer_str).fillna(\"\")\n    footer.set_index(\" \", inplace=True)\n    footer.index.names = [None]\n    footer.index = pd.MultiIndex.from_arrays([footer.index])\n    exp = footer.copy(deep=True)\n    exp.loc[\"Controls\"] = \"\\\\multicolumn{1}{c}{Yes}\"\n    exp.loc[\"Observations\"] = \"\\\\multicolumn{1}{c}{442}\"\n    for i, r in footer.iterrows():\n        res = _center_align_integers_and_non_numeric_strings(r)\n        ase(exp.loc[i], res)\n"
  },
  {
    "path": "tests/estimagic/test_lollipop_plot.py",
    "content": "import numpy as np\nimport pandas as pd\n\nfrom estimagic.lollipop_plot import lollipop_plot\n\n\ndef test_lollipop_plot_runs():\n    df = pd.DataFrame(\n        np.arange(12).reshape(4, 3),\n        index=pd.MultiIndex.from_tuples([(0, \"a\"), (\"b\", 1), (\"a\", \"b\"), (2, 3)]),\n        columns=[\"a\", \"b\", \"c\"],\n    )\n\n    for grid in [True, False]:\n        lollipop_plot(df, combine_plots_in_grid=grid)\n"
  },
  {
    "path": "tests/estimagic/test_ml_covs.py",
    "content": "from itertools import product\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom numpy.testing import assert_array_almost_equal as aaae\n\nfrom estimagic import ml_covs\nfrom estimagic.ml_covs import (\n    _clustering,\n    _sandwich_step,\n    _stratification,\n    cov_cluster_robust,\n    cov_hessian,\n    cov_jacobian,\n    cov_robust,\n    cov_strata_robust,\n)\n\n\n@pytest.fixture()\ndef jac():\n    _jac = np.array(\n        [\n            [0.017986, 0.089931, 0, 0.035972],\n            [0.0024726, 0.014836, 0.0024726, 0.0098905],\n            [0.0009111, 0.002733, 0, 0.009111],\n            [-0.993307, -4.966536, 0, -3.973229],\n            [0.119203, 0.238406, 0, 0.119203],\n        ]\n    )\n    return _jac\n\n\n@pytest.fixture()\ndef hess():\n    _hess = np.array(\n        [\n            [-0.132681, -0.349071, -0.002467, -0.185879],\n            [-0.349071, -1.124730, -0.014799, -0.606078],\n            [-0.002467, -0.014799, -0.002467, -0.009866],\n            [-0.185879, -0.606078, -0.009866, -0.412500],\n        ]\n    )\n    return _hess\n\n\n@pytest.fixture()\ndef design_options():\n    df = pd.DataFrame(\n        data=[\n            [164, 88, 0.116953],\n            [562, 24, 0.174999],\n            [459, 71, 0.374608],\n            [113, 25, 0.369494],\n            [311, 63, 0.203738],\n        ],\n        columns=[\"psu\", \"strata\", \"weight\"],\n    )\n    return df\n\n\ndef test_clustering(jac, design_options):\n    calculated = _clustering(jac, design_options)\n    expected = np.array(\n        [\n            [1.251498, 6.204213, 0.000008, 4.951907],\n            [6.204213, 30.914541, 0.000046, 24.706263],\n            [0.000008, 0.000046, 0.000008, 0.000031],\n            [4.951907, 24.706263, 0.000031, 19.752791],\n        ]\n    )\n    np.allclose(calculated, expected)\n\n\ndef test_stratification(jac, design_options):\n    calculated = _stratification(jac, design_options)\n\n    
expected = np.array(\n        [\n            [1.0012, 4.963, 0.000006, 3.9615],\n            [4.9634, 24.732, 0.000037, 19.765],\n            [0.000006, 0.000037, 0.000006, 0.000024],\n            [3.961525, 19.76501, 0.000024, 15.8022],\n        ]\n    )\n    np.allclose(calculated, expected)\n\n\ndef test_sandwich_step(hess):\n    calculated = _sandwich_step(hess, meat=np.ones((4, 4)))\n\n    expected = np.array(\n        [\n            [5194.925, -1876.241, 36395.846, -279.962],\n            [-1876.2415, 677.638707, -13145.02087, 101.11338],\n            [36395.8461, -13145.0208, 254990.7081, -1961.4250],\n            [-279.962055, 101.113381, -1961.425002, 15.087562],\n        ]\n    )\n    np.allclose(calculated, expected)\n\n\ndef test_cov_robust(jac, hess):\n    calculated = cov_robust(jac, hess)\n\n    expected = np.array(\n        [\n            [911.67667, -172.809772, 2264.15098415, -534.7422541],\n            [-172.809772, 32.823296, -429.142924, 101.253230],\n            [2264.150984, -429.142924, 5647.129400, -1333.791658],\n            [-534.742254, 101.253230, -1333.791658, 315.253633],\n        ]\n    )\n    np.allclose(calculated, expected)\n\n\ndef test_cov_cluster_robust(jac, hess, design_options):\n    calculated = cov_cluster_robust(\n        jac,\n        hess,\n        design_options,\n    )\n\n    expected = np.array(\n        [\n            [911.411, -172.753, 2264.03, -534.648],\n            [-172.753, 32.8104, -429.901, 101.228],\n            [2263.03, -428.901, 5643, -1333.24],\n            [-534.648, 101.228, -1333.24, 315.225],\n        ]\n    )\n\n    np.allclose(calculated, expected)\n\n\ndef test_cov_strata_robust(jac, hess, design_options):\n    calculated = cov_strata_robust(\n        jac,\n        hess,\n        design_options,\n    )\n\n    expected = np.array(\n        [\n            [729.153, -138.203, 1810.42, -427.719],\n            [-138.203, 26.2483, -343.121, 80.9828],\n            [1810.42, -343.121, 4514.4, 
-1066.59],\n            [-427.719, 80.9828, -1066.59, 252.18],\n        ]\n    )\n    np.allclose(calculated, expected)\n\n\ndef test_cov_hessian(hess):\n    calculated = cov_hessian(hess)\n\n    expected = np.array(\n        [\n            [44.7392, -14.563, 41.659, 0.2407],\n            [-14.56307, 9.01046, -14.14055, -6.3383],\n            [41.65906, -14.14055, 487.09343, -9.645899],\n            [0.240678, -6.338334, -9.645898, 11.859284],\n        ]\n    )\n    np.allclose(calculated, expected)\n\n\ndef test_cov_jacobian(jac):\n    calculated = cov_jacobian(jac)\n    expected = np.array(\n        [\n            [937.03508, -780.893, 781.1802, 741.8099],\n            [-780.893, 749.9739, -749.918, -742.28097],\n            [781.1802, -749.918045, 164316.58829, 741.88592],\n            [741.8099, -742.280970, 741.8859, 742.520006],\n        ]\n    )\n    np.allclose(calculated, expected)\n\n\nFIX_PATH = Path(__file__).resolve().parent / \"pickled_statsmodels_ml_covs\"\n\n\ndef get_expected_covariance(model, cov_method):\n    \"\"\"Load expected covariance matrix.\n\n    Args:\n        model (str): one of ['logit', 'probit']\n        cov_method (str): one of ['jacobian', 'hessian', 'robust']\n\n    Returns:\n        expected_covariance\n\n    \"\"\"\n    _name = cov_method if cov_method != \"robust\" else \"sandwich\"\n    fix_name = f\"{model}_{_name}.pickle\"\n    expected_cov = pd.read_pickle(FIX_PATH / fix_name)\n    return expected_cov\n\n\ndef get_input(model, input_types):\n    \"\"\"Load the inputs.\n\n    Args:\n        model (str): one of ['logit', 'probit']\n        input_types (list): can contain the elements 'jacobian' and 'hessian'\n\n    Returns:\n        inputs (dict): The inputs for the covariance function\n\n    \"\"\"\n    inputs = {}\n    for typ in input_types:\n        fix_name = f\"{model}_{typ}_matrix.pickle\"\n        input_matrix = pd.read_pickle(FIX_PATH / fix_name)\n        inputs[typ] = input_matrix\n\n    short_names = {\"jacobian\": 
\"jac\", \"hessian\": \"hess\"}\n    inputs = {short_names[key]: val for key, val in inputs.items()}\n    return inputs\n\n\nmodels = [\"probit\", \"logit\"]\nmethods = [\"jacobian\", \"hessian\", \"robust\"]\ntest_cases = list(product(models, methods))\n\n\n@pytest.mark.parametrize(\"model, method\", test_cases)\ndef test_cov_function_against_statsmodels(model, method):\n    expected = get_expected_covariance(model, method)\n\n    if method in [\"jacobian\", \"hessian\"]:\n        input_types = [method]\n    elif method == \"robust\":\n        input_types = [\"jacobian\", \"hessian\"]\n\n    inputs = get_input(model, input_types)\n\n    calculated = getattr(ml_covs, f\"cov_{method}\")(**inputs)\n\n    aaae(calculated, expected)\n"
  },
  {
    "path": "tests/estimagic/test_msm_covs.py",
    "content": "import itertools\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom numpy.testing import assert_array_almost_equal as aaae\nfrom pandas.testing import assert_frame_equal\n\nfrom estimagic.msm_covs import cov_optimal, cov_robust\nfrom optimagic.utilities import get_rng\n\nrng = get_rng(seed=1234)\n\njac_np = rng.uniform(size=(10, 5))\njac_pd = pd.DataFrame(jac_np)\n\nmoments_cov_np = rng.uniform(size=(10, 10)) + np.eye(10) * 2.5\nmoments_cov_pd = pd.DataFrame(moments_cov_np)\n\ntest_cases = itertools.product([jac_np, jac_pd], [moments_cov_np, moments_cov_pd])\n\n\n@pytest.mark.parametrize(\"jac, moments_cov\", test_cases)\ndef test_cov_robust_and_cov_optimal_are_equivalent_in_special_case(jac, moments_cov):\n    weights = np.linalg.inv(moments_cov)\n    if isinstance(moments_cov, pd.DataFrame):\n        weights = pd.DataFrame(\n            weights, index=moments_cov.index, columns=moments_cov.columns\n        )\n\n    sandwich = cov_robust(jac, weights, moments_cov)\n    optimal = cov_optimal(jac, weights)\n\n    if isinstance(sandwich, pd.DataFrame):\n        assert_frame_equal(sandwich, optimal)\n\n    else:\n        aaae(sandwich, optimal)\n"
  },
  {
    "path": "tests/estimagic/test_msm_sensitivity.py",
    "content": "import numpy as np\nimport pandas as pd\nimport pytest\nfrom numpy.testing import assert_array_almost_equal as aaae\nfrom scipy import stats\n\nfrom estimagic.config import EXAMPLE_DIR\nfrom estimagic.msm_covs import cov_optimal\nfrom estimagic.msm_sensitivity import (\n    calculate_actual_sensitivity_to_noise,\n    calculate_actual_sensitivity_to_removal,\n    calculate_fundamental_sensitivity_to_noise,\n    calculate_fundamental_sensitivity_to_removal,\n    calculate_sensitivity_to_bias,\n    calculate_sensitivity_to_weighting,\n)\nfrom optimagic.differentiation.derivatives import first_derivative\n\n\ndef simulate_aggregated_moments(params, x, y):\n    \"\"\"Calculate aggregated moments for example from Honore, DePaula, Jorgensen.\"\"\"\n    mom_value = simulate_moment_contributions(params, x, y)\n    moments = mom_value.mean(axis=1)\n\n    return moments\n\n\ndef simulate_moment_contributions(params, x, y):\n    \"\"\"Calculate moment contributions for example from Honore, DePaula, Jorgensen.\"\"\"\n    y_estimated = x.to_numpy() @ (params[\"value\"].to_numpy())\n\n    x_np = x.T.to_numpy()\n\n    residual = y.T.to_numpy() - stats.norm.cdf(y_estimated)\n\n    mom_value = []\n\n    length = len(x_np)\n\n    for i in range(length):\n        for j in range(i, length):\n            moment = residual * x_np[i] * x_np[j]\n            mom_value.append(moment)\n\n    mom_value = np.stack(mom_value, axis=1)[0]\n    mom_value = pd.DataFrame(data=mom_value)\n\n    return mom_value\n\n\n@pytest.fixture()\ndef moments_cov(params, func_kwargs):\n    mom_value = simulate_moment_contributions(params, **func_kwargs)\n    mom_value = mom_value.to_numpy()\n    s = np.cov(mom_value, ddof=0)\n    return s\n\n\n@pytest.fixture()\ndef params():\n    params_index = [[\"beta\"], [\"intersection\", \"x1\", \"x2\"]]\n    params_index = pd.MultiIndex.from_product(params_index, names=[\"type\", \"name\"])\n    params = pd.DataFrame(\n        data=[[0.57735], [0.57735], 
[0.57735]], index=params_index, columns=[\"value\"]\n    )\n    return params\n\n\n@pytest.fixture()\ndef func_kwargs():\n    data = pd.read_csv(EXAMPLE_DIR / \"sensitivity_probit_example_data.csv\")\n    y_data = data[[\"y\"]]\n    x_data = data[[\"intercept\", \"x1\", \"x2\"]]\n    func_kwargs = {\"x\": x_data, \"y\": y_data}\n    return func_kwargs\n\n\n@pytest.fixture()\ndef jac(params, func_kwargs):\n    derivative_dict = first_derivative(\n        func=simulate_aggregated_moments,\n        params=params,\n        func_kwargs=func_kwargs,\n    )\n\n    g = derivative_dict.derivative\n    return g.to_numpy()\n\n\n@pytest.fixture()\ndef weights(moments_cov):\n    return np.linalg.inv(moments_cov)\n\n\n@pytest.fixture()\ndef params_cov_opt(jac, weights):\n    return cov_optimal(jac, weights)\n\n\ndef test_sensitivity_to_bias(jac, weights, params):\n    calculated = calculate_sensitivity_to_bias(jac, weights)\n    expected = pd.DataFrame(\n        data=[\n            [4.010481, 2.068143, 2.753155, 0.495683, 1.854492, 0.641020],\n            [0.605718, 6.468960, -2.235886, 1.324065, -1.916986, -0.116590],\n            [2.218011, -1.517303, 7.547212, -0.972578, 1.956985, 0.255691],\n        ],\n        index=params.index,\n    )\n\n    aaae(calculated, expected)\n\n\ndef test_fundamental_sensitivity_to_noise(\n    jac, weights, moments_cov, params_cov_opt, params\n):\n    calculated = calculate_fundamental_sensitivity_to_noise(\n        jac,\n        weights,\n        moments_cov,\n        params_cov_opt,\n    )\n    expected = pd.DataFrame(\n        data=[\n            [1.108992, 0.191341, 0.323757, 0.020377, 0.085376, 0.029528],\n            [0.017262, 1.277374, 0.145700, 0.099208, 0.062248, 0.000667],\n            [0.211444, 0.064198, 1.516571, 0.048900, 0.059264, 0.002929],\n        ],\n        index=params.index,\n    )\n\n    aaae(calculated, expected)\n\n\ndef test_actual_sensitivity_to_noise(jac, weights, moments_cov, params_cov_opt, params):\n    
sensitivity_to_bias = calculate_sensitivity_to_bias(jac, weights)\n    calculated = calculate_actual_sensitivity_to_noise(\n        sensitivity_to_bias,\n        weights,\n        moments_cov,\n        params_cov_opt,\n    )\n    expected = pd.DataFrame(\n        data=[\n            [1.108992, 0.191341, 0.323757, 0.020377, 0.085376, 0.029528],\n            [0.017262, 1.277374, 0.145700, 0.099208, 0.062248, 0.000667],\n            [0.211444, 0.064198, 1.516571, 0.048900, 0.059264, 0.002929],\n        ],\n        index=params.index,\n    )\n\n    aaae(calculated, expected)\n\n\ndef test_actual_sensitivity_to_removal(\n    jac, weights, moments_cov, params_cov_opt, params\n):\n    calculated = calculate_actual_sensitivity_to_removal(\n        jac, weights, moments_cov, params_cov_opt\n    )\n\n    expected = pd.DataFrame(\n        data=[\n            [1.020791, 0.343558, 0.634299, 0.014418, 0.058827, 0.017187],\n            [0.016262, 2.313441, 0.285552, 0.052574, 0.043585, 0.000306],\n            [0.189769, 0.114946, 2.984443, 0.022729, 0.042140, 0.005072],\n        ],\n        index=params.index,\n    )\n\n    aaae(calculated, expected)\n\n\ndef test_fundamental_sensitivity_to_removal(jac, moments_cov, params_cov_opt, params):\n    calculated = calculate_fundamental_sensitivity_to_removal(\n        jac, moments_cov, params_cov_opt\n    )\n\n    expected = pd.DataFrame(\n        data=[\n            [0.992910, 0.340663, 0.634157, 0.009277, 0.058815, 0.013542],\n            [0.015455, 2.274235, 0.285389, 0.045166, 0.042882, 0.000306],\n            [0.189311, 0.114299, 2.970578, 0.022262, 0.040827, 0.001343],\n        ],\n        index=params.index,\n    )\n\n    aaae(calculated, expected)\n\n\ndef test_sensitivity_to_weighting(jac, weights, moments_cov, params_cov_opt, params):\n    calculated = calculate_sensitivity_to_weighting(\n        jac, weights, moments_cov, params_cov_opt\n    )\n\n    expected = pd.DataFrame(\n        data=np.zeros((3, 6)),\n        
index=params.index,\n    )\n\n    aaae(calculated, expected)\n"
  },
  {
    "path": "tests/estimagic/test_msm_sensitivity_via_estimate_msm.py",
    "content": "import numpy as np\nimport pandas as pd\nimport pytest\nfrom numpy.testing import assert_array_almost_equal as aaae\nfrom scipy import stats\n\nfrom estimagic.config import EXAMPLE_DIR\nfrom estimagic.estimate_msm import estimate_msm\n\n\ndef simulate_aggregated_moments(params, x, y):\n    \"\"\"Calculate aggregated moments for example from Honore, DePaula, Jorgensen.\"\"\"\n    mom_value = simulate_moment_contributions(params, x, y)\n    moments = mom_value.mean(axis=1)\n\n    return moments\n\n\ndef simulate_moment_contributions(params, x, y):\n    \"\"\"Calculate moment contributions for example from Honore, DePaula, Jorgensen.\"\"\"\n    y_estimated = x.to_numpy() @ (params[\"value\"].to_numpy())\n\n    x_np = x.T.to_numpy()\n\n    residual = y.T.to_numpy() - stats.norm.cdf(y_estimated)\n\n    mom_value = []\n\n    length = len(x_np)\n\n    for i in range(length):\n        for j in range(i, length):\n            moment = residual * x_np[i] * x_np[j]\n            mom_value.append(moment)\n\n    mom_value = np.stack(mom_value, axis=1)[0]\n    mom_value = pd.DataFrame(data=mom_value)\n\n    return mom_value\n\n\n@pytest.fixture()\ndef moments_cov(params, func_kwargs):\n    mom_value = simulate_moment_contributions(params, **func_kwargs)\n    mom_value = mom_value.to_numpy()\n    s = np.cov(mom_value, ddof=0)\n    return s\n\n\n@pytest.fixture()\ndef params():\n    params_index = [[\"beta\"], [\"intersection\", \"x1\", \"x2\"]]\n    params_index = pd.MultiIndex.from_product(params_index, names=[\"type\", \"name\"])\n    params = pd.DataFrame(\n        data=[[0.57735], [0.57735], [0.57735]], index=params_index, columns=[\"value\"]\n    )\n    return params\n\n\n@pytest.fixture()\ndef func_kwargs():\n    data = pd.read_csv(EXAMPLE_DIR / \"sensitivity_probit_example_data.csv\")\n    y_data = data[[\"y\"]]\n    x_data = data[[\"intercept\", \"x1\", \"x2\"]]\n    func_kwargs = {\"x\": x_data, \"y\": y_data}\n    return 
func_kwargs\n\n\n@pytest.fixture()\ndef msm_res(params, moments_cov, func_kwargs):\n    res = estimate_msm(\n        simulate_moments=simulate_aggregated_moments,\n        # only needed for shape since optimization is skipped\n        empirical_moments=np.zeros(6),\n        params=params,\n        optimize_options=False,\n        moments_cov=moments_cov,\n        simulate_moments_kwargs=func_kwargs,\n        weights=\"optimal\",\n    )\n    return res\n\n\ndef test_sensitivity_to_bias(msm_res):\n    calculated = msm_res.sensitivity(kind=\"bias\")\n    expected = np.array(\n        [\n            [4.010481, 2.068143, 2.753155, 0.495683, 1.854492, 0.641020],\n            [0.605718, 6.468960, -2.235886, 1.324065, -1.916986, -0.116590],\n            [2.218011, -1.517303, 7.547212, -0.972578, 1.956985, 0.255691],\n        ]\n    )\n\n    aaae(calculated, expected)\n\n\ndef test_fundamental_sensitivity_to_noise(msm_res):\n    calculated = msm_res.sensitivity(kind=\"noise_fundamental\")\n    expected = np.array(\n        [\n            [1.108992, 0.191341, 0.323757, 0.020377, 0.085376, 0.029528],\n            [0.017262, 1.277374, 0.145700, 0.099208, 0.062248, 0.000667],\n            [0.211444, 0.064198, 1.516571, 0.048900, 0.059264, 0.002929],\n        ]\n    )\n\n    aaae(calculated, expected)\n\n\ndef test_actual_sensitivity_to_noise(msm_res):\n    calculated = msm_res.sensitivity(kind=\"noise\")\n    expected = np.array(\n        [\n            [1.108992, 0.191341, 0.323757, 0.020377, 0.085376, 0.029528],\n            [0.017262, 1.277374, 0.145700, 0.099208, 0.062248, 0.000667],\n            [0.211444, 0.064198, 1.516571, 0.048900, 0.059264, 0.002929],\n        ]\n    )\n\n    aaae(calculated, expected)\n\n\ndef test_actual_sensitivity_to_removal(msm_res):\n    calculated = msm_res.sensitivity(kind=\"removal\")\n\n    expected = np.array(\n        [\n            [1.020791, 0.343558, 0.634299, 0.014418, 0.058827, 0.017187],\n            [0.016262, 2.313441, 0.285552, 
0.052574, 0.043585, 0.000306],\n            [0.189769, 0.114946, 2.984443, 0.022729, 0.042140, 0.005072],\n        ]\n    )\n\n    aaae(calculated, expected)\n\n\ndef test_fundamental_sensitivity_to_removal(msm_res):\n    calculated = msm_res.sensitivity(kind=\"removal_fundamental\")\n\n    expected = np.array(\n        [\n            [0.992910, 0.340663, 0.634157, 0.009277, 0.058815, 0.013542],\n            [0.015455, 2.274235, 0.285389, 0.045166, 0.042882, 0.000306],\n            [0.189311, 0.114299, 2.970578, 0.022262, 0.040827, 0.001343],\n        ]\n    )\n\n    aaae(calculated, expected)\n\n\ndef test_sensitivity_to_weighting(msm_res):\n    calculated = msm_res.sensitivity(kind=\"weighting\")\n\n    expected = np.zeros((3, 6))\n\n    aaae(calculated, expected)\n"
  },
  {
    "path": "tests/estimagic/test_msm_weighting.py",
    "content": "import itertools\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom numpy.testing import assert_array_almost_equal as aaae\n\nfrom estimagic.msm_weighting import (\n    _assemble_block_diagonal_matrix,\n    get_moments_cov,\n    get_weighting_matrix,\n)\nfrom optimagic.parameters.block_trees import block_tree_to_matrix\nfrom optimagic.utilities import get_rng\n\n\n@pytest.fixture()\ndef expected_values():\n    values = np.array([[1, 2, 0, 0], [3, 4, 0, 0], [0, 0, 5, 6], [0, 0, 7, 8]])\n    return values\n\n\ncov_np = np.diag([1, 2, 3])\ncov_pd = pd.DataFrame(cov_np)\n\ntest_cases = itertools.product([cov_np, cov_pd], [\"diagonal\", \"optimal\", \"identity\"])\n\n\n@pytest.mark.parametrize(\"moments_cov, method\", test_cases)\ndef test_get_weighting_matrix(moments_cov, method):\n    if isinstance(moments_cov, np.ndarray):\n        fake_emp_moms = np.ones(len(moments_cov))\n    else:\n        fake_emp_moms = pd.Series(np.ones(len(moments_cov)), index=moments_cov.index)\n    calculated = get_weighting_matrix(moments_cov, method, fake_emp_moms)\n\n    if isinstance(moments_cov, pd.DataFrame):\n        assert calculated.index.equals(moments_cov.index)\n        assert calculated.columns.equals(moments_cov.columns)\n        calculated = calculated.to_numpy()\n\n    if method == \"identity\":\n        expected = np.identity(cov_np.shape[0])\n    else:\n        expected = np.diag(1 / np.array([1, 2, 3]))\n\n    aaae(calculated, expected)\n\n\ndef test_assemble_block_diagonal_matrix_pd(expected_values):\n    matrices = [\n        pd.DataFrame([[1, 2], [3, 4]]),\n        pd.DataFrame([[5, 6], [7, 8]], columns=[2, 3], index=[2, 3]),\n    ]\n    calculated = _assemble_block_diagonal_matrix(matrices)\n    assert isinstance(calculated, pd.DataFrame)\n    assert calculated.index.equals(calculated.columns)\n    assert calculated.index.tolist() == [0, 1, 2, 3]\n    aaae(calculated, expected_values)\n\n\ndef 
test_assemble_block_diagonal_matrix_mixed(expected_values):\n    matrices = [pd.DataFrame([[1, 2], [3, 4]]), np.array([[5, 6], [7, 8]])]\n    calculated = _assemble_block_diagonal_matrix(matrices)\n    assert isinstance(calculated, np.ndarray)\n    aaae(calculated, expected_values)\n\n\ndef test_get_moments_cov_runs_with_pytrees():\n    rng = get_rng(1234)\n    data = rng.normal(scale=[10, 5, 1], size=(100, 3))\n    data = pd.DataFrame(data=data)\n\n    def calc_moments(data, keys):\n        means = data.mean()\n        means.index = keys\n        return means.to_dict()\n\n    moment_kwargs = {\"keys\": [\"a\", \"b\", \"c\"]}\n\n    calculated = get_moments_cov(\n        data=data,\n        calculate_moments=calc_moments,\n        moment_kwargs=moment_kwargs,\n        bootstrap_kwargs={\"n_draws\": 100},\n    )\n\n    fake_tree = {\"a\": 1, \"b\": 2, \"c\": 3}\n    cov = block_tree_to_matrix(calculated, fake_tree, fake_tree)\n    assert cov.shape == (3, 3)\n\n    assert cov[0, 0] > cov[1, 1] > cov[2, 2]\n\n\ndef test_get_moments_cov_passes_bootstrap_kwargs_to_bootstrap():\n    rng = get_rng(1234)\n    data = rng.normal(scale=[10, 5, 1], size=(100, 3))\n    data = pd.DataFrame(data=data)\n    data[\"cluster\"] = np.random.choice([1, 2, 3], size=100)\n\n    def calc_moments(data, keys):\n        means = data.mean()\n        means.index = keys\n        return means.to_dict()\n\n    moment_kwargs = {\"keys\": [\"a\", \"b\", \"c\", \"cluster\"]}\n\n    with pytest.raises(ValueError, match=\"a must be a positive integer unless no\"):\n        get_moments_cov(\n            data=data,\n            calculate_moments=calc_moments,\n            moment_kwargs=moment_kwargs,\n            bootstrap_kwargs={\"n_draws\": -1},\n        )\n\n    with pytest.raises(ValueError, match=\"Invalid bootstrap_kwargs: {'cluster'}\"):\n        get_moments_cov(\n            data=data,\n            calculate_moments=calc_moments,\n            moment_kwargs=moment_kwargs,\n            
bootstrap_kwargs={\"cluster\": \"cluster\"},\n        )\n"
  },
  {
    "path": "tests/estimagic/test_shared.py",
    "content": "from typing import NamedTuple\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom numpy.testing import assert_array_almost_equal as aaae\nfrom pybaum import leaf_names, tree_equal\n\nfrom estimagic.shared_covs import (\n    _to_numpy,\n    calculate_estimation_summary,\n    get_derivative_case,\n    process_pandas_arguments,\n    transform_covariance,\n    transform_free_cov_to_cov,\n    transform_free_values_to_params_tree,\n)\nfrom optimagic.parameters.tree_registry import get_registry\nfrom optimagic.utilities import get_rng\n\n\n@pytest.fixture()\ndef inputs():\n    jac = pd.DataFrame(np.ones((5, 3)), columns=[\"a\", \"b\", \"c\"])\n    hess = pd.DataFrame(np.eye(3) / 2, columns=list(\"abc\"), index=list(\"abc\"))\n    weights = pd.DataFrame(np.eye(5))\n    moments_cov = 1 / weights\n    out = {\"jac\": jac, \"hess\": hess, \"weights\": weights, \"moments_cov\": moments_cov}\n    return out\n\n\ndef test_process_pandas_arguments_all_pd(inputs):\n    *arrays, names = process_pandas_arguments(**inputs)\n    for arr in arrays:\n        assert isinstance(arr, np.ndarray)\n\n    expected_names = {\"moments\": list(range(5)), \"params\": [\"a\", \"b\", \"c\"]}\n\n    for key, value in expected_names.items():\n        assert names[key].tolist() == value\n\n\ndef test_process_pandas_arguments_incompatible_names(inputs):\n    inputs[\"jac\"].columns = [\"c\", \"d\", \"e\"]\n\n    with pytest.raises(ValueError):\n        process_pandas_arguments(**inputs)\n\n\ndef _from_internal(x, return_type=\"flat\"):  # noqa: ARG001\n    return x\n\n\nclass FakeConverter(NamedTuple):\n    has_transforming_constraints: bool = True\n    params_from_internal: callable = _from_internal\n\n\nclass FakeInternalParams(NamedTuple):\n    values: np.ndarray = np.arange(2)\n    lower_bounds: np.ndarray = np.full(2, -np.inf)\n    upper_bounds: np.ndarray = np.full(2, np.inf)\n    free_mask: np.ndarray = np.array([True, True])\n\n\ndef 
test_transform_covariance_no_bounds():\n    internal_cov = np.eye(2)\n\n    converter = FakeConverter()\n    internal_params = FakeInternalParams()\n\n    got = transform_covariance(\n        internal_params=internal_params,\n        internal_cov=internal_cov,\n        converter=converter,\n        rng=get_rng(seed=5687),\n        n_samples=100,\n        bounds_handling=\"ignore\",\n    )\n\n    expected_sample = get_rng(seed=5687).multivariate_normal(\n        np.arange(2), np.eye(2), 100\n    )\n    expected = np.cov(expected_sample, rowvar=False)\n\n    aaae(got, expected)\n\n\ndef test_transform_covariance_with_clipping():\n    rng = get_rng(seed=1234)\n\n    internal_cov = np.eye(2)\n\n    converter = FakeConverter()\n    internal_params = FakeInternalParams(\n        lower_bounds=np.ones(2), upper_bounds=np.ones(2)\n    )\n\n    got = transform_covariance(\n        internal_params=internal_params,\n        internal_cov=internal_cov,\n        converter=converter,\n        rng=rng,\n        n_samples=100,\n        bounds_handling=\"clip\",\n    )\n\n    expected = np.zeros((2, 2))\n\n    aaae(got, expected)\n\n\ndef test_transform_covariance_invalid_bounds():\n    rng = get_rng(seed=1234)\n\n    internal_cov = np.eye(2)\n\n    converter = FakeConverter()\n    internal_params = FakeInternalParams(\n        lower_bounds=np.ones(2), upper_bounds=np.ones(2)\n    )\n\n    with pytest.raises(ValueError):\n        transform_covariance(\n            internal_params=internal_params,\n            internal_cov=internal_cov,\n            converter=converter,\n            rng=rng,\n            n_samples=10,\n            bounds_handling=\"raise\",\n        )\n\n\nclass FakeFreeParams(NamedTuple):\n    free_mask: np.ndarray = np.array([True, False, True])\n    all_names: list = [\"a\", \"b\", \"c\"]\n    free_names: list = [\"a\", \"c\"]\n\n\ndef test_transform_free_cov_to_cov_pytree():\n    got = transform_free_cov_to_cov(\n        free_cov=np.eye(2),\n        
free_params=FakeFreeParams(),\n        params={\"a\": 1, \"b\": 2, \"c\": 3},\n        return_type=\"pytree\",\n    )\n\n    assert got[\"a\"][\"a\"] == 1\n    assert got[\"c\"][\"c\"] == 1\n    assert got[\"a\"][\"c\"] == 0\n    assert got[\"c\"][\"a\"] == 0\n    assert np.isnan(got[\"a\"][\"b\"])\n\n\ndef test_transform_free_cov_to_cov_array():\n    got = transform_free_cov_to_cov(\n        free_cov=np.eye(2),\n        free_params=FakeFreeParams(),\n        params={\"a\": 1, \"b\": 2, \"c\": 3},\n        return_type=\"array\",\n    )\n\n    expected = np.array([[1, np.nan, 0], [np.nan, np.nan, np.nan], [0, np.nan, 1]])\n\n    assert np.array_equal(got, expected, equal_nan=True)\n\n\ndef test_transform_free_cov_to_cov_dataframe():\n    got = transform_free_cov_to_cov(\n        free_cov=np.eye(2),\n        free_params=FakeFreeParams(),\n        params={\"a\": 1, \"b\": 2, \"c\": 3},\n        return_type=\"dataframe\",\n    )\n\n    expected = np.array([[1, np.nan, 0], [np.nan, np.nan, np.nan], [0, np.nan, 1]])\n\n    assert np.array_equal(got.to_numpy(), expected, equal_nan=True)\n    assert isinstance(got, pd.DataFrame)\n    assert list(got.columns) == list(\"abc\")\n    assert list(got.index) == list(\"abc\")\n\n\ndef test_transform_free_cov_to_cov_invalid():\n    with pytest.raises(ValueError):\n        transform_free_cov_to_cov(\n            free_cov=np.eye(2),\n            free_params=FakeFreeParams(),\n            params={\"a\": 1, \"b\": 2, \"c\": 3},\n            return_type=\"bla\",\n        )\n\n\ndef test_transform_free_values_to_params_tree():\n    got = transform_free_values_to_params_tree(\n        values=np.array([10, 11]),\n        free_params=FakeFreeParams(),\n        params={\"a\": 1, \"b\": 2, \"c\": 3},\n    )\n\n    assert got[\"a\"] == 10\n    assert got[\"c\"] == 11\n    assert np.isnan(got[\"b\"])\n\n\ndef test_get_derivative_case():\n    assert get_derivative_case(lambda x: True) == \"closed-form\"  # noqa: ARG005\n    assert 
get_derivative_case(False) == \"skip\"\n    assert get_derivative_case(None) == \"numerical\"\n\n\ndef test_to_numpy_invalid():\n    with pytest.raises(TypeError):\n        _to_numpy(15)\n\n\ndef test_calculate_estimation_summary():\n    # input data\n    summary_data = {\n        \"value\": {\n            \"a\": pd.Series([0], index=[\"i\"]),\n            \"b\": pd.DataFrame({\"c1\": [1], \"c2\": [2]}),\n        },\n        \"standard_error\": {\n            \"a\": pd.Series([0.1], index=[\"i\"]),\n            \"b\": pd.DataFrame({\"c1\": [0.2], \"c2\": [0.3]}),\n        },\n        \"ci_lower\": {\n            \"a\": pd.Series([-0.2], index=[\"i\"]),\n            \"b\": pd.DataFrame({\"c1\": [-0.4], \"c2\": [-0.6]}),\n        },\n        \"ci_upper\": {\n            \"a\": pd.Series([0.2], index=[\"i\"]),\n            \"b\": pd.DataFrame({\"c1\": [0.4], \"c2\": [0.6]}),\n        },\n        \"p_value\": {\n            \"a\": pd.Series([0.001], index=[\"i\"]),\n            \"b\": pd.DataFrame({\"c1\": [0.2], \"c2\": [0.07]}),\n        },\n        \"free\": np.array([True, True, True]),\n    }\n\n    registry = get_registry(extended=True)\n    names = leaf_names(summary_data[\"value\"], registry=registry)\n    free_names = names\n\n    # function call\n    summary = calculate_estimation_summary(summary_data, names, free_names)\n\n    # expectations\n    expectation = {\n        \"a\": pd.DataFrame(\n            {\n                \"value\": 0,\n                \"standard_error\": 0.1,\n                \"ci_lower\": -0.2,\n                \"ci_upper\": 0.2,\n                \"p_value\": 0.001,\n                \"free\": True,\n                \"stars\": \"***\",\n            },\n            index=[\"i\"],\n        ),\n        \"b\": pd.DataFrame(\n            {\n                \"value\": [1, 2],\n                \"standard_error\": [0.2, 0.3],\n                \"ci_lower\": [-0.4, -0.6],\n                \"ci_upper\": [0.4, 0.6],\n                \"p_value\": [0.2, 
0.7],\n                \"free\": [True, True],\n                \"stars\": [\"\", \"*\"],\n            },\n            index=pd.MultiIndex.from_tuples([(0, \"c1\"), (0, \"c2\")]),\n        ),\n    }\n\n    tree_equal(summary, expectation)\n"
  },
  {
    "path": "tests/optimagic/__init__.py",
    "content": ""
  },
  {
    "path": "tests/optimagic/benchmarking/__init__.py",
    "content": ""
  },
  {
    "path": "tests/optimagic/benchmarking/test_benchmark_reports.py",
    "content": "from itertools import product\n\nimport numpy as np\nimport pytest\n\nfrom optimagic import (\n    OptimizeResult,\n    convergence_report,\n    get_benchmark_problems,\n    rank_report,\n    traceback_report,\n)\n\n\n@pytest.fixture\ndef benchmark_example():\n    all_problems = get_benchmark_problems(\"example\")\n    problems = {\n        k: v\n        for k, v in all_problems.items()\n        if k in [\"bard_good_start\", \"box_3d\", \"rosenbrock_good_start\"]\n    }\n    _stop_after_10 = {\n        \"stopping_max_criterion_evaluations\": 10,\n        \"stopping_max_iterations\": 10,\n    }\n    optimizers = {\n        \"lbfgsb\": {\"algorithm\": \"scipy_lbfgsb\", \"algo_options\": _stop_after_10},\n        \"nm\": {\"algorithm\": \"scipy_neldermead\", \"algo_options\": _stop_after_10},\n    }\n\n    results = {\n        (\"bard_good_start\", \"lbfgsb\"): {\n            \"params_history\": [\n                [1.0, 1.0, 1.0],\n                [0.48286315298120086, 1.6129119244711858, 1.5974181569859445],\n                [0.09754340799557773, 1.7558262514618663, 1.7403560082627973],\n            ],\n            \"criterion_history\": np.array(\n                [\n                    4.16816959e01,\n                    3.20813118e00,\n                    9.97263708e-03,\n                ]\n            ),\n            \"time_history\": [\n                0.0,\n                0.0003762839987757616,\n                0.0007037959985609632,\n            ],\n            \"batches_history\": [0, 1, 2],\n            \"solution\": OptimizeResult,  # success\n        },\n        (\"box_3d\", \"lbfgsb\"): {\n            \"params_history\": [\n                [0.0, 10.0, 20.0],\n                [-0.6579976970071755, 10.014197643614924, 19.247113914560085],\n                [-3.2899884850358774, 10.070988218074623, 16.235569572800433],\n            ],\n            \"criterion_history\": np.array(\n                [\n                    1.03115381e03,\n         
           8.73640769e02,\n                    9.35093416e02,\n                ]\n            ),\n            \"time_history\": [\n                0.0,\n                0.000555748996703187,\n                0.0009771709992492106,\n            ],\n            \"batches_history\": [0, 1, 2],\n            \"solution\": OptimizeResult,  # failed\n        },\n        (\"rosenbrock_good_start\", \"lbfgsb\"): {\n            \"params_history\": [\n                [-1.2, 1.0],\n                [0.0, 0.0],\n            ],\n            \"criterion_history\": np.array([1.795769e6, 1e3]),\n            \"time_history\": [\n                0.0,\n                5.73799989069812e-04,\n            ],\n            \"batches_history\": [0, 1],\n            \"solution\": \"lbfgsb traceback\",  # error\n        },\n        (\"bard_good_start\", \"nm\"): {\n            \"params_history\": [\n                [1.0, 1.0, 1.0],\n                [1.05, 1.0, 1.0],\n                [0.7999999999999998, 1.1999999999999993, 1.0499999999999994],\n                [0.08241056, 1.13303608, 2.34369519],\n            ],\n            \"criterion_history\": np.array(\n                [\n                    41.68169586,\n                    43.90748158,\n                    23.92563745,\n                    0.00821487730657897,\n                ]\n            ),\n            \"time_history\": [\n                0.0,\n                3.603900040616281e-05,\n                0.0004506860022956971,\n                0.00015319500016630627,\n            ],\n            \"batches_history\": [0, 1, 2, 4],\n            \"solution\": OptimizeResult,  # success\n        },\n        (\"box_3d\", \"nm\"): {\n            \"params_history\": [\n                [0.0, 10.0, 20.0],\n                [0.025, 10.0, 20.0],\n                [0.0, 10.5, 20.0],\n            ],\n            \"criterion_history\": np.array(\n                [1031.15381061, 1031.17836473, 1030.15033678]\n            ),\n            
\"time_history\": [\n                0.0,\n                5.73799989069812e-05,\n                0.00010679600018193014,\n            ],\n            \"batches_history\": [0, 1, 2],\n            \"solution\": \"some traceback\",  # error\n        },\n        (\"rosenbrock_good_start\", \"nm\"): {\n            \"params_history\": [\n                [-1.2, 1.0],\n                [0.0, 0.0],\n            ],\n            \"criterion_history\": np.array([1.795769e6, 1e3]),\n            \"time_history\": [\n                0.0,\n                5.73799989069812e-04,\n            ],\n            \"batches_history\": [0, 1],\n            \"solution\": \"another traceback\",  # error\n        },\n    }\n\n    return problems, optimizers, results\n\n\n# ====================================================================================\n# Convergence report\n# ====================================================================================\n\nkeys = [\"stopping_criterion\"]\nstopping_criterion = [\"x_and_y\", \"x_or_y\", \"x\", \"y\"]\nx_precision = [1e-4, 1e-6]\ny_precision = [1e-4, 1e-6]\nCONVERGENCE_REPORT_OPTIONS = [\n    dict(zip(keys, value, strict=False))\n    for value in product(stopping_criterion, x_precision, y_precision)\n]\n\n\n@pytest.mark.parametrize(\"options\", CONVERGENCE_REPORT_OPTIONS)\ndef test_convergence_report(options, benchmark_example):\n    problems, optimizers, results = benchmark_example\n\n    df = convergence_report(problems=problems, results=results, **options)\n\n    expected_columns = list(optimizers.keys()) + [\"dimensionality\"]\n    assert df.shape == (len(problems), len(expected_columns))\n    assert set(df.columns) == set(expected_columns)\n\n    assert df[\"lbfgsb\"].loc[\"box_3d\"] == \"failed\"\n    assert df[\"nm\"].loc[\"box_3d\"] == \"error\"\n\n\n# ====================================================================================\n# Rank report\n# 
====================================================================================\n\nkeys = [\"runtime_measure\", \"stopping_criterion\"]\nruntime_measure = [\"n_evaluations\", \"walltime\", \"n_batches\"]\nRANK_REPORT_OPTIONS = [\n    dict(zip(keys, value, strict=False))\n    for value in product(runtime_measure, stopping_criterion)\n]\n\n\n@pytest.mark.parametrize(\"options\", RANK_REPORT_OPTIONS)\ndef test_rank_report(options, benchmark_example):\n    problems, optimizers, results = benchmark_example\n\n    df = rank_report(problems=problems, results=results, **options)\n\n    assert df.shape == (len(problems), len(optimizers) + 1)  # +1 for dimensionality\n    assert set(df.columns) == set(optimizers.keys()) | {\"dimensionality\"}\n\n    assert df[\"lbfgsb\"].loc[\"box_3d\"] == \"failed\"\n    assert df[\"nm\"].loc[\"box_3d\"] == \"error\"\n\n\n# ====================================================================================\n# Traceback report\n# ====================================================================================\n\n\n@pytest.mark.parametrize(\"return_type\", [\"text\", \"markdown\", \"dict\", \"dataframe\"])\ndef test_traceback_report(return_type, benchmark_example):\n    problems, optimizers, results = benchmark_example\n    n_failed_problems = 3\n\n    report = traceback_report(\n        problems=problems, results=results, return_type=return_type\n    )\n\n    if return_type in [\"text\", \"dict\"]:\n        assert len(report) == n_failed_problems\n\n    elif return_type == \"markdown\":\n        for algorithm_name in optimizers:\n            assert algorithm_name in report\n\n    elif return_type == \"dataframe\":\n        assert report.shape == (n_failed_problems, 2)\n        assert list(report.index.names) == [\"algorithm\", \"problem\"]\n"
  },
  {
    "path": "tests/optimagic/benchmarking/test_cartis_roberts.py",
    "content": "import numpy as np\nimport pytest\nfrom numpy.testing import assert_array_almost_equal\n\nfrom optimagic.benchmarking.cartis_roberts import (\n    CARTIS_ROBERTS_PROBLEMS,\n    get_start_points_bdvalues,\n    get_start_points_msqrta,\n)\n\n\n@pytest.mark.parametrize(\"name, specification\", list(CARTIS_ROBERTS_PROBLEMS.items()))\ndef test_cartis_roberts_function_at_start_x(name, specification):  # noqa: ARG001\n    _criterion = specification[\"fun\"]\n    _x = np.array(specification[\"start_x\"])\n    assert isinstance(specification[\"start_x\"], list)\n    _contributions = _criterion(_x)\n    calculated = _contributions @ _contributions\n    expected = specification[\"start_criterion\"]\n    assert np.allclose(calculated, expected)\n    assert isinstance(specification[\"start_x\"], list)\n\n\n@pytest.mark.parametrize(\"name, specification\", list(CARTIS_ROBERTS_PROBLEMS.items()))\ndef test_cartis_roberts_function_at_solution_x(name, specification):  # noqa: ARG001\n    _criterion = specification[\"fun\"]\n    _x = specification[\"solution_x\"]\n    if _x is not None:\n        assert isinstance(_x, list)\n        _x = np.array(_x)\n        _contributions = _criterion(_x)\n        calculated = _contributions @ _contributions\n        expected = specification[\"solution_criterion\"]\n        assert np.allclose(calculated, expected, atol=1e-7)\n\n\ndef test_get_start_points_bdvalues():\n    expected = np.array([-0.1389, -0.2222, -0.2500, -0.2222, -0.1389])\n    result = get_start_points_bdvalues(5)\n    assert_array_almost_equal(expected, result, decimal=4)\n\n\ndef test_get_start_points_msqrta():\n    matlab_mat = np.array(\n        [\n            [0.8415, -0.7568, 0.4121, -0.2879, -0.1324],\n            [-0.9918, -0.9538, 0.9200, -0.6299, -0.5064],\n            [0.9988, -0.4910, -0.6020, 0.9395, -0.9301],\n            [-0.9992, -0.0265, -0.4041, 0.2794, -0.8509],\n            [0.9235, 0.1935, 0.9365, -0.8860, 0.1760],\n        ]\n    )\n    expected 
= 0.2 * matlab_mat.flatten()\n    result = get_start_points_msqrta(5)\n    assert_array_almost_equal(result, expected, decimal=4)\n"
  },
  {
    "path": "tests/optimagic/benchmarking/test_get_benchmark_problems.py",
    "content": "from itertools import product\n\nimport numpy as np\nimport pytest\n\nfrom optimagic.benchmarking.get_benchmark_problems import (\n    _step_func,\n    get_benchmark_problems,\n)\n\nPARMETRIZATION = []\nfor name in [\"more_wild\", \"cartis_roberts\", \"example\", \"estimagic\"]:\n    for additive, multiplicative, scaling in product([False, True], repeat=3):\n        PARMETRIZATION.append((name, additive, multiplicative, scaling))\n\n\n@pytest.mark.parametrize(\n    \"name, additive_noise, multiplicative_noise, scaling\", PARMETRIZATION\n)\ndef test_get_problems(name, additive_noise, multiplicative_noise, scaling):\n    is_noisy = any((additive_noise, multiplicative_noise))\n    problems = get_benchmark_problems(\n        name=name,\n        additive_noise=additive_noise,\n        multiplicative_noise=multiplicative_noise,\n        scaling=scaling,\n    )\n    first_name = list(problems)[0]\n    first = problems[first_name]\n    func = first[\"inputs\"][\"fun\"]\n    params = first[\"inputs\"][\"params\"]\n\n    first_eval = func(params)\n    second_eval = func(params)\n\n    if is_noisy:\n        assert not np.allclose(first_eval, second_eval)\n    else:\n        assert np.allclose(first_eval, second_eval)\n\n    for problem in problems.values():\n        assert isinstance(problem[\"inputs\"][\"params\"], np.ndarray)\n        assert isinstance(problem[\"solution\"][\"params\"], np.ndarray)\n\n\ndef test_step_func():\n    p = np.array([0.0001, 0.0002])\n    got = _step_func(p, lambda x: x @ x)\n    assert np.allclose(got, 0)\n    assert not np.allclose(p @ p, 0)\n"
  },
  {
    "path": "tests/optimagic/benchmarking/test_more_wild.py",
    "content": "import numpy as np\nimport pytest\n\nfrom optimagic.benchmarking.more_wild import (\n    MORE_WILD_PROBLEMS,\n    get_start_points_mancino,\n)\n\n\n@pytest.mark.parametrize(\"name, specification\", list(MORE_WILD_PROBLEMS.items()))\ndef test_more_wild_function_at_start_x(name, specification):  # noqa: ARG001\n    _criterion = specification[\"fun\"]\n    assert isinstance(specification[\"start_x\"], list)\n    _x = np.array(specification[\"start_x\"])\n    _contributions = _criterion(_x)\n    calculated = _contributions @ _contributions\n    expected = specification[\"start_criterion\"]\n    assert np.allclose(calculated, expected)\n\n    if specification.get(\"solution_x\") is not None:\n        assert isinstance(specification[\"solution_x\"], list)\n        _x = np.array(specification[\"solution_x\"])\n        _contributions = _criterion(_x)\n        calculated = _contributions @ _contributions\n        expected = specification[\"solution_criterion\"]\n        assert np.allclose(calculated, expected, rtol=1e-8, atol=1e-8)\n\n\ndef test_get_start_points_mancino():\n    expected = (np.array([102.4824, 96.3335, 90.4363, 84.7852, 79.3747]),)\n    result = get_start_points_mancino(5)\n    assert np.allclose(expected, result)\n"
  },
  {
    "path": "tests/optimagic/benchmarking/test_noise_distributions.py",
    "content": "import numpy as np\nimport pandas as pd\nimport pytest\n\nfrom optimagic.benchmarking.get_benchmark_problems import _sample_from_distribution\nfrom optimagic.benchmarking.noise_distributions import NOISE_DISTRIBUTIONS\nfrom optimagic.utilities import get_rng\n\n\n@pytest.mark.parametrize(\"distribution\", NOISE_DISTRIBUTIONS)\ndef test_sample_from_distribution(distribution):\n    mean = 0.33\n    std = 0.55\n    correlation = 0.44\n    sample = _sample_from_distribution(\n        distribution=distribution,\n        mean=mean,\n        std=std,\n        size=(100_000, 5),\n        correlation=correlation,\n        rng=get_rng(seed=0),\n    )\n    calculated_mean = sample.mean()\n    calculated_std = sample.std()\n    corrmat = pd.DataFrame(sample).corr().to_numpy().round(2)\n    calculated_avgcorr = corrmat[~np.eye(len(corrmat)).astype(bool)].mean()\n\n    assert np.allclose(calculated_mean, mean, atol=0.001)\n    assert np.allclose(calculated_std, std, atol=0.001)\n    assert np.allclose(calculated_avgcorr, correlation, atol=0.001)\n"
  },
  {
    "path": "tests/optimagic/benchmarking/test_run_benchmark.py",
    "content": "import pytest\n\nfrom optimagic import get_benchmark_problems\nfrom optimagic.benchmarking.run_benchmark import run_benchmark\n\n\ndef test_run_benchmark_dict_options():\n    all_problems = get_benchmark_problems(\"more_wild\")\n    first_two_names = list(all_problems)[:2]\n    first_two = {name: all_problems[name] for name in first_two_names}\n\n    optimize_options = {\n        \"default_lbfgsb\": \"scipy_lbfgsb\",\n        \"tuned_lbfgsb\": {\n            \"algorithm\": \"scipy_lbfgsb\",\n            \"algo_options\": {\"convergence.relative_criterion_tolerance\": 1e-10},\n        },\n    }\n\n    result = run_benchmark(\n        problems=first_two,\n        optimize_options=optimize_options,\n        error_handling=\"raise\",\n    )\n\n    expected_keys = {\n        (\"linear_full_rank_good_start\", \"default_lbfgsb\"),\n        (\"linear_full_rank_bad_start\", \"default_lbfgsb\"),\n        (\"linear_full_rank_good_start\", \"tuned_lbfgsb\"),\n        (\"linear_full_rank_bad_start\", \"tuned_lbfgsb\"),\n    }\n    assert set(result) == expected_keys\n\n\ndef test_run_benchmark_list_options():\n    all_problems = get_benchmark_problems(\"example\")\n    first_two_names = list(all_problems)[:2]\n    first_two = {name: all_problems[name] for name in first_two_names}\n    optimize_options = [\"scipy_lbfgsb\", \"scipy_neldermead\"]\n\n    result = run_benchmark(\n        problems=first_two,\n        optimize_options=optimize_options,\n    )\n\n    expected_keys = {\n        (\"helical_valley_good_start\", \"scipy_lbfgsb\"),\n        (\"rosenbrock_good_start\", \"scipy_lbfgsb\"),\n        (\"helical_valley_good_start\", \"scipy_neldermead\"),\n        (\"rosenbrock_good_start\", \"scipy_neldermead\"),\n    }\n    assert set(result) == expected_keys\n\n\ndef test_run_benchmark_failing():\n    all_problems = get_benchmark_problems(\"more_wild\")\n    failing_name = \"jennrich_sampson\"\n    failing = {failing_name: all_problems[failing_name]}\n\n    
optimize_options = [\"scipy_lbfgsb\"]\n\n    with pytest.warns():\n        result = run_benchmark(problems=failing, optimize_options=optimize_options)\n\n    key = (failing_name, \"scipy_lbfgsb\")\n    assert isinstance(result[key][\"solution\"], str)\n"
  },
  {
    "path": "tests/optimagic/differentiation/test_compare_derivatives_with_jax.py",
    "content": "\"\"\"Compare first and second derivative behavior to that of jax.\n\nThis test module only runs if jax is installed.\n\n\"\"\"\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_array_almost_equal as aaae\nfrom pybaum import tree_equal\n\nfrom optimagic.config import IS_JAX_INSTALLED\nfrom optimagic.differentiation.derivatives import first_derivative, second_derivative\n\nif not IS_JAX_INSTALLED:\n    pytestmark = pytest.mark.skip(reason=\"jax is not installed.\")\nelse:\n    import jax\n    import jax.numpy as jnp\n\n    jax.config.update(\"jax_enable_x64\", True)\n\n\n# arrays have to be equal up to 5 decimals\nDECIMALS = 5\n\n\ndef _tree_equal_numpy_leaves(tree1, tree2):\n    equality_checkers = {np.ndarray: lambda x, y: aaae(x, y, decimal=DECIMALS)}\n    tree_equal(tree1, tree2, equality_checkers=equality_checkers)\n\n\ndef _compute_testable_optimagic_and_jax_derivatives(func, params, func_jax=None):\n    \"\"\"Computes first and second derivative using optimagic and jax.\n\n    Then converts leaves of jax output to numpy so that we can use numpy.testing. 
For\n    higher dimensional output we need to define two function, one with numpy array\n    output and one with jax.numpy array output.\n\n    \"\"\"\n    func_jax = func if func_jax is None else func_jax\n\n    optimagic_jac = first_derivative(func, params).derivative\n    jax_jac = jax.jacobian(func_jax)(params)\n\n    optimagic_hess = second_derivative(func, params).derivative\n    jax_hess = jax.hessian(func_jax)(params)\n\n    out = {\n        \"jac\": {\"optimagic\": optimagic_jac, \"jax\": jax_jac},\n        \"hess\": {\"optimagic\": optimagic_hess, \"jax\": jax_hess},\n    }\n    return out\n\n\n@pytest.mark.jax()\ndef test_scalar_input_scalar_output():\n    def func(params):\n        return params**2\n\n    params = 1.0\n\n    result = _compute_testable_optimagic_and_jax_derivatives(func, params)\n    _tree_equal_numpy_leaves(result[\"jac\"][\"optimagic\"], result[\"jac\"][\"jax\"])\n    _tree_equal_numpy_leaves(result[\"hess\"][\"optimagic\"], result[\"hess\"][\"jax\"])\n\n\n@pytest.mark.jax()\ndef test_array_input_scalar_output():\n    def func(params):\n        return params @ params\n\n    params = np.array([1.0, 2, 3])\n\n    result = _compute_testable_optimagic_and_jax_derivatives(func, params)\n    _tree_equal_numpy_leaves(result[\"jac\"][\"optimagic\"], result[\"jac\"][\"jax\"])\n    _tree_equal_numpy_leaves(result[\"hess\"][\"optimagic\"], result[\"hess\"][\"jax\"])\n\n\n@pytest.mark.jax()\ndef test_dict_input_scalar_output():\n    def func(params):\n        return params[\"a\"] * params[\"b\"]\n\n    params = {\"a\": 1.0, \"b\": 2.0}\n\n    result = _compute_testable_optimagic_and_jax_derivatives(func, params)\n    _tree_equal_numpy_leaves(result[\"jac\"][\"optimagic\"], result[\"jac\"][\"jax\"])\n    _tree_equal_numpy_leaves(result[\"hess\"][\"optimagic\"], result[\"hess\"][\"jax\"])\n\n\n@pytest.mark.jax()\ndef test_array_dict_input_scalar_output():\n    def func(params):\n        return params[\"a\"].sum() * params[\"b\"].prod()\n\n    params 
= {\n        \"a\": np.array([1.0, 2, 3]),\n        \"b\": np.arange(9, dtype=np.float64).reshape(3, 3),\n    }\n\n    result = _compute_testable_optimagic_and_jax_derivatives(func, params)\n    _tree_equal_numpy_leaves(result[\"jac\"][\"optimagic\"], result[\"jac\"][\"jax\"])\n    _tree_equal_numpy_leaves(result[\"hess\"][\"optimagic\"], result[\"hess\"][\"jax\"])\n\n\n@pytest.mark.jax()\ndef test_array_input_array_output():\n    def func(params):\n        return np.array([params.sum(), params.prod()])\n\n    def func_jax(params):\n        return jnp.array([params.sum(), params.prod()])\n\n    params = np.array([1.0, 2, 3])\n\n    result = _compute_testable_optimagic_and_jax_derivatives(func, params, func_jax)\n    _tree_equal_numpy_leaves(result[\"jac\"][\"optimagic\"], result[\"jac\"][\"jax\"])\n    _tree_equal_numpy_leaves(result[\"hess\"][\"optimagic\"], result[\"hess\"][\"jax\"])\n\n\n@pytest.mark.jax()\ndef test_array_dict_input_array_output():\n    def func(params):\n        return params[\"b\"] * np.array([params[\"a\"].sum(), params[\"a\"].prod()])\n\n    def func_jax(params):\n        return params[\"b\"] * jnp.array([params[\"a\"].sum(), params[\"a\"].prod()])\n\n    params = {\"a\": np.array([1.0, 2, 3]), \"b\": 2.0}\n\n    result = _compute_testable_optimagic_and_jax_derivatives(func, params, func_jax)\n    _tree_equal_numpy_leaves(result[\"jac\"][\"optimagic\"], result[\"jac\"][\"jax\"])\n    _tree_equal_numpy_leaves(result[\"hess\"][\"optimagic\"], result[\"hess\"][\"jax\"])\n\n\n@pytest.mark.jax()\ndef test_array_dict_input_dict_output():\n    def func(params):\n        value = params[\"b\"] * np.array([params[\"a\"].sum(), params[\"a\"].prod()])\n        return [value[0], {\"c\": 0.0, \"d\": value[1]}]\n\n    def func_jax(params):\n        value = params[\"b\"] * jnp.array([params[\"a\"].sum(), params[\"a\"].prod()])\n        return [value[0], {\"c\": 0.0, \"d\": value[1]}]\n\n    params = {\"a\": np.array([1.0, 2, 3]), \"b\": 2.0}\n\n    result = 
_compute_testable_optimagic_and_jax_derivatives(func, params, func_jax)\n    _tree_equal_numpy_leaves(result[\"jac\"][\"optimagic\"], result[\"jac\"][\"jax\"])\n    _tree_equal_numpy_leaves(result[\"hess\"][\"optimagic\"], result[\"hess\"][\"jax\"])\n"
  },
  {
    "path": "tests/optimagic/differentiation/test_derivatives.py",
    "content": "from dataclasses import dataclass\nfrom functools import partial\nfrom pathlib import Path\nfrom typing import get_type_hints\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom numpy.testing import assert_array_almost_equal as aaae\nfrom pandas.testing import assert_frame_equal\nfrom scipy.optimize._numdiff import approx_derivative\n\nfrom optimagic.differentiation.derivatives import (\n    Evals,\n    NumdiffResult,\n    _consolidate_one_step_derivatives,\n    _convert_evaluation_data_to_frame,\n    _convert_richardson_candidates_to_frame,\n    _is_scalar_nan,\n    _nan_skipping_batch_evaluator,\n    _reshape_cross_step_evals,\n    _reshape_one_step_evals,\n    _reshape_two_step_evals,\n    _select_minimizer_along_axis,\n    first_derivative,\n    second_derivative,\n)\nfrom optimagic.differentiation.generate_steps import Steps\nfrom optimagic.examples.numdiff_functions import (\n    logit_loglike,\n    logit_loglike_gradient,\n    logit_loglike_hessian,\n    logit_loglikeobs,\n    logit_loglikeobs_jacobian,\n)\nfrom optimagic.parameters.bounds import Bounds\n\n\n@pytest.fixture()\ndef binary_choice_inputs():\n    fix_path = Path(__file__).resolve().parent / \"binary_choice_inputs.pickle\"\n    inputs = pd.read_pickle(fix_path)\n    return inputs\n\n\nmethods = [\"forward\", \"backward\", \"central\"]\nmethods_second_derivative = [\"forward\", \"backward\", \"central_average\", \"central_cross\"]\n\n\n@pytest.mark.parametrize(\"method\", methods)\ndef test_first_derivative_jacobian(binary_choice_inputs, method):\n    fix = binary_choice_inputs\n    func = partial(logit_loglikeobs, y=fix[\"y\"], x=fix[\"x\"])\n\n    bounds = Bounds(\n        lower=np.full(fix[\"params_np\"].shape, -np.inf),\n        upper=np.full(fix[\"params_np\"].shape, np.inf),\n    )\n\n    calculated = first_derivative(\n        func=func,\n        method=method,\n        params=fix[\"params_np\"],\n        step_size=None,\n        bounds=bounds,\n        
min_steps=1e-8,\n        f0=func(fix[\"params_np\"]),\n        n_cores=1,\n    )\n\n    expected = logit_loglikeobs_jacobian(fix[\"params_np\"], fix[\"y\"], fix[\"x\"])\n\n    aaae(calculated.derivative, expected, decimal=6)\n\n\ndef test_first_derivative_jacobian_works_at_defaults(binary_choice_inputs):\n    fix = binary_choice_inputs\n    func = partial(logit_loglikeobs, y=fix[\"y\"], x=fix[\"x\"])\n    calculated = first_derivative(func=func, params=fix[\"params_np\"], n_cores=1)\n    expected = logit_loglikeobs_jacobian(fix[\"params_np\"], fix[\"y\"], fix[\"x\"])\n    aaae(calculated.derivative, expected, decimal=6)\n\n\n@pytest.mark.parametrize(\"method\", methods)\ndef test_first_derivative_gradient(binary_choice_inputs, method):\n    fix = binary_choice_inputs\n    func = partial(logit_loglike, y=fix[\"y\"], x=fix[\"x\"])\n\n    calculated = first_derivative(\n        func=func,\n        method=method,\n        params=fix[\"params_np\"],\n        f0=func(fix[\"params_np\"]),\n        n_cores=1,\n    )\n\n    expected = logit_loglike_gradient(fix[\"params_np\"], fix[\"y\"], fix[\"x\"])\n\n    aaae(calculated.derivative, expected, decimal=4)\n\n\n@pytest.mark.parametrize(\"method\", methods_second_derivative)\ndef test_second_derivative_hessian(binary_choice_inputs, method):\n    fix = binary_choice_inputs\n    func = partial(logit_loglike, y=fix[\"y\"], x=fix[\"x\"])\n\n    calculated = second_derivative(\n        func=func,\n        method=method,\n        params=fix[\"params_np\"],\n        f0=func(fix[\"params_np\"]),\n        n_cores=1,\n    )\n\n    expected = logit_loglike_hessian(fix[\"params_np\"], fix[\"y\"], fix[\"x\"])\n\n    assert np.max(np.abs(calculated.derivative - expected)) < 1.5 * 10 ** (-2)\n    assert np.mean(np.abs(calculated.derivative - expected)) < 1.5 * 10 ** (-3)\n\n\n@pytest.mark.parametrize(\"method\", methods)\ndef test_first_derivative_scalar(method):  # noqa: ARG001\n    def f(x):\n        return x**2\n\n    calculated = 
first_derivative(f, 3.0, n_cores=1)\n    expected = 6.0\n    assert calculated.derivative == expected\n\n\n@pytest.mark.parametrize(\"method\", methods_second_derivative)\ndef test_second_derivative_scalar(method):  # noqa: ARG001\n    def f(x):\n        return x**2\n\n    calculated = second_derivative(f, 3.0, n_cores=1)\n    expected = 2.0\n\n    assert np.abs(calculated.derivative - expected) < 1.5 * 10 ** (-6)\n\n\ndef test_nan_skipping_batch_evaluator():\n    arglist = [np.nan, np.ones(2), np.array([3, 4]), np.nan, np.array([1, 2])]\n    expected = [\n        np.full(2, np.nan),\n        np.ones(2),\n        np.array([9, 16]),\n        np.full(2, np.nan),\n        np.array([1, 4]),\n    ]\n    calculated = _nan_skipping_batch_evaluator(\n        func=lambda x: x**2,\n        arguments=arglist,\n        n_cores=1,\n        error_handling=\"continue\",\n        batch_evaluator=\"joblib\",\n    )\n    for arr_calc, arr_exp in zip(calculated, expected, strict=False):\n        if np.isnan(arr_exp).all():\n            assert np.isnan(arr_calc).all()\n        else:\n            aaae(arr_calc, arr_exp)\n\n\ndef test_consolidate_one_step_derivatives():\n    forward = np.ones((1, 4, 3))\n    forward[:, :, 0] = np.nan\n    backward = np.zeros_like(forward)\n\n    calculated = _consolidate_one_step_derivatives(\n        {\"forward\": forward, \"backward\": backward}, [\"forward\", \"backward\"]\n    )\n    expected = np.array([[0, 1, 1]] * 4)\n    aaae(calculated, expected)\n\n\n@pytest.fixture()\ndef example_function_gradient_fixtures():\n    def f(x):\n        \"\"\"F:R^3 -> R.\"\"\"\n        x1, x2, x3 = x[0], x[1], x[2]\n        y1 = np.sin(x1) + np.cos(x2) + x3 - x3\n        return y1\n\n    def fprime(x):\n        \"\"\"Gradient(f)(x):R^3 -> R^3.\"\"\"\n        x1, x2, x3 = x[0], x[1], x[2]\n        grad = np.array([np.cos(x1), -np.sin(x2), x3 - x3])\n        return grad\n\n    return {\"func\": f, \"func_prime\": fprime}\n\n\n@pytest.fixture()\ndef 
example_function_jacobian_fixtures():\n    def f(x):\n        \"\"\"F:R^3 -> R^2.\"\"\"\n        x1, x2, x3 = x[0], x[1], x[2]\n        y1, y2 = np.sin(x1) + np.cos(x2), np.exp(x3)\n        return np.array([y1, y2])\n\n    def fprime(x):\n        \"\"\"Jacobian(f)(x):R^3 -> R^(2x3)\"\"\"\n        x1, x2, x3 = x[0], x[1], x[2]\n        jac = np.array([[np.cos(x1), -np.sin(x2), 0], [0, 0, np.exp(x3)]])\n        return jac\n\n    return {\"func\": f, \"func_prime\": fprime}\n\n\n@pytest.mark.filterwarnings(\"ignore:The `n_steps` argument\")\ndef test_first_derivative_gradient_richardson(example_function_gradient_fixtures):\n    f = example_function_gradient_fixtures[\"func\"]\n    fprime = example_function_gradient_fixtures[\"func_prime\"]\n\n    true_fprime = fprime(np.ones(3))\n    scipy_fprime = approx_derivative(f, np.ones(3))\n\n    our_fprime = first_derivative(f, np.ones(3), n_steps=3, method=\"central\", n_cores=1)\n\n    aaae(scipy_fprime, our_fprime.derivative)\n    aaae(true_fprime, our_fprime.derivative)\n\n\n@pytest.mark.filterwarnings(\"ignore:The `n_steps` argument\")\ndef test_first_derivative_jacobian_richardson(example_function_jacobian_fixtures):\n    f = example_function_jacobian_fixtures[\"func\"]\n    fprime = example_function_jacobian_fixtures[\"func_prime\"]\n\n    true_fprime = fprime(np.ones(3))\n    scipy_fprime = approx_derivative(f, np.ones(3))\n\n    our_fprime = first_derivative(f, np.ones(3), n_steps=3, method=\"central\", n_cores=1)\n\n    aaae(scipy_fprime, our_fprime.derivative)\n    aaae(true_fprime, our_fprime.derivative)\n\n\ndef test_convert_evaluation_data_to_frame():\n    arr = np.arange(4).reshape(2, 2)\n    arr2 = arr.reshape(2, 1, 2)\n    steps = Steps(pos=arr, neg=-arr)\n    evals = Evals(pos=arr2, neg=-arr2)\n    expected = [\n        [1, 0, 0, 0, 0, 0],\n        [1, 0, 1, 0, 1, 1],\n        [1, 1, 0, 0, 2, 2],\n        [1, 1, 1, 0, 3, 3],\n        [-1, 0, 0, 0, 0, 0],\n        [-1, 0, 1, 0, 1, -1],\n        [-1, 1, 0, 0, 
2, -2],\n        [-1, 1, 1, 0, 3, -3],\n    ]\n    expected = pd.DataFrame(\n        expected, columns=[\"sign\", \"step_number\", \"dim_x\", \"dim_f\", \"step\", \"eval\"]\n    )\n    got = _convert_evaluation_data_to_frame(steps, evals)\n    assert_frame_equal(expected, got.reset_index(), check_dtype=False)\n\n\ndef test__convert_richardson_candidates_to_frame():\n    jac = {\n        \"forward1\": np.array([[0, 1], [2, 3]]),\n        \"forward2\": np.array([[0.5, 1], [2, 3]]),\n    }\n    err = {\n        \"forward1\": np.array([[0, 0], [0, 1]]),\n        \"forward2\": np.array([[1, 0], [0, 0]]),\n    }\n    expected = [\n        [\"forward\", 1, 0, 0, 0, 0],\n        [\"forward\", 1, 1, 0, 1, 0],\n        [\"forward\", 1, 0, 1, 2, 0],\n        [\"forward\", 1, 1, 1, 3, 1],\n        [\"forward\", 2, 0, 0, 0.5, 1],\n        [\"forward\", 2, 1, 0, 1, 0],\n        [\"forward\", 2, 0, 1, 2, 0],\n        [\"forward\", 2, 1, 1, 3, 0],\n    ]\n    expected = pd.DataFrame(\n        expected, columns=[\"method\", \"num_term\", \"dim_x\", \"dim_f\", \"der\", \"err\"]\n    )\n    expected = expected.set_index([\"method\", \"num_term\", \"dim_x\", \"dim_f\"])\n    got = _convert_richardson_candidates_to_frame(jac, err)\n    assert_frame_equal(got, expected, check_dtype=False, check_index_type=False)\n\n\ndef test__select_minimizer_along_axis():\n    der = np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]])\n    err = np.array([[[0, 1], [1, 0]], [[1, 0], [0, 1]]])\n    expected = (np.array([[0, 5], [6, 3]]), np.array([[0, 0], [0, 0]]))\n    got = _select_minimizer_along_axis(der, err)\n    aaae(expected, got)\n\n\ndef test_reshape_one_step_evals():\n    n_steps, dim_f, dim_x = 2, 3, 4\n    raw_evals_one_step = np.arange(2 * n_steps * dim_f * dim_x)\n\n    pos_expected = np.array(\n        [\n            [[0, 3, 6, 9], [1, 4, 7, 10], [2, 5, 8, 11]],\n            [[12, 15, 18, 21], [13, 16, 19, 22], [14, 17, 20, 23]],\n        ]\n    )\n    neg_expected = np.array(\n        [\n     
       [[24, 27, 30, 33], [25, 28, 31, 34], [26, 29, 32, 35]],\n            [[36, 39, 42, 45], [37, 40, 43, 46], [38, 41, 44, 47]],\n        ]\n    )\n\n    got = _reshape_one_step_evals(raw_evals_one_step, n_steps, dim_x)\n    assert np.all(got.pos == pos_expected)\n    assert np.all(got.neg == neg_expected)\n\n\ndef test_reshape_two_step_evals():\n    n_steps, dim_x, dim_f = 1, 2, 2\n    raw_evals_two_step = np.arange(2 * n_steps * dim_f * dim_x * dim_x)\n\n    pos_expected = np.array([[[[0, 2], [2, 6]], [[1, 3], [3, 7]]]])\n    neg_expected = np.array([[[[8, 10], [10, 14]], [[9, 11], [11, 15]]]])\n\n    got = _reshape_two_step_evals(raw_evals_two_step, n_steps, dim_x)\n    assert np.all(got.pos == pos_expected)\n    assert np.all(got.neg == neg_expected)\n\n\ndef test_reshape_cross_step_evals():\n    n_steps = 1\n    dim_x = 2\n    dim_f = 2\n    f0 = np.array([-1000, 1000])\n\n    raw_evals_cross_step = np.arange(2 * n_steps * dim_f * dim_x * dim_x)\n\n    expected_pos = np.array([[[[-1000, 2], [10, -1000]], [[1000, 3], [11, 1000]]]])\n    expected_neg = expected_pos.swapaxes(2, 3)\n\n    got = _reshape_cross_step_evals(raw_evals_cross_step, n_steps, dim_x, f0)\n    assert np.all(got.pos == expected_pos)\n    assert np.all(got.neg == expected_neg)\n\n\ndef test_is_scalar_nan():\n    assert _is_scalar_nan(np.nan)\n    assert not _is_scalar_nan(1.0)\n    assert not _is_scalar_nan(np.array([np.nan]))\n\n\n@dataclass\nclass MyOutput:\n    value: float\n    message: str\n\n\ndef test_first_derivative_with_unpacking():\n    def f(x):\n        return MyOutput(x @ x, \"success\")\n\n    got = first_derivative(\n        func=f,\n        params=np.ones(3),\n        unpacker=lambda out: out.value,\n    )\n\n    assert isinstance(got.func_value, MyOutput)\n    aaae(got.derivative, np.ones(3) * 2)\n\n\ndef test_second_derivative_with_unpacking():\n    def f(x):\n        return MyOutput(x @ x, \"success\")\n\n    got = second_derivative(\n        func=f,\n        
params=np.ones(3),\n        unpacker=lambda out: out.value,\n    )\n\n    assert isinstance(got.func_value, MyOutput)\n    aaae(got.derivative, np.eye(3) * 2, decimal=4)\n\n\n@pytest.mark.filterwarnings(\"ignore:The dictionary access for\")\ndef test_numdiff_result_getitem():\n    res = NumdiffResult(\n        derivative=1,\n        func_value=2,\n        _func_evals=pd.DataFrame([0, 1]),\n        _derivative_candidates=pd.DataFrame([2, 3]),\n    )\n    assert res[\"derivative\"] == res.derivative\n    assert res[\"func_value\"] == res.func_value\n    assert_frame_equal(res[\"_func_evals\"], res._func_evals)\n    assert_frame_equal(res[\"_derivative_candidates\"], res._derivative_candidates)\n\n\ndef test_first_and_second_derivative_have_same_type_hints():\n    # exclude method from comparison, as the argument options differ here\n    exclude = [\"method\"]\n    first_hints = {\n        k: v for k, v in get_type_hints(first_derivative).items() if k not in exclude\n    }\n    second_hints = {\n        k: v for k, v in get_type_hints(second_derivative).items() if k not in exclude\n    }\n    assert first_hints == second_hints\n\n\ndef test_first_derivative_pytree_step_size():\n    params = {\"a\": np.array([1, 2, 3]), \"b\": 4}\n\n    got = first_derivative(\n        lambda params: params[\"a\"] @ params[\"a\"] + 2 * params[\"b\"],\n        params=params,\n        step_size=params,\n    )\n    assert np.allclose(got.derivative[\"a\"], np.array([2, 4, 6]))\n    assert np.allclose(got.derivative[\"b\"], 2)\n\n\ndef test_second_derivative_pytree_step_size():\n    params = {\"a\": np.array([1, 2, 3]), \"b\": 4}\n\n    got = second_derivative(\n        lambda params: params[\"a\"] @ params[\"a\"] + 2 * params[\"b\"],\n        params=params,\n        step_size=params,\n    )\n    assert np.allclose(got.derivative[\"a\"][\"a\"], np.eye(3) * 2)\n    assert np.allclose(got.derivative[\"a\"][\"b\"], np.zeros(3))\n    assert np.allclose(got.derivative[\"b\"][\"b\"], 0)\n\n\ndef 
test_first_derivative_pytree_min_steps():\n    params = {\"a\": np.array([1, 2, 3]), \"b\": 4}\n    bounds = Bounds(\n        lower={\"a\": np.array([0, 1, 2]), \"b\": 3},\n        upper={\"a\": np.array([2, 3, 4]), \"b\": 5},\n    )\n    min_steps = {\"a\": np.array([0.2, 0.5, 0.7]), \"b\": 0.2}\n\n    got = first_derivative(\n        lambda params: params[\"a\"] @ params[\"a\"] + 2 * params[\"b\"],\n        params=params,\n        bounds=bounds,\n        min_steps=min_steps,\n    )\n    assert np.allclose(got.derivative[\"a\"], np.array([2, 4, 6]))\n    assert np.allclose(got.derivative[\"b\"], 2)\n\n\ndef test_second_derivative_pytree_min_steps():\n    params = {\"a\": np.array([1, 2, 3]), \"b\": 4}\n    bounds = Bounds(\n        lower={\"a\": np.array([0, 1, 2]), \"b\": 3},\n        upper={\"a\": np.array([2, 3, 4]), \"b\": 5},\n    )\n    min_steps = {\"a\": np.array([0.2, 0.5, 0.7]), \"b\": 0.2}\n\n    got = second_derivative(\n        lambda params: params[\"a\"] @ params[\"a\"] + 2 * params[\"b\"],\n        params=params,\n        bounds=bounds,\n        min_steps=min_steps,\n    )\n    assert np.allclose(got.derivative[\"a\"][\"a\"], np.eye(3) * 2)\n    assert np.allclose(got.derivative[\"a\"][\"b\"], np.zeros(3))\n    assert np.allclose(got.derivative[\"b\"][\"b\"], 0)\n"
  },
  {
    "path": "tests/optimagic/differentiation/test_finite_differences.py",
    "content": "import numpy as np\nimport pytest\nfrom numpy.testing import assert_array_almost_equal as aaae\n\nfrom optimagic.differentiation.derivatives import Evals\nfrom optimagic.differentiation.finite_differences import jacobian\nfrom optimagic.differentiation.generate_steps import Steps\n\n\n@pytest.fixture()\ndef jacobian_inputs():\n    \"\"\"Very contrived test case for finite difference formulae with linear function.\"\"\"\n    steps_pos = np.array([[0.1, 0.1, 0.1, 0.1], [0.2, 0.2, 0.2, 0.2]])\n    steps = Steps(pos=steps_pos, neg=-steps_pos)\n\n    jac1 = (np.arange(1, 13)).reshape(3, 4)\n    jac2 = jac1 * 1.1\n\n    evals_pos1 = jac1 @ (np.zeros((4, 4)) + np.eye(4) * 0.1)\n    evals_pos2 = jac2 @ (np.zeros((4, 4)) + np.eye(4) * 0.2)\n    evals_neg1 = jac1 @ (np.zeros((4, 4)) - np.eye(4) * 0.1)\n    evals_neg2 = jac2 @ (np.zeros((4, 4)) - np.eye(4) * 0.2)\n    evals = Evals(\n        pos=np.array([evals_pos1, evals_pos2]), neg=np.array([evals_neg1, evals_neg2])\n    )\n\n    expected_jac = np.array([jac1, jac2])\n\n    f0 = np.zeros(3)\n\n    out = {\"evals\": evals, \"steps\": steps, \"f0\": f0, \"expected_jac\": expected_jac}\n    return out\n\n\nmethods = [\"forward\", \"backward\", \"central\"]\n\n\n@pytest.mark.parametrize(\"method\", methods)\ndef test_jacobian_finite_differences(jacobian_inputs, method):\n    expected_jac = jacobian_inputs.pop(\"expected_jac\")\n    calculated_jac = jacobian(**jacobian_inputs, method=method)\n    aaae(calculated_jac, expected_jac)\n"
  },
  {
    "path": "tests/optimagic/differentiation/test_generate_steps.py",
    "content": "import numpy as np\nimport pytest\nfrom numpy.testing import assert_array_almost_equal as aaae\n\nfrom optimagic.differentiation.generate_steps import (\n    _calculate_or_validate_base_steps,\n    _fillna,\n    _rescale_to_accomodate_bounds,\n    _set_unused_side_to_nan,\n    generate_steps,\n)\nfrom optimagic.parameters.bounds import Bounds\n\n\ndef test_scalars_as_base_steps():\n    steps_scalar = _calculate_or_validate_base_steps(\n        0.1, np.ones(3), \"first_derivative\", None, scaling_factor=1\n    )\n\n    steps_array = _calculate_or_validate_base_steps(\n        np.full(3, 0.1), np.ones(3), \"first_derivative\", None, scaling_factor=1\n    )\n\n    aaae(steps_scalar, steps_array)\n\n\ndef test_scalars_as_min_steps():\n    steps_scalar = _calculate_or_validate_base_steps(\n        0.1, np.ones(3), \"first_derivative\", 0.12, scaling_factor=1.5\n    )\n\n    steps_array = _calculate_or_validate_base_steps(\n        np.full(3, 0.1),\n        np.ones(3),\n        \"first_derivative\",\n        np.full(3, 0.12),\n        scaling_factor=1.5,\n    )\n\n    aaae(steps_scalar, steps_array)\n\n\ndef test_calculate_or_validate_base_steps_invalid_too_small():\n    base_steps = np.array([1e-10, 0.01, 0.01])\n    min_steps = np.full(3, 1e-8)\n    with pytest.raises(ValueError):\n        _calculate_or_validate_base_steps(\n            base_steps, np.ones(3), \"first_derivative\", min_steps, scaling_factor=1\n        )\n\n\ndef test_calculate_or_validate_base_steps_wrong_shape():\n    base_steps = np.array([0.01, 0.01, 0.01])\n    min_steps = np.full(3, 1e-8)\n    with pytest.raises(ValueError):\n        _calculate_or_validate_base_steps(\n            base_steps, np.ones(2), \"first_derivative\", min_steps, scaling_factor=1\n        )\n\n\ndef test_calculate_or_validate_base_steps_jacobian():\n    x = np.array([0.05, 1, -5])\n    expected = np.array([0.1, 1, 5]) * np.sqrt(np.finfo(float).eps)\n    calculated = _calculate_or_validate_base_steps(\n       
 None, x, \"first_derivative\", 0, scaling_factor=1.0\n    )\n    aaae(calculated, expected, decimal=12)\n\n\ndef test_calculate_or_validate_base_steps_jacobian_with_scaling_factor():\n    x = np.array([0.05, 1, -5])\n    expected = np.array([0.1, 1, 5]) * np.sqrt(np.finfo(float).eps) * 2\n    calculated = _calculate_or_validate_base_steps(\n        None, x, \"first_derivative\", 0, scaling_factor=2.0\n    )\n    aaae(calculated, expected, decimal=12)\n\n\ndef test_calculate_or_validate_base_steps_binding_min_step():\n    x = np.array([0.05, 1, -5])\n    expected = np.array([0.1, 1, 5]) * np.sqrt(np.finfo(float).eps)\n    expected[0] = 1e-8\n    calculated = _calculate_or_validate_base_steps(\n        None, x, \"first_derivative\", 1e-8, scaling_factor=1.0\n    )\n    aaae(calculated, expected, decimal=12)\n\n\ndef test_calculate_or_validate_base_steps_hessian():\n    x = np.array([0.05, 1, -5])\n    expected = np.array([0.1, 1, 5]) * np.finfo(float).eps ** (1 / 3)\n    calculated = _calculate_or_validate_base_steps(\n        None, x, \"second_derivative\", 0, scaling_factor=1.0\n    )\n    aaae(calculated, expected, decimal=12)\n\n\ndef test_set_unused_side_to_nan_forward():\n    pos = np.ones((3, 2))\n    neg = -np.ones((3, 2))\n    method = \"forward\"\n    x = np.zeros(3)\n    upper_bounds = np.array([0.5, 2, 3])\n    lower_bounds = np.array([-2, -0.1, -0.1])\n\n    expected_pos = np.array([[np.nan, np.nan], [1, 1], [1, 1]])\n    expected_neg = np.array([[-1, -1], [np.nan, np.nan], [np.nan, np.nan]])\n\n    calculated_pos, calculated_neg = _set_unused_side_to_nan(\n        x, pos, neg, method, lower_bounds, upper_bounds\n    )\n\n    assert np.allclose(calculated_pos, expected_pos, equal_nan=True)\n    assert np.allclose(calculated_neg, expected_neg, equal_nan=True)\n\n\ndef test_set_unused_side_to_nan_backward():\n    pos = np.ones((3, 2))\n    neg = -np.ones((3, 2))\n    method = \"backward\"\n    x = np.zeros(3)\n    upper_bounds = np.array([0.5, 2, 3])\n    
lower_bounds = np.array([-2, -0.1, -2])\n\n    expected_pos = np.array([[np.nan, np.nan], [1, 1], [np.nan, np.nan]])\n    expected_neg = np.array([[-1, -1], [np.nan, np.nan], [-1, -1]])\n\n    calculated_pos, calculated_neg = _set_unused_side_to_nan(\n        x, pos, neg, method, lower_bounds, upper_bounds\n    )\n\n    assert np.allclose(calculated_pos, expected_pos, equal_nan=True)\n    assert np.allclose(calculated_neg, expected_neg, equal_nan=True)\n\n\ndef test_fillna():\n    a = np.array([np.nan, 3, 4])\n    assert np.allclose(_fillna(a, 0), np.array([0, 3, 4.0]))\n\n\ndef test_rescale_to_accomodate_bounds():\n    pos = np.array([[1, 2], [1.5, 3], [1, 2], [3, np.nan]])\n    neg = -pos\n    base_steps = np.array([1, 1.5, 2, 3])\n    min_step = 0.1\n    lower_bounds = -4 * np.ones(4)\n    upper_bounds = np.ones(4) * 2.5\n\n    expected_pos = np.array([[1, 2], [1.25, 2.5], [1, 2], [2.5, np.nan]])\n    expected_neg = -expected_pos\n\n    calculated_pos, calculated_neg = _rescale_to_accomodate_bounds(\n        base_steps, pos, neg, lower_bounds, upper_bounds, min_step\n    )\n\n    assert np.allclose(calculated_pos, expected_pos, equal_nan=True)\n    assert np.allclose(calculated_neg, expected_neg, equal_nan=True)\n\n\ndef test_rescale_to_accomodate_bounds_binding_min_step():\n    pos = np.array([[1, 2], [1.5, 3], [1, 2]])\n    neg = -pos\n    base_steps = np.array([1, 1.5, 2])\n    min_step = np.array([0, 1.4, 0])\n    lower_bounds = -4 * np.ones(3)\n    upper_bounds = np.ones(3) * 2.5\n\n    expected_pos = np.array([[1, 2], [1.4, 2.8], [1, 2]])\n    expected_neg = -expected_pos\n\n    calculated_pos, calculated_neg = _rescale_to_accomodate_bounds(\n        base_steps, pos, neg, lower_bounds, upper_bounds, min_step\n    )\n\n    aaae(calculated_pos, expected_pos)\n    aaae(calculated_neg, expected_neg)\n\n\ndef test_generate_steps_binding_min_step():\n    calculated_steps = generate_steps(\n        x=np.arange(3),\n        method=\"central\",\n        n_steps=2,\n        
target=\"first_derivative\",\n        base_steps=np.array([0.1, 0.2, 0.3]),\n        bounds=Bounds(lower=np.full(3, -np.inf), upper=np.full(3, 2.5)),\n        step_ratio=2,\n        min_steps=np.full(3, 1e-8),\n        scaling_factor=1.0,\n    )\n\n    expected_pos = np.array([[0.1, 0.2], [0.2, 0.4], [0.25, 0.5]]).T\n    expected_neg = -expected_pos\n\n    aaae(calculated_steps.pos, expected_pos)\n    aaae(calculated_steps.neg, expected_neg)\n\n\ndef test_generate_steps_min_step_equals_base_step():\n    calculated_steps = generate_steps(\n        x=np.arange(3),\n        method=\"central\",\n        n_steps=2,\n        target=\"first_derivative\",\n        base_steps=np.array([0.1, 0.2, 0.3]),\n        bounds=Bounds(lower=np.full(3, -np.inf), upper=np.full(3, 2.5)),\n        step_ratio=2,\n        min_steps=None,\n        scaling_factor=1.0,\n    )\n\n    expected_pos = np.array([[0.1, 0.2], [0.2, 0.4], [0.3, np.nan]]).T\n    expected_neg = np.array([[-0.1, -0.2], [-0.2, -0.4], [-0.3, -0.6]]).T\n    aaae(calculated_steps.pos, expected_pos)\n    aaae(calculated_steps.neg, expected_neg)\n"
  },
  {
    "path": "tests/optimagic/differentiation/test_numdiff_options.py",
    "content": "import pytest\n\nfrom optimagic.differentiation.numdiff_options import (\n    NumdiffOptions,\n    pre_process_numdiff_options,\n)\nfrom optimagic.exceptions import InvalidNumdiffOptionsError\n\n\ndef test_pre_process_numdiff_options_trivial_case():\n    numdiff_options = NumdiffOptions(\n        method=\"central\",\n        step_size=0.1,\n        scaling_factor=0.5,\n        min_steps=None,\n        batch_evaluator=\"joblib\",\n    )\n    got = pre_process_numdiff_options(numdiff_options)\n    assert got == numdiff_options\n\n\ndef test_pre_process_numdiff_options_none_case():\n    assert pre_process_numdiff_options(None) is None\n\n\ndef test_pre_process_numdiff_options_dict_case():\n    got = pre_process_numdiff_options(\n        {\"method\": \"central\", \"step_size\": 0.1, \"batch_evaluator\": \"pathos\"}\n    )\n    assert got == NumdiffOptions(\n        method=\"central\", step_size=0.1, batch_evaluator=\"pathos\"\n    )\n\n\ndef test_pre_process_numdiff_options_invalid_type():\n    with pytest.raises(InvalidNumdiffOptionsError):\n        pre_process_numdiff_options(numdiff_options=\"invalid\")\n\n\ndef test_pre_process_numdiff_options_invalid_dict_key():\n    with pytest.raises(InvalidNumdiffOptionsError, match=\"Invalid numdiff options\"):\n        pre_process_numdiff_options(numdiff_options={\"wrong_key\": \"central\"})\n\n\ndef test_pre_process_numdiff_options_invalid_dict_value():\n    with pytest.raises(InvalidNumdiffOptionsError, match=\"Invalid numdiff `method`:\"):\n        pre_process_numdiff_options(numdiff_options={\"method\": \"invalid\"})\n\n\ndef test_numdiff_options_invalid_method():\n    with pytest.raises(InvalidNumdiffOptionsError, match=\"Invalid numdiff `method`:\"):\n        NumdiffOptions(method=\"invalid\")\n\n\ndef test_numdiff_options_invalid_step_size():\n    with pytest.raises(\n        InvalidNumdiffOptionsError, match=\"Invalid numdiff `step_size`:\"\n    ):\n        NumdiffOptions(step_size=0)\n\n\ndef 
test_numdiff_options_invalid_scaling_factor():\n    with pytest.raises(\n        InvalidNumdiffOptionsError, match=\"Invalid numdiff `scaling_factor`:\"\n    ):\n        NumdiffOptions(scaling_factor=-1)\n\n\ndef test_numdiff_options_invalid_min_steps():\n    with pytest.raises(\n        InvalidNumdiffOptionsError, match=\"Invalid numdiff `min_steps`:\"\n    ):\n        NumdiffOptions(min_steps=-1)\n\n\ndef test_numdiff_options_invalid_n_cores():\n    with pytest.raises(InvalidNumdiffOptionsError, match=\"Invalid numdiff `n_cores`:\"):\n        NumdiffOptions(n_cores=-1)\n\n\ndef test_numdiff_options_invalid_batch_evaluator():\n    with pytest.raises(\n        InvalidNumdiffOptionsError, match=\"Invalid batch evaluator: invalid\"\n    ):\n        NumdiffOptions(batch_evaluator=\"invalid\")\n"
  },
  {
    "path": "tests/optimagic/examples/test_criterion_functions.py",
    "content": "import numpy as np\nimport pandas as pd\nimport pytest\nfrom numpy.testing import assert_array_almost_equal as aaae\nfrom numpy.testing import assert_array_equal\nfrom pandas.testing import assert_frame_equal\n\nfrom optimagic.examples.criterion_functions import (\n    rhe_fun_and_gradient,\n    rhe_function_value,\n    rhe_gradient,\n    rhe_scalar,\n    rosenbrock_fun_and_gradient,\n    rosenbrock_function_value,\n    rosenbrock_gradient,\n    rosenbrock_scalar,\n    sos_fun_and_gradient,\n    sos_gradient,\n    sos_likelihood_fun_and_jac,\n    sos_likelihood_jacobian,\n    sos_ls,\n    sos_ls_fun_and_jac,\n    sos_ls_jacobian,\n    sos_ls_with_pd_objects,\n    sos_scalar,\n    trid_fun_and_gradient,\n    trid_gradient,\n    trid_scalar,\n)\nfrom optimagic.optimization.fun_value import FunctionValue\n\nTRID_GRAD = pd.DataFrame({\"value\": [7, 1, -6, 11, -19.0]})\nRHE_GRAD = pd.DataFrame({\"value\": [90, 72, 36, 28, -10.0]})\nROSENBROCK_GRAD = pd.DataFrame({\"value\": [259216, 255616, 54610, 145412, -10800.0]})\n\n\n@pytest.fixture()\ndef input_params():\n    params = pd.DataFrame({\"value\": [9, 9, 6, 7, -5]})\n    return params\n\n\ndef test_trid_scalar(input_params):\n    got = trid_scalar(input_params)\n    assert got == 83\n\n\ndef test_trid_gradient(input_params):\n    got = trid_gradient(input_params)\n    assert_frame_equal(got, TRID_GRAD)\n\n\ndef test_trid_fun_and_gradient(input_params):\n    got = trid_fun_and_gradient(input_params)\n    assert_frame_equal(got[1], TRID_GRAD)\n\n\ndef test_rhe_scalar(input_params):\n    got = rhe_scalar(input_params)\n    assert got == 960\n\n\ndef test_rhe_gradient(input_params):\n    got = rhe_gradient(input_params)\n    assert_frame_equal(got, RHE_GRAD)\n\n\ndef test_rhe_fun_and_gradient(input_params):\n    got = rhe_fun_and_gradient(input_params)\n    assert_frame_equal(got[1], RHE_GRAD)\n\n\ndef test_rosenbrock_scalar(input_params):\n    got = rosenbrock_scalar(input_params)\n    assert got == 
1456789\n\n\ndef test_rosenbrock_gradient(input_params):\n    got = rosenbrock_gradient(input_params)\n    assert_frame_equal(got, ROSENBROCK_GRAD)\n\n\ndef test_rosenbrock_fun_and_gradient(input_params):\n    got = rosenbrock_fun_and_gradient(input_params)\n    assert_frame_equal(got[1], ROSENBROCK_GRAD)\n\n\ndef test_rhe_function_value(input_params):\n    got = rhe_function_value(input_params)\n    assert isinstance(got, FunctionValue)\n    expected = np.array([9, 12.72792206, 14.07124728, 15.71623365, 16.4924225])\n    aaae(got.value, expected)\n\n\ndef test_rosenbrock_function_value(input_params):\n    got = rosenbrock_function_value(input_params)\n    assert isinstance(got, FunctionValue)\n    expected = np.array([720.04444307, 750.04266545, 290.04310025, 540.0333323, 0])\n    aaae(got.value, expected)\n\n\nSOS_GRAD = {\"a\": 2, \"b\": 4.0}\nSOS_LL_JAC = {\"a\": np.array([2, 0]), \"b\": np.array([0, 4])}\nSOS_LS_JAC = {\"a\": np.array([1, 0]), \"b\": np.array([0, 1])}\n\n\ndef test_sos_ls():\n    got = sos_ls({\"a\": 1, \"b\": 2})\n    aaae(got, np.array([1, 2.0]))\n\n\ndef test_sos_ls_with_pd_objects():\n    got = sos_ls_with_pd_objects({\"a\": 1, \"b\": 2})\n    assert isinstance(got, pd.Series)\n    aaae(got.to_numpy(), np.array([1, 2.0]))\n\n\ndef test_sos_scalar():\n    got = sos_scalar({\"a\": 1, \"b\": 2})\n    assert got == 5\n\n\ndef test_sos_gradient():\n    got = sos_gradient({\"a\": 1, \"b\": 2})\n    assert got == SOS_GRAD\n\n\ndef test_sos_likelihood_jacobian():\n    got = sos_likelihood_jacobian({\"a\": 1, \"b\": 2})\n\n    for key, val in SOS_LL_JAC.items():\n        assert_array_equal(got[key], val)\n\n\ndef test_sos_ls_jacobian():\n    got = sos_ls_jacobian({\"a\": 1, \"b\": 2})\n\n    for key, val in SOS_LS_JAC.items():\n        assert_array_equal(got[key], val)\n\n\ndef test_sos_fun_and_gradient():\n    got_val, got_grad = sos_fun_and_gradient({\"a\": 1, \"b\": 2})\n    assert got_val == 5\n    assert_array_equal(got_grad, 
SOS_GRAD)\n\n\ndef test_sos_likelihood_fun_and_jac():\n    got_val, got_jac = sos_likelihood_fun_and_jac({\"a\": 1, \"b\": 2})\n    aaae(got_val, np.array([1, 4]))\n    for key, val in SOS_LL_JAC.items():\n        assert_array_equal(got_jac[key], val)\n\n\ndef test_sos_ls_fun_and_jac():\n    got_val, got_jac = sos_ls_fun_and_jac({\"a\": 1, \"b\": 2})\n    aaae(got_val, np.array([1, 2]))\n    for key, val in SOS_LS_JAC.items():\n        assert_array_equal(got_jac[key], val)\n"
  },
  {
    "path": "tests/optimagic/logging/test_base.py",
    "content": "from dataclasses import dataclass\n\nimport pytest\n\nfrom optimagic.logging.base import InputType, NonUpdatableKeyValueStore, OutputType\nfrom optimagic.typing import DictLikeAccess\n\n\ndef test_key_value_store_raise_errors():\n    class NoDataClass(NonUpdatableKeyValueStore):\n        def __init__(self):\n            super().__init__({1}, [], \"key\")\n\n        def insert(self, value: InputType) -> None:\n            pass\n\n        def _select_by_key(self, key: int) -> list[OutputType]:\n            pass\n\n        def _select_all(self) -> list[OutputType]:\n            pass\n\n        def select_last_rows(self, n_rows: int) -> list[OutputType]:\n            pass\n\n    class WrongPrimaryKey(NonUpdatableKeyValueStore):\n        @dataclass(frozen=True)\n        class InputDummy(DictLikeAccess):\n            value: str\n\n        @dataclass(frozen=True)\n        class OutputDummy(DictLikeAccess):\n            id: int\n            value: str\n\n        def __init__(self):\n            super().__init__(\n                WrongPrimaryKey.InputDummy, WrongPrimaryKey.OutputDummy, \"ID\"\n            )\n\n        def insert(self, value: InputType) -> None:\n            pass\n\n        def _select_by_key(self, key: int) -> list[OutputType]:\n            pass\n\n        def _select_all(self) -> list[OutputType]:\n            pass\n\n        def select_last_rows(self, n_rows: int) -> list[OutputType]:\n            pass\n\n    with pytest.raises(ValueError):\n        NoDataClass()\n\n    with pytest.raises(ValueError):\n        WrongPrimaryKey()\n"
  },
  {
    "path": "tests/optimagic/logging/test_logger.py",
    "content": "from dataclasses import asdict\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom pybaum import tree_equal, tree_just_flatten\n\nfrom optimagic.logging.logger import (\n    LogOptions,\n    LogReader,\n    LogStore,\n    SQLiteLogOptions,\n    SQLiteLogReader,\n)\nfrom optimagic.optimization.optimize import minimize\nfrom optimagic.parameters.tree_registry import get_registry\nfrom optimagic.typing import Direction\n\n\n@pytest.fixture()\ndef example_db(tmp_path):\n    path = tmp_path / \"test.db\"\n\n    def _crit(params):\n        x = np.array(list(params.values()))\n        return x @ x\n\n    minimize(\n        fun=_crit,\n        params={\"a\": 1, \"b\": 2, \"c\": 3},\n        algorithm=\"scipy_lbfgsb\",\n        logging=path,\n    )\n    return path\n\n\ndef test_read_start_params(example_db):\n    res = LogReader.from_options(SQLiteLogOptions(example_db)).read_start_params()\n    assert res == {\"a\": 1, \"b\": 2, \"c\": 3}\n\n\ndef test_log_reader_read_start_params(example_db):\n    reader = LogReader.from_options(SQLiteLogOptions(example_db))\n    res = reader.read_start_params()\n    assert res == {\"a\": 1, \"b\": 2, \"c\": 3}\n\n\ndef test_log_reader_read_iteration(example_db):\n    reader = SQLiteLogReader(example_db)\n    first_row = reader.read_iteration(0)\n    assert first_row[\"params\"] == {\"a\": 1, \"b\": 2, \"c\": 3}\n    assert first_row[\"rowid\"] == 1\n    assert first_row[\"scalar_fun\"] == 14\n\n    last_row = reader.read_iteration(-1)\n    assert list(last_row[\"params\"]) == [\"a\", \"b\", \"c\"]\n    assert np.allclose(last_row[\"scalar_fun\"], 0)\n\n\ndef test_log_reader_index_exception(example_db):\n    with pytest.raises(IndexError):\n        SQLiteLogReader(example_db).read_iteration(10)\n\n    with pytest.raises(IndexError):\n        SQLiteLogReader(example_db).read_iteration(-4)\n\n\ndef test_log_reader_read_history(example_db):\n    reader = SQLiteLogReader(example_db)\n    res = 
reader.read_history()\n    assert res[\"time\"][0] == 0\n    assert res[\"fun\"][0] == 14\n    assert res[\"params\"][0] == {\"a\": 1, \"b\": 2, \"c\": 3}\n\n\ndef test_log_reader_read_multistart_history(example_db):\n    reader = SQLiteLogReader(example_db)\n    history, local_history, exploration = reader.read_multistart_history(\n        direction=Direction.MINIMIZE\n    )\n    assert local_history is None\n    assert exploration is None\n\n    registry = get_registry(extended=True)\n    assert tree_equal(\n        tree_just_flatten(asdict(history), registry=registry),\n        tree_just_flatten(asdict(reader.read_history()), registry=registry),\n    )\n\n\ndef test_read_steps_table(example_db):\n    res = SQLiteLogReader(example_db)._step_store.to_df()\n    assert isinstance(res, pd.DataFrame)\n    assert res.loc[0, \"rowid\"] == 1\n    assert res.loc[0, \"type\"] == \"optimization\"\n    assert res.loc[0, \"status\"] == \"complete\"\n\n\ndef test_read_optimization_problem_table(example_db):\n    res = SQLiteLogReader(example_db).problem_df\n    assert isinstance(res, pd.DataFrame)\n\n\ndef test_non_existing_database_raises_error(tmp_path):\n    with pytest.raises(FileNotFoundError):\n        SQLiteLogReader(tmp_path / \"i_do_not_exist.db\").read_start_params()\n\n\ndef test_available_log_options():\n    available_types = LogOptions.available_option_types()\n    assert len(available_types) == 1\n    assert available_types[0] is SQLiteLogOptions\n\n\ndef test_no_registered():\n    class DummyOptions(LogOptions):\n        pass\n\n    with pytest.raises(ValueError, match=\"DummyOptions\"):\n        LogReader.from_options(DummyOptions())\n\n    with pytest.raises(ValueError, match=\"DummyOptions\"):\n        LogStore.from_options(DummyOptions())\n"
  },
  {
    "path": "tests/optimagic/logging/test_sqlalchemy.py",
    "content": "import pickle\nimport sys\nfrom concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor\n\nimport numpy as np\nimport pytest\nfrom sqlalchemy import inspect\n\nfrom optimagic.logging import ExistenceStrategy\nfrom optimagic.logging.logger import LogStore, SQLiteLogOptions\nfrom optimagic.logging.sqlalchemy import IterationStore, StepStore\nfrom optimagic.logging.types import (\n    IterationState,\n    StepResult,\n    StepStatus,\n    StepType,\n)\n\n\nclass TestIterationStore:\n    @pytest.fixture\n    def store(self, tmp_path):\n        \"\"\"Fixture to set up the IterationStore.\"\"\"\n        return IterationStore(SQLiteLogOptions(tmp_path / \"test.db\"))\n\n    @staticmethod\n    def create_test_point(i: int):\n        return IterationState(\n            params=np.array([i, i + 1]),\n            timestamp=123456.0 + i,\n            exceptions=None,\n            valid=True,\n            scalar_fun=0.5 + i,\n            step=i,\n            raw_fun=None,\n        )\n\n    def test_table_creation(self, store):\n        \"\"\"Test that the IterationStore table is created properly.\"\"\"\n        assert store.table_name in inspect(store.engine).get_table_names()\n\n    def test_insert_and_query(self, store):\n        \"\"\"Test inserting and querying data in the IterationStore.\"\"\"\n        result = self.create_test_point(2456)\n        store.insert(result)\n        queried_result = store.select(1)[0]\n        assert queried_result is not None\n        assert queried_result.scalar_fun == result.scalar_fun\n\n    def test_update_raise(self, store):\n        \"\"\"Test updating an entry in the IterationStore.\"\"\"\n        # Insert initial data\n        result = self.create_test_point(568)\n        store.insert(result)\n        queried_result = store.select(1)[0]\n\n        # Update the value\n        updated_result = IterationState(\n            params=queried_result.params,\n            timestamp=queried_result.timestamp,\n            
exceptions=queried_result.exceptions,\n            valid=queried_result.valid,\n            scalar_fun=1.0,  # New value\n            step=queried_result.step,\n            raw_fun=queried_result.raw_fun,\n        )\n        msg = (\n            f\"'{IterationStore.__name__}' object does not allow to update items in \"\n            \"the store\"\n        )\n        with pytest.raises(AttributeError, match=msg):\n            store.update(key=1, value=updated_result)\n\n        with pytest.raises(AttributeError):\n            store.sellect_typo  # type:ignore # noqa: B018\n\n    def test_serialization(self, store):\n        \"\"\"Test the serialization and deserialization of the IterationStore.\"\"\"\n        pickled_store = pickle.dumps(store)\n        unpickled_store = pickle.loads(pickled_store)\n        assert store.table_name == unpickled_store.table_name\n        assert store.table_name in inspect(unpickled_store.engine).get_table_names()\n\n    @pytest.mark.parametrize(\n        \"executor_factory\",\n        [\n            lambda: ThreadPoolExecutor(max_workers=10),\n            lambda: ProcessPoolExecutor(max_workers=10),\n        ],\n        ids=[\"threads\", \"processes\"],\n    )\n    def test_parallel_insert(self, store, executor_factory):\n        \"\"\"Test multithreaded writing and reading in the IterationStore.\"\"\"\n        with executor_factory() as executor:\n            # Insert data concurrently\n            to_insert = list(map(self.create_test_point, range(10)))\n            futures = [executor.submit(store.insert, item) for item in to_insert]\n            for future in futures:\n                future.result()\n\n        result = store.select()\n\n        assert [row.rowid for row in result] == list(range(1, 11))\n        assert set([row.step for row in result]) == set(range(10))\n\n        result_last = store.select_last_rows(5)\n        assert len(result_last) == 5\n\n    @pytest.mark.skipif(\n        not sys.platform.startswith(\"win\"),\n        reason=\"On linux and 
macOS, this will result in a warning\",\n    )\n    def test_db_replacement_error(self, store):\n        store.insert(self.create_test_point(245))\n        with pytest.raises(RuntimeError, match=\"PermissionError\"):\n            LogStore.from_options(\n                SQLiteLogOptions(\n                    store._db_config.url.split(\"sqlite:///\")[-1],\n                    if_database_exists=ExistenceStrategy.REPLACE,\n                )\n            )\n\n    def test_db_existence_raise(self, store):\n        store.insert(self.create_test_point(245))\n        with pytest.raises(FileExistsError):\n            LogStore.from_options(\n                SQLiteLogOptions(\n                    store._db_config.url.split(\"sqlite:///\")[-1],\n                )\n            )\n\n\nclass TestStepStore:\n    @pytest.fixture\n    def store(self, tmp_path):\n        \"\"\"Fixture to set up the IterationStore.\"\"\"\n        return StepStore(SQLiteLogOptions(tmp_path / \"test.db\"))\n\n    @staticmethod\n    def create_test_point(i: int):\n        return StepResult(\n            f\"random_{i}\", StepType.OPTIMIZATION, StepStatus.RUNNING, n_iterations=i\n        )\n\n    def test_table_creation(self, store):\n        \"\"\"Test that the IterationStore table is created properly.\"\"\"\n        assert store.table_name in inspect(store.engine).get_table_names()\n\n    def test_insert_and_query(self, store):\n        \"\"\"Test inserting and querying data in the IterationStore.\"\"\"\n        result = self.create_test_point(2456)\n        store.insert(result)\n        queried_result = store.select(1)[0]\n        assert queried_result is not None\n        assert queried_result.n_iterations == result.n_iterations\n\n    def test_insert_string(self, store):\n        result = StepResult(\"strings\", \"optimization\", \"running\", n_iterations=1)\n        store.insert(result)\n        queried_result = store.select(1)[0]\n        assert queried_result is not None\n        assert 
queried_result.status is StepStatus.RUNNING\n        assert queried_result.type is StepType.OPTIMIZATION\n\n    def test_update(self, store):\n        \"\"\"Test updating an entry in the IterationStore.\"\"\"\n        # Insert initial data\n        result = self.create_test_point(568)\n        store.insert(result)\n        queried_result = store.select(1)[0]\n\n        # Update the value\n        updated_result = StepResult(\n            queried_result.name,\n            queried_result.type,\n            queried_result.status,\n            n_iterations=50,\n        )\n        store.update(key=1, value=updated_result)\n\n        # Verify the update\n        updated_entry = store.select(1)[0]\n        assert updated_entry is not None\n        assert updated_entry.n_iterations == 50\n\n        store.update(key=1, value={\"n_iterations\": 34})\n        updated_entry = store.select(1)[0]\n        assert updated_entry is not None\n        assert updated_entry.n_iterations == 34\n\n        with pytest.raises(ValueError):\n            store.update(key=1, value={\"N_iterations_typo\": 34})\n\n    def test_serialization(self, store):\n        \"\"\"Test the serialization and deserialization of the IterationStore.\"\"\"\n        pickled_store = pickle.dumps(store)\n        unpickled_store = pickle.loads(pickled_store)\n        assert store.table_name == unpickled_store.table_name\n        assert store.table_name in inspect(unpickled_store.engine).get_table_names()\n\n    @pytest.mark.parametrize(\n        \"executor_factory\",\n        [\n            lambda: ThreadPoolExecutor(max_workers=10),\n            lambda: ProcessPoolExecutor(max_workers=10),\n        ],\n        ids=[\"threads\", \"processes\"],\n    )\n    def test_parallel_insert(self, store, executor_factory):\n        \"\"\"Test multithreaded writing and reading in the IterationStore.\"\"\"\n        with executor_factory() as executor:\n            # Insert data concurrently\n            to_insert = 
list(map(self.create_test_point, range(10)))\n            futures = [executor.submit(store.insert, item) for item in to_insert]\n            for future in futures:\n                future.result()\n\n        result = store.select()\n\n        assert [row.rowid for row in result] == list(range(1, 11))\n        assert set([row.n_iterations for row in result]) == set(range(10))\n\n        result_last = store.select_last_rows(5)\n        assert len(result_last) == 5\n\n    @pytest.mark.parametrize(\n        \"executor_factory\",\n        [\n            lambda: ThreadPoolExecutor(max_workers=10),\n            lambda: ProcessPoolExecutor(max_workers=10),\n        ],\n        ids=[\"threads\", \"processes\"],\n    )\n    def test_parallel_update(self, store, executor_factory):\n        \"\"\"Test multithreaded writing and reading in the IterationStore.\"\"\"\n        with executor_factory() as executor:\n            # Insert data concurrently\n            to_insert = list(map(self.create_test_point, range(10)))\n            futures = [executor.submit(store.insert, item) for item in to_insert]\n            for future in futures:\n                future.result()\n\n        with executor_factory() as executor:\n            # Update data concurrently\n            to_update = [\n                (2, {\"status\": StepStatus.COMPLETE}),\n                (2, {\"n_iterations\": 200}),\n            ]\n            futures = [executor.submit(store.update, *item) for item in to_update]\n            for future in futures:\n                future.result()\n\n        result = store.select(2)[0]\n        assert result.status == StepStatus.COMPLETE\n        assert result.n_iterations == 200\n"
  },
  {
    "path": "tests/optimagic/logging/test_types.py",
    "content": "import pytest\n\nfrom optimagic.logging.types import (\n    IterationStateWithId,\n    ProblemInitializationWithId,\n    StepResultWithId,\n)\n\n\ndef test_raise_on_missing_id():\n    with pytest.raises(ValueError, match=\"rowid\"):\n        IterationStateWithId(1, 2, 3, True, None, None, None)\n\n    with pytest.raises(ValueError, match=\"rowid\"):\n        StepResultWithId(\"n\", \"optimization\", \"skipped\")\n\n    with pytest.raises(ValueError, match=\"rowid\"):\n        ProblemInitializationWithId(\"minimize\", 2)\n"
  },
  {
    "path": "tests/optimagic/optimization/test_algorithm.py",
    "content": "from dataclasses import dataclass\n\nimport numpy as np\nimport pytest\n\nfrom optimagic.exceptions import InvalidAlgoInfoError, InvalidAlgoOptionError\nfrom optimagic.optimization.algorithm import AlgoInfo, Algorithm, InternalOptimizeResult\nfrom optimagic.optimization.history import HistoryEntry\nfrom optimagic.typing import (\n    AggregationLevel,\n    EvalTask,\n    NonNegativeFloat,\n    PositiveFloat,\n    PositiveInt,\n)\n\n# ======================================================================================\n# Test AlgoInfo does proper validation of arguments\n# ======================================================================================\n\nINVALID_ALGO_INFO_KWARGS = [\n    {\"name\": 3},\n    {\"solver_type\": \"scalar\"},\n    {\"is_available\": \"yes\"},\n    {\"is_global\": \"no\"},\n    {\"needs_jac\": \"yes\"},\n    {\"needs_hess\": \"no\"},\n    {\"needs_bounds\": \"no\"},\n    {\"supports_parallelism\": \"yes\"},\n    {\"supports_bounds\": \"no\"},\n    {\"supports_infinite_bounds\": \"no\"},\n    {\"supports_linear_constraints\": \"yes\"},\n    {\"supports_nonlinear_constraints\": \"no\"},\n    {\"disable_history\": \"no\"},\n]\n\n\n@pytest.mark.parametrize(\"kwargs\", INVALID_ALGO_INFO_KWARGS)\ndef test_algo_info_validation(kwargs):\n    valid_kwargs = {\n        \"name\": \"test\",\n        \"solver_type\": AggregationLevel.LEAST_SQUARES,\n        \"is_available\": True,\n        \"is_global\": True,\n        \"needs_jac\": True,\n        \"needs_hess\": True,\n        \"needs_bounds\": True,\n        \"supports_parallelism\": True,\n        \"supports_bounds\": True,\n        \"supports_infinite_bounds\": True,\n        \"supports_linear_constraints\": True,\n        \"supports_nonlinear_constraints\": True,\n        \"disable_history\": True,\n    }\n\n    combined_kwargs = {**valid_kwargs, **kwargs}\n    msg = \"The following arguments to AlgoInfo or `mark.minimizer` are invalid\"\n    with 
pytest.raises(InvalidAlgoInfoError, match=msg):\n        AlgoInfo(**combined_kwargs)\n\n\n# ======================================================================================\n# Test InternalOptimizeResult does proper validation of arguments\n# ======================================================================================\n\n\nINVALID_INTERNAL_OPTIMIZE_RESULT_KWARGS = [\n    {\"x\": 3},\n    {\"fun\": [1, 2, 3]},\n    {\"success\": \"successful\"},\n    {\"message\": 3},\n    {\"n_fun_evals\": \"3\"},\n    {\"n_jac_evals\": \"3\"},\n    {\"n_hess_evals\": \"3\"},\n    {\"n_iterations\": \"3\"},\n    {\"status\": \"3\"},\n    {\"jac\": \"3\"},\n    {\"hess\": \"3\"},\n    {\"hess_inv\": \"3\"},\n    {\"max_constraint_violation\": \"3\"},\n]\n\n\n@pytest.mark.parametrize(\"kwargs\", INVALID_INTERNAL_OPTIMIZE_RESULT_KWARGS)\ndef test_internal_optimize_result_validation(kwargs):\n    valid_kwargs = {\n        \"x\": np.array([1, 2, 3]),\n        \"fun\": 3.0,\n        \"success\": True,\n        \"message\": \"success\",\n        \"n_fun_evals\": 3,\n        \"n_jac_evals\": 3,\n        \"n_hess_evals\": 3,\n        \"n_iterations\": 3,\n        \"status\": 3,\n        \"jac\": np.array([1, 2, 3]),\n        \"hess\": np.array([1, 2, 3]),\n        \"hess_inv\": np.array([1, 2, 3]),\n        \"max_constraint_violation\": 3.0,\n    }\n\n    combined_kwargs = {**valid_kwargs, **kwargs}\n    msg = \"The following arguments to InternalOptimizeResult are invalid\"\n    with pytest.raises(TypeError, match=msg):\n        InternalOptimizeResult(**combined_kwargs)\n\n\n# ======================================================================================\n# Test the copy constructors of Algorithm\n# ======================================================================================\n\n\n@dataclass(frozen=True)\nclass DummyAlgorithm(Algorithm):\n    initial_radius: PositiveFloat = 1.0\n    max_radius: PositiveFloat = 10.0\n    convergence_ftol_rel: 
NonNegativeFloat = 1e-6\n    stopping_maxiter: PositiveInt = 1000\n\n    def _solve_internal_problem(self, problem, x0):\n        hist_entry = HistoryEntry(\n            params=x0,\n            fun=0.0,\n            start_time=0.0,\n            task=EvalTask.FUN,\n        )\n        problem.history.add_entry(hist_entry)\n        return InternalOptimizeResult(x=x0, fun=0.0, success=True)\n\n\ndef test_with_option():\n    algo = DummyAlgorithm()\n    new_algo = algo.with_option(initial_radius=2.0, max_radius=20.0)\n    assert new_algo is not algo\n    assert new_algo.initial_radius == 2.0\n    assert new_algo.max_radius == 20.0\n\n\ndef test_with_option_invalid_key():\n    algo = DummyAlgorithm()\n    with pytest.raises(InvalidAlgoOptionError):\n        algo.with_option(invalid_key=2.0)\n\n\ndef test_with_stopping():\n    algo = DummyAlgorithm()\n    new_algo = algo.with_stopping(maxiter=2000)\n    assert new_algo is not algo\n    assert new_algo.stopping_maxiter == 2000\n\n\ndef test_with_stopping_with_full_option_name():\n    algo = DummyAlgorithm()\n    new_algo = algo.with_stopping(stopping_maxiter=2000)\n    assert new_algo is not algo\n    assert new_algo.stopping_maxiter == 2000\n\n\ndef test_with_stopping_invalid_key():\n    algo = DummyAlgorithm()\n    with pytest.raises(InvalidAlgoOptionError):\n        algo.with_stopping(invalid_key=2000)\n\n\ndef test_with_convergence():\n    algo = DummyAlgorithm()\n    new_algo = algo.with_convergence(ftol_rel=1e-5)\n    assert new_algo is not algo\n    assert new_algo.convergence_ftol_rel == 1e-5\n\n\ndef test_with_convergence_with_full_option_name():\n    algo = DummyAlgorithm()\n    new_algo = algo.with_convergence(convergence_ftol_rel=1e-5)\n    assert new_algo is not algo\n    assert new_algo.convergence_ftol_rel == 1e-5\n\n\ndef test_with_convergence_invalid_key():\n    algo = DummyAlgorithm()\n    with pytest.raises(InvalidAlgoOptionError):\n        algo.with_convergence(invalid_key=1e-5)\n\n\ndef 
test_with_option_if_applicable():\n    algo = DummyAlgorithm()\n    with pytest.warns(UserWarning):\n        new_algo = algo.with_option_if_applicable(\n            invalid=15,\n            initial_radius=42,\n        )\n    assert new_algo is not algo\n    assert new_algo.initial_radius == 42\n\n\n# ======================================================================================\n# Test the type conversions of algo options\n# ======================================================================================\n\n\ndef test_algorithm_does_type_conversion():\n    algo = DummyAlgorithm(\n        initial_radius=\"1.0\",\n        max_radius=\"10.0\",\n        convergence_ftol_rel=\"1e-6\",\n        stopping_maxiter=\"1000\",\n    )\n\n    assert isinstance(algo.initial_radius, float)\n    assert algo.initial_radius == 1.0\n    assert isinstance(algo.max_radius, float)\n    assert algo.max_radius == 10.0\n    assert isinstance(algo.convergence_ftol_rel, float)\n    assert algo.convergence_ftol_rel == 1e-6\n    assert isinstance(algo.stopping_maxiter, int)\n    assert algo.stopping_maxiter == 1000\n\n\ndef test_algorithm_does_type_conversion_in_with_option():\n    algo = DummyAlgorithm()\n    new_algo = algo.with_option(\n        initial_radius=\"2.0\",\n        max_radius=\"20.0\",\n    )\n\n    assert isinstance(new_algo.initial_radius, float)\n    assert new_algo.initial_radius == 2.0\n    assert isinstance(new_algo.max_radius, float)\n    assert new_algo.max_radius == 20.0\n\n\ndef test_error_with_negative_radius():\n    with pytest.raises(Exception):  # noqa: B017\n        DummyAlgorithm(initial_radius=-1.0)\n"
  },
  {
    "path": "tests/optimagic/optimization/test_convergence_report.py",
    "content": "import numpy as np\nimport pandas as pd\nfrom numpy.testing import assert_array_almost_equal as aaae\n\nfrom optimagic.optimization.convergence_report import get_convergence_report\nfrom optimagic.optimization.history import History\nfrom optimagic.typing import Direction, EvalTask\n\n\ndef test_get_convergence_report_minimize():\n    hist = History(\n        direction=Direction.MINIMIZE,\n        params=[{\"a\": 0}, {\"a\": 2.1}, {\"a\": 2.5}, {\"a\": 2.0}],\n        fun=[5, 4.1, 4.4, 4.0],\n        start_time=[0, 1, 2, 3],\n        stop_time=[1, 2, 3, 4],\n        task=4 * [EvalTask.FUN],\n        batches=[0, 1, 2, 3],\n    )\n\n    calculated = pd.DataFrame.from_dict(get_convergence_report(hist))\n\n    expected = np.array([[0.025, 0.25], [0.05, 1.0], [0.1, 1], [0.1, 2.0]])\n    aaae(calculated.to_numpy(), expected)\n\n\ndef test_get_convergence_report_maximize():\n    hist = History(\n        direction=Direction.MAXIMIZE,\n        params=[{\"a\": 0}, {\"a\": 2.1}, {\"a\": 2.5}, {\"a\": 2.0}],\n        fun=[-5, -4.1, -4.4, -4.0],\n        start_time=[0, 1, 2, 3],\n        stop_time=[1, 2, 3, 4],\n        task=4 * [EvalTask.FUN],\n        batches=[0, 1, 2, 3],\n    )\n\n    calculated = pd.DataFrame.from_dict(get_convergence_report(hist))\n\n    expected = np.array([[0.025, 0.25], [0.05, 1.0], [0.1, 1], [0.1, 2.0]])\n    aaae(calculated.to_numpy(), expected)\n\n\ndef test_history_is_too_short():\n    # first value is best, so history of accepted parameters has only one entry\n    hist = History(\n        direction=Direction.MAXIMIZE,\n        params=[{\"a\": 0}, {\"a\": 2.1}, {\"a\": 2.5}, {\"a\": 2.0}],\n        fun=[5, 4.1, 4.4, 4.0],\n        start_time=[0, 1, 2, 3],\n        stop_time=[1, 2, 3, 4],\n        task=4 * [EvalTask.FUN],\n        batches=[0, 1, 2, 3],\n    )\n\n    calculated = get_convergence_report(hist)\n    assert calculated is None\n"
  },
  {
    "path": "tests/optimagic/optimization/test_create_optimization_problem.py",
    "content": "import pytest\n\nfrom optimagic.optimization.create_optimization_problem import (\n    pre_process_user_algorithm,\n)\nfrom optimagic.optimizers.scipy_optimizers import ScipyLBFGSB\n\n\ndef test_pre_process_user_algorithm_valid_string():\n    got = pre_process_user_algorithm(\"scipy_lbfgsb\")\n    assert isinstance(got, ScipyLBFGSB)\n\n\ndef test_pre_process_user_algorithm_invalid_string():\n    with pytest.raises(ValueError):\n        pre_process_user_algorithm(\"not_an_algorithm\")\n\n\ndef test_pre_process_user_algorithm_valid_instance():\n    got = pre_process_user_algorithm(ScipyLBFGSB())\n    assert isinstance(got, ScipyLBFGSB)\n\n\ndef test_pre_process_user_algorithm_valid_class():\n    got = pre_process_user_algorithm(ScipyLBFGSB)\n    assert isinstance(got, ScipyLBFGSB)\n"
  },
  {
    "path": "tests/optimagic/optimization/test_error_penalty.py",
    "content": "import functools\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_array_almost_equal as aaae\n\nfrom optimagic.differentiation.derivatives import first_derivative\nfrom optimagic.optimization.error_penalty import (\n    _likelihood_penalty,\n    _penalty_residuals,\n    _scalar_penalty,\n    get_error_penalty_function,\n)\nfrom optimagic.optimization.fun_value import (\n    LeastSquaresFunctionValue,\n    LikelihoodFunctionValue,\n    ScalarFunctionValue,\n)\nfrom optimagic.typing import AggregationLevel, Direction\nfrom optimagic.utilities import get_rng\n\n\n@pytest.mark.parametrize(\"seed\", range(10))\ndef test_penalty_aggregations(seed):\n    rng = get_rng(seed)\n    x = rng.uniform(size=5)\n    x0 = rng.uniform(size=5)\n    slope = 0.3\n    constant = 3\n    dim_out = 10\n\n    scalar, _ = _scalar_penalty(x, constant, slope, x0)\n    contribs, _ = _likelihood_penalty(x, constant, slope, x0, dim_out)\n    root_contribs, _ = _penalty_residuals(x, constant, slope, x0, dim_out)\n\n    assert np.isclose(scalar.value, contribs.value.sum())\n    assert np.isclose(scalar.value, (root_contribs.value**2).sum())\n\n\npairs = [\n    (_scalar_penalty, AggregationLevel.SCALAR),\n    (_likelihood_penalty, AggregationLevel.LIKELIHOOD),\n    (_penalty_residuals, AggregationLevel.LEAST_SQUARES),\n]\n\n\n@pytest.mark.parametrize(\"func, solver_type\", pairs)\ndef test_penalty_derivatives(func, solver_type):\n    rng = get_rng(seed=5)\n    x = rng.uniform(size=5)\n    x0 = rng.uniform(size=5)\n    slope = 0.3\n    constant = 3\n    dim_out = 8\n\n    _, calculated = func(x, constant, slope, x0, dim_out)\n\n    partialed = functools.partial(\n        func, constant=constant, slope=slope, x0=x0, dim_out=dim_out\n    )\n    expected = first_derivative(\n        partialed, x, unpacker=lambda x: x[0].internal_value(solver_type)\n    )\n\n    aaae(calculated, expected.derivative)\n\n\n@pytest.mark.parametrize(\"seed\", range(10))\ndef 
test_penalty_aggregations_via_get_error_penalty(seed):\n    rng = get_rng(seed)\n    x = rng.uniform(size=5)\n    x0 = rng.uniform(size=5)\n    slope = 0.3\n    constant = 3\n\n    scalar_func = get_error_penalty_function(\n        start_x=x0,\n        start_criterion=ScalarFunctionValue(3),\n        error_penalty={\"slope\": slope, \"constant\": constant},\n        solver_type=AggregationLevel.SCALAR,\n        direction=Direction.MINIMIZE,\n    )\n\n    contribs_func = get_error_penalty_function(\n        start_x=x0,\n        start_criterion=LikelihoodFunctionValue(np.ones(10)),\n        error_penalty={\"slope\": slope, \"constant\": constant},\n        solver_type=AggregationLevel.LIKELIHOOD,\n        direction=Direction.MINIMIZE,\n    )\n\n    root_contribs_func = get_error_penalty_function(\n        start_x=x0,\n        start_criterion=LeastSquaresFunctionValue(np.ones(10)),\n        error_penalty={\"slope\": slope, \"constant\": constant},\n        solver_type=AggregationLevel.LEAST_SQUARES,\n        direction=Direction.MINIMIZE,\n    )\n\n    scalar, _ = scalar_func(x)\n    contribs, _ = contribs_func(x)\n    root_contribs, _ = root_contribs_func(x)\n\n    assert np.isclose(scalar.value, contribs.value.sum())\n    assert np.isclose(scalar.value, (root_contribs.value**2).sum())\n"
  },
  {
    "path": "tests/optimagic/optimization/test_fun_value.py",
    "content": "import numpy as np\nimport pytest\nfrom numpy.testing import assert_almost_equal as aae\n\nfrom optimagic.exceptions import InvalidFunctionError\nfrom optimagic.optimization.fun_value import (\n    FunctionValue,\n    LeastSquaresFunctionValue,\n    LikelihoodFunctionValue,\n    ScalarFunctionValue,\n    enforce_return_type,\n    enforce_return_type_with_jac,\n)\nfrom optimagic.typing import AggregationLevel\n\nSCALAR_VALUES = [\n    ScalarFunctionValue(5),\n]\n\nLS_VALUES = [\n    LeastSquaresFunctionValue(np.array([1, 2])),\n    LeastSquaresFunctionValue({\"a\": 1, \"b\": 2}),\n]\n\nLIKELIHOOD_VALUES = [\n    LikelihoodFunctionValue(np.array([1, 4])),\n    LikelihoodFunctionValue({\"a\": 1, \"b\": 4}),\n]\n\n\n@pytest.mark.parametrize(\"value\", SCALAR_VALUES + LS_VALUES + LIKELIHOOD_VALUES)\ndef test_values_for_scalar_optimizers(value):\n    got = value.internal_value(AggregationLevel.SCALAR)\n    assert isinstance(got, float)\n    assert got == 5.0\n\n\n@pytest.mark.parametrize(\"value\", LS_VALUES)\ndef test_values_for_least_squares_optimizers(value):\n    got = value.internal_value(AggregationLevel.LEAST_SQUARES)\n    assert isinstance(got, np.ndarray)\n    assert got.dtype == np.float64\n    aae(got, np.array([1.0, 2]))\n\n\n@pytest.mark.parametrize(\"value\", LS_VALUES + LIKELIHOOD_VALUES)\ndef test_values_for_likelihood_optimizers(value):\n    got = value.internal_value(AggregationLevel.LIKELIHOOD)\n    assert isinstance(got, np.ndarray)\n    assert got.dtype == np.float64\n    aae(got, np.array([1.0, 4]))\n\n\n@pytest.mark.parametrize(\"value\", SCALAR_VALUES + LIKELIHOOD_VALUES)\ndef test_invalid_values_for_least_squares_optimizers(value):\n    with pytest.raises(InvalidFunctionError):\n        value.internal_value(AggregationLevel.LEAST_SQUARES)\n\n\n@pytest.mark.parametrize(\"value\", SCALAR_VALUES)\ndef test_invalid_values_for_likelihood_optimizers(value):
\n    with pytest.raises(InvalidFunctionError):\n        value.internal_value(AggregationLevel.LIKELIHOOD)\n\n\ndef test_enforce_scalar_with_scalar_return():\n    @enforce_return_type(AggregationLevel.SCALAR)\n    def f(x):\n        return 3\n\n    got = f(np.ones(3))\n    assert isinstance(got, ScalarFunctionValue)\n    assert got.value == 3\n\n\ndef test_enforce_scalar_with_function_value_return():\n    @enforce_return_type(AggregationLevel.SCALAR)\n    def f(x):\n        return FunctionValue(3)\n\n    got = f(np.ones(3))\n    assert isinstance(got, ScalarFunctionValue)\n    assert got.value == 3\n\n\ndef test_enforce_scalar_trivial_case():\n    @enforce_return_type(AggregationLevel.SCALAR)\n    def f(x):\n        return ScalarFunctionValue(3)\n\n    got = f(3)\n    assert isinstance(got, ScalarFunctionValue)\n    assert got.value == 3\n\n\ndef test_enforce_scalar_invalid_return():\n    @enforce_return_type(AggregationLevel.SCALAR)\n    def f(x):\n        return x\n\n    with pytest.raises(InvalidFunctionError):\n        f(np.ones(3))\n\n\ndef test_enforce_least_squares_with_vector_return():\n    @enforce_return_type(AggregationLevel.LEAST_SQUARES)\n    def f(x):\n        return np.ones(3)\n\n    got = f(np.ones(3))\n    assert isinstance(got, LeastSquaresFunctionValue)\n    aae(got.value, np.ones(3))\n\n\ndef test_enforce_least_squares_with_function_value_return():\n    @enforce_return_type(AggregationLevel.LEAST_SQUARES)\n    def f(x):\n        return FunctionValue(np.ones(3))\n\n    got = f(np.ones(3))\n    assert isinstance(got, LeastSquaresFunctionValue)\n    aae(got.value, np.ones(3))\n\n\ndef test_enforce_least_squares_trivial_case():\n    @enforce_return_type(AggregationLevel.LEAST_SQUARES)\n    def f(x):\n        return LeastSquaresFunctionValue(np.ones(3))\n\n    got = f(np.ones(3))\n    assert isinstance(got, LeastSquaresFunctionValue)\n    aae(got.value, np.ones(3))\n\n\ndef test_enforce_least_squares_invalid_return():\n    @enforce_return_type(AggregationLevel.LEAST_SQUARES)\n    def f(x):\n     
   return 3\n\n    with pytest.raises(InvalidFunctionError):\n        f(np.ones(3))\n\n\ndef test_enforce_likelihood_with_vector_return():\n    @enforce_return_type(AggregationLevel.LIKELIHOOD)\n    def f(x):\n        return np.ones(3)\n\n    got = f(np.ones(3))\n    assert isinstance(got, LikelihoodFunctionValue)\n    aae(got.value, np.ones(3))\n\n\ndef test_enforce_likelihood_with_function_value_return():\n    @enforce_return_type(AggregationLevel.LIKELIHOOD)\n    def f(x):\n        return FunctionValue(np.ones(3))\n\n    got = f(np.ones(3))\n    assert isinstance(got, LikelihoodFunctionValue)\n    aae(got.value, np.ones(3))\n\n\ndef test_enforce_likelihood_trivial_case():\n    @enforce_return_type(AggregationLevel.LIKELIHOOD)\n    def f(x):\n        return LikelihoodFunctionValue(np.ones(3))\n\n    got = f(np.ones(3))\n    assert isinstance(got, LikelihoodFunctionValue)\n    aae(got.value, np.ones(3))\n\n\ndef test_enforce_likelihood_invalid_return():\n    @enforce_return_type(AggregationLevel.LIKELIHOOD)\n    def f(x):\n        return 3\n\n    with pytest.raises(InvalidFunctionError):\n        f(np.ones(3))\n\n\ndef test_enforce_scalar_with_jac_with_scalar_return():\n    @enforce_return_type_with_jac(AggregationLevel.SCALAR)\n    def f(x):\n        return 3, np.zeros(3)\n\n    got_value, got_jac = f(np.ones(3))\n    assert isinstance(got_value, ScalarFunctionValue)\n    assert got_value.value == 3\n    aae(got_jac, np.zeros(3))\n\n\ndef test_enforce_scalar_with_jac_with_function_value_return():\n    @enforce_return_type_with_jac(AggregationLevel.SCALAR)\n    def f(x):\n        return FunctionValue(3), np.zeros(3)\n\n    got_value, got_jac = f(np.ones(3))\n    assert isinstance(got_value, ScalarFunctionValue)\n    assert got_value.value == 3\n    aae(got_jac, np.zeros(3))\n\n\ndef test_enforce_scalar_with_jac_trivial_case():\n    @enforce_return_type_with_jac(AggregationLevel.SCALAR)\n    def f(x):\n        return ScalarFunctionValue(3), np.zeros(3)\n\n    
got_value, got_jac = f(3)\n    assert isinstance(got_value, ScalarFunctionValue)\n    assert got_value.value == 3\n    aae(got_jac, np.zeros(3))\n\n\ndef test_enforce_scalar_with_jac_invalid_return():\n    @enforce_return_type_with_jac(AggregationLevel.SCALAR)\n    def f(x):\n        return x, np.zeros(3)\n\n    with pytest.raises(InvalidFunctionError):\n        f(np.ones(3))\n\n\ndef test_enforce_least_squares_with_jac_with_vector_return():\n    @enforce_return_type_with_jac(AggregationLevel.LEAST_SQUARES)\n    def f(x):\n        return np.ones(3), np.zeros((3, 3))\n\n    got_value, got_jac = f(np.ones(3))\n    assert isinstance(got_value, LeastSquaresFunctionValue)\n    aae(got_value.value, np.ones(3))\n    aae(got_jac, np.zeros((3, 3)))\n\n\ndef test_enforce_least_squares_with_jac_with_function_value_return():\n    @enforce_return_type_with_jac(AggregationLevel.LEAST_SQUARES)\n    def f(x):\n        return FunctionValue(np.ones(3)), np.zeros((3, 3))\n\n    got_value, got_jac = f(np.ones(3))\n    assert isinstance(got_value, LeastSquaresFunctionValue)\n    aae(got_value.value, np.ones(3))\n    aae(got_jac, np.zeros((3, 3)))\n\n\ndef test_enforce_least_squares_with_jac_trivial_case():\n    @enforce_return_type_with_jac(AggregationLevel.LEAST_SQUARES)\n    def f(x):\n        return LeastSquaresFunctionValue(np.ones(3)), np.zeros((3, 3))\n\n    got_value, got_jac = f(np.ones(3))\n    assert isinstance(got_value, LeastSquaresFunctionValue)\n    aae(got_value.value, np.ones(3))\n    aae(got_jac, np.zeros((3, 3)))\n\n\ndef test_enforce_least_squares_with_jac_invalid_return():\n    @enforce_return_type_with_jac(AggregationLevel.LEAST_SQUARES)\n    def f(x):\n        return 3, np.zeros((3, 3))\n\n    with pytest.raises(InvalidFunctionError):\n        f(np.ones(3))\n\n\ndef test_enforce_likelihood_with_jac_with_vector_return():\n    @enforce_return_type_with_jac(AggregationLevel.LIKELIHOOD)\n    def f(x):\n        return np.ones(3), np.zeros((3, 3))\n\n    got_value, 
got_jac = f(np.ones(3))\n    assert isinstance(got_value, LikelihoodFunctionValue)\n    aae(got_value.value, np.ones(3))\n    aae(got_jac, np.zeros((3, 3)))\n\n\ndef test_enforce_likelihood_with_jac_with_function_value_return():\n    @enforce_return_type_with_jac(AggregationLevel.LIKELIHOOD)\n    def f(x):\n        return FunctionValue(np.ones(3)), np.zeros((3, 3))\n\n    got_value, got_jac = f(np.ones(3))\n    assert isinstance(got_value, LikelihoodFunctionValue)\n    aae(got_value.value, np.ones(3))\n    aae(got_jac, np.zeros((3, 3)))\n\n\ndef test_enforce_likelihood_with_jac_trivial_case():\n    @enforce_return_type_with_jac(AggregationLevel.LIKELIHOOD)\n    def f(x):\n        return LikelihoodFunctionValue(np.ones(3)), np.zeros((3, 3))\n\n    got_value, got_jac = f(np.ones(3))\n    assert isinstance(got_value, LikelihoodFunctionValue)\n    aae(got_value.value, np.ones(3))\n    aae(got_jac, np.zeros((3, 3)))\n\n\ndef test_enforce_likelihood_with_jac_invalid_return():\n    @enforce_return_type_with_jac(AggregationLevel.LIKELIHOOD)\n    def f(x):\n        return 3, np.zeros((3, 3))\n\n    with pytest.raises(InvalidFunctionError):\n        f(np.ones(3))\n"
  },
  {
    "path": "tests/optimagic/optimization/test_function_formats_ls.py",
    "content": "\"\"\"Test different ways of specifying objective functions and their derivatives.\n\nWe also test that least-squares problems can be optimized with scalar optimizers.\n\n\"\"\"\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_array_almost_equal as aaae\n\nfrom optimagic import mark, maximize, minimize\nfrom optimagic.exceptions import InvalidFunctionError\nfrom optimagic.optimization.fun_value import (\n    FunctionValue,\n    LeastSquaresFunctionValue,\n)\n\n# ======================================================================================\n# minimize cases with numpy params\n# ======================================================================================\n\n\n@mark.least_squares\ndef sos_ls(x):\n    return x\n\n\ndef typed_sos_ls(x: np.ndarray) -> LeastSquaresFunctionValue:\n    return LeastSquaresFunctionValue(x)\n\n\n@mark.least_squares\ndef sos_ls_with_info(x):\n    return FunctionValue(x, info={\"x\": x})\n\n\nMIN_FUNS = [\n    sos_ls,\n    typed_sos_ls,\n    sos_ls_with_info,\n]\n\n\ndef jac(x):\n    return 2 * x\n\n\n@mark.least_squares\ndef jac_ls(x):\n    return np.diag(2 * x)\n\n\nMIN_JACS = [None, [jac, jac_ls]]\n\n\nALGORITHMS = [\"scipy_lbfgsb\", \"scipy_ls_lm\"]\n\n\n@pytest.mark.parametrize(\"fun\", MIN_FUNS)\n@pytest.mark.parametrize(\"jac\", MIN_JACS)\n@pytest.mark.parametrize(\"use_fun_and_jac\", [False, True])\n@pytest.mark.parametrize(\"algorithm\", ALGORITHMS)\ndef test_least_squares_minimize(fun, jac, use_fun_and_jac, algorithm):\n    start_params = np.array([1, 2, 3])\n    if use_fun_and_jac and jac is not None:\n\n        def fun_and_jac_scalar(x):\n            return x @ x, 2 * x\n\n        @mark.least_squares\n        def fun_and_jac_ls(x):\n            return x, np.diag(2 * x)\n\n        fun_and_jac = [fun_and_jac_scalar, fun_and_jac_ls]\n    else:\n        fun_and_jac = None\n\n    res = minimize(\n        fun=fun,\n        params=start_params,\n        algorithm=algorithm,\n        
jac=jac,\n        fun_and_jac=fun_and_jac,\n    )\n    aaae(res.params, np.zeros(3))\n\n\n# ======================================================================================\n# minimize cases with dict params\n# ======================================================================================\n\n\ndef dict_jac(params):\n    return {k: 2 * v for k, v in params.items()}\n\n\n@mark.least_squares\ndef dict_jac_ls(params):\n    out = {}\n    for outer_key in params:\n        row = {}\n        for inner_key in params:\n            if outer_key == inner_key:\n                row[inner_key] = 2 * params[inner_key]\n            else:\n                row[inner_key] = 0\n        out[outer_key] = row\n    return out\n\n\nMIN_JACS_DICT = [None, [dict_jac, dict_jac_ls]]\n\n\n@pytest.mark.parametrize(\"fun\", MIN_FUNS)\n@pytest.mark.parametrize(\"jac\", MIN_JACS_DICT)\n@pytest.mark.parametrize(\"use_fun_and_jac\", [False, True])\n@pytest.mark.parametrize(\"algorithm\", ALGORITHMS)\ndef test_least_squares_minimize_dict(fun, jac, use_fun_and_jac, algorithm):\n    start_params = {\"a\": 1, \"b\": 2, \"c\": 3}\n\n    if use_fun_and_jac and jac is not None:\n\n        def fun_and_jac_dict_scalar(params):\n            x = np.array(list(params.values()))\n            return x @ x, dict_jac(params)\n\n        @mark.least_squares\n        def fun_and_jac_dict_ls(params):\n            return params, dict_jac_ls(params)\n\n        fun_and_jac = [fun_and_jac_dict_scalar, fun_and_jac_dict_ls]\n    else:\n        fun_and_jac = None\n\n    res = minimize(\n        fun=fun,\n        params=start_params,\n        algorithm=algorithm,\n        jac=jac,\n        fun_and_jac=fun_and_jac,\n    )\n\n    for key in start_params:\n        assert np.allclose(res.params[key], 0, atol=1e-5)\n\n\n# ======================================================================================\n# invalid cases\n# 
======================================================================================\n\n\n@pytest.mark.parametrize(\"algorithm\", ALGORITHMS)\ndef test_maximize_with_ls_problems_raises_error(algorithm):\n    with pytest.raises(InvalidFunctionError):\n        maximize(\n            fun=sos_ls,\n            params=np.array([1, 2, 3]),\n            algorithm=algorithm,\n        )\n\n\n@mark.least_squares\ndef invalid_sos_ls(x):\n    return x @ x\n\n\n@mark.least_squares\ndef invalid_sos_ls_with_info(x):\n    return FunctionValue(x @ x, info={\"x\": x})\n\n\nINVALID_FUNS = [\n    invalid_sos_ls,\n    invalid_sos_ls_with_info,\n]\n\n\n@pytest.mark.parametrize(\"fun\", INVALID_FUNS)\n@pytest.mark.parametrize(\"algorithm\", ALGORITHMS)\ndef test_invalid_least_squares_minimize(fun, algorithm):\n    start_params = np.array([1, 2, 3])\n\n    with pytest.raises(InvalidFunctionError):\n        minimize(\n            fun=fun,\n            params=start_params,\n            algorithm=algorithm,\n        )\n\n\n@mark.least_squares\ndef invalid_jac_ls(x):\n    return 2 * x\n\n\n@mark.least_squares\ndef invalid_jac_ls_2(x):\n    return FunctionValue(2 * x)\n\n\nINVALID_JACS = [invalid_jac_ls, invalid_jac_ls_2]\n\n\n@pytest.mark.parametrize(\"jac\", INVALID_JACS)\ndef test_least_squares_minimize_with_invalid_jac(jac):\n    with pytest.raises(Exception):  # noqa: B017\n        minimize(\n            fun=sos_ls,\n            params=np.array([1, 2, 3]),\n            algorithm=\"scipy_ls_lm\",\n            jac=jac,\n        )\n\n\n@mark.least_squares\ndef invalid_fun_and_jac_value(x):\n    return x @ x, np.diag(2 * x)\n\n\n@mark.least_squares\ndef invalid_fun_and_jac_derivative(x):\n    return x, 2 * x\n\n\nINVALID_FUN_AND_JACS = [invalid_fun_and_jac_value, invalid_fun_and_jac_derivative]\n\n\n@pytest.mark.parametrize(\"fun_and_jac\", INVALID_FUN_AND_JACS)\ndef test_least_squares_minimize_with_invalid_fun_and_jac(fun_and_jac):\n    with pytest.raises(InvalidFunctionError):\n        
minimize(\n            fun=sos_ls,\n            params=np.array([1, 2, 3]),\n            algorithm=\"scipy_ls_lm\",\n            fun_and_jac=fun_and_jac,\n        )\n"
  },
  {
    "path": "tests/optimagic/optimization/test_function_formats_scalar.py",
    "content": "\"\"\"Test different ways of specifying objective functions and their derivatives.\"\"\"\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_array_almost_equal as aaae\nfrom numpy.typing import NDArray\n\nfrom optimagic import mark, maximize, minimize\nfrom optimagic.exceptions import InvalidFunctionError\nfrom optimagic.optimization.fun_value import FunctionValue, ScalarFunctionValue\n\n# ======================================================================================\n# minimize cases with numpy params\n# ======================================================================================\n\n\ndef sos(x):\n    return x @ x\n\n\n@mark.scalar\ndef marked_sos(x):\n    return x @ x\n\n\ndef typed_sos_float(x: np.ndarray) -> float:\n    return x @ x\n\n\ndef typed_sos_value(x: np.ndarray) -> ScalarFunctionValue:\n    return ScalarFunctionValue(x @ x)\n\n\ndef sos_with_info(x):\n    return FunctionValue(x @ x, info={\"x\": x})\n\n\nMIN_FUNS = [\n    sos,\n    marked_sos,\n    typed_sos_float,\n    typed_sos_value,\n    sos_with_info,\n]\n\n\ndef jac(x):\n    return 2 * x\n\n\n@mark.scalar\ndef marked_jac(x):\n    return 2 * x\n\n\nMIN_JACS = [None, jac, marked_jac]\n\nFUN_AND_JAC_CASES = [None, \"marked\", \"unmarked\"]\n\n\n@pytest.mark.parametrize(\"fun\", MIN_FUNS)\n@pytest.mark.parametrize(\"jac\", MIN_JACS)\n@pytest.mark.parametrize(\"fun_and_jac_case\", FUN_AND_JAC_CASES)\ndef test_minimize_with_numpy_inputs(fun, jac, fun_and_jac_case):\n    if fun_and_jac_case is None:\n        fun_and_jac = None\n    elif fun_and_jac_case == \"marked\":\n\n        @mark.scalar\n        def fun_and_jac(x):\n            return fun(x), 2 * x\n    else:\n\n        def fun_and_jac(x):\n            return fun(x), 2 * x\n\n    res = minimize(\n        fun=fun,\n        params=np.array([1, 2, 3]),\n        algorithm=\"scipy_lbfgsb\",\n        jac=jac,\n        fun_and_jac=fun_and_jac,\n    )\n    aaae(res.params, np.zeros(3))\n\n\n# 
======================================================================================\n# maximize cases with numpy params\n# ======================================================================================\n\n\ndef neg_sos(x):\n    return -x @ x\n\n\n@mark.scalar\ndef marked_neg_sos(x):\n    return -x @ x\n\n\ndef typed_neg_sos_float(x: np.ndarray) -> float:\n    return -x @ x\n\n\ndef typed_neg_sos_value(x: np.ndarray) -> ScalarFunctionValue:\n    return ScalarFunctionValue(-x @ x)\n\n\ndef neg_sos_with_info(x):\n    return FunctionValue(-x @ x, info={\"x\": x})\n\n\nMAX_FUNS = [\n    neg_sos,\n    marked_neg_sos,\n    typed_neg_sos_float,\n    typed_neg_sos_value,\n    neg_sos_with_info,\n]\n\n\ndef neg_jac(x):\n    return -2 * x\n\n\n@mark.scalar\ndef marked_neg_jac(x):\n    return -2 * x\n\n\nMAX_JACS = [None, neg_jac, marked_neg_jac]\n\n\n@pytest.mark.parametrize(\"fun\", MAX_FUNS)\n@pytest.mark.parametrize(\"jac\", MAX_JACS)\n@pytest.mark.parametrize(\"fun_and_jac_case\", FUN_AND_JAC_CASES)\ndef test_maximize_with_numpy_inputs(fun, jac, fun_and_jac_case):\n    if fun_and_jac_case is None:\n        fun_and_jac = None\n    elif fun_and_jac_case == \"marked\":\n\n        @mark.scalar\n        def fun_and_jac(x):\n            return fun(x), -2 * x\n    else:\n\n        def fun_and_jac(x):\n            return fun(x), -2 * x\n\n    res = maximize(\n        fun=fun,\n        params=np.array([1, 2, 3]),\n        algorithm=\"scipy_lbfgsb\",\n        jac=jac,\n        fun_and_jac=fun_and_jac,\n    )\n    aaae(res.params, np.zeros(3))\n\n\n# ======================================================================================\n# minimize cases with dict params\n# ======================================================================================\n\n\ndef sos_dict(params):\n    x = np.array(list(params.values()))\n    return x @ x\n\n\n@mark.scalar\ndef marked_sos_dict(params):\n    x = np.array(list(params.values()))\n    return x @ x\n\n\ndef 
typed_sos_dict_float(params: dict) -> float:\n    x = np.array(list(params.values()))\n    return x @ x\n\n\ndef typed_sos_dict_value(params: dict) -> ScalarFunctionValue:\n    x = np.array(list(params.values()))\n    return ScalarFunctionValue(x @ x)\n\n\ndef sos_dict_with_info(params):\n    x = np.array(list(params.values()))\n    return FunctionValue(x @ x, info={\"x\": x})\n\n\nMIN_FUNS_DICT = [\n    sos_dict,\n    marked_sos_dict,\n    typed_sos_dict_float,\n    typed_sos_dict_value,\n    sos_dict_with_info,\n]\n\n\ndef jac_dict(params):\n    return {k: 2 * v for k, v in params.items()}\n\n\n@mark.scalar\ndef marked_jac_dict(params):\n    return {k: 2 * v for k, v in params.items()}\n\n\nMIN_JACS_DICT = [None, jac_dict, marked_jac_dict]\n\n\n@pytest.mark.parametrize(\"fun\", MIN_FUNS_DICT)\n@pytest.mark.parametrize(\"jac\", MIN_JACS_DICT)\n@pytest.mark.parametrize(\"fun_and_jac_case\", FUN_AND_JAC_CASES)\ndef test_minimize_with_dict_inputs(fun, jac, fun_and_jac_case):\n    if fun_and_jac_case is None:\n        fun_and_jac = None\n    elif fun_and_jac_case == \"marked\":\n\n        @mark.scalar\n        def fun_and_jac(params):\n            return fun(params), {k: 2 * v for k, v in params.items()}\n    else:\n\n        def fun_and_jac(params):\n            return fun(params), {k: 2 * v for k, v in params.items()}\n\n    res = minimize(\n        fun=fun,\n        params={\"x\": 1, \"y\": 2, \"z\": 3},\n        algorithm=\"scipy_lbfgsb\",\n        jac=jac,\n        fun_and_jac=fun_and_jac,\n    )\n    for number in res.params.values():\n        assert np.allclose(number, 0, atol=1e-5)\n\n\n# ======================================================================================\n# maximize cases with dict params\n# ======================================================================================\n\n\ndef neg_sos_dict(params):\n    x = np.array(list(params.values()))\n    return -x @ x\n\n\n@mark.scalar\ndef marked_neg_sos_dict(params):\n    x = 
np.array(list(params.values()))\n    return -x @ x\n\n\ndef typed_neg_sos_dict_float(params: dict) -> float:\n    x = np.array(list(params.values()))\n    return -x @ x\n\n\ndef typed_neg_sos_dict_value(params: dict) -> ScalarFunctionValue:\n    x = np.array(list(params.values()))\n    return ScalarFunctionValue(-x @ x)\n\n\ndef neg_sos_dict_with_info(params):\n    x = np.array(list(params.values()))\n    return FunctionValue(-x @ x, info={\"x\": x})\n\n\nMAX_FUNS_DICT = [\n    neg_sos_dict,\n    marked_neg_sos_dict,\n    typed_neg_sos_dict_float,\n    typed_neg_sos_dict_value,\n    neg_sos_dict_with_info,\n]\n\n\ndef neg_jac_dict(params):\n    return {k: -2 * v for k, v in params.items()}\n\n\n@mark.scalar\ndef marked_neg_jac_dict(params):\n    return {k: -2 * v for k, v in params.items()}\n\n\nMAX_JACS_DICT = [None, neg_jac_dict, marked_neg_jac_dict]\n\n\n@pytest.mark.parametrize(\"fun\", MAX_FUNS_DICT)\n@pytest.mark.parametrize(\"jac\", MAX_JACS_DICT)\n@pytest.mark.parametrize(\"fun_and_jac_case\", FUN_AND_JAC_CASES)\ndef test_maximize_with_dict_inputs(fun, jac, fun_and_jac_case):\n    if fun_and_jac_case is None:\n        fun_and_jac = None\n    elif fun_and_jac_case == \"marked\":\n\n        @mark.scalar\n        def fun_and_jac(params):\n            return fun(params), {k: -2 * v for k, v in params.items()}\n    else:\n\n        def fun_and_jac(params):\n            return fun(params), {k: -2 * v for k, v in params.items()}\n\n    res = maximize(\n        fun=fun,\n        params={\"x\": 1, \"y\": 2, \"z\": 3},\n        algorithm=\"scipy_lbfgsb\",\n        jac=jac,\n        fun_and_jac=fun_and_jac,\n    )\n    for number in res.params.values():\n        assert np.allclose(number, 0, atol=1e-5)\n\n\n# ======================================================================================\n# invalid cases; Only test minimize for things that cannot plausibly depend on the\n# direction of the optimization\n# 
======================================================================================\n\n\ndef test_invalid_marker_for_jac_in_minimize():\n    @mark.least_squares\n    def jac(x):\n        return 2 * x\n\n    with pytest.warns(UserWarning):\n        minimize(\n            fun=sos,\n            params=np.array([1, 2, 3]),\n            algorithm=\"scipy_lbfgsb\",\n            jac=jac,\n        )\n\n\ndef test_invalid_marker_for_fun_and_jac_in_minimize():\n    @mark.least_squares\n    def fun_and_jac(x):\n        return x @ x, 2 * x\n\n    with pytest.warns(UserWarning):\n        minimize(\n            fun=sos,\n            params=np.array([1, 2, 3]),\n            algorithm=\"scipy_lbfgsb\",\n            fun_and_jac=fun_and_jac,\n        )\n\n\ndef invalid_sos(x):\n    return x\n\n\n@mark.scalar\ndef invalid_marked_sos(x):\n    return x\n\n\ndef invalid_typed_sos_array(x: np.ndarray) -> NDArray[np.float64]:\n    return x\n\n\ndef invalid_typed_sos_value(x: np.ndarray) -> ScalarFunctionValue:\n    return ScalarFunctionValue(x)\n\n\ndef invalid_sos_with_info(x):\n    return FunctionValue(x, info={\"x\": x})\n\n\nINVALID_FUNS = [\n    invalid_sos,\n    invalid_marked_sos,\n    invalid_typed_sos_array,\n    invalid_typed_sos_value,\n    invalid_sos_with_info,\n]\n\n\n@pytest.mark.parametrize(\"fun\", INVALID_FUNS)\ndef test_minimize_with_invalid_fun(fun):\n    with pytest.raises(InvalidFunctionError):\n        minimize(\n            fun=fun,\n            params=np.array([1, 2, 3]),\n            algorithm=\"scipy_lbfgsb\",\n        )\n\n\ndef invalid_jac(x):\n    return np.eye(len(x))\n\n\n@mark.scalar\ndef invalid_marked_jac(x):\n    return np.eye(len(x))\n\n\nINVALID_JACS = [invalid_jac, invalid_marked_jac]\n\n\n@pytest.mark.parametrize(\"jac\", INVALID_JACS)\ndef test_minimize_with_invalid_jac(jac):\n    with pytest.raises(Exception):  # noqa: B017\n        minimize(\n            fun=sos,\n            params=np.array([1, 2, 3]),\n            
algorithm=\"scipy_lbfgsb\",\n            jac=jac,\n        )\n\n\ndef invalid_fun_and_jac(x):\n    return x, np.eye(len(x))\n\n\n@mark.scalar\ndef invalid_marked_fun_and_jac(x):\n    return x, np.eye(len(x))\n\n\nINVALID_FUN_AND_JACS = [invalid_fun_and_jac, invalid_marked_fun_and_jac]\n\n\n@pytest.mark.parametrize(\"fun_and_jac\", INVALID_FUN_AND_JACS)\ndef test_minimize_with_invalid_fun_and_jac(fun_and_jac):\n    with pytest.raises(Exception):  # noqa: B017\n        minimize(\n            fun=sos,\n            params=np.array([1, 2, 3]),\n            algorithm=\"scipy_lbfgsb\",\n            fun_and_jac=fun_and_jac,\n        )\n"
  },
  {
    "path": "tests/optimagic/optimization/test_history.py",
    "content": "import numpy as np\nimport pandas as pd\nimport pytest\nfrom numpy.testing import assert_array_almost_equal as aaae\nfrom numpy.testing import assert_array_equal\nfrom pandas.testing import assert_frame_equal\nfrom pybaum import tree_map\n\nimport optimagic as om\nfrom optimagic.optimization.history import (\n    History,\n    HistoryEntry,\n    _apply_reduction_to_batches,\n    _calculate_monotone_sequence,\n    _get_batch_starts_and_stops,\n    _get_flat_param_names,\n    _get_flat_params,\n    _is_1d_array,\n    _task_to_categorical,\n    _validate_args_are_all_none_or_lists_of_same_length,\n)\nfrom optimagic.typing import Direction, EvalTask\n\n# ======================================================================================\n# Test methods to add data to History (add_entry, add_batch, init)\n# ======================================================================================\n\n\n@pytest.fixture\ndef history_entries():\n    return [\n        HistoryEntry(\n            params={\"a\": 1, \"b\": [2, 3]},\n            fun=1,\n            start_time=0.1,\n            stop_time=0.2,\n            task=EvalTask.FUN,\n        ),\n        HistoryEntry(\n            params={\"a\": 4, \"b\": [5, 6]},\n            fun=3,\n            start_time=0.2,\n            stop_time=0.3,\n            task=EvalTask.FUN,\n        ),\n        HistoryEntry(\n            params={\"a\": 7, \"b\": [8, 9]},\n            fun=2,\n            start_time=0.3,\n            stop_time=0.4,\n            task=EvalTask.FUN,\n        ),\n    ]\n\n\ndef test_history_add_entry(history_entries):\n    history = History(Direction.MINIMIZE)\n    for entry in history_entries:\n        history.add_entry(entry)\n\n    assert history.direction == Direction.MINIMIZE\n\n    assert history.params == [\n        {\"a\": 1, \"b\": [2, 3]},\n        {\"a\": 4, \"b\": [5, 6]},\n        {\"a\": 7, \"b\": [8, 9]},\n    ]\n    assert history.task == [EvalTask.FUN, EvalTask.FUN, EvalTask.FUN]\n    
assert history.batches == [0, 1, 2]\n    aaae(history.fun, [1, 3, 2])\n    aaae(history.start_time, [0.1, 0.2, 0.3])\n    aaae(history.stop_time, [0.2, 0.3, 0.4])\n\n    assert_array_equal(history.monotone_fun, np.array([1, 1, 1], dtype=np.float64))\n    assert_array_equal(\n        history.flat_params, np.arange(1, 10, dtype=np.float64).reshape(3, 3)\n    )\n\n\ndef test_history_add_batch(history_entries):\n    history = History(Direction.MAXIMIZE)\n    history.add_batch(history_entries)\n\n    assert history.direction == Direction.MAXIMIZE\n\n    assert history.params == [\n        {\"a\": 1, \"b\": [2, 3]},\n        {\"a\": 4, \"b\": [5, 6]},\n        {\"a\": 7, \"b\": [8, 9]},\n    ]\n    assert history.task == [EvalTask.FUN, EvalTask.FUN, EvalTask.FUN]\n    assert history.batches == [0, 0, 0]\n    aaae(history.fun, [1, 3, 2])\n    aaae(history.start_time, [0.1, 0.2, 0.3])\n    aaae(history.stop_time, [0.2, 0.3, 0.4])\n\n    assert_array_equal(history.monotone_fun, np.array([1, 3, 3], dtype=np.float64))\n    assert_array_equal(\n        history.flat_params, np.arange(1, 10, dtype=np.float64).reshape(3, 3)\n    )\n\n\ndef test_history_from_data():\n    data = {\n        \"params\": [{\"a\": 1, \"b\": [2, 3]}, {\"a\": 4, \"b\": [5, 6]}, {\"a\": 7, \"b\": [8, 9]}],\n        \"fun\": [1, 3, 2],\n        \"task\": [EvalTask.FUN, EvalTask.FUN, EvalTask.FUN],\n        \"batches\": [0, 0, 0],\n        \"start_time\": [0.0, 0.15, 0.3],\n        \"stop_time\": [0.1, 0.25, 0.4],\n    }\n\n    history = History(\n        direction=Direction.MAXIMIZE,\n        **data,\n    )\n\n    assert history.direction == Direction.MAXIMIZE\n\n    assert history.params == data[\"params\"]\n    assert history.task == data[\"task\"]\n    assert history.batches == data[\"batches\"]\n    aaae(history.fun, data[\"fun\"])\n    aaae(history.start_time, data[\"start_time\"])\n    aaae(history.stop_time, data[\"stop_time\"])\n\n    assert_array_equal(history.monotone_fun, np.array([1, 3, 3], 
dtype=np.float64))\n    assert_array_equal(\n        history.flat_params, np.arange(1, 10, dtype=np.float64).reshape(3, 3)\n    )\n\n\n# ======================================================================================\n# Test functionality of History\n# ======================================================================================\n\n\n@pytest.fixture\ndef params():\n    params_tree = {\"a\": None, \"b\": {\"c\": None, \"d\": (None, None)}}\n    return [\n        tree_map(lambda _: k, params_tree, is_leaf=lambda leaf: leaf is None)  # noqa: B023\n        for k in range(6)\n    ]\n\n\n@pytest.fixture\ndef history_data(params):\n    return {\n        \"fun\": [10, 9, None, None, 2, 5],\n        \"task\": [\n            EvalTask.FUN,\n            EvalTask.FUN,\n            EvalTask.JAC,\n            EvalTask.JAC,\n            EvalTask.FUN,\n            EvalTask.FUN_AND_JAC,\n        ],\n        \"start_time\": [0, 2, 5, 7, 10, 12],\n        \"stop_time\": [1, 4, 6, 9, 11, 14],\n        \"params\": params,\n        \"batches\": [0, 1, 2, 3, 4, 5],\n    }\n\n\n@pytest.fixture\ndef history(history_data):\n    return History(direction=Direction.MINIMIZE, **history_data)\n\n\n@pytest.fixture\ndef history_parallel(history_data):\n    data = history_data.copy()\n    data[\"batches\"] = [0, 0, 1, 1, 2, 3]\n    return History(direction=Direction.MINIMIZE, **data)\n\n\n# Function data, function value, and monotone function value\n# --------------------------------------------------------------------------------------\n\n\ndef test_history_fun_data_with_fun_evaluations_cost_model(history: History):\n    got = history.fun_data(\n        cost_model=om.timing.fun_evaluations,\n        monotone=False,\n    )\n    exp = pd.DataFrame(\n        {\n            \"fun\": [10, 9, np.nan, np.nan, 2, 5],\n            \"time\": [1, 2, 2, 2, 3, 4],\n            \"task\": [\n                \"fun\",\n                \"fun\",\n                \"jac\",\n                \"jac\",\n    
            \"fun\",\n                \"fun_and_jac\",\n            ],\n        }\n    )\n    assert_frame_equal(got, exp, check_dtype=False, check_categorical=False)\n\n\ndef test_history_fun_data_with_fun_evaluations_cost_model_and_monotone(\n    history: History,\n):\n    got = history.fun_data(\n        cost_model=om.timing.fun_evaluations,\n        monotone=True,\n    )\n    exp = pd.DataFrame(\n        {\n            \"fun\": [10, 9, np.nan, np.nan, 2, 2],\n            \"time\": [1, 2, 2, 2, 3, 4],\n            \"task\": [\n                \"fun\",\n                \"fun\",\n                \"jac\",\n                \"jac\",\n                \"fun\",\n                \"fun_and_jac\",\n            ],\n        }\n    )\n    assert_frame_equal(got, exp, check_dtype=False, check_categorical=False)\n\n\ndef test_history_fun_data_with_fun_batches_cost_model(history_parallel: History):\n    got = history_parallel.fun_data(\n        cost_model=om.timing.fun_batches,\n        monotone=False,\n    )\n    exp = pd.DataFrame(\n        {\n            \"fun\": [9, np.nan, 2, 5],\n            \"time\": [1.0, 1.0, 2.0, 3.0],\n            \"task\": [\n                \"fun\",\n                \"jac\",\n                \"fun\",\n                \"fun_and_jac\",\n            ],\n        }\n    )\n    assert_frame_equal(got, exp, check_dtype=False, check_categorical=False)\n\n\ndef test_history_fun_data_with_evaluation_time_cost_model(history: History):\n    got = history.fun_data(\n        cost_model=om.timing.evaluation_time,\n        monotone=False,\n    )\n    exp = pd.DataFrame(\n        {\n            \"fun\": [10, 9, np.nan, np.nan, 2, 5],\n            \"time\": [1, 3, 4, 6, 7, 9],\n            \"task\": [\n                \"fun\",\n                \"fun\",\n                \"jac\",\n                \"jac\",\n                \"fun\",\n                \"fun_and_jac\",\n            ],\n        }\n    )\n    assert_frame_equal(got, exp, check_dtype=False, 
check_categorical=False)\n\n\ndef test_fun_property(history: History):\n    assert_array_equal(history.fun, [10, 9, None, None, 2, 5])\n\n\ndef test_monotone_fun_property(history: History):\n    assert_array_equal(history.monotone_fun, np.array([10, 9, np.nan, np.nan, 2, 2]))\n\n\n# Acceptance\n# --------------------------------------------------------------------------------------\n\n\ndef test_is_accepted_property(history: History):\n    got = history.is_accepted\n    exp = np.array([True, True, False, False, True, False])\n    assert_array_equal(got, exp)\n\n\n# Parameter data, params, flat params, and flat params names\n# --------------------------------------------------------------------------------------\n\n\ndef test_params_data_fun_evaluations_cost_model(history: History):\n    got = history.params_data()\n    exp = (\n        pd.DataFrame(\n            {\n                \"counter\": np.tile(np.arange(6), reps=4),\n                \"name\": np.repeat(\n                    [\n                        \"a\",\n                        \"b_c\",\n                        \"b_d_0\",\n                        \"b_d_1\",\n                    ],\n                    6,\n                ),\n                \"value\": np.tile(list(range(6)), 4),\n                \"task\": np.tile(\n                    [\n                        \"fun\",\n                        \"fun\",\n                        \"jac\",\n                        \"jac\",\n                        \"fun\",\n                        \"fun_and_jac\",\n                    ],\n                    4,\n                ),\n                \"fun\": np.tile(\n                    [\n                        10,\n                        9,\n                        np.nan,\n                        np.nan,\n                        2,\n                        5,\n                    ],\n                    4,\n                ),\n            }\n        )\n        .set_index([\"counter\", \"name\"])\n        
.sort_index()\n    )\n    assert_frame_equal(got, exp, check_categorical=False, check_dtype=False)\n\n\ndef test_params_data_fun_evaluations_cost_model_parallel(history_parallel: History):\n    got = history_parallel.params_data()\n    exp = (\n        pd.DataFrame(\n            {\n                \"counter\": np.tile(np.arange(6), reps=4),\n                \"name\": np.repeat(\n                    [\n                        \"a\",\n                        \"b_c\",\n                        \"b_d_0\",\n                        \"b_d_1\",\n                    ],\n                    6,\n                ),\n                \"value\": np.tile(list(range(6)), 4),\n                \"task\": np.tile(\n                    [\n                        \"fun\",\n                        \"fun\",\n                        \"jac\",\n                        \"jac\",\n                        \"fun\",\n                        \"fun_and_jac\",\n                    ],\n                    4,\n                ),\n                \"fun\": np.tile(\n                    [\n                        10,\n                        9,\n                        np.nan,\n                        np.nan,\n                        2,\n                        5,\n                    ],\n                    4,\n                ),\n            }\n        )\n        .set_index([\"counter\", \"name\"])\n        .sort_index()\n    )\n    assert_frame_equal(got, exp, check_categorical=False, check_dtype=False)\n\n\ndef test_params_data_fun_evaluations_cost_model_parallel_collapse_batches(\n    history_parallel: History,\n):\n    got = history_parallel.params_data(collapse_batches=True)\n    exp = (\n        pd.DataFrame(\n            {\n                \"counter\": np.tile([0, 1, 2, 3], reps=4),\n                \"name\": np.repeat(\n                    [\n                        \"a\",\n                        \"b_c\",\n                        \"b_d_0\",\n                        \"b_d_1\",\n                    
],\n                    4,\n                ),\n                \"value\": np.tile([1, 2, 4, 5], 4),\n                \"task\": np.tile([\"fun\", \"jac\", \"fun\", \"fun_and_jac\"], 4),\n                \"fun\": np.tile([9, np.nan, 2, 5], 4),\n            }\n        )\n        .set_index([\"counter\", \"name\"])\n        .sort_index()\n    )\n    assert_frame_equal(got, exp, check_categorical=False, check_dtype=False)\n\n\ndef test_params_property(history, params):\n    assert history.params == params\n\n\ndef test_flat_params_property(history: History):\n    got = history.flat_params\n    assert_array_equal(got, [[k for _ in range(4)] for k in range(6)])\n\n\ndef test_flat_param_names(history: History):\n    assert history.flat_param_names == [\"a\", \"b_c\", \"b_d_0\", \"b_d_1\"]\n\n\n# Time\n# --------------------------------------------------------------------------------------\n\n\ndef test_get_total_timings_per_task_fun(history: History):\n    got = history._get_timings_per_task(EvalTask.FUN, cost_factor=1)\n    exp = np.array([1, 1, 0, 0, 1, 0])\n    assert_array_equal(got, exp)\n\n\ndef test_get_total_timings_per_task_jac_cost_factor_none(history: History):\n    got = history._get_timings_per_task(EvalTask.JAC, cost_factor=None)\n    exp = np.array([0, 0, 1, 2, 0, 0])\n    assert_array_equal(got, exp)\n\n\ndef test_get_total_timings_per_task_fun_and_jac(history: History):\n    got = history._get_timings_per_task(EvalTask.FUN_AND_JAC, cost_factor=-0.5)\n    exp = np.array([0, 0, 0, 0, 0, -0.5])\n    assert_array_equal(got, exp)\n\n\ndef test_get_total_timings_custom_cost_model(history: History):\n    cost_model = om.timing.CostModel(\n        fun=0.5, jac=1, fun_and_jac=2, label=\"test\", aggregate_batch_time=sum\n    )\n    got = history._get_total_timings(cost_model)\n    exp = np.array(\n        [\n            0.5,\n            0.5,\n            1,\n            1,\n            0.5,\n            2,\n        ]\n    )\n    assert_array_equal(got, 
exp)\n\n\ndef test_get_total_timings_fun_evaluations(history: History):\n    got = history._get_total_timings(cost_model=om.timing.fun_evaluations)\n    exp = np.array([1, 1, 0, 0, 1, 1])\n    assert_array_equal(got, exp)\n\n\ndef test_get_total_timings_fun_batches(history: History):\n    got = history._get_total_timings(cost_model=om.timing.fun_batches)\n    exp = np.array([1, 1, 0, 0, 1, 1])\n    assert_array_equal(got, exp)\n\n\ndef test_get_total_timings_fun_batches_parallel(history_parallel: History):\n    got = history_parallel._get_total_timings(cost_model=om.timing.fun_batches)\n    exp = np.array([1, 1, 0, 0, 1, 1])\n    assert_array_equal(got, exp)\n\n\ndef test_get_total_timings_evaluation_time(history: History):\n    got = history._get_total_timings(cost_model=om.timing.evaluation_time)\n    exp = np.array([1, 2, 1, 2, 1, 2])\n    assert_array_equal(got, exp)\n\n\ndef test_get_total_timings_wall_time(history: History):\n    got = history._get_total_timings(cost_model=\"wall_time\")\n    exp = np.array([1, 4, 6, 9, 11, 14])\n    assert_array_equal(got, exp)\n\n\ndef test_get_total_timings_invalid_cost_model(history: History):\n    with pytest.raises(\n        TypeError, match=\"cost_model must be a CostModel or 'wall_time'.\"\n    ):\n        history._get_total_timings(cost_model=\"invalid\")\n\n\ndef test_start_time_property(history: History):\n    assert history.start_time == [0, 2, 5, 7, 10, 12]\n\n\ndef test_stop_time_property(history: History):\n    assert history.stop_time == [1, 4, 6, 9, 11, 14]\n\n\n# Batches\n# --------------------------------------------------------------------------------------\n\n\ndef test_batches_property(history: History):\n    assert history.batches == [0, 1, 2, 3, 4, 5]\n\n\n# Tasks\n# --------------------------------------------------------------------------------------\n\n\ndef test_task_property(history: History):\n    assert history.task == [\n        EvalTask.FUN,\n        EvalTask.FUN,\n        EvalTask.JAC,\n      
  EvalTask.JAC,\n        EvalTask.FUN,\n        EvalTask.FUN_AND_JAC,\n    ]\n\n\n# ======================================================================================\n# Unit tests\n# ======================================================================================\n\n\ndef test_is_1d_array():\n    assert _is_1d_array(np.arange(2)) is True\n    assert _is_1d_array(np.eye(2)) is False\n    assert _is_1d_array([0, 1]) is False\n\n\ndef test_get_flat_params_pytree():\n    params = [\n        {\"a\": 1, \"b\": [0, 1], \"c\": np.arange(2)},\n        {\"a\": 2, \"b\": [1, 2], \"c\": np.arange(2)},\n    ]\n    got = _get_flat_params(params)\n    exp = [\n        [1, 0, 1, 0, 1],\n        [2, 1, 2, 0, 1],\n    ]\n    assert_array_equal(got, exp)\n\n\ndef test_get_flat_params_fast_path():\n    params = [np.arange(2)]\n    got = _get_flat_params(params)\n    exp = [[0, 1]]\n    assert_array_equal(got, exp)\n\n\ndef test_get_flat_param_names_pytree():\n    got = _get_flat_param_names(param={\"a\": 0, \"b\": [0, 1], \"c\": np.arange(2)})\n    exp = [\"a\", \"b_0\", \"b_1\", \"c_0\", \"c_1\"]\n    assert got == exp\n\n\ndef test_get_flat_param_names_fast_path():\n    got = _get_flat_param_names(param=np.arange(2))\n    exp = [\"0\", \"1\"]\n    assert got == exp\n\n\ndef test_calculate_monotone_sequence_maximize():\n    sequence = [0, 1, 0, 0, 2, 10, 0]\n    exp = [0, 1, 1, 1, 2, 10, 10]\n    got = _calculate_monotone_sequence(sequence, direction=Direction.MAXIMIZE)\n    assert_array_equal(exp, got)\n\n\ndef test_calculate_monotone_sequence_minimize():\n    sequence = [10, 11, 8, 12, 0, 5]\n    exp = [10, 10, 8, 8, 0, 0]\n    got = _calculate_monotone_sequence(sequence, direction=Direction.MINIMIZE)\n    assert_array_equal(exp, got)\n\n\ndef test_validate_args_are_all_none_or_lists_of_same_length():\n    _validate_args_are_all_none_or_lists_of_same_length(None, None)\n    _validate_args_are_all_none_or_lists_of_same_length([1], [1])\n\n    with 
pytest.raises(ValueError, match=\"All list arguments must have the same\"):\n        _validate_args_are_all_none_or_lists_of_same_length([1], [1, 2])\n\n    with pytest.raises(ValueError, match=\"All arguments must be lists of the same\"):\n        _validate_args_are_all_none_or_lists_of_same_length(None, [1])\n\n\ndef test_task_as_categorical():\n    task = [EvalTask.FUN, EvalTask.JAC, EvalTask.FUN_AND_JAC]\n    got = _task_to_categorical(task)\n    assert got.tolist() == [\"fun\", \"jac\", \"fun_and_jac\"]\n    assert isinstance(got.dtype, pd.CategoricalDtype)\n\n\ndef test_get_batch_starts_and_stops():\n    batches = [0, 0, 1, 1, 1, 2, 2, 3]\n    got_starts, got_stops = _get_batch_starts_and_stops(batches)\n    assert got_starts == [0, 2, 5, 7]\n    assert got_stops == [2, 5, 7, 8]\n\n\ndef test_apply_to_batch_sum():\n    data = np.array([0, 1, 2, 3, 4])\n    batch_ids = [0, 0, 1, 1, 2]\n    exp = np.array([1, 5, 4])\n    got = _apply_reduction_to_batches(data, batch_ids, sum)\n    assert_array_equal(exp, got)\n\n\ndef test_apply_to_batch_max():\n    data = np.array([0, 1, 2, 3, 4])\n    batch_ids = [0, 0, 1, 1, 2]\n    exp = np.array([1, 3, 4])\n    got = _apply_reduction_to_batches(data, batch_ids, max)\n    assert_array_equal(exp, got)\n\n\ndef test_apply_to_batch_broken_func():\n    data = np.array([0, 1, 2, 3, 4])\n    batch_ids = [0, 0, 1, 1, 2]\n    with pytest.raises(ValueError, match=\"Calling function <lambda> on batch [0, 0]\"):\n        _apply_reduction_to_batches(data, batch_ids, reduction_function=lambda _: 1 / 0)\n\n\ndef test_apply_to_batch_func_with_non_scalar_return():\n    data = np.array([0, 1, 2, 3, 4])\n    batch_ids = [0, 0, 1, 1, 2]\n    with pytest.raises(ValueError, match=\"Function <lambda> did not return a scalar\"):\n        _apply_reduction_to_batches(\n            data, batch_ids, reduction_function=lambda _list: _list\n        )\n"
  },
  {
    "path": "tests/optimagic/optimization/test_history_collection.py",
    "content": "import sys\nfrom dataclasses import dataclass\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_array_almost_equal as aaae\nfrom numpy.testing import assert_array_equal as aae\n\nfrom optimagic import SQLiteLogReader, mark\nfrom optimagic.algorithms import AVAILABLE_ALGORITHMS\nfrom optimagic.logging import SQLiteLogOptions\nfrom optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult\nfrom optimagic.optimization.optimize import minimize\nfrom optimagic.parameters.bounds import Bounds\nfrom optimagic.typing import AggregationLevel\n\nOPTIMIZERS = []\nBOUNDED = []\nfor name, algo in AVAILABLE_ALGORITHMS.items():\n    info = algo.algo_info\n    if not info.disable_history:\n        if info.supports_parallelism:\n            OPTIMIZERS.append(name)\n        if info.supports_bounds:\n            BOUNDED.append(name)\n\n\n@pytest.mark.skipif(sys.platform == \"win32\", reason=\"Slow on windows.\")\n@pytest.mark.parametrize(\"algorithm\", OPTIMIZERS)\ndef test_history_collection_with_parallelization(algorithm, tmp_path):\n    lb = np.zeros(5) if algorithm in BOUNDED else None\n    ub = np.full(5, 10) if algorithm in BOUNDED else None\n\n    path = tmp_path / \"log.db\"\n\n    algo_options = {\"n_cores\": 2}\n    if algorithm == \"nevergrad_pso\":\n        algo_options[\"stopping_maxfun\"] = 15\n    else:\n        algo_options[\"stopping_maxiter\"] = 3\n\n    collected_hist = minimize(\n        fun=mark.least_squares(lambda x: x),\n        params=np.arange(5),\n        algorithm=algorithm,\n        bounds=Bounds(lower=lb, upper=ub),\n        algo_options=algo_options,\n        logging=SQLiteLogOptions(path=path, if_database_exists=\"replace\"),\n    ).history\n\n    reader = SQLiteLogReader(path)\n\n    log_hist = reader.read_history()\n\n    # We cannot expect the order to be the same\n    aaae(sorted(collected_hist.fun), sorted(log_hist.fun))\n\n\n@mark.minimizer(\n    name=\"dummy\",\n    
solver_type=AggregationLevel.SCALAR,\n    is_available=True,\n    is_global=False,\n    needs_jac=False,\n    needs_hess=False,\n    needs_bounds=False,\n    supports_parallelism=True,\n    supports_bounds=False,\n    supports_infinite_bounds=False,\n    supports_linear_constraints=False,\n    supports_nonlinear_constraints=False,\n    disable_history=False,\n)\n@dataclass(frozen=True)\nclass DummyOptimizer(Algorithm):\n    n_cores: int = 1\n    batch_size: int = 1\n\n    def _solve_internal_problem(self, problem, x0):\n        assert self.batch_size in [1, 2, 4]\n\n        xs = np.arange(15).repeat(len(x0)).reshape(15, len(x0))\n\n        start_index = 0\n\n        for iteration in range(3):\n            start_index = iteration * 5\n            # do four evaluations in a batch evaluator\n            problem.batch_fun(\n                list(xs[start_index : start_index + 4]),\n                n_cores=self.n_cores,\n                batch_size=self.batch_size,\n            )\n\n            # do one evaluation without the batch evaluator\n            problem.fun(xs[start_index + 4])\n\n        out = InternalOptimizeResult(\n            x=xs[-1],\n            fun=5,\n            success=True,\n            n_fun_evals=15,\n            n_iterations=3,\n        )\n        return out\n\n\ndef _get_fake_history(batch_size):\n    if batch_size == 1:\n        batches = list(range(15))\n    elif batch_size == 2:\n        batches = [0, 0, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7, 7, 8]\n    elif batch_size == 4:\n        batches = [0, 0, 0, 0, 1, 2, 2, 2, 2, 3, 4, 4, 4, 4, 5]\n    else:\n        raise ValueError(\"batch_size must be 1, 2 or 4.\")\n\n    out = {\n        \"params\": list(np.arange(15).repeat(5).reshape(15, 5)),\n        \"criterion\": [5] * 15,\n        \"batches\": batches,\n    }\n\n    return out\n\n\ndef _fake_criterion(x):\n    return 5\n\n\nCASES = [(1, 1), (1, 2), (2, 2), (1, 4), (2, 4)]\n\n\n@pytest.mark.skipif(sys.platform == \"win32\", reason=\"Slow on 
windows.\")\n@pytest.mark.parametrize(\"n_cores, batch_size\", CASES)\ndef test_history_collection_with_dummy_optimizer(n_cores, batch_size):\n    options = {\n        \"batch_size\": batch_size,\n        \"n_cores\": n_cores,\n    }\n\n    res = minimize(\n        fun=_fake_criterion,\n        params=np.arange(5),\n        algorithm=DummyOptimizer,\n        algo_options=options,\n    )\n\n    got_history = res.history\n\n    expected_history = _get_fake_history(batch_size)\n\n    aae(got_history.batches, expected_history[\"batches\"])\n    assert got_history.fun == expected_history[\"criterion\"][: len(got_history.fun)]\n    aaae(got_history.params, expected_history[\"params\"][: len(got_history.params)])\n"
  },
  {
    "path": "tests/optimagic/optimization/test_infinite_and_incomplete_bounds.py",
    "content": "import numpy as np\nimport pytest\nfrom numpy.testing import assert_array_almost_equal as aaae\n\nfrom optimagic import mark\nfrom optimagic.config import IS_NEVERGRAD_INSTALLED\nfrom optimagic.optimization.optimize import minimize\n\n\n@mark.least_squares\ndef sos(x):\n    return x\n\n\n@pytest.mark.skipif(\n    not IS_NEVERGRAD_INSTALLED,\n    reason=\"nevergrad not installed\",\n)\ndef test_no_bounds_with_nevergrad():\n    res = minimize(\n        fun=sos,\n        params=np.arange(3),\n        algorithm=\"nevergrad_cmaes\",\n        collect_history=True,\n        skip_checks=True,\n        algo_options={\"seed\": 12345, \"stopping_maxfun\": 10000},\n    )\n    aaae(res.x, np.zeros(3), 4)\n"
  },
  {
    "path": "tests/optimagic/optimization/test_internal_optimization_problem.py",
    "content": "from copy import copy\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_array_almost_equal as aaae\n\nfrom optimagic import NumdiffOptions\nfrom optimagic.batch_evaluators import process_batch_evaluator\nfrom optimagic.config import CRITERION_PENALTY_CONSTANT, CRITERION_PENALTY_SLOPE\nfrom optimagic.exceptions import UserFunctionRuntimeError\nfrom optimagic.optimization.error_penalty import get_error_penalty_function\nfrom optimagic.optimization.fun_value import (\n    LeastSquaresFunctionValue,\n    ScalarFunctionValue,\n)\nfrom optimagic.optimization.internal_optimization_problem import (\n    InternalBounds,\n    InternalOptimizationProblem,\n    SphereExampleInternalOptimizationProblem,\n    SphereExampleInternalOptimizationProblemWithConverter,\n)\nfrom optimagic.parameters.conversion import Converter\nfrom optimagic.typing import AggregationLevel, Direction, ErrorHandling, EvalTask\n\n\n@pytest.fixture\ndef base_problem():\n    \"\"\"Set up a basic InternalOptimizationProblem that can be modified for tests.\"\"\"\n\n    def fun(params):\n        return LeastSquaresFunctionValue(value=params, info={\"mean\": params.mean()})\n\n    def jac(params):\n        return 2 * params\n\n    def fun_and_jac(params):\n        return fun(params), jac(params)\n\n    converter = Converter(\n        params_to_internal=lambda x: x,\n        params_from_internal=lambda x: x,\n        derivative_to_internal=lambda d, x: d,\n        has_transforming_constraints=False,\n    )\n\n    solver_type = AggregationLevel.SCALAR\n\n    direction = Direction.MINIMIZE\n\n    bounds = InternalBounds(lower=None, upper=None)\n\n    numdiff_options = NumdiffOptions()\n\n    error_handling = ErrorHandling.RAISE\n\n    batch_evaluator = process_batch_evaluator(batch_evaluator=\"joblib\")\n\n    linear_constraints = None\n\n    nonlinear_constraints = None\n\n    problem = InternalOptimizationProblem(\n        fun=fun,\n        jac=jac,\n        
fun_and_jac=fun_and_jac,\n        converter=converter,\n        solver_type=solver_type,\n        direction=direction,\n        bounds=bounds,\n        numdiff_options=numdiff_options,\n        error_handling=error_handling,\n        error_penalty_func=None,\n        batch_evaluator=batch_evaluator,\n        linear_constraints=linear_constraints,\n        nonlinear_constraints=nonlinear_constraints,\n        logger=None,\n    )\n\n    return problem\n\n\n# ======================================================================================\n# Test fun, jac, fun_and_jac\n# ======================================================================================\n\n\ndef test_base_problem_fun(base_problem):\n    got = base_problem.fun(np.array([1, 2, 3]))\n    expected = 14\n    assert got == expected\n\n\ndef test_base_problem_jac(base_problem):\n    got = base_problem.jac(np.array([1, 2, 3]))\n    expected = 2 * np.array([1, 2, 3])\n    aaae(got, expected)\n\n\ndef test_base_problem_fun_and_jac(base_problem):\n    got_fun, got_jac = base_problem.fun_and_jac(np.array([1, 2, 3]))\n    expected_fun, expected_jac = (14, 2 * np.array([1, 2, 3]))\n    assert got_fun == expected_fun\n    aaae(got_jac, expected_jac)\n\n\ndef test_fun_and_jac_is_called_for_jac_if_jac_is_not_given(base_problem):\n    \"\"\"This makes sure we don't use numdiff if we don't have to.\"\"\"\n    call_log = []\n\n    def fun_and_jac(params):\n        call_log.append(\"fun_and_jac\")\n        return LeastSquaresFunctionValue(value=params), 2 * np.array([1, 2, 3])\n\n    base_problem._jac = None\n    base_problem._fun_and_jac = fun_and_jac\n    base_problem.jac(np.array([1, 2, 3]))\n\n    assert call_log == [\"fun_and_jac\"]\n\n\ndef test_jac_is_called_for_fun_and_jac_if_fun_is_not_given(base_problem):\n    \"\"\"This makes sure we don't use numdiff if we don't have to.\"\"\"\n    call_log = []\n\n    def jac(params):\n        call_log.append(\"jac\")\n        return 2 * np.array([1, 2, 3])\n\n    
base_problem._fun_and_jac = None\n    base_problem._jac = jac\n\n    base_problem.fun_and_jac(np.array([1, 2, 3]))\n\n    assert call_log == [\"jac\"]\n\n\ndef test_base_problem_jac_via_numdiff(base_problem):\n    base_problem._jac = None\n    base_problem._fun_and_jac = None\n\n    got = base_problem.jac(np.array([1, 2, 3]))\n    expected = 2 * np.array([1, 2, 3])\n    aaae(got, expected)\n\n\ndef test_base_problem_fun_and_jac_via_numdiff(base_problem):\n    base_problem._jac = None\n    base_problem._fun_and_jac = None\n\n    got_fun, got_jac = base_problem.fun_and_jac(np.array([1, 2, 3]))\n    expected_fun, expected_jac = (14, 2 * np.array([1, 2, 3]))\n    assert got_fun == expected_fun\n    aaae(got_jac, expected_jac)\n\n\ndef test_error_in_fun_with_error_handling_raise(base_problem):\n    def fun(params):\n        raise ValueError(\"Test error\")\n\n    base_problem._fun = fun\n\n    with pytest.raises(UserFunctionRuntimeError):\n        base_problem.fun(np.array([1, 2, 3]))\n\n\ndef test_error_in_fun_during_numdiff_with_error_handling_raise(base_problem):\n    def fun(params):\n        raise ValueError(\"Test error\")\n\n    base_problem._fun = fun\n    base_problem._jac = None\n    base_problem._fun_and_jac = None\n\n    with pytest.raises(UserFunctionRuntimeError):\n        base_problem.jac(np.array([1, 2, 3]))\n\n\ndef test_base_problem_different_jac_versions(base_problem):\n    got_jac_1 = base_problem.jac(np.array([1, 2, 3]))\n    _, got_jac_2 = base_problem.fun_and_jac(np.array([1, 2, 3]))\n\n    base_problem._jac = None\n    base_problem._fun_and_jac = None\n    got_jac_3 = base_problem.jac(np.array([1, 2, 3]))\n\n    aaae(got_jac_1, got_jac_2)\n    aaae(got_jac_1, got_jac_3)\n\n\ndef test_base_problem_fun_for_ls_optimizer(base_problem):\n    base_problem._solver_type = AggregationLevel.LEAST_SQUARES\n\n    got = base_problem.fun(np.array([1, 2, 3]))\n    expected = np.array([1, 2, 3])\n    aaae(got, expected)\n\n\ndef 
test_base_problem_exploration_fun(base_problem):\n    got = base_problem.exploration_fun(\n        [np.array([1, 2, 3]), np.array([4, 5, 6])], n_cores=1\n    )\n    expected = [14, 77]\n    assert got == expected\n\n\n# ======================================================================================\n# test history\n# ======================================================================================\n\n\ndef test_history_with_fun(base_problem):\n    base_problem.fun(np.array([1, 2, 3]))\n\n    assert len(base_problem.history.params) == 1\n    aaae(base_problem.history.params[0], [1, 2, 3])\n    assert base_problem.history.fun == [14]\n    assert base_problem.history.task == [EvalTask.FUN]\n    assert base_problem.history.batches == [0]\n\n\ndef test_history_with_batch_fun(base_problem):\n    base_problem.batch_fun(\n        [np.array([1, 2, 3]), np.array([4, 5, 6])], n_cores=1, batch_size=2\n    )\n    assert len(base_problem.history.params) == 2\n    aaae(base_problem.history.params[0], [1, 2, 3])\n    aaae(base_problem.history.params[1], [4, 5, 6])\n    assert base_problem.history.fun == [14, 77]\n    assert base_problem.history.task == [EvalTask.FUN, EvalTask.FUN]\n    assert base_problem.history.batches == [0, 0]\n\n\ndef test_history_with_jac(base_problem):\n    base_problem.jac(np.array([1, 2, 3]))\n\n    assert len(base_problem.history.params) == 1\n    aaae(base_problem.history.params[0], [1, 2, 3])\n    assert base_problem.history.fun == [None]\n    assert base_problem.history.task == [EvalTask.JAC]\n    assert base_problem.history.batches == [0]\n\n\ndef test_history_with_batch_jac(base_problem):\n    base_problem.batch_jac(\n        [np.array([1, 2, 3]), np.array([4, 5, 6])], n_cores=1, batch_size=2\n    )\n    assert len(base_problem.history.params) == 2\n    aaae(base_problem.history.params[0], [1, 2, 3])\n    aaae(base_problem.history.params[1], [4, 5, 6])\n    assert base_problem.history.fun == [None, None]\n    assert 
base_problem.history.task == [EvalTask.JAC, EvalTask.JAC]\n    assert base_problem.history.batches == [0, 0]\n\n\ndef test_history_with_fun_and_jac(base_problem):\n    base_problem.fun_and_jac(np.array([1, 2, 3]))\n\n    assert len(base_problem.history.params) == 1\n    aaae(base_problem.history.params[0], [1, 2, 3])\n    assert base_problem.history.fun == [14]\n    assert base_problem.history.task == [EvalTask.FUN_AND_JAC]\n    assert base_problem.history.batches == [0]\n\n\ndef test_history_with_batch_fun_and_jac(base_problem):\n    base_problem.batch_fun_and_jac(\n        [np.array([1, 2, 3]), np.array([4, 5, 6])], n_cores=1, batch_size=2\n    )\n    assert len(base_problem.history.params) == 2\n    aaae(base_problem.history.params[0], [1, 2, 3])\n    aaae(base_problem.history.params[1], [4, 5, 6])\n    assert base_problem.history.fun == [14, 77]\n    assert base_problem.history.task == [EvalTask.FUN_AND_JAC, EvalTask.FUN_AND_JAC]\n    assert base_problem.history.batches == [0, 0]\n\n\ndef test_history_with_exploration_fun(base_problem):\n    base_problem.exploration_fun(\n        [np.array([1, 2, 3]), np.array([4, 5, 6])], n_cores=1, batch_size=2\n    )\n    assert len(base_problem.history.params) == 2\n    aaae(base_problem.history.params[0], [1, 2, 3])\n    aaae(base_problem.history.params[1], [4, 5, 6])\n    assert base_problem.history.fun == [14, 77]\n    assert base_problem.history.task == [EvalTask.EXPLORATION, EvalTask.EXPLORATION]\n    assert base_problem.history.batches == [0, 0]\n\n\ndef test_with_history_copy_constructor(base_problem):\n    new = base_problem.with_new_history()\n    new.fun(np.array([1, 2, 3]))\n\n    assert len(new.history.params) == 1\n    assert len(base_problem.history.params) == 0\n\n\n# ======================================================================================\n# test batch versions\n# ======================================================================================\n\n\n@pytest.mark.parametrize(\"n_cores\", 
[1, 2])\ndef test_batch_fun(base_problem, n_cores):\n    got = base_problem.batch_fun(\n        [np.array([1, 2, 3]), np.array([4, 5, 6])], n_cores=n_cores\n    )\n    expected = [14, 77]\n    assert got == expected\n\n\n@pytest.mark.parametrize(\"n_cores\", [1, 2])\ndef test_batch_jac(base_problem, n_cores):\n    got = base_problem.batch_jac(\n        [np.array([1, 2, 3]), np.array([4, 5, 6])], n_cores=n_cores\n    )\n    expected = [2 * np.array([1, 2, 3]), 2 * np.array([4, 5, 6])]\n    aaae(got[0], expected[0])\n    aaae(got[1], expected[1])\n\n\n@pytest.mark.parametrize(\"n_cores\", [1, 2])\ndef test_batch_fun_and_jac(base_problem, n_cores):\n    res = base_problem.batch_fun_and_jac(\n        [np.array([1, 2, 3]), np.array([4, 5, 6])], n_cores=n_cores\n    )\n    got_fun = [r[0] for r in res]\n    got_jac = [r[1] for r in res]\n    expected_fun = [14, 77]\n    expected_jac = [2 * np.array([1, 2, 3]), 2 * np.array([4, 5, 6])]\n    assert got_fun == expected_fun\n    aaae(got_jac, expected_jac)\n\n\n# ======================================================================================\n# test sign flipping\n# ======================================================================================\n\n\n@pytest.fixture\ndef max_problem(base_problem):\n    \"\"\"Flip the sign of the functions.\n\n    The sign should be flipped back by InternalOptimizationProblem such that in the end\n    the same values for fun, jac, and fun_and_jac are returned as for the base_problem.\n\n    \"\"\"\n\n    def fun(params):\n        return ScalarFunctionValue(value=-params @ params)\n\n    def jac(params):\n        return -2 * params\n\n    def fun_and_jac(params):\n        return fun(params), jac(params)\n\n    max_problem = copy(base_problem)\n    max_problem._direction = Direction.MAXIMIZE\n    max_problem._fun = fun\n    max_problem._jac = jac\n    max_problem._fun_and_jac = fun_and_jac\n\n    return max_problem\n\n\ndef test_max_problem_fun(max_problem):\n    got = 
max_problem.fun(np.array([1, 2, 3]))\n    expected = 14\n    assert got == expected\n\n\ndef test_max_problem_jac(max_problem):\n    got = max_problem.jac(np.array([1, 2, 3]))\n    expected = 2 * np.array([1, 2, 3])\n    aaae(got, expected)\n\n\ndef test_max_problem_fun_and_jac(max_problem):\n    got_fun, got_jac = max_problem.fun_and_jac(np.array([1, 2, 3]))\n    expected_fun, expected_jac = (14, 2 * np.array([1, 2, 3]))\n    assert got_fun == expected_fun\n    aaae(got_jac, expected_jac)\n\n\ndef test_jac_via_numdiff(max_problem):\n    max_problem._jac = None\n    max_problem._fun_and_jac = None\n\n    got = max_problem.jac(np.array([1, 2, 3]))\n    expected = 2 * np.array([1, 2, 3])\n    aaae(got, expected)\n\n\ndef test_fun_and_jac_via_numdiff(max_problem):\n    max_problem._jac = None\n    max_problem._fun_and_jac = None\n\n    got_fun, got_jac = max_problem.fun_and_jac(np.array([1, 2, 3]))\n    expected_fun, expected_jac = (14, 2 * np.array([1, 2, 3]))\n    assert got_fun == expected_fun\n    aaae(got_jac, expected_jac)\n\n\ndef test_max_problem_exploration_fun(max_problem):\n    got = max_problem.exploration_fun(\n        [np.array([1, 2, 3]), np.array([4, 5, 6])], n_cores=1\n    )\n    expected = [14, 77]\n    assert got == expected\n\n\n# ======================================================================================\n# test pytree ls output and params\n# ======================================================================================\n\n\n@pytest.fixture\ndef pytree_problem(base_problem):\n    def fun(params):\n        assert isinstance(params, dict)\n        return LeastSquaresFunctionValue(value=params)\n\n    def jac(params):\n        assert isinstance(params, dict)\n        out = {}\n        for outer_key in params:\n            row = {}\n            for inner_key in params:\n                if inner_key == outer_key:\n                    row[inner_key] = 1\n                else:\n                    row[inner_key] = 0\n            
out[outer_key] = row\n        return out\n\n    def fun_and_jac(params):\n        assert isinstance(params, dict)\n        return fun(params), jac(params)\n\n    def derivative_flatten(tree, x):\n        out = [list(row.values()) for row in tree.values()]\n        return np.array(out)\n\n    converter = Converter(\n        params_to_internal=lambda x: np.array(list(x.values())),\n        params_from_internal=lambda x: {\n            k: v for k, v in zip([\"a\", \"b\", \"c\"], x, strict=False)\n        },\n        derivative_to_internal=derivative_flatten,\n        has_transforming_constraints=False,\n    )\n\n    solver_type = AggregationLevel.LEAST_SQUARES\n\n    direction = Direction.MINIMIZE\n\n    bounds = InternalBounds(lower=None, upper=None)\n\n    numdiff_options = NumdiffOptions()\n\n    error_handling = ErrorHandling.RAISE\n\n    batch_evaluator = process_batch_evaluator(batch_evaluator=\"joblib\")\n\n    linear_constraints = None\n\n    nonlinear_constraints = None\n\n    problem = InternalOptimizationProblem(\n        fun=fun,\n        jac=jac,\n        fun_and_jac=fun_and_jac,\n        converter=converter,\n        solver_type=solver_type,\n        direction=direction,\n        bounds=bounds,\n        numdiff_options=numdiff_options,\n        error_handling=error_handling,\n        error_penalty_func=None,\n        batch_evaluator=batch_evaluator,\n        linear_constraints=linear_constraints,\n        nonlinear_constraints=nonlinear_constraints,\n        logger=None,\n    )\n\n    return problem\n\n\ndef test_pytree_problem_fun(pytree_problem):\n    got = pytree_problem.fun(np.array([1, 2, 3]))\n    expected = np.array([1, 2, 3])\n    aaae(got, expected)\n\n\ndef test_pytree_problem_fun_scalar_output(pytree_problem):\n    pytree_problem._solver_type = AggregationLevel.SCALAR\n    got = pytree_problem.fun(np.array([1, 2, 3]))\n    expected = 14\n    assert got == expected\n\n\ndef test_pytree_problem_jac(pytree_problem):\n    got = 
pytree_problem.jac(np.array([1, 2, 3]))\n    expected = np.eye(3)\n    aaae(got, expected)\n\n\ndef test_pytree_problem_fun_and_jac(pytree_problem):\n    got_fun, got_jac = pytree_problem.fun_and_jac(np.array([1, 2, 3]))\n    expected_fun, expected_jac = np.array([1, 2, 3]), np.eye(3)\n    aaae(got_jac, expected_jac)\n    aaae(got_fun, expected_fun)\n\n\ndef test_pytree_problem_exploration_fun(pytree_problem):\n    got = pytree_problem.exploration_fun(\n        [np.array([1, 2, 3]), np.array([4, 5, 6])], n_cores=1\n    )\n    expected = [14, 77]\n    assert got == expected\n\n\ndef test_numerical_jac_for_pytree_problem(pytree_problem):\n    pytree_problem._jac = None\n    pytree_problem._fun_and_jac = None\n\n    got = pytree_problem.jac(np.array([1, 2, 3]))\n    expected = np.eye(3)\n    aaae(got, expected)\n\n\ndef test_numerical_fun_and_jac_for_pytree_problem(pytree_problem):\n    pytree_problem._jac = None\n    pytree_problem._fun_and_jac = None\n\n    got_fun, got_jac = pytree_problem.fun_and_jac(np.array([1, 2, 3]))\n    expected_fun, expected_jac = np.array([1, 2, 3]), np.eye(3)\n    aaae(got_fun, expected_fun)\n    aaae(got_jac, expected_jac)\n\n\n# ======================================================================================\n# test error penalty with minimize\n# ======================================================================================\n\n\n@pytest.fixture\ndef error_min_problem():\n    \"\"\"Set up a basic InternalOptimizationProblem that can be modified for tests.\"\"\"\n\n    def fun(params):\n        raise ValueError(\"Test error\")\n\n    def jac(params):\n        raise ValueError(\"Test error\")\n\n    def fun_and_jac(params):\n        raise ValueError(\"Test error\")\n\n    converter = Converter(\n        params_to_internal=lambda x: x,\n        params_from_internal=lambda x: x,\n        derivative_to_internal=lambda d, x: d,\n        has_transforming_constraints=False,\n    )\n\n    solver_type = AggregationLevel.SCALAR\n\n    
direction = Direction.MINIMIZE\n\n    bounds = InternalBounds(lower=None, upper=None)\n\n    numdiff_options = NumdiffOptions()\n\n    error_handling = ErrorHandling.CONTINUE\n\n    batch_evaluator = process_batch_evaluator(batch_evaluator=\"joblib\")\n\n    linear_constraints = None\n\n    nonlinear_constraints = None\n\n    start_params = np.array([1, 2, 3])\n\n    error_penalty_function = get_error_penalty_function(\n        start_x=start_params,\n        error_penalty=None,\n        start_criterion=ScalarFunctionValue(14),\n        direction=direction,\n        solver_type=solver_type,\n    )\n\n    problem = InternalOptimizationProblem(\n        fun=fun,\n        jac=jac,\n        fun_and_jac=fun_and_jac,\n        converter=converter,\n        solver_type=solver_type,\n        direction=direction,\n        bounds=bounds,\n        numdiff_options=numdiff_options,\n        error_handling=error_handling,\n        error_penalty_func=error_penalty_function,\n        batch_evaluator=batch_evaluator,\n        linear_constraints=linear_constraints,\n        nonlinear_constraints=nonlinear_constraints,\n        logger=None,\n    )\n\n    return problem\n\n\ndef test_error_in_fun_minimize(error_min_problem):\n    got = error_min_problem.fun(np.array([2, 3, 4]))\n    expected = 28 + CRITERION_PENALTY_CONSTANT + np.sqrt(3) * CRITERION_PENALTY_SLOPE\n    assert np.allclose(got, expected)\n\n\ndef test_error_in_jac_minimize(error_min_problem):\n    got = error_min_problem.jac(np.array([2, 3, 4]))\n    expected = np.full(3, CRITERION_PENALTY_SLOPE) / np.sqrt(3)\n    aaae(got, expected)\n\n\ndef test_error_in_fun_and_jac_minimize(error_min_problem):\n    got_fun, got_jac = error_min_problem.fun_and_jac(np.array([2, 3, 4]))\n    expected_fun = (\n        28 + CRITERION_PENALTY_CONSTANT + np.sqrt(3) * CRITERION_PENALTY_SLOPE\n    )\n    expected_jac = np.full(3, CRITERION_PENALTY_SLOPE) / np.sqrt(3)\n    assert np.allclose(got_fun, expected_fun)\n    aaae(got_jac, 
expected_jac)\n\n\ndef test_error_in_numerical_jac_minimize(error_min_problem):\n    error_min_problem._jac = None\n    error_min_problem._fun_and_jac = None\n\n    got = error_min_problem.jac(np.array([2, 3, 4]))\n    expected = np.full(3, CRITERION_PENALTY_SLOPE) / np.sqrt(3)\n    aaae(got, expected)\n\n\ndef test_error_in_exploration_fun_minimize(error_min_problem):\n    got = error_min_problem.exploration_fun(\n        [np.array([2, 3, 4]), np.array([5, 6, 7])], n_cores=1\n    )\n    expected = [-np.inf, -np.inf]\n    assert np.allclose(got, expected)\n\n\n# ======================================================================================\n# test error penalty with maximize\n# ======================================================================================\n\n\n@pytest.fixture\ndef error_max_problem(error_min_problem):\n    problem = copy(error_min_problem)\n    problem._direction = Direction.MAXIMIZE\n\n    error_penalty_function = get_error_penalty_function(\n        start_x=np.array([1, 2, 3]),\n        error_penalty=None,\n        start_criterion=ScalarFunctionValue(-14),\n        direction=problem._direction,\n        solver_type=problem._solver_type,\n    )\n\n    problem._error_penalty_func = error_penalty_function\n    return problem\n\n\ndef test_error_in_fun_maximize(error_max_problem):\n    got = error_max_problem.fun(np.array([2, 3, 4]))\n    expected = 28 + CRITERION_PENALTY_CONSTANT + np.sqrt(3) * CRITERION_PENALTY_SLOPE\n    assert np.allclose(got, expected)\n\n\ndef test_error_in_jac_maximize(error_max_problem):\n    got = error_max_problem.jac(np.array([2, 3, 4]))\n    expected = np.full(3, CRITERION_PENALTY_SLOPE) / np.sqrt(3)\n    aaae(got, expected)\n\n\ndef test_error_in_fun_and_jac_maximize(error_max_problem):\n    got_fun, got_jac = error_max_problem.fun_and_jac(np.array([2, 3, 4]))\n    expected_fun = (\n        28 + CRITERION_PENALTY_CONSTANT + np.sqrt(3) * CRITERION_PENALTY_SLOPE\n    )\n    expected_jac = np.full(3, 
CRITERION_PENALTY_SLOPE) / np.sqrt(3)\n    assert np.allclose(got_fun, expected_fun)\n    aaae(got_jac, expected_jac)\n\n\ndef test_error_in_numerical_jac_maximize(error_max_problem):\n    error_max_problem._jac = None\n    error_max_problem._fun_and_jac = None\n\n    got = error_max_problem.jac(np.array([2, 3, 4]))\n    expected = np.full(3, CRITERION_PENALTY_SLOPE) / np.sqrt(3)\n    aaae(got, expected)\n\n\ndef test_error_in_exploration_fun_maximize(error_max_problem):\n    got = error_max_problem.exploration_fun(\n        [np.array([2, 3, 4]), np.array([5, 6, 7])], n_cores=1\n    )\n    expected = [-np.inf, -np.inf]\n    assert np.allclose(got, expected)\n\n\n# ======================================================================================\n# test SphereExampleInternalOptimizationProblem\n# ======================================================================================\n\n\ndef test_sphere_example_internal_optimization_problem():\n    problem = SphereExampleInternalOptimizationProblem()\n    assert problem.fun(np.array([1, 2, 3])) == 14\n    aaae(problem.jac(np.array([1, 2, 3])), np.array([2, 4, 6]))\n    f, j = problem.fun_and_jac(np.array([1, 2, 3]))\n    assert f == 14\n    aaae(j, np.array([2, 4, 6]))\n\n\ndef test_sphere_example_internal_optimization_problem_with_converter():\n    problem = SphereExampleInternalOptimizationProblemWithConverter()\n    assert problem.fun(np.array([1, 2, 3])) == 14\n    aaae(problem.jac(np.array([1, 2, 3])), np.array([2, 4, 6]))\n    f, j = problem.fun_and_jac(np.array([1, 2, 3]))\n    assert f == 14\n    aaae(j, np.array([2, 4, 6]))\n"
  },
  {
    "path": "tests/optimagic/optimization/test_invalid_jacobian_value.py",
    "content": "import numpy as np\nimport pytest\n\nfrom optimagic.exceptions import UserFunctionRuntimeError\nfrom optimagic.optimization.optimize import minimize\n\n# ======================================================================================\n# Test setup:\n# --------------------------------------------------------------------------------------\n# We test that minimize raises an error if the user function returns a jacobian\n# containing invalid values (np.inf, np.nan). To test that this works not only at\n# the start parameters, we create jac functions that return invalid values if the\n# parameter norm becomes smaller than one.\n# ======================================================================================\n\n\n@pytest.fixture\ndef params():\n    return {\"a\": 1, \"b\": np.array([3, 4])}\n\n\ndef sphere(params):\n    return params[\"a\"] ** 2 + (params[\"b\"] ** 2).sum()\n\n\ndef sphere_gradient(params):\n    return {\n        \"a\": 2 * params[\"a\"],\n        \"b\": 2 * params[\"b\"],\n    }\n\n\ndef sphere_and_gradient(params):\n    return sphere(params), sphere_gradient(params)\n\n\ndef params_norm(params):\n    squared_norm = params[\"a\"] ** 2 + np.linalg.norm(params[\"b\"]) ** 2\n    return np.sqrt(squared_norm)\n\n\ndef get_invalid_jac(invalid_jac_value):\n    \"\"\"Get function that returns invalid jac if the parameter norm < 1.\"\"\"\n\n    def jac(params):\n        if params_norm(params) < 1:\n            return invalid_jac_value\n        else:\n            return sphere_gradient(params)\n\n    return jac\n\n\ndef get_invalid_fun_and_jac(invalid_jac_value):\n    \"\"\"Get function that returns invalid fun and jac if the parameter norm < 1.\"\"\"\n\n    def fun_and_jac(params):\n        if params_norm(params) < 1:\n            return sphere(params), invalid_jac_value\n        else:\n            return sphere_and_gradient(params)\n\n    return fun_and_jac\n\n\nINVALID_JACOBIAN_VALUES = [\n    {\"a\": np.inf, \"b\": 2 * 
np.array([1, 2])},\n    {\"a\": 1, \"b\": 2 * np.array([np.inf, 2])},\n    {\"a\": np.nan, \"b\": 2 * np.array([1, 2])},\n    {\"a\": 1, \"b\": 2 * np.array([np.nan, 2])},\n]\n\n\n# ======================================================================================\n# Test Invalid Jacobian raises proper error with jac argument\n# ======================================================================================\n\n\n@pytest.mark.parametrize(\"invalid_jac_value\", INVALID_JACOBIAN_VALUES)\ndef test_minimize_with_invalid_jac(invalid_jac_value, params):\n    with pytest.raises(\n        UserFunctionRuntimeError,\n        match=(\n            \"The optimization failed because the derivative provided via jac \"\n            \"contains infinite or NaN values.\"\n        ),\n    ):\n        minimize(\n            fun=sphere,\n            params=params,\n            algorithm=\"scipy_lbfgsb\",\n            jac=get_invalid_jac(invalid_jac_value),\n        )\n\n\n# ======================================================================================\n# Test Invalid Jacobian raises proper error with fun_and_jac argument\n# ======================================================================================\n\n\n@pytest.mark.parametrize(\"invalid_jac_value\", INVALID_JACOBIAN_VALUES)\ndef test_minimize_with_invalid_fun_and_jac(invalid_jac_value, params):\n    with pytest.raises(\n        UserFunctionRuntimeError,\n        match=(\n            \"The optimization failed because the derivative provided via fun_and_jac \"\n            \"contains infinite or NaN values.\"\n        ),\n    ):\n        minimize(\n            params=params,\n            algorithm=\"scipy_lbfgsb\",\n            fun_and_jac=get_invalid_fun_and_jac(invalid_jac_value),\n        )\n"
  },
  {
    "path": "tests/optimagic/optimization/test_jax_derivatives.py",
    "content": "import numpy as np\nimport pytest\nfrom numpy.testing import assert_array_almost_equal as aaae\n\nfrom optimagic.config import IS_JAX_INSTALLED\nfrom optimagic.optimization.optimize import minimize\n\nif IS_JAX_INSTALLED:\n    import jax\n    import jax.numpy as jnp\n\n\n@pytest.mark.skipif(not IS_JAX_INSTALLED, reason=\"Needs jax.\")\ndef test_scipy_conference_example():\n    def criterion(x):\n        first = (x[\"a\"] - jnp.pi) ** 2\n        second = jnp.linalg.norm(x[\"b\"] - jnp.arange(3))\n        third = jnp.linalg.norm(x[\"c\"] - jnp.eye(2))\n        return first + second + third\n\n    start_params = {\n        \"a\": 1.0,\n        \"b\": jnp.ones(3).astype(float),\n        \"c\": jnp.ones((2, 2)).astype(float),\n    }\n\n    gradient = jax.grad(criterion)\n\n    res = minimize(\n        fun=criterion,\n        jac=gradient,\n        params=start_params,\n        algorithm=\"scipy_lbfgsb\",\n    )\n\n    assert isinstance(res.params[\"b\"], jnp.ndarray)\n    aaae(res.params[\"b\"], jnp.arange(3))\n    aaae(res.params[\"c\"], jnp.eye(2))\n    assert np.allclose(res.params[\"a\"], np.pi, atol=1e-4)\n\n\n@pytest.mark.skipif(not IS_JAX_INSTALLED, reason=\"Needs jax.\")\ndef test_params_is_jax_scalar():\n    def criterion(x):\n        return x**2\n\n    res = minimize(\n        fun=criterion,\n        params=jnp.array(1.0),\n        algorithm=\"scipy_lbfgsb\",\n        jac=jax.grad(criterion),\n    )\n\n    assert isinstance(res.params, jnp.ndarray)\n    assert np.allclose(res.params, 0.0)\n\n\n@pytest.mark.skipif(not IS_JAX_INSTALLED, reason=\"Needs jax.\")\ndef params_is_1d_array():\n    def criterion(x):\n        return x @ x\n\n    res = minimize(\n        fun=criterion,\n        params=jnp.arange(3),\n        algorithm=\"scipy_lbfgsb\",\n        jac=jax.grad(criterion),\n    )\n\n    assert isinstance(res.params, jnp.ndarray)\n    assert aaae(res.params, jnp.arange(3))\n\n\n@pytest.mark.skipif(not IS_JAX_INSTALLED, reason=\"Needs 
jax.\")\n@pytest.mark.parametrize(\"algorithm\", [\"scipy_lbfgsb\", \"scipy_ls_lm\"])\ndef test_dict_output_works(algorithm):\n    def criterion(x):\n        return {\"root_contributions\": x, \"value\": x @ x}\n\n    def scalar_wrapper(x):\n        return criterion(x)[\"value\"]\n\n    def ls_wrapper(x):\n        return criterion(x)[\"root_contributions\"]\n\n    deriv_dict = {\n        \"value\": jax.grad(scalar_wrapper),\n        \"root_contributions\": jax.jacobian(ls_wrapper),\n    }\n\n    res = minimize(\n        fun=criterion,\n        params=jnp.array([1.0, 2.0, 3.0]),\n        algorithm=algorithm,\n        jac=deriv_dict,\n    )\n\n    assert isinstance(res.params, jnp.ndarray)\n    aaae(res.params, np.zeros(3))\n\n\n@pytest.mark.skipif(not IS_JAX_INSTALLED, reason=\"Needs jax.\")\ndef test_least_squares_optimizer_pytree():\n    def criterion(x):\n        return {\"root_contributions\": x}\n\n    def ls_wrapper(x):\n        return criterion(x)[\"root_contributions\"]\n\n    params = {\"a\": 1.0, \"b\": 2.0, \"c\": jnp.array([1.0, 2.0])}\n    jac = jax.jacobian(ls_wrapper)\n\n    res = minimize(\n        fun=criterion,\n        params=params,\n        algorithm=\"scipy_ls_lm\",\n        jac=jac,\n    )\n\n    assert isinstance(res.params, dict)\n    assert np.allclose(res.params[\"a\"], 0)\n    assert np.allclose(res.params[\"b\"], 0)\n    aaae(res.params[\"c\"], np.zeros(2))\n"
  },
  {
    "path": "tests/optimagic/optimization/test_many_algorithms.py",
    "content": "\"\"\"Test all available algorithms on a simple sum of squares function.\n\n- only minimize\n- only numerical derivative\n\n\"\"\"\n\nimport sys\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_array_almost_equal as aaae\n\nfrom optimagic import mark\nfrom optimagic.algorithms import AVAILABLE_ALGORITHMS, GLOBAL_ALGORITHMS\nfrom optimagic.optimization.optimize import minimize\nfrom optimagic.parameters.bounds import Bounds\n\nAVAILABLE_LOCAL_ALGORITHMS = [\n    name\n    for name, algo in AVAILABLE_ALGORITHMS.items()\n    if name not in GLOBAL_ALGORITHMS and name != \"bhhh\"\n]\n\nAVAILABLE_BOUNDED_ALGORITHMS = [\n    name\n    for name, algo in AVAILABLE_ALGORITHMS.items()\n    if algo.algo_info.supports_bounds\n]\n\nPRECISION_LOOKUP = {\n    \"scipy_trust_constr\": 3,\n    \"iminuit_migrad\": 2,\n}\n\n\n@pytest.fixture\ndef algo(algorithm):\n    return AVAILABLE_ALGORITHMS[algorithm]\n\n\ndef _get_options(algo):\n    options = {}\n    \"Max time before termination\"\n    if hasattr(algo, \"stopping_maxtime\"):\n        options.update({\"stopping_maxtime\": 1})\n\n    \"Fix seed if algorithm is stochastic\"\n    if hasattr(algo, \"seed\"):\n        options.update({\"seed\": 12345})\n    return options\n\n\ndef _get_required_decimals(algorithm, algo):\n    # if algo is experimental, do not expect solution\n    if algo.algo_info.experimental:\n        return 0\n\n    if algorithm in PRECISION_LOOKUP:\n        return PRECISION_LOOKUP[algorithm]\n    else:\n        return 1 if algo.algo_info.is_global else 4\n\n\n@mark.least_squares\ndef sos(x):\n    return x\n\n\ndef _get_params_and_binding_bounds(algo):\n    if algo.algo_info.is_global:\n        params = np.array([0.5, -0.5])\n        bounds = Bounds(lower=np.array([0.25, -1]), upper=np.array([1, -0.25]))\n        expected = np.array([0.25, -0.25])\n\n    else:\n        params = np.array([3, 2, -3])\n        if algo.algo_info.supports_infinite_bounds:\n            bounds = 
Bounds(\n                lower=np.array([1, -np.inf, -np.inf]),\n                upper=np.array([np.inf, np.inf, -1]),\n            )\n        else:\n            bounds = Bounds(lower=np.array([1, -10, -10]), upper=np.array([10, 10, -1]))\n        expected = np.array([1, 0, -1])\n\n    return params, bounds, expected\n\n\n# Tests all bounded algorithms with binding bounds\n@pytest.mark.parametrize(\"algorithm\", AVAILABLE_BOUNDED_ALGORITHMS)\ndef test_sum_of_squares_with_binding_bounds(algorithm, algo):\n    params, bounds, expected = _get_params_and_binding_bounds(algo)\n    algo_options = _get_options(algo)\n    decimal = _get_required_decimals(algorithm, algo)\n\n    res = minimize(\n        fun=sos,\n        params=params,\n        bounds=bounds,\n        algorithm=algorithm,\n        collect_history=True,\n        algo_options=algo_options,\n        skip_checks=True,\n    )\n    assert res.success in [True, None]\n    aaae(res.params, expected, decimal)\n\n\ndef _get_params_and_bounds_on_local(algo):\n    params = np.arange(3)\n    bounds = None\n    expected = np.zeros(3)\n    if algo.algo_info.needs_bounds:\n        bounds = Bounds(lower=np.full(3, -10), upper=np.full(3, 10))\n    return params, bounds, expected\n\n\n# Test all local algorithms without bounds unless needed\n@pytest.mark.parametrize(\"algorithm\", AVAILABLE_LOCAL_ALGORITHMS)\ndef test_sum_of_squares_on_local_algorithms(algorithm, algo):\n    params, bounds, expected = _get_params_and_bounds_on_local(algo)\n    algo_options = _get_options(algo)\n    decimal = _get_required_decimals(algorithm, algo)\n\n    res = minimize(\n        fun=sos,\n        params=params,\n        bounds=bounds,\n        algorithm=algorithm,\n        collect_history=True,\n        algo_options=algo_options,\n        skip_checks=True,\n    )\n    assert res.success in [True, None]\n    aaae(res.params, expected, decimal)\n\n\ndef _get_params_and_bounds_on_global_and_bounded(algo):\n    if algo.algo_info.is_global:\n      
  params = np.array([0.35, 0.35])\n        bounds = Bounds(lower=np.array([-0.2, -0.5]), upper=np.array([1, 0.5]))\n        expected = np.array([0, 0])\n    else:\n        params = np.arange(3)\n        bounds = Bounds(lower=np.full(3, -10), upper=np.full(3, 10))\n        expected = np.zeros(3)\n    return params, bounds, expected\n\n\nskip_msg = (\n    \"The very slow tests of global algorithms are only run on linux which always \"\n    \"runs much faster in continuous integration.\"\n)\n\n\n# Test all global algorithms and local algorithms with bounds\n@pytest.mark.skipif(sys.platform == \"win32\", reason=skip_msg)\n@pytest.mark.parametrize(\"algorithm\", AVAILABLE_BOUNDED_ALGORITHMS)\ndef test_sum_of_squares_on_global_and_bounded_algorithms(algorithm, algo):\n    params, bounds, expected = _get_params_and_bounds_on_global_and_bounded(algo)\n    algo_options = _get_options(algo)\n    decimal = _get_required_decimals(algorithm, algo)\n\n    res = minimize(\n        fun=sos,\n        params=params,\n        bounds=bounds,\n        algorithm=algorithm,\n        collect_history=True,\n        algo_options=algo_options,\n        skip_checks=True,\n    )\n    assert res.success in [True, None]\n    aaae(res.params, expected, decimal)\n"
  },
  {
    "path": "tests/optimagic/optimization/test_multistart.py",
    "content": "from dataclasses import dataclass\nfrom itertools import product\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom numpy.testing import assert_array_almost_equal as aaae\n\nfrom optimagic.optimization.algorithm import InternalOptimizeResult\nfrom optimagic.optimization.multistart import (\n    _draw_exploration_sample,\n    get_batched_optimization_sample,\n    run_explorations,\n    update_convergence_state,\n)\n\n\n@pytest.fixture()\ndef params():\n    df = pd.DataFrame(index=[\"a\", \"b\", \"c\"])\n    df[\"value\"] = [0, 1, 2.0]\n    df[\"soft_lower_bound\"] = [-1, 0, np.nan]\n    df[\"upper_bound\"] = [2, 2, np.nan]\n    return df\n\n\n@pytest.fixture()\ndef constraints():\n    return [{\"type\": \"fixed\", \"loc\": \"c\", \"value\": 2}]\n\n\ndim = 2\ndistributions = [\"uniform\", \"triangular\"]\nrules = [\"sobol\", \"halton\", \"latin_hypercube\", \"random\"]\nlower = [np.zeros(dim), np.ones(dim) * 0.5, -np.ones(dim)]\nupper = [np.ones(dim), np.ones(dim) * 0.75, np.ones(dim) * 2]\ntest_cases = list(product(distributions, rules, lower, upper))\n\n\n@pytest.mark.parametrize(\"dist, rule, lower, upper\", test_cases)\ndef test_draw_exploration_sample(dist, rule, lower, upper):\n    results = []\n\n    for _ in range(2):\n        results.append(\n            _draw_exploration_sample(\n                x=np.ones_like(lower) * 0.5,\n                lower=lower,\n                upper=upper,\n                n_samples=3,\n                distribution=dist,\n                method=rule,\n                seed=1234,\n            )\n        )\n\n    aaae(results[0], results[1])\n    calculated = results[0]\n    assert calculated.shape == (3, 2)\n\n\ndef test_run_explorations():\n    @dataclass\n    class Dummy:\n        def exploration_fun(self, x, n_cores):\n            out = []\n            for vec in x:\n                if vec.sum() == 5:\n                    out.append(np.inf)\n                else:\n                    
out.append(-vec.sum())\n            return out\n\n        def with_step_id(self, step_id):\n            return self\n\n    calculated = run_explorations(\n        internal_problem=Dummy(),\n        sample=np.arange(6).reshape(3, 2),\n        n_cores=1,\n        step_id=0,\n    )\n\n    exp_values = np.array([-9, -1])\n    exp_sample = np.array([[4, 5], [0, 1]])\n\n    aaae(calculated.sorted_sample, exp_sample)\n    aaae(calculated.sorted_values, exp_values)\n\n\ndef test_get_batched_optimization_sample():\n    calculated = get_batched_optimization_sample(\n        sorted_sample=np.arange(12).reshape(6, 2),\n        stopping_maxopt=5,\n        batch_size=4,\n    )\n    expected = [[[0, 1], [2, 3], [4, 5], [6, 7]], [[8, 9]]]\n\n    assert len(calculated[0]) == 4\n    assert len(calculated[1]) == 1\n    assert len(calculated) == 2\n\n    for calc_batch, exp_batch in zip(calculated, expected, strict=False):\n        assert isinstance(calc_batch, list)\n        for calc_entry, exp_entry in zip(calc_batch, exp_batch, strict=False):\n            assert isinstance(calc_entry, np.ndarray)\n            assert calc_entry.tolist() == exp_entry\n\n\n@pytest.fixture()\ndef current_state():\n    state = {\n        \"best_x\": np.ones(3),\n        \"best_y\": 5,\n        \"best_res\": None,\n        \"x_history\": [np.arange(3) - 1e-20, np.ones(3)],\n        \"y_history\": [6, 5],\n        \"result_history\": [],\n        \"start_history\": [],\n    }\n\n    return state\n\n\n@pytest.fixture()\ndef starts():\n    return [np.zeros(3)]\n\n\n@pytest.fixture()\ndef results():\n    res = InternalOptimizeResult(\n        x=np.arange(3) + 1e-10,\n        fun=4,\n    )\n    return [res]\n\n\ndef test_update_state_converged(current_state, starts, results):\n    criteria = {\n        \"xtol\": 1e-3,\n        \"max_discoveries\": 2,\n    }\n\n    new_state, is_converged = update_convergence_state(\n        current_state=current_state,\n        starts=starts,\n        results=results,\n       
 convergence_criteria=criteria,\n        solver_type=\"value\",\n    )\n\n    aaae(new_state[\"best_x\"], np.arange(3))\n    assert new_state[\"best_y\"] == 4\n    assert new_state[\"y_history\"] == [6, 5, 4]\n    assert new_state[\"result_history\"][0].fun == 4\n    aaae(new_state[\"start_history\"][0], np.zeros(3))\n\n    assert is_converged\n\n\ndef test_update_state_not_converged(current_state, starts, results):\n    criteria = {\n        \"xtol\": 1e-3,\n        \"max_discoveries\": 5,\n    }\n\n    _, is_converged = update_convergence_state(\n        current_state=current_state,\n        starts=starts,\n        results=results,\n        convergence_criteria=criteria,\n        solver_type=\"value\",\n    )\n\n    assert not is_converged\n"
  },
  {
    "path": "tests/optimagic/optimization/test_multistart_options.py",
    "content": "import numpy as np\nimport pytest\n\nfrom optimagic.exceptions import InvalidMultistartError\nfrom optimagic.optimization.multistart_options import (\n    MultistartOptions,\n    _linear_weights,\n    _tiktak_weights,\n    get_internal_multistart_options_from_public,\n    pre_process_multistart,\n)\n\n\ndef test_pre_process_multistart_trivial_case():\n    multistart = MultistartOptions(n_samples=10, convergence_max_discoveries=55)\n    got = pre_process_multistart(multistart)\n    assert got == multistart\n\n\ndef test_pre_process_multistart_none_case():\n    assert pre_process_multistart(None) is None\n\n\ndef test_pre_process_multistart_false_case():\n    assert pre_process_multistart(False) is None\n\n\ndef test_pre_process_multistart_dict_case():\n    got = pre_process_multistart(\n        multistart={\n            \"n_samples\": 10,\n            \"convergence_max_discoveries\": 55,\n        }\n    )\n    assert got == MultistartOptions(\n        n_samples=10,\n        convergence_max_discoveries=55,\n    )\n\n\ndef test_pre_process_multistart_invalid_type():\n    with pytest.raises(InvalidMultistartError, match=\"Invalid multistart options\"):\n        pre_process_multistart(multistart=\"invalid\")\n\n\ndef test_pre_process_multistart_invalid_dict_key():\n    with pytest.raises(InvalidMultistartError, match=\"Invalid multistart options\"):\n        pre_process_multistart(multistart={\"invalid\": \"invalid\"})\n\n\ndef test_pre_process_multistart_invalid_dict_value():\n    with pytest.raises(InvalidMultistartError, match=\"Invalid number of samples\"):\n        pre_process_multistart(multistart={\"n_samples\": \"invalid\"})\n\n\n@pytest.mark.parametrize(\"value\", [\"invalid\", -1])\ndef test_multistart_options_invalid_n_samples_value(value):\n    with pytest.raises(InvalidMultistartError, match=\"Invalid number of samples\"):\n        MultistartOptions(n_samples=value)\n\n\n@pytest.mark.parametrize(\"value\", [\"invalid\", -1])\ndef 
test_multistart_options_invalid_stopping_maxopt(value):\n    with pytest.raises(InvalidMultistartError, match=\"Invalid number of optimizations\"):\n        MultistartOptions(stopping_maxopt=value)\n\n\ndef test_multistart_options_stopping_maxopt_less_than_n_samples():\n    with pytest.raises(InvalidMultistartError, match=\"Invalid number of samples\"):\n        MultistartOptions(n_samples=1, stopping_maxopt=2)\n\n\ndef test_multistart_options_invalid_sampling_distribution():\n    with pytest.raises(InvalidMultistartError, match=\"Invalid sampling distribution\"):\n        MultistartOptions(sampling_distribution=\"invalid\")\n\n\ndef test_multistart_options_invalid_sampling_method():\n    with pytest.raises(InvalidMultistartError, match=\"Invalid sampling method\"):\n        MultistartOptions(sampling_method=\"invalid\")\n\n\ndef test_multistart_options_invalid_mixing_weight_method():\n    with pytest.raises(InvalidMultistartError, match=\"Invalid mixing weight method\"):\n        MultistartOptions(mixing_weight_method=\"invalid\")\n\n\n@pytest.mark.parametrize(\"value\", [(\"a\", \"b\"), (1, 2, 3), {\"a\": 1.0, \"b\": 3.0}])\ndef test_multistart_options_invalid_mixing_weight_bounds(value):\n    with pytest.raises(InvalidMultistartError, match=\"Invalid mixing weight bounds\"):\n        MultistartOptions(mixing_weight_bounds=value)\n\n\ndef test_multistart_options_invalid_convergence_xtol_rel():\n    with pytest.raises(InvalidMultistartError, match=\"Invalid relative params\"):\n        MultistartOptions(convergence_xtol_rel=\"invalid\")\n\n\n@pytest.mark.parametrize(\"value\", [\"invalid\", -1])\ndef test_multistart_options_invalid_convergence_max_discoveries(value):\n    with pytest.raises(InvalidMultistartError, match=\"Invalid max discoveries\"):\n        MultistartOptions(convergence_max_discoveries=value)\n\n\n@pytest.mark.parametrize(\"value\", [\"invalid\", -1])\ndef test_multistart_options_invalid_n_cores(value):\n    with 
pytest.raises(InvalidMultistartError, match=\"Invalid number of cores\"):\n        MultistartOptions(n_cores=value)\n\n\n@pytest.mark.parametrize(\"value\", [\"invalid\", -1])\ndef test_multistart_options_invalid_batch_size(value):\n    with pytest.raises(InvalidMultistartError, match=\"Invalid batch size\"):\n        MultistartOptions(batch_size=value)\n\n\ndef test_multistart_options_batch_size_smaller_than_n_cores():\n    with pytest.raises(InvalidMultistartError, match=\"Invalid batch size\"):\n        MultistartOptions(batch_size=1, n_cores=2)\n\n\ndef test_multistart_options_invalid_batch_evaluator():\n    with pytest.raises(InvalidMultistartError, match=\"Invalid batch evaluator\"):\n        MultistartOptions(batch_evaluator=\"invalid\")\n\n\ndef test_multistart_options_invalid_seed():\n    with pytest.raises(InvalidMultistartError, match=\"Invalid seed\"):\n        MultistartOptions(seed=\"invalid\")\n\n\ndef test_multistart_options_invalid_error_handling():\n    with pytest.raises(InvalidMultistartError, match=\"Invalid error handling\"):\n        MultistartOptions(error_handling=\"invalid\")\n\n\ndef test_linear_weights():\n    calculated = _linear_weights(5, 10, 0.4, 0.8)\n    expected = 0.6\n    assert np.allclose(calculated, expected)\n\n\ndef test_tiktak_weights():\n    assert np.allclose(0.3, _tiktak_weights(0, 10, 0.3, 0.8))\n    assert np.allclose(0.8, _tiktak_weights(10, 10, 0.3, 0.8))\n\n\ndef test_get_internal_multistart_options_from_public_defaults():\n    options = MultistartOptions()\n\n    got = get_internal_multistart_options_from_public(\n        options,\n        params=np.arange(5),\n        params_to_internal=lambda x: x,\n    )\n\n    assert got.convergence_xtol_rel == 0.01\n    assert got.convergence_max_discoveries == options.convergence_max_discoveries\n    assert got.n_cores == options.n_cores\n    assert got.error_handling == \"continue\"\n    assert got.n_samples == 500\n    assert got.stopping_maxopt == 50\n    assert 
got.batch_size == 1\n"
  },
  {
    "path": "tests/optimagic/optimization/test_optimize.py",
    "content": "\"\"\"Tests for (almost) algorithm independent properties of maximize and minimize.\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom numpy.testing import assert_array_almost_equal as aaae\n\nfrom optimagic.examples.criterion_functions import sos_scalar\nfrom optimagic.exceptions import InvalidFunctionError, InvalidNumdiffOptionsError\nfrom optimagic.optimization.optimize import maximize, minimize\n\n\ndef test_sign_is_switched_back_after_maximization():\n    params = pd.DataFrame()\n    params[\"value\"] = [1, 2, 3]\n    res = maximize(\n        lambda params: 1 - params[\"value\"] @ params[\"value\"],\n        params=params,\n        algorithm=\"scipy_lbfgsb\",\n    )\n\n    assert np.allclose(res.fun, 1)\n\n\ndef test_scipy_lbfgsb_actually_calls_criterion_and_derivative():\n    params = pd.DataFrame(data=np.ones((10, 1)), columns=[\"value\"])\n\n    def raising_crit_and_deriv(params):  # noqa: ARG001\n        raise NotImplementedError(\"This should not be called.\")\n\n    with pytest.raises(InvalidFunctionError, match=\"Error while evaluating\"):\n        minimize(\n            fun=sos_scalar,\n            params=params,\n            algorithm=\"scipy_lbfgsb\",\n            fun_and_jac=raising_crit_and_deriv,\n        )\n\n\ndef test_with_invalid_numdiff_options():\n    with pytest.raises(InvalidNumdiffOptionsError):\n        minimize(\n            fun=lambda x: x @ x,\n            params=np.arange(5),\n            algorithm=\"scipy_lbfgsb\",\n            numdiff_options={\"bla\": 15},\n        )\n\n\n# provided fun or fun_and_jac is provided\ndef test_with_optional_fun_argument():\n    expected = np.zeros(5)\n    res = minimize(\n        fun_and_jac=lambda x: (x @ x, 2 * x),\n        params=np.arange(5),\n        algorithm=\"scipy_lbfgsb\",\n    )\n    aaae(res.x, expected)\n\n\ndef test_fun_and_jac_list():\n    with pytest.raises(NotImplementedError):\n        minimize(\n            fun_and_jac=[lambda x: (x @ x, 2 * x)],\n 
           params=np.arange(5),\n            algorithm=\"scipy_lbfgsb\",\n        )\n"
  },
  {
    "path": "tests/optimagic/optimization/test_optimize_result.py",
    "content": "import numpy as np\nimport pandas as pd\nimport pytest\n\nfrom optimagic.optimization.optimize_result import OptimizeResult, _create_stars\nfrom optimagic.utilities import get_rng\n\n\n@pytest.fixture()\ndef convergence_report():\n    conv_report = pd.DataFrame(\n        index=[\n            \"relative_criterion_change\",\n            \"relative_params_change\",\n            \"absolute_criterion_change\",\n            \"absolute_params_change\",\n        ],\n        columns=[\"one_step\", \"five_steps\"],\n    )\n    u = get_rng(seed=0).uniform\n    conv_report[\"one_step\"] = [\n        u(1e-12, 1e-10),\n        u(1e-9, 1e-8),\n        u(1e-7, 1e-6),\n        u(1e-6, 1e-5),\n    ]\n    conv_report[\"five_steps\"] = [1e-8, 1e-4, 1e-3, 100]\n    return conv_report\n\n\n@pytest.fixture()\ndef base_inputs():\n    out = {\n        \"params\": np.ones(3),\n        \"fun\": 500,\n        \"start_fun\": 1000,\n        \"start_params\": np.full(3, 10),\n        \"direction\": \"minimize\",\n        \"message\": \"OPTIMIZATION TERMINATED SUCCESSFULLY\",\n        \"success\": True,\n        \"n_fun_evals\": 100,\n        \"n_jac_evals\": 0,\n        \"n_iterations\": 80,\n        \"history\": {\"criterion\": list(range(10))},\n        \"algorithm\": \"scipy_lbfgsb\",\n        \"n_free\": 2,\n    }\n    return out\n\n\ndef test_optimize_result_runs(base_inputs, convergence_report):\n    res = OptimizeResult(\n        convergence_report=convergence_report,\n        **base_inputs,\n    )\n    res.__repr__()\n\n\ndef test_create_stars():\n    sr = pd.Series([1e-12, 1e-9, 1e-7, 1e-4, 1e-2])\n    calculated = _create_stars(sr).tolist()\n    expected = [\"***\", \"** \", \"*  \", \"   \", \"   \"]\n    assert calculated == expected\n\n\ndef test_to_pickle(base_inputs, convergence_report, tmp_path):\n    res = OptimizeResult(\n        convergence_report=convergence_report,\n        **base_inputs,\n    )\n    res.to_pickle(tmp_path / \"bla.pkl\")\n\n\ndef 
test_dict_access(base_inputs):\n    res = OptimizeResult(**base_inputs)\n    assert res[\"fun\"] == 500\n    assert res[\"nfev\"] == 100\n"
  },
  {
    "path": "tests/optimagic/optimization/test_params_versions.py",
    "content": "import numpy as np\nimport pandas as pd\nimport pytest\nfrom numpy.testing import assert_array_almost_equal as aaae\nfrom pybaum import tree_just_flatten\n\nfrom optimagic.examples.criterion_functions import (\n    sos_gradient,\n    sos_ls,\n    sos_ls_jacobian,\n    sos_scalar,\n)\nfrom optimagic.optimization.optimize import minimize\nfrom optimagic.parameters.tree_registry import get_registry\n\nREGISTRY = get_registry(extended=True)\n\nPARAMS = [\n    {\"a\": 1.0, \"b\": 2, \"c\": 3, \"d\": 4, \"e\": 5},\n    np.arange(5),\n    list(range(5)),\n    tuple(range(5)),\n    pd.Series(np.arange(5)),\n    {\"a\": 1, \"b\": np.array([2, 3]), \"c\": [pd.Series([4, 5])]},\n]\n\nSCALAR_PARAMS = [6, 6.2, np.array([4]), np.array([4.5])]\n\n\n@pytest.mark.parametrize(\"params\", PARAMS + SCALAR_PARAMS)\ndef test_tree_params_numerical_derivative_scalar_criterion(params):\n    flat = np.array(tree_just_flatten(params, registry=REGISTRY))\n    expected = np.zeros_like(flat)\n\n    res = minimize(\n        fun=sos_scalar,\n        params=params,\n        algorithm=\"scipy_lbfgsb\",\n    )\n    calculated = np.array(tree_just_flatten(res.params, registry=REGISTRY))\n    aaae(calculated, expected)\n\n\n@pytest.mark.parametrize(\"params\", PARAMS + SCALAR_PARAMS)\ndef test_tree_params_scalar_criterion(params):\n    flat = np.array(tree_just_flatten(params, registry=REGISTRY))\n    expected = np.zeros_like(flat)\n\n    res = minimize(\n        fun=sos_scalar,\n        jac=sos_gradient,\n        params=params,\n        algorithm=\"scipy_lbfgsb\",\n    )\n    calculated = np.array(tree_just_flatten(res.params, registry=REGISTRY))\n    aaae(calculated, expected)\n\n\nTEST_CASES_SOS_LS = []\nfor p in PARAMS:\n    for algo in [\"scipy_lbfgsb\", \"scipy_ls_lm\"]:\n        TEST_CASES_SOS_LS.append((p, algo))\n\n\n@pytest.mark.parametrize(\"params, algorithm\", TEST_CASES_SOS_LS)\ndef test_tree_params_numerical_derivative_sos_ls(params, algorithm):\n    flat = 
np.array(tree_just_flatten(params, registry=REGISTRY))\n    expected = np.zeros_like(flat)\n\n    res = minimize(\n        fun=sos_ls,\n        params=params,\n        algorithm=algorithm,\n    )\n    calculated = np.array(tree_just_flatten(res.params, registry=REGISTRY))\n    aaae(calculated, expected)\n\n\n@pytest.mark.parametrize(\"params, algorithm\", TEST_CASES_SOS_LS)\ndef test_tree_params_sos_ls(params, algorithm):\n    flat = np.array(tree_just_flatten(params, registry=REGISTRY))\n    expected = np.zeros_like(flat)\n\n    derivatives = [sos_gradient, sos_ls_jacobian]\n    res = minimize(\n        fun=sos_ls,\n        jac=derivatives,\n        params=params,\n        algorithm=algorithm,\n    )\n    calculated = np.array(tree_just_flatten(res.params, registry=REGISTRY))\n    aaae(calculated, expected)\n"
  },
  {
    "path": "tests/optimagic/optimization/test_process_result.py",
    "content": "from optimagic.optimization.process_results import _sum_or_none\n\n\ndef test_sum_or_none():\n    assert _sum_or_none([1, 2, 3]) == 6\n    assert _sum_or_none([1, 2, None]) is None\n"
  },
  {
    "path": "tests/optimagic/optimization/test_scipy_aliases.py",
    "content": "import numpy as np\nimport pytest\nfrom numpy.testing import assert_array_almost_equal as aaae\n\nimport optimagic as om\nfrom optimagic.exceptions import AliasError\n\n\ndef test_x0_works_in_minimize():\n    res = om.minimize(\n        fun=lambda x: x @ x,\n        x0=np.arange(3),\n        algorithm=\"scipy_lbfgsb\",\n    )\n    aaae(res.params, np.zeros(3))\n\n\ndef test_x0_works_in_maximize():\n    res = om.maximize(\n        fun=lambda x: -x @ x,\n        x0=np.arange(3),\n        algorithm=\"scipy_lbfgsb\",\n    )\n    aaae(res.params, np.zeros(3))\n\n\ndef test_x0_and_params_do_not_work_together_in_minimize():\n    with pytest.raises(AliasError, match=\"x0 is an alias\"):\n        om.minimize(\n            fun=lambda x: x @ x,\n            x0=np.arange(3),\n            params=np.arange(3),\n            algorithm=\"scipy_lbfgsb\",\n        )\n\n\ndef test_x0_and_params_do_not_work_together_in_maximize():\n    with pytest.raises(AliasError, match=\"x0 is an alias\"):\n        om.maximize(\n            fun=lambda x: -x @ x,\n            x0=np.arange(3),\n            params=np.arange(3),\n            algorithm=\"scipy_lbfgsb\",\n        )\n\n\nMETHODS = [\n    \"Nelder-Mead\",\n    \"Powell\",\n    \"CG\",\n    \"BFGS\",\n    \"Newton-CG\",\n    \"L-BFGS-B\",\n    \"TNC\",\n    \"COBYLA\",\n    \"SLSQP\",\n    \"trust-constr\",\n]\n\n\n@pytest.mark.parametrize(\"method\", METHODS)\ndef test_method_works_in_minimize(method):\n    res = om.minimize(\n        fun=lambda x: x @ x,\n        x0=np.arange(3),\n        method=\"L-BFGS-B\",\n    )\n    aaae(res.params, np.zeros(3))\n\n\n@pytest.mark.parametrize(\"method\", METHODS)\ndef test_method_works_in_maximize(method):\n    res = om.maximize(\n        fun=lambda x: -x @ x,\n        x0=np.arange(3),\n        method=\"L-BFGS-B\",\n    )\n    aaae(res.params, np.zeros(3))\n\n\ndef test_method_and_algorithm_do_not_work_together_in_minimize():\n    with pytest.raises(AliasError, match=\"method is an 
alias\"):\n        om.minimize(\n            fun=lambda x: x @ x,\n            x0=np.arange(3),\n            algorithm=\"scipy_lbfgsb\",\n            method=\"L-BFGS-B\",\n        )\n\n\ndef test_method_and_algorithm_do_not_work_together_in_maximize():\n    with pytest.raises(AliasError, match=\"method is an alias\"):\n        om.maximize(\n            fun=lambda x: -x @ x,\n            x0=np.arange(3),\n            algorithm=\"scipy_lbfgsb\",\n            method=\"L-BFGS-B\",\n        )\n\n\ndef test_exception_for_hess():\n    msg = \"The hess argument is not yet supported\"\n    with pytest.raises(NotImplementedError, match=msg):\n        om.minimize(\n            fun=lambda x: x @ x,\n            x0=np.arange(3),\n            algorithm=\"scipy_lbfgsb\",\n            hess=lambda x: np.eye(len(x)),\n        )\n\n\ndef test_exception_for_hessp():\n    msg = \"The hessp argument is not yet supported\"\n    with pytest.raises(NotImplementedError, match=msg):\n        om.minimize(\n            fun=lambda x: x @ x,\n            x0=np.arange(3),\n            algorithm=\"scipy_lbfgsb\",\n            hessp=lambda x, p: np.eye(len(x)) @ p,\n        )\n\n\ndef test_exception_for_callback():\n    msg = \"The callback argument is not yet supported\"\n    with pytest.raises(NotImplementedError, match=msg):\n        om.minimize(\n            fun=lambda x: x @ x,\n            x0=np.arange(3),\n            algorithm=\"scipy_lbfgsb\",\n            callback=print,\n        )\n\n\ndef test_exception_for_options():\n    msg = \"The options argument is not supported\"\n    with pytest.raises(NotImplementedError, match=msg):\n        om.minimize(\n            fun=lambda x: x @ x,\n            x0=np.arange(3),\n            algorithm=\"scipy_lbfgsb\",\n            options={\"maxiter\": 100},\n        )\n\n\ndef test_exception_for_tol():\n    msg = \"The tol argument is not supported\"\n    with pytest.raises(NotImplementedError, match=msg):\n        om.minimize(\n            fun=lambda 
x: x @ x,\n            x0=np.arange(3),\n            algorithm=\"scipy_lbfgsb\",\n            tol=1e-6,\n        )\n\n\ndef test_args_works_in_minimize():\n    res = om.minimize(\n        fun=lambda x, a: ((x - a) ** 2).sum(),\n        x0=np.arange(3),\n        args=(1,),\n        algorithm=\"scipy_lbfgsb\",\n    )\n    aaae(res.params, np.ones(3))\n\n\ndef test_args_works_in_maximize():\n    res = om.maximize(\n        fun=lambda x, a: -((x - a) ** 2).sum(),\n        x0=np.arange(3),\n        args=(1,),\n        algorithm=\"scipy_lbfgsb\",\n    )\n    aaae(res.params, np.ones(3))\n\n\ndef test_args_does_not_work_with_together_with_any_kwargs():\n    with pytest.raises(AliasError, match=\"args is an alternative\"):\n        om.minimize(\n            fun=lambda x, a: ((x - a) ** 2).sum(),\n            params=np.arange(3),\n            algorithm=\"scipy_lbfgsb\",\n            args=(1,),\n            fun_kwargs={\"a\": 1},\n        )\n\n\ndef test_jac_equal_true_works_in_minimize():\n    res = om.minimize(\n        fun=lambda x: (x @ x, 2 * x),\n        params=np.arange(3),\n        algorithm=\"scipy_lbfgsb\",\n        jac=True,\n    )\n    aaae(res.params, np.zeros(3))\n\n\ndef test_jac_equal_true_works_in_maximize():\n    res = om.maximize(\n        fun=lambda x: (-x @ x, -2 * x),\n        params=np.arange(3),\n        algorithm=\"scipy_lbfgsb\",\n        jac=True,\n    )\n    aaae(res.params, np.zeros(3))\n"
  },
  {
    "path": "tests/optimagic/optimization/test_useful_exceptions.py",
    "content": "import numpy as np\nimport pandas as pd\nimport pytest\n\nfrom optimagic.exceptions import (\n    InvalidFunctionError,\n    InvalidKwargsError,\n    UserFunctionRuntimeError,\n)\nfrom optimagic.optimization.optimize import minimize\n\n\ndef test_missing_criterion_kwargs():\n    def f(params, bla, blubb):  # noqa: ARG001\n        return (params[\"value\"].to_numpy() ** 2).sum()\n\n    params = pd.DataFrame(np.ones((3, 1)), columns=[\"value\"])\n\n    with pytest.raises(InvalidKwargsError):\n        minimize(f, params, \"scipy_lbfgsb\", fun_kwargs={\"bla\": 3})\n\n\ndef test_missing_derivative_kwargs():\n    def f(params):\n        return (params[\"value\"].to_numpy() ** 2).sum()\n\n    def grad(params, bla, blubb):  # noqa: ARG001\n        return params[\"value\"].to_numpy() * 2\n\n    params = pd.DataFrame(np.ones((3, 1)), columns=[\"value\"])\n\n    with pytest.raises(InvalidKwargsError):\n        minimize(f, params, \"scipy_lbfgsb\", jac=grad, jac_kwargs={\"bla\": 3})\n\n\ndef test_missing_criterion_and_derivative_kwargs():\n    def f(params):\n        return (params[\"value\"].to_numpy() ** 2).sum()\n\n    def f_and_grad(params, bla, blubb):  # noqa: ARG001\n        return f(params), params[\"value\"].to_numpy() * 2\n\n    params = pd.DataFrame(np.ones((3, 1)), columns=[\"value\"])\n\n    with pytest.raises(InvalidKwargsError):\n        minimize(\n            f,\n            params,\n            \"scipy_lbfgsb\",\n            fun_and_jac=f_and_grad,\n            fun_and_jac_kwargs={\"bla\": 3},\n        )\n\n\ndef test_typo_in_criterion_kwarg():\n    def f(params, bla, foo):  # noqa: ARG001\n        return (params[\"value\"].to_numpy() ** 2).sum()\n\n    params = pd.DataFrame(np.ones((3, 1)), columns=[\"value\"])\n\n    snippet = \"Did you mean\"\n    with pytest.raises(InvalidKwargsError, match=snippet):\n        minimize(f, params, \"scipy_lbfgsb\", fun_kwargs={\"bla\": 3, \"foa\": 4})\n\n\ndef 
test_criterion_with_runtime_error_derivative_free():\n    def f(params):\n        x = params[\"value\"].to_numpy()\n        if x.sum() < 1:\n            raise RuntimeError(\"Great error message\")\n\n        return x @ x\n\n    params = pd.DataFrame(np.full((3, 1), 10), columns=[\"value\"])\n    snippet = \"when evaluating fun during optimization\"\n    with pytest.raises(UserFunctionRuntimeError, match=snippet):\n        minimize(f, params, \"scipy_neldermead\")\n\n\ndef test_criterion_with_runtime_error_during_numerical_derivative():\n    def f(params):\n        x = params[\"value\"].to_numpy()\n        if (x != 1).any():\n            raise RuntimeError(\"Great error message\")\n\n        return x @ x\n\n    params = pd.DataFrame(np.ones((3, 1)), columns=[\"value\"])\n    snippet = \"evaluating a numerical derivative\"\n    with pytest.raises(UserFunctionRuntimeError, match=snippet):\n        minimize(f, params, \"scipy_lbfgsb\")\n\n\ndef test_criterion_fails_at_start_values():\n    def just_fail(params):  # noqa: ARG001\n        raise RuntimeError()\n\n    params = pd.DataFrame(np.ones((3, 1)), columns=[\"value\"])\n    snippet = \"Error while evaluating fun at start params.\"\n    with pytest.raises(InvalidFunctionError, match=snippet):\n        minimize(just_fail, params, \"scipy_lbfgsb\")\n"
  },
  {
    "path": "tests/optimagic/optimization/test_with_advanced_constraints.py",
    "content": "\"\"\"Tests using constraints with optional entries or combination of constraints.\n\n- Only sum of squares\n- Only scipy_lbfgsb\n- Only minimize\n\n\"\"\"\n\nimport itertools\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom numpy.testing import assert_array_almost_equal as aaae\n\nimport optimagic as om\nfrom optimagic.examples.criterion_functions import sos_gradient, sos_scalar\nfrom optimagic.optimization.optimize import minimize\n\nCONSTR_INFO = {\n    \"cov_bounds_distance\": om.FlatCovConstraint(regularization=0.1),\n    \"sdcorr_bounds_distance\": om.FlatSDCorrConstraint(regularization=0.1),\n    \"fixed_and_decreasing\": [\n        om.DecreasingConstraint(lambda x: x.loc[[1, 2, 3, 4]]),\n        om.FixedConstraint(lambda x: x.loc[2]),\n    ],\n    \"fixed_and_increasing\": [\n        om.IncreasingConstraint(lambda x: x.loc[[0, 1, 2, 3]]),\n        om.FixedConstraint(lambda x: x.loc[2]),\n    ],\n}\n\n\nSTART_INFO = {\n    \"cov_bounds_distance\": [1, 0.1, 2, 0.2, 0.3, 3],\n    \"sdcorr_bounds_distance\": [1, 2, 3, 0.1, 0.2, 0.3],\n    \"fixed_and_decreasing\": [1, 4, 4, 2, 1],\n    \"fixed_and_increasing\": [1, 2, 3, 4, 1],\n}\n\nRES_INFO = {\n    \"cov_bounds_distance\": [0.1, 0, 0.1, 0, 0, 0.1],\n    \"sdcorr_bounds_distance\": [0.1, 0.1, 0.1, 0, 0, 0.0],\n    \"fixed_and_decreasing\": [0, 4, 4, 0, 0],\n    \"fixed_and_increasing\": [0, 0, 3, 3, 0],\n}\n\n\nderivatives = [sos_gradient, None]\nconstr_names = list(CONSTR_INFO.keys())\n\n\ntest_cases = list(itertools.product(derivatives, constr_names))\n\n\n@pytest.mark.parametrize(\"derivative, constr_name\", test_cases)\ndef test_with_covariance_constraint_bounds_distance(derivative, constr_name):\n    params = pd.Series(START_INFO[constr_name], name=\"value\").to_frame()\n\n    res = minimize(\n        fun=sos_scalar,\n        params=params,\n        algorithm=\"scipy_lbfgsb\",\n        jac=derivative,\n        constraints=CONSTR_INFO[constr_name],\n    )\n\n    assert 
res.success, \"scipy_lbfgsb did not converge.\"\n\n    expected = np.array(RES_INFO[constr_name])\n    aaae(res.params[\"value\"].to_numpy(), expected, decimal=4)\n"
  },
  {
    "path": "tests/optimagic/optimization/test_with_bounds.py",
    "content": "import numpy as np\nfrom scipy.optimize import Bounds as ScipyBounds\n\nfrom optimagic.optimization.optimize import maximize, minimize\n\n\ndef test_minimize_with_scipy_bounds():\n    minimize(\n        lambda x: x @ x,\n        np.arange(3),\n        bounds=ScipyBounds(np.full(3, -1), np.full(3, 5)),\n        algorithm=\"scipy_lbfgsb\",\n    )\n\n\ndef test_minimize_with_sequence_bounds():\n    minimize(\n        lambda x: x @ x,\n        np.arange(3),\n        bounds=[(-1, 5)] * 3,\n        algorithm=\"scipy_lbfgsb\",\n    )\n\n\ndef test_maximize_with_scipy_bounds():\n    maximize(\n        lambda x: -x @ x,\n        np.arange(3),\n        bounds=ScipyBounds(np.full(3, -1), np.full(3, 5)),\n        algorithm=\"scipy_lbfgsb\",\n    )\n\n\ndef test_maximize_with_sequence_bounds():\n    maximize(\n        lambda x: -x @ x,\n        np.arange(3),\n        bounds=[(-1, 5)] * 3,\n        algorithm=\"scipy_lbfgsb\",\n    )\n"
  },
  {
    "path": "tests/optimagic/optimization/test_with_constraints.py",
    "content": "\"\"\"Test many different criterion functions and many sets of constraints.\n\n- only minimize\n- only gradient based algorithms scipy_lbfgsb (scalar) and scipy_ls_dogbox (least\n  squares)\n- closed form and numerical derivatives\n\n\"\"\"\n\nfrom copy import deepcopy\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nimport statsmodels.api as sm\nfrom numpy.testing import assert_array_almost_equal as aaae\n\nimport optimagic as om\nfrom optimagic import mark\nfrom optimagic.examples.criterion_functions import (\n    rhe_function_value,\n    rhe_gradient,\n    rosenbrock_function_value,\n    rosenbrock_gradient,\n    sos_gradient,\n    sos_likelihood_jacobian,\n    sos_ls,\n    sos_ls_jacobian,\n    trid_gradient,\n    trid_scalar,\n)\nfrom optimagic.exceptions import InvalidConstraintError, InvalidParamsError\nfrom optimagic.optimization.optimize import maximize, minimize\nfrom optimagic.parameters.bounds import Bounds\n\n\n@mark.likelihood\ndef logit_loglike(params, y, x):\n    \"\"\"Log-likelihood function of a logit model.\n\n    Args:\n        params (pd.DataFrame): The index consists of the parameter names,\n            the \"value\" column are the parameter values.\n        y (np.array): 1d numpy array with the dependent variable\n        x (np.array): 2d numpy array with the independent variables\n\n    Returns:\n        loglike (np.array): 1d numpy array with likelihood contribution  per individual\n\n    \"\"\"\n    if isinstance(params, pd.DataFrame):\n        p = params[\"value\"].to_numpy()\n    else:\n        p = params\n    q = 2 * y - 1\n    loglikes = np.log(1 / (1 + np.exp(-(q * np.dot(x, p)))))\n\n    return loglikes\n\n\nFUNC_INFO = {\n    \"sos\": {\n        \"criterion\": sos_ls,\n        \"gradient\": sos_gradient,\n        \"jacobian\": sos_likelihood_jacobian,\n        \"ls_jacobian\": sos_ls_jacobian,\n        \"default_result\": np.zeros(3),\n        \"fixed_result\": [1, 0, 0],\n        \"entries\": [\"value\", 
\"contributions\", \"root_contributions\"],\n        \"linear_result\": [0.8, 1.6, 0],\n        \"probability_result\": [0.5, 0.5, 0],\n    },\n    \"rotated_hyper_ellipsoid\": {\n        \"criterion\": rhe_function_value,\n        \"gradient\": rhe_gradient,\n        \"entries\": [\"value\", \"contributions\", \"root_contributions\"],\n        \"default_result\": np.zeros(3),\n        \"fixed_result\": [1, 0, 0],\n        \"linear_result\": [0.571428571, 1.714285714, 0],\n        \"probability_result\": [0.4, 0.6, 0],\n    },\n    \"rosenbrock\": {\n        \"criterion\": rosenbrock_function_value,\n        \"gradient\": rosenbrock_gradient,\n        \"entries\": [\"value\", \"contributions\"],\n        \"default_result\": np.ones(3),\n        \"linear_result\": \"unknown\",\n        \"probability_result\": \"unknown\",\n    },\n    \"trid\": {\n        \"criterion\": trid_scalar,\n        \"gradient\": trid_gradient,\n        \"entries\": [\"value\"],\n        \"default_result\": [3, 4, 3],\n        \"fixed_result\": [1, 2.666666667, 2.333333333],\n        \"equality_result\": [3, 3, 3],\n        \"pairwise_equality_result\": [3.333333333, 3.333333333, 2.666666667],\n        \"increasing_result\": [2.666666667, 3.3333333, 3.3333333],\n        \"decreasing_result\": \"unknown\",\n        \"linear_result\": [1.185185185, 1.4074074069999998, 1.703703704],\n        \"probability_result\": [0.272727273, 0.727272727, 1.363636364],\n        \"covariance_result\": \"unknown\",\n        \"sdcorr_result\": \"unknown\",\n    },\n}\n\nCONSTR_INFO = {\n    \"numpy\": {\n        \"fixed\": om.FixedConstraint(selector=lambda x: x[0]),\n        \"equality\": om.EqualityConstraint(selector=lambda x: x[[0, 1, 2]]),\n        \"pairwise_equality\": om.PairwiseEqualityConstraint(\n            selectors=[lambda x: x[0], lambda x: x[1]]\n        ),\n        \"increasing\": om.IncreasingConstraint(selector=lambda x: x[[1, 2]]),\n        \"decreasing\": 
om.DecreasingConstraint(selector=lambda x: x[[0, 1]]),\n        \"linear\": om.LinearConstraint(\n            selector=lambda x: x[[0, 1]], value=4, weights=[1, 2]\n        ),\n        \"probability\": om.ProbabilityConstraint(selector=lambda x: x[[0, 1]]),\n        \"covariance\": om.FlatCovConstraint(selector=lambda x: x[[0, 1, 2]]),\n        \"sdcorr\": om.FlatSDCorrConstraint(selector=lambda x: x[[0, 1, 2]]),\n    },\n    \"pandas\": {\n        \"fixed\": om.FixedConstraint(selector=lambda p: p.loc[0]),\n        \"equality\": om.EqualityConstraint(selector=lambda p: p.loc[[0, 1, 2]]),\n        \"pairwise_equality\": om.PairwiseEqualityConstraint(\n            selectors=[lambda p: p.loc[0], lambda p: p.loc[1]]\n        ),\n        \"increasing\": om.IncreasingConstraint(selector=lambda p: p.loc[[1, 2]]),\n        \"decreasing\": om.DecreasingConstraint(selector=lambda p: p.loc[[0, 1]]),\n        \"linear\": om.LinearConstraint(\n            selector=lambda p: p.loc[[0, 1]], value=4, weights=[1, 2]\n        ),\n        \"probability\": om.ProbabilityConstraint(selector=lambda p: p.loc[[0, 1]]),\n        \"covariance\": om.FlatCovConstraint(selector=lambda p: p.loc[[0, 1, 2]]),\n        \"sdcorr\": om.FlatSDCorrConstraint(selector=lambda p: p.loc[[0, 1, 2]]),\n    },\n}\n\n\nSTART_INFO = {\n    \"fixed\": [1, 1.5, 4.5],\n    \"equality\": [1, 1, 1],\n    \"pairwise_equality\": [2, 2, 3],\n    \"increasing\": [1, 2, 3],\n    \"decreasing\": [3, 2, 1],\n    \"linear\": [2, 1, 3],\n    \"probability\": [0.8, 0.2, 3],\n    \"covariance\": [2, 1, 2],\n    \"sdcorr\": [2, 2, 0.5],\n}\n\nKNOWN_FAILURES = {\n    (\"rosenbrock\", \"equality\"),\n    (\"rosenbrock\", \"decreasing\"),  # imprecise\n}\n\nPARAMS_TYPES = [\"numpy\", \"pandas\"]\n\ntest_cases = []\nfor crit_name in FUNC_INFO:\n    for ptype in PARAMS_TYPES:\n        for constr_name in CONSTR_INFO[ptype]:\n            unknown_res = FUNC_INFO[crit_name].get(f\"{constr_name}_result\") == \"unknown\"\n            
known_failure = (crit_name, constr_name) in KNOWN_FAILURES\n            if not any([unknown_res, known_failure]):\n                for deriv in None, FUNC_INFO[crit_name][\"gradient\"]:\n                    test_cases.append(\n                        (crit_name, \"scipy_lbfgsb\", deriv, constr_name, ptype)\n                    )\n\n                if \"root_contributions\" in FUNC_INFO[crit_name][\"entries\"]:\n                    for deriv in [FUNC_INFO[crit_name].get(\"ls_jacobian\"), None]:\n                        test_cases.append(\n                            (crit_name, \"scipy_ls_dogbox\", deriv, constr_name, ptype)\n                        )\n\n\n@pytest.mark.parametrize(\n    \"criterion_name, algorithm, derivative, constraint_name, params_type\",\n    test_cases,\n)\ndef test_constrained_minimization(\n    criterion_name, algorithm, derivative, constraint_name, params_type\n):\n    constraints = CONSTR_INFO[params_type][constraint_name]\n    criterion = FUNC_INFO[criterion_name][\"criterion\"]\n    if params_type == \"pandas\":\n        params = pd.Series(START_INFO[constraint_name], name=\"value\").to_frame()\n    else:\n        params = np.array(START_INFO[constraint_name])\n\n    res = minimize(\n        fun=criterion,\n        params=params,\n        algorithm=algorithm,\n        jac=derivative,\n        constraints=constraints,\n        algo_options={\"convergence.ftol_rel\": 1e-12},\n    )\n\n    if params_type == \"pandas\":\n        calculated = res.params[\"value\"].to_numpy()\n    else:\n        calculated = res.params\n\n    expected = FUNC_INFO[criterion_name].get(\n        f\"{constraint_name}_result\", FUNC_INFO[criterion_name][\"default_result\"]\n    )\n\n    aaae(calculated, expected, decimal=4)\n\n\n@pytest.mark.filterwarnings(\"ignore:Specifying constraints as a dictionary is\")\ndef test_fix_that_differs_from_start_value_raises_an_error():\n    # We use the old constraint interface here, as the new interface prohibits the\n    # usage 
of the 'value' attribute, rendering the test useless.\n    # TODO: Remove this test when the old constraint interface is deprecated.\n    with pytest.raises(InvalidParamsError):\n        minimize(\n            fun=lambda x: x @ x,\n            params=np.arange(3),\n            algorithm=\"scipy_lbfgsb\",\n            constraints=[{\"selector\": lambda x: x[1], \"value\": 10, \"type\": \"fixed\"}],\n        )\n\n\ndef test_three_independent_constraints():\n    params = np.arange(10)\n    params[0] = 2\n\n    constraints = [\n        om.FlatCovConstraint(lambda x: x[[0, 1, 2]]),\n        om.FixedConstraint(lambda x: x[[4, 5]]),\n        om.LinearConstraint(lambda x: x[[7, 8]], value=15, weights=1),\n    ]\n\n    res = minimize(\n        fun=lambda x: x @ x,\n        params=params,\n        algorithm=\"scipy_lbfgsb\",\n        constraints=constraints,\n        algo_options={\"convergence.ftol_rel\": 1e-12},\n    )\n    expected = np.array([0] * 4 + [4, 5] + [0] + [7.5] * 2 + [0])\n\n    # TODO: Increase precision back to decimal=4. 
The reduced precision is likely due\n    # to the re-written L-BFGS-B algorithm in SciPy 1.15.\n    # See https://github.com/optimagic-dev/optimagic/issues/556.\n    aaae(res.params, expected, decimal=3)\n\n\nINVALID_CONSTRAINT_COMBIS = [\n    [\n        om.FlatCovConstraint(lambda x: x[[1, 0, 2]]),\n        om.ProbabilityConstraint(lambda x: x[[0, 1]]),\n    ],\n    [\n        om.FlatCovConstraint(lambda x: x[[6, 3, 5, 2, 1, 4]]),\n        om.IncreasingConstraint(lambda x: x[[0, 1, 2]]),\n    ],\n]\n\n\n@pytest.mark.parametrize(\"constraints\", INVALID_CONSTRAINT_COMBIS)\ndef test_incompatible_constraints_raise_errors(constraints):\n    params = np.arange(10)\n\n    with pytest.raises(InvalidConstraintError):\n        minimize(\n            fun=lambda x: x @ x,\n            params=params,\n            algorithm=\"scipy_lbfgsb\",\n            constraints=constraints,\n        )\n\n\ndef test_bug_from_copenhagen_presentation():\n    # Make sure maximum of work hours is optimal\n    def u(params):\n        return params[\"work\"][\"hours\"] ** 2\n\n    start_params = {\n        \"work\": {\"hourly_wage\": 25.5, \"hours\": 2_000},\n        \"time_budget\": 24 * 7 * 365,\n    }\n\n    def return_all_but_working_hours(params):\n        out = deepcopy(params)\n        del out[\"work\"][\"hours\"]\n        return out\n\n    res = maximize(\n        fun=u,\n        params=start_params,\n        algorithm=\"scipy_lbfgsb\",\n        constraints=[\n            om.FixedConstraint(selector=return_all_but_working_hours),\n            om.IncreasingConstraint(lambda p: [p[\"work\"][\"hours\"], p[\"time_budget\"]]),\n        ],\n        bounds=Bounds(lower={\"work\": {\"hours\": 0}}),\n    )\n\n    assert np.allclose(res.params[\"work\"][\"hours\"], start_params[\"time_budget\"])\n\n\ndef test_constraint_inheritance():\n    \"\"\"Test that probability constraint applies both sets of parameters in a pairwise\n    equality constraint, no matter to which set they were applied 
originally.\n    \"\"\"\n    for loc in [[0, 1], [2, 3]]:\n\n        def selector(x, loc=loc):\n            # bind loc to the function\n            return x[loc]\n\n        constraints = [\n            om.PairwiseEqualityConstraint(\n                selectors=[lambda x: x[[0, 1]], lambda x: x[[3, 2]]]\n            ),\n            om.ProbabilityConstraint(selector),\n        ]\n\n        res = minimize(\n            fun=lambda x: x @ x,\n            params=np.array([0.1, 0.9, 0.9, 0.1]),\n            algorithm=\"scipy_lbfgsb\",\n            constraints=constraints,\n        )\n        aaae(res.params, [0.5] * 4)\n\n\ndef test_invalid_start_params():\n    def criterion(x):\n        return np.dot(x, x)\n\n    x = np.arange(3)\n\n    with pytest.raises(InvalidParamsError):\n        minimize(\n            criterion,\n            params=x,\n            algorithm=\"scipy_lbfgsb\",\n            constraints=om.ProbabilityConstraint(selector=lambda x: x[[1, 2]]),\n        )\n\n\ndef test_covariance_constraint_in_2_by_2_case():\n    spector_data = sm.datasets.spector.load_pandas()\n    spector_data.exog = sm.add_constant(spector_data.exog)\n    x_df = sm.add_constant(spector_data.exog)\n\n    start_params = np.array([-10, 2, 0.2, 2])\n    kwargs = {\"y\": spector_data.endog, \"x\": x_df.to_numpy()}\n\n    result = maximize(\n        fun=logit_loglike,\n        fun_kwargs=kwargs,\n        params=start_params,\n        algorithm=\"scipy_lbfgsb\",\n        constraints=om.FlatCovConstraint(selector=lambda x: x[[1, 2, 3]]),\n    )\n\n    expected = np.array([-13.0213351, 2.82611417, 0.09515704, 2.37867869])\n    aaae(result.params, expected, decimal=4)\n"
  },
  {
    "path": "tests/optimagic/optimization/test_with_logging.py",
    "content": "\"\"\"Test optimizations with logging in a temporary database.\n\n- Only minimize\n- Only dict criterion\n- scipy_lbfgsb and scipy_ls_dogbox\n- with and without derivatives\n\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom numpy.testing import assert_array_almost_equal as aaae\nfrom pybaum import tree_just_flatten\n\nfrom optimagic import mark\nfrom optimagic.examples.criterion_functions import (\n    sos_derivatives,\n    sos_ls,\n)\nfrom optimagic.logging.logger import SQLiteLogOptions\nfrom optimagic.logging.types import ExistenceStrategy\nfrom optimagic.optimization.optimize import minimize\nfrom optimagic.parameters.tree_registry import get_registry\n\n\n@mark.least_squares\ndef flexible_sos_ls(params):\n    return params\n\n\nalgorithms = [\"scipy_lbfgsb\", \"scipy_ls_dogbox\"]\nderivatives = [None, sos_derivatives]\nparams = [pd.DataFrame({\"value\": np.arange(3)}), np.arange(3), {\"a\": 1, \"b\": 2, \"c\": 3}]\n\ntest_cases = []\nfor algo in algorithms:\n    for p in params:\n        test_cases.append((algo, p))\n\n\n@pytest.mark.parametrize(\"algorithm, params\", test_cases)\ndef test_optimization_with_valid_logging(algorithm, params):\n    res = minimize(\n        flexible_sos_ls,\n        params=params,\n        algorithm=algorithm,\n        logging=\"logging.db\",\n    )\n    registry = get_registry(extended=True)\n    flat = np.array(tree_just_flatten(res.params, registry=registry))\n    aaae(flat, np.zeros(3))\n\n\ndef test_optimization_with_existing_exsting_database():\n    minimize(\n        sos_ls,\n        pd.Series([1, 2, 3], name=\"value\").to_frame(),\n        algorithm=\"scipy_lbfgsb\",\n        logging=SQLiteLogOptions(\n            \"logging.db\", if_database_exists=ExistenceStrategy.REPLACE\n        ),\n    )\n\n    with pytest.raises(FileExistsError):\n        minimize(\n            sos_ls,\n            pd.Series([1, 2, 3], name=\"value\").to_frame(),\n            algorithm=\"scipy_lbfgsb\",\n        
    logging=SQLiteLogOptions(\n                \"logging.db\", if_database_exists=ExistenceStrategy.RAISE\n            ),\n        )\n"
  },
  {
    "path": "tests/optimagic/optimization/test_with_multistart.py",
    "content": "import functools\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom numpy.testing import assert_array_almost_equal as aaae\n\nimport optimagic as om\nfrom optimagic.examples.criterion_functions import (\n    sos_ls,\n    sos_scalar,\n)\nfrom optimagic.logging import SQLiteLogReader\nfrom optimagic.optimization.optimize import maximize, minimize\nfrom optimagic.optimization.optimize_result import OptimizeResult\nfrom optimagic.parameters.bounds import Bounds\n\ncriteria = [sos_scalar, sos_ls]\n\n\n@pytest.fixture()\ndef params():\n    params = pd.DataFrame()\n    params[\"value\"] = np.arange(4)\n    params[\"soft_lower_bound\"] = [-5] * 4\n    params[\"soft_upper_bound\"] = [10] * 4\n    return params\n\n\ntest_cases = [\n    (sos_scalar, \"minimize\"),\n    (sos_ls, \"minimize\"),\n    (sos_scalar, \"maximize\"),\n]\n\n\ndef _switch_sign(func):\n    \"\"\"Switch sign of all outputs of a function.\"\"\"\n\n    @functools.wraps(func)\n    def wrapper(*args, **kwargs):\n        unswitched = func(*args, **kwargs)\n        if isinstance(unswitched, dict):\n            switched = {key: -val for key, val in unswitched.items()}\n        elif isinstance(unswitched, (tuple, list)):\n            switched = []\n            for entry in unswitched:\n                if isinstance(entry, dict):\n                    switched.append({key: -val for key, val in entry.items()})\n                else:\n                    switched.append(-entry)\n            if isinstance(unswitched, tuple):\n                switched = tuple(switched)\n        else:\n            switched = -unswitched\n        return switched\n\n    return wrapper\n\n\n@pytest.mark.parametrize(\"criterion, direction\", test_cases)\ndef test_multistart_optimization_with_sum_of_squares_at_defaults(\n    criterion, direction, params\n):\n    if direction == \"minimize\":\n        res = minimize(\n            fun=criterion,\n            params=params,\n            algorithm=\"scipy_lbfgsb\",\n 
           multistart=True,\n        )\n    else:\n        res = maximize(\n            fun=_switch_sign(criterion),\n            params=params,\n            algorithm=\"scipy_lbfgsb\",\n            multistart=True,\n        )\n\n    assert hasattr(res, \"multistart_info\")\n    ms_info = res.multistart_info\n    assert len(ms_info.exploration_sample) == 400\n    assert isinstance(ms_info.exploration_results, list)\n    assert len(ms_info.exploration_results) == 400\n    assert all(isinstance(entry, float) for entry in ms_info.exploration_results)\n    assert all(isinstance(entry, OptimizeResult) for entry in ms_info.local_optima)\n    assert all(isinstance(entry, pd.DataFrame) for entry in ms_info.start_parameters)\n    assert np.allclose(res.fun, 0)\n    aaae(res.params[\"value\"], np.zeros(4))\n\n\ndef test_multistart_with_existing_sample(params):\n    sample = [params.assign(value=x) for x in np.arange(20).reshape(5, 4) / 10]\n    options = om.MultistartOptions(sample=sample)\n\n    res = minimize(\n        fun=sos_ls,\n        params=params,\n        algorithm=\"scipy_lbfgsb\",\n        multistart=options,\n    )\n\n    assert all(\n        got.equals(expected)\n        for expected, got in zip(\n            sample, res.multistart_info.exploration_sample, strict=False\n        )\n    )\n\n\ndef test_convergence_via_max_discoveries_works(params):\n    options = om.MultistartOptions(\n        convergence_xtol_rel=np.inf,\n        convergence_max_discoveries=2,\n    )\n\n    res = maximize(\n        fun=_switch_sign(sos_scalar),\n        params=params,\n        algorithm=\"scipy_lbfgsb\",\n        multistart=options,\n    )\n\n    assert len(res.multistart_info.local_optima) == 2\n\n\ndef test_steps_are_logged_as_skipped_if_convergence(tmp_path, params):\n    options = om.MultistartOptions(\n        n_samples=10 * len(params),\n        convergence_xtol_rel=np.inf,\n        convergence_max_discoveries=2,\n    )\n    path = tmp_path / \"logging.db\"\n    
minimize(\n        fun=sos_ls,\n        params=params,\n        algorithm=\"scipy_lbfgsb\",\n        multistart=options,\n        logging=path,\n    )\n\n    steps_table = SQLiteLogReader(path)._step_store.to_df()\n    expected_status = [\"complete\", \"complete\", \"complete\", \"skipped\", \"skipped\"]\n    assert steps_table[\"status\"].tolist() == expected_status\n\n\ndef test_all_steps_occur_in_optimization_iterations_if_no_convergence(params):\n    options = om.MultistartOptions(\n        convergence_max_discoveries=np.inf,\n        n_samples=10 * len(params),\n    )\n\n    minimize(\n        fun=sos_ls,\n        params=params,\n        algorithm=\"scipy_lbfgsb\",\n        multistart=options,\n        logging=\"logging.db\",\n    )\n\n    logging = SQLiteLogReader(\"logging.db\")\n    iterations = logging._iteration_store.to_df()\n\n    present_steps = set(iterations[\"step\"])\n\n    assert present_steps == {1, 2, 3, 4, 5}\n\n\ndef test_with_non_transforming_constraints(params):\n    res = minimize(\n        fun=sos_ls,\n        params=params,\n        constraints=om.FixedConstraint(selector=lambda p: p.loc[[0, 1]]),\n        algorithm=\"scipy_lbfgsb\",\n        multistart=om.MultistartOptions(seed=12345),\n    )\n\n    aaae(res.params[\"value\"].to_numpy(), np.array([0, 1, 0, 0]))\n\n\ndef test_error_is_raised_with_transforming_constraints(params):\n    with pytest.raises(NotImplementedError):\n        minimize(\n            fun=sos_ls,\n            params=params,\n            constraints=om.ProbabilityConstraint(selector=lambda p: p.loc[[0, 1]]),\n            algorithm=\"scipy_lbfgsb\",\n            multistart=om.MultistartOptions(seed=12345),\n        )\n\n\ndef test_multistart_with_numpy_params():\n    res = minimize(\n        fun=lambda params: params @ params,\n        params=np.arange(5),\n        algorithm=\"scipy_lbfgsb\",\n        bounds=Bounds(soft_lower=np.full(5, -10), soft_upper=np.full(5, 10)),\n        
multistart=om.MultistartOptions(seed=12345),\n    )\n\n    aaae(res.params, np.zeros(5))\n\n\ndef test_multistart_with_rng_seed():\n    rng = np.random.default_rng(12345)\n\n    res = minimize(\n        fun=lambda params: params @ params,\n        params=np.arange(5),\n        algorithm=\"scipy_lbfgsb\",\n        bounds=Bounds(soft_lower=np.full(5, -10), soft_upper=np.full(5, 10)),\n        multistart=om.MultistartOptions(seed=rng),\n    )\n\n    aaae(res.params, np.zeros(5))\n\n\ndef test_with_invalid_bounds():\n    with pytest.raises(ValueError):\n        minimize(\n            fun=lambda x: x @ x,\n            params=np.arange(5),\n            algorithm=\"scipy_neldermead\",\n            multistart=True,\n        )\n\n\ndef test_with_scaling():\n    def _crit(params):\n        x = params - np.arange(len(params))\n        return x @ x\n\n    res = minimize(\n        fun=_crit,\n        params=np.full(5, 10),\n        bounds=Bounds(soft_lower=np.full(5, -1), soft_upper=np.full(5, 11)),\n        algorithm=\"scipy_lbfgsb\",\n        multistart=True,\n    )\n\n    aaae(res.params, np.arange(5))\n\n\ndef test_with_ackley():\n    def ackley(x):\n        out = (\n            -20 * np.exp(-0.2 * np.sqrt(np.mean(x**2)))\n            - np.exp(np.mean(np.cos(2 * np.pi * x)))\n            + 20\n            + np.exp(1)\n        )\n        return out\n\n    dim = 5\n\n    kwargs = {\n        \"fun\": ackley,\n        \"params\": np.full(dim, -10),\n        \"bounds\": Bounds(lower=np.full(dim, -32), upper=np.full(dim, 32)),\n        \"algo_options\": {\"stopping.maxfun\": 1000},\n    }\n\n    minimize(\n        **kwargs,\n        algorithm=\"scipy_lbfgsb\",\n        multistart=om.MultistartOptions(\n            n_samples=200,\n            stopping_maxopt=20,\n            convergence_max_discoveries=10,\n        ),\n    )\n\n\ndef test_multistart_with_least_squares_optimizers():\n    est = minimize(\n        fun=sos_ls,\n        params=np.array([-1, 1.0]),\n        
bounds=Bounds(soft_lower=np.full(2, -10), soft_upper=np.full(2, 10)),\n        algorithm=\"scipy_ls_trf\",\n        multistart=om.MultistartOptions(n_samples=3, stopping_maxopt=3),\n    )\n\n    aaae(est.params, np.zeros(2))\n\n\ndef test_with_ackley_using_dict_options():\n    def ackley(x):\n        out = (\n            -20 * np.exp(-0.2 * np.sqrt(np.mean(x**2)))\n            - np.exp(np.mean(np.cos(2 * np.pi * x)))\n            + 20\n            + np.exp(1)\n        )\n        return out\n\n    dim = 5\n\n    kwargs = {\n        \"fun\": ackley,\n        \"params\": np.full(dim, -10),\n        \"bounds\": Bounds(lower=np.full(dim, -32), upper=np.full(dim, 32)),\n        \"algo_options\": {\"stopping.maxfun\": 1000},\n    }\n\n    minimize(\n        **kwargs,\n        algorithm=\"scipy_lbfgsb\",\n        multistart={\n            \"n_samples\": 200,\n            \"stopping_maxopt\": 20,\n            \"convergence_max_discoveries\": 10,\n        },\n    )\n\n\n@pytest.mark.slow\ndef test_with_batch_evaluator(params):\n    options = om.MultistartOptions(batch_evaluator=\"threading\")\n\n    minimize(\n        fun=sos_scalar,\n        params=params,\n        algorithm=\"scipy_lbfgsb\",\n        multistart=options,\n    )\n"
  },
  {
    "path": "tests/optimagic/optimization/test_with_nonlinear_constraints.py",
    "content": "import itertools\nimport warnings\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_array_almost_equal as aaae\n\nimport optimagic as om\nfrom optimagic import maximize, minimize\nfrom optimagic.algorithms import NonlinearConstrainedAlgorithms\nfrom optimagic.config import IS_CYIPOPT_INSTALLED\nfrom optimagic.parameters.bounds import Bounds\n\nNLC_ALGORITHMS = NonlinearConstrainedAlgorithms()._available_algorithms_dict\n\n# ======================================================================================\n# Two-dimension example with equality and inequality constraints\n# ======================================================================================\n\n\n@pytest.fixture()\ndef nlc_2d_example():\n    \"\"\"Non-linear constraints: 2-dimensional example.\n\n    See the example section in https://en.wikipedia.org/wiki/Nonlinear_programming.\n\n    \"\"\"\n\n    def criterion(x):\n        return np.sum(x)\n\n    def derivative(x):\n        return np.ones_like(x)\n\n    def constraint_func(x):\n        value = np.dot(x, x)\n        return np.array([value - 1, 2 - value])\n\n    def constraint_jac(x):\n        return 2 * np.vstack((x.reshape(1, -1), -x.reshape(1, -1)))\n\n    constraints_long = om.NonlinearConstraint(\n        func=constraint_func,\n        derivative=constraint_jac,\n        lower_bound=np.zeros(2),\n        tol=1e-8,\n    )\n\n    constraints_flat = om.NonlinearConstraint(\n        func=lambda x: np.dot(x, x),\n        derivative=lambda x: 2 * x,\n        lower_bound=1,\n        upper_bound=2,\n        tol=1e-8,\n    )\n\n    constraints_equality = om.NonlinearConstraint(\n        func=lambda x: np.dot(x, x),\n        derivative=lambda x: 2 * x,\n        value=2,\n    )\n\n    constraints_equality_and_inequality = [\n        om.NonlinearConstraint(\n            func=lambda x: np.dot(x, x),\n            derivative=lambda x: 2 * x,\n            lower_bound=1,\n        ),\n        om.NonlinearConstraint(\n  
          func=lambda x: np.dot(x, x),\n            derivative=lambda x: 2 * x,\n            value=2,\n        ),\n    ]\n\n    _kwargs = {\n        \"criterion\": criterion,\n        \"params\": np.array([0, np.sqrt(2)]),\n        \"derivative\": derivative,\n        \"bounds\": Bounds(lower=np.zeros(2), upper=2 * np.ones(2)),\n    }\n\n    kwargs = {\n        \"flat\": {**_kwargs, \"constraints\": constraints_flat},\n        \"long\": {**_kwargs, \"constraints\": constraints_long},\n        \"equality\": {**_kwargs, \"constraints\": constraints_equality},\n        \"equality_and_inequality\": {\n            **_kwargs,\n            \"constraints\": constraints_equality_and_inequality,\n        },\n    }\n\n    solution_x = np.ones(2)\n\n    return solution_x, kwargs\n\n\nTEST_CASES = list(\n    itertools.product(\n        NLC_ALGORITHMS, [\"flat\", \"long\", \"equality\", \"equality_and_inequality\"]\n    )\n)\n\n\n@pytest.mark.parametrize(\"algorithm, constr_type\", TEST_CASES)\ndef test_nonlinear_optimization(nlc_2d_example, algorithm, constr_type):\n    \"\"\"Test that available nonlinear optimizers solve a nonlinear constraints problem.\n\n    We test for the cases of \"equality\", \"inequality\" and \"equality_and_inequality\"\n    constraints.\n\n    \"\"\"\n    if \"equality\" in constr_type and algorithm == \"nlopt_mma\":\n        pytest.skip(reason=\"Very slow and low accuracy.\")\n\n    solution_x, kwargs = nlc_2d_example\n    if algorithm == \"scipy_cobyla\":\n        del kwargs[constr_type][\"bounds\"]\n\n    with warnings.catch_warnings():\n        warnings.simplefilter(\"ignore\")\n        result = maximize(algorithm=algorithm, **kwargs[constr_type])\n\n    if NLC_ALGORITHMS[algorithm].algo_info.is_global:\n        decimal = 0\n    else:\n        decimal = 4\n\n    aaae(result.params, solution_x, decimal=decimal)\n\n\n# ======================================================================================\n# Documentation example\n# 
======================================================================================\n\n\ndef criterion(params):\n    offset = np.linspace(1, 0, len(params))\n    x = params - offset\n    return x @ x\n\n\n@pytest.mark.parametrize(\"algorithm\", NLC_ALGORITHMS)\ndef test_documentation_example(algorithm):\n    if algorithm in (\"nlopt_mma\", \"ipopt\"):\n        pytest.skip(reason=\"Slow.\")\n\n    kwargs = {\n        \"bounds\": Bounds(lower=np.zeros(6), upper=2 * np.ones(6)),\n    }\n\n    if algorithm == \"scipy_cobyla\":\n        del kwargs[\"bounds\"]\n\n    minimize(\n        fun=criterion,\n        params=np.ones(6),\n        algorithm=algorithm,\n        constraints=om.NonlinearConstraint(\n            func=np.prod,\n            selector=lambda x: x[:-1],\n            value=1.0,\n        ),\n        **kwargs,\n    )\n\n\n# ======================================================================================\n# Test: selection + reparametrization constraint + nonlinear constraint\n# ======================================================================================\n\n\n@pytest.fixture()\ndef general_example():\n    params = {\"a\": np.array([0.1, 0.3, 0.4, 0.2]), \"b\": np.array([1.5, 2])}\n\n    def criterion(params):\n        weights = np.array([0, 1, 2, 3])\n        return params[\"a\"] @ weights + params[\"b\"].sum()\n\n    def selector_probability_constraint(params):\n        return params[\"a\"]\n\n    def selector_nonlinear_constraint(params):\n        return {\"probs\": params[\"a\"][:3][::-1], \"unnecessary\": params[\"b\"]}\n\n    def constraint(selected):\n        return selected[\"probs\"] @ selected[\"probs\"]\n\n    constraints = [\n        om.ProbabilityConstraint(\n            selector=selector_probability_constraint,\n        ),\n        om.NonlinearConstraint(\n            selector=selector_nonlinear_constraint,\n            upper_bound=0.8,\n            func=constraint,\n            tol=0.01,\n        ),\n        
om.NonlinearConstraint(\n            selector=selector_nonlinear_constraint,\n            func=constraint,\n            upper_bound=0.8,\n            tol=0.01,\n        ),\n    ]\n\n    lower_bound = {\"b\": np.array([0, 0])}\n    upper_bound = {\"b\": np.array([2, 2])}\n\n    kwargs = {\n        \"fun\": criterion,\n        \"params\": params,\n        \"constraints\": constraints,\n        \"lower_bounds\": lower_bound,\n        \"upper_bounds\": upper_bound,\n    }\n    return kwargs\n\n\nTEST_CASES = list(itertools.product([\"ipopt\"], [True, False]))\n\n\n@pytest.mark.skipif(not IS_CYIPOPT_INSTALLED, reason=\"Needs ipopt\")\n@pytest.mark.parametrize(\"algorithm, skip_checks\", TEST_CASES)\ndef test_general_example(general_example, algorithm, skip_checks):\n    kwargs = general_example\n\n    with warnings.catch_warnings():\n        warnings.simplefilter(\"ignore\")\n        res = minimize(algorithm=algorithm, skip_checks=skip_checks, **kwargs)\n\n    optimal_p1 = 0.5 + np.sqrt(3 / 20)  # can be derived analytically\n    optimal_p2 = 1 - optimal_p1\n\n    aaae(res.params[\"a\"], np.array([optimal_p1, optimal_p2, 0, 0]), decimal=4)\n    aaae(res.params[\"b\"], np.array([0.0, 0]), decimal=5)\n"
  },
  {
    "path": "tests/optimagic/optimization/test_with_scaling.py",
    "content": "import numpy as np\nfrom numpy.testing import assert_array_almost_equal as aaae\n\nimport optimagic as om\nfrom optimagic.optimization.optimize import maximize, minimize\nfrom optimagic.parameters.scaling import ScalingOptions\n\n\ndef test_minimize_with_scaling_options():\n    got = minimize(\n        fun=lambda x: x @ x,\n        x0=np.arange(3),\n        jac=lambda x: 2 * x,\n        algorithm=\"scipy_lbfgsb\",\n        constraints=om.FixedConstraint(lambda x: x[2]),\n        scaling=ScalingOptions(method=\"start_values\", magnitude=1.2),\n    )\n    aaae(got.x, np.array([0, 0, 2]))\n\n\ndef test_minimize_with_scaling_options_dict():\n    got = minimize(\n        fun=lambda x: x @ x,\n        x0=np.arange(3),\n        jac=lambda x: 2 * x,\n        algorithm=\"scipy_lbfgsb\",\n        constraints=om.FixedConstraint(lambda x: x[2]),\n        scaling={\"method\": \"start_values\", \"magnitude\": 1.2},\n    )\n    aaae(got.x, np.array([0, 0, 2]))\n\n\ndef test_minimize_with_scaling_true():\n    got = minimize(\n        fun=lambda x: x @ x,\n        x0=np.arange(3),\n        jac=lambda x: 2 * x,\n        algorithm=\"scipy_lbfgsb\",\n        constraints=om.FixedConstraint(lambda x: x[2]),\n        scaling=True,\n    )\n    aaae(got.x, np.array([0, 0, 2]))\n\n\ndef test_maximize_with_scaling_options():\n    got = maximize(\n        fun=lambda x: -x @ x,\n        x0=np.arange(3),\n        jac=lambda x: -2 * x,\n        algorithm=\"scipy_lbfgsb\",\n        constraints=om.FixedConstraint(lambda x: x[2]),\n        scaling=ScalingOptions(method=\"start_values\", magnitude=1.2),\n    )\n    aaae(got.x, np.array([0, 0, 2]))\n\n\ndef test_maximize_with_scaling_options_dict():\n    got = maximize(\n        fun=lambda x: -x @ x,\n        x0=np.arange(3),\n        jac=lambda x: -2 * x,\n        algorithm=\"scipy_lbfgsb\",\n        constraints=om.FixedConstraint(lambda x: x[2]),\n        scaling={\"method\": \"start_values\", \"magnitude\": 1.2},\n    )\n    
aaae(got.x, np.array([0, 0, 2]))\n\n\ndef test_maximize_with_scaling_true():\n    got = maximize(\n        fun=lambda x: -x @ x,\n        x0=np.arange(3),\n        jac=lambda x: -2 * x,\n        algorithm=\"scipy_lbfgsb\",\n        constraints=om.FixedConstraint(lambda x: x[2]),\n        scaling=True,\n    )\n    aaae(got.x, np.array([0, 0, 2]))\n\n\ndef test_minimize_with_scaling_options_with_bounds():\n    got = minimize(\n        fun=lambda x: x @ x,\n        x0=np.arange(3),\n        bounds=om.Bounds(lower=np.array([-1, 0, 0]), upper=np.full(3, 5)),\n        jac=lambda x: 2 * x,\n        algorithm=\"scipy_lbfgsb\",\n        constraints=om.FixedConstraint(lambda x: x[2]),\n        scaling=ScalingOptions(method=\"bounds\", magnitude=1),\n    )\n    aaae(got.x, np.array([0, 0, 2]))\n\n\ndef test_minimize_with_scaling_options_dict_with_bounds():\n    got = minimize(\n        fun=lambda x: x @ x,\n        x0=np.arange(3),\n        bounds=om.Bounds(lower=np.array([-1, 0, 0]), upper=np.full(3, 5)),\n        jac=lambda x: 2 * x,\n        algorithm=\"scipy_lbfgsb\",\n        constraints=om.FixedConstraint(lambda x: x[2]),\n        scaling={\"method\": \"bounds\", \"magnitude\": 1},\n    )\n    aaae(got.x, np.array([0, 0, 2]))\n\n\ndef test_minimize_with_scaling_true_with_bounds():\n    got = minimize(\n        fun=lambda x: x @ x,\n        x0=np.arange(3),\n        bounds=om.Bounds(lower=np.array([-1, 0, 0]), upper=np.full(3, 5)),\n        jac=lambda x: 2 * x,\n        algorithm=\"scipy_lbfgsb\",\n        constraints=om.FixedConstraint(lambda x: x[2]),\n        scaling=True,\n    )\n    aaae(got.x, np.array([0, 0, 2]))\n"
  },
  {
    "path": "tests/optimagic/optimizers/__init__.py",
    "content": ""
  },
  {
    "path": "tests/optimagic/optimizers/_pounders/__init__.py",
    "content": ""
  },
  {
    "path": "tests/optimagic/optimizers/_pounders/fixtures/add_points_until_main_model_fully_linear_i.yaml",
    "content": "---\ndelta: 0.05\nhistory_criterion:\n  -   - 21.53511643627\n      - 14.80453604351\n      - 6.548558251064\n      - 12.54188075473\n      - 9.282890198608\n      - 2.859555210712\n      - 0.9381817894678\n      - 0.2048532883114\n      - 0.8881817894678\n      - 0.3798532883114\n      - -0.9101956814319\n      - -1.36444138824\n      - -0.9351994446357\n      - -1.055070381505\n      - -1.111335532899\n      - -0.1703442432756\n      - 1.580641245921\n      - 19.23511643627\n      - 13.00453604351\n      - 13.94855825106\n      - 11.24188075473\n      - 6.182890198608\n      - -1.240444789288\n      - -0.8618182105322\n      - -1.995146711689\n      - -0.9868182105322\n      - -1.270146711689\n      - -1.135195681432\n      - -0.9144413882404\n      - -3.072699444636\n      - -1.317570381505\n      - -0.9238355328992\n      - 0.9546557567244\n      - -0.3318587540789\n      - 8.635116436265\n      - 15.10453604351\n      - 6.148558251063\n      - 4.841880754733\n      - 5.382890198608\n      - 2.059555210712\n      - -3.361818210532\n      - -2.995146711689\n      - -3.311818210532\n      - -2.395146711689\n      - -2.185195681432\n      - -2.63944138824\n      - -1.985199444636\n      - -1.880070381505\n      - -1.711335532899\n      - -1.407844243276\n      - -0.4818587540789\n      - 2.735116436265\n      - 3.404536043506\n      - 3.148558251063\n      - 3.141880754733\n      - 2.482890198608\n      - 0.5595552107122\n      - -0.7618182105322\n      - -2.995146711689\n      - -0.7993182105322\n      - -2.245146711689\n      - -1.885195681432\n      - -1.96444138824\n      - -1.647699444636\n      - -2.292570381505\n      - -1.486335532899\n      - -1.557844243276\n      - -0.8193587540789\n      - 10.13511643627\n      - 4.748558251063\n      - -2.218096467799\n      - -4.369688200573\n      - -3.659688200573\n      - -1.219688200573\n      - -0.3489655844206\n      - 6.635116436265\n      - 2.248558251063\n      - -1.518096467799\n      - 
-2.939688200573\n      - -4.029688200573\n      - -2.159688200573\n      - -2.038965584421\n      - 5.435116436265\n      - 3.348558251064\n      - -1.818096467799\n      - -2.909688200573\n      - -4.969688200573\n      - -3.469688200573\n      - -0.5389655844206\n      - 6.635116436265\n      - 5.848558251064\n      - -0.918096467799\n      - -4.219688200573\n      - -0.3489655844206\n      - -4.029688200573\n      - -3.659688200573\n      - -0.5389655844206\n      - 5.435116436265\n      - 2.348558251064\n      - -0.0171098013921\n      - -2.718096467799\n      - -4.257793595776\n      - -3.887793595776\n      - -2.006947842151\n      - -2.829688200573\n      - -0.1835757519589\n      - 0.8557490906722\n      - 0.6910344155794\n      - 4.435116436265\n      - 4.348558251064\n      - 0.9828901986079\n      - 0.481903532201\n      - -6.457793595776\n      - -6.137793595776\n      - -1.516947842151\n      - -4.029688200573\n      - -1.013575751959\n      - -0.8342509093278\n      - 1.441034415579\n      - -0.8648835637348\n      - 1.848558251064\n      - 0.6828901986079\n      - 1.081903532201\n      - -5.457793595776\n      - -4.787793595776\n      - 0.1730521578493\n      - -1.139688200573\n      - -3.263575751959\n      - 0.4057490906722\n      - 4.141034415579\n      - 6.635116436265\n      - 2.104536043506\n      - 4.348558251064\n      - 5.641880754733\n      - -0.1171098013921\n      - -2.640444789288\n      - -3.195146711689\n      - -2.325146711689\n      - -4.777695681432\n      - -5.49444138824\n      - -4.762699444636\n      - -5.027570381505\n      - -3.966335532899\n      - -3.510344243276\n      - -2.694358754079\n      - 7.410311799427\n      - 8.980311799427\n      - 1.290311799427\n      - -4.969688200573\n      - -4.709688200573\n      - -0.659688200573\n      - -0.5648835637348\n      - -2.951441748936\n      - 0.1828901986079\n      - 5.081903532201\n      - 3.342206404224\n      - 2.873052157849\n      - 3.162206404224\n      - 
2.983052157849\n      - 0.920311799427\n      - 0.1164242480411\n      - 3.925749090672\n      - 2.761034415579\n      - 9.335116436265\n      - 3.648558251063\n      - -0.3171098013921\n      - -5.718096467799\n      - -2.457793595776\n      - -3.126947842151\n      - -1.897793595776\n      - -2.156947842151\n      - -0.539688200573\n      - -2.693575751959\n      - -0.2742509093278\n      - 2.531034415579\n      - -4.664883563735\n      - 1.548558251064\n      - -7.017109801392\n      - -4.018096467799\n      - -0.7577935957756\n      - -4.526947842151\n      - -0.4677935957756\n      - -3.876947842151\n      - -3.429688200573\n      - -2.813575751959\n      - -0.9442509093278\n      - 0.6610344155794\n      - -3.089688200573\n      - 9.635116436265\n      - 6.848558251064\n      - 0.781903532201\n      - -3.469688200573\n      - -1.108965584421\n      - -3.839688200573\n      - -0.9189655844206\n      - -1.589688200573\n      - -2.159688200573\n      - -1.139688200573\n      - -4.661818210532\n      - -4.211818210532\n      - 10.33511643627\n      - 3.948558251064\n      - -0.3618182105322\n      - -0.3518182105322\n      - -1.477695681432\n      - -2.132699444636\n      - -1.761818210532\n      - -1.471818210532\n      - 9.935116436265\n      - 3.248558251063\n      - -1.997695681432\n      - -2.472699444636\n      - -1.261818210532\n      - -1.211818210532\n  -   - 25.01562287811\n      - 18.67576650474\n      - 10.71425043997\n      - 16.92850306334\n      - 13.83328982937\n      - 7.61143273431\n      - 5.780449944004\n      - 4.918595910462\n      - 5.730449944004\n      - 5.093595910462\n      - 3.573230198002\n      - 2.843276294294\n      - 2.98078293018\n      - 2.569118760852\n      - 2.229814978179\n      - 2.901300021005\n      - 4.398727952741\n      - 22.71562287811\n      - 16.87576650474\n      - 18.11425043997\n      - 15.62850306334\n      - 10.73328982937\n      - 3.51143273431\n      - 3.980449944004\n      - 2.718595910462\n      - 
3.855449944004\n      - 3.443595910462\n      - 3.348230198002\n      - 3.293276294294\n      - 0.8432829301802\n      - 2.306618760852\n      - 2.417314978179\n      - 4.026300021005\n      - 2.486227952741\n      - 12.11562287811\n      - 18.97576650474\n      - 10.31425043997\n      - 9.22850306334\n      - 9.933289829366\n      - 6.81143273431\n      - 1.480449944004\n      - 1.718595910462\n      - 1.530449944004\n      - 2.318595910462\n      - 2.298230198002\n      - 1.568276294294\n      - 1.93078293018\n      - 1.744118760852\n      - 1.629814978179\n      - 1.663800021005\n      - 2.336227952741\n      - 6.215622878108\n      - 7.275766504742\n      - 7.314250439974\n      - 7.52850306334\n      - 7.033289829366\n      - 5.31143273431\n      - 4.080449944004\n      - 1.718595910462\n      - 4.042949944004\n      - 2.468595910462\n      - 2.598230198002\n      - 2.243276294294\n      - 2.26828293018\n      - 1.331618760852\n      - 1.854814978179\n      - 1.513800021005\n      - 1.998727952741\n      - 13.61562287811\n      - 8.914250439974\n      - 2.617857443871\n      - -0.02069875634249\n      - 0.6893012436575\n      - 3.129301243658\n      - 2.348674115464\n      - 10.11562287811\n      - 6.414250439974\n      - 3.317857443871\n      - 1.409301243658\n      - 0.3193012436575\n      - 2.189301243658\n      - 0.6586741154643\n      - 8.915622878108\n      - 7.514250439974\n      - 3.017857443871\n      - 1.439301243658\n      - -0.6206987563425\n      - 0.8793012436575\n      - 2.158674115464\n      - 10.11562287811\n      - 10.01425043997\n      - 3.917857443871\n      - 0.1293012436575\n      - 2.348674115464\n      - 0.3193012436575\n      - 0.6893012436575\n      - 2.158674115464\n      - 8.915622878108\n      - 6.514250439974\n      - 4.533289829366\n      - 2.117857443871\n      - 0.5381907245488\n      - 0.9081907245488\n      - 2.599956711546\n      - 1.519301243658\n      - 3.585844975595\n      - 4.0602621231\n      - 3.388674115464\n      - 
7.915622878108\n      - 8.514250439974\n      - 5.533289829366\n      - 5.317857443871\n      - -1.661809275451\n      - -1.341809275451\n      - 3.089956711546\n      - 0.3193012436575\n      - 2.755844975595\n      - 2.3702621231\n      - 4.138674115464\n      - 2.615622878108\n      - 6.014250439974\n      - 5.233289829366\n      - 5.917857443871\n      - -0.6618092754512\n      - 0.008190724548808\n      - 4.779956711546\n      - 3.209301243658\n      - 0.505844975595\n      - 3.6102621231\n      - 6.838674115464\n      - 10.11562287811\n      - 5.975766504742\n      - 8.514250439974\n      - 10.02850306334\n      - 4.433289829366\n      - 2.11143273431\n      - 1.518595910462\n      - 2.388595910462\n      - -0.2942698019983\n      - -1.286723705706\n      - -0.8467170698198\n      - -1.403381239148\n      - -0.6251850218209\n      - -0.4386999789948\n      - 0.1237279527411\n      - 11.75930124366\n      - 13.32930124366\n      - 5.639301243658\n      - -0.6206987563425\n      - -0.3606987563425\n      - 3.689301243658\n      - 2.915622878108\n      - 1.214250439974\n      - 4.733289829366\n      - 9.917857443871\n      - 8.138190724549\n      - 7.479956711546\n      - 7.958190724549\n      - 7.589956711546\n      - 5.269301243658\n      - 3.885844975595\n      - 7.1302621231\n      - 5.458674115464\n      - 12.81562287811\n      - 7.814250439974\n      - 4.233289829366\n      - -0.8821425561292\n      - 2.338190724549\n      - 1.479956711546\n      - 2.898190724549\n      - 2.449956711546\n      - 3.809301243658\n      - 1.075844975595\n      - 2.9302621231\n      - 5.228674115464\n      - -1.184377121892\n      - 5.714250439974\n      - -2.466710170634\n      - 0.8178574438708\n      - 4.038190724549\n      - 0.07995671154575\n      - 4.328190724549\n      - 0.7299567115457\n      - 0.9193012436575\n      - 0.955844975595\n      - 2.2602621231\n      - 3.358674115464\n      - 1.259301243658\n      - 13.11562287811\n      - 11.01425043997\n      - 
5.617857443871\n      - 0.8793012436575\n      - 1.588674115464\n      - 0.5093012436575\n      - 1.778674115464\n      - 2.759301243658\n      - 2.189301243658\n      - 3.209301243658\n      - 0.1804499440042\n      - 0.6304499440042\n      - 13.81562287811\n      - 8.114250439974\n      - 4.480449944004\n      - 4.490449944004\n      - 3.005730198002\n      - 1.78328293018\n      - 3.080449944004\n      - 3.370449944004\n      - 13.41562287811\n      - 7.414250439974\n      - 2.485730198002\n      - 1.44328293018\n      - 3.580449944004\n      - 3.630449944004\n  -   - 84.68988065196\n      - 70.73054388289\n      - 56.46322643196\n      - 57.38823556867\n      - 49.80586460657\n      - 36.42009030556\n      - 24.97150307276\n      - 18.1321687762\n      - 24.92150307276\n      - 18.3071687762\n      - 12.85194135284\n      - 9.428755329368\n      - 7.671466840337\n      - 5.900125885276\n      - 4.571195356425\n      - 4.515228493968\n      - 5.474513604881\n      - 82.38988065196\n      - 68.93054388289\n      - 63.86322643196\n      - 56.08823556867\n      - 46.70586460657\n      - 32.32009030556\n      - 23.17150307276\n      - 15.9321687762\n      - 23.04650307276\n      - 16.6571687762\n      - 12.62694135284\n      - 9.878755329368\n      - 5.533966840337\n      - 5.637625885276\n      - 4.758695356425\n      - 5.640228493968\n      - 3.562013604881\n      - 71.78988065196\n      - 71.03054388289\n      - 56.06322643196\n      - 49.68823556867\n      - 45.90586460657\n      - 35.62009030556\n      - 20.67150307276\n      - 14.9321687762\n      - 20.72150307276\n      - 15.5321687762\n      - 11.57694135284\n      - 8.153755329368\n      - 6.621466840337\n      - 5.075125885276\n      - 3.971195356425\n      - 3.277728493968\n      - 3.412013604881\n      - 65.88988065196\n      - 59.33054388289\n      - 53.06322643196\n      - 47.98823556867\n      - 43.00586460657\n      - 34.12009030556\n      - 23.27150307276\n      - 14.9321687762\n      - 
23.23400307276\n      - 15.6821687762\n      - 11.87694135284\n      - 8.828755329368\n      - 6.958966840337\n      - 4.662625885276\n      - 4.196195356425\n      - 3.127728493968\n      - 3.074513604881\n      - 73.28988065196\n      - 54.66322643196\n      - 26.00799822147\n      - 7.789506147668\n      - 8.499506147668\n      - 10.93950614767\n      - 3.209942501544\n      - 69.78988065196\n      - 52.16322643196\n      - 26.70799822147\n      - 9.219506147668\n      - 8.129506147668\n      - 9.999506147668\n      - 1.519942501544\n      - 68.58988065196\n      - 53.26322643196\n      - 26.40799822147\n      - 9.249506147668\n      - 7.189506147668\n      - 8.689506147668\n      - 3.019942501544\n      - 69.78988065196\n      - 55.76322643196\n      - 27.30799822147\n      - 7.939506147668\n      - 3.209942501544\n      - 8.129506147668\n      - 8.499506147668\n      - 3.019942501544\n      - 68.58988065196\n      - 52.26322643196\n      - 40.50586460657\n      - 25.50799822147\n      - 16.41235765092\n      - 16.78235765092\n      - 13.65241143766\n      - 9.329506147668\n      - 7.541813269635\n      - 6.010338273791\n      - 4.249942501544\n      - 67.58988065196\n      - 54.26322643196\n      - 41.50586460657\n      - 28.70799822147\n      - 14.21235765092\n      - 14.53235765092\n      - 14.14241143766\n      - 8.129506147668\n      - 6.711813269635\n      - 4.320338273791\n      - 4.999942501544\n      - 62.28988065196\n      - 51.76322643196\n      - 41.20586460657\n      - 29.30799822147\n      - 15.21235765092\n      - 15.88235765092\n      - 15.83241143766\n      - 11.01950614767\n      - 4.461813269635\n      - 5.560338273791\n      - 7.699942501544\n      - 69.78988065196\n      - 58.03054388289\n      - 54.26322643196\n      - 50.48823556867\n      - 40.40586460657\n      - 30.92009030556\n      - 14.7321687762\n      - 15.6021687762\n      - 8.984441352835\n      - 5.298755329368\n      - 3.843966840337\n      - 1.927625885276\n      - 
1.716195356425\n      - 1.175228493968\n      - 1.199513604881\n      - 19.56950614767\n      - 21.13950614767\n      - 13.44950614767\n      - 7.189506147668\n      - 7.449506147668\n      - 11.49950614767\n      - 62.58988065196\n      - 46.96322643196\n      - 40.70586460657\n      - 33.30799822147\n      - 24.01235765092\n      - 18.53241143766\n      - 23.83235765092\n      - 18.64241143766\n      - 13.07950614767\n      - 7.841813269635\n      - 9.080338273791\n      - 6.319942501544\n      - 72.48988065196\n      - 53.56322643196\n      - 40.20586460657\n      - 22.50799822147\n      - 18.21235765092\n      - 12.53241143766\n      - 18.77235765092\n      - 13.50241143766\n      - 11.61950614767\n      - 5.031813269635\n      - 4.880338273791\n      - 6.089942501544\n      - 58.48988065196\n      - 51.46322643196\n      - 33.50586460657\n      - 24.20799822147\n      - 19.91235765092\n      - 11.13241143766\n      - 20.20235765092\n      - 11.78241143766\n      - 8.729506147668\n      - 4.911813269635\n      - 4.210338273791\n      - 4.219942501544\n      - 9.069506147668\n      - 72.78988065196\n      - 56.76322643196\n      - 29.00799822147\n      - 8.689506147668\n      - 2.449942501544\n      - 8.319506147668\n      - 2.639942501544\n      - 10.56950614767\n      - 9.999506147668\n      - 11.01950614767\n      - 19.37150307276\n      - 19.82150307276\n      - 73.48988065196\n      - 53.86322643196\n      - 23.67150307276\n      - 23.68150307276\n      - 12.28444135284\n      - 6.473966840337\n      - 22.27150307276\n      - 22.56150307276\n      - 73.08988065196\n      - 53.16322643196\n      - 11.76444135284\n      - 6.133966840337\n      - 22.77150307276\n      - 22.82150307276\n  -   - 78.17391291542\n      - 66.8366728159\n      - 54.32599616455\n      - 56.48754438985\n      - 49.80586460657\n      - 37.60220537333\n      - 27.26395828245\n      - 20.80723297571\n      - 27.21395828245\n      - 20.98223297571\n      - 15.60547682225\n      - 
12.11967202128\n      - 10.23248733714\n      - 8.30080219923\n      - 6.80055773407\n      - 6.572919168714\n      - 7.365975022026\n      - 75.87391291542\n      - 65.0366728159\n      - 61.72599616455\n      - 55.18754438985\n      - 46.70586460657\n      - 33.50220537333\n      - 25.46395828245\n      - 18.60723297571\n      - 25.33895828245\n      - 19.33223297571\n      - 15.38047682225\n      - 12.56967202128\n      - 8.094987337144\n      - 8.03830219923\n      - 6.98805773407\n      - 7.697919168714\n      - 5.453475022026\n      - 65.27391291542\n      - 67.1366728159\n      - 53.92599616455\n      - 48.78754438985\n      - 45.90586460657\n      - 36.80220537333\n      - 22.96395828245\n      - 17.60723297571\n      - 23.01395828245\n      - 18.20723297571\n      - 14.33047682225\n      - 10.84467202128\n      - 9.182487337144\n      - 7.47580219923\n      - 6.20055773407\n      - 5.335419168714\n      - 5.303475022026\n      - 59.37391291542\n      - 55.4366728159\n      - 50.92599616455\n      - 47.08754438985\n      - 43.00586460657\n      - 35.30220537333\n      - 25.56395828245\n      - 17.60723297571\n      - 25.52645828245\n      - 18.35723297571\n      - 14.63047682225\n      - 11.51967202128\n      - 9.519987337144\n      - 7.06330219923\n      - 6.42555773407\n      - 5.185419168714\n      - 4.965975022026\n      - 66.77391291542\n      - 52.52599616455\n      - 27.88429931353\n      - 10.52352617863\n      - 11.23352617863\n      - 13.67352617863\n      - 5.021362784819\n      - 63.27391291542\n      - 50.02599616455\n      - 28.58429931353\n      - 11.95352617863\n      - 10.86352617863\n      - 12.73352617863\n      - 3.331362784819\n      - 62.07391291542\n      - 51.12599616455\n      - 28.28429931353\n      - 11.98352617863\n      - 9.923526178634\n      - 11.42352617863\n      - 4.831362784819\n      - 63.27391291542\n      - 53.62599616455\n      - 29.18429931353\n      - 10.67352617863\n      - 5.021362784819\n      - 10.86352617863\n   
   - 11.23352617863\n      - 4.831362784819\n      - 62.07391291542\n      - 50.12599616455\n      - 40.50586460657\n      - 27.38429931353\n      - 18.95079727771\n      - 19.32079727771\n      - 16.391415976\n      - 12.06352617863\n      - 10.02497402658\n      - 8.153464959245\n      - 6.061362784819\n      - 61.07391291542\n      - 52.12599616455\n      - 41.50586460657\n      - 30.58429931353\n      - 16.75079727771\n      - 17.07079727771\n      - 16.881415976\n      - 10.86352617863\n      - 9.194974026576\n      - 6.463464959245\n      - 6.811362784819\n      - 55.77391291542\n      - 49.62599616455\n      - 41.20586460657\n      - 31.18429931353\n      - 17.75079727771\n      - 18.42079727771\n      - 18.571415976\n      - 13.75352617863\n      - 6.944974026576\n      - 7.703464959245\n      - 9.511362784819\n      - 63.27391291542\n      - 54.1366728159\n      - 52.12599616455\n      - 49.58754438985\n      - 40.40586460657\n      - 32.10220537333\n      - 17.40723297571\n      - 18.27723297571\n      - 11.73797682225\n      - 7.989672021276\n      - 6.404987337144\n      - 4.32830219923\n      - 3.94555773407\n      - 3.232919168714\n      - 3.090975022026\n      - 22.30352617863\n      - 23.87352617863\n      - 16.18352617863\n      - 9.923526178634\n      - 10.18352617863\n      - 14.23352617863\n      - 56.07391291542\n      - 44.82599616455\n      - 40.70586460657\n      - 35.18429931353\n      - 26.55079727771\n      - 21.271415976\n      - 26.37079727771\n      - 21.381415976\n      - 15.81352617863\n      - 10.32497402658\n      - 11.22346495925\n      - 8.131362784819\n      - 65.97391291542\n      - 51.42599616455\n      - 40.20586460657\n      - 24.38429931353\n      - 20.75079727771\n      - 15.271415976\n      - 21.31079727771\n      - 16.241415976\n      - 14.35352617863\n      - 7.514974026576\n      - 7.023464959245\n      - 7.901362784819\n      - 51.97391291542\n      - 49.32599616455\n      - 33.50586460657\n      - 26.08429931353\n    
  - 22.45079727771\n      - 13.871415976\n      - 22.74079727771\n      - 14.521415976\n      - 11.46352617863\n      - 7.394974026576\n      - 6.353464959245\n      - 6.031362784819\n      - 11.80352617863\n      - 66.27391291542\n      - 54.62599616455\n      - 30.88429931353\n      - 11.42352617863\n      - 4.261362784819\n      - 11.05352617863\n      - 4.451362784819\n      - 13.30352617863\n      - 12.73352617863\n      - 13.75352617863\n      - 21.66395828245\n      - 22.11395828245\n      - 66.97391291542\n      - 51.72599616455\n      - 25.96395828245\n      - 25.97395828245\n      - 15.03797682225\n      - 9.034987337144\n      - 24.56395828245\n      - 24.85395828245\n      - 66.57391291542\n      - 51.02599616455\n      - 14.51797682225\n      - 8.694987337144\n      - 25.06395828245\n      - 25.11395828245\n  -   - 122.1511527598\n      - 122.6211240639\n      - 156.1508499375\n      - -599.1450584808\n      - -12.38948471101\n      - 19.33755088278\n      - 21.40590723345\n      - 17.86925432536\n      - 21.35590723345\n      - 18.04425432536\n      - 13.84616571438\n      - 10.95881863539\n      - 9.418115456519\n      - 7.704952567843\n      - 6.351080428381\n      - 6.22595148445\n      - 7.093292608367\n      - 119.8511527598\n      - 120.8211240639\n      - 163.5508499375\n      - -600.4450584808\n      - -15.48948471101\n      - 15.23755088278\n      - 19.60590723345\n      - 15.66925432536\n      - 19.48090723345\n      - 16.39425432536\n      - 13.62116571438\n      - 11.40881863539\n      - 7.280615456519\n      - 7.442452567843\n      - 6.538580428381\n      - 7.35095148445\n      - 5.180792608367\n      - 109.2511527598\n      - 122.9211240639\n      - 155.7508499375\n      - -606.8450584808\n      - -16.28948471101\n      - 18.53755088278\n      - 17.10590723345\n      - 14.66925432536\n      - 17.15590723345\n      - 15.26925432536\n      - 12.57116571438\n      - 9.683818635389\n      - 8.368115456519\n      - 6.879952567843\n      - 
5.751080428381\n      - 4.98845148445\n      - 5.030792608367\n      - 103.3511527598\n      - 111.2211240639\n      - 152.7508499375\n      - -608.5450584808\n      - -19.18948471101\n      - 17.03755088278\n      - 19.70590723345\n      - 14.66925432536\n      - 19.66840723345\n      - 15.41925432536\n      - 12.87116571438\n      - 10.35881863539\n      - 8.705615456519\n      - 6.467452567843\n      - 5.976080428381\n      - 4.83845148445\n      - 4.693292608367\n      - 110.7511527598\n      - 154.3508499375\n      - 18.46995434894\n      - 9.107714197815\n      - 9.817714197815\n      - 12.25771419782\n      - 4.778284133588\n      - 107.2511527598\n      - 151.8508499375\n      - 19.16995434894\n      - 10.53771419782\n      - 9.447714197815\n      - 11.31771419782\n      - 3.088284133588\n      - 106.0511527598\n      - 152.9508499375\n      - 18.86995434894\n      - 10.56771419782\n      - 8.507714197815\n      - 10.00771419782\n      - 4.588284133588\n      - 107.2511527598\n      - 155.4508499375\n      - 19.76995434894\n      - 9.257714197815\n      - 4.778284133588\n      - 9.447714197815\n      - 9.817714197815\n      - 4.588284133588\n      - 106.0511527598\n      - 151.9508499375\n      - -21.68948471101\n      - 17.96995434894\n      - 14.92797213765\n      - 15.29797213765\n      - 14.15288063203\n      - 10.64771419782\n      - 9.331447324355\n      - 7.759548240156\n      - 5.818284133588\n      - 105.0511527598\n      - 153.9508499375\n      - -20.68948471101\n      - 21.16995434894\n      - 12.72797213765\n      - 13.04797213765\n      - 14.64288063203\n      - 9.447714197815\n      - 8.501447324355\n      - 6.069548240156\n      - 6.568284133588\n      - 99.75115275983\n      - 151.4508499375\n      - -20.98948471101\n      - 21.76995434894\n      - 13.72797213765\n      - 14.39797213765\n      - 16.33288063203\n      - 12.33771419782\n      - 6.251447324355\n      - 7.309548240156\n      - 9.268284133588\n      - 107.2511527598\n      - 
109.9211240639\n      - 153.9508499375\n      - -606.0450584808\n      - -21.78948471101\n      - 13.83755088278\n      - 14.46925432536\n      - 15.33925432536\n      - 9.978665714379\n      - 6.828818635389\n      - 5.590615456519\n      - 3.732452567843\n      - 3.496080428381\n      - 2.88595148445\n      - 2.818292608367\n      - 20.88771419782\n      - 22.45771419782\n      - 14.76771419782\n      - 8.507714197815\n      - 8.767714197815\n      - 12.81771419782\n      - 100.0511527598\n      - 146.6508499375\n      - -21.48948471101\n      - 25.76995434894\n      - 22.52797213765\n      - 19.03288063203\n      - 22.34797213765\n      - 19.14288063203\n      - 14.39771419782\n      - 9.631447324355\n      - 10.82954824016\n      - 7.888284133588\n      - 109.9511527598\n      - 153.2508499375\n      - -21.98948471101\n      - 14.96995434894\n      - 16.72797213765\n      - 13.03288063203\n      - 17.28797213765\n      - 14.00288063203\n      - 12.93771419782\n      - 6.821447324355\n      - 6.629548240156\n      - 7.658284133588\n      - 95.95115275983\n      - 151.1508499375\n      - -28.68948471101\n      - 16.66995434894\n      - 18.42797213765\n      - 11.63288063203\n      - 18.71797213765\n      - 12.28288063203\n      - 10.04771419782\n      - 6.701447324355\n      - 5.959548240156\n      - 5.788284133588\n      - 10.38771419782\n      - 110.2511527598\n      - 156.4508499375\n      - 21.46995434894\n      - 10.00771419782\n      - 4.018284133588\n      - 9.637714197815\n      - 4.208284133588\n      - 11.88771419782\n      - 11.31771419782\n      - 12.33771419782\n      - 15.80590723345\n      - 16.25590723345\n      - 110.9511527598\n      - 153.5508499375\n      - 20.10590723345\n      - 20.11590723345\n      - 13.27866571438\n      - 8.220615456519\n      - 18.70590723345\n      - 18.99590723345\n      - 110.5511527598\n      - 152.8508499375\n      - 12.75866571438\n      - 7.880615456519\n      - 19.20590723345\n      - 19.25590723345\n  -   - 
-35.35804307658\n      - -53.29699942572\n      - -72.06021768605\n      - -76.24300976651\n      - -89.66597211166\n      - -117.5990460751\n      - -178.3813984168\n      - -314.1942358597\n      - -178.4313984168\n      - -314.0192358597\n      - -1385.074532566\n      - 503.9556946656\n      - 197.4559049595\n      - 115.7942913474\n      - 78.11722466414\n      - 57.55318477565\n      - 45.51113019711\n      - -37.65804307658\n      - -55.09699942572\n      - -64.66021768605\n      - -77.54300976651\n      - -92.76597211166\n      - -121.6990460751\n      - -180.1813984168\n      - -316.3942358597\n      - -180.3063984168\n      - -315.6692358597\n      - -1385.299532566\n      - 504.4056946656\n      - 195.3184049595\n      - 115.5317913474\n      - 78.30472466414\n      - 58.67818477565\n      - 43.59863019711\n      - -48.25804307658\n      - -52.99699942572\n      - -72.46021768605\n      - -83.94300976651\n      - -93.56597211166\n      - -118.3990460751\n      - -182.6813984168\n      - -317.3942358597\n      - -182.6313984168\n      - -316.7942358597\n      - -1386.349532566\n      - 502.6806946656\n      - 196.4059049595\n      - 114.9692913474\n      - 77.51722466414\n      - 56.31568477565\n      - 43.44863019711\n      - -54.15804307658\n      - -64.69699942572\n      - -75.46021768605\n      - -85.64300976651\n      - -96.46597211166\n      - -119.8990460751\n      - -180.0813984168\n      - -317.3942358597\n      - -180.1188984168\n      - -316.6442358597\n      - -1386.049532566\n      - 503.3556946656\n      - 196.7434049595\n      - 114.5567913474\n      - 77.74222466414\n      - 56.16568477565\n      - 43.11113019711\n      - -46.75804307658\n      - -73.86021768605\n      - -148.0932859294\n      - 1691.297253326\n      - 1692.007253326\n      - 1694.447253326\n      - 38.42217232425\n      - -50.25804307658\n      - -76.36021768605\n      - -147.3932859294\n      - 1692.727253326\n      - 1691.637253326\n      - 1693.507253326\n      - 
36.73217232425\n      - -51.45804307658\n      - -75.26021768605\n      - -147.6932859294\n      - 1692.757253326\n      - 1690.697253326\n      - 1692.197253326\n      - 38.23217232425\n      - -50.25804307658\n      - -72.76021768605\n      - -146.7932859294\n      - 1691.447253326\n      - 38.42217232425\n      - 1691.637253326\n      - 1692.007253326\n      - 38.23217232425\n      - -51.45804307658\n      - -76.26021768605\n      - -98.96597211166\n      - -148.5932859294\n      - -233.0828036794\n      - -232.7128036794\n      - -508.8494431202\n      - 1692.837253326\n      - 148.258324502\n      - 68.02827286157\n      - 39.46217232425\n      - -52.45804307658\n      - -74.26021768605\n      - -97.96597211166\n      - -145.3932859294\n      - -235.2828036794\n      - -234.9628036794\n      - -508.3594431202\n      - 1691.637253326\n      - 147.428324502\n      - 66.33827286157\n      - 40.21217232425\n      - -57.75804307658\n      - -76.76021768605\n      - -98.26597211166\n      - -144.7932859294\n      - -234.2828036794\n      - -233.6128036794\n      - -506.6694431202\n      - 1694.527253326\n      - 145.178324502\n      - 67.57827286157\n      - 42.91217232425\n      - -50.25804307658\n      - -65.99699942572\n      - -74.26021768605\n      - -83.14300976651\n      - -99.06597211166\n      - -123.0990460751\n      - -317.5942358597\n      - -316.7242358597\n      - -1388.942032566\n      - 499.8256946656\n      - 193.6284049595\n      - 111.8217913474\n      - 75.26222466414\n      - 54.21318477565\n      - 41.23613019711\n      - 1703.077253326\n      - 1704.647253326\n      - 1696.957253326\n      - 1690.697253326\n      - 1690.957253326\n      - 1695.007253326\n      - -57.45804307658\n      - -81.56021768605\n      - -98.76597211166\n      - -140.7932859294\n      - -225.4828036794\n      - -503.9694431202\n      - -225.6628036794\n      - -503.8594431202\n      - 1696.587253326\n      - 148.558324502\n      - 71.09827286157\n      - 
41.53217232425\n      - -47.55804307658\n      - -74.96021768605\n      - -99.26597211166\n      - -151.5932859294\n      - -231.2828036794\n      - -509.9694431202\n      - -230.7228036794\n      - -508.9994431202\n      - 1695.127253326\n      - 145.748324502\n      - 66.89827286157\n      - 41.30217232425\n      - -61.55804307658\n      - -77.06021768605\n      - -105.9659721117\n      - -149.8932859294\n      - -229.5828036794\n      - -511.3694431202\n      - -229.2928036794\n      - -510.7194431202\n      - 1692.237253326\n      - 145.628324502\n      - 66.22827286157\n      - 39.43217232425\n      - 1692.577253326\n      - -47.25804307658\n      - -71.76021768605\n      - -145.0932859294\n      - 1692.197253326\n      - 37.66217232425\n      - 1691.827253326\n      - 37.85217232425\n      - 1694.077253326\n      - 1693.507253326\n      - 1694.527253326\n      - -183.9813984168\n      - -183.5313984168\n      - -46.55804307658\n      - -74.66021768605\n      - -179.6813984168\n      - -179.6713984168\n      - -1385.642032566\n      - 196.2584049595\n      - -181.0813984168\n      - -180.7913984168\n      - -46.95804307658\n      - -75.36021768605\n      - -1386.162032566\n      - 195.9184049595\n      - -180.5813984168\n      - -180.5313984168\n  -   - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n 
     - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 
0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n  -   - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      
- 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\nhistory_criterion_expected:\n  -   - 21.53511643627\n      - 14.80453604351\n      - 6.548558251064\n      - 12.54188075473\n      - 9.282890198608\n      - 2.859555210712\n      - 0.9381817894678\n      - 0.2048532883114\n      - 0.8881817894678\n      - 0.3798532883114\n      - -0.9101956814319\n      - -1.36444138824\n      - -0.9351994446357\n      - -1.055070381505\n      - -1.111335532899\n      - -0.1703442432756\n      - 1.580641245921\n      - 19.23511643627\n      - 13.00453604351\n      - 13.94855825106\n      - 11.24188075473\n      - 6.182890198608\n      - -1.240444789288\n      - -0.8618182105322\n      - -1.995146711689\n      - -0.9868182105322\n      - -1.270146711689\n      - -1.135195681432\n      - -0.9144413882404\n      - -3.072699444636\n      - -1.317570381505\n      - -0.9238355328992\n      - 0.9546557567244\n      - -0.3318587540789\n      - 8.635116436265\n      - 15.10453604351\n      - 6.148558251063\n      - 4.841880754733\n      - 5.382890198608\n      - 2.059555210712\n      - -3.361818210532\n      - -2.995146711689\n      - -3.311818210532\n      - -2.395146711689\n      - -2.185195681432\n      - 
-2.63944138824\n      - -1.985199444636\n      - -1.880070381505\n      - -1.711335532899\n      - -1.407844243276\n      - -0.4818587540789\n      - 2.735116436265\n      - 3.404536043506\n      - 3.148558251063\n      - 3.141880754733\n      - 2.482890198608\n      - 0.5595552107122\n      - -0.7618182105322\n      - -2.995146711689\n      - -0.7993182105322\n      - -2.245146711689\n      - -1.885195681432\n      - -1.96444138824\n      - -1.647699444636\n      - -2.292570381505\n      - -1.486335532899\n      - -1.557844243276\n      - -0.8193587540789\n      - 10.13511643627\n      - 4.748558251063\n      - -2.218096467799\n      - -4.369688200573\n      - -3.659688200573\n      - -1.219688200573\n      - -0.3489655844206\n      - 6.635116436265\n      - 2.248558251063\n      - -1.518096467799\n      - -2.939688200573\n      - -4.029688200573\n      - -2.159688200573\n      - -2.038965584421\n      - 5.435116436265\n      - 3.348558251064\n      - -1.818096467799\n      - -2.909688200573\n      - -4.969688200573\n      - -3.469688200573\n      - -0.5389655844206\n      - 6.635116436265\n      - 5.848558251064\n      - -0.918096467799\n      - -4.219688200573\n      - -0.3489655844206\n      - -4.029688200573\n      - -3.659688200573\n      - -0.5389655844206\n      - 5.435116436265\n      - 2.348558251064\n      - -0.0171098013921\n      - -2.718096467799\n      - -4.257793595776\n      - -3.887793595776\n      - -2.006947842151\n      - -2.829688200573\n      - -0.1835757519589\n      - 0.8557490906722\n      - 0.6910344155794\n      - 4.435116436265\n      - 4.348558251064\n      - 0.9828901986079\n      - 0.481903532201\n      - -6.457793595776\n      - -6.137793595776\n      - -1.516947842151\n      - -4.029688200573\n      - -1.013575751959\n      - -0.8342509093278\n      - 1.441034415579\n      - -0.8648835637348\n      - 1.848558251064\n      - 0.6828901986079\n      - 1.081903532201\n      - -5.457793595776\n      - -4.787793595776\n      - 
0.1730521578493\n      - -1.139688200573\n      - -3.263575751959\n      - 0.4057490906722\n      - 4.141034415579\n      - 6.635116436265\n      - 2.104536043506\n      - 4.348558251064\n      - 5.641880754733\n      - -0.1171098013921\n      - -2.640444789288\n      - -3.195146711689\n      - -2.325146711689\n      - -4.777695681432\n      - -5.49444138824\n      - -4.762699444636\n      - -5.027570381505\n      - -3.966335532899\n      - -3.510344243276\n      - -2.694358754079\n      - 7.410311799427\n      - 8.980311799427\n      - 1.290311799427\n      - -4.969688200573\n      - -4.709688200573\n      - -0.659688200573\n      - -0.5648835637348\n      - -2.951441748936\n      - 0.1828901986079\n      - 5.081903532201\n      - 3.342206404224\n      - 2.873052157849\n      - 3.162206404224\n      - 2.983052157849\n      - 0.920311799427\n      - 0.1164242480411\n      - 3.925749090672\n      - 2.761034415579\n      - 9.335116436265\n      - 3.648558251063\n      - -0.3171098013921\n      - -5.718096467799\n      - -2.457793595776\n      - -3.126947842151\n      - -1.897793595776\n      - -2.156947842151\n      - -0.539688200573\n      - -2.693575751959\n      - -0.2742509093278\n      - 2.531034415579\n      - -4.664883563735\n      - 1.548558251064\n      - -7.017109801392\n      - -4.018096467799\n      - -0.7577935957756\n      - -4.526947842151\n      - -0.4677935957756\n      - -3.876947842151\n      - -3.429688200573\n      - -2.813575751959\n      - -0.9442509093278\n      - 0.6610344155794\n      - -3.089688200573\n      - 9.635116436265\n      - 6.848558251064\n      - 0.781903532201\n      - -3.469688200573\n      - -1.108965584421\n      - -3.839688200573\n      - -0.9189655844206\n      - -1.589688200573\n      - -2.159688200573\n      - -1.139688200573\n      - -4.661818210532\n      - -4.211818210532\n      - 10.33511643627\n      - 3.948558251064\n      - -0.3618182105322\n      - -0.3518182105322\n      - -1.477695681432\n      - 
-2.132699444636\n      - -1.761818210532\n      - -1.471818210532\n      - 9.935116436265\n      - 3.248558251063\n      - -1.997695681432\n      - -2.472699444636\n      - -1.261818210532\n      - -1.211818210532\n  -   - 25.01562287811\n      - 18.67576650474\n      - 10.71425043997\n      - 16.92850306334\n      - 13.83328982937\n      - 7.61143273431\n      - 5.780449944004\n      - 4.918595910462\n      - 5.730449944004\n      - 5.093595910462\n      - 3.573230198002\n      - 2.843276294294\n      - 2.98078293018\n      - 2.569118760852\n      - 2.229814978179\n      - 2.901300021005\n      - 4.398727952741\n      - 22.71562287811\n      - 16.87576650474\n      - 18.11425043997\n      - 15.62850306334\n      - 10.73328982937\n      - 3.51143273431\n      - 3.980449944004\n      - 2.718595910462\n      - 3.855449944004\n      - 3.443595910462\n      - 3.348230198002\n      - 3.293276294294\n      - 0.8432829301802\n      - 2.306618760852\n      - 2.417314978179\n      - 4.026300021005\n      - 2.486227952741\n      - 12.11562287811\n      - 18.97576650474\n      - 10.31425043997\n      - 9.22850306334\n      - 9.933289829366\n      - 6.81143273431\n      - 1.480449944004\n      - 1.718595910462\n      - 1.530449944004\n      - 2.318595910462\n      - 2.298230198002\n      - 1.568276294294\n      - 1.93078293018\n      - 1.744118760852\n      - 1.629814978179\n      - 1.663800021005\n      - 2.336227952741\n      - 6.215622878108\n      - 7.275766504742\n      - 7.314250439974\n      - 7.52850306334\n      - 7.033289829366\n      - 5.31143273431\n      - 4.080449944004\n      - 1.718595910462\n      - 4.042949944004\n      - 2.468595910462\n      - 2.598230198002\n      - 2.243276294294\n      - 2.26828293018\n      - 1.331618760852\n      - 1.854814978179\n      - 1.513800021005\n      - 1.998727952741\n      - 13.61562287811\n      - 8.914250439974\n      - 2.617857443871\n      - -0.02069875634249\n      - 0.6893012436575\n      - 3.129301243658\n      - 
2.348674115464\n      - 10.11562287811\n      - 6.414250439974\n      - 3.317857443871\n      - 1.409301243658\n      - 0.3193012436575\n      - 2.189301243658\n      - 0.6586741154643\n      - 8.915622878108\n      - 7.514250439974\n      - 3.017857443871\n      - 1.439301243658\n      - -0.6206987563425\n      - 0.8793012436575\n      - 2.158674115464\n      - 10.11562287811\n      - 10.01425043997\n      - 3.917857443871\n      - 0.1293012436575\n      - 2.348674115464\n      - 0.3193012436575\n      - 0.6893012436575\n      - 2.158674115464\n      - 8.915622878108\n      - 6.514250439974\n      - 4.533289829366\n      - 2.117857443871\n      - 0.5381907245488\n      - 0.9081907245488\n      - 2.599956711546\n      - 1.519301243658\n      - 3.585844975595\n      - 4.0602621231\n      - 3.388674115464\n      - 7.915622878108\n      - 8.514250439974\n      - 5.533289829366\n      - 5.317857443871\n      - -1.661809275451\n      - -1.341809275451\n      - 3.089956711546\n      - 0.3193012436575\n      - 2.755844975595\n      - 2.3702621231\n      - 4.138674115464\n      - 2.615622878108\n      - 6.014250439974\n      - 5.233289829366\n      - 5.917857443871\n      - -0.6618092754512\n      - 0.008190724548808\n      - 4.779956711546\n      - 3.209301243658\n      - 0.505844975595\n      - 3.6102621231\n      - 6.838674115464\n      - 10.11562287811\n      - 5.975766504742\n      - 8.514250439974\n      - 10.02850306334\n      - 4.433289829366\n      - 2.11143273431\n      - 1.518595910462\n      - 2.388595910462\n      - -0.2942698019983\n      - -1.286723705706\n      - -0.8467170698198\n      - -1.403381239148\n      - -0.6251850218209\n      - -0.4386999789948\n      - 0.1237279527411\n      - 11.75930124366\n      - 13.32930124366\n      - 5.639301243658\n      - -0.6206987563425\n      - -0.3606987563425\n      - 3.689301243658\n      - 2.915622878108\n      - 1.214250439974\n      - 4.733289829366\n      - 9.917857443871\n      - 8.138190724549\n      - 
7.479956711546\n      - 7.958190724549\n      - 7.589956711546\n      - 5.269301243658\n      - 3.885844975595\n      - 7.1302621231\n      - 5.458674115464\n      - 12.81562287811\n      - 7.814250439974\n      - 4.233289829366\n      - -0.8821425561292\n      - 2.338190724549\n      - 1.479956711546\n      - 2.898190724549\n      - 2.449956711546\n      - 3.809301243658\n      - 1.075844975595\n      - 2.9302621231\n      - 5.228674115464\n      - -1.184377121892\n      - 5.714250439974\n      - -2.466710170634\n      - 0.8178574438708\n      - 4.038190724549\n      - 0.07995671154575\n      - 4.328190724549\n      - 0.7299567115457\n      - 0.9193012436575\n      - 0.955844975595\n      - 2.2602621231\n      - 3.358674115464\n      - 1.259301243658\n      - 13.11562287811\n      - 11.01425043997\n      - 5.617857443871\n      - 0.8793012436575\n      - 1.588674115464\n      - 0.5093012436575\n      - 1.778674115464\n      - 2.759301243658\n      - 2.189301243658\n      - 3.209301243658\n      - 0.1804499440042\n      - 0.6304499440042\n      - 13.81562287811\n      - 8.114250439974\n      - 4.480449944004\n      - 4.490449944004\n      - 3.005730198002\n      - 1.78328293018\n      - 3.080449944004\n      - 3.370449944004\n      - 13.41562287811\n      - 7.414250439974\n      - 2.485730198002\n      - 1.44328293018\n      - 3.580449944004\n      - 3.630449944004\n  -   - 84.68988065196\n      - 70.73054388289\n      - 56.46322643196\n      - 57.38823556867\n      - 49.80586460657\n      - 36.42009030556\n      - 24.97150307276\n      - 18.1321687762\n      - 24.92150307276\n      - 18.3071687762\n      - 12.85194135284\n      - 9.428755329368\n      - 7.671466840337\n      - 5.900125885276\n      - 4.571195356425\n      - 4.515228493968\n      - 5.474513604881\n      - 82.38988065196\n      - 68.93054388289\n      - 63.86322643196\n      - 56.08823556867\n      - 46.70586460657\n      - 32.32009030556\n      - 23.17150307276\n      - 15.9321687762\n      - 
23.04650307276\n      - 16.6571687762\n      - 12.62694135284\n      - 9.878755329368\n      - 5.533966840337\n      - 5.637625885276\n      - 4.758695356425\n      - 5.640228493968\n      - 3.562013604881\n      - 71.78988065196\n      - 71.03054388289\n      - 56.06322643196\n      - 49.68823556867\n      - 45.90586460657\n      - 35.62009030556\n      - 20.67150307276\n      - 14.9321687762\n      - 20.72150307276\n      - 15.5321687762\n      - 11.57694135284\n      - 8.153755329368\n      - 6.621466840337\n      - 5.075125885276\n      - 3.971195356425\n      - 3.277728493968\n      - 3.412013604881\n      - 65.88988065196\n      - 59.33054388289\n      - 53.06322643196\n      - 47.98823556867\n      - 43.00586460657\n      - 34.12009030556\n      - 23.27150307276\n      - 14.9321687762\n      - 23.23400307276\n      - 15.6821687762\n      - 11.87694135284\n      - 8.828755329368\n      - 6.958966840337\n      - 4.662625885276\n      - 4.196195356425\n      - 3.127728493968\n      - 3.074513604881\n      - 73.28988065196\n      - 54.66322643196\n      - 26.00799822147\n      - 7.789506147668\n      - 8.499506147668\n      - 10.93950614767\n      - 3.209942501544\n      - 69.78988065196\n      - 52.16322643196\n      - 26.70799822147\n      - 9.219506147668\n      - 8.129506147668\n      - 9.999506147668\n      - 1.519942501544\n      - 68.58988065196\n      - 53.26322643196\n      - 26.40799822147\n      - 9.249506147668\n      - 7.189506147668\n      - 8.689506147668\n      - 3.019942501544\n      - 69.78988065196\n      - 55.76322643196\n      - 27.30799822147\n      - 7.939506147668\n      - 3.209942501544\n      - 8.129506147668\n      - 8.499506147668\n      - 3.019942501544\n      - 68.58988065196\n      - 52.26322643196\n      - 40.50586460657\n      - 25.50799822147\n      - 16.41235765092\n      - 16.78235765092\n      - 13.65241143766\n      - 9.329506147668\n      - 7.541813269635\n      - 6.010338273791\n      - 4.249942501544\n      - 
67.58988065196\n      - 54.26322643196\n      - 41.50586460657\n      - 28.70799822147\n      - 14.21235765092\n      - 14.53235765092\n      - 14.14241143766\n      - 8.129506147668\n      - 6.711813269635\n      - 4.320338273791\n      - 4.999942501544\n      - 62.28988065196\n      - 51.76322643196\n      - 41.20586460657\n      - 29.30799822147\n      - 15.21235765092\n      - 15.88235765092\n      - 15.83241143766\n      - 11.01950614767\n      - 4.461813269635\n      - 5.560338273791\n      - 7.699942501544\n      - 69.78988065196\n      - 58.03054388289\n      - 54.26322643196\n      - 50.48823556867\n      - 40.40586460657\n      - 30.92009030556\n      - 14.7321687762\n      - 15.6021687762\n      - 8.984441352835\n      - 5.298755329368\n      - 3.843966840337\n      - 1.927625885276\n      - 1.716195356425\n      - 1.175228493968\n      - 1.199513604881\n      - 19.56950614767\n      - 21.13950614767\n      - 13.44950614767\n      - 7.189506147668\n      - 7.449506147668\n      - 11.49950614767\n      - 62.58988065196\n      - 46.96322643196\n      - 40.70586460657\n      - 33.30799822147\n      - 24.01235765092\n      - 18.53241143766\n      - 23.83235765092\n      - 18.64241143766\n      - 13.07950614767\n      - 7.841813269635\n      - 9.080338273791\n      - 6.319942501544\n      - 72.48988065196\n      - 53.56322643196\n      - 40.20586460657\n      - 22.50799822147\n      - 18.21235765092\n      - 12.53241143766\n      - 18.77235765092\n      - 13.50241143766\n      - 11.61950614767\n      - 5.031813269635\n      - 4.880338273791\n      - 6.089942501544\n      - 58.48988065196\n      - 51.46322643196\n      - 33.50586460657\n      - 24.20799822147\n      - 19.91235765092\n      - 11.13241143766\n      - 20.20235765092\n      - 11.78241143766\n      - 8.729506147668\n      - 4.911813269635\n      - 4.210338273791\n      - 4.219942501544\n      - 9.069506147668\n      - 72.78988065196\n      - 56.76322643196\n      - 29.00799822147\n      - 
8.689506147668\n      - 2.449942501544\n      - 8.319506147668\n      - 2.639942501544\n      - 10.56950614767\n      - 9.999506147668\n      - 11.01950614767\n      - 19.37150307276\n      - 19.82150307276\n      - 73.48988065196\n      - 53.86322643196\n      - 23.67150307276\n      - 23.68150307276\n      - 12.28444135284\n      - 6.473966840337\n      - 22.27150307276\n      - 22.56150307276\n      - 73.08988065196\n      - 53.16322643196\n      - 11.76444135284\n      - 6.133966840337\n      - 22.77150307276\n      - 22.82150307276\n  -   - 78.17391291542\n      - 66.8366728159\n      - 54.32599616455\n      - 56.48754438985\n      - 49.80586460657\n      - 37.60220537333\n      - 27.26395828245\n      - 20.80723297571\n      - 27.21395828245\n      - 20.98223297571\n      - 15.60547682225\n      - 12.11967202128\n      - 10.23248733714\n      - 8.30080219923\n      - 6.80055773407\n      - 6.572919168714\n      - 7.365975022026\n      - 75.87391291542\n      - 65.0366728159\n      - 61.72599616455\n      - 55.18754438985\n      - 46.70586460657\n      - 33.50220537333\n      - 25.46395828245\n      - 18.60723297571\n      - 25.33895828245\n      - 19.33223297571\n      - 15.38047682225\n      - 12.56967202128\n      - 8.094987337144\n      - 8.03830219923\n      - 6.98805773407\n      - 7.697919168714\n      - 5.453475022026\n      - 65.27391291542\n      - 67.1366728159\n      - 53.92599616455\n      - 48.78754438985\n      - 45.90586460657\n      - 36.80220537333\n      - 22.96395828245\n      - 17.60723297571\n      - 23.01395828245\n      - 18.20723297571\n      - 14.33047682225\n      - 10.84467202128\n      - 9.182487337144\n      - 7.47580219923\n      - 6.20055773407\n      - 5.335419168714\n      - 5.303475022026\n      - 59.37391291542\n      - 55.4366728159\n      - 50.92599616455\n      - 47.08754438985\n      - 43.00586460657\n      - 35.30220537333\n      - 25.56395828245\n      - 17.60723297571\n      - 25.52645828245\n      - 18.35723297571\n  
    - 14.63047682225\n      - 11.51967202128\n      - 9.519987337144\n      - 7.06330219923\n      - 6.42555773407\n      - 5.185419168714\n      - 4.965975022026\n      - 66.77391291542\n      - 52.52599616455\n      - 27.88429931353\n      - 10.52352617863\n      - 11.23352617863\n      - 13.67352617863\n      - 5.021362784819\n      - 63.27391291542\n      - 50.02599616455\n      - 28.58429931353\n      - 11.95352617863\n      - 10.86352617863\n      - 12.73352617863\n      - 3.331362784819\n      - 62.07391291542\n      - 51.12599616455\n      - 28.28429931353\n      - 11.98352617863\n      - 9.923526178634\n      - 11.42352617863\n      - 4.831362784819\n      - 63.27391291542\n      - 53.62599616455\n      - 29.18429931353\n      - 10.67352617863\n      - 5.021362784819\n      - 10.86352617863\n      - 11.23352617863\n      - 4.831362784819\n      - 62.07391291542\n      - 50.12599616455\n      - 40.50586460657\n      - 27.38429931353\n      - 18.95079727771\n      - 19.32079727771\n      - 16.391415976\n      - 12.06352617863\n      - 10.02497402658\n      - 8.153464959245\n      - 6.061362784819\n      - 61.07391291542\n      - 52.12599616455\n      - 41.50586460657\n      - 30.58429931353\n      - 16.75079727771\n      - 17.07079727771\n      - 16.881415976\n      - 10.86352617863\n      - 9.194974026576\n      - 6.463464959245\n      - 6.811362784819\n      - 55.77391291542\n      - 49.62599616455\n      - 41.20586460657\n      - 31.18429931353\n      - 17.75079727771\n      - 18.42079727771\n      - 18.571415976\n      - 13.75352617863\n      - 6.944974026576\n      - 7.703464959245\n      - 9.511362784819\n      - 63.27391291542\n      - 54.1366728159\n      - 52.12599616455\n      - 49.58754438985\n      - 40.40586460657\n      - 32.10220537333\n      - 17.40723297571\n      - 18.27723297571\n      - 11.73797682225\n      - 7.989672021276\n      - 6.404987337144\n      - 4.32830219923\n      - 3.94555773407\n      - 3.232919168714\n      - 
3.090975022026\n      - 22.30352617863\n      - 23.87352617863\n      - 16.18352617863\n      - 9.923526178634\n      - 10.18352617863\n      - 14.23352617863\n      - 56.07391291542\n      - 44.82599616455\n      - 40.70586460657\n      - 35.18429931353\n      - 26.55079727771\n      - 21.271415976\n      - 26.37079727771\n      - 21.381415976\n      - 15.81352617863\n      - 10.32497402658\n      - 11.22346495925\n      - 8.131362784819\n      - 65.97391291542\n      - 51.42599616455\n      - 40.20586460657\n      - 24.38429931353\n      - 20.75079727771\n      - 15.271415976\n      - 21.31079727771\n      - 16.241415976\n      - 14.35352617863\n      - 7.514974026576\n      - 7.023464959245\n      - 7.901362784819\n      - 51.97391291542\n      - 49.32599616455\n      - 33.50586460657\n      - 26.08429931353\n      - 22.45079727771\n      - 13.871415976\n      - 22.74079727771\n      - 14.521415976\n      - 11.46352617863\n      - 7.394974026576\n      - 6.353464959245\n      - 6.031362784819\n      - 11.80352617863\n      - 66.27391291542\n      - 54.62599616455\n      - 30.88429931353\n      - 11.42352617863\n      - 4.261362784819\n      - 11.05352617863\n      - 4.451362784819\n      - 13.30352617863\n      - 12.73352617863\n      - 13.75352617863\n      - 21.66395828245\n      - 22.11395828245\n      - 66.97391291542\n      - 51.72599616455\n      - 25.96395828245\n      - 25.97395828245\n      - 15.03797682225\n      - 9.034987337144\n      - 24.56395828245\n      - 24.85395828245\n      - 66.57391291542\n      - 51.02599616455\n      - 14.51797682225\n      - 8.694987337144\n      - 25.06395828245\n      - 25.11395828245\n  -   - 122.1511527598\n      - 122.6211240639\n      - 156.1508499375\n      - -599.1450584808\n      - -12.38948471101\n      - 19.33755088278\n      - 21.40590723345\n      - 17.86925432536\n      - 21.35590723345\n      - 18.04425432536\n      - 13.84616571438\n      - 10.95881863539\n      - 9.418115456519\n      - 7.704952567843\n  
    - 6.351080428381\n      - 6.22595148445\n      - 7.093292608367\n      - 119.8511527598\n      - 120.8211240639\n      - 163.5508499375\n      - -600.4450584808\n      - -15.48948471101\n      - 15.23755088278\n      - 19.60590723345\n      - 15.66925432536\n      - 19.48090723345\n      - 16.39425432536\n      - 13.62116571438\n      - 11.40881863539\n      - 7.280615456519\n      - 7.442452567843\n      - 6.538580428381\n      - 7.35095148445\n      - 5.180792608367\n      - 109.2511527598\n      - 122.9211240639\n      - 155.7508499375\n      - -606.8450584808\n      - -16.28948471101\n      - 18.53755088278\n      - 17.10590723345\n      - 14.66925432536\n      - 17.15590723345\n      - 15.26925432536\n      - 12.57116571438\n      - 9.683818635389\n      - 8.368115456519\n      - 6.879952567843\n      - 5.751080428381\n      - 4.98845148445\n      - 5.030792608367\n      - 103.3511527598\n      - 111.2211240639\n      - 152.7508499375\n      - -608.5450584808\n      - -19.18948471101\n      - 17.03755088278\n      - 19.70590723345\n      - 14.66925432536\n      - 19.66840723345\n      - 15.41925432536\n      - 12.87116571438\n      - 10.35881863539\n      - 8.705615456519\n      - 6.467452567843\n      - 5.976080428381\n      - 4.83845148445\n      - 4.693292608367\n      - 110.7511527598\n      - 154.3508499375\n      - 18.46995434894\n      - 9.107714197815\n      - 9.817714197815\n      - 12.25771419782\n      - 4.778284133588\n      - 107.2511527598\n      - 151.8508499375\n      - 19.16995434894\n      - 10.53771419782\n      - 9.447714197815\n      - 11.31771419782\n      - 3.088284133588\n      - 106.0511527598\n      - 152.9508499375\n      - 18.86995434894\n      - 10.56771419782\n      - 8.507714197815\n      - 10.00771419782\n      - 4.588284133588\n      - 107.2511527598\n      - 155.4508499375\n      - 19.76995434894\n      - 9.257714197815\n      - 4.778284133588\n      - 9.447714197815\n      - 9.817714197815\n      - 4.588284133588\n      - 
106.0511527598\n      - 151.9508499375\n      - -21.68948471101\n      - 17.96995434894\n      - 14.92797213765\n      - 15.29797213765\n      - 14.15288063203\n      - 10.64771419782\n      - 9.331447324355\n      - 7.759548240156\n      - 5.818284133588\n      - 105.0511527598\n      - 153.9508499375\n      - -20.68948471101\n      - 21.16995434894\n      - 12.72797213765\n      - 13.04797213765\n      - 14.64288063203\n      - 9.447714197815\n      - 8.501447324355\n      - 6.069548240156\n      - 6.568284133588\n      - 99.75115275983\n      - 151.4508499375\n      - -20.98948471101\n      - 21.76995434894\n      - 13.72797213765\n      - 14.39797213765\n      - 16.33288063203\n      - 12.33771419782\n      - 6.251447324355\n      - 7.309548240156\n      - 9.268284133588\n      - 107.2511527598\n      - 109.9211240639\n      - 153.9508499375\n      - -606.0450584808\n      - -21.78948471101\n      - 13.83755088278\n      - 14.46925432536\n      - 15.33925432536\n      - 9.978665714379\n      - 6.828818635389\n      - 5.590615456519\n      - 3.732452567843\n      - 3.496080428381\n      - 2.88595148445\n      - 2.818292608367\n      - 20.88771419782\n      - 22.45771419782\n      - 14.76771419782\n      - 8.507714197815\n      - 8.767714197815\n      - 12.81771419782\n      - 100.0511527598\n      - 146.6508499375\n      - -21.48948471101\n      - 25.76995434894\n      - 22.52797213765\n      - 19.03288063203\n      - 22.34797213765\n      - 19.14288063203\n      - 14.39771419782\n      - 9.631447324355\n      - 10.82954824016\n      - 7.888284133588\n      - 109.9511527598\n      - 153.2508499375\n      - -21.98948471101\n      - 14.96995434894\n      - 16.72797213765\n      - 13.03288063203\n      - 17.28797213765\n      - 14.00288063203\n      - 12.93771419782\n      - 6.821447324355\n      - 6.629548240156\n      - 7.658284133588\n      - 95.95115275983\n      - 151.1508499375\n      - -28.68948471101\n      - 16.66995434894\n      - 18.42797213765\n      - 
11.63288063203\n      - 18.71797213765\n      - 12.28288063203\n      - 10.04771419782\n      - 6.701447324355\n      - 5.959548240156\n      - 5.788284133588\n      - 10.38771419782\n      - 110.2511527598\n      - 156.4508499375\n      - 21.46995434894\n      - 10.00771419782\n      - 4.018284133588\n      - 9.637714197815\n      - 4.208284133588\n      - 11.88771419782\n      - 11.31771419782\n      - 12.33771419782\n      - 15.80590723345\n      - 16.25590723345\n      - 110.9511527598\n      - 153.5508499375\n      - 20.10590723345\n      - 20.11590723345\n      - 13.27866571438\n      - 8.220615456519\n      - 18.70590723345\n      - 18.99590723345\n      - 110.5511527598\n      - 152.8508499375\n      - 12.75866571438\n      - 7.880615456519\n      - 19.20590723345\n      - 19.25590723345\n  -   - -35.35804307658\n      - -53.29699942572\n      - -72.06021768605\n      - -76.24300976651\n      - -89.66597211166\n      - -117.5990460751\n      - -178.3813984168\n      - -314.1942358597\n      - -178.4313984168\n      - -314.0192358597\n      - -1385.074532566\n      - 503.9556946656\n      - 197.4559049595\n      - 115.7942913474\n      - 78.11722466414\n      - 57.55318477565\n      - 45.51113019711\n      - -37.65804307658\n      - -55.09699942572\n      - -64.66021768605\n      - -77.54300976651\n      - -92.76597211166\n      - -121.6990460751\n      - -180.1813984168\n      - -316.3942358597\n      - -180.3063984168\n      - -315.6692358597\n      - -1385.299532566\n      - 504.4056946656\n      - 195.3184049595\n      - 115.5317913474\n      - 78.30472466414\n      - 58.67818477565\n      - 43.59863019711\n      - -48.25804307658\n      - -52.99699942572\n      - -72.46021768605\n      - -83.94300976651\n      - -93.56597211166\n      - -118.3990460751\n      - -182.6813984168\n      - -317.3942358597\n      - -182.6313984168\n      - -316.7942358597\n      - -1386.349532566\n      - 502.6806946656\n      - 196.4059049595\n      - 114.9692913474\n      
- 77.51722466414\n      - 56.31568477565\n      - 43.44863019711\n      - -54.15804307658\n      - -64.69699942572\n      - -75.46021768605\n      - -85.64300976651\n      - -96.46597211166\n      - -119.8990460751\n      - -180.0813984168\n      - -317.3942358597\n      - -180.1188984168\n      - -316.6442358597\n      - -1386.049532566\n      - 503.3556946656\n      - 196.7434049595\n      - 114.5567913474\n      - 77.74222466414\n      - 56.16568477565\n      - 43.11113019711\n      - -46.75804307658\n      - -73.86021768605\n      - -148.0932859294\n      - 1691.297253326\n      - 1692.007253326\n      - 1694.447253326\n      - 38.42217232425\n      - -50.25804307658\n      - -76.36021768605\n      - -147.3932859294\n      - 1692.727253326\n      - 1691.637253326\n      - 1693.507253326\n      - 36.73217232425\n      - -51.45804307658\n      - -75.26021768605\n      - -147.6932859294\n      - 1692.757253326\n      - 1690.697253326\n      - 1692.197253326\n      - 38.23217232425\n      - -50.25804307658\n      - -72.76021768605\n      - -146.7932859294\n      - 1691.447253326\n      - 38.42217232425\n      - 1691.637253326\n      - 1692.007253326\n      - 38.23217232425\n      - -51.45804307658\n      - -76.26021768605\n      - -98.96597211166\n      - -148.5932859294\n      - -233.0828036794\n      - -232.7128036794\n      - -508.8494431202\n      - 1692.837253326\n      - 148.258324502\n      - 68.02827286157\n      - 39.46217232425\n      - -52.45804307658\n      - -74.26021768605\n      - -97.96597211166\n      - -145.3932859294\n      - -235.2828036794\n      - -234.9628036794\n      - -508.3594431202\n      - 1691.637253326\n      - 147.428324502\n      - 66.33827286157\n      - 40.21217232425\n      - -57.75804307658\n      - -76.76021768605\n      - -98.26597211166\n      - -144.7932859294\n      - -234.2828036794\n      - -233.6128036794\n      - -506.6694431202\n      - 1694.527253326\n      - 145.178324502\n      - 67.57827286157\n      - 
42.91217232425\n      - -50.25804307658\n      - -65.99699942572\n      - -74.26021768605\n      - -83.14300976651\n      - -99.06597211166\n      - -123.0990460751\n      - -317.5942358597\n      - -316.7242358597\n      - -1388.942032566\n      - 499.8256946656\n      - 193.6284049595\n      - 111.8217913474\n      - 75.26222466414\n      - 54.21318477565\n      - 41.23613019711\n      - 1703.077253326\n      - 1704.647253326\n      - 1696.957253326\n      - 1690.697253326\n      - 1690.957253326\n      - 1695.007253326\n      - -57.45804307658\n      - -81.56021768605\n      - -98.76597211166\n      - -140.7932859294\n      - -225.4828036794\n      - -503.9694431202\n      - -225.6628036794\n      - -503.8594431202\n      - 1696.587253326\n      - 148.558324502\n      - 71.09827286157\n      - 41.53217232425\n      - -47.55804307658\n      - -74.96021768605\n      - -99.26597211166\n      - -151.5932859294\n      - -231.2828036794\n      - -509.9694431202\n      - -230.7228036794\n      - -508.9994431202\n      - 1695.127253326\n      - 145.748324502\n      - 66.89827286157\n      - 41.30217232425\n      - -61.55804307658\n      - -77.06021768605\n      - -105.9659721117\n      - -149.8932859294\n      - -229.5828036794\n      - -511.3694431202\n      - -229.2928036794\n      - -510.7194431202\n      - 1692.237253326\n      - 145.628324502\n      - 66.22827286157\n      - 39.43217232425\n      - 1692.577253326\n      - -47.25804307658\n      - -71.76021768605\n      - -145.0932859294\n      - 1692.197253326\n      - 37.66217232425\n      - 1691.827253326\n      - 37.85217232425\n      - 1694.077253326\n      - 1693.507253326\n      - 1694.527253326\n      - -183.9813984168\n      - -183.5313984168\n      - -46.55804307658\n      - -74.66021768605\n      - -179.6813984168\n      - -179.6713984168\n      - -1385.642032566\n      - 196.2584049595\n      - -181.0813984168\n      - -180.7913984168\n      - -46.95804307658\n      - -75.36021768605\n      - 
-1386.162032566\n      - 195.9184049595\n      - -180.5813984168\n      - -180.5313984168\n  -   - 117.9473464966\n      - 104.1334190546\n      - 90.05657396185\n      - 91.2207789054\n      - 83.93061974583\n      - 71.30942158128\n      - 62.32199544308\n      - 59.88083883476\n      - 62.27199544308\n      - 60.05583883476\n      - 62.77642327569\n      - 76.9431054476\n      - 131.4875801385\n      - -2342.694563021\n      - -86.51002021515\n      - -37.31642203542\n      - -19.79861800279\n      - 115.6473464966\n      - 102.3334190546\n      - 97.45657396185\n      - 89.9207789054\n      - 80.83061974583\n      - 67.20942158128\n      - 60.52199544308\n      - 57.68083883476\n      - 60.39699544308\n      - 58.40583883476\n      - 62.55142327569\n      - 77.3931054476\n      - 129.3500801385\n      - -2342.957063021\n      - -86.32252021515\n      - -36.19142203542\n      - -21.71111800279\n      - 105.0473464966\n      - 104.4334190546\n      - 89.65657396185\n      - 83.5207789054\n      - 80.03061974583\n      - 70.50942158128\n      - 58.02199544308\n      - 56.68083883476\n      - 58.07199544308\n      - 57.28083883476\n      - 61.50142327569\n      - 75.6681054476\n      - 130.4375801385\n      - -2343.519563021\n      - -87.11002021515\n      - -38.55392203542\n      - -21.86111800279\n      - 99.14734649662\n      - 92.73341905458\n      - 86.65657396185\n      - 81.8207789054\n      - 77.13061974583\n      - 69.00942158128\n      - 60.62199544308\n      - 56.68083883476\n      - 60.58449544308\n      - 57.43083883476\n      - 61.80142327569\n      - 76.3431054476\n      - 130.7750801385\n      - -2343.932063021\n      - -86.88502021515\n      - -38.70392203542\n      - -22.19861800279\n      - 106.5473464966\n      - 88.25657396185\n      - 61.9476238727\n      - 64.62033506263\n      - 65.33033506263\n      - 67.77033506263\n      - -17.38684121661\n      - 103.0473464966\n      - 85.75657396185\n      - 62.6476238727\n      - 66.05033506263\n      
- 64.96033506263\n      - 66.83033506263\n      - -19.07684121661\n      - 101.8473464966\n      - 86.85657396185\n      - 62.3476238727\n      - 66.08033506263\n      - 64.02033506263\n      - 65.52033506263\n      - -17.57684121661\n      - 103.0473464966\n      - 89.35657396185\n      - 63.2476238727\n      - 64.77033506263\n      - -17.38684121661\n      - 64.96033506263\n      - 65.33033506263\n      - -17.57684121661\n      - 101.8473464966\n      - 85.85657396185\n      - 74.63061974583\n      - 61.4476238727\n      - 55.64384266576\n      - 56.01384266576\n      - 58.81780905206\n      - 66.16033506263\n      - 253.6554490216\n      - -52.38720394238\n      - -16.34684121661\n      - 100.8473464966\n      - 87.85657396185\n      - 75.63061974583\n      - 64.6476238727\n      - 53.44384266576\n      - 53.76384266576\n      - 59.30780905206\n      - 64.96033506263\n      - 252.8254490216\n      - -54.07720394238\n      - -15.59684121661\n      - 95.54734649662\n      - 85.35657396185\n      - 75.33061974583\n      - 65.2476238727\n      - 54.44384266576\n      - 55.11384266576\n      - 60.99780905206\n      - 67.85033506263\n      - 250.5754490216\n      - -52.83720394238\n      - -12.89684121661\n      - 103.0473464966\n      - 91.43341905458\n      - 87.85657396185\n      - 84.3207789054\n      - 74.53061974583\n      - 65.80942158128\n      - 56.48083883476\n      - 57.35083883476\n      - 58.90892327569\n      - 72.8131054476\n      - 127.6600801385\n      - -2346.667063021\n      - -89.36502021515\n      - -40.65642203542\n      - -24.07361800279\n      - 76.40033506263\n      - 77.97033506263\n      - 70.28033506263\n      - 64.02033506263\n      - 64.28033506263\n      - 68.33033506263\n      - 95.84734649662\n      - 80.55657396185\n      - 74.83061974583\n      - 69.2476238727\n      - 63.24384266576\n      - 63.69780905206\n      - 63.06384266576\n      - 63.80780905206\n      - 69.91033506263\n      - 253.9554490216\n      - -49.31720394238\n      
- -14.27684121661\n      - 105.7473464966\n      - 87.15657396185\n      - 74.33061974583\n      - 58.4476238727\n      - 57.44384266576\n      - 57.69780905206\n      - 58.00384266576\n      - 58.66780905206\n      - 68.45033506263\n      - 251.1454490216\n      - -53.51720394238\n      - -14.50684121661\n      - 91.74734649662\n      - 85.05657396185\n      - 67.63061974583\n      - 60.1476238727\n      - 59.14384266576\n      - 56.29780905206\n      - 59.43384266576\n      - 56.94780905206\n      - 65.56033506263\n      - 251.0254490216\n      - -54.18720394238\n      - -16.37684121661\n      - 65.90033506263\n      - 106.0473464966\n      - 90.35657396185\n      - 64.9476238727\n      - 65.52033506263\n      - -18.14684121661\n      - 65.15033506263\n      - -17.95684121661\n      - 67.40033506263\n      - 66.83033506263\n      - 67.85033506263\n      - 56.72199544308\n      - 57.17199544308\n      - 106.7473464966\n      - 87.45657396185\n      - 61.02199544308\n      - 61.03199544308\n      - 62.20892327569\n      - 130.2900801385\n      - 59.62199544308\n      - 59.91199544308\n      - 106.3473464966\n      - 86.75657396185\n      - 61.68892327569\n      - 129.9500801385\n      - 60.12199544308\n      - 60.17199544308\n  -   - 175.7616267494\n      - 135.815392655\n      - 107.427429421\n      - 99.44443456745\n      - 85.70608965926\n      - 64.3171217786\n      - 44.36460041182\n      - 32.95338522348\n      - 44.31460041182\n      - 33.12838522348\n      - 24.75298136325\n      - 19.273476213\n      - 15.97636612239\n      - 13.00317519799\n      - 10.70826764037\n      - 9.858997178816\n      - 10.15607036729\n      - 173.4616267494\n      - 134.015392655\n      - 114.827429421\n      - 98.14443456745\n      - 82.60608965926\n      - 60.2171217786\n      - 42.56460041182\n      - 30.75338522348\n      - 42.43960041182\n      - 31.47838522348\n      - 24.52798136325\n      - 19.723476213\n      - 13.83886612239\n      - 12.74067519799\n      - 
10.89576764037\n      - 10.98399717882\n      - 8.243570367288\n      - 162.8616267494\n      - 136.115392655\n      - 107.027429421\n      - 91.74443456745\n      - 81.80608965926\n      - 63.5171217786\n      - 40.06460041182\n      - 29.75338522348\n      - 40.11460041182\n      - 30.35338522348\n      - 23.47798136325\n      - 17.998476213\n      - 14.92636612239\n      - 12.17817519799\n      - 10.10826764037\n      - 8.621497178816\n      - 8.093570367288\n      - 156.9616267494\n      - 124.415392655\n      - 104.027429421\n      - 90.04443456745\n      - 78.90608965926\n      - 62.0171217786\n      - 42.66460041182\n      - 29.75338522348\n      - 42.62710041182\n      - 30.50338522348\n      - 23.77798136325\n      - 18.673476213\n      - 15.26386612239\n      - 11.76567519799\n      - 10.33326764037\n      - 8.471497178816\n      - 7.756070367288\n      - 164.3616267494\n      - 105.627429421\n      - 48.8819398286\n      - 18.5818314891\n      - 19.2918314891\n      - 21.7318314891\n      - 7.60022447721\n      - 160.8616267494\n      - 103.127429421\n      - 49.5819398286\n      - 20.0118314891\n      - 18.9218314891\n      - 20.7918314891\n      - 5.91022447721\n      - 159.6616267494\n      - 104.227429421\n      - 49.2819398286\n      - 20.0418314891\n      - 17.9818314891\n      - 19.4818314891\n      - 7.41022447721\n      - 160.8616267494\n      - 106.727429421\n      - 50.1819398286\n      - 18.7318314891\n      - 7.60022447721\n      - 18.9218314891\n      - 19.2918314891\n      - 7.41022447721\n      - 159.6616267494\n      - 103.227429421\n      - 76.40608965926\n      - 48.3819398286\n      - 33.22986519451\n      - 33.59986519451\n      - 26.87225158213\n      - 20.1218314891\n      - 15.2111293494\n      - 11.73215109931\n      - 8.64022447721\n      - 158.6616267494\n      - 105.227429421\n      - 77.40608965926\n      - 51.5819398286\n      - 31.02986519451\n      - 31.34986519451\n      - 27.36225158213\n      - 18.9218314891\n      - 
14.3811293494\n      - 10.04215109931\n      - 9.39022447721\n      - 153.3616267494\n      - 102.727429421\n      - 77.10608965926\n      - 52.1819398286\n      - 32.02986519451\n      - 32.69986519451\n      - 29.05225158213\n      - 21.8118314891\n      - 12.1311293494\n      - 11.28215109931\n      - 12.09022447721\n      - 160.8616267494\n      - 123.115392655\n      - 105.227429421\n      - 92.54443456745\n      - 76.30608965926\n      - 58.8171217786\n      - 29.55338522348\n      - 30.42338522348\n      - 20.88548136325\n      - 15.143476213\n      - 12.14886612239\n      - 9.030675197988\n      - 7.853267640371\n      - 6.518997178816\n      - 5.881070367288\n      - 30.3618314891\n      - 31.9318314891\n      - 24.2418314891\n      - 17.9818314891\n      - 18.2418314891\n      - 22.2918314891\n      - 153.6616267494\n      - 97.927429421\n      - 76.60608965926\n      - 56.1819398286\n      - 40.82986519451\n      - 31.75225158213\n      - 40.64986519451\n      - 31.86225158213\n      - 23.8718314891\n      - 15.5111293494\n      - 14.80215109931\n      - 10.71022447721\n      - 163.5616267494\n      - 104.527429421\n      - 76.10608965926\n      - 45.3819398286\n      - 35.02986519451\n      - 25.75225158213\n      - 35.58986519451\n      - 26.72225158213\n      - 22.4118314891\n      - 12.7011293494\n      - 10.60215109931\n      - 10.48022447721\n      - 149.5616267494\n      - 102.427429421\n      - 69.40608965926\n      - 47.0819398286\n      - 36.72986519451\n      - 24.35225158213\n      - 37.01986519451\n      - 25.00225158213\n      - 19.5218314891\n      - 12.5811293494\n      - 9.93215109931\n      - 8.61022447721\n      - 19.8618314891\n      - 163.8616267494\n      - 107.727429421\n      - 51.8819398286\n      - 19.4818314891\n      - 6.84022447721\n      - 19.1118314891\n      - 7.03022447721\n      - 21.3618314891\n      - 20.7918314891\n      - 21.8118314891\n      - 38.76460041182\n      - 39.21460041182\n      - 164.5616267494\n      - 
104.827429421\n      - 43.06460041182\n      - 43.07460041182\n      - 24.18548136325\n      - 14.77886612239\n      - 41.66460041182\n      - 41.95460041182\n      - 164.1616267494\n      - 104.127429421\n      - 23.66548136325\n      - 14.43886612239\n      - 42.16460041182\n      - 42.21460041182\nhistory_x:\n  -   - 0.15\n      - 0.008\n      - 0.01\n  -   - 0.25\n      - 0.008\n      - 0.01\n  -   - 0.15\n      - 0.108\n      - 0.01\n  -   - 0.15\n      - 0.008\n      - 0.11\n  -   - 0.1596177824551\n      - -0.07539624732067\n      - 0.08766385239892\n  -   - 0.2\n      - 0.008531162120637\n      - -0.002952684076318\n  -   - 0.0\n      - 0.0\n      - 0.0\n  -   - 0.0\n      - 0.0\n      - 0.0\nhistory_x_expected:\n  -   - 0.15\n      - 0.008\n      - 0.01\n  -   - 0.25\n      - 0.008\n      - 0.01\n  -   - 0.15\n      - 0.108\n      - 0.01\n  -   - 0.15\n      - 0.008\n      - 0.11\n  -   - 0.1596177824551\n      - -0.07539624732067\n      - 0.08766385239892\n  -   - 0.2\n      - 0.008531162120637\n      - -0.002952684076318\n  -   - 0.1505141617677\n      - -0.04199731338289\n      - 0.009934485345754\n  -   - 0.1374618789969\n      - 0.007934485345754\n      - -0.03840238867598\nindex_best_x: 0\nlinear_terms:\n  - 168.1373336387\n  - 5647.516828713\n  - 6083.95846304\nlower_bounds: null\nmodel_improving_points:\n  -   - 1.0000000000000002\n      - 0.0\n      - 0.0\n  -   - 0.010623242412742123\n      - 0.0\n      - 0.0\n  -   - -0.2590536815263693\n      - 0.0\n      - 0.0\nmodel_indices:\n  - 5\n  - 4\n  - 3\n  - 2\n  - 1\n  - 0\n  - 0\nmodel_indices_expected:\n  - 5\n  - 6\n  - 7\n  - 2\n  - 1\n  - 0\n  - 0\nn: 3\nn_modelpoints: 1\nn_modelpoints_expected: 3\nsquare_terms:\n  -   - 1593.211846704\n      - -3249.345655429\n      - 7950.494350682\n  -   - -3249.345655429\n      - 151896.1417043\n      - 15486.3682482\n  -   - 7950.494350682\n      - 15486.3682482\n      - 54810.91090485\nupper_bound: null\n"
  },
  {
    "path": "tests/optimagic/optimizers/_pounders/fixtures/add_points_until_main_model_fully_linear_ii.yaml",
    "content": "---\ndelta: 0.025\nhistory_criterion:\n  -   - 21.53511643627\n      - 14.80453604351\n      - 6.548558251064\n      - 12.54188075473\n      - 9.282890198608\n      - 2.859555210712\n      - 0.9381817894678\n      - 0.2048532883114\n      - 0.8881817894678\n      - 0.3798532883114\n      - -0.9101956814319\n      - -1.36444138824\n      - -0.9351994446357\n      - -1.055070381505\n      - -1.111335532899\n      - -0.1703442432756\n      - 1.580641245921\n      - 19.23511643627\n      - 13.00453604351\n      - 13.94855825106\n      - 11.24188075473\n      - 6.182890198608\n      - -1.240444789288\n      - -0.8618182105322\n      - -1.995146711689\n      - -0.9868182105322\n      - -1.270146711689\n      - -1.135195681432\n      - -0.9144413882404\n      - -3.072699444636\n      - -1.317570381505\n      - -0.9238355328992\n      - 0.9546557567244\n      - -0.3318587540789\n      - 8.635116436265\n      - 15.10453604351\n      - 6.148558251063\n      - 4.841880754733\n      - 5.382890198608\n      - 2.059555210712\n      - -3.361818210532\n      - -2.995146711689\n      - -3.311818210532\n      - -2.395146711689\n      - -2.185195681432\n      - -2.63944138824\n      - -1.985199444636\n      - -1.880070381505\n      - -1.711335532899\n      - -1.407844243276\n      - -0.4818587540789\n      - 2.735116436265\n      - 3.404536043506\n      - 3.148558251063\n      - 3.141880754733\n      - 2.482890198608\n      - 0.5595552107122\n      - -0.7618182105322\n      - -2.995146711689\n      - -0.7993182105322\n      - -2.245146711689\n      - -1.885195681432\n      - -1.96444138824\n      - -1.647699444636\n      - -2.292570381505\n      - -1.486335532899\n      - -1.557844243276\n      - -0.8193587540789\n      - 10.13511643627\n      - 4.748558251063\n      - -2.218096467799\n      - -4.369688200573\n      - -3.659688200573\n      - -1.219688200573\n      - -0.3489655844206\n      - 6.635116436265\n      - 2.248558251063\n      - -1.518096467799\n      - 
-2.939688200573\n      - -4.029688200573\n      - -2.159688200573\n      - -2.038965584421\n      - 5.435116436265\n      - 3.348558251064\n      - -1.818096467799\n      - -2.909688200573\n      - -4.969688200573\n      - -3.469688200573\n      - -0.5389655844206\n      - 6.635116436265\n      - 5.848558251064\n      - -0.918096467799\n      - -4.219688200573\n      - -0.3489655844206\n      - -4.029688200573\n      - -3.659688200573\n      - -0.5389655844206\n      - 5.435116436265\n      - 2.348558251064\n      - -0.0171098013921\n      - -2.718096467799\n      - -4.257793595776\n      - -3.887793595776\n      - -2.006947842151\n      - -2.829688200573\n      - -0.1835757519589\n      - 0.8557490906722\n      - 0.6910344155794\n      - 4.435116436265\n      - 4.348558251064\n      - 0.9828901986079\n      - 0.481903532201\n      - -6.457793595776\n      - -6.137793595776\n      - -1.516947842151\n      - -4.029688200573\n      - -1.013575751959\n      - -0.8342509093278\n      - 1.441034415579\n      - -0.8648835637348\n      - 1.848558251064\n      - 0.6828901986079\n      - 1.081903532201\n      - -5.457793595776\n      - -4.787793595776\n      - 0.1730521578493\n      - -1.139688200573\n      - -3.263575751959\n      - 0.4057490906722\n      - 4.141034415579\n      - 6.635116436265\n      - 2.104536043506\n      - 4.348558251064\n      - 5.641880754733\n      - -0.1171098013921\n      - -2.640444789288\n      - -3.195146711689\n      - -2.325146711689\n      - -4.777695681432\n      - -5.49444138824\n      - -4.762699444636\n      - -5.027570381505\n      - -3.966335532899\n      - -3.510344243276\n      - -2.694358754079\n      - 7.410311799427\n      - 8.980311799427\n      - 1.290311799427\n      - -4.969688200573\n      - -4.709688200573\n      - -0.659688200573\n      - -0.5648835637348\n      - -2.951441748936\n      - 0.1828901986079\n      - 5.081903532201\n      - 3.342206404224\n      - 2.873052157849\n      - 3.162206404224\n      - 
2.983052157849\n      - 0.920311799427\n      - 0.1164242480411\n      - 3.925749090672\n      - 2.761034415579\n      - 9.335116436265\n      - 3.648558251063\n      - -0.3171098013921\n      - -5.718096467799\n      - -2.457793595776\n      - -3.126947842151\n      - -1.897793595776\n      - -2.156947842151\n      - -0.539688200573\n      - -2.693575751959\n      - -0.2742509093278\n      - 2.531034415579\n      - -4.664883563735\n      - 1.548558251064\n      - -7.017109801392\n      - -4.018096467799\n      - -0.7577935957756\n      - -4.526947842151\n      - -0.4677935957756\n      - -3.876947842151\n      - -3.429688200573\n      - -2.813575751959\n      - -0.9442509093278\n      - 0.6610344155794\n      - -3.089688200573\n      - 9.635116436265\n      - 6.848558251064\n      - 0.781903532201\n      - -3.469688200573\n      - -1.108965584421\n      - -3.839688200573\n      - -0.9189655844206\n      - -1.589688200573\n      - -2.159688200573\n      - -1.139688200573\n      - -4.661818210532\n      - -4.211818210532\n      - 10.33511643627\n      - 3.948558251064\n      - -0.3618182105322\n      - -0.3518182105322\n      - -1.477695681432\n      - -2.132699444636\n      - -1.761818210532\n      - -1.471818210532\n      - 9.935116436265\n      - 3.248558251063\n      - -1.997695681432\n      - -2.472699444636\n      - -1.261818210532\n      - -1.211818210532\n  -   - 25.01562287811\n      - 18.67576650474\n      - 10.71425043997\n      - 16.92850306334\n      - 13.83328982937\n      - 7.61143273431\n      - 5.780449944004\n      - 4.918595910462\n      - 5.730449944004\n      - 5.093595910462\n      - 3.573230198002\n      - 2.843276294294\n      - 2.98078293018\n      - 2.569118760852\n      - 2.229814978179\n      - 2.901300021005\n      - 4.398727952741\n      - 22.71562287811\n      - 16.87576650474\n      - 18.11425043997\n      - 15.62850306334\n      - 10.73328982937\n      - 3.51143273431\n      - 3.980449944004\n      - 2.718595910462\n      - 
3.855449944004\n      - 3.443595910462\n      - 3.348230198002\n      - 3.293276294294\n      - 0.8432829301802\n      - 2.306618760852\n      - 2.417314978179\n      - 4.026300021005\n      - 2.486227952741\n      - 12.11562287811\n      - 18.97576650474\n      - 10.31425043997\n      - 9.22850306334\n      - 9.933289829366\n      - 6.81143273431\n      - 1.480449944004\n      - 1.718595910462\n      - 1.530449944004\n      - 2.318595910462\n      - 2.298230198002\n      - 1.568276294294\n      - 1.93078293018\n      - 1.744118760852\n      - 1.629814978179\n      - 1.663800021005\n      - 2.336227952741\n      - 6.215622878108\n      - 7.275766504742\n      - 7.314250439974\n      - 7.52850306334\n      - 7.033289829366\n      - 5.31143273431\n      - 4.080449944004\n      - 1.718595910462\n      - 4.042949944004\n      - 2.468595910462\n      - 2.598230198002\n      - 2.243276294294\n      - 2.26828293018\n      - 1.331618760852\n      - 1.854814978179\n      - 1.513800021005\n      - 1.998727952741\n      - 13.61562287811\n      - 8.914250439974\n      - 2.617857443871\n      - -0.02069875634249\n      - 0.6893012436575\n      - 3.129301243658\n      - 2.348674115464\n      - 10.11562287811\n      - 6.414250439974\n      - 3.317857443871\n      - 1.409301243658\n      - 0.3193012436575\n      - 2.189301243658\n      - 0.6586741154643\n      - 8.915622878108\n      - 7.514250439974\n      - 3.017857443871\n      - 1.439301243658\n      - -0.6206987563425\n      - 0.8793012436575\n      - 2.158674115464\n      - 10.11562287811\n      - 10.01425043997\n      - 3.917857443871\n      - 0.1293012436575\n      - 2.348674115464\n      - 0.3193012436575\n      - 0.6893012436575\n      - 2.158674115464\n      - 8.915622878108\n      - 6.514250439974\n      - 4.533289829366\n      - 2.117857443871\n      - 0.5381907245488\n      - 0.9081907245488\n      - 2.599956711546\n      - 1.519301243658\n      - 3.585844975595\n      - 4.0602621231\n      - 3.388674115464\n      - 
7.915622878108\n      - 8.514250439974\n      - 5.533289829366\n      - 5.317857443871\n      - -1.661809275451\n      - -1.341809275451\n      - 3.089956711546\n      - 0.3193012436575\n      - 2.755844975595\n      - 2.3702621231\n      - 4.138674115464\n      - 2.615622878108\n      - 6.014250439974\n      - 5.233289829366\n      - 5.917857443871\n      - -0.6618092754512\n      - 0.008190724548808\n      - 4.779956711546\n      - 3.209301243658\n      - 0.505844975595\n      - 3.6102621231\n      - 6.838674115464\n      - 10.11562287811\n      - 5.975766504742\n      - 8.514250439974\n      - 10.02850306334\n      - 4.433289829366\n      - 2.11143273431\n      - 1.518595910462\n      - 2.388595910462\n      - -0.2942698019983\n      - -1.286723705706\n      - -0.8467170698198\n      - -1.403381239148\n      - -0.6251850218209\n      - -0.4386999789948\n      - 0.1237279527411\n      - 11.75930124366\n      - 13.32930124366\n      - 5.639301243658\n      - -0.6206987563425\n      - -0.3606987563425\n      - 3.689301243658\n      - 2.915622878108\n      - 1.214250439974\n      - 4.733289829366\n      - 9.917857443871\n      - 8.138190724549\n      - 7.479956711546\n      - 7.958190724549\n      - 7.589956711546\n      - 5.269301243658\n      - 3.885844975595\n      - 7.1302621231\n      - 5.458674115464\n      - 12.81562287811\n      - 7.814250439974\n      - 4.233289829366\n      - -0.8821425561292\n      - 2.338190724549\n      - 1.479956711546\n      - 2.898190724549\n      - 2.449956711546\n      - 3.809301243658\n      - 1.075844975595\n      - 2.9302621231\n      - 5.228674115464\n      - -1.184377121892\n      - 5.714250439974\n      - -2.466710170634\n      - 0.8178574438708\n      - 4.038190724549\n      - 0.07995671154575\n      - 4.328190724549\n      - 0.7299567115457\n      - 0.9193012436575\n      - 0.955844975595\n      - 2.2602621231\n      - 3.358674115464\n      - 1.259301243658\n      - 13.11562287811\n      - 11.01425043997\n      - 
5.617857443871\n      - 0.8793012436575\n      - 1.588674115464\n      - 0.5093012436575\n      - 1.778674115464\n      - 2.759301243658\n      - 2.189301243658\n      - 3.209301243658\n      - 0.1804499440042\n      - 0.6304499440042\n      - 13.81562287811\n      - 8.114250439974\n      - 4.480449944004\n      - 4.490449944004\n      - 3.005730198002\n      - 1.78328293018\n      - 3.080449944004\n      - 3.370449944004\n      - 13.41562287811\n      - 7.414250439974\n      - 2.485730198002\n      - 1.44328293018\n      - 3.580449944004\n      - 3.630449944004\n  -   - 84.68988065196\n      - 70.73054388289\n      - 56.46322643196\n      - 57.38823556867\n      - 49.80586460657\n      - 36.42009030556\n      - 24.97150307276\n      - 18.1321687762\n      - 24.92150307276\n      - 18.3071687762\n      - 12.85194135284\n      - 9.428755329368\n      - 7.671466840337\n      - 5.900125885276\n      - 4.571195356425\n      - 4.515228493968\n      - 5.474513604881\n      - 82.38988065196\n      - 68.93054388289\n      - 63.86322643196\n      - 56.08823556867\n      - 46.70586460657\n      - 32.32009030556\n      - 23.17150307276\n      - 15.9321687762\n      - 23.04650307276\n      - 16.6571687762\n      - 12.62694135284\n      - 9.878755329368\n      - 5.533966840337\n      - 5.637625885276\n      - 4.758695356425\n      - 5.640228493968\n      - 3.562013604881\n      - 71.78988065196\n      - 71.03054388289\n      - 56.06322643196\n      - 49.68823556867\n      - 45.90586460657\n      - 35.62009030556\n      - 20.67150307276\n      - 14.9321687762\n      - 20.72150307276\n      - 15.5321687762\n      - 11.57694135284\n      - 8.153755329368\n      - 6.621466840337\n      - 5.075125885276\n      - 3.971195356425\n      - 3.277728493968\n      - 3.412013604881\n      - 65.88988065196\n      - 59.33054388289\n      - 53.06322643196\n      - 47.98823556867\n      - 43.00586460657\n      - 34.12009030556\n      - 23.27150307276\n      - 14.9321687762\n      - 
23.23400307276\n      - 15.6821687762\n      - 11.87694135284\n      - 8.828755329368\n      - 6.958966840337\n      - 4.662625885276\n      - 4.196195356425\n      - 3.127728493968\n      - 3.074513604881\n      - 73.28988065196\n      - 54.66322643196\n      - 26.00799822147\n      - 7.789506147668\n      - 8.499506147668\n      - 10.93950614767\n      - 3.209942501544\n      - 69.78988065196\n      - 52.16322643196\n      - 26.70799822147\n      - 9.219506147668\n      - 8.129506147668\n      - 9.999506147668\n      - 1.519942501544\n      - 68.58988065196\n      - 53.26322643196\n      - 26.40799822147\n      - 9.249506147668\n      - 7.189506147668\n      - 8.689506147668\n      - 3.019942501544\n      - 69.78988065196\n      - 55.76322643196\n      - 27.30799822147\n      - 7.939506147668\n      - 3.209942501544\n      - 8.129506147668\n      - 8.499506147668\n      - 3.019942501544\n      - 68.58988065196\n      - 52.26322643196\n      - 40.50586460657\n      - 25.50799822147\n      - 16.41235765092\n      - 16.78235765092\n      - 13.65241143766\n      - 9.329506147668\n      - 7.541813269635\n      - 6.010338273791\n      - 4.249942501544\n      - 67.58988065196\n      - 54.26322643196\n      - 41.50586460657\n      - 28.70799822147\n      - 14.21235765092\n      - 14.53235765092\n      - 14.14241143766\n      - 8.129506147668\n      - 6.711813269635\n      - 4.320338273791\n      - 4.999942501544\n      - 62.28988065196\n      - 51.76322643196\n      - 41.20586460657\n      - 29.30799822147\n      - 15.21235765092\n      - 15.88235765092\n      - 15.83241143766\n      - 11.01950614767\n      - 4.461813269635\n      - 5.560338273791\n      - 7.699942501544\n      - 69.78988065196\n      - 58.03054388289\n      - 54.26322643196\n      - 50.48823556867\n      - 40.40586460657\n      - 30.92009030556\n      - 14.7321687762\n      - 15.6021687762\n      - 8.984441352835\n      - 5.298755329368\n      - 3.843966840337\n      - 1.927625885276\n      - 
1.716195356425\n      - 1.175228493968\n      - 1.199513604881\n      - 19.56950614767\n      - 21.13950614767\n      - 13.44950614767\n      - 7.189506147668\n      - 7.449506147668\n      - 11.49950614767\n      - 62.58988065196\n      - 46.96322643196\n      - 40.70586460657\n      - 33.30799822147\n      - 24.01235765092\n      - 18.53241143766\n      - 23.83235765092\n      - 18.64241143766\n      - 13.07950614767\n      - 7.841813269635\n      - 9.080338273791\n      - 6.319942501544\n      - 72.48988065196\n      - 53.56322643196\n      - 40.20586460657\n      - 22.50799822147\n      - 18.21235765092\n      - 12.53241143766\n      - 18.77235765092\n      - 13.50241143766\n      - 11.61950614767\n      - 5.031813269635\n      - 4.880338273791\n      - 6.089942501544\n      - 58.48988065196\n      - 51.46322643196\n      - 33.50586460657\n      - 24.20799822147\n      - 19.91235765092\n      - 11.13241143766\n      - 20.20235765092\n      - 11.78241143766\n      - 8.729506147668\n      - 4.911813269635\n      - 4.210338273791\n      - 4.219942501544\n      - 9.069506147668\n      - 72.78988065196\n      - 56.76322643196\n      - 29.00799822147\n      - 8.689506147668\n      - 2.449942501544\n      - 8.319506147668\n      - 2.639942501544\n      - 10.56950614767\n      - 9.999506147668\n      - 11.01950614767\n      - 19.37150307276\n      - 19.82150307276\n      - 73.48988065196\n      - 53.86322643196\n      - 23.67150307276\n      - 23.68150307276\n      - 12.28444135284\n      - 6.473966840337\n      - 22.27150307276\n      - 22.56150307276\n      - 73.08988065196\n      - 53.16322643196\n      - 11.76444135284\n      - 6.133966840337\n      - 22.77150307276\n      - 22.82150307276\n  -   - 78.17391291542\n      - 66.8366728159\n      - 54.32599616455\n      - 56.48754438985\n      - 49.80586460657\n      - 37.60220537333\n      - 27.26395828245\n      - 20.80723297571\n      - 27.21395828245\n      - 20.98223297571\n      - 15.60547682225\n      - 
12.11967202128\n      - 10.23248733714\n      - 8.30080219923\n      - 6.80055773407\n      - 6.572919168714\n      - 7.365975022026\n      - 75.87391291542\n      - 65.0366728159\n      - 61.72599616455\n      - 55.18754438985\n      - 46.70586460657\n      - 33.50220537333\n      - 25.46395828245\n      - 18.60723297571\n      - 25.33895828245\n      - 19.33223297571\n      - 15.38047682225\n      - 12.56967202128\n      - 8.094987337144\n      - 8.03830219923\n      - 6.98805773407\n      - 7.697919168714\n      - 5.453475022026\n      - 65.27391291542\n      - 67.1366728159\n      - 53.92599616455\n      - 48.78754438985\n      - 45.90586460657\n      - 36.80220537333\n      - 22.96395828245\n      - 17.60723297571\n      - 23.01395828245\n      - 18.20723297571\n      - 14.33047682225\n      - 10.84467202128\n      - 9.182487337144\n      - 7.47580219923\n      - 6.20055773407\n      - 5.335419168714\n      - 5.303475022026\n      - 59.37391291542\n      - 55.4366728159\n      - 50.92599616455\n      - 47.08754438985\n      - 43.00586460657\n      - 35.30220537333\n      - 25.56395828245\n      - 17.60723297571\n      - 25.52645828245\n      - 18.35723297571\n      - 14.63047682225\n      - 11.51967202128\n      - 9.519987337144\n      - 7.06330219923\n      - 6.42555773407\n      - 5.185419168714\n      - 4.965975022026\n      - 66.77391291542\n      - 52.52599616455\n      - 27.88429931353\n      - 10.52352617863\n      - 11.23352617863\n      - 13.67352617863\n      - 5.021362784819\n      - 63.27391291542\n      - 50.02599616455\n      - 28.58429931353\n      - 11.95352617863\n      - 10.86352617863\n      - 12.73352617863\n      - 3.331362784819\n      - 62.07391291542\n      - 51.12599616455\n      - 28.28429931353\n      - 11.98352617863\n      - 9.923526178634\n      - 11.42352617863\n      - 4.831362784819\n      - 63.27391291542\n      - 53.62599616455\n      - 29.18429931353\n      - 10.67352617863\n      - 5.021362784819\n      - 10.86352617863\n   
   - 11.23352617863\n      - 4.831362784819\n      - 62.07391291542\n      - 50.12599616455\n      - 40.50586460657\n      - 27.38429931353\n      - 18.95079727771\n      - 19.32079727771\n      - 16.391415976\n      - 12.06352617863\n      - 10.02497402658\n      - 8.153464959245\n      - 6.061362784819\n      - 61.07391291542\n      - 52.12599616455\n      - 41.50586460657\n      - 30.58429931353\n      - 16.75079727771\n      - 17.07079727771\n      - 16.881415976\n      - 10.86352617863\n      - 9.194974026576\n      - 6.463464959245\n      - 6.811362784819\n      - 55.77391291542\n      - 49.62599616455\n      - 41.20586460657\n      - 31.18429931353\n      - 17.75079727771\n      - 18.42079727771\n      - 18.571415976\n      - 13.75352617863\n      - 6.944974026576\n      - 7.703464959245\n      - 9.511362784819\n      - 63.27391291542\n      - 54.1366728159\n      - 52.12599616455\n      - 49.58754438985\n      - 40.40586460657\n      - 32.10220537333\n      - 17.40723297571\n      - 18.27723297571\n      - 11.73797682225\n      - 7.989672021276\n      - 6.404987337144\n      - 4.32830219923\n      - 3.94555773407\n      - 3.232919168714\n      - 3.090975022026\n      - 22.30352617863\n      - 23.87352617863\n      - 16.18352617863\n      - 9.923526178634\n      - 10.18352617863\n      - 14.23352617863\n      - 56.07391291542\n      - 44.82599616455\n      - 40.70586460657\n      - 35.18429931353\n      - 26.55079727771\n      - 21.271415976\n      - 26.37079727771\n      - 21.381415976\n      - 15.81352617863\n      - 10.32497402658\n      - 11.22346495925\n      - 8.131362784819\n      - 65.97391291542\n      - 51.42599616455\n      - 40.20586460657\n      - 24.38429931353\n      - 20.75079727771\n      - 15.271415976\n      - 21.31079727771\n      - 16.241415976\n      - 14.35352617863\n      - 7.514974026576\n      - 7.023464959245\n      - 7.901362784819\n      - 51.97391291542\n      - 49.32599616455\n      - 33.50586460657\n      - 26.08429931353\n    
  - 22.45079727771\n      - 13.871415976\n      - 22.74079727771\n      - 14.521415976\n      - 11.46352617863\n      - 7.394974026576\n      - 6.353464959245\n      - 6.031362784819\n      - 11.80352617863\n      - 66.27391291542\n      - 54.62599616455\n      - 30.88429931353\n      - 11.42352617863\n      - 4.261362784819\n      - 11.05352617863\n      - 4.451362784819\n      - 13.30352617863\n      - 12.73352617863\n      - 13.75352617863\n      - 21.66395828245\n      - 22.11395828245\n      - 66.97391291542\n      - 51.72599616455\n      - 25.96395828245\n      - 25.97395828245\n      - 15.03797682225\n      - 9.034987337144\n      - 24.56395828245\n      - 24.85395828245\n      - 66.57391291542\n      - 51.02599616455\n      - 14.51797682225\n      - 8.694987337144\n      - 25.06395828245\n      - 25.11395828245\n  -   - 122.1511527598\n      - 122.6211240639\n      - 156.1508499375\n      - -599.1450584808\n      - -12.38948471101\n      - 19.33755088278\n      - 21.40590723345\n      - 17.86925432536\n      - 21.35590723345\n      - 18.04425432536\n      - 13.84616571438\n      - 10.95881863539\n      - 9.418115456519\n      - 7.704952567843\n      - 6.351080428381\n      - 6.22595148445\n      - 7.093292608367\n      - 119.8511527598\n      - 120.8211240639\n      - 163.5508499375\n      - -600.4450584808\n      - -15.48948471101\n      - 15.23755088278\n      - 19.60590723345\n      - 15.66925432536\n      - 19.48090723345\n      - 16.39425432536\n      - 13.62116571438\n      - 11.40881863539\n      - 7.280615456519\n      - 7.442452567843\n      - 6.538580428381\n      - 7.35095148445\n      - 5.180792608367\n      - 109.2511527598\n      - 122.9211240639\n      - 155.7508499375\n      - -606.8450584808\n      - -16.28948471101\n      - 18.53755088278\n      - 17.10590723345\n      - 14.66925432536\n      - 17.15590723345\n      - 15.26925432536\n      - 12.57116571438\n      - 9.683818635389\n      - 8.368115456519\n      - 6.879952567843\n      - 
5.751080428381\n      - 4.98845148445\n      - 5.030792608367\n      - 103.3511527598\n      - 111.2211240639\n      - 152.7508499375\n      - -608.5450584808\n      - -19.18948471101\n      - 17.03755088278\n      - 19.70590723345\n      - 14.66925432536\n      - 19.66840723345\n      - 15.41925432536\n      - 12.87116571438\n      - 10.35881863539\n      - 8.705615456519\n      - 6.467452567843\n      - 5.976080428381\n      - 4.83845148445\n      - 4.693292608367\n      - 110.7511527598\n      - 154.3508499375\n      - 18.46995434894\n      - 9.107714197815\n      - 9.817714197815\n      - 12.25771419782\n      - 4.778284133588\n      - 107.2511527598\n      - 151.8508499375\n      - 19.16995434894\n      - 10.53771419782\n      - 9.447714197815\n      - 11.31771419782\n      - 3.088284133588\n      - 106.0511527598\n      - 152.9508499375\n      - 18.86995434894\n      - 10.56771419782\n      - 8.507714197815\n      - 10.00771419782\n      - 4.588284133588\n      - 107.2511527598\n      - 155.4508499375\n      - 19.76995434894\n      - 9.257714197815\n      - 4.778284133588\n      - 9.447714197815\n      - 9.817714197815\n      - 4.588284133588\n      - 106.0511527598\n      - 151.9508499375\n      - -21.68948471101\n      - 17.96995434894\n      - 14.92797213765\n      - 15.29797213765\n      - 14.15288063203\n      - 10.64771419782\n      - 9.331447324355\n      - 7.759548240156\n      - 5.818284133588\n      - 105.0511527598\n      - 153.9508499375\n      - -20.68948471101\n      - 21.16995434894\n      - 12.72797213765\n      - 13.04797213765\n      - 14.64288063203\n      - 9.447714197815\n      - 8.501447324355\n      - 6.069548240156\n      - 6.568284133588\n      - 99.75115275983\n      - 151.4508499375\n      - -20.98948471101\n      - 21.76995434894\n      - 13.72797213765\n      - 14.39797213765\n      - 16.33288063203\n      - 12.33771419782\n      - 6.251447324355\n      - 7.309548240156\n      - 9.268284133588\n      - 107.2511527598\n      - 
109.9211240639\n      - 153.9508499375\n      - -606.0450584808\n      - -21.78948471101\n      - 13.83755088278\n      - 14.46925432536\n      - 15.33925432536\n      - 9.978665714379\n      - 6.828818635389\n      - 5.590615456519\n      - 3.732452567843\n      - 3.496080428381\n      - 2.88595148445\n      - 2.818292608367\n      - 20.88771419782\n      - 22.45771419782\n      - 14.76771419782\n      - 8.507714197815\n      - 8.767714197815\n      - 12.81771419782\n      - 100.0511527598\n      - 146.6508499375\n      - -21.48948471101\n      - 25.76995434894\n      - 22.52797213765\n      - 19.03288063203\n      - 22.34797213765\n      - 19.14288063203\n      - 14.39771419782\n      - 9.631447324355\n      - 10.82954824016\n      - 7.888284133588\n      - 109.9511527598\n      - 153.2508499375\n      - -21.98948471101\n      - 14.96995434894\n      - 16.72797213765\n      - 13.03288063203\n      - 17.28797213765\n      - 14.00288063203\n      - 12.93771419782\n      - 6.821447324355\n      - 6.629548240156\n      - 7.658284133588\n      - 95.95115275983\n      - 151.1508499375\n      - -28.68948471101\n      - 16.66995434894\n      - 18.42797213765\n      - 11.63288063203\n      - 18.71797213765\n      - 12.28288063203\n      - 10.04771419782\n      - 6.701447324355\n      - 5.959548240156\n      - 5.788284133588\n      - 10.38771419782\n      - 110.2511527598\n      - 156.4508499375\n      - 21.46995434894\n      - 10.00771419782\n      - 4.018284133588\n      - 9.637714197815\n      - 4.208284133588\n      - 11.88771419782\n      - 11.31771419782\n      - 12.33771419782\n      - 15.80590723345\n      - 16.25590723345\n      - 110.9511527598\n      - 153.5508499375\n      - 20.10590723345\n      - 20.11590723345\n      - 13.27866571438\n      - 8.220615456519\n      - 18.70590723345\n      - 18.99590723345\n      - 110.5511527598\n      - 152.8508499375\n      - 12.75866571438\n      - 7.880615456519\n      - 19.20590723345\n      - 19.25590723345\n  -   - 
-35.35804307658\n      - -53.29699942572\n      - -72.06021768605\n      - -76.24300976651\n      - -89.66597211166\n      - -117.5990460751\n      - -178.3813984168\n      - -314.1942358597\n      - -178.4313984168\n      - -314.0192358597\n      - -1385.074532566\n      - 503.9556946656\n      - 197.4559049595\n      - 115.7942913474\n      - 78.11722466414\n      - 57.55318477565\n      - 45.51113019711\n      - -37.65804307658\n      - -55.09699942572\n      - -64.66021768605\n      - -77.54300976651\n      - -92.76597211166\n      - -121.6990460751\n      - -180.1813984168\n      - -316.3942358597\n      - -180.3063984168\n      - -315.6692358597\n      - -1385.299532566\n      - 504.4056946656\n      - 195.3184049595\n      - 115.5317913474\n      - 78.30472466414\n      - 58.67818477565\n      - 43.59863019711\n      - -48.25804307658\n      - -52.99699942572\n      - -72.46021768605\n      - -83.94300976651\n      - -93.56597211166\n      - -118.3990460751\n      - -182.6813984168\n      - -317.3942358597\n      - -182.6313984168\n      - -316.7942358597\n      - -1386.349532566\n      - 502.6806946656\n      - 196.4059049595\n      - 114.9692913474\n      - 77.51722466414\n      - 56.31568477565\n      - 43.44863019711\n      - -54.15804307658\n      - -64.69699942572\n      - -75.46021768605\n      - -85.64300976651\n      - -96.46597211166\n      - -119.8990460751\n      - -180.0813984168\n      - -317.3942358597\n      - -180.1188984168\n      - -316.6442358597\n      - -1386.049532566\n      - 503.3556946656\n      - 196.7434049595\n      - 114.5567913474\n      - 77.74222466414\n      - 56.16568477565\n      - 43.11113019711\n      - -46.75804307658\n      - -73.86021768605\n      - -148.0932859294\n      - 1691.297253326\n      - 1692.007253326\n      - 1694.447253326\n      - 38.42217232425\n      - -50.25804307658\n      - -76.36021768605\n      - -147.3932859294\n      - 1692.727253326\n      - 1691.637253326\n      - 1693.507253326\n      - 
36.73217232425\n      - -51.45804307658\n      - -75.26021768605\n      - -147.6932859294\n      - 1692.757253326\n      - 1690.697253326\n      - 1692.197253326\n      - 38.23217232425\n      - -50.25804307658\n      - -72.76021768605\n      - -146.7932859294\n      - 1691.447253326\n      - 38.42217232425\n      - 1691.637253326\n      - 1692.007253326\n      - 38.23217232425\n      - -51.45804307658\n      - -76.26021768605\n      - -98.96597211166\n      - -148.5932859294\n      - -233.0828036794\n      - -232.7128036794\n      - -508.8494431202\n      - 1692.837253326\n      - 148.258324502\n      - 68.02827286157\n      - 39.46217232425\n      - -52.45804307658\n      - -74.26021768605\n      - -97.96597211166\n      - -145.3932859294\n      - -235.2828036794\n      - -234.9628036794\n      - -508.3594431202\n      - 1691.637253326\n      - 147.428324502\n      - 66.33827286157\n      - 40.21217232425\n      - -57.75804307658\n      - -76.76021768605\n      - -98.26597211166\n      - -144.7932859294\n      - -234.2828036794\n      - -233.6128036794\n      - -506.6694431202\n      - 1694.527253326\n      - 145.178324502\n      - 67.57827286157\n      - 42.91217232425\n      - -50.25804307658\n      - -65.99699942572\n      - -74.26021768605\n      - -83.14300976651\n      - -99.06597211166\n      - -123.0990460751\n      - -317.5942358597\n      - -316.7242358597\n      - -1388.942032566\n      - 499.8256946656\n      - 193.6284049595\n      - 111.8217913474\n      - 75.26222466414\n      - 54.21318477565\n      - 41.23613019711\n      - 1703.077253326\n      - 1704.647253326\n      - 1696.957253326\n      - 1690.697253326\n      - 1690.957253326\n      - 1695.007253326\n      - -57.45804307658\n      - -81.56021768605\n      - -98.76597211166\n      - -140.7932859294\n      - -225.4828036794\n      - -503.9694431202\n      - -225.6628036794\n      - -503.8594431202\n      - 1696.587253326\n      - 148.558324502\n      - 71.09827286157\n      - 
41.53217232425\n      - -47.55804307658\n      - -74.96021768605\n      - -99.26597211166\n      - -151.5932859294\n      - -231.2828036794\n      - -509.9694431202\n      - -230.7228036794\n      - -508.9994431202\n      - 1695.127253326\n      - 145.748324502\n      - 66.89827286157\n      - 41.30217232425\n      - -61.55804307658\n      - -77.06021768605\n      - -105.9659721117\n      - -149.8932859294\n      - -229.5828036794\n      - -511.3694431202\n      - -229.2928036794\n      - -510.7194431202\n      - 1692.237253326\n      - 145.628324502\n      - 66.22827286157\n      - 39.43217232425\n      - 1692.577253326\n      - -47.25804307658\n      - -71.76021768605\n      - -145.0932859294\n      - 1692.197253326\n      - 37.66217232425\n      - 1691.827253326\n      - 37.85217232425\n      - 1694.077253326\n      - 1693.507253326\n      - 1694.527253326\n      - -183.9813984168\n      - -183.5313984168\n      - -46.55804307658\n      - -74.66021768605\n      - -179.6813984168\n      - -179.6713984168\n      - -1385.642032566\n      - 196.2584049595\n      - -181.0813984168\n      - -180.7913984168\n      - -46.95804307658\n      - -75.36021768605\n      - -1386.162032566\n      - 195.9184049595\n      - -180.5813984168\n      - -180.5313984168\n  -   - 117.9473464966\n      - 104.1334190546\n      - 90.05657396185\n      - 91.2207789054\n      - 83.93061974583\n      - 71.30942158128\n      - 62.32199544308\n      - 59.88083883476\n      - 62.27199544308\n      - 60.05583883476\n      - 62.77642327569\n      - 76.9431054476\n      - 131.4875801385\n      - -2342.694563021\n      - -86.51002021515\n      - -37.31642203542\n      - -19.79861800279\n      - 115.6473464966\n      - 102.3334190546\n      - 97.45657396185\n      - 89.9207789054\n      - 80.83061974583\n      - 67.20942158128\n      - 60.52199544308\n      - 57.68083883476\n      - 60.39699544308\n      - 58.40583883476\n      - 62.55142327569\n      - 77.3931054476\n      - 129.3500801385\n      - 
-2342.957063021\n      - -86.32252021515\n      - -36.19142203542\n      - -21.71111800279\n      - 105.0473464966\n      - 104.4334190546\n      - 89.65657396185\n      - 83.5207789054\n      - 80.03061974583\n      - 70.50942158128\n      - 58.02199544308\n      - 56.68083883476\n      - 58.07199544308\n      - 57.28083883476\n      - 61.50142327569\n      - 75.6681054476\n      - 130.4375801385\n      - -2343.519563021\n      - -87.11002021515\n      - -38.55392203542\n      - -21.86111800279\n      - 99.14734649662\n      - 92.73341905458\n      - 86.65657396185\n      - 81.8207789054\n      - 77.13061974583\n      - 69.00942158128\n      - 60.62199544308\n      - 56.68083883476\n      - 60.58449544308\n      - 57.43083883476\n      - 61.80142327569\n      - 76.3431054476\n      - 130.7750801385\n      - -2343.932063021\n      - -86.88502021515\n      - -38.70392203542\n      - -22.19861800279\n      - 106.5473464966\n      - 88.25657396185\n      - 61.9476238727\n      - 64.62033506263\n      - 65.33033506263\n      - 67.77033506263\n      - -17.38684121661\n      - 103.0473464966\n      - 85.75657396185\n      - 62.6476238727\n      - 66.05033506263\n      - 64.96033506263\n      - 66.83033506263\n      - -19.07684121661\n      - 101.8473464966\n      - 86.85657396185\n      - 62.3476238727\n      - 66.08033506263\n      - 64.02033506263\n      - 65.52033506263\n      - -17.57684121661\n      - 103.0473464966\n      - 89.35657396185\n      - 63.2476238727\n      - 64.77033506263\n      - -17.38684121661\n      - 64.96033506263\n      - 65.33033506263\n      - -17.57684121661\n      - 101.8473464966\n      - 85.85657396185\n      - 74.63061974583\n      - 61.4476238727\n      - 55.64384266576\n      - 56.01384266576\n      - 58.81780905206\n      - 66.16033506263\n      - 253.6554490216\n      - -52.38720394238\n      - -16.34684121661\n      - 100.8473464966\n      - 87.85657396185\n      - 75.63061974583\n      - 64.6476238727\n      - 53.44384266576\n      
- 53.76384266576\n      - 59.30780905206\n      - 64.96033506263\n      - 252.8254490216\n      - -54.07720394238\n      - -15.59684121661\n      - 95.54734649662\n      - 85.35657396185\n      - 75.33061974583\n      - 65.2476238727\n      - 54.44384266576\n      - 55.11384266576\n      - 60.99780905206\n      - 67.85033506263\n      - 250.5754490216\n      - -52.83720394238\n      - -12.89684121661\n      - 103.0473464966\n      - 91.43341905458\n      - 87.85657396185\n      - 84.3207789054\n      - 74.53061974583\n      - 65.80942158128\n      - 56.48083883476\n      - 57.35083883476\n      - 58.90892327569\n      - 72.8131054476\n      - 127.6600801385\n      - -2346.667063021\n      - -89.36502021515\n      - -40.65642203542\n      - -24.07361800279\n      - 76.40033506263\n      - 77.97033506263\n      - 70.28033506263\n      - 64.02033506263\n      - 64.28033506263\n      - 68.33033506263\n      - 95.84734649662\n      - 80.55657396185\n      - 74.83061974583\n      - 69.2476238727\n      - 63.24384266576\n      - 63.69780905206\n      - 63.06384266576\n      - 63.80780905206\n      - 69.91033506263\n      - 253.9554490216\n      - -49.31720394238\n      - -14.27684121661\n      - 105.7473464966\n      - 87.15657396185\n      - 74.33061974583\n      - 58.4476238727\n      - 57.44384266576\n      - 57.69780905206\n      - 58.00384266576\n      - 58.66780905206\n      - 68.45033506263\n      - 251.1454490216\n      - -53.51720394238\n      - -14.50684121661\n      - 91.74734649662\n      - 85.05657396185\n      - 67.63061974583\n      - 60.1476238727\n      - 59.14384266576\n      - 56.29780905206\n      - 59.43384266576\n      - 56.94780905206\n      - 65.56033506263\n      - 251.0254490216\n      - -54.18720394238\n      - -16.37684121661\n      - 65.90033506263\n      - 106.0473464966\n      - 90.35657396185\n      - 64.9476238727\n      - 65.52033506263\n      - -18.14684121661\n      - 65.15033506263\n      - -17.95684121661\n      - 67.40033506263\n     
 - 66.83033506263\n      - 67.85033506263\n      - 56.72199544308\n      - 57.17199544308\n      - 106.7473464966\n      - 87.45657396185\n      - 61.02199544308\n      - 61.03199544308\n      - 62.20892327569\n      - 130.2900801385\n      - 59.62199544308\n      - 59.91199544308\n      - 106.3473464966\n      - 86.75657396185\n      - 61.68892327569\n      - 129.9500801385\n      - 60.12199544308\n      - 60.17199544308\n  -   - 175.7616267494\n      - 135.815392655\n      - 107.427429421\n      - 99.44443456745\n      - 85.70608965926\n      - 64.3171217786\n      - 44.36460041182\n      - 32.95338522348\n      - 44.31460041182\n      - 33.12838522348\n      - 24.75298136325\n      - 19.273476213\n      - 15.97636612239\n      - 13.00317519799\n      - 10.70826764037\n      - 9.858997178816\n      - 10.15607036729\n      - 173.4616267494\n      - 134.015392655\n      - 114.827429421\n      - 98.14443456745\n      - 82.60608965926\n      - 60.2171217786\n      - 42.56460041182\n      - 30.75338522348\n      - 42.43960041182\n      - 31.47838522348\n      - 24.52798136325\n      - 19.723476213\n      - 13.83886612239\n      - 12.74067519799\n      - 10.89576764037\n      - 10.98399717882\n      - 8.243570367288\n      - 162.8616267494\n      - 136.115392655\n      - 107.027429421\n      - 91.74443456745\n      - 81.80608965926\n      - 63.5171217786\n      - 40.06460041182\n      - 29.75338522348\n      - 40.11460041182\n      - 30.35338522348\n      - 23.47798136325\n      - 17.998476213\n      - 14.92636612239\n      - 12.17817519799\n      - 10.10826764037\n      - 8.621497178816\n      - 8.093570367288\n      - 156.9616267494\n      - 124.415392655\n      - 104.027429421\n      - 90.04443456745\n      - 78.90608965926\n      - 62.0171217786\n      - 42.66460041182\n      - 29.75338522348\n      - 42.62710041182\n      - 30.50338522348\n      - 23.77798136325\n      - 18.673476213\n      - 15.26386612239\n      - 11.76567519799\n      - 10.33326764037\n      - 
8.471497178816\n      - 7.756070367288\n      - 164.3616267494\n      - 105.627429421\n      - 48.8819398286\n      - 18.5818314891\n      - 19.2918314891\n      - 21.7318314891\n      - 7.60022447721\n      - 160.8616267494\n      - 103.127429421\n      - 49.5819398286\n      - 20.0118314891\n      - 18.9218314891\n      - 20.7918314891\n      - 5.91022447721\n      - 159.6616267494\n      - 104.227429421\n      - 49.2819398286\n      - 20.0418314891\n      - 17.9818314891\n      - 19.4818314891\n      - 7.41022447721\n      - 160.8616267494\n      - 106.727429421\n      - 50.1819398286\n      - 18.7318314891\n      - 7.60022447721\n      - 18.9218314891\n      - 19.2918314891\n      - 7.41022447721\n      - 159.6616267494\n      - 103.227429421\n      - 76.40608965926\n      - 48.3819398286\n      - 33.22986519451\n      - 33.59986519451\n      - 26.87225158213\n      - 20.1218314891\n      - 15.2111293494\n      - 11.73215109931\n      - 8.64022447721\n      - 158.6616267494\n      - 105.227429421\n      - 77.40608965926\n      - 51.5819398286\n      - 31.02986519451\n      - 31.34986519451\n      - 27.36225158213\n      - 18.9218314891\n      - 14.3811293494\n      - 10.04215109931\n      - 9.39022447721\n      - 153.3616267494\n      - 102.727429421\n      - 77.10608965926\n      - 52.1819398286\n      - 32.02986519451\n      - 32.69986519451\n      - 29.05225158213\n      - 21.8118314891\n      - 12.1311293494\n      - 11.28215109931\n      - 12.09022447721\n      - 160.8616267494\n      - 123.115392655\n      - 105.227429421\n      - 92.54443456745\n      - 76.30608965926\n      - 58.8171217786\n      - 29.55338522348\n      - 30.42338522348\n      - 20.88548136325\n      - 15.143476213\n      - 12.14886612239\n      - 9.030675197988\n      - 7.853267640371\n      - 6.518997178816\n      - 5.881070367288\n      - 30.3618314891\n      - 31.9318314891\n      - 24.2418314891\n      - 17.9818314891\n      - 18.2418314891\n      - 22.2918314891\n      - 
153.6616267494\n      - 97.927429421\n      - 76.60608965926\n      - 56.1819398286\n      - 40.82986519451\n      - 31.75225158213\n      - 40.64986519451\n      - 31.86225158213\n      - 23.8718314891\n      - 15.5111293494\n      - 14.80215109931\n      - 10.71022447721\n      - 163.5616267494\n      - 104.527429421\n      - 76.10608965926\n      - 45.3819398286\n      - 35.02986519451\n      - 25.75225158213\n      - 35.58986519451\n      - 26.72225158213\n      - 22.4118314891\n      - 12.7011293494\n      - 10.60215109931\n      - 10.48022447721\n      - 149.5616267494\n      - 102.427429421\n      - 69.40608965926\n      - 47.0819398286\n      - 36.72986519451\n      - 24.35225158213\n      - 37.01986519451\n      - 25.00225158213\n      - 19.5218314891\n      - 12.5811293494\n      - 9.93215109931\n      - 8.61022447721\n      - 19.8618314891\n      - 163.8616267494\n      - 107.727429421\n      - 51.8819398286\n      - 19.4818314891\n      - 6.84022447721\n      - 19.1118314891\n      - 7.03022447721\n      - 21.3618314891\n      - 20.7918314891\n      - 21.8118314891\n      - 38.76460041182\n      - 39.21460041182\n      - 164.5616267494\n      - 104.827429421\n      - 43.06460041182\n      - 43.07460041182\n      - 24.18548136325\n      - 14.77886612239\n      - 41.66460041182\n      - 41.95460041182\n      - 164.1616267494\n      - 104.127429421\n      - 23.66548136325\n      - 14.43886612239\n      - 42.16460041182\n      - 42.21460041182\n  -   - 28.24095690087\n      - 21.59536620662\n      - 13.25712029593\n      - 19.07487076201\n      - 15.58993695617\n      - 8.655383440756\n      - 5.734068533914\n      - 4.165799626156\n      - 5.684068533914\n      - 4.340799626156\n      - 2.383275058766\n      - 1.39686578611\n      - 1.39855054723\n      - 0.9316549828049\n      - 0.5908378210265\n      - 1.296279986221\n      - 2.850605037275\n      - 25.94095690087\n      - 19.79536620662\n      - 20.65712029593\n      - 17.77487076201\n      - 
12.48993695617\n      - 4.555383440756\n      - 3.934068533914\n      - 1.965799626156\n      - 3.809068533914\n      - 2.690799626156\n      - 2.158275058766\n      - 1.84686578611\n      - -0.7389494527701\n      - 0.6691549828049\n      - 0.7783378210265\n      - 2.421279986221\n      - 0.9381050372751\n      - 15.34095690087\n      - 21.89536620662\n      - 12.85712029593\n      - 11.37487076201\n      - 11.68993695617\n      - 7.855383440756\n      - 1.434068533914\n      - 0.9657996261561\n      - 1.484068533914\n      - 1.565799626156\n      - 1.108275058766\n      - 0.12186578611\n      - 0.3485505472299\n      - 0.1066549828049\n      - -0.009162178973513\n      - 0.05877998622137\n      - 0.7881050372751\n      - 9.440956900867\n      - 10.19536620662\n      - 9.857120295928\n      - 9.674870762006\n      - 8.789936956172\n      - 6.355383440756\n      - 4.034068533914\n      - 0.9657996261561\n      - 3.996568533914\n      - 1.715799626156\n      - 1.408275058766\n      - 0.79686578611\n      - 0.6860505472299\n      - -0.3058450171951\n      - 0.2158378210265\n      - -0.09122001377863\n      - 0.4506050372751\n      - 16.84095690087\n      - 11.45712029593\n      - 3.061569786218\n      - -1.357147755765\n      - -0.6471477557654\n      - 1.792852244235\n      - 0.8347758927937\n      - 13.34095690087\n      - 8.957120295928\n      - 3.761569786218\n      - 0.07285224423464\n      - -1.017147755765\n      - 0.8528522442346\n      - -0.8552241072063\n      - 12.14095690087\n      - 10.05712029593\n      - 3.461569786218\n      - 0.1028522442346\n      - -1.957147755765\n      - -0.4571477557654\n      - 0.6447758927937\n      - 13.34095690087\n      - 12.55712029593\n      - 4.361569786218\n      - -1.207147755765\n      - 0.8347758927937\n      - -1.017147755765\n      - -0.6471477557654\n      - 0.6447758927937\n      - 12.14095690087\n      - 9.057120295928\n      - 6.289936956172\n      - 2.561569786218\n      - 0.09812922077451\n      - 
0.4681292207745\n      - 1.601218748851\n      - 0.1828522442346\n      - 1.967848278564\n      - 2.434721057271\n      - 1.874775892794\n      - 11.14095690087\n      - 11.05712029593\n      - 7.289936956172\n      - 5.761569786218\n      - -2.101870779225\n      - -1.781870779225\n      - 2.091218748851\n      - -1.017147755765\n      - 1.137848278564\n      - 0.744721057271\n      - 2.624775892794\n      - 5.840956900867\n      - 8.557120295928\n      - 6.989936956172\n      - 6.361569786218\n      - -1.101870779225\n      - -0.4318707792255\n      - 3.781218748851\n      - 1.872852244235\n      - -1.112151721436\n      - 1.984721057271\n      - 5.324775892794\n      - 13.34095690087\n      - 8.895366206617\n      - 11.05712029593\n      - 12.17487076201\n      - 6.189936956172\n      - 3.155383440756\n      - 0.7657996261561\n      - 1.635799626156\n      - -1.484224941234\n      - -2.73313421389\n      - -2.42894945277\n      - -3.040845017195\n      - -2.264162178974\n      - -2.043720013779\n      - -1.424394962725\n      - 10.42285224423\n      - 11.99285224423\n      - 4.302852244235\n      - -1.957147755765\n      - -1.697147755765\n      - 2.352852244235\n      - 6.140956900867\n      - 3.757120295928\n      - 6.489936956172\n      - 10.36156978622\n      - 7.698129220775\n      - 6.481218748851\n      - 7.518129220775\n      - 6.591218748851\n      - 3.932852244235\n      - 2.267848278564\n      - 5.504721057271\n      - 3.944775892794\n      - 16.04095690087\n      - 10.35712029593\n      - 5.989936956172\n      - -0.4384302137822\n      - 1.898129220775\n      - 0.4812187488511\n      - 2.458129220775\n      - 1.451218748851\n      - 2.472852244235\n      - -0.5421517214358\n      - 1.304721057271\n      - 3.714775892794\n      - 2.040956900867\n      - 8.257120295928\n      - -0.7100630438283\n      - 1.261569786218\n      - 3.598129220775\n      - -0.9187812511489\n      - 3.888129220775\n      - -0.2687812511489\n      - -0.4171477557654\n      - 
-0.6621517214358\n      - 0.634721057271\n      - 1.844775892794\n      - -0.07714775576537\n      - 16.34095690087\n      - 13.55712029593\n      - 6.061569786218\n      - -0.4571477557654\n      - 0.07477589279366\n      - -0.8271477557654\n      - 0.2647758927937\n      - 1.422852244235\n      - 0.8528522442346\n      - 1.872852244235\n      - 0.1340685339144\n      - 0.5840685339144\n      - 17.04095690087\n      - 10.65712029593\n      - 4.434068533914\n      - 4.444068533914\n      - 1.815775058766\n      - 0.2010505472299\n      - 3.034068533914\n      - 3.324068533914\n      - 16.64095690087\n      - 9.957120295928\n      - 1.295775058766\n      - -0.1389494527701\n      - 3.534068533914\n      - 3.584068533914\n  -   - 19.67905061421\n      - 12.78536491634\n      - 4.453409401868\n      - 10.42602658124\n      - 7.181651769754\n      - 0.8467383120783\n      - -0.8151544815029\n      - -1.28878727387\n      - -0.8651544815029\n      - -1.11378727387\n      - -2.178296849214\n      - -2.4437228135\n      - -1.857756175876\n      - -1.847417965917\n      - -1.795022214911\n      - -0.7628423028115\n      - 1.065115779582\n      - 17.37905061421\n      - 10.98536491634\n      - 11.85340940187\n      - 9.126026581237\n      - 4.081651769754\n      - -3.253261687922\n      - -2.615154481503\n      - -3.48878727387\n      - -2.740154481503\n      - -2.76378727387\n      - -2.403296849214\n      - -1.9937228135\n      - -3.995256175876\n      - -2.109917965917\n      - -1.607522214911\n      - 0.3621576971885\n      - -0.8473842204181\n      - 6.779050614207\n      - 13.08536491634\n      - 4.053409401868\n      - 2.726026581237\n      - 3.281651769754\n      - 0.04673831207827\n      - -5.115154481503\n      - -4.48878727387\n      - -5.065154481503\n      - -3.88878727387\n      - -3.453296849214\n      - -3.7187228135\n      - -2.907756175876\n      - -2.672417965917\n      - -2.395022214911\n      - -2.000342302812\n      - -0.9973842204181\n      - 
0.8790506142075\n      - 1.385364916339\n      - 1.053409401868\n      - 1.026026581237\n      - 0.3816517697539\n      - -1.453261687922\n      - -2.515154481503\n      - -4.48878727387\n      - -2.552654481503\n      - -3.73878727387\n      - -3.153296849214\n      - -3.0437228135\n      - -2.570256175876\n      - -3.084917965917\n      - -2.170022214911\n      - -2.150342302812\n      - -1.334884220418\n      - 8.279050614207\n      - 2.653409401868\n      - -4.10628705125\n      - -5.539020595211\n      - -4.829020595211\n      - -2.389020595211\n      - -0.8305062664824\n      - 4.779050614207\n      - 0.1534094018679\n      - -3.40628705125\n      - -4.109020595211\n      - -5.199020595211\n      - -3.329020595211\n      - -2.520506266482\n      - 3.579050614207\n      - 1.253409401868\n      - -3.70628705125\n      - -4.079020595211\n      - -6.139020595211\n      - -4.639020595211\n      - -1.020506266482\n      - 4.779050614207\n      - 3.753409401868\n      - -2.80628705125\n      - -5.389020595211\n      - -0.8305062664824\n      - -5.199020595211\n      - -4.829020595211\n      - -1.020506266482\n      - 3.579050614207\n      - 0.2534094018679\n      - -2.118348230246\n      - -4.60628705125\n      - -5.8778638078\n      - -5.5078638078\n      - -3.383083895331\n      - -3.999020595211\n      - -1.038044411845\n      - 0.2196196759168\n      - 0.2094937335176\n      - 2.579050614207\n      - 2.253409401868\n      - -1.118348230246\n      - -1.40628705125\n      - -8.0778638078\n      - -7.7578638078\n      - -2.893083895331\n      - -5.199020595211\n      - -1.868044411845\n      - -1.470380324083\n      - 0.9594937335176\n      - -2.720949385793\n      - -0.2465905981321\n      - -1.418348230246\n      - -0.8062870512504\n      - -7.0778638078\n      - -6.4078638078\n      - -1.203083895331\n      - -2.309020595211\n      - -4.118044411845\n      - -0.2303803240832\n      - 3.659493733518\n      - 4.779050614207\n      - 0.0853649163389\n      - 
2.253409401868\n      - 3.526026581237\n      - -2.218348230246\n      - -4.653261687922\n      - -4.68878727387\n      - -3.81878727387\n      - -6.045796849214\n      - -6.5737228135\n      - -5.685256175876\n      - -5.819917965917\n      - -4.650022214911\n      - -4.102842302812\n      - -3.209884220418\n      - 6.240979404789\n      - 7.810979404789\n      - 0.1209794047887\n      - -6.139020595211\n      - -5.879020595211\n      - -1.829020595211\n      - -2.420949385793\n      - -5.046590598132\n      - -1.918348230246\n      - 3.19371294875\n      - 1.7221361922\n      - 1.496916104669\n      - 1.5421361922\n      - 1.606916104669\n      - -0.2490205952113\n      - -0.738044411845\n      - 3.289619675917\n      - 2.279493733518\n      - 7.479050614207\n      - 1.553409401868\n      - -2.418348230246\n      - -7.60628705125\n      - -4.0778638078\n      - -4.503083895331\n      - -3.5178638078\n      - -3.533083895331\n      - -1.709020595211\n      - -3.548044411845\n      - -0.9103803240832\n      - 2.049493733518\n      - -6.520949385793\n      - -0.5465905981321\n      - -9.118348230246\n      - -5.90628705125\n      - -2.3778638078\n      - -5.903083895331\n      - -2.0878638078\n      - -5.253083895331\n      - -4.599020595211\n      - -3.668044411845\n      - -1.580380324083\n      - 0.1794937335176\n      - -4.259020595211\n      - 7.779050614207\n      - 4.753409401868\n      - -1.10628705125\n      - -4.639020595211\n      - -1.590506266482\n      - -5.009020595211\n      - -1.400506266482\n      - -2.759020595211\n      - -3.329020595211\n      - -2.309020595211\n      - -6.415154481503\n      - -5.965154481503\n      - 8.479050614207\n      - 1.853409401868\n      - -2.115154481503\n      - -2.105154481503\n      - -2.745796849214\n      - -3.055256175876\n      - -3.515154481503\n      - -3.225154481503\n      - 8.079050614207\n      - 1.153409401868\n      - -3.265796849214\n      - -3.395256175876\n      - -3.015154481503\n      - 
-2.965154481503\n  -   - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      
- 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\nhistory_criterion_expected:\n  -   - 21.53511643627\n      - 14.80453604351\n      - 6.548558251064\n      - 12.54188075473\n      - 9.282890198608\n      - 2.859555210712\n      - 0.9381817894678\n      - 0.2048532883114\n      - 0.8881817894678\n      - 0.3798532883114\n      - -0.9101956814319\n      - -1.36444138824\n      - -0.9351994446357\n      - -1.055070381505\n      - -1.111335532899\n      - -0.1703442432756\n      - 1.580641245921\n      - 19.23511643627\n      - 13.00453604351\n      - 13.94855825106\n      - 11.24188075473\n      - 6.182890198608\n      - -1.240444789288\n      - -0.8618182105322\n      - -1.995146711689\n      - -0.9868182105322\n      - -1.270146711689\n      - -1.135195681432\n      - -0.9144413882404\n      - -3.072699444636\n      - -1.317570381505\n      - -0.9238355328992\n      - 0.9546557567244\n      - -0.3318587540789\n      - 8.635116436265\n      - 15.10453604351\n      - 6.148558251063\n      - 4.841880754733\n      - 5.382890198608\n      - 2.059555210712\n      - -3.361818210532\n      - -2.995146711689\n      - -3.311818210532\n      - -2.395146711689\n      - -2.185195681432\n      - -2.63944138824\n      - -1.985199444636\n      - 
-1.880070381505\n      - -1.711335532899\n      - -1.407844243276\n      - -0.4818587540789\n      - 2.735116436265\n      - 3.404536043506\n      - 3.148558251063\n      - 3.141880754733\n      - 2.482890198608\n      - 0.5595552107122\n      - -0.7618182105322\n      - -2.995146711689\n      - -0.7993182105322\n      - -2.245146711689\n      - -1.885195681432\n      - -1.96444138824\n      - -1.647699444636\n      - -2.292570381505\n      - -1.486335532899\n      - -1.557844243276\n      - -0.8193587540789\n      - 10.13511643627\n      - 4.748558251063\n      - -2.218096467799\n      - -4.369688200573\n      - -3.659688200573\n      - -1.219688200573\n      - -0.3489655844206\n      - 6.635116436265\n      - 2.248558251063\n      - -1.518096467799\n      - -2.939688200573\n      - -4.029688200573\n      - -2.159688200573\n      - -2.038965584421\n      - 5.435116436265\n      - 3.348558251064\n      - -1.818096467799\n      - -2.909688200573\n      - -4.969688200573\n      - -3.469688200573\n      - -0.5389655844206\n      - 6.635116436265\n      - 5.848558251064\n      - -0.918096467799\n      - -4.219688200573\n      - -0.3489655844206\n      - -4.029688200573\n      - -3.659688200573\n      - -0.5389655844206\n      - 5.435116436265\n      - 2.348558251064\n      - -0.0171098013921\n      - -2.718096467799\n      - -4.257793595776\n      - -3.887793595776\n      - -2.006947842151\n      - -2.829688200573\n      - -0.1835757519589\n      - 0.8557490906722\n      - 0.6910344155794\n      - 4.435116436265\n      - 4.348558251064\n      - 0.9828901986079\n      - 0.481903532201\n      - -6.457793595776\n      - -6.137793595776\n      - -1.516947842151\n      - -4.029688200573\n      - -1.013575751959\n      - -0.8342509093278\n      - 1.441034415579\n      - -0.8648835637348\n      - 1.848558251064\n      - 0.6828901986079\n      - 1.081903532201\n      - -5.457793595776\n      - -4.787793595776\n      - 0.1730521578493\n      - -1.139688200573\n      - 
-3.263575751959\n      - 0.4057490906722\n      - 4.141034415579\n      - 6.635116436265\n      - 2.104536043506\n      - 4.348558251064\n      - 5.641880754733\n      - -0.1171098013921\n      - -2.640444789288\n      - -3.195146711689\n      - -2.325146711689\n      - -4.777695681432\n      - -5.49444138824\n      - -4.762699444636\n      - -5.027570381505\n      - -3.966335532899\n      - -3.510344243276\n      - -2.694358754079\n      - 7.410311799427\n      - 8.980311799427\n      - 1.290311799427\n      - -4.969688200573\n      - -4.709688200573\n      - -0.659688200573\n      - -0.5648835637348\n      - -2.951441748936\n      - 0.1828901986079\n      - 5.081903532201\n      - 3.342206404224\n      - 2.873052157849\n      - 3.162206404224\n      - 2.983052157849\n      - 0.920311799427\n      - 0.1164242480411\n      - 3.925749090672\n      - 2.761034415579\n      - 9.335116436265\n      - 3.648558251063\n      - -0.3171098013921\n      - -5.718096467799\n      - -2.457793595776\n      - -3.126947842151\n      - -1.897793595776\n      - -2.156947842151\n      - -0.539688200573\n      - -2.693575751959\n      - -0.2742509093278\n      - 2.531034415579\n      - -4.664883563735\n      - 1.548558251064\n      - -7.017109801392\n      - -4.018096467799\n      - -0.7577935957756\n      - -4.526947842151\n      - -0.4677935957756\n      - -3.876947842151\n      - -3.429688200573\n      - -2.813575751959\n      - -0.9442509093278\n      - 0.6610344155794\n      - -3.089688200573\n      - 9.635116436265\n      - 6.848558251064\n      - 0.781903532201\n      - -3.469688200573\n      - -1.108965584421\n      - -3.839688200573\n      - -0.9189655844206\n      - -1.589688200573\n      - -2.159688200573\n      - -1.139688200573\n      - -4.661818210532\n      - -4.211818210532\n      - 10.33511643627\n      - 3.948558251064\n      - -0.3618182105322\n      - -0.3518182105322\n      - -1.477695681432\n      - -2.132699444636\n      - -1.761818210532\n      - 
-1.471818210532\n      - 9.935116436265\n      - 3.248558251063\n      - -1.997695681432\n      - -2.472699444636\n      - -1.261818210532\n      - -1.211818210532\n  -   - 25.01562287811\n      - 18.67576650474\n      - 10.71425043997\n      - 16.92850306334\n      - 13.83328982937\n      - 7.61143273431\n      - 5.780449944004\n      - 4.918595910462\n      - 5.730449944004\n      - 5.093595910462\n      - 3.573230198002\n      - 2.843276294294\n      - 2.98078293018\n      - 2.569118760852\n      - 2.229814978179\n      - 2.901300021005\n      - 4.398727952741\n      - 22.71562287811\n      - 16.87576650474\n      - 18.11425043997\n      - 15.62850306334\n      - 10.73328982937\n      - 3.51143273431\n      - 3.980449944004\n      - 2.718595910462\n      - 3.855449944004\n      - 3.443595910462\n      - 3.348230198002\n      - 3.293276294294\n      - 0.8432829301802\n      - 2.306618760852\n      - 2.417314978179\n      - 4.026300021005\n      - 2.486227952741\n      - 12.11562287811\n      - 18.97576650474\n      - 10.31425043997\n      - 9.22850306334\n      - 9.933289829366\n      - 6.81143273431\n      - 1.480449944004\n      - 1.718595910462\n      - 1.530449944004\n      - 2.318595910462\n      - 2.298230198002\n      - 1.568276294294\n      - 1.93078293018\n      - 1.744118760852\n      - 1.629814978179\n      - 1.663800021005\n      - 2.336227952741\n      - 6.215622878108\n      - 7.275766504742\n      - 7.314250439974\n      - 7.52850306334\n      - 7.033289829366\n      - 5.31143273431\n      - 4.080449944004\n      - 1.718595910462\n      - 4.042949944004\n      - 2.468595910462\n      - 2.598230198002\n      - 2.243276294294\n      - 2.26828293018\n      - 1.331618760852\n      - 1.854814978179\n      - 1.513800021005\n      - 1.998727952741\n      - 13.61562287811\n      - 8.914250439974\n      - 2.617857443871\n      - -0.02069875634249\n      - 0.6893012436575\n      - 3.129301243658\n      - 2.348674115464\n      - 10.11562287811\n      - 
6.414250439974\n      - 3.317857443871\n      - 1.409301243658\n      - 0.3193012436575\n      - 2.189301243658\n      - 0.6586741154643\n      - 8.915622878108\n      - 7.514250439974\n      - 3.017857443871\n      - 1.439301243658\n      - -0.6206987563425\n      - 0.8793012436575\n      - 2.158674115464\n      - 10.11562287811\n      - 10.01425043997\n      - 3.917857443871\n      - 0.1293012436575\n      - 2.348674115464\n      - 0.3193012436575\n      - 0.6893012436575\n      - 2.158674115464\n      - 8.915622878108\n      - 6.514250439974\n      - 4.533289829366\n      - 2.117857443871\n      - 0.5381907245488\n      - 0.9081907245488\n      - 2.599956711546\n      - 1.519301243658\n      - 3.585844975595\n      - 4.0602621231\n      - 3.388674115464\n      - 7.915622878108\n      - 8.514250439974\n      - 5.533289829366\n      - 5.317857443871\n      - -1.661809275451\n      - -1.341809275451\n      - 3.089956711546\n      - 0.3193012436575\n      - 2.755844975595\n      - 2.3702621231\n      - 4.138674115464\n      - 2.615622878108\n      - 6.014250439974\n      - 5.233289829366\n      - 5.917857443871\n      - -0.6618092754512\n      - 0.008190724548808\n      - 4.779956711546\n      - 3.209301243658\n      - 0.505844975595\n      - 3.6102621231\n      - 6.838674115464\n      - 10.11562287811\n      - 5.975766504742\n      - 8.514250439974\n      - 10.02850306334\n      - 4.433289829366\n      - 2.11143273431\n      - 1.518595910462\n      - 2.388595910462\n      - -0.2942698019983\n      - -1.286723705706\n      - -0.8467170698198\n      - -1.403381239148\n      - -0.6251850218209\n      - -0.4386999789948\n      - 0.1237279527411\n      - 11.75930124366\n      - 13.32930124366\n      - 5.639301243658\n      - -0.6206987563425\n      - -0.3606987563425\n      - 3.689301243658\n      - 2.915622878108\n      - 1.214250439974\n      - 4.733289829366\n      - 9.917857443871\n      - 8.138190724549\n      - 7.479956711546\n      - 7.958190724549\n      - 
7.589956711546\n      - 5.269301243658\n      - 3.885844975595\n      - 7.1302621231\n      - 5.458674115464\n      - 12.81562287811\n      - 7.814250439974\n      - 4.233289829366\n      - -0.8821425561292\n      - 2.338190724549\n      - 1.479956711546\n      - 2.898190724549\n      - 2.449956711546\n      - 3.809301243658\n      - 1.075844975595\n      - 2.9302621231\n      - 5.228674115464\n      - -1.184377121892\n      - 5.714250439974\n      - -2.466710170634\n      - 0.8178574438708\n      - 4.038190724549\n      - 0.07995671154575\n      - 4.328190724549\n      - 0.7299567115457\n      - 0.9193012436575\n      - 0.955844975595\n      - 2.2602621231\n      - 3.358674115464\n      - 1.259301243658\n      - 13.11562287811\n      - 11.01425043997\n      - 5.617857443871\n      - 0.8793012436575\n      - 1.588674115464\n      - 0.5093012436575\n      - 1.778674115464\n      - 2.759301243658\n      - 2.189301243658\n      - 3.209301243658\n      - 0.1804499440042\n      - 0.6304499440042\n      - 13.81562287811\n      - 8.114250439974\n      - 4.480449944004\n      - 4.490449944004\n      - 3.005730198002\n      - 1.78328293018\n      - 3.080449944004\n      - 3.370449944004\n      - 13.41562287811\n      - 7.414250439974\n      - 2.485730198002\n      - 1.44328293018\n      - 3.580449944004\n      - 3.630449944004\n  -   - 84.68988065196\n      - 70.73054388289\n      - 56.46322643196\n      - 57.38823556867\n      - 49.80586460657\n      - 36.42009030556\n      - 24.97150307276\n      - 18.1321687762\n      - 24.92150307276\n      - 18.3071687762\n      - 12.85194135284\n      - 9.428755329368\n      - 7.671466840337\n      - 5.900125885276\n      - 4.571195356425\n      - 4.515228493968\n      - 5.474513604881\n      - 82.38988065196\n      - 68.93054388289\n      - 63.86322643196\n      - 56.08823556867\n      - 46.70586460657\n      - 32.32009030556\n      - 23.17150307276\n      - 15.9321687762\n      - 23.04650307276\n      - 16.6571687762\n      - 
12.62694135284\n      - 9.878755329368\n      - 5.533966840337\n      - 5.637625885276\n      - 4.758695356425\n      - 5.640228493968\n      - 3.562013604881\n      - 71.78988065196\n      - 71.03054388289\n      - 56.06322643196\n      - 49.68823556867\n      - 45.90586460657\n      - 35.62009030556\n      - 20.67150307276\n      - 14.9321687762\n      - 20.72150307276\n      - 15.5321687762\n      - 11.57694135284\n      - 8.153755329368\n      - 6.621466840337\n      - 5.075125885276\n      - 3.971195356425\n      - 3.277728493968\n      - 3.412013604881\n      - 65.88988065196\n      - 59.33054388289\n      - 53.06322643196\n      - 47.98823556867\n      - 43.00586460657\n      - 34.12009030556\n      - 23.27150307276\n      - 14.9321687762\n      - 23.23400307276\n      - 15.6821687762\n      - 11.87694135284\n      - 8.828755329368\n      - 6.958966840337\n      - 4.662625885276\n      - 4.196195356425\n      - 3.127728493968\n      - 3.074513604881\n      - 73.28988065196\n      - 54.66322643196\n      - 26.00799822147\n      - 7.789506147668\n      - 8.499506147668\n      - 10.93950614767\n      - 3.209942501544\n      - 69.78988065196\n      - 52.16322643196\n      - 26.70799822147\n      - 9.219506147668\n      - 8.129506147668\n      - 9.999506147668\n      - 1.519942501544\n      - 68.58988065196\n      - 53.26322643196\n      - 26.40799822147\n      - 9.249506147668\n      - 7.189506147668\n      - 8.689506147668\n      - 3.019942501544\n      - 69.78988065196\n      - 55.76322643196\n      - 27.30799822147\n      - 7.939506147668\n      - 3.209942501544\n      - 8.129506147668\n      - 8.499506147668\n      - 3.019942501544\n      - 68.58988065196\n      - 52.26322643196\n      - 40.50586460657\n      - 25.50799822147\n      - 16.41235765092\n      - 16.78235765092\n      - 13.65241143766\n      - 9.329506147668\n      - 7.541813269635\n      - 6.010338273791\n      - 4.249942501544\n      - 67.58988065196\n      - 54.26322643196\n      - 
41.50586460657\n      - 28.70799822147\n      - 14.21235765092\n      - 14.53235765092\n      - 14.14241143766\n      - 8.129506147668\n      - 6.711813269635\n      - 4.320338273791\n      - 4.999942501544\n      - 62.28988065196\n      - 51.76322643196\n      - 41.20586460657\n      - 29.30799822147\n      - 15.21235765092\n      - 15.88235765092\n      - 15.83241143766\n      - 11.01950614767\n      - 4.461813269635\n      - 5.560338273791\n      - 7.699942501544\n      - 69.78988065196\n      - 58.03054388289\n      - 54.26322643196\n      - 50.48823556867\n      - 40.40586460657\n      - 30.92009030556\n      - 14.7321687762\n      - 15.6021687762\n      - 8.984441352835\n      - 5.298755329368\n      - 3.843966840337\n      - 1.927625885276\n      - 1.716195356425\n      - 1.175228493968\n      - 1.199513604881\n      - 19.56950614767\n      - 21.13950614767\n      - 13.44950614767\n      - 7.189506147668\n      - 7.449506147668\n      - 11.49950614767\n      - 62.58988065196\n      - 46.96322643196\n      - 40.70586460657\n      - 33.30799822147\n      - 24.01235765092\n      - 18.53241143766\n      - 23.83235765092\n      - 18.64241143766\n      - 13.07950614767\n      - 7.841813269635\n      - 9.080338273791\n      - 6.319942501544\n      - 72.48988065196\n      - 53.56322643196\n      - 40.20586460657\n      - 22.50799822147\n      - 18.21235765092\n      - 12.53241143766\n      - 18.77235765092\n      - 13.50241143766\n      - 11.61950614767\n      - 5.031813269635\n      - 4.880338273791\n      - 6.089942501544\n      - 58.48988065196\n      - 51.46322643196\n      - 33.50586460657\n      - 24.20799822147\n      - 19.91235765092\n      - 11.13241143766\n      - 20.20235765092\n      - 11.78241143766\n      - 8.729506147668\n      - 4.911813269635\n      - 4.210338273791\n      - 4.219942501544\n      - 9.069506147668\n      - 72.78988065196\n      - 56.76322643196\n      - 29.00799822147\n      - 8.689506147668\n      - 2.449942501544\n      - 
8.319506147668\n      - 2.639942501544\n      - 10.56950614767\n      - 9.999506147668\n      - 11.01950614767\n      - 19.37150307276\n      - 19.82150307276\n      - 73.48988065196\n      - 53.86322643196\n      - 23.67150307276\n      - 23.68150307276\n      - 12.28444135284\n      - 6.473966840337\n      - 22.27150307276\n      - 22.56150307276\n      - 73.08988065196\n      - 53.16322643196\n      - 11.76444135284\n      - 6.133966840337\n      - 22.77150307276\n      - 22.82150307276\n  -   - 78.17391291542\n      - 66.8366728159\n      - 54.32599616455\n      - 56.48754438985\n      - 49.80586460657\n      - 37.60220537333\n      - 27.26395828245\n      - 20.80723297571\n      - 27.21395828245\n      - 20.98223297571\n      - 15.60547682225\n      - 12.11967202128\n      - 10.23248733714\n      - 8.30080219923\n      - 6.80055773407\n      - 6.572919168714\n      - 7.365975022026\n      - 75.87391291542\n      - 65.0366728159\n      - 61.72599616455\n      - 55.18754438985\n      - 46.70586460657\n      - 33.50220537333\n      - 25.46395828245\n      - 18.60723297571\n      - 25.33895828245\n      - 19.33223297571\n      - 15.38047682225\n      - 12.56967202128\n      - 8.094987337144\n      - 8.03830219923\n      - 6.98805773407\n      - 7.697919168714\n      - 5.453475022026\n      - 65.27391291542\n      - 67.1366728159\n      - 53.92599616455\n      - 48.78754438985\n      - 45.90586460657\n      - 36.80220537333\n      - 22.96395828245\n      - 17.60723297571\n      - 23.01395828245\n      - 18.20723297571\n      - 14.33047682225\n      - 10.84467202128\n      - 9.182487337144\n      - 7.47580219923\n      - 6.20055773407\n      - 5.335419168714\n      - 5.303475022026\n      - 59.37391291542\n      - 55.4366728159\n      - 50.92599616455\n      - 47.08754438985\n      - 43.00586460657\n      - 35.30220537333\n      - 25.56395828245\n      - 17.60723297571\n      - 25.52645828245\n      - 18.35723297571\n      - 14.63047682225\n      - 11.51967202128\n  
    - 9.519987337144\n      - 7.06330219923\n      - 6.42555773407\n      - 5.185419168714\n      - 4.965975022026\n      - 66.77391291542\n      - 52.52599616455\n      - 27.88429931353\n      - 10.52352617863\n      - 11.23352617863\n      - 13.67352617863\n      - 5.021362784819\n      - 63.27391291542\n      - 50.02599616455\n      - 28.58429931353\n      - 11.95352617863\n      - 10.86352617863\n      - 12.73352617863\n      - 3.331362784819\n      - 62.07391291542\n      - 51.12599616455\n      - 28.28429931353\n      - 11.98352617863\n      - 9.923526178634\n      - 11.42352617863\n      - 4.831362784819\n      - 63.27391291542\n      - 53.62599616455\n      - 29.18429931353\n      - 10.67352617863\n      - 5.021362784819\n      - 10.86352617863\n      - 11.23352617863\n      - 4.831362784819\n      - 62.07391291542\n      - 50.12599616455\n      - 40.50586460657\n      - 27.38429931353\n      - 18.95079727771\n      - 19.32079727771\n      - 16.391415976\n      - 12.06352617863\n      - 10.02497402658\n      - 8.153464959245\n      - 6.061362784819\n      - 61.07391291542\n      - 52.12599616455\n      - 41.50586460657\n      - 30.58429931353\n      - 16.75079727771\n      - 17.07079727771\n      - 16.881415976\n      - 10.86352617863\n      - 9.194974026576\n      - 6.463464959245\n      - 6.811362784819\n      - 55.77391291542\n      - 49.62599616455\n      - 41.20586460657\n      - 31.18429931353\n      - 17.75079727771\n      - 18.42079727771\n      - 18.571415976\n      - 13.75352617863\n      - 6.944974026576\n      - 7.703464959245\n      - 9.511362784819\n      - 63.27391291542\n      - 54.1366728159\n      - 52.12599616455\n      - 49.58754438985\n      - 40.40586460657\n      - 32.10220537333\n      - 17.40723297571\n      - 18.27723297571\n      - 11.73797682225\n      - 7.989672021276\n      - 6.404987337144\n      - 4.32830219923\n      - 3.94555773407\n      - 3.232919168714\n      - 3.090975022026\n      - 22.30352617863\n      - 
23.87352617863\n      - 16.18352617863\n      - 9.923526178634\n      - 10.18352617863\n      - 14.23352617863\n      - 56.07391291542\n      - 44.82599616455\n      - 40.70586460657\n      - 35.18429931353\n      - 26.55079727771\n      - 21.271415976\n      - 26.37079727771\n      - 21.381415976\n      - 15.81352617863\n      - 10.32497402658\n      - 11.22346495925\n      - 8.131362784819\n      - 65.97391291542\n      - 51.42599616455\n      - 40.20586460657\n      - 24.38429931353\n      - 20.75079727771\n      - 15.271415976\n      - 21.31079727771\n      - 16.241415976\n      - 14.35352617863\n      - 7.514974026576\n      - 7.023464959245\n      - 7.901362784819\n      - 51.97391291542\n      - 49.32599616455\n      - 33.50586460657\n      - 26.08429931353\n      - 22.45079727771\n      - 13.871415976\n      - 22.74079727771\n      - 14.521415976\n      - 11.46352617863\n      - 7.394974026576\n      - 6.353464959245\n      - 6.031362784819\n      - 11.80352617863\n      - 66.27391291542\n      - 54.62599616455\n      - 30.88429931353\n      - 11.42352617863\n      - 4.261362784819\n      - 11.05352617863\n      - 4.451362784819\n      - 13.30352617863\n      - 12.73352617863\n      - 13.75352617863\n      - 21.66395828245\n      - 22.11395828245\n      - 66.97391291542\n      - 51.72599616455\n      - 25.96395828245\n      - 25.97395828245\n      - 15.03797682225\n      - 9.034987337144\n      - 24.56395828245\n      - 24.85395828245\n      - 66.57391291542\n      - 51.02599616455\n      - 14.51797682225\n      - 8.694987337144\n      - 25.06395828245\n      - 25.11395828245\n  -   - 122.1511527598\n      - 122.6211240639\n      - 156.1508499375\n      - -599.1450584808\n      - -12.38948471101\n      - 19.33755088278\n      - 21.40590723345\n      - 17.86925432536\n      - 21.35590723345\n      - 18.04425432536\n      - 13.84616571438\n      - 10.95881863539\n      - 9.418115456519\n      - 7.704952567843\n      - 6.351080428381\n      - 6.22595148445\n   
   - 7.093292608367\n      - 119.8511527598\n      - 120.8211240639\n      - 163.5508499375\n      - -600.4450584808\n      - -15.48948471101\n      - 15.23755088278\n      - 19.60590723345\n      - 15.66925432536\n      - 19.48090723345\n      - 16.39425432536\n      - 13.62116571438\n      - 11.40881863539\n      - 7.280615456519\n      - 7.442452567843\n      - 6.538580428381\n      - 7.35095148445\n      - 5.180792608367\n      - 109.2511527598\n      - 122.9211240639\n      - 155.7508499375\n      - -606.8450584808\n      - -16.28948471101\n      - 18.53755088278\n      - 17.10590723345\n      - 14.66925432536\n      - 17.15590723345\n      - 15.26925432536\n      - 12.57116571438\n      - 9.683818635389\n      - 8.368115456519\n      - 6.879952567843\n      - 5.751080428381\n      - 4.98845148445\n      - 5.030792608367\n      - 103.3511527598\n      - 111.2211240639\n      - 152.7508499375\n      - -608.5450584808\n      - -19.18948471101\n      - 17.03755088278\n      - 19.70590723345\n      - 14.66925432536\n      - 19.66840723345\n      - 15.41925432536\n      - 12.87116571438\n      - 10.35881863539\n      - 8.705615456519\n      - 6.467452567843\n      - 5.976080428381\n      - 4.83845148445\n      - 4.693292608367\n      - 110.7511527598\n      - 154.3508499375\n      - 18.46995434894\n      - 9.107714197815\n      - 9.817714197815\n      - 12.25771419782\n      - 4.778284133588\n      - 107.2511527598\n      - 151.8508499375\n      - 19.16995434894\n      - 10.53771419782\n      - 9.447714197815\n      - 11.31771419782\n      - 3.088284133588\n      - 106.0511527598\n      - 152.9508499375\n      - 18.86995434894\n      - 10.56771419782\n      - 8.507714197815\n      - 10.00771419782\n      - 4.588284133588\n      - 107.2511527598\n      - 155.4508499375\n      - 19.76995434894\n      - 9.257714197815\n      - 4.778284133588\n      - 9.447714197815\n      - 9.817714197815\n      - 4.588284133588\n      - 106.0511527598\n      - 151.9508499375\n      - 
-21.68948471101\n      - 17.96995434894\n      - 14.92797213765\n      - 15.29797213765\n      - 14.15288063203\n      - 10.64771419782\n      - 9.331447324355\n      - 7.759548240156\n      - 5.818284133588\n      - 105.0511527598\n      - 153.9508499375\n      - -20.68948471101\n      - 21.16995434894\n      - 12.72797213765\n      - 13.04797213765\n      - 14.64288063203\n      - 9.447714197815\n      - 8.501447324355\n      - 6.069548240156\n      - 6.568284133588\n      - 99.75115275983\n      - 151.4508499375\n      - -20.98948471101\n      - 21.76995434894\n      - 13.72797213765\n      - 14.39797213765\n      - 16.33288063203\n      - 12.33771419782\n      - 6.251447324355\n      - 7.309548240156\n      - 9.268284133588\n      - 107.2511527598\n      - 109.9211240639\n      - 153.9508499375\n      - -606.0450584808\n      - -21.78948471101\n      - 13.83755088278\n      - 14.46925432536\n      - 15.33925432536\n      - 9.978665714379\n      - 6.828818635389\n      - 5.590615456519\n      - 3.732452567843\n      - 3.496080428381\n      - 2.88595148445\n      - 2.818292608367\n      - 20.88771419782\n      - 22.45771419782\n      - 14.76771419782\n      - 8.507714197815\n      - 8.767714197815\n      - 12.81771419782\n      - 100.0511527598\n      - 146.6508499375\n      - -21.48948471101\n      - 25.76995434894\n      - 22.52797213765\n      - 19.03288063203\n      - 22.34797213765\n      - 19.14288063203\n      - 14.39771419782\n      - 9.631447324355\n      - 10.82954824016\n      - 7.888284133588\n      - 109.9511527598\n      - 153.2508499375\n      - -21.98948471101\n      - 14.96995434894\n      - 16.72797213765\n      - 13.03288063203\n      - 17.28797213765\n      - 14.00288063203\n      - 12.93771419782\n      - 6.821447324355\n      - 6.629548240156\n      - 7.658284133588\n      - 95.95115275983\n      - 151.1508499375\n      - -28.68948471101\n      - 16.66995434894\n      - 18.42797213765\n      - 11.63288063203\n      - 18.71797213765\n      - 
12.28288063203\n      - 10.04771419782\n      - 6.701447324355\n      - 5.959548240156\n      - 5.788284133588\n      - 10.38771419782\n      - 110.2511527598\n      - 156.4508499375\n      - 21.46995434894\n      - 10.00771419782\n      - 4.018284133588\n      - 9.637714197815\n      - 4.208284133588\n      - 11.88771419782\n      - 11.31771419782\n      - 12.33771419782\n      - 15.80590723345\n      - 16.25590723345\n      - 110.9511527598\n      - 153.5508499375\n      - 20.10590723345\n      - 20.11590723345\n      - 13.27866571438\n      - 8.220615456519\n      - 18.70590723345\n      - 18.99590723345\n      - 110.5511527598\n      - 152.8508499375\n      - 12.75866571438\n      - 7.880615456519\n      - 19.20590723345\n      - 19.25590723345\n  -   - -35.35804307658\n      - -53.29699942572\n      - -72.06021768605\n      - -76.24300976651\n      - -89.66597211166\n      - -117.5990460751\n      - -178.3813984168\n      - -314.1942358597\n      - -178.4313984168\n      - -314.0192358597\n      - -1385.074532566\n      - 503.9556946656\n      - 197.4559049595\n      - 115.7942913474\n      - 78.11722466414\n      - 57.55318477565\n      - 45.51113019711\n      - -37.65804307658\n      - -55.09699942572\n      - -64.66021768605\n      - -77.54300976651\n      - -92.76597211166\n      - -121.6990460751\n      - -180.1813984168\n      - -316.3942358597\n      - -180.3063984168\n      - -315.6692358597\n      - -1385.299532566\n      - 504.4056946656\n      - 195.3184049595\n      - 115.5317913474\n      - 78.30472466414\n      - 58.67818477565\n      - 43.59863019711\n      - -48.25804307658\n      - -52.99699942572\n      - -72.46021768605\n      - -83.94300976651\n      - -93.56597211166\n      - -118.3990460751\n      - -182.6813984168\n      - -317.3942358597\n      - -182.6313984168\n      - -316.7942358597\n      - -1386.349532566\n      - 502.6806946656\n      - 196.4059049595\n      - 114.9692913474\n      - 77.51722466414\n      - 56.31568477565\n      
- 43.44863019711\n      - -54.15804307658\n      - -64.69699942572\n      - -75.46021768605\n      - -85.64300976651\n      - -96.46597211166\n      - -119.8990460751\n      - -180.0813984168\n      - -317.3942358597\n      - -180.1188984168\n      - -316.6442358597\n      - -1386.049532566\n      - 503.3556946656\n      - 196.7434049595\n      - 114.5567913474\n      - 77.74222466414\n      - 56.16568477565\n      - 43.11113019711\n      - -46.75804307658\n      - -73.86021768605\n      - -148.0932859294\n      - 1691.297253326\n      - 1692.007253326\n      - 1694.447253326\n      - 38.42217232425\n      - -50.25804307658\n      - -76.36021768605\n      - -147.3932859294\n      - 1692.727253326\n      - 1691.637253326\n      - 1693.507253326\n      - 36.73217232425\n      - -51.45804307658\n      - -75.26021768605\n      - -147.6932859294\n      - 1692.757253326\n      - 1690.697253326\n      - 1692.197253326\n      - 38.23217232425\n      - -50.25804307658\n      - -72.76021768605\n      - -146.7932859294\n      - 1691.447253326\n      - 38.42217232425\n      - 1691.637253326\n      - 1692.007253326\n      - 38.23217232425\n      - -51.45804307658\n      - -76.26021768605\n      - -98.96597211166\n      - -148.5932859294\n      - -233.0828036794\n      - -232.7128036794\n      - -508.8494431202\n      - 1692.837253326\n      - 148.258324502\n      - 68.02827286157\n      - 39.46217232425\n      - -52.45804307658\n      - -74.26021768605\n      - -97.96597211166\n      - -145.3932859294\n      - -235.2828036794\n      - -234.9628036794\n      - -508.3594431202\n      - 1691.637253326\n      - 147.428324502\n      - 66.33827286157\n      - 40.21217232425\n      - -57.75804307658\n      - -76.76021768605\n      - -98.26597211166\n      - -144.7932859294\n      - -234.2828036794\n      - -233.6128036794\n      - -506.6694431202\n      - 1694.527253326\n      - 145.178324502\n      - 67.57827286157\n      - 42.91217232425\n      - -50.25804307658\n      - 
-65.99699942572\n      - -74.26021768605\n      - -83.14300976651\n      - -99.06597211166\n      - -123.0990460751\n      - -317.5942358597\n      - -316.7242358597\n      - -1388.942032566\n      - 499.8256946656\n      - 193.6284049595\n      - 111.8217913474\n      - 75.26222466414\n      - 54.21318477565\n      - 41.23613019711\n      - 1703.077253326\n      - 1704.647253326\n      - 1696.957253326\n      - 1690.697253326\n      - 1690.957253326\n      - 1695.007253326\n      - -57.45804307658\n      - -81.56021768605\n      - -98.76597211166\n      - -140.7932859294\n      - -225.4828036794\n      - -503.9694431202\n      - -225.6628036794\n      - -503.8594431202\n      - 1696.587253326\n      - 148.558324502\n      - 71.09827286157\n      - 41.53217232425\n      - -47.55804307658\n      - -74.96021768605\n      - -99.26597211166\n      - -151.5932859294\n      - -231.2828036794\n      - -509.9694431202\n      - -230.7228036794\n      - -508.9994431202\n      - 1695.127253326\n      - 145.748324502\n      - 66.89827286157\n      - 41.30217232425\n      - -61.55804307658\n      - -77.06021768605\n      - -105.9659721117\n      - -149.8932859294\n      - -229.5828036794\n      - -511.3694431202\n      - -229.2928036794\n      - -510.7194431202\n      - 1692.237253326\n      - 145.628324502\n      - 66.22827286157\n      - 39.43217232425\n      - 1692.577253326\n      - -47.25804307658\n      - -71.76021768605\n      - -145.0932859294\n      - 1692.197253326\n      - 37.66217232425\n      - 1691.827253326\n      - 37.85217232425\n      - 1694.077253326\n      - 1693.507253326\n      - 1694.527253326\n      - -183.9813984168\n      - -183.5313984168\n      - -46.55804307658\n      - -74.66021768605\n      - -179.6813984168\n      - -179.6713984168\n      - -1385.642032566\n      - 196.2584049595\n      - -181.0813984168\n      - -180.7913984168\n      - -46.95804307658\n      - -75.36021768605\n      - -1386.162032566\n      - 195.9184049595\n      - 
-180.5813984168\n      - -180.5313984168\n  -   - 117.9473464966\n      - 104.1334190546\n      - 90.05657396185\n      - 91.2207789054\n      - 83.93061974583\n      - 71.30942158128\n      - 62.32199544308\n      - 59.88083883476\n      - 62.27199544308\n      - 60.05583883476\n      - 62.77642327569\n      - 76.9431054476\n      - 131.4875801385\n      - -2342.694563021\n      - -86.51002021515\n      - -37.31642203542\n      - -19.79861800279\n      - 115.6473464966\n      - 102.3334190546\n      - 97.45657396185\n      - 89.9207789054\n      - 80.83061974583\n      - 67.20942158128\n      - 60.52199544308\n      - 57.68083883476\n      - 60.39699544308\n      - 58.40583883476\n      - 62.55142327569\n      - 77.3931054476\n      - 129.3500801385\n      - -2342.957063021\n      - -86.32252021515\n      - -36.19142203542\n      - -21.71111800279\n      - 105.0473464966\n      - 104.4334190546\n      - 89.65657396185\n      - 83.5207789054\n      - 80.03061974583\n      - 70.50942158128\n      - 58.02199544308\n      - 56.68083883476\n      - 58.07199544308\n      - 57.28083883476\n      - 61.50142327569\n      - 75.6681054476\n      - 130.4375801385\n      - -2343.519563021\n      - -87.11002021515\n      - -38.55392203542\n      - -21.86111800279\n      - 99.14734649662\n      - 92.73341905458\n      - 86.65657396185\n      - 81.8207789054\n      - 77.13061974583\n      - 69.00942158128\n      - 60.62199544308\n      - 56.68083883476\n      - 60.58449544308\n      - 57.43083883476\n      - 61.80142327569\n      - 76.3431054476\n      - 130.7750801385\n      - -2343.932063021\n      - -86.88502021515\n      - -38.70392203542\n      - -22.19861800279\n      - 106.5473464966\n      - 88.25657396185\n      - 61.9476238727\n      - 64.62033506263\n      - 65.33033506263\n      - 67.77033506263\n      - -17.38684121661\n      - 103.0473464966\n      - 85.75657396185\n      - 62.6476238727\n      - 66.05033506263\n      - 64.96033506263\n      - 66.83033506263\n      
- -19.07684121661\n      - 101.8473464966\n      - 86.85657396185\n      - 62.3476238727\n      - 66.08033506263\n      - 64.02033506263\n      - 65.52033506263\n      - -17.57684121661\n      - 103.0473464966\n      - 89.35657396185\n      - 63.2476238727\n      - 64.77033506263\n      - -17.38684121661\n      - 64.96033506263\n      - 65.33033506263\n      - -17.57684121661\n      - 101.8473464966\n      - 85.85657396185\n      - 74.63061974583\n      - 61.4476238727\n      - 55.64384266576\n      - 56.01384266576\n      - 58.81780905206\n      - 66.16033506263\n      - 253.6554490216\n      - -52.38720394238\n      - -16.34684121661\n      - 100.8473464966\n      - 87.85657396185\n      - 75.63061974583\n      - 64.6476238727\n      - 53.44384266576\n      - 53.76384266576\n      - 59.30780905206\n      - 64.96033506263\n      - 252.8254490216\n      - -54.07720394238\n      - -15.59684121661\n      - 95.54734649662\n      - 85.35657396185\n      - 75.33061974583\n      - 65.2476238727\n      - 54.44384266576\n      - 55.11384266576\n      - 60.99780905206\n      - 67.85033506263\n      - 250.5754490216\n      - -52.83720394238\n      - -12.89684121661\n      - 103.0473464966\n      - 91.43341905458\n      - 87.85657396185\n      - 84.3207789054\n      - 74.53061974583\n      - 65.80942158128\n      - 56.48083883476\n      - 57.35083883476\n      - 58.90892327569\n      - 72.8131054476\n      - 127.6600801385\n      - -2346.667063021\n      - -89.36502021515\n      - -40.65642203542\n      - -24.07361800279\n      - 76.40033506263\n      - 77.97033506263\n      - 70.28033506263\n      - 64.02033506263\n      - 64.28033506263\n      - 68.33033506263\n      - 95.84734649662\n      - 80.55657396185\n      - 74.83061974583\n      - 69.2476238727\n      - 63.24384266576\n      - 63.69780905206\n      - 63.06384266576\n      - 63.80780905206\n      - 69.91033506263\n      - 253.9554490216\n      - -49.31720394238\n      - -14.27684121661\n      - 105.7473464966\n      
- 87.15657396185\n      - 74.33061974583\n      - 58.4476238727\n      - 57.44384266576\n      - 57.69780905206\n      - 58.00384266576\n      - 58.66780905206\n      - 68.45033506263\n      - 251.1454490216\n      - -53.51720394238\n      - -14.50684121661\n      - 91.74734649662\n      - 85.05657396185\n      - 67.63061974583\n      - 60.1476238727\n      - 59.14384266576\n      - 56.29780905206\n      - 59.43384266576\n      - 56.94780905206\n      - 65.56033506263\n      - 251.0254490216\n      - -54.18720394238\n      - -16.37684121661\n      - 65.90033506263\n      - 106.0473464966\n      - 90.35657396185\n      - 64.9476238727\n      - 65.52033506263\n      - -18.14684121661\n      - 65.15033506263\n      - -17.95684121661\n      - 67.40033506263\n      - 66.83033506263\n      - 67.85033506263\n      - 56.72199544308\n      - 57.17199544308\n      - 106.7473464966\n      - 87.45657396185\n      - 61.02199544308\n      - 61.03199544308\n      - 62.20892327569\n      - 130.2900801385\n      - 59.62199544308\n      - 59.91199544308\n      - 106.3473464966\n      - 86.75657396185\n      - 61.68892327569\n      - 129.9500801385\n      - 60.12199544308\n      - 60.17199544308\n  -   - 175.7616267494\n      - 135.815392655\n      - 107.427429421\n      - 99.44443456745\n      - 85.70608965926\n      - 64.3171217786\n      - 44.36460041182\n      - 32.95338522348\n      - 44.31460041182\n      - 33.12838522348\n      - 24.75298136325\n      - 19.273476213\n      - 15.97636612239\n      - 13.00317519799\n      - 10.70826764037\n      - 9.858997178816\n      - 10.15607036729\n      - 173.4616267494\n      - 134.015392655\n      - 114.827429421\n      - 98.14443456745\n      - 82.60608965926\n      - 60.2171217786\n      - 42.56460041182\n      - 30.75338522348\n      - 42.43960041182\n      - 31.47838522348\n      - 24.52798136325\n      - 19.723476213\n      - 13.83886612239\n      - 12.74067519799\n      - 10.89576764037\n      - 10.98399717882\n      - 
8.243570367288\n      - 162.8616267494\n      - 136.115392655\n      - 107.027429421\n      - 91.74443456745\n      - 81.80608965926\n      - 63.5171217786\n      - 40.06460041182\n      - 29.75338522348\n      - 40.11460041182\n      - 30.35338522348\n      - 23.47798136325\n      - 17.998476213\n      - 14.92636612239\n      - 12.17817519799\n      - 10.10826764037\n      - 8.621497178816\n      - 8.093570367288\n      - 156.9616267494\n      - 124.415392655\n      - 104.027429421\n      - 90.04443456745\n      - 78.90608965926\n      - 62.0171217786\n      - 42.66460041182\n      - 29.75338522348\n      - 42.62710041182\n      - 30.50338522348\n      - 23.77798136325\n      - 18.673476213\n      - 15.26386612239\n      - 11.76567519799\n      - 10.33326764037\n      - 8.471497178816\n      - 7.756070367288\n      - 164.3616267494\n      - 105.627429421\n      - 48.8819398286\n      - 18.5818314891\n      - 19.2918314891\n      - 21.7318314891\n      - 7.60022447721\n      - 160.8616267494\n      - 103.127429421\n      - 49.5819398286\n      - 20.0118314891\n      - 18.9218314891\n      - 20.7918314891\n      - 5.91022447721\n      - 159.6616267494\n      - 104.227429421\n      - 49.2819398286\n      - 20.0418314891\n      - 17.9818314891\n      - 19.4818314891\n      - 7.41022447721\n      - 160.8616267494\n      - 106.727429421\n      - 50.1819398286\n      - 18.7318314891\n      - 7.60022447721\n      - 18.9218314891\n      - 19.2918314891\n      - 7.41022447721\n      - 159.6616267494\n      - 103.227429421\n      - 76.40608965926\n      - 48.3819398286\n      - 33.22986519451\n      - 33.59986519451\n      - 26.87225158213\n      - 20.1218314891\n      - 15.2111293494\n      - 11.73215109931\n      - 8.64022447721\n      - 158.6616267494\n      - 105.227429421\n      - 77.40608965926\n      - 51.5819398286\n      - 31.02986519451\n      - 31.34986519451\n      - 27.36225158213\n      - 18.9218314891\n      - 14.3811293494\n      - 10.04215109931\n      - 
9.39022447721\n      - 153.3616267494\n      - 102.727429421\n      - 77.10608965926\n      - 52.1819398286\n      - 32.02986519451\n      - 32.69986519451\n      - 29.05225158213\n      - 21.8118314891\n      - 12.1311293494\n      - 11.28215109931\n      - 12.09022447721\n      - 160.8616267494\n      - 123.115392655\n      - 105.227429421\n      - 92.54443456745\n      - 76.30608965926\n      - 58.8171217786\n      - 29.55338522348\n      - 30.42338522348\n      - 20.88548136325\n      - 15.143476213\n      - 12.14886612239\n      - 9.030675197988\n      - 7.853267640371\n      - 6.518997178816\n      - 5.881070367288\n      - 30.3618314891\n      - 31.9318314891\n      - 24.2418314891\n      - 17.9818314891\n      - 18.2418314891\n      - 22.2918314891\n      - 153.6616267494\n      - 97.927429421\n      - 76.60608965926\n      - 56.1819398286\n      - 40.82986519451\n      - 31.75225158213\n      - 40.64986519451\n      - 31.86225158213\n      - 23.8718314891\n      - 15.5111293494\n      - 14.80215109931\n      - 10.71022447721\n      - 163.5616267494\n      - 104.527429421\n      - 76.10608965926\n      - 45.3819398286\n      - 35.02986519451\n      - 25.75225158213\n      - 35.58986519451\n      - 26.72225158213\n      - 22.4118314891\n      - 12.7011293494\n      - 10.60215109931\n      - 10.48022447721\n      - 149.5616267494\n      - 102.427429421\n      - 69.40608965926\n      - 47.0819398286\n      - 36.72986519451\n      - 24.35225158213\n      - 37.01986519451\n      - 25.00225158213\n      - 19.5218314891\n      - 12.5811293494\n      - 9.93215109931\n      - 8.61022447721\n      - 19.8618314891\n      - 163.8616267494\n      - 107.727429421\n      - 51.8819398286\n      - 19.4818314891\n      - 6.84022447721\n      - 19.1118314891\n      - 7.03022447721\n      - 21.3618314891\n      - 20.7918314891\n      - 21.8118314891\n      - 38.76460041182\n      - 39.21460041182\n      - 164.5616267494\n      - 104.827429421\n      - 43.06460041182\n      - 
43.07460041182\n      - 24.18548136325\n      - 14.77886612239\n      - 41.66460041182\n      - 41.95460041182\n      - 164.1616267494\n      - 104.127429421\n      - 23.66548136325\n      - 14.43886612239\n      - 42.16460041182\n      - 42.21460041182\n  -   - 28.24095690087\n      - 21.59536620662\n      - 13.25712029593\n      - 19.07487076201\n      - 15.58993695617\n      - 8.655383440756\n      - 5.734068533914\n      - 4.165799626156\n      - 5.684068533914\n      - 4.340799626156\n      - 2.383275058766\n      - 1.39686578611\n      - 1.39855054723\n      - 0.9316549828049\n      - 0.5908378210265\n      - 1.296279986221\n      - 2.850605037275\n      - 25.94095690087\n      - 19.79536620662\n      - 20.65712029593\n      - 17.77487076201\n      - 12.48993695617\n      - 4.555383440756\n      - 3.934068533914\n      - 1.965799626156\n      - 3.809068533914\n      - 2.690799626156\n      - 2.158275058766\n      - 1.84686578611\n      - -0.7389494527701\n      - 0.6691549828049\n      - 0.7783378210265\n      - 2.421279986221\n      - 0.9381050372751\n      - 15.34095690087\n      - 21.89536620662\n      - 12.85712029593\n      - 11.37487076201\n      - 11.68993695617\n      - 7.855383440756\n      - 1.434068533914\n      - 0.9657996261561\n      - 1.484068533914\n      - 1.565799626156\n      - 1.108275058766\n      - 0.12186578611\n      - 0.3485505472299\n      - 0.1066549828049\n      - -0.009162178973513\n      - 0.05877998622137\n      - 0.7881050372751\n      - 9.440956900867\n      - 10.19536620662\n      - 9.857120295928\n      - 9.674870762006\n      - 8.789936956172\n      - 6.355383440756\n      - 4.034068533914\n      - 0.9657996261561\n      - 3.996568533914\n      - 1.715799626156\n      - 1.408275058766\n      - 0.79686578611\n      - 0.6860505472299\n      - -0.3058450171951\n      - 0.2158378210265\n      - -0.09122001377863\n      - 0.4506050372751\n      - 16.84095690087\n      - 11.45712029593\n      - 3.061569786218\n      - 
-1.357147755765\n      - -0.6471477557654\n      - 1.792852244235\n      - 0.8347758927937\n      - 13.34095690087\n      - 8.957120295928\n      - 3.761569786218\n      - 0.07285224423464\n      - -1.017147755765\n      - 0.8528522442346\n      - -0.8552241072063\n      - 12.14095690087\n      - 10.05712029593\n      - 3.461569786218\n      - 0.1028522442346\n      - -1.957147755765\n      - -0.4571477557654\n      - 0.6447758927937\n      - 13.34095690087\n      - 12.55712029593\n      - 4.361569786218\n      - -1.207147755765\n      - 0.8347758927937\n      - -1.017147755765\n      - -0.6471477557654\n      - 0.6447758927937\n      - 12.14095690087\n      - 9.057120295928\n      - 6.289936956172\n      - 2.561569786218\n      - 0.09812922077451\n      - 0.4681292207745\n      - 1.601218748851\n      - 0.1828522442346\n      - 1.967848278564\n      - 2.434721057271\n      - 1.874775892794\n      - 11.14095690087\n      - 11.05712029593\n      - 7.289936956172\n      - 5.761569786218\n      - -2.101870779225\n      - -1.781870779225\n      - 2.091218748851\n      - -1.017147755765\n      - 1.137848278564\n      - 0.744721057271\n      - 2.624775892794\n      - 5.840956900867\n      - 8.557120295928\n      - 6.989936956172\n      - 6.361569786218\n      - -1.101870779225\n      - -0.4318707792255\n      - 3.781218748851\n      - 1.872852244235\n      - -1.112151721436\n      - 1.984721057271\n      - 5.324775892794\n      - 13.34095690087\n      - 8.895366206617\n      - 11.05712029593\n      - 12.17487076201\n      - 6.189936956172\n      - 3.155383440756\n      - 0.7657996261561\n      - 1.635799626156\n      - -1.484224941234\n      - -2.73313421389\n      - -2.42894945277\n      - -3.040845017195\n      - -2.264162178974\n      - -2.043720013779\n      - -1.424394962725\n      - 10.42285224423\n      - 11.99285224423\n      - 4.302852244235\n      - -1.957147755765\n      - -1.697147755765\n      - 2.352852244235\n      - 6.140956900867\n      - 
3.757120295928\n      - 6.489936956172\n      - 10.36156978622\n      - 7.698129220775\n      - 6.481218748851\n      - 7.518129220775\n      - 6.591218748851\n      - 3.932852244235\n      - 2.267848278564\n      - 5.504721057271\n      - 3.944775892794\n      - 16.04095690087\n      - 10.35712029593\n      - 5.989936956172\n      - -0.4384302137822\n      - 1.898129220775\n      - 0.4812187488511\n      - 2.458129220775\n      - 1.451218748851\n      - 2.472852244235\n      - -0.5421517214358\n      - 1.304721057271\n      - 3.714775892794\n      - 2.040956900867\n      - 8.257120295928\n      - -0.7100630438283\n      - 1.261569786218\n      - 3.598129220775\n      - -0.9187812511489\n      - 3.888129220775\n      - -0.2687812511489\n      - -0.4171477557654\n      - -0.6621517214358\n      - 0.634721057271\n      - 1.844775892794\n      - -0.07714775576537\n      - 16.34095690087\n      - 13.55712029593\n      - 6.061569786218\n      - -0.4571477557654\n      - 0.07477589279366\n      - -0.8271477557654\n      - 0.2647758927937\n      - 1.422852244235\n      - 0.8528522442346\n      - 1.872852244235\n      - 0.1340685339144\n      - 0.5840685339144\n      - 17.04095690087\n      - 10.65712029593\n      - 4.434068533914\n      - 4.444068533914\n      - 1.815775058766\n      - 0.2010505472299\n      - 3.034068533914\n      - 3.324068533914\n      - 16.64095690087\n      - 9.957120295928\n      - 1.295775058766\n      - -0.1389494527701\n      - 3.534068533914\n      - 3.584068533914\n  -   - 19.67905061421\n      - 12.78536491634\n      - 4.453409401868\n      - 10.42602658124\n      - 7.181651769754\n      - 0.8467383120783\n      - -0.8151544815029\n      - -1.28878727387\n      - -0.8651544815029\n      - -1.11378727387\n      - -2.178296849214\n      - -2.4437228135\n      - -1.857756175876\n      - -1.847417965917\n      - -1.795022214911\n      - -0.7628423028115\n      - 1.065115779582\n      - 17.37905061421\n      - 10.98536491634\n      - 
11.85340940187\n      - 9.126026581237\n      - 4.081651769754\n      - -3.253261687922\n      - -2.615154481503\n      - -3.48878727387\n      - -2.740154481503\n      - -2.76378727387\n      - -2.403296849214\n      - -1.9937228135\n      - -3.995256175876\n      - -2.109917965917\n      - -1.607522214911\n      - 0.3621576971885\n      - -0.8473842204181\n      - 6.779050614207\n      - 13.08536491634\n      - 4.053409401868\n      - 2.726026581237\n      - 3.281651769754\n      - 0.04673831207827\n      - -5.115154481503\n      - -4.48878727387\n      - -5.065154481503\n      - -3.88878727387\n      - -3.453296849214\n      - -3.7187228135\n      - -2.907756175876\n      - -2.672417965917\n      - -2.395022214911\n      - -2.000342302812\n      - -0.9973842204181\n      - 0.8790506142075\n      - 1.385364916339\n      - 1.053409401868\n      - 1.026026581237\n      - 0.3816517697539\n      - -1.453261687922\n      - -2.515154481503\n      - -4.48878727387\n      - -2.552654481503\n      - -3.73878727387\n      - -3.153296849214\n      - -3.0437228135\n      - -2.570256175876\n      - -3.084917965917\n      - -2.170022214911\n      - -2.150342302812\n      - -1.334884220418\n      - 8.279050614207\n      - 2.653409401868\n      - -4.10628705125\n      - -5.539020595211\n      - -4.829020595211\n      - -2.389020595211\n      - -0.8305062664824\n      - 4.779050614207\n      - 0.1534094018679\n      - -3.40628705125\n      - -4.109020595211\n      - -5.199020595211\n      - -3.329020595211\n      - -2.520506266482\n      - 3.579050614207\n      - 1.253409401868\n      - -3.70628705125\n      - -4.079020595211\n      - -6.139020595211\n      - -4.639020595211\n      - -1.020506266482\n      - 4.779050614207\n      - 3.753409401868\n      - -2.80628705125\n      - -5.389020595211\n      - -0.8305062664824\n      - -5.199020595211\n      - -4.829020595211\n      - -1.020506266482\n      - 3.579050614207\n      - 0.2534094018679\n      - -2.118348230246\n      - 
-4.60628705125\n      - -5.8778638078\n      - -5.5078638078\n      - -3.383083895331\n      - -3.999020595211\n      - -1.038044411845\n      - 0.2196196759168\n      - 0.2094937335176\n      - 2.579050614207\n      - 2.253409401868\n      - -1.118348230246\n      - -1.40628705125\n      - -8.0778638078\n      - -7.7578638078\n      - -2.893083895331\n      - -5.199020595211\n      - -1.868044411845\n      - -1.470380324083\n      - 0.9594937335176\n      - -2.720949385793\n      - -0.2465905981321\n      - -1.418348230246\n      - -0.8062870512504\n      - -7.0778638078\n      - -6.4078638078\n      - -1.203083895331\n      - -2.309020595211\n      - -4.118044411845\n      - -0.2303803240832\n      - 3.659493733518\n      - 4.779050614207\n      - 0.0853649163389\n      - 2.253409401868\n      - 3.526026581237\n      - -2.218348230246\n      - -4.653261687922\n      - -4.68878727387\n      - -3.81878727387\n      - -6.045796849214\n      - -6.5737228135\n      - -5.685256175876\n      - -5.819917965917\n      - -4.650022214911\n      - -4.102842302812\n      - -3.209884220418\n      - 6.240979404789\n      - 7.810979404789\n      - 0.1209794047887\n      - -6.139020595211\n      - -5.879020595211\n      - -1.829020595211\n      - -2.420949385793\n      - -5.046590598132\n      - -1.918348230246\n      - 3.19371294875\n      - 1.7221361922\n      - 1.496916104669\n      - 1.5421361922\n      - 1.606916104669\n      - -0.2490205952113\n      - -0.738044411845\n      - 3.289619675917\n      - 2.279493733518\n      - 7.479050614207\n      - 1.553409401868\n      - -2.418348230246\n      - -7.60628705125\n      - -4.0778638078\n      - -4.503083895331\n      - -3.5178638078\n      - -3.533083895331\n      - -1.709020595211\n      - -3.548044411845\n      - -0.9103803240832\n      - 2.049493733518\n      - -6.520949385793\n      - -0.5465905981321\n      - -9.118348230246\n      - -5.90628705125\n      - -2.3778638078\n      - -5.903083895331\n      - -2.0878638078\n   
   - -5.253083895331\n      - -4.599020595211\n      - -3.668044411845\n      - -1.580380324083\n      - 0.1794937335176\n      - -4.259020595211\n      - 7.779050614207\n      - 4.753409401868\n      - -1.10628705125\n      - -4.639020595211\n      - -1.590506266482\n      - -5.009020595211\n      - -1.400506266482\n      - -2.759020595211\n      - -3.329020595211\n      - -2.309020595211\n      - -6.415154481503\n      - -5.965154481503\n      - 8.479050614207\n      - 1.853409401868\n      - -2.115154481503\n      - -2.105154481503\n      - -2.745796849214\n      - -3.055256175876\n      - -3.515154481503\n      - -3.225154481503\n      - 8.079050614207\n      - 1.153409401868\n      - -3.265796849214\n      - -3.395256175876\n      - -3.015154481503\n      - -2.965154481503\n  -   - 1050.519509418\n      - 2633.521525076\n      - -3381.282955438\n      - -933.4604542894\n      - -516.3685328612\n      - -257.5208155114\n      - -113.8324082065\n      - -66.57519632661\n      - -113.8824082065\n      - -66.40019632661\n      - -44.86075536786\n      - -32.39601556788\n      - -23.86432104696\n      - -18.5440484742\n      - -14.76646254613\n      - -11.02441930035\n      - -7.169273972705\n      - 1048.219509418\n      - 2631.721525076\n      - -3373.882955438\n      - -934.7604542894\n      - -519.4685328612\n      - -261.6208155114\n      - -115.6324082065\n      - -68.77519632661\n      - -115.7574082065\n      - -68.05019632661\n      - -45.08575536786\n      - -31.94601556788\n      - -26.00182104696\n      - -18.8065484742\n      - -14.57896254613\n      - -9.899419300347\n      - -9.081773972705\n      - 1037.619509418\n      - 2633.821525076\n      - -3381.682955438\n      - -941.1604542894\n      - -520.2685328612\n      - -258.3208155114\n      - -118.1324082065\n      - -69.77519632661\n      - -118.0824082065\n      - -69.17519632661\n      - -46.13575536786\n      - -33.67101556788\n      - -24.91432104696\n      - -19.3690484742\n      - 
-15.36646254613\n      - -12.26191930035\n      - -9.231773972705\n      - 1031.719509418\n      - 2622.121525076\n      - -3384.682955438\n      - -942.8604542894\n      - -523.1685328612\n      - -259.8208155114\n      - -115.5324082065\n      - -69.77519632661\n      - -115.5699082065\n      - -69.02519632661\n      - -45.83575536786\n      - -32.99601556788\n      - -24.57682104696\n      - -19.7815484742\n      - -15.14146254613\n      - -12.41191930035\n      - -9.569273972705\n      - 1039.119509418\n      - -3383.082955438\n      - -165.9142242952\n      - -41.04746332279\n      - -40.33746332279\n      - -37.89746332279\n      - -8.239694706637\n      - 1035.619509418\n      - -3385.582955438\n      - -165.2142242952\n      - -39.61746332279\n      - -40.70746332279\n      - -38.83746332279\n      - -9.929694706637\n      - 1034.419509418\n      - -3384.482955438\n      - -165.5142242952\n      - -39.58746332279\n      - -41.64746332279\n      - -40.14746332279\n      - -8.429694706637\n      - 1035.619509418\n      - -3381.982955438\n      - -164.6142242952\n      - -40.89746332279\n      - -8.239694706637\n      - -40.70746332279\n      - -40.33746332279\n      - -8.429694706637\n      - 1034.419509418\n      - -3385.482955438\n      - -525.6685328612\n      - -166.4142242952\n      - -89.99572115575\n      - -89.62572115575\n      - -55.58585272365\n      - -39.50746332279\n      - -20.14157855679\n      - -11.29463725292\n      - -7.199694706637\n      - 1033.419509418\n      - -3383.482955438\n      - -524.6685328612\n      - -163.2142242952\n      - -92.19572115575\n      - -91.87572115575\n      - -55.09585272365\n      - -40.70746332279\n      - -20.97157855679\n      - -12.98463725292\n      - -6.449694706637\n      - 1028.119509418\n      - -3385.982955438\n      - -524.9685328612\n      - -162.6142242952\n      - -91.19572115575\n      - -90.52572115575\n      - -53.40585272365\n      - -37.81746332279\n      - -23.22157855679\n      - 
-11.74463725292\n      - -3.749694706637\n      - 1035.619509418\n      - 2620.821525076\n      - -3383.482955438\n      - -940.3604542894\n      - -525.7685328612\n      - -263.0208155114\n      - -69.97519632661\n      - -69.10519632661\n      - -48.72825536786\n      - -36.52601556788\n      - -27.69182104696\n      - -22.5165484742\n      - -17.62146254613\n      - -14.36441930035\n      - -11.4442739727\n      - -29.26746332279\n      - -27.69746332279\n      - -35.38746332279\n      - -41.64746332279\n      - -41.38746332279\n      - -37.33746332279\n      - 1028.419509418\n      - -3390.782955438\n      - -525.4685328612\n      - -158.6142242952\n      - -82.39572115575\n      - -50.70585272365\n      - -82.57572115575\n      - -50.59585272365\n      - -35.75746332279\n      - -19.84157855679\n      - -8.224637252918\n      - -5.129694706637\n      - 1038.319509418\n      - -3384.182955438\n      - -525.9685328612\n      - -169.4142242952\n      - -88.19572115575\n      - -56.70585272365\n      - -87.63572115575\n      - -55.73585272365\n      - -37.21746332279\n      - -22.65157855679\n      - -12.42463725292\n      - -5.359694706637\n      - 1024.319509418\n      - -3386.282955438\n      - -532.6685328612\n      - -167.7142242952\n      - -86.49572115575\n      - -58.10585272365\n      - -86.20572115575\n      - -57.45585272365\n      - -40.10746332279\n      - -22.77157855679\n      - -13.09463725292\n      - -7.229694706637\n      - -39.76746332279\n      - 1038.619509418\n      - -3380.982955438\n      - -162.9142242952\n      - -40.14746332279\n      - -8.999694706637\n      - -40.51746332279\n      - -8.809694706637\n      - -38.26746332279\n      - -38.83746332279\n      - -37.81746332279\n      - -119.4324082065\n      - -118.9824082065\n      - 1039.319509418\n      - -3383.882955438\n      - -115.1324082065\n      - -115.1224082065\n      - -45.42825536786\n      - -25.06182104696\n      - -116.5324082065\n      - -116.2424082065\n      - 
1038.919509418\n      - -3384.582955438\n      - -45.94825536786\n      - -25.40182104696\n      - -116.0324082065\n      - -115.9824082065\nhistory_x:\n  -   - 0.15\n      - 0.008\n      - 0.01\n  -   - 0.25\n      - 0.008\n      - 0.01\n  -   - 0.15\n      - 0.108\n      - 0.01\n  -   - 0.15\n      - 0.008\n      - 0.11\n  -   - 0.1596177824551\n      - -0.07539624732067\n      - 0.08766385239892\n  -   - 0.2\n      - 0.008531162120637\n      - -0.002952684076318\n  -   - 0.1505141617677\n      - -0.04199731338289\n      - 0.009934485345754\n  -   - 0.1374618789969\n      - 0.007934485345754\n      - -0.03840238867598\n  -   - 0.1505250437069\n      - 0.007964908595663\n      - 0.01275913089388\n  -   - 0.149883507892\n      - 0.008098080768719\n      - 0.009146244784311\n  -   - 0.0\n      - 0.0\n      - 0.0\nhistory_x_expected:\n  -   - 0.15\n      - 0.008\n      - 0.01\n  -   - 0.25\n      - 0.008\n      - 0.01\n  -   - 0.15\n      - 0.108\n      - 0.01\n  -   - 0.15\n      - 0.008\n      - 0.11\n  -   - 0.1596177824551\n      - -0.07539624732067\n      - 0.08766385239892\n  -   - 0.2\n      - 0.008531162120637\n      - -0.002952684076318\n  -   - 0.1505141617677\n      - -0.04199731338289\n      - 0.009934485345754\n  -   - 0.1374618789969\n      - 0.007934485345754\n      - -0.03840238867598\n  -   - 0.1505250437069\n      - 0.007964908595663\n      - 0.01275913089388\n  -   - 0.149883507892\n      - 0.008098080768719\n      - 0.009146244784311\n  -   - 0.1716712756093\n      - -0.003385426549061\n      - 0.004854131368058\nindex_best_x: 9\nlinear_terms:\n  - -1.782609615475e-10\n  - -3.274180926383e-11\n  - 2.546585164964e-11\nlower_bounds: null\nmodel_improving_points:\n  -   - 0.025661432597987588\n      - 0.004659684320230673\n      - 0.0\n  -   - -0.005326886922235527\n      - -0.003923230748772294\n      - 0.0\n  -   - 0.14451544438283606\n      - 0.03415020862754581\n      - 0.0\nmodel_indices:\n  - 8\n  - 0\n  - 7\n  - 6\n  - 5\n  - 4\n  - 
3\nmodel_indices_expected:\n  - 8\n  - 0\n  - 10\n  - 6\n  - 5\n  - 4\n  - 3\nn: 3\nn_modelpoints: 2\nn_modelpoints_expected: 3\nsquare_terms:\n  -   - 23918483.46505\n      - -221133.0482641\n      - -3862092.694171\n  -   - -221133.0482641\n      - 3420438.11792\n      - -157370.8759191\n  -   - -3862092.694171\n      - -157370.8759191\n      - 925172.8526537\nupper_bound: null\n"
  },
  {
    "path": "tests/optimagic/optimizers/_pounders/fixtures/find_affine_points_nonzero_i.yaml",
    "content": "---\nc: 10\ndelta: 0.05\nhistory_x:\n  -   - 0.15\n      - 0.008\n      - 0.01\n  -   - 0.25\n      - 0.008\n      - 0.01\n  -   - 0.15\n      - 0.108\n      - 0.01\n  -   - 0.15\n      - 0.008\n      - 0.11\n  -   - 0.1596177824551\n      - -0.07539624732067\n      - 0.08766385239892\nmodel_improving_points:\n  -   - 0.0\n      - 0.0\n      - 0.0\n  -   - 0.0\n      - 0.0\n      - 0.0\n  -   - 0.0\n      - 0.0\n      - 0.0\nmodel_improving_points_expected:\n  -   - 0.19235564910118408\n      - 0.0\n      - 0.0\n  -   - -1.6679249464133494\n      - 0.0\n      - 2.0\n  -   - 1.5532770479784463\n      - 2.0\n      - 0.0\nmodel_indices:\n  - 1\n  - 2\n  - 3\n  - 0\n  - 2\n  - 0\n  - 2\nmodel_indices_expected:\n  - 4\n  - 3\n  - 2\n  - 0\n  - 2\n  - 0\n  - 2\nn: 3\nn_modelpoints: 0\nn_modelpoints_expected: 3\nproject_x_onto_null: true\ntheta1: 1.0e-05\nx_accepted:\n  - 0.15\n  - 0.008\n  - 0.01\n"
  },
  {
    "path": "tests/optimagic/optimizers/_pounders/fixtures/find_affine_points_nonzero_ii.yaml",
    "content": "---\nc: 10\ndelta: 0.025\nhistory_x:\n  -   - 0.15\n      - 0.008\n      - 0.01\n  -   - 0.25\n      - 0.008\n      - 0.01\n  -   - 0.15\n      - 0.108\n      - 0.01\n  -   - 0.15\n      - 0.008\n      - 0.11\n  -   - 0.1596177824551\n      - -0.07539624732067\n      - 0.08766385239892\n  -   - 0.2\n      - 0.008531162120637\n      - -0.002952684076318\n  -   - 0.1505141617677\n      - -0.04199731338289\n      - 0.009934485345754\n  -   - 0.1374618789969\n      - 0.007934485345754\n      - -0.03840238867598\n  -   - 0.1505250437069\n      - 0.007964908595663\n      - 0.01275913089388\nmodel_improving_points:\n  -   - 0.021001748277756915\n      - 0.0\n      - 0.0\n  -   - -0.001403656173463233\n      - 0.0\n      - 0.0\n  -   - 0.11036523575529027\n      - 0.0\n      - 0.0\nmodel_improving_points_expected:\n  -   - 0.021001748277756915\n      - -0.5015248401252026\n      - 0.02056647070703521\n  -   - -0.001403656173463233\n      - -0.0026205861698429256\n      - -1.9998925353155312\n  -   - 0.11036523575529027\n      - -1.9360955470393286\n      - -0.0026205861698429256\nmodel_indices:\n  - 8\n  - 7\n  - 6\n  - 5\n  - 4\n  - 3\n  - 2\nmodel_indices_expected:\n  - 8\n  - 7\n  - 6\n  - 5\n  - 4\n  - 3\n  - 2\nn: 3\nn_modelpoints: 1\nn_modelpoints_expected: 3\nproject_x_onto_null: true\ntheta1: 1.0e-05\nx_accepted:\n  - 0.15\n  - 0.008\n  - 0.01\n"
  },
  {
    "path": "tests/optimagic/optimizers/_pounders/fixtures/find_affine_points_nonzero_iii.yaml",
    "content": "---\nc: 10\ndelta: 0.00625\nhistory_x:\n  -   - 0.15\n      - 0.008\n      - 0.01\n  -   - 0.25\n      - 0.008\n      - 0.01\n  -   - 0.15\n      - 0.108\n      - 0.01\n  -   - 0.15\n      - 0.008\n      - 0.11\n  -   - 0.1596177824551\n      - -0.07539624732067\n      - 0.08766385239892\n  -   - 0.2\n      - 0.008531162120637\n      - -0.002952684076318\n  -   - 0.1505141617677\n      - -0.04199731338289\n      - 0.009934485345754\n  -   - 0.1374618789969\n      - 0.007934485345754\n      - -0.03840238867598\n  -   - 0.1505250437069\n      - 0.007964908595663\n      - 0.01275913089388\n  -   - 0.149883507892\n      - 0.008098080768719\n      - 0.009146244784311\n  -   - 0.1716712756093\n      - -0.003385426549061\n      - 0.004854131368058\n  -   - 0.1499498551576\n      - 0.008185153997901\n      - 0.009255435636305\n  -   - 0.1486949409413\n      - 0.001680047032405\n      - 0.01940631659429\n  -   - 0.1494212312914\n      - 0.005607806220598\n      - 0.01308958287811\n  -   - 0.149295008289\n      - 0.006607320278458\n      - 0.009649557844843\n  -   - 0.149373572031\n      - 0.006510591080504\n      - 0.01023212020758\n  -   - 0.1492202503973\n      - 0.005397396245708\n      - 0.01181780391516\n  -   - 0.1493007023164\n      - 0.005582573542213\n      - 0.01196522368907\n  -   - 0.1493027858782\n      - 0.005554596994372\n      - 0.01202415479218\n  -   - 0.1485523407947\n      - 0.005613111126492\n      - 0.01186195785149\n  -   - 0.1490051236084\n      - 0.005576645446634\n      - 0.01199907703224\n  -   - 0.1524257023164\n      - 0.005515392037394\n      - 0.01196491883293\n  -   - 0.1554482518753\n      - 0.005387691742364\n      - 0.01208573313392\n  -   - 0.1555507023164\n      - 0.00569480453193\n      - 0.01169255682528\n  -   - 0.1618007023164\n      - 0.0057804309848\n      - 0.01147013266041\n  -   - 0.1743007023164\n      - 0.005924712736456\n      - 0.01107680666362\n  -   - 0.1895416078574\n      - 0.006133481950173\n      - 
0.01054081360503\n  -   - 0.1903649025976\n      - 0.006140000867368\n      - 0.01052352873034\n  -   - 0.1653649025976\n      - 0.006034565329377\n      - 0.01081209329213\n  -   - 0.1902653863983\n      - 0.006141938821062\n      - 0.01051661018242\nmodel_improving_points:\n  -   - -0.015922591887171933\n      - -0.13172715842924898\n      - 0.0\n  -   - 0.00031007259112286745\n      - -0.0010430267511385427\n      - 0.0\n  -   - -0.0011069676676378482\n      - 0.002765579949600694\n      - 0.0\nmodel_improving_points_expected:\n  -   - -0.015922591887171933\n      - -0.13172715842924898\n      - -3.999999999999999\n  -   - 0.00031007259112286745\n      - -0.0010430267511385427\n      - -0.01686968607843814\n  -   - -0.0011069676676378482\n      - 0.002765579949600694\n      - 0.04617032988619707\nmodel_indices:\n  - 29\n  - 26\n  - 25\n  - 5\n  - 28\n  - 24\n  - 23\nmodel_indices_expected:\n  - 29\n  - 26\n  - 28\n  - 5\n  - 28\n  - 24\n  - 23\nn: 3\nn_modelpoints: 2\nn_modelpoints_expected: 3\nproject_x_onto_null: true\ntheta1: 1.0e-05\nx_accepted:\n  - 0.1903649025976\n  - 0.006140000867368\n  - 0.01052352873034\n"
  },
  {
    "path": "tests/optimagic/optimizers/_pounders/fixtures/find_affine_points_zero_i.yaml",
    "content": "---\nc: 1.7320508075688772\ndelta: 0.05\nhistory_x:\n  -   - 0.15\n      - 0.008\n      - 0.01\n  -   - 0.25\n      - 0.008\n      - 0.01\n  -   - 0.15\n      - 0.108\n      - 0.01\n  -   - 0.15\n      - 0.008\n      - 0.11\n  -   - 0.1596177824551\n      - -0.07539624732067\n      - 0.08766385239892\n  -   - 0.2\n      - 0.008531162120637\n      - -0.002952684076318\n  -   - 0.1505141617677\n      - -0.04199731338289\n      - 0.009934485345754\n  -   - 0.1374618789969\n      - 0.007934485345754\n      - -0.03840238867598\nmodel_improving_points:\n  -   - 0.0\n      - 0.0\n      - 0.0\n  -   - 0.0\n      - 0.0\n      - 0.0\n  -   - 0.0\n      - 0.0\n      - 0.0\nmodel_improving_points_expected:\n  -   - -0.2507624200626013\n      - 0.010283235353517606\n      - 1.0000000000000002\n  -   - -0.0013102930849214628\n      - -0.9999462676577656\n      - 0.010623242412742123\n  -   - -0.9680477735196643\n      - -0.0013102930849214628\n      - -0.2590536815263693\nmodel_indices:\n  - 5\n  - 6\n  - 7\n  - 2\n  - 1\n  - 0\n  - 0\nmodel_indices_expected:\n  - 7\n  - 6\n  - 5\n  - 2\n  - 1\n  - 0\n  - 0\nn: 3\nn_modelpoints: 0\nn_modelpoints_expected: 3\nproject_x_onto_null: false\ntheta1: 1.0e-05\nx_accepted:\n  - 0.15\n  - 0.008\n  - 0.01\n"
  },
  {
    "path": "tests/optimagic/optimizers/_pounders/fixtures/find_affine_points_zero_ii.yaml",
    "content": "---\nc: 1.7320508075688772\ndelta: 0.05\nhistory_x:\n  -   - 0.15\n      - 0.008\n      - 0.01\n  -   - 0.25\n      - 0.008\n      - 0.01\n  -   - 0.15\n      - 0.108\n      - 0.01\n  -   - 0.15\n      - 0.008\n      - 0.11\n  -   - 0.1596177824551\n      - -0.07539624732067\n      - 0.08766385239892\n  -   - 0.2\n      - 0.008531162120637\n      - -0.002952684076318\n  -   - 0.1505141617677\n      - -0.04199731338289\n      - 0.009934485345754\n  -   - 0.1374618789969\n      - 0.007934485345754\n      - -0.03840238867598\n  -   - 0.1505250437069\n      - 0.007964908595663\n      - 0.01275913089388\n  -   - 0.149883507892\n      - 0.008098080768719\n      - 0.009146244784311\n  -   - 0.1716712756093\n      - -0.003385426549061\n      - 0.004854131368058\nmodel_improving_points:\n  -   - 0.0\n      - 0.0\n      - 0.0\n  -   - 0.0\n      - 0.0\n      - 0.0\n  -   - 0.0\n      - 0.0\n      - 0.0\nmodel_improving_points_expected:\n  -   - 0.4357553543466791\n      - 0.012830716298993794\n      - -0.24843257790248596\n  -   - -0.22967014635560642\n      - -0.0026634434611177635\n      - -0.0032719084593076098\n  -   - -0.0858422683250766\n      - 0.07225772219141803\n      - -0.9509726692058914\nmodel_indices:\n  - 8\n  - 0\n  - 10\n  - 6\n  - 5\n  - 4\n  - 3\nmodel_indices_expected:\n  - 10\n  - 8\n  - 7\n  - 6\n  - 5\n  - 4\n  - 3\nn: 3\nn_modelpoints: 0\nn_modelpoints_expected: 3\nproject_x_onto_null: false\ntheta1: 1.0e-05\nx_accepted:\n  - 0.149883507892\n  - 0.008098080768719\n  - 0.009146244784311\n"
  },
  {
    "path": "tests/optimagic/optimizers/_pounders/fixtures/find_affine_points_zero_iii.yaml",
    "content": "---\nc: 1.7320508075688772\ndelta: 0.05\nhistory_x:\n  -   - 0.15\n      - 0.008\n      - 0.01\n  -   - 0.25\n      - 0.008\n      - 0.01\n  -   - 0.15\n      - 0.108\n      - 0.01\n  -   - 0.15\n      - 0.008\n      - 0.11\n  -   - 0.1596177824551\n      - -0.07539624732067\n      - 0.08766385239892\n  -   - 0.2\n      - 0.008531162120637\n      - -0.002952684076318\nmodel_improving_points:\n  -   - 0.0\n      - 0.0\n      - 0.0\n  -   - 0.0\n      - 0.0\n      - 0.0\n  -   - 0.0\n      - 0.0\n      - 0.0\nmodel_improving_points_expected:\n  -   - 1.0\n      - 0.0\n      - 0.0\n  -   - 0.010623242412742123\n      - 0.0\n      - 0.0\n  -   - -0.2590536815263693\n      - 0.0\n      - 0.0\nmodel_indices:\n  - 0\n  - 4\n  - 3\n  - 2\n  - 1\n  - 0\n  - 0\nmodel_indices_expected:\n  - 5\n  - 4\n  - 3\n  - 2\n  - 1\n  - 0\n  - 0\nn: 3\nn_modelpoints: 0\nn_modelpoints_expected: 1\nproject_x_onto_null: false\ntheta1: 1.0e-05\nx_accepted:\n  - 0.15\n  - 0.008\n  - 0.01\n"
  },
  {
    "path": "tests/optimagic/optimizers/_pounders/fixtures/find_affine_points_zero_iv.yaml",
    "content": "---\nc: 1.7320508075688772\ndelta: 0.00625\nhistory_x:\n  -   - 0.15\n      - 0.008\n      - 0.01\n  -   - 0.25\n      - 0.008\n      - 0.01\n  -   - 0.15\n      - 0.108\n      - 0.01\n  -   - 0.15\n      - 0.008\n      - 0.11\n  -   - 0.1596177824551\n      - -0.07539624732067\n      - 0.08766385239892\n  -   - 0.2\n      - 0.008531162120637\n      - -0.002952684076318\n  -   - 0.1505141617677\n      - -0.04199731338289\n      - 0.009934485345754\n  -   - 0.1374618789969\n      - 0.007934485345754\n      - -0.03840238867598\n  -   - 0.1505250437069\n      - 0.007964908595663\n      - 0.01275913089388\n  -   - 0.149883507892\n      - 0.008098080768719\n      - 0.009146244784311\n  -   - 0.1716712756093\n      - -0.003385426549061\n      - 0.004854131368058\n  -   - 0.1499498551576\n      - 0.008185153997901\n      - 0.009255435636305\n  -   - 0.1486949409413\n      - 0.001680047032405\n      - 0.01940631659429\n  -   - 0.1494212312914\n      - 0.005607806220598\n      - 0.01308958287811\n  -   - 0.149295008289\n      - 0.006607320278458\n      - 0.009649557844843\n  -   - 0.149373572031\n      - 0.006510591080504\n      - 0.01023212020758\n  -   - 0.1492202503973\n      - 0.005397396245708\n      - 0.01181780391516\n  -   - 0.1493007023164\n      - 0.005582573542213\n      - 0.01196522368907\n  -   - 0.1493027858782\n      - 0.005554596994372\n      - 0.01202415479218\n  -   - 0.1485523407947\n      - 0.005613111126492\n      - 0.01186195785149\n  -   - 0.1490051236084\n      - 0.005576645446634\n      - 0.01199907703224\n  -   - 0.1524257023164\n      - 0.005515392037394\n      - 0.01196491883293\n  -   - 0.1554482518753\n      - 0.005387691742364\n      - 0.01208573313392\n  -   - 0.1555507023164\n      - 0.00569480453193\n      - 0.01169255682528\n  -   - 0.1618007023164\n      - 0.0057804309848\n      - 0.01147013266041\n  -   - 0.1743007023164\n      - 0.005924712736456\n      - 0.01107680666362\n  -   - 0.1895416078574\n      - 
0.006133481950173\n      - 0.01054081360503\n  -   - 0.1903649025976\n      - 0.006140000867368\n      - 0.01052352873034\n  -   - 0.1653649025976\n      - 0.006034565329377\n      - 0.01081209329213\n  -   - 0.1902653863983\n      - 0.006141938821062\n      - 0.01051661018242\n  -   - 0.1936923871033\n      - 0.006154783888531\n      - 0.01043074413202\nmodel_improving_points:\n  -   - 0.0\n      - 0.0\n      - 0.0\n  -   - 0.0\n      - 0.0\n      - 0.0\n  -   - 0.0\n      - 0.0\n      - 0.0\nmodel_improving_points_expected:\n  -   - 0.5323975209038645\n      - -0.015922591887171933\n      - -0.13172715842924898\n  -   - 0.0023652833861750877\n      - 0.00031007259112286745\n      - -0.0010430267511385427\n  -   - -0.014845535731521364\n      - -0.0011069676676378482\n      - 0.002765579949600694\nmodel_indices:\n  - 27\n  - 29\n  - 26\n  - 28\n  - 25\n  - 24\n  - 23\nmodel_indices_expected:\n  - 30\n  - 29\n  - 26\n  - 28\n  - 25\n  - 24\n  - 23\nn: 3\nn_modelpoints: 0\nn_modelpoints_expected: 3\nproject_x_onto_null: false\ntheta1: 1.0e-05\nx_accepted:\n  - 0.1903649025976\n  - 0.006140000867368\n  - 0.01052352873034\n"
  },
  {
    "path": "tests/optimagic/optimizers/_pounders/fixtures/get_coefficients_residual_model.yaml",
    "content": "---\nbasis_null_space:\n  -   - -0.4583543462791\n      - -0.319506030216\n      - -0.6977037060623\n  -   - 0.2311109444943\n      - -0.207102182158\n      - 0.2709008772413\n  -   - -0.391511898797\n      - -0.2526774248775\n      - 0.6371234121103\n  -   - 0.008236522535421\n      - -0.04467864269942\n      - -0.001160198289494\n  -   - 0.7520131885729\n      - -0.1064594438631\n      - -0.1818415477397\n  -   - -0.1324539613206\n      - 0.8805459169394\n      - -0.02874512409319\n  -   - -0.009040449205893\n      - 0.04987780687459\n      - 0.001426286833001\nf_interpolated:\n  -   - 2.396839767562\n      - 2.640310345903\n      - 2.498147595924\n      - 2.691030200433\n      - 2.692161347328\n      - 2.617651338669\n      - 2.303282937582\n      - 1.972205368828\n      - 2.303282937582\n      - 1.972205368828\n      - 1.763553129541\n      - 1.342529987111\n      - 1.146903490695\n      - 1.640628791315\n      - 0.9003896743169\n      - 0.7700051055971\n      - 0.6665780303914\n      - 2.396839767562\n      - 2.640310345903\n      - 2.498147595924\n      - 2.691030200433\n      - 2.692161347328\n      - 2.617651338669\n      - 2.303282937582\n      - 1.972205368828\n      - 2.303282937582\n      - 1.972205368828\n      - 1.763553129541\n      - 1.342529987111\n      - 1.146903490695\n      - 1.640628791315\n      - 0.9003896743169\n      - 0.7700051055971\n      - 0.6665780303914\n      - 2.396839767562\n      - 2.640310345904\n      - 2.498147595924\n      - 2.691030200433\n      - 2.692161347328\n      - 2.617651338669\n      - 2.303282937582\n      - 1.972205368828\n      - 2.303282937582\n      - 1.972205368828\n      - 1.763553129541\n      - 1.342529987111\n      - 1.146903490695\n      - 1.640628791315\n      - 0.9003896743169\n      - 0.7700051055971\n      - 0.6665780303914\n      - 2.396839767562\n      - 2.640310345904\n      - 2.498147595924\n      - 2.691030200433\n      - 2.692161347328\n      - 2.617651338669\n      - 
2.303282937582\n      - 1.972205368828\n      - 2.303282937582\n      - 1.972205368828\n      - 1.763553129541\n      - 1.342529987111\n      - 1.146903490695\n      - 1.640628791315\n      - 0.9003896743169\n      - 0.7700051055971\n      - 0.6665780303914\n      - 2.396839767562\n      - 2.498147595924\n      - 2.472320188045\n      - 1.360815770929\n      - 1.360815770929\n      - 1.360815770929\n      - 0.6216379283744\n      - 2.396839767562\n      - 2.498147595924\n      - 2.472320188045\n      - 1.360815770929\n      - 1.360815770929\n      - 1.360815770929\n      - 0.6216379283744\n      - 2.396839767562\n      - 2.498147595925\n      - 2.472320188045\n      - 1.360815770929\n      - 1.360815770929\n      - 1.360815770929\n      - 0.6216379283744\n      - 2.396839767562\n      - 2.498147595925\n      - 2.472320188046\n      - 1.360815770929\n      - 0.6216379283744\n      - 1.360815770929\n      - 1.360815770929\n      - 0.6216379283744\n      - 2.396839767562\n      - 2.498147595925\n      - 2.692161347328\n      - 2.472320188046\n      - 2.132751177783\n      - 2.132751177783\n      - 1.831874172693\n      - 1.360815770929\n      - 1.028891127921\n      - 0.8305451863645\n      - 0.6216379283744\n      - 2.396839767562\n      - 2.498147595925\n      - 2.692161347328\n      - 2.472320188045\n      - 2.132751177783\n      - 2.132751177783\n      - 1.831874172693\n      - 1.360815770929\n      - 1.028891127921\n      - 0.8305451863645\n      - 0.6216379283744\n      - 2.396839767562\n      - 2.498147595925\n      - 2.692161347328\n      - 2.472320188046\n      - 2.132751177783\n      - 2.132751177783\n      - 1.831874172693\n      - 1.360815770929\n      - 1.028891127921\n      - 0.8305451863645\n      - 0.6216379283744\n      - 2.396839767562\n      - 2.640310345903\n      - 2.498147595925\n      - 2.691030200433\n      - 2.692161347328\n      - 2.617651338669\n      - 1.972205368828\n      - 1.972205368828\n      - 1.763553129541\n      - 1.342529987111\n  
    - 1.146903490695\n      - 1.640628791315\n      - 0.9003896743169\n      - 0.7700051055971\n      - 0.6665780303914\n      - 1.360815770929\n      - 1.360815770929\n      - 1.360815770929\n      - 1.360815770929\n      - 1.360815770929\n      - 1.360815770929\n      - 2.396839767562\n      - 2.498147595924\n      - 2.692161347328\n      - 2.472320188046\n      - 2.132751177783\n      - 1.831874172693\n      - 2.132751177783\n      - 1.831874172693\n      - 1.360815770929\n      - 1.028891127921\n      - 0.8305451863645\n      - 0.6216379283744\n      - 2.396839767562\n      - 2.498147595924\n      - 2.692161347328\n      - 2.472320188045\n      - 2.132751177783\n      - 1.831874172693\n      - 2.132751177783\n      - 1.831874172693\n      - 1.360815770929\n      - 1.028891127921\n      - 0.8305451863645\n      - 0.6216379283744\n      - 2.396839767562\n      - 2.498147595925\n      - 2.692161347328\n      - 2.472320188045\n      - 2.132751177783\n      - 1.831874172693\n      - 2.132751177783\n      - 1.831874172693\n      - 1.360815770929\n      - 1.028891127921\n      - 0.8305451863645\n      - 0.6216379283744\n      - 1.360815770929\n      - 2.396839767562\n      - 2.498147595925\n      - 2.472320188046\n      - 1.360815770929\n      - 0.6216379283744\n      - 1.360815770929\n      - 0.6216379283744\n      - 1.360815770929\n      - 1.360815770929\n      - 1.360815770929\n      - 2.303282937582\n      - 2.303282937582\n      - 2.396839767562\n      - 2.498147595924\n      - 2.303282937582\n      - 2.303282937582\n      - 1.763553129541\n      - 1.146903490695\n      - 2.303282937582\n      - 2.303282937582\n      - 2.396839767562\n      - 2.498147595924\n      - 1.763553129541\n      - 1.146903490695\n      - 2.303282937582\n      - 2.303282937582\n  -   - 1.24344978758e-14\n      - -5.329070518201e-14\n      - 9.947598300641e-14\n      - 2.13162820728e-14\n      - 3.552713678801e-15\n      - -3.552713678801e-15\n      - 0.0\n      - 3.552713678801e-15\n      
- 3.552713678801e-15\n      - -2.6645352591e-15\n      - -4.440892098501e-15\n      - 8.881784197001e-16\n      - -8.881784197001e-16\n      - 6.661338147751e-15\n      - 4.440892098501e-16\n      - 4.440892098501e-16\n      - -8.881784197001e-16\n      - -1.59872115546e-14\n      - -1.7763568394e-14\n      - -2.48689957516e-14\n      - -3.552713678801e-15\n      - -1.42108547152e-14\n      - -5.329070518201e-15\n      - 1.7763568394e-15\n      - 3.552713678801e-15\n      - 1.7763568394e-15\n      - 1.7763568394e-15\n      - 0.0\n      - 1.7763568394e-15\n      - -8.881784197001e-16\n      - 8.881784197001e-16\n      - 4.440892098501e-16\n      - 0.0\n      - 8.881784197001e-16\n      - -1.24344978758e-14\n      - -5.151434834261e-14\n      - -3.19744231092e-14\n      - -1.42108547152e-14\n      - -1.7763568394e-14\n      - 0.0\n      - 3.552713678801e-15\n      - 0.0\n      - -1.7763568394e-15\n      - -4.440892098501e-15\n      - 0.0\n      - 8.881784197001e-16\n      - 0.0\n      - 6.217248937901e-15\n      - 1.998401444325e-15\n      - 4.440892098501e-16\n      - -4.440892098501e-16\n      - 0.0\n      - -1.136868377216e-13\n      - -4.618527782441e-14\n      - -7.105427357601e-15\n      - 5.329070518201e-15\n      - 0.0\n      - -1.7763568394e-15\n      - 0.0\n      - 1.7763568394e-15\n      - 3.552713678801e-15\n      - -7.993605777300e-15\n      - 2.6645352591e-15\n      - -2.22044604925e-15\n      - 3.10862446895e-15\n      - 1.33226762955e-15\n      - 2.22044604925e-16\n      - -1.110223024625e-15\n      - -3.552713678801e-15\n      - 9.947598300641e-14\n      - -3.552713678801e-15\n      - 5.551115123126e-15\n      - 1.7763568394e-15\n      - 1.7763568394e-15\n      - -8.881784197001e-16\n      - -1.24344978758e-14\n      - 0.0\n      - -8.881784197001e-16\n      - 3.552713678801e-15\n      - 1.33226762955e-14\n      - 6.217248937901e-15\n      - -5.551115123126e-16\n      - -1.86517468137e-14\n      - 1.101341240428e-13\n      - -7.105427357601e-15\n     
 - 2.6645352591e-15\n      - 2.22044604925e-15\n      - 2.6645352591e-15\n      - -8.881784197001e-16\n      - -1.24344978758e-14\n      - -1.06581410364e-14\n      - 3.552713678801e-15\n      - 6.661338147751e-15\n      - -8.881784197001e-16\n      - 1.33226762955e-14\n      - 1.7763568394e-15\n      - -8.881784197001e-16\n      - -1.86517468137e-14\n      - 3.19744231092e-14\n      - 1.68753899743e-14\n      - -2.6645352591e-15\n      - 6.217248937901e-15\n      - -1.7763568394e-15\n      - -8.881784197001e-16\n      - 6.217248937901e-15\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - 4.440892098501e-16\n      - -1.50990331349e-14\n      - 9.592326932761e-14\n      - 0.0\n      - 0.0\n      - 6.661338147751e-16\n      - -1.110223024625e-15\n      - -1.7763568394e-15\n      - 1.33226762955e-14\n      - 0.0\n      - -8.881784197001e-16\n      - 4.440892098501e-16\n      - 1.06581410364e-14\n      - 1.7763568394e-14\n      - -2.6645352591e-15\n      - -3.552713678801e-15\n      - -8.881784197001e-16\n      - 2.6645352591e-15\n      - 0.0\n      - 3.552713678801e-15\n      - 8.881784197001e-16\n      - 8.881784197001e-16\n      - 0.0\n      - -1.24344978758e-14\n      - -2.13162820728e-14\n      - 9.592326932761e-14\n      - 0.0\n      - -2.57571741713e-14\n      - 1.7763568394e-15\n      - -8.881784197001e-16\n      - 1.7763568394e-15\n      - -3.552713678801e-15\n      - 7.771561172376e-16\n      - -1.998401444325e-15\n      - 4.440892098501e-15\n      - 1.443289932013e-15\n      - 2.775557561563e-16\n      - -1.665334536938e-16\n      - 7.105427357601e-15\n      - 8.881784197001e-15\n      - -1.7763568394e-15\n      - 2.22044604925e-15\n      - 1.24344978758e-14\n      - 3.552713678801e-15\n      - 1.24344978758e-14\n      - 1.42108547152e-14\n      - -2.6645352591e-15\n      - -5.329070518201e-15\n      - 0.0\n      - 0.0\n      - 0.0\n      - 1.7763568394e-15\n      - 7.105427357601e-15\n      - -8.881784197001e-16\n      - 0.0\n      - 0.0\n  
    - -2.6645352591e-15\n      - -4.618527782441e-14\n      - -7.105427357601e-15\n      - 5.329070518201e-15\n      - 2.6645352591e-15\n      - -8.881784197001e-16\n      - 1.7763568394e-15\n      - -8.881784197001e-16\n      - 5.329070518201e-15\n      - -2.22044604925e-16\n      - 4.440892098501e-16\n      - 8.881784197001e-16\n      - 7.105427357601e-15\n      - 7.105427357601e-15\n      - 3.153033389935e-14\n      - 0.0\n      - -4.440892098501e-15\n      - 0.0\n      - 2.6645352591e-15\n      - -1.7763568394e-15\n      - -8.881784197001e-16\n      - 8.881784197001e-16\n      - -8.881784197001e-16\n      - 0.0\n      - 4.884981308351e-15\n      - 8.881784197001e-16\n      - -1.06581410364e-14\n      - -1.7763568394e-15\n      - 2.6645352591e-15\n      - 2.22044604925e-16\n      - -7.549516567451e-15\n      - -4.440892098501e-16\n      - 6.217248937901e-15\n      - 6.217248937901e-15\n      - 3.552713678801e-15\n      - 4.440892098501e-16\n      - -1.7763568394e-15\n      - -1.7763568394e-15\n      - 4.618527782441e-14\n      - 1.7763568394e-15\n      - -1.7763568394e-15\n      - -1.7763568394e-15\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - 8.881784197001e-15\n      - 5.684341886081e-14\n      - 0.0\n      - -2.22044604925e-15\n      - -8.881784197001e-16\n      - -1.7763568394e-15\n  -   - -1.06581410364e-14\n      - -1.95399252334e-14\n      - 1.24344978758e-14\n      - 7.105427357601e-15\n      - 4.440892098501e-15\n      - 2.442490654175e-15\n      - -1.998401444325e-15\n      - 3.330669073875e-16\n      - -1.33226762955e-15\n      - -1.110223024625e-16\n      - 1.33226762955e-15\n      - 1.7763568394e-15\n      - -6.661338147751e-16\n      - 4.440892098501e-16\n      - 0.0\n      - 2.22044604925e-16\n      - 0.0\n      - 0.0\n      - 3.19744231092e-14\n      - -1.7763568394e-14\n      - 1.7763568394e-15\n      - 1.7763568394e-15\n      - 2.6645352591e-15\n      - 0.0\n      - 0.0\n      - 0.0\n      - 
0.0\n      - -1.33226762955e-15\n      - 4.440892098501e-16\n      - 0.0\n      - -8.881784197001e-16\n      - 0.0\n      - 1.110223024625e-16\n      - -2.22044604925e-16\n      - 1.7763568394e-15\n      - -3.552713678801e-15\n      - 1.59872115546e-14\n      - 8.881784197001e-16\n      - -3.10862446895e-15\n      - 4.440892098501e-16\n      - 1.7763568394e-15\n      - 8.881784197001e-16\n      - 8.881784197001e-16\n      - 1.33226762955e-15\n      - -4.440892098501e-16\n      - 4.440892098501e-16\n      - -8.881784197001e-16\n      - 1.7763568394e-15\n      - -8.881784197001e-16\n      - 4.440892098501e-16\n      - -1.110223024625e-16\n      - 8.881784197001e-16\n      - -2.30926389122e-14\n      - 1.7763568394e-15\n      - 2.6645352591e-15\n      - 2.22044604925e-16\n      - 4.440892098501e-16\n      - 0.0\n      - 8.881784197001e-16\n      - 0.0\n      - 4.440892098501e-16\n      - -2.22044604925e-15\n      - 1.33226762955e-15\n      - -8.881784197001e-16\n      - -1.33226762955e-15\n      - 0.0\n      - 4.440892098501e-16\n      - 4.440892098501e-16\n      - -1.06581410364e-14\n      - -1.24344978758e-14\n      - 0.0\n      - 8.881784197001e-16\n      - -2.6645352591e-15\n      - -4.440892098501e-16\n      - -3.330669073875e-16\n      - 1.7763568394e-15\n      - 3.552713678801e-14\n      - -8.881784197001e-16\n      - 1.7763568394e-15\n      - 3.552713678801e-15\n      - -4.440892098501e-16\n      - 0.0\n      - -4.440892098501e-15\n      - 6.217248937901e-15\n      - -3.552713678801e-15\n      - -4.440892098501e-16\n      - -8.881784197001e-16\n      - 0.0\n      - 0.0\n      - 1.7763568394e-15\n      - 2.84217094304e-14\n      - -4.440892098501e-15\n      - 2.6645352591e-15\n      - -3.330669073875e-16\n      - 3.552713678801e-15\n      - -2.6645352591e-15\n      - 0.0\n      - -4.440892098501e-15\n      - -1.7763568394e-15\n      - 9.103828801926e-15\n      - -3.552713678801e-15\n      - 0.0\n      - 0.0\n      - -4.440892098501e-16\n      - 
2.6645352591e-15\n      - -4.440892098501e-16\n      - 1.110223024625e-16\n      - 1.110223024625e-16\n      - -8.881784197001e-16\n      - -7.993605777300e-15\n      - 1.33226762955e-15\n      - 6.661338147751e-16\n      - 0.0\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - 3.552713678801e-15\n      - 4.440892098501e-16\n      - 2.22044604925e-16\n      - -2.22044604925e-16\n      - 1.33226762955e-15\n      - -1.59872115546e-14\n      - 5.551115123126e-15\n      - 2.22044604925e-16\n      - -8.881784197001e-16\n      - 8.881784197001e-16\n      - 1.110223024625e-16\n      - -3.552713678801e-15\n      - -8.881784197001e-16\n      - -4.718447854657e-16\n      - 0.0\n      - 1.7763568394e-15\n      - -3.552713678801e-15\n      - -7.993605777300e-15\n      - 7.993605777301e-15\n      - -1.199040866595e-14\n      - -1.7763568394e-15\n      - 1.7763568394e-15\n      - 8.881784197001e-16\n      - 8.881784197001e-16\n      - 8.881784197001e-16\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - 0.0\n      - 0.0\n      - 4.440892098501e-16\n      - -5.329070518201e-15\n      - 0.0\n      - 4.773959005888e-15\n      - -8.881784197001e-16\n      - 0.0\n      - 4.440892098501e-16\n      - -1.398881011028e-14\n      - -1.7763568394e-15\n      - 5.551115123126e-15\n      - -3.552713678801e-15\n      - -4.440892098501e-16\n      - 0.0\n      - -2.22044604925e-16\n      - 0.0\n      - 4.440892098501e-16\n      - 2.22044604925e-16\n      - 8.881784197001e-16\n      - 0.0\n      - 1.7763568394e-15\n      - -2.6645352591e-14\n      - 8.659739592076e-15\n      - 3.552713678801e-15\n      - 8.881784197001e-16\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - 4.440892098501e-16\n      - -1.33226762955e-15\n      - 0.0\n      - 0.0\n      - 0.0\n      - -1.7763568394e-15\n      - 1.59872115546e-14\n      - 1.7763568394e-15\n      - 0.0\n      - 4.440892098501e-16\n      - 0.0\n      - -4.440892098501e-16\n      - 1.7763568394e-15\n      
- 2.6645352591e-15\n      - -4.440892098501e-16\n      - 2.22044604925e-16\n      - 1.665334536938e-16\n      - 5.329070518201e-15\n      - 3.552713678801e-15\n      - 2.84217094304e-14\n      - -4.440892098501e-16\n      - 0.0\n      - -6.661338147751e-16\n      - -3.552713678801e-15\n      - 4.440892098501e-16\n      - 8.881784197001e-16\n      - -4.440892098501e-16\n      - -3.552713678801e-15\n      - 2.6645352591e-15\n      - 1.7763568394e-15\n      - 3.552713678801e-15\n      - 6.217248937901e-15\n      - 4.440892098501e-16\n      - -4.440892098501e-16\n      - -8.881784197001e-16\n      - 0.0\n      - -3.10862446895e-15\n      - -1.7763568394e-15\n      - 1.24344978758e-14\n      - 1.59872115546e-14\n      - -4.440892098501e-16\n      - 8.881784197001e-16\n      - -3.10862446895e-15\n      - 4.440892098501e-16\n  -   - -2.735578163993e-08\n      - -5.45305738342e-08\n      - 4.426374289324e-08\n      - -4.691855792771e-10\n      - -2.819547262334e-09\n      - -5.672461611539e-09\n      - -6.022133902661e-09\n      - -5.938645131209e-09\n      - -6.026155574546e-09\n      - -5.914529310758e-09\n      - -1.108718095111e-08\n      - -5.205436082178e-11\n      - -1.303774865846e-09\n      - 3.85213638765e-10\n      - -1.12522080542e-09\n      - -1.038296559841e-09\n      - -9.272147494244e-10\n      - -2.739852789091e-08\n      - -5.461970431497e-08\n      - 4.424782673595e-08\n      - -4.685034582508e-10\n      - -2.810566002154e-09\n      - -5.653987500409e-09\n      - -6.050512979527e-09\n      - -5.93539084548e-09\n      - -6.050512979527e-09\n      - -5.92861226778e-09\n      - -1.110882408284e-08\n      - -5.200462283028e-11\n      - -1.299493845863e-09\n      - 3.87061049878e-10\n      - -1.12522080542e-09\n      - -1.042401720497e-09\n      - -9.273968260004e-10\n      - -2.746151039901e-08\n      - -5.433957994683e-08\n      - 4.423691279953e-08\n      - -4.692992661148e-10\n      - -2.817955646606e-09\n      - -5.670756308973e-09\n      - 
-6.043791245247e-09\n      - -5.925457458034e-09\n      - -6.050569822946e-09\n      - -5.927120128035e-09\n      - -1.106290881125e-08\n      - -5.205436082178e-11\n      - -1.303028795974e-09\n      - 3.85909970646e-10\n      - -1.127881787966e-09\n      - -1.040435293476e-09\n      - -9.260201494499e-10\n      - -2.734941517701e-08\n      - -5.424772098195e-08\n      - 4.424055077834e-08\n      - -4.674802767113e-10\n      - -2.812839738908e-09\n      - -5.670756308973e-09\n      - -6.039726940799e-09\n      - -5.925457458034e-09\n      - -6.050512979527e-09\n      - -5.93539084548e-09\n      - -1.104555735765e-08\n      - -5.22533127878e-11\n      - -1.303213537085e-09\n      - 3.862474784455e-10\n      - -1.128016791085e-09\n      - -1.036601915416e-09\n      - -9.278338097829e-10\n      - -2.736760507105e-08\n      - 4.430148692336e-08\n      - -6.124395213192e-09\n      - 6.223643822523e-09\n      - 6.227637072698e-09\n      - 6.216559711447e-09\n      - -8.725535849408e-10\n      - -2.746151039901e-08\n      - 4.422872734722e-08\n      - -6.108592742748e-09\n      - 6.207599767549e-09\n      - 6.222357740171e-09\n      - 6.224738058336e-09\n      - -8.727347733384e-10\n      - -2.734918780334e-08\n      - 4.453613655642e-08\n      - -6.112173878137e-09\n      - 6.22451068466e-09\n      - 6.227359961031e-09\n      - 6.222300896752e-09\n      - -8.725198341608e-10\n      - -2.746151039901e-08\n      - 4.457479008124e-08\n      - -6.087390147513e-09\n      - 6.217128145636e-09\n      - -8.725535849408e-10\n      - 6.222357740171e-09\n      - 6.227637072698e-09\n      - -8.725198341608e-10\n      - -2.734918780334e-08\n      - 4.454204827198e-08\n      - -2.825686351571e-09\n      - -6.100492555561e-09\n      - -5.898698418605e-09\n      - -5.89793103245e-09\n      - -6.570232358172e-09\n      - 6.235552518774e-09\n      - -1.462325371904e-09\n      - -1.088713119657e-09\n      - -8.705525189612e-10\n      - -2.73569185083e-08\n      - 4.454068402993e-08\n      
- -2.808064891724e-09\n      - -6.131557483968e-09\n      - -5.876401587557e-09\n      - -5.897845767322e-09\n      - -6.565869625774e-09\n      - 6.222357740171e-09\n      - -1.463405396862e-09\n      - -1.089073720095e-09\n      - -8.69984972951e-10\n      - -2.731030690484e-08\n      - 4.454204827198e-08\n      - -2.806132215483e-09\n      - -6.112287564974e-09\n      - -5.890328225178e-09\n      - -5.881020115339e-09\n      - -6.602697055769e-09\n      - 6.208729530499e-09\n      - -1.467945764944e-09\n      - -1.086476686396e-09\n      - -8.746043889118e-10\n      - -2.746151039901e-08\n      - -5.454558049678e-08\n      - 4.454068402993e-08\n      - -4.674802767113e-10\n      - -2.816477717715e-09\n      - -5.670813152392e-09\n      - -5.936954039498e-09\n      - -5.93111337821e-09\n      - -1.104545788166e-08\n      - -5.2018833685e-11\n      - -1.29870869614e-09\n      - 3.867874909247e-10\n      - -1.12415321496e-09\n      - -1.038033659029e-09\n      - -9.241301057727e-10\n      - 6.206445135604e-09\n      - 6.206281710774e-09\n      - 6.214690984052e-09\n      - 6.227359961031e-09\n      - 6.216112069524e-09\n      - 6.232355076463e-09\n      - -2.740466698015e-08\n      - 4.426556188264e-08\n      - -2.806132215483e-09\n      - -6.098474614191e-09\n      - -5.856662710357e-09\n      - -6.574133237791e-09\n      - -5.885155474061e-09\n      - -6.587036693873e-09\n      - 6.232085070224e-09\n      - -1.464133703166e-09\n      - -1.089770051976e-09\n      - -8.732499168218e-10\n      - -2.739443516475e-08\n      - 4.424418875715e-08\n      - -2.815454536176e-09\n      - -6.108251682235e-09\n      - -5.863171281817e-09\n      - -6.585437972717e-09\n      - -5.886150233891e-09\n      - -6.576577504802e-09\n      - 6.217746317816e-09\n      - -1.467178378789e-09\n      - -1.090377566015e-09\n      - -8.717009336578e-10\n      - -2.741853677435e-08\n      - 4.456569513422e-08\n      - -2.813635546772e-09\n      - -6.107967465141e-09\n      - 
-5.881588549528e-09\n      - -6.590155976482e-09\n      - -5.854928986082e-09\n      - -6.584819800537e-09\n      - 6.221597459444e-09\n      - -1.46228273934e-09\n      - -1.089073720095e-09\n      - -8.693215036715e-10\n      - 6.218208170594e-09\n      - -2.752040018095e-08\n      - 4.457479008124e-08\n      - -6.096939841882e-09\n      - 6.222300896752e-09\n      - -8.720668631668e-10\n      - 6.209738501184e-09\n      - -8.718927801965e-10\n      - 6.226734683423e-09\n      - 6.224738058336e-09\n      - 6.208729530499e-09\n      - -6.025345555827e-09\n      - -6.015824283168e-09\n      - -2.741762727965e-08\n      - 4.41937118012e-08\n      - -6.025857146597e-09\n      - -6.039897471055e-09\n      - -1.109327030235e-08\n      - -1.30298616341e-09\n      - -6.022730758559e-09\n      - -6.028955112924e-09\n      - -2.738147486525e-08\n      - 4.430512490217e-08\n      - -1.106290881125e-08\n      - -1.301476260096e-09\n      - -6.022730758559e-09\n      - -6.022105480952e-09\n  -   - -3.552713678801e-15\n      - -3.552713678801e-15\n      - 9.769962616701e-15\n      - 5.329070518201e-15\n      - 6.217248937901e-15\n      - 3.774758283726e-15\n      - -2.553512956638e-15\n      - 4.440892098501e-16\n      - -1.110223024625e-15\n      - 0.0\n      - 8.881784197001e-16\n      - 1.7763568394e-15\n      - 2.22044604925e-16\n      - 0.0\n      - 2.22044604925e-16\n      - 2.22044604925e-16\n      - -2.22044604925e-16\n      - 0.0\n      - 1.42108547152e-14\n      - -1.42108547152e-14\n      - -1.7763568394e-15\n      - 1.7763568394e-15\n      - 2.6645352591e-15\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - -4.440892098501e-16\n      - -8.881784197001e-16\n      - 4.440892098501e-16\n      - 0.0\n      - 4.440892098501e-16\n      - 2.22044604925e-16\n      - -5.551115123126e-16\n      - -1.110223024625e-16\n      - -8.881784197001e-16\n      - 0.0\n      - 1.95399252334e-14\n      - 5.329070518201e-15\n      - 
-5.329070518201e-15\n      - 2.442490654175e-15\n      - 8.881784197001e-16\n      - 0.0\n      - 0.0\n      - 4.440892098501e-16\n      - -4.440892098501e-16\n      - -8.881784197001e-16\n      - -1.33226762955e-15\n      - 4.440892098501e-16\n      - -4.440892098501e-16\n      - 0.0\n      - 0.0\n      - 4.440892098501e-16\n      - -2.22044604925e-14\n      - 1.95399252334e-14\n      - 6.439293542826e-15\n      - 6.883382752676e-15\n      - 2.442490654175e-15\n      - -8.881784197001e-16\n      - 0.0\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - -1.7763568394e-15\n      - 8.881784197001e-16\n      - -4.440892098501e-16\n      - -1.33226762955e-15\n      - 0.0\n      - 0.0\n      - 2.22044604925e-16\n      - -7.105427357601e-15\n      - 0.0\n      - 1.7763568394e-15\n      - 2.6645352591e-15\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - -2.22044604925e-16\n      - -8.881784197001e-16\n      - 3.28626015289e-14\n      - 1.7763568394e-15\n      - 1.7763568394e-15\n      - 3.552713678801e-15\n      - -1.33226762955e-15\n      - 0.0\n      - 3.552713678801e-15\n      - 1.42108547152e-14\n      - -2.6645352591e-15\n      - 8.881784197001e-16\n      - -2.6645352591e-15\n      - 1.7763568394e-15\n      - 0.0\n      - -8.881784197001e-16\n      - 1.95399252334e-14\n      - -3.10862446895e-15\n      - 1.7763568394e-15\n      - -2.22044604925e-16\n      - 3.552713678801e-15\n      - -8.881784197001e-16\n      - 0.0\n      - 3.552713678801e-15\n      - -1.95399252334e-14\n      - 7.105427357601e-15\n      - -3.552713678801e-15\n      - 0.0\n      - 0.0\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - 0.0\n      - -1.110223024625e-16\n      - -8.326672684688e-17\n      - -3.552713678801e-15\n      - 0.0\n      - -2.22044604925e-15\n      - 4.440892098501e-16\n      - 0.0\n      - 0.0\n      - -8.881784197001e-16\n      - 3.552713678801e-15\n      - 6.661338147751e-16\n      - 0.0\n      - 0.0\n      - 
5.329070518201e-15\n      - -3.37507799486e-14\n      - -6.661338147751e-16\n      - 2.553512956638e-15\n      - -8.881784197001e-16\n      - 8.881784197001e-16\n      - 4.440892098501e-16\n      - 0.0\n      - 0.0\n      - -3.053113317719e-16\n      - 0.0\n      - -8.881784197001e-16\n      - -3.37507799486e-14\n      - 0.0\n      - 1.24344978758e-14\n      - -1.199040866595e-14\n      - 0.0\n      - 8.881784197001e-16\n      - 4.440892098501e-16\n      - 0.0\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - 0.0\n      - 0.0\n      - 0.0\n      - -7.105427357601e-15\n      - -1.7763568394e-15\n      - 2.997602166488e-15\n      - -2.6645352591e-15\n      - -2.6645352591e-15\n      - -1.554312234475e-15\n      - -1.06581410364e-14\n      - -3.28626015289e-14\n      - -6.661338147751e-16\n      - -2.22044604925e-15\n      - -1.110223024625e-15\n      - 4.440892098501e-16\n      - 1.110223024625e-15\n      - -4.440892098501e-16\n      - 1.998401444325e-15\n      - 4.440892098501e-16\n      - 4.440892098501e-16\n      - 0.0\n      - 0.0\n      - -8.881784197001e-15\n      - 5.773159728051e-15\n      - 2.6645352591e-15\n      - 1.7763568394e-15\n      - 0.0\n      - -4.440892098501e-16\n      - -8.881784197001e-16\n      - -2.22044604925e-15\n      - 8.881784197001e-16\n      - 1.110223024625e-16\n      - 4.440892098501e-16\n      - 2.6645352591e-15\n      - 2.13162820728e-14\n      - 0.0\n      - 0.0\n      - 8.881784197001e-16\n      - 0.0\n      - -8.881784197001e-16\n      - 8.881784197001e-16\n      - 2.6645352591e-15\n      - 0.0\n      - 0.0\n      - -2.775557561563e-17\n      - 3.552713678801e-15\n      - 3.552713678801e-15\n      - 1.95399252334e-14\n      - -1.33226762955e-15\n      - 1.7763568394e-15\n      - -6.661338147751e-16\n      - -1.7763568394e-15\n      - 0.0\n      - 0.0\n      - -1.33226762955e-15\n      - 0.0\n      - 2.6645352591e-15\n      - 0.0\n      - 7.105427357601e-15\n      - 
1.68753899743e-14\n      - -1.33226762955e-15\n      - 8.881784197001e-16\n      - -8.881784197001e-16\n      - 0.0\n      - -1.7763568394e-15\n      - -1.33226762955e-15\n      - 1.24344978758e-14\n      - 0.0\n      - -4.440892098501e-16\n      - 4.440892098501e-16\n      - -1.7763568394e-15\n      - 1.7763568394e-15\n  -   - -1.06581410364e-14\n      - 3.552713678801e-15\n      - 3.552713678801e-15\n      - 7.105427357601e-15\n      - 8.881784197001e-15\n      - 0.0\n      - -2.6645352591e-15\n      - 8.881784197001e-16\n      - -1.7763568394e-15\n      - -8.881784197001e-16\n      - 1.7763568394e-15\n      - 8.881784197001e-16\n      - 0.0\n      - 1.7763568394e-15\n      - -1.110223024625e-16\n      - -2.22044604925e-16\n      - 0.0\n      - -3.552713678801e-15\n      - 4.263256414561e-14\n      - -7.105427357601e-15\n      - -3.552713678801e-15\n      - -3.552713678801e-15\n      - -3.552713678801e-15\n      - -8.881784197001e-16\n      - 2.22044604925e-16\n      - -8.881784197001e-16\n      - 0.0\n      - -2.6645352591e-15\n      - -4.440892098501e-16\n      - -1.554312234475e-15\n      - 0.0\n      - -1.110223024625e-16\n      - 0.0\n      - -2.22044604925e-16\n      - 3.552713678801e-15\n      - 1.7763568394e-14\n      - 8.881784197001e-15\n      - -3.552713678801e-15\n      - -3.552713678801e-15\n      - -8.881784197001e-16\n      - 4.440892098501e-16\n      - -7.771561172376e-16\n      - -1.33226762955e-15\n      - -1.33226762955e-15\n      - -4.440892098501e-16\n      - -8.881784197001e-16\n      - -1.443289932013e-15\n      - -8.881784197001e-16\n      - -2.081668171172e-16\n      - -2.775557561563e-17\n      - -3.330669073875e-16\n      - 3.552713678801e-15\n      - -5.329070518201e-14\n      - -2.48689957516e-14\n      - -7.105427357601e-15\n      - -5.329070518201e-15\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - -7.771561172376e-16\n      - -8.881784197001e-16\n      - 2.22044604925e-16\n      - -1.7763568394e-15\n      - 
2.22044604925e-16\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - -1.110223024625e-16\n      - -1.665334536938e-16\n      - 5.551115123126e-17\n      - 7.105427357601e-15\n      - -3.19744231092e-14\n      - -1.7763568394e-15\n      - 4.440892098501e-16\n      - -5.773159728051e-15\n      - -3.552713678801e-15\n      - -4.440892098501e-16\n      - 3.552713678801e-15\n      - 1.59872115546e-14\n      - -8.881784197001e-16\n      - 1.33226762955e-15\n      - 2.6645352591e-15\n      - 0.0\n      - -3.330669073875e-16\n      - 5.329070518201e-15\n      - 1.7763568394e-15\n      - -3.552713678801e-15\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - -1.7763568394e-15\n      - -4.440892098501e-16\n      - 3.552713678801e-15\n      - 8.881784197001e-15\n      - -2.6645352591e-15\n      - 5.773159728051e-15\n      - -4.440892098501e-16\n      - 2.6645352591e-15\n      - -5.773159728051e-15\n      - -4.440892098501e-16\n      - 5.329070518201e-15\n      - -3.730349362741e-14\n      - 8.881784197001e-15\n      - -2.6645352591e-15\n      - -1.110223024625e-16\n      - 9.436895709314e-16\n      - -1.7763568394e-15\n      - -1.33226762955e-15\n      - -8.881784197001e-16\n      - -4.440892098501e-16\n      - 2.22044604925e-16\n      - 5.329070518201e-15\n      - -3.01980662698e-14\n      - 8.881784197001e-16\n      - -1.7763568394e-15\n      - -1.33226762955e-15\n      - 0.0\n      - -1.7763568394e-15\n      - 2.6645352591e-15\n      - 0.0\n      - -3.330669073875e-16\n      - -4.440892098501e-16\n      - 0.0\n      - -5.151434834261e-14\n      - 5.329070518201e-15\n      - -2.6645352591e-15\n      - -1.998401444325e-15\n      - 4.996003610813e-16\n      - -4.440892098501e-16\n      - -3.996802888651e-15\n      - 0.0\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - 3.552713678801e-15\n      - -8.881784197001e-15\n      - -3.01980662698e-14\n      - 5.329070518201e-15\n      - -1.42108547152e-14\n      - -8.881784197001e-16\n 
     - 1.110223024625e-16\n      - -4.440892098501e-16\n      - 6.661338147751e-16\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - -2.6645352591e-15\n      - 0.0\n      - 0.0\n      - -2.22044604925e-16\n      - -7.105427357601e-15\n      - -3.552713678801e-15\n      - 8.881784197001e-16\n      - -8.881784197001e-16\n      - -3.10862446895e-15\n      - -8.881784197001e-16\n      - -6.217248937901e-15\n      - -2.13162820728e-14\n      - 5.329070518201e-15\n      - -5.329070518201e-15\n      - -1.7763568394e-15\n      - 0.0\n      - -1.7763568394e-15\n      - -8.881784197001e-16\n      - 2.22044604925e-15\n      - 4.440892098501e-16\n      - 8.881784197001e-16\n      - 0.0\n      - 7.105427357601e-15\n      - -1.59872115546e-14\n      - 7.993605777301e-15\n      - 2.22044604925e-15\n      - 4.440892098501e-16\n      - -9.992007221626e-16\n      - 0.0\n      - 2.22044604925e-16\n      - -6.661338147751e-15\n      - 0.0\n      - 0.0\n      - 8.881784197001e-16\n      - 7.105427357601e-15\n      - 1.06581410364e-14\n      - 4.440892098501e-16\n      - -2.442490654175e-15\n      - 8.881784197001e-16\n      - -8.881784197001e-16\n      - -1.33226762955e-15\n      - 0.0\n      - 2.22044604925e-15\n      - -6.661338147751e-16\n      - -3.330669073875e-16\n      - 0.0\n      - 8.881784197001e-16\n      - 7.105427357601e-15\n      - 8.881784197001e-15\n      - 0.0\n      - -1.7763568394e-15\n      - -7.910339050454e-16\n      - -5.329070518201e-15\n      - -2.22044604925e-16\n      - 2.22044604925e-15\n      - 0.0\n      - -3.996802888651e-15\n      - 1.998401444325e-15\n      - -2.22044604925e-15\n      - 1.06581410364e-14\n      - -3.01980662698e-14\n      - 0.0\n      - -8.881784197001e-16\n      - -4.440892098501e-16\n      - -8.881784197001e-16\n      - -2.22044604925e-15\n      - -3.552713678801e-15\n      - 0.0\n      - -3.552713678801e-14\n      - -4.440892098501e-16\n      - 3.330669073875e-16\n      - -2.22044604925e-15\n      - 0.0\n  -   - 
-2.453347747178e-08\n      - -4.890711124972e-08\n      - 3.971540252223e-08\n      - -4.215081617076e-10\n      - -2.531166387598e-09\n      - -5.090015520182e-09\n      - -5.402824854173e-09\n      - -5.326945995421e-09\n      - -5.406533887253e-09\n      - -5.303881778218e-09\n      - -9.95139259885e-09\n      - -4.664002517529e-11\n      - -1.169468077933e-09\n      - 3.465174813755e-10\n      - -1.009041739053e-09\n      - -9.316067917097e-10\n      - -8.316902722072e-10\n      - -2.457070991113e-08\n      - -4.9003332947e-08\n      - 3.969984163632e-08\n      - -4.210960469209e-10\n      - -2.520792463656e-09\n      - -5.072479325463e-09\n      - -5.43165867839e-09\n      - -5.325112795163e-09\n      - -5.43165867839e-09\n      - -5.320373475115e-09\n      - -9.971742542803e-09\n      - -4.661160346586e-11\n      - -1.165034291262e-09\n      - 3.48336470779e-10\n      - -1.009041739053e-09\n      - -9.349250262858e-10\n      - -8.319460675921e-10\n      - -2.462979864504e-08\n      - -4.874540593391e-08\n      - 3.969002193571e-08\n      - -4.214655291435e-10\n      - -2.529361609049e-09\n      - -5.086377541375e-09\n      - -5.424837468126e-09\n      - -5.313083306646e-09\n      - -5.429683369584e-09\n      - -5.316017848145e-09\n      - -9.926125699167e-09\n      - -4.666844688472e-11\n      - -1.168785956907e-09\n      - 3.474269760773e-10\n      - -1.012267603073e-09\n      - -9.336105222246e-10\n      - -8.307345922276e-10\n      - -2.453501224409e-08\n      - -4.864099878432e-08\n      - 3.969411466187e-08\n      - -4.200586545267e-10\n      - -2.522682507333e-09\n      - -5.086377541375e-09\n      - -5.417248871709e-09\n      - -5.313083306646e-09\n      - -5.43165867839e-09\n      - -5.325112795163e-09\n      - -9.905932074616e-09\n      - -4.685318799602e-11\n      - -1.16909859571e-09\n      - 3.474269760773e-10\n      - -1.011144945551e-09\n      - -9.30121757392e-10\n      - -8.32123703276e-10\n      - -2.455203684804e-08\n      - 
3.975333129347e-08\n      - -5.494243282556e-09\n      - 5.58331691991e-09\n      - 5.584197992903e-09\n      - 5.577660999734e-09\n      - -7.832063886326e-10\n      - -2.462979864504e-08\n      - 3.968375494878e-08\n      - -5.482178266902e-09\n      - 5.567116545535e-09\n      - 5.582947437688e-09\n      - 5.582720064012e-09\n      - -7.826947978629e-10\n      - -2.453300851357e-08\n      - 3.997169528702e-08\n      - -5.48229195374e-09\n      - 5.582549533756e-09\n      - 5.58533486128e-09\n      - 5.58033264042e-09\n      - -7.824034753412e-10\n      - -2.462979864504e-08\n      - 4.000852982244e-08\n      - -5.459440899358e-09\n      - 5.578741024692e-09\n      - -7.832063886326e-10\n      - 5.582947437688e-09\n      - 5.584197992903e-09\n      - -7.824034753412e-10\n      - -2.453300851357e-08\n      - 3.997578801318e-08\n      - -2.534619625294e-09\n      - -5.472671205098e-09\n      - -5.292946525515e-09\n      - -5.289514604101e-09\n      - -5.893511456634e-09\n      - 5.593420837613e-09\n      - -1.311946107307e-09\n      - -9.763994057721e-10\n      - -7.807834379037e-10\n      - -2.454028447119e-08\n      - 3.997533326583e-08\n      - -2.518589781175e-09\n      - -5.499970257006e-09\n      - -5.27222709934e-09\n      - -5.289443549827e-09\n      - -5.889233989365e-09\n      - 5.582947437688e-09\n      - -1.31279875859e-09\n      - -9.767973097040e-10\n      - -7.801634893667e-10\n      - -2.450508418406e-08\n      - 3.997578801318e-08\n      - -2.517722919038e-09\n      - -5.481609832714e-09\n      - -5.282728920974e-09\n      - -5.276390879771e-09\n      - -5.921897638927e-09\n      - 5.568864480665e-09\n      - -1.316720954492e-09\n      - -9.741683015817e-10\n      - -7.849578764763e-10\n      - -2.462979864504e-08\n      - -4.892184790606e-08\n      - 3.997533326583e-08\n      - -4.198312808512e-10\n      - -2.525382569729e-09\n      - -5.087684940008e-09\n      - -5.325020424607e-09\n      - -5.321375340372e-09\n      - -9.905946285471e-09\n      
- -4.662581432058e-11\n      - -1.165076923826e-09\n      - 3.474269760773e-10\n      - -1.008160666061e-09\n      - -9.312302040598e-10\n      - -8.287912578453e-10\n      - 5.564075422626e-09\n      - 5.566761274167e-09\n      - 5.577334150075e-09\n      - 5.58533486128e-09\n      - 5.575742534347e-09\n      - 5.589768647951e-09\n      - -2.458268966166e-08\n      - 3.971631201694e-08\n      - -2.517722919038e-09\n      - -5.472060138345e-09\n      - -5.253625090518e-09\n      - -5.896666266381e-09\n      - -5.281549420033e-09\n      - -5.911225287036e-09\n      - 5.58955548513e-09\n      - -1.31430510919e-09\n      - -9.776002229954e-10\n      - -7.833289572545e-10\n      - -2.456549452745e-08\n      - 3.969729789333e-08\n      - -2.525453624003e-09\n      - -5.481140874508e-09\n      - -5.259096269583e-09\n      - -5.908617595196e-09\n      - -5.280242021399e-09\n      - -5.902698774207e-09\n      - 5.579806838796e-09\n      - -1.316578845945e-09\n      - -9.776002229954e-10\n      - -7.816733926802e-10\n      - -2.458754977397e-08\n      - 3.999780062713e-08\n      - -2.523478315197e-09\n      - -5.481275877628e-09\n      - -5.276277192934e-09\n      - -5.912788481055e-09\n      - -5.252047685644e-09\n      - -5.904489341901e-09\n      - 5.580417905549e-09\n      - -1.312287167821e-09\n      - -9.767973097040e-10\n      - -7.796998602316e-10\n      - 5.577490469477e-09\n      - -2.470176241332e-08\n      - 4.000852982244e-08\n      - -5.468805852615e-09\n      - 5.58033264042e-09\n      - -7.824567660464e-10\n      - 5.57135138024e-09\n      - -7.82140574529e-10\n      - 5.586642259914e-09\n      - 5.582720064012e-09\n      - 5.568864480665e-09\n      - -5.405695446825e-09\n      - -5.398412383784e-09\n      - -2.459630366047e-08\n      - 3.965256212268e-08\n      - -5.406420200416e-09\n      - -5.417355453119e-09\n      - -9.949395973763e-09\n      - -1.168757535197e-09\n      - -5.401048497333e-09\n      - -5.409134473666e-09\n      - -2.456927461481e-08\n   
   - 3.975651452492e-08\n      - -9.926125699167e-09\n      - -1.167023810922e-09\n      - -5.401048497333e-09\n      - -5.400458746863e-09\nlinear_terms_expected:\n  -   - -9.787418445994\n      - -22.54377112295\n      - -9.534168263258\n  -   - -10.78162277843\n      - -24.83376357139\n      - -10.50264745927\n  -   - -10.20110565515\n      - -23.49663439001\n      - -9.937151359945\n  -   - -10.98873563222\n      - -25.31081542293\n      - -10.70440130878\n  -   - -10.99335463641\n      - -25.3214545631\n      - -10.70890079348\n  -   - -10.68909540507\n      - -24.62064151412\n      - -10.41251428865\n  -   - -9.405382108517\n      - -21.66381086468\n      - -9.162017165394\n  -   - -8.053437460602\n      - -18.54982008449\n      - -7.845054182336\n  -   - -9.405382108518\n      - -21.66381086468\n      - -9.162017165391\n  -   - -8.053437460579\n      - -18.5498200845\n      - -7.845054182338\n  -   - -7.201412728984\n      - -16.587315791\n      - -7.015075646482\n  -   - -5.482178202194\n      - -12.62733086319\n      - -5.340326447796\n  -   - -4.683343670952\n      - -10.78734179755\n      - -4.56216181608\n  -   - -6.699455120896\n      - -15.43113581877\n      - -6.526106239334\n  -   - -3.676712397266\n      - -8.46872578782\n      - -3.581577199013\n  -   - -3.14429118694\n      - -7.242377695456\n      - -3.062932425796\n  -   - -2.721950037932\n      - -6.269581622886\n      - -2.651519384438\n  -   - -9.787418446035\n      - -22.54377112294\n      - -9.534168263254\n  -   - -10.78162277851\n      - -24.83376357135\n      - -10.50264745926\n  -   - -10.20110565517\n      - -23.49663439\n      - -9.937151359944\n  -   - -10.98873563222\n      - -25.31081542293\n      - -10.70440130878\n  -   - -10.9933546364\n      - -25.3214545631\n      - -10.70890079348\n  -   - -10.68909540505\n      - -24.62064151412\n      - -10.41251428865\n  -   - -9.405382108542\n      - -21.66381086467\n      - -9.162017165389\n  -   - -8.053437460596\n      - 
-18.54982008448\n      - -7.845054182333\n  -   - -9.405382108542\n      - -21.66381086467\n      - -9.162017165389\n  -   - -8.053437460588\n      - -18.54982008448\n      - -7.845054182332\n  -   - -7.201412729004\n      - -16.58731579099\n      - -7.015075646479\n  -   - -5.482178202195\n      - -12.6273308632\n      - -5.340326447797\n  -   - -4.683343670949\n      - -10.78734179755\n      - -4.562161816081\n  -   - -6.699455120895\n      - -15.43113581877\n      - -6.526106239334\n  -   - -3.676712397266\n      - -8.46872578782\n      - -3.581577199013\n  -   - -3.144291186945\n      - -7.242377695458\n      - -3.062932425798\n  -   - -2.721950037932\n      - -6.269581622886\n      - -2.651519384437\n  -   - -9.787418446088\n      - -22.54377112291\n      - -9.534168263241\n  -   - -10.78162277827\n      - -24.83376357148\n      - -10.50264745931\n  -   - -10.20110565518\n      - -23.49663439\n      - -9.937151359945\n  -   - -10.98873563222\n      - -25.31081542292\n      - -10.70440130878\n  -   - -10.99335463641\n      - -25.32145456311\n      - -10.70890079349\n  -   - -10.68909540507\n      - -24.62064151412\n      - -10.41251428865\n  -   - -9.405382108538\n      - -21.66381086467\n      - -9.162017165392\n  -   - -8.053437460588\n      - -18.54982008449\n      - -7.845054182336\n  -   - -9.405382108543\n      - -21.66381086467\n      - -9.16201716539\n  -   - -8.053437460585\n      - -18.54982008448\n      - -7.845054182331\n  -   - -7.201412728965\n      - -16.58731579101\n      - -7.015075646488\n  -   - -5.482178202194\n      - -12.6273308632\n      - -5.340326447796\n  -   - -4.683343670951\n      - -10.78734179755\n      - -4.56216181608\n  -   - -6.699455120897\n      - -15.43113581877\n      - -6.526106239335\n  -   - -3.676712397267\n      - -8.468725787817\n      - -3.581577199011\n  -   - -3.144291186944\n      - -7.242377695458\n      - -3.062932425798\n  -   - -2.721950037931\n      - -6.269581622886\n      - -2.651519384438\n  -   - 
-9.787418445987\n      - -22.54377112295\n      - -9.534168263257\n  -   - -10.78162277819\n      - -24.83376357152\n      - -10.50264745933\n  -   - -10.20110565518\n      - -23.49663439\n      - -9.937151359945\n  -   - -10.98873563222\n      - -25.31081542293\n      - -10.70440130878\n  -   - -10.9933546364\n      - -25.3214545631\n      - -10.70890079348\n  -   - -10.68909540507\n      - -24.62064151412\n      - -10.41251428865\n  -   - -9.405382108534\n      - -21.66381086467\n      - -9.162017165392\n  -   - -8.053437460588\n      - -18.54982008449\n      - -7.845054182336\n  -   - -9.405382108542\n      - -21.66381086467\n      - -9.162017165389\n  -   - -8.053437460599\n      - -18.54982008449\n      - -7.845054182336\n  -   - -7.201412728951\n      - -16.58731579102\n      - -7.015075646493\n  -   - -5.482178202195\n      - -12.6273308632\n      - -5.340326447797\n  -   - -4.683343670951\n      - -10.78734179755\n      - -4.56216181608\n  -   - -6.699455120896\n      - -15.43113581877\n      - -6.526106239335\n  -   - -3.676712397269\n      - -8.468725787819\n      - -3.581577199013\n  -   - -3.144291186939\n      - -7.242377695458\n      - -3.062932425797\n  -   - -2.721950037932\n      - -6.269581622885\n      - -2.651519384437\n  -   - -9.787418446009\n      - -22.54377112295\n      - -9.534168263261\n  -   - -10.20110565512\n      - -23.49663439003\n      - -9.937151359956\n  -   - -10.09564030674\n      - -23.25371150768\n      - -9.834414883103\n  -   - -5.556847609202\n      - -12.79932005394\n      - -5.413063785002\n  -   - -5.5568476092\n      - -12.79932005394\n      - -5.413063785004\n  -   - -5.55684760921\n      - -12.79932005394\n      - -5.413063785003\n  -   - -2.538438570694\n      - -5.846891967821\n      - -2.472756289638\n  -   - -9.787418446088\n      - -22.54377112291\n      - -9.534168263241\n  -   - -10.20110565518\n      - -23.49663438999\n      - -9.937151359941\n  -   - -10.09564030673\n      - -23.25371150769\n      - 
-9.834414883109\n  -   - -5.556847609218\n      - -12.79932005394\n      - -5.413063785002\n  -   - -5.556847609206\n      - -12.79932005394\n      - -5.413063785005\n  -   - -5.556847609203\n      - -12.79932005394\n      - -5.413063785005\n  -   - -2.538438570694\n      - -5.846891967822\n      - -2.472756289638\n  -   - -9.787418445993\n      - -22.54377112296\n      - -9.534168263264\n  -   - -10.20110565492\n      - -23.49663439014\n      - -9.93715136\n  -   - -10.09564030673\n      - -23.25371150769\n      - -9.834414883108\n  -   - -5.556847609201\n      - -12.79932005394\n      - -5.413063785003\n  -   - -5.556847609201\n      - -12.79932005394\n      - -5.413063785005\n  -   - -5.556847609204\n      - -12.79932005394\n      - -5.413063785003\n  -   - -2.538438570694\n      - -5.846891967822\n      - -2.472756289638\n  -   - -9.787418446088\n      - -22.54377112291\n      - -9.534168263241\n  -   - -10.20110565488\n      - -23.49663439015\n      - -9.937151360001\n  -   - -10.09564030671\n      - -23.2537115077\n      - -9.834414883111\n  -   - -5.556847609208\n      - -12.79932005394\n      - -5.413063785002\n  -   - -2.538438570694\n      - -5.846891967821\n      - -2.472756289638\n  -   - -5.556847609206\n      - -12.79932005394\n      - -5.413063785005\n  -   - -5.5568476092\n      - -12.79932005394\n      - -5.413063785004\n  -   - -2.538438570694\n      - -5.846891967822\n      - -2.472756289638\n  -   - -9.787418445993\n      - -22.54377112296\n      - -9.534168263264\n  -   - -10.20110565491\n      - -23.49663439014\n      - -9.937151360001\n  -   - -10.99335463642\n      - -25.3214545631\n      - -10.70890079349\n  -   - -10.09564030672\n      - -23.25371150769\n      - -9.834414883108\n  -   - -8.709021129272\n      - -20.05985343038\n      - -8.483674576579\n  -   - -8.709021129268\n      - -20.05985343037\n      - -8.483674576577\n  -   - -7.480399516444\n      - -17.22991776468\n      - -7.28684363545\n  -   - -5.556847609193\n      - 
-12.79932005395\n      - -5.413063785006\n  -   - -4.20144396755\n      - -9.677362009312\n      - -4.092731301918\n  -   - -3.391504667746\n      - -7.81179486874\n      - -3.303749239997\n  -   - -2.538438570692\n      - -5.846891967823\n      - -2.472756289639\n  -   - -9.787418445998\n      - -22.54377112296\n      - -9.534168263261\n  -   - -10.20110565491\n      - -23.49663439014\n      - -9.937151359998\n  -   - -10.9933546364\n      - -25.32145456311\n      - -10.70890079349\n  -   - -10.09564030674\n      - -23.25371150767\n      - -9.834414883101\n  -   - -8.709021129253\n      - -20.05985343039\n      - -8.483674576584\n  -   - -8.709021129268\n      - -20.05985343037\n      - -8.483674576576\n  -   - -7.48039951644\n      - -17.22991776469\n      - -7.28684363545\n  -   - -5.556847609206\n      - -12.79932005394\n      - -5.413063785005\n  -   - -4.20144396755\n      - -9.67736200931\n      - -4.092731301916\n  -   - -3.391504667747\n      - -7.81179486874\n      - -3.303749239997\n  -   - -2.538438570692\n      - -5.846891967823\n      - -2.472756289639\n  -   - -9.787418445957\n      - -22.54377112297\n      - -9.534168263268\n  -   - -10.20110565491\n      - -23.49663439014\n      - -9.937151360001\n  -   - -10.9933546364\n      - -25.32145456311\n      - -10.70890079349\n  -   - -10.09564030673\n      - -23.25371150769\n      - -9.834414883109\n  -   - -8.709021129265\n      - -20.05985343038\n      - -8.483674576582\n  -   - -8.709021129256\n      - -20.05985343039\n      - -8.483674576583\n  -   - -7.480399516473\n      - -17.22991776467\n      - -7.286843635444\n  -   - -5.556847609215\n      - -12.79932005393\n      - -5.413063784999\n  -   - -4.201443967553\n      - -9.677362009307\n      - -4.092731301915\n  -   - -3.391504667745\n      - -7.811794868741\n      - -3.303749239997\n  -   - -2.538438570696\n      - -5.846891967821\n      - -2.472756289638\n  -   - -9.787418446088\n      - -22.54377112291\n      - -9.534168263241\n  -   - 
-10.78162277845\n      - -24.83376357139\n      - -10.50264745927\n  -   - -10.20110565491\n      - -23.49663439014\n      - -9.937151359998\n  -   - -10.98873563222\n      - -25.31081542293\n      - -10.70440130878\n  -   - -10.99335463641\n      - -25.3214545631\n      - -10.70890079348\n  -   - -10.68909540506\n      - -24.62064151411\n      - -10.41251428864\n  -   - -8.053437460599\n      - -18.54982008449\n      - -7.845054182335\n  -   - -8.053437460597\n      - -18.54982008449\n      - -7.845054182339\n  -   - -7.201412728949\n      - -16.58731579102\n      - -7.015075646491\n  -   - -5.482178202193\n      - -12.62733086319\n      - -5.340326447795\n  -   - -4.683343670948\n      - -10.78734179755\n      - -4.562161816081\n  -   - -6.699455120895\n      - -15.43113581877\n      - -6.526106239335\n  -   - -3.676712397264\n      - -8.468725787819\n      - -3.581577199012\n  -   - -3.144291186941\n      - -7.242377695458\n      - -3.062932425797\n  -   - -2.721950037928\n      - -6.269581622887\n      - -2.651519384438\n  -   - -5.556847609219\n      - -12.79932005393\n      - -5.413063785001\n  -   - -5.556847609216\n      - -12.79932005393\n      - -5.413063784998\n  -   - -5.556847609211\n      - -12.79932005394\n      - -5.413063785002\n  -   - -5.556847609201\n      - -12.79932005394\n      - -5.413063785005\n  -   - -5.556847609211\n      - -12.79932005394\n      - -5.413063785004\n  -   - -5.556847609195\n      - -12.79932005395\n      - -5.413063785005\n  -   - -9.787418446041\n      - -22.54377112293\n      - -9.534168263253\n  -   - -10.20110565515\n      - -23.49663439001\n      - -9.93715135995\n  -   - -10.9933546364\n      - -25.32145456311\n      - -10.70890079349\n  -   - -10.09564030672\n      - -23.25371150769\n      - -9.834414883109\n  -   - -8.709021129238\n      - -20.0598534304\n      - -8.48367457659\n  -   - -7.480399516449\n      - -17.22991776469\n      - -7.286843635451\n  -   - -8.709021129259\n      - -20.05985343038\n      - 
-8.483674576582\n  -   - -7.480399516461\n      - -17.22991776468\n      - -7.286843635449\n  -   - -5.556847609196\n      - -12.79932005395\n      - -5.413063785006\n  -   - -4.201443967551\n      - -9.67736200931\n      - -4.092731301917\n  -   - -3.391504667748\n      - -7.81179486874\n      - -3.303749239997\n  -   - -2.538438570695\n      - -5.846891967823\n      - -2.472756289639\n  -   - -9.78741844603\n      - -22.54377112294\n      - -9.534168263253\n  -   - -10.20110565517\n      - -23.49663439\n      - -9.937151359944\n  -   - -10.9933546364\n      - -25.3214545631\n      - -10.70890079348\n  -   - -10.09564030673\n      - -23.25371150769\n      - -9.834414883112\n  -   - -8.70902112924\n      - -20.05985343039\n      - -8.483674576586\n  -   - -7.480399516459\n      - -17.22991776468\n      - -7.286843635448\n  -   - -8.70902112926\n      - -20.05985343038\n      - -8.483674576581\n  -   - -7.48039951645\n      - -17.22991776468\n      - -7.286843635449\n  -   - -5.556847609209\n      - -12.79932005394\n      - -5.413063785003\n  -   - -4.201443967552\n      - -9.677362009307\n      - -4.092731301915\n  -   - -3.391504667747\n      - -7.811794868738\n      - -3.303749239996\n  -   - -2.538438570693\n      - -5.846891967823\n      - -2.472756289639\n  -   - -9.787418446052\n      - -22.54377112293\n      - -9.53416826325\n  -   - -10.20110565489\n      - -23.49663439015\n      - -9.937151360001\n  -   - -10.9933546364\n      - -25.3214545631\n      - -10.70890079348\n  -   - -10.09564030673\n      - -23.25371150769\n      - -9.834414883108\n  -   - -8.709021129257\n      - -20.05985343039\n      - -8.483674576583\n  -   - -7.480399516466\n      - -17.22991776468\n      - -7.28684363545\n  -   - -8.709021129236\n      - -20.0598534304\n      - -8.48367457659\n  -   - -7.480399516456\n      - -17.22991776468\n      - -7.286843635447\n  -   - -5.556847609203\n      - -12.79932005394\n      - -5.413063785001\n  -   - -4.201443967549\n      - 
-9.677362009311\n      - -4.092731301917\n  -   - -3.391504667747\n      - -7.81179486874\n      - -3.303749239997\n  -   - -2.538438570692\n      - -5.846891967825\n      - -2.47275628964\n  -   - -5.556847609209\n      - -12.79932005394\n      - -5.413063785004\n  -   - -9.787418446139\n      - -22.54377112288\n      - -9.534168263229\n  -   - -10.20110565488\n      - -23.49663439015\n      - -9.937151360001\n  -   - -10.09564030672\n      - -23.25371150769\n      - -9.834414883111\n  -   - -5.556847609204\n      - -12.79932005394\n      - -5.413063785003\n  -   - -2.538438570693\n      - -5.846891967821\n      - -2.472756289637\n  -   - -5.556847609215\n      - -12.79932005393\n      - -5.413063785001\n  -   - -2.538438570694\n      - -5.846891967823\n      - -2.472756289639\n  -   - -5.5568476092\n      - -12.79932005394\n      - -5.413063785004\n  -   - -5.556847609203\n      - -12.79932005394\n      - -5.413063785005\n  -   - -5.556847609215\n      - -12.79932005393\n      - -5.413063784999\n  -   - -9.405382108518\n      - -21.66381086468\n      - -9.162017165391\n  -   - -9.40538210851\n      - -21.66381086468\n      - -9.162017165394\n  -   - -9.787418446052\n      - -22.54377112293\n      - -9.53416826325\n  -   - -10.20110565522\n      - -23.49663438999\n      - -9.937151359941\n  -   - -9.405382108525\n      - -21.66381086468\n      - -9.162017165398\n  -   - -9.405382108532\n      - -21.66381086467\n      - -9.16201716539\n  -   - -7.201412728992\n      - -16.587315791\n      - -7.015075646484\n  -   - -4.683343670952\n      - -10.78734179755\n      - -4.56216181608\n  -   - -9.405382108516\n      - -21.66381086468\n      - -9.162017165392\n  -   - -9.405382108524\n      - -21.66381086468\n      - -9.162017165394\n  -   - -9.787418446016\n      - -22.54377112294\n      - -9.534168263253\n  -   - -10.20110565512\n      - -23.49663439003\n      - -9.937151359954\n  -   - -7.201412728965\n      - -16.58731579101\n      - -7.015075646488\n  -   - 
-4.68334367095\n      - -10.78734179755\n      - -4.56216181608\n  -   - -9.405382108516\n      - -21.66381086468\n      - -9.162017165392\n  -   - -9.405382108518\n      - -21.66381086468\n      - -9.162017165395\nlower_triangular:\n  -   - -0.1507316305838\n      - 0.3652114026571\n      - -0.009929602309966\n      - -0.01905060831947\n      - 0.003262911080217\n      - -0.01695118100081\n      - -0.0002604899716981\n  -   - 0.1044075404358\n      - -0.2033554793648\n      - -0.04901244210073\n      - 0.002419549056951\n      - -0.000600520783894\n      - 0.008574072391572\n      - 0.001713650401476\n  -   - 0.08359492814255\n      - -0.1891075948275\n      - 0.003335257628927\n      - 0.0118310729907\n      - -0.003470346928518\n      - 0.01057835837333\n      - -0.002245237316082\n  -   - -0.7199325351227\n      - -0.1628252921563\n      - 1.64250205349\n      - 0.2023897915246\n      - -0.01194347807638\n      - 0.08702438194994\n      - 0.008210075299701\n  -   - -0.07653108379319\n      - 0.06625813621784\n      - 0.1592803660839\n      - 0.02907110965794\n      - -0.01119440066196\n      - 0.01377712744979\n      - -0.01253655518419\n  -   - -0.04476255198824\n      - 0.03879496991897\n      - -0.001710465543054\n      - -0.01718375258754\n      - 0.01249119968731\n      - -0.01285690490792\n      - 0.0138241209585\nmonomial_basis:\n  -   - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n  -   - 0.000421998138113\n      - 0.003227454378072\n      - -0.005190483660171\n      - 0.01234183379235\n      - -0.02806999966988\n      - 0.0319208998729\n  -   - 0.0002235545535664\n      - 0.001541433438831\n      - -0.002293087033098\n      - 0.005314177252134\n      - -0.01118012852874\n      - 0.01176054805746\n  -   - 0.39605157772\n      - -0.2263871121141\n      - -0.2073114454022\n      - 0.06470258851941\n      - 0.08379309531591\n      - 0.05425812925951\n  -   - 0.0001709597243704\n      - 0.001302429203959\n      - 
-0.002062390549796\n      - 0.004961173860017\n      - -0.01111005551172\n      - 0.01243993225759\n  -   - 0.0009747214789788\n      - 0.002943599394654\n      - -0.0004126754404125\n      - 0.004444745285232\n      - -0.000881235114793\n      - 8.735881110268e-05\n  -   - 0.0009555976207896\n      - -0.05886419129446\n      - -0.003901308645878\n      - 1.81299792997\n      - 0.1699305387922\n      - 0.007963712350931\nn_modelpoints: 7\nsquare_terms_expected:\n  -   -   - -5.470646952694\n          - 10.97753789279\n          - -59.18627305619\n      -   - 10.97753789279\n          - -18.22671503454\n          - -45.74708648663\n      -   - -59.18627305619\n          - -45.74708648663\n          - -184.4361625451\n  -   -   - -6.026354345269\n          - 12.09263433612\n          - -65.19840466722\n      -   - 12.09263433612\n          - -20.07818166598\n          - -50.39406779834\n      -   - -65.19840466722\n          - -50.39406779834\n          - -203.1711567547\n  -   -   - -5.701876161034\n          - 11.44152824299\n          - -61.68791411975\n      -   - 11.44152824299\n          - -18.99710817628\n          - -47.68069007599\n      -   - -61.68791411975\n          - -47.68069007599\n          - -192.2317721381\n  -   -   - -6.142119454382\n          - 12.32493151818\n          - -66.45085349382\n      -   - 12.32493151818\n          - -20.46387967883\n          - -51.36212815726\n      -   - -66.45085349382\n          - -51.36212815726\n          - -207.0740356415\n  -   -   - -6.144701231192\n          - 12.33011217653\n          - -66.4787854274\n      -   - 12.33011217653\n          - -20.47248146039\n          - -51.3837177002\n      -   - -66.4787854274\n          - -51.3837177002\n          - -207.1610770849\n  -   -   - -5.974636482892\n          - 11.98885597141\n          - -64.63887531859\n      -   - 11.98885597141\n          - -19.90587174644\n          - -49.96158850494\n      -   - -64.63887531859\n          - -49.96158850494\n          - 
-201.4275523606\n  -   -   - -5.257108945726\n          - 10.54904715237\n          - -56.8760309773\n      -   - 10.54904715237\n          - -17.51526418892\n          - -43.96142169055\n      -   - -56.8760309773\n          - -43.96142169055\n          - -177.2369901437\n  -   -   - -4.501443708068\n          - 9.032710263457\n          - -48.70057942984\n      -   - 9.032710263457\n          - -14.9975921352\n          - -37.642336712\n      -   - -48.70057942984\n          - -37.642336712\n          - -151.7606620589\n  -   -   - -5.257108945726\n          - 10.54904715237\n          - -56.8760309773\n      -   - 10.54904715237\n          - -17.51526418892\n          - -43.96142169054\n      -   - -56.8760309773\n          - -43.96142169054\n          - -177.2369901437\n  -   -   - -4.501443708068\n          - 9.032710263457\n          - -48.70057942984\n      -   - 9.032710263457\n          - -14.9975921352\n          - -37.642336712\n      -   - -48.70057942984\n          - -37.642336712\n          - -151.7606620589\n  -   -   - -4.025207143397\n          - 8.077081984078\n          - -43.54823317181\n      -   - 8.077081984078\n          - -13.41090079341\n          - -33.65991278671\n      -   - -43.54823317181\n          - -33.65991278671\n          - -135.7049294898\n  -   -   - -3.06424638066\n          - 6.148793926422\n          - -33.1517139682\n      -   - 6.148793926422\n          - -10.20923961279\n          - -25.62408896111\n      -   - -33.1517139682\n          - -25.62408896111\n          - -103.3073141862\n  -   -   - -2.617740314238\n          - 5.252823613251\n          - -28.32101840387\n      -   - 5.252823613251\n          - -8.721602244758\n          - -21.89027981319\n      -   - -28.32101840387\n          - -21.89027981319\n          - -88.25390895707\n  -   -   - -3.744639512008\n          - 7.514087911923\n          - -40.51280562804\n      -   - 7.514087911923\n          - -12.47612538042\n          - -31.31372744325\n      -   - 
-40.51280562804\n          - -31.31372744325\n          - -126.2459353865\n  -   -   - -2.05508690845\n          - 4.123789125024\n          - -22.23373870938\n      -   - 4.123789125024\n          - -6.846993376853\n          - -17.18521398846\n      -   - -22.23373870938\n          - -17.18521398846\n          - -69.28473841762\n  -   -   - -1.757491736179\n          - 3.526627160717\n          - -19.0140922437\n      -   - 3.526627160717\n          - -5.855486805939\n          - -14.69663956546\n      -   - -19.0140922437\n          - -14.69663956546\n          - -59.25168162552\n  -   -   - -1.52142547032\n          - 3.052930648937\n          - -16.46011963474\n      -   - 3.052930648937\n          - -5.068977898606\n          - -12.72258714093\n      -   - -16.46011963474\n          - -12.72258714093\n          - -51.29299656357\n  -   -   - -5.470646952694\n          - 10.97753789279\n          - -59.18627305619\n      -   - 10.97753789279\n          - -18.22671503454\n          - -45.74708648663\n      -   - -59.18627305619\n          - -45.74708648663\n          - -184.4361625451\n  -   -   - -6.026354345269\n          - 12.09263433613\n          - -65.19840466724\n      -   - 12.09263433613\n          - -20.07818166599\n          - -50.39406779837\n      -   - -65.19840466724\n          - -50.39406779837\n          - -203.1711567547\n  -   -   - -5.701876161034\n          - 11.44152824299\n          - -61.68791411974\n      -   - 11.44152824299\n          - -18.99710817628\n          - -47.68069007598\n      -   - -61.68791411974\n          - -47.68069007598\n          - -192.231772138\n  -   -   - -6.142119454382\n          - 12.32493151818\n          - -66.45085349382\n      -   - 12.32493151818\n          - -20.46387967883\n          - -51.36212815727\n      -   - -66.45085349382\n          - -51.36212815727\n          - -207.0740356415\n  -   -   - -6.144701231193\n          - 12.33011217653\n          - -66.47878542741\n      -   - 12.33011217653\n   
       - -20.47248146039\n          - -51.3837177002\n      -   - -66.47878542741\n          - -51.3837177002\n          - -207.1610770849\n  -   -   - -5.974636482893\n          - 11.98885597141\n          - -64.63887531859\n      -   - 11.98885597141\n          - -19.90587174644\n          - -49.96158850494\n      -   - -64.63887531859\n          - -49.96158850494\n          - -201.4275523606\n  -   -   - -5.257108945726\n          - 10.54904715237\n          - -56.8760309773\n      -   - 10.54904715237\n          - -17.51526418892\n          - -43.96142169055\n      -   - -56.8760309773\n          - -43.96142169055\n          - -177.2369901437\n  -   -   - -4.501443708068\n          - 9.032710263457\n          - -48.70057942984\n      -   - 9.032710263457\n          - -14.9975921352\n          - -37.642336712\n      -   - -48.70057942984\n          - -37.642336712\n          - -151.7606620589\n  -   -   - -5.257108945726\n          - 10.54904715237\n          - -56.8760309773\n      -   - 10.54904715237\n          - -17.51526418892\n          - -43.96142169055\n      -   - -56.8760309773\n          - -43.96142169055\n          - -177.2369901437\n  -   -   - -4.501443708068\n          - 9.032710263457\n          - -48.70057942984\n      -   - 9.032710263457\n          - -14.9975921352\n          - -37.642336712\n      -   - -48.70057942984\n          - -37.642336712\n          - -151.7606620589\n  -   -   - -4.025207143397\n          - 8.077081984077\n          - -43.54823317181\n      -   - 8.077081984077\n          - -13.41090079341\n          - -33.65991278671\n      -   - -43.54823317181\n          - -33.65991278671\n          - -135.7049294897\n  -   -   - -3.06424638066\n          - 6.148793926422\n          - -33.1517139682\n      -   - 6.148793926422\n          - -10.20923961279\n          - -25.62408896111\n      -   - -33.1517139682\n          - -25.62408896111\n          - -103.3073141862\n  -   -   - -2.617740314238\n          - 5.252823613251\n       
   - -28.32101840387\n      -   - 5.252823613251\n          - -8.721602244759\n          - -21.89027981319\n      -   - -28.32101840387\n          - -21.89027981319\n          - -88.25390895707\n  -   -   - -3.744639512008\n          - 7.514087911923\n          - -40.51280562804\n      -   - 7.514087911923\n          - -12.47612538042\n          - -31.31372744325\n      -   - -40.51280562804\n          - -31.31372744325\n          - -126.2459353865\n  -   -   - -2.05508690845\n          - 4.123789125024\n          - -22.23373870938\n      -   - 4.123789125024\n          - -6.846993376853\n          - -17.18521398846\n      -   - -22.23373870938\n          - -17.18521398846\n          - -69.28473841762\n  -   -   - -1.757491736179\n          - 3.526627160717\n          - -19.0140922437\n      -   - 3.526627160717\n          - -5.85548680594\n          - -14.69663956546\n      -   - -19.0140922437\n          - -14.69663956546\n          - -59.25168162552\n  -   -   - -1.521425470319\n          - 3.052930648937\n          - -16.46011963474\n      -   - 3.052930648937\n          - -5.068977898607\n          - -12.72258714093\n      -   - -16.46011963474\n          - -12.72258714093\n          - -51.29299656357\n  -   -   - -5.470646952694\n          - 10.97753789279\n          - -59.1862730562\n      -   - 10.97753789279\n          - -18.22671503454\n          - -45.74708648664\n      -   - -59.1862730562\n          - -45.74708648664\n          - -184.4361625451\n  -   -   - -6.026354345269\n          - 12.09263433612\n          - -65.19840466723\n      -   - 12.09263433612\n          - -20.07818166599\n          - -50.39406779835\n      -   - -65.19840466723\n          - -50.39406779835\n          - -203.1711567547\n  -   -   - -5.701876161033\n          - 11.44152824299\n          - -61.68791411974\n      -   - 11.44152824299\n          - -18.99710817628\n          - -47.68069007598\n      -   - -61.68791411974\n          - -47.68069007598\n          - 
-192.231772138\n  -   -   - -6.142119454382\n          - 12.32493151818\n          - -66.45085349382\n      -   - 12.32493151818\n          - -20.46387967883\n          - -51.36212815726\n      -   - -66.45085349382\n          - -51.36212815726\n          - -207.0740356415\n  -   -   - -6.144701231192\n          - 12.33011217653\n          - -66.47878542741\n      -   - 12.33011217653\n          - -20.47248146039\n          - -51.38371770021\n      -   - -66.47878542741\n          - -51.38371770021\n          - -207.1610770849\n  -   -   - -5.974636482892\n          - 11.98885597141\n          - -64.63887531859\n      -   - 11.98885597141\n          - -19.90587174644\n          - -49.96158850494\n      -   - -64.63887531859\n          - -49.96158850494\n          - -201.4275523606\n  -   -   - -5.257108945726\n          - 10.54904715237\n          - -56.8760309773\n      -   - 10.54904715237\n          - -17.51526418892\n          - -43.96142169055\n      -   - -56.8760309773\n          - -43.96142169055\n          - -177.2369901437\n  -   -   - -4.501443708068\n          - 9.032710263457\n          - -48.70057942984\n      -   - 9.032710263457\n          - -14.9975921352\n          - -37.642336712\n      -   - -48.70057942984\n          - -37.642336712\n          - -151.7606620589\n  -   -   - -5.257108945726\n          - 10.54904715237\n          - -56.8760309773\n      -   - 10.54904715237\n          - -17.51526418892\n          - -43.96142169055\n      -   - -56.8760309773\n          - -43.96142169055\n          - -177.2369901437\n  -   -   - -4.501443708068\n          - 9.032710263457\n          - -48.70057942984\n      -   - 9.032710263457\n          - -14.9975921352\n          - -37.642336712\n      -   - -48.70057942984\n          - -37.642336712\n          - -151.7606620589\n  -   -   - -4.025207143397\n          - 8.077081984077\n          - -43.54823317181\n      -   - 8.077081984077\n          - -13.41090079341\n          - -33.65991278671\n      -   - 
-43.54823317181\n          - -33.65991278671\n          - -135.7049294898\n  -   -   - -3.06424638066\n          - 6.148793926422\n          - -33.1517139682\n      -   - 6.148793926422\n          - -10.20923961279\n          - -25.62408896111\n      -   - -33.1517139682\n          - -25.62408896111\n          - -103.3073141862\n  -   -   - -2.617740314238\n          - 5.252823613251\n          - -28.32101840387\n      -   - 5.252823613251\n          - -8.721602244758\n          - -21.89027981319\n      -   - -28.32101840387\n          - -21.89027981319\n          - -88.25390895707\n  -   -   - -3.744639512008\n          - 7.514087911923\n          - -40.51280562804\n      -   - 7.514087911923\n          - -12.47612538042\n          - -31.31372744325\n      -   - -40.51280562804\n          - -31.31372744325\n          - -126.2459353865\n  -   -   - -2.05508690845\n          - 4.123789125024\n          - -22.23373870938\n      -   - 4.123789125024\n          - -6.846993376854\n          - -17.18521398846\n      -   - -22.23373870938\n          - -17.18521398846\n          - -69.28473841762\n  -   -   - -1.757491736179\n          - 3.526627160717\n          - -19.0140922437\n      -   - 3.526627160717\n          - -5.855486805939\n          - -14.69663956546\n      -   - -19.0140922437\n          - -14.69663956546\n          - -59.25168162552\n  -   -   - -1.52142547032\n          - 3.052930648937\n          - -16.46011963474\n      -   - 3.052930648937\n          - -5.068977898607\n          - -12.72258714093\n      -   - -16.46011963474\n          - -12.72258714093\n          - -51.29299656357\n  -   -   - -5.470646952694\n          - 10.97753789279\n          - -59.18627305619\n      -   - 10.97753789279\n          - -18.22671503454\n          - -45.74708648663\n      -   - -59.18627305619\n          - -45.74708648663\n          - -184.4361625451\n  -   -   - -6.026354345272\n          - 12.09263433612\n          - -65.19840466723\n      -   - 12.09263433612\n     
     - -20.07818166598\n          - -50.39406779835\n      -   - -65.19840466723\n          - -50.39406779835\n          - -203.1711567547\n  -   -   - -5.701876161033\n          - 11.44152824299\n          - -61.68791411973\n      -   - 11.44152824299\n          - -18.99710817628\n          - -47.68069007597\n      -   - -61.68791411973\n          - -47.68069007597\n          - -192.231772138\n  -   -   - -6.142119454382\n          - 12.32493151818\n          - -66.45085349382\n      -   - 12.32493151818\n          - -20.46387967883\n          - -51.36212815726\n      -   - -66.45085349382\n          - -51.36212815726\n          - -207.0740356415\n  -   -   - -6.144701231192\n          - 12.33011217653\n          - -66.4787854274\n      -   - 12.33011217653\n          - -20.47248146039\n          - -51.3837177002\n      -   - -66.4787854274\n          - -51.3837177002\n          - -207.1610770849\n  -   -   - -5.974636482892\n          - 11.98885597141\n          - -64.63887531859\n      -   - 11.98885597141\n          - -19.90587174644\n          - -49.96158850494\n      -   - -64.63887531859\n          - -49.96158850494\n          - -201.4275523606\n  -   -   - -5.257108945726\n          - 10.54904715237\n          - -56.8760309773\n      -   - 10.54904715237\n          - -17.51526418892\n          - -43.96142169055\n      -   - -56.8760309773\n          - -43.96142169055\n          - -177.2369901437\n  -   -   - -4.501443708068\n          - 9.032710263457\n          - -48.70057942984\n      -   - 9.032710263457\n          - -14.9975921352\n          - -37.642336712\n      -   - -48.70057942984\n          - -37.642336712\n          - -151.7606620589\n  -   -   - -5.257108945726\n          - 10.54904715237\n          - -56.8760309773\n      -   - 10.54904715237\n          - -17.51526418892\n          - -43.96142169055\n      -   - -56.8760309773\n          - -43.96142169055\n          - -177.2369901437\n  -   -   - -4.501443708068\n          - 9.032710263457\n    
      - -48.70057942984\n      -   - 9.032710263457\n          - -14.9975921352\n          - -37.642336712\n      -   - -48.70057942984\n          - -37.642336712\n          - -151.7606620589\n  -   -   - -4.025207143398\n          - 8.077081984078\n          - -43.54823317181\n      -   - 8.077081984078\n          - -13.41090079341\n          - -33.65991278671\n      -   - -43.54823317181\n          - -33.65991278671\n          - -135.7049294898\n  -   -   - -3.06424638066\n          - 6.148793926422\n          - -33.1517139682\n      -   - 6.148793926422\n          - -10.20923961279\n          - -25.62408896111\n      -   - -33.1517139682\n          - -25.62408896111\n          - -103.3073141862\n  -   -   - -2.617740314238\n          - 5.252823613251\n          - -28.32101840387\n      -   - 5.252823613251\n          - -8.721602244758\n          - -21.89027981319\n      -   - -28.32101840387\n          - -21.89027981319\n          - -88.25390895707\n  -   -   - -3.744639512008\n          - 7.514087911923\n          - -40.51280562804\n      -   - 7.514087911923\n          - -12.47612538042\n          - -31.31372744325\n      -   - -40.51280562804\n          - -31.31372744325\n          - -126.2459353865\n  -   -   - -2.05508690845\n          - 4.123789125024\n          - -22.23373870938\n      -   - 4.123789125024\n          - -6.846993376853\n          - -17.18521398846\n      -   - -22.23373870938\n          - -17.18521398846\n          - -69.28473841762\n  -   -   - -1.757491736179\n          - 3.526627160717\n          - -19.0140922437\n      -   - 3.526627160717\n          - -5.855486805939\n          - -14.69663956546\n      -   - -19.0140922437\n          - -14.69663956546\n          - -59.25168162552\n  -   -   - -1.52142547032\n          - 3.052930648937\n          - -16.46011963474\n      -   - 3.052930648937\n          - -5.068977898606\n          - -12.72258714093\n      -   - -16.46011963474\n          - -12.72258714093\n          - -51.29299656357\n 
 -   -   - -5.470646952693\n          - 10.97753789279\n          - -59.18627305619\n      -   - 10.97753789279\n          - -18.22671503454\n          - -45.74708648663\n      -   - -59.18627305619\n          - -45.74708648663\n          - -184.4361625451\n  -   -   - -5.701876161034\n          - 11.44152824299\n          - -61.68791411974\n      -   - 11.44152824299\n          - -18.99710817628\n          - -47.68069007597\n      -   - -61.68791411974\n          - -47.68069007597\n          - -192.231772138\n  -   -   - -5.642926609319\n          - 11.32323858867\n          - -61.05014599234\n      -   - 11.32323858867\n          - -18.80070422381\n          - -47.18773736707\n      -   - -61.05014599234\n          - -47.18773736707\n          - -190.2443601916\n  -   -   - -3.105982615526\n          - 6.23254290595\n          - -33.60325328599\n      -   - 6.23254290595\n          - -10.34829345167\n          - -25.97309908035\n      -   - -33.60325328599\n          - -25.97309908035\n          - -104.7144002338\n  -   -   - -3.105982615525\n          - 6.23254290595\n          - -33.60325328599\n      -   - 6.23254290595\n          - -10.34829345168\n          - -25.97309908035\n      -   - -33.60325328599\n          - -25.97309908035\n          - -104.7144002338\n  -   -   - -3.105982615526\n          - 6.23254290595\n          - -33.60325328599\n      -   - 6.23254290595\n          - -10.34829345167\n          - -25.97309908035\n      -   - -33.60325328599\n          - -25.97309908035\n          - -104.7144002338\n  -   -   - -1.418852308994\n          - 2.847104761256\n          - -15.35039290828\n      -   - 2.847104761256\n          - -4.727231886138\n          - -11.86484155382\n      -   - -15.35039290828\n          - -11.86484155382\n          - -47.83486804263\n  -   -   - -5.470646952694\n          - 10.97753789279\n          - -59.1862730562\n      -   - 10.97753789279\n          - -18.22671503454\n          - -45.74708648664\n      -   - 
-59.1862730562\n          - -45.74708648664\n          - -184.4361625451\n  -   -   - -5.701876161034\n          - 11.44152824299\n          - -61.68791411975\n      -   - 11.44152824299\n          - -18.99710817628\n          - -47.68069007599\n      -   - -61.68791411975\n          - -47.68069007599\n          - -192.2317721381\n  -   -   - -5.642926609318\n          - 11.32323858867\n          - -61.05014599234\n      -   - 11.32323858867\n          - -18.80070422381\n          - -47.18773736707\n      -   - -61.05014599234\n          - -47.18773736707\n          - -190.2443601916\n  -   -   - -3.105982615525\n          - 6.23254290595\n          - -33.60325328599\n      -   - 6.23254290595\n          - -10.34829345167\n          - -25.97309908035\n      -   - -33.60325328599\n          - -25.97309908035\n          - -104.7144002338\n  -   -   - -3.105982615526\n          - 6.23254290595\n          - -33.60325328599\n      -   - 6.23254290595\n          - -10.34829345167\n          - -25.97309908035\n      -   - -33.60325328599\n          - -25.97309908035\n          - -104.7144002338\n  -   -   - -3.105982615525\n          - 6.23254290595\n          - -33.60325328599\n      -   - 6.23254290595\n          - -10.34829345167\n          - -25.97309908035\n      -   - -33.60325328599\n          - -25.97309908035\n          - -104.7144002338\n  -   -   - -1.418852308994\n          - 2.847104761256\n          - -15.35039290828\n      -   - 2.847104761256\n          - -4.727231886138\n          - -11.86484155382\n      -   - -15.35039290828\n          - -11.86484155382\n          - -47.83486804263\n  -   -   - -5.470646952693\n          - 10.97753789279\n          - -59.18627305619\n      -   - 10.97753789279\n          - -18.22671503454\n          - -45.74708648663\n      -   - -59.18627305619\n          - -45.74708648663\n          - -184.4361625451\n  -   -   - -5.701876161035\n          - 11.44152824299\n          - -61.68791411974\n      -   - 11.44152824299\n     
     - -18.99710817628\n          - -47.68069007598\n      -   - -61.68791411974\n          - -47.68069007598\n          - -192.2317721381\n  -   -   - -5.642926609319\n          - 11.32323858867\n          - -61.05014599234\n      -   - 11.32323858867\n          - -18.80070422381\n          - -47.18773736707\n      -   - -61.05014599234\n          - -47.18773736707\n          - -190.2443601916\n  -   -   - -3.105982615525\n          - 6.23254290595\n          - -33.60325328599\n      -   - 6.23254290595\n          - -10.34829345167\n          - -25.97309908035\n      -   - -33.60325328599\n          - -25.97309908035\n          - -104.7144002338\n  -   -   - -3.105982615525\n          - 6.23254290595\n          - -33.60325328599\n      -   - 6.23254290595\n          - -10.34829345167\n          - -25.97309908035\n      -   - -33.60325328599\n          - -25.97309908035\n          - -104.7144002338\n  -   -   - -3.105982615525\n          - 6.23254290595\n          - -33.60325328599\n      -   - 6.23254290595\n          - -10.34829345167\n          - -25.97309908035\n      -   - -33.60325328599\n          - -25.97309908035\n          - -104.7144002338\n  -   -   - -1.418852308994\n          - 2.847104761256\n          - -15.35039290828\n      -   - 2.847104761256\n          - -4.727231886138\n          - -11.86484155382\n      -   - -15.35039290828\n          - -11.86484155382\n          - -47.83486804263\n  -   -   - -5.470646952694\n          - 10.97753789279\n          - -59.1862730562\n      -   - 10.97753789279\n          - -18.22671503454\n          - -45.74708648664\n      -   - -59.1862730562\n          - -45.74708648664\n          - -184.4361625451\n  -   -   - -5.701876161036\n          - 11.44152824299\n          - -61.68791411975\n      -   - 11.44152824299\n          - -18.99710817628\n          - -47.68069007599\n      -   - -61.68791411975\n          - -47.68069007599\n          - -192.2317721381\n  -   -   - -5.642926609319\n          - 
11.32323858867\n          - -61.05014599234\n      -   - 11.32323858867\n          - -18.80070422381\n          - -47.18773736707\n      -   - -61.05014599234\n          - -47.18773736707\n          - -190.2443601916\n  -   -   - -3.105982615526\n          - 6.23254290595\n          - -33.60325328599\n      -   - 6.23254290595\n          - -10.34829345167\n          - -25.97309908035\n      -   - -33.60325328599\n          - -25.97309908035\n          - -104.7144002338\n  -   -   - -1.418852308994\n          - 2.847104761256\n          - -15.35039290828\n      -   - 2.847104761256\n          - -4.727231886138\n          - -11.86484155382\n      -   - -15.35039290828\n          - -11.86484155382\n          - -47.83486804263\n  -   -   - -3.105982615526\n          - 6.23254290595\n          - -33.60325328599\n      -   - 6.23254290595\n          - -10.34829345167\n          - -25.97309908035\n      -   - -33.60325328599\n          - -25.97309908035\n          - -104.7144002338\n  -   -   - -3.105982615525\n          - 6.23254290595\n          - -33.60325328599\n      -   - 6.23254290595\n          - -10.34829345168\n          - -25.97309908035\n      -   - -33.60325328599\n          - -25.97309908035\n          - -104.7144002338\n  -   -   - -1.418852308994\n          - 2.847104761256\n          - -15.35039290828\n      -   - 2.847104761256\n          - -4.727231886138\n          - -11.86484155382\n      -   - -15.35039290828\n          - -11.86484155382\n          - -47.83486804263\n  -   -   - -5.470646952693\n          - 10.97753789279\n          - -59.18627305619\n      -   - 10.97753789279\n          - -18.22671503454\n          - -45.74708648663\n      -   - -59.18627305619\n          - -45.74708648663\n          - -184.4361625451\n  -   -   - -5.701876161036\n          - 11.441528243\n          - -61.68791411976\n      -   - 11.441528243\n          - -18.99710817628\n          - -47.680690076\n      -   - -61.68791411976\n          - -47.680690076\n          - 
-192.2317721381\n  -   -   - -6.144701231192\n          - 12.33011217653\n          - -66.47878542741\n      -   - 12.33011217653\n          - -20.47248146039\n          - -51.3837177002\n      -   - -66.47878542741\n          - -51.3837177002\n          - -207.1610770849\n  -   -   - -5.642926609319\n          - 11.32323858867\n          - -61.05014599234\n      -   - 11.32323858867\n          - -18.80070422381\n          - -47.18773736707\n      -   - -61.05014599234\n          - -47.18773736707\n          - -190.2443601916\n  -   -   - -4.867880151755\n          - 9.768010855989\n          - -52.66501135193\n      -   - 9.768010855989\n          - -16.21845919082\n          - -40.70658118363\n      -   - -52.66501135193\n          - -40.70658118363\n          - -164.1146180123\n  -   -   - -4.867880151755\n          - 9.768010855989\n          - -52.66501135193\n      -   - 9.768010855989\n          - -16.21845919082\n          - -40.70658118363\n      -   - -52.66501135193\n          - -40.70658118363\n          - -164.1146180123\n  -   -   - -4.18114593895\n          - 8.389992696808\n          - -45.23531629256\n      -   - 8.389992696808\n          - -13.93044665596\n          - -34.96391679718\n      -   - -45.23531629256\n          - -34.96391679718\n          - -140.9622150161\n  -   -   - -3.105982615526\n          - 6.23254290595\n          - -33.60325328599\n      -   - 6.23254290595\n          - -10.34829345167\n          - -25.97309908035\n      -   - -33.60325328599\n          - -25.97309908035\n          - -104.7144002338\n  -   -   - -2.348383980319\n          - 4.712326412864\n          - -25.40688454246\n      -   - 4.712326412864\n          - -7.824179840489\n          - -19.6378464886\n      -   - -25.40688454246\n          - -19.6378464886\n          - -79.17289001818\n  -   -   - -1.895670938996\n          - 3.803901027596\n          - -20.50903646132\n      -   - 3.803901027596\n          - -6.315862512005\n          - -15.85213287302\n     
 -   - -20.50903646132\n          - -15.85213287302\n          - -63.91022423146\n  -   -   - -1.418852308994\n          - 2.847104761256\n          - -15.35039290828\n      -   - 2.847104761256\n          - -4.727231886138\n          - -11.86484155382\n      -   - -15.35039290828\n          - -11.86484155382\n          - -47.83486804263\n  -   -   - -5.470646952694\n          - 10.97753789279\n          - -59.18627305619\n      -   - 10.97753789279\n          - -18.22671503454\n          - -45.74708648663\n      -   - -59.18627305619\n          - -45.74708648663\n          - -184.4361625451\n  -   -   - -5.701876161035\n          - 11.44152824299\n          - -61.68791411974\n      -   - 11.44152824299\n          - -18.99710817628\n          - -47.68069007598\n      -   - -61.68791411974\n          - -47.68069007598\n          - -192.2317721381\n  -   -   - -6.144701231193\n          - 12.33011217653\n          - -66.47878542741\n      -   - 12.33011217653\n          - -20.47248146039\n          - -51.38371770021\n      -   - -66.47878542741\n          - -51.38371770021\n          - -207.1610770849\n  -   -   - -5.642926609319\n          - 11.32323858867\n          - -61.05014599234\n      -   - 11.32323858867\n          - -18.80070422381\n          - -47.18773736707\n      -   - -61.05014599234\n          - -47.18773736707\n          - -190.2443601916\n  -   -   - -4.867880151755\n          - 9.768010855989\n          - -52.66501135193\n      -   - 9.768010855989\n          - -16.21845919082\n          - -40.70658118363\n      -   - -52.66501135193\n          - -40.70658118363\n          - -164.1146180123\n  -   -   - -4.867880151755\n          - 9.768010855988\n          - -52.66501135193\n      -   - 9.768010855988\n          - -16.21845919082\n          - -40.70658118362\n      -   - -52.66501135193\n          - -40.70658118362\n          - -164.1146180123\n  -   -   - -4.18114593895\n          - 8.389992696808\n          - -45.23531629256\n      -   - 
8.389992696808\n          - -13.93044665596\n          - -34.96391679718\n      -   - -45.23531629256\n          - -34.96391679718\n          - -140.9622150161\n  -   -   - -3.105982615526\n          - 6.23254290595\n          - -33.60325328599\n      -   - 6.23254290595\n          - -10.34829345167\n          - -25.97309908035\n      -   - -33.60325328599\n          - -25.97309908035\n          - -104.7144002338\n  -   -   - -2.34838398032\n          - 4.712326412864\n          - -25.40688454246\n      -   - 4.712326412864\n          - -7.824179840489\n          - -19.6378464886\n      -   - -25.40688454246\n          - -19.6378464886\n          - -79.17289001818\n  -   -   - -1.895670938996\n          - 3.803901027596\n          - -20.50903646132\n      -   - 3.803901027596\n          - -6.315862512005\n          - -15.85213287302\n      -   - -20.50903646132\n          - -15.85213287302\n          - -63.91022423146\n  -   -   - -1.418852308994\n          - 2.847104761256\n          - -15.35039290828\n      -   - 2.847104761256\n          - -4.727231886138\n          - -11.86484155382\n      -   - -15.35039290828\n          - -11.86484155382\n          - -47.83486804263\n  -   -   - -5.470646952693\n          - 10.97753789279\n          - -59.18627305619\n      -   - 10.97753789279\n          - -18.22671503454\n          - -45.74708648663\n      -   - -59.18627305619\n          - -45.74708648663\n          - -184.4361625451\n  -   -   - -5.701876161036\n          - 11.441528243\n          - -61.68791411976\n      -   - 11.441528243\n          - -18.99710817628\n          - -47.680690076\n      -   - -61.68791411976\n          - -47.680690076\n          - -192.2317721381\n  -   -   - -6.144701231193\n          - 12.33011217653\n          - -66.47878542741\n      -   - 12.33011217653\n          - -20.47248146039\n          - -51.38371770021\n      -   - -66.47878542741\n          - -51.38371770021\n          - -207.1610770849\n  -   -   - -5.642926609319\n          
- 11.32323858867\n          - -61.05014599234\n      -   - 11.32323858867\n          - -18.80070422381\n          - -47.18773736707\n      -   - -61.05014599234\n          - -47.18773736707\n          - -190.2443601916\n  -   -   - -4.867880151755\n          - 9.768010855988\n          - -52.66501135193\n      -   - 9.768010855988\n          - -16.21845919082\n          - -40.70658118363\n      -   - -52.66501135193\n          - -40.70658118363\n          - -164.1146180123\n  -   -   - -4.867880151755\n          - 9.768010855989\n          - -52.66501135193\n      -   - 9.768010855989\n          - -16.21845919082\n          - -40.70658118363\n      -   - -52.66501135193\n          - -40.70658118363\n          - -164.1146180123\n  -   -   - -4.18114593895\n          - 8.389992696808\n          - -45.23531629256\n      -   - 8.389992696808\n          - -13.93044665596\n          - -34.96391679718\n      -   - -45.23531629256\n          - -34.96391679718\n          - -140.9622150161\n  -   -   - -3.105982615525\n          - 6.232542905949\n          - -33.60325328599\n      -   - 6.232542905949\n          - -10.34829345167\n          - -25.97309908035\n      -   - -33.60325328599\n          - -25.97309908035\n          - -104.7144002338\n  -   -   - -2.348383980319\n          - 4.712326412864\n          - -25.40688454246\n      -   - 4.712326412864\n          - -7.82417984049\n          - -19.6378464886\n      -   - -25.40688454246\n          - -19.6378464886\n          - -79.17289001818\n  -   -   - -1.895670938996\n          - 3.803901027596\n          - -20.50903646132\n      -   - 3.803901027596\n          - -6.315862512006\n          - -15.85213287302\n      -   - -20.50903646132\n          - -15.85213287302\n          - -63.91022423146\n  -   -   - -1.418852308994\n          - 2.847104761256\n          - -15.35039290828\n      -   - 2.847104761256\n          - -4.727231886138\n          - -11.86484155382\n      -   - -15.35039290828\n          - 
-11.86484155382\n          - -47.83486804263\n  -   -   - -5.470646952694\n          - 10.97753789279\n          - -59.1862730562\n      -   - 10.97753789279\n          - -18.22671503454\n          - -45.74708648664\n      -   - -59.1862730562\n          - -45.74708648664\n          - -184.4361625451\n  -   -   - -6.026354345272\n          - 12.09263433613\n          - -65.19840466725\n      -   - 12.09263433613\n          - -20.07818166598\n          - -50.39406779838\n      -   - -65.19840466725\n          - -50.39406779838\n          - -203.1711567548\n  -   -   - -5.701876161035\n          - 11.44152824299\n          - -61.68791411974\n      -   - 11.44152824299\n          - -18.99710817628\n          - -47.68069007598\n      -   - -61.68791411974\n          - -47.68069007598\n          - -192.2317721381\n  -   -   - -6.142119454382\n          - 12.32493151818\n          - -66.45085349382\n      -   - 12.32493151818\n          - -20.46387967883\n          - -51.36212815726\n      -   - -66.45085349382\n          - -51.36212815726\n          - -207.0740356415\n  -   -   - -6.144701231193\n          - 12.33011217653\n          - -66.47878542741\n      -   - 12.33011217653\n          - -20.47248146038\n          - -51.3837177002\n      -   - -66.47878542741\n          - -51.3837177002\n          - -207.1610770849\n  -   -   - -5.974636482892\n          - 11.98885597141\n          - -64.63887531859\n      -   - 11.98885597141\n          - -19.90587174644\n          - -49.96158850494\n      -   - -64.63887531859\n          - -49.96158850494\n          - -201.4275523606\n  -   -   - -4.501443708068\n          - 9.032710263457\n          - -48.70057942984\n      -   - 9.032710263457\n          - -14.9975921352\n          - -37.642336712\n      -   - -48.70057942984\n          - -37.642336712\n          - -151.7606620589\n  -   -   - -4.501443708068\n          - 9.032710263457\n          - -48.70057942984\n      -   - 9.032710263457\n          - -14.9975921352\n        
  - -37.642336712\n      -   - -48.70057942984\n          - -37.642336712\n          - -151.7606620589\n  -   -   - -4.025207143398\n          - 8.077081984078\n          - -43.54823317181\n      -   - 8.077081984078\n          - -13.41090079341\n          - -33.65991278671\n      -   - -43.54823317181\n          - -33.65991278671\n          - -135.7049294898\n  -   -   - -3.06424638066\n          - 6.148793926422\n          - -33.1517139682\n      -   - 6.148793926422\n          - -10.20923961279\n          - -25.62408896111\n      -   - -33.1517139682\n          - -25.62408896111\n          - -103.3073141862\n  -   -   - -2.617740314238\n          - 5.252823613251\n          - -28.32101840387\n      -   - 5.252823613251\n          - -8.721602244758\n          - -21.89027981319\n      -   - -28.32101840387\n          - -21.89027981319\n          - -88.25390895707\n  -   -   - -3.744639512008\n          - 7.514087911923\n          - -40.51280562804\n      -   - 7.514087911923\n          - -12.47612538042\n          - -31.31372744325\n      -   - -40.51280562804\n          - -31.31372744325\n          - -126.2459353865\n  -   -   - -2.05508690845\n          - 4.123789125024\n          - -22.23373870938\n      -   - 4.123789125024\n          - -6.846993376853\n          - -17.18521398846\n      -   - -22.23373870938\n          - -17.18521398846\n          - -69.28473841762\n  -   -   - -1.757491736179\n          - 3.526627160717\n          - -19.0140922437\n      -   - 3.526627160717\n          - -5.85548680594\n          - -14.69663956546\n      -   - -19.0140922437\n          - -14.69663956546\n          - -59.25168162552\n  -   -   - -1.52142547032\n          - 3.052930648937\n          - -16.46011963474\n      -   - 3.052930648937\n          - -5.068977898606\n          - -12.72258714093\n      -   - -16.46011963474\n          - -12.72258714093\n          - -51.29299656357\n  -   -   - -3.105982615525\n          - 6.23254290595\n          - -33.60325328599\n      
-   - 6.23254290595\n          - -10.34829345168\n          - -25.97309908035\n      -   - -33.60325328599\n          - -25.97309908035\n          - -104.7144002338\n  -   -   - -3.105982615525\n          - 6.23254290595\n          - -33.60325328599\n      -   - 6.23254290595\n          - -10.34829345167\n          - -25.97309908035\n      -   - -33.60325328599\n          - -25.97309908035\n          - -104.7144002338\n  -   -   - -3.105982615526\n          - 6.23254290595\n          - -33.60325328599\n      -   - 6.23254290595\n          - -10.34829345167\n          - -25.97309908035\n      -   - -33.60325328599\n          - -25.97309908035\n          - -104.7144002338\n  -   -   - -3.105982615525\n          - 6.23254290595\n          - -33.60325328599\n      -   - 6.23254290595\n          - -10.34829345167\n          - -25.97309908035\n      -   - -33.60325328599\n          - -25.97309908035\n          - -104.7144002338\n  -   -   - -3.105982615526\n          - 6.23254290595\n          - -33.60325328599\n      -   - 6.23254290595\n          - -10.34829345167\n          - -25.97309908035\n      -   - -33.60325328599\n          - -25.97309908035\n          - -104.7144002338\n  -   -   - -3.105982615526\n          - 6.23254290595\n          - -33.60325328599\n      -   - 6.23254290595\n          - -10.34829345167\n          - -25.97309908035\n      -   - -33.60325328599\n          - -25.97309908035\n          - -104.7144002338\n  -   -   - -5.470646952693\n          - 10.97753789279\n          - -59.18627305619\n      -   - 10.97753789279\n          - -18.22671503454\n          - -45.74708648663\n      -   - -59.18627305619\n          - -45.74708648663\n          - -184.4361625451\n  -   -   - -5.701876161035\n          - 11.441528243\n          - -61.68791411976\n      -   - 11.441528243\n          - -18.99710817628\n          - -47.68069007601\n      -   - -61.68791411976\n          - -47.68069007601\n          - -192.2317721381\n  -   -   - -6.144701231193\n      
    - 12.33011217653\n          - -66.47878542741\n      -   - 12.33011217653\n          - -20.47248146039\n          - -51.38371770021\n      -   - -66.47878542741\n          - -51.38371770021\n          - -207.1610770849\n  -   -   - -5.642926609318\n          - 11.32323858867\n          - -61.05014599234\n      -   - 11.32323858867\n          - -18.80070422381\n          - -47.18773736707\n      -   - -61.05014599234\n          - -47.18773736707\n          - -190.2443601916\n  -   -   - -4.867880151755\n          - 9.768010855989\n          - -52.66501135193\n      -   - 9.768010855989\n          - -16.21845919082\n          - -40.70658118363\n      -   - -52.66501135193\n          - -40.70658118363\n          - -164.1146180123\n  -   -   - -4.18114593895\n          - 8.389992696808\n          - -45.23531629256\n      -   - 8.389992696808\n          - -13.93044665596\n          - -34.96391679718\n      -   - -45.23531629256\n          - -34.96391679718\n          - -140.9622150161\n  -   -   - -4.867880151755\n          - 9.768010855988\n          - -52.66501135193\n      -   - 9.768010855988\n          - -16.21845919082\n          - -40.70658118362\n      -   - -52.66501135193\n          - -40.70658118362\n          - -164.1146180123\n  -   -   - -4.18114593895\n          - 8.389992696807\n          - -45.23531629256\n      -   - 8.389992696807\n          - -13.93044665596\n          - -34.96391679718\n      -   - -45.23531629256\n          - -34.96391679718\n          - -140.9622150161\n  -   -   - -3.105982615525\n          - 6.23254290595\n          - -33.60325328599\n      -   - 6.23254290595\n          - -10.34829345167\n          - -25.97309908035\n      -   - -33.60325328599\n          - -25.97309908035\n          - -104.7144002338\n  -   -   - -2.348383980319\n          - 4.712326412864\n          - -25.40688454246\n      -   - 4.712326412864\n          - -7.824179840489\n          - -19.6378464886\n      -   - -25.40688454246\n          - 
-19.6378464886\n          - -79.17289001818\n  -   -   - -1.895670938996\n          - 3.803901027596\n          - -20.50903646132\n      -   - 3.803901027596\n          - -6.315862512006\n          - -15.85213287302\n      -   - -20.50903646132\n          - -15.85213287302\n          - -63.91022423146\n  -   -   - -1.418852308994\n          - 2.847104761256\n          - -15.35039290828\n      -   - 2.847104761256\n          - -4.727231886138\n          - -11.86484155382\n      -   - -15.35039290828\n          - -11.86484155382\n          - -47.83486804263\n  -   -   - -5.470646952694\n          - 10.97753789279\n          - -59.18627305619\n      -   - 10.97753789279\n          - -18.22671503454\n          - -45.74708648663\n      -   - -59.18627305619\n          - -45.74708648663\n          - -184.4361625451\n  -   -   - -5.701876161033\n          - 11.44152824299\n          - -61.68791411973\n      -   - 11.44152824299\n          - -18.99710817628\n          - -47.68069007597\n      -   - -61.68791411973\n          - -47.68069007597\n          - -192.231772138\n  -   -   - -6.144701231193\n          - 12.33011217653\n          - -66.47878542741\n      -   - 12.33011217653\n          - -20.47248146039\n          - -51.3837177002\n      -   - -66.47878542741\n          - -51.3837177002\n          - -207.1610770849\n  -   -   - -5.642926609319\n          - 11.32323858867\n          - -61.05014599234\n      -   - 11.32323858867\n          - -18.80070422381\n          - -47.18773736708\n      -   - -61.05014599234\n          - -47.18773736708\n          - -190.2443601916\n  -   -   - -4.867880151755\n          - 9.768010855988\n          - -52.66501135193\n      -   - 9.768010855988\n          - -16.21845919082\n          - -40.70658118362\n      -   - -52.66501135193\n          - -40.70658118362\n          - -164.1146180123\n  -   -   - -4.18114593895\n          - 8.389992696807\n          - -45.23531629256\n      -   - 8.389992696807\n          - -13.93044665596\n   
       - -34.96391679718\n      -   - -45.23531629256\n          - -34.96391679718\n          - -140.9622150161\n  -   -   - -4.867880151755\n          - 9.768010855989\n          - -52.66501135193\n      -   - 9.768010855989\n          - -16.21845919082\n          - -40.70658118363\n      -   - -52.66501135193\n          - -40.70658118363\n          - -164.1146180123\n  -   -   - -4.18114593895\n          - 8.389992696808\n          - -45.23531629257\n      -   - 8.389992696808\n          - -13.93044665596\n          - -34.96391679718\n      -   - -45.23531629257\n          - -34.96391679718\n          - -140.9622150161\n  -   -   - -3.105982615526\n          - 6.23254290595\n          - -33.60325328599\n      -   - 6.23254290595\n          - -10.34829345167\n          - -25.97309908035\n      -   - -33.60325328599\n          - -25.97309908035\n          - -104.7144002338\n  -   -   - -2.348383980319\n          - 4.712326412864\n          - -25.40688454246\n      -   - 4.712326412864\n          - -7.82417984049\n          - -19.6378464886\n      -   - -25.40688454246\n          - -19.6378464886\n          - -79.17289001818\n  -   -   - -1.895670938996\n          - 3.803901027596\n          - -20.50903646132\n      -   - 3.803901027596\n          - -6.315862512006\n          - -15.85213287302\n      -   - -20.50903646132\n          - -15.85213287302\n          - -63.91022423146\n  -   -   - -1.418852308994\n          - 2.847104761256\n          - -15.35039290828\n      -   - 2.847104761256\n          - -4.727231886138\n          - -11.86484155382\n      -   - -15.35039290828\n          - -11.86484155382\n          - -47.83486804263\n  -   -   - -5.470646952694\n          - 10.97753789279\n          - -59.18627305619\n      -   - 10.97753789279\n          - -18.22671503454\n          - -45.74708648663\n      -   - -59.18627305619\n          - -45.74708648663\n          - -184.4361625451\n  -   -   - -5.701876161035\n          - 11.44152824299\n          - 
-61.68791411974\n      -   - 11.44152824299\n          - -18.99710817628\n          - -47.68069007598\n      -   - -61.68791411974\n          - -47.68069007598\n          - -192.2317721381\n  -   -   - -6.144701231193\n          - 12.33011217653\n          - -66.47878542741\n      -   - 12.33011217653\n          - -20.47248146039\n          - -51.3837177002\n      -   - -66.47878542741\n          - -51.3837177002\n          - -207.1610770849\n  -   -   - -5.642926609319\n          - 11.32323858867\n          - -61.05014599234\n      -   - 11.32323858867\n          - -18.80070422381\n          - -47.18773736707\n      -   - -61.05014599234\n          - -47.18773736707\n          - -190.2443601916\n  -   -   - -4.867880151755\n          - 9.768010855988\n          - -52.66501135193\n      -   - 9.768010855988\n          - -16.21845919082\n          - -40.70658118363\n      -   - -52.66501135193\n          - -40.70658118363\n          - -164.1146180123\n  -   -   - -4.18114593895\n          - 8.389992696808\n          - -45.23531629256\n      -   - 8.389992696808\n          - -13.93044665596\n          - -34.96391679718\n      -   - -45.23531629256\n          - -34.96391679718\n          - -140.9622150161\n  -   -   - -4.867880151755\n          - 9.768010855989\n          - -52.66501135193\n      -   - 9.768010855989\n          - -16.21845919082\n          - -40.70658118363\n      -   - -52.66501135193\n          - -40.70658118363\n          - -164.1146180123\n  -   -   - -4.181145938951\n          - 8.389992696808\n          - -45.23531629256\n      -   - 8.389992696808\n          - -13.93044665596\n          - -34.96391679718\n      -   - -45.23531629256\n          - -34.96391679718\n          - -140.9622150161\n  -   -   - -3.105982615526\n          - 6.23254290595\n          - -33.60325328599\n      -   - 6.23254290595\n          - -10.34829345167\n          - -25.97309908035\n      -   - -33.60325328599\n          - -25.97309908035\n          - -104.7144002338\n  
-   -   - -2.348383980319\n          - 4.712326412864\n          - -25.40688454246\n      -   - 4.712326412864\n          - -7.82417984049\n          - -19.6378464886\n      -   - -25.40688454246\n          - -19.6378464886\n          - -79.17289001818\n  -   -   - -1.895670938996\n          - 3.803901027596\n          - -20.50903646132\n      -   - 3.803901027596\n          - -6.315862512005\n          - -15.85213287302\n      -   - -20.50903646132\n          - -15.85213287302\n          - -63.91022423146\n  -   -   - -1.418852308994\n          - 2.847104761256\n          - -15.35039290828\n      -   - 2.847104761256\n          - -4.727231886138\n          - -11.86484155382\n      -   - -15.35039290828\n          - -11.86484155382\n          - -47.83486804263\n  -   -   - -3.105982615525\n          - 6.23254290595\n          - -33.60325328599\n      -   - 6.23254290595\n          - -10.34829345167\n          - -25.97309908035\n      -   - -33.60325328599\n          - -25.97309908035\n          - -104.7144002338\n  -   -   - -5.470646952691\n          - 10.97753789279\n          - -59.18627305619\n      -   - 10.97753789279\n          - -18.22671503455\n          - -45.74708648663\n      -   - -59.18627305619\n          - -45.74708648663\n          - -184.4361625451\n  -   -   - -5.701876161036\n          - 11.44152824299\n          - -61.68791411975\n      -   - 11.44152824299\n          - -18.99710817628\n          - -47.68069007599\n      -   - -61.68791411975\n          - -47.68069007599\n          - -192.2317721381\n  -   -   - -5.642926609319\n          - 11.32323858867\n          - -61.05014599234\n      -   - 11.32323858867\n          - -18.80070422381\n          - -47.18773736707\n      -   - -61.05014599234\n          - -47.18773736707\n          - -190.2443601916\n  -   -   - -3.105982615525\n          - 6.23254290595\n          - -33.60325328599\n      -   - 6.23254290595\n          - -10.34829345167\n          - -25.97309908035\n      -   - 
-33.60325328599\n          - -25.97309908035\n          - -104.7144002338\n  -   -   - -1.418852308994\n          - 2.847104761256\n          - -15.35039290828\n      -   - 2.847104761256\n          - -4.727231886138\n          - -11.86484155382\n      -   - -15.35039290828\n          - -11.86484155382\n          - -47.83486804263\n  -   -   - -3.105982615526\n          - 6.23254290595\n          - -33.60325328599\n      -   - 6.23254290595\n          - -10.34829345167\n          - -25.97309908035\n      -   - -33.60325328599\n          - -25.97309908035\n          - -104.7144002338\n  -   -   - -1.418852308994\n          - 2.847104761256\n          - -15.35039290828\n      -   - 2.847104761256\n          - -4.727231886138\n          - -11.86484155382\n      -   - -15.35039290828\n          - -11.86484155382\n          - -47.83486804263\n  -   -   - -3.105982615526\n          - 6.23254290595\n          - -33.60325328599\n      -   - 6.23254290595\n          - -10.34829345167\n          - -25.97309908035\n      -   - -33.60325328599\n          - -25.97309908035\n          - -104.7144002338\n  -   -   - -3.105982615525\n          - 6.23254290595\n          - -33.60325328599\n      -   - 6.23254290595\n          - -10.34829345167\n          - -25.97309908035\n      -   - -33.60325328599\n          - -25.97309908035\n          - -104.7144002338\n  -   -   - -3.105982615525\n          - 6.232542905949\n          - -33.60325328599\n      -   - 6.232542905949\n          - -10.34829345167\n          - -25.97309908035\n      -   - -33.60325328599\n          - -25.97309908035\n          - -104.7144002338\n  -   -   - -5.257108945726\n          - 10.54904715237\n          - -56.8760309773\n      -   - 10.54904715237\n          - -17.51526418892\n          - -43.96142169054\n      -   - -56.8760309773\n          - -43.96142169054\n          - -177.2369901437\n  -   -   - -5.257108945726\n          - 10.54904715237\n          - -56.8760309773\n      -   - 10.54904715237\n       
   - -17.51526418892\n          - -43.96142169055\n      -   - -56.8760309773\n          - -43.96142169055\n          - -177.2369901437\n  -   -   - -5.470646952693\n          - 10.97753789279\n          - -59.18627305619\n      -   - 10.97753789279\n          - -18.22671503454\n          - -45.74708648663\n      -   - -59.18627305619\n          - -45.74708648663\n          - -184.4361625451\n  -   -   - -5.701876161033\n          - 11.44152824299\n          - -61.68791411974\n      -   - 11.44152824299\n          - -18.99710817628\n          - -47.68069007598\n      -   - -61.68791411974\n          - -47.68069007598\n          - -192.231772138\n  -   -   - -5.257108945726\n          - 10.54904715237\n          - -56.8760309773\n      -   - 10.54904715237\n          - -17.51526418892\n          - -43.96142169055\n      -   - -56.8760309773\n          - -43.96142169055\n          - -177.2369901437\n  -   -   - -5.257108945726\n          - 10.54904715237\n          - -56.8760309773\n      -   - 10.54904715237\n          - -17.51526418892\n          - -43.96142169054\n      -   - -56.8760309773\n          - -43.96142169054\n          - -177.2369901437\n  -   -   - -4.025207143398\n          - 8.077081984078\n          - -43.54823317181\n      -   - 8.077081984078\n          - -13.41090079341\n          - -33.65991278671\n      -   - -43.54823317181\n          - -33.65991278671\n          - -135.7049294898\n  -   -   - -2.617740314238\n          - 5.252823613251\n          - -28.32101840387\n      -   - 5.252823613251\n          - -8.721602244758\n          - -21.89027981319\n      -   - -28.32101840387\n          - -21.89027981319\n          - -88.25390895707\n  -   -   - -5.257108945726\n          - 10.54904715237\n          - -56.8760309773\n      -   - 10.54904715237\n          - -17.51526418892\n          - -43.96142169054\n      -   - -56.8760309773\n          - -43.96142169054\n          - -177.2369901437\n  -   -   - -5.257108945726\n          - 
10.54904715237\n          - -56.8760309773\n      -   - 10.54904715237\n          - -17.51526418892\n          - -43.96142169055\n      -   - -56.8760309773\n          - -43.96142169055\n          - -177.2369901437\n  -   -   - -5.470646952693\n          - 10.97753789279\n          - -59.18627305619\n      -   - 10.97753789279\n          - -18.22671503454\n          - -45.74708648663\n      -   - -59.18627305619\n          - -45.74708648663\n          - -184.4361625451\n  -   -   - -5.701876161035\n          - 11.44152824299\n          - -61.68791411975\n      -   - 11.44152824299\n          - -18.99710817628\n          - -47.680690076\n      -   - -61.68791411975\n          - -47.680690076\n          - -192.2317721381\n  -   -   - -4.025207143397\n          - 8.077081984077\n          - -43.54823317181\n      -   - 8.077081984077\n          - -13.41090079341\n          - -33.65991278671\n      -   - -43.54823317181\n          - -33.65991278671\n          - -135.7049294898\n  -   -   - -2.617740314238\n          - 5.252823613251\n          - -28.32101840387\n      -   - 5.252823613251\n          - -8.721602244758\n          - -21.89027981319\n      -   - -28.32101840387\n          - -21.89027981319\n          - -88.25390895707\n  -   -   - -5.257108945726\n          - 10.54904715237\n          - -56.8760309773\n      -   - 10.54904715237\n          - -17.51526418892\n          - -43.96142169054\n      -   - -56.8760309773\n          - -43.96142169054\n          - -177.2369901437\n  -   -   - -5.257108945726\n          - 10.54904715237\n          - -56.8760309773\n      -   - 10.54904715237\n          - -17.51526418892\n          - -43.96142169054\n      -   - -56.8760309773\n          - -43.96142169054\n          - -177.2369901437\nx_sample_monomial_basis:\n  -   - 1.0\n      - 0.0\n      - 0.0\n      - 0.0\n  -   - 1.0\n      - -0.0290516140038\n      - -0.1571103675277\n      - 0.2526693486472\n  -   - 1.0\n      - 0.02114495464958\n      - 0.1030939110921\n      
- -0.1533658896721\n  -   - 1.0\n      - 0.8900017727174\n      - -0.3597293107864\n      - -0.329418060402\n  -   - 1.0\n      - 0.01849106402403\n      - 0.09961098192486\n      - -0.1577335237518\n  -   - 1.0\n      - 0.04415249662202\n      - 0.09428409500262\n      - -0.01321807936901\n  -   - 1.0\n      - 0.0437172190513\n      - -1.904204784139\n      - -0.1262039012941\n"
  },
  {
    "path": "tests/optimagic/optimizers/_pounders/fixtures/get_interpolation_matrices_residual_model.yaml",
    "content": "---\nbasis_null_space_expected:\n  -   - -0.4583543462791\n      - -0.319506030216\n      - -0.6977037060623\n  -   - 0.2311109444943\n      - -0.207102182158\n      - 0.2709008772413\n  -   - -0.391511898797\n      - -0.2526774248775\n      - 0.6371234121103\n  -   - 0.008236522535421\n      - -0.04467864269942\n      - -0.001160198289494\n  -   - 0.7520131885729\n      - -0.1064594438631\n      - -0.1818415477397\n  -   - -0.1324539613206\n      - 0.8805459169394\n      - -0.02874512409319\n  -   - -0.009040449205893\n      - 0.04987780687459\n      - 0.001426286833001\ndelta: 0.025\nhistory_x:\n  -   - 0.15\n      - 0.008\n      - 0.01\n  -   - 0.25\n      - 0.008\n      - 0.01\n  -   - 0.15\n      - 0.108\n      - 0.01\n  -   - 0.15\n      - 0.008\n      - 0.11\n  -   - 0.1596177824551\n      - -0.07539624732067\n      - 0.08766385239892\n  -   - 0.2\n      - 0.008531162120637\n      - -0.002952684076318\n  -   - 0.1505141617677\n      - -0.04199731338289\n      - 0.009934485345754\n  -   - 0.1374618789969\n      - 0.007934485345754\n      - -0.03840238867598\n  -   - 0.1505250437069\n      - 0.007964908595663\n      - 0.01275913089388\n  -   - 0.149883507892\n      - 0.008098080768719\n      - 0.009146244784311\n  -   - 0.1716712756093\n      - -0.003385426549061\n      - 0.004854131368058\n  -   - 0.1499498551576\n      - 0.008185153997901\n      - 0.009255435636305\n  -   - 0.1486949409413\n      - 0.001680047032405\n      - 0.01940631659429\n  -   - 0.1494212312914\n      - 0.005607806220598\n      - 0.01308958287811\nlower_triangular_expected:\n  -   - -0.1507316305838\n      - 0.3652114026571\n      - -0.009929602309966\n      - -0.01905060831947\n      - 0.003262911080217\n      - -0.01695118100081\n      - -0.0002604899716981\n  -   - 0.1044075404358\n      - -0.2033554793648\n      - -0.04901244210073\n      - 0.002419549056951\n      - -0.000600520783894\n      - 0.008574072391572\n      - 0.001713650401476\n  -   - 0.08359492814255\n  
    - -0.1891075948275\n      - 0.003335257628927\n      - 0.0118310729907\n      - -0.003470346928518\n      - 0.01057835837333\n      - -0.002245237316082\n  -   - -0.7199325351227\n      - -0.1628252921563\n      - 1.64250205349\n      - 0.2023897915246\n      - -0.01194347807638\n      - 0.08702438194994\n      - 0.008210075299701\n  -   - -0.07653108379319\n      - 0.06625813621784\n      - 0.1592803660839\n      - 0.02907110965794\n      - -0.01119440066196\n      - 0.01377712744979\n      - -0.01253655518419\n  -   - -0.04476255198824\n      - 0.03879496991897\n      - -0.001710465543054\n      - -0.01718375258754\n      - 0.01249119968731\n      - -0.01285690490792\n      - 0.0138241209585\nmodel_indices:\n  - 13\n  - 12\n  - 11\n  - 10\n  - 10\n  - 7\n  - 6\nmonomial_basis_expected:\n  -   - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n  -   - 0.000421998138113\n      - 0.003227454378072\n      - -0.005190483660171\n      - 0.01234183379235\n      - -0.02806999966988\n      - 0.0319208998729\n  -   - 0.0002235545535664\n      - 0.001541433438831\n      - -0.002293087033098\n      - 0.005314177252134\n      - -0.01118012852874\n      - 0.01176054805746\n  -   - 0.39605157772\n      - -0.2263871121141\n      - -0.2073114454022\n      - 0.06470258851941\n      - 0.08379309531591\n      - 0.05425812925951\n  -   - 0.0001709597243704\n      - 0.001302429203959\n      - -0.002062390549796\n      - 0.004961173860017\n      - -0.01111005551172\n      - 0.01243993225759\n  -   - 0.0009747214789788\n      - 0.002943599394654\n      - -0.0004126754404125\n      - 0.004444745285232\n      - -0.000881235114793\n      - 8.735881110268e-05\n  -   - 0.0009555976207896\n      - -0.05886419129446\n      - -0.003901308645878\n      - 1.81299792997\n      - 0.1699305387922\n      - 0.007963712350931\nn_modelpoints: 4\nn_modelpoints_expected: 7\nx_accepted:\n  - 0.1494212312914\n  - 0.005607806220598\n  - 
0.01308958287811\nx_sample_monomial_basis_expected:\n  -   - 1.0\n      - 0.0\n      - 0.0\n      - 0.0\n  -   - 1.0\n      - -0.0290516140038\n      - -0.1571103675277\n      - 0.2526693486472\n  -   - 1.0\n      - 0.02114495464958\n      - 0.1030939110921\n      - -0.1533658896721\n  -   - 1.0\n      - 0.8900017727174\n      - -0.3597293107864\n      - -0.329418060402\n  -   - 1.0\n      - 0.01849106402403\n      - 0.09961098192486\n      - -0.1577335237518\n  -   - 1.0\n      - 0.04415249662202\n      - 0.09428409500262\n      - -0.01321807936901\n  -   - 1.0\n      - 0.0437172190513\n      - -1.904204784139\n      - -0.1262039012941\n"
  },
  {
    "path": "tests/optimagic/optimizers/_pounders/fixtures/interpolate_f_iter_4.yaml",
    "content": "---\ndelta_old: 0.0125\nf_interpolated_expected:\n  -   - 2.396839767562\n      - 2.640310345903\n      - 2.498147595924\n      - 2.691030200433\n      - 2.692161347328\n      - 2.617651338669\n      - 2.303282937582\n      - 1.972205368828\n      - 2.303282937582\n      - 1.972205368828\n      - 1.763553129541\n      - 1.342529987111\n      - 1.146903490695\n      - 1.640628791315\n      - 0.9003896743169\n      - 0.7700051055971\n      - 0.6665780303914\n      - 2.396839767562\n      - 2.640310345903\n      - 2.498147595924\n      - 2.691030200433\n      - 2.692161347328\n      - 2.617651338669\n      - 2.303282937582\n      - 1.972205368828\n      - 2.303282937582\n      - 1.972205368828\n      - 1.763553129541\n      - 1.342529987111\n      - 1.146903490695\n      - 1.640628791315\n      - 0.9003896743169\n      - 0.7700051055971\n      - 0.6665780303914\n      - 2.396839767562\n      - 2.640310345904\n      - 2.498147595924\n      - 2.691030200433\n      - 2.692161347328\n      - 2.617651338669\n      - 2.303282937582\n      - 1.972205368828\n      - 2.303282937582\n      - 1.972205368828\n      - 1.763553129541\n      - 1.342529987111\n      - 1.146903490695\n      - 1.640628791315\n      - 0.9003896743169\n      - 0.7700051055971\n      - 0.6665780303914\n      - 2.396839767562\n      - 2.640310345904\n      - 2.498147595924\n      - 2.691030200433\n      - 2.692161347328\n      - 2.617651338669\n      - 2.303282937582\n      - 1.972205368828\n      - 2.303282937582\n      - 1.972205368828\n      - 1.763553129541\n      - 1.342529987111\n      - 1.146903490695\n      - 1.640628791315\n      - 0.9003896743169\n      - 0.7700051055971\n      - 0.6665780303914\n      - 2.396839767562\n      - 2.498147595924\n      - 2.472320188045\n      - 1.360815770929\n      - 1.360815770929\n      - 1.360815770929\n      - 0.6216379283744\n      - 2.396839767562\n      - 2.498147595924\n      - 2.472320188045\n      - 1.360815770929\n      - 1.360815770929\n 
     - 1.360815770929\n      - 0.6216379283744\n      - 2.396839767562\n      - 2.498147595925\n      - 2.472320188045\n      - 1.360815770929\n      - 1.360815770929\n      - 1.360815770929\n      - 0.6216379283744\n      - 2.396839767562\n      - 2.498147595925\n      - 2.472320188046\n      - 1.360815770929\n      - 0.6216379283744\n      - 1.360815770929\n      - 1.360815770929\n      - 0.6216379283744\n      - 2.396839767562\n      - 2.498147595925\n      - 2.692161347328\n      - 2.472320188046\n      - 2.132751177783\n      - 2.132751177783\n      - 1.831874172693\n      - 1.360815770929\n      - 1.028891127921\n      - 0.8305451863645\n      - 0.6216379283744\n      - 2.396839767562\n      - 2.498147595925\n      - 2.692161347328\n      - 2.472320188045\n      - 2.132751177783\n      - 2.132751177783\n      - 1.831874172693\n      - 1.360815770929\n      - 1.028891127921\n      - 0.8305451863645\n      - 0.6216379283744\n      - 2.396839767562\n      - 2.498147595925\n      - 2.692161347328\n      - 2.472320188046\n      - 2.132751177783\n      - 2.132751177783\n      - 1.831874172693\n      - 1.360815770929\n      - 1.028891127921\n      - 0.8305451863645\n      - 0.6216379283744\n      - 2.396839767562\n      - 2.640310345903\n      - 2.498147595925\n      - 2.691030200433\n      - 2.692161347328\n      - 2.617651338669\n      - 1.972205368828\n      - 1.972205368828\n      - 1.763553129541\n      - 1.342529987111\n      - 1.146903490695\n      - 1.640628791315\n      - 0.9003896743169\n      - 0.7700051055971\n      - 0.6665780303914\n      - 1.360815770929\n      - 1.360815770929\n      - 1.360815770929\n      - 1.360815770929\n      - 1.360815770929\n      - 1.360815770929\n      - 2.396839767562\n      - 2.498147595924\n      - 2.692161347328\n      - 2.472320188046\n      - 2.132751177783\n      - 1.831874172693\n      - 2.132751177783\n      - 1.831874172693\n      - 1.360815770929\n      - 1.028891127921\n      - 0.8305451863645\n      - 
0.6216379283744\n      - 2.396839767562\n      - 2.498147595924\n      - 2.692161347328\n      - 2.472320188045\n      - 2.132751177783\n      - 1.831874172693\n      - 2.132751177783\n      - 1.831874172693\n      - 1.360815770929\n      - 1.028891127921\n      - 0.8305451863645\n      - 0.6216379283744\n      - 2.396839767562\n      - 2.498147595925\n      - 2.692161347328\n      - 2.472320188045\n      - 2.132751177783\n      - 1.831874172693\n      - 2.132751177783\n      - 1.831874172693\n      - 1.360815770929\n      - 1.028891127921\n      - 0.8305451863645\n      - 0.6216379283744\n      - 1.360815770929\n      - 2.396839767562\n      - 2.498147595925\n      - 2.472320188046\n      - 1.360815770929\n      - 0.6216379283744\n      - 1.360815770929\n      - 0.6216379283744\n      - 1.360815770929\n      - 1.360815770929\n      - 1.360815770929\n      - 2.303282937582\n      - 2.303282937582\n      - 2.396839767562\n      - 2.498147595924\n      - 2.303282937582\n      - 2.303282937582\n      - 1.763553129541\n      - 1.146903490695\n      - 2.303282937582\n      - 2.303282937582\n      - 2.396839767562\n      - 2.498147595924\n      - 1.763553129541\n      - 1.146903490695\n      - 2.303282937582\n      - 2.303282937582\n  -   - 1.24344978758e-14\n      - -5.329070518201e-14\n      - 9.947598300641e-14\n      - 2.13162820728e-14\n      - 3.552713678801e-15\n      - -3.552713678801e-15\n      - 0.0\n      - 3.552713678801e-15\n      - 3.552713678801e-15\n      - -2.6645352591e-15\n      - -4.440892098501e-15\n      - 8.881784197001e-16\n      - -8.881784197001e-16\n      - 6.661338147751e-15\n      - 4.440892098501e-16\n      - 4.440892098501e-16\n      - -8.881784197001e-16\n      - -1.59872115546e-14\n      - -1.7763568394e-14\n      - -2.48689957516e-14\n      - -3.552713678801e-15\n      - -1.42108547152e-14\n      - -5.329070518201e-15\n      - 1.7763568394e-15\n      - 3.552713678801e-15\n      - 1.7763568394e-15\n      - 1.7763568394e-15\n      - 0.0\n  
    - 1.7763568394e-15\n      - -8.881784197001e-16\n      - 8.881784197001e-16\n      - 4.440892098501e-16\n      - 0.0\n      - 8.881784197001e-16\n      - -1.24344978758e-14\n      - -5.151434834261e-14\n      - -3.19744231092e-14\n      - -1.42108547152e-14\n      - -1.7763568394e-14\n      - 0.0\n      - 3.552713678801e-15\n      - 0.0\n      - -1.7763568394e-15\n      - -4.440892098501e-15\n      - 0.0\n      - 8.881784197001e-16\n      - 0.0\n      - 6.217248937901e-15\n      - 1.998401444325e-15\n      - 4.440892098501e-16\n      - -4.440892098501e-16\n      - 0.0\n      - -1.136868377216e-13\n      - -4.618527782441e-14\n      - -7.105427357601e-15\n      - 5.329070518201e-15\n      - 0.0\n      - -1.7763568394e-15\n      - 0.0\n      - 1.7763568394e-15\n      - 3.552713678801e-15\n      - -7.993605777300e-15\n      - 2.6645352591e-15\n      - -2.22044604925e-15\n      - 3.10862446895e-15\n      - 1.33226762955e-15\n      - 2.22044604925e-16\n      - -1.110223024625e-15\n      - -3.552713678801e-15\n      - 9.947598300641e-14\n      - -3.552713678801e-15\n      - 5.551115123126e-15\n      - 1.7763568394e-15\n      - 1.7763568394e-15\n      - -8.881784197001e-16\n      - -1.24344978758e-14\n      - 0.0\n      - -8.881784197001e-16\n      - 3.552713678801e-15\n      - 1.33226762955e-14\n      - 6.217248937901e-15\n      - -5.551115123126e-16\n      - -1.86517468137e-14\n      - 1.101341240428e-13\n      - -7.105427357601e-15\n      - 2.6645352591e-15\n      - 2.22044604925e-15\n      - 2.6645352591e-15\n      - -8.881784197001e-16\n      - -1.24344978758e-14\n      - -1.06581410364e-14\n      - 3.552713678801e-15\n      - 6.661338147751e-15\n      - -8.881784197001e-16\n      - 1.33226762955e-14\n      - 1.7763568394e-15\n      - -8.881784197001e-16\n      - -1.86517468137e-14\n      - 3.19744231092e-14\n      - 1.68753899743e-14\n      - -2.6645352591e-15\n      - 6.217248937901e-15\n      - -1.7763568394e-15\n      - -8.881784197001e-16\n      - 
6.217248937901e-15\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - 4.440892098501e-16\n      - -1.50990331349e-14\n      - 9.592326932761e-14\n      - 0.0\n      - 0.0\n      - 6.661338147751e-16\n      - -1.110223024625e-15\n      - -1.7763568394e-15\n      - 1.33226762955e-14\n      - 0.0\n      - -8.881784197001e-16\n      - 4.440892098501e-16\n      - 1.06581410364e-14\n      - 1.7763568394e-14\n      - -2.6645352591e-15\n      - -3.552713678801e-15\n      - -8.881784197001e-16\n      - 2.6645352591e-15\n      - 0.0\n      - 3.552713678801e-15\n      - 8.881784197001e-16\n      - 8.881784197001e-16\n      - 0.0\n      - -1.24344978758e-14\n      - -2.13162820728e-14\n      - 9.592326932761e-14\n      - 0.0\n      - -2.57571741713e-14\n      - 1.7763568394e-15\n      - -8.881784197001e-16\n      - 1.7763568394e-15\n      - -3.552713678801e-15\n      - 7.771561172376e-16\n      - -1.998401444325e-15\n      - 4.440892098501e-15\n      - 1.443289932013e-15\n      - 2.775557561563e-16\n      - -1.665334536938e-16\n      - 7.105427357601e-15\n      - 8.881784197001e-15\n      - -1.7763568394e-15\n      - 2.22044604925e-15\n      - 1.24344978758e-14\n      - 3.552713678801e-15\n      - 1.24344978758e-14\n      - 1.42108547152e-14\n      - -2.6645352591e-15\n      - -5.329070518201e-15\n      - 0.0\n      - 0.0\n      - 0.0\n      - 1.7763568394e-15\n      - 7.105427357601e-15\n      - -8.881784197001e-16\n      - 0.0\n      - 0.0\n      - -2.6645352591e-15\n      - -4.618527782441e-14\n      - -7.105427357601e-15\n      - 5.329070518201e-15\n      - 2.6645352591e-15\n      - -8.881784197001e-16\n      - 1.7763568394e-15\n      - -8.881784197001e-16\n      - 5.329070518201e-15\n      - -2.22044604925e-16\n      - 4.440892098501e-16\n      - 8.881784197001e-16\n      - 7.105427357601e-15\n      - 7.105427357601e-15\n      - 3.153033389935e-14\n      - 0.0\n      - -4.440892098501e-15\n      - 0.0\n      - 2.6645352591e-15\n      - -1.7763568394e-15\n  
    - -8.881784197001e-16\n      - 8.881784197001e-16\n      - -8.881784197001e-16\n      - 0.0\n      - 4.884981308351e-15\n      - 8.881784197001e-16\n      - -1.06581410364e-14\n      - -1.7763568394e-15\n      - 2.6645352591e-15\n      - 2.22044604925e-16\n      - -7.549516567451e-15\n      - -4.440892098501e-16\n      - 6.217248937901e-15\n      - 6.217248937901e-15\n      - 3.552713678801e-15\n      - 4.440892098501e-16\n      - -1.7763568394e-15\n      - -1.7763568394e-15\n      - 4.618527782441e-14\n      - 1.7763568394e-15\n      - -1.7763568394e-15\n      - -1.7763568394e-15\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - 8.881784197001e-15\n      - 5.684341886081e-14\n      - 0.0\n      - -2.22044604925e-15\n      - -8.881784197001e-16\n      - -1.7763568394e-15\n  -   - -1.06581410364e-14\n      - -1.95399252334e-14\n      - 1.24344978758e-14\n      - 7.105427357601e-15\n      - 4.440892098501e-15\n      - 2.442490654175e-15\n      - -1.998401444325e-15\n      - 3.330669073875e-16\n      - -1.33226762955e-15\n      - -1.110223024625e-16\n      - 1.33226762955e-15\n      - 1.7763568394e-15\n      - -6.661338147751e-16\n      - 4.440892098501e-16\n      - 0.0\n      - 2.22044604925e-16\n      - 0.0\n      - 0.0\n      - 3.19744231092e-14\n      - -1.7763568394e-14\n      - 1.7763568394e-15\n      - 1.7763568394e-15\n      - 2.6645352591e-15\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - -1.33226762955e-15\n      - 4.440892098501e-16\n      - 0.0\n      - -8.881784197001e-16\n      - 0.0\n      - 1.110223024625e-16\n      - -2.22044604925e-16\n      - 1.7763568394e-15\n      - -3.552713678801e-15\n      - 1.59872115546e-14\n      - 8.881784197001e-16\n      - -3.10862446895e-15\n      - 4.440892098501e-16\n      - 1.7763568394e-15\n      - 8.881784197001e-16\n      - 8.881784197001e-16\n      - 1.33226762955e-15\n      - -4.440892098501e-16\n      - 4.440892098501e-16\n      - 
-8.881784197001e-16\n      - 1.7763568394e-15\n      - -8.881784197001e-16\n      - 4.440892098501e-16\n      - -1.110223024625e-16\n      - 8.881784197001e-16\n      - -2.30926389122e-14\n      - 1.7763568394e-15\n      - 2.6645352591e-15\n      - 2.22044604925e-16\n      - 4.440892098501e-16\n      - 0.0\n      - 8.881784197001e-16\n      - 0.0\n      - 4.440892098501e-16\n      - -2.22044604925e-15\n      - 1.33226762955e-15\n      - -8.881784197001e-16\n      - -1.33226762955e-15\n      - 0.0\n      - 4.440892098501e-16\n      - 4.440892098501e-16\n      - -1.06581410364e-14\n      - -1.24344978758e-14\n      - 0.0\n      - 8.881784197001e-16\n      - -2.6645352591e-15\n      - -4.440892098501e-16\n      - -3.330669073875e-16\n      - 1.7763568394e-15\n      - 3.552713678801e-14\n      - -8.881784197001e-16\n      - 1.7763568394e-15\n      - 3.552713678801e-15\n      - -4.440892098501e-16\n      - 0.0\n      - -4.440892098501e-15\n      - 6.217248937901e-15\n      - -3.552713678801e-15\n      - -4.440892098501e-16\n      - -8.881784197001e-16\n      - 0.0\n      - 0.0\n      - 1.7763568394e-15\n      - 2.84217094304e-14\n      - -4.440892098501e-15\n      - 2.6645352591e-15\n      - -3.330669073875e-16\n      - 3.552713678801e-15\n      - -2.6645352591e-15\n      - 0.0\n      - -4.440892098501e-15\n      - -1.7763568394e-15\n      - 9.103828801926e-15\n      - -3.552713678801e-15\n      - 0.0\n      - 0.0\n      - -4.440892098501e-16\n      - 2.6645352591e-15\n      - -4.440892098501e-16\n      - 1.110223024625e-16\n      - 1.110223024625e-16\n      - -8.881784197001e-16\n      - -7.993605777300e-15\n      - 1.33226762955e-15\n      - 6.661338147751e-16\n      - 0.0\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - 3.552713678801e-15\n      - 4.440892098501e-16\n      - 2.22044604925e-16\n      - -2.22044604925e-16\n      - 1.33226762955e-15\n      - -1.59872115546e-14\n      - 5.551115123126e-15\n      - 2.22044604925e-16\n      - 
-8.881784197001e-16\n      - 8.881784197001e-16\n      - 1.110223024625e-16\n      - -3.552713678801e-15\n      - -8.881784197001e-16\n      - -4.718447854657e-16\n      - 0.0\n      - 1.7763568394e-15\n      - -3.552713678801e-15\n      - -7.993605777300e-15\n      - 7.993605777301e-15\n      - -1.199040866595e-14\n      - -1.7763568394e-15\n      - 1.7763568394e-15\n      - 8.881784197001e-16\n      - 8.881784197001e-16\n      - 8.881784197001e-16\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - 0.0\n      - 0.0\n      - 4.440892098501e-16\n      - -5.329070518201e-15\n      - 0.0\n      - 4.773959005888e-15\n      - -8.881784197001e-16\n      - 0.0\n      - 4.440892098501e-16\n      - -1.398881011028e-14\n      - -1.7763568394e-15\n      - 5.551115123126e-15\n      - -3.552713678801e-15\n      - -4.440892098501e-16\n      - 0.0\n      - -2.22044604925e-16\n      - 0.0\n      - 4.440892098501e-16\n      - 2.22044604925e-16\n      - 8.881784197001e-16\n      - 0.0\n      - 1.7763568394e-15\n      - -2.6645352591e-14\n      - 8.659739592076e-15\n      - 3.552713678801e-15\n      - 8.881784197001e-16\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - 4.440892098501e-16\n      - -1.33226762955e-15\n      - 0.0\n      - 0.0\n      - 0.0\n      - -1.7763568394e-15\n      - 1.59872115546e-14\n      - 1.7763568394e-15\n      - 0.0\n      - 4.440892098501e-16\n      - 0.0\n      - -4.440892098501e-16\n      - 1.7763568394e-15\n      - 2.6645352591e-15\n      - -4.440892098501e-16\n      - 2.22044604925e-16\n      - 1.665334536938e-16\n      - 5.329070518201e-15\n      - 3.552713678801e-15\n      - 2.84217094304e-14\n      - -4.440892098501e-16\n      - 0.0\n      - -6.661338147751e-16\n      - -3.552713678801e-15\n      - 4.440892098501e-16\n      - 8.881784197001e-16\n      - -4.440892098501e-16\n      - -3.552713678801e-15\n      - 2.6645352591e-15\n      - 1.7763568394e-15\n      - 3.552713678801e-15\n      - 6.217248937901e-15\n      
- 4.440892098501e-16\n      - -4.440892098501e-16\n      - -8.881784197001e-16\n      - 0.0\n      - -3.10862446895e-15\n      - -1.7763568394e-15\n      - 1.24344978758e-14\n      - 1.59872115546e-14\n      - -4.440892098501e-16\n      - 8.881784197001e-16\n      - -3.10862446895e-15\n      - 4.440892098501e-16\n  -   - -2.735578163993e-08\n      - -5.45305738342e-08\n      - 4.426374289324e-08\n      - -4.691855792771e-10\n      - -2.819547262334e-09\n      - -5.672461611539e-09\n      - -6.022133902661e-09\n      - -5.938645131209e-09\n      - -6.026155574546e-09\n      - -5.914529310758e-09\n      - -1.108718095111e-08\n      - -5.205436082178e-11\n      - -1.303774865846e-09\n      - 3.85213638765e-10\n      - -1.12522080542e-09\n      - -1.038296559841e-09\n      - -9.272147494244e-10\n      - -2.739852789091e-08\n      - -5.461970431497e-08\n      - 4.424782673595e-08\n      - -4.685034582508e-10\n      - -2.810566002154e-09\n      - -5.653987500409e-09\n      - -6.050512979527e-09\n      - -5.93539084548e-09\n      - -6.050512979527e-09\n      - -5.92861226778e-09\n      - -1.110882408284e-08\n      - -5.200462283028e-11\n      - -1.299493845863e-09\n      - 3.87061049878e-10\n      - -1.12522080542e-09\n      - -1.042401720497e-09\n      - -9.273968260004e-10\n      - -2.746151039901e-08\n      - -5.433957994683e-08\n      - 4.423691279953e-08\n      - -4.692992661148e-10\n      - -2.817955646606e-09\n      - -5.670756308973e-09\n      - -6.043791245247e-09\n      - -5.925457458034e-09\n      - -6.050569822946e-09\n      - -5.927120128035e-09\n      - -1.106290881125e-08\n      - -5.205436082178e-11\n      - -1.303028795974e-09\n      - 3.85909970646e-10\n      - -1.127881787966e-09\n      - -1.040435293476e-09\n      - -9.260201494499e-10\n      - -2.734941517701e-08\n      - -5.424772098195e-08\n      - 4.424055077834e-08\n      - -4.674802767113e-10\n      - -2.812839738908e-09\n      - -5.670756308973e-09\n      - -6.039726940799e-09\n      - 
-5.925457458034e-09\n      - -6.050512979527e-09\n      - -5.93539084548e-09\n      - -1.104555735765e-08\n      - -5.22533127878e-11\n      - -1.303213537085e-09\n      - 3.862474784455e-10\n      - -1.128016791085e-09\n      - -1.036601915416e-09\n      - -9.278338097829e-10\n      - -2.736760507105e-08\n      - 4.430148692336e-08\n      - -6.124395213192e-09\n      - 6.223643822523e-09\n      - 6.227637072698e-09\n      - 6.216559711447e-09\n      - -8.725535849408e-10\n      - -2.746151039901e-08\n      - 4.422872734722e-08\n      - -6.108592742748e-09\n      - 6.207599767549e-09\n      - 6.222357740171e-09\n      - 6.224738058336e-09\n      - -8.727347733384e-10\n      - -2.734918780334e-08\n      - 4.453613655642e-08\n      - -6.112173878137e-09\n      - 6.22451068466e-09\n      - 6.227359961031e-09\n      - 6.222300896752e-09\n      - -8.725198341608e-10\n      - -2.746151039901e-08\n      - 4.457479008124e-08\n      - -6.087390147513e-09\n      - 6.217128145636e-09\n      - -8.725535849408e-10\n      - 6.222357740171e-09\n      - 6.227637072698e-09\n      - -8.725198341608e-10\n      - -2.734918780334e-08\n      - 4.454204827198e-08\n      - -2.825686351571e-09\n      - -6.100492555561e-09\n      - -5.898698418605e-09\n      - -5.89793103245e-09\n      - -6.570232358172e-09\n      - 6.235552518774e-09\n      - -1.462325371904e-09\n      - -1.088713119657e-09\n      - -8.705525189612e-10\n      - -2.73569185083e-08\n      - 4.454068402993e-08\n      - -2.808064891724e-09\n      - -6.131557483968e-09\n      - -5.876401587557e-09\n      - -5.897845767322e-09\n      - -6.565869625774e-09\n      - 6.222357740171e-09\n      - -1.463405396862e-09\n      - -1.089073720095e-09\n      - -8.69984972951e-10\n      - -2.731030690484e-08\n      - 4.454204827198e-08\n      - -2.806132215483e-09\n      - -6.112287564974e-09\n      - -5.890328225178e-09\n      - -5.881020115339e-09\n      - -6.602697055769e-09\n      - 6.208729530499e-09\n      - -1.467945764944e-09\n      
- -1.086476686396e-09\n      - -8.746043889118e-10\n      - -2.746151039901e-08\n      - -5.454558049678e-08\n      - 4.454068402993e-08\n      - -4.674802767113e-10\n      - -2.816477717715e-09\n      - -5.670813152392e-09\n      - -5.936954039498e-09\n      - -5.93111337821e-09\n      - -1.104545788166e-08\n      - -5.2018833685e-11\n      - -1.29870869614e-09\n      - 3.867874909247e-10\n      - -1.12415321496e-09\n      - -1.038033659029e-09\n      - -9.241301057727e-10\n      - 6.206445135604e-09\n      - 6.206281710774e-09\n      - 6.214690984052e-09\n      - 6.227359961031e-09\n      - 6.216112069524e-09\n      - 6.232355076463e-09\n      - -2.740466698015e-08\n      - 4.426556188264e-08\n      - -2.806132215483e-09\n      - -6.098474614191e-09\n      - -5.856662710357e-09\n      - -6.574133237791e-09\n      - -5.885155474061e-09\n      - -6.587036693873e-09\n      - 6.232085070224e-09\n      - -1.464133703166e-09\n      - -1.089770051976e-09\n      - -8.732499168218e-10\n      - -2.739443516475e-08\n      - 4.424418875715e-08\n      - -2.815454536176e-09\n      - -6.108251682235e-09\n      - -5.863171281817e-09\n      - -6.585437972717e-09\n      - -5.886150233891e-09\n      - -6.576577504802e-09\n      - 6.217746317816e-09\n      - -1.467178378789e-09\n      - -1.090377566015e-09\n      - -8.717009336578e-10\n      - -2.741853677435e-08\n      - 4.456569513422e-08\n      - -2.813635546772e-09\n      - -6.107967465141e-09\n      - -5.881588549528e-09\n      - -6.590155976482e-09\n      - -5.854928986082e-09\n      - -6.584819800537e-09\n      - 6.221597459444e-09\n      - -1.46228273934e-09\n      - -1.089073720095e-09\n      - -8.693215036715e-10\n      - 6.218208170594e-09\n      - -2.752040018095e-08\n      - 4.457479008124e-08\n      - -6.096939841882e-09\n      - 6.222300896752e-09\n      - -8.720668631668e-10\n      - 6.209738501184e-09\n      - -8.718927801965e-10\n      - 6.226734683423e-09\n      - 6.224738058336e-09\n      - 6.208729530499e-09\n   
   - -6.025345555827e-09\n      - -6.015824283168e-09\n      - -2.741762727965e-08\n      - 4.41937118012e-08\n      - -6.025857146597e-09\n      - -6.039897471055e-09\n      - -1.109327030235e-08\n      - -1.30298616341e-09\n      - -6.022730758559e-09\n      - -6.028955112924e-09\n      - -2.738147486525e-08\n      - 4.430512490217e-08\n      - -1.106290881125e-08\n      - -1.301476260096e-09\n      - -6.022730758559e-09\n      - -6.022105480952e-09\n  -   - -3.552713678801e-15\n      - -3.552713678801e-15\n      - 9.769962616701e-15\n      - 5.329070518201e-15\n      - 6.217248937901e-15\n      - 3.774758283726e-15\n      - -2.553512956638e-15\n      - 4.440892098501e-16\n      - -1.110223024625e-15\n      - 0.0\n      - 8.881784197001e-16\n      - 1.7763568394e-15\n      - 2.22044604925e-16\n      - 0.0\n      - 2.22044604925e-16\n      - 2.22044604925e-16\n      - -2.22044604925e-16\n      - 0.0\n      - 1.42108547152e-14\n      - -1.42108547152e-14\n      - -1.7763568394e-15\n      - 1.7763568394e-15\n      - 2.6645352591e-15\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - -4.440892098501e-16\n      - -8.881784197001e-16\n      - 4.440892098501e-16\n      - 0.0\n      - 4.440892098501e-16\n      - 2.22044604925e-16\n      - -5.551115123126e-16\n      - -1.110223024625e-16\n      - -8.881784197001e-16\n      - 0.0\n      - 1.95399252334e-14\n      - 5.329070518201e-15\n      - -5.329070518201e-15\n      - 2.442490654175e-15\n      - 8.881784197001e-16\n      - 0.0\n      - 0.0\n      - 4.440892098501e-16\n      - -4.440892098501e-16\n      - -8.881784197001e-16\n      - -1.33226762955e-15\n      - 4.440892098501e-16\n      - -4.440892098501e-16\n      - 0.0\n      - 0.0\n      - 4.440892098501e-16\n      - -2.22044604925e-14\n      - 1.95399252334e-14\n      - 6.439293542826e-15\n      - 6.883382752676e-15\n      - 2.442490654175e-15\n      - -8.881784197001e-16\n      - 0.0\n      - -8.881784197001e-16\n      - 
-8.881784197001e-16\n      - -1.7763568394e-15\n      - 8.881784197001e-16\n      - -4.440892098501e-16\n      - -1.33226762955e-15\n      - 0.0\n      - 0.0\n      - 2.22044604925e-16\n      - -7.105427357601e-15\n      - 0.0\n      - 1.7763568394e-15\n      - 2.6645352591e-15\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - -2.22044604925e-16\n      - -8.881784197001e-16\n      - 3.28626015289e-14\n      - 1.7763568394e-15\n      - 1.7763568394e-15\n      - 3.552713678801e-15\n      - -1.33226762955e-15\n      - 0.0\n      - 3.552713678801e-15\n      - 1.42108547152e-14\n      - -2.6645352591e-15\n      - 8.881784197001e-16\n      - -2.6645352591e-15\n      - 1.7763568394e-15\n      - 0.0\n      - -8.881784197001e-16\n      - 1.95399252334e-14\n      - -3.10862446895e-15\n      - 1.7763568394e-15\n      - -2.22044604925e-16\n      - 3.552713678801e-15\n      - -8.881784197001e-16\n      - 0.0\n      - 3.552713678801e-15\n      - -1.95399252334e-14\n      - 7.105427357601e-15\n      - -3.552713678801e-15\n      - 0.0\n      - 0.0\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - 0.0\n      - -1.110223024625e-16\n      - -8.326672684688e-17\n      - -3.552713678801e-15\n      - 0.0\n      - -2.22044604925e-15\n      - 4.440892098501e-16\n      - 0.0\n      - 0.0\n      - -8.881784197001e-16\n      - 3.552713678801e-15\n      - 6.661338147751e-16\n      - 0.0\n      - 0.0\n      - 5.329070518201e-15\n      - -3.37507799486e-14\n      - -6.661338147751e-16\n      - 2.553512956638e-15\n      - -8.881784197001e-16\n      - 8.881784197001e-16\n      - 4.440892098501e-16\n      - 0.0\n      - 0.0\n      - -3.053113317719e-16\n      - 0.0\n      - -8.881784197001e-16\n      - -3.37507799486e-14\n      - 0.0\n      - 1.24344978758e-14\n      - -1.199040866595e-14\n      - 0.0\n      - 8.881784197001e-16\n      - 4.440892098501e-16\n      - 0.0\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - 
0.0\n      - 0.0\n      - 0.0\n      - -7.105427357601e-15\n      - -1.7763568394e-15\n      - 2.997602166488e-15\n      - -2.6645352591e-15\n      - -2.6645352591e-15\n      - -1.554312234475e-15\n      - -1.06581410364e-14\n      - -3.28626015289e-14\n      - -6.661338147751e-16\n      - -2.22044604925e-15\n      - -1.110223024625e-15\n      - 4.440892098501e-16\n      - 1.110223024625e-15\n      - -4.440892098501e-16\n      - 1.998401444325e-15\n      - 4.440892098501e-16\n      - 4.440892098501e-16\n      - 0.0\n      - 0.0\n      - -8.881784197001e-15\n      - 5.773159728051e-15\n      - 2.6645352591e-15\n      - 1.7763568394e-15\n      - 0.0\n      - -4.440892098501e-16\n      - -8.881784197001e-16\n      - -2.22044604925e-15\n      - 8.881784197001e-16\n      - 1.110223024625e-16\n      - 4.440892098501e-16\n      - 2.6645352591e-15\n      - 2.13162820728e-14\n      - 0.0\n      - 0.0\n      - 8.881784197001e-16\n      - 0.0\n      - -8.881784197001e-16\n      - 8.881784197001e-16\n      - 2.6645352591e-15\n      - 0.0\n      - 0.0\n      - -2.775557561563e-17\n      - 3.552713678801e-15\n      - 3.552713678801e-15\n      - 1.95399252334e-14\n      - -1.33226762955e-15\n      - 1.7763568394e-15\n      - -6.661338147751e-16\n      - -1.7763568394e-15\n      - 0.0\n      - 0.0\n      - -1.33226762955e-15\n      - 0.0\n      - 2.6645352591e-15\n      - 0.0\n      - 7.105427357601e-15\n      - 1.68753899743e-14\n      - -1.33226762955e-15\n      - 8.881784197001e-16\n      - -8.881784197001e-16\n      - 0.0\n      - -1.7763568394e-15\n      - -1.33226762955e-15\n      - 1.24344978758e-14\n      - 0.0\n      - -4.440892098501e-16\n      - 4.440892098501e-16\n      - -1.7763568394e-15\n      - 1.7763568394e-15\n  -   - -1.06581410364e-14\n      - 3.552713678801e-15\n      - 3.552713678801e-15\n      - 7.105427357601e-15\n      - 8.881784197001e-15\n      - 0.0\n      - -2.6645352591e-15\n      - 8.881784197001e-16\n      - -1.7763568394e-15\n      - 
-8.881784197001e-16\n      - 1.7763568394e-15\n      - 8.881784197001e-16\n      - 0.0\n      - 1.7763568394e-15\n      - -1.110223024625e-16\n      - -2.22044604925e-16\n      - 0.0\n      - -3.552713678801e-15\n      - 4.263256414561e-14\n      - -7.105427357601e-15\n      - -3.552713678801e-15\n      - -3.552713678801e-15\n      - -3.552713678801e-15\n      - -8.881784197001e-16\n      - 2.22044604925e-16\n      - -8.881784197001e-16\n      - 0.0\n      - -2.6645352591e-15\n      - -4.440892098501e-16\n      - -1.554312234475e-15\n      - 0.0\n      - -1.110223024625e-16\n      - 0.0\n      - -2.22044604925e-16\n      - 3.552713678801e-15\n      - 1.7763568394e-14\n      - 8.881784197001e-15\n      - -3.552713678801e-15\n      - -3.552713678801e-15\n      - -8.881784197001e-16\n      - 4.440892098501e-16\n      - -7.771561172376e-16\n      - -1.33226762955e-15\n      - -1.33226762955e-15\n      - -4.440892098501e-16\n      - -8.881784197001e-16\n      - -1.443289932013e-15\n      - -8.881784197001e-16\n      - -2.081668171172e-16\n      - -2.775557561563e-17\n      - -3.330669073875e-16\n      - 3.552713678801e-15\n      - -5.329070518201e-14\n      - -2.48689957516e-14\n      - -7.105427357601e-15\n      - -5.329070518201e-15\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - -7.771561172376e-16\n      - -8.881784197001e-16\n      - 2.22044604925e-16\n      - -1.7763568394e-15\n      - 2.22044604925e-16\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - -1.110223024625e-16\n      - -1.665334536938e-16\n      - 5.551115123126e-17\n      - 7.105427357601e-15\n      - -3.19744231092e-14\n      - -1.7763568394e-15\n      - 4.440892098501e-16\n      - -5.773159728051e-15\n      - -3.552713678801e-15\n      - -4.440892098501e-16\n      - 3.552713678801e-15\n      - 1.59872115546e-14\n      - -8.881784197001e-16\n      - 1.33226762955e-15\n      - 2.6645352591e-15\n      - 0.0\n      - -3.330669073875e-16\n      - 5.329070518201e-15\n   
   - 1.7763568394e-15\n      - -3.552713678801e-15\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - -1.7763568394e-15\n      - -4.440892098501e-16\n      - 3.552713678801e-15\n      - 8.881784197001e-15\n      - -2.6645352591e-15\n      - 5.773159728051e-15\n      - -4.440892098501e-16\n      - 2.6645352591e-15\n      - -5.773159728051e-15\n      - -4.440892098501e-16\n      - 5.329070518201e-15\n      - -3.730349362741e-14\n      - 8.881784197001e-15\n      - -2.6645352591e-15\n      - -1.110223024625e-16\n      - 9.436895709314e-16\n      - -1.7763568394e-15\n      - -1.33226762955e-15\n      - -8.881784197001e-16\n      - -4.440892098501e-16\n      - 2.22044604925e-16\n      - 5.329070518201e-15\n      - -3.01980662698e-14\n      - 8.881784197001e-16\n      - -1.7763568394e-15\n      - -1.33226762955e-15\n      - 0.0\n      - -1.7763568394e-15\n      - 2.6645352591e-15\n      - 0.0\n      - -3.330669073875e-16\n      - -4.440892098501e-16\n      - 0.0\n      - -5.151434834261e-14\n      - 5.329070518201e-15\n      - -2.6645352591e-15\n      - -1.998401444325e-15\n      - 4.996003610813e-16\n      - -4.440892098501e-16\n      - -3.996802888651e-15\n      - 0.0\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - 3.552713678801e-15\n      - -8.881784197001e-15\n      - -3.01980662698e-14\n      - 5.329070518201e-15\n      - -1.42108547152e-14\n      - -8.881784197001e-16\n      - 1.110223024625e-16\n      - -4.440892098501e-16\n      - 6.661338147751e-16\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - -2.6645352591e-15\n      - 0.0\n      - 0.0\n      - -2.22044604925e-16\n      - -7.105427357601e-15\n      - -3.552713678801e-15\n      - 8.881784197001e-16\n      - -8.881784197001e-16\n      - -3.10862446895e-15\n      - -8.881784197001e-16\n      - -6.217248937901e-15\n      - -2.13162820728e-14\n      - 5.329070518201e-15\n      - -5.329070518201e-15\n      - -1.7763568394e-15\n      - 0.0\n      - 
-1.7763568394e-15\n      - -8.881784197001e-16\n      - 2.22044604925e-15\n      - 4.440892098501e-16\n      - 8.881784197001e-16\n      - 0.0\n      - 7.105427357601e-15\n      - -1.59872115546e-14\n      - 7.993605777301e-15\n      - 2.22044604925e-15\n      - 4.440892098501e-16\n      - -9.992007221626e-16\n      - 0.0\n      - 2.22044604925e-16\n      - -6.661338147751e-15\n      - 0.0\n      - 0.0\n      - 8.881784197001e-16\n      - 7.105427357601e-15\n      - 1.06581410364e-14\n      - 4.440892098501e-16\n      - -2.442490654175e-15\n      - 8.881784197001e-16\n      - -8.881784197001e-16\n      - -1.33226762955e-15\n      - 0.0\n      - 2.22044604925e-15\n      - -6.661338147751e-16\n      - -3.330669073875e-16\n      - 0.0\n      - 8.881784197001e-16\n      - 7.105427357601e-15\n      - 8.881784197001e-15\n      - 0.0\n      - -1.7763568394e-15\n      - -7.910339050454e-16\n      - -5.329070518201e-15\n      - -2.22044604925e-16\n      - 2.22044604925e-15\n      - 0.0\n      - -3.996802888651e-15\n      - 1.998401444325e-15\n      - -2.22044604925e-15\n      - 1.06581410364e-14\n      - -3.01980662698e-14\n      - 0.0\n      - -8.881784197001e-16\n      - -4.440892098501e-16\n      - -8.881784197001e-16\n      - -2.22044604925e-15\n      - -3.552713678801e-15\n      - 0.0\n      - -3.552713678801e-14\n      - -4.440892098501e-16\n      - 3.330669073875e-16\n      - -2.22044604925e-15\n      - 0.0\n  -   - -2.453347747178e-08\n      - -4.890711124972e-08\n      - 3.971540252223e-08\n      - -4.215081617076e-10\n      - -2.531166387598e-09\n      - -5.090015520182e-09\n      - -5.402824854173e-09\n      - -5.326945995421e-09\n      - -5.406533887253e-09\n      - -5.303881778218e-09\n      - -9.95139259885e-09\n      - -4.664002517529e-11\n      - -1.169468077933e-09\n      - 3.465174813755e-10\n      - -1.009041739053e-09\n      - -9.316067917097e-10\n      - -8.316902722072e-10\n      - -2.457070991113e-08\n      - -4.9003332947e-08\n      - 
3.969984163632e-08\n      - -4.210960469209e-10\n      - -2.520792463656e-09\n      - -5.072479325463e-09\n      - -5.43165867839e-09\n      - -5.325112795163e-09\n      - -5.43165867839e-09\n      - -5.320373475115e-09\n      - -9.971742542803e-09\n      - -4.661160346586e-11\n      - -1.165034291262e-09\n      - 3.48336470779e-10\n      - -1.009041739053e-09\n      - -9.349250262858e-10\n      - -8.319460675921e-10\n      - -2.462979864504e-08\n      - -4.874540593391e-08\n      - 3.969002193571e-08\n      - -4.214655291435e-10\n      - -2.529361609049e-09\n      - -5.086377541375e-09\n      - -5.424837468126e-09\n      - -5.313083306646e-09\n      - -5.429683369584e-09\n      - -5.316017848145e-09\n      - -9.926125699167e-09\n      - -4.666844688472e-11\n      - -1.168785956907e-09\n      - 3.474269760773e-10\n      - -1.012267603073e-09\n      - -9.336105222246e-10\n      - -8.307345922276e-10\n      - -2.453501224409e-08\n      - -4.864099878432e-08\n      - 3.969411466187e-08\n      - -4.200586545267e-10\n      - -2.522682507333e-09\n      - -5.086377541375e-09\n      - -5.417248871709e-09\n      - -5.313083306646e-09\n      - -5.43165867839e-09\n      - -5.325112795163e-09\n      - -9.905932074616e-09\n      - -4.685318799602e-11\n      - -1.16909859571e-09\n      - 3.474269760773e-10\n      - -1.011144945551e-09\n      - -9.30121757392e-10\n      - -8.32123703276e-10\n      - -2.455203684804e-08\n      - 3.975333129347e-08\n      - -5.494243282556e-09\n      - 5.58331691991e-09\n      - 5.584197992903e-09\n      - 5.577660999734e-09\n      - -7.832063886326e-10\n      - -2.462979864504e-08\n      - 3.968375494878e-08\n      - -5.482178266902e-09\n      - 5.567116545535e-09\n      - 5.582947437688e-09\n      - 5.582720064012e-09\n      - -7.826947978629e-10\n      - -2.453300851357e-08\n      - 3.997169528702e-08\n      - -5.48229195374e-09\n      - 5.582549533756e-09\n      - 5.58533486128e-09\n      - 5.58033264042e-09\n      - -7.824034753412e-10\n      
- -2.462979864504e-08\n      - 4.000852982244e-08\n      - -5.459440899358e-09\n      - 5.578741024692e-09\n      - -7.832063886326e-10\n      - 5.582947437688e-09\n      - 5.584197992903e-09\n      - -7.824034753412e-10\n      - -2.453300851357e-08\n      - 3.997578801318e-08\n      - -2.534619625294e-09\n      - -5.472671205098e-09\n      - -5.292946525515e-09\n      - -5.289514604101e-09\n      - -5.893511456634e-09\n      - 5.593420837613e-09\n      - -1.311946107307e-09\n      - -9.763994057721e-10\n      - -7.807834379037e-10\n      - -2.454028447119e-08\n      - 3.997533326583e-08\n      - -2.518589781175e-09\n      - -5.499970257006e-09\n      - -5.27222709934e-09\n      - -5.289443549827e-09\n      - -5.889233989365e-09\n      - 5.582947437688e-09\n      - -1.31279875859e-09\n      - -9.767973097040e-10\n      - -7.801634893667e-10\n      - -2.450508418406e-08\n      - 3.997578801318e-08\n      - -2.517722919038e-09\n      - -5.481609832714e-09\n      - -5.282728920974e-09\n      - -5.276390879771e-09\n      - -5.921897638927e-09\n      - 5.568864480665e-09\n      - -1.316720954492e-09\n      - -9.741683015817e-10\n      - -7.849578764763e-10\n      - -2.462979864504e-08\n      - -4.892184790606e-08\n      - 3.997533326583e-08\n      - -4.198312808512e-10\n      - -2.525382569729e-09\n      - -5.087684940008e-09\n      - -5.325020424607e-09\n      - -5.321375340372e-09\n      - -9.905946285471e-09\n      - -4.662581432058e-11\n      - -1.165076923826e-09\n      - 3.474269760773e-10\n      - -1.008160666061e-09\n      - -9.312302040598e-10\n      - -8.287912578453e-10\n      - 5.564075422626e-09\n      - 5.566761274167e-09\n      - 5.577334150075e-09\n      - 5.58533486128e-09\n      - 5.575742534347e-09\n      - 5.589768647951e-09\n      - -2.458268966166e-08\n      - 3.971631201694e-08\n      - -2.517722919038e-09\n      - -5.472060138345e-09\n      - -5.253625090518e-09\n      - -5.896666266381e-09\n      - -5.281549420033e-09\n      - 
-5.911225287036e-09\n      - 5.58955548513e-09\n      - -1.31430510919e-09\n      - -9.776002229954e-10\n      - -7.833289572545e-10\n      - -2.456549452745e-08\n      - 3.969729789333e-08\n      - -2.525453624003e-09\n      - -5.481140874508e-09\n      - -5.259096269583e-09\n      - -5.908617595196e-09\n      - -5.280242021399e-09\n      - -5.902698774207e-09\n      - 5.579806838796e-09\n      - -1.316578845945e-09\n      - -9.776002229954e-10\n      - -7.816733926802e-10\n      - -2.458754977397e-08\n      - 3.999780062713e-08\n      - -2.523478315197e-09\n      - -5.481275877628e-09\n      - -5.276277192934e-09\n      - -5.912788481055e-09\n      - -5.252047685644e-09\n      - -5.904489341901e-09\n      - 5.580417905549e-09\n      - -1.312287167821e-09\n      - -9.767973097040e-10\n      - -7.796998602316e-10\n      - 5.577490469477e-09\n      - -2.470176241332e-08\n      - 4.000852982244e-08\n      - -5.468805852615e-09\n      - 5.58033264042e-09\n      - -7.824567660464e-10\n      - 5.57135138024e-09\n      - -7.82140574529e-10\n      - 5.586642259914e-09\n      - 5.582720064012e-09\n      - 5.568864480665e-09\n      - -5.405695446825e-09\n      - -5.398412383784e-09\n      - -2.459630366047e-08\n      - 3.965256212268e-08\n      - -5.406420200416e-09\n      - -5.417355453119e-09\n      - -9.949395973763e-09\n      - -1.168757535197e-09\n      - -5.401048497333e-09\n      - -5.409134473666e-09\n      - -2.456927461481e-08\n      - 3.975651452492e-08\n      - -9.926125699167e-09\n      - -1.167023810922e-09\n      - -5.401048497333e-09\n      - -5.400458746863e-09\nhistory_criterion:\n  -   - 21.53511643627\n      - 14.80453604351\n      - 6.548558251064\n      - 12.54188075473\n      - 9.282890198608\n      - 2.859555210712\n      - 0.9381817894678\n      - 0.2048532883114\n      - 0.8881817894678\n      - 0.3798532883114\n      - -0.9101956814319\n      - -1.36444138824\n      - -0.9351994446357\n      - -1.055070381505\n      - -1.111335532899\n      - 
-0.1703442432756\n      - 1.580641245921\n      - 19.23511643627\n      - 13.00453604351\n      - 13.94855825106\n      - 11.24188075473\n      - 6.182890198608\n      - -1.240444789288\n      - -0.8618182105322\n      - -1.995146711689\n      - -0.9868182105322\n      - -1.270146711689\n      - -1.135195681432\n      - -0.9144413882404\n      - -3.072699444636\n      - -1.317570381505\n      - -0.9238355328992\n      - 0.9546557567244\n      - -0.3318587540789\n      - 8.635116436265\n      - 15.10453604351\n      - 6.148558251063\n      - 4.841880754733\n      - 5.382890198608\n      - 2.059555210712\n      - -3.361818210532\n      - -2.995146711689\n      - -3.311818210532\n      - -2.395146711689\n      - -2.185195681432\n      - -2.63944138824\n      - -1.985199444636\n      - -1.880070381505\n      - -1.711335532899\n      - -1.407844243276\n      - -0.4818587540789\n      - 2.735116436265\n      - 3.404536043506\n      - 3.148558251063\n      - 3.141880754733\n      - 2.482890198608\n      - 0.5595552107122\n      - -0.7618182105322\n      - -2.995146711689\n      - -0.7993182105322\n      - -2.245146711689\n      - -1.885195681432\n      - -1.96444138824\n      - -1.647699444636\n      - -2.292570381505\n      - -1.486335532899\n      - -1.557844243276\n      - -0.8193587540789\n      - 10.13511643627\n      - 4.748558251063\n      - -2.218096467799\n      - -4.369688200573\n      - -3.659688200573\n      - -1.219688200573\n      - -0.3489655844206\n      - 6.635116436265\n      - 2.248558251063\n      - -1.518096467799\n      - -2.939688200573\n      - -4.029688200573\n      - -2.159688200573\n      - -2.038965584421\n      - 5.435116436265\n      - 3.348558251064\n      - -1.818096467799\n      - -2.909688200573\n      - -4.969688200573\n      - -3.469688200573\n      - -0.5389655844206\n      - 6.635116436265\n      - 5.848558251064\n      - -0.918096467799\n      - -4.219688200573\n      - -0.3489655844206\n      - -4.029688200573\n      - 
-3.659688200573\n      - -0.5389655844206\n      - 5.435116436265\n      - 2.348558251064\n      - -0.0171098013921\n      - -2.718096467799\n      - -4.257793595776\n      - -3.887793595776\n      - -2.006947842151\n      - -2.829688200573\n      - -0.1835757519589\n      - 0.8557490906722\n      - 0.6910344155794\n      - 4.435116436265\n      - 4.348558251064\n      - 0.9828901986079\n      - 0.481903532201\n      - -6.457793595776\n      - -6.137793595776\n      - -1.516947842151\n      - -4.029688200573\n      - -1.013575751959\n      - -0.8342509093278\n      - 1.441034415579\n      - -0.8648835637348\n      - 1.848558251064\n      - 0.6828901986079\n      - 1.081903532201\n      - -5.457793595776\n      - -4.787793595776\n      - 0.1730521578493\n      - -1.139688200573\n      - -3.263575751959\n      - 0.4057490906722\n      - 4.141034415579\n      - 6.635116436265\n      - 2.104536043506\n      - 4.348558251064\n      - 5.641880754733\n      - -0.1171098013921\n      - -2.640444789288\n      - -3.195146711689\n      - -2.325146711689\n      - -4.777695681432\n      - -5.49444138824\n      - -4.762699444636\n      - -5.027570381505\n      - -3.966335532899\n      - -3.510344243276\n      - -2.694358754079\n      - 7.410311799427\n      - 8.980311799427\n      - 1.290311799427\n      - -4.969688200573\n      - -4.709688200573\n      - -0.659688200573\n      - -0.5648835637348\n      - -2.951441748936\n      - 0.1828901986079\n      - 5.081903532201\n      - 3.342206404224\n      - 2.873052157849\n      - 3.162206404224\n      - 2.983052157849\n      - 0.920311799427\n      - 0.1164242480411\n      - 3.925749090672\n      - 2.761034415579\n      - 9.335116436265\n      - 3.648558251063\n      - -0.3171098013921\n      - -5.718096467799\n      - -2.457793595776\n      - -3.126947842151\n      - -1.897793595776\n      - -2.156947842151\n      - -0.539688200573\n      - -2.693575751959\n      - -0.2742509093278\n      - 2.531034415579\n      - -4.664883563735\n  
    - 1.548558251064\n      - -7.017109801392\n      - -4.018096467799\n      - -0.7577935957756\n      - -4.526947842151\n      - -0.4677935957756\n      - -3.876947842151\n      - -3.429688200573\n      - -2.813575751959\n      - -0.9442509093278\n      - 0.6610344155794\n      - -3.089688200573\n      - 9.635116436265\n      - 6.848558251064\n      - 0.781903532201\n      - -3.469688200573\n      - -1.108965584421\n      - -3.839688200573\n      - -0.9189655844206\n      - -1.589688200573\n      - -2.159688200573\n      - -1.139688200573\n      - -4.661818210532\n      - -4.211818210532\n      - 10.33511643627\n      - 3.948558251064\n      - -0.3618182105322\n      - -0.3518182105322\n      - -1.477695681432\n      - -2.132699444636\n      - -1.761818210532\n      - -1.471818210532\n      - 9.935116436265\n      - 3.248558251063\n      - -1.997695681432\n      - -2.472699444636\n      - -1.261818210532\n      - -1.211818210532\n  -   - 25.01562287811\n      - 18.67576650474\n      - 10.71425043997\n      - 16.92850306334\n      - 13.83328982937\n      - 7.61143273431\n      - 5.780449944004\n      - 4.918595910462\n      - 5.730449944004\n      - 5.093595910462\n      - 3.573230198002\n      - 2.843276294294\n      - 2.98078293018\n      - 2.569118760852\n      - 2.229814978179\n      - 2.901300021005\n      - 4.398727952741\n      - 22.71562287811\n      - 16.87576650474\n      - 18.11425043997\n      - 15.62850306334\n      - 10.73328982937\n      - 3.51143273431\n      - 3.980449944004\n      - 2.718595910462\n      - 3.855449944004\n      - 3.443595910462\n      - 3.348230198002\n      - 3.293276294294\n      - 0.8432829301802\n      - 2.306618760852\n      - 2.417314978179\n      - 4.026300021005\n      - 2.486227952741\n      - 12.11562287811\n      - 18.97576650474\n      - 10.31425043997\n      - 9.22850306334\n      - 9.933289829366\n      - 6.81143273431\n      - 1.480449944004\n      - 1.718595910462\n      - 1.530449944004\n      - 2.318595910462\n  
    - 2.298230198002\n      - 1.568276294294\n      - 1.93078293018\n      - 1.744118760852\n      - 1.629814978179\n      - 1.663800021005\n      - 2.336227952741\n      - 6.215622878108\n      - 7.275766504742\n      - 7.314250439974\n      - 7.52850306334\n      - 7.033289829366\n      - 5.31143273431\n      - 4.080449944004\n      - 1.718595910462\n      - 4.042949944004\n      - 2.468595910462\n      - 2.598230198002\n      - 2.243276294294\n      - 2.26828293018\n      - 1.331618760852\n      - 1.854814978179\n      - 1.513800021005\n      - 1.998727952741\n      - 13.61562287811\n      - 8.914250439974\n      - 2.617857443871\n      - -0.02069875634249\n      - 0.6893012436575\n      - 3.129301243658\n      - 2.348674115464\n      - 10.11562287811\n      - 6.414250439974\n      - 3.317857443871\n      - 1.409301243658\n      - 0.3193012436575\n      - 2.189301243658\n      - 0.6586741154643\n      - 8.915622878108\n      - 7.514250439974\n      - 3.017857443871\n      - 1.439301243658\n      - -0.6206987563425\n      - 0.8793012436575\n      - 2.158674115464\n      - 10.11562287811\n      - 10.01425043997\n      - 3.917857443871\n      - 0.1293012436575\n      - 2.348674115464\n      - 0.3193012436575\n      - 0.6893012436575\n      - 2.158674115464\n      - 8.915622878108\n      - 6.514250439974\n      - 4.533289829366\n      - 2.117857443871\n      - 0.5381907245488\n      - 0.9081907245488\n      - 2.599956711546\n      - 1.519301243658\n      - 3.585844975595\n      - 4.0602621231\n      - 3.388674115464\n      - 7.915622878108\n      - 8.514250439974\n      - 5.533289829366\n      - 5.317857443871\n      - -1.661809275451\n      - -1.341809275451\n      - 3.089956711546\n      - 0.3193012436575\n      - 2.755844975595\n      - 2.3702621231\n      - 4.138674115464\n      - 2.615622878108\n      - 6.014250439974\n      - 5.233289829366\n      - 5.917857443871\n      - -0.6618092754512\n      - 0.008190724548808\n      - 4.779956711546\n      - 
3.209301243658\n      - 0.505844975595\n      - 3.6102621231\n      - 6.838674115464\n      - 10.11562287811\n      - 5.975766504742\n      - 8.514250439974\n      - 10.02850306334\n      - 4.433289829366\n      - 2.11143273431\n      - 1.518595910462\n      - 2.388595910462\n      - -0.2942698019983\n      - -1.286723705706\n      - -0.8467170698198\n      - -1.403381239148\n      - -0.6251850218209\n      - -0.4386999789948\n      - 0.1237279527411\n      - 11.75930124366\n      - 13.32930124366\n      - 5.639301243658\n      - -0.6206987563425\n      - -0.3606987563425\n      - 3.689301243658\n      - 2.915622878108\n      - 1.214250439974\n      - 4.733289829366\n      - 9.917857443871\n      - 8.138190724549\n      - 7.479956711546\n      - 7.958190724549\n      - 7.589956711546\n      - 5.269301243658\n      - 3.885844975595\n      - 7.1302621231\n      - 5.458674115464\n      - 12.81562287811\n      - 7.814250439974\n      - 4.233289829366\n      - -0.8821425561292\n      - 2.338190724549\n      - 1.479956711546\n      - 2.898190724549\n      - 2.449956711546\n      - 3.809301243658\n      - 1.075844975595\n      - 2.9302621231\n      - 5.228674115464\n      - -1.184377121892\n      - 5.714250439974\n      - -2.466710170634\n      - 0.8178574438708\n      - 4.038190724549\n      - 0.07995671154575\n      - 4.328190724549\n      - 0.7299567115457\n      - 0.9193012436575\n      - 0.955844975595\n      - 2.2602621231\n      - 3.358674115464\n      - 1.259301243658\n      - 13.11562287811\n      - 11.01425043997\n      - 5.617857443871\n      - 0.8793012436575\n      - 1.588674115464\n      - 0.5093012436575\n      - 1.778674115464\n      - 2.759301243658\n      - 2.189301243658\n      - 3.209301243658\n      - 0.1804499440042\n      - 0.6304499440042\n      - 13.81562287811\n      - 8.114250439974\n      - 4.480449944004\n      - 4.490449944004\n      - 3.005730198002\n      - 1.78328293018\n      - 3.080449944004\n      - 3.370449944004\n      - 
13.41562287811\n      - 7.414250439974\n      - 2.485730198002\n      - 1.44328293018\n      - 3.580449944004\n      - 3.630449944004\n  -   - 84.68988065196\n      - 70.73054388289\n      - 56.46322643196\n      - 57.38823556867\n      - 49.80586460657\n      - 36.42009030556\n      - 24.97150307276\n      - 18.1321687762\n      - 24.92150307276\n      - 18.3071687762\n      - 12.85194135284\n      - 9.428755329368\n      - 7.671466840337\n      - 5.900125885276\n      - 4.571195356425\n      - 4.515228493968\n      - 5.474513604881\n      - 82.38988065196\n      - 68.93054388289\n      - 63.86322643196\n      - 56.08823556867\n      - 46.70586460657\n      - 32.32009030556\n      - 23.17150307276\n      - 15.9321687762\n      - 23.04650307276\n      - 16.6571687762\n      - 12.62694135284\n      - 9.878755329368\n      - 5.533966840337\n      - 5.637625885276\n      - 4.758695356425\n      - 5.640228493968\n      - 3.562013604881\n      - 71.78988065196\n      - 71.03054388289\n      - 56.06322643196\n      - 49.68823556867\n      - 45.90586460657\n      - 35.62009030556\n      - 20.67150307276\n      - 14.9321687762\n      - 20.72150307276\n      - 15.5321687762\n      - 11.57694135284\n      - 8.153755329368\n      - 6.621466840337\n      - 5.075125885276\n      - 3.971195356425\n      - 3.277728493968\n      - 3.412013604881\n      - 65.88988065196\n      - 59.33054388289\n      - 53.06322643196\n      - 47.98823556867\n      - 43.00586460657\n      - 34.12009030556\n      - 23.27150307276\n      - 14.9321687762\n      - 23.23400307276\n      - 15.6821687762\n      - 11.87694135284\n      - 8.828755329368\n      - 6.958966840337\n      - 4.662625885276\n      - 4.196195356425\n      - 3.127728493968\n      - 3.074513604881\n      - 73.28988065196\n      - 54.66322643196\n      - 26.00799822147\n      - 7.789506147668\n      - 8.499506147668\n      - 10.93950614767\n      - 3.209942501544\n      - 69.78988065196\n      - 52.16322643196\n      - 26.70799822147\n 
     - 9.219506147668\n      - 8.129506147668\n      - 9.999506147668\n      - 1.519942501544\n      - 68.58988065196\n      - 53.26322643196\n      - 26.40799822147\n      - 9.249506147668\n      - 7.189506147668\n      - 8.689506147668\n      - 3.019942501544\n      - 69.78988065196\n      - 55.76322643196\n      - 27.30799822147\n      - 7.939506147668\n      - 3.209942501544\n      - 8.129506147668\n      - 8.499506147668\n      - 3.019942501544\n      - 68.58988065196\n      - 52.26322643196\n      - 40.50586460657\n      - 25.50799822147\n      - 16.41235765092\n      - 16.78235765092\n      - 13.65241143766\n      - 9.329506147668\n      - 7.541813269635\n      - 6.010338273791\n      - 4.249942501544\n      - 67.58988065196\n      - 54.26322643196\n      - 41.50586460657\n      - 28.70799822147\n      - 14.21235765092\n      - 14.53235765092\n      - 14.14241143766\n      - 8.129506147668\n      - 6.711813269635\n      - 4.320338273791\n      - 4.999942501544\n      - 62.28988065196\n      - 51.76322643196\n      - 41.20586460657\n      - 29.30799822147\n      - 15.21235765092\n      - 15.88235765092\n      - 15.83241143766\n      - 11.01950614767\n      - 4.461813269635\n      - 5.560338273791\n      - 7.699942501544\n      - 69.78988065196\n      - 58.03054388289\n      - 54.26322643196\n      - 50.48823556867\n      - 40.40586460657\n      - 30.92009030556\n      - 14.7321687762\n      - 15.6021687762\n      - 8.984441352835\n      - 5.298755329368\n      - 3.843966840337\n      - 1.927625885276\n      - 1.716195356425\n      - 1.175228493968\n      - 1.199513604881\n      - 19.56950614767\n      - 21.13950614767\n      - 13.44950614767\n      - 7.189506147668\n      - 7.449506147668\n      - 11.49950614767\n      - 62.58988065196\n      - 46.96322643196\n      - 40.70586460657\n      - 33.30799822147\n      - 24.01235765092\n      - 18.53241143766\n      - 23.83235765092\n      - 18.64241143766\n      - 13.07950614767\n      - 7.841813269635\n      - 
9.080338273791\n      - 6.319942501544\n      - 72.48988065196\n      - 53.56322643196\n      - 40.20586460657\n      - 22.50799822147\n      - 18.21235765092\n      - 12.53241143766\n      - 18.77235765092\n      - 13.50241143766\n      - 11.61950614767\n      - 5.031813269635\n      - 4.880338273791\n      - 6.089942501544\n      - 58.48988065196\n      - 51.46322643196\n      - 33.50586460657\n      - 24.20799822147\n      - 19.91235765092\n      - 11.13241143766\n      - 20.20235765092\n      - 11.78241143766\n      - 8.729506147668\n      - 4.911813269635\n      - 4.210338273791\n      - 4.219942501544\n      - 9.069506147668\n      - 72.78988065196\n      - 56.76322643196\n      - 29.00799822147\n      - 8.689506147668\n      - 2.449942501544\n      - 8.319506147668\n      - 2.639942501544\n      - 10.56950614767\n      - 9.999506147668\n      - 11.01950614767\n      - 19.37150307276\n      - 19.82150307276\n      - 73.48988065196\n      - 53.86322643196\n      - 23.67150307276\n      - 23.68150307276\n      - 12.28444135284\n      - 6.473966840337\n      - 22.27150307276\n      - 22.56150307276\n      - 73.08988065196\n      - 53.16322643196\n      - 11.76444135284\n      - 6.133966840337\n      - 22.77150307276\n      - 22.82150307276\n  -   - 78.17391291542\n      - 66.8366728159\n      - 54.32599616455\n      - 56.48754438985\n      - 49.80586460657\n      - 37.60220537333\n      - 27.26395828245\n      - 20.80723297571\n      - 27.21395828245\n      - 20.98223297571\n      - 15.60547682225\n      - 12.11967202128\n      - 10.23248733714\n      - 8.30080219923\n      - 6.80055773407\n      - 6.572919168714\n      - 7.365975022026\n      - 75.87391291542\n      - 65.0366728159\n      - 61.72599616455\n      - 55.18754438985\n      - 46.70586460657\n      - 33.50220537333\n      - 25.46395828245\n      - 18.60723297571\n      - 25.33895828245\n      - 19.33223297571\n      - 15.38047682225\n      - 12.56967202128\n      - 8.094987337144\n      - 
8.03830219923\n      - 6.98805773407\n      - 7.697919168714\n      - 5.453475022026\n      - 65.27391291542\n      - 67.1366728159\n      - 53.92599616455\n      - 48.78754438985\n      - 45.90586460657\n      - 36.80220537333\n      - 22.96395828245\n      - 17.60723297571\n      - 23.01395828245\n      - 18.20723297571\n      - 14.33047682225\n      - 10.84467202128\n      - 9.182487337144\n      - 7.47580219923\n      - 6.20055773407\n      - 5.335419168714\n      - 5.303475022026\n      - 59.37391291542\n      - 55.4366728159\n      - 50.92599616455\n      - 47.08754438985\n      - 43.00586460657\n      - 35.30220537333\n      - 25.56395828245\n      - 17.60723297571\n      - 25.52645828245\n      - 18.35723297571\n      - 14.63047682225\n      - 11.51967202128\n      - 9.519987337144\n      - 7.06330219923\n      - 6.42555773407\n      - 5.185419168714\n      - 4.965975022026\n      - 66.77391291542\n      - 52.52599616455\n      - 27.88429931353\n      - 10.52352617863\n      - 11.23352617863\n      - 13.67352617863\n      - 5.021362784819\n      - 63.27391291542\n      - 50.02599616455\n      - 28.58429931353\n      - 11.95352617863\n      - 10.86352617863\n      - 12.73352617863\n      - 3.331362784819\n      - 62.07391291542\n      - 51.12599616455\n      - 28.28429931353\n      - 11.98352617863\n      - 9.923526178634\n      - 11.42352617863\n      - 4.831362784819\n      - 63.27391291542\n      - 53.62599616455\n      - 29.18429931353\n      - 10.67352617863\n      - 5.021362784819\n      - 10.86352617863\n      - 11.23352617863\n      - 4.831362784819\n      - 62.07391291542\n      - 50.12599616455\n      - 40.50586460657\n      - 27.38429931353\n      - 18.95079727771\n      - 19.32079727771\n      - 16.391415976\n      - 12.06352617863\n      - 10.02497402658\n      - 8.153464959245\n      - 6.061362784819\n      - 61.07391291542\n      - 52.12599616455\n      - 41.50586460657\n      - 30.58429931353\n      - 16.75079727771\n      - 17.07079727771\n  
    - 16.881415976\n      - 10.86352617863\n      - 9.194974026576\n      - 6.463464959245\n      - 6.811362784819\n      - 55.77391291542\n      - 49.62599616455\n      - 41.20586460657\n      - 31.18429931353\n      - 17.75079727771\n      - 18.42079727771\n      - 18.571415976\n      - 13.75352617863\n      - 6.944974026576\n      - 7.703464959245\n      - 9.511362784819\n      - 63.27391291542\n      - 54.1366728159\n      - 52.12599616455\n      - 49.58754438985\n      - 40.40586460657\n      - 32.10220537333\n      - 17.40723297571\n      - 18.27723297571\n      - 11.73797682225\n      - 7.989672021276\n      - 6.404987337144\n      - 4.32830219923\n      - 3.94555773407\n      - 3.232919168714\n      - 3.090975022026\n      - 22.30352617863\n      - 23.87352617863\n      - 16.18352617863\n      - 9.923526178634\n      - 10.18352617863\n      - 14.23352617863\n      - 56.07391291542\n      - 44.82599616455\n      - 40.70586460657\n      - 35.18429931353\n      - 26.55079727771\n      - 21.271415976\n      - 26.37079727771\n      - 21.381415976\n      - 15.81352617863\n      - 10.32497402658\n      - 11.22346495925\n      - 8.131362784819\n      - 65.97391291542\n      - 51.42599616455\n      - 40.20586460657\n      - 24.38429931353\n      - 20.75079727771\n      - 15.271415976\n      - 21.31079727771\n      - 16.241415976\n      - 14.35352617863\n      - 7.514974026576\n      - 7.023464959245\n      - 7.901362784819\n      - 51.97391291542\n      - 49.32599616455\n      - 33.50586460657\n      - 26.08429931353\n      - 22.45079727771\n      - 13.871415976\n      - 22.74079727771\n      - 14.521415976\n      - 11.46352617863\n      - 7.394974026576\n      - 6.353464959245\n      - 6.031362784819\n      - 11.80352617863\n      - 66.27391291542\n      - 54.62599616455\n      - 30.88429931353\n      - 11.42352617863\n      - 4.261362784819\n      - 11.05352617863\n      - 4.451362784819\n      - 13.30352617863\n      - 12.73352617863\n      - 13.75352617863\n     
 - 21.66395828245\n      - 22.11395828245\n      - 66.97391291542\n      - 51.72599616455\n      - 25.96395828245\n      - 25.97395828245\n      - 15.03797682225\n      - 9.034987337144\n      - 24.56395828245\n      - 24.85395828245\n      - 66.57391291542\n      - 51.02599616455\n      - 14.51797682225\n      - 8.694987337144\n      - 25.06395828245\n      - 25.11395828245\n  -   - 122.1511527598\n      - 122.6211240639\n      - 156.1508499375\n      - -599.1450584808\n      - -12.38948471101\n      - 19.33755088278\n      - 21.40590723345\n      - 17.86925432536\n      - 21.35590723345\n      - 18.04425432536\n      - 13.84616571438\n      - 10.95881863539\n      - 9.418115456519\n      - 7.704952567843\n      - 6.351080428381\n      - 6.22595148445\n      - 7.093292608367\n      - 119.8511527598\n      - 120.8211240639\n      - 163.5508499375\n      - -600.4450584808\n      - -15.48948471101\n      - 15.23755088278\n      - 19.60590723345\n      - 15.66925432536\n      - 19.48090723345\n      - 16.39425432536\n      - 13.62116571438\n      - 11.40881863539\n      - 7.280615456519\n      - 7.442452567843\n      - 6.538580428381\n      - 7.35095148445\n      - 5.180792608367\n      - 109.2511527598\n      - 122.9211240639\n      - 155.7508499375\n      - -606.8450584808\n      - -16.28948471101\n      - 18.53755088278\n      - 17.10590723345\n      - 14.66925432536\n      - 17.15590723345\n      - 15.26925432536\n      - 12.57116571438\n      - 9.683818635389\n      - 8.368115456519\n      - 6.879952567843\n      - 5.751080428381\n      - 4.98845148445\n      - 5.030792608367\n      - 103.3511527598\n      - 111.2211240639\n      - 152.7508499375\n      - -608.5450584808\n      - -19.18948471101\n      - 17.03755088278\n      - 19.70590723345\n      - 14.66925432536\n      - 19.66840723345\n      - 15.41925432536\n      - 12.87116571438\n      - 10.35881863539\n      - 8.705615456519\n      - 6.467452567843\n      - 5.976080428381\n      - 4.83845148445\n      - 
4.693292608367\n      - 110.7511527598\n      - 154.3508499375\n      - 18.46995434894\n      - 9.107714197815\n      - 9.817714197815\n      - 12.25771419782\n      - 4.778284133588\n      - 107.2511527598\n      - 151.8508499375\n      - 19.16995434894\n      - 10.53771419782\n      - 9.447714197815\n      - 11.31771419782\n      - 3.088284133588\n      - 106.0511527598\n      - 152.9508499375\n      - 18.86995434894\n      - 10.56771419782\n      - 8.507714197815\n      - 10.00771419782\n      - 4.588284133588\n      - 107.2511527598\n      - 155.4508499375\n      - 19.76995434894\n      - 9.257714197815\n      - 4.778284133588\n      - 9.447714197815\n      - 9.817714197815\n      - 4.588284133588\n      - 106.0511527598\n      - 151.9508499375\n      - -21.68948471101\n      - 17.96995434894\n      - 14.92797213765\n      - 15.29797213765\n      - 14.15288063203\n      - 10.64771419782\n      - 9.331447324355\n      - 7.759548240156\n      - 5.818284133588\n      - 105.0511527598\n      - 153.9508499375\n      - -20.68948471101\n      - 21.16995434894\n      - 12.72797213765\n      - 13.04797213765\n      - 14.64288063203\n      - 9.447714197815\n      - 8.501447324355\n      - 6.069548240156\n      - 6.568284133588\n      - 99.75115275983\n      - 151.4508499375\n      - -20.98948471101\n      - 21.76995434894\n      - 13.72797213765\n      - 14.39797213765\n      - 16.33288063203\n      - 12.33771419782\n      - 6.251447324355\n      - 7.309548240156\n      - 9.268284133588\n      - 107.2511527598\n      - 109.9211240639\n      - 153.9508499375\n      - -606.0450584808\n      - -21.78948471101\n      - 13.83755088278\n      - 14.46925432536\n      - 15.33925432536\n      - 9.978665714379\n      - 6.828818635389\n      - 5.590615456519\n      - 3.732452567843\n      - 3.496080428381\n      - 2.88595148445\n      - 2.818292608367\n      - 20.88771419782\n      - 22.45771419782\n      - 14.76771419782\n      - 8.507714197815\n      - 8.767714197815\n      - 
12.81771419782\n      - 100.0511527598\n      - 146.6508499375\n      - -21.48948471101\n      - 25.76995434894\n      - 22.52797213765\n      - 19.03288063203\n      - 22.34797213765\n      - 19.14288063203\n      - 14.39771419782\n      - 9.631447324355\n      - 10.82954824016\n      - 7.888284133588\n      - 109.9511527598\n      - 153.2508499375\n      - -21.98948471101\n      - 14.96995434894\n      - 16.72797213765\n      - 13.03288063203\n      - 17.28797213765\n      - 14.00288063203\n      - 12.93771419782\n      - 6.821447324355\n      - 6.629548240156\n      - 7.658284133588\n      - 95.95115275983\n      - 151.1508499375\n      - -28.68948471101\n      - 16.66995434894\n      - 18.42797213765\n      - 11.63288063203\n      - 18.71797213765\n      - 12.28288063203\n      - 10.04771419782\n      - 6.701447324355\n      - 5.959548240156\n      - 5.788284133588\n      - 10.38771419782\n      - 110.2511527598\n      - 156.4508499375\n      - 21.46995434894\n      - 10.00771419782\n      - 4.018284133588\n      - 9.637714197815\n      - 4.208284133588\n      - 11.88771419782\n      - 11.31771419782\n      - 12.33771419782\n      - 15.80590723345\n      - 16.25590723345\n      - 110.9511527598\n      - 153.5508499375\n      - 20.10590723345\n      - 20.11590723345\n      - 13.27866571438\n      - 8.220615456519\n      - 18.70590723345\n      - 18.99590723345\n      - 110.5511527598\n      - 152.8508499375\n      - 12.75866571438\n      - 7.880615456519\n      - 19.20590723345\n      - 19.25590723345\n  -   - -35.35804307658\n      - -53.29699942572\n      - -72.06021768605\n      - -76.24300976651\n      - -89.66597211166\n      - -117.5990460751\n      - -178.3813984168\n      - -314.1942358597\n      - -178.4313984168\n      - -314.0192358597\n      - -1385.074532566\n      - 503.9556946656\n      - 197.4559049595\n      - 115.7942913474\n      - 78.11722466414\n      - 57.55318477565\n      - 45.51113019711\n      - -37.65804307658\n      - 
-55.09699942572\n      - -64.66021768605\n      - -77.54300976651\n      - -92.76597211166\n      - -121.6990460751\n      - -180.1813984168\n      - -316.3942358597\n      - -180.3063984168\n      - -315.6692358597\n      - -1385.299532566\n      - 504.4056946656\n      - 195.3184049595\n      - 115.5317913474\n      - 78.30472466414\n      - 58.67818477565\n      - 43.59863019711\n      - -48.25804307658\n      - -52.99699942572\n      - -72.46021768605\n      - -83.94300976651\n      - -93.56597211166\n      - -118.3990460751\n      - -182.6813984168\n      - -317.3942358597\n      - -182.6313984168\n      - -316.7942358597\n      - -1386.349532566\n      - 502.6806946656\n      - 196.4059049595\n      - 114.9692913474\n      - 77.51722466414\n      - 56.31568477565\n      - 43.44863019711\n      - -54.15804307658\n      - -64.69699942572\n      - -75.46021768605\n      - -85.64300976651\n      - -96.46597211166\n      - -119.8990460751\n      - -180.0813984168\n      - -317.3942358597\n      - -180.1188984168\n      - -316.6442358597\n      - -1386.049532566\n      - 503.3556946656\n      - 196.7434049595\n      - 114.5567913474\n      - 77.74222466414\n      - 56.16568477565\n      - 43.11113019711\n      - -46.75804307658\n      - -73.86021768605\n      - -148.0932859294\n      - 1691.297253326\n      - 1692.007253326\n      - 1694.447253326\n      - 38.42217232425\n      - -50.25804307658\n      - -76.36021768605\n      - -147.3932859294\n      - 1692.727253326\n      - 1691.637253326\n      - 1693.507253326\n      - 36.73217232425\n      - -51.45804307658\n      - -75.26021768605\n      - -147.6932859294\n      - 1692.757253326\n      - 1690.697253326\n      - 1692.197253326\n      - 38.23217232425\n      - -50.25804307658\n      - -72.76021768605\n      - -146.7932859294\n      - 1691.447253326\n      - 38.42217232425\n      - 1691.637253326\n      - 1692.007253326\n      - 38.23217232425\n      - -51.45804307658\n      - -76.26021768605\n      - 
-98.96597211166\n      - -148.5932859294\n      - -233.0828036794\n      - -232.7128036794\n      - -508.8494431202\n      - 1692.837253326\n      - 148.258324502\n      - 68.02827286157\n      - 39.46217232425\n      - -52.45804307658\n      - -74.26021768605\n      - -97.96597211166\n      - -145.3932859294\n      - -235.2828036794\n      - -234.9628036794\n      - -508.3594431202\n      - 1691.637253326\n      - 147.428324502\n      - 66.33827286157\n      - 40.21217232425\n      - -57.75804307658\n      - -76.76021768605\n      - -98.26597211166\n      - -144.7932859294\n      - -234.2828036794\n      - -233.6128036794\n      - -506.6694431202\n      - 1694.527253326\n      - 145.178324502\n      - 67.57827286157\n      - 42.91217232425\n      - -50.25804307658\n      - -65.99699942572\n      - -74.26021768605\n      - -83.14300976651\n      - -99.06597211166\n      - -123.0990460751\n      - -317.5942358597\n      - -316.7242358597\n      - -1388.942032566\n      - 499.8256946656\n      - 193.6284049595\n      - 111.8217913474\n      - 75.26222466414\n      - 54.21318477565\n      - 41.23613019711\n      - 1703.077253326\n      - 1704.647253326\n      - 1696.957253326\n      - 1690.697253326\n      - 1690.957253326\n      - 1695.007253326\n      - -57.45804307658\n      - -81.56021768605\n      - -98.76597211166\n      - -140.7932859294\n      - -225.4828036794\n      - -503.9694431202\n      - -225.6628036794\n      - -503.8594431202\n      - 1696.587253326\n      - 148.558324502\n      - 71.09827286157\n      - 41.53217232425\n      - -47.55804307658\n      - -74.96021768605\n      - -99.26597211166\n      - -151.5932859294\n      - -231.2828036794\n      - -509.9694431202\n      - -230.7228036794\n      - -508.9994431202\n      - 1695.127253326\n      - 145.748324502\n      - 66.89827286157\n      - 41.30217232425\n      - -61.55804307658\n      - -77.06021768605\n      - -105.9659721117\n      - -149.8932859294\n      - -229.5828036794\n      - 
-511.3694431202\n      - -229.2928036794\n      - -510.7194431202\n      - 1692.237253326\n      - 145.628324502\n      - 66.22827286157\n      - 39.43217232425\n      - 1692.577253326\n      - -47.25804307658\n      - -71.76021768605\n      - -145.0932859294\n      - 1692.197253326\n      - 37.66217232425\n      - 1691.827253326\n      - 37.85217232425\n      - 1694.077253326\n      - 1693.507253326\n      - 1694.527253326\n      - -183.9813984168\n      - -183.5313984168\n      - -46.55804307658\n      - -74.66021768605\n      - -179.6813984168\n      - -179.6713984168\n      - -1385.642032566\n      - 196.2584049595\n      - -181.0813984168\n      - -180.7913984168\n      - -46.95804307658\n      - -75.36021768605\n      - -1386.162032566\n      - 195.9184049595\n      - -180.5813984168\n      - -180.5313984168\n  -   - 117.9473464966\n      - 104.1334190546\n      - 90.05657396185\n      - 91.2207789054\n      - 83.93061974583\n      - 71.30942158128\n      - 62.32199544308\n      - 59.88083883476\n      - 62.27199544308\n      - 60.05583883476\n      - 62.77642327569\n      - 76.9431054476\n      - 131.4875801385\n      - -2342.694563021\n      - -86.51002021515\n      - -37.31642203542\n      - -19.79861800279\n      - 115.6473464966\n      - 102.3334190546\n      - 97.45657396185\n      - 89.9207789054\n      - 80.83061974583\n      - 67.20942158128\n      - 60.52199544308\n      - 57.68083883476\n      - 60.39699544308\n      - 58.40583883476\n      - 62.55142327569\n      - 77.3931054476\n      - 129.3500801385\n      - -2342.957063021\n      - -86.32252021515\n      - -36.19142203542\n      - -21.71111800279\n      - 105.0473464966\n      - 104.4334190546\n      - 89.65657396185\n      - 83.5207789054\n      - 80.03061974583\n      - 70.50942158128\n      - 58.02199544308\n      - 56.68083883476\n      - 58.07199544308\n      - 57.28083883476\n      - 61.50142327569\n      - 75.6681054476\n      - 130.4375801385\n      - -2343.519563021\n      - 
-87.11002021515\n      - -38.55392203542\n      - -21.86111800279\n      - 99.14734649662\n      - 92.73341905458\n      - 86.65657396185\n      - 81.8207789054\n      - 77.13061974583\n      - 69.00942158128\n      - 60.62199544308\n      - 56.68083883476\n      - 60.58449544308\n      - 57.43083883476\n      - 61.80142327569\n      - 76.3431054476\n      - 130.7750801385\n      - -2343.932063021\n      - -86.88502021515\n      - -38.70392203542\n      - -22.19861800279\n      - 106.5473464966\n      - 88.25657396185\n      - 61.9476238727\n      - 64.62033506263\n      - 65.33033506263\n      - 67.77033506263\n      - -17.38684121661\n      - 103.0473464966\n      - 85.75657396185\n      - 62.6476238727\n      - 66.05033506263\n      - 64.96033506263\n      - 66.83033506263\n      - -19.07684121661\n      - 101.8473464966\n      - 86.85657396185\n      - 62.3476238727\n      - 66.08033506263\n      - 64.02033506263\n      - 65.52033506263\n      - -17.57684121661\n      - 103.0473464966\n      - 89.35657396185\n      - 63.2476238727\n      - 64.77033506263\n      - -17.38684121661\n      - 64.96033506263\n      - 65.33033506263\n      - -17.57684121661\n      - 101.8473464966\n      - 85.85657396185\n      - 74.63061974583\n      - 61.4476238727\n      - 55.64384266576\n      - 56.01384266576\n      - 58.81780905206\n      - 66.16033506263\n      - 253.6554490216\n      - -52.38720394238\n      - -16.34684121661\n      - 100.8473464966\n      - 87.85657396185\n      - 75.63061974583\n      - 64.6476238727\n      - 53.44384266576\n      - 53.76384266576\n      - 59.30780905206\n      - 64.96033506263\n      - 252.8254490216\n      - -54.07720394238\n      - -15.59684121661\n      - 95.54734649662\n      - 85.35657396185\n      - 75.33061974583\n      - 65.2476238727\n      - 54.44384266576\n      - 55.11384266576\n      - 60.99780905206\n      - 67.85033506263\n      - 250.5754490216\n      - -52.83720394238\n      - -12.89684121661\n      - 103.0473464966\n      
- 91.43341905458\n      - 87.85657396185\n      - 84.3207789054\n      - 74.53061974583\n      - 65.80942158128\n      - 56.48083883476\n      - 57.35083883476\n      - 58.90892327569\n      - 72.8131054476\n      - 127.6600801385\n      - -2346.667063021\n      - -89.36502021515\n      - -40.65642203542\n      - -24.07361800279\n      - 76.40033506263\n      - 77.97033506263\n      - 70.28033506263\n      - 64.02033506263\n      - 64.28033506263\n      - 68.33033506263\n      - 95.84734649662\n      - 80.55657396185\n      - 74.83061974583\n      - 69.2476238727\n      - 63.24384266576\n      - 63.69780905206\n      - 63.06384266576\n      - 63.80780905206\n      - 69.91033506263\n      - 253.9554490216\n      - -49.31720394238\n      - -14.27684121661\n      - 105.7473464966\n      - 87.15657396185\n      - 74.33061974583\n      - 58.4476238727\n      - 57.44384266576\n      - 57.69780905206\n      - 58.00384266576\n      - 58.66780905206\n      - 68.45033506263\n      - 251.1454490216\n      - -53.51720394238\n      - -14.50684121661\n      - 91.74734649662\n      - 85.05657396185\n      - 67.63061974583\n      - 60.1476238727\n      - 59.14384266576\n      - 56.29780905206\n      - 59.43384266576\n      - 56.94780905206\n      - 65.56033506263\n      - 251.0254490216\n      - -54.18720394238\n      - -16.37684121661\n      - 65.90033506263\n      - 106.0473464966\n      - 90.35657396185\n      - 64.9476238727\n      - 65.52033506263\n      - -18.14684121661\n      - 65.15033506263\n      - -17.95684121661\n      - 67.40033506263\n      - 66.83033506263\n      - 67.85033506263\n      - 56.72199544308\n      - 57.17199544308\n      - 106.7473464966\n      - 87.45657396185\n      - 61.02199544308\n      - 61.03199544308\n      - 62.20892327569\n      - 130.2900801385\n      - 59.62199544308\n      - 59.91199544308\n      - 106.3473464966\n      - 86.75657396185\n      - 61.68892327569\n      - 129.9500801385\n      - 60.12199544308\n      - 60.17199544308\n  -   - 
175.7616267494\n      - 135.815392655\n      - 107.427429421\n      - 99.44443456745\n      - 85.70608965926\n      - 64.3171217786\n      - 44.36460041182\n      - 32.95338522348\n      - 44.31460041182\n      - 33.12838522348\n      - 24.75298136325\n      - 19.273476213\n      - 15.97636612239\n      - 13.00317519799\n      - 10.70826764037\n      - 9.858997178816\n      - 10.15607036729\n      - 173.4616267494\n      - 134.015392655\n      - 114.827429421\n      - 98.14443456745\n      - 82.60608965926\n      - 60.2171217786\n      - 42.56460041182\n      - 30.75338522348\n      - 42.43960041182\n      - 31.47838522348\n      - 24.52798136325\n      - 19.723476213\n      - 13.83886612239\n      - 12.74067519799\n      - 10.89576764037\n      - 10.98399717882\n      - 8.243570367288\n      - 162.8616267494\n      - 136.115392655\n      - 107.027429421\n      - 91.74443456745\n      - 81.80608965926\n      - 63.5171217786\n      - 40.06460041182\n      - 29.75338522348\n      - 40.11460041182\n      - 30.35338522348\n      - 23.47798136325\n      - 17.998476213\n      - 14.92636612239\n      - 12.17817519799\n      - 10.10826764037\n      - 8.621497178816\n      - 8.093570367288\n      - 156.9616267494\n      - 124.415392655\n      - 104.027429421\n      - 90.04443456745\n      - 78.90608965926\n      - 62.0171217786\n      - 42.66460041182\n      - 29.75338522348\n      - 42.62710041182\n      - 30.50338522348\n      - 23.77798136325\n      - 18.673476213\n      - 15.26386612239\n      - 11.76567519799\n      - 10.33326764037\n      - 8.471497178816\n      - 7.756070367288\n      - 164.3616267494\n      - 105.627429421\n      - 48.8819398286\n      - 18.5818314891\n      - 19.2918314891\n      - 21.7318314891\n      - 7.60022447721\n      - 160.8616267494\n      - 103.127429421\n      - 49.5819398286\n      - 20.0118314891\n      - 18.9218314891\n      - 20.7918314891\n      - 5.91022447721\n      - 159.6616267494\n      - 104.227429421\n      - 49.2819398286\n  
    - 20.0418314891\n      - 17.9818314891\n      - 19.4818314891\n      - 7.41022447721\n      - 160.8616267494\n      - 106.727429421\n      - 50.1819398286\n      - 18.7318314891\n      - 7.60022447721\n      - 18.9218314891\n      - 19.2918314891\n      - 7.41022447721\n      - 159.6616267494\n      - 103.227429421\n      - 76.40608965926\n      - 48.3819398286\n      - 33.22986519451\n      - 33.59986519451\n      - 26.87225158213\n      - 20.1218314891\n      - 15.2111293494\n      - 11.73215109931\n      - 8.64022447721\n      - 158.6616267494\n      - 105.227429421\n      - 77.40608965926\n      - 51.5819398286\n      - 31.02986519451\n      - 31.34986519451\n      - 27.36225158213\n      - 18.9218314891\n      - 14.3811293494\n      - 10.04215109931\n      - 9.39022447721\n      - 153.3616267494\n      - 102.727429421\n      - 77.10608965926\n      - 52.1819398286\n      - 32.02986519451\n      - 32.69986519451\n      - 29.05225158213\n      - 21.8118314891\n      - 12.1311293494\n      - 11.28215109931\n      - 12.09022447721\n      - 160.8616267494\n      - 123.115392655\n      - 105.227429421\n      - 92.54443456745\n      - 76.30608965926\n      - 58.8171217786\n      - 29.55338522348\n      - 30.42338522348\n      - 20.88548136325\n      - 15.143476213\n      - 12.14886612239\n      - 9.030675197988\n      - 7.853267640371\n      - 6.518997178816\n      - 5.881070367288\n      - 30.3618314891\n      - 31.9318314891\n      - 24.2418314891\n      - 17.9818314891\n      - 18.2418314891\n      - 22.2918314891\n      - 153.6616267494\n      - 97.927429421\n      - 76.60608965926\n      - 56.1819398286\n      - 40.82986519451\n      - 31.75225158213\n      - 40.64986519451\n      - 31.86225158213\n      - 23.8718314891\n      - 15.5111293494\n      - 14.80215109931\n      - 10.71022447721\n      - 163.5616267494\n      - 104.527429421\n      - 76.10608965926\n      - 45.3819398286\n      - 35.02986519451\n      - 25.75225158213\n      - 35.58986519451\n     
 - 26.72225158213\n      - 22.4118314891\n      - 12.7011293494\n      - 10.60215109931\n      - 10.48022447721\n      - 149.5616267494\n      - 102.427429421\n      - 69.40608965926\n      - 47.0819398286\n      - 36.72986519451\n      - 24.35225158213\n      - 37.01986519451\n      - 25.00225158213\n      - 19.5218314891\n      - 12.5811293494\n      - 9.93215109931\n      - 8.61022447721\n      - 19.8618314891\n      - 163.8616267494\n      - 107.727429421\n      - 51.8819398286\n      - 19.4818314891\n      - 6.84022447721\n      - 19.1118314891\n      - 7.03022447721\n      - 21.3618314891\n      - 20.7918314891\n      - 21.8118314891\n      - 38.76460041182\n      - 39.21460041182\n      - 164.5616267494\n      - 104.827429421\n      - 43.06460041182\n      - 43.07460041182\n      - 24.18548136325\n      - 14.77886612239\n      - 41.66460041182\n      - 41.95460041182\n      - 164.1616267494\n      - 104.127429421\n      - 23.66548136325\n      - 14.43886612239\n      - 42.16460041182\n      - 42.21460041182\n  -   - 28.24095690087\n      - 21.59536620662\n      - 13.25712029593\n      - 19.07487076201\n      - 15.58993695617\n      - 8.655383440756\n      - 5.734068533914\n      - 4.165799626156\n      - 5.684068533914\n      - 4.340799626156\n      - 2.383275058766\n      - 1.39686578611\n      - 1.39855054723\n      - 0.9316549828049\n      - 0.5908378210265\n      - 1.296279986221\n      - 2.850605037275\n      - 25.94095690087\n      - 19.79536620662\n      - 20.65712029593\n      - 17.77487076201\n      - 12.48993695617\n      - 4.555383440756\n      - 3.934068533914\n      - 1.965799626156\n      - 3.809068533914\n      - 2.690799626156\n      - 2.158275058766\n      - 1.84686578611\n      - -0.7389494527701\n      - 0.6691549828049\n      - 0.7783378210265\n      - 2.421279986221\n      - 0.9381050372751\n      - 15.34095690087\n      - 21.89536620662\n      - 12.85712029593\n      - 11.37487076201\n      - 11.68993695617\n      - 7.855383440756\n     
 - 1.434068533914\n      - 0.9657996261561\n      - 1.484068533914\n      - 1.565799626156\n      - 1.108275058766\n      - 0.12186578611\n      - 0.3485505472299\n      - 0.1066549828049\n      - -0.009162178973513\n      - 0.05877998622137\n      - 0.7881050372751\n      - 9.440956900867\n      - 10.19536620662\n      - 9.857120295928\n      - 9.674870762006\n      - 8.789936956172\n      - 6.355383440756\n      - 4.034068533914\n      - 0.9657996261561\n      - 3.996568533914\n      - 1.715799626156\n      - 1.408275058766\n      - 0.79686578611\n      - 0.6860505472299\n      - -0.3058450171951\n      - 0.2158378210265\n      - -0.09122001377863\n      - 0.4506050372751\n      - 16.84095690087\n      - 11.45712029593\n      - 3.061569786218\n      - -1.357147755765\n      - -0.6471477557654\n      - 1.792852244235\n      - 0.8347758927937\n      - 13.34095690087\n      - 8.957120295928\n      - 3.761569786218\n      - 0.07285224423464\n      - -1.017147755765\n      - 0.8528522442346\n      - -0.8552241072063\n      - 12.14095690087\n      - 10.05712029593\n      - 3.461569786218\n      - 0.1028522442346\n      - -1.957147755765\n      - -0.4571477557654\n      - 0.6447758927937\n      - 13.34095690087\n      - 12.55712029593\n      - 4.361569786218\n      - -1.207147755765\n      - 0.8347758927937\n      - -1.017147755765\n      - -0.6471477557654\n      - 0.6447758927937\n      - 12.14095690087\n      - 9.057120295928\n      - 6.289936956172\n      - 2.561569786218\n      - 0.09812922077451\n      - 0.4681292207745\n      - 1.601218748851\n      - 0.1828522442346\n      - 1.967848278564\n      - 2.434721057271\n      - 1.874775892794\n      - 11.14095690087\n      - 11.05712029593\n      - 7.289936956172\n      - 5.761569786218\n      - -2.101870779225\n      - -1.781870779225\n      - 2.091218748851\n      - -1.017147755765\n      - 1.137848278564\n      - 0.744721057271\n      - 2.624775892794\n      - 5.840956900867\n      - 8.557120295928\n      - 
6.989936956172\n      - 6.361569786218\n      - -1.101870779225\n      - -0.4318707792255\n      - 3.781218748851\n      - 1.872852244235\n      - -1.112151721436\n      - 1.984721057271\n      - 5.324775892794\n      - 13.34095690087\n      - 8.895366206617\n      - 11.05712029593\n      - 12.17487076201\n      - 6.189936956172\n      - 3.155383440756\n      - 0.7657996261561\n      - 1.635799626156\n      - -1.484224941234\n      - -2.73313421389\n      - -2.42894945277\n      - -3.040845017195\n      - -2.264162178974\n      - -2.043720013779\n      - -1.424394962725\n      - 10.42285224423\n      - 11.99285224423\n      - 4.302852244235\n      - -1.957147755765\n      - -1.697147755765\n      - 2.352852244235\n      - 6.140956900867\n      - 3.757120295928\n      - 6.489936956172\n      - 10.36156978622\n      - 7.698129220775\n      - 6.481218748851\n      - 7.518129220775\n      - 6.591218748851\n      - 3.932852244235\n      - 2.267848278564\n      - 5.504721057271\n      - 3.944775892794\n      - 16.04095690087\n      - 10.35712029593\n      - 5.989936956172\n      - -0.4384302137822\n      - 1.898129220775\n      - 0.4812187488511\n      - 2.458129220775\n      - 1.451218748851\n      - 2.472852244235\n      - -0.5421517214358\n      - 1.304721057271\n      - 3.714775892794\n      - 2.040956900867\n      - 8.257120295928\n      - -0.7100630438283\n      - 1.261569786218\n      - 3.598129220775\n      - -0.9187812511489\n      - 3.888129220775\n      - -0.2687812511489\n      - -0.4171477557654\n      - -0.6621517214358\n      - 0.634721057271\n      - 1.844775892794\n      - -0.07714775576537\n      - 16.34095690087\n      - 13.55712029593\n      - 6.061569786218\n      - -0.4571477557654\n      - 0.07477589279366\n      - -0.8271477557654\n      - 0.2647758927937\n      - 1.422852244235\n      - 0.8528522442346\n      - 1.872852244235\n      - 0.1340685339144\n      - 0.5840685339144\n      - 17.04095690087\n      - 10.65712029593\n      - 
4.434068533914\n      - 4.444068533914\n      - 1.815775058766\n      - 0.2010505472299\n      - 3.034068533914\n      - 3.324068533914\n      - 16.64095690087\n      - 9.957120295928\n      - 1.295775058766\n      - -0.1389494527701\n      - 3.534068533914\n      - 3.584068533914\n  -   - 19.67905061421\n      - 12.78536491634\n      - 4.453409401868\n      - 10.42602658124\n      - 7.181651769754\n      - 0.8467383120783\n      - -0.8151544815029\n      - -1.28878727387\n      - -0.8651544815029\n      - -1.11378727387\n      - -2.178296849214\n      - -2.4437228135\n      - -1.857756175876\n      - -1.847417965917\n      - -1.795022214911\n      - -0.7628423028115\n      - 1.065115779582\n      - 17.37905061421\n      - 10.98536491634\n      - 11.85340940187\n      - 9.126026581237\n      - 4.081651769754\n      - -3.253261687922\n      - -2.615154481503\n      - -3.48878727387\n      - -2.740154481503\n      - -2.76378727387\n      - -2.403296849214\n      - -1.9937228135\n      - -3.995256175876\n      - -2.109917965917\n      - -1.607522214911\n      - 0.3621576971885\n      - -0.8473842204181\n      - 6.779050614207\n      - 13.08536491634\n      - 4.053409401868\n      - 2.726026581237\n      - 3.281651769754\n      - 0.04673831207827\n      - -5.115154481503\n      - -4.48878727387\n      - -5.065154481503\n      - -3.88878727387\n      - -3.453296849214\n      - -3.7187228135\n      - -2.907756175876\n      - -2.672417965917\n      - -2.395022214911\n      - -2.000342302812\n      - -0.9973842204181\n      - 0.8790506142075\n      - 1.385364916339\n      - 1.053409401868\n      - 1.026026581237\n      - 0.3816517697539\n      - -1.453261687922\n      - -2.515154481503\n      - -4.48878727387\n      - -2.552654481503\n      - -3.73878727387\n      - -3.153296849214\n      - -3.0437228135\n      - -2.570256175876\n      - -3.084917965917\n      - -2.170022214911\n      - -2.150342302812\n      - -1.334884220418\n      - 8.279050614207\n      - 
2.653409401868\n      - -4.10628705125\n      - -5.539020595211\n      - -4.829020595211\n      - -2.389020595211\n      - -0.8305062664824\n      - 4.779050614207\n      - 0.1534094018679\n      - -3.40628705125\n      - -4.109020595211\n      - -5.199020595211\n      - -3.329020595211\n      - -2.520506266482\n      - 3.579050614207\n      - 1.253409401868\n      - -3.70628705125\n      - -4.079020595211\n      - -6.139020595211\n      - -4.639020595211\n      - -1.020506266482\n      - 4.779050614207\n      - 3.753409401868\n      - -2.80628705125\n      - -5.389020595211\n      - -0.8305062664824\n      - -5.199020595211\n      - -4.829020595211\n      - -1.020506266482\n      - 3.579050614207\n      - 0.2534094018679\n      - -2.118348230246\n      - -4.60628705125\n      - -5.8778638078\n      - -5.5078638078\n      - -3.383083895331\n      - -3.999020595211\n      - -1.038044411845\n      - 0.2196196759168\n      - 0.2094937335176\n      - 2.579050614207\n      - 2.253409401868\n      - -1.118348230246\n      - -1.40628705125\n      - -8.0778638078\n      - -7.7578638078\n      - -2.893083895331\n      - -5.199020595211\n      - -1.868044411845\n      - -1.470380324083\n      - 0.9594937335176\n      - -2.720949385793\n      - -0.2465905981321\n      - -1.418348230246\n      - -0.8062870512504\n      - -7.0778638078\n      - -6.4078638078\n      - -1.203083895331\n      - -2.309020595211\n      - -4.118044411845\n      - -0.2303803240832\n      - 3.659493733518\n      - 4.779050614207\n      - 0.0853649163389\n      - 2.253409401868\n      - 3.526026581237\n      - -2.218348230246\n      - -4.653261687922\n      - -4.68878727387\n      - -3.81878727387\n      - -6.045796849214\n      - -6.5737228135\n      - -5.685256175876\n      - -5.819917965917\n      - -4.650022214911\n      - -4.102842302812\n      - -3.209884220418\n      - 6.240979404789\n      - 7.810979404789\n      - 0.1209794047887\n      - -6.139020595211\n      - -5.879020595211\n      - 
-1.829020595211\n      - -2.420949385793\n      - -5.046590598132\n      - -1.918348230246\n      - 3.19371294875\n      - 1.7221361922\n      - 1.496916104669\n      - 1.5421361922\n      - 1.606916104669\n      - -0.2490205952113\n      - -0.738044411845\n      - 3.289619675917\n      - 2.279493733518\n      - 7.479050614207\n      - 1.553409401868\n      - -2.418348230246\n      - -7.60628705125\n      - -4.0778638078\n      - -4.503083895331\n      - -3.5178638078\n      - -3.533083895331\n      - -1.709020595211\n      - -3.548044411845\n      - -0.9103803240832\n      - 2.049493733518\n      - -6.520949385793\n      - -0.5465905981321\n      - -9.118348230246\n      - -5.90628705125\n      - -2.3778638078\n      - -5.903083895331\n      - -2.0878638078\n      - -5.253083895331\n      - -4.599020595211\n      - -3.668044411845\n      - -1.580380324083\n      - 0.1794937335176\n      - -4.259020595211\n      - 7.779050614207\n      - 4.753409401868\n      - -1.10628705125\n      - -4.639020595211\n      - -1.590506266482\n      - -5.009020595211\n      - -1.400506266482\n      - -2.759020595211\n      - -3.329020595211\n      - -2.309020595211\n      - -6.415154481503\n      - -5.965154481503\n      - 8.479050614207\n      - 1.853409401868\n      - -2.115154481503\n      - -2.105154481503\n      - -2.745796849214\n      - -3.055256175876\n      - -3.515154481503\n      - -3.225154481503\n      - 8.079050614207\n      - 1.153409401868\n      - -3.265796849214\n      - -3.395256175876\n      - -3.015154481503\n      - -2.965154481503\n  -   - 1050.519509418\n      - 2633.521525076\n      - -3381.282955438\n      - -933.4604542894\n      - -516.3685328612\n      - -257.5208155114\n      - -113.8324082065\n      - -66.57519632661\n      - -113.8824082065\n      - -66.40019632661\n      - -44.86075536786\n      - -32.39601556788\n      - -23.86432104696\n      - -18.5440484742\n      - -14.76646254613\n      - -11.02441930035\n      - -7.169273972705\n      - 
1048.219509418\n      - 2631.721525076\n      - -3373.882955438\n      - -934.7604542894\n      - -519.4685328612\n      - -261.6208155114\n      - -115.6324082065\n      - -68.77519632661\n      - -115.7574082065\n      - -68.05019632661\n      - -45.08575536786\n      - -31.94601556788\n      - -26.00182104696\n      - -18.8065484742\n      - -14.57896254613\n      - -9.899419300347\n      - -9.081773972705\n      - 1037.619509418\n      - 2633.821525076\n      - -3381.682955438\n      - -941.1604542894\n      - -520.2685328612\n      - -258.3208155114\n      - -118.1324082065\n      - -69.77519632661\n      - -118.0824082065\n      - -69.17519632661\n      - -46.13575536786\n      - -33.67101556788\n      - -24.91432104696\n      - -19.3690484742\n      - -15.36646254613\n      - -12.26191930035\n      - -9.231773972705\n      - 1031.719509418\n      - 2622.121525076\n      - -3384.682955438\n      - -942.8604542894\n      - -523.1685328612\n      - -259.8208155114\n      - -115.5324082065\n      - -69.77519632661\n      - -115.5699082065\n      - -69.02519632661\n      - -45.83575536786\n      - -32.99601556788\n      - -24.57682104696\n      - -19.7815484742\n      - -15.14146254613\n      - -12.41191930035\n      - -9.569273972705\n      - 1039.119509418\n      - -3383.082955438\n      - -165.9142242952\n      - -41.04746332279\n      - -40.33746332279\n      - -37.89746332279\n      - -8.239694706637\n      - 1035.619509418\n      - -3385.582955438\n      - -165.2142242952\n      - -39.61746332279\n      - -40.70746332279\n      - -38.83746332279\n      - -9.929694706637\n      - 1034.419509418\n      - -3384.482955438\n      - -165.5142242952\n      - -39.58746332279\n      - -41.64746332279\n      - -40.14746332279\n      - -8.429694706637\n      - 1035.619509418\n      - -3381.982955438\n      - -164.6142242952\n      - -40.89746332279\n      - -8.239694706637\n      - -40.70746332279\n      - -40.33746332279\n      - -8.429694706637\n      - 
1034.419509418\n      - -3385.482955438\n      - -525.6685328612\n      - -166.4142242952\n      - -89.99572115575\n      - -89.62572115575\n      - -55.58585272365\n      - -39.50746332279\n      - -20.14157855679\n      - -11.29463725292\n      - -7.199694706637\n      - 1033.419509418\n      - -3383.482955438\n      - -524.6685328612\n      - -163.2142242952\n      - -92.19572115575\n      - -91.87572115575\n      - -55.09585272365\n      - -40.70746332279\n      - -20.97157855679\n      - -12.98463725292\n      - -6.449694706637\n      - 1028.119509418\n      - -3385.982955438\n      - -524.9685328612\n      - -162.6142242952\n      - -91.19572115575\n      - -90.52572115575\n      - -53.40585272365\n      - -37.81746332279\n      - -23.22157855679\n      - -11.74463725292\n      - -3.749694706637\n      - 1035.619509418\n      - 2620.821525076\n      - -3383.482955438\n      - -940.3604542894\n      - -525.7685328612\n      - -263.0208155114\n      - -69.97519632661\n      - -69.10519632661\n      - -48.72825536786\n      - -36.52601556788\n      - -27.69182104696\n      - -22.5165484742\n      - -17.62146254613\n      - -14.36441930035\n      - -11.4442739727\n      - -29.26746332279\n      - -27.69746332279\n      - -35.38746332279\n      - -41.64746332279\n      - -41.38746332279\n      - -37.33746332279\n      - 1028.419509418\n      - -3390.782955438\n      - -525.4685328612\n      - -158.6142242952\n      - -82.39572115575\n      - -50.70585272365\n      - -82.57572115575\n      - -50.59585272365\n      - -35.75746332279\n      - -19.84157855679\n      - -8.224637252918\n      - -5.129694706637\n      - 1038.319509418\n      - -3384.182955438\n      - -525.9685328612\n      - -169.4142242952\n      - -88.19572115575\n      - -56.70585272365\n      - -87.63572115575\n      - -55.73585272365\n      - -37.21746332279\n      - -22.65157855679\n      - -12.42463725292\n      - -5.359694706637\n      - 1024.319509418\n      - -3386.282955438\n      - 
-532.6685328612\n      - -167.7142242952\n      - -86.49572115575\n      - -58.10585272365\n      - -86.20572115575\n      - -57.45585272365\n      - -40.10746332279\n      - -22.77157855679\n      - -13.09463725292\n      - -7.229694706637\n      - -39.76746332279\n      - 1038.619509418\n      - -3380.982955438\n      - -162.9142242952\n      - -40.14746332279\n      - -8.999694706637\n      - -40.51746332279\n      - -8.809694706637\n      - -38.26746332279\n      - -38.83746332279\n      - -37.81746332279\n      - -119.4324082065\n      - -118.9824082065\n      - 1039.319509418\n      - -3383.882955438\n      - -115.1324082065\n      - -115.1224082065\n      - -45.42825536786\n      - -25.06182104696\n      - -116.5324082065\n      - -116.2424082065\n      - 1038.919509418\n      - -3384.582955438\n      - -45.94825536786\n      - -25.40182104696\n      - -116.0324082065\n      - -115.9824082065\n  -   - 20.49103808147\n      - 13.5209126197\n      - 5.123721658539\n      - 11.04005890825\n      - 7.746672181034\n      - 1.330664211629\n      - -0.447396135285\n      - -0.9996323863606\n      - -0.497396135285\n      - -0.8246323863606\n      - -1.945368320316\n      - -2.252654921573\n      - -1.698804957691\n      - -1.71369037214\n      - -1.681477631675\n      - -0.6656948221704\n      - 1.148772894321\n      - 18.19103808147\n      - 11.7209126197\n      - 12.52372165854\n      - 9.740058908254\n      - 4.646672181034\n      - -2.769335788371\n      - -2.247396135285\n      - -3.199632386361\n      - -2.372396135285\n      - -2.474632386361\n      - -2.170368320316\n      - -1.802654921573\n      - -3.836304957691\n      - -1.97619037214\n      - -1.493977631675\n      - 0.4593051778296\n      - -0.7637271056786\n      - 7.591038081469\n      - 13.8209126197\n      - 4.723721658539\n      - 3.340058908254\n      - 3.846672181034\n      - 0.5306642116288\n      - -4.747396135285\n      - -4.199632386361\n      - -4.697396135285\n      - -3.599632386361\n    
  - -3.220368320316\n      - -3.527654921573\n      - -2.748804957691\n      - -2.53869037214\n      - -2.281477631675\n      - -1.90319482217\n      - -0.9137271056786\n      - 1.691038081469\n      - 2.120912619699\n      - 1.723721658539\n      - 1.640058908254\n      - 0.9466721810337\n      - -0.9693357883712\n      - -2.147396135285\n      - -4.199632386361\n      - -2.184896135285\n      - -3.449632386361\n      - -2.920368320316\n      - -2.852654921573\n      - -2.411304957691\n      - -2.95119037214\n      - -2.056477631675\n      - -2.05319482217\n      - -1.251227105679\n      - 9.091038081469\n      - 3.323721658539\n      - -3.686566865047\n      - -5.328474711208\n      - -4.618474711208\n      - -2.178474711208\n      - -0.7527095065441\n      - 5.591038081469\n      - 0.8237216585391\n      - -2.986566865047\n      - -3.898474711208\n      - -4.988474711208\n      - -3.118474711208\n      - -2.442709506544\n      - 4.391038081469\n      - 1.923721658539\n      - -3.286566865047\n      - -3.868474711208\n      - -5.928474711208\n      - -4.428474711208\n      - -0.9427095065441\n      - 5.591038081469\n      - 4.423721658539\n      - -2.386566865047\n      - -5.178474711208\n      - -0.7527095065441\n      - -4.988474711208\n      - -4.618474711208\n      - -0.9427095065441\n      - 4.391038081469\n      - 0.9237216585391\n      - -1.553327818966\n      - -4.186566865047\n      - -5.552915024831\n      - -5.182915024831\n      - -3.124230683974\n      - -3.788474711208\n      - -0.892433332114\n      - 0.3245543770619\n      - 0.2872904934559\n      - 3.391038081469\n      - 2.923721658539\n      - -0.5533278189663\n      - -0.9865668650472\n      - -7.752915024831\n      - -7.432915024831\n      - -2.634230683974\n      - -4.988474711208\n      - -1.722433332114\n      - -1.365445622938\n      - 1.037290493456\n      - -1.908961918531\n      - 0.4237216585391\n      - -0.8533278189662\n      - -0.3865668650472\n      - -6.752915024831\n      - 
-6.082915024831\n      - -0.9442306839744\n      - -2.098474711208\n      - -3.972433332114\n      - -0.1254456229381\n      - 3.737290493456\n      - 5.591038081469\n      - 0.8209126196993\n      - 2.923721658539\n      - 4.140058908254\n      - -1.653327818966\n      - -4.169335788371\n      - -4.399632386361\n      - -3.529632386361\n      - -5.812868320316\n      - -6.382654921573\n      - -5.526304957691\n      - -5.68619037214\n      - -4.536477631675\n      - -4.00569482217\n      - -3.126227105679\n      - 6.451525288792\n      - 8.021525288792\n      - 0.3315252887919\n      - -5.928474711208\n      - -5.668474711208\n      - -1.618474711208\n      - -1.608961918531\n      - -4.376278341461\n      - -1.353327818966\n      - 3.613433134953\n      - 2.047084975169\n      - 1.755769316026\n      - 1.867084975169\n      - 1.865769316026\n      - -0.03847471120814\n      - -0.592433332114\n      - 3.394554377062\n      - 2.357290493456\n      - 8.291038081469\n      - 2.223721658539\n      - -1.853327818966\n      - -7.186566865047\n      - -3.752915024831\n      - -4.244230683974\n      - -3.192915024831\n      - -3.274230683974\n      - -1.498474711208\n      - -3.402433332114\n      - -0.8054456229381\n      - 2.127290493456\n      - -5.708961918531\n      - 0.1237216585391\n      - -8.553327818966\n      - -5.486566865047\n      - -2.052915024831\n      - -5.644230683974\n      - -1.762915024831\n      - -4.994230683974\n      - -4.388474711208\n      - -3.522433332114\n      - -1.475445622938\n      - 0.2572904934559\n      - -4.048474711208\n      - 8.591038081469\n      - 5.423721658539\n      - -0.6865668650472\n      - -4.428474711208\n      - -1.512709506544\n      - -4.798474711208\n      - -1.322709506544\n      - -2.548474711208\n      - -3.118474711208\n      - -2.098474711208\n      - -6.047396135285\n      - -5.597396135285\n      - 9.291038081469\n      - 2.523721658539\n      - -1.747396135285\n      - -1.737396135285\n      - 
-2.512868320316\n      - -2.896304957691\n      - -3.147396135285\n      - -2.857396135285\n      - 8.891038081469\n      - 1.823721658539\n      - -3.032868320316\n      - -3.236304957691\n      - -2.647396135285\n      - -2.597396135285\n  -   - 11.34572446599\n      - 12.71016226644\n      - 9.103954561983\n      - 17.84889991643\n      - 16.22846806054\n      - 11.28579424593\n      - 9.470899876731\n      - 7.817399827028\n      - 9.420899876731\n      - 7.992399827028\n      - 5.668390695887\n      - 4.274674199655\n      - 3.897111252284\n      - 3.096292463526\n      - 2.467532445354\n      - 2.926528936865\n      - 4.270267329615\n      - 9.045724465995\n      - 10.91016226644\n      - 16.50395456198\n      - 16.54889991643\n      - 13.12846806054\n      - 7.185794245929\n      - 7.670899876731\n      - 5.617399827028\n      - 7.545899876731\n      - 6.342399827028\n      - 5.443390695887\n      - 4.724674199655\n      - 1.759611252284\n      - 2.833792463526\n      - 2.655032445354\n      - 4.051528936865\n      - 2.357767329615\n      - -1.554275534005\n      - 13.01016226644\n      - 8.703954561983\n      - 10.14889991643\n      - 12.32846806054\n      - 10.48579424593\n      - 5.170899876731\n      - 4.617399827028\n      - 5.220899876731\n      - 5.217399827028\n      - 4.393390695887\n      - 2.999674199655\n      - 2.847111252284\n      - 2.271292463526\n      - 1.867532445354\n      - 1.689028936865\n      - 2.207767329615\n      - -7.454275534005\n      - 1.310162266438\n      - 5.703954561983\n      - 8.448899916427\n      - 9.428468060537\n      - 8.985794245929\n      - 7.770899876731\n      - 4.617399827028\n      - 7.733399876731\n      - 5.367399827028\n      - 4.693390695887\n      - 3.674674199655\n      - 3.184611252284\n      - 1.858792463526\n      - 2.092532445354\n      - 1.539028936865\n      - 1.870267329615\n      - -0.05427553400547\n      - 7.303954561983\n      - 6.51450668858\n      - 1.723185425401\n      - 2.433185425401\n    
  - 4.873185425401\n      - 2.160876526885\n      - -3.554275534005\n      - 4.803954561983\n      - 7.21450668858\n      - 3.153185425401\n      - 2.063185425401\n      - 3.933185425401\n      - 0.4708765268852\n      - -4.754275534005\n      - 5.903954561983\n      - 6.91450668858\n      - 3.183185425401\n      - 1.123185425401\n      - 2.623185425401\n      - 1.970876526885\n      - -3.554275534005\n      - 8.403954561983\n      - 7.81450668858\n      - 1.873185425401\n      - 2.160876526885\n      - 2.063185425401\n      - 2.433185425401\n      - 1.970876526885\n      - -4.754275534005\n      - 4.903954561983\n      - 6.928468060537\n      - 6.01450668858\n      - 3.857070725448\n      - 4.227070725448\n      - 5.083097819871\n      - 3.263185425401\n      - 4.293542360661\n      - 4.18335415557\n      - 3.200876526885\n      - -5.754275534005\n      - 6.903954561983\n      - 7.928468060537\n      - 9.21450668858\n      - 1.657070725448\n      - 1.977070725448\n      - 5.573097819871\n      - 2.063185425401\n      - 3.463542360661\n      - 2.49335415557\n      - 3.950876526885\n      - -11.05427553401\n      - 4.403954561983\n      - 7.628468060537\n      - 9.81450668858\n      - 2.657070725448\n      - 3.327070725448\n      - 7.263097819871\n      - 4.953185425401\n      - 1.213542360661\n      - 3.73335415557\n      - 6.650876526885\n      - -3.554275534005\n      - 0.01016226643847\n      - 6.903954561983\n      - 10.94889991643\n      - 6.828468060537\n      - 5.785794245929\n      - 4.417399827028\n      - 5.287399827028\n      - 1.800890695887\n      - 0.1446741996551\n      - 0.06961125228437\n      - -0.8762075364739\n      - -0.3874675546463\n      - -0.4134710631349\n      - -0.004732670384874\n      - 13.5031854254\n      - 15.0731854254\n      - 7.383185425401\n      - 1.123185425401\n      - 1.383185425401\n      - 5.433185425401\n      - -10.75427553401\n      - -0.3960454380168\n      - 7.128468060537\n      - 13.81450668858\n      - 
11.45707072545\n      - 9.963097819871\n      - 11.27707072545\n      - 10.07309781987\n      - 7.013185425401\n      - 4.593542360661\n      - 7.25335415557\n      - 5.270876526885\n      - -0.8542755340055\n      - 6.203954561983\n      - 6.628468060537\n      - 3.01450668858\n      - 5.657070725448\n      - 3.963097819871\n      - 6.217070725448\n      - 4.933097819871\n      - 5.553185425401\n      - 1.783542360661\n      - 3.05335415557\n      - 5.040876526885\n      - -14.85427553401\n      - 4.103954561983\n      - -0.07153193946291\n      - 4.71450668858\n      - 7.357070725448\n      - 2.563097819871\n      - 7.647070725448\n      - 3.213097819871\n      - 2.663185425401\n      - 1.663542360661\n      - 2.38335415557\n      - 3.170876526885\n      - 3.003185425401\n      - -0.5542755340055\n      - 9.403954561983\n      - 9.51450668858\n      - 2.623185425401\n      - 1.400876526885\n      - 2.253185425401\n      - 1.590876526885\n      - 4.503185425401\n      - 3.933185425401\n      - 4.953185425401\n      - 3.870899876731\n      - 4.320899876731\n      - 0.1457244659945\n      - 6.503954561983\n      - 8.170899876731\n      - 8.180899876731\n      - 5.100890695887\n      - 2.699611252284\n      - 6.770899876731\n      - 7.060899876731\n      - -0.2542755340055\n      - 5.803954561983\n      - 4.580890695887\n      - 2.359611252284\n      - 7.270899876731\n      - 7.320899876731\n  -   - 16.53673795362\n      - 12.64348483951\n      - 6.243066148003\n      - 13.47083954897\n      - 11.03975774331\n      - 5.537722754593\n      - 4.09957193688\n      - 3.220802215329\n      - 4.04957193688\n      - 3.395802215329\n      - 1.800447382832\n      - 1.020568885813\n      - 1.147168191638\n      - 0.7592201452716\n      - 0.470019408067\n      - 1.210124173449\n      - 2.788233035035\n      - 14.23673795362\n      - 10.84348483951\n      - 13.643066148\n      - 12.17083954897\n      - 7.939757743311\n      - 1.437722754593\n      - 2.29957193688\n      - 
1.020802215329\n      - 2.17457193688\n      - 1.745802215329\n      - 1.575447382832\n      - 1.470568885813\n      - -0.9903318083618\n      - 0.4967201452716\n      - 0.657519408067\n      - 2.335124173449\n      - 0.8757330350355\n      - 3.636737953619\n      - 12.94348483951\n      - 5.843066148002\n      - 5.770839548972\n      - 7.139757743311\n      - 4.737722754593\n      - -0.2004280631201\n      - 0.02080221532934\n      - -0.1504280631201\n      - 0.6208022153293\n      - 0.525447382832\n      - -0.2544311141866\n      - 0.09716819163822\n      - -0.06577985472837\n      - -0.129980591933\n      - -0.02737582655083\n      - 0.7257330350355\n      - -2.263262046381\n      - 1.24348483951\n      - 2.843066148002\n      - 4.070839548972\n      - 4.239757743311\n      - 3.237722754593\n      - 2.39957193688\n      - 0.02080221532934\n      - 2.36207193688\n      - 0.7708022153293\n      - 0.825447382832\n      - 0.4205688858134\n      - 0.4346681916382\n      - -0.4782798547284\n      - 0.09501940806695\n      - -0.1773758265508\n      - 0.3882330350355\n      - 5.136737953619\n      - 4.443066148003\n      - 0.8383235109659\n      - -1.82318329708\n      - -1.11318329708\n      - 1.32681670292\n      - 0.7814350074993\n      - 1.636737953619\n      - 1.943066148003\n      - 1.538323510966\n      - -0.39318329708\n      - -1.48318329708\n      - 0.38681670292\n      - -0.9085649925007\n      - 0.4367379536194\n      - 3.043066148003\n      - 1.238323510966\n      - -0.36318329708\n      - -2.42318329708\n      - -0.92318329708\n      - 0.5914350074993\n      - 1.636737953619\n      - 5.543066148003\n      - 2.138323510966\n      - -1.67318329708\n      - 0.7814350074993\n      - -1.48318329708\n      - -1.11318329708\n      - 0.5914350074993\n      - 0.4367379536194\n      - 2.043066148003\n      - 1.739757743311\n      - 0.3383235109659\n      - -1.132711355225\n      - -0.7627113552248\n      - 0.8640180395549\n      - -0.28318329708\n      - 
1.760258757134\n      - 2.332901512346\n      - 1.821435007499\n      - -0.5632620463806\n      - 4.043066148003\n      - 2.739757743311\n      - 3.538323510966\n      - -3.332711355225\n      - -3.012711355225\n      - 1.354018039555\n      - -1.48318329708\n      - 0.9302587571339\n      - 0.6429015123457\n      - 2.571435007499\n      - -5.863262046381\n      - 1.543066148003\n      - 2.439757743311\n      - 4.138323510966\n      - -2.332711355225\n      - -1.662711355225\n      - 3.044018039555\n      - 1.40681670292\n      - -1.319741242866\n      - 1.882901512346\n      - 5.271435007499\n      - 1.636737953619\n      - -0.05651516048991\n      - 4.043066148003\n      - 6.570839548972\n      - 1.639757743311\n      - 0.03772275459342\n      - -0.1791977846707\n      - 0.6908022153293\n      - -2.067052617168\n      - -3.109431114187\n      - -2.680331808362\n      - -3.213279854728\n      - -2.384980591933\n      - -2.129875826551\n      - -1.486766964965\n      - 9.95681670292\n      - 11.52681670292\n      - 3.83681670292\n      - -2.42318329708\n      - -2.16318329708\n      - 1.88681670292\n      - -5.563262046381\n      - -3.256933851997\n      - 1.939757743311\n      - 8.138323510966\n      - 6.467288644775\n      - 5.744018039555\n      - 6.287288644775\n      - 5.854018039555\n      - 3.46681670292\n      - 2.060258757134\n      - 5.402901512346\n      - 3.891435007499\n      - 4.336737953619\n      - 3.343066148002\n      - 1.439757743311\n      - -2.661676489034\n      - 0.6672886447752\n      - -0.2559819604451\n      - 1.227288644775\n      - 0.7140180395549\n      - 2.00681670292\n      - -0.7497412428661\n      - 1.202901512346\n      - 3.661435007499\n      - -9.663262046381\n      - 1.243066148003\n      - -5.260242256689\n      - -0.9616764890341\n      - 2.367288644775\n      - -1.655981960445\n      - 2.657288644775\n      - -1.005981960445\n      - -0.88318329708\n      - -0.8697412428661\n      - 0.5329015123457\n      - 1.791435007499\n   
   - -0.54318329708\n      - 4.636737953619\n      - 6.543066148003\n      - 3.838323510966\n      - -0.92318329708\n      - 0.02143500749933\n      - -1.29318329708\n      - 0.2114350074993\n      - 0.95681670292\n      - 0.38681670292\n      - 1.40681670292\n      - -1.50042806312\n      - -1.05042806312\n      - 5.336737953619\n      - 3.643066148003\n      - 2.79957193688\n      - 2.80957193688\n      - 1.232947382832\n      - -0.05033180836178\n      - 1.39957193688\n      - 1.68957193688\n      - 4.936737953619\n      - 2.943066148003\n      - 0.712947382832\n      - -0.3903318083618\n      - 1.89957193688\n      - 1.94957193688\nhistory_x:\n  -   - 0.15\n      - 0.008\n      - 0.01\n  -   - 0.25\n      - 0.008\n      - 0.01\n  -   - 0.15\n      - 0.108\n      - 0.01\n  -   - 0.15\n      - 0.008\n      - 0.11\n  -   - 0.1596177824551\n      - -0.07539624732067\n      - 0.08766385239892\n  -   - 0.2\n      - 0.008531162120637\n      - -0.002952684076318\n  -   - 0.1505141617677\n      - -0.04199731338289\n      - 0.009934485345754\n  -   - 0.1374618789969\n      - 0.007934485345754\n      - -0.03840238867598\n  -   - 0.1505250437069\n      - 0.007964908595663\n      - 0.01275913089388\n  -   - 0.149883507892\n      - 0.008098080768719\n      - 0.009146244784311\n  -   - 0.1716712756093\n      - -0.003385426549061\n      - 0.004854131368058\n  -   - 0.1499498551576\n      - 0.008185153997901\n      - 0.009255435636305\n  -   - 0.1486949409413\n      - 0.001680047032405\n      - 0.01940631659429\n  -   - 0.1494212312914\n      - 0.005607806220598\n      - 0.01308958287811\ninterpolation_set_expected:\n  -   - 0.0\n      - 0.0\n      - 0.0\n  -   - -0.0581032280076\n      - -0.3142207350554\n      - 0.5053386972944\n  -   - 0.04228990929916\n      - 0.2061878221842\n      - -0.3067317793442\n  -   - 1.780003545435\n      - -0.7194586215727\n      - -0.658836120804\n  -   - 0.03698212804807\n      - 0.1992219638497\n      - -0.3154670475037\n  -   - 
0.08830499324404\n      - 0.1885681900052\n      - -0.02643615873801\n  -   - 0.0874344381026\n      - -3.808409568279\n      - -0.2524078025883\nlinear_terms_residual_model:\n  -   - 723.7257702007\n      - 1733.358000128\n      - -2111.94123313\n      - -558.4561073307\n      - -289.1782671082\n      - -123.1292919174\n      - -32.05431142241\n      - 7.210065582227\n      - -32.05431142241\n      - 7.210065582217\n      - 128.5030418743\n      - -61.94075414453\n      - -28.86164804598\n      - 59.16327636\n      - -6.773101689526\n      - -5.359113834066\n      - -3.97310556824\n      - 723.7257702007\n      - 1733.358000128\n      - -2111.94123313\n      - -558.4561073307\n      - -289.1782671082\n      - -123.1292919174\n      - -32.0543114224\n      - 7.210065582226\n      - -32.0543114224\n      - 7.210065582223\n      - 128.5030418743\n      - -61.94075414453\n      - -28.86164804599\n      - 59.16327636\n      - -6.773101689526\n      - -5.359113834064\n      - -3.97310556824\n      - 723.7257702008\n      - 1733.358000128\n      - -2111.94123313\n      - -558.4561073307\n      - -289.1782671082\n      - -123.1292919174\n      - -32.0543114224\n      - 7.210065582221\n      - -32.0543114224\n      - 7.210065582222\n      - 128.5030418743\n      - -61.94075414453\n      - -28.86164804599\n      - 59.16327636\n      - -6.773101689525\n      - -5.359113834065\n      - -3.97310556824\n      - 723.7257702007\n      - 1733.358000128\n      - -2111.94123313\n      - -558.4561073307\n      - -289.1782671082\n      - -123.1292919174\n      - -32.05431142241\n      - 7.210065582221\n      - -32.0543114224\n      - 7.210065582226\n      - 128.5030418743\n      - -61.94075414453\n      - -28.86164804599\n      - 59.16327636\n      - -6.773101689524\n      - -5.359113834066\n      - -3.97310556824\n      - 723.7257702007\n      - -2111.94123313\n      - -63.20258122701\n      - -187.067690334\n      - -187.067690334\n      - -187.067690334\n      - -3.412910016525\n   
   - 723.7257702008\n      - -2111.94123313\n      - -63.20258122701\n      - -187.067690334\n      - -187.067690334\n      - -187.067690334\n      - -3.412910016525\n      - 723.7257702007\n      - -2111.94123313\n      - -63.20258122701\n      - -187.067690334\n      - -187.067690334\n      - -187.067690334\n      - -3.412910016525\n      - 723.7257702008\n      - -2111.94123313\n      - -63.20258122702\n      - -187.067690334\n      - -3.412910016525\n      - -187.067690334\n      - -187.067690334\n      - -3.412910016525\n      - 723.7257702007\n      - -2111.94123313\n      - -289.1782671082\n      - -63.20258122702\n      - -11.3061006031\n      - -11.3061006031\n      - 33.45212714558\n      - -187.067690334\n      - -26.50116734208\n      - -6.14459279585\n      - -3.412910016526\n      - 723.7257702007\n      - -2111.94123313\n      - -289.1782671082\n      - -63.202581227\n      - -11.30610060311\n      - -11.3061006031\n      - 33.45212714558\n      - -187.067690334\n      - -26.50116734208\n      - -6.14459279585\n      - -3.412910016526\n      - 723.7257702007\n      - -2111.94123313\n      - -289.1782671082\n      - -63.20258122701\n      - -11.30610060311\n      - -11.30610060311\n      - 33.45212714559\n      - -187.067690334\n      - -26.50116734207\n      - -6.144592795851\n      - -3.412910016524\n      - 723.7257702008\n      - 1733.358000128\n      - -2111.94123313\n      - -558.4561073307\n      - -289.1782671082\n      - -123.1292919174\n      - 7.210065582226\n      - 7.210065582224\n      - 128.5030418743\n      - -61.94075414453\n      - -28.86164804599\n      - 59.16327636\n      - -6.773101689526\n      - -5.359113834066\n      - -3.973105568241\n      - -187.067690334\n      - -187.067690334\n      - -187.067690334\n      - -187.067690334\n      - -187.067690334\n      - -187.067690334\n      - 723.7257702007\n      - -2111.94123313\n      - -289.1782671082\n      - -63.20258122702\n      - -11.30610060312\n      - 33.45212714558\n      
- -11.30610060311\n      - 33.45212714559\n      - -187.067690334\n      - -26.50116734208\n      - -6.14459279585\n      - -3.412910016525\n      - 723.7257702007\n      - -2111.94123313\n      - -289.1782671082\n      - -63.20258122701\n      - -11.30610060312\n      - 33.45212714558\n      - -11.30610060311\n      - 33.45212714558\n      - -187.067690334\n      - -26.50116734208\n      - -6.14459279585\n      - -3.412910016526\n      - 723.7257702007\n      - -2111.94123313\n      - -289.1782671082\n      - -63.20258122701\n      - -11.30610060311\n      - 33.45212714559\n      - -11.30610060312\n      - 33.45212714558\n      - -187.067690334\n      - -26.50116734208\n      - -6.14459279585\n      - -3.412910016527\n      - -187.067690334\n      - 723.7257702008\n      - -2111.94123313\n      - -63.20258122702\n      - -187.067690334\n      - -3.412910016525\n      - -187.067690334\n      - -3.412910016526\n      - -187.067690334\n      - -187.067690334\n      - -187.067690334\n      - -32.05431142241\n      - -32.05431142242\n      - 723.7257702007\n      - -2111.94123313\n      - -32.05431142241\n      - -32.05431142241\n      - 128.5030418743\n      - -28.86164804599\n      - -32.05431142241\n      - -32.05431142241\n      - 723.7257702007\n      - -2111.94123313\n      - 128.5030418743\n      - -28.86164804599\n      - -32.05431142241\n      - -32.05431142241\n  -   - -250.9049856979\n      - -713.548298078\n      - 991.3297761498\n      - 292.2346826038\n      - 167.0701872133\n      - 85.53108774506\n      - 35.98074162601\n      - 13.74284457971\n      - 35.98074162601\n      - 13.74284457972\n      - -38.33093307754\n      - 33.90567353701\n      - 18.14259771035\n      - 14.18583175432\n      - 9.62399002556\n      - 7.529398915525\n      - 6.047825533545\n      - -250.9049856979\n      - -713.548298078\n      - 991.3297761498\n      - 292.2346826038\n      - 167.0701872133\n      - 85.53108774507\n      - 35.980741626\n      - 13.74284457971\n      - 
35.980741626\n      - 13.74284457971\n      - -38.33093307755\n      - 33.90567353701\n      - 18.14259771035\n      - 14.18583175432\n      - 9.62399002556\n      - 7.529398915524\n      - 6.047825533545\n      - -250.904985698\n      - -713.5482980779\n      - 991.3297761498\n      - 292.2346826038\n      - 167.0701872133\n      - 85.53108774506\n      - 35.980741626\n      - 13.74284457971\n      - 35.980741626\n      - 13.74284457971\n      - -38.33093307754\n      - 33.90567353701\n      - 18.14259771035\n      - 14.18583175432\n      - 9.62399002556\n      - 7.529398915524\n      - 6.047825533545\n      - -250.9049856979\n      - -713.5482980779\n      - 991.3297761498\n      - 292.2346826038\n      - 167.0701872133\n      - 85.53108774506\n      - 35.98074162601\n      - 13.74284457971\n      - 35.980741626\n      - 13.74284457971\n      - -38.33093307754\n      - 33.90567353701\n      - 18.14259771035\n      - 14.18583175432\n      - 9.62399002556\n      - 7.529398915525\n      - 6.047825533545\n      - -250.9049856979\n      - 991.3297761498\n      - 53.62515459183\n      - 84.70726176104\n      - 84.70726176104\n      - 84.70726176104\n      - 5.458199688096\n      - -250.904985698\n      - 991.3297761498\n      - 53.62515459184\n      - 84.70726176104\n      - 84.70726176104\n      - 84.70726176104\n      - 5.458199688096\n      - -250.9049856979\n      - 991.3297761499\n      - 53.62515459184\n      - 84.70726176104\n      - 84.70726176104\n      - 84.70726176104\n      - 5.458199688096\n      - -250.904985698\n      - 991.3297761499\n      - 53.62515459184\n      - 84.70726176104\n      - 5.458199688096\n      - 84.70726176104\n      - 84.70726176104\n      - 5.458199688096\n      - -250.9049856979\n      - 991.3297761499\n      - 167.0701872133\n      - 53.62515459184\n      - 23.96018695733\n      - 23.96018695733\n      - 1.082529471011\n      - 84.70726176104\n      - 14.82949233042\n      - 8.477216658368\n      - 5.458199688097\n      - 
-250.9049856979\n      - 991.3297761499\n      - 167.0701872133\n      - 53.62515459183\n      - 23.96018695733\n      - 23.96018695733\n      - 1.082529471012\n      - 84.70726176104\n      - 14.82949233042\n      - 8.477216658368\n      - 5.458199688097\n      - -250.9049856979\n      - 991.3297761499\n      - 167.0701872133\n      - 53.62515459184\n      - 23.96018695733\n      - 23.96018695733\n      - 1.082529471004\n      - 84.70726176104\n      - 14.82949233042\n      - 8.477216658369\n      - 5.458199688096\n      - -250.904985698\n      - -713.548298078\n      - 991.3297761499\n      - 292.2346826038\n      - 167.0701872133\n      - 85.53108774506\n      - 13.74284457971\n      - 13.74284457971\n      - -38.33093307754\n      - 33.90567353701\n      - 18.14259771035\n      - 14.18583175432\n      - 9.623990025561\n      - 7.529398915525\n      - 6.047825533545\n      - 84.70726176104\n      - 84.70726176104\n      - 84.70726176104\n      - 84.70726176104\n      - 84.70726176104\n      - 84.70726176104\n      - -250.904985698\n      - 991.3297761498\n      - 167.0701872133\n      - 53.62515459184\n      - 23.96018695734\n      - 1.08252947101\n      - 23.96018695733\n      - 1.082529471007\n      - 84.70726176104\n      - 14.82949233042\n      - 8.477216658368\n      - 5.458199688096\n      - -250.9049856979\n      - 991.3297761498\n      - 167.0701872133\n      - 53.62515459184\n      - 23.96018695734\n      - 1.082529471008\n      - 23.96018695733\n      - 1.08252947101\n      - 84.70726176104\n      - 14.82949233042\n      - 8.477216658368\n      - 5.458199688096\n      - -250.904985698\n      - 991.3297761499\n      - 167.0701872133\n      - 53.62515459184\n      - 23.96018695733\n      - 1.082529471007\n      - 23.96018695734\n      - 1.082529471008\n      - 84.70726176104\n      - 14.82949233042\n      - 8.477216658368\n      - 5.458199688097\n      - 84.70726176104\n      - -250.904985698\n      - 991.3297761499\n      - 53.62515459184\n      - 
84.70726176104\n      - 5.458199688096\n      - 84.70726176104\n      - 5.458199688096\n      - 84.70726176104\n      - 84.70726176104\n      - 84.70726176104\n      - 35.98074162601\n      - 35.98074162601\n      - -250.904985698\n      - 991.3297761498\n      - 35.98074162601\n      - 35.98074162601\n      - -38.33093307755\n      - 18.14259771035\n      - 35.98074162601\n      - 35.98074162601\n      - -250.9049856979\n      - 991.3297761498\n      - -38.33093307754\n      - 18.14259771035\n      - 35.98074162601\n      - 35.98074162601\n  -   - -86.32920671897\n      - -249.9457650428\n      - 379.8255147156\n      - 124.7291668319\n      - 79.62656596282\n      - 49.85277513245\n      - 29.83406116421\n      - 19.43979000176\n      - 29.83406116421\n      - 19.43979000176\n      - -0.5081983391073\n      - 21.74505371124\n      - 14.64616559016\n      - 20.52866927178\n      - 10.12254826871\n      - 8.345670744827\n      - 7.034149386141\n      - -86.32920671897\n      - -249.9457650428\n      - 379.8255147156\n      - 124.7291668319\n      - 79.62656596282\n      - 49.85277513245\n      - 29.83406116421\n      - 19.43979000176\n      - 29.83406116421\n      - 19.43979000176\n      - -0.5081983391093\n      - 21.74505371124\n      - 14.64616559016\n      - 20.52866927178\n      - 10.12254826871\n      - 8.345670744827\n      - 7.034149386141\n      - -86.32920671898\n      - -249.9457650428\n      - 379.8255147156\n      - 124.7291668319\n      - 79.62656596282\n      - 49.85277513245\n      - 29.83406116421\n      - 19.43979000176\n      - 29.83406116421\n      - 19.43979000176\n      - -0.5081983391052\n      - 21.74505371124\n      - 14.64616559016\n      - 20.52866927178\n      - 10.12254826871\n      - 8.345670744827\n      - 7.034149386141\n      - -86.32920671897\n      - -249.9457650428\n      - 379.8255147156\n      - 124.7291668319\n      - 79.62656596282\n      - 49.85277513245\n      - 29.83406116421\n      - 19.43979000176\n      - 
29.83406116421\n      - 19.43979000176\n      - -0.5081983391036\n      - 21.74505371124\n      - 14.64616559016\n      - 20.52866927178\n      - 10.12254826871\n      - 8.345670744828\n      - 7.034149386141\n      - -86.32920671897\n      - 379.8255147156\n      - 37.38830735093\n      - 39.79428584982\n      - 39.79428584982\n      - 39.79428584981\n      - 6.487537441591\n      - -86.32920671898\n      - 379.8255147156\n      - 37.38830735093\n      - 39.79428584981\n      - 39.79428584982\n      - 39.79428584982\n      - 6.487537441591\n      - -86.32920671897\n      - 379.8255147156\n      - 37.38830735093\n      - 39.79428584982\n      - 39.79428584982\n      - 39.79428584982\n      - 6.487537441591\n      - -86.32920671898\n      - 379.8255147156\n      - 37.38830735094\n      - 39.79428584982\n      - 6.487537441591\n      - 39.79428584982\n      - 39.79428584982\n      - 6.487537441591\n      - -86.32920671897\n      - 379.8255147156\n      - 79.62656596282\n      - 37.38830735093\n      - 24.27471941816\n      - 24.27471941816\n      - 13.90491560132\n      - 39.79428584982\n      - 12.42064972558\n      - 9.15250698783\n      - 6.487537441591\n      - -86.32920671897\n      - 379.8255147156\n      - 79.62656596282\n      - 37.38830735093\n      - 24.27471941816\n      - 24.27471941816\n      - 13.90491560132\n      - 39.79428584982\n      - 12.42064972558\n      - 9.15250698783\n      - 6.487537441591\n      - -86.32920671896\n      - 379.8255147156\n      - 79.62656596282\n      - 37.38830735093\n      - 24.27471941816\n      - 24.27471941816\n      - 13.90491560132\n      - 39.79428584981\n      - 12.42064972558\n      - 9.152506987831\n      - 6.487537441591\n      - -86.32920671898\n      - -249.9457650428\n      - 379.8255147156\n      - 124.7291668319\n      - 79.62656596282\n      - 49.85277513245\n      - 19.43979000176\n      - 19.43979000176\n      - -0.5081983391036\n      - 21.74505371124\n      - 14.64616559016\n      - 20.52866927178\n     
 - 10.12254826871\n      - 8.345670744827\n      - 7.034149386141\n      - 39.79428584981\n      - 39.79428584981\n      - 39.79428584981\n      - 39.79428584982\n      - 39.79428584981\n      - 39.79428584982\n      - -86.32920671897\n      - 379.8255147156\n      - 79.62656596282\n      - 37.38830735094\n      - 24.27471941817\n      - 13.90491560132\n      - 24.27471941816\n      - 13.90491560132\n      - 39.79428584982\n      - 12.42064972558\n      - 9.15250698783\n      - 6.487537441591\n      - -86.32920671897\n      - 379.8255147156\n      - 79.62656596282\n      - 37.38830735093\n      - 24.27471941817\n      - 13.90491560132\n      - 24.27471941816\n      - 13.90491560132\n      - 39.79428584981\n      - 12.42064972558\n      - 9.15250698783\n      - 6.487537441591\n      - -86.32920671897\n      - 379.8255147156\n      - 79.62656596282\n      - 37.38830735093\n      - 24.27471941816\n      - 13.90491560132\n      - 24.27471941817\n      - 13.90491560132\n      - 39.79428584982\n      - 12.42064972558\n      - 9.15250698783\n      - 6.487537441591\n      - 39.79428584982\n      - -86.32920671898\n      - 379.8255147156\n      - 37.38830735094\n      - 39.79428584982\n      - 6.487537441591\n      - 39.79428584981\n      - 6.487537441591\n      - 39.79428584982\n      - 39.79428584982\n      - 39.79428584981\n      - 29.83406116421\n      - 29.83406116421\n      - -86.32920671897\n      - 379.8255147156\n      - 29.83406116421\n      - 29.83406116421\n      - -0.5081983391078\n      - 14.64616559016\n      - 29.83406116421\n      - 29.83406116421\n      - -86.32920671897\n      - 379.8255147156\n      - -0.5081983391052\n      - 14.64616559016\n      - 29.83406116421\n      - 29.83406116421\nmodel_indices:\n  - 13\n  - 12\n  - 11\n  - 10\n  - 10\n  - 7\n  - 6\nn_modelpoints: 7\nresiduals:\n  - 14.13989818606\n  - 10.00317449361\n  - 3.744918552079\n  - 10.77980934854\n  - 8.347596395984\n  - 2.920071415924\n  - 1.796288999298\n  - 1.248596846502\n  - 
1.746288999298\n  - 1.423596846502\n  - 0.03689425329096\n  - -0.3219611012974\n  - 0.0002647009432151\n  - -0.881408646043\n  - -0.4303702662499\n  - 0.4401190678521\n  - 2.121655004644\n  - 11.83989818606\n  - 8.203174493607\n  - 11.14491855208\n  - 9.479809348539\n  - 5.247596395983\n  - -1.179928584076\n  - -0.003711000701976\n  - -0.9514031534983\n  - -0.128711000702\n  - -0.2264031534983\n  - -0.188105746709\n  - 0.1280388987026\n  - -2.137235299057\n  - -1.143908646043\n  - -0.2428702662499\n  - 1.565119067852\n  - 0.2091550046441\n  - 1.239898186057\n  - 10.30317449361\n  - 3.344918552079\n  - 3.079809348539\n  - 4.447596395984\n  - 2.120071415924\n  - -2.503711000702\n  - -1.951403153498\n  - -2.453711000702\n  - -1.351403153498\n  - -1.238105746709\n  - -1.596961101297\n  - -1.049735299057\n  - -1.706408646043\n  - -1.03037026625\n  - -0.7973809321479\n  - 0.05915500464407\n  - -4.660101813943\n  - -1.396825506394\n  - 0.3449185520786\n  - 1.379809348539\n  - 1.547596395983\n  - 0.6200714159243\n  - 0.09628899929797\n  - -1.951403153498\n  - 0.05878899929802\n  - -1.201403153498\n  - -0.9381057467091\n  - -0.9219611012974\n  - -0.7122352990568\n  - -2.118908646043\n  - -0.8053702662499\n  - -0.9473809321479\n  - -0.2783449953559\n  - 2.739898186057\n  - 1.944918552078\n  - -1.63399667708\n  - -3.183999068009\n  - -2.473999068009\n  - -0.03399906800905\n  - 0.159797079125\n  - -0.7601018139425\n  - -0.5550814479215\n  - -0.9339966770796\n  - -1.753999068009\n  - -2.843999068009\n  - -0.9739990680091\n  - -1.530202920875\n  - -1.960101813943\n  - 0.5449185520779\n  - -1.23399667708\n  - -1.723999068009\n  - -3.783999068009\n  - -2.283999068009\n  - -0.03020292087504\n  - -0.7601018139425\n  - 3.044918552078\n  - -0.3339966770796\n  - -3.033999068009\n  - 0.159797079125\n  - -2.843999068009\n  - -2.473999068009\n  - -0.03020292087504\n  - -1.960101813943\n  - -0.4550814479221\n  - -0.9524036040165\n  - -2.13399667708\n  - -3.265462533007\n  - 
-2.895462533007\n  - -0.9678561331385\n  - -1.643999068009\n  - 0.7313676292134\n  - 1.502356325981\n  - 1.199797079125\n  - -2.960101813943\n  - 1.544918552078\n  - 0.04759639598348\n  - 1.06600332292\n  - -5.465462533007\n  - -5.145462533007\n  - -0.4778561331385\n  - -2.843999068009\n  - -0.09863237078665\n  - -0.1876436740187\n  - 1.949797079125\n  - -8.260101813943\n  - -0.9550814479221\n  - -0.2524036040165\n  - 1.66600332292\n  - -4.465462533007\n  - -3.795462533007\n  - 1.212143866862\n  - 0.04600093199099\n  - -2.348632370787\n  - 1.052356325981\n  - 4.649797079125\n  - -0.7601018139425\n  - -2.696825506393\n  - 1.544918552078\n  - 3.879809348539\n  - -1.052403604016\n  - -2.579928584076\n  - -2.151403153498\n  - -1.281403153498\n  - -3.830605746709\n  - -4.451961101297\n  - -3.827235299057\n  - -4.853908646043\n  - -3.28537026625\n  - -2.899880932148\n  - -2.153344995356\n  - 8.596000931991\n  - 10.16600093199\n  - 2.476000931991\n  - -3.783999068009\n  - -3.523999068009\n  - 0.5260009319909\n  - -7.960101813943\n  - -5.755081447921\n  - -0.7524036040165\n  - 5.66600332292\n  - 4.334537466993\n  - 3.912143866861\n  - 4.154537466993\n  - 4.022143866862\n  - 2.106000931991\n  - 1.031367629213\n  - 4.572356325981\n  - 3.269797079125\n  - 1.939898186057\n  - 0.8449185520786\n  - -1.252403604017\n  - -5.13399667708\n  - -1.465462533007\n  - -2.087856133139\n  - -0.9054625330074\n  - -1.117856133138\n  - 0.6460009319909\n  - -1.778632370787\n  - 0.3723563259813\n  - 3.039797079125\n  - -12.06010181394\n  - -1.255081447922\n  - -7.952403604016\n  - -3.43399667708\n  - 0.2345374669926\n  - -3.487856133138\n  - 0.5245374669926\n  - -2.837856133139\n  - -2.243999068009\n  - -1.898632370787\n  - -0.2976436740187\n  - 1.169797079125\n  - -1.903999068009\n  - 2.239898186058\n  - 4.044918552078\n  - 1.36600332292\n  - -2.283999068009\n  - -0.600202920875\n  - -2.653999068009\n  - -0.410202920875\n  - -0.4039990680091\n  - -0.9739990680091\n  - 0.04600093199099\n  - 
-3.803711000702\n  - -3.353711000702\n  - 2.939898186057\n  - 1.144918552079\n  - 0.496288999298\n  - 0.506288999298\n  - -0.5306057467091\n  - -1.197235299057\n  - -0.9037110007021\n  - -0.6137110007021\n  - 2.539898186057\n  - 0.4449185520785\n  - -1.050605746709\n  - -1.537235299057\n  - -0.4037110007021\n  - -0.3537110007021\nsquare_terms_residual_model:\n  -   -   - -266.0047978153\n          - -104.4240748307\n          - 113.152993159\n      -   - -104.4240748307\n          - -122.1523164991\n          - -86.07363307296\n      -   - 113.152993159\n          - -86.07363307296\n          - 16.31641633252\n  -   -   - -693.1880101263\n          - -282.0272651787\n          - 185.6089587299\n      -   - -282.0272651787\n          - -377.0062670135\n          - -199.8947029577\n      -   - 185.6089587299\n          - -199.8947029577\n          - 32.55049992815\n  -   -   - 912.6451934849\n          - 383.7119079265\n          - -122.603887661\n      -   - 383.7119079265\n          - 559.8400701698\n          - 213.0361667162\n      -   - -122.603887661\n          - 213.0361667162\n          - -29.3843898806\n  -   -   - 259.2154297329\n          - 115.0514237096\n          - -9.284737880698\n      -   - 115.0514237096\n          - 174.2494476566\n          - 49.11184063816\n      -   - -9.284737880698\n          - 49.11184063816\n          - -1.549507008423\n  -   -   - 143.5729232274\n          - 64.4900364164\n          - 14.94615333046\n      -   - 64.4900364164\n          - 105.0390768054\n          - 17.44042411675\n      -   - 14.94615333046\n          - 17.44042411675\n          - -0.4689104638637\n  -   -   - 68.45844162431\n          - 32.65296277576\n          - 25.04360055654\n      -   - 32.65296277576\n          - 59.00150809543\n          - 0.7592426398398\n      -   - 25.04360055654\n          - 0.7592426398398\n          - 0.9871650554024\n  -   -   - 18.93707066102\n          - 11.68664155631\n          - 28.03242287255\n      -   - 
11.68664155631\n          - 30.15119945711\n          - -6.606690969349\n      -   - 28.03242287255\n          - -6.606690969349\n          - 0.6417870082805\n  -   -   - -14.88013773366\n          - -1.672249752224\n          - 30.57297543811\n      -   - -1.672249752224\n          - 17.31712592596\n          - -10.5469407781\n      -   - 30.57297543811\n          - -10.5469407781\n          - -0.7358906114451\n  -   -   - 18.93707066102\n          - 11.68664155631\n          - 28.03242287255\n      -   - 11.68664155631\n          - 30.15119945711\n          - -6.606690969349\n      -   - 28.03242287255\n          - -6.606690969349\n          - 0.6417870082805\n  -   -   - -14.88013773366\n          - -1.672249752224\n          - 30.57297543811\n      -   - -1.672249752224\n          - 17.31712592596\n          - -10.5469407781\n      -   - 30.57297543811\n          - -10.5469407781\n          - -0.7358906114448\n  -   -   - -172.4440433656\n          - -58.30234348421\n          - 67.88122528662\n      -   - -58.30234348421\n          - -10.11688714194\n          - -39.74497829255\n      -   - 67.88122528662\n          - -39.74497829255\n          - -9.026701693999\n  -   -   - 87.41811326922\n          - 33.86514838909\n          - -5.240102033061\n      -   - 33.86514838909\n          - 29.49048109152\n          - 14.67250320165\n      -   - -5.240102033061\n          - 14.67250320165\n          - 4.833599928218\n  -   -   - 41.68746835262\n          - 18.32521144382\n          - 3.890270269493\n      -   - 18.32521144382\n          - 28.10847211295\n          - 9.269126205224\n      -   - 3.890270269493\n          - 9.269126205224\n          - 2.450369115355\n  -   -   - 2.547257721925\n          - -41.98520512744\n          - 20.72868343281\n      -   - -41.98520512744\n          - -302.2224872041\n          - -113.9339256901\n      -   - 20.72868343281\n          - -113.9339256901\n          - -2.362980518942\n  -   -   - 18.58084217516\n          - 
5.678085135653\n          - 6.602937980917\n      -   - 5.678085135653\n          - -5.592544704067\n          - -3.940958740204\n      -   - 6.602937980917\n          - -3.940958740204\n          - 0.9575365227362\n  -   -   - 14.66994772782\n          - 5.070231742209\n          - 5.910736412971\n      -   - 5.070231742209\n          - -0.4082883762073\n          - -1.880932217244\n      -   - 5.910736412971\n          - -1.880932217244\n          - 0.79286999734\n  -   -   - 11.79669162928\n          - 4.251219274511\n          - 5.316966607371\n      -   - 4.251219274511\n          - 0.8144741803491\n          - -1.252738735688\n      -   - 5.316966607371\n          - -1.252738735688\n          - 0.6496190714454\n  -   -   - -266.0047978153\n          - -104.4240748307\n          - 113.152993159\n      -   - -104.4240748307\n          - -122.1523164991\n          - -86.07363307296\n      -   - 113.152993159\n          - -86.07363307296\n          - 16.31641633252\n  -   -   - -693.1880101263\n          - -282.0272651787\n          - 185.6089587299\n      -   - -282.0272651787\n          - -377.0062670135\n          - -199.8947029577\n      -   - 185.6089587299\n          - -199.8947029577\n          - 32.55049992815\n  -   -   - 912.6451934849\n          - 383.7119079265\n          - -122.603887661\n      -   - 383.7119079265\n          - 559.8400701698\n          - 213.0361667162\n      -   - -122.603887661\n          - 213.0361667162\n          - -29.3843898806\n  -   -   - 259.2154297329\n          - 115.0514237096\n          - -9.284737880698\n      -   - 115.0514237096\n          - 174.2494476566\n          - 49.11184063816\n      -   - -9.284737880698\n          - 49.11184063816\n          - -1.549507008423\n  -   -   - 143.5729232274\n          - 64.4900364164\n          - 14.94615333046\n      -   - 64.4900364164\n          - 105.0390768054\n          - 17.44042411675\n      -   - 14.94615333046\n          - 17.44042411675\n          - 
-0.4689104638632\n  -   -   - 68.45844162431\n          - 32.65296277576\n          - 25.04360055654\n      -   - 32.65296277576\n          - 59.00150809543\n          - 0.7592426398398\n      -   - 25.04360055654\n          - 0.7592426398398\n          - 0.9871650554029\n  -   -   - 18.93707066102\n          - 11.68664155631\n          - 28.03242287255\n      -   - 11.68664155631\n          - 30.15119945711\n          - -6.606690969349\n      -   - 28.03242287255\n          - -6.606690969349\n          - 0.6417870082796\n  -   -   - -14.88013773366\n          - -1.672249752224\n          - 30.57297543811\n      -   - -1.672249752224\n          - 17.31712592596\n          - -10.5469407781\n      -   - 30.57297543811\n          - -10.5469407781\n          - -0.7358906114452\n  -   -   - 18.93707066102\n          - 11.68664155631\n          - 28.03242287255\n      -   - 11.68664155631\n          - 30.15119945711\n          - -6.606690969349\n      -   - 28.03242287255\n          - -6.606690969349\n          - 0.6417870082796\n  -   -   - -14.88013773366\n          - -1.672249752224\n          - 30.57297543811\n      -   - -1.672249752224\n          - 17.31712592596\n          - -10.5469407781\n      -   - 30.57297543811\n          - -10.5469407781\n          - -0.7358906114453\n  -   -   - -172.4440433656\n          - -58.30234348421\n          - 67.88122528662\n      -   - -58.30234348421\n          - -10.11688714194\n          - -39.74497829255\n      -   - 67.88122528662\n          - -39.74497829255\n          - -9.026701694\n  -   -   - 87.41811326922\n          - 33.86514838909\n          - -5.240102033061\n      -   - 33.86514838909\n          - 29.49048109152\n          - 14.67250320165\n      -   - -5.240102033061\n          - 14.67250320165\n          - 4.833599928218\n  -   -   - 41.68746835262\n          - 18.32521144382\n          - 3.890270269493\n      -   - 18.32521144382\n          - 28.10847211295\n          - 9.269126205224\n      -   - 
3.890270269493\n          - 9.269126205224\n          - 2.450369115355\n  -   -   - 2.547257721924\n          - -41.98520512744\n          - 20.72868343281\n      -   - -41.98520512744\n          - -302.2224872041\n          - -113.9339256901\n      -   - 20.72868343281\n          - -113.9339256901\n          - -2.362980518941\n  -   -   - 18.58084217516\n          - 5.678085135653\n          - 6.602937980917\n      -   - 5.678085135653\n          - -5.592544704067\n          - -3.940958740204\n      -   - 6.602937980917\n          - -3.940958740204\n          - 0.9575365227362\n  -   -   - 14.66994772782\n          - 5.070231742209\n          - 5.910736412971\n      -   - 5.070231742209\n          - -0.4082883762073\n          - -1.880932217244\n      -   - 5.910736412971\n          - -1.880932217244\n          - 0.79286999734\n  -   -   - 11.79669162928\n          - 4.251219274511\n          - 5.316966607371\n      -   - 4.251219274511\n          - 0.8144741803491\n          - -1.252738735688\n      -   - 5.316966607371\n          - -1.252738735688\n          - 0.6496190714454\n  -   -   - -266.0047978153\n          - -104.4240748307\n          - 113.152993159\n      -   - -104.4240748307\n          - -122.1523164991\n          - -86.07363307296\n      -   - 113.152993159\n          - -86.07363307296\n          - 16.31641633252\n  -   -   - -693.1880101263\n          - -282.0272651787\n          - 185.6089587299\n      -   - -282.0272651787\n          - -377.0062670135\n          - -199.8947029577\n      -   - 185.6089587299\n          - -199.8947029577\n          - 32.55049992815\n  -   -   - 912.6451934849\n          - 383.7119079265\n          - -122.603887661\n      -   - 383.7119079265\n          - 559.8400701698\n          - 213.0361667162\n      -   - -122.603887661\n          - 213.0361667162\n          - -29.3843898806\n  -   -   - 259.2154297329\n          - 115.0514237096\n          - -9.284737880698\n      -   - 115.0514237096\n          - 
174.2494476566\n          - 49.11184063816\n      -   - -9.284737880698\n          - 49.11184063816\n          - -1.549507008423\n  -   -   - 143.5729232274\n          - 64.4900364164\n          - 14.94615333046\n      -   - 64.4900364164\n          - 105.0390768054\n          - 17.44042411675\n      -   - 14.94615333046\n          - 17.44042411675\n          - -0.4689104638635\n  -   -   - 68.45844162431\n          - 32.65296277576\n          - 25.04360055654\n      -   - 32.65296277576\n          - 59.00150809543\n          - 0.7592426398398\n      -   - 25.04360055654\n          - 0.7592426398398\n          - 0.9871650554026\n  -   -   - 18.93707066102\n          - 11.68664155631\n          - 28.03242287255\n      -   - 11.68664155631\n          - 30.15119945711\n          - -6.606690969349\n      -   - 28.03242287255\n          - -6.606690969349\n          - 0.6417870082801\n  -   -   - -14.88013773366\n          - -1.672249752224\n          - 30.57297543811\n      -   - -1.672249752224\n          - 17.31712592596\n          - -10.5469407781\n      -   - 30.57297543811\n          - -10.5469407781\n          - -0.7358906114447\n  -   -   - 18.93707066102\n          - 11.68664155631\n          - 28.03242287255\n      -   - 11.68664155631\n          - 30.15119945711\n          - -6.606690969349\n      -   - 28.03242287255\n          - -6.606690969349\n          - 0.6417870082801\n  -   -   - -14.88013773366\n          - -1.672249752224\n          - 30.57297543811\n      -   - -1.672249752224\n          - 17.31712592596\n          - -10.5469407781\n      -   - 30.57297543811\n          - -10.5469407781\n          - -0.7358906114448\n  -   -   - -172.4440433656\n          - -58.30234348421\n          - 67.88122528662\n      -   - -58.30234348421\n          - -10.11688714194\n          - -39.74497829255\n      -   - 67.88122528662\n          - -39.74497829255\n          - -9.026701693999\n  -   -   - 87.41811326922\n          - 33.86514838909\n          - 
-5.240102033061\n      -   - 33.86514838909\n          - 29.49048109152\n          - 14.67250320165\n      -   - -5.240102033061\n          - 14.67250320165\n          - 4.833599928218\n  -   -   - 41.68746835262\n          - 18.32521144382\n          - 3.890270269493\n      -   - 18.32521144382\n          - 28.10847211295\n          - 9.269126205224\n      -   - 3.890270269493\n          - 9.269126205224\n          - 2.450369115355\n  -   -   - 2.547257721925\n          - -41.98520512744\n          - 20.72868343281\n      -   - -41.98520512744\n          - -302.2224872041\n          - -113.9339256901\n      -   - 20.72868343281\n          - -113.9339256901\n          - -2.362980518942\n  -   -   - 18.58084217516\n          - 5.678085135653\n          - 6.602937980917\n      -   - 5.678085135653\n          - -5.592544704067\n          - -3.940958740204\n      -   - 6.602937980917\n          - -3.940958740204\n          - 0.9575365227361\n  -   -   - 14.66994772782\n          - 5.070231742209\n          - 5.910736412971\n      -   - 5.070231742209\n          - -0.4082883762073\n          - -1.880932217244\n      -   - 5.910736412971\n          - -1.880932217244\n          - 0.79286999734\n  -   -   - 11.79669162928\n          - 4.251219274511\n          - 5.316966607371\n      -   - 4.251219274511\n          - 0.8144741803491\n          - -1.252738735688\n      -   - 5.316966607371\n          - -1.252738735688\n          - 0.6496190714454\n  -   -   - -266.0047978153\n          - -104.4240748307\n          - 113.152993159\n      -   - -104.4240748307\n          - -122.1523164991\n          - -86.07363307296\n      -   - 113.152993159\n          - -86.07363307296\n          - 16.31641633252\n  -   -   - -693.1880101263\n          - -282.0272651787\n          - 185.6089587299\n      -   - -282.0272651787\n          - -377.0062670135\n          - -199.8947029577\n      -   - 185.6089587299\n          - -199.8947029577\n          - 32.55049992815\n  -   -   - 
912.6451934849\n          - 383.7119079265\n          - -122.603887661\n      -   - 383.7119079265\n          - 559.8400701698\n          - 213.0361667162\n      -   - -122.603887661\n          - 213.0361667162\n          - -29.3843898806\n  -   -   - 259.2154297329\n          - 115.0514237096\n          - -9.284737880698\n      -   - 115.0514237096\n          - 174.2494476566\n          - 49.11184063816\n      -   - -9.284737880698\n          - 49.11184063816\n          - -1.549507008423\n  -   -   - 143.5729232274\n          - 64.4900364164\n          - 14.94615333046\n      -   - 64.4900364164\n          - 105.0390768054\n          - 17.44042411675\n      -   - 14.94615333046\n          - 17.44042411675\n          - -0.4689104638631\n  -   -   - 68.45844162431\n          - 32.65296277576\n          - 25.04360055654\n      -   - 32.65296277576\n          - 59.00150809543\n          - 0.7592426398398\n      -   - 25.04360055654\n          - 0.7592426398398\n          - 0.9871650554026\n  -   -   - 18.93707066102\n          - 11.68664155631\n          - 28.03242287255\n      -   - 11.68664155631\n          - 30.15119945711\n          - -6.606690969349\n      -   - 28.03242287255\n          - -6.606690969349\n          - 0.6417870082804\n  -   -   - -14.88013773366\n          - -1.672249752224\n          - 30.57297543811\n      -   - -1.672249752224\n          - 17.31712592596\n          - -10.5469407781\n      -   - 30.57297543811\n          - -10.5469407781\n          - -0.7358906114447\n  -   -   - 18.93707066102\n          - 11.68664155631\n          - 28.03242287255\n      -   - 11.68664155631\n          - 30.15119945711\n          - -6.606690969349\n      -   - 28.03242287255\n          - -6.606690969349\n          - 0.6417870082796\n  -   -   - -14.88013773366\n          - -1.672249752224\n          - 30.57297543811\n      -   - -1.672249752224\n          - 17.31712592596\n          - -10.5469407781\n      -   - 30.57297543811\n          - -10.5469407781\n    
      - -0.7358906114452\n  -   -   - -172.4440433656\n          - -58.30234348421\n          - 67.88122528662\n      -   - -58.30234348421\n          - -10.11688714194\n          - -39.74497829255\n      -   - 67.88122528662\n          - -39.74497829255\n          - -9.026701693998\n  -   -   - 87.41811326922\n          - 33.86514838909\n          - -5.240102033061\n      -   - 33.86514838909\n          - 29.49048109152\n          - 14.67250320165\n      -   - -5.240102033061\n          - 14.67250320165\n          - 4.833599928218\n  -   -   - 41.68746835262\n          - 18.32521144382\n          - 3.890270269493\n      -   - 18.32521144382\n          - 28.10847211295\n          - 9.269126205224\n      -   - 3.890270269493\n          - 9.269126205224\n          - 2.450369115355\n  -   -   - 2.547257721925\n          - -41.98520512744\n          - 20.72868343281\n      -   - -41.98520512744\n          - -302.2224872041\n          - -113.9339256901\n      -   - 20.72868343281\n          - -113.9339256901\n          - -2.362980518942\n  -   -   - 18.58084217516\n          - 5.678085135653\n          - 6.602937980917\n      -   - 5.678085135653\n          - -5.592544704067\n          - -3.940958740204\n      -   - 6.602937980917\n          - -3.940958740204\n          - 0.9575365227362\n  -   -   - 14.66994772782\n          - 5.070231742209\n          - 5.910736412971\n      -   - 5.070231742209\n          - -0.4082883762072\n          - -1.880932217244\n      -   - 5.910736412971\n          - -1.880932217244\n          - 0.79286999734\n  -   -   - 11.79669162928\n          - 4.251219274511\n          - 5.316966607371\n      -   - 4.251219274511\n          - 0.8144741803491\n          - -1.252738735688\n      -   - 5.316966607371\n          - -1.252738735688\n          - 0.6496190714454\n  -   -   - -266.0047978153\n          - -104.4240748307\n          - 113.152993159\n      -   - -104.4240748307\n          - -122.1523164991\n          - -86.07363307296\n      -   - 
113.152993159\n          - -86.07363307296\n          - 16.31641633252\n  -   -   - 912.6451934849\n          - 383.7119079265\n          - -122.603887661\n      -   - 383.7119079265\n          - 559.8400701698\n          - 213.0361667162\n      -   - -122.603887661\n          - 213.0361667162\n          - -29.3843898806\n  -   -   - 37.86067159491\n          - 19.63578725902\n          - 27.33833014146\n      -   - 19.63578725902\n          - 40.52405881727\n          - -4.310564613888\n      -   - 27.33833014146\n          - -4.310564613888\n          - 1.001499860058\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.8045838275\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.80458382749\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.8045838275\n  -   -   - 10.65126838649\n          - 3.886653870481\n          - 5.035741025472\n      -   - 3.886653870481\n          - 1.042562599207\n          - -1.080339920451\n      -   - 5.035741025472\n          - -1.080339920451\n          - 0.5902117049039\n  -   -   - -266.0047978153\n          - -104.4240748307\n          - 113.152993159\n      -   - -104.4240748307\n          - -122.1523164991\n          - -86.07363307296\n      -   - 113.152993159\n          - -86.07363307296\n          - 16.31641633252\n  -   -   - 912.6451934849\n          - 383.7119079265\n          - -122.603887661\n      -   - 383.7119079265\n          - 559.8400701698\n          - 
213.0361667162\n      -   - -122.603887661\n          - 213.0361667162\n          - -29.3843898806\n  -   -   - 37.86067159491\n          - 19.63578725902\n          - 27.33833014146\n      -   - 19.63578725902\n          - 40.52405881727\n          - -4.310564613888\n      -   - 27.33833014146\n          - -4.310564613888\n          - 1.001499860058\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.80458382749\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.8045838275\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.80458382749\n  -   -   - 10.65126838649\n          - 3.886653870481\n          - 5.035741025472\n      -   - 3.886653870481\n          - 1.042562599207\n          - -1.080339920451\n      -   - 5.035741025472\n          - -1.080339920451\n          - 0.590211704904\n  -   -   - -266.0047978153\n          - -104.4240748307\n          - 113.152993159\n      -   - -104.4240748307\n          - -122.1523164991\n          - -86.07363307296\n      -   - 113.152993159\n          - -86.07363307296\n          - 16.31641633252\n  -   -   - 912.6451934849\n          - 383.7119079265\n          - -122.603887661\n      -   - 383.7119079265\n          - 559.8400701698\n          - 213.0361667162\n      -   - -122.603887661\n          - 213.0361667162\n          - -29.38438988059\n  -   -   - 37.86067159491\n          - 19.63578725902\n          - 27.33833014146\n      -   - 19.63578725902\n          - 
40.52405881727\n          - -4.310564613888\n      -   - 27.33833014146\n          - -4.310564613888\n          - 1.001499860059\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.80458382749\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.80458382749\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.80458382749\n  -   -   - 10.65126838649\n          - 3.886653870481\n          - 5.035741025472\n      -   - 3.886653870481\n          - 1.042562599207\n          - -1.080339920451\n      -   - 5.035741025472\n          - -1.080339920451\n          - 0.590211704904\n  -   -   - -266.0047978153\n          - -104.4240748307\n          - 113.152993159\n      -   - -104.4240748307\n          - -122.1523164991\n          - -86.07363307296\n      -   - 113.152993159\n          - -86.07363307296\n          - 16.31641633252\n  -   -   - 912.6451934849\n          - 383.7119079265\n          - -122.603887661\n      -   - 383.7119079265\n          - 559.8400701698\n          - 213.0361667162\n      -   - -122.603887661\n          - 213.0361667162\n          - -29.38438988059\n  -   -   - 37.86067159491\n          - 19.63578725902\n          - 27.33833014146\n      -   - 19.63578725902\n          - 40.52405881727\n          - -4.310564613888\n      -   - 27.33833014146\n          - -4.310564613888\n          - 1.001499860059\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 
93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.8045838275\n  -   -   - 10.65126838649\n          - 3.886653870481\n          - 5.035741025472\n      -   - 3.886653870481\n          - 1.042562599207\n          - -1.080339920451\n      -   - 5.035741025472\n          - -1.080339920451\n          - 0.5902117049039\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.8045838275\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.80458382749\n  -   -   - 10.65126838649\n          - 3.886653870481\n          - 5.035741025472\n      -   - 3.886653870481\n          - 1.042562599207\n          - -1.080339920451\n      -   - 5.035741025472\n          - -1.080339920451\n          - 0.590211704904\n  -   -   - -266.0047978153\n          - -104.4240748307\n          - 113.152993159\n      -   - -104.4240748307\n          - -122.1523164991\n          - -86.07363307296\n      -   - 113.152993159\n          - -86.07363307296\n          - 16.31641633252\n  -   -   - 912.6451934849\n          - 383.7119079265\n          - -122.603887661\n      -   - 383.7119079265\n          - 559.8400701698\n          - 213.0361667162\n      -   - -122.603887661\n          - 213.0361667162\n          - -29.38438988059\n  -   -   - 143.5729232274\n          - 64.4900364164\n          - 14.94615333046\n      -   - 64.4900364164\n          - 105.0390768054\n          - 17.44042411675\n      -   - 14.94615333046\n          - 17.44042411675\n          - -0.4689104638635\n  -   -   - 37.86067159491\n          - 19.63578725902\n          - 
27.33833014146\n      -   - 19.63578725902\n          - 40.52405881727\n          - -4.310564613888\n      -   - 27.33833014146\n          - -4.310564613888\n          - 1.001499860059\n  -   -   - 3.158995638313\n          - 5.285050749783\n          - 28.72100787546\n      -   - 5.285050749783\n          - 23.11411940654\n          - -8.290517826993\n      -   - 28.72100787546\n          - -8.290517826993\n          - 0.0893332002871\n  -   -   - 3.158995638313\n          - 5.285050749783\n          - 28.72100787546\n      -   - 5.285050749783\n          - 23.11411940654\n          - -8.290517826993\n      -   - 28.72100787546\n          - -8.290517826993\n          - 0.0893332002872\n  -   -   - -46.24243692467\n          - -13.19929340666\n          - 36.398937508\n      -   - -13.19929340666\n          - 10.49974863698\n          - -15.59542019426\n      -   - 36.398937508\n          - -15.59542019426\n          - -2.340461710656\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.8045838275\n  -   -   - 34.65099735562\n          - 17.97663481639\n          - 4.117471070847\n      -   - 17.97663481639\n          - 42.36208423833\n          - 14.06237417874\n      -   - 4.117471070847\n          - 14.06237417874\n          - 2.203133844605\n  -   -   - 16.48276796572\n          - 5.469469601922\n          - 6.230099813495\n      -   - 5.469469601922\n          - -1.991487592538\n          - -2.545944896211\n      -   - 6.230099813495\n          - -2.545944896211\n          - 0.8761849272405\n  -   -   - 10.65126838649\n          - 3.886653870481\n          - 5.035741025472\n      -   - 3.886653870481\n          - 1.042562599207\n          - -1.080339920451\n      -   - 5.035741025472\n          - -1.080339920451\n          - 0.5902117049041\n  -   -   - -266.0047978153\n       
   - -104.4240748307\n          - 113.152993159\n      -   - -104.4240748307\n          - -122.1523164991\n          - -86.07363307296\n      -   - 113.152993159\n          - -86.07363307296\n          - 16.31641633252\n  -   -   - 912.6451934849\n          - 383.7119079265\n          - -122.603887661\n      -   - 383.7119079265\n          - 559.8400701698\n          - 213.0361667162\n      -   - -122.603887661\n          - 213.0361667162\n          - -29.38438988059\n  -   -   - 143.5729232274\n          - 64.4900364164\n          - 14.94615333046\n      -   - 64.4900364164\n          - 105.0390768054\n          - 17.44042411675\n      -   - 14.94615333046\n          - 17.44042411675\n          - -0.4689104638631\n  -   -   - 37.86067159491\n          - 19.63578725902\n          - 27.33833014146\n      -   - 19.63578725902\n          - 40.52405881727\n          - -4.310564613888\n      -   - 27.33833014146\n          - -4.310564613888\n          - 1.001499860058\n  -   -   - 3.158995638313\n          - 5.285050749783\n          - 28.72100787546\n      -   - 5.285050749783\n          - 23.11411940654\n          - -8.290517826993\n      -   - 28.72100787546\n          - -8.290517826993\n          - 0.08933320028759\n  -   -   - 3.158995638313\n          - 5.285050749783\n          - 28.72100787546\n      -   - 5.285050749783\n          - 23.11411940654\n          - -8.290517826993\n      -   - 28.72100787546\n          - -8.290517826993\n          - 0.0893332002872\n  -   -   - -46.24243692467\n          - -13.19929340666\n          - 36.398937508\n      -   - -13.19929340666\n          - 10.49974863698\n          - -15.59542019426\n      -   - 36.398937508\n          - -15.59542019426\n          - -2.340461710656\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.8045838275\n  -  
 -   - 34.65099735562\n          - 17.97663481639\n          - 4.117471070847\n      -   - 17.97663481639\n          - 42.36208423833\n          - 14.06237417874\n      -   - 4.117471070847\n          - 14.06237417874\n          - 2.203133844605\n  -   -   - 16.48276796572\n          - 5.469469601922\n          - 6.230099813495\n      -   - 5.469469601922\n          - -1.991487592538\n          - -2.545944896211\n      -   - 6.230099813495\n          - -2.545944896211\n          - 0.8761849272405\n  -   -   - 10.65126838649\n          - 3.886653870481\n          - 5.035741025472\n      -   - 3.886653870481\n          - 1.042562599207\n          - -1.080339920451\n      -   - 5.035741025472\n          - -1.080339920451\n          - 0.590211704904\n  -   -   - -266.0047978153\n          - -104.4240748307\n          - 113.152993159\n      -   - -104.4240748307\n          - -122.1523164991\n          - -86.07363307296\n      -   - 113.152993159\n          - -86.07363307296\n          - 16.31641633252\n  -   -   - 912.6451934849\n          - 383.7119079265\n          - -122.603887661\n      -   - 383.7119079265\n          - 559.8400701698\n          - 213.0361667162\n      -   - -122.603887661\n          - 213.0361667162\n          - -29.38438988059\n  -   -   - 143.5729232274\n          - 64.4900364164\n          - 14.94615333046\n      -   - 64.4900364164\n          - 105.0390768054\n          - 17.44042411675\n      -   - 14.94615333046\n          - 17.44042411675\n          - -0.4689104638633\n  -   -   - 37.86067159491\n          - 19.63578725902\n          - 27.33833014146\n      -   - 19.63578725902\n          - 40.52405881727\n          - -4.310564613888\n      -   - 27.33833014146\n          - -4.310564613888\n          - 1.001499860059\n  -   -   - 3.158995638313\n          - 5.285050749783\n          - 28.72100787546\n      -   - 5.285050749783\n          - 23.11411940654\n          - -8.290517826993\n      -   - 28.72100787546\n          - -8.290517826993\n  
        - 0.08933320028717\n  -   -   - 3.158995638313\n          - 5.285050749783\n          - 28.72100787546\n      -   - 5.285050749783\n          - 23.11411940654\n          - -8.290517826993\n      -   - 28.72100787546\n          - -8.290517826993\n          - 0.08933320028754\n  -   -   - -46.24243692467\n          - -13.19929340666\n          - 36.398937508\n      -   - -13.19929340666\n          - 10.49974863698\n          - -15.59542019426\n      -   - 36.398937508\n          - -15.59542019426\n          - -2.340461710656\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.80458382749\n  -   -   - 34.65099735562\n          - 17.97663481639\n          - 4.117471070847\n      -   - 17.97663481639\n          - 42.36208423833\n          - 14.06237417874\n      -   - 4.117471070847\n          - 14.06237417874\n          - 2.203133844605\n  -   -   - 16.48276796572\n          - 5.469469601922\n          - 6.230099813495\n      -   - 5.469469601922\n          - -1.991487592538\n          - -2.545944896211\n      -   - 6.230099813495\n          - -2.545944896211\n          - 0.8761849272406\n  -   -   - 10.65126838649\n          - 3.886653870481\n          - 5.035741025472\n      -   - 3.886653870481\n          - 1.042562599207\n          - -1.080339920451\n      -   - 5.035741025472\n          - -1.080339920451\n          - 0.5902117049039\n  -   -   - -266.0047978153\n          - -104.4240748307\n          - 113.152993159\n      -   - -104.4240748307\n          - -122.1523164991\n          - -86.07363307296\n      -   - 113.152993159\n          - -86.07363307296\n          - 16.31641633252\n  -   -   - -693.1880101263\n          - -282.0272651787\n          - 185.6089587299\n      -   - -282.0272651787\n          - -377.0062670135\n          - -199.8947029577\n      -   - 
185.6089587299\n          - -199.8947029577\n          - 32.55049992815\n  -   -   - 912.6451934849\n          - 383.7119079265\n          - -122.603887661\n      -   - 383.7119079265\n          - 559.8400701698\n          - 213.0361667162\n      -   - -122.603887661\n          - 213.0361667162\n          - -29.38438988059\n  -   -   - 259.2154297329\n          - 115.0514237096\n          - -9.284737880698\n      -   - 115.0514237096\n          - 174.2494476566\n          - 49.11184063816\n      -   - -9.284737880698\n          - 49.11184063816\n          - -1.549507008423\n  -   -   - 143.5729232274\n          - 64.4900364164\n          - 14.94615333046\n      -   - 64.4900364164\n          - 105.0390768054\n          - 17.44042411675\n      -   - 14.94615333046\n          - 17.44042411675\n          - -0.4689104638631\n  -   -   - 68.45844162431\n          - 32.65296277576\n          - 25.04360055654\n      -   - 32.65296277576\n          - 59.00150809543\n          - 0.7592426398398\n      -   - 25.04360055654\n          - 0.7592426398398\n          - 0.9871650554025\n  -   -   - -14.88013773366\n          - -1.672249752224\n          - 30.57297543811\n      -   - -1.672249752224\n          - 17.31712592596\n          - -10.5469407781\n      -   - 30.57297543811\n          - -10.5469407781\n          - -0.7358906114452\n  -   -   - -14.88013773366\n          - -1.672249752224\n          - 30.57297543811\n      -   - -1.672249752224\n          - 17.31712592596\n          - -10.5469407781\n      -   - 30.57297543811\n          - -10.5469407781\n          - -0.7358906114452\n  -   -   - -172.4440433656\n          - -58.30234348421\n          - 67.88122528662\n      -   - -58.30234348421\n          - -10.11688714194\n          - -39.74497829255\n      -   - 67.88122528662\n          - -39.74497829255\n          - -9.026701693998\n  -   -   - 87.41811326922\n          - 33.86514838909\n          - -5.240102033061\n      -   - 33.86514838909\n          - 
29.49048109152\n          - 14.67250320165\n      -   - -5.240102033061\n          - 14.67250320165\n          - 4.833599928218\n  -   -   - 41.68746835262\n          - 18.32521144382\n          - 3.890270269493\n      -   - 18.32521144382\n          - 28.10847211295\n          - 9.269126205224\n      -   - 3.890270269493\n          - 9.269126205224\n          - 2.450369115355\n  -   -   - 2.547257721925\n          - -41.98520512744\n          - 20.72868343281\n      -   - -41.98520512744\n          - -302.2224872041\n          - -113.9339256901\n      -   - 20.72868343281\n          - -113.9339256901\n          - -2.362980518942\n  -   -   - 18.58084217516\n          - 5.678085135653\n          - 6.602937980917\n      -   - 5.678085135653\n          - -5.592544704067\n          - -3.940958740204\n      -   - 6.602937980917\n          - -3.940958740204\n          - 0.9575365227362\n  -   -   - 14.66994772782\n          - 5.070231742209\n          - 5.910736412971\n      -   - 5.070231742209\n          - -0.4082883762073\n          - -1.880932217244\n      -   - 5.910736412971\n          - -1.880932217244\n          - 0.79286999734\n  -   -   - 11.79669162928\n          - 4.251219274511\n          - 5.316966607371\n      -   - 4.251219274511\n          - 0.8144741803491\n          - -1.252738735688\n      -   - 5.316966607371\n          - -1.252738735688\n          - 0.6496190714455\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.80458382749\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.80458382749\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n     
 -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.8045838275\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.80458382749\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.80458382749\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.8045838275\n  -   -   - -266.0047978153\n          - -104.4240748307\n          - 113.152993159\n      -   - -104.4240748307\n          - -122.1523164991\n          - -86.07363307296\n      -   - 113.152993159\n          - -86.07363307296\n          - 16.31641633252\n  -   -   - 912.6451934849\n          - 383.7119079265\n          - -122.603887661\n      -   - 383.7119079265\n          - 559.8400701698\n          - 213.0361667162\n      -   - -122.603887661\n          - 213.0361667162\n          - -29.3843898806\n  -   -   - 143.5729232274\n          - 64.4900364164\n          - 14.94615333046\n      -   - 64.4900364164\n          - 105.0390768054\n          - 17.44042411675\n      -   - 14.94615333046\n          - 17.44042411675\n          - -0.4689104638633\n  -   -   - 37.86067159491\n          - 19.63578725902\n          - 27.33833014146\n      -   - 19.63578725902\n          - 40.52405881727\n          - -4.310564613888\n      -   - 27.33833014146\n          - -4.310564613888\n          - 1.001499860059\n  -   -   - 3.158995638313\n          - 5.285050749783\n          - 
28.72100787546\n      -   - 5.285050749783\n          - 23.11411940654\n          - -8.290517826993\n      -   - 28.72100787546\n          - -8.290517826993\n          - 0.08933320028804\n  -   -   - -46.24243692467\n          - -13.19929340666\n          - 36.398937508\n      -   - -13.19929340666\n          - 10.49974863698\n          - -15.59542019426\n      -   - 36.398937508\n          - -15.59542019426\n          - -2.340461710656\n  -   -   - 3.158995638313\n          - 5.285050749783\n          - 28.72100787546\n      -   - 5.285050749783\n          - 23.11411940654\n          - -8.290517826993\n      -   - 28.72100787546\n          - -8.290517826993\n          - 0.0893332002871\n  -   -   - -46.24243692467\n          - -13.19929340666\n          - 36.398937508\n      -   - -13.19929340666\n          - 10.49974863698\n          - -15.59542019426\n      -   - 36.398937508\n          - -15.59542019426\n          - -2.340461710657\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.8045838275\n  -   -   - 34.65099735562\n          - 17.97663481639\n          - 4.117471070847\n      -   - 17.97663481639\n          - 42.36208423833\n          - 14.06237417874\n      -   - 4.117471070847\n          - 14.06237417874\n          - 2.203133844605\n  -   -   - 16.48276796572\n          - 5.469469601922\n          - 6.230099813495\n      -   - 5.469469601922\n          - -1.991487592538\n          - -2.545944896211\n      -   - 6.230099813495\n          - -2.545944896211\n          - 0.8761849272405\n  -   -   - 10.65126838649\n          - 3.886653870481\n          - 5.035741025472\n      -   - 3.886653870481\n          - 1.042562599207\n          - -1.080339920451\n      -   - 5.035741025472\n          - -1.080339920451\n          - 0.590211704904\n  -   -   - -266.0047978153\n       
   - -104.4240748307\n          - 113.152993159\n      -   - -104.4240748307\n          - -122.1523164991\n          - -86.07363307296\n      -   - 113.152993159\n          - -86.07363307296\n          - 16.31641633252\n  -   -   - 912.6451934849\n          - 383.7119079265\n          - -122.603887661\n      -   - 383.7119079265\n          - 559.8400701698\n          - 213.0361667162\n      -   - -122.603887661\n          - 213.0361667162\n          - -29.3843898806\n  -   -   - 143.5729232274\n          - 64.4900364164\n          - 14.94615333046\n      -   - 64.4900364164\n          - 105.0390768054\n          - 17.44042411675\n      -   - 14.94615333046\n          - 17.44042411675\n          - -0.4689104638633\n  -   -   - 37.86067159491\n          - 19.63578725902\n          - 27.33833014146\n      -   - 19.63578725902\n          - 40.52405881727\n          - -4.310564613888\n      -   - 27.33833014146\n          - -4.310564613888\n          - 1.001499860058\n  -   -   - 3.158995638313\n          - 5.285050749783\n          - 28.72100787546\n      -   - 5.285050749783\n          - 23.11411940654\n          - -8.290517826993\n      -   - 28.72100787546\n          - -8.290517826993\n          - 0.08933320028763\n  -   -   - -46.24243692467\n          - -13.19929340666\n          - 36.398937508\n      -   - -13.19929340666\n          - 10.49974863698\n          - -15.59542019426\n      -   - 36.398937508\n          - -15.59542019426\n          - -2.340461710657\n  -   -   - 3.158995638313\n          - 5.285050749783\n          - 28.72100787546\n      -   - 5.285050749783\n          - 23.11411940654\n          - -8.290517826993\n      -   - 28.72100787546\n          - -8.290517826993\n          - 0.08933320028752\n  -   -   - -46.24243692467\n          - -13.19929340666\n          - 36.398937508\n      -   - -13.19929340666\n          - 10.49974863698\n          - -15.59542019426\n      -   - 36.398937508\n          - -15.59542019426\n          - -2.340461710657\n  
-   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.8045838275\n  -   -   - 34.65099735562\n          - 17.97663481639\n          - 4.117471070847\n      -   - 17.97663481639\n          - 42.36208423833\n          - 14.06237417874\n      -   - 4.117471070847\n          - 14.06237417874\n          - 2.203133844605\n  -   -   - 16.48276796572\n          - 5.469469601922\n          - 6.230099813495\n      -   - 5.469469601922\n          - -1.991487592538\n          - -2.545944896211\n      -   - 6.230099813495\n          - -2.545944896211\n          - 0.8761849272405\n  -   -   - 10.65126838649\n          - 3.886653870481\n          - 5.035741025472\n      -   - 3.886653870481\n          - 1.042562599207\n          - -1.080339920451\n      -   - 5.035741025472\n          - -1.080339920451\n          - 0.590211704904\n  -   -   - -266.0047978153\n          - -104.4240748307\n          - 113.152993159\n      -   - -104.4240748307\n          - -122.1523164991\n          - -86.07363307296\n      -   - 113.152993159\n          - -86.07363307296\n          - 16.31641633252\n  -   -   - 912.6451934849\n          - 383.7119079265\n          - -122.603887661\n      -   - 383.7119079265\n          - 559.8400701698\n          - 213.0361667162\n      -   - -122.603887661\n          - 213.0361667162\n          - -29.38438988059\n  -   -   - 143.5729232274\n          - 64.4900364164\n          - 14.94615333046\n      -   - 64.4900364164\n          - 105.0390768054\n          - 17.44042411675\n      -   - 14.94615333046\n          - 17.44042411675\n          - -0.4689104638634\n  -   -   - 37.86067159491\n          - 19.63578725902\n          - 27.33833014146\n      -   - 19.63578725902\n          - 40.52405881727\n          - -4.310564613888\n      -   - 27.33833014146\n          - -4.310564613888\n  
        - 1.001499860058\n  -   -   - 3.158995638313\n          - 5.285050749783\n          - 28.72100787546\n      -   - 5.285050749783\n          - 23.11411940654\n          - -8.290517826993\n      -   - 28.72100787546\n          - -8.290517826993\n          - 0.08933320028753\n  -   -   - -46.24243692467\n          - -13.19929340666\n          - 36.398937508\n      -   - -13.19929340666\n          - 10.49974863698\n          - -15.59542019426\n      -   - 36.398937508\n          - -15.59542019426\n          - -2.340461710657\n  -   -   - 3.158995638313\n          - 5.285050749783\n          - 28.72100787546\n      -   - 5.285050749783\n          - 23.11411940654\n          - -8.290517826993\n      -   - 28.72100787546\n          - -8.290517826993\n          - 0.08933320028805\n  -   -   - -46.24243692467\n          - -13.19929340666\n          - 36.398937508\n      -   - -13.19929340666\n          - 10.49974863698\n          - -15.59542019426\n      -   - 36.398937508\n          - -15.59542019426\n          - -2.340461710656\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.80458382749\n  -   -   - 34.65099735562\n          - 17.97663481639\n          - 4.117471070847\n      -   - 17.97663481639\n          - 42.36208423833\n          - 14.06237417874\n      -   - 4.117471070847\n          - 14.06237417874\n          - 2.203133844605\n  -   -   - 16.48276796572\n          - 5.469469601922\n          - 6.230099813495\n      -   - 5.469469601922\n          - -1.991487592538\n          - -2.545944896211\n      -   - 6.230099813495\n          - -2.545944896211\n          - 0.8761849272405\n  -   -   - 10.65126838649\n          - 3.886653870481\n          - 5.035741025472\n      -   - 3.886653870481\n          - 1.042562599207\n          - -1.080339920451\n      -   - 
5.035741025472\n          - -1.080339920451\n          - 0.5902117049041\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.8045838275\n  -   -   - -266.0047978153\n          - -104.4240748307\n          - 113.152993159\n      -   - -104.4240748307\n          - -122.1523164991\n          - -86.07363307296\n      -   - 113.152993159\n          - -86.07363307296\n          - 16.31641633252\n  -   -   - 912.6451934849\n          - 383.7119079265\n          - -122.603887661\n      -   - 383.7119079265\n          - 559.8400701698\n          - 213.0361667162\n      -   - -122.603887661\n          - 213.0361667162\n          - -29.38438988059\n  -   -   - 37.86067159491\n          - 19.63578725902\n          - 27.33833014146\n      -   - 19.63578725902\n          - 40.52405881727\n          - -4.310564613888\n      -   - 27.33833014146\n          - -4.310564613888\n          - 1.001499860059\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.80458382749\n  -   -   - 10.65126838649\n          - 3.886653870481\n          - 5.035741025472\n      -   - 3.886653870481\n          - 1.042562599207\n          - -1.080339920451\n      -   - 5.035741025472\n          - -1.080339920451\n          - 0.590211704904\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.80458382749\n  -   -   - 10.65126838649\n          - 3.886653870481\n          - 5.035741025472\n      -   - 3.886653870481\n          - 1.042562599207\n          - 
-1.080339920451\n      -   - 5.035741025472\n          - -1.080339920451\n          - 0.5902117049039\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.8045838275\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.80458382749\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.80458382749\n  -   -   - 18.93707066102\n          - 11.68664155631\n          - 28.03242287255\n      -   - 11.68664155631\n          - 30.15119945711\n          - -6.606690969349\n      -   - 28.03242287255\n          - -6.606690969349\n          - 0.6417870082805\n  -   -   - 18.93707066102\n          - 11.68664155631\n          - 28.03242287255\n      -   - 11.68664155631\n          - 30.15119945711\n          - -6.606690969349\n      -   - 28.03242287255\n          - -6.606690969349\n          - 0.6417870082805\n  -   -   - -266.0047978153\n          - -104.4240748307\n          - 113.152993159\n      -   - -104.4240748307\n          - -122.1523164991\n          - -86.07363307296\n      -   - 113.152993159\n          - -86.07363307296\n          - 16.31641633252\n  -   -   - 912.6451934849\n          - 383.7119079265\n          - -122.603887661\n      -   - 383.7119079265\n          - 559.8400701698\n          - 213.0361667162\n      -   - -122.603887661\n          - 213.0361667162\n          - -29.3843898806\n  -   -   - 18.93707066102\n          - 11.68664155631\n          - 28.03242287255\n      -   - 11.68664155631\n          - 
30.15119945711\n          - -6.606690969349\n      -   - 28.03242287255\n          - -6.606690969349\n          - 0.6417870082805\n  -   -   - 18.93707066102\n          - 11.68664155631\n          - 28.03242287255\n      -   - 11.68664155631\n          - 30.15119945711\n          - -6.606690969349\n      -   - 28.03242287255\n          - -6.606690969349\n          - 0.6417870082804\n  -   -   - -172.4440433656\n          - -58.30234348421\n          - 67.88122528662\n      -   - -58.30234348421\n          - -10.11688714194\n          - -39.74497829255\n      -   - 67.88122528662\n          - -39.74497829255\n          - -9.026701693999\n  -   -   - 41.68746835262\n          - 18.32521144382\n          - 3.890270269493\n      -   - 18.32521144382\n          - 28.10847211295\n          - 9.269126205224\n      -   - 3.890270269493\n          - 9.269126205224\n          - 2.450369115355\n  -   -   - 18.93707066102\n          - 11.68664155631\n          - 28.03242287255\n      -   - 11.68664155631\n          - 30.15119945711\n          - -6.606690969349\n      -   - 28.03242287255\n          - -6.606690969349\n          - 0.6417870082809\n  -   -   - 18.93707066102\n          - 11.68664155631\n          - 28.03242287255\n      -   - 11.68664155631\n          - 30.15119945711\n          - -6.606690969349\n      -   - 28.03242287255\n          - -6.606690969349\n          - 0.6417870082805\n  -   -   - -266.0047978153\n          - -104.4240748307\n          - 113.152993159\n      -   - -104.4240748307\n          - -122.1523164991\n          - -86.07363307296\n      -   - 113.152993159\n          - -86.07363307296\n          - 16.31641633252\n  -   -   - 912.6451934849\n          - 383.7119079265\n          - -122.603887661\n      -   - 383.7119079265\n          - 559.8400701698\n          - 213.0361667162\n      -   - -122.603887661\n          - 213.0361667162\n          - -29.3843898806\n  -   -   - -172.4440433656\n          - -58.30234348421\n          - 
67.88122528662\n      -   - -58.30234348421\n          - -10.11688714194\n          - -39.74497829255\n      -   - 67.88122528662\n          - -39.74497829255\n          - -9.026701693999\n  -   -   - 41.68746835262\n          - 18.32521144382\n          - 3.890270269493\n      -   - 18.32521144382\n          - 28.10847211295\n          - 9.269126205224\n      -   - 3.890270269493\n          - 9.269126205224\n          - 2.450369115355\n  -   -   - 18.93707066102\n          - 11.68664155631\n          - 28.03242287255\n      -   - 11.68664155631\n          - 30.15119945711\n          - -6.606690969349\n      -   - 28.03242287255\n          - -6.606690969349\n          - 0.6417870082809\n  -   -   - 18.93707066102\n          - 11.68664155631\n          - 28.03242287255\n      -   - 11.68664155631\n          - 30.15119945711\n          - -6.606690969349\n      -   - 28.03242287255\n          - -6.606690969349\n          - 0.6417870082809\nx_accepted:\n  - 0.1494212312914\n  - 0.005607806220598\n  - 0.01308958287811\n"
  },
  {
    "path": "tests/optimagic/optimizers/_pounders/fixtures/interpolate_f_iter_7.yaml",
    "content": "---\ndelta_old: 0.0125\nf_interpolated_expected:\n  -   - 2.396839767562\n      - 2.640310345903\n      - 2.498147595924\n      - 2.691030200433\n      - 2.692161347328\n      - 2.617651338669\n      - 2.303282937582\n      - 1.972205368828\n      - 2.303282937582\n      - 1.972205368828\n      - 1.763553129541\n      - 1.342529987111\n      - 1.146903490695\n      - 1.640628791315\n      - 0.9003896743169\n      - 0.7700051055971\n      - 0.6665780303914\n      - 2.396839767562\n      - 2.640310345903\n      - 2.498147595924\n      - 2.691030200433\n      - 2.692161347328\n      - 2.617651338669\n      - 2.303282937582\n      - 1.972205368828\n      - 2.303282937582\n      - 1.972205368828\n      - 1.763553129541\n      - 1.342529987111\n      - 1.146903490695\n      - 1.640628791315\n      - 0.9003896743169\n      - 0.7700051055971\n      - 0.6665780303914\n      - 2.396839767562\n      - 2.640310345904\n      - 2.498147595924\n      - 2.691030200433\n      - 2.692161347328\n      - 2.617651338669\n      - 2.303282937582\n      - 1.972205368828\n      - 2.303282937582\n      - 1.972205368828\n      - 1.763553129541\n      - 1.342529987111\n      - 1.146903490695\n      - 1.640628791315\n      - 0.9003896743169\n      - 0.7700051055971\n      - 0.6665780303914\n      - 2.396839767562\n      - 2.640310345904\n      - 2.498147595924\n      - 2.691030200433\n      - 2.692161347328\n      - 2.617651338669\n      - 2.303282937582\n      - 1.972205368828\n      - 2.303282937582\n      - 1.972205368828\n      - 1.763553129541\n      - 1.342529987111\n      - 1.146903490695\n      - 1.640628791315\n      - 0.9003896743169\n      - 0.7700051055971\n      - 0.6665780303914\n      - 2.396839767562\n      - 2.498147595924\n      - 2.472320188045\n      - 1.360815770929\n      - 1.360815770929\n      - 1.360815770929\n      - 0.6216379283744\n      - 2.396839767562\n      - 2.498147595924\n      - 2.472320188045\n      - 1.360815770929\n      - 1.360815770929\n 
     - 1.360815770929\n      - 0.6216379283744\n      - 2.396839767562\n      - 2.498147595925\n      - 2.472320188045\n      - 1.360815770929\n      - 1.360815770929\n      - 1.360815770929\n      - 0.6216379283744\n      - 2.396839767562\n      - 2.498147595925\n      - 2.472320188046\n      - 1.360815770929\n      - 0.6216379283744\n      - 1.360815770929\n      - 1.360815770929\n      - 0.6216379283744\n      - 2.396839767562\n      - 2.498147595925\n      - 2.692161347328\n      - 2.472320188046\n      - 2.132751177783\n      - 2.132751177783\n      - 1.831874172693\n      - 1.360815770929\n      - 1.028891127921\n      - 0.8305451863645\n      - 0.6216379283744\n      - 2.396839767562\n      - 2.498147595925\n      - 2.692161347328\n      - 2.472320188045\n      - 2.132751177783\n      - 2.132751177783\n      - 1.831874172693\n      - 1.360815770929\n      - 1.028891127921\n      - 0.8305451863645\n      - 0.6216379283744\n      - 2.396839767562\n      - 2.498147595925\n      - 2.692161347328\n      - 2.472320188046\n      - 2.132751177783\n      - 2.132751177783\n      - 1.831874172693\n      - 1.360815770929\n      - 1.028891127921\n      - 0.8305451863645\n      - 0.6216379283744\n      - 2.396839767562\n      - 2.640310345903\n      - 2.498147595925\n      - 2.691030200433\n      - 2.692161347328\n      - 2.617651338669\n      - 1.972205368828\n      - 1.972205368828\n      - 1.763553129541\n      - 1.342529987111\n      - 1.146903490695\n      - 1.640628791315\n      - 0.9003896743169\n      - 0.7700051055971\n      - 0.6665780303914\n      - 1.360815770929\n      - 1.360815770929\n      - 1.360815770929\n      - 1.360815770929\n      - 1.360815770929\n      - 1.360815770929\n      - 2.396839767562\n      - 2.498147595924\n      - 2.692161347328\n      - 2.472320188046\n      - 2.132751177783\n      - 1.831874172693\n      - 2.132751177783\n      - 1.831874172693\n      - 1.360815770929\n      - 1.028891127921\n      - 0.8305451863645\n      - 
0.6216379283744\n      - 2.396839767562\n      - 2.498147595924\n      - 2.692161347328\n      - 2.472320188045\n      - 2.132751177783\n      - 1.831874172693\n      - 2.132751177783\n      - 1.831874172693\n      - 1.360815770929\n      - 1.028891127921\n      - 0.8305451863645\n      - 0.6216379283744\n      - 2.396839767562\n      - 2.498147595925\n      - 2.692161347328\n      - 2.472320188045\n      - 2.132751177783\n      - 1.831874172693\n      - 2.132751177783\n      - 1.831874172693\n      - 1.360815770929\n      - 1.028891127921\n      - 0.8305451863645\n      - 0.6216379283744\n      - 1.360815770929\n      - 2.396839767562\n      - 2.498147595925\n      - 2.472320188046\n      - 1.360815770929\n      - 0.6216379283744\n      - 1.360815770929\n      - 0.6216379283744\n      - 1.360815770929\n      - 1.360815770929\n      - 1.360815770929\n      - 2.303282937582\n      - 2.303282937582\n      - 2.396839767562\n      - 2.498147595924\n      - 2.303282937582\n      - 2.303282937582\n      - 1.763553129541\n      - 1.146903490695\n      - 2.303282937582\n      - 2.303282937582\n      - 2.396839767562\n      - 2.498147595924\n      - 1.763553129541\n      - 1.146903490695\n      - 2.303282937582\n      - 2.303282937582\n  -   - 1.24344978758e-14\n      - -5.329070518201e-14\n      - 9.947598300641e-14\n      - 2.13162820728e-14\n      - 3.552713678801e-15\n      - -3.552713678801e-15\n      - 0.0\n      - 3.552713678801e-15\n      - 3.552713678801e-15\n      - -2.6645352591e-15\n      - -4.440892098501e-15\n      - 8.881784197001e-16\n      - -8.881784197001e-16\n      - 6.661338147751e-15\n      - 4.440892098501e-16\n      - 4.440892098501e-16\n      - -8.881784197001e-16\n      - -1.59872115546e-14\n      - -1.7763568394e-14\n      - -2.48689957516e-14\n      - -3.552713678801e-15\n      - -1.42108547152e-14\n      - -5.329070518201e-15\n      - 1.7763568394e-15\n      - 3.552713678801e-15\n      - 1.7763568394e-15\n      - 1.7763568394e-15\n      - 0.0\n  
    - 1.7763568394e-15\n      - -8.881784197001e-16\n      - 8.881784197001e-16\n      - 4.440892098501e-16\n      - 0.0\n      - 8.881784197001e-16\n      - -1.24344978758e-14\n      - -5.151434834261e-14\n      - -3.19744231092e-14\n      - -1.42108547152e-14\n      - -1.7763568394e-14\n      - 0.0\n      - 3.552713678801e-15\n      - 0.0\n      - -1.7763568394e-15\n      - -4.440892098501e-15\n      - 0.0\n      - 8.881784197001e-16\n      - 0.0\n      - 6.217248937901e-15\n      - 1.998401444325e-15\n      - 4.440892098501e-16\n      - -4.440892098501e-16\n      - 0.0\n      - -1.136868377216e-13\n      - -4.618527782441e-14\n      - -7.105427357601e-15\n      - 5.329070518201e-15\n      - 0.0\n      - -1.7763568394e-15\n      - 0.0\n      - 1.7763568394e-15\n      - 3.552713678801e-15\n      - -7.993605777300e-15\n      - 2.6645352591e-15\n      - -2.22044604925e-15\n      - 3.10862446895e-15\n      - 1.33226762955e-15\n      - 2.22044604925e-16\n      - -1.110223024625e-15\n      - -3.552713678801e-15\n      - 9.947598300641e-14\n      - -3.552713678801e-15\n      - 5.551115123126e-15\n      - 1.7763568394e-15\n      - 1.7763568394e-15\n      - -8.881784197001e-16\n      - -1.24344978758e-14\n      - 0.0\n      - -8.881784197001e-16\n      - 3.552713678801e-15\n      - 1.33226762955e-14\n      - 6.217248937901e-15\n      - -5.551115123126e-16\n      - -1.86517468137e-14\n      - 1.101341240428e-13\n      - -7.105427357601e-15\n      - 2.6645352591e-15\n      - 2.22044604925e-15\n      - 2.6645352591e-15\n      - -8.881784197001e-16\n      - -1.24344978758e-14\n      - -1.06581410364e-14\n      - 3.552713678801e-15\n      - 6.661338147751e-15\n      - -8.881784197001e-16\n      - 1.33226762955e-14\n      - 1.7763568394e-15\n      - -8.881784197001e-16\n      - -1.86517468137e-14\n      - 3.19744231092e-14\n      - 1.68753899743e-14\n      - -2.6645352591e-15\n      - 6.217248937901e-15\n      - -1.7763568394e-15\n      - -8.881784197001e-16\n      - 
6.217248937901e-15\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - 4.440892098501e-16\n      - -1.50990331349e-14\n      - 9.592326932761e-14\n      - 0.0\n      - 0.0\n      - 6.661338147751e-16\n      - -1.110223024625e-15\n      - -1.7763568394e-15\n      - 1.33226762955e-14\n      - 0.0\n      - -8.881784197001e-16\n      - 4.440892098501e-16\n      - 1.06581410364e-14\n      - 1.7763568394e-14\n      - -2.6645352591e-15\n      - -3.552713678801e-15\n      - -8.881784197001e-16\n      - 2.6645352591e-15\n      - 0.0\n      - 3.552713678801e-15\n      - 8.881784197001e-16\n      - 8.881784197001e-16\n      - 0.0\n      - -1.24344978758e-14\n      - -2.13162820728e-14\n      - 9.592326932761e-14\n      - 0.0\n      - -2.57571741713e-14\n      - 1.7763568394e-15\n      - -8.881784197001e-16\n      - 1.7763568394e-15\n      - -3.552713678801e-15\n      - 7.771561172376e-16\n      - -1.998401444325e-15\n      - 4.440892098501e-15\n      - 1.443289932013e-15\n      - 2.775557561563e-16\n      - -1.665334536938e-16\n      - 7.105427357601e-15\n      - 8.881784197001e-15\n      - -1.7763568394e-15\n      - 2.22044604925e-15\n      - 1.24344978758e-14\n      - 3.552713678801e-15\n      - 1.24344978758e-14\n      - 1.42108547152e-14\n      - -2.6645352591e-15\n      - -5.329070518201e-15\n      - 0.0\n      - 0.0\n      - 0.0\n      - 1.7763568394e-15\n      - 7.105427357601e-15\n      - -8.881784197001e-16\n      - 0.0\n      - 0.0\n      - -2.6645352591e-15\n      - -4.618527782441e-14\n      - -7.105427357601e-15\n      - 5.329070518201e-15\n      - 2.6645352591e-15\n      - -8.881784197001e-16\n      - 1.7763568394e-15\n      - -8.881784197001e-16\n      - 5.329070518201e-15\n      - -2.22044604925e-16\n      - 4.440892098501e-16\n      - 8.881784197001e-16\n      - 7.105427357601e-15\n      - 7.105427357601e-15\n      - 3.153033389935e-14\n      - 0.0\n      - -4.440892098501e-15\n      - 0.0\n      - 2.6645352591e-15\n      - -1.7763568394e-15\n  
    - -8.881784197001e-16\n      - 8.881784197001e-16\n      - -8.881784197001e-16\n      - 0.0\n      - 4.884981308351e-15\n      - 8.881784197001e-16\n      - -1.06581410364e-14\n      - -1.7763568394e-15\n      - 2.6645352591e-15\n      - 2.22044604925e-16\n      - -7.549516567451e-15\n      - -4.440892098501e-16\n      - 6.217248937901e-15\n      - 6.217248937901e-15\n      - 3.552713678801e-15\n      - 4.440892098501e-16\n      - -1.7763568394e-15\n      - -1.7763568394e-15\n      - 4.618527782441e-14\n      - 1.7763568394e-15\n      - -1.7763568394e-15\n      - -1.7763568394e-15\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - 8.881784197001e-15\n      - 5.684341886081e-14\n      - 0.0\n      - -2.22044604925e-15\n      - -8.881784197001e-16\n      - -1.7763568394e-15\n  -   - -1.06581410364e-14\n      - -1.95399252334e-14\n      - 1.24344978758e-14\n      - 7.105427357601e-15\n      - 4.440892098501e-15\n      - 2.442490654175e-15\n      - -1.998401444325e-15\n      - 3.330669073875e-16\n      - -1.33226762955e-15\n      - -1.110223024625e-16\n      - 1.33226762955e-15\n      - 1.7763568394e-15\n      - -6.661338147751e-16\n      - 4.440892098501e-16\n      - 0.0\n      - 2.22044604925e-16\n      - 0.0\n      - 0.0\n      - 3.19744231092e-14\n      - -1.7763568394e-14\n      - 1.7763568394e-15\n      - 1.7763568394e-15\n      - 2.6645352591e-15\n      - 0.0\n      - 0.0\n      - 0.0\n      - 0.0\n      - -1.33226762955e-15\n      - 4.440892098501e-16\n      - 0.0\n      - -8.881784197001e-16\n      - 0.0\n      - 1.110223024625e-16\n      - -2.22044604925e-16\n      - 1.7763568394e-15\n      - -3.552713678801e-15\n      - 1.59872115546e-14\n      - 8.881784197001e-16\n      - -3.10862446895e-15\n      - 4.440892098501e-16\n      - 1.7763568394e-15\n      - 8.881784197001e-16\n      - 8.881784197001e-16\n      - 1.33226762955e-15\n      - -4.440892098501e-16\n      - 4.440892098501e-16\n      - 
-8.881784197001e-16\n      - 1.7763568394e-15\n      - -8.881784197001e-16\n      - 4.440892098501e-16\n      - -1.110223024625e-16\n      - 8.881784197001e-16\n      - -2.30926389122e-14\n      - 1.7763568394e-15\n      - 2.6645352591e-15\n      - 2.22044604925e-16\n      - 4.440892098501e-16\n      - 0.0\n      - 8.881784197001e-16\n      - 0.0\n      - 4.440892098501e-16\n      - -2.22044604925e-15\n      - 1.33226762955e-15\n      - -8.881784197001e-16\n      - -1.33226762955e-15\n      - 0.0\n      - 4.440892098501e-16\n      - 4.440892098501e-16\n      - -1.06581410364e-14\n      - -1.24344978758e-14\n      - 0.0\n      - 8.881784197001e-16\n      - -2.6645352591e-15\n      - -4.440892098501e-16\n      - -3.330669073875e-16\n      - 1.7763568394e-15\n      - 3.552713678801e-14\n      - -8.881784197001e-16\n      - 1.7763568394e-15\n      - 3.552713678801e-15\n      - -4.440892098501e-16\n      - 0.0\n      - -4.440892098501e-15\n      - 6.217248937901e-15\n      - -3.552713678801e-15\n      - -4.440892098501e-16\n      - -8.881784197001e-16\n      - 0.0\n      - 0.0\n      - 1.7763568394e-15\n      - 2.84217094304e-14\n      - -4.440892098501e-15\n      - 2.6645352591e-15\n      - -3.330669073875e-16\n      - 3.552713678801e-15\n      - -2.6645352591e-15\n      - 0.0\n      - -4.440892098501e-15\n      - -1.7763568394e-15\n      - 9.103828801926e-15\n      - -3.552713678801e-15\n      - 0.0\n      - 0.0\n      - -4.440892098501e-16\n      - 2.6645352591e-15\n      - -4.440892098501e-16\n      - 1.110223024625e-16\n      - 1.110223024625e-16\n      - -8.881784197001e-16\n      - -7.993605777300e-15\n      - 1.33226762955e-15\n      - 6.661338147751e-16\n      - 0.0\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - 3.552713678801e-15\n      - 4.440892098501e-16\n      - 2.22044604925e-16\n      - -2.22044604925e-16\n      - 1.33226762955e-15\n      - -1.59872115546e-14\n      - 5.551115123126e-15\n      - 2.22044604925e-16\n      - 
-8.881784197001e-16\n      - 8.881784197001e-16\n      - 1.110223024625e-16\n      - -3.552713678801e-15\n      - -8.881784197001e-16\n      - -4.718447854657e-16\n      - 0.0\n      - 1.7763568394e-15\n      - -3.552713678801e-15\n      - -7.993605777300e-15\n      - 7.993605777301e-15\n      - -1.199040866595e-14\n      - -1.7763568394e-15\n      - 1.7763568394e-15\n      - 8.881784197001e-16\n      - 8.881784197001e-16\n      - 8.881784197001e-16\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - 0.0\n      - 0.0\n      - 4.440892098501e-16\n      - -5.329070518201e-15\n      - 0.0\n      - 4.773959005888e-15\n      - -8.881784197001e-16\n      - 0.0\n      - 4.440892098501e-16\n      - -1.398881011028e-14\n      - -1.7763568394e-15\n      - 5.551115123126e-15\n      - -3.552713678801e-15\n      - -4.440892098501e-16\n      - 0.0\n      - -2.22044604925e-16\n      - 0.0\n      - 4.440892098501e-16\n      - 2.22044604925e-16\n      - 8.881784197001e-16\n      - 0.0\n      - 1.7763568394e-15\n      - -2.6645352591e-14\n      - 8.659739592076e-15\n      - 3.552713678801e-15\n      - 8.881784197001e-16\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - 4.440892098501e-16\n      - -1.33226762955e-15\n      - 0.0\n      - 0.0\n      - 0.0\n      - -1.7763568394e-15\n      - 1.59872115546e-14\n      - 1.7763568394e-15\n      - 0.0\n      - 4.440892098501e-16\n      - 0.0\n      - -4.440892098501e-16\n      - 1.7763568394e-15\n      - 2.6645352591e-15\n      - -4.440892098501e-16\n      - 2.22044604925e-16\n      - 1.665334536938e-16\n      - 5.329070518201e-15\n      - 3.552713678801e-15\n      - 2.84217094304e-14\n      - -4.440892098501e-16\n      - 0.0\n      - -6.661338147751e-16\n      - -3.552713678801e-15\n      - 4.440892098501e-16\n      - 8.881784197001e-16\n      - -4.440892098501e-16\n      - -3.552713678801e-15\n      - 2.6645352591e-15\n      - 1.7763568394e-15\n      - 3.552713678801e-15\n      - 6.217248937901e-15\n      
- 4.440892098501e-16\n      - -4.440892098501e-16\n      - -8.881784197001e-16\n      - 0.0\n      - -3.10862446895e-15\n      - -1.7763568394e-15\n      - 1.24344978758e-14\n      - 1.59872115546e-14\n      - -4.440892098501e-16\n      - 8.881784197001e-16\n      - -3.10862446895e-15\n      - 4.440892098501e-16\n  -   - -2.735578163993e-08\n      - -5.45305738342e-08\n      - 4.426374289324e-08\n      - -4.691855792771e-10\n      - -2.819547262334e-09\n      - -5.672461611539e-09\n      - -6.022133902661e-09\n      - -5.938645131209e-09\n      - -6.026155574546e-09\n      - -5.914529310758e-09\n      - -1.108718095111e-08\n      - -5.205436082178e-11\n      - -1.303774865846e-09\n      - 3.85213638765e-10\n      - -1.12522080542e-09\n      - -1.038296559841e-09\n      - -9.272147494244e-10\n      - -2.739852789091e-08\n      - -5.461970431497e-08\n      - 4.424782673595e-08\n      - -4.685034582508e-10\n      - -2.810566002154e-09\n      - -5.653987500409e-09\n      - -6.050512979527e-09\n      - -5.93539084548e-09\n      - -6.050512979527e-09\n      - -5.92861226778e-09\n      - -1.110882408284e-08\n      - -5.200462283028e-11\n      - -1.299493845863e-09\n      - 3.87061049878e-10\n      - -1.12522080542e-09\n      - -1.042401720497e-09\n      - -9.273968260004e-10\n      - -2.746151039901e-08\n      - -5.433957994683e-08\n      - 4.423691279953e-08\n      - -4.692992661148e-10\n      - -2.817955646606e-09\n      - -5.670756308973e-09\n      - -6.043791245247e-09\n      - -5.925457458034e-09\n      - -6.050569822946e-09\n      - -5.927120128035e-09\n      - -1.106290881125e-08\n      - -5.205436082178e-11\n      - -1.303028795974e-09\n      - 3.85909970646e-10\n      - -1.127881787966e-09\n      - -1.040435293476e-09\n      - -9.260201494499e-10\n      - -2.734941517701e-08\n      - -5.424772098195e-08\n      - 4.424055077834e-08\n      - -4.674802767113e-10\n      - -2.812839738908e-09\n      - -5.670756308973e-09\n      - -6.039726940799e-09\n      - 
-5.925457458034e-09\n      - -6.050512979527e-09\n      - -5.93539084548e-09\n      - -1.104555735765e-08\n      - -5.22533127878e-11\n      - -1.303213537085e-09\n      - 3.862474784455e-10\n      - -1.128016791085e-09\n      - -1.036601915416e-09\n      - -9.278338097829e-10\n      - -2.736760507105e-08\n      - 4.430148692336e-08\n      - -6.124395213192e-09\n      - 6.223643822523e-09\n      - 6.227637072698e-09\n      - 6.216559711447e-09\n      - -8.725535849408e-10\n      - -2.746151039901e-08\n      - 4.422872734722e-08\n      - -6.108592742748e-09\n      - 6.207599767549e-09\n      - 6.222357740171e-09\n      - 6.224738058336e-09\n      - -8.727347733384e-10\n      - -2.734918780334e-08\n      - 4.453613655642e-08\n      - -6.112173878137e-09\n      - 6.22451068466e-09\n      - 6.227359961031e-09\n      - 6.222300896752e-09\n      - -8.725198341608e-10\n      - -2.746151039901e-08\n      - 4.457479008124e-08\n      - -6.087390147513e-09\n      - 6.217128145636e-09\n      - -8.725535849408e-10\n      - 6.222357740171e-09\n      - 6.227637072698e-09\n      - -8.725198341608e-10\n      - -2.734918780334e-08\n      - 4.454204827198e-08\n      - -2.825686351571e-09\n      - -6.100492555561e-09\n      - -5.898698418605e-09\n      - -5.89793103245e-09\n      - -6.570232358172e-09\n      - 6.235552518774e-09\n      - -1.462325371904e-09\n      - -1.088713119657e-09\n      - -8.705525189612e-10\n      - -2.73569185083e-08\n      - 4.454068402993e-08\n      - -2.808064891724e-09\n      - -6.131557483968e-09\n      - -5.876401587557e-09\n      - -5.897845767322e-09\n      - -6.565869625774e-09\n      - 6.222357740171e-09\n      - -1.463405396862e-09\n      - -1.089073720095e-09\n      - -8.69984972951e-10\n      - -2.731030690484e-08\n      - 4.454204827198e-08\n      - -2.806132215483e-09\n      - -6.112287564974e-09\n      - -5.890328225178e-09\n      - -5.881020115339e-09\n      - -6.602697055769e-09\n      - 6.208729530499e-09\n      - -1.467945764944e-09\n      
- -1.086476686396e-09\n      - -8.746043889118e-10\n      - -2.746151039901e-08\n      - -5.454558049678e-08\n      - 4.454068402993e-08\n      - -4.674802767113e-10\n      - -2.816477717715e-09\n      - -5.670813152392e-09\n      - -5.936954039498e-09\n      - -5.93111337821e-09\n      - -1.104545788166e-08\n      - -5.2018833685e-11\n      - -1.29870869614e-09\n      - 3.867874909247e-10\n      - -1.12415321496e-09\n      - -1.038033659029e-09\n      - -9.241301057727e-10\n      - 6.206445135604e-09\n      - 6.206281710774e-09\n      - 6.214690984052e-09\n      - 6.227359961031e-09\n      - 6.216112069524e-09\n      - 6.232355076463e-09\n      - -2.740466698015e-08\n      - 4.426556188264e-08\n      - -2.806132215483e-09\n      - -6.098474614191e-09\n      - -5.856662710357e-09\n      - -6.574133237791e-09\n      - -5.885155474061e-09\n      - -6.587036693873e-09\n      - 6.232085070224e-09\n      - -1.464133703166e-09\n      - -1.089770051976e-09\n      - -8.732499168218e-10\n      - -2.739443516475e-08\n      - 4.424418875715e-08\n      - -2.815454536176e-09\n      - -6.108251682235e-09\n      - -5.863171281817e-09\n      - -6.585437972717e-09\n      - -5.886150233891e-09\n      - -6.576577504802e-09\n      - 6.217746317816e-09\n      - -1.467178378789e-09\n      - -1.090377566015e-09\n      - -8.717009336578e-10\n      - -2.741853677435e-08\n      - 4.456569513422e-08\n      - -2.813635546772e-09\n      - -6.107967465141e-09\n      - -5.881588549528e-09\n      - -6.590155976482e-09\n      - -5.854928986082e-09\n      - -6.584819800537e-09\n      - 6.221597459444e-09\n      - -1.46228273934e-09\n      - -1.089073720095e-09\n      - -8.693215036715e-10\n      - 6.218208170594e-09\n      - -2.752040018095e-08\n      - 4.457479008124e-08\n      - -6.096939841882e-09\n      - 6.222300896752e-09\n      - -8.720668631668e-10\n      - 6.209738501184e-09\n      - -8.718927801965e-10\n      - 6.226734683423e-09\n      - 6.224738058336e-09\n      - 6.208729530499e-09\n   
   - -6.025345555827e-09\n      - -6.015824283168e-09\n      - -2.741762727965e-08\n      - 4.41937118012e-08\n      - -6.025857146597e-09\n      - -6.039897471055e-09\n      - -1.109327030235e-08\n      - -1.30298616341e-09\n      - -6.022730758559e-09\n      - -6.028955112924e-09\n      - -2.738147486525e-08\n      - 4.430512490217e-08\n      - -1.106290881125e-08\n      - -1.301476260096e-09\n      - -6.022730758559e-09\n      - -6.022105480952e-09\n  -   - -3.552713678801e-15\n      - -3.552713678801e-15\n      - 9.769962616701e-15\n      - 5.329070518201e-15\n      - 6.217248937901e-15\n      - 3.774758283726e-15\n      - -2.553512956638e-15\n      - 4.440892098501e-16\n      - -1.110223024625e-15\n      - 0.0\n      - 8.881784197001e-16\n      - 1.7763568394e-15\n      - 2.22044604925e-16\n      - 0.0\n      - 2.22044604925e-16\n      - 2.22044604925e-16\n      - -2.22044604925e-16\n      - 0.0\n      - 1.42108547152e-14\n      - -1.42108547152e-14\n      - -1.7763568394e-15\n      - 1.7763568394e-15\n      - 2.6645352591e-15\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - -4.440892098501e-16\n      - -8.881784197001e-16\n      - 4.440892098501e-16\n      - 0.0\n      - 4.440892098501e-16\n      - 2.22044604925e-16\n      - -5.551115123126e-16\n      - -1.110223024625e-16\n      - -8.881784197001e-16\n      - 0.0\n      - 1.95399252334e-14\n      - 5.329070518201e-15\n      - -5.329070518201e-15\n      - 2.442490654175e-15\n      - 8.881784197001e-16\n      - 0.0\n      - 0.0\n      - 4.440892098501e-16\n      - -4.440892098501e-16\n      - -8.881784197001e-16\n      - -1.33226762955e-15\n      - 4.440892098501e-16\n      - -4.440892098501e-16\n      - 0.0\n      - 0.0\n      - 4.440892098501e-16\n      - -2.22044604925e-14\n      - 1.95399252334e-14\n      - 6.439293542826e-15\n      - 6.883382752676e-15\n      - 2.442490654175e-15\n      - -8.881784197001e-16\n      - 0.0\n      - -8.881784197001e-16\n      - 
-8.881784197001e-16\n      - -1.7763568394e-15\n      - 8.881784197001e-16\n      - -4.440892098501e-16\n      - -1.33226762955e-15\n      - 0.0\n      - 0.0\n      - 2.22044604925e-16\n      - -7.105427357601e-15\n      - 0.0\n      - 1.7763568394e-15\n      - 2.6645352591e-15\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - -2.22044604925e-16\n      - -8.881784197001e-16\n      - 3.28626015289e-14\n      - 1.7763568394e-15\n      - 1.7763568394e-15\n      - 3.552713678801e-15\n      - -1.33226762955e-15\n      - 0.0\n      - 3.552713678801e-15\n      - 1.42108547152e-14\n      - -2.6645352591e-15\n      - 8.881784197001e-16\n      - -2.6645352591e-15\n      - 1.7763568394e-15\n      - 0.0\n      - -8.881784197001e-16\n      - 1.95399252334e-14\n      - -3.10862446895e-15\n      - 1.7763568394e-15\n      - -2.22044604925e-16\n      - 3.552713678801e-15\n      - -8.881784197001e-16\n      - 0.0\n      - 3.552713678801e-15\n      - -1.95399252334e-14\n      - 7.105427357601e-15\n      - -3.552713678801e-15\n      - 0.0\n      - 0.0\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - 0.0\n      - -1.110223024625e-16\n      - -8.326672684688e-17\n      - -3.552713678801e-15\n      - 0.0\n      - -2.22044604925e-15\n      - 4.440892098501e-16\n      - 0.0\n      - 0.0\n      - -8.881784197001e-16\n      - 3.552713678801e-15\n      - 6.661338147751e-16\n      - 0.0\n      - 0.0\n      - 5.329070518201e-15\n      - -3.37507799486e-14\n      - -6.661338147751e-16\n      - 2.553512956638e-15\n      - -8.881784197001e-16\n      - 8.881784197001e-16\n      - 4.440892098501e-16\n      - 0.0\n      - 0.0\n      - -3.053113317719e-16\n      - 0.0\n      - -8.881784197001e-16\n      - -3.37507799486e-14\n      - 0.0\n      - 1.24344978758e-14\n      - -1.199040866595e-14\n      - 0.0\n      - 8.881784197001e-16\n      - 4.440892098501e-16\n      - 0.0\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - 
0.0\n      - 0.0\n      - 0.0\n      - -7.105427357601e-15\n      - -1.7763568394e-15\n      - 2.997602166488e-15\n      - -2.6645352591e-15\n      - -2.6645352591e-15\n      - -1.554312234475e-15\n      - -1.06581410364e-14\n      - -3.28626015289e-14\n      - -6.661338147751e-16\n      - -2.22044604925e-15\n      - -1.110223024625e-15\n      - 4.440892098501e-16\n      - 1.110223024625e-15\n      - -4.440892098501e-16\n      - 1.998401444325e-15\n      - 4.440892098501e-16\n      - 4.440892098501e-16\n      - 0.0\n      - 0.0\n      - -8.881784197001e-15\n      - 5.773159728051e-15\n      - 2.6645352591e-15\n      - 1.7763568394e-15\n      - 0.0\n      - -4.440892098501e-16\n      - -8.881784197001e-16\n      - -2.22044604925e-15\n      - 8.881784197001e-16\n      - 1.110223024625e-16\n      - 4.440892098501e-16\n      - 2.6645352591e-15\n      - 2.13162820728e-14\n      - 0.0\n      - 0.0\n      - 8.881784197001e-16\n      - 0.0\n      - -8.881784197001e-16\n      - 8.881784197001e-16\n      - 2.6645352591e-15\n      - 0.0\n      - 0.0\n      - -2.775557561563e-17\n      - 3.552713678801e-15\n      - 3.552713678801e-15\n      - 1.95399252334e-14\n      - -1.33226762955e-15\n      - 1.7763568394e-15\n      - -6.661338147751e-16\n      - -1.7763568394e-15\n      - 0.0\n      - 0.0\n      - -1.33226762955e-15\n      - 0.0\n      - 2.6645352591e-15\n      - 0.0\n      - 7.105427357601e-15\n      - 1.68753899743e-14\n      - -1.33226762955e-15\n      - 8.881784197001e-16\n      - -8.881784197001e-16\n      - 0.0\n      - -1.7763568394e-15\n      - -1.33226762955e-15\n      - 1.24344978758e-14\n      - 0.0\n      - -4.440892098501e-16\n      - 4.440892098501e-16\n      - -1.7763568394e-15\n      - 1.7763568394e-15\n  -   - -1.06581410364e-14\n      - 3.552713678801e-15\n      - 3.552713678801e-15\n      - 7.105427357601e-15\n      - 8.881784197001e-15\n      - 0.0\n      - -2.6645352591e-15\n      - 8.881784197001e-16\n      - -1.7763568394e-15\n      - 
-8.881784197001e-16\n      - 1.7763568394e-15\n      - 8.881784197001e-16\n      - 0.0\n      - 1.7763568394e-15\n      - -1.110223024625e-16\n      - -2.22044604925e-16\n      - 0.0\n      - -3.552713678801e-15\n      - 4.263256414561e-14\n      - -7.105427357601e-15\n      - -3.552713678801e-15\n      - -3.552713678801e-15\n      - -3.552713678801e-15\n      - -8.881784197001e-16\n      - 2.22044604925e-16\n      - -8.881784197001e-16\n      - 0.0\n      - -2.6645352591e-15\n      - -4.440892098501e-16\n      - -1.554312234475e-15\n      - 0.0\n      - -1.110223024625e-16\n      - 0.0\n      - -2.22044604925e-16\n      - 3.552713678801e-15\n      - 1.7763568394e-14\n      - 8.881784197001e-15\n      - -3.552713678801e-15\n      - -3.552713678801e-15\n      - -8.881784197001e-16\n      - 4.440892098501e-16\n      - -7.771561172376e-16\n      - -1.33226762955e-15\n      - -1.33226762955e-15\n      - -4.440892098501e-16\n      - -8.881784197001e-16\n      - -1.443289932013e-15\n      - -8.881784197001e-16\n      - -2.081668171172e-16\n      - -2.775557561563e-17\n      - -3.330669073875e-16\n      - 3.552713678801e-15\n      - -5.329070518201e-14\n      - -2.48689957516e-14\n      - -7.105427357601e-15\n      - -5.329070518201e-15\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - -7.771561172376e-16\n      - -8.881784197001e-16\n      - 2.22044604925e-16\n      - -1.7763568394e-15\n      - 2.22044604925e-16\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - -1.110223024625e-16\n      - -1.665334536938e-16\n      - 5.551115123126e-17\n      - 7.105427357601e-15\n      - -3.19744231092e-14\n      - -1.7763568394e-15\n      - 4.440892098501e-16\n      - -5.773159728051e-15\n      - -3.552713678801e-15\n      - -4.440892098501e-16\n      - 3.552713678801e-15\n      - 1.59872115546e-14\n      - -8.881784197001e-16\n      - 1.33226762955e-15\n      - 2.6645352591e-15\n      - 0.0\n      - -3.330669073875e-16\n      - 5.329070518201e-15\n   
   - 1.7763568394e-15\n      - -3.552713678801e-15\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - -1.7763568394e-15\n      - -4.440892098501e-16\n      - 3.552713678801e-15\n      - 8.881784197001e-15\n      - -2.6645352591e-15\n      - 5.773159728051e-15\n      - -4.440892098501e-16\n      - 2.6645352591e-15\n      - -5.773159728051e-15\n      - -4.440892098501e-16\n      - 5.329070518201e-15\n      - -3.730349362741e-14\n      - 8.881784197001e-15\n      - -2.6645352591e-15\n      - -1.110223024625e-16\n      - 9.436895709314e-16\n      - -1.7763568394e-15\n      - -1.33226762955e-15\n      - -8.881784197001e-16\n      - -4.440892098501e-16\n      - 2.22044604925e-16\n      - 5.329070518201e-15\n      - -3.01980662698e-14\n      - 8.881784197001e-16\n      - -1.7763568394e-15\n      - -1.33226762955e-15\n      - 0.0\n      - -1.7763568394e-15\n      - 2.6645352591e-15\n      - 0.0\n      - -3.330669073875e-16\n      - -4.440892098501e-16\n      - 0.0\n      - -5.151434834261e-14\n      - 5.329070518201e-15\n      - -2.6645352591e-15\n      - -1.998401444325e-15\n      - 4.996003610813e-16\n      - -4.440892098501e-16\n      - -3.996802888651e-15\n      - 0.0\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - 3.552713678801e-15\n      - -8.881784197001e-15\n      - -3.01980662698e-14\n      - 5.329070518201e-15\n      - -1.42108547152e-14\n      - -8.881784197001e-16\n      - 1.110223024625e-16\n      - -4.440892098501e-16\n      - 6.661338147751e-16\n      - -8.881784197001e-16\n      - -8.881784197001e-16\n      - -2.6645352591e-15\n      - 0.0\n      - 0.0\n      - -2.22044604925e-16\n      - -7.105427357601e-15\n      - -3.552713678801e-15\n      - 8.881784197001e-16\n      - -8.881784197001e-16\n      - -3.10862446895e-15\n      - -8.881784197001e-16\n      - -6.217248937901e-15\n      - -2.13162820728e-14\n      - 5.329070518201e-15\n      - -5.329070518201e-15\n      - -1.7763568394e-15\n      - 0.0\n      - 
-1.7763568394e-15\n      - -8.881784197001e-16\n      - 2.22044604925e-15\n      - 4.440892098501e-16\n      - 8.881784197001e-16\n      - 0.0\n      - 7.105427357601e-15\n      - -1.59872115546e-14\n      - 7.993605777301e-15\n      - 2.22044604925e-15\n      - 4.440892098501e-16\n      - -9.992007221626e-16\n      - 0.0\n      - 2.22044604925e-16\n      - -6.661338147751e-15\n      - 0.0\n      - 0.0\n      - 8.881784197001e-16\n      - 7.105427357601e-15\n      - 1.06581410364e-14\n      - 4.440892098501e-16\n      - -2.442490654175e-15\n      - 8.881784197001e-16\n      - -8.881784197001e-16\n      - -1.33226762955e-15\n      - 0.0\n      - 2.22044604925e-15\n      - -6.661338147751e-16\n      - -3.330669073875e-16\n      - 0.0\n      - 8.881784197001e-16\n      - 7.105427357601e-15\n      - 8.881784197001e-15\n      - 0.0\n      - -1.7763568394e-15\n      - -7.910339050454e-16\n      - -5.329070518201e-15\n      - -2.22044604925e-16\n      - 2.22044604925e-15\n      - 0.0\n      - -3.996802888651e-15\n      - 1.998401444325e-15\n      - -2.22044604925e-15\n      - 1.06581410364e-14\n      - -3.01980662698e-14\n      - 0.0\n      - -8.881784197001e-16\n      - -4.440892098501e-16\n      - -8.881784197001e-16\n      - -2.22044604925e-15\n      - -3.552713678801e-15\n      - 0.0\n      - -3.552713678801e-14\n      - -4.440892098501e-16\n      - 3.330669073875e-16\n      - -2.22044604925e-15\n      - 0.0\n  -   - -2.453347747178e-08\n      - -4.890711124972e-08\n      - 3.971540252223e-08\n      - -4.215081617076e-10\n      - -2.531166387598e-09\n      - -5.090015520182e-09\n      - -5.402824854173e-09\n      - -5.326945995421e-09\n      - -5.406533887253e-09\n      - -5.303881778218e-09\n      - -9.95139259885e-09\n      - -4.664002517529e-11\n      - -1.169468077933e-09\n      - 3.465174813755e-10\n      - -1.009041739053e-09\n      - -9.316067917097e-10\n      - -8.316902722072e-10\n      - -2.457070991113e-08\n      - -4.9003332947e-08\n      - 
3.969984163632e-08\n      - -4.210960469209e-10\n      - -2.520792463656e-09\n      - -5.072479325463e-09\n      - -5.43165867839e-09\n      - -5.325112795163e-09\n      - -5.43165867839e-09\n      - -5.320373475115e-09\n      - -9.971742542803e-09\n      - -4.661160346586e-11\n      - -1.165034291262e-09\n      - 3.48336470779e-10\n      - -1.009041739053e-09\n      - -9.349250262858e-10\n      - -8.319460675921e-10\n      - -2.462979864504e-08\n      - -4.874540593391e-08\n      - 3.969002193571e-08\n      - -4.214655291435e-10\n      - -2.529361609049e-09\n      - -5.086377541375e-09\n      - -5.424837468126e-09\n      - -5.313083306646e-09\n      - -5.429683369584e-09\n      - -5.316017848145e-09\n      - -9.926125699167e-09\n      - -4.666844688472e-11\n      - -1.168785956907e-09\n      - 3.474269760773e-10\n      - -1.012267603073e-09\n      - -9.336105222246e-10\n      - -8.307345922276e-10\n      - -2.453501224409e-08\n      - -4.864099878432e-08\n      - 3.969411466187e-08\n      - -4.200586545267e-10\n      - -2.522682507333e-09\n      - -5.086377541375e-09\n      - -5.417248871709e-09\n      - -5.313083306646e-09\n      - -5.43165867839e-09\n      - -5.325112795163e-09\n      - -9.905932074616e-09\n      - -4.685318799602e-11\n      - -1.16909859571e-09\n      - 3.474269760773e-10\n      - -1.011144945551e-09\n      - -9.30121757392e-10\n      - -8.32123703276e-10\n      - -2.455203684804e-08\n      - 3.975333129347e-08\n      - -5.494243282556e-09\n      - 5.58331691991e-09\n      - 5.584197992903e-09\n      - 5.577660999734e-09\n      - -7.832063886326e-10\n      - -2.462979864504e-08\n      - 3.968375494878e-08\n      - -5.482178266902e-09\n      - 5.567116545535e-09\n      - 5.582947437688e-09\n      - 5.582720064012e-09\n      - -7.826947978629e-10\n      - -2.453300851357e-08\n      - 3.997169528702e-08\n      - -5.48229195374e-09\n      - 5.582549533756e-09\n      - 5.58533486128e-09\n      - 5.58033264042e-09\n      - -7.824034753412e-10\n      
- -2.462979864504e-08\n      - 4.000852982244e-08\n      - -5.459440899358e-09\n      - 5.578741024692e-09\n      - -7.832063886326e-10\n      - 5.582947437688e-09\n      - 5.584197992903e-09\n      - -7.824034753412e-10\n      - -2.453300851357e-08\n      - 3.997578801318e-08\n      - -2.534619625294e-09\n      - -5.472671205098e-09\n      - -5.292946525515e-09\n      - -5.289514604101e-09\n      - -5.893511456634e-09\n      - 5.593420837613e-09\n      - -1.311946107307e-09\n      - -9.763994057721e-10\n      - -7.807834379037e-10\n      - -2.454028447119e-08\n      - 3.997533326583e-08\n      - -2.518589781175e-09\n      - -5.499970257006e-09\n      - -5.27222709934e-09\n      - -5.289443549827e-09\n      - -5.889233989365e-09\n      - 5.582947437688e-09\n      - -1.31279875859e-09\n      - -9.767973097040e-10\n      - -7.801634893667e-10\n      - -2.450508418406e-08\n      - 3.997578801318e-08\n      - -2.517722919038e-09\n      - -5.481609832714e-09\n      - -5.282728920974e-09\n      - -5.276390879771e-09\n      - -5.921897638927e-09\n      - 5.568864480665e-09\n      - -1.316720954492e-09\n      - -9.741683015817e-10\n      - -7.849578764763e-10\n      - -2.462979864504e-08\n      - -4.892184790606e-08\n      - 3.997533326583e-08\n      - -4.198312808512e-10\n      - -2.525382569729e-09\n      - -5.087684940008e-09\n      - -5.325020424607e-09\n      - -5.321375340372e-09\n      - -9.905946285471e-09\n      - -4.662581432058e-11\n      - -1.165076923826e-09\n      - 3.474269760773e-10\n      - -1.008160666061e-09\n      - -9.312302040598e-10\n      - -8.287912578453e-10\n      - 5.564075422626e-09\n      - 5.566761274167e-09\n      - 5.577334150075e-09\n      - 5.58533486128e-09\n      - 5.575742534347e-09\n      - 5.589768647951e-09\n      - -2.458268966166e-08\n      - 3.971631201694e-08\n      - -2.517722919038e-09\n      - -5.472060138345e-09\n      - -5.253625090518e-09\n      - -5.896666266381e-09\n      - -5.281549420033e-09\n      - 
-5.911225287036e-09\n      - 5.58955548513e-09\n      - -1.31430510919e-09\n      - -9.776002229954e-10\n      - -7.833289572545e-10\n      - -2.456549452745e-08\n      - 3.969729789333e-08\n      - -2.525453624003e-09\n      - -5.481140874508e-09\n      - -5.259096269583e-09\n      - -5.908617595196e-09\n      - -5.280242021399e-09\n      - -5.902698774207e-09\n      - 5.579806838796e-09\n      - -1.316578845945e-09\n      - -9.776002229954e-10\n      - -7.816733926802e-10\n      - -2.458754977397e-08\n      - 3.999780062713e-08\n      - -2.523478315197e-09\n      - -5.481275877628e-09\n      - -5.276277192934e-09\n      - -5.912788481055e-09\n      - -5.252047685644e-09\n      - -5.904489341901e-09\n      - 5.580417905549e-09\n      - -1.312287167821e-09\n      - -9.767973097040e-10\n      - -7.796998602316e-10\n      - 5.577490469477e-09\n      - -2.470176241332e-08\n      - 4.000852982244e-08\n      - -5.468805852615e-09\n      - 5.58033264042e-09\n      - -7.824567660464e-10\n      - 5.57135138024e-09\n      - -7.82140574529e-10\n      - 5.586642259914e-09\n      - 5.582720064012e-09\n      - 5.568864480665e-09\n      - -5.405695446825e-09\n      - -5.398412383784e-09\n      - -2.459630366047e-08\n      - 3.965256212268e-08\n      - -5.406420200416e-09\n      - -5.417355453119e-09\n      - -9.949395973763e-09\n      - -1.168757535197e-09\n      - -5.401048497333e-09\n      - -5.409134473666e-09\n      - -2.456927461481e-08\n      - 3.975651452492e-08\n      - -9.926125699167e-09\n      - -1.167023810922e-09\n      - -5.401048497333e-09\n      - -5.400458746863e-09\nhistory_criterion:\n  -   - 21.53511643627\n      - 14.80453604351\n      - 6.548558251064\n      - 12.54188075473\n      - 9.282890198608\n      - 2.859555210712\n      - 0.9381817894678\n      - 0.2048532883114\n      - 0.8881817894678\n      - 0.3798532883114\n      - -0.9101956814319\n      - -1.36444138824\n      - -0.9351994446357\n      - -1.055070381505\n      - -1.111335532899\n      - 
-0.1703442432756\n      - 1.580641245921\n      - 19.23511643627\n      - 13.00453604351\n      - 13.94855825106\n      - 11.24188075473\n      - 6.182890198608\n      - -1.240444789288\n      - -0.8618182105322\n      - -1.995146711689\n      - -0.9868182105322\n      - -1.270146711689\n      - -1.135195681432\n      - -0.9144413882404\n      - -3.072699444636\n      - -1.317570381505\n      - -0.9238355328992\n      - 0.9546557567244\n      - -0.3318587540789\n      - 8.635116436265\n      - 15.10453604351\n      - 6.148558251063\n      - 4.841880754733\n      - 5.382890198608\n      - 2.059555210712\n      - -3.361818210532\n      - -2.995146711689\n      - -3.311818210532\n      - -2.395146711689\n      - -2.185195681432\n      - -2.63944138824\n      - -1.985199444636\n      - -1.880070381505\n      - -1.711335532899\n      - -1.407844243276\n      - -0.4818587540789\n      - 2.735116436265\n      - 3.404536043506\n      - 3.148558251063\n      - 3.141880754733\n      - 2.482890198608\n      - 0.5595552107122\n      - -0.7618182105322\n      - -2.995146711689\n      - -0.7993182105322\n      - -2.245146711689\n      - -1.885195681432\n      - -1.96444138824\n      - -1.647699444636\n      - -2.292570381505\n      - -1.486335532899\n      - -1.557844243276\n      - -0.8193587540789\n      - 10.13511643627\n      - 4.748558251063\n      - -2.218096467799\n      - -4.369688200573\n      - -3.659688200573\n      - -1.219688200573\n      - -0.3489655844206\n      - 6.635116436265\n      - 2.248558251063\n      - -1.518096467799\n      - -2.939688200573\n      - -4.029688200573\n      - -2.159688200573\n      - -2.038965584421\n      - 5.435116436265\n      - 3.348558251064\n      - -1.818096467799\n      - -2.909688200573\n      - -4.969688200573\n      - -3.469688200573\n      - -0.5389655844206\n      - 6.635116436265\n      - 5.848558251064\n      - -0.918096467799\n      - -4.219688200573\n      - -0.3489655844206\n      - -4.029688200573\n      - 
-3.659688200573\n      - -0.5389655844206\n      - 5.435116436265\n      - 2.348558251064\n      - -0.0171098013921\n      - -2.718096467799\n      - -4.257793595776\n      - -3.887793595776\n      - -2.006947842151\n      - -2.829688200573\n      - -0.1835757519589\n      - 0.8557490906722\n      - 0.6910344155794\n      - 4.435116436265\n      - 4.348558251064\n      - 0.9828901986079\n      - 0.481903532201\n      - -6.457793595776\n      - -6.137793595776\n      - -1.516947842151\n      - -4.029688200573\n      - -1.013575751959\n      - -0.8342509093278\n      - 1.441034415579\n      - -0.8648835637348\n      - 1.848558251064\n      - 0.6828901986079\n      - 1.081903532201\n      - -5.457793595776\n      - -4.787793595776\n      - 0.1730521578493\n      - -1.139688200573\n      - -3.263575751959\n      - 0.4057490906722\n      - 4.141034415579\n      - 6.635116436265\n      - 2.104536043506\n      - 4.348558251064\n      - 5.641880754733\n      - -0.1171098013921\n      - -2.640444789288\n      - -3.195146711689\n      - -2.325146711689\n      - -4.777695681432\n      - -5.49444138824\n      - -4.762699444636\n      - -5.027570381505\n      - -3.966335532899\n      - -3.510344243276\n      - -2.694358754079\n      - 7.410311799427\n      - 8.980311799427\n      - 1.290311799427\n      - -4.969688200573\n      - -4.709688200573\n      - -0.659688200573\n      - -0.5648835637348\n      - -2.951441748936\n      - 0.1828901986079\n      - 5.081903532201\n      - 3.342206404224\n      - 2.873052157849\n      - 3.162206404224\n      - 2.983052157849\n      - 0.920311799427\n      - 0.1164242480411\n      - 3.925749090672\n      - 2.761034415579\n      - 9.335116436265\n      - 3.648558251063\n      - -0.3171098013921\n      - -5.718096467799\n      - -2.457793595776\n      - -3.126947842151\n      - -1.897793595776\n      - -2.156947842151\n      - -0.539688200573\n      - -2.693575751959\n      - -0.2742509093278\n      - 2.531034415579\n      - -4.664883563735\n  
    - 1.548558251064\n      - -7.017109801392\n      - -4.018096467799\n      - -0.7577935957756\n      - -4.526947842151\n      - -0.4677935957756\n      - -3.876947842151\n      - -3.429688200573\n      - -2.813575751959\n      - -0.9442509093278\n      - 0.6610344155794\n      - -3.089688200573\n      - 9.635116436265\n      - 6.848558251064\n      - 0.781903532201\n      - -3.469688200573\n      - -1.108965584421\n      - -3.839688200573\n      - -0.9189655844206\n      - -1.589688200573\n      - -2.159688200573\n      - -1.139688200573\n      - -4.661818210532\n      - -4.211818210532\n      - 10.33511643627\n      - 3.948558251064\n      - -0.3618182105322\n      - -0.3518182105322\n      - -1.477695681432\n      - -2.132699444636\n      - -1.761818210532\n      - -1.471818210532\n      - 9.935116436265\n      - 3.248558251063\n      - -1.997695681432\n      - -2.472699444636\n      - -1.261818210532\n      - -1.211818210532\n  -   - 25.01562287811\n      - 18.67576650474\n      - 10.71425043997\n      - 16.92850306334\n      - 13.83328982937\n      - 7.61143273431\n      - 5.780449944004\n      - 4.918595910462\n      - 5.730449944004\n      - 5.093595910462\n      - 3.573230198002\n      - 2.843276294294\n      - 2.98078293018\n      - 2.569118760852\n      - 2.229814978179\n      - 2.901300021005\n      - 4.398727952741\n      - 22.71562287811\n      - 16.87576650474\n      - 18.11425043997\n      - 15.62850306334\n      - 10.73328982937\n      - 3.51143273431\n      - 3.980449944004\n      - 2.718595910462\n      - 3.855449944004\n      - 3.443595910462\n      - 3.348230198002\n      - 3.293276294294\n      - 0.8432829301802\n      - 2.306618760852\n      - 2.417314978179\n      - 4.026300021005\n      - 2.486227952741\n      - 12.11562287811\n      - 18.97576650474\n      - 10.31425043997\n      - 9.22850306334\n      - 9.933289829366\n      - 6.81143273431\n      - 1.480449944004\n      - 1.718595910462\n      - 1.530449944004\n      - 2.318595910462\n  
    - 2.298230198002\n      - 1.568276294294\n      - 1.93078293018\n      - 1.744118760852\n      - 1.629814978179\n      - 1.663800021005\n      - 2.336227952741\n      - 6.215622878108\n      - 7.275766504742\n      - 7.314250439974\n      - 7.52850306334\n      - 7.033289829366\n      - 5.31143273431\n      - 4.080449944004\n      - 1.718595910462\n      - 4.042949944004\n      - 2.468595910462\n      - 2.598230198002\n      - 2.243276294294\n      - 2.26828293018\n      - 1.331618760852\n      - 1.854814978179\n      - 1.513800021005\n      - 1.998727952741\n      - 13.61562287811\n      - 8.914250439974\n      - 2.617857443871\n      - -0.02069875634249\n      - 0.6893012436575\n      - 3.129301243658\n      - 2.348674115464\n      - 10.11562287811\n      - 6.414250439974\n      - 3.317857443871\n      - 1.409301243658\n      - 0.3193012436575\n      - 2.189301243658\n      - 0.6586741154643\n      - 8.915622878108\n      - 7.514250439974\n      - 3.017857443871\n      - 1.439301243658\n      - -0.6206987563425\n      - 0.8793012436575\n      - 2.158674115464\n      - 10.11562287811\n      - 10.01425043997\n      - 3.917857443871\n      - 0.1293012436575\n      - 2.348674115464\n      - 0.3193012436575\n      - 0.6893012436575\n      - 2.158674115464\n      - 8.915622878108\n      - 6.514250439974\n      - 4.533289829366\n      - 2.117857443871\n      - 0.5381907245488\n      - 0.9081907245488\n      - 2.599956711546\n      - 1.519301243658\n      - 3.585844975595\n      - 4.0602621231\n      - 3.388674115464\n      - 7.915622878108\n      - 8.514250439974\n      - 5.533289829366\n      - 5.317857443871\n      - -1.661809275451\n      - -1.341809275451\n      - 3.089956711546\n      - 0.3193012436575\n      - 2.755844975595\n      - 2.3702621231\n      - 4.138674115464\n      - 2.615622878108\n      - 6.014250439974\n      - 5.233289829366\n      - 5.917857443871\n      - -0.6618092754512\n      - 0.008190724548808\n      - 4.779956711546\n      - 
3.209301243658\n      - 0.505844975595\n      - 3.6102621231\n      - 6.838674115464\n      - 10.11562287811\n      - 5.975766504742\n      - 8.514250439974\n      - 10.02850306334\n      - 4.433289829366\n      - 2.11143273431\n      - 1.518595910462\n      - 2.388595910462\n      - -0.2942698019983\n      - -1.286723705706\n      - -0.8467170698198\n      - -1.403381239148\n      - -0.6251850218209\n      - -0.4386999789948\n      - 0.1237279527411\n      - 11.75930124366\n      - 13.32930124366\n      - 5.639301243658\n      - -0.6206987563425\n      - -0.3606987563425\n      - 3.689301243658\n      - 2.915622878108\n      - 1.214250439974\n      - 4.733289829366\n      - 9.917857443871\n      - 8.138190724549\n      - 7.479956711546\n      - 7.958190724549\n      - 7.589956711546\n      - 5.269301243658\n      - 3.885844975595\n      - 7.1302621231\n      - 5.458674115464\n      - 12.81562287811\n      - 7.814250439974\n      - 4.233289829366\n      - -0.8821425561292\n      - 2.338190724549\n      - 1.479956711546\n      - 2.898190724549\n      - 2.449956711546\n      - 3.809301243658\n      - 1.075844975595\n      - 2.9302621231\n      - 5.228674115464\n      - -1.184377121892\n      - 5.714250439974\n      - -2.466710170634\n      - 0.8178574438708\n      - 4.038190724549\n      - 0.07995671154575\n      - 4.328190724549\n      - 0.7299567115457\n      - 0.9193012436575\n      - 0.955844975595\n      - 2.2602621231\n      - 3.358674115464\n      - 1.259301243658\n      - 13.11562287811\n      - 11.01425043997\n      - 5.617857443871\n      - 0.8793012436575\n      - 1.588674115464\n      - 0.5093012436575\n      - 1.778674115464\n      - 2.759301243658\n      - 2.189301243658\n      - 3.209301243658\n      - 0.1804499440042\n      - 0.6304499440042\n      - 13.81562287811\n      - 8.114250439974\n      - 4.480449944004\n      - 4.490449944004\n      - 3.005730198002\n      - 1.78328293018\n      - 3.080449944004\n      - 3.370449944004\n      - 
13.41562287811\n      - 7.414250439974\n      - 2.485730198002\n      - 1.44328293018\n      - 3.580449944004\n      - 3.630449944004\n  -   - 84.68988065196\n      - 70.73054388289\n      - 56.46322643196\n      - 57.38823556867\n      - 49.80586460657\n      - 36.42009030556\n      - 24.97150307276\n      - 18.1321687762\n      - 24.92150307276\n      - 18.3071687762\n      - 12.85194135284\n      - 9.428755329368\n      - 7.671466840337\n      - 5.900125885276\n      - 4.571195356425\n      - 4.515228493968\n      - 5.474513604881\n      - 82.38988065196\n      - 68.93054388289\n      - 63.86322643196\n      - 56.08823556867\n      - 46.70586460657\n      - 32.32009030556\n      - 23.17150307276\n      - 15.9321687762\n      - 23.04650307276\n      - 16.6571687762\n      - 12.62694135284\n      - 9.878755329368\n      - 5.533966840337\n      - 5.637625885276\n      - 4.758695356425\n      - 5.640228493968\n      - 3.562013604881\n      - 71.78988065196\n      - 71.03054388289\n      - 56.06322643196\n      - 49.68823556867\n      - 45.90586460657\n      - 35.62009030556\n      - 20.67150307276\n      - 14.9321687762\n      - 20.72150307276\n      - 15.5321687762\n      - 11.57694135284\n      - 8.153755329368\n      - 6.621466840337\n      - 5.075125885276\n      - 3.971195356425\n      - 3.277728493968\n      - 3.412013604881\n      - 65.88988065196\n      - 59.33054388289\n      - 53.06322643196\n      - 47.98823556867\n      - 43.00586460657\n      - 34.12009030556\n      - 23.27150307276\n      - 14.9321687762\n      - 23.23400307276\n      - 15.6821687762\n      - 11.87694135284\n      - 8.828755329368\n      - 6.958966840337\n      - 4.662625885276\n      - 4.196195356425\n      - 3.127728493968\n      - 3.074513604881\n      - 73.28988065196\n      - 54.66322643196\n      - 26.00799822147\n      - 7.789506147668\n      - 8.499506147668\n      - 10.93950614767\n      - 3.209942501544\n      - 69.78988065196\n      - 52.16322643196\n      - 26.70799822147\n 
     - 9.219506147668\n      - 8.129506147668\n      - 9.999506147668\n      - 1.519942501544\n      - 68.58988065196\n      - 53.26322643196\n      - 26.40799822147\n      - 9.249506147668\n      - 7.189506147668\n      - 8.689506147668\n      - 3.019942501544\n      - 69.78988065196\n      - 55.76322643196\n      - 27.30799822147\n      - 7.939506147668\n      - 3.209942501544\n      - 8.129506147668\n      - 8.499506147668\n      - 3.019942501544\n      - 68.58988065196\n      - 52.26322643196\n      - 40.50586460657\n      - 25.50799822147\n      - 16.41235765092\n      - 16.78235765092\n      - 13.65241143766\n      - 9.329506147668\n      - 7.541813269635\n      - 6.010338273791\n      - 4.249942501544\n      - 67.58988065196\n      - 54.26322643196\n      - 41.50586460657\n      - 28.70799822147\n      - 14.21235765092\n      - 14.53235765092\n      - 14.14241143766\n      - 8.129506147668\n      - 6.711813269635\n      - 4.320338273791\n      - 4.999942501544\n      - 62.28988065196\n      - 51.76322643196\n      - 41.20586460657\n      - 29.30799822147\n      - 15.21235765092\n      - 15.88235765092\n      - 15.83241143766\n      - 11.01950614767\n      - 4.461813269635\n      - 5.560338273791\n      - 7.699942501544\n      - 69.78988065196\n      - 58.03054388289\n      - 54.26322643196\n      - 50.48823556867\n      - 40.40586460657\n      - 30.92009030556\n      - 14.7321687762\n      - 15.6021687762\n      - 8.984441352835\n      - 5.298755329368\n      - 3.843966840337\n      - 1.927625885276\n      - 1.716195356425\n      - 1.175228493968\n      - 1.199513604881\n      - 19.56950614767\n      - 21.13950614767\n      - 13.44950614767\n      - 7.189506147668\n      - 7.449506147668\n      - 11.49950614767\n      - 62.58988065196\n      - 46.96322643196\n      - 40.70586460657\n      - 33.30799822147\n      - 24.01235765092\n      - 18.53241143766\n      - 23.83235765092\n      - 18.64241143766\n      - 13.07950614767\n      - 7.841813269635\n      - 
9.080338273791\n      - 6.319942501544\n      - 72.48988065196\n      - 53.56322643196\n      - 40.20586460657\n      - 22.50799822147\n      - 18.21235765092\n      - 12.53241143766\n      - 18.77235765092\n      - 13.50241143766\n      - 11.61950614767\n      - 5.031813269635\n      - 4.880338273791\n      - 6.089942501544\n      - 58.48988065196\n      - 51.46322643196\n      - 33.50586460657\n      - 24.20799822147\n      - 19.91235765092\n      - 11.13241143766\n      - 20.20235765092\n      - 11.78241143766\n      - 8.729506147668\n      - 4.911813269635\n      - 4.210338273791\n      - 4.219942501544\n      - 9.069506147668\n      - 72.78988065196\n      - 56.76322643196\n      - 29.00799822147\n      - 8.689506147668\n      - 2.449942501544\n      - 8.319506147668\n      - 2.639942501544\n      - 10.56950614767\n      - 9.999506147668\n      - 11.01950614767\n      - 19.37150307276\n      - 19.82150307276\n      - 73.48988065196\n      - 53.86322643196\n      - 23.67150307276\n      - 23.68150307276\n      - 12.28444135284\n      - 6.473966840337\n      - 22.27150307276\n      - 22.56150307276\n      - 73.08988065196\n      - 53.16322643196\n      - 11.76444135284\n      - 6.133966840337\n      - 22.77150307276\n      - 22.82150307276\n  -   - 78.17391291542\n      - 66.8366728159\n      - 54.32599616455\n      - 56.48754438985\n      - 49.80586460657\n      - 37.60220537333\n      - 27.26395828245\n      - 20.80723297571\n      - 27.21395828245\n      - 20.98223297571\n      - 15.60547682225\n      - 12.11967202128\n      - 10.23248733714\n      - 8.30080219923\n      - 6.80055773407\n      - 6.572919168714\n      - 7.365975022026\n      - 75.87391291542\n      - 65.0366728159\n      - 61.72599616455\n      - 55.18754438985\n      - 46.70586460657\n      - 33.50220537333\n      - 25.46395828245\n      - 18.60723297571\n      - 25.33895828245\n      - 19.33223297571\n      - 15.38047682225\n      - 12.56967202128\n      - 8.094987337144\n      - 
8.03830219923\n      - 6.98805773407\n      - 7.697919168714\n      - 5.453475022026\n      - 65.27391291542\n      - 67.1366728159\n      - 53.92599616455\n      - 48.78754438985\n      - 45.90586460657\n      - 36.80220537333\n      - 22.96395828245\n      - 17.60723297571\n      - 23.01395828245\n      - 18.20723297571\n      - 14.33047682225\n      - 10.84467202128\n      - 9.182487337144\n      - 7.47580219923\n      - 6.20055773407\n      - 5.335419168714\n      - 5.303475022026\n      - 59.37391291542\n      - 55.4366728159\n      - 50.92599616455\n      - 47.08754438985\n      - 43.00586460657\n      - 35.30220537333\n      - 25.56395828245\n      - 17.60723297571\n      - 25.52645828245\n      - 18.35723297571\n      - 14.63047682225\n      - 11.51967202128\n      - 9.519987337144\n      - 7.06330219923\n      - 6.42555773407\n      - 5.185419168714\n      - 4.965975022026\n      - 66.77391291542\n      - 52.52599616455\n      - 27.88429931353\n      - 10.52352617863\n      - 11.23352617863\n      - 13.67352617863\n      - 5.021362784819\n      - 63.27391291542\n      - 50.02599616455\n      - 28.58429931353\n      - 11.95352617863\n      - 10.86352617863\n      - 12.73352617863\n      - 3.331362784819\n      - 62.07391291542\n      - 51.12599616455\n      - 28.28429931353\n      - 11.98352617863\n      - 9.923526178634\n      - 11.42352617863\n      - 4.831362784819\n      - 63.27391291542\n      - 53.62599616455\n      - 29.18429931353\n      - 10.67352617863\n      - 5.021362784819\n      - 10.86352617863\n      - 11.23352617863\n      - 4.831362784819\n      - 62.07391291542\n      - 50.12599616455\n      - 40.50586460657\n      - 27.38429931353\n      - 18.95079727771\n      - 19.32079727771\n      - 16.391415976\n      - 12.06352617863\n      - 10.02497402658\n      - 8.153464959245\n      - 6.061362784819\n      - 61.07391291542\n      - 52.12599616455\n      - 41.50586460657\n      - 30.58429931353\n      - 16.75079727771\n      - 17.07079727771\n  
    - 16.881415976\n      - 10.86352617863\n      - 9.194974026576\n      - 6.463464959245\n      - 6.811362784819\n      - 55.77391291542\n      - 49.62599616455\n      - 41.20586460657\n      - 31.18429931353\n      - 17.75079727771\n      - 18.42079727771\n      - 18.571415976\n      - 13.75352617863\n      - 6.944974026576\n      - 7.703464959245\n      - 9.511362784819\n      - 63.27391291542\n      - 54.1366728159\n      - 52.12599616455\n      - 49.58754438985\n      - 40.40586460657\n      - 32.10220537333\n      - 17.40723297571\n      - 18.27723297571\n      - 11.73797682225\n      - 7.989672021276\n      - 6.404987337144\n      - 4.32830219923\n      - 3.94555773407\n      - 3.232919168714\n      - 3.090975022026\n      - 22.30352617863\n      - 23.87352617863\n      - 16.18352617863\n      - 9.923526178634\n      - 10.18352617863\n      - 14.23352617863\n      - 56.07391291542\n      - 44.82599616455\n      - 40.70586460657\n      - 35.18429931353\n      - 26.55079727771\n      - 21.271415976\n      - 26.37079727771\n      - 21.381415976\n      - 15.81352617863\n      - 10.32497402658\n      - 11.22346495925\n      - 8.131362784819\n      - 65.97391291542\n      - 51.42599616455\n      - 40.20586460657\n      - 24.38429931353\n      - 20.75079727771\n      - 15.271415976\n      - 21.31079727771\n      - 16.241415976\n      - 14.35352617863\n      - 7.514974026576\n      - 7.023464959245\n      - 7.901362784819\n      - 51.97391291542\n      - 49.32599616455\n      - 33.50586460657\n      - 26.08429931353\n      - 22.45079727771\n      - 13.871415976\n      - 22.74079727771\n      - 14.521415976\n      - 11.46352617863\n      - 7.394974026576\n      - 6.353464959245\n      - 6.031362784819\n      - 11.80352617863\n      - 66.27391291542\n      - 54.62599616455\n      - 30.88429931353\n      - 11.42352617863\n      - 4.261362784819\n      - 11.05352617863\n      - 4.451362784819\n      - 13.30352617863\n      - 12.73352617863\n      - 13.75352617863\n     
 - 21.66395828245\n      - 22.11395828245\n      - 66.97391291542\n      - 51.72599616455\n      - 25.96395828245\n      - 25.97395828245\n      - 15.03797682225\n      - 9.034987337144\n      - 24.56395828245\n      - 24.85395828245\n      - 66.57391291542\n      - 51.02599616455\n      - 14.51797682225\n      - 8.694987337144\n      - 25.06395828245\n      - 25.11395828245\n  -   - 122.1511527598\n      - 122.6211240639\n      - 156.1508499375\n      - -599.1450584808\n      - -12.38948471101\n      - 19.33755088278\n      - 21.40590723345\n      - 17.86925432536\n      - 21.35590723345\n      - 18.04425432536\n      - 13.84616571438\n      - 10.95881863539\n      - 9.418115456519\n      - 7.704952567843\n      - 6.351080428381\n      - 6.22595148445\n      - 7.093292608367\n      - 119.8511527598\n      - 120.8211240639\n      - 163.5508499375\n      - -600.4450584808\n      - -15.48948471101\n      - 15.23755088278\n      - 19.60590723345\n      - 15.66925432536\n      - 19.48090723345\n      - 16.39425432536\n      - 13.62116571438\n      - 11.40881863539\n      - 7.280615456519\n      - 7.442452567843\n      - 6.538580428381\n      - 7.35095148445\n      - 5.180792608367\n      - 109.2511527598\n      - 122.9211240639\n      - 155.7508499375\n      - -606.8450584808\n      - -16.28948471101\n      - 18.53755088278\n      - 17.10590723345\n      - 14.66925432536\n      - 17.15590723345\n      - 15.26925432536\n      - 12.57116571438\n      - 9.683818635389\n      - 8.368115456519\n      - 6.879952567843\n      - 5.751080428381\n      - 4.98845148445\n      - 5.030792608367\n      - 103.3511527598\n      - 111.2211240639\n      - 152.7508499375\n      - -608.5450584808\n      - -19.18948471101\n      - 17.03755088278\n      - 19.70590723345\n      - 14.66925432536\n      - 19.66840723345\n      - 15.41925432536\n      - 12.87116571438\n      - 10.35881863539\n      - 8.705615456519\n      - 6.467452567843\n      - 5.976080428381\n      - 4.83845148445\n      - 
4.693292608367\n      - 110.7511527598\n      - 154.3508499375\n      - 18.46995434894\n      - 9.107714197815\n      - 9.817714197815\n      - 12.25771419782\n      - 4.778284133588\n      - 107.2511527598\n      - 151.8508499375\n      - 19.16995434894\n      - 10.53771419782\n      - 9.447714197815\n      - 11.31771419782\n      - 3.088284133588\n      - 106.0511527598\n      - 152.9508499375\n      - 18.86995434894\n      - 10.56771419782\n      - 8.507714197815\n      - 10.00771419782\n      - 4.588284133588\n      - 107.2511527598\n      - 155.4508499375\n      - 19.76995434894\n      - 9.257714197815\n      - 4.778284133588\n      - 9.447714197815\n      - 9.817714197815\n      - 4.588284133588\n      - 106.0511527598\n      - 151.9508499375\n      - -21.68948471101\n      - 17.96995434894\n      - 14.92797213765\n      - 15.29797213765\n      - 14.15288063203\n      - 10.64771419782\n      - 9.331447324355\n      - 7.759548240156\n      - 5.818284133588\n      - 105.0511527598\n      - 153.9508499375\n      - -20.68948471101\n      - 21.16995434894\n      - 12.72797213765\n      - 13.04797213765\n      - 14.64288063203\n      - 9.447714197815\n      - 8.501447324355\n      - 6.069548240156\n      - 6.568284133588\n      - 99.75115275983\n      - 151.4508499375\n      - -20.98948471101\n      - 21.76995434894\n      - 13.72797213765\n      - 14.39797213765\n      - 16.33288063203\n      - 12.33771419782\n      - 6.251447324355\n      - 7.309548240156\n      - 9.268284133588\n      - 107.2511527598\n      - 109.9211240639\n      - 153.9508499375\n      - -606.0450584808\n      - -21.78948471101\n      - 13.83755088278\n      - 14.46925432536\n      - 15.33925432536\n      - 9.978665714379\n      - 6.828818635389\n      - 5.590615456519\n      - 3.732452567843\n      - 3.496080428381\n      - 2.88595148445\n      - 2.818292608367\n      - 20.88771419782\n      - 22.45771419782\n      - 14.76771419782\n      - 8.507714197815\n      - 8.767714197815\n      - 
12.81771419782\n      - 100.0511527598\n      - 146.6508499375\n      - -21.48948471101\n      - 25.76995434894\n      - 22.52797213765\n      - 19.03288063203\n      - 22.34797213765\n      - 19.14288063203\n      - 14.39771419782\n      - 9.631447324355\n      - 10.82954824016\n      - 7.888284133588\n      - 109.9511527598\n      - 153.2508499375\n      - -21.98948471101\n      - 14.96995434894\n      - 16.72797213765\n      - 13.03288063203\n      - 17.28797213765\n      - 14.00288063203\n      - 12.93771419782\n      - 6.821447324355\n      - 6.629548240156\n      - 7.658284133588\n      - 95.95115275983\n      - 151.1508499375\n      - -28.68948471101\n      - 16.66995434894\n      - 18.42797213765\n      - 11.63288063203\n      - 18.71797213765\n      - 12.28288063203\n      - 10.04771419782\n      - 6.701447324355\n      - 5.959548240156\n      - 5.788284133588\n      - 10.38771419782\n      - 110.2511527598\n      - 156.4508499375\n      - 21.46995434894\n      - 10.00771419782\n      - 4.018284133588\n      - 9.637714197815\n      - 4.208284133588\n      - 11.88771419782\n      - 11.31771419782\n      - 12.33771419782\n      - 15.80590723345\n      - 16.25590723345\n      - 110.9511527598\n      - 153.5508499375\n      - 20.10590723345\n      - 20.11590723345\n      - 13.27866571438\n      - 8.220615456519\n      - 18.70590723345\n      - 18.99590723345\n      - 110.5511527598\n      - 152.8508499375\n      - 12.75866571438\n      - 7.880615456519\n      - 19.20590723345\n      - 19.25590723345\n  -   - -35.35804307658\n      - -53.29699942572\n      - -72.06021768605\n      - -76.24300976651\n      - -89.66597211166\n      - -117.5990460751\n      - -178.3813984168\n      - -314.1942358597\n      - -178.4313984168\n      - -314.0192358597\n      - -1385.074532566\n      - 503.9556946656\n      - 197.4559049595\n      - 115.7942913474\n      - 78.11722466414\n      - 57.55318477565\n      - 45.51113019711\n      - -37.65804307658\n      - 
-55.09699942572\n      - -64.66021768605\n      - -77.54300976651\n      - -92.76597211166\n      - -121.6990460751\n      - -180.1813984168\n      - -316.3942358597\n      - -180.3063984168\n      - -315.6692358597\n      - -1385.299532566\n      - 504.4056946656\n      - 195.3184049595\n      - 115.5317913474\n      - 78.30472466414\n      - 58.67818477565\n      - 43.59863019711\n      - -48.25804307658\n      - -52.99699942572\n      - -72.46021768605\n      - -83.94300976651\n      - -93.56597211166\n      - -118.3990460751\n      - -182.6813984168\n      - -317.3942358597\n      - -182.6313984168\n      - -316.7942358597\n      - -1386.349532566\n      - 502.6806946656\n      - 196.4059049595\n      - 114.9692913474\n      - 77.51722466414\n      - 56.31568477565\n      - 43.44863019711\n      - -54.15804307658\n      - -64.69699942572\n      - -75.46021768605\n      - -85.64300976651\n      - -96.46597211166\n      - -119.8990460751\n      - -180.0813984168\n      - -317.3942358597\n      - -180.1188984168\n      - -316.6442358597\n      - -1386.049532566\n      - 503.3556946656\n      - 196.7434049595\n      - 114.5567913474\n      - 77.74222466414\n      - 56.16568477565\n      - 43.11113019711\n      - -46.75804307658\n      - -73.86021768605\n      - -148.0932859294\n      - 1691.297253326\n      - 1692.007253326\n      - 1694.447253326\n      - 38.42217232425\n      - -50.25804307658\n      - -76.36021768605\n      - -147.3932859294\n      - 1692.727253326\n      - 1691.637253326\n      - 1693.507253326\n      - 36.73217232425\n      - -51.45804307658\n      - -75.26021768605\n      - -147.6932859294\n      - 1692.757253326\n      - 1690.697253326\n      - 1692.197253326\n      - 38.23217232425\n      - -50.25804307658\n      - -72.76021768605\n      - -146.7932859294\n      - 1691.447253326\n      - 38.42217232425\n      - 1691.637253326\n      - 1692.007253326\n      - 38.23217232425\n      - -51.45804307658\n      - -76.26021768605\n      - 
-98.96597211166\n      - -148.5932859294\n      - -233.0828036794\n      - -232.7128036794\n      - -508.8494431202\n      - 1692.837253326\n      - 148.258324502\n      - 68.02827286157\n      - 39.46217232425\n      - -52.45804307658\n      - -74.26021768605\n      - -97.96597211166\n      - -145.3932859294\n      - -235.2828036794\n      - -234.9628036794\n      - -508.3594431202\n      - 1691.637253326\n      - 147.428324502\n      - 66.33827286157\n      - 40.21217232425\n      - -57.75804307658\n      - -76.76021768605\n      - -98.26597211166\n      - -144.7932859294\n      - -234.2828036794\n      - -233.6128036794\n      - -506.6694431202\n      - 1694.527253326\n      - 145.178324502\n      - 67.57827286157\n      - 42.91217232425\n      - -50.25804307658\n      - -65.99699942572\n      - -74.26021768605\n      - -83.14300976651\n      - -99.06597211166\n      - -123.0990460751\n      - -317.5942358597\n      - -316.7242358597\n      - -1388.942032566\n      - 499.8256946656\n      - 193.6284049595\n      - 111.8217913474\n      - 75.26222466414\n      - 54.21318477565\n      - 41.23613019711\n      - 1703.077253326\n      - 1704.647253326\n      - 1696.957253326\n      - 1690.697253326\n      - 1690.957253326\n      - 1695.007253326\n      - -57.45804307658\n      - -81.56021768605\n      - -98.76597211166\n      - -140.7932859294\n      - -225.4828036794\n      - -503.9694431202\n      - -225.6628036794\n      - -503.8594431202\n      - 1696.587253326\n      - 148.558324502\n      - 71.09827286157\n      - 41.53217232425\n      - -47.55804307658\n      - -74.96021768605\n      - -99.26597211166\n      - -151.5932859294\n      - -231.2828036794\n      - -509.9694431202\n      - -230.7228036794\n      - -508.9994431202\n      - 1695.127253326\n      - 145.748324502\n      - 66.89827286157\n      - 41.30217232425\n      - -61.55804307658\n      - -77.06021768605\n      - -105.9659721117\n      - -149.8932859294\n      - -229.5828036794\n      - 
-511.3694431202\n      - -229.2928036794\n      - -510.7194431202\n      - 1692.237253326\n      - 145.628324502\n      - 66.22827286157\n      - 39.43217232425\n      - 1692.577253326\n      - -47.25804307658\n      - -71.76021768605\n      - -145.0932859294\n      - 1692.197253326\n      - 37.66217232425\n      - 1691.827253326\n      - 37.85217232425\n      - 1694.077253326\n      - 1693.507253326\n      - 1694.527253326\n      - -183.9813984168\n      - -183.5313984168\n      - -46.55804307658\n      - -74.66021768605\n      - -179.6813984168\n      - -179.6713984168\n      - -1385.642032566\n      - 196.2584049595\n      - -181.0813984168\n      - -180.7913984168\n      - -46.95804307658\n      - -75.36021768605\n      - -1386.162032566\n      - 195.9184049595\n      - -180.5813984168\n      - -180.5313984168\n  -   - 117.9473464966\n      - 104.1334190546\n      - 90.05657396185\n      - 91.2207789054\n      - 83.93061974583\n      - 71.30942158128\n      - 62.32199544308\n      - 59.88083883476\n      - 62.27199544308\n      - 60.05583883476\n      - 62.77642327569\n      - 76.9431054476\n      - 131.4875801385\n      - -2342.694563021\n      - -86.51002021515\n      - -37.31642203542\n      - -19.79861800279\n      - 115.6473464966\n      - 102.3334190546\n      - 97.45657396185\n      - 89.9207789054\n      - 80.83061974583\n      - 67.20942158128\n      - 60.52199544308\n      - 57.68083883476\n      - 60.39699544308\n      - 58.40583883476\n      - 62.55142327569\n      - 77.3931054476\n      - 129.3500801385\n      - -2342.957063021\n      - -86.32252021515\n      - -36.19142203542\n      - -21.71111800279\n      - 105.0473464966\n      - 104.4334190546\n      - 89.65657396185\n      - 83.5207789054\n      - 80.03061974583\n      - 70.50942158128\n      - 58.02199544308\n      - 56.68083883476\n      - 58.07199544308\n      - 57.28083883476\n      - 61.50142327569\n      - 75.6681054476\n      - 130.4375801385\n      - -2343.519563021\n      - 
-87.11002021515\n      - -38.55392203542\n      - -21.86111800279\n      - 99.14734649662\n      - 92.73341905458\n      - 86.65657396185\n      - 81.8207789054\n      - 77.13061974583\n      - 69.00942158128\n      - 60.62199544308\n      - 56.68083883476\n      - 60.58449544308\n      - 57.43083883476\n      - 61.80142327569\n      - 76.3431054476\n      - 130.7750801385\n      - -2343.932063021\n      - -86.88502021515\n      - -38.70392203542\n      - -22.19861800279\n      - 106.5473464966\n      - 88.25657396185\n      - 61.9476238727\n      - 64.62033506263\n      - 65.33033506263\n      - 67.77033506263\n      - -17.38684121661\n      - 103.0473464966\n      - 85.75657396185\n      - 62.6476238727\n      - 66.05033506263\n      - 64.96033506263\n      - 66.83033506263\n      - -19.07684121661\n      - 101.8473464966\n      - 86.85657396185\n      - 62.3476238727\n      - 66.08033506263\n      - 64.02033506263\n      - 65.52033506263\n      - -17.57684121661\n      - 103.0473464966\n      - 89.35657396185\n      - 63.2476238727\n      - 64.77033506263\n      - -17.38684121661\n      - 64.96033506263\n      - 65.33033506263\n      - -17.57684121661\n      - 101.8473464966\n      - 85.85657396185\n      - 74.63061974583\n      - 61.4476238727\n      - 55.64384266576\n      - 56.01384266576\n      - 58.81780905206\n      - 66.16033506263\n      - 253.6554490216\n      - -52.38720394238\n      - -16.34684121661\n      - 100.8473464966\n      - 87.85657396185\n      - 75.63061974583\n      - 64.6476238727\n      - 53.44384266576\n      - 53.76384266576\n      - 59.30780905206\n      - 64.96033506263\n      - 252.8254490216\n      - -54.07720394238\n      - -15.59684121661\n      - 95.54734649662\n      - 85.35657396185\n      - 75.33061974583\n      - 65.2476238727\n      - 54.44384266576\n      - 55.11384266576\n      - 60.99780905206\n      - 67.85033506263\n      - 250.5754490216\n      - -52.83720394238\n      - -12.89684121661\n      - 103.0473464966\n      
- 91.43341905458\n      - 87.85657396185\n      - 84.3207789054\n      - 74.53061974583\n      - 65.80942158128\n      - 56.48083883476\n      - 57.35083883476\n      - 58.90892327569\n      - 72.8131054476\n      - 127.6600801385\n      - -2346.667063021\n      - -89.36502021515\n      - -40.65642203542\n      - -24.07361800279\n      - 76.40033506263\n      - 77.97033506263\n      - 70.28033506263\n      - 64.02033506263\n      - 64.28033506263\n      - 68.33033506263\n      - 95.84734649662\n      - 80.55657396185\n      - 74.83061974583\n      - 69.2476238727\n      - 63.24384266576\n      - 63.69780905206\n      - 63.06384266576\n      - 63.80780905206\n      - 69.91033506263\n      - 253.9554490216\n      - -49.31720394238\n      - -14.27684121661\n      - 105.7473464966\n      - 87.15657396185\n      - 74.33061974583\n      - 58.4476238727\n      - 57.44384266576\n      - 57.69780905206\n      - 58.00384266576\n      - 58.66780905206\n      - 68.45033506263\n      - 251.1454490216\n      - -53.51720394238\n      - -14.50684121661\n      - 91.74734649662\n      - 85.05657396185\n      - 67.63061974583\n      - 60.1476238727\n      - 59.14384266576\n      - 56.29780905206\n      - 59.43384266576\n      - 56.94780905206\n      - 65.56033506263\n      - 251.0254490216\n      - -54.18720394238\n      - -16.37684121661\n      - 65.90033506263\n      - 106.0473464966\n      - 90.35657396185\n      - 64.9476238727\n      - 65.52033506263\n      - -18.14684121661\n      - 65.15033506263\n      - -17.95684121661\n      - 67.40033506263\n      - 66.83033506263\n      - 67.85033506263\n      - 56.72199544308\n      - 57.17199544308\n      - 106.7473464966\n      - 87.45657396185\n      - 61.02199544308\n      - 61.03199544308\n      - 62.20892327569\n      - 130.2900801385\n      - 59.62199544308\n      - 59.91199544308\n      - 106.3473464966\n      - 86.75657396185\n      - 61.68892327569\n      - 129.9500801385\n      - 60.12199544308\n      - 60.17199544308\n  -   - 
175.7616267494\n      - 135.815392655\n      - 107.427429421\n      - 99.44443456745\n      - 85.70608965926\n      - 64.3171217786\n      - 44.36460041182\n      - 32.95338522348\n      - 44.31460041182\n      - 33.12838522348\n      - 24.75298136325\n      - 19.273476213\n      - 15.97636612239\n      - 13.00317519799\n      - 10.70826764037\n      - 9.858997178816\n      - 10.15607036729\n      - 173.4616267494\n      - 134.015392655\n      - 114.827429421\n      - 98.14443456745\n      - 82.60608965926\n      - 60.2171217786\n      - 42.56460041182\n      - 30.75338522348\n      - 42.43960041182\n      - 31.47838522348\n      - 24.52798136325\n      - 19.723476213\n      - 13.83886612239\n      - 12.74067519799\n      - 10.89576764037\n      - 10.98399717882\n      - 8.243570367288\n      - 162.8616267494\n      - 136.115392655\n      - 107.027429421\n      - 91.74443456745\n      - 81.80608965926\n      - 63.5171217786\n      - 40.06460041182\n      - 29.75338522348\n      - 40.11460041182\n      - 30.35338522348\n      - 23.47798136325\n      - 17.998476213\n      - 14.92636612239\n      - 12.17817519799\n      - 10.10826764037\n      - 8.621497178816\n      - 8.093570367288\n      - 156.9616267494\n      - 124.415392655\n      - 104.027429421\n      - 90.04443456745\n      - 78.90608965926\n      - 62.0171217786\n      - 42.66460041182\n      - 29.75338522348\n      - 42.62710041182\n      - 30.50338522348\n      - 23.77798136325\n      - 18.673476213\n      - 15.26386612239\n      - 11.76567519799\n      - 10.33326764037\n      - 8.471497178816\n      - 7.756070367288\n      - 164.3616267494\n      - 105.627429421\n      - 48.8819398286\n      - 18.5818314891\n      - 19.2918314891\n      - 21.7318314891\n      - 7.60022447721\n      - 160.8616267494\n      - 103.127429421\n      - 49.5819398286\n      - 20.0118314891\n      - 18.9218314891\n      - 20.7918314891\n      - 5.91022447721\n      - 159.6616267494\n      - 104.227429421\n      - 49.2819398286\n  
    - 20.0418314891\n      - 17.9818314891\n      - 19.4818314891\n      - 7.41022447721\n      - 160.8616267494\n      - 106.727429421\n      - 50.1819398286\n      - 18.7318314891\n      - 7.60022447721\n      - 18.9218314891\n      - 19.2918314891\n      - 7.41022447721\n      - 159.6616267494\n      - 103.227429421\n      - 76.40608965926\n      - 48.3819398286\n      - 33.22986519451\n      - 33.59986519451\n      - 26.87225158213\n      - 20.1218314891\n      - 15.2111293494\n      - 11.73215109931\n      - 8.64022447721\n      - 158.6616267494\n      - 105.227429421\n      - 77.40608965926\n      - 51.5819398286\n      - 31.02986519451\n      - 31.34986519451\n      - 27.36225158213\n      - 18.9218314891\n      - 14.3811293494\n      - 10.04215109931\n      - 9.39022447721\n      - 153.3616267494\n      - 102.727429421\n      - 77.10608965926\n      - 52.1819398286\n      - 32.02986519451\n      - 32.69986519451\n      - 29.05225158213\n      - 21.8118314891\n      - 12.1311293494\n      - 11.28215109931\n      - 12.09022447721\n      - 160.8616267494\n      - 123.115392655\n      - 105.227429421\n      - 92.54443456745\n      - 76.30608965926\n      - 58.8171217786\n      - 29.55338522348\n      - 30.42338522348\n      - 20.88548136325\n      - 15.143476213\n      - 12.14886612239\n      - 9.030675197988\n      - 7.853267640371\n      - 6.518997178816\n      - 5.881070367288\n      - 30.3618314891\n      - 31.9318314891\n      - 24.2418314891\n      - 17.9818314891\n      - 18.2418314891\n      - 22.2918314891\n      - 153.6616267494\n      - 97.927429421\n      - 76.60608965926\n      - 56.1819398286\n      - 40.82986519451\n      - 31.75225158213\n      - 40.64986519451\n      - 31.86225158213\n      - 23.8718314891\n      - 15.5111293494\n      - 14.80215109931\n      - 10.71022447721\n      - 163.5616267494\n      - 104.527429421\n      - 76.10608965926\n      - 45.3819398286\n      - 35.02986519451\n      - 25.75225158213\n      - 35.58986519451\n     
 - 26.72225158213\n      - 22.4118314891\n      - 12.7011293494\n      - 10.60215109931\n      - 10.48022447721\n      - 149.5616267494\n      - 102.427429421\n      - 69.40608965926\n      - 47.0819398286\n      - 36.72986519451\n      - 24.35225158213\n      - 37.01986519451\n      - 25.00225158213\n      - 19.5218314891\n      - 12.5811293494\n      - 9.93215109931\n      - 8.61022447721\n      - 19.8618314891\n      - 163.8616267494\n      - 107.727429421\n      - 51.8819398286\n      - 19.4818314891\n      - 6.84022447721\n      - 19.1118314891\n      - 7.03022447721\n      - 21.3618314891\n      - 20.7918314891\n      - 21.8118314891\n      - 38.76460041182\n      - 39.21460041182\n      - 164.5616267494\n      - 104.827429421\n      - 43.06460041182\n      - 43.07460041182\n      - 24.18548136325\n      - 14.77886612239\n      - 41.66460041182\n      - 41.95460041182\n      - 164.1616267494\n      - 104.127429421\n      - 23.66548136325\n      - 14.43886612239\n      - 42.16460041182\n      - 42.21460041182\n  -   - 28.24095690087\n      - 21.59536620662\n      - 13.25712029593\n      - 19.07487076201\n      - 15.58993695617\n      - 8.655383440756\n      - 5.734068533914\n      - 4.165799626156\n      - 5.684068533914\n      - 4.340799626156\n      - 2.383275058766\n      - 1.39686578611\n      - 1.39855054723\n      - 0.9316549828049\n      - 0.5908378210265\n      - 1.296279986221\n      - 2.850605037275\n      - 25.94095690087\n      - 19.79536620662\n      - 20.65712029593\n      - 17.77487076201\n      - 12.48993695617\n      - 4.555383440756\n      - 3.934068533914\n      - 1.965799626156\n      - 3.809068533914\n      - 2.690799626156\n      - 2.158275058766\n      - 1.84686578611\n      - -0.7389494527701\n      - 0.6691549828049\n      - 0.7783378210265\n      - 2.421279986221\n      - 0.9381050372751\n      - 15.34095690087\n      - 21.89536620662\n      - 12.85712029593\n      - 11.37487076201\n      - 11.68993695617\n      - 7.855383440756\n     
 - 1.434068533914\n      - 0.9657996261561\n      - 1.484068533914\n      - 1.565799626156\n      - 1.108275058766\n      - 0.12186578611\n      - 0.3485505472299\n      - 0.1066549828049\n      - -0.009162178973513\n      - 0.05877998622137\n      - 0.7881050372751\n      - 9.440956900867\n      - 10.19536620662\n      - 9.857120295928\n      - 9.674870762006\n      - 8.789936956172\n      - 6.355383440756\n      - 4.034068533914\n      - 0.9657996261561\n      - 3.996568533914\n      - 1.715799626156\n      - 1.408275058766\n      - 0.79686578611\n      - 0.6860505472299\n      - -0.3058450171951\n      - 0.2158378210265\n      - -0.09122001377863\n      - 0.4506050372751\n      - 16.84095690087\n      - 11.45712029593\n      - 3.061569786218\n      - -1.357147755765\n      - -0.6471477557654\n      - 1.792852244235\n      - 0.8347758927937\n      - 13.34095690087\n      - 8.957120295928\n      - 3.761569786218\n      - 0.07285224423464\n      - -1.017147755765\n      - 0.8528522442346\n      - -0.8552241072063\n      - 12.14095690087\n      - 10.05712029593\n      - 3.461569786218\n      - 0.1028522442346\n      - -1.957147755765\n      - -0.4571477557654\n      - 0.6447758927937\n      - 13.34095690087\n      - 12.55712029593\n      - 4.361569786218\n      - -1.207147755765\n      - 0.8347758927937\n      - -1.017147755765\n      - -0.6471477557654\n      - 0.6447758927937\n      - 12.14095690087\n      - 9.057120295928\n      - 6.289936956172\n      - 2.561569786218\n      - 0.09812922077451\n      - 0.4681292207745\n      - 1.601218748851\n      - 0.1828522442346\n      - 1.967848278564\n      - 2.434721057271\n      - 1.874775892794\n      - 11.14095690087\n      - 11.05712029593\n      - 7.289936956172\n      - 5.761569786218\n      - -2.101870779225\n      - -1.781870779225\n      - 2.091218748851\n      - -1.017147755765\n      - 1.137848278564\n      - 0.744721057271\n      - 2.624775892794\n      - 5.840956900867\n      - 8.557120295928\n      - 
6.989936956172\n      - 6.361569786218\n      - -1.101870779225\n      - -0.4318707792255\n      - 3.781218748851\n      - 1.872852244235\n      - -1.112151721436\n      - 1.984721057271\n      - 5.324775892794\n      - 13.34095690087\n      - 8.895366206617\n      - 11.05712029593\n      - 12.17487076201\n      - 6.189936956172\n      - 3.155383440756\n      - 0.7657996261561\n      - 1.635799626156\n      - -1.484224941234\n      - -2.73313421389\n      - -2.42894945277\n      - -3.040845017195\n      - -2.264162178974\n      - -2.043720013779\n      - -1.424394962725\n      - 10.42285224423\n      - 11.99285224423\n      - 4.302852244235\n      - -1.957147755765\n      - -1.697147755765\n      - 2.352852244235\n      - 6.140956900867\n      - 3.757120295928\n      - 6.489936956172\n      - 10.36156978622\n      - 7.698129220775\n      - 6.481218748851\n      - 7.518129220775\n      - 6.591218748851\n      - 3.932852244235\n      - 2.267848278564\n      - 5.504721057271\n      - 3.944775892794\n      - 16.04095690087\n      - 10.35712029593\n      - 5.989936956172\n      - -0.4384302137822\n      - 1.898129220775\n      - 0.4812187488511\n      - 2.458129220775\n      - 1.451218748851\n      - 2.472852244235\n      - -0.5421517214358\n      - 1.304721057271\n      - 3.714775892794\n      - 2.040956900867\n      - 8.257120295928\n      - -0.7100630438283\n      - 1.261569786218\n      - 3.598129220775\n      - -0.9187812511489\n      - 3.888129220775\n      - -0.2687812511489\n      - -0.4171477557654\n      - -0.6621517214358\n      - 0.634721057271\n      - 1.844775892794\n      - -0.07714775576537\n      - 16.34095690087\n      - 13.55712029593\n      - 6.061569786218\n      - -0.4571477557654\n      - 0.07477589279366\n      - -0.8271477557654\n      - 0.2647758927937\n      - 1.422852244235\n      - 0.8528522442346\n      - 1.872852244235\n      - 0.1340685339144\n      - 0.5840685339144\n      - 17.04095690087\n      - 10.65712029593\n      - 
4.434068533914\n      - 4.444068533914\n      - 1.815775058766\n      - 0.2010505472299\n      - 3.034068533914\n      - 3.324068533914\n      - 16.64095690087\n      - 9.957120295928\n      - 1.295775058766\n      - -0.1389494527701\n      - 3.534068533914\n      - 3.584068533914\n  -   - 19.67905061421\n      - 12.78536491634\n      - 4.453409401868\n      - 10.42602658124\n      - 7.181651769754\n      - 0.8467383120783\n      - -0.8151544815029\n      - -1.28878727387\n      - -0.8651544815029\n      - -1.11378727387\n      - -2.178296849214\n      - -2.4437228135\n      - -1.857756175876\n      - -1.847417965917\n      - -1.795022214911\n      - -0.7628423028115\n      - 1.065115779582\n      - 17.37905061421\n      - 10.98536491634\n      - 11.85340940187\n      - 9.126026581237\n      - 4.081651769754\n      - -3.253261687922\n      - -2.615154481503\n      - -3.48878727387\n      - -2.740154481503\n      - -2.76378727387\n      - -2.403296849214\n      - -1.9937228135\n      - -3.995256175876\n      - -2.109917965917\n      - -1.607522214911\n      - 0.3621576971885\n      - -0.8473842204181\n      - 6.779050614207\n      - 13.08536491634\n      - 4.053409401868\n      - 2.726026581237\n      - 3.281651769754\n      - 0.04673831207827\n      - -5.115154481503\n      - -4.48878727387\n      - -5.065154481503\n      - -3.88878727387\n      - -3.453296849214\n      - -3.7187228135\n      - -2.907756175876\n      - -2.672417965917\n      - -2.395022214911\n      - -2.000342302812\n      - -0.9973842204181\n      - 0.8790506142075\n      - 1.385364916339\n      - 1.053409401868\n      - 1.026026581237\n      - 0.3816517697539\n      - -1.453261687922\n      - -2.515154481503\n      - -4.48878727387\n      - -2.552654481503\n      - -3.73878727387\n      - -3.153296849214\n      - -3.0437228135\n      - -2.570256175876\n      - -3.084917965917\n      - -2.170022214911\n      - -2.150342302812\n      - -1.334884220418\n      - 8.279050614207\n      - 
2.653409401868\n      - -4.10628705125\n      - -5.539020595211\n      - -4.829020595211\n      - -2.389020595211\n      - -0.8305062664824\n      - 4.779050614207\n      - 0.1534094018679\n      - -3.40628705125\n      - -4.109020595211\n      - -5.199020595211\n      - -3.329020595211\n      - -2.520506266482\n      - 3.579050614207\n      - 1.253409401868\n      - -3.70628705125\n      - -4.079020595211\n      - -6.139020595211\n      - -4.639020595211\n      - -1.020506266482\n      - 4.779050614207\n      - 3.753409401868\n      - -2.80628705125\n      - -5.389020595211\n      - -0.8305062664824\n      - -5.199020595211\n      - -4.829020595211\n      - -1.020506266482\n      - 3.579050614207\n      - 0.2534094018679\n      - -2.118348230246\n      - -4.60628705125\n      - -5.8778638078\n      - -5.5078638078\n      - -3.383083895331\n      - -3.999020595211\n      - -1.038044411845\n      - 0.2196196759168\n      - 0.2094937335176\n      - 2.579050614207\n      - 2.253409401868\n      - -1.118348230246\n      - -1.40628705125\n      - -8.0778638078\n      - -7.7578638078\n      - -2.893083895331\n      - -5.199020595211\n      - -1.868044411845\n      - -1.470380324083\n      - 0.9594937335176\n      - -2.720949385793\n      - -0.2465905981321\n      - -1.418348230246\n      - -0.8062870512504\n      - -7.0778638078\n      - -6.4078638078\n      - -1.203083895331\n      - -2.309020595211\n      - -4.118044411845\n      - -0.2303803240832\n      - 3.659493733518\n      - 4.779050614207\n      - 0.0853649163389\n      - 2.253409401868\n      - 3.526026581237\n      - -2.218348230246\n      - -4.653261687922\n      - -4.68878727387\n      - -3.81878727387\n      - -6.045796849214\n      - -6.5737228135\n      - -5.685256175876\n      - -5.819917965917\n      - -4.650022214911\n      - -4.102842302812\n      - -3.209884220418\n      - 6.240979404789\n      - 7.810979404789\n      - 0.1209794047887\n      - -6.139020595211\n      - -5.879020595211\n      - 
-1.829020595211\n      - -2.420949385793\n      - -5.046590598132\n      - -1.918348230246\n      - 3.19371294875\n      - 1.7221361922\n      - 1.496916104669\n      - 1.5421361922\n      - 1.606916104669\n      - -0.2490205952113\n      - -0.738044411845\n      - 3.289619675917\n      - 2.279493733518\n      - 7.479050614207\n      - 1.553409401868\n      - -2.418348230246\n      - -7.60628705125\n      - -4.0778638078\n      - -4.503083895331\n      - -3.5178638078\n      - -3.533083895331\n      - -1.709020595211\n      - -3.548044411845\n      - -0.9103803240832\n      - 2.049493733518\n      - -6.520949385793\n      - -0.5465905981321\n      - -9.118348230246\n      - -5.90628705125\n      - -2.3778638078\n      - -5.903083895331\n      - -2.0878638078\n      - -5.253083895331\n      - -4.599020595211\n      - -3.668044411845\n      - -1.580380324083\n      - 0.1794937335176\n      - -4.259020595211\n      - 7.779050614207\n      - 4.753409401868\n      - -1.10628705125\n      - -4.639020595211\n      - -1.590506266482\n      - -5.009020595211\n      - -1.400506266482\n      - -2.759020595211\n      - -3.329020595211\n      - -2.309020595211\n      - -6.415154481503\n      - -5.965154481503\n      - 8.479050614207\n      - 1.853409401868\n      - -2.115154481503\n      - -2.105154481503\n      - -2.745796849214\n      - -3.055256175876\n      - -3.515154481503\n      - -3.225154481503\n      - 8.079050614207\n      - 1.153409401868\n      - -3.265796849214\n      - -3.395256175876\n      - -3.015154481503\n      - -2.965154481503\n  -   - 1050.519509418\n      - 2633.521525076\n      - -3381.282955438\n      - -933.4604542894\n      - -516.3685328612\n      - -257.5208155114\n      - -113.8324082065\n      - -66.57519632661\n      - -113.8824082065\n      - -66.40019632661\n      - -44.86075536786\n      - -32.39601556788\n      - -23.86432104696\n      - -18.5440484742\n      - -14.76646254613\n      - -11.02441930035\n      - -7.169273972705\n      - 
1048.219509418\n      - 2631.721525076\n      - -3373.882955438\n      - -934.7604542894\n      - -519.4685328612\n      - -261.6208155114\n      - -115.6324082065\n      - -68.77519632661\n      - -115.7574082065\n      - -68.05019632661\n      - -45.08575536786\n      - -31.94601556788\n      - -26.00182104696\n      - -18.8065484742\n      - -14.57896254613\n      - -9.899419300347\n      - -9.081773972705\n      - 1037.619509418\n      - 2633.821525076\n      - -3381.682955438\n      - -941.1604542894\n      - -520.2685328612\n      - -258.3208155114\n      - -118.1324082065\n      - -69.77519632661\n      - -118.0824082065\n      - -69.17519632661\n      - -46.13575536786\n      - -33.67101556788\n      - -24.91432104696\n      - -19.3690484742\n      - -15.36646254613\n      - -12.26191930035\n      - -9.231773972705\n      - 1031.719509418\n      - 2622.121525076\n      - -3384.682955438\n      - -942.8604542894\n      - -523.1685328612\n      - -259.8208155114\n      - -115.5324082065\n      - -69.77519632661\n      - -115.5699082065\n      - -69.02519632661\n      - -45.83575536786\n      - -32.99601556788\n      - -24.57682104696\n      - -19.7815484742\n      - -15.14146254613\n      - -12.41191930035\n      - -9.569273972705\n      - 1039.119509418\n      - -3383.082955438\n      - -165.9142242952\n      - -41.04746332279\n      - -40.33746332279\n      - -37.89746332279\n      - -8.239694706637\n      - 1035.619509418\n      - -3385.582955438\n      - -165.2142242952\n      - -39.61746332279\n      - -40.70746332279\n      - -38.83746332279\n      - -9.929694706637\n      - 1034.419509418\n      - -3384.482955438\n      - -165.5142242952\n      - -39.58746332279\n      - -41.64746332279\n      - -40.14746332279\n      - -8.429694706637\n      - 1035.619509418\n      - -3381.982955438\n      - -164.6142242952\n      - -40.89746332279\n      - -8.239694706637\n      - -40.70746332279\n      - -40.33746332279\n      - -8.429694706637\n      - 
1034.419509418\n      - -3385.482955438\n      - -525.6685328612\n      - -166.4142242952\n      - -89.99572115575\n      - -89.62572115575\n      - -55.58585272365\n      - -39.50746332279\n      - -20.14157855679\n      - -11.29463725292\n      - -7.199694706637\n      - 1033.419509418\n      - -3383.482955438\n      - -524.6685328612\n      - -163.2142242952\n      - -92.19572115575\n      - -91.87572115575\n      - -55.09585272365\n      - -40.70746332279\n      - -20.97157855679\n      - -12.98463725292\n      - -6.449694706637\n      - 1028.119509418\n      - -3385.982955438\n      - -524.9685328612\n      - -162.6142242952\n      - -91.19572115575\n      - -90.52572115575\n      - -53.40585272365\n      - -37.81746332279\n      - -23.22157855679\n      - -11.74463725292\n      - -3.749694706637\n      - 1035.619509418\n      - 2620.821525076\n      - -3383.482955438\n      - -940.3604542894\n      - -525.7685328612\n      - -263.0208155114\n      - -69.97519632661\n      - -69.10519632661\n      - -48.72825536786\n      - -36.52601556788\n      - -27.69182104696\n      - -22.5165484742\n      - -17.62146254613\n      - -14.36441930035\n      - -11.4442739727\n      - -29.26746332279\n      - -27.69746332279\n      - -35.38746332279\n      - -41.64746332279\n      - -41.38746332279\n      - -37.33746332279\n      - 1028.419509418\n      - -3390.782955438\n      - -525.4685328612\n      - -158.6142242952\n      - -82.39572115575\n      - -50.70585272365\n      - -82.57572115575\n      - -50.59585272365\n      - -35.75746332279\n      - -19.84157855679\n      - -8.224637252918\n      - -5.129694706637\n      - 1038.319509418\n      - -3384.182955438\n      - -525.9685328612\n      - -169.4142242952\n      - -88.19572115575\n      - -56.70585272365\n      - -87.63572115575\n      - -55.73585272365\n      - -37.21746332279\n      - -22.65157855679\n      - -12.42463725292\n      - -5.359694706637\n      - 1024.319509418\n      - -3386.282955438\n      - 
-532.6685328612\n      - -167.7142242952\n      - -86.49572115575\n      - -58.10585272365\n      - -86.20572115575\n      - -57.45585272365\n      - -40.10746332279\n      - -22.77157855679\n      - -13.09463725292\n      - -7.229694706637\n      - -39.76746332279\n      - 1038.619509418\n      - -3380.982955438\n      - -162.9142242952\n      - -40.14746332279\n      - -8.999694706637\n      - -40.51746332279\n      - -8.809694706637\n      - -38.26746332279\n      - -38.83746332279\n      - -37.81746332279\n      - -119.4324082065\n      - -118.9824082065\n      - 1039.319509418\n      - -3383.882955438\n      - -115.1324082065\n      - -115.1224082065\n      - -45.42825536786\n      - -25.06182104696\n      - -116.5324082065\n      - -116.2424082065\n      - 1038.919509418\n      - -3384.582955438\n      - -45.94825536786\n      - -25.40182104696\n      - -116.0324082065\n      - -115.9824082065\n  -   - 20.49103808147\n      - 13.5209126197\n      - 5.123721658539\n      - 11.04005890825\n      - 7.746672181034\n      - 1.330664211629\n      - -0.447396135285\n      - -0.9996323863606\n      - -0.497396135285\n      - -0.8246323863606\n      - -1.945368320316\n      - -2.252654921573\n      - -1.698804957691\n      - -1.71369037214\n      - -1.681477631675\n      - -0.6656948221704\n      - 1.148772894321\n      - 18.19103808147\n      - 11.7209126197\n      - 12.52372165854\n      - 9.740058908254\n      - 4.646672181034\n      - -2.769335788371\n      - -2.247396135285\n      - -3.199632386361\n      - -2.372396135285\n      - -2.474632386361\n      - -2.170368320316\n      - -1.802654921573\n      - -3.836304957691\n      - -1.97619037214\n      - -1.493977631675\n      - 0.4593051778296\n      - -0.7637271056786\n      - 7.591038081469\n      - 13.8209126197\n      - 4.723721658539\n      - 3.340058908254\n      - 3.846672181034\n      - 0.5306642116288\n      - -4.747396135285\n      - -4.199632386361\n      - -4.697396135285\n      - -3.599632386361\n    
  - -3.220368320316\n      - -3.527654921573\n      - -2.748804957691\n      - -2.53869037214\n      - -2.281477631675\n      - -1.90319482217\n      - -0.9137271056786\n      - 1.691038081469\n      - 2.120912619699\n      - 1.723721658539\n      - 1.640058908254\n      - 0.9466721810337\n      - -0.9693357883712\n      - -2.147396135285\n      - -4.199632386361\n      - -2.184896135285\n      - -3.449632386361\n      - -2.920368320316\n      - -2.852654921573\n      - -2.411304957691\n      - -2.95119037214\n      - -2.056477631675\n      - -2.05319482217\n      - -1.251227105679\n      - 9.091038081469\n      - 3.323721658539\n      - -3.686566865047\n      - -5.328474711208\n      - -4.618474711208\n      - -2.178474711208\n      - -0.7527095065441\n      - 5.591038081469\n      - 0.8237216585391\n      - -2.986566865047\n      - -3.898474711208\n      - -4.988474711208\n      - -3.118474711208\n      - -2.442709506544\n      - 4.391038081469\n      - 1.923721658539\n      - -3.286566865047\n      - -3.868474711208\n      - -5.928474711208\n      - -4.428474711208\n      - -0.9427095065441\n      - 5.591038081469\n      - 4.423721658539\n      - -2.386566865047\n      - -5.178474711208\n      - -0.7527095065441\n      - -4.988474711208\n      - -4.618474711208\n      - -0.9427095065441\n      - 4.391038081469\n      - 0.9237216585391\n      - -1.553327818966\n      - -4.186566865047\n      - -5.552915024831\n      - -5.182915024831\n      - -3.124230683974\n      - -3.788474711208\n      - -0.892433332114\n      - 0.3245543770619\n      - 0.2872904934559\n      - 3.391038081469\n      - 2.923721658539\n      - -0.5533278189663\n      - -0.9865668650472\n      - -7.752915024831\n      - -7.432915024831\n      - -2.634230683974\n      - -4.988474711208\n      - -1.722433332114\n      - -1.365445622938\n      - 1.037290493456\n      - -1.908961918531\n      - 0.4237216585391\n      - -0.8533278189662\n      - -0.3865668650472\n      - -6.752915024831\n      - 
-6.082915024831\n      - -0.9442306839744\n      - -2.098474711208\n      - -3.972433332114\n      - -0.1254456229381\n      - 3.737290493456\n      - 5.591038081469\n      - 0.8209126196993\n      - 2.923721658539\n      - 4.140058908254\n      - -1.653327818966\n      - -4.169335788371\n      - -4.399632386361\n      - -3.529632386361\n      - -5.812868320316\n      - -6.382654921573\n      - -5.526304957691\n      - -5.68619037214\n      - -4.536477631675\n      - -4.00569482217\n      - -3.126227105679\n      - 6.451525288792\n      - 8.021525288792\n      - 0.3315252887919\n      - -5.928474711208\n      - -5.668474711208\n      - -1.618474711208\n      - -1.608961918531\n      - -4.376278341461\n      - -1.353327818966\n      - 3.613433134953\n      - 2.047084975169\n      - 1.755769316026\n      - 1.867084975169\n      - 1.865769316026\n      - -0.03847471120814\n      - -0.592433332114\n      - 3.394554377062\n      - 2.357290493456\n      - 8.291038081469\n      - 2.223721658539\n      - -1.853327818966\n      - -7.186566865047\n      - -3.752915024831\n      - -4.244230683974\n      - -3.192915024831\n      - -3.274230683974\n      - -1.498474711208\n      - -3.402433332114\n      - -0.8054456229381\n      - 2.127290493456\n      - -5.708961918531\n      - 0.1237216585391\n      - -8.553327818966\n      - -5.486566865047\n      - -2.052915024831\n      - -5.644230683974\n      - -1.762915024831\n      - -4.994230683974\n      - -4.388474711208\n      - -3.522433332114\n      - -1.475445622938\n      - 0.2572904934559\n      - -4.048474711208\n      - 8.591038081469\n      - 5.423721658539\n      - -0.6865668650472\n      - -4.428474711208\n      - -1.512709506544\n      - -4.798474711208\n      - -1.322709506544\n      - -2.548474711208\n      - -3.118474711208\n      - -2.098474711208\n      - -6.047396135285\n      - -5.597396135285\n      - 9.291038081469\n      - 2.523721658539\n      - -1.747396135285\n      - -1.737396135285\n      - 
-2.512868320316\n      - -2.896304957691\n      - -3.147396135285\n      - -2.857396135285\n      - 8.891038081469\n      - 1.823721658539\n      - -3.032868320316\n      - -3.236304957691\n      - -2.647396135285\n      - -2.597396135285\n  -   - 11.34572446599\n      - 12.71016226644\n      - 9.103954561983\n      - 17.84889991643\n      - 16.22846806054\n      - 11.28579424593\n      - 9.470899876731\n      - 7.817399827028\n      - 9.420899876731\n      - 7.992399827028\n      - 5.668390695887\n      - 4.274674199655\n      - 3.897111252284\n      - 3.096292463526\n      - 2.467532445354\n      - 2.926528936865\n      - 4.270267329615\n      - 9.045724465995\n      - 10.91016226644\n      - 16.50395456198\n      - 16.54889991643\n      - 13.12846806054\n      - 7.185794245929\n      - 7.670899876731\n      - 5.617399827028\n      - 7.545899876731\n      - 6.342399827028\n      - 5.443390695887\n      - 4.724674199655\n      - 1.759611252284\n      - 2.833792463526\n      - 2.655032445354\n      - 4.051528936865\n      - 2.357767329615\n      - -1.554275534005\n      - 13.01016226644\n      - 8.703954561983\n      - 10.14889991643\n      - 12.32846806054\n      - 10.48579424593\n      - 5.170899876731\n      - 4.617399827028\n      - 5.220899876731\n      - 5.217399827028\n      - 4.393390695887\n      - 2.999674199655\n      - 2.847111252284\n      - 2.271292463526\n      - 1.867532445354\n      - 1.689028936865\n      - 2.207767329615\n      - -7.454275534005\n      - 1.310162266438\n      - 5.703954561983\n      - 8.448899916427\n      - 9.428468060537\n      - 8.985794245929\n      - 7.770899876731\n      - 4.617399827028\n      - 7.733399876731\n      - 5.367399827028\n      - 4.693390695887\n      - 3.674674199655\n      - 3.184611252284\n      - 1.858792463526\n      - 2.092532445354\n      - 1.539028936865\n      - 1.870267329615\n      - -0.05427553400547\n      - 7.303954561983\n      - 6.51450668858\n      - 1.723185425401\n      - 2.433185425401\n    
  - 4.873185425401\n      - 2.160876526885\n      - -3.554275534005\n      - 4.803954561983\n      - 7.21450668858\n      - 3.153185425401\n      - 2.063185425401\n      - 3.933185425401\n      - 0.4708765268852\n      - -4.754275534005\n      - 5.903954561983\n      - 6.91450668858\n      - 3.183185425401\n      - 1.123185425401\n      - 2.623185425401\n      - 1.970876526885\n      - -3.554275534005\n      - 8.403954561983\n      - 7.81450668858\n      - 1.873185425401\n      - 2.160876526885\n      - 2.063185425401\n      - 2.433185425401\n      - 1.970876526885\n      - -4.754275534005\n      - 4.903954561983\n      - 6.928468060537\n      - 6.01450668858\n      - 3.857070725448\n      - 4.227070725448\n      - 5.083097819871\n      - 3.263185425401\n      - 4.293542360661\n      - 4.18335415557\n      - 3.200876526885\n      - -5.754275534005\n      - 6.903954561983\n      - 7.928468060537\n      - 9.21450668858\n      - 1.657070725448\n      - 1.977070725448\n      - 5.573097819871\n      - 2.063185425401\n      - 3.463542360661\n      - 2.49335415557\n      - 3.950876526885\n      - -11.05427553401\n      - 4.403954561983\n      - 7.628468060537\n      - 9.81450668858\n      - 2.657070725448\n      - 3.327070725448\n      - 7.263097819871\n      - 4.953185425401\n      - 1.213542360661\n      - 3.73335415557\n      - 6.650876526885\n      - -3.554275534005\n      - 0.01016226643847\n      - 6.903954561983\n      - 10.94889991643\n      - 6.828468060537\n      - 5.785794245929\n      - 4.417399827028\n      - 5.287399827028\n      - 1.800890695887\n      - 0.1446741996551\n      - 0.06961125228437\n      - -0.8762075364739\n      - -0.3874675546463\n      - -0.4134710631349\n      - -0.004732670384874\n      - 13.5031854254\n      - 15.0731854254\n      - 7.383185425401\n      - 1.123185425401\n      - 1.383185425401\n      - 5.433185425401\n      - -10.75427553401\n      - -0.3960454380168\n      - 7.128468060537\n      - 13.81450668858\n      - 
11.45707072545\n      - 9.963097819871\n      - 11.27707072545\n      - 10.07309781987\n      - 7.013185425401\n      - 4.593542360661\n      - 7.25335415557\n      - 5.270876526885\n      - -0.8542755340055\n      - 6.203954561983\n      - 6.628468060537\n      - 3.01450668858\n      - 5.657070725448\n      - 3.963097819871\n      - 6.217070725448\n      - 4.933097819871\n      - 5.553185425401\n      - 1.783542360661\n      - 3.05335415557\n      - 5.040876526885\n      - -14.85427553401\n      - 4.103954561983\n      - -0.07153193946291\n      - 4.71450668858\n      - 7.357070725448\n      - 2.563097819871\n      - 7.647070725448\n      - 3.213097819871\n      - 2.663185425401\n      - 1.663542360661\n      - 2.38335415557\n      - 3.170876526885\n      - 3.003185425401\n      - -0.5542755340055\n      - 9.403954561983\n      - 9.51450668858\n      - 2.623185425401\n      - 1.400876526885\n      - 2.253185425401\n      - 1.590876526885\n      - 4.503185425401\n      - 3.933185425401\n      - 4.953185425401\n      - 3.870899876731\n      - 4.320899876731\n      - 0.1457244659945\n      - 6.503954561983\n      - 8.170899876731\n      - 8.180899876731\n      - 5.100890695887\n      - 2.699611252284\n      - 6.770899876731\n      - 7.060899876731\n      - -0.2542755340055\n      - 5.803954561983\n      - 4.580890695887\n      - 2.359611252284\n      - 7.270899876731\n      - 7.320899876731\n  -   - 16.53673795362\n      - 12.64348483951\n      - 6.243066148003\n      - 13.47083954897\n      - 11.03975774331\n      - 5.537722754593\n      - 4.09957193688\n      - 3.220802215329\n      - 4.04957193688\n      - 3.395802215329\n      - 1.800447382832\n      - 1.020568885813\n      - 1.147168191638\n      - 0.7592201452716\n      - 0.470019408067\n      - 1.210124173449\n      - 2.788233035035\n      - 14.23673795362\n      - 10.84348483951\n      - 13.643066148\n      - 12.17083954897\n      - 7.939757743311\n      - 1.437722754593\n      - 2.29957193688\n      - 
1.020802215329\n      - 2.17457193688\n      - 1.745802215329\n      - 1.575447382832\n      - 1.470568885813\n      - -0.9903318083618\n      - 0.4967201452716\n      - 0.657519408067\n      - 2.335124173449\n      - 0.8757330350355\n      - 3.636737953619\n      - 12.94348483951\n      - 5.843066148002\n      - 5.770839548972\n      - 7.139757743311\n      - 4.737722754593\n      - -0.2004280631201\n      - 0.02080221532934\n      - -0.1504280631201\n      - 0.6208022153293\n      - 0.525447382832\n      - -0.2544311141866\n      - 0.09716819163822\n      - -0.06577985472837\n      - -0.129980591933\n      - -0.02737582655083\n      - 0.7257330350355\n      - -2.263262046381\n      - 1.24348483951\n      - 2.843066148002\n      - 4.070839548972\n      - 4.239757743311\n      - 3.237722754593\n      - 2.39957193688\n      - 0.02080221532934\n      - 2.36207193688\n      - 0.7708022153293\n      - 0.825447382832\n      - 0.4205688858134\n      - 0.4346681916382\n      - -0.4782798547284\n      - 0.09501940806695\n      - -0.1773758265508\n      - 0.3882330350355\n      - 5.136737953619\n      - 4.443066148003\n      - 0.8383235109659\n      - -1.82318329708\n      - -1.11318329708\n      - 1.32681670292\n      - 0.7814350074993\n      - 1.636737953619\n      - 1.943066148003\n      - 1.538323510966\n      - -0.39318329708\n      - -1.48318329708\n      - 0.38681670292\n      - -0.9085649925007\n      - 0.4367379536194\n      - 3.043066148003\n      - 1.238323510966\n      - -0.36318329708\n      - -2.42318329708\n      - -0.92318329708\n      - 0.5914350074993\n      - 1.636737953619\n      - 5.543066148003\n      - 2.138323510966\n      - -1.67318329708\n      - 0.7814350074993\n      - -1.48318329708\n      - -1.11318329708\n      - 0.5914350074993\n      - 0.4367379536194\n      - 2.043066148003\n      - 1.739757743311\n      - 0.3383235109659\n      - -1.132711355225\n      - -0.7627113552248\n      - 0.8640180395549\n      - -0.28318329708\n      - 
1.760258757134\n      - 2.332901512346\n      - 1.821435007499\n      - -0.5632620463806\n      - 4.043066148003\n      - 2.739757743311\n      - 3.538323510966\n      - -3.332711355225\n      - -3.012711355225\n      - 1.354018039555\n      - -1.48318329708\n      - 0.9302587571339\n      - 0.6429015123457\n      - 2.571435007499\n      - -5.863262046381\n      - 1.543066148003\n      - 2.439757743311\n      - 4.138323510966\n      - -2.332711355225\n      - -1.662711355225\n      - 3.044018039555\n      - 1.40681670292\n      - -1.319741242866\n      - 1.882901512346\n      - 5.271435007499\n      - 1.636737953619\n      - -0.05651516048991\n      - 4.043066148003\n      - 6.570839548972\n      - 1.639757743311\n      - 0.03772275459342\n      - -0.1791977846707\n      - 0.6908022153293\n      - -2.067052617168\n      - -3.109431114187\n      - -2.680331808362\n      - -3.213279854728\n      - -2.384980591933\n      - -2.129875826551\n      - -1.486766964965\n      - 9.95681670292\n      - 11.52681670292\n      - 3.83681670292\n      - -2.42318329708\n      - -2.16318329708\n      - 1.88681670292\n      - -5.563262046381\n      - -3.256933851997\n      - 1.939757743311\n      - 8.138323510966\n      - 6.467288644775\n      - 5.744018039555\n      - 6.287288644775\n      - 5.854018039555\n      - 3.46681670292\n      - 2.060258757134\n      - 5.402901512346\n      - 3.891435007499\n      - 4.336737953619\n      - 3.343066148002\n      - 1.439757743311\n      - -2.661676489034\n      - 0.6672886447752\n      - -0.2559819604451\n      - 1.227288644775\n      - 0.7140180395549\n      - 2.00681670292\n      - -0.7497412428661\n      - 1.202901512346\n      - 3.661435007499\n      - -9.663262046381\n      - 1.243066148003\n      - -5.260242256689\n      - -0.9616764890341\n      - 2.367288644775\n      - -1.655981960445\n      - 2.657288644775\n      - -1.005981960445\n      - -0.88318329708\n      - -0.8697412428661\n      - 0.5329015123457\n      - 1.791435007499\n   
   - -0.54318329708\n      - 4.636737953619\n      - 6.543066148003\n      - 3.838323510966\n      - -0.92318329708\n      - 0.02143500749933\n      - -1.29318329708\n      - 0.2114350074993\n      - 0.95681670292\n      - 0.38681670292\n      - 1.40681670292\n      - -1.50042806312\n      - -1.05042806312\n      - 5.336737953619\n      - 3.643066148003\n      - 2.79957193688\n      - 2.80957193688\n      - 1.232947382832\n      - -0.05033180836178\n      - 1.39957193688\n      - 1.68957193688\n      - 4.936737953619\n      - 2.943066148003\n      - 0.712947382832\n      - -0.3903318083618\n      - 1.89957193688\n      - 1.94957193688\nhistory_x:\n  -   - 0.15\n      - 0.008\n      - 0.01\n  -   - 0.25\n      - 0.008\n      - 0.01\n  -   - 0.15\n      - 0.108\n      - 0.01\n  -   - 0.15\n      - 0.008\n      - 0.11\n  -   - 0.1596177824551\n      - -0.07539624732067\n      - 0.08766385239892\n  -   - 0.2\n      - 0.008531162120637\n      - -0.002952684076318\n  -   - 0.1505141617677\n      - -0.04199731338289\n      - 0.009934485345754\n  -   - 0.1374618789969\n      - 0.007934485345754\n      - -0.03840238867598\n  -   - 0.1505250437069\n      - 0.007964908595663\n      - 0.01275913089388\n  -   - 0.149883507892\n      - 0.008098080768719\n      - 0.009146244784311\n  -   - 0.1716712756093\n      - -0.003385426549061\n      - 0.004854131368058\n  -   - 0.1499498551576\n      - 0.008185153997901\n      - 0.009255435636305\n  -   - 0.1486949409413\n      - 0.001680047032405\n      - 0.01940631659429\n  -   - 0.1494212312914\n      - 0.005607806220598\n      - 0.01308958287811\ninterpolation_set_expected:\n  -   - 0.0\n      - 0.0\n      - 0.0\n  -   - -0.0581032280076\n      - -0.3142207350554\n      - 0.5053386972944\n  -   - 0.04228990929916\n      - 0.2061878221842\n      - -0.3067317793442\n  -   - 1.780003545435\n      - -0.7194586215727\n      - -0.658836120804\n  -   - 0.03698212804807\n      - 0.1992219638497\n      - -0.3154670475037\n  -   - 
0.08830499324404\n      - 0.1885681900052\n      - -0.02643615873801\n  -   - 0.0874344381026\n      - -3.808409568279\n      - -0.2524078025883\nlinear_terms_residual_model:\n  -   - 723.7257702007\n      - 1733.358000128\n      - -2111.94123313\n      - -558.4561073307\n      - -289.1782671082\n      - -123.1292919174\n      - -32.05431142241\n      - 7.210065582227\n      - -32.05431142241\n      - 7.210065582217\n      - 128.5030418743\n      - -61.94075414453\n      - -28.86164804598\n      - 59.16327636\n      - -6.773101689526\n      - -5.359113834066\n      - -3.97310556824\n      - 723.7257702007\n      - 1733.358000128\n      - -2111.94123313\n      - -558.4561073307\n      - -289.1782671082\n      - -123.1292919174\n      - -32.0543114224\n      - 7.210065582226\n      - -32.0543114224\n      - 7.210065582223\n      - 128.5030418743\n      - -61.94075414453\n      - -28.86164804599\n      - 59.16327636\n      - -6.773101689526\n      - -5.359113834064\n      - -3.97310556824\n      - 723.7257702008\n      - 1733.358000128\n      - -2111.94123313\n      - -558.4561073307\n      - -289.1782671082\n      - -123.1292919174\n      - -32.0543114224\n      - 7.210065582221\n      - -32.0543114224\n      - 7.210065582222\n      - 128.5030418743\n      - -61.94075414453\n      - -28.86164804599\n      - 59.16327636\n      - -6.773101689525\n      - -5.359113834065\n      - -3.97310556824\n      - 723.7257702007\n      - 1733.358000128\n      - -2111.94123313\n      - -558.4561073307\n      - -289.1782671082\n      - -123.1292919174\n      - -32.05431142241\n      - 7.210065582221\n      - -32.0543114224\n      - 7.210065582226\n      - 128.5030418743\n      - -61.94075414453\n      - -28.86164804599\n      - 59.16327636\n      - -6.773101689524\n      - -5.359113834066\n      - -3.97310556824\n      - 723.7257702007\n      - -2111.94123313\n      - -63.20258122701\n      - -187.067690334\n      - -187.067690334\n      - -187.067690334\n      - -3.412910016525\n   
   - 723.7257702008\n      - -2111.94123313\n      - -63.20258122701\n      - -187.067690334\n      - -187.067690334\n      - -187.067690334\n      - -3.412910016525\n      - 723.7257702007\n      - -2111.94123313\n      - -63.20258122701\n      - -187.067690334\n      - -187.067690334\n      - -187.067690334\n      - -3.412910016525\n      - 723.7257702008\n      - -2111.94123313\n      - -63.20258122702\n      - -187.067690334\n      - -3.412910016525\n      - -187.067690334\n      - -187.067690334\n      - -3.412910016525\n      - 723.7257702007\n      - -2111.94123313\n      - -289.1782671082\n      - -63.20258122702\n      - -11.3061006031\n      - -11.3061006031\n      - 33.45212714558\n      - -187.067690334\n      - -26.50116734208\n      - -6.14459279585\n      - -3.412910016526\n      - 723.7257702007\n      - -2111.94123313\n      - -289.1782671082\n      - -63.202581227\n      - -11.30610060311\n      - -11.3061006031\n      - 33.45212714558\n      - -187.067690334\n      - -26.50116734208\n      - -6.14459279585\n      - -3.412910016526\n      - 723.7257702007\n      - -2111.94123313\n      - -289.1782671082\n      - -63.20258122701\n      - -11.30610060311\n      - -11.30610060311\n      - 33.45212714559\n      - -187.067690334\n      - -26.50116734207\n      - -6.144592795851\n      - -3.412910016524\n      - 723.7257702008\n      - 1733.358000128\n      - -2111.94123313\n      - -558.4561073307\n      - -289.1782671082\n      - -123.1292919174\n      - 7.210065582226\n      - 7.210065582224\n      - 128.5030418743\n      - -61.94075414453\n      - -28.86164804599\n      - 59.16327636\n      - -6.773101689526\n      - -5.359113834066\n      - -3.973105568241\n      - -187.067690334\n      - -187.067690334\n      - -187.067690334\n      - -187.067690334\n      - -187.067690334\n      - -187.067690334\n      - 723.7257702007\n      - -2111.94123313\n      - -289.1782671082\n      - -63.20258122702\n      - -11.30610060312\n      - 33.45212714558\n      
- -11.30610060311\n      - 33.45212714559\n      - -187.067690334\n      - -26.50116734208\n      - -6.14459279585\n      - -3.412910016525\n      - 723.7257702007\n      - -2111.94123313\n      - -289.1782671082\n      - -63.20258122701\n      - -11.30610060312\n      - 33.45212714558\n      - -11.30610060311\n      - 33.45212714558\n      - -187.067690334\n      - -26.50116734208\n      - -6.14459279585\n      - -3.412910016526\n      - 723.7257702007\n      - -2111.94123313\n      - -289.1782671082\n      - -63.20258122701\n      - -11.30610060311\n      - 33.45212714559\n      - -11.30610060312\n      - 33.45212714558\n      - -187.067690334\n      - -26.50116734208\n      - -6.14459279585\n      - -3.412910016527\n      - -187.067690334\n      - 723.7257702008\n      - -2111.94123313\n      - -63.20258122702\n      - -187.067690334\n      - -3.412910016525\n      - -187.067690334\n      - -3.412910016526\n      - -187.067690334\n      - -187.067690334\n      - -187.067690334\n      - -32.05431142241\n      - -32.05431142242\n      - 723.7257702007\n      - -2111.94123313\n      - -32.05431142241\n      - -32.05431142241\n      - 128.5030418743\n      - -28.86164804599\n      - -32.05431142241\n      - -32.05431142241\n      - 723.7257702007\n      - -2111.94123313\n      - 128.5030418743\n      - -28.86164804599\n      - -32.05431142241\n      - -32.05431142241\n  -   - -250.9049856979\n      - -713.548298078\n      - 991.3297761498\n      - 292.2346826038\n      - 167.0701872133\n      - 85.53108774506\n      - 35.98074162601\n      - 13.74284457971\n      - 35.98074162601\n      - 13.74284457972\n      - -38.33093307754\n      - 33.90567353701\n      - 18.14259771035\n      - 14.18583175432\n      - 9.62399002556\n      - 7.529398915525\n      - 6.047825533545\n      - -250.9049856979\n      - -713.548298078\n      - 991.3297761498\n      - 292.2346826038\n      - 167.0701872133\n      - 85.53108774507\n      - 35.980741626\n      - 13.74284457971\n      - 
35.980741626\n      - 13.74284457971\n      - -38.33093307755\n      - 33.90567353701\n      - 18.14259771035\n      - 14.18583175432\n      - 9.62399002556\n      - 7.529398915524\n      - 6.047825533545\n      - -250.904985698\n      - -713.5482980779\n      - 991.3297761498\n      - 292.2346826038\n      - 167.0701872133\n      - 85.53108774506\n      - 35.980741626\n      - 13.74284457971\n      - 35.980741626\n      - 13.74284457971\n      - -38.33093307754\n      - 33.90567353701\n      - 18.14259771035\n      - 14.18583175432\n      - 9.62399002556\n      - 7.529398915524\n      - 6.047825533545\n      - -250.9049856979\n      - -713.5482980779\n      - 991.3297761498\n      - 292.2346826038\n      - 167.0701872133\n      - 85.53108774506\n      - 35.98074162601\n      - 13.74284457971\n      - 35.980741626\n      - 13.74284457971\n      - -38.33093307754\n      - 33.90567353701\n      - 18.14259771035\n      - 14.18583175432\n      - 9.62399002556\n      - 7.529398915525\n      - 6.047825533545\n      - -250.9049856979\n      - 991.3297761498\n      - 53.62515459183\n      - 84.70726176104\n      - 84.70726176104\n      - 84.70726176104\n      - 5.458199688096\n      - -250.904985698\n      - 991.3297761498\n      - 53.62515459184\n      - 84.70726176104\n      - 84.70726176104\n      - 84.70726176104\n      - 5.458199688096\n      - -250.9049856979\n      - 991.3297761499\n      - 53.62515459184\n      - 84.70726176104\n      - 84.70726176104\n      - 84.70726176104\n      - 5.458199688096\n      - -250.904985698\n      - 991.3297761499\n      - 53.62515459184\n      - 84.70726176104\n      - 5.458199688096\n      - 84.70726176104\n      - 84.70726176104\n      - 5.458199688096\n      - -250.9049856979\n      - 991.3297761499\n      - 167.0701872133\n      - 53.62515459184\n      - 23.96018695733\n      - 23.96018695733\n      - 1.082529471011\n      - 84.70726176104\n      - 14.82949233042\n      - 8.477216658368\n      - 5.458199688097\n      - 
-250.9049856979\n      - 991.3297761499\n      - 167.0701872133\n      - 53.62515459183\n      - 23.96018695733\n      - 23.96018695733\n      - 1.082529471012\n      - 84.70726176104\n      - 14.82949233042\n      - 8.477216658368\n      - 5.458199688097\n      - -250.9049856979\n      - 991.3297761499\n      - 167.0701872133\n      - 53.62515459184\n      - 23.96018695733\n      - 23.96018695733\n      - 1.082529471004\n      - 84.70726176104\n      - 14.82949233042\n      - 8.477216658369\n      - 5.458199688096\n      - -250.904985698\n      - -713.548298078\n      - 991.3297761499\n      - 292.2346826038\n      - 167.0701872133\n      - 85.53108774506\n      - 13.74284457971\n      - 13.74284457971\n      - -38.33093307754\n      - 33.90567353701\n      - 18.14259771035\n      - 14.18583175432\n      - 9.623990025561\n      - 7.529398915525\n      - 6.047825533545\n      - 84.70726176104\n      - 84.70726176104\n      - 84.70726176104\n      - 84.70726176104\n      - 84.70726176104\n      - 84.70726176104\n      - -250.904985698\n      - 991.3297761498\n      - 167.0701872133\n      - 53.62515459184\n      - 23.96018695734\n      - 1.08252947101\n      - 23.96018695733\n      - 1.082529471007\n      - 84.70726176104\n      - 14.82949233042\n      - 8.477216658368\n      - 5.458199688096\n      - -250.9049856979\n      - 991.3297761498\n      - 167.0701872133\n      - 53.62515459184\n      - 23.96018695734\n      - 1.082529471008\n      - 23.96018695733\n      - 1.08252947101\n      - 84.70726176104\n      - 14.82949233042\n      - 8.477216658368\n      - 5.458199688096\n      - -250.904985698\n      - 991.3297761499\n      - 167.0701872133\n      - 53.62515459184\n      - 23.96018695733\n      - 1.082529471007\n      - 23.96018695734\n      - 1.082529471008\n      - 84.70726176104\n      - 14.82949233042\n      - 8.477216658368\n      - 5.458199688097\n      - 84.70726176104\n      - -250.904985698\n      - 991.3297761499\n      - 53.62515459184\n      - 
84.70726176104\n      - 5.458199688096\n      - 84.70726176104\n      - 5.458199688096\n      - 84.70726176104\n      - 84.70726176104\n      - 84.70726176104\n      - 35.98074162601\n      - 35.98074162601\n      - -250.904985698\n      - 991.3297761498\n      - 35.98074162601\n      - 35.98074162601\n      - -38.33093307755\n      - 18.14259771035\n      - 35.98074162601\n      - 35.98074162601\n      - -250.9049856979\n      - 991.3297761498\n      - -38.33093307754\n      - 18.14259771035\n      - 35.98074162601\n      - 35.98074162601\n  -   - -86.32920671897\n      - -249.9457650428\n      - 379.8255147156\n      - 124.7291668319\n      - 79.62656596282\n      - 49.85277513245\n      - 29.83406116421\n      - 19.43979000176\n      - 29.83406116421\n      - 19.43979000176\n      - -0.5081983391073\n      - 21.74505371124\n      - 14.64616559016\n      - 20.52866927178\n      - 10.12254826871\n      - 8.345670744827\n      - 7.034149386141\n      - -86.32920671897\n      - -249.9457650428\n      - 379.8255147156\n      - 124.7291668319\n      - 79.62656596282\n      - 49.85277513245\n      - 29.83406116421\n      - 19.43979000176\n      - 29.83406116421\n      - 19.43979000176\n      - -0.5081983391093\n      - 21.74505371124\n      - 14.64616559016\n      - 20.52866927178\n      - 10.12254826871\n      - 8.345670744827\n      - 7.034149386141\n      - -86.32920671898\n      - -249.9457650428\n      - 379.8255147156\n      - 124.7291668319\n      - 79.62656596282\n      - 49.85277513245\n      - 29.83406116421\n      - 19.43979000176\n      - 29.83406116421\n      - 19.43979000176\n      - -0.5081983391052\n      - 21.74505371124\n      - 14.64616559016\n      - 20.52866927178\n      - 10.12254826871\n      - 8.345670744827\n      - 7.034149386141\n      - -86.32920671897\n      - -249.9457650428\n      - 379.8255147156\n      - 124.7291668319\n      - 79.62656596282\n      - 49.85277513245\n      - 29.83406116421\n      - 19.43979000176\n      - 
29.83406116421\n      - 19.43979000176\n      - -0.5081983391036\n      - 21.74505371124\n      - 14.64616559016\n      - 20.52866927178\n      - 10.12254826871\n      - 8.345670744828\n      - 7.034149386141\n      - -86.32920671897\n      - 379.8255147156\n      - 37.38830735093\n      - 39.79428584982\n      - 39.79428584982\n      - 39.79428584981\n      - 6.487537441591\n      - -86.32920671898\n      - 379.8255147156\n      - 37.38830735093\n      - 39.79428584981\n      - 39.79428584982\n      - 39.79428584982\n      - 6.487537441591\n      - -86.32920671897\n      - 379.8255147156\n      - 37.38830735093\n      - 39.79428584982\n      - 39.79428584982\n      - 39.79428584982\n      - 6.487537441591\n      - -86.32920671898\n      - 379.8255147156\n      - 37.38830735094\n      - 39.79428584982\n      - 6.487537441591\n      - 39.79428584982\n      - 39.79428584982\n      - 6.487537441591\n      - -86.32920671897\n      - 379.8255147156\n      - 79.62656596282\n      - 37.38830735093\n      - 24.27471941816\n      - 24.27471941816\n      - 13.90491560132\n      - 39.79428584982\n      - 12.42064972558\n      - 9.15250698783\n      - 6.487537441591\n      - -86.32920671897\n      - 379.8255147156\n      - 79.62656596282\n      - 37.38830735093\n      - 24.27471941816\n      - 24.27471941816\n      - 13.90491560132\n      - 39.79428584982\n      - 12.42064972558\n      - 9.15250698783\n      - 6.487537441591\n      - -86.32920671896\n      - 379.8255147156\n      - 79.62656596282\n      - 37.38830735093\n      - 24.27471941816\n      - 24.27471941816\n      - 13.90491560132\n      - 39.79428584981\n      - 12.42064972558\n      - 9.152506987831\n      - 6.487537441591\n      - -86.32920671898\n      - -249.9457650428\n      - 379.8255147156\n      - 124.7291668319\n      - 79.62656596282\n      - 49.85277513245\n      - 19.43979000176\n      - 19.43979000176\n      - -0.5081983391036\n      - 21.74505371124\n      - 14.64616559016\n      - 20.52866927178\n     
 - 10.12254826871\n      - 8.345670744827\n      - 7.034149386141\n      - 39.79428584981\n      - 39.79428584981\n      - 39.79428584981\n      - 39.79428584982\n      - 39.79428584981\n      - 39.79428584982\n      - -86.32920671897\n      - 379.8255147156\n      - 79.62656596282\n      - 37.38830735094\n      - 24.27471941817\n      - 13.90491560132\n      - 24.27471941816\n      - 13.90491560132\n      - 39.79428584982\n      - 12.42064972558\n      - 9.15250698783\n      - 6.487537441591\n      - -86.32920671897\n      - 379.8255147156\n      - 79.62656596282\n      - 37.38830735093\n      - 24.27471941817\n      - 13.90491560132\n      - 24.27471941816\n      - 13.90491560132\n      - 39.79428584981\n      - 12.42064972558\n      - 9.15250698783\n      - 6.487537441591\n      - -86.32920671897\n      - 379.8255147156\n      - 79.62656596282\n      - 37.38830735093\n      - 24.27471941816\n      - 13.90491560132\n      - 24.27471941817\n      - 13.90491560132\n      - 39.79428584982\n      - 12.42064972558\n      - 9.15250698783\n      - 6.487537441591\n      - 39.79428584982\n      - -86.32920671898\n      - 379.8255147156\n      - 37.38830735094\n      - 39.79428584982\n      - 6.487537441591\n      - 39.79428584981\n      - 6.487537441591\n      - 39.79428584982\n      - 39.79428584982\n      - 39.79428584981\n      - 29.83406116421\n      - 29.83406116421\n      - -86.32920671897\n      - 379.8255147156\n      - 29.83406116421\n      - 29.83406116421\n      - -0.5081983391078\n      - 14.64616559016\n      - 29.83406116421\n      - 29.83406116421\n      - -86.32920671897\n      - 379.8255147156\n      - -0.5081983391052\n      - 14.64616559016\n      - 29.83406116421\n      - 29.83406116421\nmodel_indices:\n  - 13\n  - 12\n  - 11\n  - 10\n  - 9\n  - 8\n  - 6\nn_modelpoints: 7\nresiduals:\n  - 14.13989818606\n  - 10.00317449361\n  - 3.744918552079\n  - 10.77980934854\n  - 8.347596395984\n  - 2.920071415924\n  - 1.796288999298\n  - 1.248596846502\n  - 
1.746288999298\n  - 1.423596846502\n  - 0.03689425329096\n  - -0.3219611012974\n  - 0.0002647009432151\n  - -0.881408646043\n  - -0.4303702662499\n  - 0.4401190678521\n  - 2.121655004644\n  - 11.83989818606\n  - 8.203174493607\n  - 11.14491855208\n  - 9.479809348539\n  - 5.247596395983\n  - -1.179928584076\n  - -0.003711000701976\n  - -0.9514031534983\n  - -0.128711000702\n  - -0.2264031534983\n  - -0.188105746709\n  - 0.1280388987026\n  - -2.137235299057\n  - -1.143908646043\n  - -0.2428702662499\n  - 1.565119067852\n  - 0.2091550046441\n  - 1.239898186057\n  - 10.30317449361\n  - 3.344918552079\n  - 3.079809348539\n  - 4.447596395984\n  - 2.120071415924\n  - -2.503711000702\n  - -1.951403153498\n  - -2.453711000702\n  - -1.351403153498\n  - -1.238105746709\n  - -1.596961101297\n  - -1.049735299057\n  - -1.706408646043\n  - -1.03037026625\n  - -0.7973809321479\n  - 0.05915500464407\n  - -4.660101813943\n  - -1.396825506394\n  - 0.3449185520786\n  - 1.379809348539\n  - 1.547596395983\n  - 0.6200714159243\n  - 0.09628899929797\n  - -1.951403153498\n  - 0.05878899929802\n  - -1.201403153498\n  - -0.9381057467091\n  - -0.9219611012974\n  - -0.7122352990568\n  - -2.118908646043\n  - -0.8053702662499\n  - -0.9473809321479\n  - -0.2783449953559\n  - 2.739898186057\n  - 1.944918552078\n  - -1.63399667708\n  - -3.183999068009\n  - -2.473999068009\n  - -0.03399906800905\n  - 0.159797079125\n  - -0.7601018139425\n  - -0.5550814479215\n  - -0.9339966770796\n  - -1.753999068009\n  - -2.843999068009\n  - -0.9739990680091\n  - -1.530202920875\n  - -1.960101813943\n  - 0.5449185520779\n  - -1.23399667708\n  - -1.723999068009\n  - -3.783999068009\n  - -2.283999068009\n  - -0.03020292087504\n  - -0.7601018139425\n  - 3.044918552078\n  - -0.3339966770796\n  - -3.033999068009\n  - 0.159797079125\n  - -2.843999068009\n  - -2.473999068009\n  - -0.03020292087504\n  - -1.960101813943\n  - -0.4550814479221\n  - -0.9524036040165\n  - -2.13399667708\n  - -3.265462533007\n  - 
-2.895462533007\n  - -0.9678561331385\n  - -1.643999068009\n  - 0.7313676292134\n  - 1.502356325981\n  - 1.199797079125\n  - -2.960101813943\n  - 1.544918552078\n  - 0.04759639598348\n  - 1.06600332292\n  - -5.465462533007\n  - -5.145462533007\n  - -0.4778561331385\n  - -2.843999068009\n  - -0.09863237078665\n  - -0.1876436740187\n  - 1.949797079125\n  - -8.260101813943\n  - -0.9550814479221\n  - -0.2524036040165\n  - 1.66600332292\n  - -4.465462533007\n  - -3.795462533007\n  - 1.212143866862\n  - 0.04600093199099\n  - -2.348632370787\n  - 1.052356325981\n  - 4.649797079125\n  - -0.7601018139425\n  - -2.696825506393\n  - 1.544918552078\n  - 3.879809348539\n  - -1.052403604016\n  - -2.579928584076\n  - -2.151403153498\n  - -1.281403153498\n  - -3.830605746709\n  - -4.451961101297\n  - -3.827235299057\n  - -4.853908646043\n  - -3.28537026625\n  - -2.899880932148\n  - -2.153344995356\n  - 8.596000931991\n  - 10.16600093199\n  - 2.476000931991\n  - -3.783999068009\n  - -3.523999068009\n  - 0.5260009319909\n  - -7.960101813943\n  - -5.755081447921\n  - -0.7524036040165\n  - 5.66600332292\n  - 4.334537466993\n  - 3.912143866861\n  - 4.154537466993\n  - 4.022143866862\n  - 2.106000931991\n  - 1.031367629213\n  - 4.572356325981\n  - 3.269797079125\n  - 1.939898186057\n  - 0.8449185520786\n  - -1.252403604017\n  - -5.13399667708\n  - -1.465462533007\n  - -2.087856133139\n  - -0.9054625330074\n  - -1.117856133138\n  - 0.6460009319909\n  - -1.778632370787\n  - 0.3723563259813\n  - 3.039797079125\n  - -12.06010181394\n  - -1.255081447922\n  - -7.952403604016\n  - -3.43399667708\n  - 0.2345374669926\n  - -3.487856133138\n  - 0.5245374669926\n  - -2.837856133139\n  - -2.243999068009\n  - -1.898632370787\n  - -0.2976436740187\n  - 1.169797079125\n  - -1.903999068009\n  - 2.239898186058\n  - 4.044918552078\n  - 1.36600332292\n  - -2.283999068009\n  - -0.600202920875\n  - -2.653999068009\n  - -0.410202920875\n  - -0.4039990680091\n  - -0.9739990680091\n  - 0.04600093199099\n  - 
-3.803711000702\n  - -3.353711000702\n  - 2.939898186057\n  - 1.144918552079\n  - 0.496288999298\n  - 0.506288999298\n  - -0.5306057467091\n  - -1.197235299057\n  - -0.9037110007021\n  - -0.6137110007021\n  - 2.539898186057\n  - 0.4449185520785\n  - -1.050605746709\n  - -1.537235299057\n  - -0.4037110007021\n  - -0.3537110007021\nsquare_terms_residual_model:\n  -   -   - -266.0047978153\n          - -104.4240748307\n          - 113.152993159\n      -   - -104.4240748307\n          - -122.1523164991\n          - -86.07363307296\n      -   - 113.152993159\n          - -86.07363307296\n          - 16.31641633252\n  -   -   - -693.1880101263\n          - -282.0272651787\n          - 185.6089587299\n      -   - -282.0272651787\n          - -377.0062670135\n          - -199.8947029577\n      -   - 185.6089587299\n          - -199.8947029577\n          - 32.55049992815\n  -   -   - 912.6451934849\n          - 383.7119079265\n          - -122.603887661\n      -   - 383.7119079265\n          - 559.8400701698\n          - 213.0361667162\n      -   - -122.603887661\n          - 213.0361667162\n          - -29.3843898806\n  -   -   - 259.2154297329\n          - 115.0514237096\n          - -9.284737880698\n      -   - 115.0514237096\n          - 174.2494476566\n          - 49.11184063816\n      -   - -9.284737880698\n          - 49.11184063816\n          - -1.549507008423\n  -   -   - 143.5729232274\n          - 64.4900364164\n          - 14.94615333046\n      -   - 64.4900364164\n          - 105.0390768054\n          - 17.44042411675\n      -   - 14.94615333046\n          - 17.44042411675\n          - -0.4689104638637\n  -   -   - 68.45844162431\n          - 32.65296277576\n          - 25.04360055654\n      -   - 32.65296277576\n          - 59.00150809543\n          - 0.7592426398398\n      -   - 25.04360055654\n          - 0.7592426398398\n          - 0.9871650554024\n  -   -   - 18.93707066102\n          - 11.68664155631\n          - 28.03242287255\n      -   - 
11.68664155631\n          - 30.15119945711\n          - -6.606690969349\n      -   - 28.03242287255\n          - -6.606690969349\n          - 0.6417870082805\n  -   -   - -14.88013773366\n          - -1.672249752224\n          - 30.57297543811\n      -   - -1.672249752224\n          - 17.31712592596\n          - -10.5469407781\n      -   - 30.57297543811\n          - -10.5469407781\n          - -0.7358906114451\n  -   -   - 18.93707066102\n          - 11.68664155631\n          - 28.03242287255\n      -   - 11.68664155631\n          - 30.15119945711\n          - -6.606690969349\n      -   - 28.03242287255\n          - -6.606690969349\n          - 0.6417870082805\n  -   -   - -14.88013773366\n          - -1.672249752224\n          - 30.57297543811\n      -   - -1.672249752224\n          - 17.31712592596\n          - -10.5469407781\n      -   - 30.57297543811\n          - -10.5469407781\n          - -0.7358906114448\n  -   -   - -172.4440433656\n          - -58.30234348421\n          - 67.88122528662\n      -   - -58.30234348421\n          - -10.11688714194\n          - -39.74497829255\n      -   - 67.88122528662\n          - -39.74497829255\n          - -9.026701693999\n  -   -   - 87.41811326922\n          - 33.86514838909\n          - -5.240102033061\n      -   - 33.86514838909\n          - 29.49048109152\n          - 14.67250320165\n      -   - -5.240102033061\n          - 14.67250320165\n          - 4.833599928218\n  -   -   - 41.68746835262\n          - 18.32521144382\n          - 3.890270269493\n      -   - 18.32521144382\n          - 28.10847211295\n          - 9.269126205224\n      -   - 3.890270269493\n          - 9.269126205224\n          - 2.450369115355\n  -   -   - 2.547257721925\n          - -41.98520512744\n          - 20.72868343281\n      -   - -41.98520512744\n          - -302.2224872041\n          - -113.9339256901\n      -   - 20.72868343281\n          - -113.9339256901\n          - -2.362980518942\n  -   -   - 18.58084217516\n          - 
5.678085135653\n          - 6.602937980917\n      -   - 5.678085135653\n          - -5.592544704067\n          - -3.940958740204\n      -   - 6.602937980917\n          - -3.940958740204\n          - 0.9575365227362\n  -   -   - 14.66994772782\n          - 5.070231742209\n          - 5.910736412971\n      -   - 5.070231742209\n          - -0.4082883762073\n          - -1.880932217244\n      -   - 5.910736412971\n          - -1.880932217244\n          - 0.79286999734\n  -   -   - 11.79669162928\n          - 4.251219274511\n          - 5.316966607371\n      -   - 4.251219274511\n          - 0.8144741803491\n          - -1.252738735688\n      -   - 5.316966607371\n          - -1.252738735688\n          - 0.6496190714454\n  -   -   - -266.0047978153\n          - -104.4240748307\n          - 113.152993159\n      -   - -104.4240748307\n          - -122.1523164991\n          - -86.07363307296\n      -   - 113.152993159\n          - -86.07363307296\n          - 16.31641633252\n  -   -   - -693.1880101263\n          - -282.0272651787\n          - 185.6089587299\n      -   - -282.0272651787\n          - -377.0062670135\n          - -199.8947029577\n      -   - 185.6089587299\n          - -199.8947029577\n          - 32.55049992815\n  -   -   - 912.6451934849\n          - 383.7119079265\n          - -122.603887661\n      -   - 383.7119079265\n          - 559.8400701698\n          - 213.0361667162\n      -   - -122.603887661\n          - 213.0361667162\n          - -29.3843898806\n  -   -   - 259.2154297329\n          - 115.0514237096\n          - -9.284737880698\n      -   - 115.0514237096\n          - 174.2494476566\n          - 49.11184063816\n      -   - -9.284737880698\n          - 49.11184063816\n          - -1.549507008423\n  -   -   - 143.5729232274\n          - 64.4900364164\n          - 14.94615333046\n      -   - 64.4900364164\n          - 105.0390768054\n          - 17.44042411675\n      -   - 14.94615333046\n          - 17.44042411675\n          - 
-0.4689104638632\n  -   -   - 68.45844162431\n          - 32.65296277576\n          - 25.04360055654\n      -   - 32.65296277576\n          - 59.00150809543\n          - 0.7592426398398\n      -   - 25.04360055654\n          - 0.7592426398398\n          - 0.9871650554029\n  -   -   - 18.93707066102\n          - 11.68664155631\n          - 28.03242287255\n      -   - 11.68664155631\n          - 30.15119945711\n          - -6.606690969349\n      -   - 28.03242287255\n          - -6.606690969349\n          - 0.6417870082796\n  -   -   - -14.88013773366\n          - -1.672249752224\n          - 30.57297543811\n      -   - -1.672249752224\n          - 17.31712592596\n          - -10.5469407781\n      -   - 30.57297543811\n          - -10.5469407781\n          - -0.7358906114452\n  -   -   - 18.93707066102\n          - 11.68664155631\n          - 28.03242287255\n      -   - 11.68664155631\n          - 30.15119945711\n          - -6.606690969349\n      -   - 28.03242287255\n          - -6.606690969349\n          - 0.6417870082796\n  -   -   - -14.88013773366\n          - -1.672249752224\n          - 30.57297543811\n      -   - -1.672249752224\n          - 17.31712592596\n          - -10.5469407781\n      -   - 30.57297543811\n          - -10.5469407781\n          - -0.7358906114453\n  -   -   - -172.4440433656\n          - -58.30234348421\n          - 67.88122528662\n      -   - -58.30234348421\n          - -10.11688714194\n          - -39.74497829255\n      -   - 67.88122528662\n          - -39.74497829255\n          - -9.026701694\n  -   -   - 87.41811326922\n          - 33.86514838909\n          - -5.240102033061\n      -   - 33.86514838909\n          - 29.49048109152\n          - 14.67250320165\n      -   - -5.240102033061\n          - 14.67250320165\n          - 4.833599928218\n  -   -   - 41.68746835262\n          - 18.32521144382\n          - 3.890270269493\n      -   - 18.32521144382\n          - 28.10847211295\n          - 9.269126205224\n      -   - 
3.890270269493\n          - 9.269126205224\n          - 2.450369115355\n  -   -   - 2.547257721924\n          - -41.98520512744\n          - 20.72868343281\n      -   - -41.98520512744\n          - -302.2224872041\n          - -113.9339256901\n      -   - 20.72868343281\n          - -113.9339256901\n          - -2.362980518941\n  -   -   - 18.58084217516\n          - 5.678085135653\n          - 6.602937980917\n      -   - 5.678085135653\n          - -5.592544704067\n          - -3.940958740204\n      -   - 6.602937980917\n          - -3.940958740204\n          - 0.9575365227362\n  -   -   - 14.66994772782\n          - 5.070231742209\n          - 5.910736412971\n      -   - 5.070231742209\n          - -0.4082883762073\n          - -1.880932217244\n      -   - 5.910736412971\n          - -1.880932217244\n          - 0.79286999734\n  -   -   - 11.79669162928\n          - 4.251219274511\n          - 5.316966607371\n      -   - 4.251219274511\n          - 0.8144741803491\n          - -1.252738735688\n      -   - 5.316966607371\n          - -1.252738735688\n          - 0.6496190714454\n  -   -   - -266.0047978153\n          - -104.4240748307\n          - 113.152993159\n      -   - -104.4240748307\n          - -122.1523164991\n          - -86.07363307296\n      -   - 113.152993159\n          - -86.07363307296\n          - 16.31641633252\n  -   -   - -693.1880101263\n          - -282.0272651787\n          - 185.6089587299\n      -   - -282.0272651787\n          - -377.0062670135\n          - -199.8947029577\n      -   - 185.6089587299\n          - -199.8947029577\n          - 32.55049992815\n  -   -   - 912.6451934849\n          - 383.7119079265\n          - -122.603887661\n      -   - 383.7119079265\n          - 559.8400701698\n          - 213.0361667162\n      -   - -122.603887661\n          - 213.0361667162\n          - -29.3843898806\n  -   -   - 259.2154297329\n          - 115.0514237096\n          - -9.284737880698\n      -   - 115.0514237096\n          - 
174.2494476566\n          - 49.11184063816\n      -   - -9.284737880698\n          - 49.11184063816\n          - -1.549507008423\n  -   -   - 143.5729232274\n          - 64.4900364164\n          - 14.94615333046\n      -   - 64.4900364164\n          - 105.0390768054\n          - 17.44042411675\n      -   - 14.94615333046\n          - 17.44042411675\n          - -0.4689104638635\n  -   -   - 68.45844162431\n          - 32.65296277576\n          - 25.04360055654\n      -   - 32.65296277576\n          - 59.00150809543\n          - 0.7592426398398\n      -   - 25.04360055654\n          - 0.7592426398398\n          - 0.9871650554026\n  -   -   - 18.93707066102\n          - 11.68664155631\n          - 28.03242287255\n      -   - 11.68664155631\n          - 30.15119945711\n          - -6.606690969349\n      -   - 28.03242287255\n          - -6.606690969349\n          - 0.6417870082801\n  -   -   - -14.88013773366\n          - -1.672249752224\n          - 30.57297543811\n      -   - -1.672249752224\n          - 17.31712592596\n          - -10.5469407781\n      -   - 30.57297543811\n          - -10.5469407781\n          - -0.7358906114447\n  -   -   - 18.93707066102\n          - 11.68664155631\n          - 28.03242287255\n      -   - 11.68664155631\n          - 30.15119945711\n          - -6.606690969349\n      -   - 28.03242287255\n          - -6.606690969349\n          - 0.6417870082801\n  -   -   - -14.88013773366\n          - -1.672249752224\n          - 30.57297543811\n      -   - -1.672249752224\n          - 17.31712592596\n          - -10.5469407781\n      -   - 30.57297543811\n          - -10.5469407781\n          - -0.7358906114448\n  -   -   - -172.4440433656\n          - -58.30234348421\n          - 67.88122528662\n      -   - -58.30234348421\n          - -10.11688714194\n          - -39.74497829255\n      -   - 67.88122528662\n          - -39.74497829255\n          - -9.026701693999\n  -   -   - 87.41811326922\n          - 33.86514838909\n          - 
-5.240102033061\n      -   - 33.86514838909\n          - 29.49048109152\n          - 14.67250320165\n      -   - -5.240102033061\n          - 14.67250320165\n          - 4.833599928218\n  -   -   - 41.68746835262\n          - 18.32521144382\n          - 3.890270269493\n      -   - 18.32521144382\n          - 28.10847211295\n          - 9.269126205224\n      -   - 3.890270269493\n          - 9.269126205224\n          - 2.450369115355\n  -   -   - 2.547257721925\n          - -41.98520512744\n          - 20.72868343281\n      -   - -41.98520512744\n          - -302.2224872041\n          - -113.9339256901\n      -   - 20.72868343281\n          - -113.9339256901\n          - -2.362980518942\n  -   -   - 18.58084217516\n          - 5.678085135653\n          - 6.602937980917\n      -   - 5.678085135653\n          - -5.592544704067\n          - -3.940958740204\n      -   - 6.602937980917\n          - -3.940958740204\n          - 0.9575365227361\n  -   -   - 14.66994772782\n          - 5.070231742209\n          - 5.910736412971\n      -   - 5.070231742209\n          - -0.4082883762073\n          - -1.880932217244\n      -   - 5.910736412971\n          - -1.880932217244\n          - 0.79286999734\n  -   -   - 11.79669162928\n          - 4.251219274511\n          - 5.316966607371\n      -   - 4.251219274511\n          - 0.8144741803491\n          - -1.252738735688\n      -   - 5.316966607371\n          - -1.252738735688\n          - 0.6496190714454\n  -   -   - -266.0047978153\n          - -104.4240748307\n          - 113.152993159\n      -   - -104.4240748307\n          - -122.1523164991\n          - -86.07363307296\n      -   - 113.152993159\n          - -86.07363307296\n          - 16.31641633252\n  -   -   - -693.1880101263\n          - -282.0272651787\n          - 185.6089587299\n      -   - -282.0272651787\n          - -377.0062670135\n          - -199.8947029577\n      -   - 185.6089587299\n          - -199.8947029577\n          - 32.55049992815\n  -   -   - 
912.6451934849\n          - 383.7119079265\n          - -122.603887661\n      -   - 383.7119079265\n          - 559.8400701698\n          - 213.0361667162\n      -   - -122.603887661\n          - 213.0361667162\n          - -29.3843898806\n  -   -   - 259.2154297329\n          - 115.0514237096\n          - -9.284737880698\n      -   - 115.0514237096\n          - 174.2494476566\n          - 49.11184063816\n      -   - -9.284737880698\n          - 49.11184063816\n          - -1.549507008423\n  -   -   - 143.5729232274\n          - 64.4900364164\n          - 14.94615333046\n      -   - 64.4900364164\n          - 105.0390768054\n          - 17.44042411675\n      -   - 14.94615333046\n          - 17.44042411675\n          - -0.4689104638631\n  -   -   - 68.45844162431\n          - 32.65296277576\n          - 25.04360055654\n      -   - 32.65296277576\n          - 59.00150809543\n          - 0.7592426398398\n      -   - 25.04360055654\n          - 0.7592426398398\n          - 0.9871650554026\n  -   -   - 18.93707066102\n          - 11.68664155631\n          - 28.03242287255\n      -   - 11.68664155631\n          - 30.15119945711\n          - -6.606690969349\n      -   - 28.03242287255\n          - -6.606690969349\n          - 0.6417870082804\n  -   -   - -14.88013773366\n          - -1.672249752224\n          - 30.57297543811\n      -   - -1.672249752224\n          - 17.31712592596\n          - -10.5469407781\n      -   - 30.57297543811\n          - -10.5469407781\n          - -0.7358906114447\n  -   -   - 18.93707066102\n          - 11.68664155631\n          - 28.03242287255\n      -   - 11.68664155631\n          - 30.15119945711\n          - -6.606690969349\n      -   - 28.03242287255\n          - -6.606690969349\n          - 0.6417870082796\n  -   -   - -14.88013773366\n          - -1.672249752224\n          - 30.57297543811\n      -   - -1.672249752224\n          - 17.31712592596\n          - -10.5469407781\n      -   - 30.57297543811\n          - -10.5469407781\n    
      - -0.7358906114452\n  -   -   - -172.4440433656\n          - -58.30234348421\n          - 67.88122528662\n      -   - -58.30234348421\n          - -10.11688714194\n          - -39.74497829255\n      -   - 67.88122528662\n          - -39.74497829255\n          - -9.026701693998\n  -   -   - 87.41811326922\n          - 33.86514838909\n          - -5.240102033061\n      -   - 33.86514838909\n          - 29.49048109152\n          - 14.67250320165\n      -   - -5.240102033061\n          - 14.67250320165\n          - 4.833599928218\n  -   -   - 41.68746835262\n          - 18.32521144382\n          - 3.890270269493\n      -   - 18.32521144382\n          - 28.10847211295\n          - 9.269126205224\n      -   - 3.890270269493\n          - 9.269126205224\n          - 2.450369115355\n  -   -   - 2.547257721925\n          - -41.98520512744\n          - 20.72868343281\n      -   - -41.98520512744\n          - -302.2224872041\n          - -113.9339256901\n      -   - 20.72868343281\n          - -113.9339256901\n          - -2.362980518942\n  -   -   - 18.58084217516\n          - 5.678085135653\n          - 6.602937980917\n      -   - 5.678085135653\n          - -5.592544704067\n          - -3.940958740204\n      -   - 6.602937980917\n          - -3.940958740204\n          - 0.9575365227362\n  -   -   - 14.66994772782\n          - 5.070231742209\n          - 5.910736412971\n      -   - 5.070231742209\n          - -0.4082883762072\n          - -1.880932217244\n      -   - 5.910736412971\n          - -1.880932217244\n          - 0.79286999734\n  -   -   - 11.79669162928\n          - 4.251219274511\n          - 5.316966607371\n      -   - 4.251219274511\n          - 0.8144741803491\n          - -1.252738735688\n      -   - 5.316966607371\n          - -1.252738735688\n          - 0.6496190714454\n  -   -   - -266.0047978153\n          - -104.4240748307\n          - 113.152993159\n      -   - -104.4240748307\n          - -122.1523164991\n          - -86.07363307296\n      -   - 
113.152993159\n          - -86.07363307296\n          - 16.31641633252\n  -   -   - 912.6451934849\n          - 383.7119079265\n          - -122.603887661\n      -   - 383.7119079265\n          - 559.8400701698\n          - 213.0361667162\n      -   - -122.603887661\n          - 213.0361667162\n          - -29.3843898806\n  -   -   - 37.86067159491\n          - 19.63578725902\n          - 27.33833014146\n      -   - 19.63578725902\n          - 40.52405881727\n          - -4.310564613888\n      -   - 27.33833014146\n          - -4.310564613888\n          - 1.001499860058\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.8045838275\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.80458382749\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.8045838275\n  -   -   - 10.65126838649\n          - 3.886653870481\n          - 5.035741025472\n      -   - 3.886653870481\n          - 1.042562599207\n          - -1.080339920451\n      -   - 5.035741025472\n          - -1.080339920451\n          - 0.5902117049039\n  -   -   - -266.0047978153\n          - -104.4240748307\n          - 113.152993159\n      -   - -104.4240748307\n          - -122.1523164991\n          - -86.07363307296\n      -   - 113.152993159\n          - -86.07363307296\n          - 16.31641633252\n  -   -   - 912.6451934849\n          - 383.7119079265\n          - -122.603887661\n      -   - 383.7119079265\n          - 559.8400701698\n          - 
213.0361667162\n      -   - -122.603887661\n          - 213.0361667162\n          - -29.3843898806\n  -   -   - 37.86067159491\n          - 19.63578725902\n          - 27.33833014146\n      -   - 19.63578725902\n          - 40.52405881727\n          - -4.310564613888\n      -   - 27.33833014146\n          - -4.310564613888\n          - 1.001499860058\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.80458382749\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.8045838275\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.80458382749\n  -   -   - 10.65126838649\n          - 3.886653870481\n          - 5.035741025472\n      -   - 3.886653870481\n          - 1.042562599207\n          - -1.080339920451\n      -   - 5.035741025472\n          - -1.080339920451\n          - 0.590211704904\n  -   -   - -266.0047978153\n          - -104.4240748307\n          - 113.152993159\n      -   - -104.4240748307\n          - -122.1523164991\n          - -86.07363307296\n      -   - 113.152993159\n          - -86.07363307296\n          - 16.31641633252\n  -   -   - 912.6451934849\n          - 383.7119079265\n          - -122.603887661\n      -   - 383.7119079265\n          - 559.8400701698\n          - 213.0361667162\n      -   - -122.603887661\n          - 213.0361667162\n          - -29.38438988059\n  -   -   - 37.86067159491\n          - 19.63578725902\n          - 27.33833014146\n      -   - 19.63578725902\n          - 
40.52405881727\n          - -4.310564613888\n      -   - 27.33833014146\n          - -4.310564613888\n          - 1.001499860059\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.80458382749\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.80458382749\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.80458382749\n  -   -   - 10.65126838649\n          - 3.886653870481\n          - 5.035741025472\n      -   - 3.886653870481\n          - 1.042562599207\n          - -1.080339920451\n      -   - 5.035741025472\n          - -1.080339920451\n          - 0.590211704904\n  -   -   - -266.0047978153\n          - -104.4240748307\n          - 113.152993159\n      -   - -104.4240748307\n          - -122.1523164991\n          - -86.07363307296\n      -   - 113.152993159\n          - -86.07363307296\n          - 16.31641633252\n  -   -   - 912.6451934849\n          - 383.7119079265\n          - -122.603887661\n      -   - 383.7119079265\n          - 559.8400701698\n          - 213.0361667162\n      -   - -122.603887661\n          - 213.0361667162\n          - -29.38438988059\n  -   -   - 37.86067159491\n          - 19.63578725902\n          - 27.33833014146\n      -   - 19.63578725902\n          - 40.52405881727\n          - -4.310564613888\n      -   - 27.33833014146\n          - -4.310564613888\n          - 1.001499860059\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 
93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.8045838275\n  -   -   - 10.65126838649\n          - 3.886653870481\n          - 5.035741025472\n      -   - 3.886653870481\n          - 1.042562599207\n          - -1.080339920451\n      -   - 5.035741025472\n          - -1.080339920451\n          - 0.5902117049039\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.8045838275\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.80458382749\n  -   -   - 10.65126838649\n          - 3.886653870481\n          - 5.035741025472\n      -   - 3.886653870481\n          - 1.042562599207\n          - -1.080339920451\n      -   - 5.035741025472\n          - -1.080339920451\n          - 0.590211704904\n  -   -   - -266.0047978153\n          - -104.4240748307\n          - 113.152993159\n      -   - -104.4240748307\n          - -122.1523164991\n          - -86.07363307296\n      -   - 113.152993159\n          - -86.07363307296\n          - 16.31641633252\n  -   -   - 912.6451934849\n          - 383.7119079265\n          - -122.603887661\n      -   - 383.7119079265\n          - 559.8400701698\n          - 213.0361667162\n      -   - -122.603887661\n          - 213.0361667162\n          - -29.38438988059\n  -   -   - 143.5729232274\n          - 64.4900364164\n          - 14.94615333046\n      -   - 64.4900364164\n          - 105.0390768054\n          - 17.44042411675\n      -   - 14.94615333046\n          - 17.44042411675\n          - -0.4689104638635\n  -   -   - 37.86067159491\n          - 19.63578725902\n          - 
27.33833014146\n      -   - 19.63578725902\n          - 40.52405881727\n          - -4.310564613888\n      -   - 27.33833014146\n          - -4.310564613888\n          - 1.001499860059\n  -   -   - 3.158995638313\n          - 5.285050749783\n          - 28.72100787546\n      -   - 5.285050749783\n          - 23.11411940654\n          - -8.290517826993\n      -   - 28.72100787546\n          - -8.290517826993\n          - 0.0893332002871\n  -   -   - 3.158995638313\n          - 5.285050749783\n          - 28.72100787546\n      -   - 5.285050749783\n          - 23.11411940654\n          - -8.290517826993\n      -   - 28.72100787546\n          - -8.290517826993\n          - 0.0893332002872\n  -   -   - -46.24243692467\n          - -13.19929340666\n          - 36.398937508\n      -   - -13.19929340666\n          - 10.49974863698\n          - -15.59542019426\n      -   - 36.398937508\n          - -15.59542019426\n          - -2.340461710656\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.8045838275\n  -   -   - 34.65099735562\n          - 17.97663481639\n          - 4.117471070847\n      -   - 17.97663481639\n          - 42.36208423833\n          - 14.06237417874\n      -   - 4.117471070847\n          - 14.06237417874\n          - 2.203133844605\n  -   -   - 16.48276796572\n          - 5.469469601922\n          - 6.230099813495\n      -   - 5.469469601922\n          - -1.991487592538\n          - -2.545944896211\n      -   - 6.230099813495\n          - -2.545944896211\n          - 0.8761849272405\n  -   -   - 10.65126838649\n          - 3.886653870481\n          - 5.035741025472\n      -   - 3.886653870481\n          - 1.042562599207\n          - -1.080339920451\n      -   - 5.035741025472\n          - -1.080339920451\n          - 0.5902117049041\n  -   -   - -266.0047978153\n       
   - -104.4240748307\n          - 113.152993159\n      -   - -104.4240748307\n          - -122.1523164991\n          - -86.07363307296\n      -   - 113.152993159\n          - -86.07363307296\n          - 16.31641633252\n  -   -   - 912.6451934849\n          - 383.7119079265\n          - -122.603887661\n      -   - 383.7119079265\n          - 559.8400701698\n          - 213.0361667162\n      -   - -122.603887661\n          - 213.0361667162\n          - -29.38438988059\n  -   -   - 143.5729232274\n          - 64.4900364164\n          - 14.94615333046\n      -   - 64.4900364164\n          - 105.0390768054\n          - 17.44042411675\n      -   - 14.94615333046\n          - 17.44042411675\n          - -0.4689104638631\n  -   -   - 37.86067159491\n          - 19.63578725902\n          - 27.33833014146\n      -   - 19.63578725902\n          - 40.52405881727\n          - -4.310564613888\n      -   - 27.33833014146\n          - -4.310564613888\n          - 1.001499860058\n  -   -   - 3.158995638313\n          - 5.285050749783\n          - 28.72100787546\n      -   - 5.285050749783\n          - 23.11411940654\n          - -8.290517826993\n      -   - 28.72100787546\n          - -8.290517826993\n          - 0.08933320028759\n  -   -   - 3.158995638313\n          - 5.285050749783\n          - 28.72100787546\n      -   - 5.285050749783\n          - 23.11411940654\n          - -8.290517826993\n      -   - 28.72100787546\n          - -8.290517826993\n          - 0.0893332002872\n  -   -   - -46.24243692467\n          - -13.19929340666\n          - 36.398937508\n      -   - -13.19929340666\n          - 10.49974863698\n          - -15.59542019426\n      -   - 36.398937508\n          - -15.59542019426\n          - -2.340461710656\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.8045838275\n  -  
 -   - 34.65099735562\n          - 17.97663481639\n          - 4.117471070847\n      -   - 17.97663481639\n          - 42.36208423833\n          - 14.06237417874\n      -   - 4.117471070847\n          - 14.06237417874\n          - 2.203133844605\n  -   -   - 16.48276796572\n          - 5.469469601922\n          - 6.230099813495\n      -   - 5.469469601922\n          - -1.991487592538\n          - -2.545944896211\n      -   - 6.230099813495\n          - -2.545944896211\n          - 0.8761849272405\n  -   -   - 10.65126838649\n          - 3.886653870481\n          - 5.035741025472\n      -   - 3.886653870481\n          - 1.042562599207\n          - -1.080339920451\n      -   - 5.035741025472\n          - -1.080339920451\n          - 0.590211704904\n  -   -   - -266.0047978153\n          - -104.4240748307\n          - 113.152993159\n      -   - -104.4240748307\n          - -122.1523164991\n          - -86.07363307296\n      -   - 113.152993159\n          - -86.07363307296\n          - 16.31641633252\n  -   -   - 912.6451934849\n          - 383.7119079265\n          - -122.603887661\n      -   - 383.7119079265\n          - 559.8400701698\n          - 213.0361667162\n      -   - -122.603887661\n          - 213.0361667162\n          - -29.38438988059\n  -   -   - 143.5729232274\n          - 64.4900364164\n          - 14.94615333046\n      -   - 64.4900364164\n          - 105.0390768054\n          - 17.44042411675\n      -   - 14.94615333046\n          - 17.44042411675\n          - -0.4689104638633\n  -   -   - 37.86067159491\n          - 19.63578725902\n          - 27.33833014146\n      -   - 19.63578725902\n          - 40.52405881727\n          - -4.310564613888\n      -   - 27.33833014146\n          - -4.310564613888\n          - 1.001499860059\n  -   -   - 3.158995638313\n          - 5.285050749783\n          - 28.72100787546\n      -   - 5.285050749783\n          - 23.11411940654\n          - -8.290517826993\n      -   - 28.72100787546\n          - -8.290517826993\n  
        - 0.08933320028717\n  -   -   - 3.158995638313\n          - 5.285050749783\n          - 28.72100787546\n      -   - 5.285050749783\n          - 23.11411940654\n          - -8.290517826993\n      -   - 28.72100787546\n          - -8.290517826993\n          - 0.08933320028754\n  -   -   - -46.24243692467\n          - -13.19929340666\n          - 36.398937508\n      -   - -13.19929340666\n          - 10.49974863698\n          - -15.59542019426\n      -   - 36.398937508\n          - -15.59542019426\n          - -2.340461710656\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.80458382749\n  -   -   - 34.65099735562\n          - 17.97663481639\n          - 4.117471070847\n      -   - 17.97663481639\n          - 42.36208423833\n          - 14.06237417874\n      -   - 4.117471070847\n          - 14.06237417874\n          - 2.203133844605\n  -   -   - 16.48276796572\n          - 5.469469601922\n          - 6.230099813495\n      -   - 5.469469601922\n          - -1.991487592538\n          - -2.545944896211\n      -   - 6.230099813495\n          - -2.545944896211\n          - 0.8761849272406\n  -   -   - 10.65126838649\n          - 3.886653870481\n          - 5.035741025472\n      -   - 3.886653870481\n          - 1.042562599207\n          - -1.080339920451\n      -   - 5.035741025472\n          - -1.080339920451\n          - 0.5902117049039\n  -   -   - -266.0047978153\n          - -104.4240748307\n          - 113.152993159\n      -   - -104.4240748307\n          - -122.1523164991\n          - -86.07363307296\n      -   - 113.152993159\n          - -86.07363307296\n          - 16.31641633252\n  -   -   - -693.1880101263\n          - -282.0272651787\n          - 185.6089587299\n      -   - -282.0272651787\n          - -377.0062670135\n          - -199.8947029577\n      -   - 
185.6089587299\n          - -199.8947029577\n          - 32.55049992815\n  -   -   - 912.6451934849\n          - 383.7119079265\n          - -122.603887661\n      -   - 383.7119079265\n          - 559.8400701698\n          - 213.0361667162\n      -   - -122.603887661\n          - 213.0361667162\n          - -29.38438988059\n  -   -   - 259.2154297329\n          - 115.0514237096\n          - -9.284737880698\n      -   - 115.0514237096\n          - 174.2494476566\n          - 49.11184063816\n      -   - -9.284737880698\n          - 49.11184063816\n          - -1.549507008423\n  -   -   - 143.5729232274\n          - 64.4900364164\n          - 14.94615333046\n      -   - 64.4900364164\n          - 105.0390768054\n          - 17.44042411675\n      -   - 14.94615333046\n          - 17.44042411675\n          - -0.4689104638631\n  -   -   - 68.45844162431\n          - 32.65296277576\n          - 25.04360055654\n      -   - 32.65296277576\n          - 59.00150809543\n          - 0.7592426398398\n      -   - 25.04360055654\n          - 0.7592426398398\n          - 0.9871650554025\n  -   -   - -14.88013773366\n          - -1.672249752224\n          - 30.57297543811\n      -   - -1.672249752224\n          - 17.31712592596\n          - -10.5469407781\n      -   - 30.57297543811\n          - -10.5469407781\n          - -0.7358906114452\n  -   -   - -14.88013773366\n          - -1.672249752224\n          - 30.57297543811\n      -   - -1.672249752224\n          - 17.31712592596\n          - -10.5469407781\n      -   - 30.57297543811\n          - -10.5469407781\n          - -0.7358906114452\n  -   -   - -172.4440433656\n          - -58.30234348421\n          - 67.88122528662\n      -   - -58.30234348421\n          - -10.11688714194\n          - -39.74497829255\n      -   - 67.88122528662\n          - -39.74497829255\n          - -9.026701693998\n  -   -   - 87.41811326922\n          - 33.86514838909\n          - -5.240102033061\n      -   - 33.86514838909\n          - 
29.49048109152\n          - 14.67250320165\n      -   - -5.240102033061\n          - 14.67250320165\n          - 4.833599928218\n  -   -   - 41.68746835262\n          - 18.32521144382\n          - 3.890270269493\n      -   - 18.32521144382\n          - 28.10847211295\n          - 9.269126205224\n      -   - 3.890270269493\n          - 9.269126205224\n          - 2.450369115355\n  -   -   - 2.547257721925\n          - -41.98520512744\n          - 20.72868343281\n      -   - -41.98520512744\n          - -302.2224872041\n          - -113.9339256901\n      -   - 20.72868343281\n          - -113.9339256901\n          - -2.362980518942\n  -   -   - 18.58084217516\n          - 5.678085135653\n          - 6.602937980917\n      -   - 5.678085135653\n          - -5.592544704067\n          - -3.940958740204\n      -   - 6.602937980917\n          - -3.940958740204\n          - 0.9575365227362\n  -   -   - 14.66994772782\n          - 5.070231742209\n          - 5.910736412971\n      -   - 5.070231742209\n          - -0.4082883762073\n          - -1.880932217244\n      -   - 5.910736412971\n          - -1.880932217244\n          - 0.79286999734\n  -   -   - 11.79669162928\n          - 4.251219274511\n          - 5.316966607371\n      -   - 4.251219274511\n          - 0.8144741803491\n          - -1.252738735688\n      -   - 5.316966607371\n          - -1.252738735688\n          - 0.6496190714455\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.80458382749\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.80458382749\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n     
 -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.8045838275\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.80458382749\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.80458382749\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.8045838275\n  -   -   - -266.0047978153\n          - -104.4240748307\n          - 113.152993159\n      -   - -104.4240748307\n          - -122.1523164991\n          - -86.07363307296\n      -   - 113.152993159\n          - -86.07363307296\n          - 16.31641633252\n  -   -   - 912.6451934849\n          - 383.7119079265\n          - -122.603887661\n      -   - 383.7119079265\n          - 559.8400701698\n          - 213.0361667162\n      -   - -122.603887661\n          - 213.0361667162\n          - -29.3843898806\n  -   -   - 143.5729232274\n          - 64.4900364164\n          - 14.94615333046\n      -   - 64.4900364164\n          - 105.0390768054\n          - 17.44042411675\n      -   - 14.94615333046\n          - 17.44042411675\n          - -0.4689104638633\n  -   -   - 37.86067159491\n          - 19.63578725902\n          - 27.33833014146\n      -   - 19.63578725902\n          - 40.52405881727\n          - -4.310564613888\n      -   - 27.33833014146\n          - -4.310564613888\n          - 1.001499860059\n  -   -   - 3.158995638313\n          - 5.285050749783\n          - 
28.72100787546\n      -   - 5.285050749783\n          - 23.11411940654\n          - -8.290517826993\n      -   - 28.72100787546\n          - -8.290517826993\n          - 0.08933320028804\n  -   -   - -46.24243692467\n          - -13.19929340666\n          - 36.398937508\n      -   - -13.19929340666\n          - 10.49974863698\n          - -15.59542019426\n      -   - 36.398937508\n          - -15.59542019426\n          - -2.340461710656\n  -   -   - 3.158995638313\n          - 5.285050749783\n          - 28.72100787546\n      -   - 5.285050749783\n          - 23.11411940654\n          - -8.290517826993\n      -   - 28.72100787546\n          - -8.290517826993\n          - 0.0893332002871\n  -   -   - -46.24243692467\n          - -13.19929340666\n          - 36.398937508\n      -   - -13.19929340666\n          - 10.49974863698\n          - -15.59542019426\n      -   - 36.398937508\n          - -15.59542019426\n          - -2.340461710657\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.8045838275\n  -   -   - 34.65099735562\n          - 17.97663481639\n          - 4.117471070847\n      -   - 17.97663481639\n          - 42.36208423833\n          - 14.06237417874\n      -   - 4.117471070847\n          - 14.06237417874\n          - 2.203133844605\n  -   -   - 16.48276796572\n          - 5.469469601922\n          - 6.230099813495\n      -   - 5.469469601922\n          - -1.991487592538\n          - -2.545944896211\n      -   - 6.230099813495\n          - -2.545944896211\n          - 0.8761849272405\n  -   -   - 10.65126838649\n          - 3.886653870481\n          - 5.035741025472\n      -   - 3.886653870481\n          - 1.042562599207\n          - -1.080339920451\n      -   - 5.035741025472\n          - -1.080339920451\n          - 0.590211704904\n  -   -   - -266.0047978153\n       
   - -104.4240748307\n          - 113.152993159\n      -   - -104.4240748307\n          - -122.1523164991\n          - -86.07363307296\n      -   - 113.152993159\n          - -86.07363307296\n          - 16.31641633252\n  -   -   - 912.6451934849\n          - 383.7119079265\n          - -122.603887661\n      -   - 383.7119079265\n          - 559.8400701698\n          - 213.0361667162\n      -   - -122.603887661\n          - 213.0361667162\n          - -29.3843898806\n  -   -   - 143.5729232274\n          - 64.4900364164\n          - 14.94615333046\n      -   - 64.4900364164\n          - 105.0390768054\n          - 17.44042411675\n      -   - 14.94615333046\n          - 17.44042411675\n          - -0.4689104638633\n  -   -   - 37.86067159491\n          - 19.63578725902\n          - 27.33833014146\n      -   - 19.63578725902\n          - 40.52405881727\n          - -4.310564613888\n      -   - 27.33833014146\n          - -4.310564613888\n          - 1.001499860058\n  -   -   - 3.158995638313\n          - 5.285050749783\n          - 28.72100787546\n      -   - 5.285050749783\n          - 23.11411940654\n          - -8.290517826993\n      -   - 28.72100787546\n          - -8.290517826993\n          - 0.08933320028763\n  -   -   - -46.24243692467\n          - -13.19929340666\n          - 36.398937508\n      -   - -13.19929340666\n          - 10.49974863698\n          - -15.59542019426\n      -   - 36.398937508\n          - -15.59542019426\n          - -2.340461710657\n  -   -   - 3.158995638313\n          - 5.285050749783\n          - 28.72100787546\n      -   - 5.285050749783\n          - 23.11411940654\n          - -8.290517826993\n      -   - 28.72100787546\n          - -8.290517826993\n          - 0.08933320028752\n  -   -   - -46.24243692467\n          - -13.19929340666\n          - 36.398937508\n      -   - -13.19929340666\n          - 10.49974863698\n          - -15.59542019426\n      -   - 36.398937508\n          - -15.59542019426\n          - -2.340461710657\n  
-   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.8045838275\n  -   -   - 34.65099735562\n          - 17.97663481639\n          - 4.117471070847\n      -   - 17.97663481639\n          - 42.36208423833\n          - 14.06237417874\n      -   - 4.117471070847\n          - 14.06237417874\n          - 2.203133844605\n  -   -   - 16.48276796572\n          - 5.469469601922\n          - 6.230099813495\n      -   - 5.469469601922\n          - -1.991487592538\n          - -2.545944896211\n      -   - 6.230099813495\n          - -2.545944896211\n          - 0.8761849272405\n  -   -   - 10.65126838649\n          - 3.886653870481\n          - 5.035741025472\n      -   - 3.886653870481\n          - 1.042562599207\n          - -1.080339920451\n      -   - 5.035741025472\n          - -1.080339920451\n          - 0.590211704904\n  -   -   - -266.0047978153\n          - -104.4240748307\n          - 113.152993159\n      -   - -104.4240748307\n          - -122.1523164991\n          - -86.07363307296\n      -   - 113.152993159\n          - -86.07363307296\n          - 16.31641633252\n  -   -   - 912.6451934849\n          - 383.7119079265\n          - -122.603887661\n      -   - 383.7119079265\n          - 559.8400701698\n          - 213.0361667162\n      -   - -122.603887661\n          - 213.0361667162\n          - -29.38438988059\n  -   -   - 143.5729232274\n          - 64.4900364164\n          - 14.94615333046\n      -   - 64.4900364164\n          - 105.0390768054\n          - 17.44042411675\n      -   - 14.94615333046\n          - 17.44042411675\n          - -0.4689104638634\n  -   -   - 37.86067159491\n          - 19.63578725902\n          - 27.33833014146\n      -   - 19.63578725902\n          - 40.52405881727\n          - -4.310564613888\n      -   - 27.33833014146\n          - -4.310564613888\n  
        - 1.001499860058\n  -   -   - 3.158995638313\n          - 5.285050749783\n          - 28.72100787546\n      -   - 5.285050749783\n          - 23.11411940654\n          - -8.290517826993\n      -   - 28.72100787546\n          - -8.290517826993\n          - 0.08933320028753\n  -   -   - -46.24243692467\n          - -13.19929340666\n          - 36.398937508\n      -   - -13.19929340666\n          - 10.49974863698\n          - -15.59542019426\n      -   - 36.398937508\n          - -15.59542019426\n          - -2.340461710657\n  -   -   - 3.158995638313\n          - 5.285050749783\n          - 28.72100787546\n      -   - 5.285050749783\n          - 23.11411940654\n          - -8.290517826993\n      -   - 28.72100787546\n          - -8.290517826993\n          - 0.08933320028805\n  -   -   - -46.24243692467\n          - -13.19929340666\n          - 36.398937508\n      -   - -13.19929340666\n          - 10.49974863698\n          - -15.59542019426\n      -   - 36.398937508\n          - -15.59542019426\n          - -2.340461710656\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.80458382749\n  -   -   - 34.65099735562\n          - 17.97663481639\n          - 4.117471070847\n      -   - 17.97663481639\n          - 42.36208423833\n          - 14.06237417874\n      -   - 4.117471070847\n          - 14.06237417874\n          - 2.203133844605\n  -   -   - 16.48276796572\n          - 5.469469601922\n          - 6.230099813495\n      -   - 5.469469601922\n          - -1.991487592538\n          - -2.545944896211\n      -   - 6.230099813495\n          - -2.545944896211\n          - 0.8761849272405\n  -   -   - 10.65126838649\n          - 3.886653870481\n          - 5.035741025472\n      -   - 3.886653870481\n          - 1.042562599207\n          - -1.080339920451\n      -   - 
5.035741025472\n          - -1.080339920451\n          - 0.5902117049041\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.8045838275\n  -   -   - -266.0047978153\n          - -104.4240748307\n          - 113.152993159\n      -   - -104.4240748307\n          - -122.1523164991\n          - -86.07363307296\n      -   - 113.152993159\n          - -86.07363307296\n          - 16.31641633252\n  -   -   - 912.6451934849\n          - 383.7119079265\n          - -122.603887661\n      -   - 383.7119079265\n          - 559.8400701698\n          - 213.0361667162\n      -   - -122.603887661\n          - 213.0361667162\n          - -29.38438988059\n  -   -   - 37.86067159491\n          - 19.63578725902\n          - 27.33833014146\n      -   - 19.63578725902\n          - 40.52405881727\n          - -4.310564613888\n      -   - 27.33833014146\n          - -4.310564613888\n          - 1.001499860059\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.80458382749\n  -   -   - 10.65126838649\n          - 3.886653870481\n          - 5.035741025472\n      -   - 3.886653870481\n          - 1.042562599207\n          - -1.080339920451\n      -   - 5.035741025472\n          - -1.080339920451\n          - 0.590211704904\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.80458382749\n  -   -   - 10.65126838649\n          - 3.886653870481\n          - 5.035741025472\n      -   - 3.886653870481\n          - 1.042562599207\n          - 
-1.080339920451\n      -   - 5.035741025472\n          - -1.080339920451\n          - 0.5902117049039\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.8045838275\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.80458382749\n  -   -   - 256.1687039175\n          - 93.85492401693\n          - -48.56016854851\n      -   - 93.85492401693\n          - 55.16906109347\n          - 47.3133808107\n      -   - -48.56016854851\n          - 47.3133808107\n          - 13.80458382749\n  -   -   - 18.93707066102\n          - 11.68664155631\n          - 28.03242287255\n      -   - 11.68664155631\n          - 30.15119945711\n          - -6.606690969349\n      -   - 28.03242287255\n          - -6.606690969349\n          - 0.6417870082805\n  -   -   - 18.93707066102\n          - 11.68664155631\n          - 28.03242287255\n      -   - 11.68664155631\n          - 30.15119945711\n          - -6.606690969349\n      -   - 28.03242287255\n          - -6.606690969349\n          - 0.6417870082805\n  -   -   - -266.0047978153\n          - -104.4240748307\n          - 113.152993159\n      -   - -104.4240748307\n          - -122.1523164991\n          - -86.07363307296\n      -   - 113.152993159\n          - -86.07363307296\n          - 16.31641633252\n  -   -   - 912.6451934849\n          - 383.7119079265\n          - -122.603887661\n      -   - 383.7119079265\n          - 559.8400701698\n          - 213.0361667162\n      -   - -122.603887661\n          - 213.0361667162\n          - -29.3843898806\n  -   -   - 18.93707066102\n          - 11.68664155631\n          - 28.03242287255\n      -   - 11.68664155631\n          - 
30.15119945711\n          - -6.606690969349\n      -   - 28.03242287255\n          - -6.606690969349\n          - 0.6417870082805\n  -   -   - 18.93707066102\n          - 11.68664155631\n          - 28.03242287255\n      -   - 11.68664155631\n          - 30.15119945711\n          - -6.606690969349\n      -   - 28.03242287255\n          - -6.606690969349\n          - 0.6417870082804\n  -   -   - -172.4440433656\n          - -58.30234348421\n          - 67.88122528662\n      -   - -58.30234348421\n          - -10.11688714194\n          - -39.74497829255\n      -   - 67.88122528662\n          - -39.74497829255\n          - -9.026701693999\n  -   -   - 41.68746835262\n          - 18.32521144382\n          - 3.890270269493\n      -   - 18.32521144382\n          - 28.10847211295\n          - 9.269126205224\n      -   - 3.890270269493\n          - 9.269126205224\n          - 2.450369115355\n  -   -   - 18.93707066102\n          - 11.68664155631\n          - 28.03242287255\n      -   - 11.68664155631\n          - 30.15119945711\n          - -6.606690969349\n      -   - 28.03242287255\n          - -6.606690969349\n          - 0.6417870082809\n  -   -   - 18.93707066102\n          - 11.68664155631\n          - 28.03242287255\n      -   - 11.68664155631\n          - 30.15119945711\n          - -6.606690969349\n      -   - 28.03242287255\n          - -6.606690969349\n          - 0.6417870082805\n  -   -   - -266.0047978153\n          - -104.4240748307\n          - 113.152993159\n      -   - -104.4240748307\n          - -122.1523164991\n          - -86.07363307296\n      -   - 113.152993159\n          - -86.07363307296\n          - 16.31641633252\n  -   -   - 912.6451934849\n          - 383.7119079265\n          - -122.603887661\n      -   - 383.7119079265\n          - 559.8400701698\n          - 213.0361667162\n      -   - -122.603887661\n          - 213.0361667162\n          - -29.3843898806\n  -   -   - -172.4440433656\n          - -58.30234348421\n          - 
67.88122528662\n      -   - -58.30234348421\n          - -10.11688714194\n          - -39.74497829255\n      -   - 67.88122528662\n          - -39.74497829255\n          - -9.026701693999\n  -   -   - 41.68746835262\n          - 18.32521144382\n          - 3.890270269493\n      -   - 18.32521144382\n          - 28.10847211295\n          - 9.269126205224\n      -   - 3.890270269493\n          - 9.269126205224\n          - 2.450369115355\n  -   -   - 18.93707066102\n          - 11.68664155631\n          - 28.03242287255\n      -   - 11.68664155631\n          - 30.15119945711\n          - -6.606690969349\n      -   - 28.03242287255\n          - -6.606690969349\n          - 0.6417870082809\n  -   -   - 18.93707066102\n          - 11.68664155631\n          - 28.03242287255\n      -   - 11.68664155631\n          - 30.15119945711\n          - -6.606690969349\n      -   - 28.03242287255\n          - -6.606690969349\n          - 0.6417870082809\nx_accepted:\n  - 0.1494212312914\n  - 0.005607806220598\n  - 0.01308958287811\n"
  },
  {
    "path": "tests/optimagic/optimizers/_pounders/fixtures/pounders_example_data.csv",
    "content": "y,t\n92.9000,0.5000\n78.7000,0.6250\n64.2000,0.7500\n64.9000,0.8750\n57.1000,1.0000\n43.3000,1.2500\n31.1000,1.7500\n23.6000,2.2500\n31.0500,1.7500\n23.7750,2.2500\n17.7375,2.7500\n13.8000,3.2500\n11.5875,3.7500\n9.4125,4.2500\n7.7250,4.7500\n7.3500,5.2500\n8.0250,5.7500\n90.6000,0.5000\n76.9000,0.6250\n71.6000,0.7500\n63.6000,0.8750\n54.0000,1.0000\n39.2000,1.2500\n29.3000,1.7500\n21.4000,2.2500\n29.1750,1.7500\n22.1250,2.2500\n17.5125,2.7500\n14.2500,3.2500\n9.4500,3.7500\n9.1500,4.2500\n7.9125,4.7500\n8.4750,5.2500\n6.1125,5.7500\n80.0000,0.5000\n79.0000,0.6250\n63.8000,0.7500\n57.2000,0.8750\n53.2000,1.0000\n42.5000,1.2500\n26.8000,1.7500\n20.4000,2.2500\n26.8500,1.7500\n21.0000,2.2500\n16.4625,2.7500\n12.5250,3.2500\n10.5375,3.7500\n8.5875,4.2500\n7.1250,4.7500\n6.1125,5.2500\n5.9625,5.7500\n74.1000,0.5000\n67.3000,0.6250\n60.8000,0.7500\n55.5000,0.8750\n50.3000,1.0000\n41.0000,1.2500\n29.4000,1.7500\n20.4000,2.2500\n29.3625,1.7500\n21.1500,2.2500\n16.7625,2.7500\n13.2000,3.2500\n10.8750,3.7500\n8.1750,4.2500\n7.3500,4.7500\n5.9625,5.2500\n5.6250,5.7500\n81.5000,.5000\n62.4000,.7500\n32.5000,1.5000\n12.4100,3.0000\n13.1200,3.0000\n15.5600,3.0000\n5.6300,6.0000\n78.0000,.5000\n59.9000,.7500\n33.2000,1.5000\n13.8400,3.0000\n12.7500,3.0000\n14.6200,3.0000\n3.9400,6.0000\n76.8000,.5000\n61.0000,.7500\n32.9000,1.5000\n13.8700,3.0000\n11.8100,3.0000\n13.3100,3.0000\n5.4400,6.0000\n78.0000,.5000\n63.5000,.7500\n33.8000,1.5000\n12.5600,3.0000\n5.6300,6.0000\n12.7500,3.0000\n13.1200,3.0000\n5.4400,6.0000\n76.8000,.5000\n60.0000,.7500\n47.8000,1.0000\n32.0000,1.5000\n22.2000,2.0000\n22.5700,2.0000\n18.8200,2.5000\n13.9500,3.0000\n11.2500,4.0000\n9.0000,5.0000\n6.6700,6.0000\n75.8000,.5000\n62.0000,.7500\n48.8000,1.0000\n35.2000,1.5000\n20.0000,2.0000\n20.3200,2.0000\n19.3100,2.5000\n12.7500,3.0000\n10.4200,4.0000\n7.3100,5.0000\n7.4200,6.0000\n70.5000,.5000\n59.5000,.7500\n48.5000,1.0000\n35.8000,1.5000\n21.0000,2.0000\n21.6700,2.0000\n21.0000,2.5000\n15.
6400,3.0000\n8.1700,4.0000\n8.5500,5.0000\n10.1200,6.0000\n78.0000,.5000\n66.0000,.6250\n62.0000,.7500\n58.0000,.8750\n47.7000,1.0000\n37.8000,1.2500\n20.2000,2.2500\n21.0700,2.2500\n13.8700,2.7500\n9.6700,3.2500\n7.7600,3.7500\n5.4400,4.2500\n4.8700,4.7500\n4.0100,5.2500\n3.7500,5.7500\n24.1900,3.0000\n25.7600,3.0000\n18.0700,3.0000\n11.8100,3.0000\n12.0700,3.0000\n16.1200,3.0000\n70.8000,.5000\n54.7000,.7500\n48.0000,1.0000\n39.8000,1.5000\n29.8000,2.0000\n23.7000,2.5000\n29.6200,2.0000\n23.8100,2.5000\n17.7000,3.0000\n11.5500,4.0000\n12.0700,5.0000\n8.7400,6.0000\n80.7000,.5000\n61.3000,.7500\n47.5000,1.0000\n29.0000,1.5000\n24.0000,2.0000\n17.7000,2.5000\n24.5600,2.0000\n18.6700,2.5000\n16.2400,3.0000\n8.7400,4.0000\n7.8700,5.0000\n8.5100,6.0000\n66.7000,.5000\n59.2000,.7500\n40.8000,1.0000\n30.7000,1.5000\n25.7000,2.0000\n16.3000,2.5000\n25.9900,2.0000\n16.9500,2.5000\n13.3500,3.0000\n8.6200,4.0000\n7.2000,5.0000\n6.6400,6.0000\n13.6900,3.0000\n81.0000,.5000\n64.5000,.7500\n35.5000,1.5000\n13.3100,3.0000\n4.8700,6.0000\n12.9400,3.0000\n5.0600,6.0000\n15.1900,3.0000\n14.6200,3.0000\n15.6400,3.0000\n25.5000,1.7500\n25.9500,1.7500\n81.7000,.5000\n61.6000,.7500\n29.8000,1.7500\n29.8100,1.7500\n17.1700,2.7500\n10.3900,3.7500\n28.4000,1.7500\n28.6900,1.7500\n81.3000,.5000\n60.9000,.7500\n16.6500,2.7500\n10.0500,3.7500\n28.9000,1.7500\n28.9500,1.7500\n"
  },
  {
    "path": "tests/optimagic/optimizers/_pounders/fixtures/update_initial_residual_model.yaml",
    "content": "---\ninitial_residual_model:\n  intercepts:\n    - 25.015622878108047\n    - 18.675766504742285\n    - 10.714250439974172\n    - 16.92850306333966\n    - 13.8332898293664\n    - 7.611432734310156\n    - 5.780449944004236\n    - 4.918595910461541\n    - 5.730449944004235\n    - 5.093595910461538\n    - 3.573230198001667\n    - 2.8432762942943253\n    - 2.9807829301802418\n    - 2.56911876085199\n    - 2.229814978179112\n    - 2.901300021005225\n    - 4.398727952741098\n    - 22.715622878108036\n    - 16.875766504742288\n    - 18.114250439974164\n    - 15.628503063339657\n    - 10.733289829366399\n    - 3.511432734310162\n    - 3.980449944004235\n    - 2.718595910461538\n    - 3.855449944004235\n    - 3.4435959104615392\n    - 3.3482301980016658\n    - 3.2932762942943246\n    - 0.8432829301802407\n    - 2.3066187608519906\n    - 2.417314978179112\n    - 4.026300021005225\n    - 2.486227952741097\n    - 12.115622878108042\n    - 18.975766504742282\n    - 10.314250439974167\n    - 9.228503063339659\n    - 9.933289829366402\n    - 6.811432734310159\n    - 1.480449944004235\n    - 1.7185959104615378\n    - 1.5304499440042356\n    - 2.3185959104615392\n    - 2.298230198001665\n    - 1.568276294294325\n    - 1.930782930180241\n    - 1.7441187608519906\n    - 1.6298149781791125\n    - 1.6638000210052253\n    - 2.3362279527410976\n    - 6.215622878108036\n    - 7.275766504742279\n    - 7.314250439974167\n    - 7.528503063339656\n    - 7.033289829366396\n    - 5.311432734310159\n    - 4.080449944004233\n    - 1.7185959104615378\n    - 4.042949944004235\n    - 2.468595910461538\n    - 2.5982301980016658\n    - 2.243276294294324\n    - 2.2682829301802414\n    - 1.331618760851991\n    - 1.8548149781791121\n    - 1.5138000210052258\n    - 1.9987279527410973\n    - 13.615622878108042\n    - 8.914250439974168\n    - 2.617857443870772\n    - -0.020698756342492075\n    - 0.689301243657507\n    - 3.1293012436575083\n    - 2.3486741154642674\n    - 10.115622878108042\n  
  - 6.414250439974168\n    - 3.317857443870775\n    - 1.4093012436575076\n    - 0.3193012436575078\n    - 2.189301243657507\n    - 0.6586741154642675\n    - 8.915622878108039\n    - 7.51425043997417\n    - 3.0178574438707706\n    - 1.439301243657507\n    - -0.6206987563424917\n    - 0.8793012436575083\n    - 2.158674115464268\n    - 10.115622878108042\n    - 10.01425043997417\n    - 3.917857443870769\n    - 0.12930124365750828\n    - 2.3486741154642674\n    - 0.3193012436575078\n    - 0.689301243657507\n    - 2.158674115464268\n    - 8.915622878108039\n    - 6.51425043997417\n    - 4.533289829366396\n    - 2.117857443870772\n    - 0.5381907245488051\n    - 0.9081907245488061\n    - 2.599956711545751\n    - 1.519301243657507\n    - 3.5858449755949513\n    - 4.060262123100171\n    - 3.3886741154642674\n    - 7.915622878108039\n    - 8.51425043997417\n    - 5.533289829366396\n    - 5.317857443870775\n    - -1.6618092754511942\n    - -1.3418092754511939\n    - 3.0899567115457494\n    - 0.3193012436575078\n    - 2.7558449755949512\n    - 2.3702621231001704\n    - 4.1386741154642674\n    - 2.6156228781080415\n    - 6.01425043997417\n    - 5.233289829366399\n    - 5.917857443870769\n    - -0.6618092754511942\n    - 0.008190724548807538\n    - 4.779956711545751\n    - 3.2093012436575084\n    - 0.5058449755949512\n    - 3.6102621231001715\n    - 6.838674115464267\n    - 10.115622878108042\n    - 5.975766504742282\n    - 8.51425043997417\n    - 10.028503063339656\n    - 4.433289829366402\n    - 2.1114327343101564\n    - 1.5185959104615385\n    - 2.3885959104615395\n    - -0.2942698019983343\n    - -1.2867237057056755\n    - -0.8467170698197588\n    - -1.4033812391480094\n    - -0.6251850218208874\n    - -0.43869997899477475\n    - 0.12372795274109727\n    - 11.759301243657509\n    - 13.32930124365751\n    - 5.639301243657508\n    - -0.6206987563424917\n    - -0.36069875634249193\n    - 3.6893012436575088\n    - 2.9156228781080387\n    - 1.2142504399741725\n    - 
4.733289829366399\n    - 9.91785744387077\n    - 8.138190724548807\n    - 7.47995671154575\n    - 7.958190724548807\n    - 7.589956711545749\n    - 5.269301243657507\n    - 3.885844975594952\n    - 7.130262123100171\n    - 5.458674115464268\n    - 12.815622878108044\n    - 7.814250439974167\n    - 4.233289829366399\n    - -0.882142556129228\n    - 2.338190724548806\n    - 1.47995671154575\n    - 2.8981907245488046\n    - 2.4499567115457523\n    - 3.809301243657506\n    - 1.0758449755949515\n    - 2.930262123100171\n    - 5.228674115464267\n    - -1.1843771218919557\n    - 5.7142504399741725\n    - -2.466710170633604\n    - 0.8178574438707713\n    - 4.038190724548805\n    - 0.07995671154575135\n    - 4.328190724548804\n    - 0.7299567115457499\n    - 0.9193012436575074\n    - 0.9558449755949505\n    - 2.260262123100171\n    - 3.358674115464267\n    - 1.2593012436575073\n    - 13.115622878108042\n    - 11.01425043997417\n    - 5.617857443870772\n    - 0.8793012436575083\n    - 1.5886741154642676\n    - 0.5093012436575073\n    - 1.7786741154642671\n    - 2.7593012436575073\n    - 2.189301243657507\n    - 3.2093012436575084\n    - 0.1804499440042342\n    - 0.6304499440042335\n    - 13.815622878108044\n    - 8.114250439974171\n    - 4.480449944004235\n    - 4.490449944004233\n    - 3.005730198001668\n    - 1.783282930180242\n    - 3.0804499440042328\n    - 3.3704499440042355\n    - 13.415622878108039\n    - 7.414250439974168\n    - 2.485730198001665\n    - 1.4432829301802421\n    - 3.5804499440042328\n    - 3.6304499440042335\nresidual_model_expected:\n  intercepts:\n    - 25.015622878108047\n    - 18.675766504742285\n    - 10.714250439974172\n    - 16.92850306333966\n    - 13.8332898293664\n    - 7.611432734310156\n    - 5.780449944004236\n    - 4.918595910461541\n    - 5.730449944004235\n    - 5.093595910461538\n    - 3.573230198001667\n    - 2.8432762942943253\n    - 2.9807829301802418\n    - 2.56911876085199\n    - 2.229814978179112\n    - 2.901300021005225\n    - 
4.398727952741098\n    - 22.715622878108036\n    - 16.875766504742288\n    - 18.114250439974164\n    - 15.628503063339657\n    - 10.733289829366399\n    - 3.511432734310162\n    - 3.980449944004235\n    - 2.718595910461538\n    - 3.855449944004235\n    - 3.4435959104615392\n    - 3.3482301980016658\n    - 3.2932762942943246\n    - 0.8432829301802407\n    - 2.3066187608519906\n    - 2.417314978179112\n    - 4.026300021005225\n    - 2.486227952741097\n    - 12.115622878108042\n    - 18.975766504742282\n    - 10.314250439974167\n    - 9.228503063339659\n    - 9.933289829366402\n    - 6.811432734310159\n    - 1.480449944004235\n    - 1.7185959104615378\n    - 1.5304499440042356\n    - 2.3185959104615392\n    - 2.298230198001665\n    - 1.568276294294325\n    - 1.930782930180241\n    - 1.7441187608519906\n    - 1.6298149781791125\n    - 1.6638000210052253\n    - 2.3362279527410976\n    - 6.215622878108036\n    - 7.275766504742279\n    - 7.314250439974167\n    - 7.528503063339656\n    - 7.033289829366396\n    - 5.311432734310159\n    - 4.080449944004233\n    - 1.7185959104615378\n    - 4.042949944004235\n    - 2.468595910461538\n    - 2.5982301980016658\n    - 2.243276294294324\n    - 2.2682829301802414\n    - 1.331618760851991\n    - 1.8548149781791121\n    - 1.5138000210052258\n    - 1.9987279527410973\n    - 13.615622878108042\n    - 8.914250439974168\n    - 2.617857443870772\n    - -0.020698756342492075\n    - 0.689301243657507\n    - 3.1293012436575083\n    - 2.3486741154642674\n    - 10.115622878108042\n    - 6.414250439974168\n    - 3.317857443870775\n    - 1.4093012436575076\n    - 0.3193012436575078\n    - 2.189301243657507\n    - 0.6586741154642675\n    - 8.915622878108039\n    - 7.51425043997417\n    - 3.0178574438707706\n    - 1.439301243657507\n    - -0.6206987563424917\n    - 0.8793012436575083\n    - 2.158674115464268\n    - 10.115622878108042\n    - 10.01425043997417\n    - 3.917857443870769\n    - 0.12930124365750828\n    - 2.3486741154642674\n    - 
0.3193012436575078\n    - 0.689301243657507\n    - 2.158674115464268\n    - 8.915622878108039\n    - 6.51425043997417\n    - 4.533289829366396\n    - 2.117857443870772\n    - 0.5381907245488051\n    - 0.9081907245488061\n    - 2.599956711545751\n    - 1.519301243657507\n    - 3.5858449755949513\n    - 4.060262123100171\n    - 3.3886741154642674\n    - 7.915622878108039\n    - 8.51425043997417\n    - 5.533289829366396\n    - 5.317857443870775\n    - -1.6618092754511942\n    - -1.3418092754511939\n    - 3.0899567115457494\n    - 0.3193012436575078\n    - 2.7558449755949512\n    - 2.3702621231001704\n    - 4.1386741154642674\n    - 2.6156228781080415\n    - 6.01425043997417\n    - 5.233289829366399\n    - 5.917857443870769\n    - -0.6618092754511942\n    - 0.008190724548807538\n    - 4.779956711545751\n    - 3.2093012436575084\n    - 0.5058449755949512\n    - 3.6102621231001715\n    - 6.838674115464267\n    - 10.115622878108042\n    - 5.975766504742282\n    - 8.51425043997417\n    - 10.028503063339656\n    - 4.433289829366402\n    - 2.1114327343101564\n    - 1.5185959104615385\n    - 2.3885959104615395\n    - -0.2942698019983343\n    - -1.2867237057056755\n    - -0.8467170698197588\n    - -1.4033812391480094\n    - -0.6251850218208874\n    - -0.43869997899477475\n    - 0.12372795274109727\n    - 11.759301243657509\n    - 13.32930124365751\n    - 5.639301243657508\n    - -0.6206987563424917\n    - -0.36069875634249193\n    - 3.6893012436575088\n    - 2.9156228781080387\n    - 1.2142504399741725\n    - 4.733289829366399\n    - 9.91785744387077\n    - 8.138190724548807\n    - 7.47995671154575\n    - 7.958190724548807\n    - 7.589956711545749\n    - 5.269301243657507\n    - 3.885844975594952\n    - 7.130262123100171\n    - 5.458674115464268\n    - 12.815622878108044\n    - 7.814250439974167\n    - 4.233289829366399\n    - -0.882142556129228\n    - 2.338190724548806\n    - 1.47995671154575\n    - 2.8981907245488046\n    - 2.4499567115457523\n    - 3.809301243657506\n    - 
1.0758449755949515\n    - 2.930262123100171\n    - 5.228674115464267\n    - -1.1843771218919557\n    - 5.7142504399741725\n    - -2.466710170633604\n    - 0.8178574438707713\n    - 4.038190724548805\n    - 0.07995671154575135\n    - 4.328190724548804\n    - 0.7299567115457499\n    - 0.9193012436575074\n    - 0.9558449755949505\n    - 2.260262123100171\n    - 3.358674115464267\n    - 1.2593012436575073\n    - 13.115622878108042\n    - 11.01425043997417\n    - 5.617857443870772\n    - 0.8793012436575083\n    - 1.5886741154642676\n    - 0.5093012436575073\n    - 1.7786741154642671\n    - 2.7593012436575073\n    - 2.189301243657507\n    - 3.2093012436575084\n    - 0.1804499440042342\n    - 0.6304499440042335\n    - 13.815622878108044\n    - 8.114250439974171\n    - 4.480449944004235\n    - 4.490449944004233\n    - 3.005730198001668\n    - 1.783282930180242\n    - 3.0804499440042328\n    - 3.3704499440042355\n    - 13.415622878108039\n    - 7.414250439974168\n    - 2.485730198001665\n    - 1.4432829301802421\n    - 3.5804499440042328\n    - 3.6304499440042335\n  linear_terms:\n    -   - 3.480506441842877\n        - 3.8712304612359034\n        - 4.165692188910668\n        - 4.386622308606341\n        - 4.550399630758491\n        - 4.751877523597983\n        - 4.842268154536409\n        - 4.713742622150146\n        - 4.842268154536409\n        - 4.713742622150146\n        - 4.483425879433607\n        - 4.2077176825347635\n        - 3.915982374815913\n        - 3.6241891423568875\n        - 3.3411505110783217\n        - 3.0716442642808213\n        - 2.81808670681996\n        - 3.480506441842877\n        - 3.8712304612359034\n        - 4.165692188910668\n        - 4.386622308606341\n        - 4.550399630758491\n        - 4.751877523597983\n        - 4.842268154536409\n        - 4.713742622150146\n        - 4.842268154536409\n        - 4.713742622150146\n        - 4.483425879433607\n        - 4.2077176825347635\n        - 3.915982374815913\n        - 3.6241891423568875\n     
   - 3.3411505110783217\n        - 3.0716442642808213\n        - 2.81808670681996\n        - 3.480506441842877\n        - 3.8712304612359034\n        - 4.165692188910668\n        - 4.386622308606341\n        - 4.550399630758491\n        - 4.751877523597983\n        - 4.842268154536409\n        - 4.713742622150146\n        - 4.842268154536409\n        - 4.713742622150146\n        - 4.483425879433607\n        - 4.2077176825347635\n        - 3.915982374815913\n        - 3.6241891423568875\n        - 3.3411505110783217\n        - 3.0716442642808213\n        - 2.81808670681996\n        - 3.480506441842877\n        - 3.8712304612359034\n        - 4.165692188910668\n        - 4.386622308606341\n        - 4.550399630758491\n        - 4.751877523597983\n        - 4.842268154536409\n        - 4.713742622150146\n        - 4.842268154536409\n        - 4.713742622150146\n        - 4.483425879433607\n        - 4.2077176825347635\n        - 3.915982374815913\n        - 3.6241891423568875\n        - 3.3411505110783217\n        - 3.0716442642808213\n        - 2.81808670681996\n        - 3.480506441842877\n        - 4.165692188910668\n        - 4.8359539116697725\n        - 4.348989444230492\n        - 4.348989444230492\n        - 4.348989444230492\n        - 2.697639699884843\n        - 3.480506441842877\n        - 4.165692188910668\n        - 4.8359539116697725\n        - 4.348989444230492\n        - 4.348989444230492\n        - 4.348989444230492\n        - 2.697639699884843\n        - 3.480506441842877\n        - 4.165692188910668\n        - 4.8359539116697725\n        - 4.348989444230492\n        - 4.348989444230492\n        - 4.348989444230492\n        - 2.697639699884843\n        - 3.480506441842877\n        - 4.165692188910668\n        - 4.8359539116697725\n        - 4.348989444230492\n        - 2.697639699884843\n        - 4.348989444230492\n        - 4.348989444230492\n        - 2.697639699884843\n        - 3.480506441842877\n        - 4.165692188910668\n        - 
4.550399630758491\n        - 4.8359539116697725\n        - 4.795984320324443\n        - 4.795984320324443\n        - 4.606904553696424\n        - 4.348989444230492\n        - 3.7694207275538334\n        - 3.20451303242801\n        - 2.697639699884843\n        - 3.480506441842877\n        - 4.165692188910668\n        - 4.550399630758491\n        - 4.8359539116697725\n        - 4.795984320324443\n        - 4.795984320324443\n        - 4.606904553696424\n        - 4.348989444230492\n        - 3.7694207275538334\n        - 3.20451303242801\n        - 2.697639699884843\n        - 3.480506441842877\n        - 4.165692188910668\n        - 4.550399630758491\n        - 4.8359539116697725\n        - 4.795984320324443\n        - 4.795984320324443\n        - 4.606904553696424\n        - 4.348989444230492\n        - 3.7694207275538334\n        - 3.20451303242801\n        - 2.697639699884843\n        - 3.480506441842877\n        - 3.8712304612359034\n        - 4.165692188910668\n        - 4.386622308606341\n        - 4.550399630758491\n        - 4.751877523597983\n        - 4.713742622150146\n        - 4.713742622150146\n        - 4.483425879433607\n        - 4.2077176825347635\n        - 3.915982374815913\n        - 3.6241891423568875\n        - 3.3411505110783217\n        - 3.0716442642808213\n        - 2.81808670681996\n        - 4.348989444230492\n        - 4.348989444230492\n        - 4.348989444230492\n        - 4.348989444230492\n        - 4.348989444230492\n        - 4.348989444230492\n        - 3.480506441842877\n        - 4.165692188910668\n        - 4.550399630758491\n        - 4.8359539116697725\n        - 4.795984320324443\n        - 4.606904553696424\n        - 4.795984320324443\n        - 4.606904553696424\n        - 4.348989444230492\n        - 3.7694207275538334\n        - 3.20451303242801\n        - 2.697639699884843\n        - 3.480506441842877\n        - 4.165692188910668\n        - 4.550399630758491\n        - 4.8359539116697725\n        - 
4.795984320324443\n        - 4.606904553696424\n        - 4.795984320324443\n        - 4.606904553696424\n        - 4.348989444230492\n        - 3.7694207275538334\n        - 3.20451303242801\n        - 2.697639699884843\n        - 3.480506441842877\n        - 4.165692188910668\n        - 4.550399630758491\n        - 4.8359539116697725\n        - 4.795984320324443\n        - 4.606904553696424\n        - 4.795984320324443\n        - 4.606904553696424\n        - 4.348989444230492\n        - 3.7694207275538334\n        - 3.20451303242801\n        - 2.697639699884843\n        - 4.348989444230492\n        - 3.480506441842877\n        - 4.165692188910668\n        - 4.8359539116697725\n        - 4.348989444230492\n        - 2.697639699884843\n        - 4.348989444230492\n        - 2.697639699884843\n        - 4.348989444230492\n        - 4.348989444230492\n        - 4.348989444230492\n        - 4.842268154536409\n        - 4.842268154536409\n        - 3.480506441842877\n        - 4.165692188910668\n        - 4.842268154536409\n        - 4.842268154536409\n        - 4.483425879433607\n        - 3.915982374815913\n        - 4.842268154536409\n        - 4.842268154536409\n        - 3.480506441842877\n        - 4.165692188910668\n        - 4.483425879433607\n        - 3.915982374815913\n        - 4.842268154536409\n        - 4.842268154536409\n    -   - 63.15476421569454\n        - 55.92600783938173\n        - 49.9146681808974\n        - 44.846354813932926\n        - 40.5229744079594\n        - 33.56053509484467\n        - 24.03332128329257\n        - 17.92731548788399\n        - 24.03332128329257\n        - 17.92731548788399\n        - 13.762137034267115\n        - 10.793196717608852\n        - 8.60666628497297\n        - 6.955196266780662\n        - 5.6825308893242505\n        - 4.685572737243362\n        - 3.8938723589600377\n        - 63.15476421569454\n        - 55.92600783938173\n        - 49.9146681808974\n        - 44.84635481393292\n        - 40.5229744079594\n       
 - 33.56053509484467\n        - 24.03332128329257\n        - 17.92731548788399\n        - 24.03332128329257\n        - 17.92731548788399\n        - 13.762137034267115\n        - 10.793196717608852\n        - 8.60666628497297\n        - 6.955196266780662\n        - 5.6825308893242505\n        - 4.685572737243362\n        - 3.8938723589600377\n        - 63.15476421569454\n        - 55.92600783938173\n        - 49.9146681808974\n        - 44.846354813932926\n        - 40.5229744079594\n        - 33.56053509484467\n        - 24.03332128329257\n        - 17.92731548788399\n        - 24.03332128329257\n        - 17.92731548788399\n        - 13.762137034267115\n        - 10.793196717608854\n        - 8.60666628497297\n        - 6.955196266780662\n        - 5.6825308893242505\n        - 4.685572737243362\n        - 3.8938723589600377\n        - 63.15476421569454\n        - 55.926007839381725\n        - 49.9146681808974\n        - 44.846354813932926\n        - 40.5229744079594\n        - 33.56053509484467\n        - 24.03332128329257\n        - 17.92731548788399\n        - 24.03332128329257\n        - 17.92731548788399\n        - 13.762137034267115\n        - 10.793196717608852\n        - 8.60666628497297\n        - 6.955196266780662\n        - 5.6825308893242505\n        - 4.685572737243362\n        - 3.8938723589600377\n        - 63.15476421569454\n        - 49.9146681808974\n        - 28.22609468926748\n        - 12.159194348241293\n        - 12.159194348241295\n        - 12.159194348241295\n        - 3.5589080859646285\n        - 63.15476421569454\n        - 49.9146681808974\n        - 28.22609468926748\n        - 12.159194348241293\n        - 12.159194348241293\n        - 12.159194348241295\n        - 3.5589080859646285\n        - 63.15476421569454\n        - 49.9146681808974\n        - 28.22609468926748\n        - 12.159194348241295\n        - 12.159194348241293\n        - 12.159194348241295\n        - 3.5589080859646285\n        - 63.15476421569454\n        - 
49.9146681808974\n        - 28.22609468926748\n        - 12.159194348241293\n        - 3.5589080859646285\n        - 12.159194348241293\n        - 12.159194348241295\n        - 3.5589080859646285\n        - 63.15476421569454\n        - 49.9146681808974\n        - 40.5229744079594\n        - 28.22609468926748\n        - 20.670151246699717\n        - 20.670151246699717\n        - 15.659359279812536\n        - 12.159194348241293\n        - 7.7253890215938394\n        - 5.154589183118886\n        - 3.558908085964629\n        - 63.15476421569454\n        - 49.9146681808974\n        - 40.5229744079594\n        - 28.22609468926748\n        - 20.670151246699717\n        - 20.670151246699717\n        - 15.659359279812536\n        - 12.159194348241293\n        - 7.7253890215938394\n        - 5.154589183118886\n        - 3.558908085964629\n        - 63.15476421569455\n        - 49.9146681808974\n        - 40.5229744079594\n        - 28.22609468926748\n        - 20.670151246699717\n        - 20.670151246699717\n        - 15.659359279812536\n        - 12.159194348241293\n        - 7.7253890215938394\n        - 5.154589183118886\n        - 3.558908085964629\n        - 63.15476421569454\n        - 55.926007839381725\n        - 49.9146681808974\n        - 44.846354813932926\n        - 40.5229744079594\n        - 33.56053509484467\n        - 17.92731548788399\n        - 17.92731548788399\n        - 13.762137034267115\n        - 10.793196717608854\n        - 8.60666628497297\n        - 6.955196266780662\n        - 5.6825308893242505\n        - 4.685572737243362\n        - 3.8938723589600377\n        - 12.159194348241293\n        - 12.159194348241293\n        - 12.159194348241293\n        - 12.159194348241293\n        - 12.159194348241293\n        - 12.159194348241293\n        - 63.15476421569455\n        - 49.9146681808974\n        - 40.5229744079594\n        - 28.226094689267484\n        - 20.670151246699717\n        - 15.659359279812538\n        - 20.670151246699717\n        - 
15.659359279812538\n        - 12.159194348241293\n        - 7.7253890215938394\n        - 5.154589183118885\n        - 3.558908085964629\n        - 63.15476421569454\n        - 49.9146681808974\n        - 40.5229744079594\n        - 28.22609468926748\n        - 20.670151246699717\n        - 15.659359279812536\n        - 20.670151246699717\n        - 15.659359279812536\n        - 12.159194348241293\n        - 7.7253890215938394\n        - 5.154589183118886\n        - 3.558908085964629\n        - 63.15476421569455\n        - 49.9146681808974\n        - 40.5229744079594\n        - 28.22609468926748\n        - 20.670151246699717\n        - 15.659359279812536\n        - 20.670151246699717\n        - 15.659359279812536\n        - 12.159194348241295\n        - 7.7253890215938394\n        - 5.154589183118886\n        - 3.558908085964628\n        - 12.159194348241295\n        - 63.15476421569454\n        - 49.9146681808974\n        - 28.22609468926748\n        - 12.159194348241295\n        - 3.5589080859646285\n        - 12.159194348241295\n        - 3.5589080859646285\n        - 12.159194348241295\n        - 12.159194348241295\n        - 12.159194348241293\n        - 24.03332128329257\n        - 24.03332128329257\n        - 63.15476421569454\n        - 49.9146681808974\n        - 24.03332128329257\n        - 24.03332128329257\n        - 13.762137034267115\n        - 8.60666628497297\n        - 24.03332128329257\n        - 24.03332128329257\n        - 63.15476421569454\n        - 49.9146681808974\n        - 13.762137034267115\n        - 8.60666628497297\n        - 24.03332128329257\n        - 24.03332128329257\n    -   - 56.638796479154635\n        - 52.032136772388945\n        - 47.77743791348329\n        - 43.94566363511593\n        - 40.5229744079594\n        - 34.7426501626184\n        - 26.325776492983195\n        - 20.60237968739701\n        - 26.325776492983195\n        - 20.60237968739701\n        - 16.515672503683682\n        - 13.484113409516121\n        - 
11.16768678177973\n        - 9.355872580735188\n        - 7.911893266969131\n        - 6.743263411989219\n        - 5.785333776105146\n        - 56.638796479154635\n        - 52.032136772388945\n        - 47.77743791348329\n        - 43.94566363511592\n        - 40.522974407959396\n        - 34.7426501626184\n        - 26.325776492983195\n        - 20.60237968739701\n        - 26.325776492983195\n        - 20.60237968739701\n        - 16.515672503683682\n        - 13.484113409516121\n        - 11.16768678177973\n        - 9.355872580735188\n        - 7.911893266969131\n        - 6.743263411989219\n        - 5.785333776105146\n        - 56.638796479154635\n        - 52.032136772388945\n        - 47.77743791348329\n        - 43.94566363511593\n        - 40.522974407959396\n        - 34.7426501626184\n        - 26.325776492983195\n        - 20.60237968739701\n        - 26.325776492983195\n        - 20.60237968739701\n        - 16.515672503683682\n        - 13.484113409516123\n        - 11.16768678177973\n        - 9.355872580735188\n        - 7.911893266969131\n        - 6.743263411989219\n        - 5.785333776105146\n        - 56.638796479154635\n        - 52.032136772388945\n        - 47.77743791348329\n        - 43.94566363511593\n        - 40.522974407959396\n        - 34.7426501626184\n        - 26.325776492983195\n        - 20.60237968739701\n        - 26.325776492983195\n        - 20.60237968739701\n        - 16.515672503683682\n        - 13.484113409516121\n        - 11.16768678177973\n        - 9.355872580735188\n        - 7.911893266969131\n        - 6.743263411989219\n        - 5.785333776105146\n        - 56.638796479154635\n        - 47.77743791348328\n        - 30.102395781328614\n        - 14.89321437920679\n        - 14.89321437920679\n        - 14.89321437920679\n        - 5.370328369240038\n        - 56.638796479154635\n        - 47.77743791348328\n        - 30.102395781328614\n        - 14.89321437920679\n        - 14.89321437920679\n        - 
14.89321437920679\n        - 5.370328369240038\n        - 56.638796479154635\n        - 47.77743791348329\n        - 30.102395781328614\n        - 14.89321437920679\n        - 14.89321437920679\n        - 14.89321437920679\n        - 5.370328369240038\n        - 56.638796479154635\n        - 47.77743791348329\n        - 30.102395781328614\n        - 14.89321437920679\n        - 5.370328369240038\n        - 14.89321437920679\n        - 14.89321437920679\n        - 5.370328369240038\n        - 56.638796479154635\n        - 47.77743791348329\n        - 40.522974407959396\n        - 30.102395781328614\n        - 23.208590873487402\n        - 23.208590873487402\n        - 18.398363818154305\n        - 14.89321437920679\n        - 10.208549778534715\n        - 7.2977158685733325\n        - 5.370328369240038\n        - 56.638796479154635\n        - 47.77743791348329\n        - 40.522974407959396\n        - 30.102395781328614\n        - 23.208590873487402\n        - 23.208590873487402\n        - 18.398363818154305\n        - 14.89321437920679\n        - 10.208549778534715\n        - 7.2977158685733325\n        - 5.370328369240038\n        - 56.638796479154635\n        - 47.77743791348329\n        - 40.522974407959396\n        - 30.102395781328614\n        - 23.208590873487402\n        - 23.208590873487402\n        - 18.398363818154305\n        - 14.89321437920679\n        - 10.208549778534717\n        - 7.2977158685733325\n        - 5.370328369240037\n        - 56.638796479154635\n        - 52.032136772388945\n        - 47.77743791348329\n        - 43.94566363511593\n        - 40.522974407959396\n        - 34.7426501626184\n        - 20.60237968739701\n        - 20.60237968739701\n        - 16.515672503683682\n        - 13.484113409516123\n        - 11.16768678177973\n        - 9.355872580735188\n        - 7.911893266969132\n        - 6.743263411989219\n        - 5.785333776105146\n        - 14.893214379206793\n        - 14.893214379206793\n        - 14.893214379206793\n   
     - 14.89321437920679\n        - 14.89321437920679\n        - 14.89321437920679\n        - 56.638796479154635\n        - 47.77743791348329\n        - 40.522974407959396\n        - 30.10239578132861\n        - 23.208590873487402\n        - 18.398363818154305\n        - 23.208590873487402\n        - 18.398363818154305\n        - 14.89321437920679\n        - 10.208549778534715\n        - 7.2977158685733325\n        - 5.370328369240037\n        - 56.638796479154635\n        - 47.77743791348329\n        - 40.522974407959396\n        - 30.102395781328614\n        - 23.208590873487402\n        - 18.398363818154305\n        - 23.208590873487402\n        - 18.398363818154305\n        - 14.89321437920679\n        - 10.208549778534717\n        - 7.2977158685733325\n        - 5.370328369240038\n        - 56.638796479154635\n        - 47.77743791348329\n        - 40.522974407959396\n        - 30.102395781328614\n        - 23.208590873487402\n        - 18.398363818154305\n        - 23.208590873487402\n        - 18.398363818154305\n        - 14.89321437920679\n        - 10.208549778534717\n        - 7.2977158685733325\n        - 5.370328369240038\n        - 14.89321437920679\n        - 56.638796479154635\n        - 47.77743791348329\n        - 30.102395781328614\n        - 14.89321437920679\n        - 5.370328369240038\n        - 14.89321437920679\n        - 5.370328369240038\n        - 14.89321437920679\n        - 14.89321437920679\n        - 14.89321437920679\n        - 26.325776492983195\n        - 26.325776492983195\n        - 56.638796479154635\n        - 47.77743791348328\n        - 26.325776492983195\n        - 26.325776492983195\n        - 16.515672503683682\n        - 11.16768678177973\n        - 26.325776492983195\n        - 26.325776492983195\n        - 56.638796479154635\n        - 47.77743791348328\n        - 16.515672503683682\n        - 11.16768678177973\n        - 26.325776492983195\n        - 26.325776492983195\n  square_terms:\n    -   -   - 0.0\n            
- 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 
0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 
0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 
0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 
0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 
0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 
0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 
0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 
0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 
0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 
0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 
0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 
0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 
0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 
0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 
0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 
0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 
0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 
0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\nresiduals_candidate:\n  -   - -3.480506441842877\n      - -3.8712304612359034\n      - -4.165692188910668\n      - -4.386622308606341\n      - -4.550399630758491\n      - -4.751877523597983\n      - -4.842268154536409\n      - -4.713742622150146\n      - -4.842268154536409\n      - -4.713742622150146\n      - -4.483425879433607\n      - -4.2077176825347635\n      - -3.915982374815913\n      - -3.6241891423568875\n      - -3.3411505110783217\n      - -3.0716442642808213\n      - -2.81808670681996\n      - -3.480506441842877\n      - -3.8712304612359034\n      - -4.165692188910668\n      - -4.386622308606341\n      - -4.550399630758491\n      - -4.751877523597983\n      - -4.842268154536409\n      - -4.713742622150146\n      - -4.842268154536409\n      - -4.713742622150146\n      - -4.483425879433607\n      - -4.2077176825347635\n      - -3.915982374815913\n      - -3.6241891423568875\n      - -3.3411505110783217\n      - -3.0716442642808213\n      - -2.81808670681996\n      - -3.480506441842877\n      - -3.8712304612359034\n      - -4.165692188910668\n      - -4.386622308606341\n      - -4.550399630758491\n      - -4.751877523597983\n      - -4.842268154536409\n      - -4.713742622150146\n      - -4.842268154536409\n      - -4.713742622150146\n      - -4.483425879433607\n      - -4.2077176825347635\n      - -3.915982374815913\n 
     - -3.6241891423568875\n      - -3.3411505110783217\n      - -3.0716442642808213\n      - -2.81808670681996\n      - -3.480506441842877\n      - -3.8712304612359034\n      - -4.165692188910668\n      - -4.386622308606341\n      - -4.550399630758491\n      - -4.751877523597983\n      - -4.842268154536409\n      - -4.713742622150146\n      - -4.842268154536409\n      - -4.713742622150146\n      - -4.483425879433607\n      - -4.2077176825347635\n      - -3.915982374815913\n      - -3.6241891423568875\n      - -3.3411505110783217\n      - -3.0716442642808213\n      - -2.81808670681996\n      - -3.480506441842877\n      - -4.165692188910668\n      - -4.8359539116697725\n      - -4.348989444230492\n      - -4.348989444230492\n      - -4.348989444230492\n      - -2.697639699884843\n      - -3.480506441842877\n      - -4.165692188910668\n      - -4.8359539116697725\n      - -4.348989444230492\n      - -4.348989444230492\n      - -4.348989444230492\n      - -2.697639699884843\n      - -3.480506441842877\n      - -4.165692188910668\n      - -4.8359539116697725\n      - -4.348989444230492\n      - -4.348989444230492\n      - -4.348989444230492\n      - -2.697639699884843\n      - -3.480506441842877\n      - -4.165692188910668\n      - -4.8359539116697725\n      - -4.348989444230492\n      - -2.697639699884843\n      - -4.348989444230492\n      - -4.348989444230492\n      - -2.697639699884843\n      - -3.480506441842877\n      - -4.165692188910668\n      - -4.550399630758491\n      - -4.8359539116697725\n      - -4.795984320324443\n      - -4.795984320324443\n      - -4.606904553696424\n      - -4.348989444230492\n      - -3.7694207275538334\n      - -3.20451303242801\n      - -2.697639699884843\n      - -3.480506441842877\n      - -4.165692188910668\n      - -4.550399630758491\n      - -4.8359539116697725\n      - -4.795984320324443\n      - -4.795984320324443\n      - -4.606904553696424\n      - -4.348989444230492\n      - -3.7694207275538334\n      - -3.20451303242801\n 
     - -2.697639699884843\n      - -3.480506441842877\n      - -4.165692188910668\n      - -4.550399630758491\n      - -4.8359539116697725\n      - -4.795984320324443\n      - -4.795984320324443\n      - -4.606904553696424\n      - -4.348989444230492\n      - -3.7694207275538334\n      - -3.20451303242801\n      - -2.697639699884843\n      - -3.480506441842877\n      - -3.8712304612359034\n      - -4.165692188910668\n      - -4.386622308606341\n      - -4.550399630758491\n      - -4.751877523597983\n      - -4.713742622150146\n      - -4.713742622150146\n      - -4.483425879433607\n      - -4.2077176825347635\n      - -3.915982374815913\n      - -3.6241891423568875\n      - -3.3411505110783217\n      - -3.0716442642808213\n      - -2.81808670681996\n      - -4.348989444230492\n      - -4.348989444230492\n      - -4.348989444230492\n      - -4.348989444230492\n      - -4.348989444230492\n      - -4.348989444230492\n      - -3.480506441842877\n      - -4.165692188910668\n      - -4.550399630758491\n      - -4.8359539116697725\n      - -4.795984320324443\n      - -4.606904553696424\n      - -4.795984320324443\n      - -4.606904553696424\n      - -4.348989444230492\n      - -3.7694207275538334\n      - -3.20451303242801\n      - -2.697639699884843\n      - -3.480506441842877\n      - -4.165692188910668\n      - -4.550399630758491\n      - -4.8359539116697725\n      - -4.795984320324443\n      - -4.606904553696424\n      - -4.795984320324443\n      - -4.606904553696424\n      - -4.348989444230492\n      - -3.7694207275538334\n      - -3.20451303242801\n      - -2.697639699884843\n      - -3.480506441842877\n      - -4.165692188910668\n      - -4.550399630758491\n      - -4.8359539116697725\n      - -4.795984320324443\n      - -4.606904553696424\n      - -4.795984320324443\n      - -4.606904553696424\n      - -4.348989444230492\n      - -3.7694207275538334\n      - -3.20451303242801\n      - -2.697639699884843\n      - -4.348989444230492\n      - -3.480506441842877\n     
 - -4.165692188910668\n      - -4.8359539116697725\n      - -4.348989444230492\n      - -2.697639699884843\n      - -4.348989444230492\n      - -2.697639699884843\n      - -4.348989444230492\n      - -4.348989444230492\n      - -4.348989444230492\n      - -4.842268154536409\n      - -4.842268154536409\n      - -3.480506441842877\n      - -4.165692188910668\n      - -4.842268154536409\n      - -4.842268154536409\n      - -4.483425879433607\n      - -3.915982374815913\n      - -4.842268154536409\n      - -4.842268154536409\n      - -3.480506441842877\n      - -4.165692188910668\n      - -4.483425879433607\n      - -3.915982374815913\n      - -4.842268154536409\n      - -4.842268154536409\n  -   - 59.674257773851664\n      - 52.05477737814583\n      - 45.74897599198673\n      - 40.459732505326585\n      - 35.97257477720091\n      - 28.808657571246684\n      - 19.19105312875616\n      - 13.213572865733845\n      - 19.19105312875616\n      - 13.213572865733845\n      - 9.278711154833507\n      - 6.585479035074089\n      - 4.690683910157058\n      - 3.331007124423775\n      - 2.341380378245929\n      - 1.6139284729625407\n      - 1.0757856521400777\n      - 59.674257773851664\n      - 52.05477737814583\n      - 45.74897599198673\n      - 40.45973250532658\n      - 35.97257477720091\n      - 28.808657571246684\n      - 19.19105312875616\n      - 13.213572865733845\n      - 19.19105312875616\n      - 13.213572865733845\n      - 9.278711154833507\n      - 6.585479035074089\n      - 4.690683910157058\n      - 3.331007124423775\n      - 2.341380378245929\n      - 1.6139284729625407\n      - 1.0757856521400777\n      - 59.674257773851664\n      - 52.05477737814583\n      - 45.74897599198673\n      - 40.459732505326585\n      - 35.97257477720091\n      - 28.808657571246684\n      - 19.19105312875616\n      - 13.213572865733845\n      - 19.19105312875616\n      - 13.213572865733845\n      - 9.278711154833507\n      - 6.585479035074091\n      - 4.690683910157058\n      - 
3.331007124423775\n      - 2.341380378245929\n      - 1.6139284729625407\n      - 1.0757856521400777\n      - 59.674257773851664\n      - 52.05477737814582\n      - 45.74897599198673\n      - 40.459732505326585\n      - 35.97257477720091\n      - 28.808657571246684\n      - 19.19105312875616\n      - 13.213572865733845\n      - 19.19105312875616\n      - 13.213572865733845\n      - 9.278711154833507\n      - 6.585479035074089\n      - 4.690683910157058\n      - 3.331007124423775\n      - 2.341380378245929\n      - 1.6139284729625407\n      - 1.0757856521400777\n      - 59.674257773851664\n      - 45.74897599198673\n      - 23.390140777597708\n      - 7.810204904010802\n      - 7.810204904010803\n      - 7.810204904010803\n      - 0.8612683860797854\n      - 59.674257773851664\n      - 45.74897599198673\n      - 23.390140777597708\n      - 7.810204904010801\n      - 7.810204904010801\n      - 7.810204904010803\n      - 0.8612683860797854\n      - 59.674257773851664\n      - 45.74897599198673\n      - 23.390140777597708\n      - 7.810204904010803\n      - 7.810204904010802\n      - 7.810204904010803\n      - 0.8612683860797854\n      - 59.674257773851664\n      - 45.74897599198673\n      - 23.390140777597708\n      - 7.810204904010802\n      - 0.8612683860797854\n      - 7.810204904010801\n      - 7.810204904010803\n      - 0.8612683860797854\n      - 59.674257773851664\n      - 45.74897599198673\n      - 35.97257477720091\n      - 23.390140777597708\n      - 15.874166926375274\n      - 15.874166926375274\n      - 11.052454726116112\n      - 7.810204904010801\n      - 3.955968294040006\n      - 1.950076150690876\n      - 0.8612683860797858\n      - 59.674257773851664\n      - 45.74897599198673\n      - 35.97257477720091\n      - 23.390140777597708\n      - 15.874166926375274\n      - 15.874166926375274\n      - 11.052454726116112\n      - 7.810204904010801\n      - 3.955968294040006\n      - 1.950076150690876\n      - 0.8612683860797858\n      - 59.67425777385167\n   
   - 45.74897599198673\n      - 35.97257477720091\n      - 23.390140777597708\n      - 15.874166926375274\n      - 15.874166926375274\n      - 11.052454726116112\n      - 7.810204904010801\n      - 3.955968294040006\n      - 1.950076150690876\n      - 0.8612683860797858\n      - 59.674257773851664\n      - 52.05477737814582\n      - 45.74897599198673\n      - 40.459732505326585\n      - 35.97257477720091\n      - 28.808657571246684\n      - 13.213572865733845\n      - 13.213572865733845\n      - 9.278711154833507\n      - 6.58547903507409\n      - 4.690683910157058\n      - 3.3310071244237753\n      - 2.341380378245929\n      - 1.6139284729625407\n      - 1.0757856521400777\n      - 7.810204904010801\n      - 7.810204904010801\n      - 7.810204904010801\n      - 7.810204904010802\n      - 7.810204904010802\n      - 7.810204904010801\n      - 59.67425777385167\n      - 45.74897599198673\n      - 35.97257477720091\n      - 23.39014077759771\n      - 15.874166926375274\n      - 11.052454726116114\n      - 15.874166926375274\n      - 11.052454726116114\n      - 7.810204904010801\n      - 3.955968294040006\n      - 1.950076150690875\n      - 0.8612683860797858\n      - 59.674257773851664\n      - 45.74897599198673\n      - 35.97257477720091\n      - 23.390140777597708\n      - 15.874166926375274\n      - 11.052454726116112\n      - 15.874166926375274\n      - 11.052454726116112\n      - 7.810204904010801\n      - 3.955968294040006\n      - 1.950076150690876\n      - 0.8612683860797858\n      - 59.67425777385167\n      - 45.74897599198673\n      - 35.97257477720091\n      - 23.390140777597708\n      - 15.874166926375274\n      - 11.052454726116112\n      - 15.874166926375274\n      - 11.052454726116112\n      - 7.810204904010803\n      - 3.955968294040006\n      - 1.950076150690876\n      - 0.861268386079785\n      - 7.810204904010803\n      - 59.674257773851664\n      - 45.74897599198673\n      - 23.390140777597708\n      - 7.810204904010803\n      - 
0.8612683860797854\n      - 7.810204904010803\n      - 0.8612683860797854\n      - 7.810204904010803\n      - 7.810204904010803\n      - 7.810204904010801\n      - 19.19105312875616\n      - 19.19105312875616\n      - 59.674257773851664\n      - 45.74897599198673\n      - 19.19105312875616\n      - 19.19105312875616\n      - 9.278711154833507\n      - 4.690683910157058\n      - 19.19105312875616\n      - 19.19105312875616\n      - 59.674257773851664\n      - 45.74897599198673\n      - 9.278711154833507\n      - 4.690683910157058\n      - 19.19105312875616\n      - 19.19105312875616\n  -   - 53.15829003731176\n      - 48.16090631115304\n      - 43.61174572457262\n      - 39.559041326509586\n      - 35.97257477720091\n      - 29.99077263902042\n      - 21.483508338446786\n      - 15.888637065246865\n      - 21.483508338446786\n      - 15.888637065246865\n      - 12.032246624250076\n      - 9.276395726981358\n      - 7.2517044069638175\n      - 5.7316834383783\n      - 4.57074275589081\n      - 3.671619147708398\n      - 2.967247069285186\n      - 53.15829003731176\n      - 48.16090631115304\n      - 43.61174572457262\n      - 39.55904132650958\n      - 35.972574777200904\n      - 29.99077263902042\n      - 21.483508338446786\n      - 15.888637065246865\n      - 21.483508338446786\n      - 15.888637065246865\n      - 12.032246624250076\n      - 9.276395726981358\n      - 7.2517044069638175\n      - 5.7316834383783\n      - 4.57074275589081\n      - 3.671619147708398\n      - 2.967247069285186\n      - 53.15829003731176\n      - 48.16090631115304\n      - 43.61174572457262\n      - 39.559041326509586\n      - 35.972574777200904\n      - 29.99077263902042\n      - 21.483508338446786\n      - 15.888637065246865\n      - 21.483508338446786\n      - 15.888637065246865\n      - 12.032246624250076\n      - 9.27639572698136\n      - 7.2517044069638175\n      - 5.731683438378299\n      - 4.57074275589081\n      - 3.671619147708398\n      - 2.967247069285186\n      - 
53.15829003731176\n      - 48.16090631115304\n      - 43.61174572457262\n      - 39.559041326509586\n      - 35.972574777200904\n      - 29.99077263902042\n      - 21.483508338446786\n      - 15.888637065246865\n      - 21.483508338446786\n      - 15.888637065246865\n      - 12.032246624250076\n      - 9.276395726981358\n      - 7.2517044069638175\n      - 5.731683438378299\n      - 4.57074275589081\n      - 3.671619147708398\n      - 2.967247069285186\n      - 53.15829003731176\n      - 43.611745724572614\n      - 25.26644186965884\n      - 10.544224934976299\n      - 10.544224934976299\n      - 10.544224934976299\n      - 2.6726886693551952\n      - 53.15829003731176\n      - 43.611745724572614\n      - 25.26644186965884\n      - 10.544224934976299\n      - 10.544224934976299\n      - 10.544224934976299\n      - 2.672688669355195\n      - 53.15829003731176\n      - 43.61174572457262\n      - 25.26644186965884\n      - 10.544224934976299\n      - 10.544224934976299\n      - 10.544224934976299\n      - 2.6726886693551952\n      - 53.15829003731176\n      - 43.61174572457262\n      - 25.26644186965884\n      - 10.544224934976299\n      - 2.6726886693551952\n      - 10.544224934976299\n      - 10.544224934976299\n      - 2.6726886693551952\n      - 53.15829003731176\n      - 43.61174572457262\n      - 35.972574777200904\n      - 25.26644186965884\n      - 18.41260655316296\n      - 18.41260655316296\n      - 13.791459264457881\n      - 10.544224934976299\n      - 6.439129050980882\n      - 4.0932028361453225\n      - 2.6726886693551952\n      - 53.15829003731176\n      - 43.61174572457262\n      - 35.972574777200904\n      - 25.26644186965884\n      - 18.41260655316296\n      - 18.41260655316296\n      - 13.791459264457881\n      - 10.544224934976299\n      - 6.439129050980882\n      - 4.0932028361453225\n      - 2.6726886693551952\n      - 53.15829003731176\n      - 43.61174572457262\n      - 35.972574777200904\n      - 25.26644186965884\n      - 18.41260655316296\n 
     - 18.41260655316296\n      - 13.791459264457881\n      - 10.544224934976299\n      - 6.439129050980883\n      - 4.0932028361453225\n      - 2.6726886693551943\n      - 53.15829003731176\n      - 48.16090631115304\n      - 43.61174572457262\n      - 39.559041326509586\n      - 35.972574777200904\n      - 29.99077263902042\n      - 15.888637065246865\n      - 15.888637065246865\n      - 12.032246624250076\n      - 9.27639572698136\n      - 7.2517044069638175\n      - 5.731683438378299\n      - 4.5707427558908105\n      - 3.671619147708398\n      - 2.967247069285186\n      - 10.5442249349763\n      - 10.5442249349763\n      - 10.5442249349763\n      - 10.544224934976299\n      - 10.544224934976299\n      - 10.544224934976299\n      - 53.15829003731176\n      - 43.61174572457262\n      - 35.972574777200904\n      - 25.266441869658838\n      - 18.41260655316296\n      - 13.791459264457881\n      - 18.41260655316296\n      - 13.791459264457881\n      - 10.544224934976299\n      - 6.439129050980882\n      - 4.0932028361453225\n      - 2.6726886693551943\n      - 53.15829003731176\n      - 43.61174572457262\n      - 35.972574777200904\n      - 25.26644186965884\n      - 18.41260655316296\n      - 13.791459264457881\n      - 18.41260655316296\n      - 13.791459264457881\n      - 10.544224934976299\n      - 6.439129050980883\n      - 4.0932028361453225\n      - 2.6726886693551952\n      - 53.15829003731176\n      - 43.61174572457262\n      - 35.972574777200904\n      - 25.26644186965884\n      - 18.41260655316296\n      - 13.791459264457881\n      - 18.41260655316296\n      - 13.791459264457881\n      - 10.544224934976299\n      - 6.439129050980883\n      - 4.0932028361453225\n      - 2.6726886693551952\n      - 10.544224934976299\n      - 53.15829003731176\n      - 43.61174572457262\n      - 25.26644186965884\n      - 10.544224934976299\n      - 2.6726886693551952\n      - 10.544224934976299\n      - 2.6726886693551952\n      - 10.544224934976299\n      - 
10.544224934976299\n      - 10.544224934976299\n      - 21.483508338446786\n      - 21.483508338446786\n      - 53.15829003731176\n      - 43.611745724572614\n      - 21.483508338446786\n      - 21.483508338446786\n      - 12.032246624250076\n      - 7.2517044069638175\n      - 21.483508338446786\n      - 21.483508338446786\n      - 53.15829003731176\n      - 43.611745724572614\n      - 12.032246624250076\n      - 7.2517044069638175\n      - 21.483508338446786\n      - 21.483508338446786\nx_candidate:\n  -   - -1.0\n      - 0.0\n      - 0.0\n  -   - -1.0\n      - 1.0\n      - 0.0\n  -   - -1.0\n      - 0.0\n      - 1.0\n"
  },
  {
    "path": "tests/optimagic/optimizers/_pounders/fixtures/update_intial_residual_model.yaml",
    "content": "---\ninitial_residual_model:\n  intercepts:\n    - 25.015622878108047\n    - 18.675766504742285\n    - 10.714250439974172\n    - 16.92850306333966\n    - 13.8332898293664\n    - 7.611432734310156\n    - 5.780449944004236\n    - 4.918595910461541\n    - 5.730449944004235\n    - 5.093595910461538\n    - 3.573230198001667\n    - 2.8432762942943253\n    - 2.9807829301802418\n    - 2.56911876085199\n    - 2.229814978179112\n    - 2.901300021005225\n    - 4.398727952741098\n    - 22.715622878108036\n    - 16.875766504742288\n    - 18.114250439974164\n    - 15.628503063339657\n    - 10.733289829366399\n    - 3.511432734310162\n    - 3.980449944004235\n    - 2.718595910461538\n    - 3.855449944004235\n    - 3.4435959104615392\n    - 3.3482301980016658\n    - 3.2932762942943246\n    - 0.8432829301802407\n    - 2.3066187608519906\n    - 2.417314978179112\n    - 4.026300021005225\n    - 2.486227952741097\n    - 12.115622878108042\n    - 18.975766504742282\n    - 10.314250439974167\n    - 9.228503063339659\n    - 9.933289829366402\n    - 6.811432734310159\n    - 1.480449944004235\n    - 1.7185959104615378\n    - 1.5304499440042356\n    - 2.3185959104615392\n    - 2.298230198001665\n    - 1.568276294294325\n    - 1.930782930180241\n    - 1.7441187608519906\n    - 1.6298149781791125\n    - 1.6638000210052253\n    - 2.3362279527410976\n    - 6.215622878108036\n    - 7.275766504742279\n    - 7.314250439974167\n    - 7.528503063339656\n    - 7.033289829366396\n    - 5.311432734310159\n    - 4.080449944004233\n    - 1.7185959104615378\n    - 4.042949944004235\n    - 2.468595910461538\n    - 2.5982301980016658\n    - 2.243276294294324\n    - 2.2682829301802414\n    - 1.331618760851991\n    - 1.8548149781791121\n    - 1.5138000210052258\n    - 1.9987279527410973\n    - 13.615622878108042\n    - 8.914250439974168\n    - 2.617857443870772\n    - -0.020698756342492075\n    - 0.689301243657507\n    - 3.1293012436575083\n    - 2.3486741154642674\n    - 10.115622878108042\n  
  - 6.414250439974168\n    - 3.317857443870775\n    - 1.4093012436575076\n    - 0.3193012436575078\n    - 2.189301243657507\n    - 0.6586741154642675\n    - 8.915622878108039\n    - 7.51425043997417\n    - 3.0178574438707706\n    - 1.439301243657507\n    - -0.6206987563424917\n    - 0.8793012436575083\n    - 2.158674115464268\n    - 10.115622878108042\n    - 10.01425043997417\n    - 3.917857443870769\n    - 0.12930124365750828\n    - 2.3486741154642674\n    - 0.3193012436575078\n    - 0.689301243657507\n    - 2.158674115464268\n    - 8.915622878108039\n    - 6.51425043997417\n    - 4.533289829366396\n    - 2.117857443870772\n    - 0.5381907245488051\n    - 0.9081907245488061\n    - 2.599956711545751\n    - 1.519301243657507\n    - 3.5858449755949513\n    - 4.060262123100171\n    - 3.3886741154642674\n    - 7.915622878108039\n    - 8.51425043997417\n    - 5.533289829366396\n    - 5.317857443870775\n    - -1.6618092754511942\n    - -1.3418092754511939\n    - 3.0899567115457494\n    - 0.3193012436575078\n    - 2.7558449755949512\n    - 2.3702621231001704\n    - 4.1386741154642674\n    - 2.6156228781080415\n    - 6.01425043997417\n    - 5.233289829366399\n    - 5.917857443870769\n    - -0.6618092754511942\n    - 0.008190724548807538\n    - 4.779956711545751\n    - 3.2093012436575084\n    - 0.5058449755949512\n    - 3.6102621231001715\n    - 6.838674115464267\n    - 10.115622878108042\n    - 5.975766504742282\n    - 8.51425043997417\n    - 10.028503063339656\n    - 4.433289829366402\n    - 2.1114327343101564\n    - 1.5185959104615385\n    - 2.3885959104615395\n    - -0.2942698019983343\n    - -1.2867237057056755\n    - -0.8467170698197588\n    - -1.4033812391480094\n    - -0.6251850218208874\n    - -0.43869997899477475\n    - 0.12372795274109727\n    - 11.759301243657509\n    - 13.32930124365751\n    - 5.639301243657508\n    - -0.6206987563424917\n    - -0.36069875634249193\n    - 3.6893012436575088\n    - 2.9156228781080387\n    - 1.2142504399741725\n    - 
4.733289829366399\n    - 9.91785744387077\n    - 8.138190724548807\n    - 7.47995671154575\n    - 7.958190724548807\n    - 7.589956711545749\n    - 5.269301243657507\n    - 3.885844975594952\n    - 7.130262123100171\n    - 5.458674115464268\n    - 12.815622878108044\n    - 7.814250439974167\n    - 4.233289829366399\n    - -0.882142556129228\n    - 2.338190724548806\n    - 1.47995671154575\n    - 2.8981907245488046\n    - 2.4499567115457523\n    - 3.809301243657506\n    - 1.0758449755949515\n    - 2.930262123100171\n    - 5.228674115464267\n    - -1.1843771218919557\n    - 5.7142504399741725\n    - -2.466710170633604\n    - 0.8178574438707713\n    - 4.038190724548805\n    - 0.07995671154575135\n    - 4.328190724548804\n    - 0.7299567115457499\n    - 0.9193012436575074\n    - 0.9558449755949505\n    - 2.260262123100171\n    - 3.358674115464267\n    - 1.2593012436575073\n    - 13.115622878108042\n    - 11.01425043997417\n    - 5.617857443870772\n    - 0.8793012436575083\n    - 1.5886741154642676\n    - 0.5093012436575073\n    - 1.7786741154642671\n    - 2.7593012436575073\n    - 2.189301243657507\n    - 3.2093012436575084\n    - 0.1804499440042342\n    - 0.6304499440042335\n    - 13.815622878108044\n    - 8.114250439974171\n    - 4.480449944004235\n    - 4.490449944004233\n    - 3.005730198001668\n    - 1.783282930180242\n    - 3.0804499440042328\n    - 3.3704499440042355\n    - 13.415622878108039\n    - 7.414250439974168\n    - 2.485730198001665\n    - 1.4432829301802421\n    - 3.5804499440042328\n    - 3.6304499440042335\nresidual_model_expected:\n  intercepts:\n    - 25.015622878108047\n    - 18.675766504742285\n    - 10.714250439974172\n    - 16.92850306333966\n    - 13.8332898293664\n    - 7.611432734310156\n    - 5.780449944004236\n    - 4.918595910461541\n    - 5.730449944004235\n    - 5.093595910461538\n    - 3.573230198001667\n    - 2.8432762942943253\n    - 2.9807829301802418\n    - 2.56911876085199\n    - 2.229814978179112\n    - 2.901300021005225\n    - 
4.398727952741098\n    - 22.715622878108036\n    - 16.875766504742288\n    - 18.114250439974164\n    - 15.628503063339657\n    - 10.733289829366399\n    - 3.511432734310162\n    - 3.980449944004235\n    - 2.718595910461538\n    - 3.855449944004235\n    - 3.4435959104615392\n    - 3.3482301980016658\n    - 3.2932762942943246\n    - 0.8432829301802407\n    - 2.3066187608519906\n    - 2.417314978179112\n    - 4.026300021005225\n    - 2.486227952741097\n    - 12.115622878108042\n    - 18.975766504742282\n    - 10.314250439974167\n    - 9.228503063339659\n    - 9.933289829366402\n    - 6.811432734310159\n    - 1.480449944004235\n    - 1.7185959104615378\n    - 1.5304499440042356\n    - 2.3185959104615392\n    - 2.298230198001665\n    - 1.568276294294325\n    - 1.930782930180241\n    - 1.7441187608519906\n    - 1.6298149781791125\n    - 1.6638000210052253\n    - 2.3362279527410976\n    - 6.215622878108036\n    - 7.275766504742279\n    - 7.314250439974167\n    - 7.528503063339656\n    - 7.033289829366396\n    - 5.311432734310159\n    - 4.080449944004233\n    - 1.7185959104615378\n    - 4.042949944004235\n    - 2.468595910461538\n    - 2.5982301980016658\n    - 2.243276294294324\n    - 2.2682829301802414\n    - 1.331618760851991\n    - 1.8548149781791121\n    - 1.5138000210052258\n    - 1.9987279527410973\n    - 13.615622878108042\n    - 8.914250439974168\n    - 2.617857443870772\n    - -0.020698756342492075\n    - 0.689301243657507\n    - 3.1293012436575083\n    - 2.3486741154642674\n    - 10.115622878108042\n    - 6.414250439974168\n    - 3.317857443870775\n    - 1.4093012436575076\n    - 0.3193012436575078\n    - 2.189301243657507\n    - 0.6586741154642675\n    - 8.915622878108039\n    - 7.51425043997417\n    - 3.0178574438707706\n    - 1.439301243657507\n    - -0.6206987563424917\n    - 0.8793012436575083\n    - 2.158674115464268\n    - 10.115622878108042\n    - 10.01425043997417\n    - 3.917857443870769\n    - 0.12930124365750828\n    - 2.3486741154642674\n    - 
0.3193012436575078\n    - 0.689301243657507\n    - 2.158674115464268\n    - 8.915622878108039\n    - 6.51425043997417\n    - 4.533289829366396\n    - 2.117857443870772\n    - 0.5381907245488051\n    - 0.9081907245488061\n    - 2.599956711545751\n    - 1.519301243657507\n    - 3.5858449755949513\n    - 4.060262123100171\n    - 3.3886741154642674\n    - 7.915622878108039\n    - 8.51425043997417\n    - 5.533289829366396\n    - 5.317857443870775\n    - -1.6618092754511942\n    - -1.3418092754511939\n    - 3.0899567115457494\n    - 0.3193012436575078\n    - 2.7558449755949512\n    - 2.3702621231001704\n    - 4.1386741154642674\n    - 2.6156228781080415\n    - 6.01425043997417\n    - 5.233289829366399\n    - 5.917857443870769\n    - -0.6618092754511942\n    - 0.008190724548807538\n    - 4.779956711545751\n    - 3.2093012436575084\n    - 0.5058449755949512\n    - 3.6102621231001715\n    - 6.838674115464267\n    - 10.115622878108042\n    - 5.975766504742282\n    - 8.51425043997417\n    - 10.028503063339656\n    - 4.433289829366402\n    - 2.1114327343101564\n    - 1.5185959104615385\n    - 2.3885959104615395\n    - -0.2942698019983343\n    - -1.2867237057056755\n    - -0.8467170698197588\n    - -1.4033812391480094\n    - -0.6251850218208874\n    - -0.43869997899477475\n    - 0.12372795274109727\n    - 11.759301243657509\n    - 13.32930124365751\n    - 5.639301243657508\n    - -0.6206987563424917\n    - -0.36069875634249193\n    - 3.6893012436575088\n    - 2.9156228781080387\n    - 1.2142504399741725\n    - 4.733289829366399\n    - 9.91785744387077\n    - 8.138190724548807\n    - 7.47995671154575\n    - 7.958190724548807\n    - 7.589956711545749\n    - 5.269301243657507\n    - 3.885844975594952\n    - 7.130262123100171\n    - 5.458674115464268\n    - 12.815622878108044\n    - 7.814250439974167\n    - 4.233289829366399\n    - -0.882142556129228\n    - 2.338190724548806\n    - 1.47995671154575\n    - 2.8981907245488046\n    - 2.4499567115457523\n    - 3.809301243657506\n    - 
1.0758449755949515\n    - 2.930262123100171\n    - 5.228674115464267\n    - -1.1843771218919557\n    - 5.7142504399741725\n    - -2.466710170633604\n    - 0.8178574438707713\n    - 4.038190724548805\n    - 0.07995671154575135\n    - 4.328190724548804\n    - 0.7299567115457499\n    - 0.9193012436575074\n    - 0.9558449755949505\n    - 2.260262123100171\n    - 3.358674115464267\n    - 1.2593012436575073\n    - 13.115622878108042\n    - 11.01425043997417\n    - 5.617857443870772\n    - 0.8793012436575083\n    - 1.5886741154642676\n    - 0.5093012436575073\n    - 1.7786741154642671\n    - 2.7593012436575073\n    - 2.189301243657507\n    - 3.2093012436575084\n    - 0.1804499440042342\n    - 0.6304499440042335\n    - 13.815622878108044\n    - 8.114250439974171\n    - 4.480449944004235\n    - 4.490449944004233\n    - 3.005730198001668\n    - 1.783282930180242\n    - 3.0804499440042328\n    - 3.3704499440042355\n    - 13.415622878108039\n    - 7.414250439974168\n    - 2.485730198001665\n    - 1.4432829301802421\n    - 3.5804499440042328\n    - 3.6304499440042335\n  linear_terms:\n    -   - 3.480506441842877\n        - 3.8712304612359034\n        - 4.165692188910668\n        - 4.386622308606341\n        - 4.550399630758491\n        - 4.751877523597983\n        - 4.842268154536409\n        - 4.713742622150146\n        - 4.842268154536409\n        - 4.713742622150146\n        - 4.483425879433607\n        - 4.2077176825347635\n        - 3.915982374815913\n        - 3.6241891423568875\n        - 3.3411505110783217\n        - 3.0716442642808213\n        - 2.81808670681996\n        - 3.480506441842877\n        - 3.8712304612359034\n        - 4.165692188910668\n        - 4.386622308606341\n        - 4.550399630758491\n        - 4.751877523597983\n        - 4.842268154536409\n        - 4.713742622150146\n        - 4.842268154536409\n        - 4.713742622150146\n        - 4.483425879433607\n        - 4.2077176825347635\n        - 3.915982374815913\n        - 3.6241891423568875\n     
   - 3.3411505110783217\n        - 3.0716442642808213\n        - 2.81808670681996\n        - 3.480506441842877\n        - 3.8712304612359034\n        - 4.165692188910668\n        - 4.386622308606341\n        - 4.550399630758491\n        - 4.751877523597983\n        - 4.842268154536409\n        - 4.713742622150146\n        - 4.842268154536409\n        - 4.713742622150146\n        - 4.483425879433607\n        - 4.2077176825347635\n        - 3.915982374815913\n        - 3.6241891423568875\n        - 3.3411505110783217\n        - 3.0716442642808213\n        - 2.81808670681996\n        - 3.480506441842877\n        - 3.8712304612359034\n        - 4.165692188910668\n        - 4.386622308606341\n        - 4.550399630758491\n        - 4.751877523597983\n        - 4.842268154536409\n        - 4.713742622150146\n        - 4.842268154536409\n        - 4.713742622150146\n        - 4.483425879433607\n        - 4.2077176825347635\n        - 3.915982374815913\n        - 3.6241891423568875\n        - 3.3411505110783217\n        - 3.0716442642808213\n        - 2.81808670681996\n        - 3.480506441842877\n        - 4.165692188910668\n        - 4.8359539116697725\n        - 4.348989444230492\n        - 4.348989444230492\n        - 4.348989444230492\n        - 2.697639699884843\n        - 3.480506441842877\n        - 4.165692188910668\n        - 4.8359539116697725\n        - 4.348989444230492\n        - 4.348989444230492\n        - 4.348989444230492\n        - 2.697639699884843\n        - 3.480506441842877\n        - 4.165692188910668\n        - 4.8359539116697725\n        - 4.348989444230492\n        - 4.348989444230492\n        - 4.348989444230492\n        - 2.697639699884843\n        - 3.480506441842877\n        - 4.165692188910668\n        - 4.8359539116697725\n        - 4.348989444230492\n        - 2.697639699884843\n        - 4.348989444230492\n        - 4.348989444230492\n        - 2.697639699884843\n        - 3.480506441842877\n        - 4.165692188910668\n        - 
4.550399630758491\n        - 4.8359539116697725\n        - 4.795984320324443\n        - 4.795984320324443\n        - 4.606904553696424\n        - 4.348989444230492\n        - 3.7694207275538334\n        - 3.20451303242801\n        - 2.697639699884843\n        - 3.480506441842877\n        - 4.165692188910668\n        - 4.550399630758491\n        - 4.8359539116697725\n        - 4.795984320324443\n        - 4.795984320324443\n        - 4.606904553696424\n        - 4.348989444230492\n        - 3.7694207275538334\n        - 3.20451303242801\n        - 2.697639699884843\n        - 3.480506441842877\n        - 4.165692188910668\n        - 4.550399630758491\n        - 4.8359539116697725\n        - 4.795984320324443\n        - 4.795984320324443\n        - 4.606904553696424\n        - 4.348989444230492\n        - 3.7694207275538334\n        - 3.20451303242801\n        - 2.697639699884843\n        - 3.480506441842877\n        - 3.8712304612359034\n        - 4.165692188910668\n        - 4.386622308606341\n        - 4.550399630758491\n        - 4.751877523597983\n        - 4.713742622150146\n        - 4.713742622150146\n        - 4.483425879433607\n        - 4.2077176825347635\n        - 3.915982374815913\n        - 3.6241891423568875\n        - 3.3411505110783217\n        - 3.0716442642808213\n        - 2.81808670681996\n        - 4.348989444230492\n        - 4.348989444230492\n        - 4.348989444230492\n        - 4.348989444230492\n        - 4.348989444230492\n        - 4.348989444230492\n        - 3.480506441842877\n        - 4.165692188910668\n        - 4.550399630758491\n        - 4.8359539116697725\n        - 4.795984320324443\n        - 4.606904553696424\n        - 4.795984320324443\n        - 4.606904553696424\n        - 4.348989444230492\n        - 3.7694207275538334\n        - 3.20451303242801\n        - 2.697639699884843\n        - 3.480506441842877\n        - 4.165692188910668\n        - 4.550399630758491\n        - 4.8359539116697725\n        - 
4.795984320324443\n        - 4.606904553696424\n        - 4.795984320324443\n        - 4.606904553696424\n        - 4.348989444230492\n        - 3.7694207275538334\n        - 3.20451303242801\n        - 2.697639699884843\n        - 3.480506441842877\n        - 4.165692188910668\n        - 4.550399630758491\n        - 4.8359539116697725\n        - 4.795984320324443\n        - 4.606904553696424\n        - 4.795984320324443\n        - 4.606904553696424\n        - 4.348989444230492\n        - 3.7694207275538334\n        - 3.20451303242801\n        - 2.697639699884843\n        - 4.348989444230492\n        - 3.480506441842877\n        - 4.165692188910668\n        - 4.8359539116697725\n        - 4.348989444230492\n        - 2.697639699884843\n        - 4.348989444230492\n        - 2.697639699884843\n        - 4.348989444230492\n        - 4.348989444230492\n        - 4.348989444230492\n        - 4.842268154536409\n        - 4.842268154536409\n        - 3.480506441842877\n        - 4.165692188910668\n        - 4.842268154536409\n        - 4.842268154536409\n        - 4.483425879433607\n        - 3.915982374815913\n        - 4.842268154536409\n        - 4.842268154536409\n        - 3.480506441842877\n        - 4.165692188910668\n        - 4.483425879433607\n        - 3.915982374815913\n        - 4.842268154536409\n        - 4.842268154536409\n    -   - 63.15476421569454\n        - 55.92600783938173\n        - 49.9146681808974\n        - 44.846354813932926\n        - 40.5229744079594\n        - 33.56053509484467\n        - 24.03332128329257\n        - 17.92731548788399\n        - 24.03332128329257\n        - 17.92731548788399\n        - 13.762137034267115\n        - 10.793196717608852\n        - 8.60666628497297\n        - 6.955196266780662\n        - 5.6825308893242505\n        - 4.685572737243362\n        - 3.8938723589600377\n        - 63.15476421569454\n        - 55.92600783938173\n        - 49.9146681808974\n        - 44.84635481393292\n        - 40.5229744079594\n       
 - 33.56053509484467\n        - 24.03332128329257\n        - 17.92731548788399\n        - 24.03332128329257\n        - 17.92731548788399\n        - 13.762137034267115\n        - 10.793196717608852\n        - 8.60666628497297\n        - 6.955196266780662\n        - 5.6825308893242505\n        - 4.685572737243362\n        - 3.8938723589600377\n        - 63.15476421569454\n        - 55.92600783938173\n        - 49.9146681808974\n        - 44.846354813932926\n        - 40.5229744079594\n        - 33.56053509484467\n        - 24.03332128329257\n        - 17.92731548788399\n        - 24.03332128329257\n        - 17.92731548788399\n        - 13.762137034267115\n        - 10.793196717608854\n        - 8.60666628497297\n        - 6.955196266780662\n        - 5.6825308893242505\n        - 4.685572737243362\n        - 3.8938723589600377\n        - 63.15476421569454\n        - 55.926007839381725\n        - 49.9146681808974\n        - 44.846354813932926\n        - 40.5229744079594\n        - 33.56053509484467\n        - 24.03332128329257\n        - 17.92731548788399\n        - 24.03332128329257\n        - 17.92731548788399\n        - 13.762137034267115\n        - 10.793196717608852\n        - 8.60666628497297\n        - 6.955196266780662\n        - 5.6825308893242505\n        - 4.685572737243362\n        - 3.8938723589600377\n        - 63.15476421569454\n        - 49.9146681808974\n        - 28.22609468926748\n        - 12.159194348241293\n        - 12.159194348241295\n        - 12.159194348241295\n        - 3.5589080859646285\n        - 63.15476421569454\n        - 49.9146681808974\n        - 28.22609468926748\n        - 12.159194348241293\n        - 12.159194348241293\n        - 12.159194348241295\n        - 3.5589080859646285\n        - 63.15476421569454\n        - 49.9146681808974\n        - 28.22609468926748\n        - 12.159194348241295\n        - 12.159194348241293\n        - 12.159194348241295\n        - 3.5589080859646285\n        - 63.15476421569454\n        - 
49.9146681808974\n        - 28.22609468926748\n        - 12.159194348241293\n        - 3.5589080859646285\n        - 12.159194348241293\n        - 12.159194348241295\n        - 3.5589080859646285\n        - 63.15476421569454\n        - 49.9146681808974\n        - 40.5229744079594\n        - 28.22609468926748\n        - 20.670151246699717\n        - 20.670151246699717\n        - 15.659359279812536\n        - 12.159194348241293\n        - 7.7253890215938394\n        - 5.154589183118886\n        - 3.558908085964629\n        - 63.15476421569454\n        - 49.9146681808974\n        - 40.5229744079594\n        - 28.22609468926748\n        - 20.670151246699717\n        - 20.670151246699717\n        - 15.659359279812536\n        - 12.159194348241293\n        - 7.7253890215938394\n        - 5.154589183118886\n        - 3.558908085964629\n        - 63.15476421569455\n        - 49.9146681808974\n        - 40.5229744079594\n        - 28.22609468926748\n        - 20.670151246699717\n        - 20.670151246699717\n        - 15.659359279812536\n        - 12.159194348241293\n        - 7.7253890215938394\n        - 5.154589183118886\n        - 3.558908085964629\n        - 63.15476421569454\n        - 55.926007839381725\n        - 49.9146681808974\n        - 44.846354813932926\n        - 40.5229744079594\n        - 33.56053509484467\n        - 17.92731548788399\n        - 17.92731548788399\n        - 13.762137034267115\n        - 10.793196717608854\n        - 8.60666628497297\n        - 6.955196266780662\n        - 5.6825308893242505\n        - 4.685572737243362\n        - 3.8938723589600377\n        - 12.159194348241293\n        - 12.159194348241293\n        - 12.159194348241293\n        - 12.159194348241293\n        - 12.159194348241293\n        - 12.159194348241293\n        - 63.15476421569455\n        - 49.9146681808974\n        - 40.5229744079594\n        - 28.226094689267484\n        - 20.670151246699717\n        - 15.659359279812538\n        - 20.670151246699717\n        - 
15.659359279812538\n        - 12.159194348241293\n        - 7.7253890215938394\n        - 5.154589183118885\n        - 3.558908085964629\n        - 63.15476421569454\n        - 49.9146681808974\n        - 40.5229744079594\n        - 28.22609468926748\n        - 20.670151246699717\n        - 15.659359279812536\n        - 20.670151246699717\n        - 15.659359279812536\n        - 12.159194348241293\n        - 7.7253890215938394\n        - 5.154589183118886\n        - 3.558908085964629\n        - 63.15476421569455\n        - 49.9146681808974\n        - 40.5229744079594\n        - 28.22609468926748\n        - 20.670151246699717\n        - 15.659359279812536\n        - 20.670151246699717\n        - 15.659359279812536\n        - 12.159194348241295\n        - 7.7253890215938394\n        - 5.154589183118886\n        - 3.558908085964628\n        - 12.159194348241295\n        - 63.15476421569454\n        - 49.9146681808974\n        - 28.22609468926748\n        - 12.159194348241295\n        - 3.5589080859646285\n        - 12.159194348241295\n        - 3.5589080859646285\n        - 12.159194348241295\n        - 12.159194348241295\n        - 12.159194348241293\n        - 24.03332128329257\n        - 24.03332128329257\n        - 63.15476421569454\n        - 49.9146681808974\n        - 24.03332128329257\n        - 24.03332128329257\n        - 13.762137034267115\n        - 8.60666628497297\n        - 24.03332128329257\n        - 24.03332128329257\n        - 63.15476421569454\n        - 49.9146681808974\n        - 13.762137034267115\n        - 8.60666628497297\n        - 24.03332128329257\n        - 24.03332128329257\n    -   - 56.638796479154635\n        - 52.032136772388945\n        - 47.77743791348329\n        - 43.94566363511593\n        - 40.5229744079594\n        - 34.7426501626184\n        - 26.325776492983195\n        - 20.60237968739701\n        - 26.325776492983195\n        - 20.60237968739701\n        - 16.515672503683682\n        - 13.484113409516121\n        - 
11.16768678177973\n        - 9.355872580735188\n        - 7.911893266969131\n        - 6.743263411989219\n        - 5.785333776105146\n        - 56.638796479154635\n        - 52.032136772388945\n        - 47.77743791348329\n        - 43.94566363511592\n        - 40.522974407959396\n        - 34.7426501626184\n        - 26.325776492983195\n        - 20.60237968739701\n        - 26.325776492983195\n        - 20.60237968739701\n        - 16.515672503683682\n        - 13.484113409516121\n        - 11.16768678177973\n        - 9.355872580735188\n        - 7.911893266969131\n        - 6.743263411989219\n        - 5.785333776105146\n        - 56.638796479154635\n        - 52.032136772388945\n        - 47.77743791348329\n        - 43.94566363511593\n        - 40.522974407959396\n        - 34.7426501626184\n        - 26.325776492983195\n        - 20.60237968739701\n        - 26.325776492983195\n        - 20.60237968739701\n        - 16.515672503683682\n        - 13.484113409516123\n        - 11.16768678177973\n        - 9.355872580735188\n        - 7.911893266969131\n        - 6.743263411989219\n        - 5.785333776105146\n        - 56.638796479154635\n        - 52.032136772388945\n        - 47.77743791348329\n        - 43.94566363511593\n        - 40.522974407959396\n        - 34.7426501626184\n        - 26.325776492983195\n        - 20.60237968739701\n        - 26.325776492983195\n        - 20.60237968739701\n        - 16.515672503683682\n        - 13.484113409516121\n        - 11.16768678177973\n        - 9.355872580735188\n        - 7.911893266969131\n        - 6.743263411989219\n        - 5.785333776105146\n        - 56.638796479154635\n        - 47.77743791348328\n        - 30.102395781328614\n        - 14.89321437920679\n        - 14.89321437920679\n        - 14.89321437920679\n        - 5.370328369240038\n        - 56.638796479154635\n        - 47.77743791348328\n        - 30.102395781328614\n        - 14.89321437920679\n        - 14.89321437920679\n        - 
14.89321437920679\n        - 5.370328369240038\n        - 56.638796479154635\n        - 47.77743791348329\n        - 30.102395781328614\n        - 14.89321437920679\n        - 14.89321437920679\n        - 14.89321437920679\n        - 5.370328369240038\n        - 56.638796479154635\n        - 47.77743791348329\n        - 30.102395781328614\n        - 14.89321437920679\n        - 5.370328369240038\n        - 14.89321437920679\n        - 14.89321437920679\n        - 5.370328369240038\n        - 56.638796479154635\n        - 47.77743791348329\n        - 40.522974407959396\n        - 30.102395781328614\n        - 23.208590873487402\n        - 23.208590873487402\n        - 18.398363818154305\n        - 14.89321437920679\n        - 10.208549778534715\n        - 7.2977158685733325\n        - 5.370328369240038\n        - 56.638796479154635\n        - 47.77743791348329\n        - 40.522974407959396\n        - 30.102395781328614\n        - 23.208590873487402\n        - 23.208590873487402\n        - 18.398363818154305\n        - 14.89321437920679\n        - 10.208549778534715\n        - 7.2977158685733325\n        - 5.370328369240038\n        - 56.638796479154635\n        - 47.77743791348329\n        - 40.522974407959396\n        - 30.102395781328614\n        - 23.208590873487402\n        - 23.208590873487402\n        - 18.398363818154305\n        - 14.89321437920679\n        - 10.208549778534717\n        - 7.2977158685733325\n        - 5.370328369240037\n        - 56.638796479154635\n        - 52.032136772388945\n        - 47.77743791348329\n        - 43.94566363511593\n        - 40.522974407959396\n        - 34.7426501626184\n        - 20.60237968739701\n        - 20.60237968739701\n        - 16.515672503683682\n        - 13.484113409516123\n        - 11.16768678177973\n        - 9.355872580735188\n        - 7.911893266969132\n        - 6.743263411989219\n        - 5.785333776105146\n        - 14.893214379206793\n        - 14.893214379206793\n        - 14.893214379206793\n   
     - 14.89321437920679\n        - 14.89321437920679\n        - 14.89321437920679\n        - 56.638796479154635\n        - 47.77743791348329\n        - 40.522974407959396\n        - 30.10239578132861\n        - 23.208590873487402\n        - 18.398363818154305\n        - 23.208590873487402\n        - 18.398363818154305\n        - 14.89321437920679\n        - 10.208549778534715\n        - 7.2977158685733325\n        - 5.370328369240037\n        - 56.638796479154635\n        - 47.77743791348329\n        - 40.522974407959396\n        - 30.102395781328614\n        - 23.208590873487402\n        - 18.398363818154305\n        - 23.208590873487402\n        - 18.398363818154305\n        - 14.89321437920679\n        - 10.208549778534717\n        - 7.2977158685733325\n        - 5.370328369240038\n        - 56.638796479154635\n        - 47.77743791348329\n        - 40.522974407959396\n        - 30.102395781328614\n        - 23.208590873487402\n        - 18.398363818154305\n        - 23.208590873487402\n        - 18.398363818154305\n        - 14.89321437920679\n        - 10.208549778534717\n        - 7.2977158685733325\n        - 5.370328369240038\n        - 14.89321437920679\n        - 56.638796479154635\n        - 47.77743791348329\n        - 30.102395781328614\n        - 14.89321437920679\n        - 5.370328369240038\n        - 14.89321437920679\n        - 5.370328369240038\n        - 14.89321437920679\n        - 14.89321437920679\n        - 14.89321437920679\n        - 26.325776492983195\n        - 26.325776492983195\n        - 56.638796479154635\n        - 47.77743791348328\n        - 26.325776492983195\n        - 26.325776492983195\n        - 16.515672503683682\n        - 11.16768678177973\n        - 26.325776492983195\n        - 26.325776492983195\n        - 56.638796479154635\n        - 47.77743791348328\n        - 16.515672503683682\n        - 11.16768678177973\n        - 26.325776492983195\n        - 26.325776492983195\n  square_terms:\n    -   -   - 0.0\n            
- 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 
0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 
0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 
0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 
0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 
0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 
0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 
0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 
0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 
0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 
0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 
0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 
0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 
0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 
0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 
0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 
0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 
0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 
0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n    -   -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\n        -   - 0.0\n            - 0.0\n            - 0.0\nresiduals_candidate:\n  -   - -3.480506441842877\n      - -3.8712304612359034\n      - -4.165692188910668\n      - -4.386622308606341\n      - -4.550399630758491\n      - -4.751877523597983\n      - -4.842268154536409\n      - -4.713742622150146\n      - -4.842268154536409\n      - -4.713742622150146\n      - -4.483425879433607\n      - -4.2077176825347635\n      - -3.915982374815913\n      - -3.6241891423568875\n      - -3.3411505110783217\n      - -3.0716442642808213\n      - -2.81808670681996\n      - -3.480506441842877\n      - -3.8712304612359034\n      - -4.165692188910668\n      - -4.386622308606341\n      - -4.550399630758491\n      - -4.751877523597983\n      - -4.842268154536409\n      - -4.713742622150146\n      - -4.842268154536409\n      - -4.713742622150146\n      - -4.483425879433607\n      - -4.2077176825347635\n      - -3.915982374815913\n      - -3.6241891423568875\n      - -3.3411505110783217\n      - -3.0716442642808213\n      - -2.81808670681996\n      - -3.480506441842877\n      - -3.8712304612359034\n      - -4.165692188910668\n      - -4.386622308606341\n      - -4.550399630758491\n      - -4.751877523597983\n      - -4.842268154536409\n      - -4.713742622150146\n      - -4.842268154536409\n      - -4.713742622150146\n      - -4.483425879433607\n      - -4.2077176825347635\n      - -3.915982374815913\n 
     - -3.6241891423568875\n      - -3.3411505110783217\n      - -3.0716442642808213\n      - -2.81808670681996\n      - -3.480506441842877\n      - -3.8712304612359034\n      - -4.165692188910668\n      - -4.386622308606341\n      - -4.550399630758491\n      - -4.751877523597983\n      - -4.842268154536409\n      - -4.713742622150146\n      - -4.842268154536409\n      - -4.713742622150146\n      - -4.483425879433607\n      - -4.2077176825347635\n      - -3.915982374815913\n      - -3.6241891423568875\n      - -3.3411505110783217\n      - -3.0716442642808213\n      - -2.81808670681996\n      - -3.480506441842877\n      - -4.165692188910668\n      - -4.8359539116697725\n      - -4.348989444230492\n      - -4.348989444230492\n      - -4.348989444230492\n      - -2.697639699884843\n      - -3.480506441842877\n      - -4.165692188910668\n      - -4.8359539116697725\n      - -4.348989444230492\n      - -4.348989444230492\n      - -4.348989444230492\n      - -2.697639699884843\n      - -3.480506441842877\n      - -4.165692188910668\n      - -4.8359539116697725\n      - -4.348989444230492\n      - -4.348989444230492\n      - -4.348989444230492\n      - -2.697639699884843\n      - -3.480506441842877\n      - -4.165692188910668\n      - -4.8359539116697725\n      - -4.348989444230492\n      - -2.697639699884843\n      - -4.348989444230492\n      - -4.348989444230492\n      - -2.697639699884843\n      - -3.480506441842877\n      - -4.165692188910668\n      - -4.550399630758491\n      - -4.8359539116697725\n      - -4.795984320324443\n      - -4.795984320324443\n      - -4.606904553696424\n      - -4.348989444230492\n      - -3.7694207275538334\n      - -3.20451303242801\n      - -2.697639699884843\n      - -3.480506441842877\n      - -4.165692188910668\n      - -4.550399630758491\n      - -4.8359539116697725\n      - -4.795984320324443\n      - -4.795984320324443\n      - -4.606904553696424\n      - -4.348989444230492\n      - -3.7694207275538334\n      - -3.20451303242801\n 
     - -2.697639699884843\n      - -3.480506441842877\n      - -4.165692188910668\n      - -4.550399630758491\n      - -4.8359539116697725\n      - -4.795984320324443\n      - -4.795984320324443\n      - -4.606904553696424\n      - -4.348989444230492\n      - -3.7694207275538334\n      - -3.20451303242801\n      - -2.697639699884843\n      - -3.480506441842877\n      - -3.8712304612359034\n      - -4.165692188910668\n      - -4.386622308606341\n      - -4.550399630758491\n      - -4.751877523597983\n      - -4.713742622150146\n      - -4.713742622150146\n      - -4.483425879433607\n      - -4.2077176825347635\n      - -3.915982374815913\n      - -3.6241891423568875\n      - -3.3411505110783217\n      - -3.0716442642808213\n      - -2.81808670681996\n      - -4.348989444230492\n      - -4.348989444230492\n      - -4.348989444230492\n      - -4.348989444230492\n      - -4.348989444230492\n      - -4.348989444230492\n      - -3.480506441842877\n      - -4.165692188910668\n      - -4.550399630758491\n      - -4.8359539116697725\n      - -4.795984320324443\n      - -4.606904553696424\n      - -4.795984320324443\n      - -4.606904553696424\n      - -4.348989444230492\n      - -3.7694207275538334\n      - -3.20451303242801\n      - -2.697639699884843\n      - -3.480506441842877\n      - -4.165692188910668\n      - -4.550399630758491\n      - -4.8359539116697725\n      - -4.795984320324443\n      - -4.606904553696424\n      - -4.795984320324443\n      - -4.606904553696424\n      - -4.348989444230492\n      - -3.7694207275538334\n      - -3.20451303242801\n      - -2.697639699884843\n      - -3.480506441842877\n      - -4.165692188910668\n      - -4.550399630758491\n      - -4.8359539116697725\n      - -4.795984320324443\n      - -4.606904553696424\n      - -4.795984320324443\n      - -4.606904553696424\n      - -4.348989444230492\n      - -3.7694207275538334\n      - -3.20451303242801\n      - -2.697639699884843\n      - -4.348989444230492\n      - -3.480506441842877\n     
 - -4.165692188910668\n      - -4.8359539116697725\n      - -4.348989444230492\n      - -2.697639699884843\n      - -4.348989444230492\n      - -2.697639699884843\n      - -4.348989444230492\n      - -4.348989444230492\n      - -4.348989444230492\n      - -4.842268154536409\n      - -4.842268154536409\n      - -3.480506441842877\n      - -4.165692188910668\n      - -4.842268154536409\n      - -4.842268154536409\n      - -4.483425879433607\n      - -3.915982374815913\n      - -4.842268154536409\n      - -4.842268154536409\n      - -3.480506441842877\n      - -4.165692188910668\n      - -4.483425879433607\n      - -3.915982374815913\n      - -4.842268154536409\n      - -4.842268154536409\n  -   - 59.674257773851664\n      - 52.05477737814583\n      - 45.74897599198673\n      - 40.459732505326585\n      - 35.97257477720091\n      - 28.808657571246684\n      - 19.19105312875616\n      - 13.213572865733845\n      - 19.19105312875616\n      - 13.213572865733845\n      - 9.278711154833507\n      - 6.585479035074089\n      - 4.690683910157058\n      - 3.331007124423775\n      - 2.341380378245929\n      - 1.6139284729625407\n      - 1.0757856521400777\n      - 59.674257773851664\n      - 52.05477737814583\n      - 45.74897599198673\n      - 40.45973250532658\n      - 35.97257477720091\n      - 28.808657571246684\n      - 19.19105312875616\n      - 13.213572865733845\n      - 19.19105312875616\n      - 13.213572865733845\n      - 9.278711154833507\n      - 6.585479035074089\n      - 4.690683910157058\n      - 3.331007124423775\n      - 2.341380378245929\n      - 1.6139284729625407\n      - 1.0757856521400777\n      - 59.674257773851664\n      - 52.05477737814583\n      - 45.74897599198673\n      - 40.459732505326585\n      - 35.97257477720091\n      - 28.808657571246684\n      - 19.19105312875616\n      - 13.213572865733845\n      - 19.19105312875616\n      - 13.213572865733845\n      - 9.278711154833507\n      - 6.585479035074091\n      - 4.690683910157058\n      - 
3.331007124423775\n      - 2.341380378245929\n      - 1.6139284729625407\n      - 1.0757856521400777\n      - 59.674257773851664\n      - 52.05477737814582\n      - 45.74897599198673\n      - 40.459732505326585\n      - 35.97257477720091\n      - 28.808657571246684\n      - 19.19105312875616\n      - 13.213572865733845\n      - 19.19105312875616\n      - 13.213572865733845\n      - 9.278711154833507\n      - 6.585479035074089\n      - 4.690683910157058\n      - 3.331007124423775\n      - 2.341380378245929\n      - 1.6139284729625407\n      - 1.0757856521400777\n      - 59.674257773851664\n      - 45.74897599198673\n      - 23.390140777597708\n      - 7.810204904010802\n      - 7.810204904010803\n      - 7.810204904010803\n      - 0.8612683860797854\n      - 59.674257773851664\n      - 45.74897599198673\n      - 23.390140777597708\n      - 7.810204904010801\n      - 7.810204904010801\n      - 7.810204904010803\n      - 0.8612683860797854\n      - 59.674257773851664\n      - 45.74897599198673\n      - 23.390140777597708\n      - 7.810204904010803\n      - 7.810204904010802\n      - 7.810204904010803\n      - 0.8612683860797854\n      - 59.674257773851664\n      - 45.74897599198673\n      - 23.390140777597708\n      - 7.810204904010802\n      - 0.8612683860797854\n      - 7.810204904010801\n      - 7.810204904010803\n      - 0.8612683860797854\n      - 59.674257773851664\n      - 45.74897599198673\n      - 35.97257477720091\n      - 23.390140777597708\n      - 15.874166926375274\n      - 15.874166926375274\n      - 11.052454726116112\n      - 7.810204904010801\n      - 3.955968294040006\n      - 1.950076150690876\n      - 0.8612683860797858\n      - 59.674257773851664\n      - 45.74897599198673\n      - 35.97257477720091\n      - 23.390140777597708\n      - 15.874166926375274\n      - 15.874166926375274\n      - 11.052454726116112\n      - 7.810204904010801\n      - 3.955968294040006\n      - 1.950076150690876\n      - 0.8612683860797858\n      - 59.67425777385167\n   
   - 45.74897599198673\n      - 35.97257477720091\n      - 23.390140777597708\n      - 15.874166926375274\n      - 15.874166926375274\n      - 11.052454726116112\n      - 7.810204904010801\n      - 3.955968294040006\n      - 1.950076150690876\n      - 0.8612683860797858\n      - 59.674257773851664\n      - 52.05477737814582\n      - 45.74897599198673\n      - 40.459732505326585\n      - 35.97257477720091\n      - 28.808657571246684\n      - 13.213572865733845\n      - 13.213572865733845\n      - 9.278711154833507\n      - 6.58547903507409\n      - 4.690683910157058\n      - 3.3310071244237753\n      - 2.341380378245929\n      - 1.6139284729625407\n      - 1.0757856521400777\n      - 7.810204904010801\n      - 7.810204904010801\n      - 7.810204904010801\n      - 7.810204904010802\n      - 7.810204904010802\n      - 7.810204904010801\n      - 59.67425777385167\n      - 45.74897599198673\n      - 35.97257477720091\n      - 23.39014077759771\n      - 15.874166926375274\n      - 11.052454726116114\n      - 15.874166926375274\n      - 11.052454726116114\n      - 7.810204904010801\n      - 3.955968294040006\n      - 1.950076150690875\n      - 0.8612683860797858\n      - 59.674257773851664\n      - 45.74897599198673\n      - 35.97257477720091\n      - 23.390140777597708\n      - 15.874166926375274\n      - 11.052454726116112\n      - 15.874166926375274\n      - 11.052454726116112\n      - 7.810204904010801\n      - 3.955968294040006\n      - 1.950076150690876\n      - 0.8612683860797858\n      - 59.67425777385167\n      - 45.74897599198673\n      - 35.97257477720091\n      - 23.390140777597708\n      - 15.874166926375274\n      - 11.052454726116112\n      - 15.874166926375274\n      - 11.052454726116112\n      - 7.810204904010803\n      - 3.955968294040006\n      - 1.950076150690876\n      - 0.861268386079785\n      - 7.810204904010803\n      - 59.674257773851664\n      - 45.74897599198673\n      - 23.390140777597708\n      - 7.810204904010803\n      - 
0.8612683860797854\n      - 7.810204904010803\n      - 0.8612683860797854\n      - 7.810204904010803\n      - 7.810204904010803\n      - 7.810204904010801\n      - 19.19105312875616\n      - 19.19105312875616\n      - 59.674257773851664\n      - 45.74897599198673\n      - 19.19105312875616\n      - 19.19105312875616\n      - 9.278711154833507\n      - 4.690683910157058\n      - 19.19105312875616\n      - 19.19105312875616\n      - 59.674257773851664\n      - 45.74897599198673\n      - 9.278711154833507\n      - 4.690683910157058\n      - 19.19105312875616\n      - 19.19105312875616\n  -   - 53.15829003731176\n      - 48.16090631115304\n      - 43.61174572457262\n      - 39.559041326509586\n      - 35.97257477720091\n      - 29.99077263902042\n      - 21.483508338446786\n      - 15.888637065246865\n      - 21.483508338446786\n      - 15.888637065246865\n      - 12.032246624250076\n      - 9.276395726981358\n      - 7.2517044069638175\n      - 5.7316834383783\n      - 4.57074275589081\n      - 3.671619147708398\n      - 2.967247069285186\n      - 53.15829003731176\n      - 48.16090631115304\n      - 43.61174572457262\n      - 39.55904132650958\n      - 35.972574777200904\n      - 29.99077263902042\n      - 21.483508338446786\n      - 15.888637065246865\n      - 21.483508338446786\n      - 15.888637065246865\n      - 12.032246624250076\n      - 9.276395726981358\n      - 7.2517044069638175\n      - 5.7316834383783\n      - 4.57074275589081\n      - 3.671619147708398\n      - 2.967247069285186\n      - 53.15829003731176\n      - 48.16090631115304\n      - 43.61174572457262\n      - 39.559041326509586\n      - 35.972574777200904\n      - 29.99077263902042\n      - 21.483508338446786\n      - 15.888637065246865\n      - 21.483508338446786\n      - 15.888637065246865\n      - 12.032246624250076\n      - 9.27639572698136\n      - 7.2517044069638175\n      - 5.731683438378299\n      - 4.57074275589081\n      - 3.671619147708398\n      - 2.967247069285186\n      - 
53.15829003731176\n      - 48.16090631115304\n      - 43.61174572457262\n      - 39.559041326509586\n      - 35.972574777200904\n      - 29.99077263902042\n      - 21.483508338446786\n      - 15.888637065246865\n      - 21.483508338446786\n      - 15.888637065246865\n      - 12.032246624250076\n      - 9.276395726981358\n      - 7.2517044069638175\n      - 5.731683438378299\n      - 4.57074275589081\n      - 3.671619147708398\n      - 2.967247069285186\n      - 53.15829003731176\n      - 43.611745724572614\n      - 25.26644186965884\n      - 10.544224934976299\n      - 10.544224934976299\n      - 10.544224934976299\n      - 2.6726886693551952\n      - 53.15829003731176\n      - 43.611745724572614\n      - 25.26644186965884\n      - 10.544224934976299\n      - 10.544224934976299\n      - 10.544224934976299\n      - 2.672688669355195\n      - 53.15829003731176\n      - 43.61174572457262\n      - 25.26644186965884\n      - 10.544224934976299\n      - 10.544224934976299\n      - 10.544224934976299\n      - 2.6726886693551952\n      - 53.15829003731176\n      - 43.61174572457262\n      - 25.26644186965884\n      - 10.544224934976299\n      - 2.6726886693551952\n      - 10.544224934976299\n      - 10.544224934976299\n      - 2.6726886693551952\n      - 53.15829003731176\n      - 43.61174572457262\n      - 35.972574777200904\n      - 25.26644186965884\n      - 18.41260655316296\n      - 18.41260655316296\n      - 13.791459264457881\n      - 10.544224934976299\n      - 6.439129050980882\n      - 4.0932028361453225\n      - 2.6726886693551952\n      - 53.15829003731176\n      - 43.61174572457262\n      - 35.972574777200904\n      - 25.26644186965884\n      - 18.41260655316296\n      - 18.41260655316296\n      - 13.791459264457881\n      - 10.544224934976299\n      - 6.439129050980882\n      - 4.0932028361453225\n      - 2.6726886693551952\n      - 53.15829003731176\n      - 43.61174572457262\n      - 35.972574777200904\n      - 25.26644186965884\n      - 18.41260655316296\n 
     - 18.41260655316296\n      - 13.791459264457881\n      - 10.544224934976299\n      - 6.439129050980883\n      - 4.0932028361453225\n      - 2.6726886693551943\n      - 53.15829003731176\n      - 48.16090631115304\n      - 43.61174572457262\n      - 39.559041326509586\n      - 35.972574777200904\n      - 29.99077263902042\n      - 15.888637065246865\n      - 15.888637065246865\n      - 12.032246624250076\n      - 9.27639572698136\n      - 7.2517044069638175\n      - 5.731683438378299\n      - 4.5707427558908105\n      - 3.671619147708398\n      - 2.967247069285186\n      - 10.5442249349763\n      - 10.5442249349763\n      - 10.5442249349763\n      - 10.544224934976299\n      - 10.544224934976299\n      - 10.544224934976299\n      - 53.15829003731176\n      - 43.61174572457262\n      - 35.972574777200904\n      - 25.266441869658838\n      - 18.41260655316296\n      - 13.791459264457881\n      - 18.41260655316296\n      - 13.791459264457881\n      - 10.544224934976299\n      - 6.439129050980882\n      - 4.0932028361453225\n      - 2.6726886693551943\n      - 53.15829003731176\n      - 43.61174572457262\n      - 35.972574777200904\n      - 25.26644186965884\n      - 18.41260655316296\n      - 13.791459264457881\n      - 18.41260655316296\n      - 13.791459264457881\n      - 10.544224934976299\n      - 6.439129050980883\n      - 4.0932028361453225\n      - 2.6726886693551952\n      - 53.15829003731176\n      - 43.61174572457262\n      - 35.972574777200904\n      - 25.26644186965884\n      - 18.41260655316296\n      - 13.791459264457881\n      - 18.41260655316296\n      - 13.791459264457881\n      - 10.544224934976299\n      - 6.439129050980883\n      - 4.0932028361453225\n      - 2.6726886693551952\n      - 10.544224934976299\n      - 53.15829003731176\n      - 43.61174572457262\n      - 25.26644186965884\n      - 10.544224934976299\n      - 2.6726886693551952\n      - 10.544224934976299\n      - 2.6726886693551952\n      - 10.544224934976299\n      - 
10.544224934976299\n      - 10.544224934976299\n      - 21.483508338446786\n      - 21.483508338446786\n      - 53.15829003731176\n      - 43.611745724572614\n      - 21.483508338446786\n      - 21.483508338446786\n      - 12.032246624250076\n      - 7.2517044069638175\n      - 21.483508338446786\n      - 21.483508338446786\n      - 53.15829003731176\n      - 43.611745724572614\n      - 12.032246624250076\n      - 7.2517044069638175\n      - 21.483508338446786\n      - 21.483508338446786\nx_candidate:\n  -   - -1.0\n      - 0.0\n      - 0.0\n  -   - -1.0\n      - 1.0\n      - 0.0\n  -   - -1.0\n      - 0.0\n      - 1.0\n"
  },
  {
    "path": "tests/optimagic/optimizers/_pounders/fixtures/update_main_from_residual_model.yaml",
    "content": "---\nlinear_terms_main_model_expected:\n  - 171928.93452597785\n  - -177176.19493085583\n  - -42775.40334058995\nlinear_terms_residual_model:\n  -   - 2363.426377558\n      - 5992.039123618\n      - -7709.278530499\n      - -2150.664440344\n      - -1182.145972324\n      - -577.1965207481\n      - -247.3857201056\n      - -138.334179211\n      - -247.3857201056\n      - -138.334179211\n      - -53.7124324404\n      - -103.0416465802\n      - -99.1378353945\n      - 954.5967200947\n      - 13.58623873662\n      - -1.174295227353\n      - -3.866221813446\n      - 2363.426377558\n      - 5992.039123618\n      - -7709.278530499\n      - -2150.664440344\n      - -1182.145972324\n      - -577.1965207481\n      - -247.3857201056\n      - -138.334179211\n      - -247.3857201056\n      - -138.334179211\n      - -53.71243244039\n      - -103.0416465802\n      - -99.1378353945\n      - 954.5967200947\n      - 13.58623873662\n      - -1.174295227353\n      - -3.866221813447\n      - 2363.426377558\n      - 5992.039123618\n      - -7709.278530499\n      - -2150.664440344\n      - -1182.145972324\n      - -577.1965207481\n      - -247.3857201056\n      - -138.334179211\n      - -247.3857201056\n      - -138.334179211\n      - -53.7124324404\n      - -103.0416465802\n      - -99.1378353945\n      - 954.5967200947\n      - 13.58623873662\n      - -1.174295227354\n      - -3.866221813447\n      - 2363.426377558\n      - 5992.039123618\n      - -7709.278530499\n      - -2150.664440344\n      - -1182.145972324\n      - -577.1965207481\n      - -247.3857201056\n      - -138.334179211\n      - -247.3857201056\n      - -138.334179211\n      - -53.7124324404\n      - -103.0416465802\n      - -99.1378353945\n      - 954.5967200947\n      - 13.58623873662\n      - -1.174295227354\n      - -3.866221813447\n      - 2363.426377558\n      - -7709.278530499\n      - -357.7987188137\n      - -153.6248432276\n      - -153.6248432276\n      - -153.6248432276\n      - 
-4.140626483836\n      - 2363.426377558\n      - -7709.278530499\n      - -357.7987188137\n      - -153.6248432276\n      - -153.6248432276\n      - -153.6248432276\n      - -4.140626483835\n      - 2363.426377558\n      - -7709.278530499\n      - -357.7987188137\n      - -153.6248432276\n      - -153.6248432276\n      - -153.6248432276\n      - -4.140626483836\n      - 2363.426377558\n      - -7709.278530499\n      - -357.7987188137\n      - -153.6248432276\n      - -4.140626483836\n      - -153.6248432276\n      - -153.6248432276\n      - -4.140626483836\n      - 2363.426377558\n      - -7709.278530499\n      - -1182.145972324\n      - -357.7987188137\n      - -181.9416676796\n      - -181.9416676796\n      - -104.2155735686\n      - -153.6248432276\n      - -142.8920752426\n      - 3.075193030154\n      - -4.140626483836\n      - 2363.426377558\n      - -7709.278530499\n      - -1182.145972324\n      - -357.7987188137\n      - -181.9416676796\n      - -181.9416676796\n      - -104.2155735686\n      - -153.6248432276\n      - -142.8920752426\n      - 3.075193030154\n      - -4.140626483836\n      - 2363.426377558\n      - -7709.278530499\n      - -1182.145972324\n      - -357.7987188137\n      - -181.9416676796\n      - -181.9416676796\n      - -104.2155735686\n      - -153.6248432276\n      - -142.8920752426\n      - 3.075193030153\n      - -4.140626483836\n      - 2363.426377558\n      - 5992.039123618\n      - -7709.278530499\n      - -2150.664440344\n      - -1182.145972324\n      - -577.1965207481\n      - -138.334179211\n      - -138.334179211\n      - -53.7124324404\n      - -103.0416465802\n      - -99.1378353945\n      - 954.5967200947\n      - 13.58623873662\n      - -1.174295227354\n      - -3.866221813446\n      - -153.6248432276\n      - -153.6248432276\n      - -153.6248432276\n      - -153.6248432276\n      - -153.6248432276\n      - -153.6248432276\n      - 2363.426377558\n      - -7709.278530499\n      - -1182.145972324\n      - -357.7987188137\n 
     - -181.9416676796\n      - -104.2155735686\n      - -181.9416676796\n      - -104.2155735686\n      - -153.6248432276\n      - -142.8920752426\n      - 3.075193030153\n      - -4.140626483835\n      - 2363.426377558\n      - -7709.278530499\n      - -1182.145972324\n      - -357.7987188137\n      - -181.9416676796\n      - -104.2155735686\n      - -181.9416676796\n      - -104.2155735686\n      - -153.6248432276\n      - -142.8920752426\n      - 3.075193030154\n      - -4.140626483836\n      - 2363.426377558\n      - -7709.278530499\n      - -1182.145972324\n      - -357.7987188137\n      - -181.9416676796\n      - -104.2155735686\n      - -181.9416676796\n      - -104.2155735686\n      - -153.6248432276\n      - -142.8920752426\n      - 3.075193030154\n      - -4.140626483836\n      - -153.6248432276\n      - 2363.426377558\n      - -7709.278530499\n      - -357.7987188137\n      - -153.6248432276\n      - -4.140626483836\n      - -153.6248432276\n      - -4.140626483836\n      - -153.6248432276\n      - -153.6248432276\n      - -153.6248432276\n      - -247.3857201056\n      - -247.3857201056\n      - 2363.426377558\n      - -7709.278530499\n      - -247.3857201056\n      - -247.3857201056\n      - -53.7124324404\n      - -99.1378353945\n      - -247.3857201056\n      - -247.3857201056\n      - 2363.426377558\n      - -7709.278530499\n      - -53.7124324404\n      - -99.1378353945\n      - -247.3857201056\n      - -247.3857201056\n  -   - -1393.823686755\n      - -3484.676385903\n      - 4386.660918749\n      - 1174.087828191\n      - 621.0101781775\n      - 261.2029206975\n      - 36.28651044149\n      - -104.2839114197\n      - 36.28651044149\n      - -104.2839114197\n      - -735.8610938666\n      - 294.9448835657\n      - 87.55073042511\n      - 1134.846231379\n      - 104.076484543\n      - 66.32539487318\n      - 48.36159029625\n      - -1393.823686755\n      - -3484.676385903\n      - 4386.660918749\n      - 1174.087828191\n      - 621.0101781775\n    
  - 261.2029206975\n      - 36.28651044149\n      - -104.2839114197\n      - 36.28651044149\n      - -104.2839114197\n      - -735.8610938666\n      - 294.9448835657\n      - 87.55073042511\n      - 1134.846231379\n      - 104.076484543\n      - 66.32539487318\n      - 48.36159029625\n      - -1393.823686755\n      - -3484.676385903\n      - 4386.660918749\n      - 1174.087828191\n      - 621.0101781775\n      - 261.2029206975\n      - 36.28651044149\n      - -104.2839114197\n      - 36.28651044149\n      - -104.2839114197\n      - -735.8610938666\n      - 294.9448835657\n      - 87.55073042511\n      - 1134.846231379\n      - 104.076484543\n      - 66.32539487318\n      - 48.36159029625\n      - -1393.823686755\n      - -3484.676385903\n      - 4386.660918749\n      - 1174.087828191\n      - 621.0101781775\n      - 261.2029206975\n      - 36.28651044149\n      - -104.2839114197\n      - 36.28651044149\n      - -104.2839114197\n      - -735.8610938666\n      - 294.9448835657\n      - 87.55073042511\n      - 1134.846231379\n      - 104.076484543\n      - 66.32539487318\n      - 48.36159029625\n      - -1393.823686755\n      - 4386.660918749\n      - 119.8053923089\n      - 972.3575239774\n      - 972.3575239774\n      - 972.3575239774\n      - 42.21231909165\n      - -1393.823686755\n      - 4386.660918749\n      - 119.8053923089\n      - 972.3575239774\n      - 972.3575239774\n      - 972.3575239774\n      - 42.21231909165\n      - -1393.823686755\n      - 4386.660918749\n      - 119.8053923089\n      - 972.3575239774\n      - 972.3575239774\n      - 972.3575239774\n      - 42.21231909165\n      - -1393.823686755\n      - 4386.660918749\n      - 119.8053923089\n      - 972.3575239774\n      - 42.21231909165\n      - 972.3575239774\n      - 972.3575239774\n      - 42.21231909165\n      - -1393.823686755\n      - 4386.660918749\n      - 621.0101781775\n      - 119.8053923089\n      - -30.37542941294\n      - -30.37542941294\n      - -230.7678532341\n      - 
972.3575239774\n      - 1.227223692861\n      - 80.74754133078\n      - 42.21231909165\n      - -1393.823686755\n      - 4386.660918749\n      - 621.0101781775\n      - 119.8053923089\n      - -30.37542941294\n      - -30.37542941294\n      - -230.7678532341\n      - 972.3575239774\n      - 1.227223692858\n      - 80.74754133078\n      - 42.21231909165\n      - -1393.823686755\n      - 4386.660918749\n      - 621.0101781775\n      - 119.8053923089\n      - -30.37542941294\n      - -30.37542941294\n      - -230.7678532341\n      - 972.3575239774\n      - 1.227223692859\n      - 80.74754133078\n      - 42.21231909165\n      - -1393.823686755\n      - -3484.676385903\n      - 4386.660918749\n      - 1174.087828191\n      - 621.0101781775\n      - 261.2029206975\n      - -104.2839114197\n      - -104.2839114197\n      - -735.8610938666\n      - 294.9448835657\n      - 87.55073042511\n      - 1134.846231379\n      - 104.076484543\n      - 66.32539487318\n      - 48.36159029625\n      - 972.3575239774\n      - 972.3575239774\n      - 972.3575239774\n      - 972.3575239774\n      - 972.3575239774\n      - 972.3575239774\n      - -1393.823686755\n      - 4386.660918749\n      - 621.0101781775\n      - 119.8053923089\n      - -30.37542941294\n      - -230.7678532341\n      - -30.37542941294\n      - -230.7678532341\n      - 972.3575239774\n      - 1.227223692861\n      - 80.74754133078\n      - 42.21231909165\n      - -1393.823686755\n      - 4386.660918749\n      - 621.0101781775\n      - 119.8053923089\n      - -30.37542941294\n      - -230.7678532341\n      - -30.37542941294\n      - -230.7678532341\n      - 972.3575239774\n      - 1.227223692861\n      - 80.74754133078\n      - 42.21231909165\n      - -1393.823686755\n      - 4386.660918749\n      - 621.0101781775\n      - 119.8053923089\n      - -30.37542941294\n      - -230.7678532341\n      - -30.37542941294\n      - -230.7678532341\n      - 972.3575239774\n      - 1.227223692859\n      - 80.74754133078\n      - 
42.21231909165\n      - 972.3575239774\n      - -1393.823686755\n      - 4386.660918749\n      - 119.8053923089\n      - 972.3575239774\n      - 42.21231909165\n      - 972.3575239774\n      - 42.21231909165\n      - 972.3575239774\n      - 972.3575239774\n      - 972.3575239774\n      - 36.28651044149\n      - 36.28651044149\n      - -1393.823686755\n      - 4386.660918749\n      - 36.28651044149\n      - 36.28651044149\n      - -735.8610938667\n      - 87.55073042511\n      - 36.28651044149\n      - 36.28651044149\n      - -1393.823686755\n      - 4386.660918749\n      - -735.8610938666\n      - 87.55073042511\n      - 36.28651044149\n      - 36.28651044149\n  -   - -375.8722506492\n      - -1108.972694686\n      - 1677.946001564\n      - 546.4389881039\n      - 346.3140830129\n      - 215.6227895742\n      - 131.5247089125\n      - 92.72176892163\n      - 131.5247089125\n      - 92.72176892163\n      - 44.3395268336\n      - 78.93180868232\n      - 63.72812657892\n      - -104.6695747867\n      - 31.93096833735\n      - 29.20072767216\n      - 25.55713657195\n      - -375.8722506492\n      - -1108.972694686\n      - 1677.946001564\n      - 546.4389881039\n      - 346.3140830129\n      - 215.6227895742\n      - 131.5247089125\n      - 92.72176892163\n      - 131.5247089125\n      - 92.72176892163\n      - 44.3395268336\n      - 78.93180868232\n      - 63.72812657892\n      - -104.6695747867\n      - 31.93096833735\n      - 29.20072767216\n      - 25.55713657195\n      - -375.8722506492\n      - -1108.972694686\n      - 1677.946001564\n      - 546.4389881039\n      - 346.3140830129\n      - 215.6227895742\n      - 131.5247089125\n      - 92.72176892163\n      - 131.5247089125\n      - 92.72176892163\n      - 44.3395268336\n      - 78.93180868232\n      - 63.72812657892\n      - -104.6695747867\n      - 31.93096833735\n      - 29.20072767216\n      - 25.55713657195\n      - -375.8722506492\n      - -1108.972694686\n      - 1677.946001564\n      - 546.4389881039\n   
   - 346.3140830129\n      - 215.6227895742\n      - 131.5247089125\n      - 92.72176892163\n      - 131.5247089125\n      - 92.72176892163\n      - 44.3395268336\n      - 78.93180868232\n      - 63.72812657892\n      - -104.6695747867\n      - 31.93096833735\n      - 29.20072767216\n      - 25.55713657195\n      - -375.8722506492\n      - 1677.946001564\n      - 162.4888980685\n      - 115.5097653415\n      - 115.5097653415\n      - 115.5097653415\n      - 23.84379728044\n      - -375.8722506492\n      - 1677.946001564\n      - 162.4888980685\n      - 115.5097653415\n      - 115.5097653415\n      - 115.5097653415\n      - 23.84379728044\n      - -375.8722506492\n      - 1677.946001564\n      - 162.4888980685\n      - 115.5097653415\n      - 115.5097653415\n      - 115.5097653415\n      - 23.84379728044\n      - -375.8722506492\n      - 1677.946001564\n      - 162.4888980685\n      - 115.5097653415\n      - 23.84379728044\n      - 115.5097653415\n      - 115.5097653415\n      - 23.84379728044\n      - -375.8722506492\n      - 1677.946001564\n      - 346.3140830129\n      - 162.4888980685\n      - 109.9217527448\n      - 109.9217527448\n      - 76.12955501496\n      - 115.5097653415\n      - 65.79975067225\n      - 30.90788818222\n      - 23.84379728044\n      - -375.8722506492\n      - 1677.946001564\n      - 346.3140830129\n      - 162.4888980685\n      - 109.9217527448\n      - 109.9217527448\n      - 76.12955501496\n      - 115.5097653415\n      - 65.79975067225\n      - 30.90788818222\n      - 23.84379728044\n      - -375.8722506492\n      - 1677.946001564\n      - 346.3140830129\n      - 162.4888980685\n      - 109.9217527448\n      - 109.9217527448\n      - 76.12955501496\n      - 115.5097653415\n      - 65.79975067225\n      - 30.90788818222\n      - 23.84379728044\n      - -375.8722506492\n      - -1108.972694686\n      - 1677.946001564\n      - 546.4389881039\n      - 346.3140830129\n      - 215.6227895742\n      - 92.72176892163\n      - 92.72176892163\n  
    - 44.3395268336\n      - 78.93180868232\n      - 63.72812657892\n      - -104.6695747867\n      - 31.93096833735\n      - 29.20072767216\n      - 25.55713657195\n      - 115.5097653415\n      - 115.5097653415\n      - 115.5097653415\n      - 115.5097653415\n      - 115.5097653415\n      - 115.5097653415\n      - -375.8722506492\n      - 1677.946001564\n      - 346.3140830129\n      - 162.4888980685\n      - 109.9217527448\n      - 76.12955501496\n      - 109.9217527448\n      - 76.12955501496\n      - 115.5097653415\n      - 65.79975067225\n      - 30.90788818222\n      - 23.84379728044\n      - -375.8722506492\n      - 1677.946001564\n      - 346.3140830129\n      - 162.4888980685\n      - 109.9217527448\n      - 76.12955501496\n      - 109.9217527448\n      - 76.12955501496\n      - 115.5097653415\n      - 65.79975067225\n      - 30.90788818222\n      - 23.84379728044\n      - -375.8722506492\n      - 1677.946001564\n      - 346.3140830129\n      - 162.4888980685\n      - 109.9217527448\n      - 76.12955501496\n      - 109.9217527448\n      - 76.12955501496\n      - 115.5097653415\n      - 65.79975067225\n      - 30.90788818222\n      - 23.84379728044\n      - 115.5097653415\n      - -375.8722506492\n      - 1677.946001564\n      - 162.4888980685\n      - 115.5097653415\n      - 23.84379728044\n      - 115.5097653415\n      - 23.84379728044\n      - 115.5097653415\n      - 115.5097653415\n      - 115.5097653415\n      - 131.5247089125\n      - 131.5247089125\n      - -375.8722506492\n      - 1677.946001564\n      - 131.5247089125\n      - 131.5247089125\n      - 44.33952683361\n      - 63.72812657892\n      - 131.5247089125\n      - 131.5247089125\n      - -375.8722506492\n      - 1677.946001564\n      - 44.3395268336\n      - 63.72812657892\n      - 131.5247089125\n      - 131.5247089125\nresiduals:\n  - 19.67905061421\n  - 12.78536491634\n  - 4.453409401868\n  - 10.42602658124\n  - 7.181651769754\n  - 0.8467383120783\n  - -0.8151544815029\n  - 
-1.28878727387\n  - -0.8651544815029\n  - -1.11378727387\n  - -2.178296849214\n  - -2.4437228135\n  - -1.857756175876\n  - -1.847417965917\n  - -1.795022214911\n  - -0.7628423028115\n  - 1.065115779582\n  - 17.37905061421\n  - 10.98536491634\n  - 11.85340940187\n  - 9.126026581237\n  - 4.081651769754\n  - -3.253261687922\n  - -2.615154481503\n  - -3.48878727387\n  - -2.740154481503\n  - -2.76378727387\n  - -2.403296849214\n  - -1.9937228135\n  - -3.995256175876\n  - -2.109917965917\n  - -1.607522214911\n  - 0.3621576971885\n  - -0.8473842204181\n  - 6.779050614207\n  - 13.08536491634\n  - 4.053409401868\n  - 2.726026581237\n  - 3.281651769754\n  - 0.04673831207827\n  - -5.115154481503\n  - -4.48878727387\n  - -5.065154481503\n  - -3.88878727387\n  - -3.453296849214\n  - -3.7187228135\n  - -2.907756175876\n  - -2.672417965917\n  - -2.395022214911\n  - -2.000342302812\n  - -0.9973842204181\n  - 0.8790506142075\n  - 1.385364916339\n  - 1.053409401868\n  - 1.026026581237\n  - 0.3816517697539\n  - -1.453261687922\n  - -2.515154481503\n  - -4.48878727387\n  - -2.552654481503\n  - -3.73878727387\n  - -3.153296849214\n  - -3.0437228135\n  - -2.570256175876\n  - -3.084917965917\n  - -2.170022214911\n  - -2.150342302812\n  - -1.334884220418\n  - 8.279050614207\n  - 2.653409401868\n  - -4.10628705125\n  - -5.539020595211\n  - -4.829020595211\n  - -2.389020595211\n  - -0.8305062664824\n  - 4.779050614207\n  - 0.1534094018679\n  - -3.40628705125\n  - -4.109020595211\n  - -5.199020595211\n  - -3.329020595211\n  - -2.520506266482\n  - 3.579050614207\n  - 1.253409401868\n  - -3.70628705125\n  - -4.079020595211\n  - -6.139020595211\n  - -4.639020595211\n  - -1.020506266482\n  - 4.779050614207\n  - 3.753409401868\n  - -2.80628705125\n  - -5.389020595211\n  - -0.8305062664824\n  - -5.199020595211\n  - -4.829020595211\n  - -1.020506266482\n  - 3.579050614207\n  - 0.2534094018679\n  - -2.118348230246\n  - -4.60628705125\n  - -5.8778638078\n  - -5.5078638078\n  - -3.383083895331\n  - 
-3.999020595211\n  - -1.038044411845\n  - 0.2196196759168\n  - 0.2094937335176\n  - 2.579050614207\n  - 2.253409401868\n  - -1.118348230246\n  - -1.40628705125\n  - -8.0778638078\n  - -7.7578638078\n  - -2.893083895331\n  - -5.199020595211\n  - -1.868044411845\n  - -1.470380324083\n  - 0.9594937335176\n  - -2.720949385793\n  - -0.2465905981321\n  - -1.418348230246\n  - -0.8062870512504\n  - -7.0778638078\n  - -6.4078638078\n  - -1.203083895331\n  - -2.309020595211\n  - -4.118044411845\n  - -0.2303803240832\n  - 3.659493733518\n  - 4.779050614207\n  - 0.0853649163389\n  - 2.253409401868\n  - 3.526026581237\n  - -2.218348230246\n  - -4.653261687922\n  - -4.68878727387\n  - -3.81878727387\n  - -6.045796849214\n  - -6.5737228135\n  - -5.685256175876\n  - -5.819917965917\n  - -4.650022214911\n  - -4.102842302812\n  - -3.209884220418\n  - 6.240979404789\n  - 7.810979404789\n  - 0.1209794047887\n  - -6.139020595211\n  - -5.879020595211\n  - -1.829020595211\n  - -2.420949385793\n  - -5.046590598132\n  - -1.918348230246\n  - 3.19371294875\n  - 1.7221361922\n  - 1.496916104669\n  - 1.5421361922\n  - 1.606916104669\n  - -0.2490205952113\n  - -0.738044411845\n  - 3.289619675917\n  - 2.279493733518\n  - 7.479050614207\n  - 1.553409401868\n  - -2.418348230246\n  - -7.60628705125\n  - -4.0778638078\n  - -4.503083895331\n  - -3.5178638078\n  - -3.533083895331\n  - -1.709020595211\n  - -3.548044411845\n  - -0.9103803240832\n  - 2.049493733518\n  - -6.520949385793\n  - -0.5465905981321\n  - -9.118348230246\n  - -5.90628705125\n  - -2.3778638078\n  - -5.903083895331\n  - -2.0878638078\n  - -5.253083895331\n  - -4.599020595211\n  - -3.668044411845\n  - -1.580380324083\n  - 0.1794937335176\n  - -4.259020595211\n  - 7.779050614207\n  - 4.753409401868\n  - -1.10628705125\n  - -4.639020595211\n  - -1.590506266482\n  - -5.009020595211\n  - -1.400506266482\n  - -2.759020595211\n  - -3.329020595211\n  - -2.309020595211\n  - -6.415154481503\n  - -5.965154481503\n  - 8.479050614207\n  - 
1.853409401868\n  - -2.115154481503\n  - -2.105154481503\n  - -2.745796849214\n  - -3.055256175876\n  - -3.515154481503\n  - -3.225154481503\n  - 8.079050614207\n  - 1.153409401868\n  - -3.265796849214\n  - -3.395256175876\n  - -3.015154481503\n  - -2.965154481503\nsquare_terms_main_model_expected:\n  -   - 1398130238.041321\n      - -793279016.9641627\n      - -295676778.6746732\n  -   - -793279016.9641627\n      - 493231504.5229622\n      - 169825981.1998634\n  -   - -295676778.6746732\n      - 169825981.1998634\n      - 63822373.72220981\nsquare_terms_residual_model:\n  -   -   - -3856.411663325\n          - -1744.213523774\n          - 2262.533558079\n      -   - -1744.213523774\n          - -2684.929962548\n          - -210.0096145524\n      -   - 2262.533558079\n          - -210.0096145524\n          - -71.22779112153\n  -   -   - -10569.10299644\n          - -4682.769847703\n          - 3951.415969003\n      -   - -4682.769847703\n          - -7035.132202115\n          - -875.1223560398\n      -   - 3951.415969003\n          - -875.1223560398\n          - -122.0013610248\n  -   -   - 14481.06389629\n          - 6304.851879955\n          - -2851.437438832\n      -   - 6304.851879955\n          - 9273.533330841\n          - 1517.67089317\n      -   - -2851.437438832\n          - 1517.67089317\n          - 32.41624891305\n  -   -   - 4252.951031935\n          - 1848.277580659\n          - -170.2845897301\n      -   - 1848.277580659\n          - 2612.860235584\n          - 805.8419376052\n      -   - -170.2845897301\n          - 805.8419376052\n          - -37.20600626186\n  -   -   - 2432.916206823\n          - 1032.638149205\n          - 257.4061444594\n      -   - 1032.638149205\n          - 1449.487149189\n          - 399.2375243902\n      -   - 257.4061444594\n          - 399.2375243902\n          - -48.15069583689\n  -   -   - 1240.4084327\n          - 512.7592742227\n          - 475.6238125956\n      -   - 512.7592742227\n          - 690.0567114374\n      
    - 253.7250568021\n      -   - 475.6238125956\n          - 253.7250568021\n          - -57.22030396345\n  -   -   - 459.1360054272\n          - 176.8440075451\n          - 527.6634588185\n      -   - 176.8440075451\n          - 209.2614271428\n          - 151.24310776\n      -   - 527.6634588185\n          - 151.24310776\n          - -67.51764137812\n  -   -   - -28.40757635067\n          - -31.79743224237\n          - 550.3578108482\n      -   - -31.79743224237\n          - -84.07371396961\n          - 84.22388465059\n      -   - 550.3578108482\n          - 84.22388465059\n          - -92.16014545357\n  -   -   - 459.1360054272\n          - 176.8440075451\n          - 527.6634588185\n      -   - 176.8440075451\n          - 209.2614271428\n          - 151.24310776\n      -   - 527.6634588185\n          - 151.24310776\n          - -67.51764137812\n  -   -   - -28.40757635067\n          - -31.79743224237\n          - 550.3578108482\n      -   - -31.79743224237\n          - -84.07371396961\n          - 84.22388465059\n      -   - 550.3578108482\n          - 84.22388465059\n          - -92.16014545357\n  -   -   - -2045.110581511\n          - -913.7367115119\n          - 1103.831669941\n      -   - -913.7367115119\n          - -1367.741368628\n          - -163.7437421998\n      -   - 1103.831669941\n          - -163.7437421998\n          - -316.398543429\n  -   -   - 1219.218783147\n          - 525.7884332597\n          - -29.15120474328\n      -   - 525.7884332597\n          - 767.5344275996\n          - 236.8486988899\n      -   - -29.15120474328\n          - 236.8486988899\n          - 88.99129419806\n  -   -   - 662.0714264794\n          - 287.5911592069\n          - 90.92844099577\n      -   - 287.5911592069\n          - 454.4431485291\n          - 203.8681218118\n      -   - 90.92844099577\n          - 203.8681218118\n          - 25.01577261847\n  -   -   - -1377.243922834\n          - -795.4386681958\n          - 747.122472413\n      -   - -795.4386681958\n   
       - -2497.250719358\n          - -1840.429221121\n      -   - 747.122472413\n          - -1840.429221121\n          - 63.15352494948\n  -   -   - 217.522078241\n          - 79.32830419073\n          - 153.0067332807\n      -   - 79.32830419073\n          - 39.05322569843\n          - -15.05847653376\n      -   - 153.0067332807\n          - -15.05847653376\n          - 8.195712590102\n  -   -   - 192.1674291662\n          - 73.24546567179\n          - 128.9418592581\n      -   - 73.24546567179\n          - 60.8870259716\n          - 14.10438172489\n      -   - 128.9418592581\n          - 14.10438172489\n          - 4.023771621814\n  -   -   - 160.9847149983\n          - 61.88723830032\n          - 112.7111318224\n      -   - 61.88723830032\n          - 56.36412497327\n          - 19.44285900547\n      -   - 112.7111318224\n          - 19.44285900547\n          - 1.958413872367\n  -   -   - -3856.411663325\n          - -1744.213523774\n          - 2262.533558079\n      -   - -1744.213523774\n          - -2684.929962548\n          - -210.0096145524\n      -   - 2262.533558079\n          - -210.0096145524\n          - -71.22779112153\n  -   -   - -10569.10299644\n          - -4682.769847703\n          - 3951.415969003\n      -   - -4682.769847703\n          - -7035.132202115\n          - -875.1223560398\n      -   - 3951.415969003\n          - -875.1223560398\n          - -122.0013610248\n  -   -   - 14481.06389629\n          - 6304.851879955\n          - -2851.437438832\n      -   - 6304.851879955\n          - 9273.533330841\n          - 1517.67089317\n      -   - -2851.437438832\n          - 1517.67089317\n          - 32.41624891304\n  -   -   - 4252.951031935\n          - 1848.277580659\n          - -170.2845897301\n      -   - 1848.277580659\n          - 2612.860235584\n          - 805.8419376052\n      -   - -170.2845897301\n          - 805.8419376052\n          - -37.20600626185\n  -   -   - 2432.916206823\n          - 1032.638149205\n          - 
257.4061444594\n      -   - 1032.638149205\n          - 1449.487149189\n          - 399.2375243902\n      -   - 257.4061444594\n          - 399.2375243902\n          - -48.15069583689\n  -   -   - 1240.4084327\n          - 512.7592742227\n          - 475.6238125956\n      -   - 512.7592742227\n          - 690.0567114374\n          - 253.7250568021\n      -   - 475.6238125956\n          - 253.7250568021\n          - -57.22030396345\n  -   -   - 459.1360054272\n          - 176.8440075451\n          - 527.6634588185\n      -   - 176.8440075451\n          - 209.2614271428\n          - 151.24310776\n      -   - 527.6634588185\n          - 151.24310776\n          - -67.51764137812\n  -   -   - -28.40757635067\n          - -31.79743224237\n          - 550.3578108482\n      -   - -31.79743224237\n          - -84.07371396961\n          - 84.22388465059\n      -   - 550.3578108482\n          - 84.22388465059\n          - -92.16014545357\n  -   -   - 459.1360054272\n          - 176.8440075451\n          - 527.6634588185\n      -   - 176.8440075451\n          - 209.2614271428\n          - 151.24310776\n      -   - 527.6634588185\n          - 151.24310776\n          - -67.51764137812\n  -   -   - -28.40757635067\n          - -31.79743224237\n          - 550.3578108482\n      -   - -31.79743224237\n          - -84.07371396961\n          - 84.22388465059\n      -   - 550.3578108482\n          - 84.22388465059\n          - -92.16014545357\n  -   -   - -2045.110581511\n          - -913.7367115119\n          - 1103.831669941\n      -   - -913.7367115119\n          - -1367.741368628\n          - -163.7437421998\n      -   - 1103.831669941\n          - -163.7437421998\n          - -316.398543429\n  -   -   - 1219.218783147\n          - 525.7884332597\n          - -29.15120474328\n      -   - 525.7884332597\n          - 767.5344275996\n          - 236.8486988899\n      -   - -29.15120474328\n          - 236.8486988899\n          - 88.99129419806\n  -   -   - 662.0714264794\n          - 
287.5911592069\n          - 90.92844099577\n      -   - 287.5911592069\n          - 454.4431485291\n          - 203.8681218118\n      -   - 90.92844099577\n          - 203.8681218118\n          - 25.01577261847\n  -   -   - -1377.243922834\n          - -795.4386681958\n          - 747.122472413\n      -   - -795.4386681958\n          - -2497.250719358\n          - -1840.429221121\n      -   - 747.122472413\n          - -1840.429221121\n          - 63.15352494948\n  -   -   - 217.522078241\n          - 79.32830419073\n          - 153.0067332807\n      -   - 79.32830419073\n          - 39.05322569843\n          - -15.05847653376\n      -   - 153.0067332807\n          - -15.05847653376\n          - 8.195712590102\n  -   -   - 192.1674291662\n          - 73.24546567179\n          - 128.9418592581\n      -   - 73.24546567179\n          - 60.8870259716\n          - 14.10438172489\n      -   - 128.9418592581\n          - 14.10438172489\n          - 4.023771621814\n  -   -   - 160.9847149983\n          - 61.88723830032\n          - 112.7111318224\n      -   - 61.88723830032\n          - 56.36412497327\n          - 19.44285900547\n      -   - 112.7111318224\n          - 19.44285900547\n          - 1.958413872367\n  -   -   - -3856.411663325\n          - -1744.213523774\n          - 2262.533558079\n      -   - -1744.213523774\n          - -2684.929962548\n          - -210.0096145524\n      -   - 2262.533558079\n          - -210.0096145524\n          - -71.22779112153\n  -   -   - -10569.10299644\n          - -4682.769847703\n          - 3951.415969003\n      -   - -4682.769847703\n          - -7035.132202115\n          - -875.1223560398\n      -   - 3951.415969003\n          - -875.1223560398\n          - -122.0013610248\n  -   -   - 14481.06389629\n          - 6304.851879955\n          - -2851.437438832\n      -   - 6304.851879955\n          - 9273.533330841\n          - 1517.67089317\n      -   - -2851.437438832\n          - 1517.67089317\n          - 32.41624891304\n  -   
-   - 4252.951031935\n          - 1848.277580659\n          - -170.2845897301\n      -   - 1848.277580659\n          - 2612.860235584\n          - 805.8419376052\n      -   - -170.2845897301\n          - 805.8419376052\n          - -37.20600626185\n  -   -   - 2432.916206823\n          - 1032.638149205\n          - 257.4061444594\n      -   - 1032.638149205\n          - 1449.487149189\n          - 399.2375243902\n      -   - 257.4061444594\n          - 399.2375243902\n          - -48.15069583689\n  -   -   - 1240.4084327\n          - 512.7592742227\n          - 475.6238125956\n      -   - 512.7592742227\n          - 690.0567114374\n          - 253.7250568021\n      -   - 475.6238125956\n          - 253.7250568021\n          - -57.22030396345\n  -   -   - 459.1360054272\n          - 176.8440075451\n          - 527.6634588185\n      -   - 176.8440075451\n          - 209.2614271428\n          - 151.24310776\n      -   - 527.6634588185\n          - 151.24310776\n          - -67.51764137812\n  -   -   - -28.40757635067\n          - -31.79743224237\n          - 550.3578108482\n      -   - -31.79743224237\n          - -84.07371396961\n          - 84.22388465059\n      -   - 550.3578108482\n          - 84.22388465059\n          - -92.16014545357\n  -   -   - 459.1360054272\n          - 176.8440075451\n          - 527.6634588185\n      -   - 176.8440075451\n          - 209.2614271428\n          - 151.24310776\n      -   - 527.6634588185\n          - 151.24310776\n          - -67.51764137812\n  -   -   - -28.40757635067\n          - -31.79743224237\n          - 550.3578108482\n      -   - -31.79743224237\n          - -84.07371396961\n          - 84.22388465059\n      -   - 550.3578108482\n          - 84.22388465059\n          - -92.16014545357\n  -   -   - -2045.110581511\n          - -913.7367115119\n          - 1103.831669941\n      -   - -913.7367115119\n          - -1367.741368628\n          - -163.7437421998\n      -   - 1103.831669941\n          - -163.7437421998\n     
     - -316.398543429\n  -   -   - 1219.218783147\n          - 525.7884332597\n          - -29.15120474328\n      -   - 525.7884332597\n          - 767.5344275996\n          - 236.8486988899\n      -   - -29.15120474328\n          - 236.8486988899\n          - 88.99129419806\n  -   -   - 662.0714264794\n          - 287.5911592069\n          - 90.92844099577\n      -   - 287.5911592069\n          - 454.4431485291\n          - 203.8681218118\n      -   - 90.92844099577\n          - 203.8681218118\n          - 25.01577261847\n  -   -   - -1377.243922834\n          - -795.4386681958\n          - 747.122472413\n      -   - -795.4386681958\n          - -2497.250719358\n          - -1840.429221121\n      -   - 747.122472413\n          - -1840.429221121\n          - 63.15352494948\n  -   -   - 217.522078241\n          - 79.32830419073\n          - 153.0067332807\n      -   - 79.32830419073\n          - 39.05322569843\n          - -15.05847653376\n      -   - 153.0067332807\n          - -15.05847653376\n          - 8.195712590102\n  -   -   - 192.1674291662\n          - 73.24546567179\n          - 128.9418592581\n      -   - 73.24546567179\n          - 60.8870259716\n          - 14.10438172489\n      -   - 128.9418592581\n          - 14.10438172489\n          - 4.023771621814\n  -   -   - 160.9847149983\n          - 61.88723830032\n          - 112.7111318224\n      -   - 61.88723830032\n          - 56.36412497327\n          - 19.44285900547\n      -   - 112.7111318224\n          - 19.44285900547\n          - 1.958413872367\n  -   -   - -3856.411663325\n          - -1744.213523774\n          - 2262.533558079\n      -   - -1744.213523774\n          - -2684.929962548\n          - -210.0096145524\n      -   - 2262.533558079\n          - -210.0096145524\n          - -71.22779112153\n  -   -   - -10569.10299644\n          - -4682.769847703\n          - 3951.415969003\n      -   - -4682.769847703\n          - -7035.132202115\n          - -875.1223560398\n      -   - 
3951.415969003\n          - -875.1223560398\n          - -122.0013610248\n  -   -   - 14481.06389629\n          - 6304.851879955\n          - -2851.437438832\n      -   - 6304.851879955\n          - 9273.533330841\n          - 1517.67089317\n      -   - -2851.437438832\n          - 1517.67089317\n          - 32.41624891304\n  -   -   - 4252.951031935\n          - 1848.277580659\n          - -170.2845897301\n      -   - 1848.277580659\n          - 2612.860235584\n          - 805.8419376052\n      -   - -170.2845897301\n          - 805.8419376052\n          - -37.20600626185\n  -   -   - 2432.916206823\n          - 1032.638149205\n          - 257.4061444594\n      -   - 1032.638149205\n          - 1449.487149189\n          - 399.2375243902\n      -   - 257.4061444594\n          - 399.2375243902\n          - -48.15069583689\n  -   -   - 1240.4084327\n          - 512.7592742227\n          - 475.6238125956\n      -   - 512.7592742227\n          - 690.0567114374\n          - 253.7250568021\n      -   - 475.6238125956\n          - 253.7250568021\n          - -57.22030396345\n  -   -   - 459.1360054272\n          - 176.8440075451\n          - 527.6634588185\n      -   - 176.8440075451\n          - 209.2614271428\n          - 151.24310776\n      -   - 527.6634588185\n          - 151.24310776\n          - -67.51764137812\n  -   -   - -28.40757635067\n          - -31.79743224237\n          - 550.3578108482\n      -   - -31.79743224237\n          - -84.07371396961\n          - 84.22388465059\n      -   - 550.3578108482\n          - 84.22388465059\n          - -92.16014545357\n  -   -   - 459.1360054272\n          - 176.8440075451\n          - 527.6634588185\n      -   - 176.8440075451\n          - 209.2614271428\n          - 151.24310776\n      -   - 527.6634588185\n          - 151.24310776\n          - -67.51764137812\n  -   -   - -28.40757635067\n          - -31.79743224237\n          - 550.3578108482\n      -   - -31.79743224237\n          - -84.07371396961\n          - 
84.22388465059\n      -   - 550.3578108482\n          - 84.22388465059\n          - -92.16014545357\n  -   -   - -2045.110581511\n          - -913.7367115119\n          - 1103.831669941\n      -   - -913.7367115119\n          - -1367.741368628\n          - -163.7437421998\n      -   - 1103.831669941\n          - -163.7437421998\n          - -316.398543429\n  -   -   - 1219.218783147\n          - 525.7884332597\n          - -29.15120474328\n      -   - 525.7884332597\n          - 767.5344275996\n          - 236.8486988899\n      -   - -29.15120474328\n          - 236.8486988899\n          - 88.99129419806\n  -   -   - 662.0714264794\n          - 287.5911592069\n          - 90.92844099577\n      -   - 287.5911592069\n          - 454.4431485291\n          - 203.8681218118\n      -   - 90.92844099577\n          - 203.8681218118\n          - 25.01577261847\n  -   -   - -1377.243922834\n          - -795.4386681958\n          - 747.122472413\n      -   - -795.4386681958\n          - -2497.250719358\n          - -1840.429221121\n      -   - 747.122472413\n          - -1840.429221121\n          - 63.15352494948\n  -   -   - 217.522078241\n          - 79.32830419073\n          - 153.0067332807\n      -   - 79.32830419073\n          - 39.05322569843\n          - -15.05847653376\n      -   - 153.0067332807\n          - -15.05847653376\n          - 8.195712590102\n  -   -   - 192.1674291662\n          - 73.24546567179\n          - 128.9418592581\n      -   - 73.24546567179\n          - 60.8870259716\n          - 14.10438172489\n      -   - 128.9418592581\n          - 14.10438172489\n          - 4.023771621814\n  -   -   - 160.9847149983\n          - 61.88723830032\n          - 112.7111318224\n      -   - 61.88723830032\n          - 56.36412497327\n          - 19.44285900547\n      -   - 112.7111318224\n          - 19.44285900547\n          - 1.958413872367\n  -   -   - -3856.411663325\n          - -1744.213523774\n          - 2262.533558079\n      -   - -1744.213523774\n        
  - -2684.929962548\n          - -210.0096145524\n      -   - 2262.533558079\n          - -210.0096145524\n          - -71.22779112153\n  -   -   - 14481.06389629\n          - 6304.851879955\n          - -2851.437438832\n      -   - 6304.851879955\n          - 9273.533330841\n          - 1517.67089317\n      -   - -2851.437438832\n          - 1517.67089317\n          - 32.41624891305\n  -   -   - 754.037862043\n          - 303.007024288\n          - 520.6337570466\n      -   - 303.007024288\n          - 387.992522728\n          - 191.4946509941\n      -   - 520.6337570466\n          - 191.4946509941\n          - -62.14538340589\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 147.2292962915\n          - 56.67228284198\n          - 105.718994092\n      -   - 56.67228284198\n          - 52.62002215236\n          - 19.90293970874\n      -   - 105.718994092\n          - 19.90293970874\n          - 1.297978879663\n  -   -   - -3856.411663325\n          - -1744.213523774\n          - 2262.533558079\n      -   - -1744.213523774\n          - -2684.929962548\n          - -210.0096145524\n      -   - 2262.533558079\n          - -210.0096145524\n          - -71.22779112153\n  -   -   - 14481.06389629\n          - 6304.851879955\n          - -2851.437438832\n      -   
- 6304.851879955\n          - 9273.533330841\n          - 1517.67089317\n      -   - -2851.437438832\n          - 1517.67089317\n          - 32.41624891305\n  -   -   - 754.037862043\n          - 303.007024288\n          - 520.6337570466\n      -   - 303.007024288\n          - 387.992522728\n          - 191.4946509941\n      -   - 520.6337570466\n          - 191.4946509941\n          - -62.14538340589\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 147.2292962915\n          - 56.67228284198\n          - 105.718994092\n      -   - 56.67228284198\n          - 52.62002215236\n          - 19.90293970874\n      -   - 105.718994092\n          - 19.90293970874\n          - 1.297978879664\n  -   -   - -3856.411663325\n          - -1744.213523774\n          - 2262.533558079\n      -   - -1744.213523774\n          - -2684.929962548\n          - -210.0096145524\n      -   - 2262.533558079\n          - -210.0096145524\n          - -71.22779112153\n  -   -   - 14481.06389629\n          - 6304.851879955\n          - -2851.437438832\n      -   - 6304.851879955\n          - 9273.533330841\n          - 1517.67089317\n      -   - -2851.437438832\n          - 1517.67089317\n          - 32.41624891304\n  -   -   - 754.037862043\n          - 303.007024288\n          - 
520.6337570466\n      -   - 303.007024288\n          - 387.992522728\n          - 191.4946509941\n      -   - 520.6337570466\n          - 191.4946509941\n          - -62.14538340589\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 147.2292962915\n          - 56.67228284198\n          - 105.718994092\n      -   - 56.67228284198\n          - 52.62002215236\n          - 19.90293970874\n      -   - 105.718994092\n          - 19.90293970874\n          - 1.297978879663\n  -   -   - -3856.411663325\n          - -1744.213523774\n          - 2262.533558079\n      -   - -1744.213523774\n          - -2684.929962548\n          - -210.0096145524\n      -   - 2262.533558079\n          - -210.0096145524\n          - -71.22779112153\n  -   -   - 14481.06389629\n          - 6304.851879955\n          - -2851.437438832\n      -   - 6304.851879955\n          - 9273.533330841\n          - 1517.67089317\n      -   - -2851.437438832\n          - 1517.67089317\n          - 32.41624891305\n  -   -   - 754.037862043\n          - 303.007024288\n          - 520.6337570466\n      -   - 303.007024288\n          - 387.992522728\n          - 191.4946509941\n      -   - 520.6337570466\n          - 191.4946509941\n          - -62.14538340589\n  -   -   - 3350.123265148\n          - 
1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 147.2292962915\n          - 56.67228284198\n          - 105.718994092\n      -   - 56.67228284198\n          - 52.62002215236\n          - 19.90293970874\n      -   - 105.718994092\n          - 19.90293970874\n          - 1.297978879663\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 147.2292962915\n          - 56.67228284198\n          - 105.718994092\n      -   - 56.67228284198\n          - 52.62002215236\n          - 19.90293970874\n      -   - 105.718994092\n          - 19.90293970874\n          - 1.297978879663\n  -   -   - -3856.411663325\n          - -1744.213523774\n          - 2262.533558079\n      -   - -1744.213523774\n          - -2684.929962548\n          - -210.0096145524\n      -   - 2262.533558079\n          - -210.0096145524\n          - -71.22779112153\n  -   -   - 14481.06389629\n          - 6304.851879955\n          - -2851.437438832\n      -   - 6304.851879955\n          - 9273.533330841\n          - 1517.67089317\n      -   - -2851.437438832\n          - 1517.67089317\n          - 32.41624891304\n  -   -   - 2432.916206823\n          - 1032.638149205\n          - 257.4061444594\n      -   - 1032.638149205\n          - 1449.487149189\n          - 399.2375243902\n      -   - 257.4061444594\n          - 399.2375243902\n          - -48.15069583689\n  -   -   - 
754.037862043\n          - 303.007024288\n          - 520.6337570466\n      -   - 303.007024288\n          - 387.992522728\n          - 191.4946509941\n      -   - 520.6337570466\n          - 191.4946509941\n          - -62.14538340589\n  -   -   - 224.0053660568\n          - 76.49830322054\n          - 530.6144385787\n      -   - 76.49830322054\n          - 68.48505242171\n          - 118.4056191032\n      -   - 530.6144385787\n          - 118.4056191032\n          - -75.99273950177\n  -   -   - 224.0053660568\n          - 76.49830322054\n          - 530.6144385787\n      -   - 76.49830322054\n          - 68.48505242171\n          - 118.4056191032\n      -   - 530.6144385787\n          - 118.4056191032\n          - -75.99273950177\n  -   -   - -441.5774716513\n          - -211.0258671001\n          - 630.8815307116\n      -   - -211.0258671001\n          - -340.9653527663\n          - 31.64858923452\n      -   - 630.8815307116\n          - 31.64858923452\n          - -131.2299615288\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 643.1868519507\n          - 289.6433117825\n          - 69.96405555076\n      -   - 289.6433117825\n          - 527.631431758\n          - 287.5422429696\n      -   - 69.96405555076\n          - 287.5422429696\n          - 12.86544852284\n  -   -   - 207.8475807462\n          - 78.25234656542\n          - 139.1055791254\n      -   - 78.25234656542\n          - 57.38466331417\n          - 5.647653214982\n      -   - 139.1055791254\n          - 5.647653214982\n          - 5.691781115008\n  -   -   - 147.2292962915\n          - 56.67228284198\n          - 105.718994092\n      -   - 56.67228284198\n          - 52.62002215236\n          - 19.90293970874\n      -   - 105.718994092\n          - 19.90293970874\n          - 
1.297978879663\n  -   -   - -3856.411663325\n          - -1744.213523774\n          - 2262.533558079\n      -   - -1744.213523774\n          - -2684.929962548\n          - -210.0096145524\n      -   - 2262.533558079\n          - -210.0096145524\n          - -71.22779112153\n  -   -   - 14481.06389629\n          - 6304.851879955\n          - -2851.437438832\n      -   - 6304.851879955\n          - 9273.533330841\n          - 1517.67089317\n      -   - -2851.437438832\n          - 1517.67089317\n          - 32.41624891304\n  -   -   - 2432.916206823\n          - 1032.638149205\n          - 257.4061444594\n      -   - 1032.638149205\n          - 1449.487149189\n          - 399.2375243902\n      -   - 257.4061444594\n          - 399.2375243902\n          - -48.15069583689\n  -   -   - 754.037862043\n          - 303.007024288\n          - 520.6337570466\n      -   - 303.007024288\n          - 387.992522728\n          - 191.4946509941\n      -   - 520.6337570466\n          - 191.4946509941\n          - -62.14538340589\n  -   -   - 224.0053660568\n          - 76.49830322054\n          - 530.6144385787\n      -   - 76.49830322054\n          - 68.48505242171\n          - 118.4056191032\n      -   - 530.6144385787\n          - 118.4056191032\n          - -75.99273950177\n  -   -   - 224.0053660568\n          - 76.49830322054\n          - 530.6144385787\n      -   - 76.49830322054\n          - 68.48505242171\n          - 118.4056191032\n      -   - 530.6144385787\n          - 118.4056191032\n          - -75.99273950177\n  -   -   - -441.5774716513\n          - -211.0258671001\n          - 630.8815307116\n      -   - -211.0258671001\n          - -340.9653527663\n          - 31.64858923452\n      -   - 630.8815307116\n          - 31.64858923452\n          - -131.2299615288\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          
- 491.0039053136\n          - 341.2992596768\n  -   -   - 643.1868519507\n          - 289.6433117825\n          - 69.96405555076\n      -   - 289.6433117825\n          - 527.631431758\n          - 287.5422429696\n      -   - 69.96405555076\n          - 287.5422429696\n          - 12.86544852284\n  -   -   - 207.8475807462\n          - 78.25234656542\n          - 139.1055791254\n      -   - 78.25234656542\n          - 57.38466331417\n          - 5.647653214982\n      -   - 139.1055791254\n          - 5.647653214982\n          - 5.691781115008\n  -   -   - 147.2292962915\n          - 56.67228284198\n          - 105.718994092\n      -   - 56.67228284198\n          - 52.62002215236\n          - 19.90293970874\n      -   - 105.718994092\n          - 19.90293970874\n          - 1.297978879664\n  -   -   - -3856.411663325\n          - -1744.213523774\n          - 2262.533558079\n      -   - -1744.213523774\n          - -2684.929962548\n          - -210.0096145524\n      -   - 2262.533558079\n          - -210.0096145524\n          - -71.22779112153\n  -   -   - 14481.06389629\n          - 6304.851879955\n          - -2851.437438832\n      -   - 6304.851879955\n          - 9273.533330841\n          - 1517.67089317\n      -   - -2851.437438832\n          - 1517.67089317\n          - 32.41624891304\n  -   -   - 2432.916206823\n          - 1032.638149205\n          - 257.4061444594\n      -   - 1032.638149205\n          - 1449.487149189\n          - 399.2375243902\n      -   - 257.4061444594\n          - 399.2375243902\n          - -48.15069583689\n  -   -   - 754.037862043\n          - 303.007024288\n          - 520.6337570466\n      -   - 303.007024288\n          - 387.992522728\n          - 191.4946509941\n      -   - 520.6337570466\n          - 191.4946509941\n          - -62.14538340589\n  -   -   - 224.0053660568\n          - 76.49830322054\n          - 530.6144385787\n      -   - 76.49830322054\n          - 68.48505242171\n          - 118.4056191032\n      -   - 
530.6144385787\n          - 118.4056191032\n          - -75.99273950177\n  -   -   - 224.0053660568\n          - 76.49830322054\n          - 530.6144385787\n      -   - 76.49830322054\n          - 68.48505242171\n          - 118.4056191032\n      -   - 530.6144385787\n          - 118.4056191032\n          - -75.99273950177\n  -   -   - -441.5774716513\n          - -211.0258671001\n          - 630.8815307116\n      -   - -211.0258671001\n          - -340.9653527663\n          - 31.64858923452\n      -   - 630.8815307116\n          - 31.64858923452\n          - -131.2299615288\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 643.1868519507\n          - 289.6433117825\n          - 69.96405555076\n      -   - 289.6433117825\n          - 527.631431758\n          - 287.5422429696\n      -   - 69.96405555076\n          - 287.5422429696\n          - 12.86544852284\n  -   -   - 207.8475807462\n          - 78.25234656542\n          - 139.1055791254\n      -   - 78.25234656542\n          - 57.38466331417\n          - 5.647653214982\n      -   - 139.1055791254\n          - 5.647653214982\n          - 5.691781115008\n  -   -   - 147.2292962915\n          - 56.67228284198\n          - 105.718994092\n      -   - 56.67228284198\n          - 52.62002215236\n          - 19.90293970874\n      -   - 105.718994092\n          - 19.90293970874\n          - 1.297978879663\n  -   -   - -3856.411663325\n          - -1744.213523774\n          - 2262.533558079\n      -   - -1744.213523774\n          - -2684.929962548\n          - -210.0096145524\n      -   - 2262.533558079\n          - -210.0096145524\n          - -71.22779112153\n  -   -   - -10569.10299644\n          - -4682.769847703\n          - 3951.415969003\n      -   - -4682.769847703\n          - -7035.132202115\n        
  - -875.1223560398\n      -   - 3951.415969003\n          - -875.1223560398\n          - -122.0013610248\n  -   -   - 14481.06389629\n          - 6304.851879955\n          - -2851.437438832\n      -   - 6304.851879955\n          - 9273.533330841\n          - 1517.67089317\n      -   - -2851.437438832\n          - 1517.67089317\n          - 32.41624891304\n  -   -   - 4252.951031935\n          - 1848.277580659\n          - -170.2845897301\n      -   - 1848.277580659\n          - 2612.860235584\n          - 805.8419376052\n      -   - -170.2845897301\n          - 805.8419376052\n          - -37.20600626185\n  -   -   - 2432.916206823\n          - 1032.638149205\n          - 257.4061444594\n      -   - 1032.638149205\n          - 1449.487149189\n          - 399.2375243902\n      -   - 257.4061444594\n          - 399.2375243902\n          - -48.15069583689\n  -   -   - 1240.4084327\n          - 512.7592742227\n          - 475.6238125956\n      -   - 512.7592742227\n          - 690.0567114374\n          - 253.7250568021\n      -   - 475.6238125956\n          - 253.7250568021\n          - -57.22030396345\n  -   -   - -28.40757635067\n          - -31.79743224237\n          - 550.3578108482\n      -   - -31.79743224237\n          - -84.07371396961\n          - 84.22388465059\n      -   - 550.3578108482\n          - 84.22388465059\n          - -92.16014545357\n  -   -   - -28.40757635067\n          - -31.79743224237\n          - 550.3578108482\n      -   - -31.79743224237\n          - -84.07371396961\n          - 84.22388465059\n      -   - 550.3578108482\n          - 84.22388465059\n          - -92.16014545357\n  -   -   - -2045.110581511\n          - -913.7367115119\n          - 1103.831669941\n      -   - -913.7367115119\n          - -1367.741368628\n          - -163.7437421998\n      -   - 1103.831669941\n          - -163.7437421998\n          - -316.398543429\n  -   -   - 1219.218783147\n          - 525.7884332597\n          - -29.15120474328\n      -   - 
525.7884332597\n          - 767.5344275996\n          - 236.8486988899\n      -   - -29.15120474328\n          - 236.8486988899\n          - 88.99129419806\n  -   -   - 662.0714264794\n          - 287.5911592069\n          - 90.92844099577\n      -   - 287.5911592069\n          - 454.4431485291\n          - 203.8681218118\n      -   - 90.92844099577\n          - 203.8681218118\n          - 25.01577261847\n  -   -   - -1377.243922834\n          - -795.4386681958\n          - 747.122472413\n      -   - -795.4386681958\n          - -2497.250719358\n          - -1840.429221121\n      -   - 747.122472413\n          - -1840.429221121\n          - 63.15352494948\n  -   -   - 217.522078241\n          - 79.32830419073\n          - 153.0067332807\n      -   - 79.32830419073\n          - 39.05322569843\n          - -15.05847653376\n      -   - 153.0067332807\n          - -15.05847653376\n          - 8.195712590102\n  -   -   - 192.1674291662\n          - 73.24546567179\n          - 128.9418592581\n      -   - 73.24546567179\n          - 60.8870259716\n          - 14.10438172489\n      -   - 128.9418592581\n          - 14.10438172489\n          - 4.023771621814\n  -   -   - 160.9847149983\n          - 61.88723830032\n          - 112.7111318224\n      -   - 61.88723830032\n          - 56.36412497327\n          - 19.44285900547\n      -   - 112.7111318224\n          - 19.44285900547\n          - 1.958413872367\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - 
-683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - -3856.411663325\n          - -1744.213523774\n          - 2262.533558079\n      -   - -1744.213523774\n          - -2684.929962548\n          - -210.0096145524\n      -   - 2262.533558079\n          - -210.0096145524\n          - -71.22779112153\n  -   -   - 14481.06389629\n          - 6304.851879955\n          - -2851.437438832\n      -   - 6304.851879955\n          - 9273.533330841\n          - 1517.67089317\n      -   - -2851.437438832\n          - 1517.67089317\n          - 32.41624891305\n  -   -   - 2432.916206823\n          - 1032.638149205\n          - 257.4061444594\n      -   - 1032.638149205\n          - 1449.487149189\n          - 399.2375243902\n      -   - 257.4061444594\n          - 399.2375243902\n          - -48.15069583689\n  -   -   - 754.037862043\n          - 303.007024288\n          - 520.6337570466\n      -   - 303.007024288\n          - 387.992522728\n          - 191.4946509941\n      -   - 520.6337570466\n          - 191.4946509941\n          - -62.14538340589\n  -   -   - 224.0053660568\n          - 
76.49830322054\n          - 530.6144385787\n      -   - 76.49830322054\n          - 68.48505242171\n          - 118.4056191032\n      -   - 530.6144385787\n          - 118.4056191032\n          - -75.99273950177\n  -   -   - -441.5774716513\n          - -211.0258671001\n          - 630.8815307116\n      -   - -211.0258671001\n          - -340.9653527663\n          - 31.64858923452\n      -   - 630.8815307116\n          - 31.64858923452\n          - -131.2299615288\n  -   -   - 224.0053660568\n          - 76.49830322054\n          - 530.6144385787\n      -   - 76.49830322054\n          - 68.48505242171\n          - 118.4056191032\n      -   - 530.6144385787\n          - 118.4056191032\n          - -75.99273950177\n  -   -   - -441.5774716513\n          - -211.0258671001\n          - 630.8815307116\n      -   - -211.0258671001\n          - -340.9653527663\n          - 31.64858923452\n      -   - 630.8815307116\n          - 31.64858923452\n          - -131.2299615288\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 643.1868519507\n          - 289.6433117825\n          - 69.96405555076\n      -   - 289.6433117825\n          - 527.631431758\n          - 287.5422429696\n      -   - 69.96405555076\n          - 287.5422429696\n          - 12.86544852284\n  -   -   - 207.8475807462\n          - 78.25234656542\n          - 139.1055791254\n      -   - 78.25234656542\n          - 57.38466331417\n          - 5.647653214982\n      -   - 139.1055791254\n          - 5.647653214982\n          - 5.691781115008\n  -   -   - 147.2292962915\n          - 56.67228284198\n          - 105.718994092\n      -   - 56.67228284198\n          - 52.62002215236\n          - 19.90293970874\n      -   - 105.718994092\n          - 19.90293970874\n          - 1.297978879664\n  -   -   - 
-3856.411663325\n          - -1744.213523774\n          - 2262.533558079\n      -   - -1744.213523774\n          - -2684.929962548\n          - -210.0096145524\n      -   - 2262.533558079\n          - -210.0096145524\n          - -71.22779112152\n  -   -   - 14481.06389629\n          - 6304.851879955\n          - -2851.437438832\n      -   - 6304.851879955\n          - 9273.533330841\n          - 1517.67089317\n      -   - -2851.437438832\n          - 1517.67089317\n          - 32.41624891304\n  -   -   - 2432.916206823\n          - 1032.638149205\n          - 257.4061444594\n      -   - 1032.638149205\n          - 1449.487149189\n          - 399.2375243902\n      -   - 257.4061444594\n          - 399.2375243902\n          - -48.15069583689\n  -   -   - 754.037862043\n          - 303.007024288\n          - 520.6337570466\n      -   - 303.007024288\n          - 387.992522728\n          - 191.4946509941\n      -   - 520.6337570466\n          - 191.4946509941\n          - -62.14538340589\n  -   -   - 224.0053660568\n          - 76.49830322054\n          - 530.6144385787\n      -   - 76.49830322054\n          - 68.48505242171\n          - 118.4056191032\n      -   - 530.6144385787\n          - 118.4056191032\n          - -75.99273950177\n  -   -   - -441.5774716513\n          - -211.0258671001\n          - 630.8815307116\n      -   - -211.0258671001\n          - -340.9653527663\n          - 31.64858923452\n      -   - 630.8815307116\n          - 31.64858923452\n          - -131.2299615288\n  -   -   - 224.0053660568\n          - 76.49830322054\n          - 530.6144385787\n      -   - 76.49830322054\n          - 68.48505242171\n          - 118.4056191032\n      -   - 530.6144385787\n          - 118.4056191032\n          - -75.99273950177\n  -   -   - -441.5774716513\n          - -211.0258671001\n          - 630.8815307116\n      -   - -211.0258671001\n          - -340.9653527663\n          - 31.64858923452\n      -   - 630.8815307116\n          - 31.64858923452\n        
  - -131.2299615288\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 643.1868519507\n          - 289.6433117825\n          - 69.96405555076\n      -   - 289.6433117825\n          - 527.631431758\n          - 287.5422429696\n      -   - 69.96405555076\n          - 287.5422429696\n          - 12.86544852284\n  -   -   - 207.8475807462\n          - 78.25234656542\n          - 139.1055791254\n      -   - 78.25234656542\n          - 57.38466331417\n          - 5.647653214982\n      -   - 139.1055791254\n          - 5.647653214982\n          - 5.691781115008\n  -   -   - 147.2292962915\n          - 56.67228284198\n          - 105.718994092\n      -   - 56.67228284198\n          - 52.62002215236\n          - 19.90293970874\n      -   - 105.718994092\n          - 19.90293970874\n          - 1.297978879663\n  -   -   - -3856.411663325\n          - -1744.213523774\n          - 2262.533558079\n      -   - -1744.213523774\n          - -2684.929962548\n          - -210.0096145524\n      -   - 2262.533558079\n          - -210.0096145524\n          - -71.22779112153\n  -   -   - 14481.06389629\n          - 6304.851879955\n          - -2851.437438832\n      -   - 6304.851879955\n          - 9273.533330841\n          - 1517.67089317\n      -   - -2851.437438832\n          - 1517.67089317\n          - 32.41624891305\n  -   -   - 2432.916206823\n          - 1032.638149205\n          - 257.4061444594\n      -   - 1032.638149205\n          - 1449.487149189\n          - 399.2375243902\n      -   - 257.4061444594\n          - 399.2375243902\n          - -48.15069583689\n  -   -   - 754.037862043\n          - 303.007024288\n          - 520.6337570466\n      -   - 303.007024288\n          - 387.992522728\n          - 191.4946509941\n      -   - 520.6337570466\n          - 
191.4946509941\n          - -62.14538340589\n  -   -   - 224.0053660568\n          - 76.49830322054\n          - 530.6144385787\n      -   - 76.49830322054\n          - 68.48505242171\n          - 118.4056191032\n      -   - 530.6144385787\n          - 118.4056191032\n          - -75.99273950177\n  -   -   - -441.5774716513\n          - -211.0258671001\n          - 630.8815307116\n      -   - -211.0258671001\n          - -340.9653527663\n          - 31.64858923452\n      -   - 630.8815307116\n          - 31.64858923452\n          - -131.2299615288\n  -   -   - 224.0053660568\n          - 76.49830322054\n          - 530.6144385787\n      -   - 76.49830322054\n          - 68.48505242171\n          - 118.4056191032\n      -   - 530.6144385787\n          - 118.4056191032\n          - -75.99273950177\n  -   -   - -441.5774716513\n          - -211.0258671001\n          - 630.8815307116\n      -   - -211.0258671001\n          - -340.9653527663\n          - 31.64858923452\n      -   - 630.8815307116\n          - 31.64858923452\n          - -131.2299615288\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 643.1868519507\n          - 289.6433117825\n          - 69.96405555076\n      -   - 289.6433117825\n          - 527.631431758\n          - 287.5422429696\n      -   - 69.96405555076\n          - 287.5422429696\n          - 12.86544852284\n  -   -   - 207.8475807462\n          - 78.25234656542\n          - 139.1055791254\n      -   - 78.25234656542\n          - 57.38466331417\n          - 5.647653214982\n      -   - 139.1055791254\n          - 5.647653214982\n          - 5.691781115008\n  -   -   - 147.2292962915\n          - 56.67228284198\n          - 105.718994092\n      -   - 56.67228284198\n          - 52.62002215236\n          - 19.90293970874\n      -   - 
105.718994092\n          - 19.90293970874\n          - 1.297978879664\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - -3856.411663325\n          - -1744.213523774\n          - 2262.533558079\n      -   - -1744.213523774\n          - -2684.929962548\n          - -210.0096145524\n      -   - 2262.533558079\n          - -210.0096145524\n          - -71.22779112153\n  -   -   - 14481.06389629\n          - 6304.851879955\n          - -2851.437438832\n      -   - 6304.851879955\n          - 9273.533330841\n          - 1517.67089317\n      -   - -2851.437438832\n          - 1517.67089317\n          - 32.41624891305\n  -   -   - 754.037862043\n          - 303.007024288\n          - 520.6337570466\n      -   - 303.007024288\n          - 387.992522728\n          - 191.4946509941\n      -   - 520.6337570466\n          - 191.4946509941\n          - -62.14538340589\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 147.2292962915\n          - 56.67228284198\n          - 105.718994092\n      -   - 56.67228284198\n          - 52.62002215236\n          - 19.90293970874\n      -   - 105.718994092\n          - 19.90293970874\n          - 1.297978879663\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 147.2292962915\n          - 56.67228284198\n          - 105.718994092\n      -   - 56.67228284198\n          - 52.62002215236\n          - 
19.90293970874\n      -   - 105.718994092\n          - 19.90293970874\n          - 1.297978879664\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 459.1360054272\n          - 176.8440075451\n          - 527.6634588185\n      -   - 176.8440075451\n          - 209.2614271428\n          - 151.24310776\n      -   - 527.6634588185\n          - 151.24310776\n          - -67.51764137812\n  -   -   - 459.1360054272\n          - 176.8440075451\n          - 527.6634588185\n      -   - 176.8440075451\n          - 209.2614271428\n          - 151.24310776\n      -   - 527.6634588185\n          - 151.24310776\n          - -67.51764137812\n  -   -   - -3856.411663325\n          - -1744.213523774\n          - 2262.533558079\n      -   - -1744.213523774\n          - -2684.929962548\n          - -210.0096145524\n      -   - 2262.533558079\n          - -210.0096145524\n          - -71.22779112152\n  -   -   - 14481.06389629\n          - 6304.851879955\n          - -2851.437438832\n      -   - 6304.851879955\n          - 9273.533330841\n          - 1517.67089317\n      -   - -2851.437438832\n          - 1517.67089317\n          - 32.41624891304\n  -   -   - 459.1360054272\n          - 176.8440075451\n          - 527.6634588185\n      -   - 176.8440075451\n          - 
209.2614271428\n          - 151.24310776\n      -   - 527.6634588185\n          - 151.24310776\n          - -67.51764137812\n  -   -   - 459.1360054272\n          - 176.8440075451\n          - 527.6634588185\n      -   - 176.8440075451\n          - 209.2614271428\n          - 151.24310776\n      -   - 527.6634588185\n          - 151.24310776\n          - -67.51764137812\n  -   -   - -2045.110581511\n          - -913.7367115119\n          - 1103.831669941\n      -   - -913.7367115119\n          - -1367.741368628\n          - -163.7437421998\n      -   - 1103.831669941\n          - -163.7437421998\n          - -316.398543429\n  -   -   - 662.0714264794\n          - 287.5911592069\n          - 90.92844099577\n      -   - 287.5911592069\n          - 454.4431485291\n          - 203.8681218118\n      -   - 90.92844099577\n          - 203.8681218118\n          - 25.01577261847\n  -   -   - 459.1360054272\n          - 176.8440075451\n          - 527.6634588185\n      -   - 176.8440075451\n          - 209.2614271428\n          - 151.24310776\n      -   - 527.6634588185\n          - 151.24310776\n          - -67.51764137812\n  -   -   - 459.1360054272\n          - 176.8440075451\n          - 527.6634588185\n      -   - 176.8440075451\n          - 209.2614271428\n          - 151.24310776\n      -   - 527.6634588185\n          - 151.24310776\n          - -67.51764137812\n  -   -   - -3856.411663325\n          - -1744.213523774\n          - 2262.533558079\n      -   - -1744.213523774\n          - -2684.929962548\n          - -210.0096145524\n      -   - 2262.533558079\n          - -210.0096145524\n          - -71.22779112153\n  -   -   - 14481.06389629\n          - 6304.851879955\n          - -2851.437438832\n      -   - 6304.851879955\n          - 9273.533330841\n          - 1517.67089317\n      -   - -2851.437438832\n          - 1517.67089317\n          - 32.41624891305\n  -   -   - -2045.110581511\n          - -913.7367115119\n          - 1103.831669941\n      -   - 
-913.7367115119\n          - -1367.741368628\n          - -163.7437421998\n      -   - 1103.831669941\n          - -163.7437421998\n          - -316.398543429\n  -   -   - 662.0714264794\n          - 287.5911592069\n          - 90.92844099577\n      -   - 287.5911592069\n          - 454.4431485291\n          - 203.8681218118\n      -   - 90.92844099577\n          - 203.8681218118\n          - 25.01577261847\n  -   -   - 459.1360054272\n          - 176.8440075451\n          - 527.6634588185\n      -   - 176.8440075451\n          - 209.2614271428\n          - 151.24310776\n      -   - 527.6634588185\n          - 151.24310776\n          - -67.51764137812\n  -   -   - 459.1360054272\n          - 176.8440075451\n          - 527.6634588185\n      -   - 176.8440075451\n          - 209.2614271428\n          - 151.24310776\n      -   - 527.6634588185\n          - 151.24310776\n          - -67.51764137812\n"
  },
  {
    "path": "tests/optimagic/optimizers/_pounders/fixtures/update_main_with_new_accepted_x.yaml",
    "content": "---\nbest_x:\n  - 0.15\n  - 0.008\n  - 0.01\ndelta: 0.025\nlinear_terms:\n  - -19571.13290454\n  - -19823.82644072\n  - 14216.11542373\nlinear_terms_expected:\n  - -1.782609615475e-10\n  - -3.274180926383e-11\n  - 2.546585164964e-11\nmains:\n  - 21.53511643627\n  - 14.80453604351\n  - 6.548558251064\n  - 12.54188075473\n  - 9.282890198608\n  - 2.859555210712\n  - 0.9381817894678\n  - 0.2048532883114\n  - 0.8881817894678\n  - 0.3798532883114\n  - -0.9101956814319\n  - -1.36444138824\n  - -0.9351994446357\n  - -1.055070381505\n  - -1.111335532899\n  - -0.1703442432756\n  - 1.580641245921\n  - 19.23511643627\n  - 13.00453604351\n  - 13.94855825106\n  - 11.24188075473\n  - 6.182890198608\n  - -1.240444789288\n  - -0.8618182105322\n  - -1.995146711689\n  - -0.9868182105322\n  - -1.270146711689\n  - -1.135195681432\n  - -0.9144413882404\n  - -3.072699444636\n  - -1.317570381505\n  - -0.9238355328992\n  - 0.9546557567244\n  - -0.3318587540789\n  - 8.635116436265\n  - 15.10453604351\n  - 6.148558251063\n  - 4.841880754733\n  - 5.382890198608\n  - 2.059555210712\n  - -3.361818210532\n  - -2.995146711689\n  - -3.311818210532\n  - -2.395146711689\n  - -2.185195681432\n  - -2.63944138824\n  - -1.985199444636\n  - -1.880070381505\n  - -1.711335532899\n  - -1.407844243276\n  - -0.4818587540789\n  - 2.735116436265\n  - 3.404536043506\n  - 3.148558251063\n  - 3.141880754733\n  - 2.482890198608\n  - 0.5595552107122\n  - -0.7618182105322\n  - -2.995146711689\n  - -0.7993182105322\n  - -2.245146711689\n  - -1.885195681432\n  - -1.96444138824\n  - -1.647699444636\n  - -2.292570381505\n  - -1.486335532899\n  - -1.557844243276\n  - -0.8193587540789\n  - 10.13511643627\n  - 4.748558251063\n  - -2.218096467799\n  - -4.369688200573\n  - -3.659688200573\n  - -1.219688200573\n  - -0.3489655844206\n  - 6.635116436265\n  - 2.248558251063\n  - -1.518096467799\n  - -2.939688200573\n  - -4.029688200573\n  - -2.159688200573\n  - -2.038965584421\n  - 5.435116436265\n  - 
3.348558251064\n  - -1.818096467799\n  - -2.909688200573\n  - -4.969688200573\n  - -3.469688200573\n  - -0.5389655844206\n  - 6.635116436265\n  - 5.848558251064\n  - -0.918096467799\n  - -4.219688200573\n  - -0.3489655844206\n  - -4.029688200573\n  - -3.659688200573\n  - -0.5389655844206\n  - 5.435116436265\n  - 2.348558251064\n  - -0.0171098013921\n  - -2.718096467799\n  - -4.257793595776\n  - -3.887793595776\n  - -2.006947842151\n  - -2.829688200573\n  - -0.1835757519589\n  - 0.8557490906722\n  - 0.6910344155794\n  - 4.435116436265\n  - 4.348558251064\n  - 0.9828901986079\n  - 0.481903532201\n  - -6.457793595776\n  - -6.137793595776\n  - -1.516947842151\n  - -4.029688200573\n  - -1.013575751959\n  - -0.8342509093278\n  - 1.441034415579\n  - -0.8648835637348\n  - 1.848558251064\n  - 0.6828901986079\n  - 1.081903532201\n  - -5.457793595776\n  - -4.787793595776\n  - 0.1730521578493\n  - -1.139688200573\n  - -3.263575751959\n  - 0.4057490906722\n  - 4.141034415579\n  - 6.635116436265\n  - 2.104536043506\n  - 4.348558251064\n  - 5.641880754733\n  - -0.1171098013921\n  - -2.640444789288\n  - -3.195146711689\n  - -2.325146711689\n  - -4.777695681432\n  - -5.49444138824\n  - -4.762699444636\n  - -5.027570381505\n  - -3.966335532899\n  - -3.510344243276\n  - -2.694358754079\n  - 7.410311799427\n  - 8.980311799427\n  - 1.290311799427\n  - -4.969688200573\n  - -4.709688200573\n  - -0.659688200573\n  - -0.5648835637348\n  - -2.951441748936\n  - 0.1828901986079\n  - 5.081903532201\n  - 3.342206404224\n  - 2.873052157849\n  - 3.162206404224\n  - 2.983052157849\n  - 0.920311799427\n  - 0.1164242480411\n  - 3.925749090672\n  - 2.761034415579\n  - 9.335116436265\n  - 3.648558251063\n  - -0.3171098013921\n  - -5.718096467799\n  - -2.457793595776\n  - -3.126947842151\n  - -1.897793595776\n  - -2.156947842151\n  - -0.539688200573\n  - -2.693575751959\n  - -0.2742509093278\n  - 2.531034415579\n  - -4.664883563735\n  - 1.548558251064\n  - -7.017109801392\n  - -4.018096467799\n  - 
-0.7577935957756\n  - -4.526947842151\n  - -0.4677935957756\n  - -3.876947842151\n  - -3.429688200573\n  - -2.813575751959\n  - -0.9442509093278\n  - 0.6610344155794\n  - -3.089688200573\n  - 9.635116436265\n  - 6.848558251064\n  - 0.781903532201\n  - -3.469688200573\n  - -1.108965584421\n  - -3.839688200573\n  - -0.9189655844206\n  - -1.589688200573\n  - -2.159688200573\n  - -1.139688200573\n  - -4.661818210532\n  - -4.211818210532\n  - 10.33511643627\n  - 3.948558251064\n  - -0.3618182105322\n  - -0.3518182105322\n  - -1.477695681432\n  - -2.132699444636\n  - -1.761818210532\n  - -1.471818210532\n  - 9.935116436265\n  - 3.248558251063\n  - -1.997695681432\n  - -2.472699444636\n  - -1.261818210532\n  - -1.211818210532\nsquare_terms:\n  -   - 23918483.46505184\n      - -221133.04826413715\n      - -3862092.6941709574\n  -   - -221133.04826413715\n      - 3420438.117919954\n      - -157370.87591914795\n  -   - -3862092.6941709574\n      - -157370.87591914795\n      - 925172.8526537095\nx_candidate_uncentered:\n  - 0.149883507892\n  - 0.008098080768719\n  - 0.009146244784311\n"
  },
  {
    "path": "tests/optimagic/optimizers/_pounders/fixtures/update_residual_model.yaml",
    "content": "---\ncoefficients_linear_terms:\n  -   - 2196.51612513\n      - -1262.407320136\n      - -447.2228027399\n  -   - 5851.636806189\n      - -3363.878493533\n      - -1189.242968631\n  -   - -7828.060887584\n      - 4499.150822791\n      - 1593.500698721\n  -   - -2241.152261457\n      - 1287.57973774\n      - 457.6976703957\n  -   - -1259.398547285\n      - 723.0143826494\n      - 258.7302520168\n  -   - -616.8404903244\n      - 353.5363964799\n      - 128.4190782477\n  -   - -203.0932702765\n      - 115.8979416271\n      - 43.73088360213\n  -   - 52.98781836565\n      - -30.58972763875\n      - -10.39697992437\n  -   - -203.0932702765\n      - 115.8979416271\n      - 43.73088360213\n  -   - 52.98781836567\n      - -30.58972763875\n      - -10.39697992438\n  -   - 1139.519115522\n      - -650.4239742628\n      - -244.9573029291\n  -   - -634.6254513294\n      - 361.6933575664\n      - 137.9876812992\n  -   - -336.602139368\n      - 192.1454087218\n      - 72.30914784614\n  -   - 759.1990161301\n      - -450.6925959322\n      - -113.2196112852\n  -   - -102.1761425864\n      - 56.9978213237\n      - 25.77578197644\n  -   - -90.52892012056\n      - 50.77122751287\n      - 22.0578212495\n  -   - -75.43506032392\n      - 42.33874666634\n      - 18.28624410363\n  -   - 2196.51612513\n      - -1262.407320136\n      - -447.2228027399\n  -   - 5851.636806189\n      - -3363.878493533\n      - -1189.242968631\n  -   - -7828.060887584\n      - 4499.150822791\n      - 1593.500698721\n  -   - -2241.152261457\n      - 1287.57973774\n      - 457.6976703957\n  -   - -1259.398547285\n      - 723.0143826494\n      - 258.7302520168\n  -   - -616.8404903244\n      - 353.5363964799\n      - 128.4190782477\n  -   - -203.0932702765\n      - 115.8979416271\n      - 43.73088360213\n  -   - 52.98781836567\n      - -30.58972763875\n      - -10.39697992438\n  -   - -203.0932702765\n      - 115.8979416271\n      - 43.73088360213\n  -   - 52.98781836565\n      - -30.58972763875\n  
    - -10.39697992437\n  -   - 1139.519115522\n      - -650.4239742628\n      - -244.9573029291\n  -   - -634.6254513294\n      - 361.6933575664\n      - 137.9876812992\n  -   - -336.602139368\n      - 192.1454087218\n      - 72.30914784614\n  -   - 759.1990161301\n      - -450.6925959322\n      - -113.2196112852\n  -   - -102.1761425864\n      - 56.9978213237\n      - 25.77578197644\n  -   - -90.52892012056\n      - 50.77122751287\n      - 22.0578212495\n  -   - -75.43506032392\n      - 42.33874666634\n      - 18.28624410363\n  -   - 2196.51612513\n      - -1262.407320136\n      - -447.2228027399\n  -   - 5851.636806189\n      - -3363.878493533\n      - -1189.242968631\n  -   - -7828.060887584\n      - 4499.150822791\n      - 1593.500698721\n  -   - -2241.152261457\n      - 1287.57973774\n      - 457.6976703957\n  -   - -1259.398547285\n      - 723.0143826494\n      - 258.7302520168\n  -   - -616.8404903244\n      - 353.5363964799\n      - 128.4190782477\n  -   - -203.0932702765\n      - 115.8979416271\n      - 43.73088360213\n  -   - 52.98781836567\n      - -30.58972763875\n      - -10.39697992438\n  -   - -203.0932702765\n      - 115.8979416271\n      - 43.73088360213\n  -   - 52.98781836565\n      - -30.58972763875\n      - -10.39697992437\n  -   - 1139.519115522\n      - -650.4239742628\n      - -244.957302929\n  -   - -634.6254513294\n      - 361.6933575664\n      - 137.9876812992\n  -   - -336.602139368\n      - 192.1454087218\n      - 72.30914784614\n  -   - 759.1990161301\n      - -450.6925959322\n      - -113.2196112852\n  -   - -102.1761425864\n      - 56.9978213237\n      - 25.77578197644\n  -   - -90.52892012056\n      - 50.77122751287\n      - 22.0578212495\n  -   - -75.43506032392\n      - 42.33874666634\n      - 18.28624410363\n  -   - 2196.51612513\n      - -1262.407320136\n      - -447.2228027399\n  -   - 5851.636806189\n      - -3363.878493533\n      - -1189.242968631\n  -   - -7828.060887584\n      - 4499.150822791\n      - 1593.500698721\n  -   
- -2241.152261457\n      - 1287.57973774\n      - 457.6976703958\n  -   - -1259.398547285\n      - 723.0143826494\n      - 258.7302520168\n  -   - -616.8404903244\n      - 353.5363964799\n      - 128.4190782477\n  -   - -203.0932702765\n      - 115.8979416271\n      - 43.73088360213\n  -   - 52.98781836567\n      - -30.58972763875\n      - -10.39697992438\n  -   - -203.0932702765\n      - 115.8979416271\n      - 43.73088360213\n  -   - 52.98781836567\n      - -30.58972763875\n      - -10.39697992438\n  -   - 1139.519115522\n      - -650.4239742628\n      - -244.957302929\n  -   - -634.6254513294\n      - 361.6933575664\n      - 137.9876812992\n  -   - -336.602139368\n      - 192.1454087218\n      - 72.30914784614\n  -   - 759.1990161301\n      - -450.6925959322\n      - -113.2196112852\n  -   - -102.1761425864\n      - 56.9978213237\n      - 25.77578197644\n  -   - -90.52892012056\n      - 50.77122751287\n      - 22.0578212495\n  -   - -75.43506032392\n      - 42.33874666634\n      - 18.28624410363\n  -   - 2196.51612513\n      - -1262.407320136\n      - -447.2228027399\n  -   - -7828.060887584\n      - 4499.150822791\n      - 1593.500698721\n  -   - -358.1729390416\n      - 204.8877730266\n      - 75.70741395195\n  -   - -1787.097006562\n      - 1018.993930402\n      - 387.2171788386\n  -   - -1787.097006562\n      - 1018.993930402\n      - 387.2171788385\n  -   - -1787.097006562\n      - 1018.993930402\n      - 387.2171788385\n  -   - -68.77747448341\n      - 38.60197207293\n      - 16.67275695173\n  -   - 2196.51612513\n      - -1262.407320136\n      - -447.2228027399\n  -   - -7828.060887584\n      - 4499.150822791\n      - 1593.500698721\n  -   - -358.1729390416\n      - 204.8877730266\n      - 75.70741395195\n  -   - -1787.097006562\n      - 1018.993930402\n      - 387.2171788386\n  -   - -1787.097006562\n      - 1018.993930402\n      - 387.2171788386\n  -   - -1787.097006562\n      - 1018.993930402\n      - 387.2171788385\n  -   - -68.77747448342\n      - 
38.60197207293\n      - 16.67275695173\n  -   - 2196.51612513\n      - -1262.407320136\n      - -447.2228027399\n  -   - -7828.060887584\n      - 4499.150822791\n      - 1593.500698721\n  -   - -358.1729390416\n      - 204.8877730266\n      - 75.70741395195\n  -   - -1787.097006562\n      - 1018.993930402\n      - 387.2171788385\n  -   - -1787.097006562\n      - 1018.993930402\n      - 387.2171788385\n  -   - -1787.097006562\n      - 1018.993930402\n      - 387.2171788385\n  -   - -68.77747448341\n      - 38.60197207293\n      - 16.67275695173\n  -   - 2196.51612513\n      - -1262.407320136\n      - -447.2228027399\n  -   - -7828.060887584\n      - 4499.150822791\n      - 1593.500698721\n  -   - -358.1729390416\n      - 204.8877730266\n      - 75.70741395195\n  -   - -1787.097006562\n      - 1018.993930402\n      - 387.2171788385\n  -   - -68.77747448341\n      - 38.60197207293\n      - 16.67275695173\n  -   - -1787.097006562\n      - 1018.993930402\n      - 387.2171788386\n  -   - -1787.097006562\n      - 1018.993930402\n      - 387.2171788385\n  -   - -68.77747448341\n      - 38.60197207293\n      - 16.67275695173\n  -   - 2196.51612513\n      - -1262.407320136\n      - -447.2228027399\n  -   - -7828.060887584\n      - 4499.150822791\n      - 1593.500698721\n  -   - -1259.398547285\n      - 723.0143826494\n      - 258.7302520168\n  -   - -358.1729390416\n      - 204.8877730266\n      - 75.70741395195\n  -   - -79.96891672213\n      - 45.39053302699\n      - 17.92448050339\n  -   - -79.96891672213\n      - 45.39053302699\n      - 17.92448050339\n  -   - 273.5011442614\n      - -156.4321396848\n      - -57.8688020304\n  -   - -1787.097006562\n      - 1018.993930402\n      - 387.2171788385\n  -   - -327.7907972813\n      - 188.0301389654\n      - 67.78167794519\n  -   - -98.01155756365\n      - 54.88939519482\n      - 24.10659139958\n  -   - -68.77747448342\n      - 38.60197207293\n      - 16.67275695173\n  -   - 2196.51612513\n      - -1262.407320136\n      - 
-447.2228027399\n  -   - -7828.060887584\n      - 4499.150822791\n      - 1593.500698721\n  -   - -1259.398547285\n      - 723.0143826494\n      - 258.7302520168\n  -   - -358.1729390416\n      - 204.8877730266\n      - 75.70741395195\n  -   - -79.96891672213\n      - 45.39053302699\n      - 17.92448050339\n  -   - -79.96891672213\n      - 45.39053302699\n      - 17.92448050339\n  -   - 273.5011442614\n      - -156.4321396848\n      - -57.8688020304\n  -   - -1787.097006562\n      - 1018.993930402\n      - 387.2171788386\n  -   - -327.7907972813\n      - 188.0301389654\n      - 67.78167794518\n  -   - -98.01155756366\n      - 54.88939519482\n      - 24.10659139958\n  -   - -68.77747448342\n      - 38.60197207293\n      - 16.67275695173\n  -   - 2196.51612513\n      - -1262.407320136\n      - -447.2228027399\n  -   - -7828.060887584\n      - 4499.150822791\n      - 1593.500698721\n  -   - -1259.398547285\n      - 723.0143826494\n      - 258.7302520168\n  -   - -358.1729390416\n      - 204.8877730266\n      - 75.70741395194\n  -   - -79.96891672213\n      - 45.39053302699\n      - 17.92448050339\n  -   - -79.96891672215\n      - 45.39053302699\n      - 17.92448050339\n  -   - 273.5011442614\n      - -156.4321396848\n      - -57.8688020304\n  -   - -1787.097006562\n      - 1018.993930402\n      - 387.2171788386\n  -   - -327.7907972813\n      - 188.0301389654\n      - 67.78167794518\n  -   - -98.01155756366\n      - 54.88939519482\n      - 24.10659139958\n  -   - -68.77747448341\n      - 38.60197207293\n      - 16.67275695173\n  -   - 2196.51612513\n      - -1262.407320136\n      - -447.2228027399\n  -   - 5851.636806189\n      - -3363.878493533\n      - -1189.242968631\n  -   - -7828.060887584\n      - 4499.150822791\n      - 1593.500698721\n  -   - -2241.152261456\n      - 1287.57973774\n      - 457.6976703957\n  -   - -1259.398547285\n      - 723.0143826494\n      - 258.7302520168\n  -   - -616.8404903244\n      - 353.5363964799\n      - 128.4190782477\n  -   - 
52.98781836565\n      - -30.58972763875\n      - -10.39697992437\n  -   - 52.98781836567\n      - -30.58972763875\n      - -10.39697992438\n  -   - 1139.519115522\n      - -650.4239742628\n      - -244.9573029291\n  -   - -634.6254513294\n      - 361.6933575664\n      - 137.9876812992\n  -   - -336.602139368\n      - 192.1454087218\n      - 72.30914784614\n  -   - 759.1990161301\n      - -450.6925959322\n      - -113.2196112852\n  -   - -102.1761425864\n      - 56.9978213237\n      - 25.77578197643\n  -   - -90.52892012056\n      - 50.77122751287\n      - 22.0578212495\n  -   - -75.43506032391\n      - 42.33874666634\n      - 18.28624410363\n  -   - -1787.097006562\n      - 1018.993930402\n      - 387.2171788386\n  -   - -1787.097006562\n      - 1018.993930402\n      - 387.2171788386\n  -   - -1787.097006562\n      - 1018.993930402\n      - 387.2171788386\n  -   - -1787.097006562\n      - 1018.993930402\n      - 387.2171788385\n  -   - -1787.097006562\n      - 1018.993930402\n      - 387.2171788386\n  -   - -1787.097006562\n      - 1018.993930402\n      - 387.2171788385\n  -   - 2196.51612513\n      - -1262.407320136\n      - -447.2228027399\n  -   - -7828.060887584\n      - 4499.150822791\n      - 1593.500698721\n  -   - -1259.398547285\n      - 723.0143826494\n      - 258.7302520168\n  -   - -358.1729390416\n      - 204.8877730266\n      - 75.70741395195\n  -   - -79.96891672213\n      - 45.39053302699\n      - 17.92448050339\n  -   - 273.5011442614\n      - -156.4321396848\n      - -57.8688020304\n  -   - -79.96891672215\n      - 45.39053302699\n      - 17.92448050339\n  -   - 273.5011442614\n      - -156.4321396848\n      - -57.8688020304\n  -   - -1787.097006562\n      - 1018.993930402\n      - 387.2171788385\n  -   - -327.7907972813\n      - 188.0301389654\n      - 67.78167794518\n  -   - -98.01155756365\n      - 54.88939519482\n      - 24.10659139958\n  -   - -68.77747448342\n      - 38.60197207293\n      - 16.67275695173\n  -   - 2196.51612513\n      - 
-1262.407320136\n      - -447.2228027399\n  -   - -7828.060887584\n      - 4499.150822791\n      - 1593.500698721\n  -   - -1259.398547285\n      - 723.0143826494\n      - 258.7302520168\n  -   - -358.1729390416\n      - 204.8877730266\n      - 75.70741395195\n  -   - -79.96891672213\n      - 45.39053302699\n      - 17.92448050339\n  -   - 273.5011442614\n      - -156.4321396848\n      - -57.8688020304\n  -   - -79.96891672215\n      - 45.39053302699\n      - 17.92448050339\n  -   - 273.5011442614\n      - -156.4321396848\n      - -57.8688020304\n  -   - -1787.097006562\n      - 1018.993930402\n      - 387.2171788386\n  -   - -327.7907972813\n      - 188.0301389654\n      - 67.78167794518\n  -   - -98.01155756366\n      - 54.88939519482\n      - 24.10659139958\n  -   - -68.77747448342\n      - 38.60197207293\n      - 16.67275695173\n  -   - 2196.51612513\n      - -1262.407320136\n      - -447.2228027399\n  -   - -7828.060887584\n      - 4499.150822791\n      - 1593.500698721\n  -   - -1259.398547285\n      - 723.0143826494\n      - 258.7302520168\n  -   - -358.1729390416\n      - 204.8877730266\n      - 75.70741395195\n  -   - -79.96891672213\n      - 45.39053302699\n      - 17.92448050339\n  -   - 273.5011442614\n      - -156.4321396848\n      - -57.8688020304\n  -   - -79.96891672213\n      - 45.39053302699\n      - 17.92448050339\n  -   - 273.5011442614\n      - -156.4321396848\n      - -57.8688020304\n  -   - -1787.097006562\n      - 1018.993930402\n      - 387.2171788386\n  -   - -327.7907972813\n      - 188.0301389654\n      - 67.78167794518\n  -   - -98.01155756366\n      - 54.88939519482\n      - 24.10659139958\n  -   - -68.77747448342\n      - 38.60197207293\n      - 16.67275695173\n  -   - -1787.097006562\n      - 1018.993930402\n      - 387.2171788386\n  -   - 2196.51612513\n      - -1262.407320136\n      - -447.2228027399\n  -   - -7828.060887584\n      - 4499.150822791\n      - 1593.500698721\n  -   - -358.1729390416\n      - 204.8877730266\n      - 
75.70741395195\n  -   - -1787.097006562\n      - 1018.993930402\n      - 387.2171788385\n  -   - -68.77747448342\n      - 38.60197207293\n      - 16.67275695173\n  -   - -1787.097006562\n      - 1018.993930402\n      - 387.2171788385\n  -   - -68.77747448342\n      - 38.60197207293\n      - 16.67275695173\n  -   - -1787.097006562\n      - 1018.993930402\n      - 387.2171788386\n  -   - -1787.097006562\n      - 1018.993930402\n      - 387.2171788385\n  -   - -1787.097006562\n      - 1018.993930402\n      - 387.2171788386\n  -   - -203.0932702765\n      - 115.8979416271\n      - 43.73088360213\n  -   - -203.0932702765\n      - 115.8979416271\n      - 43.73088360213\n  -   - 2196.51612513\n      - -1262.407320136\n      - -447.2228027399\n  -   - -7828.060887584\n      - 4499.150822791\n      - 1593.500698721\n  -   - -203.0932702765\n      - 115.8979416271\n      - 43.73088360213\n  -   - -203.0932702765\n      - 115.8979416271\n      - 43.73088360213\n  -   - 1139.519115522\n      - -650.4239742628\n      - -244.9573029291\n  -   - -336.602139368\n      - 192.1454087218\n      - 72.30914784614\n  -   - -203.0932702765\n      - 115.8979416271\n      - 43.73088360213\n  -   - -203.0932702765\n      - 115.8979416271\n      - 43.73088360213\n  -   - 2196.51612513\n      - -1262.407320136\n      - -447.2228027399\n  -   - -7828.060887584\n      - 4499.150822791\n      - 1593.500698721\n  -   - 1139.519115522\n      - -650.4239742628\n      - -244.957302929\n  -   - -336.602139368\n      - 192.1454087218\n      - 72.30914784614\n  -   - -203.0932702765\n      - 115.8979416271\n      - 43.73088360213\n  -   - -203.0932702765\n      - 115.8979416271\n      - 43.73088360213\ncoefficients_square_terms:\n  -   -   - -4024.637363485\n          - -1764.942759693\n          - 1079.698769709\n      -   - -1764.942759693\n          - -2617.72518908\n          - -389.5323589034\n      -   - 1079.698769709\n          - -389.5323589034\n          - -26.55575319468\n  -   -   - 
-10720.15125926\n          - -4701.162126956\n          - 2877.493275431\n      -   - -4701.162126956\n          - -6974.883293509\n          - -1034.563281084\n      -   - 2877.493275431\n          - -1034.563281084\n          - -66.50544541636\n  -   -   - 14342.9552468\n          - 6289.882966587\n          - -3848.05977671\n      -   - 6289.882966587\n          - 9329.360846504\n          - 1387.748277719\n      -   - -3848.05977671\n          - 1387.748277719\n          - 93.98859313953\n  -   -   - 4107.502577423\n          - 1801.278961009\n          - -1100.9294753\n      -   - 1801.278961009\n          - 2670.207390632\n          - 399.4634027254\n      -   - -1100.9294753\n          - 399.4634027254\n          - 29.78961963679\n  -   -   - 2309.371114485\n          - 1012.73405011\n          - -617.8751193011\n      -   - 1012.73405011\n          - 1499.715375261\n          - 226.6996495043\n      -   - -617.8751193011\n          - 226.6996495043\n          - 19.71367105379\n  -   -   - 1132.425944378\n          - 496.6017216382\n          - -301.7604507638\n      -   - 496.6017216382\n          - 733.6727867614\n          - 113.499372341\n      -   - -301.7604507638\n          - 113.499372341\n          - 12.95003027788\n  -   -   - 373.9764193214\n          - 163.996345322\n          - -98.61168950718\n      -   - 163.996345322\n          - 240.8144049061\n          - 39.47545780394\n      -   - -98.61168950718\n          - 39.47545780394\n          - 7.079420182092\n  -   -   - -96.78384376002\n          - -42.4439297132\n          - 26.24677072092\n      -   - -42.4439297132\n          - -63.35038252298\n          - -8.827773128154\n      -   - 26.24677072092\n          - -8.827773128154\n          - 0.1202800918614\n  -   -   - 373.9764193214\n          - 163.996345322\n          - -98.61168950718\n      -   - 163.996345322\n          - 240.8144049061\n          - 39.47545780394\n      -   - -98.61168950718\n          - 39.47545780394\n          - 
7.079420182093\n  -   -   - -96.78384376002\n          - -42.4439297132\n          - 26.24677072092\n      -   - -42.4439297132\n          - -63.35038252298\n          - -8.827773128154\n      -   - 26.24677072092\n          - -8.827773128154\n          - 0.1202800918629\n  -   -   - -2097.995093057\n          - -920.0148615183\n          - 553.5014196588\n      -   - -920.0148615183\n          - -1351.37575036\n          - -220.8956593773\n      -   - 553.5014196588\n          - -220.8956593773\n          - -38.92732733725\n  -   -   - 1169.641704033\n          - 512.9089847695\n          - -307.4565994537\n      -   - 512.9089847695\n          - 751.8082285228\n          - 125.2965573785\n      -   - -307.4565994537\n          - 125.2965573785\n          - 24.72038264179\n  -   -   - 619.6881094322\n          - 271.7463432808\n          - -163.5235269434\n      -   - 271.7463432808\n          - 399.2074510919\n          - 65.17951383646\n      -   - -163.5235269434\n          - 65.17951383646\n          - 11.40410777447\n  -   -   - -1358.879936331\n          - -596.007217569\n          - 394.3663309731\n      -   - -596.007217569\n          - -926.0633850662\n          - -74.53434762621\n      -   - 394.3663309731\n          - -74.53434762621\n          - 71.17453818383\n  -   -   - 191.0851566267\n          - 83.78647255099\n          - -47.67820746517\n      -   - 83.78647255099\n          - 119.2114851962\n          - 25.34580158139\n      -   - -47.67820746517\n          - 25.34580158139\n          - 10.89567211601\n  -   -   - 168.6962139181\n          - 73.97112025538\n          - -42.64264193489\n      -   - 73.97112025538\n          - 106.0235548818\n          - 21.3234391598\n      -   - -42.64264193489\n          - 21.3234391598\n          - 8.138709169582\n  -   -   - 140.4964790262\n          - 61.60610305115\n          - -35.58093011922\n      -   - 61.60610305115\n          - 88.39458253098\n          - 17.63175280599\n      -   - -35.58093011922\n 
         - 17.63175280599\n          - 6.599339566474\n  -   -   - -4024.637363485\n          - -1764.942759693\n          - 1079.698769709\n      -   - -1764.942759693\n          - -2617.72518908\n          - -389.5323589034\n      -   - 1079.698769709\n          - -389.5323589034\n          - -26.55575319468\n  -   -   - -10720.15125926\n          - -4701.162126956\n          - 2877.493275431\n      -   - -4701.162126956\n          - -6974.883293509\n          - -1034.563281084\n      -   - 2877.493275431\n          - -1034.563281084\n          - -66.50544541639\n  -   -   - 14342.9552468\n          - 6289.882966587\n          - -3848.05977671\n      -   - 6289.882966587\n          - 9329.360846504\n          - 1387.748277719\n      -   - -3848.05977671\n          - 1387.748277719\n          - 93.98859313953\n  -   -   - 4107.502577423\n          - 1801.278961009\n          - -1100.9294753\n      -   - 1801.278961009\n          - 2670.207390632\n          - 399.4634027254\n      -   - -1100.9294753\n          - 399.4634027254\n          - 29.78961963679\n  -   -   - 2309.371114485\n          - 1012.73405011\n          - -617.8751193011\n      -   - 1012.73405011\n          - 1499.715375261\n          - 226.6996495043\n      -   - -617.8751193011\n          - 226.6996495043\n          - 19.71367105379\n  -   -   - 1132.425944378\n          - 496.6017216382\n          - -301.7604507638\n      -   - 496.6017216382\n          - 733.6727867614\n          - 113.499372341\n      -   - -301.7604507638\n          - 113.499372341\n          - 12.95003027788\n  -   -   - 373.9764193214\n          - 163.996345322\n          - -98.61168950718\n      -   - 163.996345322\n          - 240.8144049061\n          - 39.47545780393\n      -   - -98.61168950718\n          - 39.47545780393\n          - 7.079420182093\n  -   -   - -96.78384376002\n          - -42.4439297132\n          - 26.24677072092\n      -   - -42.4439297132\n          - -63.35038252298\n          - 
-8.827773128154\n      -   - 26.24677072092\n          - -8.827773128154\n          - 0.1202800918629\n  -   -   - 373.9764193214\n          - 163.996345322\n          - -98.61168950718\n      -   - 163.996345322\n          - 240.8144049061\n          - 39.47545780393\n      -   - -98.61168950718\n          - 39.47545780393\n          - 7.079420182093\n  -   -   - -96.78384376002\n          - -42.4439297132\n          - 26.24677072092\n      -   - -42.4439297132\n          - -63.35038252298\n          - -8.827773128154\n      -   - 26.24677072092\n          - -8.827773128154\n          - 0.1202800918614\n  -   -   - -2097.995093057\n          - -920.0148615183\n          - 553.5014196588\n      -   - -920.0148615183\n          - -1351.37575036\n          - -220.8956593773\n      -   - 553.5014196588\n          - -220.8956593773\n          - -38.92732733724\n  -   -   - 1169.641704033\n          - 512.9089847695\n          - -307.4565994537\n      -   - 512.9089847695\n          - 751.8082285228\n          - 125.2965573785\n      -   - -307.4565994537\n          - 125.2965573785\n          - 24.72038264179\n  -   -   - 619.6881094322\n          - 271.7463432808\n          - -163.5235269434\n      -   - 271.7463432808\n          - 399.2074510919\n          - 65.17951383646\n      -   - -163.5235269434\n          - 65.17951383646\n          - 11.40410777447\n  -   -   - -1358.879936331\n          - -596.007217569\n          - 394.3663309731\n      -   - -596.007217569\n          - -926.0633850662\n          - -74.53434762621\n      -   - 394.3663309731\n          - -74.53434762621\n          - 71.17453818383\n  -   -   - 191.0851566267\n          - 83.78647255099\n          - -47.67820746517\n      -   - 83.78647255099\n          - 119.2114851962\n          - 25.34580158139\n      -   - -47.67820746517\n          - 25.34580158139\n          - 10.89567211601\n  -   -   - 168.6962139181\n          - 73.97112025538\n          - -42.64264193489\n      -   - 
73.97112025538\n          - 106.0235548818\n          - 21.3234391598\n      -   - -42.64264193489\n          - 21.3234391598\n          - 8.138709169582\n  -   -   - 140.4964790262\n          - 61.60610305115\n          - -35.58093011922\n      -   - 61.60610305115\n          - 88.39458253098\n          - 17.63175280599\n      -   - -35.58093011922\n          - 17.63175280599\n          - 6.599339566474\n  -   -   - -4024.637363485\n          - -1764.942759693\n          - 1079.698769709\n      -   - -1764.942759693\n          - -2617.72518908\n          - -389.5323589034\n      -   - 1079.698769709\n          - -389.5323589034\n          - -26.55575319468\n  -   -   - -10720.15125926\n          - -4701.162126956\n          - 2877.493275431\n      -   - -4701.162126956\n          - -6974.883293509\n          - -1034.563281084\n      -   - 2877.493275431\n          - -1034.563281084\n          - -66.50544541638\n  -   -   - 14342.9552468\n          - 6289.882966587\n          - -3848.05977671\n      -   - 6289.882966587\n          - 9329.360846504\n          - 1387.748277719\n      -   - -3848.05977671\n          - 1387.748277719\n          - 93.98859313953\n  -   -   - 4107.502577423\n          - 1801.278961009\n          - -1100.9294753\n      -   - 1801.278961009\n          - 2670.207390632\n          - 399.4634027254\n      -   - -1100.9294753\n          - 399.4634027254\n          - 29.78961963679\n  -   -   - 2309.371114485\n          - 1012.73405011\n          - -617.8751193011\n      -   - 1012.73405011\n          - 1499.715375261\n          - 226.6996495043\n      -   - -617.8751193011\n          - 226.6996495043\n          - 19.71367105378\n  -   -   - 1132.425944378\n          - 496.6017216382\n          - -301.7604507638\n      -   - 496.6017216382\n          - 733.6727867614\n          - 113.499372341\n      -   - -301.7604507638\n          - 113.499372341\n          - 12.95003027788\n  -   -   - 373.9764193214\n          - 163.996345322\n          - 
-98.61168950718\n      -   - 163.996345322\n          - 240.8144049061\n          - 39.47545780394\n      -   - -98.61168950718\n          - 39.47545780394\n          - 7.079420182093\n  -   -   - -96.78384376002\n          - -42.4439297132\n          - 26.24677072092\n      -   - -42.4439297132\n          - -63.35038252298\n          - -8.827773128153\n      -   - 26.24677072092\n          - -8.827773128153\n          - 0.120280091863\n  -   -   - 373.9764193214\n          - 163.996345322\n          - -98.61168950718\n      -   - 163.996345322\n          - 240.8144049061\n          - 39.47545780393\n      -   - -98.61168950718\n          - 39.47545780393\n          - 7.079420182088\n  -   -   - -96.78384376002\n          - -42.4439297132\n          - 26.24677072092\n      -   - -42.4439297132\n          - -63.35038252298\n          - -8.827773128154\n      -   - 26.24677072092\n          - -8.827773128154\n          - 0.1202800918614\n  -   -   - -2097.995093057\n          - -920.0148615183\n          - 553.5014196588\n      -   - -920.0148615183\n          - -1351.37575036\n          - -220.8956593773\n      -   - 553.5014196588\n          - -220.8956593773\n          - -38.92732733725\n  -   -   - 1169.641704033\n          - 512.9089847695\n          - -307.4565994537\n      -   - 512.9089847695\n          - 751.8082285228\n          - 125.2965573785\n      -   - -307.4565994537\n          - 125.2965573785\n          - 24.72038264179\n  -   -   - 619.6881094322\n          - 271.7463432808\n          - -163.5235269434\n      -   - 271.7463432808\n          - 399.2074510919\n          - 65.17951383646\n      -   - -163.5235269434\n          - 65.17951383646\n          - 11.40410777447\n  -   -   - -1358.879936331\n          - -596.007217569\n          - 394.3663309731\n      -   - -596.007217569\n          - -926.0633850662\n          - -74.53434762621\n      -   - 394.3663309731\n          - -74.53434762621\n          - 71.17453818383\n  -   -   - 
191.0851566267\n          - 83.78647255099\n          - -47.67820746517\n      -   - 83.78647255099\n          - 119.2114851962\n          - 25.34580158139\n      -   - -47.67820746517\n          - 25.34580158139\n          - 10.89567211601\n  -   -   - 168.6962139181\n          - 73.97112025538\n          - -42.64264193489\n      -   - 73.97112025538\n          - 106.0235548818\n          - 21.3234391598\n      -   - -42.64264193489\n          - 21.3234391598\n          - 8.138709169581\n  -   -   - 140.4964790262\n          - 61.60610305115\n          - -35.58093011922\n      -   - 61.60610305115\n          - 88.39458253098\n          - 17.63175280599\n      -   - -35.58093011922\n          - 17.63175280599\n          - 6.599339566474\n  -   -   - -4024.637363485\n          - -1764.942759693\n          - 1079.698769709\n      -   - -1764.942759693\n          - -2617.72518908\n          - -389.5323589034\n      -   - 1079.698769709\n          - -389.5323589034\n          - -26.55575319468\n  -   -   - -10720.15125926\n          - -4701.162126956\n          - 2877.493275431\n      -   - -4701.162126956\n          - -6974.883293509\n          - -1034.563281084\n      -   - 2877.493275431\n          - -1034.563281084\n          - -66.50544541636\n  -   -   - 14342.9552468\n          - 6289.882966587\n          - -3848.05977671\n      -   - 6289.882966587\n          - 9329.360846504\n          - 1387.748277719\n      -   - -3848.05977671\n          - 1387.748277719\n          - 93.98859313953\n  -   -   - 4107.502577423\n          - 1801.278961009\n          - -1100.9294753\n      -   - 1801.278961009\n          - 2670.207390632\n          - 399.4634027254\n      -   - -1100.9294753\n          - 399.4634027254\n          - 29.78961963678\n  -   -   - 2309.371114485\n          - 1012.73405011\n          - -617.8751193011\n      -   - 1012.73405011\n          - 1499.715375261\n          - 226.6996495043\n      -   - -617.8751193011\n          - 226.6996495043\n          
- 19.71367105378\n  -   -   - 1132.425944378\n          - 496.6017216382\n          - -301.7604507638\n      -   - 496.6017216382\n          - 733.6727867614\n          - 113.499372341\n      -   - -301.7604507638\n          - 113.499372341\n          - 12.95003027788\n  -   -   - 373.9764193214\n          - 163.996345322\n          - -98.61168950718\n      -   - 163.996345322\n          - 240.8144049061\n          - 39.47545780394\n      -   - -98.61168950718\n          - 39.47545780394\n          - 7.079420182092\n  -   -   - -96.78384376002\n          - -42.4439297132\n          - 26.24677072092\n      -   - -42.4439297132\n          - -63.35038252298\n          - -8.827773128153\n      -   - 26.24677072092\n          - -8.827773128153\n          - 0.120280091863\n  -   -   - 373.9764193214\n          - 163.996345322\n          - -98.61168950718\n      -   - 163.996345322\n          - 240.8144049061\n          - 39.47545780393\n      -   - -98.61168950718\n          - 39.47545780393\n          - 7.079420182093\n  -   -   - -96.78384376002\n          - -42.4439297132\n          - 26.24677072092\n      -   - -42.4439297132\n          - -63.35038252298\n          - -8.827773128154\n      -   - 26.24677072092\n          - -8.827773128154\n          - 0.1202800918629\n  -   -   - -2097.995093057\n          - -920.0148615183\n          - 553.5014196588\n      -   - -920.0148615183\n          - -1351.37575036\n          - -220.8956593773\n      -   - 553.5014196588\n          - -220.8956593773\n          - -38.92732733725\n  -   -   - 1169.641704033\n          - 512.9089847695\n          - -307.4565994537\n      -   - 512.9089847695\n          - 751.8082285228\n          - 125.2965573785\n      -   - -307.4565994537\n          - 125.2965573785\n          - 24.72038264179\n  -   -   - 619.6881094322\n          - 271.7463432808\n          - -163.5235269434\n      -   - 271.7463432808\n          - 399.2074510919\n          - 65.17951383646\n      -   - -163.5235269434\n   
       - 65.17951383646\n          - 11.40410777447\n  -   -   - -1358.879936331\n          - -596.007217569\n          - 394.3663309731\n      -   - -596.007217569\n          - -926.0633850662\n          - -74.53434762621\n      -   - 394.3663309731\n          - -74.53434762621\n          - 71.17453818383\n  -   -   - 191.0851566267\n          - 83.78647255099\n          - -47.67820746517\n      -   - 83.78647255099\n          - 119.2114851962\n          - 25.34580158139\n      -   - -47.67820746517\n          - 25.34580158139\n          - 10.89567211601\n  -   -   - 168.6962139181\n          - 73.97112025538\n          - -42.64264193489\n      -   - 73.97112025538\n          - 106.0235548818\n          - 21.3234391598\n      -   - -42.64264193489\n          - 21.3234391598\n          - 8.138709169582\n  -   -   - 140.4964790262\n          - 61.60610305115\n          - -35.58093011922\n      -   - 61.60610305115\n          - 88.39458253098\n          - 17.63175280599\n      -   - -35.58093011922\n          - 17.63175280599\n          - 6.599339566474\n  -   -   - -4024.637363485\n          - -1764.942759693\n          - 1079.698769709\n      -   - -1764.942759693\n          - -2617.72518908\n          - -389.5323589034\n      -   - 1079.698769709\n          - -389.5323589034\n          - -26.55575319468\n  -   -   - 14342.9552468\n          - 6289.882966587\n          - -3848.05977671\n      -   - 6289.882966587\n          - 9329.360846504\n          - 1387.748277719\n      -   - -3848.05977671\n          - 1387.748277719\n          - 93.98859313953\n  -   -   - 658.4385693042\n          - 288.7419554965\n          - -174.6355818894\n      -   - 288.7419554965\n          - 425.4259635842\n          - 67.56085381701\n      -   - -174.6355818894\n          - 67.56085381701\n          - 9.734306985193\n  -   -   - 3292.641782076\n          - 1443.885786518\n          - -866.4874150134\n      -   - 1443.885786518\n          - 2117.778991587\n          - 
350.8652875891\n      -   - -866.4874150134\n          - 350.8652875891\n          - 66.9809474737\n  -   -   - 3292.641782076\n          - 1443.885786518\n          - -866.4874150134\n      -   - 1443.885786518\n          - 2117.778991587\n          - 350.8652875891\n      -   - -866.4874150134\n          - 350.8652875891\n          - 66.9809474737\n  -   -   - 3292.641782076\n          - 1443.885786518\n          - -866.4874150134\n      -   - 1443.885786518\n          - 2117.778991587\n          - 350.8652875891\n      -   - -866.4874150134\n          - 350.8652875891\n          - 66.9809474737\n  -   -   - 128.0971384699\n          - 56.16913286035\n          - -32.4405095433\n      -   - 56.16913286035\n          - 80.5930469462\n          - 16.07620102258\n      -   - -32.4405095433\n          - 16.07620102258\n          - 6.017649404194\n  -   -   - -4024.637363485\n          - -1764.942759693\n          - 1079.698769709\n      -   - -1764.942759693\n          - -2617.72518908\n          - -389.5323589034\n      -   - 1079.698769709\n          - -389.5323589034\n          - -26.55575319468\n  -   -   - 14342.9552468\n          - 6289.882966587\n          - -3848.05977671\n      -   - 6289.882966587\n          - 9329.360846504\n          - 1387.748277719\n      -   - -3848.05977671\n          - 1387.748277719\n          - 93.98859313953\n  -   -   - 658.4385693042\n          - 288.7419554965\n          - -174.6355818894\n      -   - 288.7419554965\n          - 425.4259635842\n          - 67.56085381701\n      -   - -174.6355818894\n          - 67.56085381701\n          - 9.734306985193\n  -   -   - 3292.641782076\n          - 1443.885786518\n          - -866.4874150134\n      -   - 1443.885786518\n          - 2117.778991587\n          - 350.8652875891\n      -   - -866.4874150134\n          - 350.8652875891\n          - 66.9809474737\n  -   -   - 3292.641782076\n          - 1443.885786518\n          - -866.4874150134\n      -   - 1443.885786518\n          - 
2117.778991587\n          - 350.8652875891\n      -   - -866.4874150134\n          - 350.8652875891\n          - 66.9809474737\n  -   -   - 3292.641782076\n          - 1443.885786518\n          - -866.4874150134\n      -   - 1443.885786518\n          - 2117.778991587\n          - 350.8652875891\n      -   - -866.4874150134\n          - 350.8652875891\n          - 66.9809474737\n  -   -   - 128.0971384699\n          - 56.16913286035\n          - -32.4405095433\n      -   - 56.16913286035\n          - 80.5930469462\n          - 16.07620102257\n      -   - -32.4405095433\n          - 16.07620102257\n          - 6.017649404193\n  -   -   - -4024.637363485\n          - -1764.942759693\n          - 1079.698769709\n      -   - -1764.942759693\n          - -2617.72518908\n          - -389.5323589034\n      -   - 1079.698769709\n          - -389.5323589034\n          - -26.55575319469\n  -   -   - 14342.9552468\n          - 6289.882966587\n          - -3848.05977671\n      -   - 6289.882966587\n          - 9329.360846504\n          - 1387.748277719\n      -   - -3848.05977671\n          - 1387.748277719\n          - 93.98859313953\n  -   -   - 658.4385693042\n          - 288.7419554965\n          - -174.6355818894\n      -   - 288.7419554965\n          - 425.4259635842\n          - 67.56085381701\n      -   - -174.6355818894\n          - 67.56085381701\n          - 9.734306985191\n  -   -   - 3292.641782076\n          - 1443.885786518\n          - -866.4874150134\n      -   - 1443.885786518\n          - 2117.778991587\n          - 350.8652875891\n      -   - -866.4874150134\n          - 350.8652875891\n          - 66.9809474737\n  -   -   - 3292.641782076\n          - 1443.885786518\n          - -866.4874150134\n      -   - 1443.885786518\n          - 2117.778991587\n          - 350.8652875891\n      -   - -866.4874150134\n          - 350.8652875891\n          - 66.9809474737\n  -   -   - 3292.641782076\n          - 1443.885786518\n          - -866.4874150134\n      -   - 
1443.885786518\n          - 2117.778991587\n          - 350.8652875891\n      -   - -866.4874150134\n          - 350.8652875891\n          - 66.9809474737\n  -   -   - 128.0971384699\n          - 56.16913286035\n          - -32.4405095433\n      -   - 56.16913286035\n          - 80.5930469462\n          - 16.07620102258\n      -   - -32.4405095433\n          - 16.07620102258\n          - 6.017649404194\n  -   -   - -4024.637363485\n          - -1764.942759693\n          - 1079.698769709\n      -   - -1764.942759693\n          - -2617.72518908\n          - -389.5323589034\n      -   - 1079.698769709\n          - -389.5323589034\n          - -26.55575319468\n  -   -   - 14342.9552468\n          - 6289.882966587\n          - -3848.05977671\n      -   - 6289.882966587\n          - 9329.360846504\n          - 1387.748277719\n      -   - -3848.05977671\n          - 1387.748277719\n          - 93.98859313953\n  -   -   - 658.4385693042\n          - 288.7419554965\n          - -174.6355818894\n      -   - 288.7419554965\n          - 425.4259635842\n          - 67.56085381701\n      -   - -174.6355818894\n          - 67.56085381701\n          - 9.734306985194\n  -   -   - 3292.641782076\n          - 1443.885786518\n          - -866.4874150134\n      -   - 1443.885786518\n          - 2117.778991587\n          - 350.8652875891\n      -   - -866.4874150134\n          - 350.8652875891\n          - 66.9809474737\n  -   -   - 128.0971384699\n          - 56.16913286035\n          - -32.4405095433\n      -   - 56.16913286035\n          - 80.5930469462\n          - 16.07620102258\n      -   - -32.4405095433\n          - 16.07620102258\n          - 6.017649404194\n  -   -   - 3292.641782076\n          - 1443.885786518\n          - -866.4874150134\n      -   - 1443.885786518\n          - 2117.778991587\n          - 350.8652875891\n      -   - -866.4874150134\n          - 350.8652875891\n          - 66.9809474737\n  -   -   - 3292.641782076\n          - 1443.885786518\n          - 
-866.4874150134\n      -   - 1443.885786518\n          - 2117.778991587\n          - 350.8652875891\n      -   - -866.4874150134\n          - 350.8652875891\n          - 66.9809474737\n  -   -   - 128.0971384699\n          - 56.16913286035\n          - -32.4405095433\n      -   - 56.16913286035\n          - 80.5930469462\n          - 16.07620102258\n      -   - -32.4405095433\n          - 16.07620102258\n          - 6.017649404194\n  -   -   - -4024.637363485\n          - -1764.942759693\n          - 1079.698769709\n      -   - -1764.942759693\n          - -2617.72518908\n          - -389.5323589034\n      -   - 1079.698769709\n          - -389.5323589034\n          - -26.55575319469\n  -   -   - 14342.9552468\n          - 6289.882966587\n          - -3848.05977671\n      -   - 6289.882966587\n          - 9329.360846504\n          - 1387.748277719\n      -   - -3848.05977671\n          - 1387.748277719\n          - 93.98859313953\n  -   -   - 2309.371114485\n          - 1012.73405011\n          - -617.8751193011\n      -   - 1012.73405011\n          - 1499.715375261\n          - 226.6996495043\n      -   - -617.8751193011\n          - 226.6996495043\n          - 19.71367105378\n  -   -   - 658.4385693042\n          - 288.7419554965\n          - -174.6355818894\n      -   - 288.7419554965\n          - 425.4259635842\n          - 67.56085381701\n      -   - -174.6355818894\n          - 67.56085381701\n          - 9.734306985194\n  -   -   - 147.8038109482\n          - 64.81345870863\n          - -38.46761366883\n      -   - 64.81345870863\n          - 94.45893845337\n          - 16.56853761046\n      -   - -38.46761366883\n          - 16.56853761046\n          - 4.157765246739\n  -   -   - 147.8038109482\n          - 64.81345870863\n          - -38.46761366883\n      -   - 64.81345870863\n          - 94.45893845337\n          - 16.56853761046\n      -   - -38.46761366883\n          - 16.56853761046\n          - 4.157765246739\n  -   -   - -502.8297957863\n          - 
-220.5034461954\n          - 133.321878795\n      -   - -220.5034461954\n          - -324.8254786078\n          - -51.67462438823\n      -   - 133.321878795\n          - -51.67462438823\n          - -7.546900209044\n  -   -   - 3292.641782076\n          - 1443.885786518\n          - -866.4874150134\n      -   - 1443.885786518\n          - 2117.778991587\n          - 350.8652875891\n      -   - -866.4874150134\n          - 350.8652875891\n          - 66.9809474737\n  -   -   - 601.415922879\n          - 263.7393940918\n          - -160.5922585799\n      -   - 263.7393940918\n          - 390.1127867798\n          - 59.64454290172\n      -   - -160.5922585799\n          - 59.64454290172\n          - 5.986811985787\n  -   -   - 182.8153091813\n          - 80.16167205613\n          - -46.0517180463\n      -   - 80.16167205613\n          - 114.6708300718\n          - 23.41375701469\n      -   - -46.0517180463\n          - 23.41375701469\n          - 9.249705571709\n  -   -   - 128.0971384699\n          - 56.16913286035\n          - -32.4405095433\n      -   - 56.16913286035\n          - 80.5930469462\n          - 16.07620102257\n      -   - -32.4405095433\n          - 16.07620102257\n          - 6.017649404194\n  -   -   - -4024.637363485\n          - -1764.942759693\n          - 1079.698769709\n      -   - -1764.942759693\n          - -2617.72518908\n          - -389.5323589034\n      -   - 1079.698769709\n          - -389.5323589034\n          - -26.55575319469\n  -   -   - 14342.9552468\n          - 6289.882966587\n          - -3848.05977671\n      -   - 6289.882966587\n          - 9329.360846504\n          - 1387.748277719\n      -   - -3848.05977671\n          - 1387.748277719\n          - 93.98859313953\n  -   -   - 2309.371114485\n          - 1012.73405011\n          - -617.8751193011\n      -   - 1012.73405011\n          - 1499.715375261\n          - 226.6996495043\n      -   - -617.8751193011\n          - 226.6996495043\n          - 19.71367105378\n  -   -   - 
658.4385693042\n          - 288.7419554965\n          - -174.6355818894\n      -   - 288.7419554965\n          - 425.4259635842\n          - 67.56085381701\n      -   - -174.6355818894\n          - 67.56085381701\n          - 9.734306985191\n  -   -   - 147.8038109482\n          - 64.81345870863\n          - -38.46761366883\n      -   - 64.81345870863\n          - 94.45893845337\n          - 16.56853761046\n      -   - -38.46761366883\n          - 16.56853761046\n          - 4.157765246739\n  -   -   - 147.8038109482\n          - 64.81345870863\n          - -38.46761366883\n      -   - 64.81345870863\n          - 94.45893845337\n          - 16.56853761046\n      -   - -38.46761366883\n          - 16.56853761046\n          - 4.157765246739\n  -   -   - -502.8297957863\n          - -220.5034461954\n          - 133.321878795\n      -   - -220.5034461954\n          - -324.8254786078\n          - -51.67462438823\n      -   - 133.321878795\n          - -51.67462438823\n          - -7.546900209046\n  -   -   - 3292.641782076\n          - 1443.885786518\n          - -866.4874150134\n      -   - 1443.885786518\n          - 2117.778991587\n          - 350.8652875891\n      -   - -866.4874150134\n          - 350.8652875891\n          - 66.9809474737\n  -   -   - 601.415922879\n          - 263.7393940918\n          - -160.5922585799\n      -   - 263.7393940918\n          - 390.1127867798\n          - 59.64454290172\n      -   - -160.5922585799\n          - 59.64454290172\n          - 5.98681198579\n  -   -   - 182.8153091813\n          - 80.16167205613\n          - -46.0517180463\n      -   - 80.16167205613\n          - 114.6708300718\n          - 23.41375701469\n      -   - -46.0517180463\n          - 23.41375701469\n          - 9.249705571708\n  -   -   - 128.0971384699\n          - 56.16913286035\n          - -32.4405095433\n      -   - 56.16913286035\n          - 80.5930469462\n          - 16.07620102257\n      -   - -32.4405095433\n          - 16.07620102257\n          - 
6.017649404193\n  -   -   - -4024.637363485\n          - -1764.942759693\n          - 1079.698769709\n      -   - -1764.942759693\n          - -2617.72518908\n          - -389.5323589034\n      -   - 1079.698769709\n          - -389.5323589034\n          - -26.55575319468\n  -   -   - 14342.9552468\n          - 6289.882966587\n          - -3848.05977671\n      -   - 6289.882966587\n          - 9329.360846504\n          - 1387.748277719\n      -   - -3848.05977671\n          - 1387.748277719\n          - 93.98859313953\n  -   -   - 2309.371114485\n          - 1012.73405011\n          - -617.8751193011\n      -   - 1012.73405011\n          - 1499.715375261\n          - 226.6996495043\n      -   - -617.8751193011\n          - 226.6996495043\n          - 19.71367105378\n  -   -   - 658.4385693042\n          - 288.7419554965\n          - -174.6355818894\n      -   - 288.7419554965\n          - 425.4259635842\n          - 67.56085381701\n      -   - -174.6355818894\n          - 67.56085381701\n          - 9.734306985197\n  -   -   - 147.8038109482\n          - 64.81345870863\n          - -38.46761366883\n      -   - 64.81345870863\n          - 94.45893845337\n          - 16.56853761046\n      -   - -38.46761366883\n          - 16.56853761046\n          - 4.157765246739\n  -   -   - 147.8038109482\n          - 64.81345870863\n          - -38.46761366883\n      -   - 64.81345870863\n          - 94.45893845337\n          - 16.56853761046\n      -   - -38.46761366883\n          - 16.56853761046\n          - 4.157765246737\n  -   -   - -502.8297957863\n          - -220.5034461954\n          - 133.321878795\n      -   - -220.5034461954\n          - -324.8254786078\n          - -51.67462438823\n      -   - 133.321878795\n          - -51.67462438823\n          - -7.546900209044\n  -   -   - 3292.641782076\n          - 1443.885786518\n          - -866.4874150134\n      -   - 1443.885786518\n          - 2117.778991587\n          - 350.8652875891\n      -   - -866.4874150134\n      
    - 350.8652875891\n          - 66.9809474737\n  -   -   - 601.415922879\n          - 263.7393940918\n          - -160.5922585799\n      -   - 263.7393940918\n          - 390.1127867798\n          - 59.64454290173\n      -   - -160.5922585799\n          - 59.64454290173\n          - 5.986811985789\n  -   -   - 182.8153091813\n          - 80.16167205613\n          - -46.0517180463\n      -   - 80.16167205613\n          - 114.6708300718\n          - 23.41375701469\n      -   - -46.0517180463\n          - 23.41375701469\n          - 9.249705571708\n  -   -   - 128.0971384699\n          - 56.16913286035\n          - -32.4405095433\n      -   - 56.16913286035\n          - 80.5930469462\n          - 16.07620102258\n      -   - -32.4405095433\n          - 16.07620102258\n          - 6.017649404194\n  -   -   - -4024.637363485\n          - -1764.942759693\n          - 1079.698769709\n      -   - -1764.942759693\n          - -2617.72518908\n          - -389.5323589034\n      -   - 1079.698769709\n          - -389.5323589034\n          - -26.55575319468\n  -   -   - -10720.15125926\n          - -4701.162126956\n          - 2877.493275431\n      -   - -4701.162126956\n          - -6974.883293509\n          - -1034.563281084\n      -   - 2877.493275431\n          - -1034.563281084\n          - -66.50544541639\n  -   -   - 14342.9552468\n          - 6289.882966587\n          - -3848.05977671\n      -   - 6289.882966587\n          - 9329.360846504\n          - 1387.748277719\n      -   - -3848.05977671\n          - 1387.748277719\n          - 93.98859313953\n  -   -   - 4107.502577423\n          - 1801.278961009\n          - -1100.9294753\n      -   - 1801.278961009\n          - 2670.207390632\n          - 399.4634027254\n      -   - -1100.9294753\n          - 399.4634027254\n          - 29.7896196368\n  -   -   - 2309.371114485\n          - 1012.73405011\n          - -617.8751193011\n      -   - 1012.73405011\n          - 1499.715375261\n          - 226.6996495043\n      -   
- -617.8751193011\n          - 226.6996495043\n          - 19.71367105378\n  -   -   - 1132.425944378\n          - 496.6017216382\n          - -301.7604507638\n      -   - 496.6017216382\n          - 733.6727867614\n          - 113.499372341\n      -   - -301.7604507638\n          - 113.499372341\n          - 12.95003027788\n  -   -   - -96.78384376002\n          - -42.4439297132\n          - 26.24677072092\n      -   - -42.4439297132\n          - -63.35038252298\n          - -8.827773128153\n      -   - 26.24677072092\n          - -8.827773128153\n          - 0.1202800918613\n  -   -   - -96.78384376002\n          - -42.4439297132\n          - 26.24677072092\n      -   - -42.4439297132\n          - -63.35038252298\n          - -8.827773128153\n      -   - 26.24677072092\n          - -8.827773128153\n          - 0.1202800918629\n  -   -   - -2097.995093057\n          - -920.0148615183\n          - 553.5014196588\n      -   - -920.0148615183\n          - -1351.37575036\n          - -220.8956593773\n      -   - 553.5014196588\n          - -220.8956593773\n          - -38.92732733724\n  -   -   - 1169.641704033\n          - 512.9089847695\n          - -307.4565994537\n      -   - 512.9089847695\n          - 751.8082285228\n          - 125.2965573785\n      -   - -307.4565994537\n          - 125.2965573785\n          - 24.72038264179\n  -   -   - 619.6881094322\n          - 271.7463432808\n          - -163.5235269434\n      -   - 271.7463432808\n          - 399.2074510919\n          - 65.17951383646\n      -   - -163.5235269434\n          - 65.17951383646\n          - 11.40410777447\n  -   -   - -1358.879936331\n          - -596.007217569\n          - 394.3663309731\n      -   - -596.007217569\n          - -926.0633850662\n          - -74.53434762621\n      -   - 394.3663309731\n          - -74.53434762621\n          - 71.17453818383\n  -   -   - 191.0851566267\n          - 83.78647255099\n          - -47.67820746517\n      -   - 83.78647255099\n          - 
119.2114851962\n          - 25.34580158139\n      -   - -47.67820746517\n          - 25.34580158139\n          - 10.89567211601\n  -   -   - 168.6962139181\n          - 73.97112025538\n          - -42.64264193489\n      -   - 73.97112025538\n          - 106.0235548818\n          - 21.3234391598\n      -   - -42.64264193489\n          - 21.3234391598\n          - 8.138709169582\n  -   -   - 140.4964790262\n          - 61.60610305115\n          - -35.58093011922\n      -   - 61.60610305115\n          - 88.39458253098\n          - 17.63175280599\n      -   - -35.58093011922\n          - 17.63175280599\n          - 6.599339566474\n  -   -   - 3292.641782076\n          - 1443.885786518\n          - -866.4874150134\n      -   - 1443.885786518\n          - 2117.778991587\n          - 350.8652875891\n      -   - -866.4874150134\n          - 350.8652875891\n          - 66.98094747369\n  -   -   - 3292.641782076\n          - 1443.885786518\n          - -866.4874150134\n      -   - 1443.885786518\n          - 2117.778991587\n          - 350.8652875891\n      -   - -866.4874150134\n          - 350.8652875891\n          - 66.9809474737\n  -   -   - 3292.641782076\n          - 1443.885786518\n          - -866.4874150134\n      -   - 1443.885786518\n          - 2117.778991587\n          - 350.8652875891\n      -   - -866.4874150134\n          - 350.8652875891\n          - 66.9809474737\n  -   -   - 3292.641782076\n          - 1443.885786518\n          - -866.4874150134\n      -   - 1443.885786518\n          - 2117.778991587\n          - 350.8652875891\n      -   - -866.4874150134\n          - 350.8652875891\n          - 66.9809474737\n  -   -   - 3292.641782076\n          - 1443.885786518\n          - -866.4874150134\n      -   - 1443.885786518\n          - 2117.778991587\n          - 350.8652875891\n      -   - -866.4874150134\n          - 350.8652875891\n          - 66.9809474737\n  -   -   - 3292.641782076\n          - 1443.885786518\n          - -866.4874150134\n      -   - 
1443.885786518\n          - 2117.778991587\n          - 350.8652875891\n      -   - -866.4874150134\n          - 350.8652875891\n          - 66.9809474737\n  -   -   - -4024.637363485\n          - -1764.942759693\n          - 1079.698769709\n      -   - -1764.942759693\n          - -2617.72518908\n          - -389.5323589034\n      -   - 1079.698769709\n          - -389.5323589034\n          - -26.55575319468\n  -   -   - 14342.9552468\n          - 6289.882966587\n          - -3848.05977671\n      -   - 6289.882966587\n          - 9329.360846504\n          - 1387.748277719\n      -   - -3848.05977671\n          - 1387.748277719\n          - 93.98859313953\n  -   -   - 2309.371114485\n          - 1012.73405011\n          - -617.8751193011\n      -   - 1012.73405011\n          - 1499.715375261\n          - 226.6996495043\n      -   - -617.8751193011\n          - 226.6996495043\n          - 19.71367105378\n  -   -   - 658.4385693042\n          - 288.7419554965\n          - -174.6355818894\n      -   - 288.7419554965\n          - 425.4259635842\n          - 67.56085381701\n      -   - -174.6355818894\n          - 67.56085381701\n          - 9.734306985194\n  -   -   - 147.8038109482\n          - 64.81345870863\n          - -38.46761366883\n      -   - 64.81345870863\n          - 94.45893845337\n          - 16.56853761046\n      -   - -38.46761366883\n          - 16.56853761046\n          - 4.157765246739\n  -   -   - -502.8297957863\n          - -220.5034461954\n          - 133.321878795\n      -   - -220.5034461954\n          - -324.8254786078\n          - -51.67462438823\n      -   - 133.321878795\n          - -51.67462438823\n          - -7.546900209046\n  -   -   - 147.8038109482\n          - 64.81345870863\n          - -38.46761366883\n      -   - 64.81345870863\n          - 94.45893845337\n          - 16.56853761046\n      -   - -38.46761366883\n          - 16.56853761046\n          - 4.157765246737\n  -   -   - -502.8297957863\n          - -220.5034461954\n      
    - 133.321878795\n      -   - -220.5034461954\n          - -324.8254786078\n          - -51.67462438823\n      -   - 133.321878795\n          - -51.67462438823\n          - -7.546900209046\n  -   -   - 3292.641782076\n          - 1443.885786518\n          - -866.4874150134\n      -   - 1443.885786518\n          - 2117.778991587\n          - 350.8652875891\n      -   - -866.4874150134\n          - 350.8652875891\n          - 66.9809474737\n  -   -   - 601.415922879\n          - 263.7393940918\n          - -160.5922585799\n      -   - 263.7393940918\n          - 390.1127867798\n          - 59.64454290173\n      -   - -160.5922585799\n          - 59.64454290173\n          - 5.986811985787\n  -   -   - 182.8153091813\n          - 80.16167205613\n          - -46.0517180463\n      -   - 80.16167205613\n          - 114.6708300718\n          - 23.41375701469\n      -   - -46.0517180463\n          - 23.41375701469\n          - 9.249705571709\n  -   -   - 128.0971384699\n          - 56.16913286035\n          - -32.4405095433\n      -   - 56.16913286035\n          - 80.5930469462\n          - 16.07620102257\n      -   - -32.4405095433\n          - 16.07620102257\n          - 6.017649404193\n  -   -   - -4024.637363485\n          - -1764.942759693\n          - 1079.698769709\n      -   - -1764.942759693\n          - -2617.72518908\n          - -389.5323589034\n      -   - 1079.698769709\n          - -389.5323589034\n          - -26.55575319468\n  -   -   - 14342.9552468\n          - 6289.882966587\n          - -3848.05977671\n      -   - 6289.882966587\n          - 9329.360846504\n          - 1387.748277719\n      -   - -3848.05977671\n          - 1387.748277719\n          - 93.98859313953\n  -   -   - 2309.371114485\n          - 1012.73405011\n          - -617.8751193011\n      -   - 1012.73405011\n          - 1499.715375261\n          - 226.6996495043\n      -   - -617.8751193011\n          - 226.6996495043\n          - 19.71367105378\n  -   -   - 658.4385693042\n         
 - 288.7419554965\n          - -174.6355818894\n      -   - 288.7419554965\n          - 425.4259635842\n          - 67.56085381701\n      -   - -174.6355818894\n          - 67.56085381701\n          - 9.734306985194\n  -   -   - 147.8038109482\n          - 64.81345870863\n          - -38.46761366883\n      -   - 64.81345870863\n          - 94.45893845337\n          - 16.56853761046\n      -   - -38.46761366883\n          - 16.56853761046\n          - 4.157765246739\n  -   -   - -502.8297957863\n          - -220.5034461954\n          - 133.321878795\n      -   - -220.5034461954\n          - -324.8254786078\n          - -51.67462438823\n      -   - 133.321878795\n          - -51.67462438823\n          - -7.546900209046\n  -   -   - 147.8038109482\n          - 64.81345870863\n          - -38.46761366883\n      -   - 64.81345870863\n          - 94.45893845337\n          - 16.56853761046\n      -   - -38.46761366883\n          - 16.56853761046\n          - 4.157765246737\n  -   -   - -502.8297957863\n          - -220.5034461954\n          - 133.321878795\n      -   - -220.5034461954\n          - -324.8254786078\n          - -51.67462438823\n      -   - 133.321878795\n          - -51.67462438823\n          - -7.546900209046\n  -   -   - 3292.641782076\n          - 1443.885786518\n          - -866.4874150134\n      -   - 1443.885786518\n          - 2117.778991587\n          - 350.8652875891\n      -   - -866.4874150134\n          - 350.8652875891\n          - 66.9809474737\n  -   -   - 601.415922879\n          - 263.7393940918\n          - -160.5922585799\n      -   - 263.7393940918\n          - 390.1127867798\n          - 59.64454290173\n      -   - -160.5922585799\n          - 59.64454290173\n          - 5.986811985787\n  -   -   - 182.8153091813\n          - 80.16167205613\n          - -46.0517180463\n      -   - 80.16167205613\n          - 114.6708300718\n          - 23.41375701469\n      -   - -46.0517180463\n          - 23.41375701469\n          - 9.249705571708\n  
-   -   - 128.0971384699\n          - 56.16913286035\n          - -32.4405095433\n      -   - 56.16913286035\n          - 80.5930469462\n          - 16.07620102257\n      -   - -32.4405095433\n          - 16.07620102257\n          - 6.017649404194\n  -   -   - -4024.637363485\n          - -1764.942759693\n          - 1079.698769709\n      -   - -1764.942759693\n          - -2617.72518908\n          - -389.5323589034\n      -   - 1079.698769709\n          - -389.5323589034\n          - -26.55575319469\n  -   -   - 14342.9552468\n          - 6289.882966587\n          - -3848.05977671\n      -   - 6289.882966587\n          - 9329.360846504\n          - 1387.748277719\n      -   - -3848.05977671\n          - 1387.748277719\n          - 93.98859313953\n  -   -   - 2309.371114485\n          - 1012.73405011\n          - -617.8751193011\n      -   - 1012.73405011\n          - 1499.715375261\n          - 226.6996495043\n      -   - -617.8751193011\n          - 226.6996495043\n          - 19.71367105378\n  -   -   - 658.4385693042\n          - 288.7419554965\n          - -174.6355818894\n      -   - 288.7419554965\n          - 425.4259635842\n          - 67.56085381701\n      -   - -174.6355818894\n          - 67.56085381701\n          - 9.734306985191\n  -   -   - 147.8038109482\n          - 64.81345870863\n          - -38.46761366883\n      -   - 64.81345870863\n          - 94.45893845337\n          - 16.56853761046\n      -   - -38.46761366883\n          - 16.56853761046\n          - 4.157765246739\n  -   -   - -502.8297957863\n          - -220.5034461954\n          - 133.321878795\n      -   - -220.5034461954\n          - -324.8254786078\n          - -51.67462438823\n      -   - 133.321878795\n          - -51.67462438823\n          - -7.546900209046\n  -   -   - 147.8038109482\n          - 64.81345870863\n          - -38.46761366883\n      -   - 64.81345870863\n          - 94.45893845337\n          - 16.56853761046\n      -   - -38.46761366883\n          - 
16.56853761046\n          - 4.157765246739\n  -   -   - -502.8297957863\n          - -220.5034461954\n          - 133.321878795\n      -   - -220.5034461954\n          - -324.8254786078\n          - -51.67462438823\n      -   - 133.321878795\n          - -51.67462438823\n          - -7.546900209046\n  -   -   - 3292.641782076\n          - 1443.885786518\n          - -866.4874150134\n      -   - 1443.885786518\n          - 2117.778991587\n          - 350.8652875891\n      -   - -866.4874150134\n          - 350.8652875891\n          - 66.9809474737\n  -   -   - 601.415922879\n          - 263.7393940918\n          - -160.5922585799\n      -   - 263.7393940918\n          - 390.1127867798\n          - 59.64454290173\n      -   - -160.5922585799\n          - 59.64454290173\n          - 5.986811985788\n  -   -   - 182.8153091813\n          - 80.16167205613\n          - -46.0517180463\n      -   - 80.16167205613\n          - 114.6708300718\n          - 23.41375701469\n      -   - -46.0517180463\n          - 23.41375701469\n          - 9.249705571708\n  -   -   - 128.0971384699\n          - 56.16913286035\n          - -32.4405095433\n      -   - 56.16913286035\n          - 80.5930469462\n          - 16.07620102257\n      -   - -32.4405095433\n          - 16.07620102257\n          - 6.017649404193\n  -   -   - 3292.641782076\n          - 1443.885786518\n          - -866.4874150134\n      -   - 1443.885786518\n          - 2117.778991587\n          - 350.8652875891\n      -   - -866.4874150134\n          - 350.8652875891\n          - 66.9809474737\n  -   -   - -4024.637363485\n          - -1764.942759693\n          - 1079.698769709\n      -   - -1764.942759693\n          - -2617.72518908\n          - -389.5323589034\n      -   - 1079.698769709\n          - -389.5323589034\n          - -26.55575319468\n  -   -   - 14342.9552468\n          - 6289.882966587\n          - -3848.05977671\n      -   - 6289.882966587\n          - 9329.360846504\n          - 1387.748277719\n      -   - 
-3848.05977671\n          - 1387.748277719\n          - 93.98859313953\n  -   -   - 658.4385693042\n          - 288.7419554965\n          - -174.6355818894\n      -   - 288.7419554965\n          - 425.4259635842\n          - 67.56085381701\n      -   - -174.6355818894\n          - 67.56085381701\n          - 9.734306985194\n  -   -   - 3292.641782076\n          - 1443.885786518\n          - -866.4874150134\n      -   - 1443.885786518\n          - 2117.778991587\n          - 350.8652875891\n      -   - -866.4874150134\n          - 350.8652875891\n          - 66.9809474737\n  -   -   - 128.0971384699\n          - 56.16913286035\n          - -32.4405095433\n      -   - 56.16913286035\n          - 80.5930469462\n          - 16.07620102257\n      -   - -32.4405095433\n          - 16.07620102257\n          - 6.017649404194\n  -   -   - 3292.641782076\n          - 1443.885786518\n          - -866.4874150134\n      -   - 1443.885786518\n          - 2117.778991587\n          - 350.8652875891\n      -   - -866.4874150134\n          - 350.8652875891\n          - 66.9809474737\n  -   -   - 128.0971384699\n          - 56.16913286035\n          - -32.4405095433\n      -   - 56.16913286035\n          - 80.5930469462\n          - 16.07620102257\n      -   - -32.4405095433\n          - 16.07620102257\n          - 6.017649404194\n  -   -   - 3292.641782076\n          - 1443.885786518\n          - -866.4874150134\n      -   - 1443.885786518\n          - 2117.778991587\n          - 350.8652875891\n      -   - -866.4874150134\n          - 350.8652875891\n          - 66.9809474737\n  -   -   - 3292.641782076\n          - 1443.885786518\n          - -866.4874150134\n      -   - 1443.885786518\n          - 2117.778991587\n          - 350.8652875891\n      -   - -866.4874150134\n          - 350.8652875891\n          - 66.9809474737\n  -   -   - 3292.641782076\n          - 1443.885786518\n          - -866.4874150134\n      -   - 1443.885786518\n          - 2117.778991587\n          - 
350.8652875891\n      -   - -866.4874150134\n          - 350.8652875891\n          - 66.9809474737\n  -   -   - 373.9764193214\n          - 163.996345322\n          - -98.61168950718\n      -   - 163.996345322\n          - 240.8144049061\n          - 39.47545780393\n      -   - -98.61168950718\n          - 39.47545780393\n          - 7.079420182088\n  -   -   - 373.9764193214\n          - 163.996345322\n          - -98.61168950718\n      -   - 163.996345322\n          - 240.8144049061\n          - 39.47545780393\n      -   - -98.61168950718\n          - 39.47545780393\n          - 7.079420182088\n  -   -   - -4024.637363485\n          - -1764.942759693\n          - 1079.698769709\n      -   - -1764.942759693\n          - -2617.72518908\n          - -389.5323589034\n      -   - 1079.698769709\n          - -389.5323589034\n          - -26.55575319468\n  -   -   - 14342.9552468\n          - 6289.882966587\n          - -3848.05977671\n      -   - 6289.882966587\n          - 9329.360846504\n          - 1387.748277719\n      -   - -3848.05977671\n          - 1387.748277719\n          - 93.98859313953\n  -   -   - 373.9764193214\n          - 163.996345322\n          - -98.61168950718\n      -   - 163.996345322\n          - 240.8144049061\n          - 39.47545780393\n      -   - -98.61168950718\n          - 39.47545780393\n          - 7.079420182093\n  -   -   - 373.9764193214\n          - 163.996345322\n          - -98.61168950718\n      -   - 163.996345322\n          - 240.8144049061\n          - 39.47545780394\n      -   - -98.61168950718\n          - 39.47545780394\n          - 7.079420182093\n  -   -   - -2097.995093057\n          - -920.0148615183\n          - 553.5014196588\n      -   - -920.0148615183\n          - -1351.37575036\n          - -220.8956593773\n      -   - 553.5014196588\n          - -220.8956593773\n          - -38.92732733725\n  -   -   - 619.6881094322\n          - 271.7463432808\n          - -163.5235269434\n      -   - 271.7463432808\n          - 
399.2074510919\n          - 65.17951383646\n      -   - -163.5235269434\n          - 65.17951383646\n          - 11.40410777447\n  -   -   - 373.9764193214\n          - 163.996345322\n          - -98.61168950718\n      -   - 163.996345322\n          - 240.8144049061\n          - 39.47545780394\n      -   - -98.61168950718\n          - 39.47545780394\n          - 7.079420182092\n  -   -   - 373.9764193214\n          - 163.996345322\n          - -98.61168950718\n      -   - 163.996345322\n          - 240.8144049061\n          - 39.47545780393\n      -   - -98.61168950718\n          - 39.47545780393\n          - 7.079420182093\n  -   -   - -4024.637363485\n          - -1764.942759693\n          - 1079.698769709\n      -   - -1764.942759693\n          - -2617.72518908\n          - -389.5323589034\n      -   - 1079.698769709\n          - -389.5323589034\n          - -26.55575319468\n  -   -   - 14342.9552468\n          - 6289.882966587\n          - -3848.05977671\n      -   - 6289.882966587\n          - 9329.360846504\n          - 1387.748277719\n      -   - -3848.05977671\n          - 1387.748277719\n          - 93.98859313953\n  -   -   - -2097.995093057\n          - -920.0148615183\n          - 553.5014196588\n      -   - -920.0148615183\n          - -1351.37575036\n          - -220.8956593773\n      -   - 553.5014196588\n          - -220.8956593773\n          - -38.92732733725\n  -   -   - 619.6881094322\n          - 271.7463432808\n          - -163.5235269434\n      -   - 271.7463432808\n          - 399.2074510919\n          - 65.17951383646\n      -   - -163.5235269434\n          - 65.17951383646\n          - 11.40410777447\n  -   -   - 373.9764193214\n          - 163.996345322\n          - -98.61168950718\n      -   - 163.996345322\n          - 240.8144049061\n          - 39.47545780394\n      -   - -98.61168950718\n          - 39.47545780394\n          - 7.079420182092\n  -   -   - 373.9764193214\n          - 163.996345322\n          - -98.61168950718\n      -   
- 163.996345322\n          - 240.8144049061\n          - 39.47545780394\n      -   - -98.61168950718\n          - 39.47545780394\n          - 7.079420182092\ndelta: 0.05\ndelta_old: 0.025\nlinear_terms:\n  -   - 83.4551262143\n      - 70.20115871443\n      - 59.39117854221\n      - 45.24391055618\n      - 38.62628748021\n      - 19.82198478816\n      - -22.14622491455\n      - -95.66099878835\n      - -22.14622491455\n      - -95.66099878836\n      - -596.6157739812\n      - 265.7919023746\n      - 118.7321519868\n      - 97.69885198231\n      - 57.88119066151\n      - 44.6773124466\n      - 35.78441925523\n      - 83.45512621431\n      - 70.20115871451\n      - 59.39117854221\n      - 45.24391055618\n      - 38.62628748021\n      - 19.82198478816\n      - -22.14622491455\n      - -95.66099878836\n      - -22.14622491455\n      - -95.66099878835\n      - -596.6157739812\n      - 265.7919023746\n      - 118.7321519868\n      - 97.69885198231\n      - 57.88119066151\n      - 44.6773124466\n      - 35.78441925523\n      - 83.4551262143\n      - 70.20115871453\n      - 59.39117854221\n      - 45.24391055618\n      - 38.62628748025\n      - 19.82198478816\n      - -22.14622491455\n      - -95.66099878836\n      - -22.14622491453\n      - -95.66099878835\n      - -596.6157739812\n      - 265.7919023746\n      - 118.7321519868\n      - 97.69885198231\n      - 57.88119066151\n      - 44.6773124466\n      - 35.78441925523\n      - 83.4551262143\n      - 70.20115871443\n      - 59.39117854221\n      - 45.24391055624\n      - 38.62628748025\n      - 19.82198478816\n      - -22.14622491455\n      - -95.66099878836\n      - -22.14622491455\n      - -95.66099878836\n      - -596.6157739812\n      - 265.7919023746\n      - 118.7321519868\n      - 97.69885198231\n      - 57.88119066151\n      - 44.6773124466\n      - 35.78441925523\n      - 83.4551262143\n      - 59.39117854221\n      - 0.1871101139566\n      - 816.736081667\n      - 816.736081667\n      - 816.736081667\n      - 
32.31842399979\n      - 83.4551262143\n      - 59.39117854221\n      - 0.1871101139567\n      - 816.736081667\n      - 816.736081667\n      - 816.736081667\n      - 32.31842399979\n      - 83.45512621433\n      - 59.39117854221\n      - 0.1871101139701\n      - 816.736081667\n      - 816.736081667\n      - 816.736081667\n      - 32.31842399979\n      - 83.4551262143\n      - 59.39117854221\n      - 0.1871101139567\n      - 816.736081667\n      - 32.31842399979\n      - 816.736081667\n      - 816.736081667\n      - 32.31842399979\n      - 83.45512621433\n      - 59.39117854221\n      - 38.62628748025\n      - 0.1871101139566\n      - -50.98637547872\n      - -50.98637547872\n      - -188.858358915\n      - 816.736081667\n      - 92.44936101936\n      - 50.5433752969\n      - 32.31842399979\n      - 83.45512621433\n      - 59.39117854221\n      - 38.62628748025\n      - 0.18711011397\n      - -50.98637547872\n      - -50.98637547872\n      - -188.858358915\n      - 816.736081667\n      - 92.44936101934\n      - 50.54337529691\n      - 32.31842399979\n      - 83.45512621431\n      - 59.39117854221\n      - 38.62628748025\n      - 0.1871101139419\n      - -50.98637547872\n      - -50.98637547871\n      - -188.858358915\n      - 816.736081667\n      - 92.44936101935\n      - 50.54337529691\n      - 32.31842399979\n      - 83.4551262143\n      - 70.20115871451\n      - 59.39117854221\n      - 45.24391055617\n      - 38.62628748025\n      - 19.82198478816\n      - -95.66099878835\n      - -95.66099878836\n      - -596.6157739812\n      - 265.7919023746\n      - 118.7321519868\n      - 97.69885198231\n      - 57.8811906615\n      - 44.6773124466\n      - 35.78441925523\n      - 816.736081667\n      - 816.736081667\n      - 816.736081667\n      - 816.736081667\n      - 816.736081667\n      - 816.736081667\n      - 83.4551262143\n      - 59.39117854221\n      - 38.62628748025\n      - 0.1871101139567\n      - -50.98637547872\n      - -188.858358915\n      - -50.98637547871\n 
     - -188.858358915\n      - 816.736081667\n      - 92.44936101936\n      - 50.5433752969\n      - 32.31842399979\n      - 83.45512621431\n      - 59.39117854221\n      - 38.62628748025\n      - 0.1871101139566\n      - -50.98637547872\n      - -188.858358915\n      - -50.98637547871\n      - -188.858358915\n      - 816.736081667\n      - 92.44936101936\n      - 50.54337529691\n      - 32.31842399979\n      - 83.45512621431\n      - 59.39117854221\n      - 38.62628748025\n      - 0.18711011397\n      - -50.98637547872\n      - -188.858358915\n      - -50.98637547872\n      - -188.858358915\n      - 816.736081667\n      - 92.44936101935\n      - 50.54337529691\n      - 32.31842399979\n      - 816.736081667\n      - 83.4551262143\n      - 59.39117854221\n      - 0.1871101139567\n      - 816.736081667\n      - 32.31842399979\n      - 816.736081667\n      - 32.31842399979\n      - 816.736081667\n      - 816.736081667\n      - 816.736081667\n      - -22.14622491453\n      - -22.14622491453\n      - 83.45512621431\n      - 59.39117854221\n      - -22.14622491455\n      - -22.14622491455\n      - -596.6157739812\n      - 118.7321519868\n      - -22.14622491455\n      - -22.14622491455\n      - 83.4551262143\n      - 59.39117854221\n      - -596.6157739812\n      - 118.7321519868\n      - -22.14622491455\n      - -22.14622491455\n  -   - -65.70818330941\n      - -60.39894618479\n      - -56.24495202103\n      - -56.74595477465\n      - -51.00210223596\n      - -46.1667378912\n      - -39.8057155928\n      - -36.84709189045\n      - -39.8057155928\n      - -36.84709189045\n      - -42.71855980194\n      - -33.37423700032\n      - -52.29733914833\n      - 792.7694136555\n      - 23.53933160967\n      - 7.777083680153\n      - 3.011421814957\n      - -65.70818330941\n      - -60.39894618479\n      - -56.24495202103\n      - -56.74595477465\n      - -51.00210223596\n      - -46.1667378912\n      - -39.8057155928\n      - -36.84709189045\n      - -39.8057155928\n      - 
-36.84709189045\n      - -42.71855980194\n      - -33.37423700032\n      - -52.29733914833\n      - 792.7694136555\n      - 23.53933160967\n      - 7.777083680153\n      - 3.011421814957\n      - -65.70818330941\n      - -60.39894618479\n      - -56.24495202103\n      - -56.74595477465\n      - -51.00210223596\n      - -46.1667378912\n      - -39.8057155928\n      - -36.84709189045\n      - -39.80571559279\n      - -36.84709189045\n      - -42.71855980194\n      - -33.37423700032\n      - -52.29733914833\n      - 792.7694136555\n      - 23.53933160967\n      - 7.777083680153\n      - 3.011421814957\n      - -65.70818330941\n      - -60.39894618479\n      - -56.24495202103\n      - -56.74595477465\n      - -51.00210223596\n      - -46.1667378912\n      - -39.8057155928\n      - -36.84709189045\n      - -39.8057155928\n      - -36.84709189045\n      - -42.71855980194\n      - -33.37423700032\n      - -52.29733914833\n      - 792.7694136555\n      - 23.53933160967\n      - 7.777083680153\n      - 3.011421814957\n      - -65.70818330941\n      - -56.24495202103\n      - -42.54119035883\n      - -23.31820321215\n      - -23.31820321215\n      - -23.31820321215\n      - 1.805173509361\n      - -65.70818330941\n      - -56.24495202103\n      - -42.54119035883\n      - -23.31820321215\n      - -23.31820321215\n      - -23.31820321215\n      - 1.805173509361\n      - -65.70818330941\n      - -56.24495202103\n      - -42.54119035883\n      - -23.31820321215\n      - -23.31820321215\n      - -23.31820321215\n      - 1.805173509361\n      - -65.70818330941\n      - -56.24495202103\n      - -42.54119035883\n      - -23.31820321215\n      - 1.805173509361\n      - -23.31820321215\n      - -23.31820321215\n      - 1.805173509361\n      - -65.70818330941\n      - -56.24495202103\n      - -51.00210223596\n      - -42.54119035883\n      - -37.88298121996\n      - -37.88298121996\n      - -37.16785677463\n      - -23.31820321215\n      - -93.40145763629\n      - 12.92907306798\n      
- 1.805173509361\n      - -65.70818330941\n      - -56.24495202103\n      - -51.00210223596\n      - -42.54119035883\n      - -37.88298121996\n      - -37.88298121996\n      - -37.16785677463\n      - -23.31820321215\n      - -93.40145763629\n      - 12.92907306798\n      - 1.805173509361\n      - -65.70818330941\n      - -56.24495202103\n      - -51.00210223596\n      - -42.54119035883\n      - -37.88298121996\n      - -37.88298121996\n      - -37.16785677463\n      - -23.31820321215\n      - -93.40145763629\n      - 12.92907306798\n      - 1.805173509361\n      - -65.70818330941\n      - -60.39894618479\n      - -56.24495202103\n      - -56.74595477465\n      - -51.00210223596\n      - -46.1667378912\n      - -36.84709189045\n      - -36.84709189045\n      - -42.71855980194\n      - -33.37423700032\n      - -52.29733914833\n      - 792.7694136555\n      - 23.53933160967\n      - 7.777083680153\n      - 3.011421814957\n      - -23.31820321215\n      - -23.31820321215\n      - -23.31820321215\n      - -23.31820321215\n      - -23.31820321215\n      - -23.31820321215\n      - -65.70818330941\n      - -56.24495202103\n      - -51.00210223596\n      - -42.54119035883\n      - -37.88298121996\n      - -37.16785677463\n      - -37.88298121996\n      - -37.16785677463\n      - -23.31820321215\n      - -93.40145763629\n      - 12.92907306798\n      - 1.805173509361\n      - -65.70818330941\n      - -56.24495202103\n      - -51.00210223596\n      - -42.54119035883\n      - -37.88298121996\n      - -37.16785677463\n      - -37.88298121996\n      - -37.16785677463\n      - -23.31820321215\n      - -93.40145763629\n      - 12.92907306798\n      - 1.805173509361\n      - -65.70818330941\n      - -56.24495202103\n      - -51.00210223596\n      - -42.54119035883\n      - -37.88298121996\n      - -37.16785677463\n      - -37.88298121996\n      - -37.16785677463\n      - -23.31820321215\n      - -93.40145763629\n      - 12.92907306798\n      - 1.805173509361\n      - 
-23.31820321215\n      - -65.70818330941\n      - -56.24495202103\n      - -42.54119035883\n      - -23.31820321215\n      - 1.805173509361\n      - -23.31820321215\n      - 1.805173509361\n      - -23.31820321215\n      - -23.31820321215\n      - -23.31820321215\n      - -39.80571559279\n      - -39.80571559279\n      - -65.70818330941\n      - -56.24495202103\n      - -39.8057155928\n      - -39.8057155928\n      - -42.71855980194\n      - -52.29733914833\n      - -39.8057155928\n      - -39.8057155928\n      - -65.70818330941\n      - -56.24495202103\n      - -42.71855980194\n      - -52.29733914833\n      - -39.8057155928\n      - -39.8057155928\n  -   - 35.67527604537\n      - 40.13513697208\n      - 42.22265142152\n      - 44.3706588541\n      - 43.79191549807\n      - 43.60185566328\n      - 43.89691265518\n      - 51.559374423\n      - 43.89691265518\n      - 51.559374423\n      - 144.6484148813\n      - -29.52793630843\n      - -4.29051063361\n      - 4.275018249209\n      - 3.077593180459\n      - 3.571453211328\n      - 3.635446234161\n      - 35.67527604537\n      - 40.13513697207\n      - 42.22265142152\n      - 44.3706588541\n      - 43.79191549807\n      - 43.60185566328\n      - 43.89691265518\n      - 51.559374423\n      - 43.89691265518\n      - 51.559374423\n      - 144.6484148813\n      - -29.52793630843\n      - -4.290510633609\n      - 4.275018249209\n      - 3.077593180459\n      - 3.571453211328\n      - 3.635446234161\n      - 35.67527604537\n      - 40.13513697206\n      - 42.22265142152\n      - 44.3706588541\n      - 43.79191549806\n      - 43.60185566328\n      - 43.89691265518\n      - 51.559374423\n      - 43.89691265518\n      - 51.559374423\n      - 144.6484148813\n      - -29.52793630843\n      - -4.290510633609\n      - 4.275018249209\n      - 3.077593180459\n      - 3.571453211327\n      - 3.635446234161\n      - 35.67527604537\n      - 40.13513697208\n      - 42.22265142152\n      - 44.37065885408\n      - 43.79191549806\n      
- 43.60185566328\n      - 43.89691265518\n      - 51.559374423\n      - 43.89691265518\n      - 51.559374423\n      - 144.6484148813\n      - -29.52793630843\n      - -4.290510633611\n      - 4.275018249209\n      - 3.077593180459\n      - 3.571453211328\n      - 3.635446234161\n      - 35.67527604537\n      - 42.22265142152\n      - 43.39074205828\n      - -135.8537067485\n      - -135.8537067485\n      - -135.8537067485\n      - 3.585520164357\n      - 35.67527604537\n      - 42.22265142152\n      - 43.39074205828\n      - -135.8537067485\n      - -135.8537067485\n      - -135.8537067485\n      - 3.585520164356\n      - 35.67527604536\n      - 42.22265142152\n      - 43.39074205828\n      - -135.8537067485\n      - -135.8537067485\n      - -135.8537067485\n      - 3.585520164357\n      - 35.67527604537\n      - 42.22265142152\n      - 43.39074205828\n      - -135.8537067485\n      - 3.585520164357\n      - -135.8537067485\n      - -135.8537067485\n      - 3.585520164357\n      - 35.67527604536\n      - 42.22265142152\n      - 43.79191549806\n      - 43.39074205828\n      - 45.9986361207\n      - 45.9986361207\n      - 66.99917852268\n      - -135.8537067485\n      - -0.9909636364664\n      - 3.40064839132\n      - 3.585520164357\n      - 35.67527604536\n      - 42.22265142152\n      - 43.79191549806\n      - 43.39074205828\n      - 45.9986361207\n      - 45.9986361207\n      - 66.99917852268\n      - -135.8537067485\n      - -0.990963636463\n      - 3.400648391319\n      - 3.585520164356\n      - 35.67527604537\n      - 42.22265142152\n      - 43.79191549806\n      - 43.39074205829\n      - 45.9986361207\n      - 45.9986361207\n      - 66.99917852268\n      - -135.8537067485\n      - -0.9909636364644\n      - 3.400648391319\n      - 3.585520164357\n      - 35.67527604537\n      - 40.13513697207\n      - 42.22265142152\n      - 44.3706588541\n      - 43.79191549806\n      - 43.60185566328\n      - 51.559374423\n      - 51.559374423\n      - 144.6484148813\n      - 
-29.52793630843\n      - -4.290510633611\n      - 4.275018249209\n      - 3.077593180459\n      - 3.571453211328\n      - 3.635446234161\n      - -135.8537067485\n      - -135.8537067485\n      - -135.8537067485\n      - -135.8537067485\n      - -135.8537067485\n      - -135.8537067485\n      - 35.67527604537\n      - 42.22265142152\n      - 43.79191549806\n      - 43.39074205828\n      - 45.9986361207\n      - 66.99917852268\n      - 45.9986361207\n      - 66.99917852268\n      - -135.8537067485\n      - -0.9909636364657\n      - 3.40064839132\n      - 3.585520164356\n      - 35.67527604537\n      - 42.22265142152\n      - 43.79191549806\n      - 43.39074205828\n      - 45.9986361207\n      - 66.99917852268\n      - 45.9986361207\n      - 66.99917852268\n      - -135.8537067485\n      - -0.9909636364657\n      - 3.400648391319\n      - 3.585520164357\n      - 35.67527604537\n      - 42.22265142152\n      - 43.79191549806\n      - 43.39074205828\n      - 45.9986361207\n      - 66.99917852268\n      - 45.9986361207\n      - 66.99917852268\n      - -135.8537067485\n      - -0.9909636364651\n      - 3.400648391319\n      - 3.585520164356\n      - -135.8537067485\n      - 35.67527604537\n      - 42.22265142152\n      - 43.39074205828\n      - -135.8537067485\n      - 3.585520164357\n      - -135.8537067485\n      - 3.585520164357\n      - -135.8537067485\n      - -135.8537067485\n      - -135.8537067485\n      - 43.89691265518\n      - 43.89691265518\n      - 35.67527604537\n      - 42.22265142152\n      - 43.89691265518\n      - 43.89691265518\n      - 144.6484148813\n      - -4.29051063361\n      - 43.89691265518\n      - 43.89691265518\n      - 35.67527604537\n      - 42.22265142152\n      - 144.6484148813\n      - -4.29051063361\n      - 43.89691265518\n      - 43.89691265518\nlinear_terms_expected:\n  -   - 2363.426377558\n      - 5992.039123618\n      - -7709.278530499\n      - -2150.664440344\n      - -1182.145972324\n      - -577.1965207481\n      - 
-247.3857201056\n      - -138.334179211\n      - -247.3857201056\n      - -138.334179211\n      - -53.7124324404\n      - -103.0416465802\n      - -99.1378353945\n      - 954.5967200947\n      - 13.58623873662\n      - -1.174295227353\n      - -3.866221813446\n      - 2363.426377558\n      - 5992.039123618\n      - -7709.278530499\n      - -2150.664440344\n      - -1182.145972324\n      - -577.1965207481\n      - -247.3857201056\n      - -138.334179211\n      - -247.3857201056\n      - -138.334179211\n      - -53.71243244039\n      - -103.0416465802\n      - -99.1378353945\n      - 954.5967200947\n      - 13.58623873662\n      - -1.174295227353\n      - -3.866221813447\n      - 2363.426377558\n      - 5992.039123618\n      - -7709.278530499\n      - -2150.664440344\n      - -1182.145972324\n      - -577.1965207481\n      - -247.3857201056\n      - -138.334179211\n      - -247.3857201056\n      - -138.334179211\n      - -53.7124324404\n      - -103.0416465802\n      - -99.1378353945\n      - 954.5967200947\n      - 13.58623873662\n      - -1.174295227354\n      - -3.866221813447\n      - 2363.426377558\n      - 5992.039123618\n      - -7709.278530499\n      - -2150.664440344\n      - -1182.145972324\n      - -577.1965207481\n      - -247.3857201056\n      - -138.334179211\n      - -247.3857201056\n      - -138.334179211\n      - -53.7124324404\n      - -103.0416465802\n      - -99.1378353945\n      - 954.5967200947\n      - 13.58623873662\n      - -1.174295227354\n      - -3.866221813447\n      - 2363.426377558\n      - -7709.278530499\n      - -357.7987188137\n      - -153.6248432276\n      - -153.6248432276\n      - -153.6248432276\n      - -4.140626483836\n      - 2363.426377558\n      - -7709.278530499\n      - -357.7987188137\n      - -153.6248432276\n      - -153.6248432276\n      - -153.6248432276\n      - -4.140626483835\n      - 2363.426377558\n      - -7709.278530499\n      - -357.7987188137\n      - -153.6248432276\n      - -153.6248432276\n      - 
-153.6248432276\n      - -4.140626483836\n      - 2363.426377558\n      - -7709.278530499\n      - -357.7987188137\n      - -153.6248432276\n      - -4.140626483836\n      - -153.6248432276\n      - -153.6248432276\n      - -4.140626483836\n      - 2363.426377558\n      - -7709.278530499\n      - -1182.145972324\n      - -357.7987188137\n      - -181.9416676796\n      - -181.9416676796\n      - -104.2155735686\n      - -153.6248432276\n      - -142.8920752426\n      - 3.075193030154\n      - -4.140626483836\n      - 2363.426377558\n      - -7709.278530499\n      - -1182.145972324\n      - -357.7987188137\n      - -181.9416676796\n      - -181.9416676796\n      - -104.2155735686\n      - -153.6248432276\n      - -142.8920752426\n      - 3.075193030154\n      - -4.140626483836\n      - 2363.426377558\n      - -7709.278530499\n      - -1182.145972324\n      - -357.7987188137\n      - -181.9416676796\n      - -181.9416676796\n      - -104.2155735686\n      - -153.6248432276\n      - -142.8920752426\n      - 3.075193030153\n      - -4.140626483836\n      - 2363.426377558\n      - 5992.039123618\n      - -7709.278530499\n      - -2150.664440344\n      - -1182.145972324\n      - -577.1965207481\n      - -138.334179211\n      - -138.334179211\n      - -53.7124324404\n      - -103.0416465802\n      - -99.1378353945\n      - 954.5967200947\n      - 13.58623873662\n      - -1.174295227354\n      - -3.866221813446\n      - -153.6248432276\n      - -153.6248432276\n      - -153.6248432276\n      - -153.6248432276\n      - -153.6248432276\n      - -153.6248432276\n      - 2363.426377558\n      - -7709.278530499\n      - -1182.145972324\n      - -357.7987188137\n      - -181.9416676796\n      - -104.2155735686\n      - -181.9416676796\n      - -104.2155735686\n      - -153.6248432276\n      - -142.8920752426\n      - 3.075193030153\n      - -4.140626483835\n      - 2363.426377558\n      - -7709.278530499\n      - -1182.145972324\n      - -357.7987188137\n      - -181.9416676796\n 
     - -104.2155735686\n      - -181.9416676796\n      - -104.2155735686\n      - -153.6248432276\n      - -142.8920752426\n      - 3.075193030154\n      - -4.140626483836\n      - 2363.426377558\n      - -7709.278530499\n      - -1182.145972324\n      - -357.7987188137\n      - -181.9416676796\n      - -104.2155735686\n      - -181.9416676796\n      - -104.2155735686\n      - -153.6248432276\n      - -142.8920752426\n      - 3.075193030154\n      - -4.140626483836\n      - -153.6248432276\n      - 2363.426377558\n      - -7709.278530499\n      - -357.7987188137\n      - -153.6248432276\n      - -4.140626483836\n      - -153.6248432276\n      - -4.140626483836\n      - -153.6248432276\n      - -153.6248432276\n      - -153.6248432276\n      - -247.3857201056\n      - -247.3857201056\n      - 2363.426377558\n      - -7709.278530499\n      - -247.3857201056\n      - -247.3857201056\n      - -53.7124324404\n      - -99.1378353945\n      - -247.3857201056\n      - -247.3857201056\n      - 2363.426377558\n      - -7709.278530499\n      - -53.7124324404\n      - -99.1378353945\n      - -247.3857201056\n      - -247.3857201056\n  -   - -1393.823686755\n      - -3484.676385903\n      - 4386.660918749\n      - 1174.087828191\n      - 621.0101781775\n      - 261.2029206975\n      - 36.28651044149\n      - -104.2839114197\n      - 36.28651044149\n      - -104.2839114197\n      - -735.8610938666\n      - 294.9448835657\n      - 87.55073042511\n      - 1134.846231379\n      - 104.076484543\n      - 66.32539487318\n      - 48.36159029625\n      - -1393.823686755\n      - -3484.676385903\n      - 4386.660918749\n      - 1174.087828191\n      - 621.0101781775\n      - 261.2029206975\n      - 36.28651044149\n      - -104.2839114197\n      - 36.28651044149\n      - -104.2839114197\n      - -735.8610938666\n      - 294.9448835657\n      - 87.55073042511\n      - 1134.846231379\n      - 104.076484543\n      - 66.32539487318\n      - 48.36159029625\n      - -1393.823686755\n      - 
-3484.676385903\n      - 4386.660918749\n      - 1174.087828191\n      - 621.0101781775\n      - 261.2029206975\n      - 36.28651044149\n      - -104.2839114197\n      - 36.28651044149\n      - -104.2839114197\n      - -735.8610938666\n      - 294.9448835657\n      - 87.55073042511\n      - 1134.846231379\n      - 104.076484543\n      - 66.32539487318\n      - 48.36159029625\n      - -1393.823686755\n      - -3484.676385903\n      - 4386.660918749\n      - 1174.087828191\n      - 621.0101781775\n      - 261.2029206975\n      - 36.28651044149\n      - -104.2839114197\n      - 36.28651044149\n      - -104.2839114197\n      - -735.8610938666\n      - 294.9448835657\n      - 87.55073042511\n      - 1134.846231379\n      - 104.076484543\n      - 66.32539487318\n      - 48.36159029625\n      - -1393.823686755\n      - 4386.660918749\n      - 119.8053923089\n      - 972.3575239774\n      - 972.3575239774\n      - 972.3575239774\n      - 42.21231909165\n      - -1393.823686755\n      - 4386.660918749\n      - 119.8053923089\n      - 972.3575239774\n      - 972.3575239774\n      - 972.3575239774\n      - 42.21231909165\n      - -1393.823686755\n      - 4386.660918749\n      - 119.8053923089\n      - 972.3575239774\n      - 972.3575239774\n      - 972.3575239774\n      - 42.21231909165\n      - -1393.823686755\n      - 4386.660918749\n      - 119.8053923089\n      - 972.3575239774\n      - 42.21231909165\n      - 972.3575239774\n      - 972.3575239774\n      - 42.21231909165\n      - -1393.823686755\n      - 4386.660918749\n      - 621.0101781775\n      - 119.8053923089\n      - -30.37542941294\n      - -30.37542941294\n      - -230.7678532341\n      - 972.3575239774\n      - 1.227223692861\n      - 80.74754133078\n      - 42.21231909165\n      - -1393.823686755\n      - 4386.660918749\n      - 621.0101781775\n      - 119.8053923089\n      - -30.37542941294\n      - -30.37542941294\n      - -230.7678532341\n      - 972.3575239774\n      - 1.227223692858\n      - 
80.74754133078\n      - 42.21231909165\n      - -1393.823686755\n      - 4386.660918749\n      - 621.0101781775\n      - 119.8053923089\n      - -30.37542941294\n      - -30.37542941294\n      - -230.7678532341\n      - 972.3575239774\n      - 1.227223692859\n      - 80.74754133078\n      - 42.21231909165\n      - -1393.823686755\n      - -3484.676385903\n      - 4386.660918749\n      - 1174.087828191\n      - 621.0101781775\n      - 261.2029206975\n      - -104.2839114197\n      - -104.2839114197\n      - -735.8610938666\n      - 294.9448835657\n      - 87.55073042511\n      - 1134.846231379\n      - 104.076484543\n      - 66.32539487318\n      - 48.36159029625\n      - 972.3575239774\n      - 972.3575239774\n      - 972.3575239774\n      - 972.3575239774\n      - 972.3575239774\n      - 972.3575239774\n      - -1393.823686755\n      - 4386.660918749\n      - 621.0101781775\n      - 119.8053923089\n      - -30.37542941294\n      - -230.7678532341\n      - -30.37542941294\n      - -230.7678532341\n      - 972.3575239774\n      - 1.227223692861\n      - 80.74754133078\n      - 42.21231909165\n      - -1393.823686755\n      - 4386.660918749\n      - 621.0101781775\n      - 119.8053923089\n      - -30.37542941294\n      - -230.7678532341\n      - -30.37542941294\n      - -230.7678532341\n      - 972.3575239774\n      - 1.227223692861\n      - 80.74754133078\n      - 42.21231909165\n      - -1393.823686755\n      - 4386.660918749\n      - 621.0101781775\n      - 119.8053923089\n      - -30.37542941294\n      - -230.7678532341\n      - -30.37542941294\n      - -230.7678532341\n      - 972.3575239774\n      - 1.227223692859\n      - 80.74754133078\n      - 42.21231909165\n      - 972.3575239774\n      - -1393.823686755\n      - 4386.660918749\n      - 119.8053923089\n      - 972.3575239774\n      - 42.21231909165\n      - 972.3575239774\n      - 42.21231909165\n      - 972.3575239774\n      - 972.3575239774\n      - 972.3575239774\n      - 36.28651044149\n      - 
36.28651044149\n      - -1393.823686755\n      - 4386.660918749\n      - 36.28651044149\n      - 36.28651044149\n      - -735.8610938667\n      - 87.55073042511\n      - 36.28651044149\n      - 36.28651044149\n      - -1393.823686755\n      - 4386.660918749\n      - -735.8610938666\n      - 87.55073042511\n      - 36.28651044149\n      - 36.28651044149\n  -   - -375.8722506492\n      - -1108.972694686\n      - 1677.946001564\n      - 546.4389881039\n      - 346.3140830129\n      - 215.6227895742\n      - 131.5247089125\n      - 92.72176892163\n      - 131.5247089125\n      - 92.72176892163\n      - 44.3395268336\n      - 78.93180868232\n      - 63.72812657892\n      - -104.6695747867\n      - 31.93096833735\n      - 29.20072767216\n      - 25.55713657195\n      - -375.8722506492\n      - -1108.972694686\n      - 1677.946001564\n      - 546.4389881039\n      - 346.3140830129\n      - 215.6227895742\n      - 131.5247089125\n      - 92.72176892163\n      - 131.5247089125\n      - 92.72176892163\n      - 44.3395268336\n      - 78.93180868232\n      - 63.72812657892\n      - -104.6695747867\n      - 31.93096833735\n      - 29.20072767216\n      - 25.55713657195\n      - -375.8722506492\n      - -1108.972694686\n      - 1677.946001564\n      - 546.4389881039\n      - 346.3140830129\n      - 215.6227895742\n      - 131.5247089125\n      - 92.72176892163\n      - 131.5247089125\n      - 92.72176892163\n      - 44.3395268336\n      - 78.93180868232\n      - 63.72812657892\n      - -104.6695747867\n      - 31.93096833735\n      - 29.20072767216\n      - 25.55713657195\n      - -375.8722506492\n      - -1108.972694686\n      - 1677.946001564\n      - 546.4389881039\n      - 346.3140830129\n      - 215.6227895742\n      - 131.5247089125\n      - 92.72176892163\n      - 131.5247089125\n      - 92.72176892163\n      - 44.3395268336\n      - 78.93180868232\n      - 63.72812657892\n      - -104.6695747867\n      - 31.93096833735\n      - 29.20072767216\n      - 25.55713657195\n    
  - -375.8722506492\n      - 1677.946001564\n      - 162.4888980685\n      - 115.5097653415\n      - 115.5097653415\n      - 115.5097653415\n      - 23.84379728044\n      - -375.8722506492\n      - 1677.946001564\n      - 162.4888980685\n      - 115.5097653415\n      - 115.5097653415\n      - 115.5097653415\n      - 23.84379728044\n      - -375.8722506492\n      - 1677.946001564\n      - 162.4888980685\n      - 115.5097653415\n      - 115.5097653415\n      - 115.5097653415\n      - 23.84379728044\n      - -375.8722506492\n      - 1677.946001564\n      - 162.4888980685\n      - 115.5097653415\n      - 23.84379728044\n      - 115.5097653415\n      - 115.5097653415\n      - 23.84379728044\n      - -375.8722506492\n      - 1677.946001564\n      - 346.3140830129\n      - 162.4888980685\n      - 109.9217527448\n      - 109.9217527448\n      - 76.12955501496\n      - 115.5097653415\n      - 65.79975067225\n      - 30.90788818222\n      - 23.84379728044\n      - -375.8722506492\n      - 1677.946001564\n      - 346.3140830129\n      - 162.4888980685\n      - 109.9217527448\n      - 109.9217527448\n      - 76.12955501496\n      - 115.5097653415\n      - 65.79975067225\n      - 30.90788818222\n      - 23.84379728044\n      - -375.8722506492\n      - 1677.946001564\n      - 346.3140830129\n      - 162.4888980685\n      - 109.9217527448\n      - 109.9217527448\n      - 76.12955501496\n      - 115.5097653415\n      - 65.79975067225\n      - 30.90788818222\n      - 23.84379728044\n      - -375.8722506492\n      - -1108.972694686\n      - 1677.946001564\n      - 546.4389881039\n      - 346.3140830129\n      - 215.6227895742\n      - 92.72176892163\n      - 92.72176892163\n      - 44.3395268336\n      - 78.93180868232\n      - 63.72812657892\n      - -104.6695747867\n      - 31.93096833735\n      - 29.20072767216\n      - 25.55713657195\n      - 115.5097653415\n      - 115.5097653415\n      - 115.5097653415\n      - 115.5097653415\n      - 115.5097653415\n      - 115.5097653415\n   
   - -375.8722506492\n      - 1677.946001564\n      - 346.3140830129\n      - 162.4888980685\n      - 109.9217527448\n      - 76.12955501496\n      - 109.9217527448\n      - 76.12955501496\n      - 115.5097653415\n      - 65.79975067225\n      - 30.90788818222\n      - 23.84379728044\n      - -375.8722506492\n      - 1677.946001564\n      - 346.3140830129\n      - 162.4888980685\n      - 109.9217527448\n      - 76.12955501496\n      - 109.9217527448\n      - 76.12955501496\n      - 115.5097653415\n      - 65.79975067225\n      - 30.90788818222\n      - 23.84379728044\n      - -375.8722506492\n      - 1677.946001564\n      - 346.3140830129\n      - 162.4888980685\n      - 109.9217527448\n      - 76.12955501496\n      - 109.9217527448\n      - 76.12955501496\n      - 115.5097653415\n      - 65.79975067225\n      - 30.90788818222\n      - 23.84379728044\n      - 115.5097653415\n      - -375.8722506492\n      - 1677.946001564\n      - 162.4888980685\n      - 115.5097653415\n      - 23.84379728044\n      - 115.5097653415\n      - 23.84379728044\n      - 115.5097653415\n      - 115.5097653415\n      - 115.5097653415\n      - 131.5247089125\n      - 131.5247089125\n      - -375.8722506492\n      - 1677.946001564\n      - 131.5247089125\n      - 131.5247089125\n      - 44.33952683361\n      - 63.72812657892\n      - 131.5247089125\n      - 131.5247089125\n      - -375.8722506492\n      - 1677.946001564\n      - 44.3395268336\n      - 63.72812657892\n      - 131.5247089125\n      - 131.5247089125\nsquare_terms:\n  -   -   - 42.05642503987\n          - 5.182308979952\n          - 295.7086970924\n      -   - 5.182308979952\n          - -16.8011933671\n          - 44.88068608775\n      -   - 295.7086970924\n          - 44.88068608775\n          - -11.16800948171\n  -   -   - 37.76206570488\n          - 4.598069813344\n          - 268.480673393\n      -   - 4.598069813344\n          - -15.06222715154\n          - 39.86023126116\n      -   - 268.480673393\n          - 
39.86023126116\n          - -13.87397890211\n  -   -   - 34.52716237442\n          - 3.742228341934\n          - 249.1555844695\n      -   - 3.742228341934\n          - -13.95687891579\n          - 32.4806538628\n      -   - 249.1555844695\n          - 32.4806538628\n          - -15.39308605662\n  -   -   - 36.36211362787\n          - 11.74965491243\n          - 232.6612213924\n      -   - 11.74965491243\n          - -14.33678876201\n          - 101.5946337199\n      -   - 232.6612213924\n          - 101.5946337199\n          - -16.74890647466\n  -   -   - 30.88627308451\n          - 4.976024773829\n          - 218.8203159401\n      -   - 4.976024773829\n          - -12.55705651792\n          - 43.13446872147\n      -   - 218.8203159401\n          - 43.13446872147\n          - -16.96609172267\n  -   -   - 26.99562208048\n          - 4.039388146136\n          - 194.3460658399\n      -   - 4.039388146136\n          - -10.904018831\n          - 35.05642111527\n      -   - 194.3460658399\n          - 35.05642111527\n          - -17.54258356033\n  -   -   - 21.28989652643\n          - 3.211915555773\n          - 156.5687870814\n      -   - 3.211915555773\n          - -7.888244440813\n          - 27.94191248902\n      -   - 156.5687870814\n          - 27.94191248902\n          - -18.64926539005\n  -   -   - 17.09406685234\n          - 2.66162436771\n          - 131.0277600318\n      -   - 2.66162436771\n          - -5.180832861658\n          - 23.26291444469\n      -   - 131.0277600318\n          - 23.26291444469\n          - -23.07010638636\n  -   -   - 21.28989652643\n          - 3.211915555773\n          - 156.5687870814\n      -   - 3.211915555773\n          - -7.888244440813\n          - 27.94191248902\n      -   - 156.5687870814\n          - 27.94191248902\n          - -18.64926539005\n  -   -   - 17.09406685234\n          - 2.66162436771\n          - 131.0277600318\n      -   - 2.66162436771\n          - -5.180832861658\n          - 23.26291444469\n      -   - 
131.0277600318\n          - 23.26291444469\n          - -23.07010638636\n  -   -   - 13.22112788651\n          - 1.569537501602\n          - 137.5825625705\n      -   - 1.569537501602\n          - -4.091404567032\n          - 14.28797929438\n      -   - 137.5825625705\n          - 14.28797929438\n          - -69.36780402294\n  -   -   - 12.39426977847\n          - 3.219862122548\n          - 69.57634867759\n      -   - 3.219862122548\n          - 3.931549769208\n          - 27.88803537784\n      -   - 69.57634867759\n          - 27.88803537784\n          - 16.06772788907\n  -   -   - 10.59582926181\n          - 3.961203981524\n          - 63.6129919848\n      -   - 3.961203981524\n          - 13.8089243593\n          - 34.67215199382\n      -   - 63.6129919848\n          - 34.67215199382\n          - 3.402916211\n  -   -   - -4.590996625593\n          - -49.8578626567\n          - 88.18903535997\n      -   - -49.8578626567\n          - -392.7968335729\n          - -441.4737183738\n      -   - 88.18903535997\n          - -441.4737183738\n          - -2.005253308587\n  -   -   - 6.609230403572\n          - -1.114542090064\n          - 50.17123518647\n      -   - -1.114542090064\n          - -20.03956487444\n          - -10.10106952879\n      -   - 50.17123518647\n          - -10.10106952879\n          - -0.6749898814763\n  -   -   - 5.867803812028\n          - -0.1814136458974\n          - 42.89612529825\n      -   - -0.1814136458974\n          - -11.28413222756\n          - -1.804764358727\n      -   - 42.89612529825\n          - -1.804764358727\n          - -1.028734386942\n  -   -   - 5.122058993026\n          - 0.07028381229468\n          - 37.07301548541\n      -   - 0.07028381229468\n          - -8.007614389428\n          - 0.4527765498708\n      -   - 37.07301548541\n          - 0.4527765498708\n          - -1.160231423527\n  -   -   - 42.05642503987\n          - 5.182308979952\n          - 295.7086970924\n      -   - 5.182308979952\n          - 
-16.8011933671\n          - 44.88068608775\n      -   - 295.7086970924\n          - 44.88068608775\n          - -11.16800948171\n  -   -   - 37.76206570488\n          - 4.598069813344\n          - 268.480673393\n      -   - 4.598069813344\n          - -15.06222715154\n          - 39.86023126116\n      -   - 268.480673393\n          - 39.86023126116\n          - -13.8739789021\n  -   -   - 34.52716237442\n          - 3.742228341934\n          - 249.1555844695\n      -   - 3.742228341934\n          - -13.95687891579\n          - 32.4806538628\n      -   - 249.1555844695\n          - 32.4806538628\n          - -15.39308605662\n  -   -   - 36.36211362787\n          - 11.74965491243\n          - 232.6612213924\n      -   - 11.74965491243\n          - -14.33678876201\n          - 101.5946337199\n      -   - 232.6612213924\n          - 101.5946337199\n          - -16.74890647466\n  -   -   - 30.88627308451\n          - 4.976024773829\n          - 218.8203159401\n      -   - 4.976024773829\n          - -12.55705651792\n          - 43.13446872147\n      -   - 218.8203159401\n          - 43.13446872147\n          - -16.96609172267\n  -   -   - 26.99562208048\n          - 4.039388146136\n          - 194.3460658399\n      -   - 4.039388146136\n          - -10.904018831\n          - 35.05642111527\n      -   - 194.3460658399\n          - 35.05642111527\n          - -17.54258356033\n  -   -   - 21.28989652643\n          - 3.211915555773\n          - 156.5687870814\n      -   - 3.211915555773\n          - -7.888244440814\n          - 27.94191248902\n      -   - 156.5687870814\n          - 27.94191248902\n          - -18.64926539005\n  -   -   - 17.09406685234\n          - 2.66162436771\n          - 131.0277600318\n      -   - 2.66162436771\n          - -5.180832861658\n          - 23.26291444469\n      -   - 131.0277600318\n          - 23.26291444469\n          - -23.07010638636\n  -   -   - 21.28989652643\n          - 3.211915555773\n          - 156.5687870814\n      -   - 
3.211915555773\n          - -7.888244440814\n          - 27.94191248902\n      -   - 156.5687870814\n          - 27.94191248902\n          - -18.64926539005\n  -   -   - 17.09406685234\n          - 2.66162436771\n          - 131.0277600318\n      -   - 2.66162436771\n          - -5.180832861658\n          - 23.26291444469\n      -   - 131.0277600318\n          - 23.26291444469\n          - -23.07010638636\n  -   -   - 13.22112788651\n          - 1.569537501602\n          - 137.5825625705\n      -   - 1.569537501602\n          - -4.091404567032\n          - 14.28797929438\n      -   - 137.5825625705\n          - 14.28797929438\n          - -69.36780402294\n  -   -   - 12.39426977847\n          - 3.219862122548\n          - 69.57634867759\n      -   - 3.219862122548\n          - 3.931549769208\n          - 27.88803537784\n      -   - 69.57634867759\n          - 27.88803537784\n          - 16.06772788907\n  -   -   - 10.59582926181\n          - 3.961203981524\n          - 63.6129919848\n      -   - 3.961203981524\n          - 13.8089243593\n          - 34.67215199382\n      -   - 63.6129919848\n          - 34.67215199382\n          - 3.402916211\n  -   -   - -4.590996625593\n          - -49.8578626567\n          - 88.18903535997\n      -   - -49.8578626567\n          - -392.7968335729\n          - -441.4737183738\n      -   - 88.18903535997\n          - -441.4737183738\n          - -2.005253308587\n  -   -   - 6.609230403572\n          - -1.114542090064\n          - 50.17123518647\n      -   - -1.114542090064\n          - -20.03956487444\n          - -10.10106952879\n      -   - 50.17123518647\n          - -10.10106952879\n          - -0.6749898814763\n  -   -   - 5.867803812028\n          - -0.1814136458974\n          - 42.89612529825\n      -   - -0.1814136458974\n          - -11.28413222756\n          - -1.804764358727\n      -   - 42.89612529825\n          - -1.804764358727\n          - -1.028734386942\n  -   -   - 5.122058993026\n          - 0.07028381229468\n    
      - 37.07301548541\n      -   - 0.07028381229468\n          - -8.007614389428\n          - 0.4527765498708\n      -   - 37.07301548541\n          - 0.4527765498708\n          - -1.160231423527\n  -   -   - 42.05642503987\n          - 5.182308979952\n          - 295.7086970924\n      -   - 5.182308979952\n          - -16.8011933671\n          - 44.88068608775\n      -   - 295.7086970924\n          - 44.88068608775\n          - -11.16800948171\n  -   -   - 37.76206570488\n          - 4.598069813344\n          - 268.480673393\n      -   - 4.598069813344\n          - -15.06222715154\n          - 39.86023126116\n      -   - 268.480673393\n          - 39.86023126116\n          - -13.8739789021\n  -   -   - 34.52716237442\n          - 3.742228341934\n          - 249.1555844695\n      -   - 3.742228341934\n          - -13.95687891579\n          - 32.4806538628\n      -   - 249.1555844695\n          - 32.4806538628\n          - -15.39308605662\n  -   -   - 36.36211362787\n          - 11.74965491243\n          - 232.6612213924\n      -   - 11.74965491243\n          - -14.33678876201\n          - 101.5946337199\n      -   - 232.6612213924\n          - 101.5946337199\n          - -16.74890647466\n  -   -   - 30.88627308451\n          - 4.976024773829\n          - 218.8203159401\n      -   - 4.976024773829\n          - -12.55705651792\n          - 43.13446872147\n      -   - 218.8203159401\n          - 43.13446872147\n          - -16.96609172267\n  -   -   - 26.99562208048\n          - 4.039388146136\n          - 194.3460658399\n      -   - 4.039388146136\n          - -10.904018831\n          - 35.05642111527\n      -   - 194.3460658399\n          - 35.05642111527\n          - -17.54258356033\n  -   -   - 21.28989652643\n          - 3.211915555773\n          - 156.5687870814\n      -   - 3.211915555773\n          - -7.888244440813\n          - 27.94191248902\n      -   - 156.5687870814\n          - 27.94191248902\n          - -18.64926539005\n  -   -   - 17.09406685234\n    
      - 2.66162436771\n          - 131.0277600318\n      -   - 2.66162436771\n          - -5.180832861658\n          - 23.26291444469\n      -   - 131.0277600318\n          - 23.26291444469\n          - -23.07010638636\n  -   -   - 21.28989652643\n          - 3.211915555773\n          - 156.5687870814\n      -   - 3.211915555773\n          - -7.888244440813\n          - 27.94191248902\n      -   - 156.5687870814\n          - 27.94191248902\n          - -18.64926539005\n  -   -   - 17.09406685234\n          - 2.66162436771\n          - 131.0277600318\n      -   - 2.66162436771\n          - -5.180832861658\n          - 23.26291444469\n      -   - 131.0277600318\n          - 23.26291444469\n          - -23.07010638636\n  -   -   - 13.22112788651\n          - 1.569537501602\n          - 137.5825625705\n      -   - 1.569537501602\n          - -4.091404567032\n          - 14.28797929438\n      -   - 137.5825625705\n          - 14.28797929438\n          - -69.36780402294\n  -   -   - 12.39426977847\n          - 3.219862122548\n          - 69.57634867759\n      -   - 3.219862122548\n          - 3.931549769208\n          - 27.88803537784\n      -   - 69.57634867759\n          - 27.88803537784\n          - 16.06772788907\n  -   -   - 10.59582926181\n          - 3.961203981524\n          - 63.6129919848\n      -   - 3.961203981524\n          - 13.8089243593\n          - 34.67215199382\n      -   - 63.6129919848\n          - 34.67215199382\n          - 3.402916211\n  -   -   - -4.590996625593\n          - -49.8578626567\n          - 88.18903535997\n      -   - -49.8578626567\n          - -392.7968335729\n          - -441.4737183738\n      -   - 88.18903535997\n          - -441.4737183738\n          - -2.005253308587\n  -   -   - 6.609230403572\n          - -1.114542090064\n          - 50.17123518647\n      -   - -1.114542090064\n          - -20.03956487444\n          - -10.10106952879\n      -   - 50.17123518647\n          - -10.10106952879\n          - -0.6749898814763\n  -   
-   - 5.867803812028\n          - -0.1814136458974\n          - 42.89612529825\n      -   - -0.1814136458974\n          - -11.28413222756\n          - -1.804764358727\n      -   - 42.89612529825\n          - -1.804764358727\n          - -1.028734386942\n  -   -   - 5.122058993026\n          - 0.07028381229468\n          - 37.07301548541\n      -   - 0.07028381229468\n          - -8.007614389428\n          - 0.4527765498708\n      -   - 37.07301548541\n          - 0.4527765498708\n          - -1.160231423527\n  -   -   - 42.05642503987\n          - 5.182308979952\n          - 295.7086970924\n      -   - 5.182308979952\n          - -16.8011933671\n          - 44.88068608775\n      -   - 295.7086970924\n          - 44.88068608775\n          - -11.16800948171\n  -   -   - 37.76206570488\n          - 4.598069813344\n          - 268.480673393\n      -   - 4.598069813344\n          - -15.06222715154\n          - 39.86023126116\n      -   - 268.480673393\n          - 39.86023126116\n          - -13.87397890211\n  -   -   - 34.52716237442\n          - 3.742228341934\n          - 249.1555844695\n      -   - 3.742228341934\n          - -13.95687891579\n          - 32.4806538628\n      -   - 249.1555844695\n          - 32.4806538628\n          - -15.39308605662\n  -   -   - 36.36211362787\n          - 11.74965491243\n          - 232.6612213924\n      -   - 11.74965491243\n          - -14.33678876201\n          - 101.5946337199\n      -   - 232.6612213924\n          - 101.5946337199\n          - -16.74890647466\n  -   -   - 30.88627308451\n          - 4.976024773829\n          - 218.8203159401\n      -   - 4.976024773829\n          - -12.55705651792\n          - 43.13446872147\n      -   - 218.8203159401\n          - 43.13446872147\n          - -16.96609172267\n  -   -   - 26.99562208048\n          - 4.039388146136\n          - 194.3460658399\n      -   - 4.039388146136\n          - -10.904018831\n          - 35.05642111527\n      -   - 194.3460658399\n          - 
35.05642111527\n          - -17.54258356033\n  -   -   - 21.28989652643\n          - 3.211915555773\n          - 156.5687870814\n      -   - 3.211915555773\n          - -7.888244440813\n          - 27.94191248902\n      -   - 156.5687870814\n          - 27.94191248902\n          - -18.64926539005\n  -   -   - 17.09406685234\n          - 2.66162436771\n          - 131.0277600318\n      -   - 2.66162436771\n          - -5.180832861658\n          - 23.26291444469\n      -   - 131.0277600318\n          - 23.26291444469\n          - -23.07010638636\n  -   -   - 21.28989652643\n          - 3.211915555773\n          - 156.5687870814\n      -   - 3.211915555773\n          - -7.888244440814\n          - 27.94191248902\n      -   - 156.5687870814\n          - 27.94191248902\n          - -18.64926539005\n  -   -   - 17.09406685234\n          - 2.66162436771\n          - 131.0277600318\n      -   - 2.66162436771\n          - -5.180832861658\n          - 23.26291444469\n      -   - 131.0277600318\n          - 23.26291444469\n          - -23.07010638636\n  -   -   - 13.22112788651\n          - 1.569537501602\n          - 137.5825625705\n      -   - 1.569537501602\n          - -4.091404567032\n          - 14.28797929438\n      -   - 137.5825625705\n          - 14.28797929438\n          - -69.36780402294\n  -   -   - 12.39426977847\n          - 3.219862122548\n          - 69.57634867759\n      -   - 3.219862122548\n          - 3.931549769208\n          - 27.88803537784\n      -   - 69.57634867759\n          - 27.88803537784\n          - 16.06772788907\n  -   -   - 10.59582926181\n          - 3.961203981524\n          - 63.6129919848\n      -   - 3.961203981524\n          - 13.8089243593\n          - 34.67215199382\n      -   - 63.6129919848\n          - 34.67215199382\n          - 3.402916211001\n  -   -   - -4.590996625593\n          - -49.8578626567\n          - 88.18903535997\n      -   - -49.8578626567\n          - -392.7968335729\n          - -441.4737183738\n      -   - 
88.18903535997\n          - -441.4737183738\n          - -2.005253308587\n  -   -   - 6.609230403572\n          - -1.114542090064\n          - 50.17123518647\n      -   - -1.114542090064\n          - -20.03956487444\n          - -10.10106952879\n      -   - 50.17123518647\n          - -10.10106952879\n          - -0.6749898814763\n  -   -   - 5.867803812028\n          - -0.1814136458974\n          - 42.89612529825\n      -   - -0.1814136458974\n          - -11.28413222756\n          - -1.804764358727\n      -   - 42.89612529825\n          - -1.804764358727\n          - -1.028734386942\n  -   -   - 5.122058993026\n          - 0.07028381229468\n          - 37.07301548541\n      -   - 0.07028381229468\n          - -8.007614389428\n          - 0.4527765498708\n      -   - 37.07301548541\n          - 0.4527765498708\n          - -1.160231423527\n  -   -   - 42.05642503987\n          - 5.182308979952\n          - 295.7086970924\n      -   - 5.182308979952\n          - -16.8011933671\n          - 44.88068608775\n      -   - 295.7086970924\n          - 44.88068608775\n          - -11.16800948171\n  -   -   - 34.52716237442\n          - 3.742228341934\n          - 249.1555844695\n      -   - 3.742228341934\n          - -13.95687891579\n          - 32.4806538628\n      -   - 249.1555844695\n          - 32.4806538628\n          - -15.39308605662\n  -   -   - 23.89982318472\n          - 3.566267197888\n          - 173.817334734\n      -   - 3.566267197888\n          - -9.358360214064\n          - 30.98344929428\n      -   - 173.817334734\n          - 30.98344929428\n          - -17.96992259777\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 
3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 4.783039455404\n          - 0.1257874954057\n          - 34.53987590883\n      -   - 0.1257874954057\n          - -6.993256198462\n          - 0.9566846715419\n      -   - 34.53987590883\n          - 0.9566846715419\n          - -1.179917631133\n  -   -   - 42.05642503987\n          - 5.182308979952\n          - 295.7086970924\n      -   - 5.182308979952\n          - -16.8011933671\n          - 44.88068608775\n      -   - 295.7086970924\n          - 44.88068608775\n          - -11.16800948171\n  -   -   - 34.52716237442\n          - 3.742228341934\n          - 249.1555844695\n      -   - 3.742228341934\n          - -13.95687891579\n          - 32.4806538628\n      -   - 249.1555844695\n          - 32.4806538628\n          - -15.39308605662\n  -   -   - 23.89982318472\n          - 3.566267197888\n          - 173.817334734\n      -   - 3.566267197888\n          - -9.358360214064\n          - 30.98344929428\n      -   - 173.817334734\n          - 30.98344929428\n          - -17.96992259777\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 
4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 4.783039455404\n          - 0.1257874954058\n          - 34.53987590883\n      -   - 0.1257874954058\n          - -6.993256198462\n          - 0.9566846715422\n      -   - 34.53987590883\n          - 0.9566846715422\n          - -1.179917631132\n  -   -   - 42.05642503987\n          - 5.182308979952\n          - 295.7086970924\n      -   - 5.182308979952\n          - -16.8011933671\n          - 44.88068608775\n      -   - 295.7086970924\n          - 44.88068608775\n          - -11.16800948171\n  -   -   - 34.52716237442\n          - 3.742228341934\n          - 249.1555844695\n      -   - 3.742228341934\n          - -13.95687891579\n          - 32.4806538628\n      -   - 249.1555844695\n          - 32.4806538628\n          - -15.39308605662\n  -   -   - 23.89982318472\n          - 3.566267197888\n          - 173.817334734\n      -   - 3.566267197888\n          - -9.358360214064\n          - 30.98344929428\n      -   - 173.817334734\n          - 30.98344929428\n          - -17.96992259777\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 4.783039455404\n          - 0.1257874954057\n          - 
34.53987590883\n      -   - 0.1257874954057\n          - -6.993256198462\n          - 0.9566846715419\n      -   - 34.53987590883\n          - 0.9566846715419\n          - -1.179917631133\n  -   -   - 42.05642503987\n          - 5.182308979952\n          - 295.7086970924\n      -   - 5.182308979952\n          - -16.8011933671\n          - 44.88068608775\n      -   - 295.7086970924\n          - 44.88068608775\n          - -11.16800948171\n  -   -   - 34.52716237442\n          - 3.742228341934\n          - 249.1555844695\n      -   - 3.742228341934\n          - -13.95687891579\n          - 32.4806538628\n      -   - 249.1555844695\n          - 32.4806538628\n          - -15.39308605662\n  -   -   - 23.89982318472\n          - 3.566267197888\n          - 173.817334734\n      -   - 3.566267197888\n          - -9.358360214064\n          - 30.98344929428\n      -   - 173.817334734\n          - 30.98344929428\n          - -17.96992259777\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 4.783039455404\n          - 0.1257874954057\n          - 34.53987590883\n      -   - 0.1257874954057\n          - -6.993256198462\n          - 0.9566846715419\n      -   - 34.53987590883\n          - 0.9566846715419\n          - -1.179917631133\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 4.783039455404\n          - 
0.1257874954057\n          - 34.53987590883\n      -   - 0.1257874954057\n          - -6.993256198462\n          - 0.9566846715419\n      -   - 34.53987590883\n          - 0.9566846715419\n          - -1.179917631133\n  -   -   - 42.05642503987\n          - 5.182308979952\n          - 295.7086970924\n      -   - 5.182308979952\n          - -16.8011933671\n          - 44.88068608775\n      -   - 295.7086970924\n          - 44.88068608775\n          - -11.16800948171\n  -   -   - 34.52716237442\n          - 3.742228341934\n          - 249.1555844695\n      -   - 3.742228341934\n          - -13.95687891579\n          - 32.4806538628\n      -   - 249.1555844695\n          - 32.4806538628\n          - -15.39308605662\n  -   -   - 30.88627308451\n          - 4.976024773829\n          - 218.8203159401\n      -   - 4.976024773829\n          - -12.55705651792\n          - 43.13446872147\n      -   - 218.8203159401\n          - 43.13446872147\n          - -16.96609172267\n  -   -   - 23.89982318472\n          - 3.566267197888\n          - 173.817334734\n      -   - 3.566267197888\n          - -9.358360214064\n          - 30.98344929428\n      -   - 173.817334734\n          - 30.98344929428\n          - -17.96992259777\n  -   -   - 19.05038877716\n          - 2.921211127977\n          - 142.2705130619\n      -   - 2.921211127977\n          - -6.493471507915\n          - 25.45927037319\n      -   - 142.2705130619\n          - 25.45927037319\n          - -20.03762618713\n  -   -   - 19.05038877716\n          - 2.921211127977\n          - 142.2705130619\n      -   - 2.921211127977\n          - -6.493471507915\n          - 25.45927037319\n      -   - 142.2705130619\n          - 25.45927037319\n          - -20.03762618713\n  -   -   - 15.31308103373\n          - 2.369394773812\n          - 124.3899129792\n      -   - 2.369394773812\n          - -4.034968539624\n          - 20.83080340569\n      -   - 124.3899129792\n          - 20.83080340569\n          - -30.92076532995\n  -   -  
 - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 10.44273226791\n          - 6.475979422676\n          - 57.63907853267\n      -   - 6.475979422676\n          - 34.37966124455\n          - 56.97442501698\n      -   - 57.63907853267\n          - 56.97442501698\n          - 1.719659134263\n  -   -   - 6.258067891218\n          - -0.4773313726774\n          - 46.28932429293\n      -   - -0.4773313726774\n          - -14.3215416894\n          - -4.441525949927\n      -   - 46.28932429293\n          - -4.441525949927\n          - -0.8894811141752\n  -   -   - 4.783039455404\n          - 0.1257874954057\n          - 34.53987590883\n      -   - 0.1257874954057\n          - -6.993256198462\n          - 0.956684671542\n      -   - 34.53987590883\n          - 0.956684671542\n          - -1.179917631133\n  -   -   - 42.05642503987\n          - 5.182308979952\n          - 295.7086970924\n      -   - 5.182308979952\n          - -16.8011933671\n          - 44.88068608775\n      -   - 295.7086970924\n          - 44.88068608775\n          - -11.16800948171\n  -   -   - 34.52716237442\n          - 3.742228341934\n          - 249.1555844695\n      -   - 3.742228341934\n          - -13.95687891579\n          - 32.4806538628\n      -   - 249.1555844695\n          - 32.4806538628\n          - -15.39308605662\n  -   -   - 30.88627308451\n          - 4.976024773829\n          - 218.8203159401\n      -   - 4.976024773829\n          - -12.55705651792\n          - 43.13446872147\n      -   - 218.8203159401\n          - 43.13446872147\n          - -16.96609172267\n  -   -   - 23.89982318472\n          - 3.566267197888\n          - 173.817334734\n      -   - 3.566267197888\n          - -9.358360214064\n          - 30.98344929428\n      -   - 173.817334734\n          - 30.98344929428\n       
   - -17.96992259777\n  -   -   - 19.05038877716\n          - 2.921211127977\n          - 142.2705130619\n      -   - 2.921211127977\n          - -6.493471507915\n          - 25.45927037319\n      -   - 142.2705130619\n          - 25.45927037319\n          - -20.03762618713\n  -   -   - 19.05038877716\n          - 2.921211127977\n          - 142.2705130619\n      -   - 2.921211127977\n          - -6.493471507915\n          - 25.45927037319\n      -   - 142.2705130619\n          - 25.45927037319\n          - -20.03762618713\n  -   -   - 15.31308103373\n          - 2.369394773812\n          - 124.3899129792\n      -   - 2.369394773812\n          - -4.034968539624\n          - 20.83080340569\n      -   - 124.3899129792\n          - 20.83080340569\n          - -30.92076532995\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 10.44273226791\n          - 6.475979422676\n          - 57.63907853267\n      -   - 6.475979422676\n          - 34.37966124455\n          - 56.97442501698\n      -   - 57.63907853267\n          - 56.97442501698\n          - 1.719659134262\n  -   -   - 6.258067891218\n          - -0.4773313726774\n          - 46.28932429293\n      -   - -0.4773313726774\n          - -14.3215416894\n          - -4.441525949927\n      -   - 46.28932429293\n          - -4.441525949927\n          - -0.889481114175\n  -   -   - 4.783039455404\n          - 0.1257874954058\n          - 34.53987590883\n      -   - 0.1257874954058\n          - -6.993256198462\n          - 0.956684671542\n      -   - 34.53987590883\n          - 0.956684671542\n          - -1.179917631132\n  -   -   - 42.05642503987\n          - 5.182308979952\n          - 295.7086970924\n      -   - 5.182308979952\n          - -16.8011933671\n          - 44.88068608775\n      -   - 295.7086970924\n 
         - 44.88068608775\n          - -11.16800948171\n  -   -   - 34.52716237442\n          - 3.742228341934\n          - 249.1555844695\n      -   - 3.742228341934\n          - -13.95687891579\n          - 32.4806538628\n      -   - 249.1555844695\n          - 32.4806538628\n          - -15.39308605662\n  -   -   - 30.88627308451\n          - 4.976024773829\n          - 218.8203159401\n      -   - 4.976024773829\n          - -12.55705651792\n          - 43.13446872147\n      -   - 218.8203159401\n          - 43.13446872147\n          - -16.96609172267\n  -   -   - 23.89982318472\n          - 3.566267197888\n          - 173.817334734\n      -   - 3.566267197888\n          - -9.358360214064\n          - 30.98344929428\n      -   - 173.817334734\n          - 30.98344929428\n          - -17.96992259777\n  -   -   - 19.05038877716\n          - 2.921211127977\n          - 142.2705130619\n      -   - 2.921211127977\n          - -6.493471507915\n          - 25.45927037319\n      -   - 142.2705130619\n          - 25.45927037319\n          - -20.03762618713\n  -   -   - 19.05038877716\n          - 2.921211127977\n          - 142.2705130619\n      -   - 2.921211127977\n          - -6.493471507915\n          - 25.45927037319\n      -   - 142.2705130619\n          - 25.45927037319\n          - -20.03762618713\n  -   -   - 15.31308103373\n          - 2.369394773812\n          - 124.3899129792\n      -   - 2.369394773812\n          - -4.034968539624\n          - 20.83080340569\n      -   - 124.3899129792\n          - 20.83080340569\n          - -30.92076532995\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 10.44273226791\n          - 6.475979422676\n          - 57.63907853267\n      -   - 6.475979422676\n          - 34.37966124455\n          - 56.97442501698\n    
  -   - 57.63907853267\n          - 56.97442501698\n          - 1.719659134262\n  -   -   - 6.258067891218\n          - -0.4773313726774\n          - 46.28932429293\n      -   - -0.4773313726774\n          - -14.3215416894\n          - -4.441525949927\n      -   - 46.28932429293\n          - -4.441525949927\n          - -0.889481114175\n  -   -   - 4.783039455404\n          - 0.1257874954057\n          - 34.53987590883\n      -   - 0.1257874954057\n          - -6.993256198462\n          - 0.9566846715419\n      -   - 34.53987590883\n          - 0.9566846715419\n          - -1.179917631133\n  -   -   - 42.05642503987\n          - 5.182308979952\n          - 295.7086970924\n      -   - 5.182308979952\n          - -16.8011933671\n          - 44.88068608775\n      -   - 295.7086970924\n          - 44.88068608775\n          - -11.16800948171\n  -   -   - 37.76206570488\n          - 4.598069813344\n          - 268.480673393\n      -   - 4.598069813344\n          - -15.06222715154\n          - 39.86023126116\n      -   - 268.480673393\n          - 39.86023126116\n          - -13.8739789021\n  -   -   - 34.52716237442\n          - 3.742228341934\n          - 249.1555844695\n      -   - 3.742228341934\n          - -13.95687891579\n          - 32.4806538628\n      -   - 249.1555844695\n          - 32.4806538628\n          - -15.39308605662\n  -   -   - 36.36211362787\n          - 11.74965491243\n          - 232.6612213924\n      -   - 11.74965491243\n          - -14.33678876201\n          - 101.5946337199\n      -   - 232.6612213924\n          - 101.5946337199\n          - -16.74890647466\n  -   -   - 30.88627308451\n          - 4.976024773829\n          - 218.8203159401\n      -   - 4.976024773829\n          - -12.55705651792\n          - 43.13446872147\n      -   - 218.8203159401\n          - 43.13446872147\n          - -16.96609172267\n  -   -   - 26.99562208048\n          - 4.039388146136\n          - 194.3460658399\n      -   - 4.039388146136\n          - 
-10.904018831\n          - 35.05642111527\n      -   - 194.3460658399\n          - 35.05642111527\n          - -17.54258356033\n  -   -   - 17.09406685234\n          - 2.66162436771\n          - 131.0277600318\n      -   - 2.66162436771\n          - -5.180832861657\n          - 23.26291444469\n      -   - 131.0277600318\n          - 23.26291444469\n          - -23.07010638636\n  -   -   - 17.09406685234\n          - 2.66162436771\n          - 131.0277600318\n      -   - 2.66162436771\n          - -5.180832861658\n          - 23.26291444469\n      -   - 131.0277600318\n          - 23.26291444469\n          - -23.07010638636\n  -   -   - 13.22112788651\n          - 1.569537501602\n          - 137.5825625705\n      -   - 1.569537501602\n          - -4.091404567032\n          - 14.28797929438\n      -   - 137.5825625705\n          - 14.28797929438\n          - -69.36780402294\n  -   -   - 12.39426977847\n          - 3.219862122548\n          - 69.57634867759\n      -   - 3.219862122548\n          - 3.931549769208\n          - 27.88803537784\n      -   - 69.57634867759\n          - 27.88803537784\n          - 16.06772788907\n  -   -   - 10.59582926181\n          - 3.961203981524\n          - 63.6129919848\n      -   - 3.961203981524\n          - 13.8089243593\n          - 34.67215199382\n      -   - 63.6129919848\n          - 34.67215199382\n          - 3.402916211001\n  -   -   - -4.590996625593\n          - -49.8578626567\n          - 88.18903535997\n      -   - -49.8578626567\n          - -392.7968335729\n          - -441.4737183738\n      -   - 88.18903535997\n          - -441.4737183738\n          - -2.005253308587\n  -   -   - 6.609230403572\n          - -1.114542090064\n          - 50.17123518647\n      -   - -1.114542090064\n          - -20.03956487444\n          - -10.10106952879\n      -   - 50.17123518647\n          - -10.10106952879\n          - -0.6749898814764\n  -   -   - 5.867803812028\n          - -0.1814136458974\n          - 42.89612529825\n      -   
- -0.1814136458974\n          - -11.28413222756\n          - -1.804764358727\n      -   - 42.89612529825\n          - -1.804764358727\n          - -1.028734386942\n  -   -   - 5.122058993026\n          - 0.07028381229466\n          - 37.07301548541\n      -   - 0.07028381229466\n          - -8.007614389428\n          - 0.4527765498708\n      -   - 37.07301548541\n          - 0.4527765498708\n          - -1.160231423527\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 42.05642503987\n          - 5.182308979952\n        
  - 295.7086970924\n      -   - 5.182308979952\n          - -16.8011933671\n          - 44.88068608775\n      -   - 295.7086970924\n          - 44.88068608775\n          - -11.16800948171\n  -   -   - 34.52716237442\n          - 3.742228341934\n          - 249.1555844695\n      -   - 3.742228341934\n          - -13.95687891579\n          - 32.4806538628\n      -   - 249.1555844695\n          - 32.4806538628\n          - -15.39308605662\n  -   -   - 30.88627308451\n          - 4.976024773829\n          - 218.8203159401\n      -   - 4.976024773829\n          - -12.55705651792\n          - 43.13446872147\n      -   - 218.8203159401\n          - 43.13446872147\n          - -16.96609172267\n  -   -   - 23.89982318472\n          - 3.566267197888\n          - 173.817334734\n      -   - 3.566267197888\n          - -9.358360214064\n          - 30.98344929428\n      -   - 173.817334734\n          - 30.98344929428\n          - -17.96992259777\n  -   -   - 19.05038877716\n          - 2.921211127977\n          - 142.2705130619\n      -   - 2.921211127977\n          - -6.493471507915\n          - 25.45927037319\n      -   - 142.2705130619\n          - 25.45927037319\n          - -20.03762618713\n  -   -   - 15.31308103373\n          - 2.369394773812\n          - 124.3899129792\n      -   - 2.369394773812\n          - -4.034968539623\n          - 20.83080340569\n      -   - 124.3899129792\n          - 20.83080340569\n          - -30.92076532995\n  -   -   - 19.05038877716\n          - 2.921211127977\n          - 142.2705130619\n      -   - 2.921211127977\n          - -6.493471507915\n          - 25.45927037319\n      -   - 142.2705130619\n          - 25.45927037319\n          - -20.03762618713\n  -   -   - 15.31308103373\n          - 2.369394773812\n          - 124.3899129792\n      -   - 2.369394773812\n          - -4.034968539624\n          - 20.83080340569\n      -   - 124.3899129792\n          - 20.83080340569\n          - -30.92076532995\n  -   -   - 14.37037076795\n         
 - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 10.44273226791\n          - 6.475979422676\n          - 57.63907853267\n      -   - 6.475979422676\n          - 34.37966124455\n          - 56.97442501698\n      -   - 57.63907853267\n          - 56.97442501698\n          - 1.719659134263\n  -   -   - 6.258067891218\n          - -0.4773313726774\n          - 46.28932429293\n      -   - -0.4773313726774\n          - -14.3215416894\n          - -4.441525949927\n      -   - 46.28932429293\n          - -4.441525949927\n          - -0.8894811141752\n  -   -   - 4.783039455404\n          - 0.1257874954058\n          - 34.53987590883\n      -   - 0.1257874954058\n          - -6.993256198462\n          - 0.956684671542\n      -   - 34.53987590883\n          - 0.956684671542\n          - -1.179917631132\n  -   -   - 42.05642503987\n          - 5.182308979952\n          - 295.7086970924\n      -   - 5.182308979952\n          - -16.8011933671\n          - 44.88068608775\n      -   - 295.7086970924\n          - 44.88068608775\n          - -11.16800948171\n  -   -   - 34.52716237442\n          - 3.742228341934\n          - 249.1555844695\n      -   - 3.742228341934\n          - -13.95687891579\n          - 32.4806538628\n      -   - 249.1555844695\n          - 32.4806538628\n          - -15.39308605662\n  -   -   - 30.88627308451\n          - 4.976024773829\n          - 218.8203159401\n      -   - 4.976024773829\n          - -12.55705651792\n          - 43.13446872147\n      -   - 218.8203159401\n          - 43.13446872147\n          - -16.96609172267\n  -   -   - 23.89982318472\n          - 3.566267197888\n          - 173.817334734\n      -   - 3.566267197888\n          - -9.358360214064\n          - 30.98344929428\n      -   - 173.817334734\n          - 30.98344929428\n          - -17.96992259777\n  -   
-   - 19.05038877716\n          - 2.921211127977\n          - 142.2705130619\n      -   - 2.921211127977\n          - -6.493471507915\n          - 25.45927037319\n      -   - 142.2705130619\n          - 25.45927037319\n          - -20.03762618713\n  -   -   - 15.31308103373\n          - 2.369394773812\n          - 124.3899129792\n      -   - 2.369394773812\n          - -4.034968539623\n          - 20.83080340569\n      -   - 124.3899129792\n          - 20.83080340569\n          - -30.92076532995\n  -   -   - 19.05038877716\n          - 2.921211127977\n          - 142.2705130619\n      -   - 2.921211127977\n          - -6.493471507915\n          - 25.45927037319\n      -   - 142.2705130619\n          - 25.45927037319\n          - -20.03762618713\n  -   -   - 15.31308103373\n          - 2.369394773812\n          - 124.3899129792\n      -   - 2.369394773812\n          - -4.034968539623\n          - 20.83080340569\n      -   - 124.3899129792\n          - 20.83080340569\n          - -30.92076532995\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 10.44273226791\n          - 6.475979422676\n          - 57.63907853267\n      -   - 6.475979422676\n          - 34.37966124455\n          - 56.97442501698\n      -   - 57.63907853267\n          - 56.97442501698\n          - 1.719659134263\n  -   -   - 6.258067891218\n          - -0.4773313726774\n          - 46.28932429293\n      -   - -0.4773313726774\n          - -14.3215416894\n          - -4.441525949927\n      -   - 46.28932429293\n          - -4.441525949927\n          - -0.889481114175\n  -   -   - 4.783039455404\n          - 0.1257874954057\n          - 34.53987590883\n      -   - 0.1257874954057\n          - -6.993256198462\n          - 0.956684671542\n      -   - 34.53987590883\n          - 
0.956684671542\n          - -1.179917631133\n  -   -   - 42.05642503987\n          - 5.182308979952\n          - 295.7086970924\n      -   - 5.182308979952\n          - -16.8011933671\n          - 44.88068608775\n      -   - 295.7086970924\n          - 44.88068608775\n          - -11.16800948171\n  -   -   - 34.52716237442\n          - 3.742228341934\n          - 249.1555844695\n      -   - 3.742228341934\n          - -13.95687891579\n          - 32.4806538628\n      -   - 249.1555844695\n          - 32.4806538628\n          - -15.39308605662\n  -   -   - 30.88627308451\n          - 4.976024773829\n          - 218.8203159401\n      -   - 4.976024773829\n          - -12.55705651792\n          - 43.13446872147\n      -   - 218.8203159401\n          - 43.13446872147\n          - -16.96609172267\n  -   -   - 23.89982318472\n          - 3.566267197888\n          - 173.817334734\n      -   - 3.566267197888\n          - -9.358360214064\n          - 30.98344929428\n      -   - 173.817334734\n          - 30.98344929428\n          - -17.96992259777\n  -   -   - 19.05038877716\n          - 2.921211127977\n          - 142.2705130619\n      -   - 2.921211127977\n          - -6.493471507915\n          - 25.45927037319\n      -   - 142.2705130619\n          - 25.45927037319\n          - -20.03762618713\n  -   -   - 15.31308103373\n          - 2.369394773812\n          - 124.3899129792\n      -   - 2.369394773812\n          - -4.034968539623\n          - 20.83080340569\n      -   - 124.3899129792\n          - 20.83080340569\n          - -30.92076532995\n  -   -   - 19.05038877716\n          - 2.921211127977\n          - 142.2705130619\n      -   - 2.921211127977\n          - -6.493471507915\n          - 25.45927037319\n      -   - 142.2705130619\n          - 25.45927037319\n          - -20.03762618713\n  -   -   - 15.31308103373\n          - 2.369394773812\n          - 124.3899129792\n      -   - 2.369394773812\n          - -4.034968539623\n          - 20.83080340569\n      -   - 
124.3899129792\n          - 20.83080340569\n          - -30.92076532995\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 10.44273226791\n          - 6.475979422676\n          - 57.63907853267\n      -   - 6.475979422676\n          - 34.37966124455\n          - 56.97442501698\n      -   - 57.63907853267\n          - 56.97442501698\n          - 1.719659134262\n  -   -   - 6.258067891218\n          - -0.4773313726774\n          - 46.28932429293\n      -   - -0.4773313726774\n          - -14.3215416894\n          - -4.441525949927\n      -   - 46.28932429293\n          - -4.441525949927\n          - -0.889481114175\n  -   -   - 4.783039455404\n          - 0.1257874954058\n          - 34.53987590883\n      -   - 0.1257874954058\n          - -6.993256198462\n          - 0.956684671542\n      -   - 34.53987590883\n          - 0.956684671542\n          - -1.179917631132\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 42.05642503987\n          - 5.182308979952\n          - 295.7086970924\n      -   - 5.182308979952\n          - -16.8011933671\n          - 44.88068608775\n      -   - 295.7086970924\n          - 44.88068608775\n          - -11.16800948171\n  -   -   - 34.52716237442\n          - 3.742228341934\n          - 249.1555844695\n      -   - 3.742228341934\n          - -13.95687891579\n          - 32.4806538628\n      -   - 249.1555844695\n          - 32.4806538628\n          - -15.39308605662\n  -   -   - 23.89982318472\n          - 3.566267197888\n          - 173.817334734\n      -   - 3.566267197888\n          - -9.358360214064\n          
- 30.98344929428\n      -   - 173.817334734\n          - 30.98344929428\n          - -17.96992259777\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 4.783039455404\n          - 0.1257874954057\n          - 34.53987590883\n      -   - 0.1257874954057\n          - -6.993256198462\n          - 0.956684671542\n      -   - 34.53987590883\n          - 0.956684671542\n          - -1.179917631133\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 4.783039455404\n          - 0.1257874954057\n          - 34.53987590883\n      -   - 0.1257874954057\n          - -6.993256198462\n          - 0.956684671542\n      -   - 34.53987590883\n          - 0.956684671542\n          - -1.179917631133\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 21.28989652643\n          - 3.211915555773\n          - 156.5687870814\n      -   - 3.211915555773\n          - 
-7.888244440813\n          - 27.94191248902\n      -   - 156.5687870814\n          - 27.94191248902\n          - -18.64926539005\n  -   -   - 21.28989652643\n          - 3.211915555773\n          - 156.5687870814\n      -   - 3.211915555773\n          - -7.888244440813\n          - 27.94191248902\n      -   - 156.5687870814\n          - 27.94191248902\n          - -18.64926539005\n  -   -   - 42.05642503987\n          - 5.182308979952\n          - 295.7086970924\n      -   - 5.182308979952\n          - -16.8011933671\n          - 44.88068608775\n      -   - 295.7086970924\n          - 44.88068608775\n          - -11.16800948171\n  -   -   - 34.52716237442\n          - 3.742228341934\n          - 249.1555844695\n      -   - 3.742228341934\n          - -13.95687891579\n          - 32.4806538628\n      -   - 249.1555844695\n          - 32.4806538628\n          - -15.39308605662\n  -   -   - 21.28989652643\n          - 3.211915555773\n          - 156.5687870814\n      -   - 3.211915555773\n          - -7.888244440814\n          - 27.94191248902\n      -   - 156.5687870814\n          - 27.94191248902\n          - -18.64926539005\n  -   -   - 21.28989652643\n          - 3.211915555773\n          - 156.5687870814\n      -   - 3.211915555773\n          - -7.888244440813\n          - 27.94191248902\n      -   - 156.5687870814\n          - 27.94191248902\n          - -18.64926539005\n  -   -   - 13.22112788651\n          - 1.569537501602\n          - 137.5825625705\n      -   - 1.569537501602\n          - -4.091404567033\n          - 14.28797929438\n      -   - 137.5825625705\n          - 14.28797929438\n          - -69.36780402294\n  -   -   - 10.59582926181\n          - 3.961203981524\n          - 63.6129919848\n      -   - 3.961203981524\n          - 13.8089243593\n          - 34.67215199382\n      -   - 63.6129919848\n          - 34.67215199382\n          - 3.402916211\n  -   -   - 21.28989652643\n          - 3.211915555773\n          - 156.5687870814\n      -   - 
3.211915555773\n          - -7.888244440813\n          - 27.94191248902\n      -   - 156.5687870814\n          - 27.94191248902\n          - -18.64926539005\n  -   -   - 21.28989652643\n          - 3.211915555773\n          - 156.5687870814\n      -   - 3.211915555773\n          - -7.888244440814\n          - 27.94191248902\n      -   - 156.5687870814\n          - 27.94191248902\n          - -18.64926539005\n  -   -   - 42.05642503987\n          - 5.182308979952\n          - 295.7086970924\n      -   - 5.182308979952\n          - -16.8011933671\n          - 44.88068608775\n      -   - 295.7086970924\n          - 44.88068608775\n          - -11.16800948171\n  -   -   - 34.52716237442\n          - 3.742228341934\n          - 249.1555844695\n      -   - 3.742228341934\n          - -13.95687891579\n          - 32.4806538628\n      -   - 249.1555844695\n          - 32.4806538628\n          - -15.39308605662\n  -   -   - 13.22112788651\n          - 1.569537501602\n          - 137.5825625705\n      -   - 1.569537501602\n          - -4.091404567032\n          - 14.28797929438\n      -   - 137.5825625705\n          - 14.28797929438\n          - -69.36780402294\n  -   -   - 10.59582926181\n          - 3.961203981524\n          - 63.6129919848\n      -   - 3.961203981524\n          - 13.8089243593\n          - 34.67215199382\n      -   - 63.6129919848\n          - 34.67215199382\n          - 3.402916211\n  -   -   - 21.28989652643\n          - 3.211915555773\n          - 156.5687870814\n      -   - 3.211915555773\n          - -7.888244440813\n          - 27.94191248902\n      -   - 156.5687870814\n          - 27.94191248902\n          - -18.64926539005\n  -   -   - 21.28989652643\n          - 3.211915555773\n          - 156.5687870814\n      -   - 3.211915555773\n          - -7.888244440813\n          - 27.94191248902\n      -   - 156.5687870814\n          - 27.94191248902\n          - -18.64926539005\nsquare_terms_expected:\n  -   -   - -3856.411663325\n          - 
-1744.213523774\n          - 2262.533558079\n      -   - -1744.213523774\n          - -2684.929962548\n          - -210.0096145524\n      -   - 2262.533558079\n          - -210.0096145524\n          - -71.22779112153\n  -   -   - -10569.10299644\n          - -4682.769847703\n          - 3951.415969003\n      -   - -4682.769847703\n          - -7035.132202115\n          - -875.1223560398\n      -   - 3951.415969003\n          - -875.1223560398\n          - -122.0013610248\n  -   -   - 14481.06389629\n          - 6304.851879955\n          - -2851.437438832\n      -   - 6304.851879955\n          - 9273.533330841\n          - 1517.67089317\n      -   - -2851.437438832\n          - 1517.67089317\n          - 32.41624891305\n  -   -   - 4252.951031935\n          - 1848.277580659\n          - -170.2845897301\n      -   - 1848.277580659\n          - 2612.860235584\n          - 805.8419376052\n      -   - -170.2845897301\n          - 805.8419376052\n          - -37.20600626186\n  -   -   - 2432.916206823\n          - 1032.638149205\n          - 257.4061444594\n      -   - 1032.638149205\n          - 1449.487149189\n          - 399.2375243902\n      -   - 257.4061444594\n          - 399.2375243902\n          - -48.15069583689\n  -   -   - 1240.4084327\n          - 512.7592742227\n          - 475.6238125956\n      -   - 512.7592742227\n          - 690.0567114374\n          - 253.7250568021\n      -   - 475.6238125956\n          - 253.7250568021\n          - -57.22030396345\n  -   -   - 459.1360054272\n          - 176.8440075451\n          - 527.6634588185\n      -   - 176.8440075451\n          - 209.2614271428\n          - 151.24310776\n      -   - 527.6634588185\n          - 151.24310776\n          - -67.51764137812\n  -   -   - -28.40757635067\n          - -31.79743224237\n          - 550.3578108482\n      -   - -31.79743224237\n          - -84.07371396961\n          - 84.22388465059\n      -   - 550.3578108482\n          - 84.22388465059\n          - -92.16014545357\n  -   
-   - 459.1360054272\n          - 176.8440075451\n          - 527.6634588185\n      -   - 176.8440075451\n          - 209.2614271428\n          - 151.24310776\n      -   - 527.6634588185\n          - 151.24310776\n          - -67.51764137812\n  -   -   - -28.40757635067\n          - -31.79743224237\n          - 550.3578108482\n      -   - -31.79743224237\n          - -84.07371396961\n          - 84.22388465059\n      -   - 550.3578108482\n          - 84.22388465059\n          - -92.16014545357\n  -   -   - -2045.110581511\n          - -913.7367115119\n          - 1103.831669941\n      -   - -913.7367115119\n          - -1367.741368628\n          - -163.7437421998\n      -   - 1103.831669941\n          - -163.7437421998\n          - -316.398543429\n  -   -   - 1219.218783147\n          - 525.7884332597\n          - -29.15120474328\n      -   - 525.7884332597\n          - 767.5344275996\n          - 236.8486988899\n      -   - -29.15120474328\n          - 236.8486988899\n          - 88.99129419806\n  -   -   - 662.0714264794\n          - 287.5911592069\n          - 90.92844099577\n      -   - 287.5911592069\n          - 454.4431485291\n          - 203.8681218118\n      -   - 90.92844099577\n          - 203.8681218118\n          - 25.01577261847\n  -   -   - -1377.243922834\n          - -795.4386681958\n          - 747.122472413\n      -   - -795.4386681958\n          - -2497.250719358\n          - -1840.429221121\n      -   - 747.122472413\n          - -1840.429221121\n          - 63.15352494948\n  -   -   - 217.522078241\n          - 79.32830419073\n          - 153.0067332807\n      -   - 79.32830419073\n          - 39.05322569843\n          - -15.05847653376\n      -   - 153.0067332807\n          - -15.05847653376\n          - 8.195712590102\n  -   -   - 192.1674291662\n          - 73.24546567179\n          - 128.9418592581\n      -   - 73.24546567179\n          - 60.8870259716\n          - 14.10438172489\n      -   - 128.9418592581\n          - 14.10438172489\n    
      - 4.023771621814\n  -   -   - 160.9847149983\n          - 61.88723830032\n          - 112.7111318224\n      -   - 61.88723830032\n          - 56.36412497327\n          - 19.44285900547\n      -   - 112.7111318224\n          - 19.44285900547\n          - 1.958413872367\n  -   -   - -3856.411663325\n          - -1744.213523774\n          - 2262.533558079\n      -   - -1744.213523774\n          - -2684.929962548\n          - -210.0096145524\n      -   - 2262.533558079\n          - -210.0096145524\n          - -71.22779112153\n  -   -   - -10569.10299644\n          - -4682.769847703\n          - 3951.415969003\n      -   - -4682.769847703\n          - -7035.132202115\n          - -875.1223560398\n      -   - 3951.415969003\n          - -875.1223560398\n          - -122.0013610248\n  -   -   - 14481.06389629\n          - 6304.851879955\n          - -2851.437438832\n      -   - 6304.851879955\n          - 9273.533330841\n          - 1517.67089317\n      -   - -2851.437438832\n          - 1517.67089317\n          - 32.41624891304\n  -   -   - 4252.951031935\n          - 1848.277580659\n          - -170.2845897301\n      -   - 1848.277580659\n          - 2612.860235584\n          - 805.8419376052\n      -   - -170.2845897301\n          - 805.8419376052\n          - -37.20600626185\n  -   -   - 2432.916206823\n          - 1032.638149205\n          - 257.4061444594\n      -   - 1032.638149205\n          - 1449.487149189\n          - 399.2375243902\n      -   - 257.4061444594\n          - 399.2375243902\n          - -48.15069583689\n  -   -   - 1240.4084327\n          - 512.7592742227\n          - 475.6238125956\n      -   - 512.7592742227\n          - 690.0567114374\n          - 253.7250568021\n      -   - 475.6238125956\n          - 253.7250568021\n          - -57.22030396345\n  -   -   - 459.1360054272\n          - 176.8440075451\n          - 527.6634588185\n      -   - 176.8440075451\n          - 209.2614271428\n          - 151.24310776\n      -   - 527.6634588185\n 
         - 151.24310776\n          - -67.51764137812\n  -   -   - -28.40757635067\n          - -31.79743224237\n          - 550.3578108482\n      -   - -31.79743224237\n          - -84.07371396961\n          - 84.22388465059\n      -   - 550.3578108482\n          - 84.22388465059\n          - -92.16014545357\n  -   -   - 459.1360054272\n          - 176.8440075451\n          - 527.6634588185\n      -   - 176.8440075451\n          - 209.2614271428\n          - 151.24310776\n      -   - 527.6634588185\n          - 151.24310776\n          - -67.51764137812\n  -   -   - -28.40757635067\n          - -31.79743224237\n          - 550.3578108482\n      -   - -31.79743224237\n          - -84.07371396961\n          - 84.22388465059\n      -   - 550.3578108482\n          - 84.22388465059\n          - -92.16014545357\n  -   -   - -2045.110581511\n          - -913.7367115119\n          - 1103.831669941\n      -   - -913.7367115119\n          - -1367.741368628\n          - -163.7437421998\n      -   - 1103.831669941\n          - -163.7437421998\n          - -316.398543429\n  -   -   - 1219.218783147\n          - 525.7884332597\n          - -29.15120474328\n      -   - 525.7884332597\n          - 767.5344275996\n          - 236.8486988899\n      -   - -29.15120474328\n          - 236.8486988899\n          - 88.99129419806\n  -   -   - 662.0714264794\n          - 287.5911592069\n          - 90.92844099577\n      -   - 287.5911592069\n          - 454.4431485291\n          - 203.8681218118\n      -   - 90.92844099577\n          - 203.8681218118\n          - 25.01577261847\n  -   -   - -1377.243922834\n          - -795.4386681958\n          - 747.122472413\n      -   - -795.4386681958\n          - -2497.250719358\n          - -1840.429221121\n      -   - 747.122472413\n          - -1840.429221121\n          - 63.15352494948\n  -   -   - 217.522078241\n          - 79.32830419073\n          - 153.0067332807\n      -   - 79.32830419073\n          - 39.05322569843\n          - 
-15.05847653376\n      -   - 153.0067332807\n          - -15.05847653376\n          - 8.195712590102\n  -   -   - 192.1674291662\n          - 73.24546567179\n          - 128.9418592581\n      -   - 73.24546567179\n          - 60.8870259716\n          - 14.10438172489\n      -   - 128.9418592581\n          - 14.10438172489\n          - 4.023771621814\n  -   -   - 160.9847149983\n          - 61.88723830032\n          - 112.7111318224\n      -   - 61.88723830032\n          - 56.36412497327\n          - 19.44285900547\n      -   - 112.7111318224\n          - 19.44285900547\n          - 1.958413872367\n  -   -   - -3856.411663325\n          - -1744.213523774\n          - 2262.533558079\n      -   - -1744.213523774\n          - -2684.929962548\n          - -210.0096145524\n      -   - 2262.533558079\n          - -210.0096145524\n          - -71.22779112153\n  -   -   - -10569.10299644\n          - -4682.769847703\n          - 3951.415969003\n      -   - -4682.769847703\n          - -7035.132202115\n          - -875.1223560398\n      -   - 3951.415969003\n          - -875.1223560398\n          - -122.0013610248\n  -   -   - 14481.06389629\n          - 6304.851879955\n          - -2851.437438832\n      -   - 6304.851879955\n          - 9273.533330841\n          - 1517.67089317\n      -   - -2851.437438832\n          - 1517.67089317\n          - 32.41624891304\n  -   -   - 4252.951031935\n          - 1848.277580659\n          - -170.2845897301\n      -   - 1848.277580659\n          - 2612.860235584\n          - 805.8419376052\n      -   - -170.2845897301\n          - 805.8419376052\n          - -37.20600626185\n  -   -   - 2432.916206823\n          - 1032.638149205\n          - 257.4061444594\n      -   - 1032.638149205\n          - 1449.487149189\n          - 399.2375243902\n      -   - 257.4061444594\n          - 399.2375243902\n          - -48.15069583689\n  -   -   - 1240.4084327\n          - 512.7592742227\n          - 475.6238125956\n      -   - 512.7592742227\n       
   - 690.0567114374\n          - 253.7250568021\n      -   - 475.6238125956\n          - 253.7250568021\n          - -57.22030396345\n  -   -   - 459.1360054272\n          - 176.8440075451\n          - 527.6634588185\n      -   - 176.8440075451\n          - 209.2614271428\n          - 151.24310776\n      -   - 527.6634588185\n          - 151.24310776\n          - -67.51764137812\n  -   -   - -28.40757635067\n          - -31.79743224237\n          - 550.3578108482\n      -   - -31.79743224237\n          - -84.07371396961\n          - 84.22388465059\n      -   - 550.3578108482\n          - 84.22388465059\n          - -92.16014545357\n  -   -   - 459.1360054272\n          - 176.8440075451\n          - 527.6634588185\n      -   - 176.8440075451\n          - 209.2614271428\n          - 151.24310776\n      -   - 527.6634588185\n          - 151.24310776\n          - -67.51764137812\n  -   -   - -28.40757635067\n          - -31.79743224237\n          - 550.3578108482\n      -   - -31.79743224237\n          - -84.07371396961\n          - 84.22388465059\n      -   - 550.3578108482\n          - 84.22388465059\n          - -92.16014545357\n  -   -   - -2045.110581511\n          - -913.7367115119\n          - 1103.831669941\n      -   - -913.7367115119\n          - -1367.741368628\n          - -163.7437421998\n      -   - 1103.831669941\n          - -163.7437421998\n          - -316.398543429\n  -   -   - 1219.218783147\n          - 525.7884332597\n          - -29.15120474328\n      -   - 525.7884332597\n          - 767.5344275996\n          - 236.8486988899\n      -   - -29.15120474328\n          - 236.8486988899\n          - 88.99129419806\n  -   -   - 662.0714264794\n          - 287.5911592069\n          - 90.92844099577\n      -   - 287.5911592069\n          - 454.4431485291\n          - 203.8681218118\n      -   - 90.92844099577\n          - 203.8681218118\n          - 25.01577261847\n  -   -   - -1377.243922834\n          - -795.4386681958\n          - 747.122472413\n     
 -   - -795.4386681958\n          - -2497.250719358\n          - -1840.429221121\n      -   - 747.122472413\n          - -1840.429221121\n          - 63.15352494948\n  -   -   - 217.522078241\n          - 79.32830419073\n          - 153.0067332807\n      -   - 79.32830419073\n          - 39.05322569843\n          - -15.05847653376\n      -   - 153.0067332807\n          - -15.05847653376\n          - 8.195712590102\n  -   -   - 192.1674291662\n          - 73.24546567179\n          - 128.9418592581\n      -   - 73.24546567179\n          - 60.8870259716\n          - 14.10438172489\n      -   - 128.9418592581\n          - 14.10438172489\n          - 4.023771621814\n  -   -   - 160.9847149983\n          - 61.88723830032\n          - 112.7111318224\n      -   - 61.88723830032\n          - 56.36412497327\n          - 19.44285900547\n      -   - 112.7111318224\n          - 19.44285900547\n          - 1.958413872367\n  -   -   - -3856.411663325\n          - -1744.213523774\n          - 2262.533558079\n      -   - -1744.213523774\n          - -2684.929962548\n          - -210.0096145524\n      -   - 2262.533558079\n          - -210.0096145524\n          - -71.22779112153\n  -   -   - -10569.10299644\n          - -4682.769847703\n          - 3951.415969003\n      -   - -4682.769847703\n          - -7035.132202115\n          - -875.1223560398\n      -   - 3951.415969003\n          - -875.1223560398\n          - -122.0013610248\n  -   -   - 14481.06389629\n          - 6304.851879955\n          - -2851.437438832\n      -   - 6304.851879955\n          - 9273.533330841\n          - 1517.67089317\n      -   - -2851.437438832\n          - 1517.67089317\n          - 32.41624891304\n  -   -   - 4252.951031935\n          - 1848.277580659\n          - -170.2845897301\n      -   - 1848.277580659\n          - 2612.860235584\n          - 805.8419376052\n      -   - -170.2845897301\n          - 805.8419376052\n          - -37.20600626185\n  -   -   - 2432.916206823\n          - 
1032.638149205\n          - 257.4061444594\n      -   - 1032.638149205\n          - 1449.487149189\n          - 399.2375243902\n      -   - 257.4061444594\n          - 399.2375243902\n          - -48.15069583689\n  -   -   - 1240.4084327\n          - 512.7592742227\n          - 475.6238125956\n      -   - 512.7592742227\n          - 690.0567114374\n          - 253.7250568021\n      -   - 475.6238125956\n          - 253.7250568021\n          - -57.22030396345\n  -   -   - 459.1360054272\n          - 176.8440075451\n          - 527.6634588185\n      -   - 176.8440075451\n          - 209.2614271428\n          - 151.24310776\n      -   - 527.6634588185\n          - 151.24310776\n          - -67.51764137812\n  -   -   - -28.40757635067\n          - -31.79743224237\n          - 550.3578108482\n      -   - -31.79743224237\n          - -84.07371396961\n          - 84.22388465059\n      -   - 550.3578108482\n          - 84.22388465059\n          - -92.16014545357\n  -   -   - 459.1360054272\n          - 176.8440075451\n          - 527.6634588185\n      -   - 176.8440075451\n          - 209.2614271428\n          - 151.24310776\n      -   - 527.6634588185\n          - 151.24310776\n          - -67.51764137812\n  -   -   - -28.40757635067\n          - -31.79743224237\n          - 550.3578108482\n      -   - -31.79743224237\n          - -84.07371396961\n          - 84.22388465059\n      -   - 550.3578108482\n          - 84.22388465059\n          - -92.16014545357\n  -   -   - -2045.110581511\n          - -913.7367115119\n          - 1103.831669941\n      -   - -913.7367115119\n          - -1367.741368628\n          - -163.7437421998\n      -   - 1103.831669941\n          - -163.7437421998\n          - -316.398543429\n  -   -   - 1219.218783147\n          - 525.7884332597\n          - -29.15120474328\n      -   - 525.7884332597\n          - 767.5344275996\n          - 236.8486988899\n      -   - -29.15120474328\n          - 236.8486988899\n          - 88.99129419806\n  -   -   - 
662.0714264794\n          - 287.5911592069\n          - 90.92844099577\n      -   - 287.5911592069\n          - 454.4431485291\n          - 203.8681218118\n      -   - 90.92844099577\n          - 203.8681218118\n          - 25.01577261847\n  -   -   - -1377.243922834\n          - -795.4386681958\n          - 747.122472413\n      -   - -795.4386681958\n          - -2497.250719358\n          - -1840.429221121\n      -   - 747.122472413\n          - -1840.429221121\n          - 63.15352494948\n  -   -   - 217.522078241\n          - 79.32830419073\n          - 153.0067332807\n      -   - 79.32830419073\n          - 39.05322569843\n          - -15.05847653376\n      -   - 153.0067332807\n          - -15.05847653376\n          - 8.195712590102\n  -   -   - 192.1674291662\n          - 73.24546567179\n          - 128.9418592581\n      -   - 73.24546567179\n          - 60.8870259716\n          - 14.10438172489\n      -   - 128.9418592581\n          - 14.10438172489\n          - 4.023771621814\n  -   -   - 160.9847149983\n          - 61.88723830032\n          - 112.7111318224\n      -   - 61.88723830032\n          - 56.36412497327\n          - 19.44285900547\n      -   - 112.7111318224\n          - 19.44285900547\n          - 1.958413872367\n  -   -   - -3856.411663325\n          - -1744.213523774\n          - 2262.533558079\n      -   - -1744.213523774\n          - -2684.929962548\n          - -210.0096145524\n      -   - 2262.533558079\n          - -210.0096145524\n          - -71.22779112153\n  -   -   - 14481.06389629\n          - 6304.851879955\n          - -2851.437438832\n      -   - 6304.851879955\n          - 9273.533330841\n          - 1517.67089317\n      -   - -2851.437438832\n          - 1517.67089317\n          - 32.41624891305\n  -   -   - 754.037862043\n          - 303.007024288\n          - 520.6337570466\n      -   - 303.007024288\n          - 387.992522728\n          - 191.4946509941\n      -   - 520.6337570466\n          - 191.4946509941\n          - 
-62.14538340589\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 147.2292962915\n          - 56.67228284198\n          - 105.718994092\n      -   - 56.67228284198\n          - 52.62002215236\n          - 19.90293970874\n      -   - 105.718994092\n          - 19.90293970874\n          - 1.297978879663\n  -   -   - -3856.411663325\n          - -1744.213523774\n          - 2262.533558079\n      -   - -1744.213523774\n          - -2684.929962548\n          - -210.0096145524\n      -   - 2262.533558079\n          - -210.0096145524\n          - -71.22779112153\n  -   -   - 14481.06389629\n          - 6304.851879955\n          - -2851.437438832\n      -   - 6304.851879955\n          - 9273.533330841\n          - 1517.67089317\n      -   - -2851.437438832\n          - 1517.67089317\n          - 32.41624891305\n  -   -   - 754.037862043\n          - 303.007024288\n          - 520.6337570466\n      -   - 303.007024288\n          - 387.992522728\n          - 191.4946509941\n      -   - 520.6337570466\n          - 191.4946509941\n          - -62.14538340589\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 
491.0039053136\n          - 341.2992596768\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 147.2292962915\n          - 56.67228284198\n          - 105.718994092\n      -   - 56.67228284198\n          - 52.62002215236\n          - 19.90293970874\n      -   - 105.718994092\n          - 19.90293970874\n          - 1.297978879664\n  -   -   - -3856.411663325\n          - -1744.213523774\n          - 2262.533558079\n      -   - -1744.213523774\n          - -2684.929962548\n          - -210.0096145524\n      -   - 2262.533558079\n          - -210.0096145524\n          - -71.22779112153\n  -   -   - 14481.06389629\n          - 6304.851879955\n          - -2851.437438832\n      -   - 6304.851879955\n          - 9273.533330841\n          - 1517.67089317\n      -   - -2851.437438832\n          - 1517.67089317\n          - 32.41624891304\n  -   -   - 754.037862043\n          - 303.007024288\n          - 520.6337570466\n      -   - 303.007024288\n          - 387.992522728\n          - 191.4946509941\n      -   - 520.6337570466\n          - 191.4946509941\n          - -62.14538340589\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - 
-683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 147.2292962915\n          - 56.67228284198\n          - 105.718994092\n      -   - 56.67228284198\n          - 52.62002215236\n          - 19.90293970874\n      -   - 105.718994092\n          - 19.90293970874\n          - 1.297978879663\n  -   -   - -3856.411663325\n          - -1744.213523774\n          - 2262.533558079\n      -   - -1744.213523774\n          - -2684.929962548\n          - -210.0096145524\n      -   - 2262.533558079\n          - -210.0096145524\n          - -71.22779112153\n  -   -   - 14481.06389629\n          - 6304.851879955\n          - -2851.437438832\n      -   - 6304.851879955\n          - 9273.533330841\n          - 1517.67089317\n      -   - -2851.437438832\n          - 1517.67089317\n          - 32.41624891305\n  -   -   - 754.037862043\n          - 303.007024288\n          - 520.6337570466\n      -   - 303.007024288\n          - 387.992522728\n          - 191.4946509941\n      -   - 520.6337570466\n          - 191.4946509941\n          - -62.14538340589\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 147.2292962915\n          - 56.67228284198\n          - 105.718994092\n      -   - 56.67228284198\n          - 52.62002215236\n          - 19.90293970874\n      -   - 105.718994092\n          - 19.90293970874\n          - 1.297978879663\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 
491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 147.2292962915\n          - 56.67228284198\n          - 105.718994092\n      -   - 56.67228284198\n          - 52.62002215236\n          - 19.90293970874\n      -   - 105.718994092\n          - 19.90293970874\n          - 1.297978879663\n  -   -   - -3856.411663325\n          - -1744.213523774\n          - 2262.533558079\n      -   - -1744.213523774\n          - -2684.929962548\n          - -210.0096145524\n      -   - 2262.533558079\n          - -210.0096145524\n          - -71.22779112153\n  -   -   - 14481.06389629\n          - 6304.851879955\n          - -2851.437438832\n      -   - 6304.851879955\n          - 9273.533330841\n          - 1517.67089317\n      -   - -2851.437438832\n          - 1517.67089317\n          - 32.41624891304\n  -   -   - 2432.916206823\n          - 1032.638149205\n          - 257.4061444594\n      -   - 1032.638149205\n          - 1449.487149189\n          - 399.2375243902\n      -   - 257.4061444594\n          - 399.2375243902\n          - -48.15069583689\n  -   -   - 754.037862043\n          - 303.007024288\n          - 520.6337570466\n      -   - 303.007024288\n          - 387.992522728\n          - 191.4946509941\n      -   - 520.6337570466\n          - 191.4946509941\n          - -62.14538340589\n  -   -   - 224.0053660568\n          - 76.49830322054\n          - 530.6144385787\n      -   - 76.49830322054\n          - 68.48505242171\n          - 118.4056191032\n      -   - 530.6144385787\n          - 118.4056191032\n          - -75.99273950177\n  -   -   - 224.0053660568\n          - 76.49830322054\n          - 530.6144385787\n      -   - 76.49830322054\n          - 
68.48505242171\n          - 118.4056191032\n      -   - 530.6144385787\n          - 118.4056191032\n          - -75.99273950177\n  -   -   - -441.5774716513\n          - -211.0258671001\n          - 630.8815307116\n      -   - -211.0258671001\n          - -340.9653527663\n          - 31.64858923452\n      -   - 630.8815307116\n          - 31.64858923452\n          - -131.2299615288\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 643.1868519507\n          - 289.6433117825\n          - 69.96405555076\n      -   - 289.6433117825\n          - 527.631431758\n          - 287.5422429696\n      -   - 69.96405555076\n          - 287.5422429696\n          - 12.86544852284\n  -   -   - 207.8475807462\n          - 78.25234656542\n          - 139.1055791254\n      -   - 78.25234656542\n          - 57.38466331417\n          - 5.647653214982\n      -   - 139.1055791254\n          - 5.647653214982\n          - 5.691781115008\n  -   -   - 147.2292962915\n          - 56.67228284198\n          - 105.718994092\n      -   - 56.67228284198\n          - 52.62002215236\n          - 19.90293970874\n      -   - 105.718994092\n          - 19.90293970874\n          - 1.297978879663\n  -   -   - -3856.411663325\n          - -1744.213523774\n          - 2262.533558079\n      -   - -1744.213523774\n          - -2684.929962548\n          - -210.0096145524\n      -   - 2262.533558079\n          - -210.0096145524\n          - -71.22779112153\n  -   -   - 14481.06389629\n          - 6304.851879955\n          - -2851.437438832\n      -   - 6304.851879955\n          - 9273.533330841\n          - 1517.67089317\n      -   - -2851.437438832\n          - 1517.67089317\n          - 32.41624891304\n  -   -   - 2432.916206823\n          - 1032.638149205\n          - 257.4061444594\n      -   - 
1032.638149205\n          - 1449.487149189\n          - 399.2375243902\n      -   - 257.4061444594\n          - 399.2375243902\n          - -48.15069583689\n  -   -   - 754.037862043\n          - 303.007024288\n          - 520.6337570466\n      -   - 303.007024288\n          - 387.992522728\n          - 191.4946509941\n      -   - 520.6337570466\n          - 191.4946509941\n          - -62.14538340589\n  -   -   - 224.0053660568\n          - 76.49830322054\n          - 530.6144385787\n      -   - 76.49830322054\n          - 68.48505242171\n          - 118.4056191032\n      -   - 530.6144385787\n          - 118.4056191032\n          - -75.99273950177\n  -   -   - 224.0053660568\n          - 76.49830322054\n          - 530.6144385787\n      -   - 76.49830322054\n          - 68.48505242171\n          - 118.4056191032\n      -   - 530.6144385787\n          - 118.4056191032\n          - -75.99273950177\n  -   -   - -441.5774716513\n          - -211.0258671001\n          - 630.8815307116\n      -   - -211.0258671001\n          - -340.9653527663\n          - 31.64858923452\n      -   - 630.8815307116\n          - 31.64858923452\n          - -131.2299615288\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 643.1868519507\n          - 289.6433117825\n          - 69.96405555076\n      -   - 289.6433117825\n          - 527.631431758\n          - 287.5422429696\n      -   - 69.96405555076\n          - 287.5422429696\n          - 12.86544852284\n  -   -   - 207.8475807462\n          - 78.25234656542\n          - 139.1055791254\n      -   - 78.25234656542\n          - 57.38466331417\n          - 5.647653214982\n      -   - 139.1055791254\n          - 5.647653214982\n          - 5.691781115008\n  -   -   - 147.2292962915\n          - 56.67228284198\n          - 
105.718994092\n      -   - 56.67228284198\n          - 52.62002215236\n          - 19.90293970874\n      -   - 105.718994092\n          - 19.90293970874\n          - 1.297978879664\n  -   -   - -3856.411663325\n          - -1744.213523774\n          - 2262.533558079\n      -   - -1744.213523774\n          - -2684.929962548\n          - -210.0096145524\n      -   - 2262.533558079\n          - -210.0096145524\n          - -71.22779112153\n  -   -   - 14481.06389629\n          - 6304.851879955\n          - -2851.437438832\n      -   - 6304.851879955\n          - 9273.533330841\n          - 1517.67089317\n      -   - -2851.437438832\n          - 1517.67089317\n          - 32.41624891304\n  -   -   - 2432.916206823\n          - 1032.638149205\n          - 257.4061444594\n      -   - 1032.638149205\n          - 1449.487149189\n          - 399.2375243902\n      -   - 257.4061444594\n          - 399.2375243902\n          - -48.15069583689\n  -   -   - 754.037862043\n          - 303.007024288\n          - 520.6337570466\n      -   - 303.007024288\n          - 387.992522728\n          - 191.4946509941\n      -   - 520.6337570466\n          - 191.4946509941\n          - -62.14538340589\n  -   -   - 224.0053660568\n          - 76.49830322054\n          - 530.6144385787\n      -   - 76.49830322054\n          - 68.48505242171\n          - 118.4056191032\n      -   - 530.6144385787\n          - 118.4056191032\n          - -75.99273950177\n  -   -   - 224.0053660568\n          - 76.49830322054\n          - 530.6144385787\n      -   - 76.49830322054\n          - 68.48505242171\n          - 118.4056191032\n      -   - 530.6144385787\n          - 118.4056191032\n          - -75.99273950177\n  -   -   - -441.5774716513\n          - -211.0258671001\n          - 630.8815307116\n      -   - -211.0258671001\n          - -340.9653527663\n          - 31.64858923452\n      -   - 630.8815307116\n          - 31.64858923452\n          - -131.2299615288\n  -   -   - 3350.123265148\n          - 
1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 643.1868519507\n          - 289.6433117825\n          - 69.96405555076\n      -   - 289.6433117825\n          - 527.631431758\n          - 287.5422429696\n      -   - 69.96405555076\n          - 287.5422429696\n          - 12.86544852284\n  -   -   - 207.8475807462\n          - 78.25234656542\n          - 139.1055791254\n      -   - 78.25234656542\n          - 57.38466331417\n          - 5.647653214982\n      -   - 139.1055791254\n          - 5.647653214982\n          - 5.691781115008\n  -   -   - 147.2292962915\n          - 56.67228284198\n          - 105.718994092\n      -   - 56.67228284198\n          - 52.62002215236\n          - 19.90293970874\n      -   - 105.718994092\n          - 19.90293970874\n          - 1.297978879663\n  -   -   - -3856.411663325\n          - -1744.213523774\n          - 2262.533558079\n      -   - -1744.213523774\n          - -2684.929962548\n          - -210.0096145524\n      -   - 2262.533558079\n          - -210.0096145524\n          - -71.22779112153\n  -   -   - -10569.10299644\n          - -4682.769847703\n          - 3951.415969003\n      -   - -4682.769847703\n          - -7035.132202115\n          - -875.1223560398\n      -   - 3951.415969003\n          - -875.1223560398\n          - -122.0013610248\n  -   -   - 14481.06389629\n          - 6304.851879955\n          - -2851.437438832\n      -   - 6304.851879955\n          - 9273.533330841\n          - 1517.67089317\n      -   - -2851.437438832\n          - 1517.67089317\n          - 32.41624891304\n  -   -   - 4252.951031935\n          - 1848.277580659\n          - -170.2845897301\n      -   - 1848.277580659\n          - 2612.860235584\n          - 805.8419376052\n      -   - -170.2845897301\n          - 805.8419376052\n          - -37.20600626185\n  -   -  
 - 2432.916206823\n          - 1032.638149205\n          - 257.4061444594\n      -   - 1032.638149205\n          - 1449.487149189\n          - 399.2375243902\n      -   - 257.4061444594\n          - 399.2375243902\n          - -48.15069583689\n  -   -   - 1240.4084327\n          - 512.7592742227\n          - 475.6238125956\n      -   - 512.7592742227\n          - 690.0567114374\n          - 253.7250568021\n      -   - 475.6238125956\n          - 253.7250568021\n          - -57.22030396345\n  -   -   - -28.40757635067\n          - -31.79743224237\n          - 550.3578108482\n      -   - -31.79743224237\n          - -84.07371396961\n          - 84.22388465059\n      -   - 550.3578108482\n          - 84.22388465059\n          - -92.16014545357\n  -   -   - -28.40757635067\n          - -31.79743224237\n          - 550.3578108482\n      -   - -31.79743224237\n          - -84.07371396961\n          - 84.22388465059\n      -   - 550.3578108482\n          - 84.22388465059\n          - -92.16014545357\n  -   -   - -2045.110581511\n          - -913.7367115119\n          - 1103.831669941\n      -   - -913.7367115119\n          - -1367.741368628\n          - -163.7437421998\n      -   - 1103.831669941\n          - -163.7437421998\n          - -316.398543429\n  -   -   - 1219.218783147\n          - 525.7884332597\n          - -29.15120474328\n      -   - 525.7884332597\n          - 767.5344275996\n          - 236.8486988899\n      -   - -29.15120474328\n          - 236.8486988899\n          - 88.99129419806\n  -   -   - 662.0714264794\n          - 287.5911592069\n          - 90.92844099577\n      -   - 287.5911592069\n          - 454.4431485291\n          - 203.8681218118\n      -   - 90.92844099577\n          - 203.8681218118\n          - 25.01577261847\n  -   -   - -1377.243922834\n          - -795.4386681958\n          - 747.122472413\n      -   - -795.4386681958\n          - -2497.250719358\n          - -1840.429221121\n      -   - 747.122472413\n          - 
-1840.429221121\n          - 63.15352494948\n  -   -   - 217.522078241\n          - 79.32830419073\n          - 153.0067332807\n      -   - 79.32830419073\n          - 39.05322569843\n          - -15.05847653376\n      -   - 153.0067332807\n          - -15.05847653376\n          - 8.195712590102\n  -   -   - 192.1674291662\n          - 73.24546567179\n          - 128.9418592581\n      -   - 73.24546567179\n          - 60.8870259716\n          - 14.10438172489\n      -   - 128.9418592581\n          - 14.10438172489\n          - 4.023771621814\n  -   -   - 160.9847149983\n          - 61.88723830032\n          - 112.7111318224\n      -   - 61.88723830032\n          - 56.36412497327\n          - 19.44285900547\n      -   - 112.7111318224\n          - 19.44285900547\n          - 1.958413872367\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - 
-683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - -3856.411663325\n          - -1744.213523774\n          - 2262.533558079\n      -   - -1744.213523774\n          - -2684.929962548\n          - -210.0096145524\n      -   - 2262.533558079\n          - -210.0096145524\n          - -71.22779112153\n  -   -   - 14481.06389629\n          - 6304.851879955\n          - -2851.437438832\n      -   - 6304.851879955\n          - 9273.533330841\n          - 1517.67089317\n      -   - -2851.437438832\n          - 1517.67089317\n          - 32.41624891305\n  -   -   - 2432.916206823\n          - 1032.638149205\n          - 257.4061444594\n      -   - 1032.638149205\n          - 1449.487149189\n          - 399.2375243902\n      -   - 257.4061444594\n          - 399.2375243902\n          - -48.15069583689\n  -   -   - 754.037862043\n          - 303.007024288\n          - 520.6337570466\n      -   - 303.007024288\n          - 387.992522728\n          - 191.4946509941\n      -   - 520.6337570466\n          - 191.4946509941\n          - -62.14538340589\n  -   -   - 224.0053660568\n          - 76.49830322054\n          - 530.6144385787\n      -   - 76.49830322054\n          - 68.48505242171\n          - 118.4056191032\n      -   - 530.6144385787\n          - 118.4056191032\n          - -75.99273950177\n  -   -   - -441.5774716513\n          - -211.0258671001\n          - 630.8815307116\n      -   - -211.0258671001\n          - -340.9653527663\n          - 31.64858923452\n      -   - 630.8815307116\n          - 31.64858923452\n          - -131.2299615288\n  -   -   - 224.0053660568\n          - 76.49830322054\n          - 530.6144385787\n      -   - 76.49830322054\n          - 68.48505242171\n          
- 118.4056191032\n      -   - 530.6144385787\n          - 118.4056191032\n          - -75.99273950177\n  -   -   - -441.5774716513\n          - -211.0258671001\n          - 630.8815307116\n      -   - -211.0258671001\n          - -340.9653527663\n          - 31.64858923452\n      -   - 630.8815307116\n          - 31.64858923452\n          - -131.2299615288\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 643.1868519507\n          - 289.6433117825\n          - 69.96405555076\n      -   - 289.6433117825\n          - 527.631431758\n          - 287.5422429696\n      -   - 69.96405555076\n          - 287.5422429696\n          - 12.86544852284\n  -   -   - 207.8475807462\n          - 78.25234656542\n          - 139.1055791254\n      -   - 78.25234656542\n          - 57.38466331417\n          - 5.647653214982\n      -   - 139.1055791254\n          - 5.647653214982\n          - 5.691781115008\n  -   -   - 147.2292962915\n          - 56.67228284198\n          - 105.718994092\n      -   - 56.67228284198\n          - 52.62002215236\n          - 19.90293970874\n      -   - 105.718994092\n          - 19.90293970874\n          - 1.297978879664\n  -   -   - -3856.411663325\n          - -1744.213523774\n          - 2262.533558079\n      -   - -1744.213523774\n          - -2684.929962548\n          - -210.0096145524\n      -   - 2262.533558079\n          - -210.0096145524\n          - -71.22779112152\n  -   -   - 14481.06389629\n          - 6304.851879955\n          - -2851.437438832\n      -   - 6304.851879955\n          - 9273.533330841\n          - 1517.67089317\n      -   - -2851.437438832\n          - 1517.67089317\n          - 32.41624891304\n  -   -   - 2432.916206823\n          - 1032.638149205\n          - 257.4061444594\n      -   - 1032.638149205\n          
- 1449.487149189\n          - 399.2375243902\n      -   - 257.4061444594\n          - 399.2375243902\n          - -48.15069583689\n  -   -   - 754.037862043\n          - 303.007024288\n          - 520.6337570466\n      -   - 303.007024288\n          - 387.992522728\n          - 191.4946509941\n      -   - 520.6337570466\n          - 191.4946509941\n          - -62.14538340589\n  -   -   - 224.0053660568\n          - 76.49830322054\n          - 530.6144385787\n      -   - 76.49830322054\n          - 68.48505242171\n          - 118.4056191032\n      -   - 530.6144385787\n          - 118.4056191032\n          - -75.99273950177\n  -   -   - -441.5774716513\n          - -211.0258671001\n          - 630.8815307116\n      -   - -211.0258671001\n          - -340.9653527663\n          - 31.64858923452\n      -   - 630.8815307116\n          - 31.64858923452\n          - -131.2299615288\n  -   -   - 224.0053660568\n          - 76.49830322054\n          - 530.6144385787\n      -   - 76.49830322054\n          - 68.48505242171\n          - 118.4056191032\n      -   - 530.6144385787\n          - 118.4056191032\n          - -75.99273950177\n  -   -   - -441.5774716513\n          - -211.0258671001\n          - 630.8815307116\n      -   - -211.0258671001\n          - -340.9653527663\n          - 31.64858923452\n      -   - 630.8815307116\n          - 31.64858923452\n          - -131.2299615288\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 643.1868519507\n          - 289.6433117825\n          - 69.96405555076\n      -   - 289.6433117825\n          - 527.631431758\n          - 287.5422429696\n      -   - 69.96405555076\n          - 287.5422429696\n          - 12.86544852284\n  -   -   - 207.8475807462\n          - 78.25234656542\n          - 139.1055791254\n      -   - 
78.25234656542\n          - 57.38466331417\n          - 5.647653214982\n      -   - 139.1055791254\n          - 5.647653214982\n          - 5.691781115008\n  -   -   - 147.2292962915\n          - 56.67228284198\n          - 105.718994092\n      -   - 56.67228284198\n          - 52.62002215236\n          - 19.90293970874\n      -   - 105.718994092\n          - 19.90293970874\n          - 1.297978879663\n  -   -   - -3856.411663325\n          - -1744.213523774\n          - 2262.533558079\n      -   - -1744.213523774\n          - -2684.929962548\n          - -210.0096145524\n      -   - 2262.533558079\n          - -210.0096145524\n          - -71.22779112153\n  -   -   - 14481.06389629\n          - 6304.851879955\n          - -2851.437438832\n      -   - 6304.851879955\n          - 9273.533330841\n          - 1517.67089317\n      -   - -2851.437438832\n          - 1517.67089317\n          - 32.41624891305\n  -   -   - 2432.916206823\n          - 1032.638149205\n          - 257.4061444594\n      -   - 1032.638149205\n          - 1449.487149189\n          - 399.2375243902\n      -   - 257.4061444594\n          - 399.2375243902\n          - -48.15069583689\n  -   -   - 754.037862043\n          - 303.007024288\n          - 520.6337570466\n      -   - 303.007024288\n          - 387.992522728\n          - 191.4946509941\n      -   - 520.6337570466\n          - 191.4946509941\n          - -62.14538340589\n  -   -   - 224.0053660568\n          - 76.49830322054\n          - 530.6144385787\n      -   - 76.49830322054\n          - 68.48505242171\n          - 118.4056191032\n      -   - 530.6144385787\n          - 118.4056191032\n          - -75.99273950177\n  -   -   - -441.5774716513\n          - -211.0258671001\n          - 630.8815307116\n      -   - -211.0258671001\n          - -340.9653527663\n          - 31.64858923452\n      -   - 630.8815307116\n          - 31.64858923452\n          - -131.2299615288\n  -   -   - 224.0053660568\n          - 76.49830322054\n          - 
530.6144385787\n      -   - 76.49830322054\n          - 68.48505242171\n          - 118.4056191032\n      -   - 530.6144385787\n          - 118.4056191032\n          - -75.99273950177\n  -   -   - -441.5774716513\n          - -211.0258671001\n          - 630.8815307116\n      -   - -211.0258671001\n          - -340.9653527663\n          - 31.64858923452\n      -   - 630.8815307116\n          - 31.64858923452\n          - -131.2299615288\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 643.1868519507\n          - 289.6433117825\n          - 69.96405555076\n      -   - 289.6433117825\n          - 527.631431758\n          - 287.5422429696\n      -   - 69.96405555076\n          - 287.5422429696\n          - 12.86544852284\n  -   -   - 207.8475807462\n          - 78.25234656542\n          - 139.1055791254\n      -   - 78.25234656542\n          - 57.38466331417\n          - 5.647653214982\n      -   - 139.1055791254\n          - 5.647653214982\n          - 5.691781115008\n  -   -   - 147.2292962915\n          - 56.67228284198\n          - 105.718994092\n      -   - 56.67228284198\n          - 52.62002215236\n          - 19.90293970874\n      -   - 105.718994092\n          - 19.90293970874\n          - 1.297978879664\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - -3856.411663325\n          - -1744.213523774\n          - 2262.533558079\n      -   - -1744.213523774\n          - -2684.929962548\n          - -210.0096145524\n      -   - 2262.533558079\n          - -210.0096145524\n          - -71.22779112153\n  -   -   - 14481.06389629\n          - 
6304.851879955\n          - -2851.437438832\n      -   - 6304.851879955\n          - 9273.533330841\n          - 1517.67089317\n      -   - -2851.437438832\n          - 1517.67089317\n          - 32.41624891305\n  -   -   - 754.037862043\n          - 303.007024288\n          - 520.6337570466\n      -   - 303.007024288\n          - 387.992522728\n          - 191.4946509941\n      -   - 520.6337570466\n          - 191.4946509941\n          - -62.14538340589\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 147.2292962915\n          - 56.67228284198\n          - 105.718994092\n      -   - 56.67228284198\n          - 52.62002215236\n          - 19.90293970874\n      -   - 105.718994092\n          - 19.90293970874\n          - 1.297978879663\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 147.2292962915\n          - 56.67228284198\n          - 105.718994092\n      -   - 56.67228284198\n          - 52.62002215236\n          - 19.90293970874\n      -   - 105.718994092\n          - 19.90293970874\n          - 1.297978879664\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 
3350.123265148\n          - 1460.313549849\n          - -683.3879923253\n      -   - 1460.313549849\n          - 2132.90406945\n          - 491.0039053136\n      -   - -683.3879923253\n          - 491.0039053136\n          - 341.2992596768\n  -   -   - 459.1360054272\n          - 176.8440075451\n          - 527.6634588185\n      -   - 176.8440075451\n          - 209.2614271428\n          - 151.24310776\n      -   - 527.6634588185\n          - 151.24310776\n          - -67.51764137812\n  -   -   - 459.1360054272\n          - 176.8440075451\n          - 527.6634588185\n      -   - 176.8440075451\n          - 209.2614271428\n          - 151.24310776\n      -   - 527.6634588185\n          - 151.24310776\n          - -67.51764137812\n  -   -   - -3856.411663325\n          - -1744.213523774\n          - 2262.533558079\n      -   - -1744.213523774\n          - -2684.929962548\n          - -210.0096145524\n      -   - 2262.533558079\n          - -210.0096145524\n          - -71.22779112152\n  -   -   - 14481.06389629\n          - 6304.851879955\n          - -2851.437438832\n      -   - 6304.851879955\n          - 9273.533330841\n          - 1517.67089317\n      -   - -2851.437438832\n          - 1517.67089317\n          - 32.41624891304\n  -   -   - 459.1360054272\n          - 176.8440075451\n          - 527.6634588185\n      -   - 176.8440075451\n          - 209.2614271428\n          - 151.24310776\n      -   - 527.6634588185\n          - 151.24310776\n          - -67.51764137812\n  -   -   - 459.1360054272\n          - 176.8440075451\n          - 527.6634588185\n      -   - 176.8440075451\n          - 209.2614271428\n          - 151.24310776\n      -   - 527.6634588185\n          - 151.24310776\n          - -67.51764137812\n  -   -   - -2045.110581511\n          - -913.7367115119\n          - 1103.831669941\n      -   - -913.7367115119\n          - -1367.741368628\n          - -163.7437421998\n      -   - 1103.831669941\n          - -163.7437421998\n          - 
-316.398543429\n  -   -   - 662.0714264794\n          - 287.5911592069\n          - 90.92844099577\n      -   - 287.5911592069\n          - 454.4431485291\n          - 203.8681218118\n      -   - 90.92844099577\n          - 203.8681218118\n          - 25.01577261847\n  -   -   - 459.1360054272\n          - 176.8440075451\n          - 527.6634588185\n      -   - 176.8440075451\n          - 209.2614271428\n          - 151.24310776\n      -   - 527.6634588185\n          - 151.24310776\n          - -67.51764137812\n  -   -   - 459.1360054272\n          - 176.8440075451\n          - 527.6634588185\n      -   - 176.8440075451\n          - 209.2614271428\n          - 151.24310776\n      -   - 527.6634588185\n          - 151.24310776\n          - -67.51764137812\n  -   -   - -3856.411663325\n          - -1744.213523774\n          - 2262.533558079\n      -   - -1744.213523774\n          - -2684.929962548\n          - -210.0096145524\n      -   - 2262.533558079\n          - -210.0096145524\n          - -71.22779112153\n  -   -   - 14481.06389629\n          - 6304.851879955\n          - -2851.437438832\n      -   - 6304.851879955\n          - 9273.533330841\n          - 1517.67089317\n      -   - -2851.437438832\n          - 1517.67089317\n          - 32.41624891305\n  -   -   - -2045.110581511\n          - -913.7367115119\n          - 1103.831669941\n      -   - -913.7367115119\n          - -1367.741368628\n          - -163.7437421998\n      -   - 1103.831669941\n          - -163.7437421998\n          - -316.398543429\n  -   -   - 662.0714264794\n          - 287.5911592069\n          - 90.92844099577\n      -   - 287.5911592069\n          - 454.4431485291\n          - 203.8681218118\n      -   - 90.92844099577\n          - 203.8681218118\n          - 25.01577261847\n  -   -   - 459.1360054272\n          - 176.8440075451\n          - 527.6634588185\n      -   - 176.8440075451\n          - 209.2614271428\n          - 151.24310776\n      -   - 527.6634588185\n          - 
151.24310776\n          - -67.51764137812\n  -   -   - 459.1360054272\n          - 176.8440075451\n          - 527.6634588185\n      -   - 176.8440075451\n          - 209.2614271428\n          - 151.24310776\n      -   - 527.6634588185\n          - 151.24310776\n          - -67.51764137812\n"
  },
  {
    "path": "tests/optimagic/optimizers/_pounders/fixtures/update_residual_model_with_new_accepted_x.yaml",
    "content": "---\nbest_x:\n  - 0.15\n  - 0.008\n  - 0.01\ndelta: 0.025\nlinear_terms:\n  -   - 93.72927818337\n      - 79.52774973986\n      - 68.0460977844\n      - 53.31267916955\n      - 46.22344511051\n      - 26.56888710767\n      - -16.71276506001\n      - -91.11716265854\n      - -16.71276506001\n      - -91.11716265855\n      - -591.8618521414\n      - 268.2130703199\n      - 120.9383814367\n      - 100.8847372434\n      - 59.6297183433\n      - 46.17027791586\n      - 37.07406190688\n      - 93.72927818338\n      - 79.52774973994\n      - 68.0460977844\n      - 53.31267916955\n      - 46.22344511051\n      - 26.56888710767\n      - -16.71276506001\n      - -91.11716265855\n      - -16.71276506001\n      - -91.11716265854\n      - -591.8618521414\n      - 268.2130703199\n      - 120.9383814367\n      - 100.8847372434\n      - 59.6297183433\n      - 46.17027791586\n      - 37.07406190688\n      - 93.72927818337\n      - 79.52774973995\n      - 68.0460977844\n      - 53.31267916955\n      - 46.22344511055\n      - 26.56888710767\n      - -16.71276506001\n      - -91.11716265855\n      - -16.71276505999\n      - -91.11716265854\n      - -591.8618521414\n      - 268.2130703199\n      - 120.9383814367\n      - 100.8847372434\n      - 59.6297183433\n      - 46.17027791586\n      - 37.07406190688\n      - 93.72927818337\n      - 79.52774973986\n      - 68.0460977844\n      - 53.31267916961\n      - 46.22344511055\n      - 26.56888710767\n      - -16.71276506001\n      - -91.11716265855\n      - -16.71276506001\n      - -91.11716265855\n      - -591.8618521414\n      - 268.2130703199\n      - 120.9383814367\n      - 100.8847372434\n      - 59.6297183433\n      - 46.17027791586\n      - 37.07406190688\n      - 93.72927818337\n      - 68.0460977844\n      - 6.220382700427\n      - 818.3501514529\n      - 818.3501514529\n      - 818.3501514529\n      - 33.51976192863\n      - 93.72927818337\n      - 68.0460977844\n      - 6.220382700427\n      - 818.3501514529\n    
  - 818.3501514529\n      - 818.3501514529\n      - 33.51976192863\n      - 93.7292781834\n      - 68.0460977844\n      - 6.22038270044\n      - 818.3501514529\n      - 818.3501514529\n      - 818.3501514529\n      - 33.51976192863\n      - 93.72927818337\n      - 68.0460977844\n      - 6.220382700427\n      - 818.3501514529\n      - 33.51976192863\n      - 818.3501514529\n      - 818.3501514529\n      - 33.51976192863\n      - 93.7292781834\n      - 68.0460977844\n      - 46.22344511055\n      - 6.220382700427\n      - -46.05049956355\n      - -46.05049956355\n      - -184.5483589944\n      - 818.3501514529\n      - 94.44100065056\n      - 52.15519868068\n      - 33.51976192863\n      - 93.7292781834\n      - 68.0460977844\n      - 46.22344511055\n      - 6.22038270044\n      - -46.05049956355\n      - -46.05049956355\n      - -184.5483589944\n      - 818.3501514529\n      - 94.44100065054\n      - 52.15519868068\n      - 33.51976192863\n      - 93.72927818338\n      - 68.0460977844\n      - 46.22344511055\n      - 6.220382700412\n      - -46.05049956355\n      - -46.05049956354\n      - -184.5483589944\n      - 818.3501514529\n      - 94.44100065055\n      - 52.15519868068\n      - 33.51976192863\n      - 93.72927818337\n      - 79.52774973994\n      - 68.0460977844\n      - 53.31267916954\n      - 46.22344511055\n      - 26.56888710767\n      - -91.11716265854\n      - -91.11716265855\n      - -591.8618521414\n      - 268.2130703199\n      - 120.9383814367\n      - 100.8847372434\n      - 59.6297183433\n      - 46.17027791586\n      - 37.07406190688\n      - 818.3501514529\n      - 818.3501514529\n      - 818.3501514529\n      - 818.3501514529\n      - 818.3501514529\n      - 818.3501514529\n      - 93.72927818337\n      - 68.0460977844\n      - 46.22344511055\n      - 6.220382700427\n      - -46.05049956355\n      - -184.5483589944\n      - -46.05049956354\n      - -184.5483589944\n      - 818.3501514529\n      - 94.44100065056\n      - 52.15519868068\n      - 
33.51976192863\n      - 93.72927818338\n      - 68.0460977844\n      - 46.22344511055\n      - 6.220382700427\n      - -46.05049956355\n      - -184.5483589944\n      - -46.05049956354\n      - -184.5483589944\n      - 818.3501514529\n      - 94.44100065056\n      - 52.15519868068\n      - 33.51976192863\n      - 93.72927818338\n      - 68.0460977844\n      - 46.22344511055\n      - 6.22038270044\n      - -46.05049956355\n      - -184.5483589944\n      - -46.05049956355\n      - -184.5483589944\n      - 818.3501514529\n      - 94.44100065055\n      - 52.15519868068\n      - 33.51976192863\n      - 818.3501514529\n      - 93.72927818337\n      - 68.0460977844\n      - 6.220382700427\n      - 818.3501514529\n      - 33.51976192863\n      - 818.3501514529\n      - 33.51976192863\n      - 818.3501514529\n      - 818.3501514529\n      - 818.3501514529\n      - -16.71276505999\n      - -16.71276505999\n      - 93.72927818338\n      - 68.0460977844\n      - -16.71276506001\n      - -16.71276506001\n      - -591.8618521414\n      - 120.9383814367\n      - -16.71276506001\n      - -16.71276506001\n      - 93.72927818337\n      - 68.0460977844\n      - -591.8618521414\n      - 120.9383814367\n      - -16.71276506001\n      - -16.71276506001\n  -   - -64.08543563383\n      - -58.95719282476\n      - -55.06353725601\n      - -53.16548062441\n      - -49.45660019523\n      - -44.9079525408\n      - -38.80557953615\n      - -36.01993057676\n      - -38.80557953615\n      - -36.01993057676\n      - -42.20725725468\n      - -32.42227560985\n      - -51.14899556075\n      - 779.0017047841\n      - 23.26780440071\n      - 7.758875524986\n      - 3.058627447971\n      - -64.08543563383\n      - -58.95719282476\n      - -55.06353725601\n      - -53.16548062441\n      - -49.45660019523\n      - -44.9079525408\n      - -38.80557953615\n      - -36.01993057676\n      - -38.80557953615\n      - -36.01993057676\n      - -42.20725725468\n      - -32.42227560985\n      - -51.14899556075\n    
  - 779.0017047841\n      - 23.26780440071\n      - 7.758875524986\n      - 3.058627447971\n      - -64.08543563383\n      - -58.95719282476\n      - -55.06353725601\n      - -53.16548062441\n      - -49.45660019523\n      - -44.9079525408\n      - -38.80557953615\n      - -36.01993057676\n      - -38.80557953615\n      - -36.01993057676\n      - -42.20725725468\n      - -32.42227560985\n      - -51.14899556075\n      - 779.0017047841\n      - 23.26780440071\n      - 7.758875524986\n      - 3.058627447971\n      - -64.08543563383\n      - -58.95719282476\n      - -55.06353725601\n      - -53.1654806244\n      - -49.45660019523\n      - -44.9079525408\n      - -38.80557953615\n      - -36.01993057676\n      - -38.80557953615\n      - -36.01993057676\n      - -42.20725725468\n      - -32.42227560985\n      - -51.14899556075\n      - 779.0017047841\n      - 23.26780440071\n      - 7.758875524986\n      - 3.058627447972\n      - -64.08543563383\n      - -55.06353725601\n      - -41.42976641553\n      - -22.11746019896\n      - -22.11746019896\n      - -22.11746019896\n      - 1.865866778257\n      - -64.08543563383\n      - -55.06353725601\n      - -41.42976641553\n      - -22.11746019896\n      - -22.11746019896\n      - -22.11746019896\n      - 1.865866778257\n      - -64.08543563383\n      - -55.06353725601\n      - -41.42976641553\n      - -22.11746019896\n      - -22.11746019896\n      - -22.11746019896\n      - 1.865866778257\n      - -64.08543563383\n      - -55.06353725601\n      - -41.42976641553\n      - -22.11746019896\n      - 1.865866778257\n      - -22.11746019896\n      - -22.11746019896\n      - 1.865866778257\n      - -64.08543563383\n      - -55.06353725601\n      - -49.45660019523\n      - -41.42976641553\n      - -36.97445451644\n      - -36.97445451644\n      - -36.42960974813\n      - -22.11746019896\n      - -91.56047245988\n      - 12.83135652938\n      - 1.865866778257\n      - -64.08543563383\n      - -55.06353725601\n      - -49.45660019523\n 
     - -41.42976641553\n      - -36.97445451644\n      - -36.97445451644\n      - -36.42960974813\n      - -22.11746019896\n      - -91.56047245988\n      - 12.83135652938\n      - 1.865866778257\n      - -64.08543563383\n      - -55.06353725601\n      - -49.45660019523\n      - -41.42976641554\n      - -36.97445451644\n      - -36.97445451644\n      - -36.42960974813\n      - -22.11746019896\n      - -91.56047245988\n      - 12.83135652938\n      - 1.865866778257\n      - -64.08543563383\n      - -58.95719282476\n      - -55.06353725601\n      - -53.16548062441\n      - -49.45660019523\n      - -44.9079525408\n      - -36.01993057676\n      - -36.01993057676\n      - -42.20725725468\n      - -32.42227560985\n      - -51.14899556075\n      - 779.0017047841\n      - 23.26780440071\n      - 7.758875524986\n      - 3.058627447972\n      - -22.11746019896\n      - -22.11746019896\n      - -22.11746019896\n      - -22.11746019896\n      - -22.11746019896\n      - -22.11746019896\n      - -64.08543563383\n      - -55.06353725601\n      - -49.45660019523\n      - -41.42976641553\n      - -36.97445451644\n      - -36.42960974813\n      - -36.97445451644\n      - -36.42960974813\n      - -22.11746019896\n      - -91.56047245988\n      - 12.83135652938\n      - 1.865866778257\n      - -64.08543563383\n      - -55.06353725601\n      - -49.45660019523\n      - -41.42976641553\n      - -36.97445451644\n      - -36.42960974813\n      - -36.97445451644\n      - -36.42960974813\n      - -22.11746019896\n      - -91.56047245988\n      - 12.83135652938\n      - 1.865866778257\n      - -64.08543563383\n      - -55.06353725601\n      - -49.45660019523\n      - -41.42976641553\n      - -36.97445451644\n      - -36.42960974813\n      - -36.97445451644\n      - -36.42960974813\n      - -22.11746019896\n      - -91.56047245988\n      - 12.83135652938\n      - 1.865866778257\n      - -22.11746019896\n      - -64.08543563383\n      - -55.06353725601\n      - -41.42976641553\n      - 
-22.11746019896\n      - 1.865866778257\n      - -22.11746019896\n      - 1.865866778257\n      - -22.11746019896\n      - -22.11746019896\n      - -22.11746019896\n      - -38.80557953615\n      - -38.80557953615\n      - -64.08543563383\n      - -55.06353725601\n      - -38.80557953615\n      - -38.80557953615\n      - -42.20725725468\n      - -51.14899556075\n      - -38.80557953615\n      - -38.80557953615\n      - -64.08543563383\n      - -55.06353725601\n      - -42.20725725468\n      - -51.14899556075\n      - -38.80557953615\n      - -38.80557953615\n  -   - 36.49571808313\n      - 40.75599199724\n      - 42.73053159154\n      - 44.48422885803\n      - 44.06292704726\n      - 43.77082966126\n      - 43.87997490331\n      - 51.29080769453\n      - 43.87997490331\n      - 51.29080769453\n      - 142.8645261718\n      - -28.76442742577\n      - -4.013910726576\n      - 6.349474762352\n      - 3.32795307968\n      - 3.743284626869\n      - 3.766796291069\n      - 36.49571808312\n      - 40.75599199722\n      - 42.73053159154\n      - 44.48422885803\n      - 44.06292704726\n      - 43.77082966126\n      - 43.87997490331\n      - 51.29080769453\n      - 43.87997490331\n      - 51.29080769453\n      - 142.8645261718\n      - -28.76442742577\n      - -4.013910726575\n      - 6.349474762352\n      - 3.32795307968\n      - 3.743284626869\n      - 3.766796291069\n      - 36.49571808313\n      - 40.75599199722\n      - 42.73053159154\n      - 44.48422885803\n      - 44.06292704726\n      - 43.77082966126\n      - 43.87997490331\n      - 51.29080769453\n      - 43.8799749033\n      - 51.29080769453\n      - 142.8645261718\n      - -28.76442742578\n      - -4.013910726575\n      - 6.349474762352\n      - 3.32795307968\n      - 3.743284626868\n      - 3.766796291069\n      - 36.49571808313\n      - 40.75599199724\n      - 42.73053159154\n      - 44.48422885802\n      - 44.06292704726\n      - 43.77082966126\n      - 43.87997490331\n      - 51.29080769453\n      - 
43.87997490331\n      - 51.29080769453\n      - 142.8645261718\n      - -28.76442742578\n      - -4.013910726577\n      - 6.349474762352\n      - 3.32795307968\n      - 3.743284626869\n      - 3.766796291069\n      - 36.49571808313\n      - 42.73053159154\n      - 43.46544414082\n      - -133.4358525068\n      - -133.4358525068\n      - -133.4358525068\n      - 3.702417354566\n      - 36.49571808313\n      - 42.73053159154\n      - 43.46544414082\n      - -133.4358525068\n      - -133.4358525068\n      - -133.4358525068\n      - 3.702417354565\n      - 36.49571808312\n      - 42.73053159154\n      - 43.46544414082\n      - -133.4358525068\n      - -133.4358525068\n      - -133.4358525068\n      - 3.702417354566\n      - 36.49571808313\n      - 42.73053159154\n      - 43.46544414082\n      - -133.4358525068\n      - 3.702417354566\n      - -133.4358525068\n      - -133.4358525068\n      - 3.702417354566\n      - 36.49571808312\n      - 42.73053159154\n      - 44.06292704726\n      - 43.46544414082\n      - 45.87740009259\n      - 45.87740009259\n      - 66.4411216144\n      - -133.4358525068\n      - -0.8871808239121\n      - 3.603391195481\n      - 3.702417354565\n      - 36.49571808312\n      - 42.73053159154\n      - 44.06292704726\n      - 43.46544414082\n      - 45.87740009259\n      - 45.87740009259\n      - 66.4411216144\n      - -133.4358525068\n      - -0.8871808239087\n      - 3.60339119548\n      - 3.702417354565\n      - 36.49571808312\n      - 42.73053159154\n      - 44.06292704726\n      - 43.46544414082\n      - 45.87740009259\n      - 45.87740009258\n      - 66.4411216144\n      - -133.4358525068\n      - -0.8871808239101\n      - 3.60339119548\n      - 3.702417354566\n      - 36.49571808313\n      - 40.75599199722\n      - 42.73053159154\n      - 44.48422885803\n      - 44.06292704726\n      - 43.77082966126\n      - 51.29080769453\n      - 51.29080769453\n      - 142.8645261718\n      - -28.76442742578\n      - -4.013910726577\n      - 
6.349474762352\n      - 3.327953079681\n      - 3.743284626869\n      - 3.766796291069\n      - -133.4358525068\n      - -133.4358525068\n      - -133.4358525068\n      - -133.4358525068\n      - -133.4358525068\n      - -133.4358525068\n      - 36.49571808313\n      - 42.73053159154\n      - 44.06292704726\n      - 43.46544414082\n      - 45.87740009259\n      - 66.4411216144\n      - 45.87740009258\n      - 66.4411216144\n      - -133.4358525068\n      - -0.8871808239114\n      - 3.603391195481\n      - 3.702417354565\n      - 36.49571808312\n      - 42.73053159154\n      - 44.06292704726\n      - 43.46544414082\n      - 45.87740009259\n      - 66.4411216144\n      - 45.87740009258\n      - 66.4411216144\n      - -133.4358525068\n      - -0.8871808239114\n      - 3.60339119548\n      - 3.702417354565\n      - 36.49571808312\n      - 42.73053159154\n      - 44.06292704726\n      - 43.46544414082\n      - 45.87740009259\n      - 66.4411216144\n      - 45.87740009259\n      - 66.4411216144\n      - -133.4358525068\n      - -0.8871808239108\n      - 3.60339119548\n      - 3.702417354565\n      - -133.4358525068\n      - 36.49571808313\n      - 42.73053159154\n      - 43.46544414082\n      - -133.4358525068\n      - 3.702417354565\n      - -133.4358525068\n      - 3.702417354565\n      - -133.4358525068\n      - -133.4358525068\n      - -133.4358525068\n      - 43.8799749033\n      - 43.8799749033\n      - 36.49571808312\n      - 42.73053159154\n      - 43.87997490331\n      - 43.87997490331\n      - 142.8645261718\n      - -4.013910726576\n      - 43.87997490331\n      - 43.87997490331\n      - 36.49571808313\n      - 42.73053159154\n      - 142.8645261718\n      - -4.013910726576\n      - 43.87997490331\n      - 43.87997490331\nlinear_terms_expected:\n  -   - 83.4551262143\n      - 70.20115871443\n      - 59.39117854221\n      - 45.24391055618\n      - 38.62628748021\n      - 19.82198478816\n      - -22.14622491455\n      - -95.66099878835\n      - -22.14622491455\n 
     - -95.66099878836\n      - -596.6157739812\n      - 265.7919023746\n      - 118.7321519868\n      - 97.69885198231\n      - 57.88119066151\n      - 44.6773124466\n      - 35.78441925523\n      - 83.45512621431\n      - 70.20115871451\n      - 59.39117854221\n      - 45.24391055618\n      - 38.62628748021\n      - 19.82198478816\n      - -22.14622491455\n      - -95.66099878836\n      - -22.14622491455\n      - -95.66099878835\n      - -596.6157739812\n      - 265.7919023746\n      - 118.7321519868\n      - 97.69885198231\n      - 57.88119066151\n      - 44.6773124466\n      - 35.78441925523\n      - 83.4551262143\n      - 70.20115871453\n      - 59.39117854221\n      - 45.24391055618\n      - 38.62628748025\n      - 19.82198478816\n      - -22.14622491455\n      - -95.66099878836\n      - -22.14622491453\n      - -95.66099878835\n      - -596.6157739812\n      - 265.7919023746\n      - 118.7321519868\n      - 97.69885198231\n      - 57.88119066151\n      - 44.6773124466\n      - 35.78441925523\n      - 83.4551262143\n      - 70.20115871443\n      - 59.39117854221\n      - 45.24391055624\n      - 38.62628748025\n      - 19.82198478816\n      - -22.14622491455\n      - -95.66099878836\n      - -22.14622491455\n      - -95.66099878836\n      - -596.6157739812\n      - 265.7919023746\n      - 118.7321519868\n      - 97.69885198231\n      - 57.88119066151\n      - 44.6773124466\n      - 35.78441925523\n      - 83.4551262143\n      - 59.39117854221\n      - 0.1871101139566\n      - 816.736081667\n      - 816.736081667\n      - 816.736081667\n      - 32.31842399979\n      - 83.4551262143\n      - 59.39117854221\n      - 0.1871101139567\n      - 816.736081667\n      - 816.736081667\n      - 816.736081667\n      - 32.31842399979\n      - 83.45512621433\n      - 59.39117854221\n      - 0.1871101139701\n      - 816.736081667\n      - 816.736081667\n      - 816.736081667\n      - 32.31842399979\n      - 83.4551262143\n      - 59.39117854221\n      - 0.1871101139567\n      
- 816.736081667\n      - 32.31842399979\n      - 816.736081667\n      - 816.736081667\n      - 32.31842399979\n      - 83.45512621433\n      - 59.39117854221\n      - 38.62628748025\n      - 0.1871101139566\n      - -50.98637547872\n      - -50.98637547872\n      - -188.858358915\n      - 816.736081667\n      - 92.44936101936\n      - 50.5433752969\n      - 32.31842399979\n      - 83.45512621433\n      - 59.39117854221\n      - 38.62628748025\n      - 0.18711011397\n      - -50.98637547872\n      - -50.98637547872\n      - -188.858358915\n      - 816.736081667\n      - 92.44936101934\n      - 50.54337529691\n      - 32.31842399979\n      - 83.45512621431\n      - 59.39117854221\n      - 38.62628748025\n      - 0.1871101139419\n      - -50.98637547872\n      - -50.98637547871\n      - -188.858358915\n      - 816.736081667\n      - 92.44936101935\n      - 50.54337529691\n      - 32.31842399979\n      - 83.4551262143\n      - 70.20115871451\n      - 59.39117854221\n      - 45.24391055617\n      - 38.62628748025\n      - 19.82198478816\n      - -95.66099878835\n      - -95.66099878836\n      - -596.6157739812\n      - 265.7919023746\n      - 118.7321519868\n      - 97.69885198231\n      - 57.8811906615\n      - 44.6773124466\n      - 35.78441925523\n      - 816.736081667\n      - 816.736081667\n      - 816.736081667\n      - 816.736081667\n      - 816.736081667\n      - 816.736081667\n      - 83.4551262143\n      - 59.39117854221\n      - 38.62628748025\n      - 0.1871101139567\n      - -50.98637547872\n      - -188.858358915\n      - -50.98637547871\n      - -188.858358915\n      - 816.736081667\n      - 92.44936101936\n      - 50.5433752969\n      - 32.31842399979\n      - 83.45512621431\n      - 59.39117854221\n      - 38.62628748025\n      - 0.1871101139566\n      - -50.98637547872\n      - -188.858358915\n      - -50.98637547871\n      - -188.858358915\n      - 816.736081667\n      - 92.44936101936\n      - 50.54337529691\n      - 32.31842399979\n      - 
83.45512621431\n      - 59.39117854221\n      - 38.62628748025\n      - 0.18711011397\n      - -50.98637547872\n      - -188.858358915\n      - -50.98637547872\n      - -188.858358915\n      - 816.736081667\n      - 92.44936101935\n      - 50.54337529691\n      - 32.31842399979\n      - 816.736081667\n      - 83.4551262143\n      - 59.39117854221\n      - 0.1871101139567\n      - 816.736081667\n      - 32.31842399979\n      - 816.736081667\n      - 32.31842399979\n      - 816.736081667\n      - 816.736081667\n      - 816.736081667\n      - -22.14622491453\n      - -22.14622491453\n      - 83.45512621431\n      - 59.39117854221\n      - -22.14622491455\n      - -22.14622491455\n      - -596.6157739812\n      - 118.7321519868\n      - -22.14622491455\n      - -22.14622491455\n      - 83.4551262143\n      - 59.39117854221\n      - -596.6157739812\n      - 118.7321519868\n      - -22.14622491455\n      - -22.14622491455\n  -   - -65.70818330941\n      - -60.39894618479\n      - -56.24495202103\n      - -56.74595477465\n      - -51.00210223596\n      - -46.1667378912\n      - -39.8057155928\n      - -36.84709189045\n      - -39.8057155928\n      - -36.84709189045\n      - -42.71855980194\n      - -33.37423700032\n      - -52.29733914833\n      - 792.7694136555\n      - 23.53933160967\n      - 7.777083680153\n      - 3.011421814957\n      - -65.70818330941\n      - -60.39894618479\n      - -56.24495202103\n      - -56.74595477465\n      - -51.00210223596\n      - -46.1667378912\n      - -39.8057155928\n      - -36.84709189045\n      - -39.8057155928\n      - -36.84709189045\n      - -42.71855980194\n      - -33.37423700032\n      - -52.29733914833\n      - 792.7694136555\n      - 23.53933160967\n      - 7.777083680153\n      - 3.011421814957\n      - -65.70818330941\n      - -60.39894618479\n      - -56.24495202103\n      - -56.74595477465\n      - -51.00210223596\n      - -46.1667378912\n      - -39.8057155928\n      - -36.84709189045\n      - -39.80571559279\n      - 
-36.84709189045\n      - -42.71855980194\n      - -33.37423700032\n      - -52.29733914833\n      - 792.7694136555\n      - 23.53933160967\n      - 7.777083680153\n      - 3.011421814957\n      - -65.70818330941\n      - -60.39894618479\n      - -56.24495202103\n      - -56.74595477465\n      - -51.00210223596\n      - -46.1667378912\n      - -39.8057155928\n      - -36.84709189045\n      - -39.8057155928\n      - -36.84709189045\n      - -42.71855980194\n      - -33.37423700032\n      - -52.29733914833\n      - 792.7694136555\n      - 23.53933160967\n      - 7.777083680153\n      - 3.011421814957\n      - -65.70818330941\n      - -56.24495202103\n      - -42.54119035883\n      - -23.31820321215\n      - -23.31820321215\n      - -23.31820321215\n      - 1.805173509361\n      - -65.70818330941\n      - -56.24495202103\n      - -42.54119035883\n      - -23.31820321215\n      - -23.31820321215\n      - -23.31820321215\n      - 1.805173509361\n      - -65.70818330941\n      - -56.24495202103\n      - -42.54119035883\n      - -23.31820321215\n      - -23.31820321215\n      - -23.31820321215\n      - 1.805173509361\n      - -65.70818330941\n      - -56.24495202103\n      - -42.54119035883\n      - -23.31820321215\n      - 1.805173509361\n      - -23.31820321215\n      - -23.31820321215\n      - 1.805173509361\n      - -65.70818330941\n      - -56.24495202103\n      - -51.00210223596\n      - -42.54119035883\n      - -37.88298121996\n      - -37.88298121996\n      - -37.16785677463\n      - -23.31820321215\n      - -93.40145763629\n      - 12.92907306798\n      - 1.805173509361\n      - -65.70818330941\n      - -56.24495202103\n      - -51.00210223596\n      - -42.54119035883\n      - -37.88298121996\n      - -37.88298121996\n      - -37.16785677463\n      - -23.31820321215\n      - -93.40145763629\n      - 12.92907306798\n      - 1.805173509361\n      - -65.70818330941\n      - -56.24495202103\n      - -51.00210223596\n      - -42.54119035883\n      - -37.88298121996\n   
   - -37.88298121996\n      - -37.16785677463\n      - -23.31820321215\n      - -93.40145763629\n      - 12.92907306798\n      - 1.805173509361\n      - -65.70818330941\n      - -60.39894618479\n      - -56.24495202103\n      - -56.74595477465\n      - -51.00210223596\n      - -46.1667378912\n      - -36.84709189045\n      - -36.84709189045\n      - -42.71855980194\n      - -33.37423700032\n      - -52.29733914833\n      - 792.7694136555\n      - 23.53933160967\n      - 7.777083680153\n      - 3.011421814957\n      - -23.31820321215\n      - -23.31820321215\n      - -23.31820321215\n      - -23.31820321215\n      - -23.31820321215\n      - -23.31820321215\n      - -65.70818330941\n      - -56.24495202103\n      - -51.00210223596\n      - -42.54119035883\n      - -37.88298121996\n      - -37.16785677463\n      - -37.88298121996\n      - -37.16785677463\n      - -23.31820321215\n      - -93.40145763629\n      - 12.92907306798\n      - 1.805173509361\n      - -65.70818330941\n      - -56.24495202103\n      - -51.00210223596\n      - -42.54119035883\n      - -37.88298121996\n      - -37.16785677463\n      - -37.88298121996\n      - -37.16785677463\n      - -23.31820321215\n      - -93.40145763629\n      - 12.92907306798\n      - 1.805173509361\n      - -65.70818330941\n      - -56.24495202103\n      - -51.00210223596\n      - -42.54119035883\n      - -37.88298121996\n      - -37.16785677463\n      - -37.88298121996\n      - -37.16785677463\n      - -23.31820321215\n      - -93.40145763629\n      - 12.92907306798\n      - 1.805173509361\n      - -23.31820321215\n      - -65.70818330941\n      - -56.24495202103\n      - -42.54119035883\n      - -23.31820321215\n      - 1.805173509361\n      - -23.31820321215\n      - 1.805173509361\n      - -23.31820321215\n      - -23.31820321215\n      - -23.31820321215\n      - -39.80571559279\n      - -39.80571559279\n      - -65.70818330941\n      - -56.24495202103\n      - -39.8057155928\n      - -39.8057155928\n      - 
-42.71855980194\n      - -52.29733914833\n      - -39.8057155928\n      - -39.8057155928\n      - -65.70818330941\n      - -56.24495202103\n      - -42.71855980194\n      - -52.29733914833\n      - -39.8057155928\n      - -39.8057155928\n  -   - 35.67527604537\n      - 40.13513697208\n      - 42.22265142152\n      - 44.3706588541\n      - 43.79191549807\n      - 43.60185566328\n      - 43.89691265518\n      - 51.559374423\n      - 43.89691265518\n      - 51.559374423\n      - 144.6484148813\n      - -29.52793630843\n      - -4.29051063361\n      - 4.275018249209\n      - 3.077593180459\n      - 3.571453211328\n      - 3.635446234161\n      - 35.67527604537\n      - 40.13513697207\n      - 42.22265142152\n      - 44.3706588541\n      - 43.79191549807\n      - 43.60185566328\n      - 43.89691265518\n      - 51.559374423\n      - 43.89691265518\n      - 51.559374423\n      - 144.6484148813\n      - -29.52793630843\n      - -4.290510633609\n      - 4.275018249209\n      - 3.077593180459\n      - 3.571453211328\n      - 3.635446234161\n      - 35.67527604537\n      - 40.13513697206\n      - 42.22265142152\n      - 44.3706588541\n      - 43.79191549806\n      - 43.60185566328\n      - 43.89691265518\n      - 51.559374423\n      - 43.89691265518\n      - 51.559374423\n      - 144.6484148813\n      - -29.52793630843\n      - -4.290510633609\n      - 4.275018249209\n      - 3.077593180459\n      - 3.571453211327\n      - 3.635446234161\n      - 35.67527604537\n      - 40.13513697208\n      - 42.22265142152\n      - 44.37065885408\n      - 43.79191549806\n      - 43.60185566328\n      - 43.89691265518\n      - 51.559374423\n      - 43.89691265518\n      - 51.559374423\n      - 144.6484148813\n      - -29.52793630843\n      - -4.290510633611\n      - 4.275018249209\n      - 3.077593180459\n      - 3.571453211328\n      - 3.635446234161\n      - 35.67527604537\n      - 42.22265142152\n      - 43.39074205828\n      - -135.8537067485\n      - -135.8537067485\n      - 
-135.8537067485\n      - 3.585520164357\n      - 35.67527604537\n      - 42.22265142152\n      - 43.39074205828\n      - -135.8537067485\n      - -135.8537067485\n      - -135.8537067485\n      - 3.585520164356\n      - 35.67527604536\n      - 42.22265142152\n      - 43.39074205828\n      - -135.8537067485\n      - -135.8537067485\n      - -135.8537067485\n      - 3.585520164357\n      - 35.67527604537\n      - 42.22265142152\n      - 43.39074205828\n      - -135.8537067485\n      - 3.585520164357\n      - -135.8537067485\n      - -135.8537067485\n      - 3.585520164357\n      - 35.67527604536\n      - 42.22265142152\n      - 43.79191549806\n      - 43.39074205828\n      - 45.9986361207\n      - 45.9986361207\n      - 66.99917852268\n      - -135.8537067485\n      - -0.9909636364664\n      - 3.40064839132\n      - 3.585520164357\n      - 35.67527604536\n      - 42.22265142152\n      - 43.79191549806\n      - 43.39074205828\n      - 45.9986361207\n      - 45.9986361207\n      - 66.99917852268\n      - -135.8537067485\n      - -0.990963636463\n      - 3.400648391319\n      - 3.585520164356\n      - 35.67527604537\n      - 42.22265142152\n      - 43.79191549806\n      - 43.39074205829\n      - 45.9986361207\n      - 45.9986361207\n      - 66.99917852268\n      - -135.8537067485\n      - -0.9909636364644\n      - 3.400648391319\n      - 3.585520164357\n      - 35.67527604537\n      - 40.13513697207\n      - 42.22265142152\n      - 44.3706588541\n      - 43.79191549806\n      - 43.60185566328\n      - 51.559374423\n      - 51.559374423\n      - 144.6484148813\n      - -29.52793630843\n      - -4.290510633611\n      - 4.275018249209\n      - 3.077593180459\n      - 3.571453211328\n      - 3.635446234161\n      - -135.8537067485\n      - -135.8537067485\n      - -135.8537067485\n      - -135.8537067485\n      - -135.8537067485\n      - -135.8537067485\n      - 35.67527604537\n      - 42.22265142152\n      - 43.79191549806\n      - 43.39074205828\n      - 45.9986361207\n   
   - 66.99917852268\n      - 45.9986361207\n      - 66.99917852268\n      - -135.8537067485\n      - -0.9909636364657\n      - 3.40064839132\n      - 3.585520164356\n      - 35.67527604537\n      - 42.22265142152\n      - 43.79191549806\n      - 43.39074205828\n      - 45.9986361207\n      - 66.99917852268\n      - 45.9986361207\n      - 66.99917852268\n      - -135.8537067485\n      - -0.9909636364657\n      - 3.400648391319\n      - 3.585520164357\n      - 35.67527604537\n      - 42.22265142152\n      - 43.79191549806\n      - 43.39074205828\n      - 45.9986361207\n      - 66.99917852268\n      - 45.9986361207\n      - 66.99917852268\n      - -135.8537067485\n      - -0.9909636364651\n      - 3.400648391319\n      - 3.585520164356\n      - -135.8537067485\n      - 35.67527604537\n      - 42.22265142152\n      - 43.39074205828\n      - -135.8537067485\n      - 3.585520164357\n      - -135.8537067485\n      - 3.585520164357\n      - -135.8537067485\n      - -135.8537067485\n      - -135.8537067485\n      - 43.89691265518\n      - 43.89691265518\n      - 35.67527604537\n      - 42.22265142152\n      - 43.89691265518\n      - 43.89691265518\n      - 144.6484148813\n      - -4.29051063361\n      - 43.89691265518\n      - 43.89691265518\n      - 35.67527604537\n      - 42.22265142152\n      - 144.6484148813\n      - -4.29051063361\n      - 43.89691265518\n      - 43.89691265518\nresiduals:\n  - 21.53511643627\n  - 14.80453604351\n  - 6.548558251064\n  - 12.54188075473\n  - 9.282890198608\n  - 2.859555210712\n  - 0.9381817894678\n  - 0.2048532883114\n  - 0.8881817894678\n  - 0.3798532883114\n  - -0.9101956814319\n  - -1.36444138824\n  - -0.9351994446357\n  - -1.055070381505\n  - -1.111335532899\n  - -0.1703442432756\n  - 1.580641245921\n  - 19.23511643627\n  - 13.00453604351\n  - 13.94855825106\n  - 11.24188075473\n  - 6.182890198608\n  - -1.240444789288\n  - -0.8618182105322\n  - -1.995146711689\n  - -0.9868182105322\n  - -1.270146711689\n  - -1.135195681432\n  - 
-0.9144413882404\n  - -3.072699444636\n  - -1.317570381505\n  - -0.9238355328992\n  - 0.9546557567244\n  - -0.3318587540789\n  - 8.635116436265\n  - 15.10453604351\n  - 6.148558251063\n  - 4.841880754733\n  - 5.382890198608\n  - 2.059555210712\n  - -3.361818210532\n  - -2.995146711689\n  - -3.311818210532\n  - -2.395146711689\n  - -2.185195681432\n  - -2.63944138824\n  - -1.985199444636\n  - -1.880070381505\n  - -1.711335532899\n  - -1.407844243276\n  - -0.4818587540789\n  - 2.735116436265\n  - 3.404536043506\n  - 3.148558251063\n  - 3.141880754733\n  - 2.482890198608\n  - 0.5595552107122\n  - -0.7618182105322\n  - -2.995146711689\n  - -0.7993182105322\n  - -2.245146711689\n  - -1.885195681432\n  - -1.96444138824\n  - -1.647699444636\n  - -2.292570381505\n  - -1.486335532899\n  - -1.557844243276\n  - -0.8193587540789\n  - 10.13511643627\n  - 4.748558251063\n  - -2.218096467799\n  - -4.369688200573\n  - -3.659688200573\n  - -1.219688200573\n  - -0.3489655844206\n  - 6.635116436265\n  - 2.248558251063\n  - -1.518096467799\n  - -2.939688200573\n  - -4.029688200573\n  - -2.159688200573\n  - -2.038965584421\n  - 5.435116436265\n  - 3.348558251064\n  - -1.818096467799\n  - -2.909688200573\n  - -4.969688200573\n  - -3.469688200573\n  - -0.5389655844206\n  - 6.635116436265\n  - 5.848558251064\n  - -0.918096467799\n  - -4.219688200573\n  - -0.3489655844206\n  - -4.029688200573\n  - -3.659688200573\n  - -0.5389655844206\n  - 5.435116436265\n  - 2.348558251064\n  - -0.0171098013921\n  - -2.718096467799\n  - -4.257793595776\n  - -3.887793595776\n  - -2.006947842151\n  - -2.829688200573\n  - -0.1835757519589\n  - 0.8557490906722\n  - 0.6910344155794\n  - 4.435116436265\n  - 4.348558251064\n  - 0.9828901986079\n  - 0.481903532201\n  - -6.457793595776\n  - -6.137793595776\n  - -1.516947842151\n  - -4.029688200573\n  - -1.013575751959\n  - -0.8342509093278\n  - 1.441034415579\n  - -0.8648835637348\n  - 1.848558251064\n  - 0.6828901986079\n  - 1.081903532201\n  - -5.457793595776\n  
- -4.787793595776\n  - 0.1730521578493\n  - -1.139688200573\n  - -3.263575751959\n  - 0.4057490906722\n  - 4.141034415579\n  - 6.635116436265\n  - 2.104536043506\n  - 4.348558251064\n  - 5.641880754733\n  - -0.1171098013921\n  - -2.640444789288\n  - -3.195146711689\n  - -2.325146711689\n  - -4.777695681432\n  - -5.49444138824\n  - -4.762699444636\n  - -5.027570381505\n  - -3.966335532899\n  - -3.510344243276\n  - -2.694358754079\n  - 7.410311799427\n  - 8.980311799427\n  - 1.290311799427\n  - -4.969688200573\n  - -4.709688200573\n  - -0.659688200573\n  - -0.5648835637348\n  - -2.951441748936\n  - 0.1828901986079\n  - 5.081903532201\n  - 3.342206404224\n  - 2.873052157849\n  - 3.162206404224\n  - 2.983052157849\n  - 0.920311799427\n  - 0.1164242480411\n  - 3.925749090672\n  - 2.761034415579\n  - 9.335116436265\n  - 3.648558251063\n  - -0.3171098013921\n  - -5.718096467799\n  - -2.457793595776\n  - -3.126947842151\n  - -1.897793595776\n  - -2.156947842151\n  - -0.539688200573\n  - -2.693575751959\n  - -0.2742509093278\n  - 2.531034415579\n  - -4.664883563735\n  - 1.548558251064\n  - -7.017109801392\n  - -4.018096467799\n  - -0.7577935957756\n  - -4.526947842151\n  - -0.4677935957756\n  - -3.876947842151\n  - -3.429688200573\n  - -2.813575751959\n  - -0.9442509093278\n  - 0.6610344155794\n  - -3.089688200573\n  - 9.635116436265\n  - 6.848558251064\n  - 0.781903532201\n  - -3.469688200573\n  - -1.108965584421\n  - -3.839688200573\n  - -0.9189655844206\n  - -1.589688200573\n  - -2.159688200573\n  - -1.139688200573\n  - -4.661818210532\n  - -4.211818210532\n  - 10.33511643627\n  - 3.948558251064\n  - -0.3618182105322\n  - -0.3518182105322\n  - -1.477695681432\n  - -2.132699444636\n  - -1.761818210532\n  - -1.471818210532\n  - 9.935116436265\n  - 3.248558251063\n  - -1.997695681432\n  - -2.472699444636\n  - -1.261818210532\n  - -1.211818210532\nresiduals_expected:\n  - 19.6353723292\n  - 12.84033601748\n  - 4.582720606307\n  - 10.57944901886\n  - 7.388011778232\n  - 
1.080920572465\n  - -0.6242875339148\n  - -1.259097736466\n  - -0.6742875339148\n  - -1.084097736466\n  - -3.217135650199\n  - -1.742307946594\n  - -1.554717017541\n  - 1.384057264504\n  - -1.402675175922\n  - -0.4764291246294\n  - 1.296405523471\n  - 17.3353723292\n  - 11.04033601748\n  - 11.98272060631\n  - 9.27944901886\n  - 4.288011778232\n  - -3.019079427535\n  - -2.424287533915\n  - -3.459097736466\n  - -2.549287533915\n  - -2.734097736466\n  - -3.442135650199\n  - -1.292307946594\n  - -3.692217017541\n  - 1.121557264504\n  - -1.215175175922\n  - 0.6485708753706\n  - -0.616094476529\n  - 6.735372329195\n  - 13.14033601748\n  - 4.182720606307\n  - 2.87944901886\n  - 3.488011778232\n  - 0.2809205724649\n  - -4.924287533915\n  - -4.459097736466\n  - -4.874287533915\n  - -3.859097736466\n  - -4.492135650199\n  - -3.017307946594\n  - -2.604717017541\n  - 0.5590572645039\n  - -2.002675175922\n  - -1.713929124629\n  - -0.766094476529\n  - 0.8353723291955\n  - 1.440336017483\n  - 1.182720606307\n  - 1.17944901886\n  - 0.588011778232\n  - -1.219079427535\n  - -2.324287533915\n  - -4.459097736466\n  - -2.361787533915\n  - -3.709097736466\n  - -4.192135650199\n  - -2.342307946594\n  - -2.267217017541\n  - 0.1465572645039\n  - -1.777675175922\n  - -1.863929124629\n  - -1.103594476529\n  - 8.235372329195\n  - 2.782720606307\n  - -3.880822074155\n  - -3.670161022919\n  - -2.960161022919\n  - -0.5201610229187\n  - -0.6195992900418\n  - 4.735372329195\n  - 0.2827206063069\n  - -3.180822074155\n  - -2.240161022919\n  - -3.330161022919\n  - -1.460161022919\n  - -2.309599290042\n  - 3.535372329196\n  - 1.382720606307\n  - -3.480822074155\n  - -2.210161022919\n  - -4.270161022919\n  - -2.770161022919\n  - -0.8095992900418\n  - 4.735372329195\n  - 3.882720606307\n  - -2.580822074155\n  - -3.520161022919\n  - -0.6195992900418\n  - -3.330161022919\n  - -2.960161022919\n  - -0.8095992900418\n  - 3.535372329196\n  - 0.3827206063069\n  - -1.911988221768\n  - -4.380822074155\n  - 
-5.747347392322\n  - -5.377347392322\n  - -3.559846094244\n  - -2.130161022919\n  - -0.9497554490721\n  - 0.547414971422\n  - 0.4204007099582\n  - 2.535372329196\n  - 2.382720606307\n  - -0.911988221768\n  - -1.180822074155\n  - -7.947347392322\n  - -7.627347392322\n  - -3.069846094244\n  - -3.330161022919\n  - -1.779755449072\n  - -1.142585028578\n  - 1.170400709958\n  - -2.764627670804\n  - -0.1172793936931\n  - -1.211988221768\n  - -0.5808220741553\n  - -6.947347392322\n  - -6.277347392322\n  - -1.379846094244\n  - -0.4401610229187\n  - -4.029755449072\n  - 0.09741497142202\n  - 3.870400709958\n  - 4.735372329195\n  - 0.1403360174832\n  - 2.382720606307\n  - 3.67944901886\n  - -2.011988221768\n  - -4.419079427535\n  - -4.659097736466\n  - -3.789097736466\n  - -7.084635650199\n  - -5.872307946594\n  - -5.382217017541\n  - -2.588442735496\n  - -4.257675175922\n  - -3.816429124629\n  - -2.978594476529\n  - 8.109838977081\n  - 9.679838977081\n  - 1.989838977081\n  - -4.270161022919\n  - -4.010161022919\n  - 0.0398389770813\n  - -2.464627670805\n  - -4.917279393693\n  - -1.711988221768\n  - 3.419177925845\n  - 1.852652607678\n  - 1.320153905756\n  - 1.672652607678\n  - 1.430153905756\n  - 1.619838977081\n  - -0.6497554490721\n  - 3.617414971422\n  - 2.490400709958\n  - 7.435372329196\n  - 1.682720606307\n  - -2.211988221768\n  - -7.380822074155\n  - -3.947347392322\n  - -4.679846094244\n  - -3.387347392322\n  - -3.709846094244\n  - 0.1598389770813\n  - -3.459755449072\n  - -0.582585028578\n  - 2.260400709958\n  - -6.564627670804\n  - -0.4172793936931\n  - -8.911988221768\n  - -5.680822074155\n  - -2.247347392322\n  - -6.079846094244\n  - -1.957347392322\n  - -5.429846094244\n  - -2.730161022919\n  - -3.579755449072\n  - -1.252585028578\n  - 0.3904007099582\n  - -2.390161022919\n  - 7.735372329195\n  - 4.882720606307\n  - -0.8808220741553\n  - -2.770161022919\n  - -1.379599290042\n  - -3.140161022919\n  - -1.189599290042\n  - -0.8901610229187\n  - -1.460161022919\n  - 
-0.4401610229187\n  - -6.224287533915\n  - -5.774287533915\n  - 8.435372329196\n  - 1.982720606307\n  - -1.924287533915\n  - -1.914287533915\n  - -3.784635650199\n  - -2.752217017541\n  - -3.324287533915\n  - -3.034287533915\n  - 8.035372329195\n  - 1.282720606307\n  - -4.304635650199\n  - -3.092217017541\n  - -2.824287533915\n  - -2.774287533915\nsquare_terms:\n  -   -   - 42.05642503987\n          - 5.182308979952\n          - 295.7086970924\n      -   - 5.182308979952\n          - -16.8011933671\n          - 44.88068608775\n      -   - 295.7086970924\n          - 44.88068608775\n          - -11.16800948171\n  -   -   - 37.76206570488\n          - 4.598069813344\n          - 268.480673393\n      -   - 4.598069813344\n          - -15.06222715154\n          - 39.86023126116\n      -   - 268.480673393\n          - 39.86023126116\n          - -13.87397890211\n  -   -   - 34.52716237442\n          - 3.742228341934\n          - 249.1555844695\n      -   - 3.742228341934\n          - -13.95687891579\n          - 32.4806538628\n      -   - 249.1555844695\n          - 32.4806538628\n          - -15.39308605662\n  -   -   - 36.36211362787\n          - 11.74965491243\n          - 232.6612213924\n      -   - 11.74965491243\n          - -14.33678876201\n          - 101.5946337199\n      -   - 232.6612213924\n          - 101.5946337199\n          - -16.74890647466\n  -   -   - 30.88627308451\n          - 4.976024773829\n          - 218.8203159401\n      -   - 4.976024773829\n          - -12.55705651792\n          - 43.13446872147\n      -   - 218.8203159401\n          - 43.13446872147\n          - -16.96609172267\n  -   -   - 26.99562208048\n          - 4.039388146136\n          - 194.3460658399\n      -   - 4.039388146136\n          - -10.904018831\n          - 35.05642111527\n      -   - 194.3460658399\n          - 35.05642111527\n          - -17.54258356033\n  -   -   - 21.28989652643\n          - 3.211915555773\n          - 156.5687870814\n      -   - 3.211915555773\n      
    - -7.888244440813\n          - 27.94191248902\n      -   - 156.5687870814\n          - 27.94191248902\n          - -18.64926539005\n  -   -   - 17.09406685234\n          - 2.66162436771\n          - 131.0277600318\n      -   - 2.66162436771\n          - -5.180832861658\n          - 23.26291444469\n      -   - 131.0277600318\n          - 23.26291444469\n          - -23.07010638636\n  -   -   - 21.28989652643\n          - 3.211915555773\n          - 156.5687870814\n      -   - 3.211915555773\n          - -7.888244440813\n          - 27.94191248902\n      -   - 156.5687870814\n          - 27.94191248902\n          - -18.64926539005\n  -   -   - 17.09406685234\n          - 2.66162436771\n          - 131.0277600318\n      -   - 2.66162436771\n          - -5.180832861658\n          - 23.26291444469\n      -   - 131.0277600318\n          - 23.26291444469\n          - -23.07010638636\n  -   -   - 13.22112788651\n          - 1.569537501602\n          - 137.5825625705\n      -   - 1.569537501602\n          - -4.091404567032\n          - 14.28797929438\n      -   - 137.5825625705\n          - 14.28797929438\n          - -69.36780402294\n  -   -   - 12.39426977847\n          - 3.219862122548\n          - 69.57634867759\n      -   - 3.219862122548\n          - 3.931549769208\n          - 27.88803537784\n      -   - 69.57634867759\n          - 27.88803537784\n          - 16.06772788907\n  -   -   - 10.59582926181\n          - 3.961203981524\n          - 63.6129919848\n      -   - 3.961203981524\n          - 13.8089243593\n          - 34.67215199382\n      -   - 63.6129919848\n          - 34.67215199382\n          - 3.402916211\n  -   -   - -4.590996625593\n          - -49.8578626567\n          - 88.18903535997\n      -   - -49.8578626567\n          - -392.7968335729\n          - -441.4737183738\n      -   - 88.18903535997\n          - -441.4737183738\n          - -2.005253308587\n  -   -   - 6.609230403572\n          - -1.114542090064\n          - 50.17123518647\n      -   - 
-1.114542090064\n          - -20.03956487444\n          - -10.10106952879\n      -   - 50.17123518647\n          - -10.10106952879\n          - -0.6749898814763\n  -   -   - 5.867803812028\n          - -0.1814136458974\n          - 42.89612529825\n      -   - -0.1814136458974\n          - -11.28413222756\n          - -1.804764358727\n      -   - 42.89612529825\n          - -1.804764358727\n          - -1.028734386942\n  -   -   - 5.122058993026\n          - 0.07028381229468\n          - 37.07301548541\n      -   - 0.07028381229468\n          - -8.007614389428\n          - 0.4527765498708\n      -   - 37.07301548541\n          - 0.4527765498708\n          - -1.160231423527\n  -   -   - 42.05642503987\n          - 5.182308979952\n          - 295.7086970924\n      -   - 5.182308979952\n          - -16.8011933671\n          - 44.88068608775\n      -   - 295.7086970924\n          - 44.88068608775\n          - -11.16800948171\n  -   -   - 37.76206570488\n          - 4.598069813344\n          - 268.480673393\n      -   - 4.598069813344\n          - -15.06222715154\n          - 39.86023126116\n      -   - 268.480673393\n          - 39.86023126116\n          - -13.8739789021\n  -   -   - 34.52716237442\n          - 3.742228341934\n          - 249.1555844695\n      -   - 3.742228341934\n          - -13.95687891579\n          - 32.4806538628\n      -   - 249.1555844695\n          - 32.4806538628\n          - -15.39308605662\n  -   -   - 36.36211362787\n          - 11.74965491243\n          - 232.6612213924\n      -   - 11.74965491243\n          - -14.33678876201\n          - 101.5946337199\n      -   - 232.6612213924\n          - 101.5946337199\n          - -16.74890647466\n  -   -   - 30.88627308451\n          - 4.976024773829\n          - 218.8203159401\n      -   - 4.976024773829\n          - -12.55705651792\n          - 43.13446872147\n      -   - 218.8203159401\n          - 43.13446872147\n          - -16.96609172267\n  -   -   - 26.99562208048\n          - 
4.039388146136\n          - 194.3460658399\n      -   - 4.039388146136\n          - -10.904018831\n          - 35.05642111527\n      -   - 194.3460658399\n          - 35.05642111527\n          - -17.54258356033\n  -   -   - 21.28989652643\n          - 3.211915555773\n          - 156.5687870814\n      -   - 3.211915555773\n          - -7.888244440814\n          - 27.94191248902\n      -   - 156.5687870814\n          - 27.94191248902\n          - -18.64926539005\n  -   -   - 17.09406685234\n          - 2.66162436771\n          - 131.0277600318\n      -   - 2.66162436771\n          - -5.180832861658\n          - 23.26291444469\n      -   - 131.0277600318\n          - 23.26291444469\n          - -23.07010638636\n  -   -   - 21.28989652643\n          - 3.211915555773\n          - 156.5687870814\n      -   - 3.211915555773\n          - -7.888244440814\n          - 27.94191248902\n      -   - 156.5687870814\n          - 27.94191248902\n          - -18.64926539005\n  -   -   - 17.09406685234\n          - 2.66162436771\n          - 131.0277600318\n      -   - 2.66162436771\n          - -5.180832861658\n          - 23.26291444469\n      -   - 131.0277600318\n          - 23.26291444469\n          - -23.07010638636\n  -   -   - 13.22112788651\n          - 1.569537501602\n          - 137.5825625705\n      -   - 1.569537501602\n          - -4.091404567032\n          - 14.28797929438\n      -   - 137.5825625705\n          - 14.28797929438\n          - -69.36780402294\n  -   -   - 12.39426977847\n          - 3.219862122548\n          - 69.57634867759\n      -   - 3.219862122548\n          - 3.931549769208\n          - 27.88803537784\n      -   - 69.57634867759\n          - 27.88803537784\n          - 16.06772788907\n  -   -   - 10.59582926181\n          - 3.961203981524\n          - 63.6129919848\n      -   - 3.961203981524\n          - 13.8089243593\n          - 34.67215199382\n      -   - 63.6129919848\n          - 34.67215199382\n          - 3.402916211\n  -   -   - 
-4.590996625593\n          - -49.8578626567\n          - 88.18903535997\n      -   - -49.8578626567\n          - -392.7968335729\n          - -441.4737183738\n      -   - 88.18903535997\n          - -441.4737183738\n          - -2.005253308587\n  -   -   - 6.609230403572\n          - -1.114542090064\n          - 50.17123518647\n      -   - -1.114542090064\n          - -20.03956487444\n          - -10.10106952879\n      -   - 50.17123518647\n          - -10.10106952879\n          - -0.6749898814763\n  -   -   - 5.867803812028\n          - -0.1814136458974\n          - 42.89612529825\n      -   - -0.1814136458974\n          - -11.28413222756\n          - -1.804764358727\n      -   - 42.89612529825\n          - -1.804764358727\n          - -1.028734386942\n  -   -   - 5.122058993026\n          - 0.07028381229468\n          - 37.07301548541\n      -   - 0.07028381229468\n          - -8.007614389428\n          - 0.4527765498708\n      -   - 37.07301548541\n          - 0.4527765498708\n          - -1.160231423527\n  -   -   - 42.05642503987\n          - 5.182308979952\n          - 295.7086970924\n      -   - 5.182308979952\n          - -16.8011933671\n          - 44.88068608775\n      -   - 295.7086970924\n          - 44.88068608775\n          - -11.16800948171\n  -   -   - 37.76206570488\n          - 4.598069813344\n          - 268.480673393\n      -   - 4.598069813344\n          - -15.06222715154\n          - 39.86023126116\n      -   - 268.480673393\n          - 39.86023126116\n          - -13.8739789021\n  -   -   - 34.52716237442\n          - 3.742228341934\n          - 249.1555844695\n      -   - 3.742228341934\n          - -13.95687891579\n          - 32.4806538628\n      -   - 249.1555844695\n          - 32.4806538628\n          - -15.39308605662\n  -   -   - 36.36211362787\n          - 11.74965491243\n          - 232.6612213924\n      -   - 11.74965491243\n          - -14.33678876201\n          - 101.5946337199\n      -   - 232.6612213924\n          - 
101.5946337199\n          - -16.74890647466\n  -   -   - 30.88627308451\n          - 4.976024773829\n          - 218.8203159401\n      -   - 4.976024773829\n          - -12.55705651792\n          - 43.13446872147\n      -   - 218.8203159401\n          - 43.13446872147\n          - -16.96609172267\n  -   -   - 26.99562208048\n          - 4.039388146136\n          - 194.3460658399\n      -   - 4.039388146136\n          - -10.904018831\n          - 35.05642111527\n      -   - 194.3460658399\n          - 35.05642111527\n          - -17.54258356033\n  -   -   - 21.28989652643\n          - 3.211915555773\n          - 156.5687870814\n      -   - 3.211915555773\n          - -7.888244440813\n          - 27.94191248902\n      -   - 156.5687870814\n          - 27.94191248902\n          - -18.64926539005\n  -   -   - 17.09406685234\n          - 2.66162436771\n          - 131.0277600318\n      -   - 2.66162436771\n          - -5.180832861658\n          - 23.26291444469\n      -   - 131.0277600318\n          - 23.26291444469\n          - -23.07010638636\n  -   -   - 21.28989652643\n          - 3.211915555773\n          - 156.5687870814\n      -   - 3.211915555773\n          - -7.888244440813\n          - 27.94191248902\n      -   - 156.5687870814\n          - 27.94191248902\n          - -18.64926539005\n  -   -   - 17.09406685234\n          - 2.66162436771\n          - 131.0277600318\n      -   - 2.66162436771\n          - -5.180832861658\n          - 23.26291444469\n      -   - 131.0277600318\n          - 23.26291444469\n          - -23.07010638636\n  -   -   - 13.22112788651\n          - 1.569537501602\n          - 137.5825625705\n      -   - 1.569537501602\n          - -4.091404567032\n          - 14.28797929438\n      -   - 137.5825625705\n          - 14.28797929438\n          - -69.36780402294\n  -   -   - 12.39426977847\n          - 3.219862122548\n          - 69.57634867759\n      -   - 3.219862122548\n          - 3.931549769208\n          - 27.88803537784\n      -   - 
69.57634867759\n          - 27.88803537784\n          - 16.06772788907\n  -   -   - 10.59582926181\n          - 3.961203981524\n          - 63.6129919848\n      -   - 3.961203981524\n          - 13.8089243593\n          - 34.67215199382\n      -   - 63.6129919848\n          - 34.67215199382\n          - 3.402916211\n  -   -   - -4.590996625593\n          - -49.8578626567\n          - 88.18903535997\n      -   - -49.8578626567\n          - -392.7968335729\n          - -441.4737183738\n      -   - 88.18903535997\n          - -441.4737183738\n          - -2.005253308587\n  -   -   - 6.609230403572\n          - -1.114542090064\n          - 50.17123518647\n      -   - -1.114542090064\n          - -20.03956487444\n          - -10.10106952879\n      -   - 50.17123518647\n          - -10.10106952879\n          - -0.6749898814763\n  -   -   - 5.867803812028\n          - -0.1814136458974\n          - 42.89612529825\n      -   - -0.1814136458974\n          - -11.28413222756\n          - -1.804764358727\n      -   - 42.89612529825\n          - -1.804764358727\n          - -1.028734386942\n  -   -   - 5.122058993026\n          - 0.07028381229468\n          - 37.07301548541\n      -   - 0.07028381229468\n          - -8.007614389428\n          - 0.4527765498708\n      -   - 37.07301548541\n          - 0.4527765498708\n          - -1.160231423527\n  -   -   - 42.05642503987\n          - 5.182308979952\n          - 295.7086970924\n      -   - 5.182308979952\n          - -16.8011933671\n          - 44.88068608775\n      -   - 295.7086970924\n          - 44.88068608775\n          - -11.16800948171\n  -   -   - 37.76206570488\n          - 4.598069813344\n          - 268.480673393\n      -   - 4.598069813344\n          - -15.06222715154\n          - 39.86023126116\n      -   - 268.480673393\n          - 39.86023126116\n          - -13.87397890211\n  -   -   - 34.52716237442\n          - 3.742228341934\n          - 249.1555844695\n      -   - 3.742228341934\n          - 
-13.95687891579\n          - 32.4806538628\n      -   - 249.1555844695\n          - 32.4806538628\n          - -15.39308605662\n  -   -   - 36.36211362787\n          - 11.74965491243\n          - 232.6612213924\n      -   - 11.74965491243\n          - -14.33678876201\n          - 101.5946337199\n      -   - 232.6612213924\n          - 101.5946337199\n          - -16.74890647466\n  -   -   - 30.88627308451\n          - 4.976024773829\n          - 218.8203159401\n      -   - 4.976024773829\n          - -12.55705651792\n          - 43.13446872147\n      -   - 218.8203159401\n          - 43.13446872147\n          - -16.96609172267\n  -   -   - 26.99562208048\n          - 4.039388146136\n          - 194.3460658399\n      -   - 4.039388146136\n          - -10.904018831\n          - 35.05642111527\n      -   - 194.3460658399\n          - 35.05642111527\n          - -17.54258356033\n  -   -   - 21.28989652643\n          - 3.211915555773\n          - 156.5687870814\n      -   - 3.211915555773\n          - -7.888244440813\n          - 27.94191248902\n      -   - 156.5687870814\n          - 27.94191248902\n          - -18.64926539005\n  -   -   - 17.09406685234\n          - 2.66162436771\n          - 131.0277600318\n      -   - 2.66162436771\n          - -5.180832861658\n          - 23.26291444469\n      -   - 131.0277600318\n          - 23.26291444469\n          - -23.07010638636\n  -   -   - 21.28989652643\n          - 3.211915555773\n          - 156.5687870814\n      -   - 3.211915555773\n          - -7.888244440814\n          - 27.94191248902\n      -   - 156.5687870814\n          - 27.94191248902\n          - -18.64926539005\n  -   -   - 17.09406685234\n          - 2.66162436771\n          - 131.0277600318\n      -   - 2.66162436771\n          - -5.180832861658\n          - 23.26291444469\n      -   - 131.0277600318\n          - 23.26291444469\n          - -23.07010638636\n  -   -   - 13.22112788651\n          - 1.569537501602\n          - 137.5825625705\n      -   - 
1.569537501602\n          - -4.091404567032\n          - 14.28797929438\n      -   - 137.5825625705\n          - 14.28797929438\n          - -69.36780402294\n  -   -   - 12.39426977847\n          - 3.219862122548\n          - 69.57634867759\n      -   - 3.219862122548\n          - 3.931549769208\n          - 27.88803537784\n      -   - 69.57634867759\n          - 27.88803537784\n          - 16.06772788907\n  -   -   - 10.59582926181\n          - 3.961203981524\n          - 63.6129919848\n      -   - 3.961203981524\n          - 13.8089243593\n          - 34.67215199382\n      -   - 63.6129919848\n          - 34.67215199382\n          - 3.402916211001\n  -   -   - -4.590996625593\n          - -49.8578626567\n          - 88.18903535997\n      -   - -49.8578626567\n          - -392.7968335729\n          - -441.4737183738\n      -   - 88.18903535997\n          - -441.4737183738\n          - -2.005253308587\n  -   -   - 6.609230403572\n          - -1.114542090064\n          - 50.17123518647\n      -   - -1.114542090064\n          - -20.03956487444\n          - -10.10106952879\n      -   - 50.17123518647\n          - -10.10106952879\n          - -0.6749898814763\n  -   -   - 5.867803812028\n          - -0.1814136458974\n          - 42.89612529825\n      -   - -0.1814136458974\n          - -11.28413222756\n          - -1.804764358727\n      -   - 42.89612529825\n          - -1.804764358727\n          - -1.028734386942\n  -   -   - 5.122058993026\n          - 0.07028381229468\n          - 37.07301548541\n      -   - 0.07028381229468\n          - -8.007614389428\n          - 0.4527765498708\n      -   - 37.07301548541\n          - 0.4527765498708\n          - -1.160231423527\n  -   -   - 42.05642503987\n          - 5.182308979952\n          - 295.7086970924\n      -   - 5.182308979952\n          - -16.8011933671\n          - 44.88068608775\n      -   - 295.7086970924\n          - 44.88068608775\n          - -11.16800948171\n  -   -   - 34.52716237442\n          - 
3.742228341934\n          - 249.1555844695\n      -   - 3.742228341934\n          - -13.95687891579\n          - 32.4806538628\n      -   - 249.1555844695\n          - 32.4806538628\n          - -15.39308605662\n  -   -   - 23.89982318472\n          - 3.566267197888\n          - 173.817334734\n      -   - 3.566267197888\n          - -9.358360214064\n          - 30.98344929428\n      -   - 173.817334734\n          - 30.98344929428\n          - -17.96992259777\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 4.783039455404\n          - 0.1257874954057\n          - 34.53987590883\n      -   - 0.1257874954057\n          - -6.993256198462\n          - 0.9566846715419\n      -   - 34.53987590883\n          - 0.9566846715419\n          - -1.179917631133\n  -   -   - 42.05642503987\n          - 5.182308979952\n          - 295.7086970924\n      -   - 5.182308979952\n          - -16.8011933671\n          - 44.88068608775\n      -   - 295.7086970924\n          - 44.88068608775\n          - -11.16800948171\n  -   -   - 34.52716237442\n          - 3.742228341934\n          - 249.1555844695\n      -   - 3.742228341934\n          - -13.95687891579\n          - 32.4806538628\n      -   - 249.1555844695\n          - 32.4806538628\n          - -15.39308605662\n  -   -   - 
23.89982318472\n          - 3.566267197888\n          - 173.817334734\n      -   - 3.566267197888\n          - -9.358360214064\n          - 30.98344929428\n      -   - 173.817334734\n          - 30.98344929428\n          - -17.96992259777\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 4.783039455404\n          - 0.1257874954058\n          - 34.53987590883\n      -   - 0.1257874954058\n          - -6.993256198462\n          - 0.9566846715422\n      -   - 34.53987590883\n          - 0.9566846715422\n          - -1.179917631132\n  -   -   - 42.05642503987\n          - 5.182308979952\n          - 295.7086970924\n      -   - 5.182308979952\n          - -16.8011933671\n          - 44.88068608775\n      -   - 295.7086970924\n          - 44.88068608775\n          - -11.16800948171\n  -   -   - 34.52716237442\n          - 3.742228341934\n          - 249.1555844695\n      -   - 3.742228341934\n          - -13.95687891579\n          - 32.4806538628\n      -   - 249.1555844695\n          - 32.4806538628\n          - -15.39308605662\n  -   -   - 23.89982318472\n          - 3.566267197888\n          - 173.817334734\n      -   - 3.566267197888\n          - -9.358360214064\n          - 30.98344929428\n      -   - 173.817334734\n          - 30.98344929428\n          - 
-17.96992259777\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 4.783039455404\n          - 0.1257874954057\n          - 34.53987590883\n      -   - 0.1257874954057\n          - -6.993256198462\n          - 0.9566846715419\n      -   - 34.53987590883\n          - 0.9566846715419\n          - -1.179917631133\n  -   -   - 42.05642503987\n          - 5.182308979952\n          - 295.7086970924\n      -   - 5.182308979952\n          - -16.8011933671\n          - 44.88068608775\n      -   - 295.7086970924\n          - 44.88068608775\n          - -11.16800948171\n  -   -   - 34.52716237442\n          - 3.742228341934\n          - 249.1555844695\n      -   - 3.742228341934\n          - -13.95687891579\n          - 32.4806538628\n      -   - 249.1555844695\n          - 32.4806538628\n          - -15.39308605662\n  -   -   - 23.89982318472\n          - 3.566267197888\n          - 173.817334734\n      -   - 3.566267197888\n          - -9.358360214064\n          - 30.98344929428\n      -   - 173.817334734\n          - 30.98344929428\n          - -17.96992259777\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 
35.03465443111\n          - 68.57957805078\n  -   -   - 4.783039455404\n          - 0.1257874954057\n          - 34.53987590883\n      -   - 0.1257874954057\n          - -6.993256198462\n          - 0.9566846715419\n      -   - 34.53987590883\n          - 0.9566846715419\n          - -1.179917631133\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 4.783039455404\n          - 0.1257874954057\n          - 34.53987590883\n      -   - 0.1257874954057\n          - -6.993256198462\n          - 0.9566846715419\n      -   - 34.53987590883\n          - 0.9566846715419\n          - -1.179917631133\n  -   -   - 42.05642503987\n          - 5.182308979952\n          - 295.7086970924\n      -   - 5.182308979952\n          - -16.8011933671\n          - 44.88068608775\n      -   - 295.7086970924\n          - 44.88068608775\n          - -11.16800948171\n  -   -   - 34.52716237442\n          - 3.742228341934\n          - 249.1555844695\n      -   - 3.742228341934\n          - -13.95687891579\n          - 32.4806538628\n      -   - 249.1555844695\n          - 32.4806538628\n          - -15.39308605662\n  -   -   - 30.88627308451\n          - 4.976024773829\n          - 218.8203159401\n      -   - 4.976024773829\n          - -12.55705651792\n          - 43.13446872147\n      -   - 218.8203159401\n          - 43.13446872147\n          - -16.96609172267\n  -   -   - 23.89982318472\n          - 3.566267197888\n          - 173.817334734\n      -   - 3.566267197888\n          - -9.358360214064\n          - 30.98344929428\n      -  
 - 173.817334734\n          - 30.98344929428\n          - -17.96992259777\n  -   -   - 19.05038877716\n          - 2.921211127977\n          - 142.2705130619\n      -   - 2.921211127977\n          - -6.493471507915\n          - 25.45927037319\n      -   - 142.2705130619\n          - 25.45927037319\n          - -20.03762618713\n  -   -   - 19.05038877716\n          - 2.921211127977\n          - 142.2705130619\n      -   - 2.921211127977\n          - -6.493471507915\n          - 25.45927037319\n      -   - 142.2705130619\n          - 25.45927037319\n          - -20.03762618713\n  -   -   - 15.31308103373\n          - 2.369394773812\n          - 124.3899129792\n      -   - 2.369394773812\n          - -4.034968539624\n          - 20.83080340569\n      -   - 124.3899129792\n          - 20.83080340569\n          - -30.92076532995\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 10.44273226791\n          - 6.475979422676\n          - 57.63907853267\n      -   - 6.475979422676\n          - 34.37966124455\n          - 56.97442501698\n      -   - 57.63907853267\n          - 56.97442501698\n          - 1.719659134263\n  -   -   - 6.258067891218\n          - -0.4773313726774\n          - 46.28932429293\n      -   - -0.4773313726774\n          - -14.3215416894\n          - -4.441525949927\n      -   - 46.28932429293\n          - -4.441525949927\n          - -0.8894811141752\n  -   -   - 4.783039455404\n          - 0.1257874954057\n          - 34.53987590883\n      -   - 0.1257874954057\n          - -6.993256198462\n          - 0.956684671542\n      -   - 34.53987590883\n          - 0.956684671542\n          - -1.179917631133\n  -   -   - 42.05642503987\n          - 5.182308979952\n          - 295.7086970924\n      -   - 5.182308979952\n          - -16.8011933671\n   
       - 44.88068608775\n      -   - 295.7086970924\n          - 44.88068608775\n          - -11.16800948171\n  -   -   - 34.52716237442\n          - 3.742228341934\n          - 249.1555844695\n      -   - 3.742228341934\n          - -13.95687891579\n          - 32.4806538628\n      -   - 249.1555844695\n          - 32.4806538628\n          - -15.39308605662\n  -   -   - 30.88627308451\n          - 4.976024773829\n          - 218.8203159401\n      -   - 4.976024773829\n          - -12.55705651792\n          - 43.13446872147\n      -   - 218.8203159401\n          - 43.13446872147\n          - -16.96609172267\n  -   -   - 23.89982318472\n          - 3.566267197888\n          - 173.817334734\n      -   - 3.566267197888\n          - -9.358360214064\n          - 30.98344929428\n      -   - 173.817334734\n          - 30.98344929428\n          - -17.96992259777\n  -   -   - 19.05038877716\n          - 2.921211127977\n          - 142.2705130619\n      -   - 2.921211127977\n          - -6.493471507915\n          - 25.45927037319\n      -   - 142.2705130619\n          - 25.45927037319\n          - -20.03762618713\n  -   -   - 19.05038877716\n          - 2.921211127977\n          - 142.2705130619\n      -   - 2.921211127977\n          - -6.493471507915\n          - 25.45927037319\n      -   - 142.2705130619\n          - 25.45927037319\n          - -20.03762618713\n  -   -   - 15.31308103373\n          - 2.369394773812\n          - 124.3899129792\n      -   - 2.369394773812\n          - -4.034968539624\n          - 20.83080340569\n      -   - 124.3899129792\n          - 20.83080340569\n          - -30.92076532995\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 10.44273226791\n          - 6.475979422676\n          - 57.63907853267\n      -   - 6.475979422676\n      
    - 34.37966124455\n          - 56.97442501698\n      -   - 57.63907853267\n          - 56.97442501698\n          - 1.719659134262\n  -   -   - 6.258067891218\n          - -0.4773313726774\n          - 46.28932429293\n      -   - -0.4773313726774\n          - -14.3215416894\n          - -4.441525949927\n      -   - 46.28932429293\n          - -4.441525949927\n          - -0.889481114175\n  -   -   - 4.783039455404\n          - 0.1257874954058\n          - 34.53987590883\n      -   - 0.1257874954058\n          - -6.993256198462\n          - 0.956684671542\n      -   - 34.53987590883\n          - 0.956684671542\n          - -1.179917631132\n  -   -   - 42.05642503987\n          - 5.182308979952\n          - 295.7086970924\n      -   - 5.182308979952\n          - -16.8011933671\n          - 44.88068608775\n      -   - 295.7086970924\n          - 44.88068608775\n          - -11.16800948171\n  -   -   - 34.52716237442\n          - 3.742228341934\n          - 249.1555844695\n      -   - 3.742228341934\n          - -13.95687891579\n          - 32.4806538628\n      -   - 249.1555844695\n          - 32.4806538628\n          - -15.39308605662\n  -   -   - 30.88627308451\n          - 4.976024773829\n          - 218.8203159401\n      -   - 4.976024773829\n          - -12.55705651792\n          - 43.13446872147\n      -   - 218.8203159401\n          - 43.13446872147\n          - -16.96609172267\n  -   -   - 23.89982318472\n          - 3.566267197888\n          - 173.817334734\n      -   - 3.566267197888\n          - -9.358360214064\n          - 30.98344929428\n      -   - 173.817334734\n          - 30.98344929428\n          - -17.96992259777\n  -   -   - 19.05038877716\n          - 2.921211127977\n          - 142.2705130619\n      -   - 2.921211127977\n          - -6.493471507915\n          - 25.45927037319\n      -   - 142.2705130619\n          - 25.45927037319\n          - -20.03762618713\n  -   -   - 19.05038877716\n          - 2.921211127977\n          - 142.2705130619\n  
    -   - 2.921211127977\n          - -6.493471507915\n          - 25.45927037319\n      -   - 142.2705130619\n          - 25.45927037319\n          - -20.03762618713\n  -   -   - 15.31308103373\n          - 2.369394773812\n          - 124.3899129792\n      -   - 2.369394773812\n          - -4.034968539624\n          - 20.83080340569\n      -   - 124.3899129792\n          - 20.83080340569\n          - -30.92076532995\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 10.44273226791\n          - 6.475979422676\n          - 57.63907853267\n      -   - 6.475979422676\n          - 34.37966124455\n          - 56.97442501698\n      -   - 57.63907853267\n          - 56.97442501698\n          - 1.719659134262\n  -   -   - 6.258067891218\n          - -0.4773313726774\n          - 46.28932429293\n      -   - -0.4773313726774\n          - -14.3215416894\n          - -4.441525949927\n      -   - 46.28932429293\n          - -4.441525949927\n          - -0.889481114175\n  -   -   - 4.783039455404\n          - 0.1257874954057\n          - 34.53987590883\n      -   - 0.1257874954057\n          - -6.993256198462\n          - 0.9566846715419\n      -   - 34.53987590883\n          - 0.9566846715419\n          - -1.179917631133\n  -   -   - 42.05642503987\n          - 5.182308979952\n          - 295.7086970924\n      -   - 5.182308979952\n          - -16.8011933671\n          - 44.88068608775\n      -   - 295.7086970924\n          - 44.88068608775\n          - -11.16800948171\n  -   -   - 37.76206570488\n          - 4.598069813344\n          - 268.480673393\n      -   - 4.598069813344\n          - -15.06222715154\n          - 39.86023126116\n      -   - 268.480673393\n          - 39.86023126116\n          - -13.8739789021\n  -   -   - 34.52716237442\n          - 
3.742228341934\n          - 249.1555844695\n      -   - 3.742228341934\n          - -13.95687891579\n          - 32.4806538628\n      -   - 249.1555844695\n          - 32.4806538628\n          - -15.39308605662\n  -   -   - 36.36211362787\n          - 11.74965491243\n          - 232.6612213924\n      -   - 11.74965491243\n          - -14.33678876201\n          - 101.5946337199\n      -   - 232.6612213924\n          - 101.5946337199\n          - -16.74890647466\n  -   -   - 30.88627308451\n          - 4.976024773829\n          - 218.8203159401\n      -   - 4.976024773829\n          - -12.55705651792\n          - 43.13446872147\n      -   - 218.8203159401\n          - 43.13446872147\n          - -16.96609172267\n  -   -   - 26.99562208048\n          - 4.039388146136\n          - 194.3460658399\n      -   - 4.039388146136\n          - -10.904018831\n          - 35.05642111527\n      -   - 194.3460658399\n          - 35.05642111527\n          - -17.54258356033\n  -   -   - 17.09406685234\n          - 2.66162436771\n          - 131.0277600318\n      -   - 2.66162436771\n          - -5.180832861657\n          - 23.26291444469\n      -   - 131.0277600318\n          - 23.26291444469\n          - -23.07010638636\n  -   -   - 17.09406685234\n          - 2.66162436771\n          - 131.0277600318\n      -   - 2.66162436771\n          - -5.180832861658\n          - 23.26291444469\n      -   - 131.0277600318\n          - 23.26291444469\n          - -23.07010638636\n  -   -   - 13.22112788651\n          - 1.569537501602\n          - 137.5825625705\n      -   - 1.569537501602\n          - -4.091404567032\n          - 14.28797929438\n      -   - 137.5825625705\n          - 14.28797929438\n          - -69.36780402294\n  -   -   - 12.39426977847\n          - 3.219862122548\n          - 69.57634867759\n      -   - 3.219862122548\n          - 3.931549769208\n          - 27.88803537784\n      -   - 69.57634867759\n          - 27.88803537784\n          - 16.06772788907\n  -   -   - 
10.59582926181\n          - 3.961203981524\n          - 63.6129919848\n      -   - 3.961203981524\n          - 13.8089243593\n          - 34.67215199382\n      -   - 63.6129919848\n          - 34.67215199382\n          - 3.402916211001\n  -   -   - -4.590996625593\n          - -49.8578626567\n          - 88.18903535997\n      -   - -49.8578626567\n          - -392.7968335729\n          - -441.4737183738\n      -   - 88.18903535997\n          - -441.4737183738\n          - -2.005253308587\n  -   -   - 6.609230403572\n          - -1.114542090064\n          - 50.17123518647\n      -   - -1.114542090064\n          - -20.03956487444\n          - -10.10106952879\n      -   - 50.17123518647\n          - -10.10106952879\n          - -0.6749898814764\n  -   -   - 5.867803812028\n          - -0.1814136458974\n          - 42.89612529825\n      -   - -0.1814136458974\n          - -11.28413222756\n          - -1.804764358727\n      -   - 42.89612529825\n          - -1.804764358727\n          - -1.028734386942\n  -   -   - 5.122058993026\n          - 0.07028381229466\n          - 37.07301548541\n      -   - 0.07028381229466\n          - -8.007614389428\n          - 0.4527765498708\n      -   - 37.07301548541\n          - 0.4527765498708\n          - -1.160231423527\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 
35.03465443111\n          - 68.57957805078\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 42.05642503987\n          - 5.182308979952\n          - 295.7086970924\n      -   - 5.182308979952\n          - -16.8011933671\n          - 44.88068608775\n      -   - 295.7086970924\n          - 44.88068608775\n          - -11.16800948171\n  -   -   - 34.52716237442\n          - 3.742228341934\n          - 249.1555844695\n      -   - 3.742228341934\n          - -13.95687891579\n          - 32.4806538628\n      -   - 249.1555844695\n          - 32.4806538628\n          - -15.39308605662\n  -   -   - 30.88627308451\n          - 4.976024773829\n          - 218.8203159401\n      -   - 4.976024773829\n          - -12.55705651792\n          - 43.13446872147\n      -   - 218.8203159401\n          - 43.13446872147\n          - -16.96609172267\n  -   -   - 23.89982318472\n          - 3.566267197888\n          - 173.817334734\n      -   - 3.566267197888\n          - -9.358360214064\n          - 30.98344929428\n      -   - 173.817334734\n          - 30.98344929428\n          - -17.96992259777\n  -   -   - 19.05038877716\n          - 2.921211127977\n          - 142.2705130619\n      -   - 2.921211127977\n          - -6.493471507915\n          - 25.45927037319\n      -   - 
142.2705130619\n          - 25.45927037319\n          - -20.03762618713\n  -   -   - 15.31308103373\n          - 2.369394773812\n          - 124.3899129792\n      -   - 2.369394773812\n          - -4.034968539623\n          - 20.83080340569\n      -   - 124.3899129792\n          - 20.83080340569\n          - -30.92076532995\n  -   -   - 19.05038877716\n          - 2.921211127977\n          - 142.2705130619\n      -   - 2.921211127977\n          - -6.493471507915\n          - 25.45927037319\n      -   - 142.2705130619\n          - 25.45927037319\n          - -20.03762618713\n  -   -   - 15.31308103373\n          - 2.369394773812\n          - 124.3899129792\n      -   - 2.369394773812\n          - -4.034968539624\n          - 20.83080340569\n      -   - 124.3899129792\n          - 20.83080340569\n          - -30.92076532995\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 10.44273226791\n          - 6.475979422676\n          - 57.63907853267\n      -   - 6.475979422676\n          - 34.37966124455\n          - 56.97442501698\n      -   - 57.63907853267\n          - 56.97442501698\n          - 1.719659134263\n  -   -   - 6.258067891218\n          - -0.4773313726774\n          - 46.28932429293\n      -   - -0.4773313726774\n          - -14.3215416894\n          - -4.441525949927\n      -   - 46.28932429293\n          - -4.441525949927\n          - -0.8894811141752\n  -   -   - 4.783039455404\n          - 0.1257874954058\n          - 34.53987590883\n      -   - 0.1257874954058\n          - -6.993256198462\n          - 0.956684671542\n      -   - 34.53987590883\n          - 0.956684671542\n          - -1.179917631132\n  -   -   - 42.05642503987\n          - 5.182308979952\n          - 295.7086970924\n      -   - 5.182308979952\n          - -16.8011933671\n     
     - 44.88068608775\n      -   - 295.7086970924\n          - 44.88068608775\n          - -11.16800948171\n  -   -   - 34.52716237442\n          - 3.742228341934\n          - 249.1555844695\n      -   - 3.742228341934\n          - -13.95687891579\n          - 32.4806538628\n      -   - 249.1555844695\n          - 32.4806538628\n          - -15.39308605662\n  -   -   - 30.88627308451\n          - 4.976024773829\n          - 218.8203159401\n      -   - 4.976024773829\n          - -12.55705651792\n          - 43.13446872147\n      -   - 218.8203159401\n          - 43.13446872147\n          - -16.96609172267\n  -   -   - 23.89982318472\n          - 3.566267197888\n          - 173.817334734\n      -   - 3.566267197888\n          - -9.358360214064\n          - 30.98344929428\n      -   - 173.817334734\n          - 30.98344929428\n          - -17.96992259777\n  -   -   - 19.05038877716\n          - 2.921211127977\n          - 142.2705130619\n      -   - 2.921211127977\n          - -6.493471507915\n          - 25.45927037319\n      -   - 142.2705130619\n          - 25.45927037319\n          - -20.03762618713\n  -   -   - 15.31308103373\n          - 2.369394773812\n          - 124.3899129792\n      -   - 2.369394773812\n          - -4.034968539623\n          - 20.83080340569\n      -   - 124.3899129792\n          - 20.83080340569\n          - -30.92076532995\n  -   -   - 19.05038877716\n          - 2.921211127977\n          - 142.2705130619\n      -   - 2.921211127977\n          - -6.493471507915\n          - 25.45927037319\n      -   - 142.2705130619\n          - 25.45927037319\n          - -20.03762618713\n  -   -   - 15.31308103373\n          - 2.369394773812\n          - 124.3899129792\n      -   - 2.369394773812\n          - -4.034968539623\n          - 20.83080340569\n      -   - 124.3899129792\n          - 20.83080340569\n          - -30.92076532995\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n      
    - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 10.44273226791\n          - 6.475979422676\n          - 57.63907853267\n      -   - 6.475979422676\n          - 34.37966124455\n          - 56.97442501698\n      -   - 57.63907853267\n          - 56.97442501698\n          - 1.719659134263\n  -   -   - 6.258067891218\n          - -0.4773313726774\n          - 46.28932429293\n      -   - -0.4773313726774\n          - -14.3215416894\n          - -4.441525949927\n      -   - 46.28932429293\n          - -4.441525949927\n          - -0.889481114175\n  -   -   - 4.783039455404\n          - 0.1257874954057\n          - 34.53987590883\n      -   - 0.1257874954057\n          - -6.993256198462\n          - 0.956684671542\n      -   - 34.53987590883\n          - 0.956684671542\n          - -1.179917631133\n  -   -   - 42.05642503987\n          - 5.182308979952\n          - 295.7086970924\n      -   - 5.182308979952\n          - -16.8011933671\n          - 44.88068608775\n      -   - 295.7086970924\n          - 44.88068608775\n          - -11.16800948171\n  -   -   - 34.52716237442\n          - 3.742228341934\n          - 249.1555844695\n      -   - 3.742228341934\n          - -13.95687891579\n          - 32.4806538628\n      -   - 249.1555844695\n          - 32.4806538628\n          - -15.39308605662\n  -   -   - 30.88627308451\n          - 4.976024773829\n          - 218.8203159401\n      -   - 4.976024773829\n          - -12.55705651792\n          - 43.13446872147\n      -   - 218.8203159401\n          - 43.13446872147\n          - -16.96609172267\n  -   -   - 23.89982318472\n          - 3.566267197888\n          - 173.817334734\n      -   - 3.566267197888\n          - -9.358360214064\n          - 30.98344929428\n      -   - 173.817334734\n          - 30.98344929428\n          - -17.96992259777\n  -   -   - 19.05038877716\n          - 2.921211127977\n          - 142.2705130619\n    
  -   - 2.921211127977\n          - -6.493471507915\n          - 25.45927037319\n      -   - 142.2705130619\n          - 25.45927037319\n          - -20.03762618713\n  -   -   - 15.31308103373\n          - 2.369394773812\n          - 124.3899129792\n      -   - 2.369394773812\n          - -4.034968539623\n          - 20.83080340569\n      -   - 124.3899129792\n          - 20.83080340569\n          - -30.92076532995\n  -   -   - 19.05038877716\n          - 2.921211127977\n          - 142.2705130619\n      -   - 2.921211127977\n          - -6.493471507915\n          - 25.45927037319\n      -   - 142.2705130619\n          - 25.45927037319\n          - -20.03762618713\n  -   -   - 15.31308103373\n          - 2.369394773812\n          - 124.3899129792\n      -   - 2.369394773812\n          - -4.034968539623\n          - 20.83080340569\n      -   - 124.3899129792\n          - 20.83080340569\n          - -30.92076532995\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 10.44273226791\n          - 6.475979422676\n          - 57.63907853267\n      -   - 6.475979422676\n          - 34.37966124455\n          - 56.97442501698\n      -   - 57.63907853267\n          - 56.97442501698\n          - 1.719659134262\n  -   -   - 6.258067891218\n          - -0.4773313726774\n          - 46.28932429293\n      -   - -0.4773313726774\n          - -14.3215416894\n          - -4.441525949927\n      -   - 46.28932429293\n          - -4.441525949927\n          - -0.889481114175\n  -   -   - 4.783039455404\n          - 0.1257874954058\n          - 34.53987590883\n      -   - 0.1257874954058\n          - -6.993256198462\n          - 0.956684671542\n      -   - 34.53987590883\n          - 0.956684671542\n          - -1.179917631132\n  -   -   - 14.37037076795\n          - 
4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 42.05642503987\n          - 5.182308979952\n          - 295.7086970924\n      -   - 5.182308979952\n          - -16.8011933671\n          - 44.88068608775\n      -   - 295.7086970924\n          - 44.88068608775\n          - -11.16800948171\n  -   -   - 34.52716237442\n          - 3.742228341934\n          - 249.1555844695\n      -   - 3.742228341934\n          - -13.95687891579\n          - 32.4806538628\n      -   - 249.1555844695\n          - 32.4806538628\n          - -15.39308605662\n  -   -   - 23.89982318472\n          - 3.566267197888\n          - 173.817334734\n      -   - 3.566267197888\n          - -9.358360214064\n          - 30.98344929428\n      -   - 173.817334734\n          - 30.98344929428\n          - -17.96992259777\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 4.783039455404\n          - 0.1257874954057\n          - 34.53987590883\n      -   - 0.1257874954057\n          - -6.993256198462\n          - 0.956684671542\n      -   - 34.53987590883\n          - 0.956684671542\n          - -1.179917631133\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 4.783039455404\n          - 0.1257874954057\n          - 34.53987590883\n      -   - 0.1257874954057\n          - -6.993256198462\n          - 0.956684671542\n      -   - 34.53987590883\n          - 0.956684671542\n          - -1.179917631133\n  -   -   - 
14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 14.37037076795\n          - 4.106940832789\n          - 45.77485567203\n      -   - 4.106940832789\n          - 3.781269465646\n          - 35.03465443111\n      -   - 45.77485567203\n          - 35.03465443111\n          - 68.57957805078\n  -   -   - 21.28989652643\n          - 3.211915555773\n          - 156.5687870814\n      -   - 3.211915555773\n          - -7.888244440813\n          - 27.94191248902\n      -   - 156.5687870814\n          - 27.94191248902\n          - -18.64926539005\n  -   -   - 21.28989652643\n          - 3.211915555773\n          - 156.5687870814\n      -   - 3.211915555773\n          - -7.888244440813\n          - 27.94191248902\n      -   - 156.5687870814\n          - 27.94191248902\n          - -18.64926539005\n  -   -   - 42.05642503987\n          - 5.182308979952\n          - 295.7086970924\n      -   - 5.182308979952\n          - -16.8011933671\n          - 44.88068608775\n      -   - 295.7086970924\n          - 44.88068608775\n          - -11.16800948171\n  -   -   - 34.52716237442\n          - 3.742228341934\n          - 249.1555844695\n      -   - 3.742228341934\n          - -13.95687891579\n          - 32.4806538628\n      -   - 249.1555844695\n          - 32.4806538628\n          - -15.39308605662\n  -   -   - 21.28989652643\n          - 3.211915555773\n          - 156.5687870814\n      -   - 3.211915555773\n          - -7.888244440814\n          - 27.94191248902\n      -   - 156.5687870814\n          - 27.94191248902\n          - 
-18.64926539005\n  -   -   - 21.28989652643\n          - 3.211915555773\n          - 156.5687870814\n      -   - 3.211915555773\n          - -7.888244440813\n          - 27.94191248902\n      -   - 156.5687870814\n          - 27.94191248902\n          - -18.64926539005\n  -   -   - 13.22112788651\n          - 1.569537501602\n          - 137.5825625705\n      -   - 1.569537501602\n          - -4.091404567033\n          - 14.28797929438\n      -   - 137.5825625705\n          - 14.28797929438\n          - -69.36780402294\n  -   -   - 10.59582926181\n          - 3.961203981524\n          - 63.6129919848\n      -   - 3.961203981524\n          - 13.8089243593\n          - 34.67215199382\n      -   - 63.6129919848\n          - 34.67215199382\n          - 3.402916211\n  -   -   - 21.28989652643\n          - 3.211915555773\n          - 156.5687870814\n      -   - 3.211915555773\n          - -7.888244440813\n          - 27.94191248902\n      -   - 156.5687870814\n          - 27.94191248902\n          - -18.64926539005\n  -   -   - 21.28989652643\n          - 3.211915555773\n          - 156.5687870814\n      -   - 3.211915555773\n          - -7.888244440814\n          - 27.94191248902\n      -   - 156.5687870814\n          - 27.94191248902\n          - -18.64926539005\n  -   -   - 42.05642503987\n          - 5.182308979952\n          - 295.7086970924\n      -   - 5.182308979952\n          - -16.8011933671\n          - 44.88068608775\n      -   - 295.7086970924\n          - 44.88068608775\n          - -11.16800948171\n  -   -   - 34.52716237442\n          - 3.742228341934\n          - 249.1555844695\n      -   - 3.742228341934\n          - -13.95687891579\n          - 32.4806538628\n      -   - 249.1555844695\n          - 32.4806538628\n          - -15.39308605662\n  -   -   - 13.22112788651\n          - 1.569537501602\n          - 137.5825625705\n      -   - 1.569537501602\n          - -4.091404567032\n          - 14.28797929438\n      -   - 137.5825625705\n          - 
14.28797929438\n          - -69.36780402294\n  -   -   - 10.59582926181\n          - 3.961203981524\n          - 63.6129919848\n      -   - 3.961203981524\n          - 13.8089243593\n          - 34.67215199382\n      -   - 63.6129919848\n          - 34.67215199382\n          - 3.402916211\n  -   -   - 21.28989652643\n          - 3.211915555773\n          - 156.5687870814\n      -   - 3.211915555773\n          - -7.888244440813\n          - 27.94191248902\n      -   - 156.5687870814\n          - 27.94191248902\n          - -18.64926539005\n  -   -   - 21.28989652643\n          - 3.211915555773\n          - 156.5687870814\n      -   - 3.211915555773\n          - -7.888244440813\n          - 27.94191248902\n      -   - 156.5687870814\n          - 27.94191248902\n          - -18.64926539005\nx_candidate_uncentered:\n  - 0.149883507892\n  - 0.008098080768719\n  - 0.009146244784311\n"
  },
  {
    "path": "tests/optimagic/optimizers/_pounders/test_linear_subsolvers.py",
    "content": "\"\"\"Test suite for linear trust-region subsolvers.\"\"\"\n\nimport math\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_array_almost_equal as aaae\n\nfrom optimagic.optimizers._pounders.linear_subsolvers import (\n    LinearModel,\n    improve_geomtery_trsbox_linear,\n    minimize_trsbox_linear,\n)\n\n\n@pytest.mark.parametrize(\n    \"model_gradient, lower_bounds, upper_bounds, delta, expected\",\n    [\n        (\n            np.array([1.0, 0.0, 1.0]),\n            -np.ones(3),\n            np.ones(3),\n            2.0,\n            np.array([-1.0, 0.0, -1.0]),\n        ),\n        (\n            np.array([0.00028774, 0.00763968, 0.01217268]),\n            -np.ones(3),\n            np.ones(3),\n            9.5367431640625e-05,\n            np.array([-1.90902854e-06, -5.06859218e-05, -8.07603861e-05]),\n        ),\n        (\n            np.array([0.00028774, 0.00763968, 0.01217268]),\n            np.array([0, -1, -1]),\n            np.ones(3),\n            0.1,\n            np.array([0.0, -5.31586927e-02, -8.47003742e-02]),\n        ),\n        (\n            np.arange(5) * 0.1,\n            -np.ones(5),\n            np.ones(5),\n            0.1,\n            np.array([0.0, -0.01825742, -0.03651484, -0.05477226, -0.07302967]),\n        ),\n        (\n            np.arange(4, -1, -1) * 0.1,\n            -np.ones(5),\n            np.ones(5),\n            0.1,\n            np.array([-0.07302967, -0.05477226, -0.03651484, -0.01825742, 0]),\n        ),\n        (\n            np.arange(5) * 0.1,\n            np.array([-1, -1, 0, -1, -1]),\n            np.array([1, 1, 0.2, 0.2, 1]),\n            0.1,\n            np.array([0.0, -1.96116135e-02, 0.0, -5.88348405e-02, -7.84464541e-02]),\n        ),\n        (\n            np.arange(4, -1, -1) * 0.1,\n            np.array([-1, -1, -1, -1, 0]),\n            np.array([0.3, 0.3, 1, 1, 1]),\n            0.1,\n            np.array([-0.07302967, -0.05477226, -0.03651484, -0.01825742, 
0.0]),\n        ),\n    ],\n)\ndef test_trsbox_linear(model_gradient, lower_bounds, upper_bounds, delta, expected):\n    linear_model = LinearModel(linear_terms=model_gradient)\n\n    x_out = minimize_trsbox_linear(linear_model, lower_bounds, upper_bounds, delta)\n    aaae(x_out, expected)\n\n\n@pytest.mark.parametrize(\n    \"x_center, c_term, model_gradient, lower_bounds, upper_bounds, delta, expected\",\n    [\n        (\n            np.array([0.0, 0.0]),\n            -1.0,\n            np.array([1.0, -1.0]),\n            np.array([-2.0, -2.0]),\n            np.array([1.0, 2.0]),\n            2.0,\n            np.array([-math.sqrt(2.0), math.sqrt(2.0)]),\n        ),\n        (\n            np.array([0.0, 0.0]),\n            -1.0,\n            np.array([1.0, -1.0]),\n            np.array([-2.0, -2.0]),\n            np.array([1.0, 2.0]),\n            5.0,\n            np.array([-2.0, 2.0]),\n        ),\n        (\n            np.array([0.0, 0.0]) + 1,\n            3.0,\n            np.array([1.0, -1.0]),\n            np.array([-2.0, -2.0]) + 1,\n            np.array([1.0, 2.0]) + 1,\n            5.0,\n            np.array([1.0, -2.0]) + 1,\n        ),\n        (\n            np.array([0.0, 0.0]),\n            -1.0,\n            np.array([-1.0, -1.0]),\n            np.array([-2.0, -2.0]),\n            np.array([0.1, 0.9]),\n            math.sqrt(2.0),\n            np.array([0.1, 0.9]),\n        ),\n        (\n            np.array([0.0, 0.0, 0.0]),\n            -1.0,\n            np.array([-1.0, -1.0, -1.0]),\n            np.array([-2.0, -2.0, -2.0]),\n            np.array([0.9, 0.1, 5.0]),\n            math.sqrt(3.0),\n            np.array([0.9, 0.1, math.sqrt(3.0 - 0.81 - 0.01)]),\n        ),\n        (\n            np.array([0.0, 0.0]),\n            0.0,\n            np.array([1e-15, -1.0]),\n            np.array([-2.0, -2.0]),\n            np.array([1.0, 2.0]),\n            5.0,\n            np.array([0.0, 2.0]),\n        ),\n        (\n            
np.array([0.0, 0.0]),\n            0.0,\n            np.array([1e-15, 0.0]),\n            np.array([-2.0, -2.0]),\n            np.array([1.0, 2.0]),\n            5.0,\n            np.array([0.0, 0.0]),\n        ),\n    ],\n)\ndef test_trsbox_geometry(\n    x_center,\n    c_term,\n    model_gradient,\n    lower_bounds,\n    upper_bounds,\n    delta,\n    expected,\n):\n    linear_model = LinearModel(intercept=c_term, linear_terms=model_gradient)\n\n    x_out = improve_geomtery_trsbox_linear(\n        x_center,\n        linear_model,\n        lower_bounds,\n        upper_bounds,\n        delta,\n    )\n    aaae(x_out, expected)\n"
  },
  {
    "path": "tests/optimagic/optimizers/_pounders/test_pounders_history.py",
    "content": "\"\"\"Test the history class for least-squares optimizers.\"\"\"\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_array_almost_equal as aaae\n\nfrom optimagic.optimizers._pounders.pounders_history import LeastSquaresHistory\n\nENTRIES = [\n    (np.arange(3), [np.arange(5)]),\n    ([np.arange(3)], list(range(5))),\n    (np.arange(3).reshape(1, 3), np.arange(5).reshape(1, 5)),\n]\n\nTEST_CASES = []\nfor entries in ENTRIES:\n    for is_center in True, False:\n        TEST_CASES.append((entries, is_center))\n\n\n@pytest.mark.parametrize(\"entries, is_center\", TEST_CASES)\ndef test_add_entries_not_initialized(entries, is_center):\n    history = LeastSquaresHistory()\n\n    if is_center:\n        c_info = {\"x\": np.zeros(3), \"residuals\": np.zeros(5), \"radius\": 1}\n        history.add_centered_entries(*entries, c_info)\n    else:\n        history.add_entries(*entries)\n\n    xs, residuals, critvals = history.get_entries()\n    xs_sinlge = history.get_xs()\n    residuals_sinlge = history.get_residuals()\n    critvals_sinlge = history.get_critvals()\n\n    for entry in xs, residuals, critvals:\n        assert isinstance(entry, np.ndarray)\n\n    aaae(xs, np.arange(3).reshape(1, 3))\n    aaae(xs_sinlge, np.arange(3).reshape(1, 3))\n    aaae(residuals, np.arange(5).reshape(1, 5))\n    aaae(residuals_sinlge, np.arange(5).reshape(1, 5))\n    aaae(critvals, np.array([30.0]))\n    aaae(critvals_sinlge, np.array([30.0]))\n\n\n@pytest.mark.parametrize(\"entries, is_center\", TEST_CASES)\ndef test_add_entries_initialized_with_space(entries, is_center):\n    history = LeastSquaresHistory()\n    history.add_entries(np.ones((4, 3)), np.zeros((4, 5)))\n\n    if is_center:\n        c_info = {\"x\": np.zeros(3), \"residuals\": np.zeros(5), \"radius\": 1}\n        history.add_centered_entries(*entries, c_info)\n    else:\n        history.add_entries(*entries)\n\n    xs, residuals, critvals = history.get_entries(index=-1)\n    xs_sinlge = 
history.get_xs(index=-1)\n    residuals_sinlge = history.get_residuals(index=-1)\n    critvals_sinlge = history.get_critvals(index=-1)\n\n    for entry in xs, residuals:\n        assert isinstance(entry, np.ndarray)\n\n    aaae(xs, np.arange(3))\n    aaae(xs_sinlge, np.arange(3))\n    aaae(residuals, np.arange(5))\n    aaae(residuals_sinlge, np.arange(5))\n    assert critvals == 30\n    assert critvals_sinlge == 30\n\n\ndef test_add_entries_initialized_extension_needed():\n    history = LeastSquaresHistory()\n    history.add_entries(np.ones((4, 3)), np.zeros((4, 5)))\n    history.xs = history.xs[:5]\n    history.residuals = history.residuals[:5]\n    history.critvals = history.critvals[:5]\n\n    history.add_entries(np.arange(12).reshape(4, 3), np.arange(20).reshape(4, 5))\n\n    assert len(history.xs) == 10\n    assert len(history.residuals) == 10\n    assert len(history.critvals) == 10\n\n    xs, residuals, _ = history.get_entries(index=-1)\n    xs_sinlge = history.get_xs(index=-1)\n    residuals_sinlge = history.get_residuals(index=-1)\n\n    for entry in xs, xs_sinlge, residuals, residuals_sinlge:\n        assert isinstance(entry, np.ndarray)\n\n    assert history.get_n_fun() == 8\n\n\ndef test_add_centered_entries():\n    history = LeastSquaresHistory()\n    history.add_entries(np.ones((2, 2)), np.ones((2, 4)))\n    center_info = {\n        \"x\": history.get_xs(index=-1),\n        \"residuals\": history.get_residuals(index=-1),\n        \"radius\": 0.5,\n    }\n    history.add_centered_entries(\n        xs=np.ones(2), residuals=np.ones(4) * 2, center_info=center_info\n    )\n\n    xs, residuals, critvals = history.get_entries(index=-1)\n\n    aaae(xs, np.array([1.5, 1.5]))\n    aaae(residuals, np.array([3, 3, 3, 3]))\n    assert critvals == 36\n    assert history.get_n_fun() == 3\n\n\ndef test_get_centered_entries():\n    history = LeastSquaresHistory()\n    history.add_entries(np.ones((4, 3)), np.ones((4, 5)))\n    center_info = {\n        \"x\": 
np.arange(3),\n        \"residuals\": np.arange(5),\n        \"radius\": 0.25,\n    }\n\n    xs, residuals, critvals = history.get_centered_entries(\n        center_info=center_info, index=-1\n    )\n\n    aaae(xs, np.array([4, 0, -4]))\n    aaae(residuals, np.arange(1, -4, -1))\n    assert critvals == 15\n    assert history.get_n_fun() == 4\n"
  },
  {
    "path": "tests/optimagic/optimizers/_pounders/test_pounders_unit.py",
    "content": "\"\"\"Test the auxiliary functions of the pounders algorithm.\"\"\"\n\nfrom collections import namedtuple\nfrom functools import partial\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nimport yaml\nfrom numpy.testing import assert_array_almost_equal as aaae\n\nfrom optimagic.optimizers._pounders.pounders_auxiliary import (\n    add_geomtery_points_to_make_main_model_fully_linear,\n    create_initial_residual_model,\n    create_main_from_residual_model,\n    evaluate_residual_model,\n    find_affine_points,\n    fit_residual_model,\n    get_feature_matrices_residual_model,\n    update_main_model_with_new_accepted_x,\n    update_residual_model,\n    update_residual_model_with_new_accepted_x,\n)\nfrom optimagic.optimizers._pounders.pounders_history import LeastSquaresHistory\n\nFIXTURES_DIR = Path(__file__).parent / \"fixtures\"\n\n\ndef read_yaml(path):\n    with open(rf\"{path}\") as file:\n        data = yaml.full_load(file)\n\n    return data\n\n\n# ======================================================================================\n# Fixtures\n# ======================================================================================\n\n\n@pytest.fixture()\ndef criterion():\n    data = pd.read_csv(FIXTURES_DIR / \"pounders_example_data.csv\")\n    endog = np.asarray(data[\"y\"])\n    exog = np.asarray(data[\"t\"])\n\n    def func(x: np.ndarray, exog: np.ndarray, endog: np.ndarray) -> np.ndarray:\n        \"\"\"User provided residual function.\"\"\"\n        return endog - np.exp(-x[0] * exog) / (x[1] + x[2] * exog)\n\n    return partial(func, exog=exog, endog=endog)\n\n\n@pytest.fixture()\ndef data_create_initial_residual_model():\n    test_data = read_yaml(FIXTURES_DIR / \"update_initial_residual_model.yaml\")\n    history = LeastSquaresHistory()\n    ResidualModel = namedtuple(\n        \"ResidualModel\", [\"intercepts\", \"linear_terms\", \"square_terms\"]\n    )\n\n    history.add_entries(\n        
np.array(test_data[\"x_candidate\"]),\n        np.array(test_data[\"residuals_candidate\"]),\n    )\n    accepted_index = 0\n    delta = 0.1\n\n    inputs_dict = {\"history\": history, \"accepted_index\": accepted_index, \"delta\": delta}\n\n    residual_model_expected = ResidualModel(\n        intercepts=test_data[\"residual_model_expected\"][\"intercepts\"],\n        linear_terms=test_data[\"residual_model_expected\"][\"linear_terms\"],\n        square_terms=test_data[\"residual_model_expected\"][\"square_terms\"],\n    )\n\n    return inputs_dict, residual_model_expected\n\n\n@pytest.fixture()\ndef data_update_residual_model():\n    test_data = read_yaml(FIXTURES_DIR / \"update_residual_model.yaml\")\n\n    ResidualModel = namedtuple(\n        \"ResidualModel\", [\"intercepts\", \"linear_terms\", \"square_terms\"]\n    )\n\n    residual_model = ResidualModel(\n        intercepts=None,\n        linear_terms=np.array(test_data[\"linear_terms\"]),\n        square_terms=np.array(test_data[\"square_terms\"]),\n    )\n    coefficients_to_add = {\n        \"linear_terms\": np.array(test_data[\"coefficients_linear_terms\"]).T,\n        \"square_terms\": np.array(test_data[\"coefficients_square_terms\"]),\n    }\n\n    inputs_dict = {\n        \"residual_model\": residual_model,\n        \"coefficients_to_add\": coefficients_to_add,\n        \"delta\": test_data[\"delta\"],\n        \"delta_old\": test_data[\"delta_old\"],\n    }\n\n    expected_dict = {\n        \"linear_terms\": test_data[\"linear_terms_expected\"],\n        \"square_terms\": test_data[\"square_terms_expected\"],\n    }\n\n    return inputs_dict, expected_dict\n\n\n@pytest.fixture()\ndef data_update_main_from_residual_model():\n    test_data = read_yaml(FIXTURES_DIR / \"update_main_from_residual_model.yaml\")\n\n    ResidualModel = namedtuple(\n        \"ResidualModel\", [\"intercepts\", \"linear_terms\", \"square_terms\"]\n    )\n    MainModel = namedtuple(\"MainModel\", [\"linear_terms\", 
\"square_terms\"])\n\n    residual_model = ResidualModel(\n        intercepts=np.array(test_data[\"residuals\"]),\n        linear_terms=np.array(test_data[\"linear_terms_residual_model\"]),\n        square_terms=np.array(test_data[\"square_terms_residual_model\"]),\n    )\n\n    main_model_expected = MainModel(\n        linear_terms=test_data[\"linear_terms_main_model_expected\"],\n        square_terms=test_data[\"square_terms_main_model_expected\"],\n    )\n\n    return residual_model, main_model_expected\n\n\n@pytest.fixture()\ndef data_update_residual_model_with_new_accepted_x():\n    test_data = read_yaml(\n        FIXTURES_DIR / \"update_residual_model_with_new_accepted_x.yaml\"\n    )\n\n    ResidualModel = namedtuple(\n        \"ResidualModel\", [\"intercepts\", \"linear_terms\", \"square_terms\"]\n    )\n    inputs_dict = {}\n    residual_model_expected = {}\n\n    residual_model = ResidualModel(\n        intercepts=np.array(test_data[\"residuals\"]),\n        linear_terms=np.array(test_data[\"linear_terms\"]),\n        square_terms=np.array(test_data[\"square_terms\"]),\n    )\n\n    inputs_dict[\"residual_model\"] = residual_model\n    inputs_dict[\"x_candidate\"] = (\n        np.array(test_data[\"x_candidate_uncentered\"]) - np.array(test_data[\"best_x\"])\n    ) / test_data[\"delta\"]\n\n    residual_model_expected = ResidualModel(\n        intercepts=test_data[\"residuals_expected\"],\n        linear_terms=test_data[\"linear_terms_expected\"],\n        square_terms=np.array(test_data[\"square_terms\"]),\n    )\n\n    return inputs_dict, residual_model_expected\n\n\n@pytest.fixture()\ndef data_update_main_model_with_new_accepted_x():\n    test_data = read_yaml(FIXTURES_DIR / \"update_main_model_with_new_accepted_x.yaml\")\n\n    MainModel = namedtuple(\"MainModel\", [\"linear_terms\", \"square_terms\"])\n\n    inputs_dict = {}\n    expected_dict = {}\n\n    main_model = MainModel(\n        linear_terms=np.array(test_data[\"linear_terms\"]),\n        
square_terms=np.array(test_data[\"square_terms\"]),\n    )\n\n    inputs_dict[\"main_model\"] = main_model\n    inputs_dict[\"x_candidate\"] = (\n        np.array(test_data[\"x_candidate_uncentered\"]) - np.array(test_data[\"best_x\"])\n    ) / test_data[\"delta\"]\n\n    expected_dict[\"linear_terms\"] = test_data[\"linear_terms_expected\"]\n\n    return inputs_dict, expected_dict\n\n\n@pytest.fixture(\n    params=[\n        \"zero_i\",\n        \"zero_ii\",\n        \"zero_iii\",\n        \"zero_iv\",\n        \"nonzero_i\",\n        \"nonzero_ii\",\n        \"nonzero_iii\",\n    ]\n)\ndef data_find_affine_points(request):\n    test_data = read_yaml(FIXTURES_DIR / f\"find_affine_points_{request.param}.yaml\")\n\n    history = LeastSquaresHistory()\n    history_x = np.array(test_data[\"history_x\"])\n    history.add_entries(history_x, np.zeros(history_x.shape))\n\n    inputs_dict = {\n        \"history\": history,\n        \"x_accepted\": np.array(test_data[\"x_accepted\"]),\n        \"model_improving_points\": np.array(test_data[\"model_improving_points\"]),\n        \"project_x_onto_null\": test_data[\"project_x_onto_null\"],\n        \"delta\": test_data[\"delta\"],\n        \"theta1\": test_data[\"theta1\"],\n        \"c\": test_data[\"c\"],\n        \"model_indices\": np.array(test_data[\"model_indices\"]),\n        \"n_modelpoints\": test_data[\"n_modelpoints\"],\n    }\n\n    expected_dict = {\n        \"model_improving_points\": test_data[\"model_improving_points_expected\"],\n        \"model_indices\": test_data[\"model_indices_expected\"],\n        \"n_modelpoints\": test_data[\"n_modelpoints_expected\"],\n    }\n\n    return inputs_dict, expected_dict\n\n\n@pytest.fixture(params=[\"i\", \"ii\"])\ndef data_add_points_until_main_model_fully_linear(request, criterion):\n    test_data = read_yaml(\n        FIXTURES_DIR / f\"add_points_until_main_model_fully_linear_{request.param}.yaml\"\n    )\n\n    history = LeastSquaresHistory()\n    n = 3\n    
n_modelpoints = test_data[\"n_modelpoints\"]\n    history.add_entries(\n        np.array(test_data[\"history_x\"])[: -(n - n_modelpoints)],\n        np.array(test_data[\"history_criterion\"])[: -(n - n_modelpoints)],\n    )\n\n    MainModel = namedtuple(\"MainModel\", [\"linear_terms\", \"square_terms\"])\n    main_model = MainModel(\n        linear_terms=np.array(test_data[\"linear_terms\"]),\n        square_terms=np.array(test_data[\"square_terms\"]),\n    )\n\n    index_best_x = test_data[\"index_best_x\"]\n    x_accepted = test_data[\"history_x\"][index_best_x]\n\n    def batch_fun(x_list, n_cores):\n        return [criterion(x) for x in x_list]\n\n    inputs_dict = {\n        \"history\": history,\n        \"main_model\": main_model,\n        \"model_improving_points\": np.array(test_data[\"model_improving_points\"]),\n        \"model_indices\": np.array(test_data[\"model_indices\"]),\n        \"x_accepted\": np.array(x_accepted),\n        \"n_modelpoints\": n_modelpoints,\n        \"delta\": test_data[\"delta\"],\n        \"criterion\": criterion,\n        \"lower_bounds\": None,\n        \"upper_bounds\": None,\n        \"batch_fun\": batch_fun,\n    }\n\n    expected_dict = {\n        \"model_indices\": test_data[\"model_indices_expected\"],\n        \"history_x\": test_data[\"history_x_expected\"],\n    }\n\n    return inputs_dict, expected_dict\n\n\n@pytest.fixture()\ndef data_get_interpolation_matrices_residual_model():\n    test_data = read_yaml(\n        FIXTURES_DIR / \"get_interpolation_matrices_residual_model.yaml\"\n    )\n\n    history = LeastSquaresHistory()\n    history_x = np.array(test_data[\"history_x\"])\n    history.add_entries(history_x, np.zeros(history_x.shape))\n\n    n_params = 3\n    n_maxinterp = 2 * n_params + 1\n    n_modelpoints = 7\n\n    inputs_dict = {\n        \"history\": history,\n        \"x_accepted\": np.array(test_data[\"x_accepted\"]),\n        \"model_indices\": np.array(test_data[\"model_indices\"]),\n        
\"delta\": test_data[\"delta\"],\n        \"c2\": 10,\n        \"theta2\": 1e-4,\n        \"n_maxinterp\": n_maxinterp,\n    }\n\n    expected_dict = {\n        \"x_sample_monomial_basis\": np.array(\n            test_data[\"x_sample_monomial_basis_expected\"]\n        )[: n_params + 1, : n_params + 1],\n        \"monomial_basis\": np.array(test_data[\"monomial_basis_expected\"])[\n            :n_modelpoints\n        ],\n        \"basis_null_space\": test_data[\"basis_null_space_expected\"],\n        \"lower_triangular\": np.array(test_data[\"lower_triangular_expected\"])[\n            :, n_params + 1 : n_maxinterp\n        ],\n        \"n_modelpoints\": test_data[\"n_modelpoints_expected\"],\n    }\n\n    return inputs_dict, expected_dict\n\n\n@pytest.fixture(params=[\"4\", \"7\"])\ndef data_evaluate_residual_model(request):\n    test_data = read_yaml(FIXTURES_DIR / f\"interpolate_f_iter_{request.param}.yaml\")\n\n    history = LeastSquaresHistory()\n    history.add_entries(\n        np.array(test_data[\"history_x\"]),\n        np.array(test_data[\"history_criterion\"]),\n    )\n\n    ResidualModel = namedtuple(\n        \"ResidualModel\", [\"intercepts\", \"linear_terms\", \"square_terms\"]\n    )\n    residual_model = ResidualModel(\n        intercepts=np.array(test_data[\"residuals\"]),\n        linear_terms=np.array(test_data[\"linear_terms_residual_model\"]),\n        square_terms=np.array(test_data[\"square_terms_residual_model\"]),\n    )\n\n    x_accepted = np.array(test_data[\"x_accepted\"])\n    model_indices = np.array(test_data[\"model_indices\"])\n    n_modelpoints = test_data[\"n_modelpoints\"]\n    delta_old = test_data[\"delta_old\"]\n\n    center_info = {\"x\": x_accepted, \"radius\": delta_old}\n    centered_xs = history.get_centered_xs(\n        center_info, index=model_indices[:n_modelpoints]\n    )\n\n    center_info = {\"residuals\": residual_model.intercepts}\n    centered_residuals = history.get_centered_residuals(\n        center_info, 
index=model_indices\n    )\n\n    inputs_dict = {\n        \"centered_xs\": centered_xs,\n        \"centered_residuals\": centered_residuals,\n        \"residual_model\": residual_model,\n    }\n\n    expected_dict = {\n        \"y_residuals\": test_data[\"f_interpolated_expected\"],\n    }\n\n    return inputs_dict, expected_dict\n\n\n@pytest.fixture()\ndef data_fit_residual_model():\n    test_data = read_yaml(FIXTURES_DIR / \"get_coefficients_residual_model.yaml\")\n\n    n_params = 3\n    n_maxinterp = 2 * n_params + 1\n    n_modelpoints = 7\n\n    inputs_dict = {\n        \"m_mat\": np.array(test_data[\"x_sample_monomial_basis\"])[\n            : n_params + 1, : n_params + 1\n        ],\n        \"n_mat\": np.array(test_data[\"monomial_basis\"])[:n_modelpoints],\n        \"z_mat\": np.array(test_data[\"basis_null_space\"]),\n        \"n_z_mat\": np.array(test_data[\"lower_triangular\"])[\n            :, n_params + 1 : n_maxinterp\n        ],\n        \"y_residuals\": np.array(test_data[\"f_interpolated\"]),\n        \"n_modelpoints\": test_data[\"n_modelpoints\"],\n    }\n\n    expected_coefficients_dict = {\n        \"linear_terms\": np.array(test_data[\"linear_terms_expected\"]).T,\n        \"square_terms\": np.array(test_data[\"square_terms_expected\"]),\n    }\n\n    return inputs_dict, expected_coefficients_dict\n\n\n# ======================================================================================\n# Test cases\n# ======================================================================================\n\n\n@pytest.mark.skip(reason=\"refactoring\")\ndef test_update_initial_residual_model(data_update_initial_residual_model):\n    inputs, residual_model_expected = data_update_initial_residual_model\n\n    residual_model_out = create_initial_residual_model(**inputs)\n\n    aaae(residual_model_out[\"intercepts\"], residual_model_expected[\"intercepts\"])\n    aaae(residual_model_out[\"linear_terms\"], residual_model_expected[\"linear_terms\"])\n\n\ndef 
test_update_residual_model(data_update_residual_model):\n    inputs, expected = data_update_residual_model\n\n    residual_model_out = update_residual_model(**inputs)\n\n    aaae(\n        residual_model_out.linear_terms,\n        expected[\"linear_terms\"],\n    )\n    aaae(\n        residual_model_out.square_terms,\n        expected[\"square_terms\"],\n    )\n\n\ndef test_update_main_from_residual_model(data_update_main_from_residual_model):\n    residual_model, main_model_expected = data_update_main_from_residual_model\n\n    main_model_out = create_main_from_residual_model(\n        residual_model, multiply_square_terms_with_intercepts=True\n    )\n\n    aaae(\n        main_model_out.linear_terms,\n        main_model_expected.linear_terms,\n    )\n    aaae(\n        main_model_out.square_terms,\n        main_model_expected.square_terms,\n        decimal=3,\n    )\n\n\ndef test_update_residual_model_with_new_accepted_x(\n    data_update_residual_model_with_new_accepted_x,\n):\n    (\n        inputs,\n        residual_model_expected,\n    ) = data_update_residual_model_with_new_accepted_x\n\n    residual_model_out = update_residual_model_with_new_accepted_x(**inputs)\n\n    aaae(residual_model_out.intercepts, residual_model_expected.intercepts)\n    aaae(residual_model_out.linear_terms, residual_model_expected.linear_terms)\n\n\n@pytest.mark.xfail(reason=\"Known rounding differences between C and Python.\")\ndef test_update_main_model_with_new_accepted_x(\n    data_update_main_model_with_new_accepted_x,\n):\n    (\n        inputs,\n        main_model_expected,\n    ) = data_update_main_model_with_new_accepted_x\n\n    main_model_out = update_main_model_with_new_accepted_x(**inputs)\n\n    aaae(main_model_out.linear_terms, main_model_expected.linear_terms)\n\n\ndef test_find_affine_points(data_find_affine_points):\n    inputs, expected = data_find_affine_points\n\n    (\n        model_improving_points_out,\n        model_indices_out,\n        n_modelpoints_out,\n  
      project_x_onto_null_out,\n    ) = find_affine_points(**inputs)\n\n    aaae(\n        model_improving_points_out,\n        expected[\"model_improving_points\"],\n    )\n    aaae(model_indices_out, expected[\"model_indices\"])\n    assert np.allclose(n_modelpoints_out, expected[\"n_modelpoints\"])\n    assert np.allclose(project_x_onto_null_out, True)\n\n\ndef test_add_points_until_main_model_fully_linear(\n    data_add_points_until_main_model_fully_linear,\n):\n    inputs, expected = data_add_points_until_main_model_fully_linear\n    n = 3\n\n    (\n        history_out,\n        model_indices_out,\n    ) = add_geomtery_points_to_make_main_model_fully_linear(**inputs, n_cores=1)\n\n    aaae(model_indices_out, expected[\"model_indices\"])\n    for index_added in range(n - inputs[\"n_modelpoints\"], 0, -1):\n        aaae(\n            history_out.get_xs(index=-index_added),\n            expected[\"history_x\"][-index_added],\n        )\n\n\ndef test_get_interpolation_matrices_residual_model(\n    data_get_interpolation_matrices_residual_model,\n):\n    inputs, expected = data_get_interpolation_matrices_residual_model\n    (\n        x_sample_monomial_basis,\n        monomial_basis,\n        basis_null_space,\n        lower_triangular,\n        n_modelpoints,\n    ) = get_feature_matrices_residual_model(**inputs)\n\n    aaae(x_sample_monomial_basis, expected[\"x_sample_monomial_basis\"])\n    aaae(monomial_basis, expected[\"monomial_basis\"])\n    aaae(basis_null_space, expected[\"basis_null_space\"])\n    aaae(lower_triangular, expected[\"lower_triangular\"])\n    assert np.allclose(n_modelpoints, expected[\"n_modelpoints\"])\n\n\ndef test_evaluate_residual_model(data_evaluate_residual_model):\n    inputs, expected = data_evaluate_residual_model\n    y_residuals = evaluate_residual_model(**inputs)\n\n    aaae(y_residuals, expected[\"y_residuals\"])\n\n\ndef test_fit_residual_model(data_fit_residual_model):\n    inputs, expected_coefficients = 
data_fit_residual_model\n\n    coefficients_to_add = fit_residual_model(**inputs)\n\n    aaae(\n        coefficients_to_add[\"linear_terms\"],\n        expected_coefficients[\"linear_terms\"],\n    )\n    aaae(\n        coefficients_to_add[\"square_terms\"],\n        expected_coefficients[\"square_terms\"],\n    )\n"
  },
  {
    "path": "tests/optimagic/optimizers/_pounders/test_quadratic_subsolvers.py",
    "content": "\"\"\"Test various solvers for quadratic trust-region subproblems.\"\"\"\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_array_almost_equal as aaae\n\nfrom optimagic.optimizers._pounders._conjugate_gradient import (\n    minimize_trust_cg,\n)\nfrom optimagic.optimizers._pounders._steihaug_toint import (\n    minimize_trust_stcg,\n)\nfrom optimagic.optimizers._pounders._trsbox import minimize_trust_trsbox\nfrom optimagic.optimizers._pounders.bntr import (\n    bntr,\n)\nfrom optimagic.optimizers._pounders.gqtpar import (\n    gqtpar,\n)\nfrom optimagic.optimizers._pounders.pounders_auxiliary import MainModel\n\n# ======================================================================================\n# Subsolver BNTR\n# ======================================================================================\n\nTEST_CASES_BNTR = [\n    (\n        np.array([0.0002877431832243, 0.00763968126032, 0.01217268029151]),\n        np.array(\n            [\n                [\n                    4.0080360351800763e00,\n                    1.6579091056425378e02,\n                    1.7322297746691254e02,\n                ],\n                [\n                    1.6579091056425378e02,\n                    1.6088016292793940e04,\n                    1.1041403355728811e04,\n                ],\n                [\n                    1.7322297746691254e02,\n                    1.1041403355728811e04,\n                    9.2992625728417297e03,\n                ],\n            ]\n        ),\n        -np.ones(3),\n        np.ones(3),\n        np.array([0.000122403, 3.92712e-06, -8.2519e-06]),\n    ),\n    (\n        np.array([7.898833044695e-06, 254.9676549378, 0.0002864050095122]),\n        np.array(\n            [\n                [3.97435226e00, 1.29126446e02, 1.90424789e02],\n                [1.29126446e02, 1.08362658e04, 9.05024598e03],\n                [1.90424789e02, 9.05024598e03, 1.06395102e04],\n            ]\n        ),\n        
np.array([-1.0, 0, -1.0]),\n        np.ones(3),\n        np.array([-4.89762e-06, 0.0, 6.0738e-08]),\n    ),\n    (\n        np.array([0.000208896, 0.040137, 0.0237668]),\n        np.array(\n            [\n                [\n                    8.6267971128257614e-01,\n                    3.3589357331133463e01,\n                    3.8550834275262481e01,\n                ],\n                [\n                    3.3589357331133463e01,\n                    4.0625660472990171e03,\n                    2.7006581320776222e03,\n                ],\n                [\n                    3.8550834275262481e01,\n                    2.7006581320776222e03,\n                    2.3157072223295277e03,\n                ],\n            ]\n        ),\n        np.array([-1.0, -1.0, -1.0]),\n        np.ones(3),\n        np.array([0.000404701, -8.56315e-06, -7.01394e-06]),\n    ),\n    (\n        np.array([1053.998577258, -1768.195151975, 1091.754813306]),\n        np.array(\n            [\n                [\n                    5.1009001863913858e02,\n                    -2.9142602235646069e02,\n                    2.4000221805201900e02,\n                ],\n                [\n                    -2.9142602235646069e02,\n                    1.3922341317778117e04,\n                    5.7863734667132694e03,\n                ],\n                [\n                    2.4000221805201900e02,\n                    5.7863734667132694e03,\n                    1.5911148658889811e03,\n                ],\n            ],\n        ),\n        np.array([-1.0, -1.0, -1.0]),\n        np.ones(3),\n        np.array([-1, 0.52169, -1]),\n    ),\n    (\n        np.array([-191889.2320478, -1002015.908232, -573072.9226335]),\n        np.array(\n            [\n                [\n                    1.1012704153339069e07,\n                    4.9533363163771488e07,\n                    2.9628266883962810e07,\n                ],\n                [\n                    4.9533363163771488e07,\n                 
   2.2267942225630835e08,\n                    1.3303758212303287e08,\n                ],\n                [\n                    2.9628266883962810e07,\n                    1.3303758212303287e08,\n                    7.9554367206848219e07,\n                ],\n            ],\n        ),\n        np.array([-1.0, -1.0, -1.0]),\n        np.ones(3),\n        np.array([-1, 0.148669, 0.131015]),\n    ),\n    (\n        np.array([1076.73, -4802.74, 828.249]),\n        np.array(\n            [\n                [\n                    4.8212187042743824e02,\n                    -9.8489480047918653e02,\n                    1.1822837156689332e03,\n                ],\n                [\n                    -9.8489480047918653e02,\n                    7.7891876734093257e03,\n                    2.1566788126264223e03,\n                ],\n                [\n                    1.1822837156689332e03,\n                    2.1566788126264223e03,\n                    1.9148005132287210e03,\n                ],\n            ],\n        ),\n        np.array([-1.0, -1.0, -1.0]),\n        np.ones(3),\n        np.array([1.0, 1, -1]),\n    ),\n    (\n        np.array([39307.4, 43176.2, 19136.1]),\n        np.array(\n            [\n                [\n                    2.1888915578112096e05,\n                    1.9734665605071097e05,\n                    1.0865582588513123e05,\n                ],\n                [\n                    1.9734665605071097e05,\n                    1.5802957082548781e05,\n                    9.3932751210457645e04,\n                ],\n                [\n                    1.0865582588513123e05,\n                    9.3932751210457645e04,\n                    6.9919507495186845e04,\n                ],\n            ],\n        ),\n        np.array([-1.0, -1.0, -1.0]),\n        np.ones(3),\n        np.array([0.835475, -1, -0.228586]),\n    ),\n    (\n        np.array([15924.6, -7936.89, 4559.77]),\n        np.array(\n            [\n                [\n          
          1.4823363165787258e05,\n                    -9.3991198881618606e04,\n                    -6.7423849020288171e03,\n                ],\n                [\n                    -9.3991198881618606e04,\n                    1.0299013233992350e05,\n                    2.7454282523562739e04,\n                ],\n                [\n                    -6.7423849020288171e03,\n                    2.7454282523562739e04,\n                    -8.7825122820168282e04,\n                ],\n            ],\n        ),\n        np.array([-1.0, -1.0, -1.0]),\n        np.ones(3),\n        np.array([0.15422, 0.484382, -1.0]),\n    ),\n    (\n        np.array([-223.491, -2375.1, -3508.53]),\n        np.array(\n            [\n                [\n                    1.8762040451468388e03,\n                    4.5209129063298806e03,\n                    3.7587689627124179e04,\n                ],\n                [\n                    4.5209129063298806e03,\n                    2.6540113149319626e06,\n                    1.3806874591227937e06,\n                ],\n                [\n                    3.7587689627124179e04,\n                    1.3806874591227937e06,\n                    1.4430203128871324e06,\n                ],\n            ],\n        ),\n        np.array([-1.0, -1.0, -1.0]),\n        np.ones(3),\n        np.array([0.700966, 0.0157984, -0.0309433]),\n    ),\n    (\n        np.array([-0.00566046, -0.26497, -0.24923]),\n        np.array(\n            [\n                [\n                    9.0152048402068141e-01,\n                    3.9069240493708740e01,\n                    4.0976585309530130e01,\n                ],\n                [\n                    3.9069240493708740e01,\n                    4.0339538281863297e03,\n                    2.7447144903267226e03,\n                ],\n                [\n                    4.0976585309530130e01,\n                    2.7447144903267226e03,\n                    2.3178455554478642e03,\n                ],\n      
      ],\n        ),\n        np.array([-1.0, -1.0, -1.0]),\n        np.ones(3),\n        np.array([0.0141205, 0.000131845, -0.000298234]),\n    ),\n    (\n        np.array([16459.6, 42312.7, 33953.9]),\n        np.array(\n            [\n                [\n                    3.4897766687256113e07,\n                    1.7536007046689782e08,\n                    1.0424382825704373e08,\n                ],\n                [\n                    1.7536007046689782e08,\n                    8.8481756045390594e08,\n                    5.2619306030723321e08,\n                ],\n                [\n                    1.0424382825704373e08,\n                    5.2619306030723321e08,\n                    3.1297679051347983e08,\n                ],\n            ]\n        ),\n        np.array([-1.0, -1.0, -1.0]),\n        np.ones(3),\n        np.array([-0.131066, 0.180817, -0.260453]),\n    ),\n    (\n        np.array([17660.3, 18827.2, 28759.5]),\n        np.array(\n            [\n                [\n                    9.7041306729050993e04,\n                    1.0613110916937439e05,\n                    1.5558443292460032e05,\n                ],\n                [\n                    1.0613110916937439e05,\n                    1.0840421118778562e05,\n                    1.5388850550829183e05,\n                ],\n                [\n                    1.5558443292460032e05,\n                    1.5388850550829183e05,\n                    2.1840298326937514e05,\n                ],\n            ]\n        ),\n        np.array([-1.0, -1.0, -1.0]),\n        np.ones(3),\n        np.array([1, 0.266874, -1]),\n    ),\n    (\n        np.array([16678, 65723.7, -153755]),\n        np.array(\n            [\n                [\n                    2.8786103367161286e04,\n                    1.0278873046014908e05,\n                    -2.4232333719251846e05,\n                ],\n                [\n                    1.0278873046014908e05,\n                    
7.9423330424583505e05,\n                    -4.3975347261092327e04,\n                ],\n                [\n                    -2.4232333719251846e05,\n                    -4.3975347261092327e04,\n                    3.5707186446013493e06,\n                ],\n            ]\n        ),\n        np.array([-1.0, -1.0, -1.0]),\n        np.ones(3),\n        np.array([1, -0.206169, 0.108385]),\n    ),\n    (\n        np.array([26602.2, -118867, 7457.08]),\n        np.array(\n            [\n                [\n                    1.3510413991352668e05,\n                    -4.4190620422288636e05,\n                    1.6183211956800147e04,\n                ],\n                [\n                    -4.4190620422288636e05,\n                    6.7224673907168563e06,\n                    1.5956835170839101e05,\n                ],\n                [\n                    1.6183211956800147e04,\n                    1.5956835170839101e05,\n                    6.7613560286023448e03,\n                ],\n            ]\n        ),\n        np.array([-1.0, -1.0, -1.0]),\n        np.ones(3),\n        np.array([0.0743402, 0.0463054, -1.0]),\n    ),\n    (\n        np.array([-1726.71, -394.745, -340.876]),\n        np.array(\n            [\n                [\n                    3.2235026082366367e03,\n                    3.5903801754879023e03,\n                    1.4504956347170955e03,\n                ],\n                [\n                    3.5903801754879023e03,\n                    1.0326690788609463e04,\n                    4.9152962632434155e03,\n                ],\n                [\n                    1.4504956347170955e03,\n                    4.9152962632434155e03,\n                    2.7645273367617360e03,\n                ],\n            ]\n        ),\n        np.array([-1.0, -1.0, -1.0]),\n        np.ones(3),\n        np.array([0.925468, -0.722815, 0.922884]),\n    ),\n    (\n        np.array([-1460.95, -48078.5, -61349.4]),\n        np.array(\n            [\n      
          [\n                    -2.1558862194927831e04,\n                    2.9346854336376925e05,\n                    3.6945385626803833e05,\n                ],\n                [\n                    2.9346854336376925e05,\n                    7.6788393809145853e07,\n                    5.7299202312126122e07,\n                ],\n                [\n                    3.6945385626803833e05,\n                    5.7299202312126122e07,\n                    5.0198599698606022e07,\n                ],\n            ]\n        ),\n        np.array([-1.0, -1.0, -1.0]),\n        np.ones(3),\n        np.array([1, 0.00933713, -0.0167956]),\n    ),\n    (\n        np.array([-7292.55, -299376, -269052]),\n        np.array(\n            [\n                [\n                    3.6778621108518197e05,\n                    1.5160538979173467e07,\n                    1.3518289246498797e07,\n                ],\n                [\n                    1.5160538979173467e07,\n                    6.1341858259608674e08,\n                    5.4813989289859617e08,\n                ],\n                [\n                    1.3518289246498797e07,\n                    5.4813989289859617e08,\n                    4.9252782230468601e08,\n                ],\n            ]\n        ),\n        np.array([-1.0, -1.0, -1.0]),\n        np.ones(3),\n        np.array([-1, 0.0341927, -0.0100605]),\n    ),\n]\n\n\n@pytest.mark.slow()\n@pytest.mark.parametrize(\n    \"linear_terms, square_terms, lower_bounds, upper_bounds, x_expected\",\n    TEST_CASES_BNTR,\n)\ndef test_bounded_newton_trustregion(\n    linear_terms,\n    square_terms,\n    lower_bounds,\n    upper_bounds,\n    x_expected,\n):\n    main_model = MainModel(linear_terms=linear_terms, square_terms=square_terms)\n\n    options = {\n        \"conjugate_gradient_method\": \"cg\",\n        \"maxiter\": 50,\n        \"maxiter_gradient_descent\": 5,\n        \"gtol_abs\": 1e-8,\n        \"gtol_rel\": 1e-8,\n        \"gtol_scaled\": 0,\n       
 \"gtol_abs_conjugate_gradient\": 1e-8,\n        \"gtol_rel_conjugate_gradient\": 1e-6,\n    }\n\n    result = bntr(\n        main_model,\n        lower_bounds,\n        upper_bounds,\n        x_candidate=np.zeros_like(x_expected),\n        **options,\n    )\n    aaae(result[\"x\"], x_expected, decimal=5)\n\n\n# ======================================================================================\n# Subsolver GQTPAR\n# ======================================================================================\n\nTEST_CASES_GQTPAR = [\n    (\n        np.array([-0.0005429824695352, -0.1032556117176, -0.06816855282091]),\n        np.array(\n            [\n                [2.05714077e-02, 7.58182390e-01, 9.00050279e-01],\n                [7.58182390e-01, 6.25867992e01, 4.20096648e01],\n                [9.00050279e-01, 4.20096648e01, 4.03810858e01],\n            ]\n        ),\n        np.array(\n            [\n                -0.9994584757179,\n                -0.007713730538474,\n                0.03198833730482,\n            ]\n        ),\n        -0.001340933981148,\n    )\n]\n\n\n@pytest.mark.slow()\n@pytest.mark.parametrize(\n    \"linear_terms, square_terms, x_expected, criterion_expected\", TEST_CASES_GQTPAR\n)\ndef test_gqtpar_quadratic(linear_terms, square_terms, x_expected, criterion_expected):\n    main_model = MainModel(linear_terms=linear_terms, square_terms=square_terms)\n\n    result = gqtpar(main_model, x_candidate=np.zeros_like(x_expected))\n\n    aaae(result[\"x\"], x_expected)\n    aaae(result[\"criterion\"], criterion_expected)\n\n\n# ======================================================================================\n# Conjugate Gradient Algorithms\n# ======================================================================================\n\nTEST_CASES_CG = [\n    (\n        np.array([79579.8, 35973.7]),\n        np.array(\n            [\n                [2.2267942225630835e08, 1.3303758212303287e08],\n                [1.3303758212303287e08, 
7.9554367206848219e07],\n            ]\n        ),\n        0.2393319731158,\n        -np.array([0.0958339, -0.159809]),\n    ),\n    (\n        np.array([0.00028774, 0.00763968, 0.01217268]),\n        np.array(\n            [\n                [4.00803604e00, 1.65790911e02, 1.73222977e02],\n                [1.65790911e02, 1.60880163e04, 1.10414034e04],\n                [1.73222977e02, 1.10414034e04, 9.29926257e03],\n            ]\n        ),\n        9.5367431640625e-05,\n        np.array([9.50204689e-05, 3.56030822e-06, -7.30627902e-06]),\n    ),\n    (\n        np.array([0.00028774, 0.00763968, 0.01217268]),\n        np.array(\n            [\n                [4.00803604e00, 1.65790911e02, 1.73222977e02],\n                [1.65790911e02, 1.60880163e04, 1.10414034e04],\n                [1.73222977e02, 1.10414034e04, 9.29926257e03],\n            ]\n        ),\n        9.5367431640625e-05,\n        np.array([9.50204689e-05, 3.56030822e-06, -7.30627902e-06]),\n    ),\n    (\n        -np.array([-6.76002e-06, -6.56323e-08, 2.00988e-07]),\n        np.array(\n            [\n                [\n                    4.0080360351800763e00,\n                    1.6579091056425378e02,\n                    1.7322297746691254e02,\n                ],\n                [\n                    1.6579091056425378e02,\n                    1.6088016292793940e04,\n                    1.1041403355728811e04,\n                ],\n                [\n                    1.7322297746691254e02,\n                    1.1041403355728811e04,\n                    9.2992625728417297e03,\n                ],\n            ]\n        ),\n        0.0003814697265625,\n        np.array([-2.7382e-05, -3.66814e-07, 9.45617e-07]),\n    ),\n    (\n        -np.array([-4.69447, -0.619271, 0.837666]),\n        np.array(\n            [\n                [\n                    6.9147751896043360e01,\n                    2.6192110911280561e03,\n                    2.8094172839794960e03,\n                ],\n             
   [\n                    2.6192110911280561e03,\n                    2.4907533417816096e05,\n                    1.6917615514201863e05,\n                ],\n                [\n                    2.8094172839794960e03,\n                    1.6917615514201863e05,\n                    1.4352314212505225e05,\n                ],\n            ]\n        ),\n        0.0657627701334,\n        np.array([-0.0656472, -0.00168561, 0.00351321]),\n    ),\n    (\n        -np.array([-2.45646e-05, -4.1711e-07, 9.2032e-07]),\n        np.array(\n            [\n                [\n                    8.6267971128257614e-01,\n                    3.3589357331133463e01,\n                    3.8550834275262481e01,\n                ],\n                [\n                    3.3589357331133463e01,\n                    4.0625660472990171e03,\n                    2.7006581320776222e03,\n                ],\n                [\n                    3.8550834275262481e01,\n                    2.7006581320776222e03,\n                    2.3157072223295277e03,\n                ],\n            ]\n        ),\n        0.0003814697265625,\n        np.array([-0.000310185, -3.86464e-06, 9.67128e-06]),\n    ),\n    (\n        -np.array([-4.29172e-08, -1.8127e-06, -1.38313e-06]),\n        np.array(\n            [\n                [\n                    1.7207808265135328e06,\n                    7.2130304472968280e07,\n                    5.5202182930777229e07,\n                ],\n                [\n                    7.2130304472968280e07,\n                    3.0516230749633555e09,\n                    2.3274035648401971e09,\n                ],\n                [\n                    5.5202182930777229e07,\n                    2.3274035648401971e09,\n                    1.7782503817136776e09,\n                ],\n            ]\n        ),\n        0.390625,\n        np.array([-7.44084e-15, -6.07092e-16, 2.47754e-16]),\n    ),\n    (\n        -np.array([79525.7, 3.04463e06, 2.42641e06]),\n        
np.array(\n            [\n                [\n                    6.3624954351893254e06,\n                    2.5406887701711509e08,\n                    1.9610463258207005e08,\n                ],\n                [\n                    2.5406887701711509e08,\n                    1.0261536342839724e10,\n                    7.8819891642426796e09,\n                ],\n                [\n                    1.9610463258207005e08,\n                    7.8819891642426796e09,\n                    6.0688426371444454e09,\n                ],\n            ]\n        ),\n        0.0001192842356654,\n        np.array([2.43607e-06, 9.32646e-05, 7.4327e-05]),\n    ),\n]\n\nTEST_CASES_TRSBOX = [\n    (\n        np.array([1.0, 0.0, 1.0]),\n        np.array([[1.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 2.0]]),\n        2.0,\n        np.array([-1.0, 0.0, -0.5]),\n    ),\n    (\n        np.array([1.0, 0.0, 1.0]),\n        np.array([[1.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 2.0]]),\n        5.0 / 12.0,\n        np.array([-1.0 / 3.0, 0.0, -0.25]),\n    ),\n    (\n        np.array([1.0, 0.0, 1.0]),\n        np.array([[-2.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, -1.0]]),\n        5.0 / 12.0,\n        np.array([-1.0 / 3.0, 0.0, -0.25]),\n    ),\n    (\n        np.array([0.0, 0.0, 1.0]),\n        np.array([[-2.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, -1.0]]),\n        0.5,\n        np.array([0.0, 0.0, -0.5]),\n    ),\n]\n\n\n@pytest.mark.slow()\n@pytest.mark.parametrize(\n    \"gradient, hessian, trustregion_radius, x_expected\", TEST_CASES_CG\n)\ndef test_trustregion_conjugate_gradient(\n    gradient, hessian, trustregion_radius, x_expected\n):\n    x_out = minimize_trust_cg(\n        gradient, hessian, trustregion_radius, gtol_abs=1e-8, gtol_rel=1e-6\n    )\n    aaae(x_out, x_expected)\n\n\n@pytest.mark.slow()\n@pytest.mark.parametrize(\n    \"gradient, hessian, trustregion_radius, x_expected\", TEST_CASES_CG\n)\ndef test_trustregion_steihaug_toint(gradient, hessian, 
trustregion_radius, x_expected):\n    x_out = minimize_trust_stcg(gradient, hessian, trustregion_radius)\n    aaae(x_out, x_expected)\n\n\n@pytest.mark.slow()\n@pytest.mark.parametrize(\n    \"linear_terms, square_terms, trustregion_radius, x_expected\",\n    TEST_CASES_CG + TEST_CASES_TRSBOX,\n)\ndef test_trustregion_trsbox(linear_terms, square_terms, trustregion_radius, x_expected):\n    lower_bounds = -1e20 * np.ones_like(linear_terms)\n    upper_bounds = 1e20 * np.ones_like(linear_terms)\n\n    x_out = minimize_trust_trsbox(\n        linear_terms,\n        square_terms,\n        trustregion_radius,\n        lower_bounds=lower_bounds,\n        upper_bounds=upper_bounds,\n    )\n\n    aaae(x_out, x_expected, decimal=4)\n"
  },
  {
    "path": "tests/optimagic/optimizers/test_bayesian_optimizer.py",
    "content": "\"\"\"Unit tests for Bayesian optimizer helper functions.\"\"\"\n\nimport numpy as np\nimport pytest\n\nfrom optimagic.config import IS_BAYESOPT_INSTALLED_AND_VERSION_NEWER_THAN_2\nfrom optimagic.optimization.internal_optimization_problem import InternalBounds\n\nif IS_BAYESOPT_INSTALLED_AND_VERSION_NEWER_THAN_2:\n    from bayes_opt import acquisition\n\n    from optimagic.optimizers.bayesian_optimizer import (\n        _extract_params_from_kwargs,\n        _process_acquisition_function,\n        _process_bounds,\n    )\n\n\ndef test_extract_params_from_kwargs():\n    \"\"\"Test basic parameter extraction from kwargs dictionary.\"\"\"\n    params_dict = {\"param0\": 1.0, \"param1\": 2.0, \"param2\": 3.0}\n    result = _extract_params_from_kwargs(params_dict)\n    np.testing.assert_array_equal(result, np.array([1.0, 2.0, 3.0]))\n\n\ndef test_process_bounds_valid():\n    \"\"\"Test processing valid bounds for Bayesian optimization.\"\"\"\n    bounds = InternalBounds(lower=np.array([-1.0, 0.0]), upper=np.array([1.0, 2.0]))\n    result = _process_bounds(bounds)\n    expected = {\"param0\": (-1.0, 1.0), \"param1\": (0.0, 2.0)}\n    assert result == expected\n\n\ndef test_process_bounds_none():\n    \"\"\"Test processing bounds with None values.\"\"\"\n    bounds = InternalBounds(lower=None, upper=np.array([1.0, 2.0]))\n    with pytest.raises(\n        ValueError, match=\"Bayesian optimization requires finite bounds\"\n    ):\n        _process_bounds(bounds)\n\n\ndef test_process_bounds_infinite():\n    \"\"\"Test processing bounds with infinite values.\"\"\"\n    bounds = InternalBounds(lower=np.array([-1.0, 0.0]), upper=np.array([1.0, np.inf]))\n    with pytest.raises(\n        ValueError, match=\"Bayesian optimization requires finite bounds\"\n    ):\n        _process_bounds(bounds)\n\n\n@pytest.mark.skipif(\n    not IS_BAYESOPT_INSTALLED_AND_VERSION_NEWER_THAN_2,\n    reason=\"bayes_opt is not installed in a recent enough version >= 2.0.0.\",\n)\ndef 
test_process_acquisition_function_none():\n    \"\"\"Test processing None acquisition function.\"\"\"\n    result = _process_acquisition_function(\n        acquisition_function=None,\n        kappa=2.576,\n        xi=0.01,\n        exploration_decay=None,\n        exploration_decay_delay=None,\n        random_seed=None,\n    )\n    assert result is None\n\n\n@pytest.mark.skipif(\n    not IS_BAYESOPT_INSTALLED_AND_VERSION_NEWER_THAN_2, reason=\"bayes_opt not installed\"\n)\n@pytest.mark.parametrize(\n    \"acq_name, expected_class\",\n    [\n        (\"ucb\", acquisition.UpperConfidenceBound),\n        (\"upper_confidence_bound\", acquisition.UpperConfidenceBound),\n        (\"ei\", acquisition.ExpectedImprovement),\n        (\"expected_improvement\", acquisition.ExpectedImprovement),\n        (\"poi\", acquisition.ProbabilityOfImprovement),\n        (\"probability_of_improvement\", acquisition.ProbabilityOfImprovement),\n    ],\n)\ndef test_process_acquisition_function_string(acq_name, expected_class):\n    \"\"\"Test processing string acquisition function.\"\"\"\n    result = _process_acquisition_function(\n        acquisition_function=acq_name,\n        kappa=2.576,\n        xi=0.01,\n        exploration_decay=None,\n        exploration_decay_delay=None,\n        random_seed=None,\n    )\n    assert isinstance(result, expected_class)\n\n\n@pytest.mark.skipif(\n    not IS_BAYESOPT_INSTALLED_AND_VERSION_NEWER_THAN_2, reason=\"bayes_opt not installed\"\n)\ndef test_process_acquisition_function_invalid_string():\n    \"\"\"Test processing invalid string acquisition function.\"\"\"\n    with pytest.raises(ValueError, match=\"Invalid acquisition_function string\"):\n        _process_acquisition_function(\n            acquisition_function=\"acq\",\n            kappa=2.576,\n            xi=0.01,\n            exploration_decay=None,\n            exploration_decay_delay=None,\n            random_seed=None,\n        )\n\n\n@pytest.mark.skipif(\n    not 
IS_BAYESOPT_INSTALLED_AND_VERSION_NEWER_THAN_2, reason=\"bayes_opt not installed\"\n)\ndef test_process_acquisition_function_instance():\n    \"\"\"Test processing acquisition function instance.\"\"\"\n    acq_instance = acquisition.UpperConfidenceBound()\n    result = _process_acquisition_function(\n        acquisition_function=acq_instance,\n        kappa=2.576,\n        xi=0.01,\n        exploration_decay=None,\n        exploration_decay_delay=None,\n        random_seed=None,\n    )\n    assert result is acq_instance\n\n\n@pytest.mark.skipif(\n    not IS_BAYESOPT_INSTALLED_AND_VERSION_NEWER_THAN_2, reason=\"bayes_opt not installed\"\n)\ndef test_process_acquisition_function_class():\n    \"\"\"Test processing acquisition function class.\"\"\"\n    result = _process_acquisition_function(\n        acquisition_function=acquisition.UpperConfidenceBound,\n        kappa=2.576,\n        xi=0.01,\n        exploration_decay=None,\n        exploration_decay_delay=None,\n        random_seed=None,\n    )\n    assert isinstance(result, acquisition.UpperConfidenceBound)\n\n\n@pytest.mark.skipif(\n    not IS_BAYESOPT_INSTALLED_AND_VERSION_NEWER_THAN_2, reason=\"bayes_opt not installed\"\n)\ndef test_process_acquisition_function_invalid_type():\n    \"\"\"Test processing invalid acquisition function type.\"\"\"\n    with pytest.raises(TypeError, match=\"acquisition_function must be None, a string\"):\n        _process_acquisition_function(\n            acquisition_function=123,\n            kappa=2.576,\n            xi=0.01,\n            exploration_decay=None,\n            exploration_decay_delay=None,\n            random_seed=None,\n        )\n"
  },
  {
    "path": "tests/optimagic/optimizers/test_bhhh.py",
    "content": "\"\"\"Test the internal BHHH algorithm.\"\"\"\n\nfrom functools import partial\n\nimport numpy as np\nimport pytest\nimport statsmodels.api as sm\nfrom numpy.testing import assert_array_almost_equal as aaae\nfrom scipy.stats import norm\n\nfrom optimagic import mark, minimize\nfrom optimagic.optimizers.bhhh import bhhh_internal\nfrom optimagic.utilities import get_rng\n\n\ndef generate_test_data():\n    rng = get_rng(seed=12)\n\n    num_observations = 5000\n    x1 = rng.multivariate_normal([0, 0], [[1, 0.75], [0.75, 1]], num_observations)\n    x2 = rng.multivariate_normal([1, 4], [[1, 0.75], [0.75, 1]], num_observations)\n\n    endog = np.hstack((np.zeros(num_observations), np.ones(num_observations)))\n\n    simulated_exog = np.vstack((x1, x2)).astype(np.float32)\n    exog = simulated_exog\n    intercept = np.ones((exog.shape[0], 1))\n    exog = np.hstack((intercept, exog))\n\n    return endog, exog\n\n\ndef _cdf_logit(x):\n    return 1 / (1 + np.exp(-x))\n\n\ndef get_loglikelihood_logit(endog, exog, x):\n    q = 2 * endog - 1\n    linear_prediction = np.dot(exog, x)\n\n    return np.log(_cdf_logit(q * linear_prediction))\n\n\ndef get_score_logit(endog, exog, x):\n    linear_prediction = np.dot(exog, x)\n\n    return (endog - _cdf_logit(linear_prediction))[:, None] * exog\n\n\ndef get_loglikelihood_probit(endog, exog, x):\n    q = 2 * endog - 1\n    linear_prediction = np.dot(exog, x[: exog.shape[1]])\n\n    return np.log(norm.cdf(q * linear_prediction))\n\n\ndef get_score_probit(endog, exog, x):\n    q = 2 * endog - 1\n    linear_prediction = np.dot(exog, x[: exog.shape[1]])\n\n    derivative_loglikelihood = (\n        q * norm.pdf(q * linear_prediction) / norm.cdf(q * linear_prediction)\n    )\n\n    return derivative_loglikelihood[:, None] * exog\n\n\ndef criterion_and_derivative_logit(x):\n    \"\"\"Return Logit criterion and derivative.\n\n    Args:\n        x (np.ndarray): Parameter vector of shape (n_obs,).\n\n    Returns:\n        tuple: 
first entry is the criterion, second entry is the score\n\n    \"\"\"\n    endog, exog = generate_test_data()\n    score = partial(get_score_logit, endog, exog)\n    loglike = partial(get_loglikelihood_logit, endog, exog)\n\n    return -loglike(x), score(x)\n\n\ndef criterion_and_derivative_probit(x):\n    \"\"\"Return Probit criterion and derivative.\n\n    Args:\n        x (np.ndarray): Parameter vector of shape (n_obs,).\n\n    Returns:\n        tuple: first entry is the criterion, second entry is the score\n\n    \"\"\"\n    endog, exog = generate_test_data()\n\n    score = partial(get_score_probit, endog, exog)\n    loglike = partial(get_loglikelihood_probit, endog, exog)\n\n    return -loglike(x), score(x)\n\n\n@pytest.fixture()\ndef result_statsmodels_logit():\n    endog, exog = generate_test_data()\n    result = sm.Logit(endog, exog).fit()\n\n    return result\n\n\n@pytest.fixture()\ndef result_statsmodels_probit():\n    endog, exog = generate_test_data()\n    result = sm.Probit(endog, exog).fit()\n\n    return result\n\n\n@pytest.mark.parametrize(\n    \"criterion_and_derivative, result_statsmodels\",\n    [\n        (criterion_and_derivative_logit, \"result_statsmodels_logit\"),\n        (criterion_and_derivative_probit, \"result_statsmodels_probit\"),\n    ],\n)\ndef test_maximum_likelihood(criterion_and_derivative, result_statsmodels, request):\n    result_expected = request.getfixturevalue(result_statsmodels)\n\n    x = np.zeros(3)\n\n    result_bhhh = bhhh_internal(\n        criterion_and_derivative,\n        x=x,\n        gtol_abs=1e-8,\n        maxiter=200,\n    )\n\n    aaae(result_bhhh.x, result_expected.params, decimal=4)\n\n\n@pytest.mark.parametrize(\n    \"criterion_and_derivative, result_statsmodels\",\n    [\n        (criterion_and_derivative_logit, \"result_statsmodels_logit\"),\n        (criterion_and_derivative_probit, \"result_statsmodels_probit\"),\n    ],\n)\ndef test_maximum_likelihood_external_interface(\n    
criterion_and_derivative, result_statsmodels, request\n):\n    result_expected = request.getfixturevalue(result_statsmodels)\n\n    x = np.zeros(3)\n\n    result_bhhh = minimize(\n        fun=mark.likelihood(criterion_and_derivative),\n        jac=True,\n        params=x,\n        algorithm=\"bhhh\",\n    )\n\n    aaae(result_bhhh.params, result_expected.params, decimal=4)\n"
  },
  {
    "path": "tests/optimagic/optimizers/test_fides_options.py",
    "content": "\"\"\"Test the different options of fides.\"\"\"\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_array_almost_equal as aaae\n\nfrom optimagic.config import IS_FIDES_INSTALLED\nfrom optimagic.optimization.optimize import minimize\nfrom optimagic.parameters.bounds import Bounds\n\nif IS_FIDES_INSTALLED:\n    from fides.hessian_approximation import FX, SR1, Broyden\n\n    from optimagic.optimizers.fides import Fides\nelse:\n    FX = lambda: None\n    SR1 = lambda: None\n    Broyden = lambda phi: None  # noqa: ARG005\n\ntest_cases_no_contribs_needed = [\n    {},\n    {\"hessian_update_strategy\": \"bfgs\"},\n    {\"hessian_update_strategy\": \"BFGS\"},\n    {\"hessian_update_strategy\": SR1()},\n    {\"hessian_update_strategy\": Broyden(phi=0.5)},\n    {\"hessian_update_strategy\": \"sr1\"},\n    {\"hessian_update_strategy\": \"DFP\"},\n    {\"hessian_update_strategy\": \"bb\"},\n    {\"convergence_ftol_rel\": 1e-6},\n    {\"convergence_xtol_abs\": 1e-6},\n    {\"convergence_gtol_abs\": 1e-6},\n    {\"convergence_gtol_rel\": 1e-6},\n    {\"stopping_maxiter\": 100},\n    {\"stopping_max_seconds\": 200},\n    {\"trustregion_initial_radius\": 20, \"trustregion_stepback_strategy\": \"truncate\"},\n    {\"trustregion_subspace_dimension\": \"full\"},\n    {\"trustregion_max_stepback_fraction\": 0.8},\n    {\"trustregion_decrease_threshold\": 0.4, \"trustregion_decrease_factor\": 0.2},\n    {\"trustregion_increase_threshold\": 0.9, \"trustregion_increase_factor\": 4},\n]\n\n\ndef criterion_and_derivative(x):\n    return (x**2).sum(), 2 * x\n\n\ndef criterion(x):\n    return (x**2).sum()\n\n\n@pytest.mark.skipif(not IS_FIDES_INSTALLED, reason=\"fides not installed.\")\n@pytest.mark.parametrize(\"algo_options\", test_cases_no_contribs_needed)\ndef test_fides_correct_algo_options(algo_options):\n    res = minimize(\n        fun_and_jac=criterion_and_derivative,\n        fun=criterion,\n        x0=np.array([1, -5, 3]),\n        
bounds=Bounds(\n            lower=np.array([-10, -10, -10]),\n            upper=np.array([10, 10, 10]),\n        ),\n        algorithm=Fides(**algo_options),\n    )\n    aaae(res.params, np.zeros(3), decimal=4)\n\n\ntest_cases_needing_contribs = [\n    {\"hessian_update_strategy\": FX()},\n    {\"hessian_update_strategy\": \"ssm\"},\n    {\"hessian_update_strategy\": \"TSSM\"},\n    {\"hessian_update_strategy\": \"gnsbfgs\"},\n]\n\n\n@pytest.mark.skipif(not IS_FIDES_INSTALLED, reason=\"fides not installed.\")\n@pytest.mark.parametrize(\"algo_options\", test_cases_needing_contribs)\ndef test_fides_unimplemented_algo_options(algo_options):\n    with pytest.raises(NotImplementedError):\n        minimize(\n            fun_and_jac=criterion_and_derivative,\n            fun=criterion,\n            x0=np.array([1, -5, 3]),\n            bounds=Bounds(\n                lower=np.array([-10, -10, -10]),\n                upper=np.array([10, 10, 10]),\n            ),\n            algorithm=Fides(**algo_options),\n        )\n\n\n@pytest.mark.skipif(not IS_FIDES_INSTALLED, reason=\"fides not installed.\")\ndef test_fides_stop_after_one_iteration():\n    res = minimize(\n        fun_and_jac=criterion_and_derivative,\n        fun=criterion,\n        x0=np.array([1, -5, 3]),\n        bounds=Bounds(\n            lower=np.array([-10, -10, -10]),\n            upper=np.array([10, 10, 10]),\n        ),\n        algorithm=Fides(stopping_maxiter=1),\n    )\n    assert not res.success\n    assert res.n_iterations == 1\n"
  },
  {
    "path": "tests/optimagic/optimizers/test_gfo_optimizers.py",
    "content": "import numpy as np\nimport pytest\n\nfrom optimagic.config import IS_GRADIENT_FREE_OPTIMIZERS_INSTALLED\nfrom optimagic.optimization.internal_optimization_problem import (\n    SphereExampleInternalOptimizationProblemWithConverter,\n)\nfrom optimagic.optimizers.gfo_optimizers import (\n    GFOCommonOptions,\n    _get_gfo_constraints,\n    _get_initialize_gfo,\n    _get_search_space_gfo,\n    _gfo_internal,\n    _value2para,\n)\nfrom optimagic.parameters.bounds import Bounds\n\nproblem = SphereExampleInternalOptimizationProblemWithConverter()\n\n\ndef test_get_gfo_constraints():\n    got = _get_gfo_constraints()\n    expected = []\n    assert got == expected\n\n\ndef test_get_initialize_gfo():\n    x0 = np.array([1, 0, 1])\n    x1 = [\n        {\"x0\": 1, \"x1\": 2, \"x2\": 3},\n    ]\n    n_init = 20\n    got = _get_initialize_gfo(x0, n_init, x1, problem.converter)\n    expected = {\n        \"warm_start\": [\n            {\"x0\": 1, \"x1\": 0, \"x2\": 1},  # x0\n            {\"x0\": 1, \"x1\": 2, \"x2\": 3},\n        ],  # x1\n        \"vertices\": n_init // 2,\n        \"grid\": n_init // 2,\n    }\n    assert got == expected\n\n\ndef test_get_search_space_gfo():\n    bounds = Bounds(lower=np.array([-10, -10]), upper=np.array([10, 10]))\n    n_grid_points = {\n        \"x0\": 5,\n        \"x1\": 5,\n    }\n    got = _get_search_space_gfo(bounds, n_grid_points, problem.converter)\n    expected = {\n        \"x0\": np.array([-10.0, -5.0, 0.0, 5.0, 10.0]),\n        \"x1\": np.array([-10.0, -5.0, 0.0, 5.0, 10.0]),\n    }\n    assert len(got.keys()) == 2\n    assert np.all(got[\"x0\"] == expected[\"x0\"])\n    assert np.all(got[\"x1\"] == expected[\"x1\"])\n\n\ndef test_value2para():\n    assert _value2para(np.array([0, 1, 2])) == {\"x0\": 0, \"x1\": 1, \"x2\": 2}\n\n\n@pytest.mark.skipif(\n    not IS_GRADIENT_FREE_OPTIMIZERS_INSTALLED, reason=\"gfo not installed\"\n)\ndef test_gfo_internal():\n    from gradient_free_optimizers import 
DownhillSimplexOptimizer\n\n    res = _gfo_internal(\n        common_options=GFOCommonOptions(),\n        problem=problem,\n        x0=np.full(10, 2),\n        optimizer=DownhillSimplexOptimizer,\n    )\n\n    assert np.all(res.x == np.full(10, 0))\n"
  },
  {
    "path": "tests/optimagic/optimizers/test_iminuit_migrad.py",
    "content": "\"\"\"Test suite for the iminuit migrad optimizer.\"\"\"\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_array_almost_equal as aaae\n\nfrom optimagic.config import IS_IMINUIT_INSTALLED\nfrom optimagic.optimization.optimize import minimize\nfrom optimagic.optimizers.iminuit_migrad import (\n    IminuitMigrad,\n    _convert_bounds_to_minuit_limits,\n)\n\n\ndef sphere(x):\n    return (x**2).sum()\n\n\ndef sphere_grad(x):\n    return 2 * x\n\n\ndef test_convert_bounds_unbounded():\n    \"\"\"Test converting unbounded bounds.\"\"\"\n    lower = np.array([-np.inf, -np.inf])\n    upper = np.array([np.inf, np.inf])\n    limits = _convert_bounds_to_minuit_limits(lower, upper)\n\n    assert len(limits) == 2\n    assert limits[0] == (None, None)\n    assert limits[1] == (None, None)\n\n\ndef test_convert_bounds_lower_only():\n    \"\"\"Test converting lower bounds only.\"\"\"\n    lower = np.array([1.0, 2.0])\n    upper = np.array([np.inf, np.inf])\n    limits = _convert_bounds_to_minuit_limits(lower, upper)\n\n    assert len(limits) == 2\n    assert limits[0] == (1.0, None)\n    assert limits[1] == (2.0, None)\n\n\ndef test_convert_bounds_upper_only():\n    \"\"\"Test converting upper bounds only.\"\"\"\n    lower = np.array([-np.inf, -np.inf])\n    upper = np.array([1.0, 2.0])\n    limits = _convert_bounds_to_minuit_limits(lower, upper)\n\n    assert len(limits) == 2\n    assert limits[0] == (None, 1.0)\n    assert limits[1] == (None, 2.0)\n\n\ndef test_convert_bounds_two_sided():\n    \"\"\"Test converting two-sided bounds.\"\"\"\n    lower = np.array([1.0, -2.0])\n    upper = np.array([2.0, -1.0])\n    limits = _convert_bounds_to_minuit_limits(lower, upper)\n\n    assert len(limits) == 2\n    assert limits[0] == (1.0, 2.0)\n    assert limits[1] == (-2.0, -1.0)\n\n\ndef test_convert_bounds_mixed():\n    \"\"\"Test converting mixed bounds (some infinite, some finite).\"\"\"\n    lower = np.array([-np.inf, 0.0, 1.0])\n    upper = 
np.array([1.0, np.inf, 2.0])\n    limits = _convert_bounds_to_minuit_limits(lower, upper)\n\n    assert len(limits) == 3\n    assert limits[0] == (None, 1.0)\n    assert limits[1] == (0.0, None)\n    assert limits[2] == (1.0, 2.0)\n\n\n@pytest.mark.skipif(not IS_IMINUIT_INSTALLED, reason=\"iminuit not installed.\")\ndef test_iminuit_migrad():\n    \"\"\"Test basic optimization with sphere function.\"\"\"\n    x0 = np.array([1.0, 2.0, 3.0])\n    algorithm = IminuitMigrad()\n\n    res = minimize(\n        fun=sphere,\n        jac=sphere_grad,\n        algorithm=algorithm,\n        x0=x0,\n    )\n\n    assert res.success\n    aaae(res.x, np.zeros(3), decimal=6)\n    assert res.n_fun_evals > 0\n    assert res.n_jac_evals > 0\n"
  },
  {
    "path": "tests/optimagic/optimizers/test_ipopt_options.py",
    "content": "\"\"\"Test the different options of ipopt.\"\"\"\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_array_almost_equal as aaae\n\nfrom optimagic.config import IS_CYIPOPT_INSTALLED\nfrom optimagic.optimization.optimize import minimize\nfrom optimagic.optimizers.ipopt import Ipopt\nfrom optimagic.parameters.bounds import Bounds\n\ntest_cases = [\n    {},\n    {\"convergence_ftol_rel\": 1e-7},\n    {\"stopping_maxiter\": 1_100_000},\n    {\"mu_target\": 1e-8},\n    {\"s_max\": 200},\n    {\"stopping_max_wall_time_seconds\": 200},\n    {\"stopping_max_cpu_time\": 1e10},\n    {\"dual_inf_tol\": 2.5},\n    {\"constr_viol_tol\": 1e-7},\n    {\"compl_inf_tol\": 1e-7},\n    {\"acceptable_iter\": 15},\n    {\"acceptable_tol\": 1e-5},\n    {\"acceptable_dual_inf_tol\": 1e-5},\n    {\"acceptable_constr_viol_tol\": 1e-5},\n    {\"acceptable_compl_inf_tol\": 1e-5},\n    {\"acceptable_obj_change_tol\": 1e5},\n    {\"diverging_iterates_tol\": 1e5},\n    {\"nlp_lower_bound_inf\": -1e5},\n    {\"nlp_upper_bound_inf\": 1e10},\n    {\"fixed_variable_treatment\": \"relax_bounds\"},\n    {\"dependency_detector\": \"mumps\"},\n    {\"dependency_detection_with_rhs\": \"no\"},\n    {\"dependency_detection_with_rhs\": False},\n    {\"kappa_d\": 1e-7},\n    {\"bound_relax_factor\": 1e-12},\n    {\"honor_original_bounds\": \"yes\"},\n    {\"check_derivatives_for_naninf\": True},\n    {\"jac_c_constant\": True},\n    {\"jac_d_constant\": True},\n    {\"hessian_constant\": True},\n    # scaling\n    {\"nlp_scaling_method\": None},\n    {\"obj_scaling_factor\": 1.1},\n    {\"nlp_scaling_max_gradient\": 200},\n    {\"nlp_scaling_obj_target_gradient\": 0.2},\n    {\"nlp_scaling_constr_target_gradient\": 0},\n    {\"nlp_scaling_constr_target_gradient\": 2e-9},\n    {\"nlp_scaling_min_value\": 1e-9},\n    {\"bound_push\": 0.02},\n    {\"bound_frac\": 0.02},\n    {\"slack_bound_push\": 0.001},\n    {\"slack_bound_frac\": 0.001},\n    {\"constr_mult_init_max\": 
5000},\n    {\"bound_mult_init_val\": 1.2},\n    {\"bound_mult_init_method\": \"mu-based\"},\n    {\"least_square_init_primal\": \"yes\"},\n    {\"least_square_init_duals\": \"yes\"},\n    {\"warm_start_init_point\": \"yes\"},\n    {\"warm_start_same_structure\": False},\n    {\"warm_start_bound_push\": 0.002},\n    {\"warm_start_bound_frac\": 0.002},\n    {\"warm_start_slack_bound_push\": 0.0001},\n    {\"warm_start_slack_bound_frac\": 0.002},\n    {\"warm_start_mult_bound_push\": 0.002},\n    {\"warm_start_mult_init_max\": 1e8},\n    {\"warm_start_entire_iterate\": \"yes\"},\n    {\"replace_bounds\": \"yes\"},\n    {\"skip_finalize_solution_call\": \"no\"},\n    {\"timing_statistics\": \"yes\"},\n    {\"mu_max_fact\": 1500},\n    {\"mu_max\": 100_500},\n    {\"mu_min\": 1e-09},\n    {\"adaptive_mu_globalization\": \"kkt-error\"},\n    {\"adaptive_mu_kkterror_red_iters\": 5},\n    {\"adaptive_mu_kkterror_red_fact\": 0.9},\n    {\"filter_margin_fact\": 1e-4},\n    {\"filter_max_margin\": 0.5},\n    {\"adaptive_mu_restore_previous_iterate\": False},\n    {\"adaptive_mu_monotone_init_factor\": 0.9},\n    {\"adaptive_mu_kkt_norm_type\": \"max-norm\"},\n    {\"mu_strategy\": \"adaptive\"},\n    {\"mu_oracle\": \"probing\"},\n    {\"mu_oracle\": \"loqo\"},\n    {\"fixed_mu_oracle\": \"loqo\"},\n    {\"mu_init\": 0.2},\n    {\"barrier_tol_factor\": 10.5},\n    {\"mu_linear_decrease_factor\": 0.01},\n    {\"mu_superlinear_decrease_power\": 1.2},\n    {\"mu_allow_fast_monotone_decrease\": False},\n    {\"tau_min\": 0.75},\n    {\"sigma_max\": 200},\n    {\"sigma_min\": 1e-8},\n    {\"quality_function_norm_type\": \"2-norm\"},\n    {\"quality_function_centrality\": \"log\"},\n    {\"quality_function_balancing_term\": \"cubic\"},\n    {\"quality_function_max_section_steps\": 10},\n    {\"quality_function_max_section_steps\": 5.5},\n    {\"quality_function_section_sigma_tol\": 0.02},\n    {\"quality_function_section_qf_tol\": 0.5},\n    {\"line_search_method\": 
\"penalty\"},\n    {\"alpha_red_factor\": 0.8},\n    {\"accept_every_trial_step\": True},\n    {\"accept_after_max_steps\": 3},\n    {\"alpha_for_y\": \"max\"},\n    {\"alpha_for_y_tol\": 5},\n    {\"tiny_step_tol\": 1e-15},\n    {\"tiny_step_y_tol\": 0.02},\n    {\"watchdog_shortened_iter_trigger\": 20},\n    {\"watchdog_trial_iter_max\": 5},\n    {\"theta_max_fact\": 2e5},\n    {\"theta_min_fact\": 0.002},\n    {\"eta_phi\": 0.3},\n    {\"delta\": 0.9},\n    {\"s_phi\": 2.2},\n    {\"s_theta\": 1.5},\n    {\"gamma_phi\": 1e-6},\n    {\"gamma_theta\": 1e-5},\n    {\"alpha_min_frac\": 0.08},\n    {\"max_soc\": 5},\n    {\"kappa_soc\": 0.9},\n    {\"obj_max_inc\": 5.3},\n    {\"max_filter_resets\": 10},\n    {\"filter_reset_trigger\": 3},\n    {\"corrector_type\": \"affine\"},\n    {\"skip_corr_if_neg_curv\": True},\n    {\"skip_corr_in_monotone_mode\": False},\n    {\"corrector_compl_avrg_red_fact\": 3},\n    {\"corrector_compl_avrg_red_fact\": 3.5},\n    {\"soc_method\": 1},\n    {\"nu_init\": 1e-5},\n    {\"nu_inc\": 1e-5},\n    {\"rho\": 0.2},\n    {\"kappa_sigma\": 1e8},\n    {\"recalc_y\": True},\n    {\"recalc_y_feas_tol\": 1e-4},\n    {\"slack_move\": 1e-11},\n    {\"constraint_violation_norm_type\": \"2-norm\"},\n    # step calculation\n    {\"mehrotra_algorithm\": False},\n    {\"fast_step_computation\": True},\n    {\"min_refinement_steps\": 3},\n    {\"max_refinement_steps\": 12},\n    {\"residual_ratio_max\": 1e-9},\n    {\"residual_ratio_singular\": 1e-4},\n    {\"residual_improvement_factor\": 1.3},\n    {\"neg_curv_test_tol\": 1e-11},\n    {\"neg_curv_test_reg\": False},\n    {\"max_hessian_perturbation\": 1e19},\n    {\"min_hessian_perturbation\": 1e-19},\n    {\"perturb_inc_fact_first\": 50.3},\n    {\"perturb_inc_fact\": 4.4},\n    {\"perturb_dec_fact\": 0.25},\n    {\"first_hessian_perturbation\": 0.002},\n    {\"jacobian_regularization_value\": 1e-7},\n    {\"jacobian_regularization_exponent\": 0.2},\n    {\"perturb_always_cd\": False},\n    # 
restoration phase\n    {\"expect_infeasible_problem\": False},\n    {\"expect_infeasible_problem_ctol\": 0.005},\n    {\"expect_infeasible_problem_ytol\": 1e7},\n    {\"start_with_resto\": False},\n    {\"soft_resto_pderror_reduction_factor\": 0.99},\n    {\"max_soft_resto_iters\": 5},\n    {\"required_infeasibility_reduction\": 0.8},\n    {\"max_resto_iter\": 4_000_000},\n    {\"evaluate_orig_obj_at_resto_trial\": False},\n    {\"resto_penalty_parameter\": 830.4},\n    {\"resto_proximity_weight\": 2.4},\n    {\"bound_mult_reset_threshold\": 804.4},\n    {\"constr_mult_reset_threshold\": 1.4},\n    {\"resto_failure_feasibility_threshold\": 0.4},\n    # hessian approximation\n    {\"limited_memory_aug_solver\": \"extended\"},\n    {\"limited_memory_max_history\": 5},\n    {\"limited_memory_update_type\": \"sr1\"},\n    {\"limited_memory_initialization\": \"scalar2\"},\n    {\"limited_memory_init_val\": 0.5},\n    {\"limited_memory_init_val_max\": 2e9},\n    {\"limited_memory_init_val_min\": 2e-9},\n    {\"limited_memory_max_skipping\": 4},\n    {\"limited_memory_special_for_resto\": False},\n    {\"hessian_approximation_space\": \"all-variables\"},\n    # linear solver\n    # using ma27, ma57, ma77, ma86 leads to remaining at the start values\n    # using ma97 leads to segmentation fault\n    {\"linear_solver_options\": {\"mumps_pivtol\": 1e-5}},\n    {\"linear_solver_options\": {\"linear_system_scaling\": None}},\n    {\"linear_solver_options\": {\"ma86_scaling\": None}},\n    {\"linear_solver_options\": {\"mumps_pivtol\": 1e-7}},\n    {\"linear_solver_options\": {\"mumps_pivtolmax\": 0.2}},\n    {\"linear_solver_options\": {\"mumps_mem_percent\": 2000}},\n    {\"linear_solver_options\": {\"mumps_permuting_scaling\": 5}},\n    {\"linear_solver_options\": {\"mumps_pivot_order\": 5}},\n    {\"linear_solver_options\": {\"mumps_scaling\": 74}},\n    {\"linear_solver_options\": {\"mumps_dep_tol\": 0.1}},\n]\n\n\ndef criterion(x):\n    return (x**2).sum()\n\n\ndef 
derivative(x):\n    return 2 * x\n\n\n@pytest.mark.skipif(not IS_CYIPOPT_INSTALLED, reason=\"cyipopt not installed.\")\n@pytest.mark.parametrize(\"algo_options\", test_cases)\ndef test_ipopt_algo_options(algo_options):\n    algorithm = Ipopt(**algo_options)\n    res = minimize(\n        fun=criterion,\n        jac=derivative,\n        algorithm=algorithm,\n        x0=np.array([1, 2, 3]),\n        bounds=Bounds(\n            lower=np.array([-np.inf, -np.inf, -np.inf]),\n            upper=np.array([np.inf, np.inf, np.inf]),\n        ),\n    )\n    aaae(res.params, np.zeros(3), decimal=7)\n"
  },
  {
    "path": "tests/optimagic/optimizers/test_nag_optimizers.py",
    "content": "import numpy as np\nimport pytest\n\nfrom optimagic import mark\nfrom optimagic.optimization.optimize import minimize\nfrom optimagic.optimizers.nag_optimizers import (\n    IS_DFOLS_INSTALLED,\n    _build_options_dict,\n    _change_evals_per_point_interface,\n    _get_fast_start_method,\n)\nfrom optimagic.parameters.bounds import Bounds\nfrom tests.estimagic.test_bootstrap import aaae\n\n\ndef test_change_evals_per_point_interface_none():\n    res = _change_evals_per_point_interface(None)\n    assert res is None\n\n\ndef test_change_evals_per_point_interface_func():\n    def return_args(\n        upper_trustregion_radius, lower_trustregion_radius, n_iterations, n_resets\n    ):\n        return (\n            upper_trustregion_radius,\n            lower_trustregion_radius,\n            n_iterations,\n            n_resets,\n        )\n\n    func = _change_evals_per_point_interface(return_args)\n    res = func(delta=0, rho=1, iter=2, nrestarts=3)\n    expected = (0, 1, 2, 3)\n    assert res == expected\n\n\ndef test_get_fast_start_method_auto():\n    res = _get_fast_start_method(\"auto\")\n    assert res == (None, None)\n\n\ndef test_get_fast_start_method_jacobian():\n    res = _get_fast_start_method(\"jacobian\")\n    assert res == (True, False)\n\n\ndef test_get_fast_start_method_trust():\n    res = _get_fast_start_method(\"trustregion\")\n    assert res == (False, True)\n\n\ndef test_get_fast_start_method_error():\n    with pytest.raises(ValueError):\n        _get_fast_start_method(\"wrong_input\")\n\n\ndef test_build_options_dict_none():\n    default = {\"a\": 1, \"b\": 2}\n    assert default == _build_options_dict(None, default)\n\n\ndef test_build_options_dict_override():\n    default = {\"a\": 1, \"b\": 2}\n    user_input = {\"a\": 0}\n    res = _build_options_dict(user_input, default)\n    expected = {\"a\": 0, \"b\": 2}\n    assert res == expected\n\n\ndef test_build_options_dict_invalid_key():\n    default = {\"a\": 1, \"b\": 2}\n    
user_input = {\"other_key\": 0}\n    with pytest.raises(ValueError):\n        _build_options_dict(user_input, default)\n\n\n@mark.least_squares\ndef sos(x):\n    return x\n\n\n@pytest.mark.skipif(\n    not IS_DFOLS_INSTALLED,\n    reason=\"DFO-LS is not installed.\",\n)\ndef test_nag_dfols_starting_at_optimum():\n    # From issue: https://github.com/optimagic-dev/optimagic/issues/538\n    params = np.zeros(2, dtype=float)\n    res = minimize(\n        fun=sos,\n        params=params,\n        algorithm=\"nag_dfols\",\n        bounds=Bounds(-1 * np.ones_like(params), np.ones_like(params)),\n    )\n    aaae(res.params, params)\n"
  },
  {
    "path": "tests/optimagic/optimizers/test_neldermead.py",
    "content": "import numpy as np\nimport pytest\n\nfrom optimagic.optimizers.neldermead import (\n    _gao_han,\n    _init_algo_params,\n    _init_simplex,\n    _nash,\n    _pfeffer,\n    _varadhan_borchers,\n    neldermead_parallel,\n)\n\n\n# function to test\ndef sphere(x, *args, **kwargs):  # noqa: ARG001\n    return (x**2).sum()\n\n\n# unit tests\ndef test_init_algo_params():\n    # test setting\n    j = 2\n    adaptive = True\n\n    # outcome\n    result = _init_algo_params(adaptive, j)\n\n    # expected outcome\n    expected = (1, 2, 0.5, 0.5)\n\n    assert result == expected\n\n\ndef test_init_simplex():\n    # test setting\n    x = np.array([1, 2, 3])\n\n    # outcome\n    result = _init_simplex(x)\n\n    # expected outcome\n    expected = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3]])\n\n    assert (result == expected).all()\n\n\ndef test_pfeffer():\n    # test setting\n    x = np.array([1, 0, 1])\n\n    # outcome\n    result = _pfeffer(x)\n\n    # expected outcome\n    expected = np.array([[1, 0, 1], [1.05, 0, 1], [1, 0.00025, 1], [1, 0, 1.05]])\n\n    assert (result == expected).all()\n\n\ndef test_nash():\n    # test setting\n    x = np.array([1, 0, 1])\n\n    # outcome\n    result = _nash(x)\n\n    # expected outcome\n    expected = np.array([[1, 0, 1], [1.1, 0, 1], [1, 0.1, 1], [1, 0, 1.1]])\n\n    assert (result == expected).all()\n\n\ndef test_gao_han():\n    # test setting\n    x = np.array([1, 0, 1])\n\n    # outcome\n    result = _gao_han(x)\n\n    # expected outcome\n    expected = np.array([[0.66667, -0.33333, 0.66667], [2, 0, 1], [1, 1, 1], [1, 0, 2]])\n\n    np.testing.assert_allclose(result, expected, atol=1e-3)\n\n\ndef test_varadhan_borchers():\n    # test setting\n    x = np.array([1, 0, 1])\n\n    # outcome\n    result = _varadhan_borchers(x)\n\n    # expected outcome\n    expected = np.array(\n        [\n            [1, 0, 1],\n            [2.3333, 0.3333, 1.3333],\n            [1.3333, 1.3333, 1.3333],\n            [1.3333, 
0.3333, 2.3333],\n        ]\n    )\n\n    np.testing.assert_allclose(result, expected, atol=1e-3)\n\n\n# general parameter test\ntest_cases = [\n    {},\n    {\"adaptive\": False},\n    {\"init_simplex_method\": \"nash\"},\n    {\"init_simplex_method\": \"pfeffer\"},\n    {\"init_simplex_method\": \"varadhan_borchers\"},\n]\n\n\n@pytest.mark.parametrize(\"algo_options\", test_cases)\ndef test_neldermead_correct_algo_options(algo_options):\n    res = neldermead_parallel(\n        criterion=sphere,\n        x=np.array([1, -5, 3]),\n        **algo_options,\n    )\n    np.testing.assert_allclose(res[\"solution_x\"], np.zeros(3), atol=5e-4)\n\n\n# test if maximum number of iterations works\ndef test_fides_stop_after_one_iteration():\n    res = neldermead_parallel(\n        criterion=sphere,\n        x=np.array([1, -5, 3]),\n        stopping_maxiter=1,\n    )\n    assert not res[\"success\"]\n    assert res[\"n_iterations\"] == 1\n"
  },
  {
    "path": "tests/optimagic/optimizers/test_nevergrad.py",
    "content": "\"\"\"Test helper functions for nevergrad optimizers.\"\"\"\n\nfrom typing import get_args\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_array_almost_equal as aaae\n\nfrom optimagic import algorithms, mark\nfrom optimagic.config import IS_NEVERGRAD_INSTALLED\nfrom optimagic.optimization.optimize import minimize\nfrom optimagic.parameters.bounds import Bounds\n\nif IS_NEVERGRAD_INSTALLED:\n    import nevergrad as ng\n\n\n@mark.least_squares\ndef sos(x):\n    return x\n\n\n### Nonlinear constraints on hold until improved handling.\n# def dummy_func():\n#     return lambda x: x\n\n\n# vec_constr = [\n#     {\n#         \"type\": \"ineq\",\n#         \"fun\": lambda x: [np.prod(x) + 1.0, 2.0 - np.prod(x)],\n#         \"jac\": dummy_func,\n#         \"n_constr\": 2,\n#     }\n# ]\n\n# constrs = [\n#     {\n#         \"type\": \"ineq\",\n#         \"fun\": lambda x: np.prod(x) + 1.0,\n#         \"jac\": dummy_func,\n#         \"n_constr\": 1,\n#     },\n#     {\n#         \"type\": \"ineq\",\n#         \"fun\": lambda x: 2.0 - np.prod(x),\n#         \"jac\": dummy_func,\n#         \"n_constr\": 1,\n#     },\n# ]\n\n\n# def test_process_nonlinear_constraints():\n#     got = _process_nonlinear_constraints(vec_constr)\n#     assert len(got) == 2\n\n\n# def test_get_constraint_evaluations():\n#     x = np.array([1, 1])\n#     got = _get_constraint_evaluations(constrs, x)\n#     expected = [np.array([-2.0]), np.array([-1.0])]\n#     assert got == expected\n\n\n# def test_batch_constraint_evaluations():\n#     x = np.array([1, 1])\n#     x_list = [x] * 2\n#     got = _batch_constraint_evaluations(constrs, x_list, 2)\n#     expected = [[np.array([-2.0]), np.array([-1.0])]] * 2\n#     assert got == expected\n###\n\n\n# test if all optimizers listed in Literal type hint are valid attributes\n@pytest.mark.skipif(not IS_NEVERGRAD_INSTALLED, reason=\"nevergrad not installed\")\ndef test_meta_optimizers_are_valid():\n    opt = 
algorithms.NevergradMeta\n    optimizers = get_args(opt.__annotations__[\"optimizer\"])\n    for optimizer in optimizers:\n        try:\n            getattr(ng.optimizers, optimizer)\n        except AttributeError:\n            pytest.fail(f\"Optimizer '{optimizer}' not found in Nevergrad\")\n\n\n@pytest.mark.skipif(not IS_NEVERGRAD_INSTALLED, reason=\"nevergrad not installed\")\ndef test_ngopt_optimizers_are_valid():\n    opt = algorithms.NevergradNGOpt\n    optimizers = get_args(opt.__annotations__[\"optimizer\"])\n    for optimizer in optimizers:\n        try:\n            getattr(ng.optimizers, optimizer)\n        except AttributeError:\n            pytest.fail(f\"Optimizer '{optimizer}' not found in Nevergrad\")\n\n\n# list of available optimizers in nevergrad_meta\nNEVERGRAD_META = get_args(algorithms.NevergradMeta.__annotations__[\"optimizer\"])\n# list of available optimizers in nevergrad_ngopt\nNEVERGRAD_NGOPT = get_args(algorithms.NevergradNGOpt.__annotations__[\"optimizer\"])\n\n\n# test stochastic_global_algorithm_on_sum_of_squares\n@pytest.mark.slow\n@pytest.mark.parametrize(\"algorithm\", NEVERGRAD_META)\n@pytest.mark.skipif(not IS_NEVERGRAD_INSTALLED, reason=\"nevergrad not installed\")\ndef test_meta_optimizers_with_stochastic_global_algorithm_on_sum_of_squares(algorithm):\n    res = minimize(\n        fun=sos,\n        params=np.array([0.35, 0.35]),\n        bounds=Bounds(lower=np.array([0.2, -0.5]), upper=np.array([1, 0.5])),\n        algorithm=algorithms.NevergradMeta(algorithm),\n        collect_history=False,\n        skip_checks=True,\n        algo_options={\"seed\": 12345},\n    )\n    assert res.success in [True, None]\n    aaae(res.params, np.array([0.2, 0]), decimal=1)\n\n\n@pytest.mark.slow\n@pytest.mark.parametrize(\"algorithm\", NEVERGRAD_NGOPT)\n@pytest.mark.skipif(not IS_NEVERGRAD_INSTALLED, reason=\"nevergrad not installed\")\ndef test_ngopt_optimizers_with_stochastic_global_algorithm_on_sum_of_squares(algorithm):\n    res = 
minimize(\n        fun=sos,\n        params=np.array([0.35, 0.35]),\n        bounds=Bounds(lower=np.array([0.2, -0.5]), upper=np.array([1, 0.5])),\n        algorithm=algorithms.NevergradNGOpt(algorithm),\n        collect_history=False,\n        skip_checks=True,\n        algo_options={\"seed\": 12345},\n    )\n    assert res.success in [True, None]\n    aaae(res.params, np.array([0.2, 0]), decimal=1)\n"
  },
  {
    "path": "tests/optimagic/optimizers/test_pounders_integration.py",
    "content": "\"\"\"Test suite for the internal pounders interface.\"\"\"\n\nimport sys\nfrom functools import partial\nfrom itertools import product\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom numpy.testing import assert_array_almost_equal as aaae\n\nfrom optimagic.optimizers.pounders import internal_solve_pounders\nfrom tests.optimagic.optimizers._pounders.test_pounders_unit import FIXTURES_DIR\n\n\ndef load_history(start_vec, solver_sub):\n    start_vec_str = np.array2string(\n        start_vec, precision=3, separator=\",\", suppress_small=False\n    )\n\n    history_x = np.genfromtxt(\n        FIXTURES_DIR / f\"history_x_{start_vec_str}_{solver_sub}_3_8.csv\",\n        delimiter=\",\",\n    )\n    history_criterion = np.genfromtxt(\n        FIXTURES_DIR / f\"history_criterion_{start_vec_str}_{solver_sub}_3_8.csv\",\n        delimiter=\",\",\n    )\n\n    return history_x, history_criterion\n\n\n@pytest.fixture()\ndef criterion():\n    data = pd.read_csv(FIXTURES_DIR / \"pounders_example_data.csv\")\n    endog = np.asarray(data[\"y\"])\n    exog = np.asarray(data[\"t\"])\n\n    def func(x: np.ndarray, exog: np.ndarray, endog: np.ndarray) -> np.ndarray:\n        \"\"\"User provided residual function.\"\"\"\n        return endog - np.exp(-x[0] * exog) / (x[1] + x[2] * exog)\n\n    return partial(func, exog=exog, endog=endog)\n\n\n@pytest.fixture()\ndef pounders_options():\n    out = {\n        \"delta\": 0.1,\n        \"delta_min\": 1e-6,\n        \"delta_max\": 1e6,\n        \"gamma0\": 0.5,\n        \"gamma1\": 2.0,\n        \"theta1\": 1e-5,\n        \"theta2\": 1e-4,\n        \"eta0\": 0.0,\n        \"eta1\": 0.1,\n        \"c1\": np.sqrt(3),\n        \"c2\": 10,\n        \"lower_bounds\": None,\n        \"upper_bounds\": None,\n        \"maxiter\": 200,\n    }\n    return out\n\n\n@pytest.fixture()\ndef trustregion_subproblem_options():\n    out = {\n        \"maxiter\": 50,\n        \"maxiter_gradient_descent\": 5,\n        
\"gtol_abs\": 1e-8,\n        \"gtol_rel\": 1e-8,\n        \"gtol_scaled\": 0,\n        \"gtol_abs_cg\": 1e-8,\n        \"gtol_rel_cg\": 1e-6,\n        \"k_easy\": 0.1,\n        \"k_hard\": 0.2,\n    }\n    return out\n\n\nstart_vec = [np.array([0.15, 0.008, 0.01], dtype=np.float64)]\ncg_routine = [\"cg\", \"steihaug_toint\", \"trsbox\"]\nuniversal_tests = list(product(start_vec, cg_routine))\nspecific_tests = [\n    (np.array([1e-6, 1e-6, 1e-6]), \"cg\"),\n    (np.array([1e-3, 1e-3, 1e-3]), \"cg\"),\n]\nTEST_CASES = universal_tests + specific_tests\n\n\n@pytest.mark.skipif(sys.platform == \"win32\", reason=\"Not accurate on Windows.\")\n@pytest.mark.skipif(\n    sys.platform == \"linux\" and sys.version_info[:2] >= (3, 10),\n    reason=\"Not accurate on Linux with Python 3.10 or higher.\",\n)\n@pytest.mark.parametrize(\"start_vec, conjugate_gradient_method_sub\", TEST_CASES)\ndef test_bntr(\n    start_vec,\n    conjugate_gradient_method_sub,\n    criterion,\n    pounders_options,\n    trustregion_subproblem_options,\n):\n    solver_sub = \"bntr\"\n\n    gtol_abs = 1e-8\n    gtol_rel = 1e-8\n    gtol_scaled = 0\n\n    def batch_fun(x_list, n_cores):\n        return [criterion(x) for x in x_list]\n\n    result = internal_solve_pounders(\n        x0=start_vec,\n        criterion=criterion,\n        gtol_abs=gtol_abs,\n        gtol_rel=gtol_rel,\n        gtol_scaled=gtol_scaled,\n        maxinterp=2 * len(start_vec) + 1,\n        solver_sub=solver_sub,\n        conjugate_gradient_method_sub=conjugate_gradient_method_sub,\n        maxiter_sub=trustregion_subproblem_options[\"maxiter\"],\n        maxiter_gradient_descent_sub=trustregion_subproblem_options[\n            \"maxiter_gradient_descent\"\n        ],\n        gtol_abs_sub=trustregion_subproblem_options[\"gtol_abs\"],\n        gtol_rel_sub=trustregion_subproblem_options[\"gtol_rel\"],\n        gtol_scaled_sub=trustregion_subproblem_options[\"gtol_scaled\"],\n        
gtol_abs_conjugate_gradient_sub=trustregion_subproblem_options[\"gtol_abs_cg\"],\n        gtol_rel_conjugate_gradient_sub=trustregion_subproblem_options[\"gtol_rel_cg\"],\n        k_easy_sub=trustregion_subproblem_options[\"k_easy\"],\n        k_hard_sub=trustregion_subproblem_options[\"k_hard\"],\n        n_cores=1,\n        batch_fun=batch_fun,\n        **pounders_options,\n    )\n\n    x_expected = np.array([0.1902789114691, 0.006131410288292, 0.01053088353832])\n    aaae(result.x, x_expected, decimal=3)\n\n\n@pytest.mark.parametrize(\"start_vec\", [(np.array([0.15, 0.008, 0.01]))])\ndef test_gqtpar(start_vec, criterion, pounders_options, trustregion_subproblem_options):\n    solver_sub = \"gqtpar\"\n\n    gtol_abs = 1e-8\n    gtol_rel = 1e-8\n    gtol_scaled = 0\n\n    def batch_fun(x_list, n_cores):\n        return [criterion(x) for x in x_list]\n\n    result = internal_solve_pounders(\n        x0=start_vec,\n        criterion=criterion,\n        gtol_abs=gtol_abs,\n        gtol_rel=gtol_rel,\n        gtol_scaled=gtol_scaled,\n        maxinterp=7,\n        solver_sub=solver_sub,\n        conjugate_gradient_method_sub=\"trsbox\",\n        maxiter_sub=trustregion_subproblem_options[\"maxiter\"],\n        maxiter_gradient_descent_sub=trustregion_subproblem_options[\n            \"maxiter_gradient_descent\"\n        ],\n        gtol_abs_sub=trustregion_subproblem_options[\"gtol_abs\"],\n        gtol_rel_sub=trustregion_subproblem_options[\"gtol_rel\"],\n        gtol_scaled_sub=trustregion_subproblem_options[\"gtol_scaled\"],\n        gtol_abs_conjugate_gradient_sub=trustregion_subproblem_options[\"gtol_abs_cg\"],\n        gtol_rel_conjugate_gradient_sub=trustregion_subproblem_options[\"gtol_rel_cg\"],\n        k_easy_sub=trustregion_subproblem_options[\"k_easy\"],\n        k_hard_sub=trustregion_subproblem_options[\"k_hard\"],\n        n_cores=1,\n        batch_fun=batch_fun,\n        **pounders_options,\n    )\n\n    x_expected = np.array([0.1902789114691, 
0.006131410288292, 0.01053088353832])\n    aaae(result.x, x_expected, decimal=4)\n"
  },
  {
    "path": "tests/optimagic/optimizers/test_pygad_optimizer.py",
    "content": "\"\"\"Test helper functions for PyGAD optimizer.\"\"\"\n\nimport warnings\n\nimport pytest\n\nfrom optimagic.optimizers.pygad_optimizer import (\n    AdaptiveMutation,\n    InversionMutation,\n    RandomMutation,\n    ScrambleMutation,\n    SwapMutation,\n    _convert_mutation_to_pygad_params,\n    _create_mutation_from_string,\n    _determine_effective_batch_size,\n    _get_default_mutation_params,\n)\n\n\n@pytest.mark.parametrize(\n    \"batch_size, n_cores, expected\",\n    [\n        (None, 1, None),\n        (None, 4, 4),\n        (10, 4, 10),\n        (4, 4, 4),\n        (2, 4, 2),\n        (5, 1, 5),\n        (0, 4, 0),\n        (None, 100, 100),\n        (1, 1, 1),\n    ],\n)\ndef test_determine_effective_batch_size_return_values(batch_size, n_cores, expected):\n    result = _determine_effective_batch_size(batch_size, n_cores)\n    assert result == expected\n\n\n@pytest.mark.parametrize(\n    \"batch_size, n_cores, should_warn\",\n    [\n        (2, 4, True),\n        (1, 8, True),\n        (0, 4, True),\n        (4, 4, False),\n        (8, 4, False),\n        (None, 4, False),\n        (5, 1, False),\n        (None, 1, False),\n    ],\n)\ndef test_determine_effective_batch_size_warnings(batch_size, n_cores, should_warn):\n    if should_warn:\n        warning_pattern = (\n            f\"batch_size \\\\({batch_size}\\\\) is smaller than \"\n            f\"n_cores \\\\({n_cores}\\\\)\\\\. This may reduce parallel efficiency\\\\. 
\"\n            f\"Consider setting batch_size >= n_cores\\\\.\"\n        )\n        with pytest.warns(UserWarning, match=warning_pattern):\n            result = _determine_effective_batch_size(batch_size, n_cores)\n            assert result == batch_size\n    else:\n        with warnings.catch_warnings():\n            warnings.simplefilter(\"error\")\n            result = _determine_effective_batch_size(batch_size, n_cores)\n\n\n# Tests for _get_default_mutation_params\n@pytest.mark.parametrize(\n    \"mutation_type, expected\",\n    [\n        (\n            \"random\",\n            {\n                \"mutation_type\": \"random\",\n                \"mutation_probability\": None,\n                \"mutation_percent_genes\": \"default\",\n                \"mutation_num_genes\": None,\n                \"mutation_by_replacement\": False,\n            },\n        ),\n        (\n            None,\n            {\n                \"mutation_type\": None,\n                \"mutation_probability\": None,\n                \"mutation_percent_genes\": None,\n                \"mutation_num_genes\": None,\n                \"mutation_by_replacement\": None,\n            },\n        ),\n    ],\n)\ndef test_get_default_mutation_params(mutation_type, expected):\n    result = _get_default_mutation_params(mutation_type)\n    assert result == expected\n\n\n# Tests for _create_mutation_from_string\n@pytest.mark.parametrize(\n    \"mutation_type, expected_class\",\n    [\n        (\"random\", RandomMutation),\n        (\"swap\", SwapMutation),\n        (\"inversion\", InversionMutation),\n        (\"scramble\", ScrambleMutation),\n        (\"adaptive\", AdaptiveMutation),\n    ],\n)\ndef test_create_mutation_from_string_valid(mutation_type, expected_class):\n    result = _create_mutation_from_string(mutation_type)\n    assert isinstance(result, expected_class)\n\n\ndef test_create_mutation_from_string_invalid():\n    with pytest.raises(ValueError, match=\"Unsupported mutation type: 
invalid\"):\n        _create_mutation_from_string(\"invalid\")\n\n\n# Tests for _convert_mutation_to_pygad_params\ndef test_convert_mutation_none():\n    result = _convert_mutation_to_pygad_params(None)\n    expected = {\n        \"mutation_type\": None,\n        \"mutation_probability\": None,\n        \"mutation_percent_genes\": None,\n        \"mutation_num_genes\": None,\n        \"mutation_by_replacement\": None,\n    }\n    assert result == expected\n\n\n@pytest.mark.parametrize(\n    \"mutation_string\",\n    [\"random\", \"swap\", \"inversion\", \"scramble\", \"adaptive\"],\n)\ndef test_convert_mutation_string(mutation_string):\n    result = _convert_mutation_to_pygad_params(mutation_string)\n    assert result[\"mutation_type\"] == mutation_string\n    assert \"mutation_probability\" in result\n    assert \"mutation_percent_genes\" in result\n    assert \"mutation_num_genes\" in result\n    assert \"mutation_by_replacement\" in result\n\n\n@pytest.mark.parametrize(\n    \"mutation_class\",\n    [\n        RandomMutation,\n        SwapMutation,\n        InversionMutation,\n        ScrambleMutation,\n        AdaptiveMutation,\n    ],\n)\ndef test_convert_mutation_class(mutation_class):\n    result = _convert_mutation_to_pygad_params(mutation_class)\n    assert result[\"mutation_type\"] == mutation_class.mutation_type\n    assert \"mutation_probability\" in result\n    assert \"mutation_percent_genes\" in result\n    assert \"mutation_num_genes\" in result\n    assert \"mutation_by_replacement\" in result\n\n\ndef test_convert_mutation_instance():\n    # Test RandomMutation instance\n    mutation = RandomMutation(probability=0.2, by_replacement=True)\n    result = _convert_mutation_to_pygad_params(mutation)\n    assert result[\"mutation_type\"] == \"random\"\n    assert result[\"mutation_probability\"] == 0.2\n    assert result[\"mutation_by_replacement\"] is True\n\n    # Test SwapMutation instance\n    mutation = SwapMutation()\n    result = 
_convert_mutation_to_pygad_params(mutation)\n    assert result[\"mutation_type\"] == \"swap\"\n\n    # Test AdaptiveMutation instance\n    mutation = AdaptiveMutation(probability_bad=0.3, probability_good=0.1)\n    result = _convert_mutation_to_pygad_params(mutation)\n    assert result[\"mutation_type\"] == \"adaptive\"\n    assert result[\"mutation_probability\"] == [0.3, 0.1]\n\n\ndef test_convert_mutation_custom_function():\n    def custom_mutation(offspring, ga_instance):\n        return offspring\n\n    result = _convert_mutation_to_pygad_params(custom_mutation)\n    assert result[\"mutation_type\"] == custom_mutation\n\n\ndef test_convert_mutation_invalid_type():\n    with pytest.raises(ValueError, match=\"Unsupported mutation type\"):\n        _convert_mutation_to_pygad_params(123)\n\n\n# Tests for mutation dataclasses\ndef test_random_mutation_default():\n    mutation = RandomMutation()\n    result = mutation.to_pygad_params()\n    assert result[\"mutation_type\"] == \"random\"\n    assert result[\"mutation_probability\"] is None\n    assert result[\"mutation_percent_genes\"] == \"default\"\n    assert result[\"mutation_num_genes\"] is None\n    assert result[\"mutation_by_replacement\"] is False\n\n\ndef test_random_mutation_with_parameters():\n    mutation = RandomMutation(\n        probability=0.15, num_genes=5, percent_genes=20.0, by_replacement=True\n    )\n    result = mutation.to_pygad_params()\n    assert result[\"mutation_type\"] == \"random\"\n    assert result[\"mutation_probability\"] == 0.15\n    assert result[\"mutation_percent_genes\"] == 20.0\n    assert result[\"mutation_num_genes\"] == 5\n    assert result[\"mutation_by_replacement\"] is True\n\n\n@pytest.mark.parametrize(\n    \"mutation_class, expected_type\",\n    [\n        (SwapMutation, \"swap\"),\n        (InversionMutation, \"inversion\"),\n        (ScrambleMutation, \"scramble\"),\n    ],\n)\ndef test_simple_mutations(mutation_class, expected_type):\n    mutation = 
mutation_class()\n    result = mutation.to_pygad_params()\n    assert result[\"mutation_type\"] == expected_type\n    assert result[\"mutation_probability\"] is None\n    assert result[\"mutation_percent_genes\"] == \"default\"\n    assert result[\"mutation_num_genes\"] is None\n    assert result[\"mutation_by_replacement\"] is False\n\n\ndef test_adaptive_mutation_default():\n    mutation = AdaptiveMutation()\n    result = mutation.to_pygad_params()\n    assert result[\"mutation_type\"] == \"adaptive\"\n    assert result[\"mutation_probability\"] == [0.1, 0.05]  # Default values\n    assert result[\"mutation_percent_genes\"] is None\n    assert result[\"mutation_num_genes\"] is None\n    assert result[\"mutation_by_replacement\"] is False\n\n\ndef test_adaptive_mutation_with_probabilities():\n    mutation = AdaptiveMutation(probability_bad=0.2, probability_good=0.08)\n    result = mutation.to_pygad_params()\n    assert result[\"mutation_type\"] == \"adaptive\"\n    assert result[\"mutation_probability\"] == [0.2, 0.08]\n    assert result[\"mutation_percent_genes\"] is None\n    assert result[\"mutation_num_genes\"] is None\n    assert result[\"mutation_by_replacement\"] is False\n\n\ndef test_adaptive_mutation_with_num_genes():\n    mutation = AdaptiveMutation(num_genes_bad=10, num_genes_good=5)\n    result = mutation.to_pygad_params()\n    assert result[\"mutation_type\"] == \"adaptive\"\n    assert result[\"mutation_probability\"] is None\n    assert result[\"mutation_num_genes\"] == [10, 5]\n    assert result[\"mutation_percent_genes\"] is None\n    assert result[\"mutation_by_replacement\"] is False\n\n\ndef test_adaptive_mutation_with_percent_genes():\n    mutation = AdaptiveMutation(percent_genes_bad=25.0, percent_genes_good=10.0)\n    result = mutation.to_pygad_params()\n    assert result[\"mutation_type\"] == \"adaptive\"\n    assert result[\"mutation_probability\"] is None\n    assert result[\"mutation_num_genes\"] is None\n    assert 
result[\"mutation_percent_genes\"] == [25.0, 10.0]\n    assert result[\"mutation_by_replacement\"] is False\n\n\ndef test_mutation_type_class_variables():\n    assert RandomMutation.mutation_type == \"random\"\n    assert SwapMutation.mutation_type == \"swap\"\n    assert InversionMutation.mutation_type == \"inversion\"\n    assert ScrambleMutation.mutation_type == \"scramble\"\n    assert AdaptiveMutation.mutation_type == \"adaptive\"\n"
  },
  {
    "path": "tests/optimagic/optimizers/test_pygmo_optimizers.py",
    "content": "\"\"\"Test optimization helper functions.\"\"\"\n\nimport numpy as np\nimport pytest\n\nfrom optimagic.optimizers.pygmo_optimizers import (\n    _convert_str_to_int,\n    get_population_size,\n)\n\ntest_cases = [\n    # popsize, x, lower_bound, expected\n    (55.3, None, None, 55),\n    (None, np.ones(5), 500, 500),\n    (None, np.ones(5), 4, 60),\n]\n\n\n@pytest.mark.parametrize(\"popsize, x, lower_bound, expected\", test_cases)\ndef test_determine_population_size(popsize, x, lower_bound, expected):\n    res = get_population_size(population_size=popsize, x=x, lower_bound=lower_bound)\n    assert res == expected\n\n\ndef test_convert_str_to_int():\n    d = {\"a\": 1, \"b\": 3}\n    assert _convert_str_to_int(d, \"a\") == 1\n    assert _convert_str_to_int(d, 1) == 1\n    with pytest.raises(ValueError):\n        _convert_str_to_int(d, 5)\n    with pytest.raises(ValueError):\n        _convert_str_to_int(d, \"hello\")\n"
  },
  {
    "path": "tests/optimagic/optimizers/test_pyswarms_optimizers.py",
    "content": "\"\"\"Test helper functions in PySwarms optimizers.\"\"\"\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_array_equal\n\nfrom optimagic.config import IS_PYSWARMS_INSTALLED\nfrom optimagic.optimization.internal_optimization_problem import InternalBounds\nfrom optimagic.optimizers.pyswarms_optimizers import (\n    PyramidTopology,\n    RandomTopology,\n    RingTopology,\n    StarTopology,\n    VonNeumannTopology,\n    _build_velocity_clamp,\n    _create_initial_positions,\n    _get_pyswarms_bounds,\n    _resolve_topology_config,\n)\n\nRNG = np.random.default_rng(12345)\n\n\n# Test _build_velocity_clamp\ndef test_build_velocity_clamp_both_values():\n    \"\"\"Test velocity clamp with both min and max values.\"\"\"\n    result = _build_velocity_clamp(-1.0, 1.0)\n    assert result == (-1.0, 1.0)\n\n\ndef test_build_velocity_clamp_partial_values():\n    \"\"\"Test velocity clamp with only one value provided.\"\"\"\n    result = _build_velocity_clamp(-1.0, None)\n    assert result is None\n\n    result = _build_velocity_clamp(None, 1.0)\n    assert result is None\n\n\ndef test_build_velocity_clamp_none_values():\n    \"\"\"Test velocity clamp with None values.\"\"\"\n    result = _build_velocity_clamp(None, None)\n    assert result is None\n\n\n# Test _get_pyswarms_bounds\ndef test_get_pyswarms_bounds_with_both():\n    \"\"\"Test bounds conversion when both lower and upper bounds are provided.\"\"\"\n    bounds = InternalBounds(lower=np.array([-2.0, -3.0]), upper=np.array([5.0, 4.0]))\n\n    result = _get_pyswarms_bounds(bounds)\n\n    assert result is not None\n    lower, upper = result\n    assert_array_equal(lower, np.array([-2.0, -3.0]))\n    assert_array_equal(upper, np.array([5.0, 4.0]))\n\n\ndef test_get_pyswarms_bounds_with_none():\n    \"\"\"Test bounds conversion when no bounds are provided.\"\"\"\n    bounds = InternalBounds(lower=None, upper=None)\n\n    result = _get_pyswarms_bounds(bounds)\n    assert result is 
None\n\n\ndef test_get_pyswarms_bounds_partial_bounds():\n    \"\"\"Test bounds conversion with only one bound provided.\"\"\"\n    # Only lower bounds\n    bounds = InternalBounds(lower=np.array([1.0, 2.0]), upper=None)\n    result = _get_pyswarms_bounds(bounds)\n    assert result is None\n\n    # Only upper bounds\n    bounds = InternalBounds(lower=None, upper=np.array([3.0, 4.0]))\n    result = _get_pyswarms_bounds(bounds)\n    assert result is None\n\n\ndef test_get_pyswarms_bounds_with_infinite():\n    \"\"\"Test that infinite bounds raise ValueError.\"\"\"\n    bounds = InternalBounds(\n        lower=np.array([-np.inf, -1.0]), upper=np.array([1.0, np.inf])\n    )\n\n    with pytest.raises(ValueError, match=\"PySwarms does not support infinite bounds\"):\n        _get_pyswarms_bounds(bounds)\n\n\n# Test _create_initial_positions\n@pytest.mark.parametrize(\"center\", [0.5, 1.0, 2.0])\ndef test_create_initial_positions_basic(center):\n    \"\"\"Test basic initial positions creation.\"\"\"\n    x0 = np.array([1.0, 2.0])\n    n_particles = 5\n    bounds = (np.array([-5.0, -5.0]), np.array([5.0, 5.0]))\n\n    init_pos = _create_initial_positions(\n        x0=x0, n_particles=n_particles, bounds=bounds, center=center, rng=RNG\n    )\n\n    assert init_pos.shape == (5, 2)\n\n    assert_array_equal(init_pos[0], x0)\n\n    # Check all particles are within bounds\n    assert np.all(init_pos >= bounds[0])\n    assert np.all(init_pos <= bounds[1])\n\n\ndef test_create_initial_positions_no_bounds():\n    \"\"\"Test initial positions creation with no bounds.\"\"\"\n    x0 = np.array([0.5, 1.5])\n    n_particles = 3\n    bounds = None\n\n    init_pos = _create_initial_positions(\n        x0=x0, n_particles=n_particles, bounds=bounds, center=1.0, rng=RNG\n    )\n\n    assert init_pos.shape == (3, 2)\n\n    expected_x0 = np.array([0.5, 1.0])\n    assert_array_equal(init_pos[0], expected_x0)\n\n    assert np.all(init_pos >= 0.0)\n    assert np.all(init_pos <= 
1.0)\n\n\n@pytest.mark.skipif(not IS_PYSWARMS_INSTALLED, reason=\"PySwarms not installed\")\n@pytest.mark.parametrize(\n    (\"topology_string\", \"expected_class_name\", \"expected_options\"),\n    [\n        (\"star\", \"Star\", {}),\n        (\"ring\", \"Ring\", {\"k\": 3, \"p\": 2}),\n        (\"vonneumann\", \"VonNeumann\", {\"p\": 2, \"r\": 1}),\n        (\"random\", \"Random\", {\"k\": 3}),\n        (\"pyramid\", \"Pyramid\", {}),\n    ],\n)\ndef test_resolve_topology_config_by_string(\n    topology_string, expected_class_name, expected_options\n):\n    \"\"\"Test topology resolution with string names.\"\"\"\n    topology, options = _resolve_topology_config(topology_string)\n\n    assert topology.__class__.__name__ == expected_class_name\n    assert options == expected_options\n\n\n@pytest.mark.skipif(not IS_PYSWARMS_INSTALLED, reason=\"PySwarms not installed\")\n@pytest.mark.parametrize(\n    (\"config_instance\", \"expected_class_name\", \"expected_options\"),\n    [\n        (StarTopology(), \"Star\", {}),\n        (RingTopology(k_neighbors=5, p_norm=1, static=True), \"Ring\", {\"k\": 5, \"p\": 1}),\n        (\n            VonNeumannTopology(p_norm=1, range_param=2),\n            \"VonNeumann\",\n            {\"p\": 1, \"r\": 2},\n        ),\n        (RandomTopology(k_neighbors=4, static=False), \"Random\", {\"k\": 4}),\n        (PyramidTopology(static=True), \"Pyramid\", {}),\n    ],\n)\ndef test_resolve_topology_config_by_instance(\n    config_instance, expected_class_name, expected_options\n):\n    \"\"\"Test topology resolution with instances.\"\"\"\n    topology, options = _resolve_topology_config(config_instance)\n\n    # Check the class name and options\n    assert topology.__class__.__name__ == expected_class_name\n    assert options == expected_options\n\n    if hasattr(config_instance, \"static\"):\n        assert topology.static == config_instance.static\n\n\n@pytest.mark.skipif(not IS_PYSWARMS_INSTALLED, reason=\"PySwarms not installed\")\ndef 
test_resolve_topology_config_invalid_string():\n    \"\"\"Test topology resolution with invalid string.\"\"\"\n    with pytest.raises(ValueError, match=\"Unknown topology string: 'invalid'\"):\n        _resolve_topology_config(\"invalid\")\n\n\n@pytest.mark.skipif(not IS_PYSWARMS_INSTALLED, reason=\"PySwarms not installed\")\ndef test_resolve_topology_config_invalid_type():\n    \"\"\"Test topology resolution with invalid type.\"\"\"\n    with pytest.raises(TypeError, match=\"Unsupported topology configuration type\"):\n        _resolve_topology_config(123)\n"
  },
  {
    "path": "tests/optimagic/optimizers/test_tao_optimizers.py",
    "content": "\"\"\"Test the wrapper around pounders.\"\"\"\n\nimport functools\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nfrom optimagic.config import IS_PETSC4PY_INSTALLED\nfrom optimagic.optimization.optimize import minimize\nfrom optimagic.utilities import get_rng\n\nif not IS_PETSC4PY_INSTALLED:\n    pytestmark = pytest.mark.skip(reason=\"petsc4py is not installed.\")\n\n\nNUM_AGENTS = 2_000\nfrom optimagic import mark\n\n\ndef get_random_params(\n    length,\n    rng,  # noqa: ARG001\n    low=0,\n    high=1,\n    lower_bound=-np.inf,\n    upper_bound=np.inf,\n):\n    params = pd.DataFrame(\n        {\n            \"value\": np.random.uniform(low, high, size=length),\n            \"lower_bound\": lower_bound,\n            \"upper_bound\": upper_bound,\n        }\n    )\n\n    return params\n\n\ndef test_robustness():\n    rng = get_rng(5471)\n    true_params = get_random_params(2, rng)\n    start_params = true_params.copy()\n    start_params[\"value\"] = get_random_params(2, rng)[\"value\"]\n\n    exog, endog = _simulate_ols_sample(NUM_AGENTS, true_params)\n    criterion_func = mark.least_squares(\n        functools.partial(_ols_criterion, endog=endog, exog=exog)\n    )\n    result = minimize(criterion_func, start_params, \"tao_pounders\")\n\n    x = np.column_stack([np.ones_like(exog), exog])\n    y = endog.reshape(len(endog), 1)\n    expected = np.linalg.lstsq(x, y, rcond=None)[0].flatten()\n\n    np.testing.assert_almost_equal(\n        result.params[\"value\"].to_numpy(), expected, decimal=6\n    )\n\n\ndef test_box_constr():\n    rng = get_rng(5472)\n    true_params = get_random_params(2, rng, 0.3, 0.4, 0, 0.3)\n\n    start_params = true_params.copy()\n    start_params[\"value\"] = get_random_params(2, rng, 0.1, 0.2)[\"value\"]\n\n    exog, endog = _simulate_ols_sample(NUM_AGENTS, true_params)\n    criterion_func = mark.least_squares(\n        functools.partial(_ols_criterion, endog=endog, exog=exog)\n    )\n    result = 
minimize(criterion_func, start_params, \"tao_pounders\")\n\n    assert 0 <= result.params[\"value\"].to_numpy()[0] <= 0.3\n    assert 0 <= result.params[\"value\"].to_numpy()[1] <= 0.3\n\n\ndef test_max_iters():\n    rng = get_rng(5473)\n    true_params = get_random_params(2, rng, 0.3, 0.4, 0, 0.3)\n    start_params = true_params.copy()\n    start_params[\"value\"] = get_random_params(2, rng, 0.1, 0.2)[\"value\"]\n\n    exog, endog = _simulate_ols_sample(NUM_AGENTS, true_params)\n    criterion_func = mark.least_squares(\n        functools.partial(_ols_criterion, endog=endog, exog=exog)\n    )\n    result = minimize(\n        criterion_func,\n        start_params,\n        \"tao_pounders\",\n        algo_options={\"stopping.maxiter\": 25},\n    )\n\n    assert result.message in (\"user defined\", \"step size small\")\n\n\ndef test_grtol():\n    rng = get_rng(5474)\n    true_params = get_random_params(2, rng, 0.3, 0.4, 0, 0.3)\n    start_params = true_params.copy()\n    start_params[\"value\"] = get_random_params(2, rng, 0.1, 0.2)[\"value\"]\n\n    exog, endog = _simulate_ols_sample(NUM_AGENTS, true_params)\n    criterion_func = mark.least_squares(\n        functools.partial(_ols_criterion, endog=endog, exog=exog)\n    )\n    result = minimize(\n        criterion_func,\n        start_params,\n        \"tao_pounders\",\n        algo_options={\n            \"convergence.gtol_abs\": False,\n            \"convergence.gtol_scaled\": False,\n        },\n    )\n\n    assert result.message in (\n        \"relative_gradient_tolerance below critical value\",\n        \"step size small\",\n    )\n\n\ndef test_gatol():\n    rng = get_rng(5475)\n    true_params = get_random_params(2, rng, 0.3, 0.4, 0, 0.3)\n    start_params = true_params.copy()\n    start_params[\"value\"] = get_random_params(2, rng, 0.1, 0.2)[\"value\"]\n\n    exog, endog = _simulate_ols_sample(NUM_AGENTS, true_params)\n    criterion_func = mark.least_squares(\n        functools.partial(_ols_criterion, 
endog=endog, exog=exog)\n    )\n    result = minimize(\n        criterion_func,\n        start_params,\n        \"tao_pounders\",\n        algo_options={\n            \"convergence.gtol_rel\": False,\n            \"convergence.gtol_scaled\": False,\n        },\n    )\n\n    assert result.message in (\n        \"absolute_gradient_tolerance below critical value\",\n        \"step size small\",\n    )\n\n\ndef test_gttol():\n    rng = get_rng(5476)\n    true_params = get_random_params(2, rng, 0.3, 0.4, 0, 0.3)\n    start_params = true_params.copy()\n    start_params[\"value\"] = get_random_params(2, rng, 0.1, 0.2)[\"value\"]\n\n    exog, endog = _simulate_ols_sample(NUM_AGENTS, true_params)\n    criterion_func = mark.least_squares(\n        functools.partial(_ols_criterion, endog=endog, exog=exog)\n    )\n    result = minimize(\n        criterion_func,\n        start_params,\n        \"tao_pounders\",\n        algo_options={\n            \"convergence.gtol_rel\": False,\n            \"convergence.gtol_abs\": False,\n        },\n    )\n\n    assert result.message in (\n        \"gradient_total_tolerance below critical value\",\n        \"step size small\",\n    )\n\n\ndef test_tol():\n    rng = get_rng(5477)\n    true_params = get_random_params(2, rng, 0.3, 0.4, 0, 0.3)\n    start_params = true_params.copy()\n    start_params[\"value\"] = get_random_params(2, rng, 0.1, 0.2)[\"value\"]\n\n    exog, endog = _simulate_ols_sample(NUM_AGENTS, true_params)\n    criterion_func = mark.least_squares(\n        functools.partial(_ols_criterion, endog=endog, exog=exog)\n    )\n    minimize(\n        criterion_func,\n        start_params,\n        \"tao_pounders\",\n        algo_options={\n            \"convergence.gtol_abs\": 1e-7,\n            \"convergence.gtol_rel\": 1e-7,\n            \"convergence.gtol_scaled\": 1e-9,\n        },\n    )\n\n\ndef _ols_criterion(x, endog, exog):\n    return endog - x.loc[0, \"value\"] - x.loc[1, \"value\"] * exog\n\n\ndef 
_simulate_ols_sample(num_agents, paras):\n    rng = get_rng(seed=1234)\n    exog = rng.uniform(-5, 5, num_agents)\n    error_term = rng.normal(0, 1, num_agents)\n    endog = paras.at[0, \"value\"] + paras.at[1, \"value\"] * exog + error_term\n\n    return exog, endog\n"
  },
  {
    "path": "tests/optimagic/optimizers/test_tranquilo.py",
    "content": "import numpy as np\nimport pytest\n\nfrom optimagic.exceptions import NotInstalledError\nfrom optimagic.optimizers.tranquilo import Tranquilo, TranquiloLS\n\n\n@pytest.fixture()\ndef mock_problem():\n    \"\"\"Create a minimal mock of InternalOptimizationProblem.\"\"\"\n\n    class MockBounds:\n        lower = np.array([-1.0, -1.0])\n        upper = np.array([1.0, 1.0])\n\n    class MockProblem:\n        bounds = MockBounds()\n\n        def batch_fun(self, xs):\n            return [np.sum(x**2) for x in xs]\n\n    return MockProblem()\n\n\ndef test_tranquilo_raises_if_version_too_old(monkeypatch, mock_problem):\n    import optimagic.optimizers.tranquilo as tranquilo_mod\n\n    monkeypatch.setattr(\n        tranquilo_mod, \"IS_TRANQUILO_VERSION_NEWER_OR_EQUAL_TO_0_1_0\", False\n    )\n\n    algo = Tranquilo()\n    x0 = np.array([0.5, 0.5])\n\n    with pytest.raises(NotInstalledError, match=\"tranquilo\"):\n        algo._solve_internal_problem(mock_problem, x0)\n\n\ndef test_tranquilo_ls_raises_if_version_too_old(monkeypatch, mock_problem):\n    import optimagic.optimizers.tranquilo as tranquilo_mod\n\n    monkeypatch.setattr(\n        tranquilo_mod, \"IS_TRANQUILO_VERSION_NEWER_OR_EQUAL_TO_0_1_0\", False\n    )\n\n    algo = TranquiloLS()\n    x0 = np.array([0.5, 0.5])\n\n    with pytest.raises(NotInstalledError, match=\"tranquilo\"):\n        algo._solve_internal_problem(mock_problem, x0)\n"
  },
  {
    "path": "tests/optimagic/parameters/test_block_trees.py",
    "content": "import numpy as np\nimport pandas as pd\nimport pytest\nfrom numpy.testing import assert_array_equal\nfrom pybaum import tree_equal\nfrom pybaum import tree_just_flatten as tree_leaves\n\nfrom optimagic import second_derivative\nfrom optimagic.parameters.block_trees import (\n    block_tree_to_hessian,\n    block_tree_to_matrix,\n    hessian_to_block_tree,\n    matrix_to_block_tree,\n)\nfrom optimagic.parameters.tree_registry import get_registry\n\n\ndef test_matrix_to_block_tree_array_and_scalar():\n    t = {\"a\": 1.0, \"b\": np.arange(2)}\n    calculated = matrix_to_block_tree(np.arange(9).reshape(3, 3), t, t)\n\n    expected = {\n        \"a\": {\"a\": np.array(0), \"b\": np.array([1, 2])},\n        \"b\": {\"a\": np.array([3, 6]), \"b\": np.array([[4, 5], [7, 8]])},\n    }\n\n    assert _tree_equal_up_to_dtype(calculated, expected)\n\n\ndef test_matrix_to_block_tree_only_params_dfs():\n    tree = {\n        \"a\": pd.DataFrame(index=[\"a\", \"b\"]).assign(value=[1, 2]),\n        \"b\": pd.DataFrame(index=[\"j\", \"k\", \"l\"]).assign(value=[3, 4, 5]),\n    }\n\n    calculated = matrix_to_block_tree(np.arange(25).reshape(5, 5), tree, tree)\n\n    expected = {\n        \"a\": {\n            \"a\": pd.DataFrame([[0, 1], [5, 6]], columns=[\"a\", \"b\"], index=[\"a\", \"b\"]),\n            \"b\": pd.DataFrame(\n                [[2, 3, 4], [7, 8, 9]], columns=[\"j\", \"k\", \"l\"], index=[\"a\", \"b\"]\n            ),\n        },\n        \"b\": {\n            \"a\": pd.DataFrame(\n                [[10, 11], [15, 16], [20, 21]],\n                index=[\"j\", \"k\", \"l\"],\n                columns=[\"a\", \"b\"],\n            ),\n            \"b\": pd.DataFrame(\n                [[12, 13, 14], [17, 18, 19], [22, 23, 24]],\n                index=[\"j\", \"k\", \"l\"],\n                columns=[\"j\", \"k\", \"l\"],\n            ),\n        },\n    }\n\n    assert _tree_equal_up_to_dtype(calculated, expected)\n\n\ndef 
test_matrix_to_block_tree_single_element():\n    tree1 = {\"a\": 0}\n    tree2 = {\"b\": 1, \"c\": 2}\n\n    block_tree = {\"a\": {\"b\": 0, \"c\": 1}}\n    matrix = np.array([[0, 1]])\n\n    calculated = matrix_to_block_tree(matrix, tree1, tree2)\n    assert tree_equal(block_tree, calculated)\n\n\n# one params df (make sure we don't get a list back)\n# dataframe and scalar\n# tests against jax\n\n\ndef test_block_tree_to_matrix_array_and_scalar():\n    t1 = {\"c\": np.arange(3), \"d\": (2.0, 1)}\n    t2 = {\"a\": 1.0, \"b\": np.arange(2)}\n\n    expected = np.arange(15).reshape(5, 3)\n\n    block_tree = {\n        \"c\": {\"a\": np.array([0, 3, 6]), \"b\": np.array([[1, 2], [4, 5], [7, 8]])},\n        \"d\": (\n            {\"a\": np.array(9), \"b\": np.array([10, 11])},\n            {\"a\": np.array(12), \"b\": np.array([13, 14])},\n        ),\n    }\n\n    calculated = block_tree_to_matrix(block_tree, t1, t2)\n    assert_array_equal(expected, calculated)\n\n\ndef test_block_tree_to_matrix_only_params_dfs():\n    expected = np.arange(25).reshape(5, 5)\n\n    tree = {\n        \"a\": pd.DataFrame(index=[\"a\", \"b\"]).assign(value=[1, 2]),\n        \"b\": pd.DataFrame(index=[\"j\", \"k\", \"l\"]).assign(value=[3, 4, 5]),\n    }\n    block_tree = {\n        \"a\": {\n            \"a\": pd.DataFrame([[0, 1], [5, 6]], columns=[\"a\", \"b\"], index=[\"a\", \"b\"]),\n            \"b\": pd.DataFrame(\n                [[2, 3, 4], [7, 8, 9]], columns=[\"j\", \"k\", \"l\"], index=[\"a\", \"b\"]\n            ),\n        },\n        \"b\": {\n            \"a\": pd.DataFrame(\n                [[10, 11], [15, 16], [20, 21]],\n                index=[\"j\", \"k\", \"l\"],\n                columns=[\"a\", \"b\"],\n            ),\n            \"b\": pd.DataFrame(\n                [[12, 13, 14], [17, 18, 19], [22, 23, 24]],\n                index=[\"j\", \"k\", \"l\"],\n                columns=[\"j\", \"k\", \"l\"],\n            ),\n        },\n    }\n\n    calculated = 
block_tree_to_matrix(block_tree, tree, tree)\n    assert_array_equal(expected, calculated)\n\n\ndef test_block_tree_to_hessian_bijection():\n    params = {\"a\": np.arange(4), \"b\": [{\"c\": (1, 2), \"d\": np.array([5, 6])}]}\n    f_tree = {\"e\": np.arange(3), \"f\": (5, 6, [7, 8, {\"g\": 1.0}])}\n\n    registry = get_registry(extended=True)\n    n_p = len(tree_leaves(params, registry=registry))\n    n_f = len(tree_leaves(f_tree, registry=registry))\n\n    expected = np.arange(n_f * n_p**2).reshape(n_f, n_p, n_p)\n    block_hessian = hessian_to_block_tree(expected, f_tree, params)\n    got = block_tree_to_hessian(block_hessian, f_tree, params)\n    assert_array_equal(expected, got)\n\n\ndef test_hessian_to_block_tree_bijection():\n    params = {\"a\": np.arange(4), \"b\": [{\"c\": (1, 2), \"d\": np.array([5, 6])}]}\n\n    def func(params):\n        return {\"e\": params[\"a\"] ** 3, \"f\": (params[\"b\"][0][\"c\"][1] / 0.5)}\n\n    expected = second_derivative(func, params).derivative\n    hessian = block_tree_to_hessian(expected, func(params), params)\n    got = hessian_to_block_tree(hessian, func(params), params)\n    _tree_equal_up_to_dtype(expected, got)\n\n\ndef test_block_tree_to_matrix_valueerror():\n    # test that value error is raised when dimensions don't match\n    inner = {\"a\": 1, \"b\": 1}\n    outer = 1\n    block_tree = {\"a\": 1}  # should have same structure as inner\n    with pytest.raises(ValueError):\n        block_tree_to_matrix(block_tree, inner, outer)\n\n\ndef _tree_equal_up_to_dtype(left, right):\n    # does not compare dtypes for pandas.DataFrame\n    return tree_equal(left, right, equality_checkers={pd.DataFrame: _frame_equal})\n\n\ndef _frame_equal(left, right):\n    try:\n        pd.testing.assert_frame_equal(left, right, check_dtype=False)\n        return True\n    except AssertionError:\n        return False\n"
  },
  {
    "path": "tests/optimagic/parameters/test_bounds.py",
    "content": "import numpy as np\nimport pandas as pd\nimport pytest\nfrom numpy.testing import assert_array_equal\n\nfrom optimagic.exceptions import InvalidBoundsError\nfrom optimagic.parameters.bounds import (\n    Bounds,\n    _get_fast_path_bounds,\n    get_internal_bounds,\n    pre_process_bounds,\n)\n\n\n@pytest.fixture()\ndef pytree_params():\n    pytree_params = {\n        \"delta\": 0.95,\n        \"utility\": pd.DataFrame(\n            [[0.5, 0]] * 3, index=[\"a\", \"b\", \"c\"], columns=[\"value\", \"lower_bound\"]\n        ),\n        \"probs\": np.array([[0.8, 0.2], [0.3, 0.7]]),\n    }\n    return pytree_params\n\n\n@pytest.fixture()\ndef array_params():\n    return np.arange(2)\n\n\ndef test_pre_process_bounds_trivial_case():\n    got = pre_process_bounds(Bounds(lower=[0], upper=[1]))\n    expected = Bounds(lower=[0], upper=[1])\n    assert got == expected\n\n\ndef test_pre_process_bounds_none_case():\n    assert pre_process_bounds(None) is None\n\n\ndef test_pre_process_bounds_sequence():\n    got = pre_process_bounds([(0, 1), (None, 1)])\n    expected = Bounds(lower=[0, -np.inf], upper=[1, 1])\n    assert_array_equal(got.lower, expected.lower)\n    assert_array_equal(got.upper, expected.upper)\n\n\ndef test_pre_process_bounds_invalid_type():\n    with pytest.raises(InvalidBoundsError):\n        pre_process_bounds(1)\n\n\ndef test_get_bounds_subdataframe(pytree_params):\n    upper_bounds = {\n        \"utility\": pd.DataFrame([[2]] * 2, index=[\"b\", \"c\"], columns=[\"value\"]),\n    }\n    lower_bounds = {\n        \"delta\": 0,\n        \"utility\": pd.DataFrame([[1]] * 2, index=[\"a\", \"b\"], columns=[\"value\"]),\n    }\n\n    bounds = Bounds(lower=lower_bounds, upper=upper_bounds)\n\n    lb, ub = get_internal_bounds(pytree_params, bounds=bounds)\n\n    assert np.all(lb[1:3] == np.ones(2))\n    assert np.all(ub[2:4] == 2 * np.ones(2))\n\n\nTEST_CASES = [\n    Bounds(lower={\"delta\": [0, -1]}, upper=None),\n    Bounds(lower={\"probs\": 1}, 
upper=None),\n    Bounds(lower={\"probs\": np.array([0, 1])}, upper=None),  # wrong size lower bounds\n    Bounds(lower=None, upper={\"probs\": np.array([0, 1])}),  # wrong size upper bounds\n]\n\n\n@pytest.mark.parametrize(\"bounds\", TEST_CASES)\ndef test_get_bounds_error(pytree_params, bounds):\n    with pytest.raises(InvalidBoundsError):\n        get_internal_bounds(pytree_params, bounds=bounds)\n\n\ndef test_get_bounds_no_arguments(pytree_params):\n    got_lower, got_upper = get_internal_bounds(pytree_params)\n\n    expected_lower = np.array([-np.inf] + 3 * [0] + 4 * [-np.inf])\n\n    assert_array_equal(got_lower, expected_lower)\n    assert got_upper is None\n\n\ndef test_get_bounds_with_lower_bounds(pytree_params):\n    lower_bounds = {\"delta\": 0.1}\n\n    bounds = Bounds(lower=lower_bounds)\n\n    got_lower, got_upper = get_internal_bounds(pytree_params, bounds=bounds)\n\n    expected_lower = np.array([0.1] + 3 * [0] + 4 * [-np.inf])\n\n    assert_array_equal(got_lower, expected_lower)\n    assert got_upper is None\n\n\ndef test_get_bounds_with_upper_bounds(pytree_params):\n    upper_bounds = {\n        \"utility\": pd.DataFrame([[1]] * 3, index=[\"a\", \"b\", \"c\"], columns=[\"value\"]),\n    }\n    bounds = Bounds(upper=upper_bounds)\n    got_lower, got_upper = get_internal_bounds(pytree_params, bounds=bounds)\n\n    expected_lower = np.array([-np.inf] + 3 * [0] + 4 * [-np.inf])\n    expected_upper = np.array([np.inf] + 3 * [1] + 4 * [np.inf])\n\n    assert_array_equal(got_lower, expected_lower)\n    assert_array_equal(got_upper, expected_upper)\n\n\ndef test_get_bounds_numpy(array_params):\n    got_lower, got_upper = get_internal_bounds(array_params)\n\n    assert got_lower is None\n    assert got_upper is None\n\n\ndef test_get_bounds_numpy_error(array_params):\n    # lower bounds larger than upper bounds\n    bounds = Bounds(lower=np.ones_like(array_params), upper=np.zeros_like(array_params))\n    with pytest.raises(InvalidBoundsError):\n        
get_internal_bounds(\n            array_params,\n            bounds=bounds,\n        )\n\n\ndef test_get_fast_path_bounds_both_none():\n    got_lower, got_upper = _get_fast_path_bounds(Bounds(lower=None, upper=None))\n    assert got_lower is None\n    assert got_upper is None\n\n\ndef test_get_fast_path_bounds_lower_none():\n    got_lower, got_upper = _get_fast_path_bounds(\n        bounds=Bounds(lower=None, upper=np.array([1, 2, 3])),\n    )\n    assert_array_equal(got_lower, None)\n    assert_array_equal(got_upper, np.array([1, 2, 3]))\n"
  },
  {
    "path": "tests/optimagic/parameters/test_check_constraints.py",
    "content": "import numpy as np\nimport pytest\n\nimport optimagic as om\nfrom optimagic.exceptions import InvalidParamsError\nfrom optimagic.parameters.check_constraints import _iloc\nfrom optimagic.parameters.constraint_tools import check_constraints\n\n\ndef test_iloc():\n    dictionary = {\n        \"index\": np.array([\"a\", \"b\", \"c\"]),\n        \"lower_bounds\": np.array([0, 0, 0]),\n        \"upper_bounds\": np.array([1, 1, 1]),\n        \"is_fixed_to_value\": np.array([False, False, True]),\n    }\n    position = [0, 2]\n    expected_result = {\n        \"index\": np.array([\"a\", \"c\"]),\n        \"lower_bounds\": np.array([0, 0]),\n        \"upper_bounds\": np.array([1, 1]),\n        \"is_fixed_to_value\": np.array([False, True]),\n    }\n    result = _iloc(dictionary, position)\n    assert len(result) == len(expected_result)\n    for k, v in expected_result.items():\n        assert k in result\n        assert np.array_equal(result[k], v)\n\n\ndef test_check_constraints_are_satisfied_type_equality():\n    with pytest.raises(InvalidParamsError):\n        check_constraints(\n            params=np.array([1, 2, 3]),\n            constraints=om.EqualityConstraint(lambda x: x[:2]),\n        )\n\n\ndef test_check_constraints_are_satisfied_type_increasing():\n    with pytest.raises(InvalidParamsError):\n        check_constraints(\n            params=np.array([1, 2, 3, 2, 4]),\n            constraints=om.IncreasingConstraint(lambda x: x[[1, 2, 3]]),\n        )\n\n\ndef test_check_constraints_are_satisfied_type_decreasing():\n    with pytest.raises(InvalidParamsError):\n        check_constraints(\n            params=np.array([1, 2, 3, 2, 4]),\n            constraints=om.DecreasingConstraint(lambda x: x[[0, 1, 3]]),\n        )\n\n\ndef test_check_constraints_are_satisfied_type_pairwise_equality():\n    with pytest.raises(InvalidParamsError):\n        check_constraints(\n            params=np.array([1, 2, 3, 3, 4]),\n            
constraints=om.PairwiseEqualityConstraint(\n                selectors=[lambda x: x[[0, 4]], lambda x: x[[3, 2]]]\n            ),\n        )\n\n\ndef test_check_constraints_are_satisfied_type_probability():\n    with pytest.raises(InvalidParamsError):\n        check_constraints(\n            params=np.array([0.10, 0.25, 0.50, 1, 0.7]),\n            constraints=om.ProbabilityConstraint(lambda x: x[[0, 1, 2, 4]]),\n        )\n\n\ndef test_check_constraints_are_satisfied_type_linear_lower_bound():\n    with pytest.raises(InvalidParamsError):\n        check_constraints(\n            params=np.ones(5),\n            constraints=om.LinearConstraint(\n                selector=lambda x: x[[0, 2, 3, 4]], lower_bound=1.1, weights=0.25\n            ),\n        )\n\n\ndef test_check_constraints_are_satisfied_type_linear_upper_bound():\n    with pytest.raises(InvalidParamsError):\n        check_constraints(\n            params=np.ones(5),\n            constraints=om.LinearConstraint(\n                selector=lambda x: x[[0, 2, 3, 4]], upper_bound=0.9, weights=0.25\n            ),\n        )\n\n\ndef test_check_constraints_are_satisfied_type_linear_value():\n    with pytest.raises(InvalidParamsError):\n        check_constraints(\n            params=np.ones(5),\n            constraints=om.LinearConstraint(\n                selector=lambda x: x[[0, 2, 3, 4]], value=2, weights=0.25\n            ),\n        )\n\n\ndef test_check_constraints_are_satisfied_type_covariance():\n    with pytest.raises(InvalidParamsError):\n        check_constraints(\n            params=[1, 1, 1, -1, 1, -1],\n            constraints=om.FlatCovConstraint(selector=lambda params: params),\n        )\n\n\ndef test_check_constraints_are_satisfied_type_sdcorr():\n    with pytest.raises(InvalidParamsError):\n        check_constraints(\n            params=[1, 1, 1, -1, 1, 1],\n            constraints=om.FlatSDCorrConstraint(selector=lambda params: params),\n        )\n"
  },
  {
    "path": "tests/optimagic/parameters/test_constraint_tools.py",
    "content": "import pytest\n\nimport optimagic as om\nfrom optimagic.exceptions import InvalidParamsError\nfrom optimagic.parameters.constraint_tools import check_constraints, count_free_params\n\n\ndef test_count_free_params_no_constraints():\n    params = {\"a\": 1, \"b\": 2, \"c\": [3, 3]}\n    assert count_free_params(params) == 4\n\n\ndef test_count_free_params_with_constraints():\n    params = {\"a\": 1, \"b\": 2, \"c\": [3, 3]}\n    constraints = om.EqualityConstraint(lambda x: x[\"c\"])\n    assert count_free_params(params, constraints=constraints) == 3\n\n\ndef test_check_constraints():\n    params = {\"a\": 1, \"b\": 2, \"c\": [3, 4]}\n    constraints = om.EqualityConstraint(lambda x: x[\"c\"])\n\n    with pytest.raises(InvalidParamsError):\n        check_constraints(params, constraints=constraints)\n"
  },
  {
    "path": "tests/optimagic/parameters/test_conversion.py",
    "content": "import numpy as np\nimport pytest\nfrom numpy.testing import assert_array_almost_equal as aaae\n\nfrom optimagic.parameters.bounds import Bounds\nfrom optimagic.parameters.conversion import (\n    _is_fast_deriv_eval,\n    _is_fast_path,\n    get_converter,\n)\nfrom optimagic.parameters.scaling import ScalingOptions\nfrom optimagic.typing import AggregationLevel\n\n\ndef test_get_converter_fast_case():\n    converter, internal = get_converter(\n        params=np.arange(3),\n        constraints=None,\n        bounds=None,\n        func_eval=3,\n        derivative_eval=2 * np.arange(3),\n        solver_type=AggregationLevel.SCALAR,\n    )\n\n    aaae(internal.values, np.arange(3))\n    assert internal.lower_bounds is None\n    assert internal.upper_bounds is None\n\n    aaae(converter.params_to_internal(np.arange(3)), np.arange(3))\n    aaae(converter.params_from_internal(np.arange(3)), np.arange(3))\n    aaae(\n        converter.derivative_to_internal(2 * np.arange(3), np.arange(3)),\n        2 * np.arange(3),\n    )\n\n\ndef test_get_converter_with_constraints_and_bounds():\n    bounds = Bounds(\n        lower=np.array([-1, -np.inf, -np.inf]),\n        upper=np.array([np.inf, 10, np.inf]),\n    )\n    converter, internal = get_converter(\n        params=np.arange(3),\n        constraints=[{\"loc\": 2, \"type\": \"fixed\"}],\n        bounds=bounds,\n        func_eval=3,\n        derivative_eval=2 * np.arange(3),\n        solver_type=AggregationLevel.SCALAR,\n    )\n\n    aaae(internal.values, np.arange(2))\n    aaae(internal.lower_bounds, np.array([-1, -np.inf]))\n    aaae(internal.upper_bounds, np.array([np.inf, 10]))\n\n    aaae(converter.params_to_internal(np.arange(3)), np.arange(2))\n    aaae(converter.params_from_internal(np.arange(2)), np.arange(3))\n    aaae(\n        converter.derivative_to_internal(2 * np.arange(3), np.arange(2)),\n        2 * np.arange(2),\n    )\n\n\ndef test_get_converter_with_scaling():\n    bounds = Bounds(\n        
lower=np.arange(3) - 1,\n        upper=np.arange(3) + 1,\n    )\n    converter, internal = get_converter(\n        params=np.arange(3),\n        constraints=None,\n        bounds=bounds,\n        func_eval=3,\n        derivative_eval=2 * np.arange(3),\n        solver_type=AggregationLevel.SCALAR,\n        scaling=ScalingOptions(method=\"start_values\", clipping_value=0.5),\n    )\n\n    aaae(internal.values, np.array([0, 1, 1]))\n    aaae(internal.lower_bounds, np.array([-2, 0, 0.5]))\n    aaae(internal.upper_bounds, np.array([2, 2, 1.5]))\n\n    aaae(converter.params_to_internal(np.arange(3)), np.array([0, 1, 1]))\n    aaae(converter.params_from_internal(np.array([0, 1, 1])), np.arange(3))\n    aaae(\n        converter.derivative_to_internal(2 * np.arange(3), np.arange(3)),\n        np.array([0, 2, 8]),\n    )\n\n\ndef test_get_converter_with_trees():\n    params = {\"a\": 0, \"b\": 1, \"c\": 2}\n    converter, internal = get_converter(\n        params=params,\n        constraints=None,\n        bounds=None,\n        func_eval={\"d\": 1, \"e\": 2},\n        derivative_eval={\"a\": 0, \"b\": 2, \"c\": 4},\n        solver_type=AggregationLevel.SCALAR,\n    )\n\n    aaae(internal.values, np.arange(3))\n    aaae(internal.lower_bounds, np.full(3, -np.inf))\n    aaae(internal.upper_bounds, np.full(3, np.inf))\n\n    aaae(converter.params_to_internal(params), np.arange(3))\n    assert converter.params_from_internal(np.arange(3)) == params\n    aaae(\n        converter.derivative_to_internal(params, np.arange(3)),\n        np.arange(3),\n    )\n\n\n@pytest.fixture()\ndef fast_kwargs():\n    kwargs = {\n        \"params\": np.arange(3),\n        \"constraints\": None,\n        \"solver_type\": AggregationLevel.SCALAR,\n        \"scaling\": None,\n        \"derivative_eval\": np.arange(3),\n        \"add_soft_bounds\": False,\n    }\n    return kwargs\n\n\nSTILL_FAST = [\n    (\"params\", np.arange(3)),\n    (\"constraints\", []),\n]\n\n\n@pytest.mark.parametrize(\"name, 
value\", STILL_FAST)\ndef test_is_fast_path_when_true(fast_kwargs, name, value):\n    kwargs = fast_kwargs.copy()\n    kwargs[name] = value\n    assert _is_fast_path(**kwargs)\n\n\nSLOW = [\n    (\"params\", {\"a\": 1}),\n    (\"params\", np.arange(4).reshape(2, 2)),\n    (\"constraints\", [{}]),\n    (\"scaling\", ScalingOptions()),\n    (\"derivative_eval\", {\"bla\": 3}),\n    (\"derivative_eval\", np.arange(3).reshape(1, 3)),\n    (\"add_soft_bounds\", True),\n]\n\n\n@pytest.mark.parametrize(\"name, value\", SLOW)\ndef test_is_fast_path_when_false(fast_kwargs, name, value):\n    kwargs = fast_kwargs.copy()\n    kwargs[name] = value\n    assert not _is_fast_path(**kwargs)\n\n\nhelper = np.arange(6).reshape(3, 2)\n\nFAST_DERIV_CASES = [\n    (AggregationLevel.LIKELIHOOD, helper),\n    (AggregationLevel.LEAST_SQUARES, helper),\n    (AggregationLevel.SCALAR, None),\n    (AggregationLevel.LIKELIHOOD, None),\n    (AggregationLevel.LEAST_SQUARES, None),\n]\n\n\n@pytest.mark.parametrize(\"key, f\", FAST_DERIV_CASES)\ndef test_is_fast_deriv_eval_true(key, f):\n    assert _is_fast_deriv_eval(f, key)\n\n\nSLOW_DERIV_CASES = [\n    (AggregationLevel.LIKELIHOOD, np.arange(8).reshape(2, 2, 2)),\n    (AggregationLevel.LIKELIHOOD, {\"contributions\": np.arange(8).reshape(2, 2, 2)}),\n    (AggregationLevel.LEAST_SQUARES, np.arange(8).reshape(2, 2, 2)),\n    (\n        AggregationLevel.LEAST_SQUARES,\n        {\"root_contributions\": np.arange(8).reshape(2, 2, 2)},\n    ),\n]\n\n\n@pytest.mark.parametrize(\"key, f\", SLOW_DERIV_CASES)\ndef test_is_fast_deriv_eval_false(key, f):\n    assert not _is_fast_deriv_eval(f, key)\n"
  },
  {
    "path": "tests/optimagic/parameters/test_kernel_transformations.py",
    "content": "from functools import partial\nfrom itertools import product\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_array_almost_equal as aaae\n\nimport optimagic.parameters.kernel_transformations as kt\nfrom optimagic.differentiation.derivatives import first_derivative\nfrom optimagic.parameters.kernel_transformations import cov_matrix_to_sdcorr_params\nfrom optimagic.utilities import get_rng\n\nto_test = list(product(range(10, 30, 5), [1234, 5471]))\n\n\ndef get_internal_cholesky(dim, seed=0):\n    \"\"\"Return random internal cholesky values given dimension.\"\"\"\n    rng = get_rng(seed)\n    chol = np.tril(rng.normal(size=(dim, dim)))\n    internal = chol[np.tril_indices(len(chol))]\n    return internal\n\n\ndef get_external_covariance(dim, seed=0):\n    \"\"\"Return random external covariance values given dimension.\"\"\"\n    rng = get_rng(seed)\n    data = rng.normal(size=(dim, 1000))\n    cov = np.cov(data)\n    external = cov[np.tril_indices(dim)]\n    return external\n\n\ndef get_internal_probability(dim, seed=0):\n    \"\"\"Return random internal positive values given dimension.\"\"\"\n    rng = get_rng(seed)\n    internal = rng.uniform(size=dim)\n    return internal\n\n\ndef get_external_probability(dim, seed=0):\n    \"\"\"Return random internal positive values that sum to one.\"\"\"\n    internal = get_internal_probability(dim, seed)\n    external = internal / internal.sum()\n    return external\n\n\ndef get_external_sdcorr(dim, seed=0):\n    \"\"\"Return random external sdcorr values given dimension.\"\"\"\n    rng = get_rng(seed)\n    data = rng.normal(size=(dim, 1000))\n    cov = np.cov(data)\n    external = cov_matrix_to_sdcorr_params(cov)\n    return external\n\n\n@pytest.mark.parametrize(\"dim, seed\", to_test)\ndef test_covariance_from_internal_jacobian(dim, seed):  # noqa: ARG001\n    internal = get_internal_cholesky(dim)\n\n    func = partial(kt.covariance_from_internal, constr=None)\n    numerical_deriv = 
first_derivative(func, internal)\n    deriv = kt.covariance_from_internal_jacobian(internal, None)\n\n    aaae(deriv, numerical_deriv.derivative, decimal=3)\n\n\n@pytest.mark.parametrize(\"dim, seed\", to_test)\ndef test_covariance_to_internal_jacobian(dim, seed):  # noqa: ARG001\n    external = get_external_covariance(dim)\n\n    func = partial(kt.covariance_to_internal, constr=None)\n    numerical_deriv = first_derivative(func, external)\n    deriv = kt.covariance_to_internal_jacobian(external, None)\n\n    aaae(deriv, numerical_deriv.derivative, decimal=3)\n\n\n@pytest.mark.parametrize(\"dim, seed\", to_test)\ndef test_probability_from_internal_jacobian(dim, seed):  # noqa: ARG001\n    internal = get_internal_probability(dim)\n\n    func = partial(kt.probability_from_internal, constr=None)\n    numerical_deriv = first_derivative(func, internal)\n    deriv = kt.probability_from_internal_jacobian(internal, None)\n\n    aaae(deriv, numerical_deriv.derivative, decimal=3)\n\n\n@pytest.mark.parametrize(\"dim, seed\", to_test)\ndef test_probability_to_internal_jacobian(dim, seed):  # noqa: ARG001\n    external = get_external_probability(dim)\n\n    func = partial(kt.probability_to_internal, constr=None)\n    numerical_deriv = first_derivative(func, external)\n    deriv = kt.probability_to_internal_jacobian(external, None)\n\n    aaae(deriv, numerical_deriv.derivative, decimal=3)\n\n\n@pytest.mark.parametrize(\"dim, seed\", to_test)\ndef test_sdcorr_from_internal_jacobian(dim, seed):  # noqa: ARG001\n    internal = get_internal_cholesky(dim)\n\n    func = partial(kt.sdcorr_from_internal, constr=None)\n    numerical_deriv = first_derivative(func, internal)\n    deriv = kt.sdcorr_from_internal_jacobian(internal, None)\n\n    aaae(deriv, numerical_deriv.derivative, decimal=3)\n\n\n@pytest.mark.parametrize(\"dim, seed\", to_test)\ndef test_sdcorr_to_internal_jacobian(dim, seed):  # noqa: ARG001\n    external = get_external_sdcorr(dim)\n\n    func = 
partial(kt.sdcorr_to_internal, constr=None)\n    numerical_deriv = first_derivative(func, external)\n    deriv = kt.sdcorr_to_internal_jacobian(external, None)\n\n    aaae(deriv, numerical_deriv.derivative, decimal=3)\n"
  },
  {
    "path": "tests/optimagic/parameters/test_nonlinear_constraints.py",
    "content": "import itertools\nfrom dataclasses import dataclass\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom numpy.testing import assert_array_equal\nfrom pandas.testing import assert_frame_equal\nfrom pybaum import tree_just_flatten\n\nfrom optimagic.differentiation.numdiff_options import NumdiffOptions\nfrom optimagic.exceptions import InvalidConstraintError\nfrom optimagic.parameters.nonlinear_constraints import (\n    _check_validity_and_return_evaluation,\n    _get_components,\n    _get_selection_indices,\n    _get_transformation,\n    _get_transformation_type,\n    _process_selector,\n    _vector_to_list_of_scalar,\n    equality_as_inequality_constraints,\n    process_nonlinear_constraints,\n    vector_as_list_of_scalar_constraints,\n)\nfrom optimagic.parameters.tree_registry import get_registry\n\n\n@dataclass\nclass Converter:\n    def params_from_internal(self, x):\n        return x\n\n    def params_to_internal(self, params):\n        registry = get_registry(extended=True)\n        return np.array(tree_just_flatten(params, registry=registry))\n\n\n# ======================================================================================\n# _get_transformation_type\n# ======================================================================================\nTEST_CASES = [\n    (0, np.inf, \"identity\"),  # (lower_bounds, upper_bounds, expected)\n    (-1, 2, \"stack\"),\n    (np.zeros(3), np.ones(3), \"stack\"),\n    (np.zeros(3), np.tile(np.inf, 3), \"identity\"),\n    (np.array([1, 2]), np.tile(np.inf, 2), \"subtract_lb\"),\n]\n\n\n@pytest.mark.parametrize(\"lower_bounds, upper_bounds, expected\", TEST_CASES)\ndef test_get_transformation_type(lower_bounds, upper_bounds, expected):\n    got = _get_transformation_type(lower_bounds, upper_bounds)\n    assert got == expected\n\n\n# ======================================================================================\n# _get_transformation\n# 
======================================================================================\n\nTEST_CASES = [\n    #  (lower_bounds, upper_bounds, case, expected)  # noqa: ERA001\n    (0, 0, \"func\", {\"name\": \"stack\", \"out\": np.array([1, -1])}),\n    (1, 1, \"func\", {\"name\": \"stack\", \"out\": np.array([0, 0])}),\n    (0, 0, \"derivative\", {\"name\": \"stack\", \"out\": np.array([1, -1])}),\n    (1, 1, \"derivative\", {\"name\": \"stack\", \"out\": np.array([1, -1])}),\n    (1, np.inf, \"func\", {\"name\": \"subtract_lb\", \"out\": np.array([0])}),\n    (0, np.inf, \"derivative\", {\"name\": \"identity\", \"out\": np.array([1])}),\n]\n\n\n@pytest.mark.parametrize(\"lower_bounds, upper_bounds, case, expected\", TEST_CASES)\ndef test_get_positivity_transform(lower_bounds, upper_bounds, case, expected):\n    transform = _get_transformation(lower_bounds, upper_bounds)\n\n    got = transform[case](np.array([1]))\n    assert np.all(got == expected[\"out\"])\n    assert transform[\"name\"] == expected[\"name\"]\n\n\n# ======================================================================================\n# _get_selection_indices\n# ======================================================================================\n\n\ndef test_get_selection_indices():\n    params = {\"a\": [0, 1, 2], \"b\": [3, 4, 5]}\n    selector = lambda p: p[\"a\"]\n\n    expected = np.array([0, 1, 2], dtype=int)\n    got_index, got_n_params = _get_selection_indices(params, selector)\n\n    assert got_n_params == 6\n    assert_array_equal(got_index, expected)\n\n\n# ======================================================================================\n# _process_selector\n# ======================================================================================\nTEST_CASES = [\n    ({\"selector\": lambda x: x**2}, 10, 100),  # (constraint, params, expected)\n    ({\"loc\": \"a\"}, pd.Series([0, 1], index=[\"a\", \"b\"]), 0),\n    (\n        {\"query\": \"a == 1\"},\n        
pd.DataFrame([[1], [0]], columns=[\"a\"]),\n        pd.DataFrame([[1]], columns=[\"a\"]),\n    ),\n]\n\n\n@pytest.mark.parametrize(\"constraint, params, expected\", TEST_CASES)\ndef test_process_selector(constraint, params, expected):\n    _selector = _process_selector(constraint)\n    got = _selector(params)\n\n    if isinstance(got, pd.DataFrame):\n        assert_frame_equal(got, expected)\n    else:\n        assert got == expected\n\n\n# ======================================================================================\n# _check_validity_nonlinear_constraint\n# ======================================================================================\nTEST_CASES = [\n    {},  # no fun\n    {\"func\": 10},  # non-callable fun\n    {\"func\": lambda x: x, \"derivative\": 10},  # non-callable jac\n    {\"func\": lambda x: x},  # no bounds at all\n    {\n        \"func\": lambda x: x,\n        \"value\": 1,\n        \"lower_bounds\": 1,\n    },  # cannot have value and bounds\n    {\n        \"func\": lambda x: x,\n        \"value\": 1,\n        \"upper_bounds\": 1,\n    },  # cannot have value and bounds\n    {\"func\": lambda x: x},  # needs to have at least one bound\n    {\"func\": lambda x: x, \"lower_bounds\": 1, \"upper_bounds\": 0},\n    {\"func\": lambda x: x, \"selector\": 10},\n    {\"func\": lambda x: x, \"loc\": 10},\n    {\"func\": lambda x: x, \"query\": 10},\n]\n\nTEST_CASES = list(\n    itertools.product(TEST_CASES, [np.arange(3), pd.DataFrame({\"a\": [0, 1, 2]})])\n)\n\n\n@pytest.mark.parametrize(\"constraint, params\", TEST_CASES)\ndef test_check_validity_nonlinear_constraint(constraint, params):\n    with pytest.raises(InvalidConstraintError):\n        _check_validity_and_return_evaluation(constraint, params, skip_checks=False)\n\n\ndef test_check_validity_nonlinear_constraint_correct_example():\n    constr = {\n        \"func\": lambda x: x,\n        \"derivative\": np.ones_like,\n        \"lower_bounds\": np.arange(4),\n        \"selector\": 
lambda x: x[:1],\n    }\n    _check_validity_and_return_evaluation(\n        constr, params=np.arange(4), skip_checks=False\n    )\n\n\n# ======================================================================================\n# equality_as_inequality_constraints\n# ======================================================================================\nTEST_CASES = [\n    (\n        [\n            {\n                \"type\": \"ineq\",\n                \"fun\": lambda x: np.array([x]),\n                \"jac\": lambda x: np.array([[1]]),  # noqa: ARG005\n                \"n_constr\": 1,\n            }\n        ],  # constraints\n        \"same\",  # expected\n    ),\n    (\n        [\n            {\n                \"type\": \"ineq\",\n                \"fun\": lambda x: np.array([x]),\n                \"jac\": lambda x: np.array([[1]]),  # noqa: ARG005\n                \"n_constr\": 1,\n            }\n        ],  # constraints\n        [\n            {\n                \"type\": \"eq\",\n                \"fun\": lambda x: np.array([x, -x]).reshape(-1, 1),\n                \"jac\": lambda x: np.array([[1], [-1]]),  # noqa: ARG005\n                \"n_constr\": 1,\n            }\n        ],  # expected\n    ),\n]\n\n\n@pytest.mark.parametrize(\"constraints, expected\", TEST_CASES)\ndef test_equality_as_inequality_constraints(constraints, expected):\n    got = equality_as_inequality_constraints(constraints)\n    if expected == \"same\":\n        assert got == constraints\n\n    for g, c in zip(got, constraints, strict=False):\n        if c[\"type\"] == \"eq\":\n            assert g[\"n_constr\"] == 2 * c[\"n_constr\"]\n        assert g[\"type\"] == \"ineq\"\n\n\n# ======================================================================================\n# process_nonlinear_constraints\n# ======================================================================================\n\n\ndef test_process_nonlinear_constraints():\n    nonlinear_constraints = [\n        {\"type\": 
\"nonlinear\", \"func\": lambda x: np.dot(x, x), \"value\": 1},\n        {\n            \"type\": \"nonlinear\",\n            \"func\": lambda x: x,\n            \"lower_bounds\": -1,\n            \"upper_bounds\": 2,\n        },\n    ]\n\n    params = np.array([1.0])\n\n    converter = Converter()\n\n    numdiff_options = NumdiffOptions()\n\n    got = process_nonlinear_constraints(\n        nonlinear_constraints,\n        params=params,\n        bounds=None,\n        converter=converter,\n        numdiff_options=numdiff_options,\n        skip_checks=False,\n    )\n\n    expected = [\n        {\"type\": \"eq\", \"fun\": lambda x: np.dot(x, x) - 1.0, \"n_constr\": 1},\n        {\n            \"type\": \"ineq\",\n            \"fun\": lambda x: np.concatenate((x + 1.0, 2.0 - x), axis=0),\n            \"n_constr\": 2,\n        },\n    ]\n\n    for g, e in zip(got, expected, strict=False):\n        assert g[\"type\"] == e[\"type\"]\n        assert g[\"n_constr\"] == e[\"n_constr\"]\n        for value in [0.1, 0.2, 1.2, -2.0]:\n            x = np.array([value])\n            assert_array_equal(g[\"fun\"](x), e[\"fun\"](x))\n        assert \"jac\" in g\n        assert \"tol\" in g\n\n\n# ======================================================================================\n# vector_as_list_of_scalar_constraints\n# ======================================================================================\n\n\ndef test_get_components():\n    fun = lambda x: np.array([x[0], 2 * x[1]])\n    jac = lambda x: np.array([[1, 0], [0, 2]])  # noqa: ARG005\n\n    fun_component, jac_component = _get_components(fun, jac, idx=1)\n\n    x = np.array([0, 3])\n    assert fun_component(x) == 6\n    assert_array_equal(jac_component(x), np.array([0, 2]))\n\n\ndef test_vector_to_list_of_scalar():\n    constr = {\n        \"fun\": lambda x: x,\n        \"jac\": lambda x: np.eye(len(x)),\n        \"n_constr\": 2,\n    }\n    got = _vector_to_list_of_scalar(constr)\n    for got_constr in got:\n       
 assert got_constr[\"n_constr\"] == 1\n    for i in range(2):\n        assert got[i][\"fun\"](np.arange(2)) == i\n        assert_array_equal(got[i][\"jac\"](np.arange(2)), np.eye(2)[i])\n\n\ndef test_vector_as_list_of_scalar_constraints():\n    constr = {\n        \"fun\": lambda x: x,\n        \"jac\": lambda x: np.eye(len(x)),\n        \"n_constr\": 2,\n    }\n    constraints = [constr, constr]\n    got = vector_as_list_of_scalar_constraints(constraints)\n    assert len(got) == 4\n"
  },
  {
    "path": "tests/optimagic/parameters/test_process_constraints.py",
    "content": "\"\"\"Test the pc processing.\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nimport optimagic as om\nfrom optimagic.exceptions import InvalidConstraintError\nfrom optimagic.parameters.bounds import Bounds\nfrom optimagic.parameters.constraint_tools import check_constraints\nfrom optimagic.parameters.process_constraints import (\n    _replace_pairwise_equality_by_equality,\n)\n\n\ndef test_replace_pairwise_equality_by_equality():\n    constr = {\"indices\": [[0, 1], [2, 3]], \"type\": \"pairwise_equality\"}\n\n    expected = [\n        {\"index\": [0, 2], \"type\": \"equality\"},\n        {\"index\": [1, 3], \"type\": \"equality\"},\n    ]\n\n    calculated = _replace_pairwise_equality_by_equality([constr])\n\n    assert calculated == expected\n\n\n@pytest.mark.filterwarnings(\"ignore:Specifying constraints as a dictionary is\")\ndef test_empty_constraints_work():\n    params = pd.DataFrame()\n    params[\"value\"] = np.arange(5)\n    params[\"bla\"] = list(\"abcde\")\n\n    constraints = [{\"query\": \"bla == 'blubb'\", \"type\": \"equality\"}]\n\n    check_constraints(params, constraints)\n\n\ndef test_to_many_bounds_in_increasing_constraint_raise_good_error():\n    with pytest.raises(InvalidConstraintError):\n        check_constraints(\n            params=np.arange(3),\n            bounds=Bounds(lower=np.arange(3) - 1),\n            constraints=om.IncreasingConstraint(selector=lambda x: x[:3]),\n        )\n"
  },
  {
    "path": "tests/optimagic/parameters/test_process_selectors.py",
    "content": "import numpy as np\nimport pandas as pd\nimport pytest\nfrom numpy.testing import assert_array_equal as aae\nfrom pybaum import tree_flatten, tree_just_flatten, tree_unflatten\n\nfrom optimagic.exceptions import InvalidConstraintError\nfrom optimagic.parameters.process_selectors import process_selectors\nfrom optimagic.parameters.tree_conversion import TreeConverter\nfrom optimagic.parameters.tree_registry import get_registry\n\n\n@pytest.mark.parametrize(\"constraints\", [None, []])\ndef test_process_selectors_no_constraint(constraints):\n    calculated = process_selectors(\n        constraints=constraints,\n        params=np.arange(5),\n        tree_converter=None,\n        param_names=list(\"abcde\"),\n    )\n\n    assert calculated == []\n\n\n@pytest.fixture()\ndef tree_params():\n    df = pd.DataFrame({\"value\": [3, 4], \"lower_bound\": [0, 0]}, index=[\"c\", \"d\"])\n    params = ([0, np.array([1, 2]), {\"a\": df, \"b\": 5}], 6)\n    return params\n\n\n@pytest.fixture()\ndef tree_params_converter(tree_params):\n    registry = get_registry(extended=True)\n    _, treedef = tree_flatten(tree_params, registry=registry)\n\n    converter = TreeConverter(\n        params_flatten=lambda params: np.array(\n            tree_just_flatten(params, registry=registry)\n        ),\n        params_unflatten=lambda x: tree_unflatten(\n            treedef, x.tolist(), registry=registry\n        ),\n        derivative_flatten=None,\n    )\n    return converter\n\n\n@pytest.fixture()\ndef np_params_converter():\n    converter = TreeConverter(\n        lambda x: x,\n        lambda x: x,\n        lambda x: x,\n    )\n    return converter\n\n\n@pytest.fixture()\ndef df_params():\n    df = pd.DataFrame({\"value\": np.arange(6) + 10}, index=list(\"abcdef\"))\n    df.index.name = \"name\"\n    return df\n\n\n@pytest.fixture()\ndef df_params_converter(df_params):\n    converter = TreeConverter(\n        lambda x: x[\"value\"].to_numpy(),\n        lambda x: 
df_params.assign(value=x),\n        None,\n    )\n    return converter\n\n\ndef test_process_selectors_tree_selector(tree_params, tree_params_converter):\n    calculated = process_selectors(\n        constraints=[{\"type\": \"equality\", \"selector\": lambda x: x[1]}],\n        params=tree_params,\n        tree_converter=tree_params_converter,\n        param_names=list(\"abcdefg\"),\n    )\n    aae(calculated[0][\"index\"], np.array([6]))\n\n\ndef test_process_selectors_tree_selectors(tree_params, tree_params_converter):\n    constraints = [\n        {\n            \"type\": \"pairwise_equality\",\n            \"selectors\": [lambda x: x[1], lambda x: x[0][1][0]],\n        }\n    ]\n    calculated = process_selectors(\n        constraints=constraints,\n        params=tree_params,\n        tree_converter=tree_params_converter,\n        param_names=list(\"abcdefg\"),\n    )\n    aae(calculated[0][\"indices\"][0], np.array([6]))\n    aae(calculated[0][\"indices\"][1], np.array([1]))\n\n\ndef test_process_selectors_numpy_array_loc(np_params_converter):\n    calculated = process_selectors(\n        constraints=[{\"type\": \"equality\", \"loc\": [1, 4]}],\n        params=np.arange(6) + 10,\n        tree_converter=np_params_converter,\n        param_names=list(\"abcdefg\"),\n    )\n\n    aae(calculated[0][\"index\"], np.array([1, 4]))\n\n\ndef test_process_selectors_numpy_array_locs(np_params_converter):\n    constraints = [\n        {\n            \"type\": \"pairwise_equality\",\n            \"locs\": [[1, 4], [0, 3]],\n        }\n    ]\n    calculated = process_selectors(\n        constraints=constraints,\n        params=np.arange(6) + 10,\n        tree_converter=np_params_converter,\n        param_names=list(\"abcdefg\"),\n    )\n\n    aae(calculated[0][\"indices\"][0], np.array([1, 4]))\n    aae(calculated[0][\"indices\"][1], np.array([0, 3]))\n\n\ndef test_process_selectors_dataframe_loc(df_params, df_params_converter):\n    constraints = [{\"type\": \"equality\", 
\"loc\": [\"b\", \"e\"]}]\n\n    calculated = process_selectors(\n        constraints=constraints,\n        params=df_params,\n        tree_converter=df_params_converter,\n        param_names=list(\"abcdefg\"),\n    )\n\n    aae(calculated[0][\"index\"], np.array([1, 4]))\n\n\ndef test_process_selectors_dataframe_query(df_params, df_params_converter):\n    q = \"name == 'b' | name == 'e'\"\n    constraints = [{\"type\": \"equality\", \"query\": q}]\n\n    calculated = process_selectors(\n        constraints=constraints,\n        params=df_params,\n        tree_converter=df_params_converter,\n        param_names=list(\"abcdefg\"),\n    )\n\n    aae(calculated[0][\"index\"], np.array([1, 4]))\n\n\ndef test_process_selectors_dataframe_locs(df_params, df_params_converter):\n    constraints = [{\"type\": \"pairwise_equality\", \"locs\": [[\"b\", \"e\"], [\"a\", \"d\"]]}]\n\n    calculated = process_selectors(\n        constraints=constraints,\n        params=df_params,\n        tree_converter=df_params_converter,\n        param_names=list(\"abcdefg\"),\n    )\n\n    aae(calculated[0][\"indices\"][0], np.array([1, 4]))\n    aae(calculated[0][\"indices\"][1], np.array([0, 3]))\n\n\ndef test_process_selectors_dataframe_queries(df_params, df_params_converter):\n    queries = [\"name == 'b' | name == 'e'\", \"name == 'a' | name == 'd'\"]\n    constraints = [{\"type\": \"pairwise_equality\", \"queries\": queries}]\n\n    calculated = process_selectors(\n        constraints=constraints,\n        params=df_params,\n        tree_converter=df_params_converter,\n        param_names=list(\"abcdefg\"),\n    )\n\n    aae(calculated[0][\"indices\"][0], np.array([1, 4]))\n    aae(calculated[0][\"indices\"][1], np.array([0, 3]))\n\n\n@pytest.mark.parametrize(\"field\", [\"selectors\", \"queries\", \"query\", \"locs\"])\ndef test_process_selectors_numpy_array_invalid_fields(field, np_params_converter):\n    with pytest.raises(InvalidConstraintError):\n        process_selectors(\n         
   constraints=[{\"type\": \"equality\", field: None}],\n            params=np.arange(6),\n            tree_converter=np_params_converter,\n            param_names=list(\"abcdefg\"),\n        )\n\n\n@pytest.mark.parametrize(\"field\", [\"selectors\", \"queries\", \"locs\"])\ndef test_process_selectors_dataframe_invalid_fields(\n    field, df_params, df_params_converter\n):\n    with pytest.raises(InvalidConstraintError):\n        process_selectors(\n            constraints=[{\"type\": \"equality\", field: None}],\n            params=df_params,\n            tree_converter=df_params_converter,\n            param_names=list(\"abcdefg\"),\n        )\n\n\n@pytest.mark.parametrize(\"field\", [\"selectors\", \"queries\", \"query\", \"locs\", \"loc\"])\ndef test_process_selectors_tree_invalid_fields(\n    field, tree_params, tree_params_converter\n):\n    with pytest.raises(InvalidConstraintError):\n        process_selectors(\n            constraints=[{\"type\": \"equality\", field: None}],\n            params=tree_params,\n            tree_converter=tree_params_converter,\n            param_names=list(\"abcdefg\"),\n        )\n\n\ndef test_process_selectors_duplicates(np_params_converter):\n    constraints = [\n        {\n            \"type\": \"pairwise_equality\",\n            \"locs\": [[1, 4], [0, 0]],\n        }\n    ]\n    with pytest.raises(InvalidConstraintError):\n        process_selectors(\n            constraints=constraints,\n            params=np.arange(6) + 10,\n            tree_converter=np_params_converter,\n            param_names=list(\"abcdefg\"),\n        )\n\n\ndef test_process_selectors_differen_length_in_multiple_selectors(np_params_converter):\n    constraints = [\n        {\n            \"type\": \"pairwise_equality\",\n            \"locs\": [[1, 4], [0, 3, 5]],\n        }\n    ]\n    with pytest.raises(InvalidConstraintError):\n        process_selectors(\n            constraints=constraints,\n            params=np.arange(6) + 10,\n            
tree_converter=np_params_converter,\n            param_names=list(\"abcdefg\"),\n        )\n"
  },
  {
    "path": "tests/optimagic/parameters/test_scale_conversion.py",
    "content": "import numpy as np\nimport pytest\nfrom numpy.testing import assert_array_almost_equal as aaae\nfrom numpy.testing import assert_array_equal as aae\n\nfrom optimagic import first_derivative\nfrom optimagic.parameters.conversion import InternalParams\nfrom optimagic.parameters.scale_conversion import get_scale_converter\nfrom optimagic.parameters.scaling import ScalingOptions\n\nTEST_CASES = {\n    \"start_values\": InternalParams(\n        values=np.array([0, 1, 1, 1, 1, 1]),\n        lower_bounds=np.array([-2, 0, 0.5, 2 / 3, 3 / 4, 4 / 5]),\n        upper_bounds=np.array([2, 2, 1.5, 4 / 3, 5 / 4, 6 / 5]),\n        names=None,\n    ),\n    \"bounds\": InternalParams(\n        values=np.full(6, 0.5),\n        lower_bounds=np.zeros(6),\n        upper_bounds=np.ones(6),\n        names=None,\n    ),\n}\n\nIDS = list(TEST_CASES)\nPARAMETRIZATION = list(TEST_CASES.items())\n\n\n@pytest.mark.parametrize(\"method, expected\", PARAMETRIZATION, ids=IDS)\ndef test_get_scale_converter_active(method, expected):\n    params = InternalParams(\n        values=np.arange(6),\n        lower_bounds=np.arange(6) - 1,\n        upper_bounds=np.arange(6) + 1,\n        names=list(\"abcdef\"),\n    )\n\n    scaling = ScalingOptions(\n        method=method,\n        clipping_value=0.5,\n    )\n\n    converter, scaled = get_scale_converter(\n        internal_params=params,\n        scaling=scaling,\n    )\n\n    aaae(scaled.values, expected.values)\n    aaae(scaled.lower_bounds, expected.lower_bounds)\n    aaae(scaled.upper_bounds, expected.upper_bounds)\n\n    aaae(converter.params_to_internal(params.values), expected.values)\n    aaae(converter.params_from_internal(expected.values), params.values)\n\n    calculated_jacobian = converter.derivative_to_internal(np.eye(len(params.values)))\n\n    numerical_jacobian = first_derivative(\n        converter.params_from_internal, expected.values\n    ).derivative\n\n    aaae(calculated_jacobian, numerical_jacobian)\n\n\ndef 
test_scale_conversion_fast_path():\n    params = InternalParams(\n        values=np.arange(6),\n        lower_bounds=np.arange(6) - 1,\n        upper_bounds=np.arange(6) + 1,\n        names=list(\"abcdef\"),\n    )\n\n    converter, scaled = get_scale_converter(\n        internal_params=params,\n        scaling=None,\n    )\n\n    aae(params.values, scaled.values)\n    aae(params.lower_bounds, scaled.lower_bounds)\n    aae(params.upper_bounds, scaled.upper_bounds)\n\n    aae(converter.params_to_internal(params.values), params.values)\n    aae(converter.params_from_internal(params.values), params.values)\n    aae(converter.derivative_to_internal(np.ones(3)), np.ones(3))\n"
  },
  {
    "path": "tests/optimagic/parameters/test_scaling.py",
    "content": "import pytest\n\nfrom optimagic.exceptions import InvalidScalingError\nfrom optimagic.parameters.scaling import (\n    ScalingOptions,\n    pre_process_scaling,\n)\n\n\ndef test_pre_process_scaling_trivial_case():\n    scaling = ScalingOptions(\n        method=\"start_values\",\n        clipping_value=1,\n        magnitude=2,\n    )\n    got = pre_process_scaling(scaling=scaling)\n    assert got == scaling\n\n\ndef test_pre_process_scaling_none_case():\n    assert pre_process_scaling(scaling=None) is None\n\n\ndef test_pre_process_scaling_false_case():\n    assert pre_process_scaling(scaling=False) is None\n\n\ndef test_pre_process_scaling_true_case():\n    got = pre_process_scaling(scaling=True)\n    assert got == ScalingOptions()\n\n\ndef test_pre_process_scaling_dict_case():\n    got = pre_process_scaling(\n        scaling={\"method\": \"start_values\", \"clipping_value\": 1, \"magnitude\": 2}\n    )\n    assert got == ScalingOptions(method=\"start_values\", clipping_value=1, magnitude=2)\n\n\ndef test_pre_process_scaling_invalid_type():\n    with pytest.raises(InvalidScalingError, match=\"Invalid scaling options\"):\n        pre_process_scaling(scaling=\"invalid\")\n\n\ndef test_pre_process_scaling_invalid_dict_key():\n    with pytest.raises(InvalidScalingError, match=\"Invalid scaling options of type:\"):\n        pre_process_scaling(scaling={\"wrong_key\": \"start_values\"})\n\n\ndef test_pre_process_scaling_invalid_dict_value():\n    with pytest.raises(InvalidScalingError, match=\"Invalid clipping value:\"):\n        pre_process_scaling(scaling={\"clipping_value\": \"invalid\"})\n\n\ndef test_scaling_options_invalid_method_value():\n    with pytest.raises(InvalidScalingError, match=\"Invalid scaling method:\"):\n        ScalingOptions(method=\"invalid\")\n\n\ndef test_scaling_options_invalid_clipping_value_type():\n    with pytest.raises(InvalidScalingError, match=\"Invalid clipping value:\"):\n        
ScalingOptions(clipping_value=\"invalid\")\n\n\ndef test_scaling_options_invalid_magnitude_value_type():\n    with pytest.raises(InvalidScalingError, match=\"Invalid scaling magnitude:\"):\n        ScalingOptions(magnitude=\"invalid\")\n\n\ndef test_scaling_options_invalid_magnitude_value_range():\n    with pytest.raises(InvalidScalingError, match=\"Invalid scaling magnitude:\"):\n        ScalingOptions(magnitude=-1)\n"
  },
  {
    "path": "tests/optimagic/parameters/test_space_conversion.py",
    "content": "import numpy as np\nimport pytest\nfrom numpy.testing import assert_array_almost_equal as aaae\n\nfrom optimagic import first_derivative\nfrom optimagic.parameters.space_conversion import (\n    InternalParams,\n    _multiply_from_left,\n    _multiply_from_right,\n    get_space_converter,\n)\nfrom optimagic.utilities import get_rng\n\n\ndef _get_test_case_no_constraint():\n    n_params = 10\n    fp = InternalParams(\n        values=np.arange(n_params),\n        lower_bounds=np.full(n_params, -1),\n        upper_bounds=np.full(n_params, 11),\n        names=list(\"abcdefghij\"),\n    )\n\n    constraints = []\n    return constraints, fp, fp\n\n\ndef _get_test_case_fixed(with_value):\n    fp = InternalParams(\n        values=np.arange(5),\n        lower_bounds=np.full(5, -np.inf),\n        upper_bounds=np.full(5, np.inf),\n        names=list(\"abcde\"),\n    )\n    if with_value:\n        constraints = [{\"index\": [0, 2, 4], \"type\": \"fixed\", \"value\": [0, 2, 4]}]\n    else:\n        constraints = [{\"index\": [0, 2, 4], \"type\": \"fixed\"}]\n\n    internal = InternalParams(\n        values=np.array([1, 3]),\n        lower_bounds=np.full(2, -np.inf),\n        upper_bounds=np.full(2, np.inf),\n        names=None,\n    )\n\n    return constraints, fp, internal\n\n\ndef _get_test_case_increasing(as_one):\n    fp = InternalParams(\n        values=np.array([0.1, 2.2, 2.3, 10.1, -1]),\n        lower_bounds=np.full(5, -np.inf),\n        upper_bounds=np.full(5, np.inf),\n        names=list(\"abcde\"),\n    )\n\n    internal = InternalParams(\n        values=np.array([0.1, -2.1, -0.1, -7.8, -1]),\n        lower_bounds=np.full(5, -np.inf),\n        upper_bounds=np.array([np.inf, 0, 0, 0, np.inf]),\n        names=None,\n    )\n\n    if as_one:\n        constraints = [{\"type\": \"increasing\", \"index\": [0, 1, 2, 3]}]\n    else:\n        constraints = [\n            {\"type\": \"increasing\", \"index\": [0, 1, 2]},\n            {\"type\": \"increasing\", 
\"index\": [2, 3]},\n        ]\n\n    return constraints, fp, internal\n\n\ndef _get_test_case_decreasing(as_one):\n    fp = InternalParams(\n        values=np.array([0.1, 2.2, 2.3, 10.1, -1]),\n        lower_bounds=np.full(5, -np.inf),\n        upper_bounds=np.full(5, np.inf),\n        names=list(\"abcde\"),\n    )\n\n    internal = InternalParams(\n        values=np.array([0.1, -2.1, -0.1, -7.8, -1]),\n        lower_bounds=np.full(5, -np.inf),\n        upper_bounds=np.array([np.inf, 0, 0, 0, np.inf]),\n        names=None,\n    )\n\n    if as_one:\n        constraints = [{\"type\": \"decreasing\", \"index\": [3, 2, 1, 0]}]\n    else:\n        constraints = [\n            {\"type\": \"decreasing\", \"index\": [2, 1, 0]},\n            {\"type\": \"decreasing\", \"index\": [3, 2]},\n        ]\n\n    return constraints, fp, internal\n\n\ndef _get_test_case_equality(as_one):\n    fp = InternalParams(\n        values=np.array([0, 1.5, 1.5, 0, 1.5, 1]),\n        lower_bounds=np.array([-10, 1, 0.9, -np.inf, -np.inf, -10]),\n        upper_bounds=np.full(6, np.inf),\n        names=list(\"abcdef\"),\n    )\n\n    internal = InternalParams(\n        values=np.array([0, 1.5, 0, 1]),\n        lower_bounds=np.array([-10, 1, -np.inf, -10]),\n        upper_bounds=np.full(4, np.inf),\n        names=None,\n    )\n\n    if as_one:\n        constraints = [{\"type\": \"equality\", \"index\": [1, 2, 4]}]\n    else:\n        constraints = [\n            {\"type\": \"equality\", \"index\": [1, 2]},\n            {\"type\": \"equality\", \"index\": [1, 4]},\n        ]\n\n    return constraints, fp, internal\n\n\ndef _get_test_case_probability():\n    fp = InternalParams(\n        values=np.array([0.1, 0.2, 0.2, 0.5, 10]),\n        lower_bounds=np.full(5, -np.inf),\n        upper_bounds=np.full(5, np.inf),\n        names=list(\"abcde\"),\n    )\n\n    internal = InternalParams(\n        values=np.array([0.2, 0.4, 0.4, 10]),\n        lower_bounds=np.array([0, 0, 0, -np.inf]),\n        
upper_bounds=np.full(4, np.inf),\n        names=None,\n    )\n\n    constraints = [{\"type\": \"probability\", \"index\": [0, 1, 2, 3]}]\n\n    return constraints, fp, internal\n\n\ndef _get_test_case_uncorrelated_covariance():\n    fp = InternalParams(\n        values=np.array([1, 0, 4, 0, 0, 9, 10]),\n        lower_bounds=np.full(7, -np.inf),\n        upper_bounds=np.full(7, np.inf),\n        names=list(\"abcdefg\"),\n    )\n\n    internal = InternalParams(\n        values=np.array([1, 4, 9, 10]),\n        lower_bounds=np.array([0, 0, 0, -np.inf]),\n        upper_bounds=np.full(4, np.inf),\n        names=None,\n    )\n\n    constraints = [\n        {\"type\": \"covariance\", \"index\": [0, 1, 2, 3, 4, 5]},\n        {\"type\": \"fixed\", \"index\": [1, 3, 4], \"value\": 0},\n    ]\n\n    return constraints, fp, internal\n\n\ndef _get_test_case_covariance():\n    fp = InternalParams(\n        values=np.array([1, -0.2, 1.2, -0.2, 0.1, 1.3, 0.1, -0.05, 0.2, 1, 10]),\n        lower_bounds=np.full(11, -np.inf),\n        upper_bounds=np.full(11, np.inf),\n        names=list(\"abcdefghijk\"),\n    )\n\n    internal = InternalParams(\n        values=np.array(\n            [\n                1,\n                -0.2,\n                1.07703296,\n                -0.2,\n                0.0557086,\n                1.12111398,\n                0.1,\n                -0.0278543,\n                0.19761748,\n                0.97476739,\n                10,\n            ]\n        ),\n        lower_bounds=np.array(\n            [0, -np.inf, 0, -np.inf, -np.inf, 0, -np.inf, -np.inf, -np.inf, 0, -np.inf]\n        ),\n        upper_bounds=np.full(11, np.inf),\n        names=None,\n    )\n\n    constraints = [{\"type\": \"covariance\", \"index\": np.arange(10)}]\n\n    return constraints, fp, internal\n\n\ndef _get_test_case_normalized_covariance():\n    fp = InternalParams(\n        values=np.array([4, 0.1, 2, 0.2, 0.3, 3, 10]),\n        lower_bounds=np.full(7, -np.inf),\n        
upper_bounds=np.full(7, np.inf),\n        names=list(\"abcdefg\"),\n    )\n\n    internal = InternalParams(\n        values=np.array([0.05, 1.4133294025, 0.1, 0.2087269956, 1.7165177078, 10]),\n        lower_bounds=[-np.inf, 0, -np.inf, -np.inf, 0, -np.inf],\n        upper_bounds=np.full(6, np.inf),\n        names=None,\n    )\n\n    constraints = [\n        {\"type\": \"covariance\", \"index\": np.arange(6)},\n        {\"type\": \"fixed\", \"index\": [0], \"value\": 4},\n    ]\n\n    return constraints, fp, internal\n\n\ndef _get_test_case_sdcorr():\n    fp = InternalParams(\n        values=np.array([2, 1.5, 3, 0.2, 0.15, 0.33, 10]),\n        lower_bounds=np.full(7, -np.inf),\n        upper_bounds=np.full(7, np.inf),\n        names=list(\"abcdefg\"),\n    )\n\n    internal = InternalParams(\n        values=np.array([2, 0.3, 1.46969385, 0.45, 0.91855865, 2.82023935, 10]),\n        lower_bounds=np.array([0, -np.inf, 0, -np.inf, -np.inf, 0, -np.inf]),\n        upper_bounds=np.full(7, np.inf),\n        names=None,\n    )\n\n    constraints = [{\"type\": \"sdcorr\", \"index\": np.arange(6)}]\n\n    return constraints, fp, internal\n\n\nTEST_CASES = {\n    \"no_constraints\": _get_test_case_no_constraint(),\n    \"fixed_at_start\": _get_test_case_fixed(with_value=False),\n    \"fixed_at_value\": _get_test_case_fixed(with_value=True),\n    \"one_increasing\": _get_test_case_increasing(as_one=True),\n    \"overlapping_increasing\": _get_test_case_increasing(as_one=False),\n    \"one_decreasing\": _get_test_case_decreasing(as_one=True),\n    \"overlapping_decreasing\": _get_test_case_decreasing(as_one=False),\n    \"one_equality\": _get_test_case_equality(as_one=True),\n    \"everlapping_equality\": _get_test_case_equality(as_one=False),\n    \"probability\": _get_test_case_probability(),\n    \"uncorrelated_covariance\": _get_test_case_uncorrelated_covariance(),\n    \"covariance\": _get_test_case_covariance(),\n    \"normalized_covariance\": 
_get_test_case_normalized_covariance(),\n    \"sdcorr\": _get_test_case_sdcorr(),\n}\n\n\nPARAMETRIZATION = list(TEST_CASES.values())\nIDS = list(TEST_CASES)\n\n\n@pytest.mark.parametrize(\n    \"constraints, params, expected_internal\", PARAMETRIZATION, ids=IDS\n)\ndef test_space_converter_with_params(constraints, params, expected_internal):\n    converter, internal = get_space_converter(\n        internal_params=params,\n        internal_constraints=constraints,\n    )\n\n    aaae(internal.values, expected_internal.values)\n    aaae(internal.lower_bounds, expected_internal.lower_bounds)\n    aaae(internal.upper_bounds, expected_internal.upper_bounds)\n\n    aaae(converter.params_to_internal(params.values), expected_internal.values)\n    aaae(converter.params_from_internal(expected_internal.values), params.values)\n\n    numerical_jacobian = first_derivative(\n        converter.params_from_internal, expected_internal.values\n    ).derivative\n\n    calculated_jacobian = converter.derivative_to_internal(\n        external_derivative=np.eye(len(params.values)),\n        internal_values=expected_internal.values,\n    )\n\n    aaae(calculated_jacobian, numerical_jacobian)\n\n\n@pytest.mark.parametrize(\"seed\", range(5))\ndef test_multiply_from_left_and_right(seed):\n    rng = get_rng(seed)\n    mat_list = [rng.uniform(size=(10, 10)) for i in range(5)]\n    a, b, c, d, e = mat_list\n\n    expected = a @ b @ c @ d @ e\n\n    calc_from_left = _multiply_from_left(mat_list)\n    calc_from_right = _multiply_from_right(mat_list)\n\n    aaae(calc_from_left, expected)\n    aaae(calc_from_right, expected)\n"
  },
  {
    "path": "tests/optimagic/parameters/test_tree_conversion.py",
    "content": "import numpy as np\nimport pandas as pd\nimport pytest\nfrom numpy.testing import assert_array_equal as aae\n\nfrom optimagic.parameters.bounds import Bounds\nfrom optimagic.parameters.tree_conversion import get_tree_converter\nfrom optimagic.typing import AggregationLevel\n\n\n@pytest.fixture()\ndef params():\n    df = pd.DataFrame({\"value\": [3, 4], \"lower_bound\": [0, 0]}, index=[\"c\", \"d\"])\n    params = ([0, np.array([1, 2]), {\"a\": df, \"b\": 5}], 6)\n    return params\n\n\n@pytest.fixture()\ndef upper_bounds():\n    upper = ([None, np.array([11, np.inf]), None], 100)\n    return upper\n\n\nFUNC_EVALS = [\n    5.0,\n    np.float32(5),\n    np.ones(5),\n    {\"a\": 1, \"b\": 2, \"c\": [np.full(4, 0.5)]},\n    pd.Series(1, index=list(\"abcde\")),\n    np.ones(5),\n    {\"a\": 1, \"b\": 2},\n]\n\n\n@pytest.mark.parametrize(\"func_eval\", FUNC_EVALS)\ndef test_tree_converter_scalar_solver(params, upper_bounds, func_eval):\n    bounds = Bounds(\n        upper=upper_bounds,\n    )\n    converter, flat_params = get_tree_converter(\n        params=params,\n        bounds=bounds,\n        func_eval=func_eval,\n        derivative_eval=params,\n        solver_type=AggregationLevel.SCALAR,\n    )\n\n    expected_values = np.arange(7)\n    expected_lb = np.array([-np.inf, -np.inf, -np.inf, 0, 0, -np.inf, -np.inf])\n    expected_ub = np.array([np.inf, 11, np.inf, np.inf, np.inf, np.inf, 100])\n    expected_names = [\"0_0\", \"0_1_0\", \"0_1_1\", \"0_2_a_c\", \"0_2_a_d\", \"0_2_b\", \"1\"]\n\n    aae(flat_params.values, expected_values)\n    aae(flat_params.lower_bounds, expected_lb)\n    aae(flat_params.upper_bounds, expected_ub)\n    assert flat_params.names == expected_names\n\n    aae(converter.params_flatten(params), np.arange(7))\n    unflat = converter.params_unflatten(np.arange(7))\n    assert unflat[0][0] == params[0][0]\n    aae(unflat[0][1], params[0][1])\n\n\nSOLVER_TYPES = [\n    AggregationLevel.SCALAR,\n    AggregationLevel.LIKELIHOOD,\n 
   AggregationLevel.LEAST_SQUARES,\n]\n\n\n@pytest.mark.parametrize(\"solver_type\", SOLVER_TYPES)\ndef test_tree_conversion_fast_path(solver_type):\n    if solver_type == AggregationLevel.SCALAR:\n        derivative_eval = np.arange(3) * 2\n        func_eval = 3\n    else:\n        derivative_eval = np.arange(6).reshape(2, 3)\n        func_eval = np.ones(2)\n\n    converter, flat_params = get_tree_converter(\n        params=np.arange(3),\n        bounds=Bounds(lower=None, upper=np.arange(3) + 1),\n        func_eval=func_eval,\n        derivative_eval=derivative_eval,\n        solver_type=solver_type,\n    )\n\n    aae(flat_params.values, np.arange(3))\n    assert flat_params.lower_bounds is None\n    aae(flat_params.upper_bounds, np.arange(3) + 1)\n    assert flat_params.names == list(map(str, range(3)))\n\n    aae(converter.params_flatten(np.arange(3)), np.arange(3))\n    aae(converter.params_unflatten(np.arange(3)), np.arange(3))\n    aae(converter.derivative_flatten(derivative_eval), derivative_eval)\n"
  },
  {
    "path": "tests/optimagic/parameters/test_tree_registry.py",
    "content": "import numpy as np\nimport pandas as pd\nimport pytest\nfrom pandas.testing import assert_frame_equal\nfrom pybaum import leaf_names, tree_flatten, tree_unflatten\n\nfrom optimagic.parameters.tree_registry import get_registry\n\n\n@pytest.fixture()\ndef value_df():\n    df = pd.DataFrame(\n        np.arange(6).reshape(3, 2),\n        columns=[\"a\", \"value\"],\n        index=[\"alpha\", \"beta\", \"gamma\"],\n    )\n    return df\n\n\n@pytest.fixture()\ndef other_df():\n    df = pd.DataFrame(index=[\"alpha\", \"beta\", \"gamma\"])\n    df[\"b\"] = np.arange(3).astype(np.int16)\n    df[\"c\"] = 3.14\n    return df\n\n\ndef test_flatten_df_with_value_column(value_df):\n    registry = get_registry(extended=True)\n    flat, _ = tree_flatten(value_df, registry=registry)\n    assert flat == [1, 3, 5]\n\n\ndef test_unflatten_df_with_value_column(value_df):\n    registry = get_registry(extended=True)\n    _, treedef = tree_flatten(value_df, registry=registry)\n    unflat = tree_unflatten(treedef, [10, 11, 12], registry=registry)\n    assert unflat.equals(value_df.assign(value=[10, 11, 12]))\n\n\ndef test_leaf_names_df_with_value_column(value_df):\n    registry = get_registry(extended=True)\n    names = leaf_names(value_df, registry=registry)\n    assert names == [\"alpha\", \"beta\", \"gamma\"]\n\n\ndef test_flatten_partially_numeric_df(other_df):\n    registry = get_registry(extended=True)\n    flat, _ = tree_flatten(other_df, registry=registry)\n    assert flat == [0, 3.14, 1, 3.14, 2, 3.14]\n\n\ndef test_unflatten_partially_numeric_df(other_df):\n    registry = get_registry(extended=True)\n    _, treedef = tree_flatten(other_df, registry=registry)\n    unflat = tree_unflatten(treedef, [1, 2, 3, 4, 5, 6], registry=registry)\n    other_df = other_df.assign(b=[1, 3, 5], c=[2, 4, 6])\n    assert_frame_equal(unflat, other_df, check_dtype=False)\n\n\ndef test_leaf_names_partially_numeric_df(other_df):\n    registry = get_registry(extended=True)\n    names = 
leaf_names(other_df, registry=registry)\n    assert names == [\"alpha_b\", \"alpha_c\", \"beta_b\", \"beta_c\", \"gamma_b\", \"gamma_c\"]\n"
  },
  {
    "path": "tests/optimagic/shared/__init__.py",
    "content": ""
  },
  {
    "path": "tests/optimagic/shared/test_process_user_functions.py",
    "content": "import numpy as np\nimport pytest\nfrom numpy.typing import NDArray\n\nfrom optimagic import mark\nfrom optimagic.exceptions import InvalidKwargsError\nfrom optimagic.optimization.fun_value import (\n    LeastSquaresFunctionValue,\n    LikelihoodFunctionValue,\n    ScalarFunctionValue,\n)\nfrom optimagic.shared.process_user_function import (\n    get_kwargs_from_args,\n    infer_aggregation_level,\n    partial_func_of_params,\n)\nfrom optimagic.typing import AggregationLevel\n\n\ndef test_partial_func_of_params():\n    def f(params, b, c):\n        return params + b + c\n\n    func = partial_func_of_params(f, {\"b\": 2, \"c\": 3})\n\n    assert func(1) == 6\n\n\ndef test_partial_func_of_params_too_many_kwargs():\n    def f(params, b, c):\n        return params + b + c\n\n    with pytest.raises(InvalidKwargsError):\n        partial_func_of_params(f, {\"params\": 1, \"b\": 2, \"c\": 3})\n\n\ndef test_partial_func_of_params_too_few_kwargs():\n    def f(params, b, c):\n        return params + b + c\n\n    with pytest.raises(InvalidKwargsError):\n        partial_func_of_params(f, {\"c\": 3})\n\n\ndef test_get_kwargs_from_args():\n    def f(a, b, c=3, d=4):\n        return a + b + c\n\n    got = get_kwargs_from_args([1, 2], f, offset=1)\n    expected = {\"b\": 1, \"c\": 2}\n\n    assert got == expected\n\n\ndef test_infer_aggregation_level_no_decorator():\n    def f(params):\n        return 1\n\n    assert infer_aggregation_level(f) == AggregationLevel.SCALAR\n\n\ndef test_infer_aggregation_level_scalar_decorator():\n    @mark.scalar\n    def f(params):\n        return 1\n\n    assert infer_aggregation_level(f) == AggregationLevel.SCALAR\n\n\ndef test_infer_aggregation_level_scalar_anotation():\n    def f(params: NDArray[np.float64]) -> ScalarFunctionValue:\n        return ScalarFunctionValue(1)\n\n    assert infer_aggregation_level(f) == AggregationLevel.SCALAR\n\n\ndef test_infer_aggregation_level_least_squares_decorator():\n    @mark.least_squares\n    
def f(params):\n        return np.ones(3)\n\n    assert infer_aggregation_level(f) == AggregationLevel.LEAST_SQUARES\n\n\ndef test_infer_aggregation_level_least_squares_anotation():\n    def f(params: NDArray[np.float64]) -> LeastSquaresFunctionValue:\n        return LeastSquaresFunctionValue(np.ones(3))\n\n    assert infer_aggregation_level(f) == AggregationLevel.LEAST_SQUARES\n\n\ndef test_infer_aggregation_level_likelihood_decorator():\n    @mark.likelihood\n    def f(params):\n        return np.ones(3)\n\n    assert infer_aggregation_level(f) == AggregationLevel.LIKELIHOOD\n\n\ndef test_infer_aggregation_level_likelihood_anotation():\n    def f(params: NDArray[np.float64]) -> LikelihoodFunctionValue:\n        return LikelihoodFunctionValue(np.ones(3))\n\n    assert infer_aggregation_level(f) == AggregationLevel.LIKELIHOOD\n"
  },
  {
    "path": "tests/optimagic/test_algo_selection.py",
    "content": "from optimagic import algos\n\n\ndef test_dfols_is_present():\n    assert hasattr(algos, \"nag_dfols\")\n    assert hasattr(algos.Bounded, \"nag_dfols\")\n    assert hasattr(algos.LeastSquares, \"nag_dfols\")\n    assert hasattr(algos.Local, \"nag_dfols\")\n    assert hasattr(algos.Bounded.Local.LeastSquares, \"nag_dfols\")\n    assert hasattr(algos.Local.Bounded.LeastSquares, \"nag_dfols\")\n    assert hasattr(algos.LeastSquares.Bounded.Local, \"nag_dfols\")\n\n\ndef test_scipy_cobyla_is_present():\n    assert hasattr(algos, \"scipy_cobyla\")\n    assert hasattr(algos.Local, \"scipy_cobyla\")\n    assert hasattr(algos.NonlinearConstrained, \"scipy_cobyla\")\n    assert hasattr(algos.GradientFree, \"scipy_cobyla\")\n    assert hasattr(algos.Local.NonlinearConstrained, \"scipy_cobyla\")\n    assert hasattr(algos.NonlinearConstrained.Local, \"scipy_cobyla\")\n    assert hasattr(algos.GradientFree.NonlinearConstrained, \"scipy_cobyla\")\n    assert hasattr(algos.GradientFree.NonlinearConstrained.Local, \"scipy_cobyla\")\n    assert hasattr(algos.Local.GradientFree.NonlinearConstrained, \"scipy_cobyla\")\n    assert hasattr(algos.NonlinearConstrained.GradientFree.Local, \"scipy_cobyla\")\n    assert hasattr(algos.NonlinearConstrained.Local.GradientFree, \"scipy_cobyla\")\n    assert hasattr(algos.Local.NonlinearConstrained.GradientFree, \"scipy_cobyla\")\n\n\ndef test_algorithm_lists():\n    assert len(algos.All) >= len(algos.Available)\n    assert len(algos.AllNames) == len(algos.All)\n    assert len(algos.AvailableNames) == len(algos.Available)\n"
  },
  {
    "path": "tests/optimagic/test_batch_evaluators.py",
    "content": "import itertools\nimport warnings\n\nimport pytest\n\nfrom optimagic.batch_evaluators import process_batch_evaluator\n\nbatch_evaluators = [\"joblib\", \"threading\"]\n\nn_core_list = [1, 2]\n\ntest_cases = list(itertools.product(batch_evaluators, n_core_list))\n\n\ndef double(x):\n    return 2 * x\n\n\ndef buggy_func(x):  # noqa: ARG001\n    raise AssertionError()\n\n\ndef add_x_and_y(x, y):\n    return x + y\n\n\n@pytest.mark.slow()\n@pytest.mark.parametrize(\"batch_evaluator, n_cores\", test_cases)\ndef test_batch_evaluator_without_exceptions(batch_evaluator, n_cores):\n    batch_evaluator = process_batch_evaluator(batch_evaluator)\n\n    calculated = batch_evaluator(\n        func=double,\n        arguments=list(range(10)),\n        n_cores=n_cores,\n    )\n\n    expected = list(range(0, 20, 2))\n\n    assert calculated == expected\n\n\n@pytest.mark.slow()\n@pytest.mark.parametrize(\"batch_evaluator, n_cores\", test_cases)\ndef test_batch_evaluator_with_unhandled_exceptions(batch_evaluator, n_cores):\n    batch_evaluator = process_batch_evaluator(batch_evaluator)\n    with pytest.raises(AssertionError):\n        batch_evaluator(\n            func=buggy_func,\n            arguments=list(range(10)),\n            n_cores=n_cores,\n            error_handling=\"raise\",\n        )\n\n\n@pytest.mark.slow()\n@pytest.mark.parametrize(\"batch_evaluator, n_cores\", test_cases)\ndef test_batch_evaluator_with_handled_exceptions(batch_evaluator, n_cores):\n    batch_evaluator = process_batch_evaluator(batch_evaluator)\n    with warnings.catch_warnings():\n        warnings.simplefilter(\"ignore\")\n\n        calculated = batch_evaluator(\n            func=buggy_func,\n            arguments=list(range(10)),\n            n_cores=n_cores,\n            error_handling=\"continue\",\n        )\n\n        for calc in calculated:\n            assert isinstance(calc, str)\n\n\n@pytest.mark.slow()\n@pytest.mark.parametrize(\"batch_evaluator, n_cores\", test_cases)\ndef 
test_batch_evaluator_with_list_unpacking(batch_evaluator, n_cores):\n    batch_evaluator = process_batch_evaluator(batch_evaluator)\n    calculated = batch_evaluator(\n        func=add_x_and_y,\n        arguments=[(1, 2), (3, 4)],\n        n_cores=n_cores,\n        unpack_symbol=\"*\",\n    )\n    expected = [3, 7]\n    assert calculated == expected\n\n\n@pytest.mark.slow()\n@pytest.mark.parametrize(\"batch_evaluator, n_cores\", test_cases)\ndef test_batch_evaluator_with_dict_unpacking(batch_evaluator, n_cores):\n    batch_evaluator = process_batch_evaluator(batch_evaluator)\n    calculated = batch_evaluator(\n        func=add_x_and_y,\n        arguments=[{\"x\": 1, \"y\": 2}, {\"x\": 3, \"y\": 4}],\n        n_cores=n_cores,\n        unpack_symbol=\"**\",\n    )\n    expected = [3, 7]\n    assert calculated == expected\n\n\ndef test_get_batch_evaluator_invalid_value():\n    with pytest.raises(ValueError):\n        process_batch_evaluator(\"bla\")\n\n\ndef test_get_batch_evaluator_invalid_type():\n    with pytest.raises(TypeError):\n        process_batch_evaluator(3)\n\n\ndef test_get_batch_evaluator_with_callable():\n    assert callable(process_batch_evaluator(lambda x: x))\n"
  },
  {
    "path": "tests/optimagic/test_constraints.py",
    "content": "import pytest\n\nfrom optimagic.constraints import (\n    Constraint,\n    DecreasingConstraint,\n    EqualityConstraint,\n    FixedConstraint,\n    FlatCovConstraint,\n    FlatSDCorrConstraint,\n    IncreasingConstraint,\n    LinearConstraint,\n    NonlinearConstraint,\n    PairwiseEqualityConstraint,\n    ProbabilityConstraint,\n    _all_none,\n    _select_non_none,\n)\nfrom optimagic.exceptions import InvalidConstraintError\n\n\n@pytest.fixture\ndef dummy_func():\n    return lambda x: x\n\n\ndef test_fixed_constraint(dummy_func):\n    constr = FixedConstraint(selector=dummy_func)\n    dict_repr = {\"type\": \"fixed\", \"selector\": dummy_func}\n    assert constr._to_dict() == dict_repr\n    assert isinstance(constr, Constraint)\n\n\ndef test_increasing_constraint(dummy_func):\n    constr = IncreasingConstraint(selector=dummy_func)\n    dict_repr = {\"type\": \"increasing\", \"selector\": dummy_func}\n    assert constr._to_dict() == dict_repr\n    assert isinstance(constr, Constraint)\n\n\ndef test_decreasing_constraint(dummy_func):\n    constr = DecreasingConstraint(selector=dummy_func)\n    dict_repr = {\"type\": \"decreasing\", \"selector\": dummy_func}\n    assert constr._to_dict() == dict_repr\n    assert isinstance(constr, Constraint)\n\n\ndef test_equality_constraint(dummy_func):\n    constr = EqualityConstraint(selector=dummy_func)\n    dict_repr = {\"type\": \"equality\", \"selector\": dummy_func}\n    assert constr._to_dict() == dict_repr\n    assert isinstance(constr, Constraint)\n\n\ndef test_pairwise_equality_constraint(dummy_func):\n    constr = PairwiseEqualityConstraint(selectors=[dummy_func, dummy_func])\n    dict_repr = {\"type\": \"pairwise_equality\", \"selectors\": [dummy_func, dummy_func]}\n    assert constr._to_dict() == dict_repr\n    assert isinstance(constr, Constraint)\n\n\ndef test_probability_constraint(dummy_func):\n    constr = ProbabilityConstraint(selector=dummy_func)\n    dict_repr = {\"type\": \"probability\", 
\"selector\": dummy_func}\n    assert constr._to_dict() == dict_repr\n    assert isinstance(constr, Constraint)\n\n\ndef test_covariance_constraint(dummy_func):\n    constr = FlatCovConstraint(selector=dummy_func)\n    dict_repr = {\"type\": \"covariance\", \"selector\": dummy_func, \"regularization\": 0.0}\n    assert constr._to_dict() == dict_repr\n    assert isinstance(constr, Constraint)\n\n\ndef test_sdcorr_constraint(dummy_func):\n    constr = FlatSDCorrConstraint(selector=dummy_func)\n    dict_repr = {\"type\": \"sdcorr\", \"selector\": dummy_func, \"regularization\": 0.0}\n    assert constr._to_dict() == dict_repr\n    assert isinstance(constr, Constraint)\n\n\ndef test_linear_constraint_with_value(dummy_func):\n    constr = LinearConstraint(selector=dummy_func, value=2.1, weights=[1, 2])\n    dict_repr = {\n        \"type\": \"linear\",\n        \"selector\": dummy_func,\n        \"value\": 2.1,\n        \"weights\": [1, 2],\n    }\n    assert constr._to_dict() == dict_repr\n    assert isinstance(constr, Constraint)\n\n\ndef test_linear_constraint_with_bounds(dummy_func):\n    constr = LinearConstraint(\n        selector=dummy_func, lower_bound=1.0, upper_bound=2.0, weights=[1, 2]\n    )\n    dict_repr = {\n        \"type\": \"linear\",\n        \"selector\": dummy_func,\n        \"lower_bound\": 1.0,\n        \"upper_bound\": 2.0,\n        \"weights\": [1, 2],\n    }\n    assert constr._to_dict() == dict_repr\n\n\ndef test_linear_constraint_with_bounds_and_value(dummy_func):\n    msg = \"'value' cannot be used with 'lower_bound' or 'upper_bound'.\"\n    with pytest.raises(InvalidConstraintError, match=msg):\n        LinearConstraint(\n            selector=dummy_func,\n            lower_bound=1.0,\n            upper_bound=2.0,\n            value=2.1,\n            weights=[1, 2],\n        )\n\n\ndef test_linear_constraint_with_nothing(dummy_func):\n    msg = \"At least one of 'lower_bound', 'upper_bound', or 'value' must be non-None.\"\n    with 
pytest.raises(InvalidConstraintError, match=msg):\n        LinearConstraint(selector=dummy_func, weights=[1, 2])\n\n\ndef test_nonlinear_constraint_with_value(dummy_func):\n    constr = NonlinearConstraint(selector=dummy_func, value=2.1, func=dummy_func)\n    dict_repr = {\n        \"type\": \"nonlinear\",\n        \"selector\": dummy_func,\n        \"value\": 2.1,\n        \"func\": dummy_func,\n        \"tol\": 1e-5,\n    }\n    assert constr._to_dict() == dict_repr\n    assert isinstance(constr, Constraint)\n\n\ndef test_nonlinear_constraint_with_bounds(dummy_func):\n    constr = NonlinearConstraint(\n        selector=dummy_func, lower_bound=1.0, upper_bound=2.0, func=dummy_func\n    )\n    dict_repr = {\n        \"type\": \"nonlinear\",\n        \"selector\": dummy_func,\n        \"func\": dummy_func,\n        \"lower_bounds\": 1.0,\n        \"upper_bounds\": 2.0,\n        \"tol\": 1e-5,\n    }\n    assert constr._to_dict() == dict_repr\n\n\ndef test_nonlinear_constraint_with_bounds_and_value(dummy_func):\n    msg = \"'value' cannot be used with 'lower_bound' or 'upper_bound'.\"\n    with pytest.raises(InvalidConstraintError, match=msg):\n        NonlinearConstraint(\n            selector=dummy_func,\n            lower_bound=1.0,\n            upper_bound=2.0,\n            value=2.1,\n            func=dummy_func,\n        )\n\n\ndef test_nonlinear_constraint_with_nothing(dummy_func):\n    msg = \"At least one of 'lower_bound', 'upper_bound', or 'value' must be non-None.\"\n    with pytest.raises(InvalidConstraintError, match=msg):\n        NonlinearConstraint(selector=dummy_func, func=dummy_func)\n\n\ndef test_all_none():\n    assert _all_none(None, None, None)\n    assert not _all_none(None, 1, None)\n\n\ndef test_select_non_none():\n    assert _select_non_none(a=None, b=None, c=None) == {}\n    assert _select_non_none(a=None, b=1, c=None) == {\"b\": 1}\n    assert _select_non_none(a=None, b=None, c=2) == {\"c\": 2}\n    assert _select_non_none(a=1, b=2, c=3) 
== {\"a\": 1, \"b\": 2, \"c\": 3}\n"
  },
  {
    "path": "tests/optimagic/test_decorators.py",
    "content": "import pytest\n\nfrom optimagic.decorators import (\n    catch,\n    unpack,\n)\n\n\ndef test_catch_at_defaults():\n    @catch\n    def f():\n        raise ValueError\n\n    with pytest.warns(UserWarning):\n        assert f() is None\n\n    @catch\n    def g():\n        raise KeyboardInterrupt()\n\n    with pytest.raises(KeyboardInterrupt):\n        g()\n\n\ndef test_catch_with_reraise():\n    @catch(reraise=True)\n    def f():\n        raise ValueError\n\n    with pytest.raises(ValueError):\n        f()\n\n\ndef test_unpack_decorator_none():\n    @unpack(symbol=None)\n    def f(x):\n        return x\n\n    assert f(3) == 3\n\n\ndef test_unpack_decorator_one_star():\n    @unpack(symbol=\"*\")\n    def f(x, y):\n        return x + y\n\n    assert f((3, 4)) == 7\n\n\ndef test_unpack_decorator_two_stars():\n    @unpack(symbol=\"**\")\n    def f(x, y):\n        return x + y\n\n    assert f({\"x\": 3, \"y\": 4}) == 7\n"
  },
  {
    "path": "tests/optimagic/test_deprecations.py",
    "content": "\"\"\"Test that our deprecations work.\n\nThis also serves as an internal overview of deprecated functions.\n\n\"\"\"\n\nimport warnings\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_almost_equal as aaae\n\nimport estimagic as em\nimport optimagic as om\nfrom estimagic import (\n    OptimizeLogReader,\n    OptimizeResult,\n    batch_evaluators,\n    check_constraints,\n    convergence_plot,\n    convergence_report,\n    count_free_params,\n    criterion_plot,\n    first_derivative,\n    get_benchmark_problems,\n    maximize,\n    minimize,\n    params_plot,\n    profile_plot,\n    rank_report,\n    run_benchmark,\n    second_derivative,\n    slice_plot,\n    traceback_report,\n    utilities,\n)\nfrom optimagic.deprecations import (\n    convert_dict_to_function_value,\n    handle_log_options_throw_deprecated_warning,\n    infer_problem_type_from_dict_output,\n    is_dict_output,\n    pre_process_constraints,\n)\nfrom optimagic.differentiation.derivatives import NumdiffResult\nfrom optimagic.exceptions import InvalidConstraintError\nfrom optimagic.logging.logger import SQLiteLogOptions\nfrom optimagic.optimization.fun_value import (\n    LeastSquaresFunctionValue,\n    LikelihoodFunctionValue,\n    ScalarFunctionValue,\n)\nfrom optimagic.parameters.bounds import Bounds\nfrom optimagic.typing import AggregationLevel\n\n# ======================================================================================\n# Deprecated in 0.5.0, remove in 0.6.0\n# ======================================================================================\n\n\ndef test_estimagic_minimize_is_deprecated():\n    with pytest.warns(FutureWarning, match=\"estimagic.minimize has been deprecated\"):\n        minimize(lambda x: x @ x, np.arange(3), algorithm=\"scipy_lbfgsb\")\n\n\ndef test_estimagic_maximize_is_deprecated():\n    with pytest.warns(FutureWarning, match=\"estimagic.maximize has been deprecated\"):\n        maximize(lambda x: -x @ x, 
np.arange(3), algorithm=\"scipy_lbfgsb\")\n\n\ndef test_estimagic_first_derivative_is_deprecated():\n    msg = \"estimagic.first_derivative has been deprecated\"\n    with pytest.warns(FutureWarning, match=msg):\n        first_derivative(lambda x: x @ x, np.arange(3))\n\n\ndef test_estimagic_second_derivative_is_deprecated():\n    msg = \"estimagic.second_derivative has been deprecated\"\n    with pytest.warns(FutureWarning, match=msg):\n        second_derivative(lambda x: x @ x, np.arange(3))\n\n\ndef test_estimagic_benchmarking_functions_are_deprecated():\n    msg = \"estimagic.get_benchmark_problems has been deprecated\"\n    with pytest.warns(FutureWarning, match=msg):\n        problems = get_benchmark_problems(\"example\")\n\n    msg = \"estimagic.run_benchmark has been deprecated\"\n    with pytest.warns(FutureWarning, match=msg):\n        results = run_benchmark(\n            problems, optimize_options={\"test\": {\"algorithm\": \"scipy_lbfgsb\"}}\n        )\n\n    msg = \"estimagic.convergence_report has been deprecated\"\n    with pytest.warns(FutureWarning, match=msg):\n        convergence_report(problems, results)\n\n    msg = \"estimagic.rank_report has been deprecated\"\n    with pytest.warns(FutureWarning, match=msg):\n        rank_report(problems, results)\n\n    msg = \"estimagic.traceback_report has been deprecated\"\n    with pytest.warns(FutureWarning, match=msg):\n        traceback_report(problems, results)\n\n    msg = \"estimagic.profile_plot has been deprecated\"\n    with pytest.warns(FutureWarning, match=msg):\n        profile_plot(problems, results)\n\n    msg = \"estimagic.convergence_plot has been deprecated\"\n    with pytest.warns(FutureWarning, match=msg):\n        convergence_plot(problems, results)\n\n\ndef test_estimagic_slice_plot_is_deprecated():\n    msg = \"estimagic.slice_plot has been deprecated\"\n    with pytest.warns(FutureWarning, match=msg):\n        slice_plot(\n            func=lambda x: x @ x,\n            
params=np.arange(3),\n            bounds=Bounds(lower=np.zeros(3), upper=np.ones(3) * 5),\n        )\n\n\ndef test_estimagic_check_constraints_is_deprecated():\n    msg = \"estimagic.check_constraints has been deprecated\"\n    with pytest.warns(FutureWarning, match=msg):\n        check_constraints(\n            params=np.arange(3),\n            constraints=om.FixedConstraint(lambda x: x[0]),\n        )\n\n\ndef test_estimagic_count_free_params_is_deprecated():\n    msg = \"estimagic.count_free_params has been deprecated\"\n    with pytest.warns(FutureWarning, match=msg):\n        count_free_params(\n            params=np.arange(3),\n            constraints=om.FixedConstraint(lambda x: x[0]),\n        )\n\n\n@pytest.fixture()\ndef example_db(tmp_path):\n    path = tmp_path / \"test.db\"\n\n    def _crit(params):\n        x = np.array(list(params.values()))\n        return x @ x\n\n    om.minimize(\n        fun=_crit,\n        params={\"a\": 1, \"b\": 2, \"c\": 3},\n        algorithm=\"scipy_lbfgsb\",\n        logging=path,\n    )\n    return path\n\n\ndef test_estimagic_log_reader_is_deprecated(example_db):\n    msg = (\n        \"OptimizeLogReader is deprecated and will be removed in a future \"
\n        \"version. Please use optimagic.logging.SQLiteLogger instead.\"\n    )\n    with pytest.warns(FutureWarning, match=msg):\n        OptimizeLogReader(example_db)\n\n\ndef test_estimagic_optimize_result_is_deprecated():\n    res = om.minimize(lambda x: x @ x, np.arange(3), algorithm=\"scipy_lbfgsb\")\n\n    msg = \"estimagic.OptimizeResult has been deprecated\"\n    with pytest.warns(FutureWarning, match=msg):\n        OptimizeResult(\n            params=res.params,\n            fun=res.fun,\n            start_fun=res.start_fun,\n            start_params=res.start_params,\n            algorithm=res.algorithm,\n            direction=res.direction,\n            n_free=res.n_free,\n        )\n\n\ndef test_estimagic_chol_params_to_lower_triangular_matrix_is_deprecated():\n    msg = \"estimagic.utilities.chol_params_to_lower_triangular_matrix has been deprecat\"\n    with pytest.warns(FutureWarning, match=msg):\n        utilities.chol_params_to_lower_triangular_matrix(np.arange(6))\n\n\ndef test_estimagic_cov_params_to_matrix_is_deprecated():\n    msg = \"estimagic.utilities.cov_params_to_matrix has been deprecated\"\n    with pytest.warns(FutureWarning, match=msg):\n        utilities.cov_params_to_matrix(np.arange(6))\n\n\ndef test_estimagic_cov_matrix_to_params_is_deprecated():\n    msg = \"estimagic.utilities.cov_matrix_to_params has been deprecated\"\n    with pytest.warns(FutureWarning, match=msg):\n        utilities.cov_matrix_to_params(np.eye(3))\n\n\ndef test_estimagic_sdcorr_params_to_sds_and_corr_is_deprecated():\n    msg = \"estimagic.utilities.sdcorr_params_to_sds_and_corr has been deprecated\"\n    with pytest.warns(FutureWarning, match=msg):\n        utilities.sdcorr_params_to_sds_and_corr(np.arange(6))\n\n\ndef test_estimagic_sds_and_corr_to_cov_is_deprecated():\n    msg = \"estimagic.utilities.sds_and_corr_to_cov has been deprecated\"\n    with pytest.warns(FutureWarning, match=msg):\n        utilities.sds_and_corr_to_cov(np.arange(3), np.eye(3))\n\n\ndef 
test_estimagic_cov_to_sds_and_corr_is_deprecated():\n    msg = \"estimagic.utilities.cov_to_sds_and_corr has been deprecated\"\n    with pytest.warns(FutureWarning, match=msg):\n        utilities.cov_to_sds_and_corr(np.eye(3))\n\n\ndef test_estimagic_sdcorr_params_to_matrix_is_deprecated():\n    msg = \"estimagic.utilities.sdcorr_params_to_matrix has been deprecated\"\n    with pytest.warns(FutureWarning, match=msg):\n        utilities.sdcorr_params_to_matrix(np.arange(6))\n\n\ndef test_estimagic_cov_matrix_to_sdcorr_params_is_deprecated():\n    msg = \"estimagic.utilities.cov_matrix_to_sdcorr_params has been deprecated\"\n    with pytest.warns(FutureWarning, match=msg):\n        utilities.cov_matrix_to_sdcorr_params(np.eye(3))\n\n\ndef test_estimagic_number_of_triangular_elements_to_dimension_is_deprecated():\n    msg = \"estimagic.utilities.number_of_triangular_elements_to_dimension has been\"\n    with pytest.warns(FutureWarning, match=msg):\n        utilities.number_of_triangular_elements_to_dimension(6)\n\n\ndef test_estimagic_dimension_to_number_of_triangular_elements_is_deprecated():\n    msg = \"estimagic.utilities.dimension_to_number_of_triangular_elements has been\"\n    with pytest.warns(FutureWarning, match=msg):\n        utilities.dimension_to_number_of_triangular_elements(3)\n\n\ndef test_estimagic_propose_alternatives_is_deprecated():\n    msg = \"estimagic.utilities.propose_alternatives has been deprecated\"\n    with pytest.warns(FutureWarning, match=msg):\n        utilities.propose_alternatives(\"estimagic\", list(\"abcdefg\"))\n\n\ndef test_estimagic_robust_cholesky_is_deprecated():\n    msg = \"estimagic.utilities.robust_cholesky has been deprecated\"\n    with pytest.warns(FutureWarning, match=msg):\n        utilities.robust_cholesky(np.eye(3))\n\n\ndef test_estimagic_robust_inverse_is_deprecated():\n    msg = \"estimagic.utilities.robust_inverse has been deprecated\"\n    with pytest.warns(FutureWarning, match=msg):\n        
utilities.robust_inverse(np.eye(3))\n\n\ndef test_estimagic_hash_array_is_deprecated():\n    msg = \"estimagic.utilities.hash_array has been deprecated\"\n    with pytest.warns(FutureWarning, match=msg):\n        utilities.hash_array(np.arange(3))\n\n\ndef test_estimagic_calculate_trustregion_initial_radius_is_deprecated():\n    msg = \"estimagic.utilities.calculate_trustregion_initial_radius has been deprecated\"\n    with pytest.warns(FutureWarning, match=msg):\n        utilities.calculate_trustregion_initial_radius(np.arange(3))\n\n\ndef test_estimagic_pickle_functions_are_deprecated(tmp_path):\n    msg = \"estimagic.utilities.to_pickle has been deprecated\"\n    with pytest.warns(FutureWarning, match=msg):\n        utilities.to_pickle(np.arange(3), tmp_path / \"test.pkl\")\n\n    msg = \"estimagic.utilities.read_pickle has been deprecated\"\n    with pytest.warns(FutureWarning, match=msg):\n        utilities.read_pickle(tmp_path / \"test.pkl\")\n\n\ndef test_estimagic_isscalar_is_deprecated():\n    msg = \"estimagic.utilities.isscalar has been deprecated\"\n    with pytest.warns(FutureWarning, match=msg):\n        utilities.isscalar(1)\n\n\ndef test_estimagic_get_rng_is_deprecated():\n    msg = \"estimagic.utilities.get_rng has been deprecated\"\n    with pytest.warns(FutureWarning, match=msg):\n        utilities.get_rng(42)\n\n\ndef test_estimagic_criterion_plot_is_deprecated():\n    msg = \"estimagic.criterion_plot has been deprecated\"\n    res = om.minimize(lambda x: x @ x, np.arange(3), algorithm=\"scipy_lbfgsb\")\n    with pytest.warns(FutureWarning, match=msg):\n        criterion_plot(res)\n\n\ndef test_estimagic_params_plot_is_deprecated():\n    msg = \"estimagic.params_plot has been deprecated\"\n    res = om.minimize(lambda x: x @ x, np.arange(3), algorithm=\"scipy_lbfgsb\")\n    with pytest.warns(FutureWarning, match=msg):\n        params_plot(res)\n\n\ndef test_criterion_is_deprecated():\n    msg = \"the `criterion` argument has been renamed\"\n    
with pytest.warns(FutureWarning, match=msg):\n        om.minimize(\n            criterion=lambda x: x @ x,\n            params=np.arange(3),\n            algorithm=\"scipy_lbfgsb\",\n        )\n\n\ndef test_criterion_kwargs_is_deprecated():\n    msg = \"the `criterion_kwargs` argument has been renamed\"\n    with pytest.warns(FutureWarning, match=msg):\n        om.minimize(\n            lambda x, a: x @ x,\n            params=np.arange(3),\n            algorithm=\"scipy_lbfgsb\",\n            criterion_kwargs={\"a\": 1},\n        )\n\n\ndef test_derivative_is_deprecated():\n    msg = \"the `derivative` argument has been renamed\"\n    with pytest.warns(FutureWarning, match=msg):\n        om.minimize(\n            lambda x: x @ x,\n            params=np.arange(3),\n            algorithm=\"scipy_lbfgsb\",\n            derivative=lambda x: 2 * x,\n        )\n\n\ndef test_derivative_kwargs_is_deprecated():\n    msg = \"the `derivative_kwargs` argument has been renamed\"\n    with pytest.warns(FutureWarning, match=msg):\n        om.minimize(\n            lambda x: x @ x,\n            params=np.arange(3),\n            algorithm=\"scipy_lbfgsb\",\n            jac=lambda x, a: 2 * x,\n            derivative_kwargs={\"a\": 1},\n        )\n\n\ndef test_criterion_and_derivative_is_deprecated():\n    msg = \"the `criterion_and_derivative` argument has been renamed\"\n    with pytest.warns(FutureWarning, match=msg):\n        om.minimize(\n            lambda x: x @ x,\n            params=np.arange(3),\n            algorithm=\"scipy_lbfgsb\",\n            criterion_and_derivative=lambda x: (x @ x, 2 * x),\n        )\n\n\ndef test_criterion_and_derivative_kwargs_is_deprecated():\n    msg = \"the `criterion_and_derivative_kwargs` argument has been renamed\"\n    with pytest.warns(FutureWarning, match=msg):\n        om.minimize(\n            lambda x: x @ x,\n            params=np.arange(3),\n            algorithm=\"scipy_lbfgsb\",\n            fun_and_jac=lambda x, a: (x @ x, 2 * 
x),\n            criterion_and_derivative_kwargs={\"a\": 1},\n        )\n\n\nALGO_OPTIONS = [\n    {\"convergence_absolute_criterion_tolerance\": 1e-8},\n    {\"convergence_relative_criterion_tolerance\": 1e-8},\n    {\"convergence_absolute_params_tolerance\": 1e-8},\n    {\"convergence_relative_params_tolerance\": 1e-8},\n    {\"convergence_absolute_gradient_tolerance\": 1e-8},\n    {\"convergence_relative_gradient_tolerance\": 1e-8},\n    {\"convergence_scaled_gradient_tolerance\": 1e-8},\n    {\"stopping_max_iterations\": 1_000},\n    {\"stopping_max_criterion_evaluations\": 1_000},\n]\n\n\n@pytest.mark.parametrize(\"algo_option\", ALGO_OPTIONS)\ndef test_old_convergence_criteria_are_deprecated(algo_option):\n    msg = \"The following keys in `algo_options` are deprecated\"\n    with warnings.catch_warnings():\n        warnings.simplefilter(\"ignore\", category=UserWarning)\n        with pytest.warns(FutureWarning, match=msg):\n            om.minimize(\n                lambda x: x @ x,\n                params=np.arange(3),\n                algorithm=\"scipy_lbfgsb\",\n                algo_options=algo_option,\n            )\n\n\ndef test_deprecated_attributes_of_optimize_result():\n    res = om.minimize(lambda x: x @ x, np.arange(3), algorithm=\"scipy_lbfgsb\")\n\n    msg = \"attribute is deprecated\"\n\n    with pytest.warns(FutureWarning, match=msg):\n        _ = res.n_criterion_evaluations\n\n    with pytest.warns(FutureWarning, match=msg):\n        _ = res.n_derivative_evaluations\n\n    with pytest.warns(FutureWarning, match=msg):\n        _ = res.criterion\n\n    with pytest.warns(FutureWarning, match=msg):\n        _ = res.start_criterion\n\n\nBOUNDS_KWARGS = [\n    {\"lower_bounds\": np.full(3, -1)},\n    {\"upper_bounds\": np.full(3, 2)},\n]\n\nSOFT_BOUNDS_KWARGS = [\n    {\"soft_lower_bounds\": np.full(3, -1)},\n    {\"soft_upper_bounds\": np.full(3, 1)},\n]\n\n\n@pytest.mark.parametrize(\"bounds_kwargs\", BOUNDS_KWARGS + SOFT_BOUNDS_KWARGS)\ndef 
test_old_bounds_are_deprecated_in_minimize(bounds_kwargs):\n    msg = \"Specifying bounds via the arguments\"\n    with pytest.warns(FutureWarning, match=msg):\n        om.minimize(\n            lambda x: x @ x,\n            np.arange(3),\n            algorithm=\"scipy_lbfgsb\",\n            **bounds_kwargs,\n        )\n\n\n@pytest.mark.parametrize(\"bounds_kwargs\", BOUNDS_KWARGS + SOFT_BOUNDS_KWARGS)\ndef test_old_bounds_are_deprecated_in_maximize(bounds_kwargs):\n    msg = \"Specifying bounds via the arguments\"\n    with pytest.warns(FutureWarning, match=msg):\n        om.maximize(\n            lambda x: -x @ x,\n            np.arange(3),\n            algorithm=\"scipy_lbfgsb\",\n            **bounds_kwargs,\n        )\n\n\n@pytest.mark.parametrize(\"bounds_kwargs\", BOUNDS_KWARGS)\ndef test_old_bounds_are_deprecated_in_first_derivative(bounds_kwargs):\n    msg = \"Specifying bounds via the arguments\"\n    with pytest.warns(FutureWarning, match=msg):\n        om.first_derivative(\n            lambda x: x @ x,\n            np.arange(3),\n            **bounds_kwargs,\n        )\n\n\n@pytest.mark.parametrize(\"bounds_kwargs\", BOUNDS_KWARGS)\ndef test_old_bounds_are_deprecated_in_second_derivative(bounds_kwargs):\n    msg = \"Specifying bounds via the arguments\"\n    with pytest.warns(FutureWarning, match=msg):\n        om.second_derivative(\n            lambda x: x @ x,\n            np.arange(3),\n            **bounds_kwargs,\n        )\n\n\n@pytest.mark.parametrize(\"bounds_kwargs\", BOUNDS_KWARGS)\ndef test_old_bounds_are_deprecated_in_estimate_ml(bounds_kwargs):\n    msg = \"Specifying bounds via the arguments\"\n    with pytest.warns(FutureWarning, match=msg):\n\n        @om.mark.likelihood\n        def loglike(x):\n            return -(x**2)\n\n        em.estimate_ml(\n            loglike=loglike,\n            params=np.arange(3),\n            optimize_options={\"algorithm\": \"scipy_lbfgsb\"},\n            **bounds_kwargs,\n        )\n\n\ndef 
test_numdiff_options_is_deprecated_in_estimate_ml():\n    msg = \"The argument `numdiff_options` is deprecated\"\n    with pytest.warns(FutureWarning, match=msg):\n\n        @om.mark.likelihood\n        def loglike(x):\n            return -(x**2)\n\n        em.estimate_ml(\n            loglike=loglike,\n            params=np.arange(3),\n            optimize_options={\"algorithm\": \"scipy_lbfgsb\"},\n            numdiff_options={\"method\": \"forward\"},\n        )\n\n\n@pytest.mark.parametrize(\"bounds_kwargs\", BOUNDS_KWARGS)\ndef test_old_bounds_are_deprecated_in_estimate_msm(bounds_kwargs):\n    msg = \"Specifying bounds via the arguments\"\n    with pytest.warns(FutureWarning, match=msg):\n        em.estimate_msm(\n            simulate_moments=lambda x: x,\n            empirical_moments=np.zeros(3),\n            moments_cov=np.eye(3),\n            params=np.arange(3),\n            optimize_options={\"algorithm\": \"scipy_lbfgsb\"},\n            **bounds_kwargs,\n        )\n\n\ndef test_numdiff_options_is_deprecated_in_estimate_msm():\n    msg = \"The argument `numdiff_options` is deprecated\"\n    with pytest.warns(FutureWarning, match=msg):\n        em.estimate_msm(\n            simulate_moments=lambda x: x,\n            empirical_moments=np.zeros(3),\n            moments_cov=np.eye(3),\n            params=np.arange(3),\n            optimize_options={\"algorithm\": \"scipy_lbfgsb\"},\n            numdiff_options={\"method\": \"forward\"},\n        )\n\n\n@pytest.mark.parametrize(\"bounds_kwargs\", BOUNDS_KWARGS)\ndef test_old_bounds_are_deprecated_in_count_free_params(bounds_kwargs):\n    msg = \"Specifying bounds via the arguments\"\n    with pytest.warns(FutureWarning, match=msg):\n        om.count_free_params(\n            np.arange(3),\n            constraints=om.FixedConstraint(lambda x: x[0]),\n            **bounds_kwargs,\n        )\n\n\n@pytest.mark.parametrize(\"bounds_kwargs\", BOUNDS_KWARGS)\ndef 
test_old_bounds_are_deprecated_in_check_constraints(bounds_kwargs):\n    msg = \"Specifying bounds via the arguments\"\n    with pytest.warns(FutureWarning, match=msg):\n        om.check_constraints(\n            np.arange(3),\n            constraints=om.FixedConstraint(lambda x: x[0]),\n            **bounds_kwargs,\n        )\n\n\ndef test_old_bounds_are_deprecated_in_slice_plot():\n    msg = \"Specifying bounds via the arguments\"\n    with pytest.warns(FutureWarning, match=msg):\n        om.slice_plot(\n            lambda x: x @ x,\n            np.arange(3),\n            lower_bounds=np.full(3, -1),\n            upper_bounds=np.full(3, 2),\n        )\n\n\ndef test_is_dict_output():\n    assert is_dict_output({\"value\": 1})\n    assert not is_dict_output(1)\n\n\ndef test_infer_problem_type_from_dict_output():\n    assert infer_problem_type_from_dict_output({\"value\": 1}) == AggregationLevel.SCALAR\n    assert (\n        infer_problem_type_from_dict_output({\"value\": 1, \"root_contributions\": 2})\n        == AggregationLevel.LEAST_SQUARES\n    )\n    assert (\n        infer_problem_type_from_dict_output({\"value\": 1, \"contributions\": 2})\n        == AggregationLevel.LIKELIHOOD\n    )\n\n\ndef test_convert_value_dict_to_function_value():\n    got = convert_dict_to_function_value({\"value\": 1})\n    assert isinstance(got, ScalarFunctionValue)\n    assert got.value == 1\n\n\ndef test_convert_root_contributions_dict_to_function_value():\n    got = convert_dict_to_function_value({\"value\": 5, \"root_contributions\": [1, 2]})\n    assert isinstance(got, LeastSquaresFunctionValue)\n    assert got.value == [1, 2]\n\n\ndef test_convert_contributions_dict_to_function_value():\n    got = convert_dict_to_function_value({\"value\": 5, \"contributions\": [1, 4]})\n    assert isinstance(got, LikelihoodFunctionValue)\n    assert got.value == [1, 4]\n\n\ndef test_old_scaling_options_are_deprecated_in_minimize():\n    msg = \"Specifying scaling options via the argument 
`scaling_options` is deprecated\"\n    with pytest.warns(FutureWarning, match=msg):\n        om.minimize(\n            lambda x: x @ x,\n            np.arange(3),\n            algorithm=\"scipy_lbfgsb\",\n            scaling_options={\"method\": \"start_values\", \"magnitude\": 1},\n        )\n\n\ndef test_old_scaling_options_are_deprecated_in_maximize():\n    msg = \"Specifying scaling options via the argument `scaling_options` is deprecated\"\n    with pytest.warns(FutureWarning, match=msg):\n        om.maximize(\n            lambda x: -x @ x,\n            np.arange(3),\n            algorithm=\"scipy_lbfgsb\",\n            scaling_options={\"method\": \"start_values\", \"magnitude\": 1},\n        )\n\n\ndef test_old_multistart_options_are_deprecated_in_minimize():\n    msg = \"Specifying multistart options via the argument `multistart_options` is\"\n    with pytest.warns(FutureWarning, match=msg):\n        om.minimize(\n            lambda x: x @ x,\n            np.arange(3),\n            algorithm=\"scipy_lbfgsb\",\n            multistart_options={\"n_samples\": 10},\n        )\n\n\ndef test_old_multistart_options_are_deprecated_in_maximize():\n    msg = \"Specifying multistart options via the argument `multistart_options` is\"\n    with pytest.warns(FutureWarning, match=msg):\n        om.maximize(\n            lambda x: -x @ x,\n            np.arange(3),\n            algorithm=\"scipy_lbfgsb\",\n            multistart_options={\"n_samples\": 10},\n        )\n\n\ndef test_multistart_option_share_optimization_option_is_deprecated():\n    msg = \"The `share_optimization` option is deprecated and will be removed in\"\n    with pytest.warns(FutureWarning, match=msg):\n        om.minimize(\n            lambda x: x @ x,\n            np.arange(3),\n            algorithm=\"scipy_lbfgsb\",\n            bounds=om.Bounds(lower=np.full(3, -1), upper=np.full(3, 2)),\n            multistart={\"share_optimization\": 0.1},\n        )\n\n\ndef 
test_multistart_option_convergence_relative_params_tolerance_option_is_deprecated():\n    msg = \"The `convergence_relative_params_tolerance` option is deprecated and will\"\n    with pytest.warns(FutureWarning, match=msg):\n        om.minimize(\n            lambda x: x @ x,\n            np.arange(3),\n            algorithm=\"scipy_lbfgsb\",\n            bounds=om.Bounds(lower=np.full(3, -1), upper=np.full(3, 2)),\n            multistart={\"convergence_relative_params_tolerance\": 0.01},\n        )\n\n\ndef test_multistart_option_optimization_error_handling_option_is_deprecated():\n    msg = \"The `optimization_error_handling` option is deprecated and will be removed\"\n    with pytest.warns(FutureWarning, match=msg):\n        om.minimize(\n            lambda x: x @ x,\n            np.arange(3),\n            algorithm=\"scipy_lbfgsb\",\n            bounds=om.Bounds(lower=np.full(3, -1), upper=np.full(3, 2)),\n            multistart={\"optimization_error_handling\": \"continue\"},\n        )\n\n\ndef test_multistart_option_exploration_error_handling_option_is_deprecated():\n    msg = \"The `exploration_error_handling` option is deprecated and will be removed\"\n    with pytest.warns(FutureWarning, match=msg):\n        om.minimize(\n            lambda x: x @ x,\n            np.arange(3),\n            algorithm=\"scipy_lbfgsb\",\n            bounds=om.Bounds(lower=np.full(3, -1), upper=np.full(3, 2)),\n            multistart={\"exploration_error_handling\": \"continue\"},\n        )\n\n\ndef test_deprecated_dict_access_of_multistart_info():\n    res = om.minimize(\n        lambda x: x @ x,\n        np.arange(3),\n        algorithm=\"scipy_lbfgsb\",\n        multistart=True,\n        bounds=om.Bounds(lower=np.full(3, -1), upper=np.full(3, 2)),\n    )\n    msg = \"The dictionary access for 'local_optima' is deprecated and will be removed\"\n    with pytest.warns(FutureWarning, match=msg):\n        _ = res.multistart_info[\"local_optima\"]\n\n\ndef 
test_base_steps_in_first_derivatives_is_deprecated():\n    msg = \"The `base_steps` argument is deprecated and will be removed alongside\"\n    with pytest.warns(FutureWarning, match=msg):\n        om.first_derivative(lambda x: x @ x, np.arange(3), base_steps=1e-3)\n\n\ndef test_step_ratio_in_first_derivatives_is_deprecated():\n    msg = \"The `step_ratio` argument is deprecated and will be removed alongside\"\n    with pytest.warns(FutureWarning, match=msg):\n        om.first_derivative(lambda x: x @ x, np.arange(3), step_ratio=2)\n\n\ndef test_n_steps_in_first_derivatives_is_deprecated():\n    msg = \"The `n_steps` argument is deprecated and will be removed alongside\"\n    with pytest.warns(FutureWarning, match=msg):\n        om.first_derivative(lambda x: x @ x, np.arange(3), n_steps=2)\n\n\ndef test_return_info_in_first_derivatives_is_deprecated():\n    msg = \"The `return_info` argument is deprecated and will be removed alongside\"\n    with pytest.warns(FutureWarning, match=msg):\n        om.first_derivative(lambda x: x @ x, np.arange(3), return_info=True)\n\n\ndef test_return_func_value_in_first_derivatives_is_deprecated():\n    msg = \"The `return_func_value` argument is deprecated and will be removed in\"\n    with pytest.warns(FutureWarning, match=msg):\n        om.first_derivative(lambda x: x @ x, np.arange(3), return_func_value=True)\n\n\ndef test_base_steps_in_second_derivatives_is_deprecated():\n    msg = \"The `base_steps` argument is deprecated and will be removed alongside\"\n    with pytest.warns(FutureWarning, match=msg):\n        om.second_derivative(lambda x: x @ x, np.arange(3), base_steps=1e-3)\n\n\ndef test_step_ratio_in_second_derivatives_is_deprecated():\n    msg = \"The `step_ratio` argument is deprecated and will be removed alongside\"\n    with pytest.warns(FutureWarning, match=msg):\n        om.second_derivative(lambda x: x @ x, np.arange(3), step_ratio=2)\n\n\ndef test_n_steps_in_second_derivatives_is_deprecated():\n    msg = \"The 
`n_steps` argument is deprecated and will be removed alongside\"\n    with pytest.warns(FutureWarning, match=msg):\n        om.second_derivative(lambda x: x @ x, np.arange(3), n_steps=1)\n\n\ndef test_return_func_value_in_second_derivatives_is_deprecated():\n    msg = \"The `return_func_value` argument is deprecated and will be removed in\"\n    with pytest.warns(FutureWarning, match=msg):\n        om.second_derivative(lambda x: x @ x, np.arange(3), return_func_value=True)\n\n\ndef test_return_info_in_second_derivatives_is_deprecated():\n    msg = \"The `return_info` argument is deprecated and will be removed alongside\"\n    with pytest.warns(FutureWarning, match=msg):\n        om.second_derivative(lambda x: x @ x, np.arange(3), return_info=True)\n\n\ndef test_numdiff_result_func_evals_is_deprecated():\n    msg = \"The `func_evals` attribute is deprecated and will be removed in optimagic\"\n    res = NumdiffResult(derivative=1)\n    with pytest.warns(FutureWarning, match=msg):\n        _ = res.func_evals\n\n\ndef test_numdiff_result_derivative_candidates_is_deprecated():\n    msg = \"The `derivative_candidates` attribute is deprecated and will be removed\"\n    res = NumdiffResult(derivative=1)\n    with pytest.warns(FutureWarning, match=msg):\n        _ = res.derivative_candidates\n\n\ndef test_numdiff_result_dict_access_is_deprecated():\n    msg = \"The dictionary access for 'derivative' is deprecated and will be removed\"\n    res = NumdiffResult(derivative=1)\n    with pytest.warns(FutureWarning, match=msg):\n        _ = res[\"derivative\"]\n\n\ndef test_key_argument_is_deprecated_in_first_derivative():\n    with pytest.warns(FutureWarning, match=\"The `key` argument in\"):\n        om.first_derivative(lambda x: {\"value\": x @ x}, np.arange(3), key=\"value\")\n\n\ndef test_key_argument_is_deprecated_in_second_derivative():\n    with pytest.warns(FutureWarning, match=\"The `key` argument in\"):\n        om.second_derivative(lambda x: {\"value\": x @ x}, 
np.arange(3), key=\"value\")\n\n\ndef test_jac_dicts_are_deprecated_in_minimize():\n    msg = \"Specifying a dictionary of jac functions is deprecated\"\n    with pytest.warns(FutureWarning, match=msg):\n        res = om.minimize(\n            lambda x: x @ x,\n            np.arange(3),\n            algorithm=\"scipy_lbfgsb\",\n            jac={\"value\": lambda x: 2 * x},\n        )\n        aaae(res.params, np.zeros(3))\n\n\ndef test_jac_dicts_are_deprecated_in_maximize():\n    msg = \"Specifying a dictionary of jac functions is deprecated\"\n    with pytest.warns(FutureWarning, match=msg):\n        res = om.maximize(\n            lambda x: -x @ x,\n            np.arange(3),\n            algorithm=\"scipy_lbfgsb\",\n            jac={\"value\": lambda x: -2 * x},\n        )\n        aaae(res.params, np.zeros(3))\n\n\ndef test_fun_and_jac_dicts_are_deprecated_in_minimize():\n    msg = \"Specifying a dictionary of fun_and_jac functions is deprecated\"\n    with pytest.warns(FutureWarning, match=msg):\n        res = om.minimize(\n            lambda x: x @ x,\n            np.arange(3),\n            algorithm=\"scipy_lbfgsb\",\n            fun_and_jac={\"value\": lambda x: (x @ x, 2 * x)},\n        )\n        aaae(res.params, np.zeros(3))\n\n\ndef test_fun_and_jac_dicts_are_deprecated_in_maximize():\n    msg = \"Specifying a dictionary of fun_and_jac functions is deprecated\"\n    with pytest.warns(FutureWarning, match=msg):\n        res = om.maximize(\n            lambda x: -x @ x,\n            np.arange(3),\n            algorithm=\"scipy_lbfgsb\",\n            fun_and_jac={\"value\": lambda x: (-x @ x, -2 * x)},\n        )\n        aaae(res.params, np.zeros(3))\n\n\ndef test_fun_with_dict_return_is_deprecated_in_minimize():\n    msg = \"Returning a dictionary with the special keys\"\n    with pytest.warns(FutureWarning, match=msg):\n        res = om.minimize(\n            lambda x: {\"value\": x @ x},\n            np.arange(3),\n            
algorithm=\"scipy_lbfgsb\",\n        )\n        aaae(res.params, np.zeros(3))\n\n\ndef test_fun_with_dict_return_is_deprecated_in_slice_plot():\n    msg = \"Functions that return dictionaries\"\n    with pytest.warns(FutureWarning, match=msg):\n        om.slice_plot(\n            lambda x: {\"value\": x @ x},\n            np.arange(3),\n            bounds=om.Bounds(lower=np.zeros(3), upper=np.ones(3) * 5),\n        )\n\n\ndef test_handle_log_options():\n    msg = (\n        \"Usage of the parameter log_options is deprecated \"\n        \"and will be removed in a future version. \"\n        \"Provide a LogOptions instance for the parameter `logging`, if you need to \"\n        \"configure the logging.\"\n    )\n    log_options = {\"fast_logging\": True}\n    with pytest.warns(FutureWarning, match=msg):\n        logger = None\n        handled_logger = handle_log_options_throw_deprecated_warning(\n            log_options, logger\n        )\n        assert handled_logger is None\n\n    creation_warning = (\n        f\"\\nUsing {log_options=} to create an instance of SQLiteLogOptions. 
\"\n        f\"This mechanism will be removed in the future.\"\n    )\n\n    with pytest.warns(match=creation_warning):\n        handled_logger = handle_log_options_throw_deprecated_warning(\n            log_options, \":memory:\"\n        )\n        assert isinstance(handled_logger, SQLiteLogOptions)\n\n    incompatibility_msg = \"Found string or path for logger argument, but parameter\"\n    f\" {log_options=} is not compatible \"\n    log_options_typo = {\"fast_lugging\": False}\n\n    with pytest.raises(ValueError, match=incompatibility_msg):\n        handled_logger = handle_log_options_throw_deprecated_warning(\n            log_options_typo, \":memory:\"\n        )\n        assert handled_logger == \":memory:\"\n\n\ndef test_log_options_are_deprecated_in_estimate_ml(tmp_path):\n    with pytest.warns(FutureWarning, match=\"LogOptions\"):\n\n        @om.mark.likelihood\n        def loglike(x):\n            return -(x**2)\n\n        em.estimate_ml(\n            loglike=loglike,\n            params=np.arange(3),\n            optimize_options={\"algorithm\": \"scipy_lbfgsb\"},\n            logging=tmp_path / \"log.db\",\n            log_options={\"fast_logging\": True, \"if_database_exists\": \"replace\"},\n        )\n\n    with pytest.warns(FutureWarning, match=\"if_table_exists\"):\n\n        @om.mark.likelihood\n        def loglike(x):\n            return -(x**2)\n\n        em.estimate_ml(\n            loglike=loglike,\n            params=np.arange(3),\n            optimize_options={\"algorithm\": \"scipy_lbfgsb\"},\n            logging=tmp_path / \"log_1.db\",\n            log_options={\"fast_logging\": True, \"if_table_exists\": \"replace\"},\n        )\n\n\ndef test_log_options_are_deprecated_in_estimate_msm(tmp_path):\n    with pytest.warns(FutureWarning, match=\"LogOptions\"):\n\n        @om.mark.likelihood\n        def loglike(x):\n            return -(x**2)\n\n        em.estimate_msm(\n            simulate_moments=lambda x: x,\n            
empirical_moments=np.zeros(3),\n            moments_cov=np.eye(3),\n            params=np.arange(3),\n            optimize_options={\"algorithm\": \"scipy_lbfgsb\"},\n            logging=tmp_path / \"log.db\",\n            log_options={\"fast_logging\": True, \"if_database_exists\": \"replace\"},\n        )\n\n    with pytest.warns(FutureWarning, match=\"if_table_exists\"):\n\n        @om.mark.likelihood\n        def loglike(x):\n            return -(x**2)\n\n        em.estimate_msm(\n            simulate_moments=lambda x: x,\n            empirical_moments=np.zeros(3),\n            moments_cov=np.eye(3),\n            params=np.arange(3),\n            optimize_options={\"algorithm\": \"scipy_lbfgsb\"},\n            logging=tmp_path / \"log_1.db\",\n            log_options={\"fast_logging\": True, \"if_table_exists\": \"replace\"},\n        )\n\n\ndef test_log_options_are_deprecated_in_minimize(tmp_path):\n    with pytest.warns(FutureWarning, match=\"LogOptions\"):\n        om.minimize(\n            lambda x: x @ x,\n            np.arange(3),\n            algorithm=\"scipy_lbfgsb\",\n            logging=tmp_path / \"log.db\",\n            log_options={\"fast_logging\": True, \"if_database_exists\": \"replace\"},\n        )\n\n    with pytest.warns(FutureWarning, match=\"if_table_exists\"):\n        om.minimize(\n            lambda x: x @ x,\n            np.arange(3),\n            algorithm=\"scipy_lbfgsb\",\n            logging=tmp_path / \"log_1.db\",\n            log_options={\"fast_logging\": True, \"if_table_exists\": \"replace\"},\n        )\n\n\ndef test_log_options_are_deprecated_in_maximize(tmp_path):\n    with pytest.warns(FutureWarning, match=\"LogOptions\"):\n        om.maximize(\n            lambda x: -x @ x,\n            np.arange(3),\n            algorithm=\"scipy_lbfgsb\",\n            logging=tmp_path / \"log.db\",\n            log_options={\"fast_logging\": True, \"if_database_exists\": \"replace\"},\n        )\n\n    with pytest.warns(FutureWarning, 
match=\"if_table_exists\"):\n        om.maximize(\n            lambda x: -x @ x,\n            np.arange(3),\n            algorithm=\"scipy_lbfgsb\",\n            logging=tmp_path / \"log_1.db\",\n            log_options={\"fast_logging\": True, \"if_table_exists\": \"replace\"},\n        )\n\n\ndef test_dict_constraints_are_deprecated_in_minimize():\n    msg = \"Specifying constraints as a dictionary is deprecated and\"\n    with pytest.warns(FutureWarning, match=msg):\n        om.minimize(\n            lambda x: x @ x,\n            np.arange(3),\n            algorithm=\"scipy_lbfgsb\",\n            constraints={\"type\": \"fixed\", \"loc\": [0, 1]},\n        )\n\n\ndef test_dict_constraints_are_deprecated_in_maximize():\n    msg = \"Specifying constraints as a dictionary is deprecated and\"\n    with pytest.warns(FutureWarning, match=msg):\n        om.maximize(\n            lambda x: -x @ x,\n            np.arange(3),\n            algorithm=\"scipy_lbfgsb\",\n            constraints={\"type\": \"fixed\", \"loc\": [0, 1]},\n        )\n\n\ndef test_dict_constraints_are_deprecated_in_estimate_ml():\n    msg = \"Specifying constraints as a dictionary is deprecated and\"\n    with pytest.warns(FutureWarning, match=msg):\n\n        @om.mark.likelihood\n        def loglike(x):\n            return -(x**2)\n\n        em.estimate_ml(\n            loglike=loglike,\n            params=np.arange(3),\n            optimize_options={\"algorithm\": \"scipy_lbfgsb\"},\n            constraints={\"type\": \"fixed\", \"loc\": [0, 1]},\n        )\n\n\ndef test_dict_constraints_are_deprecated_in_estimate_msm():\n    msg = \"Specifying constraints as a dictionary is deprecated and\"\n    with pytest.warns(FutureWarning, match=msg):\n        em.estimate_msm(\n            simulate_moments=lambda x: x,\n            empirical_moments=np.zeros(3),\n            moments_cov=np.eye(3),\n            params=np.arange(3),\n            optimize_options={\"algorithm\": \"scipy_lbfgsb\"},\n            
constraints={\"type\": \"fixed\", \"loc\": [0, 1]},\n        )\n\n\n@pytest.fixture\ndef dummy_func():\n    return lambda x: x\n\n\ndef test_pre_process_constraints_trivial_case(dummy_func):\n    constraints = om.FixedConstraint(selector=dummy_func)\n    expected = [{\"type\": \"fixed\", \"selector\": dummy_func}]\n    assert pre_process_constraints(constraints) == expected\n\n\ndef test_pre_process_constraints_list_of_constraints(dummy_func):\n    constraints = [\n        om.FixedConstraint(selector=dummy_func),\n        om.IncreasingConstraint(selector=dummy_func),\n    ]\n    expected = [\n        {\"type\": \"fixed\", \"selector\": dummy_func},\n        {\"type\": \"increasing\", \"selector\": dummy_func},\n    ]\n    assert pre_process_constraints(constraints) == expected\n\n\ndef test_pre_process_constraints_none_case():\n    assert pre_process_constraints(None) == []\n\n\ndef test_pre_process_constraints_mixed_case(dummy_func):\n    constraints = [\n        om.FixedConstraint(selector=dummy_func),\n        {\"type\": \"increasing\", \"selector\": dummy_func},\n    ]\n    expected = [\n        {\"type\": \"fixed\", \"selector\": dummy_func},\n        {\"type\": \"increasing\", \"selector\": dummy_func},\n    ]\n    assert pre_process_constraints(constraints) == expected\n\n\ndef test_pre_process_constraints_dict_case(dummy_func):\n    constraints = {\"type\": \"fixed\", \"selector\": dummy_func}\n    expected = [{\"type\": \"fixed\", \"selector\": dummy_func}]\n    assert pre_process_constraints(constraints) == expected\n\n\ndef test_pre_process_constraints_invalid_case():\n    constraints = \"invalid\"\n    msg = \"Invalid constraint type: <class 'str'>\"\n    with pytest.raises(InvalidConstraintError, match=msg):\n        pre_process_constraints(constraints)\n\n\ndef test_pre_process_constraints_invalid_mixed_case():\n    constraints = [\n        {\"type\": \"fixed\", \"loc\": [0, 1]},\n        om.FixedConstraint(),\n        \"invalid\",\n    ]\n    msg = 
\"Invalid constraint types: {<class 'str'>}\"\n    with pytest.raises(InvalidConstraintError, match=msg):\n        pre_process_constraints(constraints)\n\n\ndef test_deprecated_log_reader(example_db):\n    with pytest.warns(FutureWarning, match=\"SQLiteLogReader\"):\n        reader = OptimizeLogReader(example_db)\n        res = reader.read_start_params()\n        assert res == {\"a\": 1, \"b\": 2, \"c\": 3}\n\n\ndef test_estimagic_joblib_batch_evaluator_is_deprecated():\n    msg = \"estimagic.batch_evaluators.joblib_batch_evaluator has been deprecated\"\n    with pytest.warns(FutureWarning, match=msg):\n        batch_evaluators.joblib_batch_evaluator(lambda x: x, [1, 2], n_cores=1)\n\n\ndef test_estimagic_process_batch_evaluator_is_deprecated():\n    msg = \"estimagic.batch_evaluators.process_batch_evaluator has been deprecated\"\n    with pytest.warns(FutureWarning, match=msg):\n        batch_evaluators.process_batch_evaluator(\"joblib\")\n"
  },
  {
    "path": "tests/optimagic/test_mark.py",
    "content": "import functools\nfrom dataclasses import dataclass\n\nimport pytest\n\nimport optimagic as om\nfrom optimagic.optimization.algorithm import AlgoInfo, Algorithm\nfrom optimagic.typing import AggregationLevel\n\n\ndef f(x):\n    pass\n\n\n@dataclass(frozen=True)\nclass ImmutableF:\n    def __call__(self, x):\n        pass\n\n\ndef _g(x, y):\n    pass\n\n\ng = functools.partial(_g, y=1)\n\n\nCALLABLES = [f, ImmutableF(), g]\n\n\n@pytest.mark.parametrize(\"func\", CALLABLES)\ndef test_scalar(func):\n    got = om.mark.scalar(func)\n\n    assert got._problem_type == AggregationLevel.SCALAR\n\n\n@pytest.mark.parametrize(\"func\", CALLABLES)\ndef test_least_squares(func):\n    got = om.mark.least_squares(func)\n\n    assert got._problem_type == AggregationLevel.LEAST_SQUARES\n\n\n@pytest.mark.parametrize(\"func\", CALLABLES)\ndef test_likelihood(func):\n    got = om.mark.likelihood(func)\n\n    assert got._problem_type == AggregationLevel.LIKELIHOOD\n\n\ndef test_mark_minimizer():\n    @om.mark.minimizer(\n        name=\"test\",\n        solver_type=AggregationLevel.LEAST_SQUARES,\n        is_available=True,\n        is_global=True,\n        needs_jac=True,\n        needs_hess=True,\n        needs_bounds=True,\n        supports_parallelism=True,\n        supports_bounds=True,\n        supports_infinite_bounds=True,\n        supports_linear_constraints=True,\n        supports_nonlinear_constraints=True,\n        disable_history=False,\n    )\n    @dataclass(frozen=True)\n    class DummyAlgorithm(Algorithm):\n        initial_radius: float = 1.0\n        max_radius: float = 10.0\n        convergence_ftol_rel: float = 1e-6\n        stopping_maxiter: int = 1000\n\n        def _solve_internal_problem(self, problem, x0):\n            pass\n\n    assert hasattr(DummyAlgorithm, \"__algo_info__\")\n    assert isinstance(DummyAlgorithm.__algo_info__, AlgoInfo)\n    assert DummyAlgorithm.__algo_info__.name == \"test\"\n"
  },
  {
    "path": "tests/optimagic/test_timing.py",
    "content": "import pytest\n\nfrom optimagic import timing\n\n\ndef test_invalid_aggregate_batch_time():\n    with pytest.raises(ValueError, match=\"aggregate_batch_time must be a callable\"):\n        timing.CostModel(\n            fun=None,\n            jac=None,\n            fun_and_jac=None,\n            label=\"label\",\n            aggregate_batch_time=\"Not callable\",\n        )\n"
  },
  {
    "path": "tests/optimagic/test_type_conversion.py",
    "content": "import numpy as np\nimport pytest\n\nfrom optimagic.type_conversion import TYPE_CONVERTERS\nfrom optimagic.typing import (\n    GtOneFloat,\n    NonNegativeFloat,\n    NonNegativeInt,\n    PositiveFloat,\n    PositiveInt,\n)\n\n\n@pytest.mark.parametrize(\"candidate\", [1, \"1\", 1.0, \"1.0\", np.int32(1), np.array(1.0)])\ndef test_int_conversion(candidate):\n    got = TYPE_CONVERTERS[int](candidate)\n    assert isinstance(got, int)\n    assert got == 1\n\n\n@pytest.mark.parametrize(\"candidate\", [1, \"1\", 1.0, \"1.0\", np.int32(1), np.array(1.0)])\ndef test_positive_int_conversion(candidate):\n    got = TYPE_CONVERTERS[PositiveInt](candidate)\n    assert isinstance(got, int)\n    assert got == 1\n\n\n@pytest.mark.parametrize(\"candidate\", [1, \"1\", 1.0, \"1.0\", np.int32(1), np.array(1.0)])\ndef test_non_negative_int_conversion(candidate):\n    got = TYPE_CONVERTERS[NonNegativeInt](candidate)\n    assert isinstance(got, int)\n    assert got == 1\n\n\n@pytest.mark.parametrize(\"candidate\", [-1, \"-1\", -1.0, 0])\ndef test_positive_int_conversion_fail(candidate):\n    with pytest.raises(Exception):  # noqa: B017\n        TYPE_CONVERTERS[PositiveInt](candidate)\n\n\n@pytest.mark.parametrize(\"candidate\", [-1, \"-1\", -1.0])\ndef test_non_negative_int_conversion_fail(candidate):\n    with pytest.raises(Exception):  # noqa: B017\n        TYPE_CONVERTERS[NonNegativeInt](candidate)\n\n\n@pytest.mark.parametrize(\"candidate\", [1, \"1\", 1.0, \"1.0\", np.int32(1), np.array(1.0)])\ndef test_float_conversion(candidate):\n    got = TYPE_CONVERTERS[float](candidate)\n    assert isinstance(got, float)\n    assert got == 1.0\n\n\n@pytest.mark.parametrize(\"candidate\", [1, \"1\", 1.0, \"1.0\", np.int32(1), np.array(1.0)])\ndef test_positive_float_conversion(candidate):\n    got = TYPE_CONVERTERS[PositiveFloat](candidate)\n    assert isinstance(got, float)\n    assert got == 1.0\n\n\n@pytest.mark.parametrize(\"candidate\", [1, \"1\", 1.0, \"1.0\", 
np.int32(1), np.array(1.0)])\ndef test_non_negative_float_conversion(candidate):\n    got = TYPE_CONVERTERS[NonNegativeFloat](candidate)\n    assert isinstance(got, float)\n    assert got == 1.0\n\n\n@pytest.mark.parametrize(\"candidate\", [-1, \"-1\", -1.0, 0])\ndef test_positive_float_conversion_fail(candidate):\n    with pytest.raises(Exception):  # noqa: B017\n        TYPE_CONVERTERS[PositiveFloat](candidate)\n\n\n@pytest.mark.parametrize(\"candidate\", [-1, \"-1\", -1.0])\ndef test_non_negative_float_conversion_fail(candidate):\n    with pytest.raises(Exception):  # noqa: B017\n        TYPE_CONVERTERS[NonNegativeFloat](candidate)\n\n\n@pytest.mark.parametrize(\"candidate\", [np.bool_(True), \"yes\", \"1\", \"true\", True])\ndef test_bool_conversion_true(candidate):\n    got = TYPE_CONVERTERS[bool](candidate)\n    assert got is True\n\n\n@pytest.mark.parametrize(\"candidate\", [np.bool_(False), \"no\", \"0\", \"false\", False])\ndef test_bool_conversion_false(candidate):\n    got = TYPE_CONVERTERS[bool](candidate)\n    assert got is False\n\n\n@pytest.mark.parametrize(\"candidate\", [1.3, \"1.3\", np.float32(1.3), np.array(1.3)])\ndef test_gt_one_float(candidate):\n    got = TYPE_CONVERTERS[GtOneFloat](candidate)\n    assert isinstance(got, float)\n    assert np.allclose(got, 1.3)\n\n\n@pytest.mark.parametrize(\"candidate\", [0.5, \"0.5\", np.float32(0.5), np.array(0.5)])\ndef test_gt_one_float_fail(candidate):\n    with pytest.raises(Exception):  # noqa: B017\n        TYPE_CONVERTERS[GtOneFloat](candidate)\n"
  },
  {
    "path": "tests/optimagic/test_typed_dicts_consistency.py",
    "content": "from typing import get_args, get_type_hints\n\nfrom optimagic.differentiation.numdiff_options import NumdiffOptions, NumdiffOptionsDict\nfrom optimagic.optimization.multistart_options import (\n    MultistartOptions,\n    MultistartOptionsDict,\n)\nfrom optimagic.parameters.scaling import ScalingOptions, ScalingOptionsDict\n\n\ndef assert_attributes_and_type_hints_are_equal(dataclass, typed_dict):\n    \"\"\"Test that dataclass and typed_dict have same attributes and types.\n\n    This assertion purposefully ignores that all type hints in the typed dict are\n    wrapped by typing.NotRequired.\n\n    As there is no easy way to *not* read the NotRequired types in 3.10, we need to\n    activate include_extras=True to get the NotRequired types in Python 3.11 and\n    above. Once we drop support for Python 3.10, we can remove the\n    include_extras=True argument and the removal of the NotRequired types.\n\n    Args:\n        dataclass: An instance of a dataclass\n        typed_dict: An instance of a typed dict\n\n    \"\"\"\n    types_from_dataclass = get_type_hints(dataclass)\n    types_from_typed_dict = get_type_hints(typed_dict, include_extras=True)\n    types_from_typed_dict = {\n        # Remove typing.NotRequired from the types\n        k: get_args(v)[0]\n        for k, v in types_from_typed_dict.items()\n    }\n    assert types_from_dataclass == types_from_typed_dict\n\n\ndef test_scaling_options_and_dict_have_same_attributes():\n    assert_attributes_and_type_hints_are_equal(ScalingOptions, ScalingOptionsDict)\n\n\ndef test_multistart_options_and_dict_have_same_attributes():\n    assert_attributes_and_type_hints_are_equal(MultistartOptions, MultistartOptionsDict)\n\n\ndef test_numdiff_options_and_dict_have_same_attributes():\n    assert_attributes_and_type_hints_are_equal(NumdiffOptions, NumdiffOptionsDict)\n"
  },
  {
    "path": "tests/optimagic/test_utilities.py",
    "content": "import numpy as np\nimport pandas as pd\nimport pytest\nfrom numpy.testing import assert_array_almost_equal as aaae\n\nfrom optimagic.config import IS_JAX_INSTALLED\nfrom optimagic.utilities import (\n    calculate_trustregion_initial_radius,\n    chol_params_to_lower_triangular_matrix,\n    cov_matrix_to_params,\n    cov_matrix_to_sdcorr_params,\n    cov_params_to_matrix,\n    cov_to_sds_and_corr,\n    dimension_to_number_of_triangular_elements,\n    get_rng,\n    hash_array,\n    isscalar,\n    number_of_triangular_elements_to_dimension,\n    propose_alternatives,\n    read_pickle,\n    robust_cholesky,\n    robust_inverse,\n    sdcorr_params_to_matrix,\n    sdcorr_params_to_sds_and_corr,\n    sds_and_corr_to_cov,\n    to_pickle,\n)\n\nif IS_JAX_INSTALLED:\n    import jax.numpy as jnp\n\n\ndef test_chol_params_to_lower_triangular_matrix():\n    calculated = chol_params_to_lower_triangular_matrix(pd.Series([1, 2, 3]))\n    expected = np.array([[1, 0], [2, 3]])\n    aaae(calculated, expected)\n\n\ndef test_cov_params_to_matrix():\n    params = np.array([1, 0.1, 2, 0.2, 0.22, 3])\n    expected = np.array([[1, 0.1, 0.2], [0.1, 2, 0.22], [0.2, 0.22, 3]])\n    calculated = cov_params_to_matrix(params)\n    aaae(calculated, expected)\n\n\ndef test_cov_matrix_to_params():\n    expected = np.array([1, 0.1, 2, 0.2, 0.22, 3])\n    cov = np.array([[1, 0.1, 0.2], [0.1, 2, 0.22], [0.2, 0.22, 3]])\n    calculated = cov_matrix_to_params(cov)\n    aaae(calculated, expected)\n\n\ndef test_sdcorr_params_to_sds_and_corr():\n    sdcorr_params = pd.Series([1, 2, 3, 0.1, 0.2, 0.3])\n    exp_corr = np.array([[1, 0.1, 0.2], [0.1, 1, 0.3], [0.2, 0.3, 1]])\n    exp_sds = np.array([1, 2, 3])\n    calc_sds, calc_corr = sdcorr_params_to_sds_and_corr(sdcorr_params)\n    aaae(calc_sds, exp_sds)\n    aaae(calc_corr, exp_corr)\n\n\ndef test_sdcorr_params_to_matrix():\n    sds = np.sqrt([1, 2, 3])\n    corrs = [0.07071068, 0.11547005, 0.08981462]\n    params = np.hstack([sds, 
corrs])\n    expected = np.array([[1, 0.1, 0.2], [0.1, 2, 0.22], [0.2, 0.22, 3]])\n    calculated = sdcorr_params_to_matrix(params)\n    aaae(calculated, expected)\n\n\ndef test_cov_matrix_to_sdcorr_params():\n    sds = np.sqrt([1, 2, 3])\n    corrs = [0.07071068, 0.11547005, 0.08981462]\n    expected = np.hstack([sds, corrs])\n    cov = np.array([[1, 0.1, 0.2], [0.1, 2, 0.22], [0.2, 0.22, 3]])\n    calculated = cov_matrix_to_sdcorr_params(cov)\n    aaae(calculated, expected)\n\n\ndef test_sds_and_corr_to_cov():\n    sds = [1, 2, 3]\n    corr = np.ones((3, 3)) * 0.2\n    corr[np.diag_indices(3)] = 1\n    calculated = sds_and_corr_to_cov(sds, corr)\n    expected = np.array([[1.0, 0.4, 0.6], [0.4, 4.0, 1.2], [0.6, 1.2, 9.0]])\n    aaae(calculated, expected)\n\n\ndef test_cov_to_sds_and_corr():\n    cov = np.array([[1.0, 0.4, 0.6], [0.4, 4.0, 1.2], [0.6, 1.2, 9.0]])\n    calc_sds, calc_corr = cov_to_sds_and_corr(cov)\n    exp_sds = [1, 2, 3]\n    exp_corr = np.ones((3, 3)) * 0.2\n    exp_corr[np.diag_indices(3)] = 1\n    aaae(calc_sds, exp_sds)\n    aaae(calc_corr, exp_corr)\n\n\ndef test_number_of_triangular_elements_to_dimension():\n    inputs = [6, 10, 15, 21]\n    expected = [3, 4, 5, 6]\n    for inp, exp in zip(inputs, expected, strict=False):\n        assert number_of_triangular_elements_to_dimension(inp) == exp\n\n\ndef test_dimension_to_number_of_triangular_elements():\n    inputs = [3, 4, 5, 6]\n    expected = [6, 10, 15, 21]\n    for inp, exp in zip(inputs, expected, strict=False):\n        assert dimension_to_number_of_triangular_elements(inp) == exp\n\n\ndef random_cov(dim, seed):\n    rng = np.random.default_rng(seed)\n\n    num_elements = int(dim * (dim + 1) / 2)\n    chol = np.zeros((dim, dim))\n    chol[np.tril_indices(dim)] = rng.uniform(size=num_elements)\n    cov = chol @ chol.T\n    zero_positions = rng.choice(range(dim), size=int(dim / 5), replace=False)\n    for pos in zero_positions:\n        cov[:, pos] = 0\n        cov[pos] = 0\n    return 
cov\n\n\nseeds = [58822, 3181, 98855, 44002, 47631, 97741, 10655, 4600, 1151, 58189]\ndims = [8] * 6 + [10, 12, 15, 20]\n\n\n@pytest.mark.parametrize(\"dim, seed\", zip(dims, seeds, strict=False))\ndef test_robust_cholesky_with_zero_variance(dim, seed):\n    cov = random_cov(dim, seed)\n    chol = robust_cholesky(cov)\n    aaae(chol.dot(chol.T), cov)\n    assert (chol[np.triu_indices(len(cov), k=1)] == 0).all()\n\n\ndef test_robust_cholesky_with_extreme_cases():\n    for cov in [np.ones((5, 5)), np.zeros((5, 5))]:\n        chol = robust_cholesky(cov)\n        aaae(chol.dot(chol.T), cov)\n\n\ndef test_robust_inverse_nonsingular():\n    mat = np.eye(3) + 0.2\n    expected = np.linalg.inv(mat)\n    calculated = robust_inverse(mat)\n    aaae(calculated, expected)\n\n\ndef test_robust_inverse_singular():\n    mat = np.zeros((5, 5))\n    expected = np.zeros((5, 5))\n    with pytest.warns(UserWarning, match=\"LinAlgError\"):\n        calculated = robust_inverse(mat)\n    aaae(calculated, expected)\n\n\ndef test_hash_array():\n    arr1 = np.arange(4)[::2]\n    arr2 = np.array([0, 2])\n\n    arr3 = np.array([0, 3])\n    assert hash_array(arr1) == hash_array(arr2)\n    assert hash_array(arr1) != hash_array(arr3)\n\n\ndef test_initial_trust_radius_small_x():\n    x = np.array([0.01, 0.01])\n    expected = 0.1\n    res = calculate_trustregion_initial_radius(x)\n    assert expected == pytest.approx(res, abs=1e-8)\n\n\ndef test_initial_trust_radius_large_x():\n    x = np.array([20.5, 10])\n    expected = 2.05\n    res = calculate_trustregion_initial_radius(x)\n    assert expected == pytest.approx(res, abs=1e-8)\n\n\ndef test_pickling(tmp_path):\n    a = [1, 2, 3]\n    path = tmp_path / \"bla.pkl\"\n    to_pickle(a, path)\n    b = read_pickle(path)\n    assert a == b\n\n\nSCALARS = [1, 2.0, np.pi, np.array(1), np.array(2.0), np.array(np.pi), np.nan]\n\n\n@pytest.mark.parametrize(\"element\", SCALARS)\ndef test_isscalar_true(element):\n    assert isscalar(element) is 
True\n\n\nNON_SCALARS = [np.arange(3), {\"a\": 1}, [1, 2, 3]]\n\n\n@pytest.mark.parametrize(\"element\", NON_SCALARS)\ndef test_isscalar_false(element):\n    assert isscalar(element) is False\n\n\n@pytest.mark.skipif(not IS_JAX_INSTALLED, reason=\"Needs jax.\")\ndef test_isscalar_jax_true():\n    x = jnp.arange(3)\n    element = x @ x\n    assert isscalar(element) is True\n\n\n@pytest.mark.skipif(not IS_JAX_INSTALLED, reason=\"Needs jax.\")\ndef test_isscalar_jax_false():\n    element = jnp.arange(3)\n    assert isscalar(element) is False\n\n\nTEST_CASES = [\n    0,\n    1,\n    10,\n    1000000,\n    None,\n    np.random.default_rng(),\n    np.random.Generator(np.random.MT19937()),\n]\n\n\n@pytest.mark.parametrize(\"seed\", TEST_CASES)\ndef test_get_rng_correct_input(seed):\n    rng = get_rng(seed)\n    assert isinstance(rng, np.random.Generator)\n\n\nTEST_CASES = [0.1, \"a\", object(), lambda x: x**2]\n\n\n@pytest.mark.parametrize(\"seed\", TEST_CASES)\ndef test_get_rng_wrong_input(seed):\n    with pytest.raises(TypeError):\n        get_rng(seed)\n\n\ndef test_propose_alternatives():\n    possibilities = [\"scipy_lbfgsb\", \"scipy_slsqp\", \"nlopt_lbfgsb\"]\n    inputs = [[\"scipy_L-BFGS-B\", 1], [\"L-BFGS-B\", 2]]\n    expected = [[\"scipy_slsqp\"], [\"scipy_slsqp\", \"scipy_lbfgsb\"]]\n    for inp, exp in zip(inputs, expected, strict=False):\n        assert propose_alternatives(inp[0], possibilities, number=inp[1]) == exp\n"
  },
  {
    "path": "tests/optimagic/visualization/test_backends.py",
    "content": "import numpy as np\nimport pytest\n\nfrom optimagic.exceptions import InvalidPlottingBackendError, NotInstalledError\nfrom optimagic.visualization.backends import (\n    BACKEND_AVAILABILITY_AND_LINE_PLOT_FUNCTION,\n    line_plot,\n)\nfrom optimagic.visualization.plotting_utilities import LineData\n\n\n@pytest.fixture()\ndef sample_lines():\n    lines = [\n        LineData(x=np.array([0, 1, 2]), y=np.array([0, 1, 2])),\n        LineData(x=np.array([0, 1, 2]), y=np.array([2, 1, 0])),\n    ]\n    return lines\n\n\n@pytest.mark.parametrize(\"backend\", BACKEND_AVAILABILITY_AND_LINE_PLOT_FUNCTION.keys())\ndef test_line_plot_all_backends(sample_lines, backend, close_mpl_figures):\n    line_plot(sample_lines, backend=backend)\n\n\ndef test_line_plot_invalid_backend(sample_lines):\n    with pytest.raises(InvalidPlottingBackendError):\n        line_plot(sample_lines, backend=\"bla\")\n\n\ndef test_line_plot_unavailable_backend(sample_lines, monkeypatch):\n    # Use monkeypatch to simulate that 'matplotlib' backend is not installed.\n    monkeypatch.setitem(\n        BACKEND_AVAILABILITY_AND_LINE_PLOT_FUNCTION, \"matplotlib\", (False, None, None)\n    )\n\n    with pytest.raises(NotInstalledError):\n        line_plot(sample_lines, backend=\"matplotlib\")\n"
  },
  {
    "path": "tests/optimagic/visualization/test_convergence_plot.py",
    "content": "import pytest\n\nfrom optimagic import get_benchmark_problems\nfrom optimagic.benchmarking.process_benchmark_results import process_benchmark_results\nfrom optimagic.benchmarking.run_benchmark import run_benchmark\nfrom optimagic.visualization.convergence_plot import (\n    _check_only_allowed_subset_provided,\n    _extract_convergence_plot_lines,\n    convergence_plot,\n)\n\n\n@pytest.fixture()\ndef benchmark_results():\n    problems = get_benchmark_problems(\"example\")\n    stop_after_10 = {\n        \"stopping_max_criterion_evaluations\": 10,\n        \"stopping_max_iterations\": 10,\n    }\n    optimizers = {\n        \"lbfgsb\": {\"algorithm\": \"scipy_lbfgsb\", \"algo_options\": stop_after_10},\n        \"nm\": {\"algorithm\": \"scipy_neldermead\", \"algo_options\": stop_after_10},\n    }\n    results = run_benchmark(\n        problems,\n        optimizers,\n        n_cores=1,  # must be 1 for the test to work\n    )\n    return problems, results\n\n\ndef test_convergence_plot_default_options(benchmark_results):\n    problems, results = benchmark_results\n\n    convergence_plot(\n        problems=problems,\n        results=results,\n        problem_subset=[\"bard_good_start\"],\n    )\n\n\n# integration test to make sure non default argument do not throw Errors\nprofile_options = [\n    {\"n_cols\": 3},\n    {\"distance_measure\": \"parameter_distance\"},\n    {\"monotone\": False},\n    {\"normalize_distance\": False},\n    {\"runtime_measure\": \"walltime\"},\n    {\"runtime_measure\": \"n_batches\"},\n    {\"stopping_criterion\": \"x\"},\n    {\"stopping_criterion\": \"x_and_y\"},\n    {\"stopping_criterion\": \"x_or_y\"},\n    {\"x_precision\": 1e-5},\n    {\"y_precision\": 1e-5},\n    {\"backend\": \"matplotlib\"},\n    {\"backend\": \"bokeh\"},\n    {\"backend\": \"altair\"},\n]\n\n\n@pytest.mark.parametrize(\"options\", profile_options)\n@pytest.mark.parametrize(\"grid\", [True, False])\ndef test_convergence_plot_options(options, grid, 
benchmark_results, close_mpl_figures):\n    problems, results = benchmark_results\n\n    convergence_plot(\n        problems=problems,\n        results=results,\n        problem_subset=[\"bard_good_start\"],\n        combine_plots_in_grid=grid,\n        **options,\n    )\n\n\ndef test_convergence_plot_stopping_criterion_none(benchmark_results):\n    problems, results = benchmark_results\n\n    with pytest.raises(UnboundLocalError):\n        convergence_plot(\n            problems=problems,\n            results=results,\n            problem_subset=[\"bard_good_start\"],\n            stopping_criterion=None,\n        )\n\n\ndef test_check_only_allowed_subset_provided_none():\n    allowed = [\"a\", \"b\", \"c\"]\n    _check_only_allowed_subset_provided(None, allowed, \"name\")\n\n\ndef test_check_only_allowed_subset_provided_all_included():\n    allowed = [\"a\", \"b\", \"c\"]\n    _check_only_allowed_subset_provided([\"a\", \"b\"], allowed, \"name\")\n\n\ndef test_check_only_allowed_subset_provided_missing():\n    allowed = [\"a\", \"b\", \"c\"]\n    with pytest.raises(ValueError):\n        _check_only_allowed_subset_provided([\"d\"], allowed, \"name\")\n\n\ndef test_extract_convergence_plot_lines(benchmark_results):\n    problems, results = benchmark_results\n\n    df, _ = process_benchmark_results(\n        problems=problems, results=results, stopping_criterion=\"y\"\n    )\n\n    lines_list, titles = _extract_convergence_plot_lines(\n        df=df,\n        problems=problems,\n        runtime_measure=\"n_evaluations\",\n        outcome=\"criterion_normalized\",\n        palette=[\"red\", \"green\", \"blue\"],\n        combine_plots_in_grid=True,\n        backend=\"bla\",\n    )\n\n    assert isinstance(lines_list, list) and isinstance(titles, list)\n    assert len(lines_list) == len(titles) == len(problems)\n\n    for subplot_lines in lines_list:\n        assert isinstance(subplot_lines, list) and len(subplot_lines) == 2\n        assert subplot_lines[0].name == 
\"lbfgsb\"\n        assert subplot_lines[1].name == \"nm\"\n        assert subplot_lines[0].color == \"red\"\n        assert subplot_lines[1].color == \"green\"\n"
  },
  {
    "path": "tests/optimagic/visualization/test_deviation_plot.py",
    "content": "import pytest\n\nfrom optimagic import get_benchmark_problems\nfrom optimagic.benchmarking.run_benchmark import run_benchmark\nfrom optimagic.visualization.deviation_plot import (\n    deviation_plot,\n)\n\n# integration test to make sure non default argument do not throw Errors\nprofile_options = [\n    {\"distance_measure\": \"parameter_distance\"},\n    {\"distance_measure\": \"criterion\"},\n    {\"monotone\": True},\n    {\"monotone\": False},\n    {\"runtime_measure\": \"n_evaluations\"},\n    {\"runtime_measure\": \"n_batches\"},\n]\n\n\n@pytest.mark.parametrize(\"options\", profile_options)\ndef test_convergence_plot_options(options):\n    problems = get_benchmark_problems(\"example\")\n    stop_after_10 = {\n        \"stopping_max_criterion_evaluations\": 10,\n        \"stopping_max_iterations\": 10,\n    }\n    optimizers = {\n        \"lbfgsb\": {\"algorithm\": \"scipy_lbfgsb\", \"algo_options\": stop_after_10},\n        \"nm\": {\"algorithm\": \"scipy_neldermead\", \"algo_options\": stop_after_10},\n    }\n    results = run_benchmark(\n        problems,\n        optimizers,\n        n_cores=1,  # must be 1 for the test to work\n    )\n\n    deviation_plot(problems=problems, results=results, **options)\n"
  },
  {
    "path": "tests/optimagic/visualization/test_history_plots.py",
    "content": "import itertools\nfrom pathlib import Path\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_array_equal\n\nimport optimagic as om\nfrom optimagic.logging import SQLiteLogOptions\nfrom optimagic.optimization.optimize import minimize\nfrom optimagic.parameters.bounds import Bounds\nfrom optimagic.visualization.backends import (\n    BACKEND_AVAILABILITY_AND_LINE_PLOT_FUNCTION,\n)\nfrom optimagic.visualization.history_plots import (\n    LineData,\n    _extract_criterion_plot_lines,\n    _extract_params_plot_lines,\n    _harmonize_inputs_to_dict,\n    _PlottingMultistartHistory,\n    _retrieve_optimization_data_from_results,\n    _retrieve_optimization_data_from_single_result,\n    criterion_plot,\n    params_plot,\n)\n\n\n@pytest.fixture()\ndef minimize_result():\n    bounds = Bounds(soft_lower=np.full(5, -1), soft_upper=np.full(5, 6))\n    out = {}\n    for multistart in [True, False]:\n        res = []\n        for algorithm in [\"scipy_neldermead\", \"scipy_lbfgsb\"]:\n            _res = minimize(\n                fun=lambda x: x @ x,\n                params=np.arange(5),\n                algorithm=algorithm,\n                bounds=bounds,\n                multistart=(\n                    om.MultistartOptions(n_samples=1000, convergence_max_discoveries=5)\n                    if multistart\n                    else None\n                ),\n            )\n            res.append(_res)\n        out[multistart] = res\n    return out\n\n\n# ======================================================================================\n# Params plot\n# ======================================================================================\n\n\nTEST_CASES = list(\n    itertools.product(\n        [True, False],  # multistart\n        [None, lambda x: x[:2]],  # selector\n        [None, 50],  # max_evaluations\n        [True, False],  # show_exploration\n    )\n)\n\n\n@pytest.mark.parametrize(\n    \"multistart, selector, max_evaluations, 
show_exploration\", TEST_CASES\n)\ndef test_params_plot_multistart(\n    minimize_result, multistart, selector, max_evaluations, show_exploration\n):\n    for _res in minimize_result[multistart]:\n        params_plot(\n            _res,\n            selector=selector,\n            max_evaluations=max_evaluations,\n            show_exploration=show_exploration,\n        )\n\n\n# ======================================================================================\n# Test criterion plot\n# ======================================================================================\n\n\nTEST_CASES = list(itertools.product([True, False], repeat=4))\n\n\n@pytest.mark.parametrize(\n    \"multistart, monotone, stack_multistart, exploration\", TEST_CASES\n)\ndef test_criterion_plot_list_input(\n    minimize_result, multistart, monotone, stack_multistart, exploration\n):\n    res = minimize_result[multistart]\n\n    criterion_plot(\n        res,\n        monotone=monotone,\n        stack_multistart=stack_multistart,\n        show_exploration=exploration,\n    )\n\n\ndef test_criterion_plot_name_input(minimize_result):\n    result = minimize_result[False]\n    criterion_plot(result[0], names=\"neldermead\", palette=\"blue\")\n\n\ndef test_criterion_plot_wrong_results():\n    with pytest.raises(TypeError):\n        criterion_plot([10, np.array([1, 2, 3])])\n\n\ndef test_criterion_plot_different_input_types():\n    bounds = Bounds(soft_lower=np.full(5, -1), soft_upper=np.full(5, 6))\n    # logged result\n    minimize(\n        fun=lambda x: x @ x,\n        params=np.arange(5),\n        algorithm=\"scipy_lbfgsb\",\n        bounds=bounds,\n        multistart=om.MultistartOptions(n_samples=1000, convergence_max_discoveries=5),\n        logging=SQLiteLogOptions(\"test.db\", fast_logging=True),\n    )\n\n    res = minimize(\n        fun=lambda x: x @ x,\n        params=np.arange(5),\n        algorithm=\"scipy_lbfgsb\",\n        bounds=bounds,\n        
multistart=om.MultistartOptions(n_samples=1000, convergence_max_discoveries=5),\n    )\n\n    results = [\"test.db\", res]\n\n    criterion_plot(results)\n    criterion_plot(results, monotone=True)\n    criterion_plot(results, stack_multistart=True)\n    criterion_plot(results, monotone=True, stack_multistart=True)\n    criterion_plot(results, show_exploration=True)\n    criterion_plot(\"test.db\")\n\n\ndef test_criterion_plot_wrong_inputs():\n    with pytest.raises(ValueError):\n        criterion_plot(\"bla\", names=[1, 2])\n\n    with pytest.raises(ValueError):\n        criterion_plot([\"bla\", \"bla\"], names=\"blub\")\n\n\n@pytest.mark.parametrize(\"backend\", BACKEND_AVAILABILITY_AND_LINE_PLOT_FUNCTION.keys())\ndef test_criterion_plot_different_backends(minimize_result, backend, close_mpl_figures):\n    res = minimize_result[False][0]\n    criterion_plot(res, backend=backend)\n\n\n@pytest.mark.parametrize(\"backend\", BACKEND_AVAILABILITY_AND_LINE_PLOT_FUNCTION.keys())\ndef test_params_plot_different_backends(minimize_result, backend, close_mpl_figures):\n    res = minimize_result[False][0]\n    params_plot(res, backend=backend)\n\n\ndef test_harmonize_inputs_to_dict_single_result():\n    res = minimize(fun=lambda x: x @ x, params=np.arange(5), algorithm=\"scipy_lbfgsb\")\n    assert _harmonize_inputs_to_dict(results=res, names=None) == {\"0\": res}\n\n\ndef test_harmonize_inputs_to_dict_single_result_with_name():\n    res = minimize(fun=lambda x: x @ x, params=np.arange(5), algorithm=\"scipy_lbfgsb\")\n    assert _harmonize_inputs_to_dict(results=res, names=\"bla\") == {\"bla\": res}\n\n\ndef test_harmonize_inputs_to_dict_list_results():\n    res = minimize(fun=lambda x: x @ x, params=np.arange(5), algorithm=\"scipy_lbfgsb\")\n    results = [res, res]\n    assert _harmonize_inputs_to_dict(results=results, names=None) == {\n        \"0\": res,\n        \"1\": res,\n    }\n\n\ndef test_harmonize_inputs_to_dict_dict_input():\n    res = minimize(fun=lambda x: x @ 
x, params=np.arange(5), algorithm=\"scipy_lbfgsb\")\n    results = {\"bla\": res, om.algos.scipy_lbfgsb(): res, om.algos.scipy_neldermead: res}\n    got = _harmonize_inputs_to_dict(results=results, names=None)\n    expected = {\"bla\": res, \"scipy_lbfgsb\": res, \"scipy_neldermead\": res}\n    assert got == expected\n\n\ndef test_harmonize_inputs_to_dict_dict_input_with_names():\n    res = minimize(fun=lambda x: x @ x, params=np.arange(5), algorithm=\"scipy_lbfgsb\")\n    results = {\"bla\": res, \"blub\": res}\n    got = _harmonize_inputs_to_dict(results=results, names=[\"a\", \"b\"])\n    expected = {\"a\": res, \"b\": res}\n    assert got == expected\n\n\ndef test_harmonize_inputs_to_dict_invalid_names():\n    results = [None]\n    names = [\"a\", \"b\"]\n    with pytest.raises(ValueError):\n        _harmonize_inputs_to_dict(results=results, names=names)\n\n\ndef test_harmonize_inputs_to_dict_str_input():\n    assert _harmonize_inputs_to_dict(results=\"test.db\", names=None) == {\"0\": \"test.db\"}\n\n\ndef test_harmonize_inputs_to_dict_path_input():\n    path = Path(\"test.db\")\n    assert _harmonize_inputs_to_dict(results=path, names=None) == {\"0\": path}\n\n\ndef _compare_plotting_multistart_history_with_result(\n    data: _PlottingMultistartHistory, res: om.OptimizeResult, res_name: str\n):\n    assert_array_equal(data.history.fun, res.history.fun)\n    assert data.name == res_name\n    assert_array_equal(data.start_params, res.start_params)\n    assert data.is_multistart == (res.multistart_info is not None)\n\n\ndef test_retrieve_data_from_result(minimize_result):\n    res = minimize_result[False][0]\n    results = {\"bla\": res}\n\n    data = _retrieve_optimization_data_from_results(\n        results=results, stack_multistart=False, show_exploration=False, plot_name=\"bla\"\n    )\n\n    assert isinstance(data, list) and len(data) == 1\n    assert isinstance(data[0], _PlottingMultistartHistory)\n    _compare_plotting_multistart_history_with_result(\n    
    data=data[0], res=res, res_name=\"bla\"\n    )\n\n\ndef test_retrieve_data_from_logged_result(tmp_path):\n    res = minimize(\n        fun=lambda x: x @ x,\n        params=np.arange(2),\n        algorithm=\"scipy_lbfgsb\",\n        logging=SQLiteLogOptions(tmp_path / \"test.db\", fast_logging=True),\n    )\n    results = {\"logged\": tmp_path / \"test.db\"}\n\n    data = _retrieve_optimization_data_from_results(\n        results=results, stack_multistart=False, show_exploration=False, plot_name=\"bla\"\n    )\n\n    assert isinstance(data, list) and len(data) == 1\n    assert isinstance(data[0], _PlottingMultistartHistory)\n    _compare_plotting_multistart_history_with_result(\n        data=data[0], res=res, res_name=\"logged\"\n    )\n\n\n@pytest.mark.parametrize(\"stack_multistart\", [True, False])\ndef test_retrieve_data_from_multistart_result(minimize_result, stack_multistart):\n    res = minimize_result[True][0]\n    results = {\"multistart\": res}\n\n    data = _retrieve_optimization_data_from_results(\n        results=results,\n        stack_multistart=stack_multistart,\n        show_exploration=False,\n        plot_name=\"bla\",\n    )\n\n    assert isinstance(data, list) and len(data) == 1\n\n    assert data[0].is_multistart\n    assert len(data[0].local_histories) == 5\n\n    if stack_multistart:\n        assert_array_equal(\n            data[0].stacked_local_histories.fun,\n            np.concatenate([hist.fun for hist in data[0].local_histories]),\n        )\n    else:\n        assert data[0].stacked_local_histories is None\n\n\ndef test_extract_criterion_plot_lines(minimize_result):\n    res = minimize_result[True][0]\n    results = {\"multistart\": res}\n    data = _retrieve_optimization_data_from_results(\n        results=results, stack_multistart=False, show_exploration=False, plot_name=\"bla\"\n    )\n\n    palette_cycle = itertools.cycle([\"red\", \"green\", \"blue\"])\n\n    lines, multistart_lines = _extract_criterion_plot_lines(\n        
data=data,\n        max_evaluations=None,\n        palette_cycle=palette_cycle,\n        stack_multistart=False,\n        monotone=False,\n    )\n\n    history = res.history.fun\n\n    assert isinstance(lines, list) and len(lines) == 1\n    assert isinstance(lines[0], LineData)\n\n    assert_array_equal(lines[0].x, np.arange(len(history)))\n    assert_array_equal(lines[0].y, history)\n\n    assert isinstance(multistart_lines, list) and all(\n        isinstance(line, LineData) for line in multistart_lines\n    )\n    assert len(multistart_lines) == 5\n\n\ndef test_extract_params_plot_lines(minimize_result):\n    res = minimize_result[False][0]\n    data = _retrieve_optimization_data_from_single_result(\n        result=res,\n        stack_multistart=False,\n        show_exploration=False,\n        plot_name=\"params_plot\",\n    )\n\n    palette_cycle = itertools.cycle([\"red\", \"green\", \"blue\"])\n\n    lines = _extract_params_plot_lines(\n        data=data,\n        selector=None,\n        max_evaluations=None,\n        palette_cycle=palette_cycle,\n    )\n\n    params = np.array(res.history.params)\n    num_params = params.shape[1]\n\n    assert isinstance(lines, list) and len(lines) == num_params\n    assert all(isinstance(line, LineData) for line in lines)\n\n    for i, line in enumerate(lines):\n        assert_array_equal(line.x, np.arange(len(params)))\n        assert_array_equal(line.y, params[:, i])\n"
  },
  {
    "path": "tests/optimagic/visualization/test_plotting_utilities.py",
    "content": "import base64\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_array_equal\n\nfrom optimagic.visualization.plotting_utilities import (\n    _decode_base64_data,\n    _ensure_array_from_plotly_data,\n)\n\n\ndef test_decode_base64_data():\n    expected = np.arange(10, dtype=float)\n    encoded = base64.b64encode(expected.tobytes()).decode(\"ascii\")\n    got = _decode_base64_data(encoded, dtype=\"float\")\n    assert_array_equal(expected, got)\n\n\ndef test_ensure_array_from_plotly_data_case_array():\n    expected = np.arange(10, dtype=float)\n    got = _ensure_array_from_plotly_data(expected)\n    assert_array_equal(expected, got)\n\n\ndef test_ensure_array_from_plotly_data_case_list():\n    expected = np.arange(10, dtype=float)\n    got = _ensure_array_from_plotly_data(expected.tolist())\n    assert_array_equal(expected, got)\n\n\ndef test_ensure_array_from_plotly_data_case_base64():\n    expected = np.arange(10, dtype=float)\n    encoded = base64.b64encode(expected.tobytes()).decode(\"ascii\")\n    got = _ensure_array_from_plotly_data({\"bdata\": encoded, \"dtype\": \"float\"})\n    assert_array_equal(expected, got)\n\n\n@pytest.mark.parametrize(\n    \"invalid_input\",\n    [\n        None,\n        \"not a valid input\",\n        1234,\n        [{\"a\": 1}, {\"b\": 2}],\n    ],\n)\ndef test_ensure_array_from_plotly_data_case_invalid(invalid_input):\n    with pytest.raises(ValueError, match=\"Failed to convert input to numpy array.\"):\n        _ensure_array_from_plotly_data(invalid_input)\n"
  },
  {
    "path": "tests/optimagic/visualization/test_profile_plot.py",
    "content": "import itertools\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom numpy.testing import assert_allclose\n\nfrom optimagic import get_benchmark_problems\nfrom optimagic.benchmarking.run_benchmark import run_benchmark\nfrom optimagic.visualization.profile_plot import (\n    _determine_alpha_grid,\n    _extract_profile_plot_lines,\n    _find_switch_points,\n    create_solution_times,\n    profile_plot,\n)\n\n\n@pytest.fixture()\ndef performance_ratios():\n    df = pd.DataFrame(\n        data={\"algo1\": [1.0, 1.0, 4.0], \"algo2\": [1.5, np.inf, 1.0]},\n        index=[\"prob1\", \"prob2\", \"prob3\"],\n    )\n    return df\n\n\ndef test_find_switch_points(performance_ratios):\n    res = _find_switch_points(performance_ratios)\n    expected = np.array([1.0, 1.5, 4.0])\n    np.testing.assert_array_almost_equal(res, expected)\n\n\ndef test_determine_alpha_grid(performance_ratios):\n    res = _determine_alpha_grid(performance_ratios)\n    expected = np.array([1.0 + 1e-10, 1.25, 1.5, 2.75, 4.0, 4.0 * 1.025, 4.0 * 1.05])\n    np.testing.assert_array_almost_equal(res, expected)\n\n\ndef test_create_solution_times_n_evaluations():\n    df = pd.DataFrame(\n        columns=[\"problem\", \"algorithm\", \"n_evaluations\"],\n        data=[\n            [\"prob1\", \"algo1\", 0],\n            [\"prob1\", \"algo1\", 1],\n            [\"prob1\", \"algo2\", 2],\n            [\"prob1\", \"algo2\", 3],\n            [\"prob2\", \"algo1\", 5],\n            [\"prob2\", \"algo2\", 0],\n            [\"prob2\", \"algo2\", 1],\n        ],\n    )\n    info = pd.DataFrame(\n        {\n            \"algo1\": [True, True],\n            \"algo2\": [True, False],\n        },\n        index=[\"prob1\", \"prob2\"],\n    )\n    expected = pd.DataFrame(\n        {\n            \"algo1\": [1.0, 5],\n            \"algo2\": [3.0, np.inf],\n        },\n        index=pd.Index([\"prob1\", \"prob2\"], name=\"problem\"),\n    )\n    expected.columns.name = \"algorithm\"\n\n    res = 
create_solution_times(\n        df=df, runtime_measure=\"n_evaluations\", converged_info=info\n    )\n    pd.testing.assert_frame_equal(res, expected)\n\n\ndef test_create_solution_times_n_batches():\n    df = pd.DataFrame(\n        columns=[\"problem\", \"algorithm\", \"n_batches\"],\n        data=[\n            [\"prob1\", \"algo1\", 0],\n            [\"prob1\", \"algo1\", 1],\n            [\"prob1\", \"algo2\", 2],\n            [\"prob1\", \"algo2\", 2],\n            [\"prob2\", \"algo1\", 1],\n            [\"prob2\", \"algo2\", 0],\n            [\"prob2\", \"algo2\", 0],\n        ],\n    )\n    info = pd.DataFrame(\n        {\n            \"algo1\": [True, True],\n            \"algo2\": [True, False],\n        },\n        index=[\"prob1\", \"prob2\"],\n    )\n    expected = pd.DataFrame(\n        {\n            \"algo1\": [1.0, 1],\n            \"algo2\": [2.0, np.inf],\n        },\n        index=pd.Index([\"prob1\", \"prob2\"], name=\"problem\"),\n    )\n    expected.columns.name = \"algorithm\"\n\n    res = create_solution_times(df=df, runtime_measure=\"n_batches\", converged_info=info)\n    pd.testing.assert_frame_equal(res, expected)\n\n\ndef test_create_solution_times_walltime():\n    df = pd.DataFrame(\n        columns=[\"problem\", \"algorithm\", \"n_evaluations\", \"walltime\"],\n        data=[\n            [\"prob1\", \"algo1\", 0, 0],\n            [\"prob1\", \"algo1\", 1, 1],\n            [\"prob1\", \"algo2\", 2, 2],\n            [\"prob1\", \"algo2\", 3, 3],\n            [\"prob2\", \"algo1\", 5, 5],\n            [\"prob2\", \"algo2\", 0, 0],\n            [\"prob2\", \"algo2\", 1, 1],\n        ],\n    )\n    info = pd.DataFrame(\n        {\n            \"algo1\": [True, True],\n            \"algo2\": [True, False],\n        },\n        index=[\"prob1\", \"prob2\"],\n    )\n    expected = pd.DataFrame(\n        {\n            \"algo1\": [1.0, 5],\n            \"algo2\": [3.0, np.inf],\n        },\n        index=pd.Index([\"prob1\", \"prob2\"], 
name=\"problem\"),\n    )\n    expected.columns.name = \"algorithm\"\n\n    res = create_solution_times(df=df, runtime_measure=\"walltime\", converged_info=info)\n    pd.testing.assert_frame_equal(res, expected)\n\n\ndef test_extract_profile_plot_lines():\n    solution_times = pd.DataFrame(\n        {\n            \"algo1\": [1.0, 5],\n            \"algo2\": [3.0, np.inf],\n        },\n        index=[\"prob1\", \"prob2\"],\n    )\n    solution_times.columns.name = \"algorithm\"\n\n    info = pd.DataFrame(\n        {\n            \"algo1\": [True, True],\n            \"algo2\": [True, False],\n        },\n        index=[\"prob1\", \"prob2\"],\n    )\n\n    palette_cycle = itertools.cycle([\"red\", \"green\", \"blue\"])\n    lines = _extract_profile_plot_lines(\n        solution_times=solution_times,\n        normalize_runtime=False,\n        converged_info=info,\n        palette_cycle=palette_cycle,\n    )\n\n    assert isinstance(lines, list) and len(lines) == 2\n\n    assert_allclose(lines[0].x, np.array([1.0, 2.0, 3.0, 4.0, 5.0, 5.125, 5.25]))\n    assert_allclose(lines[0].y, np.array([0.5, 0.5, 0.5, 0.5, 1.0, 1.0, 1.0]))\n    assert lines[0].name == \"algo1\"\n\n    assert_allclose(lines[1].x, np.array([1.0, 2.0, 3.0, 4.0, 5.0, 5.125, 5.25]))\n    assert_allclose(lines[1].y, np.array([0.0, 0.0, 0.5, 0.5, 0.5, 0.5, 0.5]))\n    assert lines[1].name == \"algo2\"\n\n\n# integration test to make sure non default argument do not throw Errors\nprofile_options = [\n    {\"runtime_measure\": \"walltime\"},\n    {\"runtime_measure\": \"n_batches\"},\n    {\"stopping_criterion\": \"x_or_y\"},\n    {\"backend\": \"matplotlib\"},\n    {\"backend\": \"bokeh\"},\n    {\"backend\": \"altair\"},\n]\n\n\n@pytest.mark.parametrize(\"options\", profile_options)\ndef test_profile_plot_options(options, close_mpl_figures):\n    problems = get_benchmark_problems(\"example\")\n    stop_after_10 = {\n        \"stopping_max_criterion_evaluations\": 10,\n        \"stopping_max_iterations\": 
10,\n    }\n    optimizers = {\n        \"lbfgsb\": {\"algorithm\": \"scipy_lbfgsb\", \"algo_options\": stop_after_10},\n        \"neldermead\": {\n            \"algorithm\": \"scipy_neldermead\",\n            \"algo_options\": stop_after_10,\n        },\n    }\n    results = run_benchmark(\n        problems,\n        optimizers,\n        n_cores=1,  # must be 1 for the test to work\n    )\n\n    profile_plot(problems=problems, results=results, **options)\n"
  },
  {
    "path": "tests/optimagic/visualization/test_slice_plot.py",
    "content": "import numpy as np\nimport pytest\n\nfrom optimagic import mark\nfrom optimagic.parameters.bounds import Bounds\nfrom optimagic.visualization.backends import BACKEND_AVAILABILITY_AND_LINE_PLOT_FUNCTION\nfrom optimagic.visualization.plotting_utilities import LineData, MarkerData\nfrom optimagic.visualization.slice_plot import (\n    _extract_slice_plot_lines_and_labels,\n    _get_plot_data,\n    _get_processed_func_and_func_eval,\n    slice_plot,\n)\n\n\n@pytest.fixture()\ndef fixed_inputs():\n    params = {\"alpha\": 0, \"beta\": 0, \"gamma\": 0, \"delta\": 0}\n    bounds = Bounds(\n        lower={name: -5 for name in params},\n        upper={name: i + 2 for i, name in enumerate(params)},\n    )\n\n    out = {\n        \"params\": params,\n        \"bounds\": bounds,\n    }\n    return out\n\n\n@mark.likelihood\ndef sphere_loglike(params):\n    x = np.array(list(params.values()))\n    return x**2\n\n\ndef sphere(params):\n    x = np.array(list(params.values()))\n    return x @ x\n\n\nKWARGS = [\n    {},\n    {\"plots_per_row\": 4},\n    {\"selector\": lambda x: [x[\"alpha\"], x[\"beta\"]]},\n    {\"param_names\": {\"alpha\": \"Alpha\", \"beta\": \"Beta\"}},\n    {\"share_x\": True},\n    {\"share_y\": False},\n    {\"return_dict\": True},\n    {\"title\": \"Slice Plot\"},\n]\nparametrization = [\n    (func, kwargs) for func in [sphere_loglike, sphere] for kwargs in KWARGS\n]\n\n\n@pytest.mark.parametrize(\"backend\", BACKEND_AVAILABILITY_AND_LINE_PLOT_FUNCTION.keys())\n@pytest.mark.parametrize(\"func, kwargs\", parametrization)\ndef test_slice_plot(fixed_inputs, func, backend, kwargs, close_mpl_figures):\n    slice_plot(\n        func=func,\n        backend=backend,\n        **fixed_inputs,\n        **kwargs,\n    )\n\n\ndef test_extract_slice_plot_lines(fixed_inputs):\n    params, bounds = fixed_inputs[\"params\"], fixed_inputs[\"bounds\"]\n\n    func, func_eval = _get_processed_func_and_func_eval(\n        sphere, func_kwargs=None, params=params\n 
   )\n\n    plot_data, internal_params = _get_plot_data(\n        func=func,\n        params=params,\n        bounds=bounds,\n        func_eval=func_eval,\n        selector=None,\n        n_gridpoints=10,\n        batch_evaluator=\"joblib\",\n        n_cores=1,\n    )\n\n    lines_list, marker_list, xlabels, ylabels = _extract_slice_plot_lines_and_labels(\n        plot_data=plot_data,\n        internal_params=internal_params,\n        func_eval=func_eval,\n        param_names={\"alpha\": \"Alpha\"},\n        color=None,\n    )\n\n    assert isinstance(lines_list, list) and len(lines_list) == len(params)\n    assert all(\n        isinstance(subplot_lines, list)\n        and len(subplot_lines) == 1\n        and isinstance(subplot_lines[0], LineData)\n        for subplot_lines in lines_list\n    )\n\n    assert isinstance(marker_list, list) and len(marker_list) == len(params)\n    assert all(isinstance(marker, MarkerData) for marker in marker_list)\n    for i, k in enumerate(params):\n        assert marker_list[i].x == params[k]\n\n    assert isinstance(xlabels, list)\n    assert xlabels == [\"Alpha\", \"beta\", \"gamma\", \"delta\"]\n\n    assert isinstance(ylabels, list)\n    assert all(ylabel == \"Function Value\" for ylabel in ylabels)\n"
  },
  {
    "path": "tests/optimagic/visualization/test_slice_plot_3d.py",
    "content": "import numpy as np\nimport pytest\n\nfrom optimagic import mark\nfrom optimagic.parameters.bounds import Bounds\nfrom optimagic.parameters.conversion import get_converter\nfrom optimagic.visualization.slice_plot_3d import (\n    Projection,\n    generate_evaluation_points,\n    plot_data_cache,\n    slice_plot_3d,\n)\n\n\n@pytest.fixture()\ndef fixed_inputs():\n    params = {\"alpha\": 0, \"beta\": 0, \"gamma\": 0, \"delta\": 0}\n    bounds = Bounds(\n        lower={name: -5 for name in params},\n        upper={name: i for i, name in enumerate(params)},\n    )\n    return {\"params\": params, \"bounds\": bounds}\n\n\n@mark.likelihood\ndef sphere_loglike(params):\n    x = np.array(list(params.values()))\n    return x**2\n\n\ndef sphere(params):\n    x = np.array(list(params.values()))\n    return x @ x\n\n\nkwargs_slice_plot_3d = [\n    {},\n    {\"projection\": \"contour\"},\n    {\"projection\": \"surface\"},\n    {\"projection\": \"surface\", \"n_gridpoints\": 100},\n    {\"projection\": {\"lower\": \"contour\", \"upper\": \"contour\"}},\n    {\"projection\": {\"lower\": \"surface\", \"upper\": \"contour\"}},\n    {\n        \"projection\": {\"lower\": \"contour\", \"upper\": \"surface\"},\n        \"selector\": lambda x: [x[\"alpha\"], x[\"beta\"], x[\"delta\"]],\n    },\n    {\"selector\": lambda x: [x[\"alpha\"], x[\"beta\"]]},\n    {\"param_names\": {\"alpha\": \"Alpha\", \"beta\": \"Beta\"}},\n    {\"layout_kwargs\": {\"width\": 800, \"height\": 600, \"title\": \"Custom Layout\"}},\n    {\n        \"projection\": \"surface\",\n        \"selector\": lambda x: [x[\"alpha\"], x[\"gamma\"]],\n    },\n    {\n        \"projection\": \"contour\",\n        \"selector\": lambda x: [x[\"alpha\"], x[\"delta\"]],\n    },\n    {\n        \"projection\": \"surface\",\n        \"plot_kwargs\": {\"surface_plot\": {\"colorscale\": \"Viridis\", \"opacity\": 0.9}},\n    },\n    {\n        \"projection\": \"contour\",\n        \"plot_kwargs\": {\"contour_plot\": 
{\"colorscale\": \"Viridis\", \"showscale\": True}},\n    },\n    {\n        \"selector\": lambda x: [x[\"alpha\"], x[\"beta\"], x[\"gamma\"]],\n        \"make_subplot_kwargs\": {\"rows\": 1, \"cols\": 3, \"horizontal_spacing\": 0.01},\n    },\n    {\n        \"param_names\": {\"alpha\": \"α\", \"beta\": \"β\", \"gamma\": \"γ\", \"delta\": \"δ\"},\n        \"n_gridpoints\": 10,\n        \"expand_yrange\": 2,\n    },\n    {\n        \"layout_kwargs\": {\n            \"template\": \"plotly_dark\",\n            \"xaxis_showgrid\": True,\n            \"yaxis_showgrid\": True,\n        }\n    },\n    {\n        \"plot_kwargs\": {\n            \"scatter_plot\": None,\n            \"line_plot\": {\"color_discrete_sequence\": [\"red\"], \"markers\": True},\n        }\n    },\n    {\"return_dict\": True},\n    {\n        \"return_dict\": True,\n        \"layout_kwargs\": {\n            \"template\": \"plotly_dark\",\n            \"xaxis_showgrid\": True,\n            \"yaxis_showgrid\": True,\n        },\n        \"plot_kwargs\": {\n            \"scatter_plot\": None,\n            \"line_plot\": {\"color_discrete_sequence\": [\"red\"], \"markers\": True},\n        },\n    },\n]\n\nparametrized_slice_plot_3d = [\n    (func, kwarg) for func in [sphere, sphere_loglike] for kwarg in kwargs_slice_plot_3d\n]\n\n\n@pytest.mark.parametrize(\"func, kwargs\", parametrized_slice_plot_3d)\ndef test_slice_plot_3d(fixed_inputs, func, kwargs):\n    slice_plot_3d(func=func, **fixed_inputs, **kwargs)\n\n\nkwargs_generate_evaluation_points = [\n    (\n        sphere,\n        5,\n        [\"alpha\"],\n        \"univariate\",\n        False,\n        [\n            [-5.0, 0.0, 0.0, 0.0],\n            [-3.75, 0.0, 0.0, 0.0],\n            [-2.5, 0.0, 0.0, 0.0],\n            [-1.25, 0.0, 0.0, 0.0],\n            [0.0, 0.0, 0.0, 0.0],\n        ],\n    ),\n    (\n        sphere,\n        3,\n        [\"alpha\", \"gamma\"],\n        \"contour\",\n        False,\n        [\n            [-5.0, 0.0, 
0.0, 0.0],\n            [-2.5, 0.0, 0.0, 0.0],\n            [0.0, 0.0, 0.0, 0.0],\n            [0.0, 0.0, -5.0, 0.0],\n            [0.0, 0.0, -1.5, 0.0],\n            [0.0, 0.0, 2.0, 0.0],\n            [-5.0, 0.0, -5.0, 0.0],\n            [-2.5, 0.0, -5.0, 0.0],\n            [0.0, 0.0, -5.0, 0.0],\n            [-5.0, 0.0, -1.5, 0.0],\n            [-2.5, 0.0, -1.5, 0.0],\n            [0.0, 0.0, -1.5, 0.0],\n            [-5.0, 0.0, 2.0, 0.0],\n            [-2.5, 0.0, 2.0, 0.0],\n            [0.0, 0.0, 2.0, 0.0],\n            [-5.0, 0.0, -5.0, 0.0],\n            [-5.0, 0.0, -1.5, 0.0],\n            [-5.0, 0.0, 2.0, 0.0],\n            [-2.5, 0.0, -5.0, 0.0],\n            [-2.5, 0.0, -1.5, 0.0],\n            [-2.5, 0.0, 2.0, 0.0],\n            [0.0, 0.0, -5.0, 0.0],\n            [0.0, 0.0, -1.5, 0.0],\n            [0.0, 0.0, 2.0, 0.0],\n        ],\n    ),\n    (\n        sphere,\n        5,\n        [\"beta\", \"delta\"],\n        \"surface\",\n        True,\n        [\n            [0.0, -5.0, 0.0, 0.0],\n            [0.0, -3.5, 0.0, 0.0],\n            [0.0, -2.0, 0.0, 0.0],\n            [0.0, -0.5, 0.0, 0.0],\n            [0.0, 1.0, 0.0, 0.0],\n            [0.0, 0.0, 0.0, -5.0],\n            [0.0, 0.0, 0.0, -3.0],\n            [0.0, 0.0, 0.0, -1.0],\n            [0.0, 0.0, 0.0, 1.0],\n            [0.0, 0.0, 0.0, 3.0],\n            [0.0, -5.0, 0.0, -5.0],\n            [0.0, -3.5, 0.0, -5.0],\n            [0.0, -2.0, 0.0, -5.0],\n            [0.0, -0.5, 0.0, -5.0],\n            [0.0, 1.0, 0.0, -5.0],\n            [0.0, -5.0, 0.0, -3.0],\n            [0.0, -3.5, 0.0, -3.0],\n            [0.0, -2.0, 0.0, -3.0],\n            [0.0, -0.5, 0.0, -3.0],\n            [0.0, 1.0, 0.0, -3.0],\n            [0.0, -5.0, 0.0, -1.0],\n            [0.0, -3.5, 0.0, -1.0],\n            [0.0, -2.0, 0.0, -1.0],\n            [0.0, -0.5, 0.0, -1.0],\n            [0.0, 1.0, 0.0, -1.0],\n            [0.0, -5.0, 0.0, 1.0],\n            [0.0, -3.5, 0.0, 1.0],\n            [0.0, -2.0, 0.0, 1.0],\n   
         [0.0, -0.5, 0.0, 1.0],\n            [0.0, 1.0, 0.0, 1.0],\n            [0.0, -5.0, 0.0, 3.0],\n            [0.0, -3.5, 0.0, 3.0],\n            [0.0, -2.0, 0.0, 3.0],\n            [0.0, -0.5, 0.0, 3.0],\n            [0.0, 1.0, 0.0, 3.0],\n            [0.0, -5.0, 0.0, -5.0],\n            [0.0, -5.0, 0.0, -3.0],\n            [0.0, -5.0, 0.0, -1.0],\n            [0.0, -5.0, 0.0, 1.0],\n            [0.0, -5.0, 0.0, 3.0],\n            [0.0, -3.5, 0.0, -5.0],\n            [0.0, -3.5, 0.0, -3.0],\n            [0.0, -3.5, 0.0, -1.0],\n            [0.0, -3.5, 0.0, 1.0],\n            [0.0, -3.5, 0.0, 3.0],\n            [0.0, -2.0, 0.0, -5.0],\n            [0.0, -2.0, 0.0, -3.0],\n            [0.0, -2.0, 0.0, -1.0],\n            [0.0, -2.0, 0.0, 1.0],\n            [0.0, -2.0, 0.0, 3.0],\n            [0.0, -0.5, 0.0, -5.0],\n            [0.0, -0.5, 0.0, -3.0],\n            [0.0, -0.5, 0.0, -1.0],\n            [0.0, -0.5, 0.0, 1.0],\n            [0.0, -0.5, 0.0, 3.0],\n            [0.0, 1.0, 0.0, -5.0],\n            [0.0, 1.0, 0.0, -3.0],\n            [0.0, 1.0, 0.0, -1.0],\n            [0.0, 1.0, 0.0, 1.0],\n            [0.0, 1.0, 0.0, 3.0],\n        ],\n    ),\n]\n\n\n@pytest.mark.parametrize(\n    \"func, n_points, selected_params, projection, grid_univariate, expected_points\",\n    kwargs_generate_evaluation_points,\n)\ndef test_generate_evaluation_points(\n    fixed_inputs,\n    func,\n    n_points,\n    selected_params,\n    projection,\n    grid_univariate,\n    expected_points,\n):\n    projection = Projection(projection)\n    params = fixed_inputs[\"params\"]\n    func_eval = func(params)\n\n    converter, internal_params = get_converter(\n        params=params,\n        constraints=None,\n        bounds=fixed_inputs[\"bounds\"],\n        func_eval=func_eval,\n        solver_type=\"value\",\n    )\n\n    params_data = {\n        name: np.linspace(\n            internal_params.lower_bounds[internal_params.names.index(name)],\n            
internal_params.upper_bounds[internal_params.names.index(name)],\n            n_points,\n        )\n        for name in selected_params\n    }\n\n    selected_indices = [list(params.keys()).index(param) for param in selected_params]\n    points = generate_evaluation_points(\n        projection,\n        selected_indices,\n        internal_params,\n        params_data,\n        converter,\n    )\n\n    points = [[point[key] for key in internal_params.names] for point in points]\n    np.testing.assert_allclose(points, expected_points, rtol=0.2)\n\n\nkwargs_plot_data_cache = [\n    (\n        sphere,\n        5,\n        [0],\n        \"univariate\",\n        [25, 14.0, 6.25, 1.5, 0],\n        {(\"alpha\",): [25, 14.0, 6.25, 1.5, 0]},\n    ),\n    (\n        sphere,\n        3,\n        [0, 2],\n        \"contour\",\n        [\n            25,\n            6.25,\n            0,\n            25,\n            2.25,\n            4,\n            50,\n            31.25,\n            25,\n            27.25,\n            8.5,\n            2.25,\n            29,\n            10.25,\n            4,\n            50,\n            27.25,\n            29,\n            31.25,\n            8.5,\n            10.25,\n            25,\n            2.25,\n            4,\n        ],\n        {\n            (\"alpha\",): [25, 6.25, 0],\n            (\"gamma\",): [25, 2.25, 4],\n            (\"alpha\", \"gamma\"): [50, 27.25, 29, 31.25, 8.5, 10.25, 25, 2.25, 4],\n        },\n    ),\n]\n\n\n@pytest.mark.parametrize(\n    \"func, n_points, selected_indices, projection, func_values, expected_values\",\n    kwargs_plot_data_cache,\n)\ndef test_evaluate_function_values(\n    fixed_inputs,\n    func,\n    n_points,\n    projection,\n    selected_indices,\n    func_values,\n    expected_values,\n):\n    projection = Projection(projection)\n\n    params = fixed_inputs[\"params\"]\n    func_eval = func(params)\n\n    converter, internal_params = get_converter(\n        params=params,\n        
constraints=None,\n        bounds=fixed_inputs[\"bounds\"],\n        func_eval=func_eval,\n        solver_type=\"value\",\n    )\n    plot_data = plot_data_cache(\n        projection, selected_indices, internal_params, func_values, n_points\n    )\n    assert plot_data == expected_values\n"
  }
]